ffplay.c
1 /*
2  * Copyright (c) 2003 Fabrice Bellard
3  *
4  * This file is part of FFmpeg.
5  *
6  * FFmpeg is free software; you can redistribute it and/or
7  * modify it under the terms of the GNU Lesser General Public
8  * License as published by the Free Software Foundation; either
9  * version 2.1 of the License, or (at your option) any later version.
10  *
11  * FFmpeg is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
14  * Lesser General Public License for more details.
15  *
16  * You should have received a copy of the GNU Lesser General Public
17  * License along with FFmpeg; if not, write to the Free Software
18  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
19  */
20
21 /**
22  * @file
23  * simple media player based on the FFmpeg libraries
24  */
25
26 #include "config.h"
27 #include <inttypes.h>
28 #include <math.h>
29 #include <limits.h>
30 #include <signal.h>
31 #include "libavutil/avstring.h"
32 #include "libavutil/colorspace.h"
33 #include "libavutil/mathematics.h"
34 #include "libavutil/pixdesc.h"
35 #include "libavutil/imgutils.h"
36 #include "libavutil/dict.h"
37 #include "libavutil/parseutils.h"
38 #include "libavutil/samplefmt.h"
39 #include "libavutil/avassert.h"
40 #include "libavutil/time.h"
41 #include "libavformat/avformat.h"
42 #include "libavdevice/avdevice.h"
43 #include "libswscale/swscale.h"
44 #include "libavutil/opt.h"
45 #include "libavcodec/avfft.h"
46 #include "libswresample/swresample.h"
47
48 #if CONFIG_AVFILTER
49 # include "libavfilter/avcodec.h"
50 # include "libavfilter/avfilter.h"
51 # include "libavfilter/buffersink.h"
52 # include "libavfilter/buffersrc.h"
53 #endif
54
55 #include <SDL.h>
56 #include <SDL_thread.h>
57
58 #include "cmdutils.h"
59
60 #include <assert.h>
61
62 const char program_name[] = "ffplay";
63 const int program_birth_year = 2003;
64
65 #define MAX_QUEUE_SIZE (15 * 1024 * 1024)
66 #define MIN_FRAMES 5
67
68 /* SDL audio buffer size, in samples. Should be small to have precise
69    A/V sync as SDL does not have hardware buffer fullness info. */
70 #define SDL_AUDIO_BUFFER_SIZE 1024
71
72 /* no AV sync correction is done if below the AV sync threshold */
73 #define AV_SYNC_THRESHOLD 0.01
74 /* no AV correction is done if too big error */
75 #define AV_NOSYNC_THRESHOLD 10.0
76
77 /* maximum audio speed change to get correct sync */
78 #define SAMPLE_CORRECTION_PERCENT_MAX 10
79
80 /* external clock speed adjustment constants for realtime sources based on buffer fullness */
81 #define EXTERNAL_CLOCK_SPEED_MIN  0.900
82 #define EXTERNAL_CLOCK_SPEED_MAX  1.010
83 #define EXTERNAL_CLOCK_SPEED_STEP 0.001
84
85 /* we use about AUDIO_DIFF_AVG_NB A-V differences to compute the average */
86 #define AUDIO_DIFF_AVG_NB   20
87
88 /* polls for possible required screen refresh at least this often, should be less than 1/fps */
89 #define REFRESH_RATE 0.01
90
91 /* NOTE: the size must be big enough to compensate for the hardware audio buffer size */
92 /* TODO: We assume that a decoded and resampled frame fits into this buffer */
93 #define SAMPLE_ARRAY_SIZE (8 * 65536)
94
95 #define CURSOR_HIDE_DELAY 1000000
96
97 static int64_t sws_flags = SWS_BICUBIC;
98
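/* Each queued packet carries the queue's 'serial' number, which is bumped every
   time the special flush packet is queued.  Consumers compare a packet's (or
   picture's) serial against the current queue serial to discard data decoded
   before the last flush. */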
99 typedef struct MyAVPacketList {
100     AVPacket pkt;
101     struct MyAVPacketList *next;
102     int serial;
103 } MyAVPacketList;
104
105 typedef struct PacketQueue {
106     MyAVPacketList *first_pkt, *last_pkt;
107     int nb_packets;
108     int size;
109     int abort_request;
110     int serial;
111     SDL_mutex *mutex;
112     SDL_cond *cond;
113 } PacketQueue;
114
115 #define VIDEO_PICTURE_QUEUE_SIZE 4
116 #define SUBPICTURE_QUEUE_SIZE 4
117
118 typedef struct VideoPicture {
119     double pts;             // presentation timestamp for this picture
120     int64_t pos;            // byte position in file
121     SDL_Overlay *bmp;
122     int width, height; /* source width & height */
123     int allocated;
124     int reallocate;
125     int serial;
126
127     AVRational sar;
128 } VideoPicture;
129
130 typedef struct SubPicture {
131     double pts; /* presentation time stamp for this picture */
132     AVSubtitle sub;
133 } SubPicture;
134
135 typedef struct AudioParams {
136     int freq;
137     int channels;
138     int64_t channel_layout;
139     enum AVSampleFormat fmt;
140 } AudioParams;
141
142 enum {
143     AV_SYNC_AUDIO_MASTER, /* default choice */
144     AV_SYNC_VIDEO_MASTER,
145     AV_SYNC_EXTERNAL_CLOCK, /* synchronize to an external clock */
146 };
147
148 typedef struct VideoState {
149     SDL_Thread *read_tid;
150     SDL_Thread *video_tid;
151     AVInputFormat *iformat;
152     int no_background;
153     int abort_request;
154     int force_refresh;
155     int paused;
156     int last_paused;
157     int queue_attachments_req;
158     int seek_req;
159     int seek_flags;
160     int64_t seek_pos;
161     int64_t seek_rel;
162     int read_pause_return;
163     AVFormatContext *ic;
164     int realtime;
165
166     int audio_stream;
167
168     int av_sync_type;
169     double external_clock;                   ///< external clock base
170     double external_clock_drift;             ///< external clock base - time (av_gettime) at which we updated external_clock
171     int64_t external_clock_time;             ///< last reference time
172     double external_clock_speed;             ///< speed of the external clock
173
174     double audio_clock;
175     int audio_clock_serial;
176     double audio_diff_cum; /* used for AV difference average computation */
177     double audio_diff_avg_coef;
178     double audio_diff_threshold;
179     int audio_diff_avg_count;
180     AVStream *audio_st;
181     PacketQueue audioq;
182     int audio_hw_buf_size;
183     uint8_t silence_buf[SDL_AUDIO_BUFFER_SIZE];
184     uint8_t *audio_buf;
185     uint8_t *audio_buf1;
186     unsigned int audio_buf_size; /* in bytes */
187     unsigned int audio_buf1_size;
188     int audio_buf_index; /* in bytes */
189     int audio_write_buf_size;
190     int audio_buf_frames_pending;
191     AVPacket audio_pkt_temp;
192     AVPacket audio_pkt;
193     int audio_pkt_temp_serial;
194     int audio_last_serial;
195     struct AudioParams audio_src;
196 #if CONFIG_AVFILTER
197     struct AudioParams audio_filter_src;
198 #endif
199     struct AudioParams audio_tgt;
200     struct SwrContext *swr_ctx;
201     double audio_current_pts;
202     double audio_current_pts_drift;
203     int frame_drops_early;
204     int frame_drops_late;
205     AVFrame *frame;
206
207     enum ShowMode {
208         SHOW_MODE_NONE = -1, SHOW_MODE_VIDEO = 0, SHOW_MODE_WAVES, SHOW_MODE_RDFT, SHOW_MODE_NB
209     } show_mode;
210     int16_t sample_array[SAMPLE_ARRAY_SIZE];
211     int sample_array_index;
212     int last_i_start;
213     RDFTContext *rdft;
214     int rdft_bits;
215     FFTSample *rdft_data;
216     int xpos;
217     double last_vis_time;
218
219     SDL_Thread *subtitle_tid;
220     int subtitle_stream;
221     int subtitle_stream_changed;
222     AVStream *subtitle_st;
223     PacketQueue subtitleq;
224     SubPicture subpq[SUBPICTURE_QUEUE_SIZE];
225     int subpq_size, subpq_rindex, subpq_windex;
226     SDL_mutex *subpq_mutex;
227     SDL_cond *subpq_cond;
228
229     double frame_timer;
230     double frame_last_pts;
231     double frame_last_duration;
232     double frame_last_dropped_pts;
233     double frame_last_returned_time;
234     double frame_last_filter_delay;
235     int64_t frame_last_dropped_pos;
236     int frame_last_dropped_serial;
237     int video_stream;
238     AVStream *video_st;
239     PacketQueue videoq;
240     double video_current_pts;       // current displayed pts
241     double video_current_pts_drift; // video_current_pts - time (av_gettime) at which we updated video_current_pts - used to have running video pts
242     int64_t video_current_pos;      // current displayed file pos
243     double max_frame_duration;      // maximum duration of a frame - above this, we consider the jump a timestamp discontinuity
244     int video_clock_serial;
245     VideoPicture pictq[VIDEO_PICTURE_QUEUE_SIZE];
246     int pictq_size, pictq_rindex, pictq_windex;
247     SDL_mutex *pictq_mutex;
248     SDL_cond *pictq_cond;
249 #if !CONFIG_AVFILTER
250     struct SwsContext *img_convert_ctx;
251 #endif
252     SDL_Rect last_display_rect;
253
254     char filename[1024];
255     int width, height, xleft, ytop;
256     int step;
257
258 #if CONFIG_AVFILTER
259     AVFilterContext *in_video_filter;   // the first filter in the video chain
260     AVFilterContext *out_video_filter;  // the last filter in the video chain
261     AVFilterContext *in_audio_filter;   // the first filter in the audio chain
262     AVFilterContext *out_audio_filter;  // the last filter in the audio chain
263     AVFilterGraph *agraph;              // audio filter graph
264 #endif
265
266     int last_video_stream, last_audio_stream, last_subtitle_stream;
267
268     SDL_cond *continue_read_thread;
269 } VideoState;
270
271 /* options specified by the user */
272 static AVInputFormat *file_iformat;
273 static const char *input_filename;
274 static const char *window_title;
275 static int fs_screen_width;
276 static int fs_screen_height;
277 static int default_width  = 640;
278 static int default_height = 480;
279 static int screen_width  = 0;
280 static int screen_height = 0;
281 static int audio_disable;
282 static int video_disable;
283 static int subtitle_disable;
284 static int wanted_stream[AVMEDIA_TYPE_NB] = {
285     [AVMEDIA_TYPE_AUDIO]    = -1,
286     [AVMEDIA_TYPE_VIDEO]    = -1,
287     [AVMEDIA_TYPE_SUBTITLE] = -1,
288 };
289 static int seek_by_bytes = -1;
290 static int display_disable;
291 static int show_status = 1;
292 static int av_sync_type = AV_SYNC_AUDIO_MASTER;
293 static int64_t start_time = AV_NOPTS_VALUE;
294 static int64_t duration = AV_NOPTS_VALUE;
295 static int workaround_bugs = 1;
296 static int fast = 0;
297 static int genpts = 0;
298 static int lowres = 0;
299 static int idct = FF_IDCT_AUTO;
300 static int error_concealment = 3;
301 static int decoder_reorder_pts = -1;
302 static int autoexit;
303 static int exit_on_keydown;
304 static int exit_on_mousedown;
305 static int loop = 1;
306 static int framedrop = -1;
307 static int infinite_buffer = -1;
308 static enum ShowMode show_mode = SHOW_MODE_NONE;
309 static const char *audio_codec_name;
310 static const char *subtitle_codec_name;
311 static const char *video_codec_name;
312 double rdftspeed = 0.02;
313 static int64_t cursor_last_shown;
314 static int cursor_hidden = 0;
315 #if CONFIG_AVFILTER
316 static char *vfilters = NULL;
317 static char *afilters = NULL;
318 #endif
319
320 /* current context */
321 static int is_full_screen;
322 static int64_t audio_callback_time;
323
324 static AVPacket flush_pkt;
325
326 #define FF_ALLOC_EVENT   (SDL_USEREVENT)
327 #define FF_QUIT_EVENT    (SDL_USEREVENT + 2)
328
329 static SDL_Surface *screen;
330
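/* return non-zero if the two audio formats differ (for mono, only the packed
   sample format is compared, since planar and packed layouts are then identical) */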
331 static inline
332 int cmp_audio_fmts(enum AVSampleFormat fmt1, int64_t channel_count1,
333                    enum AVSampleFormat fmt2, int64_t channel_count2)
334 {
335     /* If channel count == 1, planar and non-planar formats are the same */
336     if (channel_count1 == 1 && channel_count2 == 1)
337         return av_get_packed_sample_fmt(fmt1) != av_get_packed_sample_fmt(fmt2);
338     else
339         return channel_count1 != channel_count2 || fmt1 != fmt2;
340 }
341
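/* return channel_layout only if it is consistent with the given channel count, 0 otherwise */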
342 static inline
343 int64_t get_valid_channel_layout(int64_t channel_layout, int channels)
344 {
345     if (channel_layout && av_get_channel_layout_nb_channels(channel_layout) == channels)
346         return channel_layout;
347     else
348         return 0;
349 }
350
351 static int packet_queue_put(PacketQueue *q, AVPacket *pkt);
352
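/* append a packet to the queue; bumps the queue serial when the flush packet is
   queued.  Must be called with the queue mutex held. */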
353 static int packet_queue_put_private(PacketQueue *q, AVPacket *pkt)
354 {
355     MyAVPacketList *pkt1;
356
357     if (q->abort_request)
358        return -1;
359
360     pkt1 = av_malloc(sizeof(MyAVPacketList));
361     if (!pkt1)
362         return -1;
363     pkt1->pkt = *pkt;
364     pkt1->next = NULL;
365     if (pkt == &flush_pkt)
366         q->serial++;
367     pkt1->serial = q->serial;
368
369     if (!q->last_pkt)
370         q->first_pkt = pkt1;
371     else
372         q->last_pkt->next = pkt1;
373     q->last_pkt = pkt1;
374     q->nb_packets++;
375     q->size += pkt1->pkt.size + sizeof(*pkt1);
376     /* XXX: should duplicate packet data in DV case */
377     SDL_CondSignal(q->cond);
378     return 0;
379 }
380
381 static int packet_queue_put(PacketQueue *q, AVPacket *pkt)
382 {
383     int ret;
384
385     /* duplicate the packet */
386     if (pkt != &flush_pkt && av_dup_packet(pkt) < 0)
387         return -1;
388
389     SDL_LockMutex(q->mutex);
390     ret = packet_queue_put_private(q, pkt);
391     SDL_UnlockMutex(q->mutex);
392
393     if (pkt != &flush_pkt && ret < 0)
394         av_free_packet(pkt);
395
396     return ret;
397 }
398
399 /* packet queue handling */
400 static void packet_queue_init(PacketQueue *q)
401 {
402     memset(q, 0, sizeof(PacketQueue));
403     q->mutex = SDL_CreateMutex();
404     q->cond = SDL_CreateCond();
405     q->abort_request = 1;
406 }
407
408 static void packet_queue_flush(PacketQueue *q)
409 {
410     MyAVPacketList *pkt, *pkt1;
411
412     SDL_LockMutex(q->mutex);
413     for (pkt = q->first_pkt; pkt != NULL; pkt = pkt1) {
414         pkt1 = pkt->next;
415         av_free_packet(&pkt->pkt);
416         av_freep(&pkt);
417     }
418     q->last_pkt = NULL;
419     q->first_pkt = NULL;
420     q->nb_packets = 0;
421     q->size = 0;
422     SDL_UnlockMutex(q->mutex);
423 }
424
425 static void packet_queue_destroy(PacketQueue *q)
426 {
427     packet_queue_flush(q);
428     SDL_DestroyMutex(q->mutex);
429     SDL_DestroyCond(q->cond);
430 }
431
432 static void packet_queue_abort(PacketQueue *q)
433 {
434     SDL_LockMutex(q->mutex);
435
436     q->abort_request = 1;
437
438     SDL_CondSignal(q->cond);
439
440     SDL_UnlockMutex(q->mutex);
441 }
442
443 static void packet_queue_start(PacketQueue *q)
444 {
445     SDL_LockMutex(q->mutex);
446     q->abort_request = 0;
447     packet_queue_put_private(q, &flush_pkt);
448     SDL_UnlockMutex(q->mutex);
449 }
450
451 /* return < 0 if aborted, 0 if no packet and > 0 if packet.  */
452 static int packet_queue_get(PacketQueue *q, AVPacket *pkt, int block, int *serial)
453 {
454     MyAVPacketList *pkt1;
455     int ret;
456
457     SDL_LockMutex(q->mutex);
458
459     for (;;) {
460         if (q->abort_request) {
461             ret = -1;
462             break;
463         }
464
465         pkt1 = q->first_pkt;
466         if (pkt1) {
467             q->first_pkt = pkt1->next;
468             if (!q->first_pkt)
469                 q->last_pkt = NULL;
470             q->nb_packets--;
471             q->size -= pkt1->pkt.size + sizeof(*pkt1);
472             *pkt = pkt1->pkt;
473             if (serial)
474                 *serial = pkt1->serial;
475             av_free(pkt1);
476             ret = 1;
477             break;
478         } else if (!block) {
479             ret = 0;
480             break;
481         } else {
482             SDL_CondWait(q->cond, q->mutex);
483         }
484     }
485     SDL_UnlockMutex(q->mutex);
486     return ret;
487 }
488
489 static inline void fill_rectangle(SDL_Surface *screen,
490                                   int x, int y, int w, int h, int color, int update)
491 {
492     SDL_Rect rect;
493     rect.x = x;
494     rect.y = y;
495     rect.w = w;
496     rect.h = h;
497     SDL_FillRect(screen, &rect, color);
498     if (update && w > 0 && h > 0)
499         SDL_UpdateRect(screen, x, y, w, h);
500 }
501
502 /* draw only the border of a rectangle */
503 static void fill_border(int xleft, int ytop, int width, int height, int x, int y, int w, int h, int color, int update)
504 {
505     int w1, w2, h1, h2;
506
507     /* fill the background */
508     w1 = x;
509     if (w1 < 0)
510         w1 = 0;
511     w2 = width - (x + w);
512     if (w2 < 0)
513         w2 = 0;
514     h1 = y;
515     if (h1 < 0)
516         h1 = 0;
517     h2 = height - (y + h);
518     if (h2 < 0)
519         h2 = 0;
520     fill_rectangle(screen,
521                    xleft, ytop,
522                    w1, height,
523                    color, update);
524     fill_rectangle(screen,
525                    xleft + width - w2, ytop,
526                    w2, height,
527                    color, update);
528     fill_rectangle(screen,
529                    xleft + w1, ytop,
530                    width - w1 - w2, h1,
531                    color, update);
532     fill_rectangle(screen,
533                    xleft + w1, ytop + height - h2,
534                    width - w1 - w2, h2,
535                    color, update);
536 }
537
538 #define ALPHA_BLEND(a, oldp, newp, s)\
539 ((((oldp << s) * (255 - (a))) + (newp * (a))) / (255 << s))
540
541 #define RGBA_IN(r, g, b, a, s)\
542 {\
543     unsigned int v = ((const uint32_t *)(s))[0];\
544     a = (v >> 24) & 0xff;\
545     r = (v >> 16) & 0xff;\
546     g = (v >> 8) & 0xff;\
547     b = v & 0xff;\
548 }
549
550 #define YUVA_IN(y, u, v, a, s, pal)\
551 {\
552     unsigned int val = ((const uint32_t *)(pal))[*(const uint8_t*)(s)];\
553     a = (val >> 24) & 0xff;\
554     y = (val >> 16) & 0xff;\
555     u = (val >> 8) & 0xff;\
556     v = val & 0xff;\
557 }
558
559 #define YUVA_OUT(d, y, u, v, a)\
560 {\
561     ((uint32_t *)(d))[0] = (a << 24) | (y << 16) | (u << 8) | v;\
562 }
563
564
565 #define BPP 1
566
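/* alpha-blend a palettized subtitle rectangle onto a YUV420P destination picture;
   the palette is expected in YCrCb (see the pal comment below) */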
567 static void blend_subrect(AVPicture *dst, const AVSubtitleRect *rect, int imgw, int imgh)
568 {
569     int wrap, wrap3, width2, skip2;
570     int y, u, v, a, u1, v1, a1, w, h;
571     uint8_t *lum, *cb, *cr;
572     const uint8_t *p;
573     const uint32_t *pal;
574     int dstx, dsty, dstw, dsth;
575
576     dstw = av_clip(rect->w, 0, imgw);
577     dsth = av_clip(rect->h, 0, imgh);
578     dstx = av_clip(rect->x, 0, imgw - dstw);
579     dsty = av_clip(rect->y, 0, imgh - dsth);
580     lum = dst->data[0] + dsty * dst->linesize[0];
581     cb  = dst->data[1] + (dsty >> 1) * dst->linesize[1];
582     cr  = dst->data[2] + (dsty >> 1) * dst->linesize[2];
583
584     width2 = ((dstw + 1) >> 1) + (dstx & ~dstw & 1);
585     skip2 = dstx >> 1;
586     wrap = dst->linesize[0];
587     wrap3 = rect->pict.linesize[0];
588     p = rect->pict.data[0];
589     pal = (const uint32_t *)rect->pict.data[1];  /* Now in YCrCb! */
590
591     if (dsty & 1) {
592         lum += dstx;
593         cb += skip2;
594         cr += skip2;
595
596         if (dstx & 1) {
597             YUVA_IN(y, u, v, a, p, pal);
598             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
599             cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
600             cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
601             cb++;
602             cr++;
603             lum++;
604             p += BPP;
605         }
606         for (w = dstw - (dstx & 1); w >= 2; w -= 2) {
607             YUVA_IN(y, u, v, a, p, pal);
608             u1 = u;
609             v1 = v;
610             a1 = a;
611             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
612
613             YUVA_IN(y, u, v, a, p + BPP, pal);
614             u1 += u;
615             v1 += v;
616             a1 += a;
617             lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
618             cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
619             cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
620             cb++;
621             cr++;
622             p += 2 * BPP;
623             lum += 2;
624         }
625         if (w) {
626             YUVA_IN(y, u, v, a, p, pal);
627             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
628             cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
629             cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
630             p++;
631             lum++;
632         }
633         p += wrap3 - dstw * BPP;
634         lum += wrap - dstw - dstx;
635         cb += dst->linesize[1] - width2 - skip2;
636         cr += dst->linesize[2] - width2 - skip2;
637     }
638     for (h = dsth - (dsty & 1); h >= 2; h -= 2) {
639         lum += dstx;
640         cb += skip2;
641         cr += skip2;
642
643         if (dstx & 1) {
644             YUVA_IN(y, u, v, a, p, pal);
645             u1 = u;
646             v1 = v;
647             a1 = a;
648             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
649             p += wrap3;
650             lum += wrap;
651             YUVA_IN(y, u, v, a, p, pal);
652             u1 += u;
653             v1 += v;
654             a1 += a;
655             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
656             cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
657             cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
658             cb++;
659             cr++;
660             p += -wrap3 + BPP;
661             lum += -wrap + 1;
662         }
663         for (w = dstw - (dstx & 1); w >= 2; w -= 2) {
664             YUVA_IN(y, u, v, a, p, pal);
665             u1 = u;
666             v1 = v;
667             a1 = a;
668             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
669
670             YUVA_IN(y, u, v, a, p + BPP, pal);
671             u1 += u;
672             v1 += v;
673             a1 += a;
674             lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
675             p += wrap3;
676             lum += wrap;
677
678             YUVA_IN(y, u, v, a, p, pal);
679             u1 += u;
680             v1 += v;
681             a1 += a;
682             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
683
684             YUVA_IN(y, u, v, a, p + BPP, pal);
685             u1 += u;
686             v1 += v;
687             a1 += a;
688             lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
689
690             cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 2);
691             cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 2);
692
693             cb++;
694             cr++;
695             p += -wrap3 + 2 * BPP;
696             lum += -wrap + 2;
697         }
698         if (w) {
699             YUVA_IN(y, u, v, a, p, pal);
700             u1 = u;
701             v1 = v;
702             a1 = a;
703             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
704             p += wrap3;
705             lum += wrap;
706             YUVA_IN(y, u, v, a, p, pal);
707             u1 += u;
708             v1 += v;
709             a1 += a;
710             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
711             cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
712             cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
713             cb++;
714             cr++;
715             p += -wrap3 + BPP;
716             lum += -wrap + 1;
717         }
718         p += wrap3 + (wrap3 - dstw * BPP);
719         lum += wrap + (wrap - dstw - dstx);
720         cb += dst->linesize[1] - width2 - skip2;
721         cr += dst->linesize[2] - width2 - skip2;
722     }
723     /* handle odd height */
724     if (h) {
725         lum += dstx;
726         cb += skip2;
727         cr += skip2;
728
729         if (dstx & 1) {
730             YUVA_IN(y, u, v, a, p, pal);
731             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
732             cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
733             cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
734             cb++;
735             cr++;
736             lum++;
737             p += BPP;
738         }
739         for (w = dstw - (dstx & 1); w >= 2; w -= 2) {
740             YUVA_IN(y, u, v, a, p, pal);
741             u1 = u;
742             v1 = v;
743             a1 = a;
744             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
745
746             YUVA_IN(y, u, v, a, p + BPP, pal);
747             u1 += u;
748             v1 += v;
749             a1 += a;
750             lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
751             cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u, 1);
752             cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v, 1);
753             cb++;
754             cr++;
755             p += 2 * BPP;
756             lum += 2;
757         }
758         if (w) {
759             YUVA_IN(y, u, v, a, p, pal);
760             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
761             cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
762             cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
763         }
764     }
765 }
766
767 static void free_subpicture(SubPicture *sp)
768 {
769     avsubtitle_free(&sp->sub);
770 }
771
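/* compute the display rectangle: scale the picture to fit the given area while
   preserving its aspect ratio (including the sample aspect ratio) and center it */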
772 static void calculate_display_rect(SDL_Rect *rect, int scr_xleft, int scr_ytop, int scr_width, int scr_height, VideoPicture *vp)
773 {
774     float aspect_ratio;
775     int width, height, x, y;
776
777     if (vp->sar.num == 0)
778         aspect_ratio = 0;
779     else
780         aspect_ratio = av_q2d(vp->sar);
781
782     if (aspect_ratio <= 0.0)
783         aspect_ratio = 1.0;
784     aspect_ratio *= (float)vp->width / (float)vp->height;
785
786     /* XXX: we suppose the screen has a 1.0 pixel ratio */
787     height = scr_height;
788     width = ((int)rint(height * aspect_ratio)) & ~1;
789     if (width > scr_width) {
790         width = scr_width;
791         height = ((int)rint(width / aspect_ratio)) & ~1;
792     }
793     x = (scr_width - width) / 2;
794     y = (scr_height - height) / 2;
795     rect->x = scr_xleft + x;
796     rect->y = scr_ytop  + y;
797     rect->w = FFMAX(width,  1);
798     rect->h = FFMAX(height, 1);
799 }
800
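/* display the current picture from the picture queue, blending any pending
   subtitle rectangles into the YUV overlay first */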
801 static void video_image_display(VideoState *is)
802 {
803     VideoPicture *vp;
804     SubPicture *sp;
805     AVPicture pict;
806     SDL_Rect rect;
807     int i;
808
809     vp = &is->pictq[is->pictq_rindex];
810     if (vp->bmp) {
811         if (is->subtitle_st) {
812             if (is->subpq_size > 0) {
813                 sp = &is->subpq[is->subpq_rindex];
814
815                 if (vp->pts >= sp->pts + ((float) sp->sub.start_display_time / 1000)) {
816                     SDL_LockYUVOverlay (vp->bmp);
817
818                     pict.data[0] = vp->bmp->pixels[0];
819                     pict.data[1] = vp->bmp->pixels[2];
820                     pict.data[2] = vp->bmp->pixels[1];
821
822                     pict.linesize[0] = vp->bmp->pitches[0];
823                     pict.linesize[1] = vp->bmp->pitches[2];
824                     pict.linesize[2] = vp->bmp->pitches[1];
825
826                     for (i = 0; i < sp->sub.num_rects; i++)
827                         blend_subrect(&pict, sp->sub.rects[i],
828                                       vp->bmp->w, vp->bmp->h);
829
830                     SDL_UnlockYUVOverlay (vp->bmp);
831                 }
832             }
833         }
834
835         calculate_display_rect(&rect, is->xleft, is->ytop, is->width, is->height, vp);
836
837         SDL_DisplayYUVOverlay(vp->bmp, &rect);
838
839         if (rect.x != is->last_display_rect.x || rect.y != is->last_display_rect.y || rect.w != is->last_display_rect.w || rect.h != is->last_display_rect.h || is->force_refresh) {
840             int bgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0x00);
841             fill_border(is->xleft, is->ytop, is->width, is->height, rect.x, rect.y, rect.w, rect.h, bgcolor, 1);
842             is->last_display_rect = rect;
843         }
844     }
845 }
846
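/* modulo that always returns a non-negative result, even for negative a */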
847 static inline int compute_mod(int a, int b)
848 {
849     return a < 0 ? a%b + b : a%b;
850 }
851
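/* draw the audio visualization: either the waveform (SHOW_MODE_WAVES) or an
   RDFT spectrogram (SHOW_MODE_RDFT) */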
852 static void video_audio_display(VideoState *s)
853 {
854     int i, i_start, x, y1, y, ys, delay, n, nb_display_channels;
855     int ch, channels, h, h2, bgcolor, fgcolor;
856     int64_t time_diff;
857     int rdft_bits, nb_freq;
858
859     for (rdft_bits = 1; (1 << rdft_bits) < 2 * s->height; rdft_bits++)
860         ;
861     nb_freq = 1 << (rdft_bits - 1);
862
863     /* compute display index : center on currently output samples */
864     channels = s->audio_tgt.channels;
865     nb_display_channels = channels;
866     if (!s->paused) {
867         int data_used= s->show_mode == SHOW_MODE_WAVES ? s->width : (2*nb_freq);
868         n = 2 * channels;
869         delay = s->audio_write_buf_size;
870         delay /= n;
871
872         /* to be more precise, we take into account the time spent since
873            the last buffer computation */
874         if (audio_callback_time) {
875             time_diff = av_gettime() - audio_callback_time;
876             delay -= (time_diff * s->audio_tgt.freq) / 1000000;
877         }
878
879         delay += 2 * data_used;
880         if (delay < data_used)
881             delay = data_used;
882
883         i_start= x = compute_mod(s->sample_array_index - delay * channels, SAMPLE_ARRAY_SIZE);
884         if (s->show_mode == SHOW_MODE_WAVES) {
885             h = INT_MIN;
886             for (i = 0; i < 1000; i += channels) {
887                 int idx = (SAMPLE_ARRAY_SIZE + x - i) % SAMPLE_ARRAY_SIZE;
888                 int a = s->sample_array[idx];
889                 int b = s->sample_array[(idx + 4 * channels) % SAMPLE_ARRAY_SIZE];
890                 int c = s->sample_array[(idx + 5 * channels) % SAMPLE_ARRAY_SIZE];
891                 int d = s->sample_array[(idx + 9 * channels) % SAMPLE_ARRAY_SIZE];
892                 int score = a - d;
893                 if (h < score && (b ^ c) < 0) {
894                     h = score;
895                     i_start = idx;
896                 }
897             }
898         }
899
900         s->last_i_start = i_start;
901     } else {
902         i_start = s->last_i_start;
903     }
904
905     bgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0x00);
906     if (s->show_mode == SHOW_MODE_WAVES) {
907         fill_rectangle(screen,
908                        s->xleft, s->ytop, s->width, s->height,
909                        bgcolor, 0);
910
911         fgcolor = SDL_MapRGB(screen->format, 0xff, 0xff, 0xff);
912
913         /* total height for one channel */
914         h = s->height / nb_display_channels;
915         /* graph height / 2 */
916         h2 = (h * 9) / 20;
917         for (ch = 0; ch < nb_display_channels; ch++) {
918             i = i_start + ch;
919             y1 = s->ytop + ch * h + (h / 2); /* position of center line */
920             for (x = 0; x < s->width; x++) {
921                 y = (s->sample_array[i] * h2) >> 15;
922                 if (y < 0) {
923                     y = -y;
924                     ys = y1 - y;
925                 } else {
926                     ys = y1;
927                 }
928                 fill_rectangle(screen,
929                                s->xleft + x, ys, 1, y,
930                                fgcolor, 0);
931                 i += channels;
932                 if (i >= SAMPLE_ARRAY_SIZE)
933                     i -= SAMPLE_ARRAY_SIZE;
934             }
935         }
936
937         fgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0xff);
938
939         for (ch = 1; ch < nb_display_channels; ch++) {
940             y = s->ytop + ch * h;
941             fill_rectangle(screen,
942                            s->xleft, y, s->width, 1,
943                            fgcolor, 0);
944         }
945         SDL_UpdateRect(screen, s->xleft, s->ytop, s->width, s->height);
946     } else {
947         nb_display_channels= FFMIN(nb_display_channels, 2);
948         if (rdft_bits != s->rdft_bits) {
949             av_rdft_end(s->rdft);
950             av_free(s->rdft_data);
951             s->rdft = av_rdft_init(rdft_bits, DFT_R2C);
952             s->rdft_bits = rdft_bits;
953             s->rdft_data = av_malloc(4 * nb_freq * sizeof(*s->rdft_data));
954         }
955         {
956             FFTSample *data[2];
957             for (ch = 0; ch < nb_display_channels; ch++) {
958                 data[ch] = s->rdft_data + 2 * nb_freq * ch;
959                 i = i_start + ch;
960                 for (x = 0; x < 2 * nb_freq; x++) {
961                     double w = (x-nb_freq) * (1.0 / nb_freq);
962                     data[ch][x] = s->sample_array[i] * (1.0 - w * w);
963                     i += channels;
964                     if (i >= SAMPLE_ARRAY_SIZE)
965                         i -= SAMPLE_ARRAY_SIZE;
966                 }
967                 av_rdft_calc(s->rdft, data[ch]);
968             }
969             // least efficient way to do this, we should of course directly access it but it's more than fast enough
970             for (y = 0; y < s->height; y++) {
971                 double w = 1 / sqrt(nb_freq);
972                 int a = sqrt(w * sqrt(data[0][2 * y + 0] * data[0][2 * y + 0] + data[0][2 * y + 1] * data[0][2 * y + 1]));
973                 int b = (nb_display_channels == 2 ) ? sqrt(w * sqrt(data[1][2 * y + 0] * data[1][2 * y + 0]
974                        + data[1][2 * y + 1] * data[1][2 * y + 1])) : a;
975                 a = FFMIN(a, 255);
976                 b = FFMIN(b, 255);
977                 fgcolor = SDL_MapRGB(screen->format, a, b, (a + b) / 2);
978
979                 fill_rectangle(screen,
980                             s->xpos, s->height-y, 1, 1,
981                             fgcolor, 0);
982             }
983         }
984         SDL_UpdateRect(screen, s->xpos, s->ytop, 1, s->height);
985         if (!s->paused)
986             s->xpos++;
987         if (s->xpos >= s->width)
988             s->xpos= s->xleft;
989     }
990 }
991
992 static void stream_close(VideoState *is)
993 {
994     VideoPicture *vp;
995     int i;
996     /* XXX: use a special url_shutdown call to abort parse cleanly */
997     is->abort_request = 1;
998     SDL_WaitThread(is->read_tid, NULL);
999     packet_queue_destroy(&is->videoq);
1000     packet_queue_destroy(&is->audioq);
1001     packet_queue_destroy(&is->subtitleq);
1002
1003     /* free all pictures */
1004     for (i = 0; i < VIDEO_PICTURE_QUEUE_SIZE; i++) {
1005         vp = &is->pictq[i];
1006         if (vp->bmp) {
1007             SDL_FreeYUVOverlay(vp->bmp);
1008             vp->bmp = NULL;
1009         }
1010     }
1011     SDL_DestroyMutex(is->pictq_mutex);
1012     SDL_DestroyCond(is->pictq_cond);
1013     SDL_DestroyMutex(is->subpq_mutex);
1014     SDL_DestroyCond(is->subpq_cond);
1015     SDL_DestroyCond(is->continue_read_thread);
1016 #if !CONFIG_AVFILTER
1017     sws_freeContext(is->img_convert_ctx);
1018 #endif
1019     av_free(is);
1020 }
1021
1022 static void do_exit(VideoState *is)
1023 {
1024     if (is) {
1025         stream_close(is);
1026     }
1027     av_lockmgr_register(NULL);
1028     uninit_opts();
1029 #if CONFIG_AVFILTER
1030     avfilter_uninit();
1031     av_freep(&vfilters);
1032 #endif
1033     avformat_network_deinit();
1034     if (show_status)
1035         printf("\n");
1036     SDL_Quit();
1037     av_log(NULL, AV_LOG_QUIET, "%s", "");
1038     exit(0);
1039 }
1040
1041 static void sigterm_handler(int sig)
1042 {
1043     exit(123);
1044 }
1045
1046 static int video_open(VideoState *is, int force_set_video_mode, VideoPicture *vp)
1047 {
1048     int flags = SDL_HWSURFACE | SDL_ASYNCBLIT | SDL_HWACCEL;
1049     int w,h;
1050     SDL_Rect rect;
1051
1052     if (is_full_screen) flags |= SDL_FULLSCREEN;
1053     else                flags |= SDL_RESIZABLE;
1054
1055     if (vp && vp->width) {
1056         calculate_display_rect(&rect, 0, 0, INT_MAX, vp->height, vp);
1057         default_width  = rect.w;
1058         default_height = rect.h;
1059     }
1060
1061     if (is_full_screen && fs_screen_width) {
1062         w = fs_screen_width;
1063         h = fs_screen_height;
1064     } else if (!is_full_screen && screen_width) {
1065         w = screen_width;
1066         h = screen_height;
1067     } else {
1068         w = default_width;
1069         h = default_height;
1070     }
1071     if (screen && is->width == screen->w && screen->w == w
1072        && is->height== screen->h && screen->h == h && !force_set_video_mode)
1073         return 0;
1074     screen = SDL_SetVideoMode(w, h, 0, flags);
1075     if (!screen) {
1076         fprintf(stderr, "SDL: could not set video mode - exiting\n");
1077         do_exit(is);
1078     }
1079     if (!window_title)
1080         window_title = input_filename;
1081     SDL_WM_SetCaption(window_title, window_title);
1082
1083     is->width  = screen->w;
1084     is->height = screen->h;
1085
1086     return 0;
1087 }
1088
1089 /* display the current picture, if any */
1090 static void video_display(VideoState *is)
1091 {
1092     if (!screen)
1093         video_open(is, 0, NULL);
1094     if (is->audio_st && is->show_mode != SHOW_MODE_VIDEO)
1095         video_audio_display(is);
1096     else if (is->video_st)
1097         video_image_display(is);
1098 }
1099
1100 /* get the current audio clock value */
1101 static double get_audio_clock(VideoState *is)
1102 {
1103     if (is->audio_clock_serial != is->audioq.serial)
1104         return NAN;
1105     if (is->paused) {
1106         return is->audio_current_pts;
1107     } else {
1108         return is->audio_current_pts_drift + av_gettime() / 1000000.0;
1109     }
1110 }
1111
1112 /* get the current video clock value */
1113 static double get_video_clock(VideoState *is)
1114 {
1115     if (is->video_clock_serial != is->videoq.serial)
1116         return NAN;
1117     if (is->paused) {
1118         return is->video_current_pts;
1119     } else {
1120         return is->video_current_pts_drift + av_gettime() / 1000000.0;
1121     }
1122 }
1123
1124 /* get the current external clock value */
1125 static double get_external_clock(VideoState *is)
1126 {
1127     if (is->paused) {
1128         return is->external_clock;
1129     } else {
1130         double time = av_gettime() / 1000000.0;
1131         return is->external_clock_drift + time - (time - is->external_clock_time / 1000000.0) * (1.0 - is->external_clock_speed);
1132     }
1133 }
1134
1135 static int get_master_sync_type(VideoState *is) {
1136     if (is->av_sync_type == AV_SYNC_VIDEO_MASTER) {
1137         if (is->video_st)
1138             return AV_SYNC_VIDEO_MASTER;
1139         else
1140             return AV_SYNC_AUDIO_MASTER;
1141     } else if (is->av_sync_type == AV_SYNC_AUDIO_MASTER) {
1142         if (is->audio_st)
1143             return AV_SYNC_AUDIO_MASTER;
1144         else
1145             return AV_SYNC_EXTERNAL_CLOCK;
1146     } else {
1147         return AV_SYNC_EXTERNAL_CLOCK;
1148     }
1149 }
1150
1151 /* get the current master clock value */
1152 static double get_master_clock(VideoState *is)
1153 {
1154     double val;
1155
1156     switch (get_master_sync_type(is)) {
1157         case AV_SYNC_VIDEO_MASTER:
1158             val = get_video_clock(is);
1159             break;
1160         case AV_SYNC_AUDIO_MASTER:
1161             val = get_audio_clock(is);
1162             break;
1163         default:
1164             val = get_external_clock(is);
1165             break;
1166     }
1167     return val;
1168 }
1169
1170 static void update_external_clock_pts(VideoState *is, double pts)
1171 {
1172    is->external_clock_time = av_gettime();
1173    is->external_clock = pts;
1174    is->external_clock_drift = pts - is->external_clock_time / 1000000.0;
1175 }
1176
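/* resync the external clock to pts if it is invalid or differs by more than AV_NOSYNC_THRESHOLD */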
1177 static void check_external_clock_sync(VideoState *is, double pts) {
1178     double ext_clock = get_external_clock(is);
1179     if (isnan(ext_clock) || fabs(ext_clock - pts) > AV_NOSYNC_THRESHOLD) {
1180         update_external_clock_pts(is, pts);
1181     }
1182 }
1183
1184 static void update_external_clock_speed(VideoState *is, double speed) {
1185     update_external_clock_pts(is, get_external_clock(is));
1186     is->external_clock_speed = speed;
1187 }
1188
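/* for realtime sources synced to the external clock: slow the clock down when the
   packet queues are nearly empty, speed it up when they are well filled, and
   otherwise drift the speed back towards 1.0 */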
1189 static void check_external_clock_speed(VideoState *is) {
1190    if (is->video_stream >= 0 && is->videoq.nb_packets <= MIN_FRAMES / 2 ||
1191        is->audio_stream >= 0 && is->audioq.nb_packets <= MIN_FRAMES / 2) {
1192        update_external_clock_speed(is, FFMAX(EXTERNAL_CLOCK_SPEED_MIN, is->external_clock_speed - EXTERNAL_CLOCK_SPEED_STEP));
1193    } else if ((is->video_stream < 0 || is->videoq.nb_packets > MIN_FRAMES * 2) &&
1194               (is->audio_stream < 0 || is->audioq.nb_packets > MIN_FRAMES * 2)) {
1195        update_external_clock_speed(is, FFMIN(EXTERNAL_CLOCK_SPEED_MAX, is->external_clock_speed + EXTERNAL_CLOCK_SPEED_STEP));
1196    } else {
1197        double speed = is->external_clock_speed;
1198        if (speed != 1.0)
1199            update_external_clock_speed(is, speed + EXTERNAL_CLOCK_SPEED_STEP * (1.0 - speed) / fabs(1.0 - speed));
1200    }
1201 }
1202
1203 /* seek in the stream */
1204 static void stream_seek(VideoState *is, int64_t pos, int64_t rel, int seek_by_bytes)
1205 {
1206     if (!is->seek_req) {
1207         is->seek_pos = pos;
1208         is->seek_rel = rel;
1209         is->seek_flags &= ~AVSEEK_FLAG_BYTE;
1210         if (seek_by_bytes)
1211             is->seek_flags |= AVSEEK_FLAG_BYTE;
1212         is->seek_req = 1;
1213         SDL_CondSignal(is->continue_read_thread);
1214     }
1215 }
1216
1217 /* pause or resume the video */
1218 static void stream_toggle_pause(VideoState *is)
1219 {
1220     if (is->paused) {
1221         is->frame_timer += av_gettime() / 1000000.0 + is->video_current_pts_drift - is->video_current_pts;
1222         if (is->read_pause_return != AVERROR(ENOSYS)) {
1223             is->video_current_pts = is->video_current_pts_drift + av_gettime() / 1000000.0;
1224         }
1225         is->video_current_pts_drift = is->video_current_pts - av_gettime() / 1000000.0;
1226     }
1227     update_external_clock_pts(is, get_external_clock(is));
1228     is->paused = !is->paused;
1229 }
1230
1231 static void toggle_pause(VideoState *is)
1232 {
1233     stream_toggle_pause(is);
1234     is->step = 0;
1235 }
1236
1237 static void step_to_next_frame(VideoState *is)
1238 {
1239     /* if the stream is paused unpause it, then step */
1240     if (is->paused)
1241         stream_toggle_pause(is);
1242     is->step = 1;
1243 }
1244
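/* return the frame delay to use: when video is not the master clock, shorten or
   lengthen the delay so the video clock follows the master clock */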
1245 static double compute_target_delay(double delay, VideoState *is)
1246 {
1247     double sync_threshold, diff;
1248
1249     /* update delay to follow master synchronisation source */
1250     if (get_master_sync_type(is) != AV_SYNC_VIDEO_MASTER) {
1251         /* if video is slave, we try to correct big delays by
1252            duplicating or deleting a frame */
1253         diff = get_video_clock(is) - get_master_clock(is);
1254
1255         /* skip or repeat frame. We take into account the
1256            delay to compute the threshold. I still don't know
1257            if it is the best guess */
1258         sync_threshold = FFMAX(AV_SYNC_THRESHOLD, delay);
1259         if (!isnan(diff) && fabs(diff) < AV_NOSYNC_THRESHOLD) {
1260             if (diff <= -sync_threshold)
1261                 delay = 0;
1262             else if (diff >= sync_threshold)
1263                 delay = 2 * delay;
1264         }
1265     }
1266
1267     av_dlog(NULL, "video: delay=%0.3f A-V=%f\n",
1268             delay, -diff);
1269
1270     return delay;
1271 }
1272
1273 static void pictq_next_picture(VideoState *is) {
1274     /* update queue size and signal for next picture */
1275     if (++is->pictq_rindex == VIDEO_PICTURE_QUEUE_SIZE)
1276         is->pictq_rindex = 0;
1277
1278     SDL_LockMutex(is->pictq_mutex);
1279     is->pictq_size--;
1280     SDL_CondSignal(is->pictq_cond);
1281     SDL_UnlockMutex(is->pictq_mutex);
1282 }
1283
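/* step the read index back one picture so it can be redisplayed; only possible if
   the previous picture is still allocated and belongs to the current queue serial */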
1284 static int pictq_prev_picture(VideoState *is) {
1285     VideoPicture *prevvp;
1286     int ret = 0;
1287     /* update queue size and signal for the previous picture */
1288     prevvp = &is->pictq[(is->pictq_rindex + VIDEO_PICTURE_QUEUE_SIZE - 1) % VIDEO_PICTURE_QUEUE_SIZE];
1289     if (prevvp->allocated && prevvp->serial == is->videoq.serial) {
1290         SDL_LockMutex(is->pictq_mutex);
1291         if (is->pictq_size < VIDEO_PICTURE_QUEUE_SIZE - 1) {
1292             if (--is->pictq_rindex == -1)
1293                 is->pictq_rindex = VIDEO_PICTURE_QUEUE_SIZE - 1;
1294             is->pictq_size++;
1295             ret = 1;
1296         }
1297         SDL_CondSignal(is->pictq_cond);
1298         SDL_UnlockMutex(is->pictq_mutex);
1299     }
1300     return ret;
1301 }
1302
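/* record the pts of the picture being displayed and, if its serial is current,
   keep the external clock in sync with it */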
1303 static void update_video_pts(VideoState *is, double pts, int64_t pos, int serial) {
1304     double time = av_gettime() / 1000000.0;
1305     /* update current video pts */
1306     is->video_current_pts = pts;
1307     is->video_current_pts_drift = is->video_current_pts - time;
1308     is->video_current_pos = pos;
1309     is->frame_last_pts = pts;
1310     is->video_clock_serial = serial;
1311     if (is->videoq.serial == serial)
1312         check_external_clock_sync(is, is->video_current_pts);
1313 }
1314
1315 /* called to display each frame */
1316 static void video_refresh(void *opaque, double *remaining_time)
1317 {
1318     VideoState *is = opaque;
1319     VideoPicture *vp;
1320     double time;
1321
1322     SubPicture *sp, *sp2;
1323
1324     if (!is->paused && get_master_sync_type(is) == AV_SYNC_EXTERNAL_CLOCK && is->realtime)
1325         check_external_clock_speed(is);
1326
1327     if (!display_disable && is->show_mode != SHOW_MODE_VIDEO && is->audio_st) {
1328         time = av_gettime() / 1000000.0;
1329         if (is->force_refresh || is->last_vis_time + rdftspeed < time) {
1330             video_display(is);
1331             is->last_vis_time = time;
1332         }
1333         *remaining_time = FFMIN(*remaining_time, is->last_vis_time + rdftspeed - time);
1334     }
1335
1336     if (is->video_st) {
1337         int redisplay = 0;
1338         if (is->force_refresh)
1339             redisplay = pictq_prev_picture(is);
1340 retry:
1341         if (is->pictq_size == 0) {
1342             SDL_LockMutex(is->pictq_mutex);
1343             if (is->frame_last_dropped_pts != AV_NOPTS_VALUE && is->frame_last_dropped_pts > is->frame_last_pts) {
1344                 update_video_pts(is, is->frame_last_dropped_pts, is->frame_last_dropped_pos, is->frame_last_dropped_serial);
1345                 is->frame_last_dropped_pts = AV_NOPTS_VALUE;
1346             }
1347             SDL_UnlockMutex(is->pictq_mutex);
1348             // nothing to do, no picture to display in the queue
1349         } else {
1350             double last_duration, duration, delay;
1351             /* dequeue the picture */
1352             vp = &is->pictq[is->pictq_rindex];
1353
1354             if (vp->serial != is->videoq.serial) {
1355                 pictq_next_picture(is);
1356                 redisplay = 0;
1357                 goto retry;
1358             }
1359
1360             if (is->paused)
1361                 goto display;
1362
1363             /* compute nominal last_duration */
1364             last_duration = vp->pts - is->frame_last_pts;
1365             if (last_duration > 0 && last_duration < is->max_frame_duration) {
1366                 /* if duration of the last frame was sane, update last_duration in video state */
1367                 is->frame_last_duration = last_duration;
1368             }
1369             delay = compute_target_delay(is->frame_last_duration, is);
1370
1371             time= av_gettime()/1000000.0;
1372             if (time < is->frame_timer + delay) {
1373                 *remaining_time = FFMIN(is->frame_timer + delay - time, *remaining_time);
1374                 return;
1375             }
1376
1377             if (delay > 0)
1378                 is->frame_timer += delay * FFMAX(1, floor((time-is->frame_timer) / delay));
1379
1380             SDL_LockMutex(is->pictq_mutex);
1381             update_video_pts(is, vp->pts, vp->pos, vp->serial);
1382             SDL_UnlockMutex(is->pictq_mutex);
1383
1384             if (is->pictq_size > 1) {
1385                 VideoPicture *nextvp = &is->pictq[(is->pictq_rindex + 1) % VIDEO_PICTURE_QUEUE_SIZE];
1386                 duration = nextvp->pts - vp->pts;
1387                 if(!is->step && (redisplay || framedrop>0 || (framedrop && get_master_sync_type(is) != AV_SYNC_VIDEO_MASTER)) && time > is->frame_timer + duration){
1388                     if (!redisplay)
1389                         is->frame_drops_late++;
1390                     pictq_next_picture(is);
1391                     redisplay = 0;
1392                     goto retry;
1393                 }
1394             }
1395
1396             if (is->subtitle_st) {
1397                 if (is->subtitle_stream_changed) {
1398                     SDL_LockMutex(is->subpq_mutex);
1399
1400                     while (is->subpq_size) {
1401                         free_subpicture(&is->subpq[is->subpq_rindex]);
1402
1403                         /* update queue size and signal for next picture */
1404                         if (++is->subpq_rindex == SUBPICTURE_QUEUE_SIZE)
1405                             is->subpq_rindex = 0;
1406
1407                         is->subpq_size--;
1408                     }
1409                     is->subtitle_stream_changed = 0;
1410
1411                     SDL_CondSignal(is->subpq_cond);
1412                     SDL_UnlockMutex(is->subpq_mutex);
1413                 } else {
1414                     if (is->subpq_size > 0) {
1415                         sp = &is->subpq[is->subpq_rindex];
1416
1417                         if (is->subpq_size > 1)
1418                             sp2 = &is->subpq[(is->subpq_rindex + 1) % SUBPICTURE_QUEUE_SIZE];
1419                         else
1420                             sp2 = NULL;
1421
1422                         if ((is->video_current_pts > (sp->pts + ((float) sp->sub.end_display_time / 1000)))
1423                                 || (sp2 && is->video_current_pts > (sp2->pts + ((float) sp2->sub.start_display_time / 1000))))
1424                         {
1425                             free_subpicture(sp);
1426
1427                             /* update queue size and signal for next picture */
1428                             if (++is->subpq_rindex == SUBPICTURE_QUEUE_SIZE)
1429                                 is->subpq_rindex = 0;
1430
1431                             SDL_LockMutex(is->subpq_mutex);
1432                             is->subpq_size--;
1433                             SDL_CondSignal(is->subpq_cond);
1434                             SDL_UnlockMutex(is->subpq_mutex);
1435                         }
1436                     }
1437                 }
1438             }
1439
1440 display:
1441             /* display picture */
1442             if (!display_disable && is->show_mode == SHOW_MODE_VIDEO)
1443                 video_display(is);
1444
1445             pictq_next_picture(is);
1446
1447             if (is->step && !is->paused)
1448                 stream_toggle_pause(is);
1449         }
1450     }
1451     is->force_refresh = 0;
1452     if (show_status) {
1453         static int64_t last_time;
1454         int64_t cur_time;
1455         int aqsize, vqsize, sqsize;
1456         double av_diff;
1457
1458         cur_time = av_gettime();
1459         if (!last_time || (cur_time - last_time) >= 30000) {
1460             aqsize = 0;
1461             vqsize = 0;
1462             sqsize = 0;
1463             if (is->audio_st)
1464                 aqsize = is->audioq.size;
1465             if (is->video_st)
1466                 vqsize = is->videoq.size;
1467             if (is->subtitle_st)
1468                 sqsize = is->subtitleq.size;
1469             av_diff = 0;
1470             if (is->audio_st && is->video_st)
1471                 av_diff = get_audio_clock(is) - get_video_clock(is);
1472             printf("%7.2f A-V:%7.3f fd=%4d aq=%5dKB vq=%5dKB sq=%5dB f=%"PRId64"/%"PRId64"   \r",
1473                    get_master_clock(is),
1474                    av_diff,
1475                    is->frame_drops_early + is->frame_drops_late,
1476                    aqsize / 1024,
1477                    vqsize / 1024,
1478                    sqsize,
1479                    is->video_st ? is->video_st->codec->pts_correction_num_faulty_dts : 0,
1480                    is->video_st ? is->video_st->codec->pts_correction_num_faulty_pts : 0);
1481             fflush(stdout);
1482             last_time = cur_time;
1483         }
1484     }
1485 }
1486
1487 /* allocate a picture (this needs to be done in the main thread to avoid
1488    potential locking problems) */
1489 static void alloc_picture(VideoState *is)
1490 {
1491     VideoPicture *vp;
1492
1493     vp = &is->pictq[is->pictq_windex];
1494
1495     if (vp->bmp)
1496         SDL_FreeYUVOverlay(vp->bmp);
1497
1498     video_open(is, 0, vp);
1499
1500     vp->bmp = SDL_CreateYUVOverlay(vp->width, vp->height,
1501                                    SDL_YV12_OVERLAY,
1502                                    screen);
1503     if (!vp->bmp || vp->bmp->pitches[0] < vp->width) {
1504         /* SDL allocates a buffer smaller than requested if the video
1505          * overlay hardware is unable to support the requested size. */
1506         fprintf(stderr, "Error: the video system does not support an image\n"
1507                         "size of %dx%d pixels. Try using -lowres or -vf \"scale=w:h\"\n"
1508                         "to reduce the image size.\n", vp->width, vp->height );
1509         do_exit(is);
1510     }
1511
1512     SDL_LockMutex(is->pictq_mutex);
1513     vp->allocated = 1;
1514     SDL_CondSignal(is->pictq_cond);
1515     SDL_UnlockMutex(is->pictq_mutex);
1516 }
1517
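/* when the overlay pitch is larger than the image width, copy the rightmost pixel
   of each line into the first padding column (workaround for SDL pitch issues) */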
1518 static void duplicate_right_border_pixels(SDL_Overlay *bmp) {
1519     int i, width, height;
1520     Uint8 *p, *maxp;
1521     for (i = 0; i < 3; i++) {
1522         width  = bmp->w;
1523         height = bmp->h;
1524         if (i > 0) {
1525             width  >>= 1;
1526             height >>= 1;
1527         }
1528         if (bmp->pitches[i] > width) {
1529             maxp = bmp->pixels[i] + bmp->pitches[i] * height - 1;
1530             for (p = bmp->pixels[i] + width - 1; p < maxp; p += bmp->pitches[i])
1531                 *(p+1) = *p;
1532         }
1533     }
1534 }
1535
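/* copy the decoded frame into the next free picture of the queue, asking the main
   thread (via FF_ALLOC_EVENT) to (re)allocate the overlay when needed */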
1536 static int queue_picture(VideoState *is, AVFrame *src_frame, double pts, int64_t pos, int serial)
1537 {
1538     VideoPicture *vp;
1539
1540 #if defined(DEBUG_SYNC) && 0
1541     printf("frame_type=%c pts=%0.3f\n",
1542            av_get_picture_type_char(src_frame->pict_type), pts);
1543 #endif
1544
1545     /* wait until we have space to put a new picture */
1546     SDL_LockMutex(is->pictq_mutex);
1547
1548     /* keep the last already displayed picture in the queue */
1549     while (is->pictq_size >= VIDEO_PICTURE_QUEUE_SIZE - 2 &&
1550            !is->videoq.abort_request) {
1551         SDL_CondWait(is->pictq_cond, is->pictq_mutex);
1552     }
1553     SDL_UnlockMutex(is->pictq_mutex);
1554
1555     if (is->videoq.abort_request)
1556         return -1;
1557
1558     vp = &is->pictq[is->pictq_windex];
1559
1560 #if CONFIG_AVFILTER
1561     vp->sar = src_frame->sample_aspect_ratio;
1562 #else
1563     vp->sar = av_guess_sample_aspect_ratio(is->ic, is->video_st, src_frame);
1564 #endif
1565
1566     /* alloc or resize hardware picture buffer */
1567     if (!vp->bmp || vp->reallocate || !vp->allocated ||
1568         vp->width  != src_frame->width ||
1569         vp->height != src_frame->height) {
1570         SDL_Event event;
1571
1572         vp->allocated  = 0;
1573         vp->reallocate = 0;
1574         vp->width = src_frame->width;
1575         vp->height = src_frame->height;
1576
1577         /* the allocation must be done in the main thread to avoid
1578            locking problems. */
1579         event.type = FF_ALLOC_EVENT;
1580         event.user.data1 = is;
1581         SDL_PushEvent(&event);
1582
1583         /* wait until the picture is allocated */
1584         SDL_LockMutex(is->pictq_mutex);
1585         while (!vp->allocated && !is->videoq.abort_request) {
1586             SDL_CondWait(is->pictq_cond, is->pictq_mutex);
1587         }
1588         /* if the queue is aborted, we have to pop the pending ALLOC event or wait for the allocation to complete */
1589         if (is->videoq.abort_request && SDL_PeepEvents(&event, 1, SDL_GETEVENT, SDL_EVENTMASK(FF_ALLOC_EVENT)) != 1) {
1590             while (!vp->allocated) {
1591                 SDL_CondWait(is->pictq_cond, is->pictq_mutex);
1592             }
1593         }
1594         SDL_UnlockMutex(is->pictq_mutex);
1595
1596         if (is->videoq.abort_request)
1597             return -1;
1598     }
1599
1600     /* if the frame is not skipped, then display it */
1601     if (vp->bmp) {
1602         AVPicture pict = { { 0 } };
1603
1604         /* get a pointer on the bitmap */
1605         SDL_LockYUVOverlay (vp->bmp);
1606
1607         pict.data[0] = vp->bmp->pixels[0];
1608         pict.data[1] = vp->bmp->pixels[2];
1609         pict.data[2] = vp->bmp->pixels[1];
1610
1611         pict.linesize[0] = vp->bmp->pitches[0];
1612         pict.linesize[1] = vp->bmp->pitches[2];
1613         pict.linesize[2] = vp->bmp->pitches[1];
1614
1615 #if CONFIG_AVFILTER
1616         // FIXME use direct rendering
1617         av_picture_copy(&pict, (AVPicture *)src_frame,
1618                         src_frame->format, vp->width, vp->height);
1619 #else
1620         av_opt_get_int(sws_opts, "sws_flags", 0, &sws_flags);
1621         is->img_convert_ctx = sws_getCachedContext(is->img_convert_ctx,
1622             vp->width, vp->height, src_frame->format, vp->width, vp->height,
1623             AV_PIX_FMT_YUV420P, sws_flags, NULL, NULL, NULL);
1624         if (is->img_convert_ctx == NULL) {
1625             fprintf(stderr, "Cannot initialize the conversion context\n");
1626             exit(1);
1627         }
1628         sws_scale(is->img_convert_ctx, src_frame->data, src_frame->linesize,
1629                   0, vp->height, pict.data, pict.linesize);
1630 #endif
1631         /* work around the SDL PITCH_WORKAROUND */
1632         duplicate_right_border_pixels(vp->bmp);
1633         /* update the bitmap content */
1634         SDL_UnlockYUVOverlay(vp->bmp);
1635
1636         vp->pts = pts;
1637         vp->pos = pos;
1638         vp->serial = serial;
1639
1640         /* now we can update the picture count */
1641         if (++is->pictq_windex == VIDEO_PICTURE_QUEUE_SIZE)
1642             is->pictq_windex = 0;
1643         SDL_LockMutex(is->pictq_mutex);
1644         is->pictq_size++;
1645         SDL_UnlockMutex(is->pictq_mutex);
1646     }
1647     return 0;
1648 }
1649
1650 static int get_video_frame(VideoState *is, AVFrame *frame, AVPacket *pkt, int *serial)
1651 {
1652     int got_picture;
1653
1654     if (packet_queue_get(&is->videoq, pkt, 1, serial) < 0)
1655         return -1;
1656
1657     if (pkt->data == flush_pkt.data) {
1658         avcodec_flush_buffers(is->video_st->codec);
1659
1660         SDL_LockMutex(is->pictq_mutex);
1661         // Make sure there are no long delay timers (ideally we should just flush the queue but that's harder)
1662         while (is->pictq_size && !is->videoq.abort_request) {
1663             SDL_CondWait(is->pictq_cond, is->pictq_mutex);
1664         }
1665         is->video_current_pos = -1;
1666         is->frame_last_pts = AV_NOPTS_VALUE;
1667         is->frame_last_duration = 0;
1668         is->frame_timer = (double)av_gettime() / 1000000.0;
1669         is->frame_last_dropped_pts = AV_NOPTS_VALUE;
1670         SDL_UnlockMutex(is->pictq_mutex);
1671         return 0;
1672     }
1673
1674     if(avcodec_decode_video2(is->video_st->codec, frame, &got_picture, pkt) < 0)
1675         return 0;
1676
1677     if (got_picture) {
1678         int ret = 1;
1679
1680         if (decoder_reorder_pts == -1) {
1681             frame->pts = av_frame_get_best_effort_timestamp(frame);
1682         } else if (decoder_reorder_pts) {
1683             frame->pts = frame->pkt_pts;
1684         } else {
1685             frame->pts = frame->pkt_dts;
1686         }
1687
1688         if (frame->pts == AV_NOPTS_VALUE) {
1689             frame->pts = 0;
1690         }
1691
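        /* early frame drop: if video is not the master clock and, even after
           advancing by this frame's pts difference and the expected filter delay,
           the video clock would still be behind the master, skip the frame before
           it is ever filtered or queued */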
1692         if (framedrop>0 || (framedrop && get_master_sync_type(is) != AV_SYNC_VIDEO_MASTER)) {
1693             SDL_LockMutex(is->pictq_mutex);
1694             if (is->frame_last_pts != AV_NOPTS_VALUE && frame->pts) {
1695                 double clockdiff = get_video_clock(is) - get_master_clock(is);
1696                 double dpts = av_q2d(is->video_st->time_base) * frame->pts;
1697                 double ptsdiff = dpts - is->frame_last_pts;
1698                 if (!isnan(clockdiff) && fabs(clockdiff) < AV_NOSYNC_THRESHOLD &&
1699                      ptsdiff > 0 && ptsdiff < AV_NOSYNC_THRESHOLD &&
1700                      clockdiff + ptsdiff - is->frame_last_filter_delay < 0) {
1701                     is->frame_last_dropped_pos = pkt->pos;
1702                     is->frame_last_dropped_pts = dpts;
1703                     is->frame_last_dropped_serial = *serial;
1704                     is->frame_drops_early++;
1705                     av_frame_unref(frame);
1706                     ret = 0;
1707                 }
1708             }
1709             SDL_UnlockMutex(is->pictq_mutex);
1710         }
1711
1712         return ret;
1713     }
1714     return 0;
1715 }
1716
1717 #if CONFIG_AVFILTER
1718 static int configure_filtergraph(AVFilterGraph *graph, const char *filtergraph,
1719                                  AVFilterContext *source_ctx, AVFilterContext *sink_ctx)
1720 {
1721     int ret;
1722     AVFilterInOut *outputs = NULL, *inputs = NULL;
1723
1724     if (filtergraph) {
1725         outputs = avfilter_inout_alloc();
1726         inputs  = avfilter_inout_alloc();
1727         if (!outputs || !inputs) {
1728             ret = AVERROR(ENOMEM);
1729             goto fail;
1730         }
1731
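        /* the "in"/"out" labels are from the filter string's point of view:
           its input pad is fed by our buffer source, its output pad feeds our sink */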
1732         outputs->name       = av_strdup("in");
1733         outputs->filter_ctx = source_ctx;
1734         outputs->pad_idx    = 0;
1735         outputs->next       = NULL;
1736
1737         inputs->name        = av_strdup("out");
1738         inputs->filter_ctx  = sink_ctx;
1739         inputs->pad_idx     = 0;
1740         inputs->next        = NULL;
1741
1742         if ((ret = avfilter_graph_parse(graph, filtergraph, &inputs, &outputs, NULL)) < 0)
1743             goto fail;
1744     } else {
1745         if ((ret = avfilter_link(source_ctx, 0, sink_ctx, 0)) < 0)
1746             goto fail;
1747     }
1748
1749     ret = avfilter_graph_config(graph, NULL);
1750 fail:
1751     avfilter_inout_free(&outputs);
1752     avfilter_inout_free(&inputs);
1753     return ret;
1754 }
1755
1756 static int configure_video_filters(AVFilterGraph *graph, VideoState *is, const char *vfilters, AVFrame *frame)
1757 {
1758     static const enum AVPixelFormat pix_fmts[] = { AV_PIX_FMT_YUV420P, AV_PIX_FMT_NONE };
1759     char sws_flags_str[128];
1760     char buffersrc_args[256];
1761     int ret;
1762     AVBufferSinkParams *buffersink_params = av_buffersink_params_alloc();
1763     AVFilterContext *filt_src = NULL, *filt_out = NULL, *filt_crop;
1764     AVCodecContext *codec = is->video_st->codec;
1765     AVRational fr = av_guess_frame_rate(is->ic, is->video_st, NULL);
1766
1767     if (!buffersink_params)
1768         return AVERROR(ENOMEM);
1769
1770     av_opt_get_int(sws_opts, "sws_flags", 0, &sws_flags);
1771     snprintf(sws_flags_str, sizeof(sws_flags_str), "flags=%"PRId64, sws_flags);
1772     graph->scale_sws_opts = av_strdup(sws_flags_str);
1773
1774     snprintf(buffersrc_args, sizeof(buffersrc_args),
1775              "video_size=%dx%d:pix_fmt=%d:time_base=%d/%d:pixel_aspect=%d/%d",
1776              frame->width, frame->height, frame->format,
1777              is->video_st->time_base.num, is->video_st->time_base.den,
1778              codec->sample_aspect_ratio.num, FFMAX(codec->sample_aspect_ratio.den, 1));
1779     if (fr.num && fr.den)
1780         av_strlcatf(buffersrc_args, sizeof(buffersrc_args), ":frame_rate=%d/%d", fr.num, fr.den);
1781
1782     if ((ret = avfilter_graph_create_filter(&filt_src,
1783                                             avfilter_get_by_name("buffer"),
1784                                             "ffplay_buffer", buffersrc_args, NULL,
1785                                             graph)) < 0)
1786         goto fail;
1787
1788     buffersink_params->pixel_fmts = pix_fmts;
1789     ret = avfilter_graph_create_filter(&filt_out,
1790                                        avfilter_get_by_name("buffersink"),
1791                                        "ffplay_buffersink", NULL, buffersink_params, graph);
1792     if (ret < 0)
1793         goto fail;
1794
1795         /* The SDL YUV code does not handle odd width/height for some driver
1796          * combinations, therefore we crop the picture to an even width/height. */
1797     if ((ret = avfilter_graph_create_filter(&filt_crop,
1798                                             avfilter_get_by_name("crop"),
1799                                             "ffplay_crop", "floor(in_w/2)*2:floor(in_h/2)*2", NULL, graph)) < 0)
1800         goto fail;
1801     if ((ret = avfilter_link(filt_crop, 0, filt_out, 0)) < 0)
1802         goto fail;
1803
1804     if ((ret = configure_filtergraph(graph, vfilters, filt_src, filt_crop)) < 0)
1805         goto fail;
1806
1807     is->in_video_filter  = filt_src;
1808     is->out_video_filter = filt_out;
1809
1810 fail:
1811     av_freep(&buffersink_params);
1812     return ret;
1813 }
1814
1815 static int configure_audio_filters(VideoState *is, const char *afilters, int force_output_format)
1816 {
1817     static const enum AVSampleFormat sample_fmts[] = { AV_SAMPLE_FMT_S16, AV_SAMPLE_FMT_NONE };
1818     int sample_rates[2] = { 0, -1 };
1819     int64_t channel_layouts[2] = { 0, -1 };
1820     int channels[2] = { 0, -1 };
1821     AVFilterContext *filt_asrc = NULL, *filt_asink = NULL;
1822     char asrc_args[256];
1823     AVABufferSinkParams *asink_params = NULL;
1824     int ret;
1825
1826     avfilter_graph_free(&is->agraph);
1827     if (!(is->agraph = avfilter_graph_alloc()))
1828         return AVERROR(ENOMEM);
1829
1830     ret = snprintf(asrc_args, sizeof(asrc_args),
1831                    "sample_rate=%d:sample_fmt=%s:channels=%d:time_base=%d/%d",
1832                    is->audio_filter_src.freq, av_get_sample_fmt_name(is->audio_filter_src.fmt),
1833                    is->audio_filter_src.channels,
1834                    1, is->audio_filter_src.freq);
1835     if (is->audio_filter_src.channel_layout)
1836         snprintf(asrc_args + ret, sizeof(asrc_args) - ret,
1837                  ":channel_layout=0x%"PRIx64,  is->audio_filter_src.channel_layout);
1838
1839     ret = avfilter_graph_create_filter(&filt_asrc,
1840                                        avfilter_get_by_name("abuffer"), "ffplay_abuffer",
1841                                        asrc_args, NULL, is->agraph);
1842     if (ret < 0)
1843         goto end;
1844
1845     if (!(asink_params = av_abuffersink_params_alloc())) {
1846         ret = AVERROR(ENOMEM);
1847         goto end;
1848     }
1849     asink_params->sample_fmts = sample_fmts;
1850
1851     asink_params->all_channel_counts = 1;
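    /* when forcing, pin the sink to the parameters of the already opened audio
       device, so the filtered output should need no further conversion */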
1852     if (force_output_format) {
1853         channel_layouts[0] = is->audio_tgt.channel_layout;
1854         asink_params->channel_layouts = channel_layouts;
1855         asink_params->all_channel_counts = 0;
1856         channels[0] = is->audio_tgt.channels;
1857         asink_params->channel_counts = channels;
1858         asink_params->all_channel_counts = 0;
1859         sample_rates[0] = is->audio_tgt.freq;
1860         asink_params->sample_rates = sample_rates;
1861     }
1862
1863     ret = avfilter_graph_create_filter(&filt_asink,
1864                                        avfilter_get_by_name("abuffersink"), "ffplay_abuffersink",
1865                                        NULL, asink_params, is->agraph);
1866     if (ret < 0)
1867         goto end;
1868
1869     if ((ret = configure_filtergraph(is->agraph, afilters, filt_asrc, filt_asink)) < 0)
1870         goto end;
1871
1872     is->in_audio_filter  = filt_asrc;
1873     is->out_audio_filter = filt_asink;
1874
1875 end:
1876     av_freep(&asink_params);
1877     if (ret < 0)
1878         avfilter_graph_free(&is->agraph);
1879     return ret;
1880 }
1881 #endif  /* CONFIG_AVFILTER */
1882
1883 static int video_thread(void *arg)
1884 {
1885     AVPacket pkt = { 0 };
1886     VideoState *is = arg;
1887     AVFrame *frame = av_frame_alloc();
1888     double pts;
1889     int ret;
1890     int serial = 0;
1891
1892 #if CONFIG_AVFILTER
1893     AVFilterGraph *graph = avfilter_graph_alloc();
1894     AVFilterContext *filt_out = NULL, *filt_in = NULL;
1895     int last_w = 0;
1896     int last_h = 0;
1897     enum AVPixelFormat last_format = -2;
1898     int last_serial = -1;
1899 #endif
1900
1901     for (;;) {
1902         while (is->paused && !is->videoq.abort_request)
1903             SDL_Delay(10);
1904
1905         avcodec_get_frame_defaults(frame);
1906         av_free_packet(&pkt);
1907
1908         ret = get_video_frame(is, frame, &pkt, &serial);
1909         if (ret < 0)
1910             goto the_end;
1911         if (!ret)
1912             continue;
1913
1914 #if CONFIG_AVFILTER
1915         if (   last_w != frame->width
1916             || last_h != frame->height
1917             || last_format != frame->format
1918             || last_serial != serial) {
1919             av_log(NULL, AV_LOG_DEBUG,
1920                    "Video frame changed from size:%dx%d format:%s serial:%d to size:%dx%d format:%s serial:%d\n",
1921                    last_w, last_h,
1922                    (const char *)av_x_if_null(av_get_pix_fmt_name(last_format), "none"), last_serial,
1923                    frame->width, frame->height,
1924                    (const char *)av_x_if_null(av_get_pix_fmt_name(frame->format), "none"), serial);
1925             avfilter_graph_free(&graph);
1926             graph = avfilter_graph_alloc();
1927             if ((ret = configure_video_filters(graph, is, vfilters, frame)) < 0) {
1928                 SDL_Event event;
1929                 event.type = FF_QUIT_EVENT;
1930                 event.user.data1 = is;
1931                 SDL_PushEvent(&event);
1932                 av_free_packet(&pkt);
1933                 goto the_end;
1934             }
1935             filt_in  = is->in_video_filter;
1936             filt_out = is->out_video_filter;
1937             last_w = frame->width;
1938             last_h = frame->height;
1939             last_format = frame->format;
1940             last_serial = serial;
1941         }
1942
1943         frame->sample_aspect_ratio = av_guess_sample_aspect_ratio(is->ic, is->video_st, frame);
1944         ret = av_buffersrc_add_frame(filt_in, frame);
1945         if (ret < 0)
1946             goto the_end;
1947         av_frame_unref(frame);
1948         avcodec_get_frame_defaults(frame);
1949         av_free_packet(&pkt);
1950
1951         while (ret >= 0) {
1952             is->frame_last_returned_time = av_gettime() / 1000000.0;
1953
1954             ret = av_buffersink_get_frame_flags(filt_out, frame, 0);
1955             if (ret < 0) {
1956                 ret = 0;
1957                 break;
1958             }
1959
1960             is->frame_last_filter_delay = av_gettime() / 1000000.0 - is->frame_last_returned_time;
1961             if (fabs(is->frame_last_filter_delay) > AV_NOSYNC_THRESHOLD / 10.0)
1962                 is->frame_last_filter_delay = 0;
1963
1964             pts = frame->pts * av_q2d(filt_out->inputs[0]->time_base);
1965             ret = queue_picture(is, frame, pts, av_frame_get_pkt_pos(frame), serial);
1966             av_frame_unref(frame);
1967         }
1968 #else
1969         pts = frame->pts * av_q2d(is->video_st->time_base);
1970         ret = queue_picture(is, frame, pts, pkt.pos, serial);
1971         av_frame_unref(frame);
1972 #endif
1973
1974         if (ret < 0)
1975             goto the_end;
1976     }
1977  the_end:
1978     avcodec_flush_buffers(is->video_st->codec);
1979 #if CONFIG_AVFILTER
1980     avfilter_graph_free(&graph);
1981 #endif
1982     av_free_packet(&pkt);
1983     av_frame_free(&frame);
1984     return 0;
1985 }
1986
1987 static int subtitle_thread(void *arg)
1988 {
1989     VideoState *is = arg;
1990     SubPicture *sp;
1991     AVPacket pkt1, *pkt = &pkt1;
1992     int got_subtitle;
1993     double pts;
1994     int i, j;
1995     int r, g, b, y, u, v, a;
1996
1997     for (;;) {
1998         while (is->paused && !is->subtitleq.abort_request) {
1999             SDL_Delay(10);
2000         }
2001         if (packet_queue_get(&is->subtitleq, pkt, 1, NULL) < 0)
2002             break;
2003
2004         if (pkt->data == flush_pkt.data) {
2005             avcodec_flush_buffers(is->subtitle_st->codec);
2006             continue;
2007         }
2008         SDL_LockMutex(is->subpq_mutex);
2009         while (is->subpq_size >= SUBPICTURE_QUEUE_SIZE &&
2010                !is->subtitleq.abort_request) {
2011             SDL_CondWait(is->subpq_cond, is->subpq_mutex);
2012         }
2013         SDL_UnlockMutex(is->subpq_mutex);
2014
2015         if (is->subtitleq.abort_request)
2016             return 0;
2017
2018         sp = &is->subpq[is->subpq_windex];
2019
2020         /* NOTE: pts is the PTS of the _first_ picture beginning in
2021            this packet, if any */
2022         pts = 0;
2023         if (pkt->pts != AV_NOPTS_VALUE)
2024             pts = av_q2d(is->subtitle_st->time_base) * pkt->pts;
2025
2026         avcodec_decode_subtitle2(is->subtitle_st->codec, &sp->sub,
2027                                  &got_subtitle, pkt);
2028         if (got_subtitle && sp->sub.format == 0) {
2029             if (sp->sub.pts != AV_NOPTS_VALUE)
2030                 pts = sp->sub.pts / (double)AV_TIME_BASE;
2031             sp->pts = pts;
2032
2033             for (i = 0; i < sp->sub.num_rects; i++)
2034             {
2035                 for (j = 0; j < sp->sub.rects[i]->nb_colors; j++)
2036                 {
2037                     RGBA_IN(r, g, b, a, (uint32_t*)sp->sub.rects[i]->pict.data[1] + j);
2038                     y = RGB_TO_Y_CCIR(r, g, b);
2039                     u = RGB_TO_U_CCIR(r, g, b, 0);
2040                     v = RGB_TO_V_CCIR(r, g, b, 0);
2041                     YUVA_OUT((uint32_t*)sp->sub.rects[i]->pict.data[1] + j, y, u, v, a);
2042                 }
2043             }
2044
2045             /* now we can update the picture count */
2046             if (++is->subpq_windex == SUBPICTURE_QUEUE_SIZE)
2047                 is->subpq_windex = 0;
2048             SDL_LockMutex(is->subpq_mutex);
2049             is->subpq_size++;
2050             SDL_UnlockMutex(is->subpq_mutex);
2051         }
2052         av_free_packet(pkt);
2053     }
2054     return 0;
2055 }
2056
2057 /* copy samples for viewing in the audio display window */
2058 static void update_sample_display(VideoState *is, short *samples, int samples_size)
2059 {
2060     int size, len;
2061
2062     size = samples_size / sizeof(short);
2063     while (size > 0) {
2064         len = SAMPLE_ARRAY_SIZE - is->sample_array_index;
2065         if (len > size)
2066             len = size;
2067         memcpy(is->sample_array + is->sample_array_index, samples, len * sizeof(short));
2068         samples += len;
2069         is->sample_array_index += len;
2070         if (is->sample_array_index >= SAMPLE_ARRAY_SIZE)
2071             is->sample_array_index = 0;
2072         size -= len;
2073     }
2074 }
2075
2076 /* return the wanted number of samples to get better sync if the sync type is video
2077  * or the external master clock */
2078 static int synchronize_audio(VideoState *is, int nb_samples)
2079 {
2080     int wanted_nb_samples = nb_samples;
2081
2082     /* if not master, then we try to remove or add samples to correct the clock */
2083     if (get_master_sync_type(is) != AV_SYNC_AUDIO_MASTER) {
2084         double diff, avg_diff;
2085         int min_nb_samples, max_nb_samples;
2086
2087         diff = get_audio_clock(is) - get_master_clock(is);
2088
2089         if (!isnan(diff) && fabs(diff) < AV_NOSYNC_THRESHOLD) {
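            /* audio_diff_cum is an exponentially weighted accumulator
               (cum = diff + coef * cum); multiplying it by (1 - coef) below gives
               a moving average over roughly the last AUDIO_DIFF_AVG_NB differences,
               since coef = exp(log(0.01) / AUDIO_DIFF_AVG_NB) */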
2090             is->audio_diff_cum = diff + is->audio_diff_avg_coef * is->audio_diff_cum;
2091             if (is->audio_diff_avg_count < AUDIO_DIFF_AVG_NB) {
2092                 /* not enough measurements to have a correct estimate */
2093                 is->audio_diff_avg_count++;
2094             } else {
2095                 /* estimate the A-V difference */
2096                 avg_diff = is->audio_diff_cum * (1.0 - is->audio_diff_avg_coef);
2097
2098                 if (fabs(avg_diff) >= is->audio_diff_threshold) {
2099                     wanted_nb_samples = nb_samples + (int)(diff * is->audio_src.freq);
2100                     min_nb_samples = ((nb_samples * (100 - SAMPLE_CORRECTION_PERCENT_MAX) / 100));
2101                     max_nb_samples = ((nb_samples * (100 + SAMPLE_CORRECTION_PERCENT_MAX) / 100));
2102                     wanted_nb_samples = FFMIN(FFMAX(wanted_nb_samples, min_nb_samples), max_nb_samples);
2103                 }
2104                 av_dlog(NULL, "diff=%f adiff=%f sample_diff=%d apts=%0.3f %f\n",
2105                         diff, avg_diff, wanted_nb_samples - nb_samples,
2106                         is->audio_clock, is->audio_diff_threshold);
2107             }
2108         } else {
2109             /* too big a difference: may be due to initial PTS errors, so
2110                reset the A-V filter */
2111             is->audio_diff_avg_count = 0;
2112             is->audio_diff_cum       = 0;
2113         }
2114     }
2115
2116     return wanted_nb_samples;
2117 }
2118
2119 /**
2120  * Decode one audio frame and return its uncompressed size.
2121  *
2122  * The processed audio frame is decoded, converted if required, and
2123  * stored in is->audio_buf, with size in bytes given by the return
2124  * value.
2125  */
2126 static int audio_decode_frame(VideoState *is)
2127 {
2128     AVPacket *pkt_temp = &is->audio_pkt_temp;
2129     AVPacket *pkt = &is->audio_pkt;
2130     AVCodecContext *dec = is->audio_st->codec;
2131     int len1, data_size, resampled_data_size;
2132     int64_t dec_channel_layout;
2133     int got_frame;
2134     av_unused double audio_clock0;
2135     int new_packet = 0;
2136     int flush_complete = 0;
2137     int wanted_nb_samples;
2138     AVRational tb;
2139     int ret;
2140     int reconfigure;
2141
2142     for (;;) {
2143         /* NOTE: the audio packet can contain several frames */
2144         while (pkt_temp->size > 0 || (!pkt_temp->data && new_packet) || is->audio_buf_frames_pending) {
2145             if (!is->frame) {
2146                 if (!(is->frame = avcodec_alloc_frame()))
2147                     return AVERROR(ENOMEM);
2148             } else {
2149                 av_frame_unref(is->frame);
2150                 avcodec_get_frame_defaults(is->frame);
2151             }
2152
2153             if (is->audioq.serial != is->audio_pkt_temp_serial)
2154                 break;
2155
2156             if (is->paused)
2157                 return -1;
2158
2159             if (!is->audio_buf_frames_pending) {
2160                 if (flush_complete)
2161                     break;
2162                 new_packet = 0;
2163                 len1 = avcodec_decode_audio4(dec, is->frame, &got_frame, pkt_temp);
2164                 if (len1 < 0) {
2165                     /* if error, we skip the frame */
2166                     pkt_temp->size = 0;
2167                     break;
2168                 }
2169
2170                 pkt_temp->data += len1;
2171                 pkt_temp->size -= len1;
2172
2173                 if (!got_frame) {
2174                     /* stop sending empty packets if the decoder is finished */
2175                     if (!pkt_temp->data && dec->codec->capabilities & CODEC_CAP_DELAY)
2176                         flush_complete = 1;
2177                     continue;
2178                 }
2179
2180                 tb = (AVRational){1, is->frame->sample_rate};
2181                 if (is->frame->pts != AV_NOPTS_VALUE)
2182                     is->frame->pts = av_rescale_q(is->frame->pts, dec->time_base, tb);
2183                 if (is->frame->pts == AV_NOPTS_VALUE && pkt_temp->pts != AV_NOPTS_VALUE)
2184                     is->frame->pts = av_rescale_q(pkt_temp->pts, is->audio_st->time_base, tb);
2185                 if (pkt_temp->pts != AV_NOPTS_VALUE)
2186                     pkt_temp->pts += (double) is->frame->nb_samples / is->frame->sample_rate / av_q2d(is->audio_st->time_base);
2187
2188 #if CONFIG_AVFILTER
2189                 dec_channel_layout = get_valid_channel_layout(is->frame->channel_layout, av_frame_get_channels(is->frame));
2190
2191                 reconfigure =
2192                     cmp_audio_fmts(is->audio_filter_src.fmt, is->audio_filter_src.channels,
2193                                    is->frame->format, av_frame_get_channels(is->frame))    ||
2194                     is->audio_filter_src.channel_layout != dec_channel_layout ||
2195                     is->audio_filter_src.freq           != is->frame->sample_rate ||
2196                     is->audio_pkt_temp_serial           != is->audio_last_serial;
2197
2198                 if (reconfigure) {
2199                     char buf1[1024], buf2[1024];
2200                     av_get_channel_layout_string(buf1, sizeof(buf1), -1, is->audio_filter_src.channel_layout);
2201                     av_get_channel_layout_string(buf2, sizeof(buf2), -1, dec_channel_layout);
2202                     av_log(NULL, AV_LOG_DEBUG,
2203                            "Audio frame changed from rate:%d ch:%d fmt:%s layout:%s serial:%d to rate:%d ch:%d fmt:%s layout:%s serial:%d\n",
2204                            is->audio_filter_src.freq, is->audio_filter_src.channels, av_get_sample_fmt_name(is->audio_filter_src.fmt), buf1, is->audio_last_serial,
2205                            is->frame->sample_rate, av_frame_get_channels(is->frame), av_get_sample_fmt_name(is->frame->format), buf2, is->audio_pkt_temp_serial);
2206
2207                     is->audio_filter_src.fmt            = is->frame->format;
2208                     is->audio_filter_src.channels       = av_frame_get_channels(is->frame);
2209                     is->audio_filter_src.channel_layout = dec_channel_layout;
2210                     is->audio_filter_src.freq           = is->frame->sample_rate;
2211                     is->audio_last_serial               = is->audio_pkt_temp_serial;
2212
2213                     if ((ret = configure_audio_filters(is, afilters, 1)) < 0)
2214                         return ret;
2215                 }
2216
2217                 if ((ret = av_buffersrc_add_frame(is->in_audio_filter, is->frame)) < 0)
2218                     return ret;
2219                 av_frame_unref(is->frame);
2220 #endif
2221             }
2222 #if CONFIG_AVFILTER
2223             if ((ret = av_buffersink_get_frame_flags(is->out_audio_filter, is->frame, 0)) < 0) {
2224                 if (ret == AVERROR(EAGAIN)) {
2225                     is->audio_buf_frames_pending = 0;
2226                     continue;
2227                 }
2228                 return ret;
2229             }
2230             is->audio_buf_frames_pending = 1;
2231             tb = is->out_audio_filter->inputs[0]->time_base;
2232 #endif
2233
2234             data_size = av_samples_get_buffer_size(NULL, av_frame_get_channels(is->frame),
2235                                                    is->frame->nb_samples,
2236                                                    is->frame->format, 1);
2237
2238             dec_channel_layout =
2239                 (is->frame->channel_layout && av_frame_get_channels(is->frame) == av_get_channel_layout_nb_channels(is->frame->channel_layout)) ?
2240                 is->frame->channel_layout : av_get_default_channel_layout(av_frame_get_channels(is->frame));
2241             wanted_nb_samples = synchronize_audio(is, is->frame->nb_samples);
2242
2243             if (is->frame->format        != is->audio_src.fmt            ||
2244                 dec_channel_layout       != is->audio_src.channel_layout ||
2245                 is->frame->sample_rate   != is->audio_src.freq           ||
2246                 (wanted_nb_samples       != is->frame->nb_samples && !is->swr_ctx)) {
2247                 swr_free(&is->swr_ctx);
2248                 is->swr_ctx = swr_alloc_set_opts(NULL,
2249                                                  is->audio_tgt.channel_layout, is->audio_tgt.fmt, is->audio_tgt.freq,
2250                                                  dec_channel_layout,           is->frame->format, is->frame->sample_rate,
2251                                                  0, NULL);
2252                 if (!is->swr_ctx || swr_init(is->swr_ctx) < 0) {
2253                     fprintf(stderr, "Cannot create sample rate converter for conversion of %d Hz %s %d channels to %d Hz %s %d channels!\n",
2254                             is->frame->sample_rate, av_get_sample_fmt_name(is->frame->format), av_frame_get_channels(is->frame),
2255                             is->audio_tgt.freq, av_get_sample_fmt_name(is->audio_tgt.fmt), is->audio_tgt.channels);
2256                     break;
2257                 }
2258                 is->audio_src.channel_layout = dec_channel_layout;
2259                 is->audio_src.channels       = av_frame_get_channels(is->frame);
2260                 is->audio_src.freq = is->frame->sample_rate;
2261                 is->audio_src.fmt = is->frame->format;
2262             }
2263
2264             if (is->swr_ctx) {
2265                 const uint8_t **in = (const uint8_t **)is->frame->extended_data;
2266                 uint8_t **out = &is->audio_buf1;
2267                 int out_count = (int64_t)wanted_nb_samples * is->audio_tgt.freq / is->frame->sample_rate + 256;
2268                 int out_size  = av_samples_get_buffer_size(NULL, is->audio_tgt.channels, out_count, is->audio_tgt.fmt, 0);
2269                 int len2;
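                /* spread the (wanted - actual) sample difference over the whole
                   frame; both the delta and the distance are rescaled to the
                   output sample rate */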
2270                 if (wanted_nb_samples != is->frame->nb_samples) {
2271                     if (swr_set_compensation(is->swr_ctx, (wanted_nb_samples - is->frame->nb_samples) * is->audio_tgt.freq / is->frame->sample_rate,
2272                                                 wanted_nb_samples * is->audio_tgt.freq / is->frame->sample_rate) < 0) {
2273                         fprintf(stderr, "swr_set_compensation() failed\n");
2274                         break;
2275                     }
2276                 }
2277                 av_fast_malloc(&is->audio_buf1, &is->audio_buf1_size, out_size);
2278                 if (!is->audio_buf1)
2279                     return AVERROR(ENOMEM);
2280                 len2 = swr_convert(is->swr_ctx, out, out_count, in, is->frame->nb_samples);
2281                 if (len2 < 0) {
2282                     fprintf(stderr, "swr_convert() failed\n");
2283                     break;
2284                 }
2285                 if (len2 == out_count) {
2286                     fprintf(stderr, "warning: audio buffer is probably too small\n");
2287                     swr_init(is->swr_ctx);
2288                 }
2289                 is->audio_buf = is->audio_buf1;
2290                 resampled_data_size = len2 * is->audio_tgt.channels * av_get_bytes_per_sample(is->audio_tgt.fmt);
2291             } else {
2292                 is->audio_buf = is->frame->data[0];
2293                 resampled_data_size = data_size;
2294             }
2295
2296             audio_clock0 = is->audio_clock;
2297             /* update the audio clock with the pts */
2298             if (is->frame->pts != AV_NOPTS_VALUE) {
2299                 is->audio_clock = is->frame->pts * av_q2d(tb) + (double) is->frame->nb_samples / is->frame->sample_rate;
2300                 is->audio_clock_serial = is->audio_pkt_temp_serial;
2301             }
2302 #ifdef DEBUG
2303             {
2304                 static double last_clock;
2305                 printf("audio: delay=%0.3f clock=%0.3f clock0=%0.3f\n",
2306                        is->audio_clock - last_clock,
2307                        is->audio_clock, audio_clock0);
2308                 last_clock = is->audio_clock;
2309             }
2310 #endif
2311             return resampled_data_size;
2312         }
2313
2314         /* free the current packet */
2315         if (pkt->data)
2316             av_free_packet(pkt);
2317         memset(pkt_temp, 0, sizeof(*pkt_temp));
2318
2319         if (is->audioq.abort_request) {
2320             return -1;
2321         }
2322
2323         if (is->audioq.nb_packets == 0)
2324             SDL_CondSignal(is->continue_read_thread);
2325
2326         /* read next packet */
2327         if ((new_packet = packet_queue_get(&is->audioq, pkt, 1, &is->audio_pkt_temp_serial)) < 0)
2328             return -1;
2329
2330         if (pkt->data == flush_pkt.data) {
2331             avcodec_flush_buffers(dec);
2332             flush_complete = 0;
2333             is->audio_buf_frames_pending = 0;
2334         }
2335
2336         *pkt_temp = *pkt;
2337     }
2338 }
2339
2340 /* prepare a new audio buffer */
2341 static void sdl_audio_callback(void *opaque, Uint8 *stream, int len)
2342 {
2343     VideoState *is = opaque;
2344     int audio_size, len1;
2345     int bytes_per_sec;
2346     int frame_size = av_samples_get_buffer_size(NULL, is->audio_tgt.channels, 1, is->audio_tgt.fmt, 1);
2347
2348     audio_callback_time = av_gettime();
2349
2350     while (len > 0) {
2351         if (is->audio_buf_index >= is->audio_buf_size) {
2352            audio_size = audio_decode_frame(is);
2353            if (audio_size < 0) {
2354                 /* if error, just output silence */
2355                is->audio_buf      = is->silence_buf;
2356                is->audio_buf_size = sizeof(is->silence_buf) / frame_size * frame_size;
2357            } else {
2358                if (is->show_mode != SHOW_MODE_VIDEO)
2359                    update_sample_display(is, (int16_t *)is->audio_buf, audio_size);
2360                is->audio_buf_size = audio_size;
2361            }
2362            is->audio_buf_index = 0;
2363         }
2364         len1 = is->audio_buf_size - is->audio_buf_index;
2365         if (len1 > len)
2366             len1 = len;
2367         memcpy(stream, (uint8_t *)is->audio_buf + is->audio_buf_index, len1);
2368         len -= len1;
2369         stream += len1;
2370         is->audio_buf_index += len1;
2371     }
2372     bytes_per_sec = is->audio_tgt.freq * is->audio_tgt.channels * av_get_bytes_per_sample(is->audio_tgt.fmt);
2373     is->audio_write_buf_size = is->audio_buf_size - is->audio_buf_index;
2374     /* Let's assume the audio driver that is used by SDL has two periods. */
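    /* audio_clock is the pts at the end of the last decoded chunk; subtracting
       the data still buffered (two hardware periods plus our unwritten remainder)
       approximates the pts of the sample being played right now */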
2375     is->audio_current_pts = is->audio_clock - (double)(2 * is->audio_hw_buf_size + is->audio_write_buf_size) / bytes_per_sec;
2376     is->audio_current_pts_drift = is->audio_current_pts - audio_callback_time / 1000000.0;
2377     if (is->audioq.serial == is->audio_clock_serial)
2378         check_external_clock_sync(is, is->audio_current_pts);
2379 }
2380
2381 static int audio_open(void *opaque, int64_t wanted_channel_layout, int wanted_nb_channels, int wanted_sample_rate, struct AudioParams *audio_hw_params)
2382 {
2383     SDL_AudioSpec wanted_spec, spec;
2384     const char *env;
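    /* fallback table: the index is the channel count that just failed to open,
       the value is the next count to try (0 means give up) */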
2385     const int next_nb_channels[] = {0, 0, 1, 6, 2, 6, 4, 6};
2386
2387     env = SDL_getenv("SDL_AUDIO_CHANNELS");
2388     if (env) {
2389         wanted_nb_channels = atoi(env);
2390         wanted_channel_layout = av_get_default_channel_layout(wanted_nb_channels);
2391     }
2392     if (!wanted_channel_layout || wanted_nb_channels != av_get_channel_layout_nb_channels(wanted_channel_layout)) {
2393         wanted_channel_layout = av_get_default_channel_layout(wanted_nb_channels);
2394         wanted_channel_layout &= ~AV_CH_LAYOUT_STEREO_DOWNMIX;
2395     }
2396     wanted_spec.channels = av_get_channel_layout_nb_channels(wanted_channel_layout);
2397     wanted_spec.freq = wanted_sample_rate;
2398     if (wanted_spec.freq <= 0 || wanted_spec.channels <= 0) {
2399         fprintf(stderr, "Invalid sample rate or channel count!\n");
2400         return -1;
2401     }
2402     wanted_spec.format = AUDIO_S16SYS;
2403     wanted_spec.silence = 0;
2404     wanted_spec.samples = SDL_AUDIO_BUFFER_SIZE;
2405     wanted_spec.callback = sdl_audio_callback;
2406     wanted_spec.userdata = opaque;
2407     while (SDL_OpenAudio(&wanted_spec, &spec) < 0) {
2408         fprintf(stderr, "SDL_OpenAudio (%d channels): %s\n", wanted_spec.channels, SDL_GetError());
2409         wanted_spec.channels = next_nb_channels[FFMIN(7, wanted_spec.channels)];
2410         if (!wanted_spec.channels) {
2411             fprintf(stderr, "No more channel combinations to try, audio open failed\n");
2412             return -1;
2413         }
2414         wanted_channel_layout = av_get_default_channel_layout(wanted_spec.channels);
2415     }
2416     if (spec.format != AUDIO_S16SYS) {
2417         fprintf(stderr, "SDL advised audio format %d is not supported!\n", spec.format);
2418         return -1;
2419     }
2420     if (spec.channels != wanted_spec.channels) {
2421         wanted_channel_layout = av_get_default_channel_layout(spec.channels);
2422         if (!wanted_channel_layout) {
2423             fprintf(stderr, "SDL advised channel count %d is not supported!\n", spec.channels);
2424             return -1;
2425         }
2426     }
2427
2428     audio_hw_params->fmt = AV_SAMPLE_FMT_S16;
2429     audio_hw_params->freq = spec.freq;
2430     audio_hw_params->channel_layout = wanted_channel_layout;
2431     audio_hw_params->channels =  spec.channels;
2432     return spec.size;
2433 }
2434
2435 /* open a given stream. Return 0 if OK */
2436 static int stream_component_open(VideoState *is, int stream_index)
2437 {
2438     AVFormatContext *ic = is->ic;
2439     AVCodecContext *avctx;
2440     AVCodec *codec;
2441     const char *forced_codec_name = NULL;
2442     AVDictionary *opts;
2443     AVDictionaryEntry *t = NULL;
2444     int sample_rate, nb_channels;
2445     int64_t channel_layout;
2446     int ret;
2447
2448     if (stream_index < 0 || stream_index >= ic->nb_streams)
2449         return -1;
2450     avctx = ic->streams[stream_index]->codec;
2451
2452     codec = avcodec_find_decoder(avctx->codec_id);
2453
2454     switch(avctx->codec_type){
2455         case AVMEDIA_TYPE_AUDIO   : is->last_audio_stream    = stream_index; forced_codec_name =    audio_codec_name; break;
2456         case AVMEDIA_TYPE_SUBTITLE: is->last_subtitle_stream = stream_index; forced_codec_name = subtitle_codec_name; break;
2457         case AVMEDIA_TYPE_VIDEO   : is->last_video_stream    = stream_index; forced_codec_name =    video_codec_name; break;
2458     }
2459     if (forced_codec_name)
2460         codec = avcodec_find_decoder_by_name(forced_codec_name);
2461     if (!codec) {
2462         if (forced_codec_name) fprintf(stderr, "No codec could be found with name '%s'\n", forced_codec_name);
2463         else                   fprintf(stderr, "No codec could be found with id %d\n", avctx->codec_id);
2464         return -1;
2465     }
2466
2467     avctx->codec_id = codec->id;
2468     avctx->workaround_bugs   = workaround_bugs;
2469     avctx->lowres            = lowres;
2470     if(avctx->lowres > codec->max_lowres){
2471         av_log(avctx, AV_LOG_WARNING, "The maximum value for lowres supported by the decoder is %d\n",
2472                 codec->max_lowres);
2473         avctx->lowres= codec->max_lowres;
2474     }
2475     avctx->idct_algo         = idct;
2476     avctx->error_concealment = error_concealment;
2477
2478     if(avctx->lowres) avctx->flags |= CODEC_FLAG_EMU_EDGE;
2479     if (fast)   avctx->flags2 |= CODEC_FLAG2_FAST;
2480     if(codec->capabilities & CODEC_CAP_DR1)
2481         avctx->flags |= CODEC_FLAG_EMU_EDGE;
2482
2483     opts = filter_codec_opts(codec_opts, avctx->codec_id, ic, ic->streams[stream_index], codec);
2484     if (!av_dict_get(opts, "threads", NULL, 0))
2485         av_dict_set(&opts, "threads", "auto", 0);
2486     if (avctx->codec_type == AVMEDIA_TYPE_VIDEO || avctx->codec_type == AVMEDIA_TYPE_AUDIO)
2487         av_dict_set(&opts, "refcounted_frames", "1", 0);
2488     if (avcodec_open2(avctx, codec, &opts) < 0)
2489         return -1;
2490     if ((t = av_dict_get(opts, "", NULL, AV_DICT_IGNORE_SUFFIX))) {
2491         av_log(NULL, AV_LOG_ERROR, "Option %s not found.\n", t->key);
2492         return AVERROR_OPTION_NOT_FOUND;
2493     }
2494
2495     ic->streams[stream_index]->discard = AVDISCARD_DEFAULT;
2496     switch (avctx->codec_type) {
2497     case AVMEDIA_TYPE_AUDIO:
2498 #if CONFIG_AVFILTER
2499         {
2500             AVFilterLink *link;
2501
2502             is->audio_filter_src.freq           = avctx->sample_rate;
2503             is->audio_filter_src.channels       = avctx->channels;
2504             is->audio_filter_src.channel_layout = get_valid_channel_layout(avctx->channel_layout, avctx->channels);
2505             is->audio_filter_src.fmt            = avctx->sample_fmt;
2506             if ((ret = configure_audio_filters(is, afilters, 0)) < 0)
2507                 return ret;
2508             link = is->out_audio_filter->inputs[0];
2509             sample_rate    = link->sample_rate;
2510             nb_channels    = link->channels;
2511             channel_layout = link->channel_layout;
2512         }
2513 #else
2514         sample_rate    = avctx->sample_rate;
2515         nb_channels    = avctx->channels;
2516         channel_layout = avctx->channel_layout;
2517 #endif
2518
2519         /* prepare audio output */
2520         if ((ret = audio_open(is, channel_layout, nb_channels, sample_rate, &is->audio_tgt)) < 0)
2521             return ret;
2522         is->audio_hw_buf_size = ret;
2523         is->audio_src = is->audio_tgt;
2524         is->audio_buf_size  = 0;
2525         is->audio_buf_index = 0;
2526
2527         /* init averaging filter */
2528         is->audio_diff_avg_coef  = exp(log(0.01) / AUDIO_DIFF_AVG_NB);
2529         is->audio_diff_avg_count = 0;
2530         /* since we do not have a precise enough audio FIFO fullness measure,
2531            we correct audio sync only if the error is larger than this threshold */
2532         is->audio_diff_threshold = 2.0 * is->audio_hw_buf_size / av_samples_get_buffer_size(NULL, is->audio_tgt.channels, is->audio_tgt.freq, is->audio_tgt.fmt, 1);
2533
2534         memset(&is->audio_pkt, 0, sizeof(is->audio_pkt));
2535         memset(&is->audio_pkt_temp, 0, sizeof(is->audio_pkt_temp));
2536
2537         is->audio_stream = stream_index;
2538         is->audio_st = ic->streams[stream_index];
2539
2540         packet_queue_start(&is->audioq);
2541         SDL_PauseAudio(0);
2542         break;
2543     case AVMEDIA_TYPE_VIDEO:
2544         is->video_stream = stream_index;
2545         is->video_st = ic->streams[stream_index];
2546
2547         packet_queue_start(&is->videoq);
2548         is->video_tid = SDL_CreateThread(video_thread, is);
2549         is->queue_attachments_req = 1;
2550         break;
2551     case AVMEDIA_TYPE_SUBTITLE:
2552         is->subtitle_stream = stream_index;
2553         is->subtitle_st = ic->streams[stream_index];
2554         packet_queue_start(&is->subtitleq);
2555
2556         is->subtitle_tid = SDL_CreateThread(subtitle_thread, is);
2557         break;
2558     default:
2559         break;
2560     }
2561     return 0;
2562 }
2563
2564 static void stream_component_close(VideoState *is, int stream_index)
2565 {
2566     AVFormatContext *ic = is->ic;
2567     AVCodecContext *avctx;
2568
2569     if (stream_index < 0 || stream_index >= ic->nb_streams)
2570         return;
2571     avctx = ic->streams[stream_index]->codec;
2572
2573     switch (avctx->codec_type) {
2574     case AVMEDIA_TYPE_AUDIO:
2575         packet_queue_abort(&is->audioq);
2576
2577         SDL_CloseAudio();
2578
2579         packet_queue_flush(&is->audioq);
2580         av_free_packet(&is->audio_pkt);
2581         swr_free(&is->swr_ctx);
2582         av_freep(&is->audio_buf1);
2583         is->audio_buf1_size = 0;
2584         is->audio_buf = NULL;
2585         av_frame_free(&is->frame);
2586
2587         if (is->rdft) {
2588             av_rdft_end(is->rdft);
2589             av_freep(&is->rdft_data);
2590             is->rdft = NULL;
2591             is->rdft_bits = 0;
2592         }
2593 #if CONFIG_AVFILTER
2594         avfilter_graph_free(&is->agraph);
2595 #endif
2596         break;
2597     case AVMEDIA_TYPE_VIDEO:
2598         packet_queue_abort(&is->videoq);
2599
2600         /* note: we also signal this mutex to make sure we deblock the
2601            video thread in all cases */
2602         SDL_LockMutex(is->pictq_mutex);
2603         SDL_CondSignal(is->pictq_cond);
2604         SDL_UnlockMutex(is->pictq_mutex);
2605
2606         SDL_WaitThread(is->video_tid, NULL);
2607
2608         packet_queue_flush(&is->videoq);
2609         break;
2610     case AVMEDIA_TYPE_SUBTITLE:
2611         packet_queue_abort(&is->subtitleq);
2612
2613         /* note: we also signal this mutex to make sure we deblock the
2614            subtitle thread in all cases */
2615         SDL_LockMutex(is->subpq_mutex);
2616         is->subtitle_stream_changed = 1;
2617
2618         SDL_CondSignal(is->subpq_cond);
2619         SDL_UnlockMutex(is->subpq_mutex);
2620
2621         SDL_WaitThread(is->subtitle_tid, NULL);
2622
2623         packet_queue_flush(&is->subtitleq);
2624         break;
2625     default:
2626         break;
2627     }
2628
2629     ic->streams[stream_index]->discard = AVDISCARD_ALL;
2630     avcodec_close(avctx);
2631     switch (avctx->codec_type) {
2632     case AVMEDIA_TYPE_AUDIO:
2633         is->audio_st = NULL;
2634         is->audio_stream = -1;
2635         break;
2636     case AVMEDIA_TYPE_VIDEO:
2637         is->video_st = NULL;
2638         is->video_stream = -1;
2639         break;
2640     case AVMEDIA_TYPE_SUBTITLE:
2641         is->subtitle_st = NULL;
2642         is->subtitle_stream = -1;
2643         break;
2644     default:
2645         break;
2646     }
2647 }
2648
2649 static int decode_interrupt_cb(void *ctx)
2650 {
2651     VideoState *is = ctx;
2652     return is->abort_request;
2653 }
2654
2655 static int is_realtime(AVFormatContext *s)
2656 {
2657     if(   !strcmp(s->iformat->name, "rtp")
2658        || !strcmp(s->iformat->name, "rtsp")
2659        || !strcmp(s->iformat->name, "sdp")
2660     )
2661         return 1;
2662
2663     if(s->pb && (   !strncmp(s->filename, "rtp:", 4)
2664                  || !strncmp(s->filename, "udp:", 4)
2665                 )
2666     )
2667         return 1;
2668     return 0;
2669 }
2670
2671 /* this thread gets the stream from the disk or the network */
2672 static int read_thread(void *arg)
2673 {
2674     VideoState *is = arg;
2675     AVFormatContext *ic = NULL;
2676     int err, i, ret;
2677     int st_index[AVMEDIA_TYPE_NB];
2678     AVPacket pkt1, *pkt = &pkt1;
2679     int eof = 0;
2680     int pkt_in_play_range = 0;
2681     AVDictionaryEntry *t;
2682     AVDictionary **opts;
2683     int orig_nb_streams;
2684     SDL_mutex *wait_mutex = SDL_CreateMutex();
2685
2686     memset(st_index, -1, sizeof(st_index));
2687     is->last_video_stream = is->video_stream = -1;
2688     is->last_audio_stream = is->audio_stream = -1;
2689     is->last_subtitle_stream = is->subtitle_stream = -1;
2690
2691     ic = avformat_alloc_context();
2692     ic->interrupt_callback.callback = decode_interrupt_cb;
2693     ic->interrupt_callback.opaque = is;
2694     err = avformat_open_input(&ic, is->filename, is->iformat, &format_opts);
2695     if (err < 0) {
2696         print_error(is->filename, err);
2697         ret = -1;
2698         goto fail;
2699     }
2700     if ((t = av_dict_get(format_opts, "", NULL, AV_DICT_IGNORE_SUFFIX))) {
2701         av_log(NULL, AV_LOG_ERROR, "Option %s not found.\n", t->key);
2702         ret = AVERROR_OPTION_NOT_FOUND;
2703         goto fail;
2704     }
2705     is->ic = ic;
2706
2707     if (genpts)
2708         ic->flags |= AVFMT_FLAG_GENPTS;
2709
2710     opts = setup_find_stream_info_opts(ic, codec_opts);
2711     orig_nb_streams = ic->nb_streams;
2712
2713     err = avformat_find_stream_info(ic, opts);
2714     if (err < 0) {
2715         fprintf(stderr, "%s: could not find codec parameters\n", is->filename);
2716         ret = -1;
2717         goto fail;
2718     }
2719     for (i = 0; i < orig_nb_streams; i++)
2720         av_dict_free(&opts[i]);
2721     av_freep(&opts);
2722
2723     if (ic->pb)
2724         ic->pb->eof_reached = 0; // FIXME: hack, ffplay should maybe not use url_feof() to test for the end
2725
2726     if (seek_by_bytes < 0)
2727         seek_by_bytes = !!(ic->iformat->flags & AVFMT_TS_DISCONT) && strcmp("ogg", ic->iformat->name);
2728
2729     is->max_frame_duration = (ic->iformat->flags & AVFMT_TS_DISCONT) ? 10.0 : 3600.0;
2730
2731     /* if seeking requested, we execute it */
2732     if (start_time != AV_NOPTS_VALUE) {
2733         int64_t timestamp;
2734
2735         timestamp = start_time;
2736         /* add the stream start time */
2737         if (ic->start_time != AV_NOPTS_VALUE)
2738             timestamp += ic->start_time;
2739         ret = avformat_seek_file(ic, -1, INT64_MIN, timestamp, INT64_MAX, 0);
2740         if (ret < 0) {
2741             fprintf(stderr, "%s: could not seek to position %0.3f\n",
2742                     is->filename, (double)timestamp / AV_TIME_BASE);
2743         }
2744     }
2745
2746     is->realtime = is_realtime(ic);
2747
2748     for (i = 0; i < ic->nb_streams; i++)
2749         ic->streams[i]->discard = AVDISCARD_ALL;
2750     if (!video_disable)
2751         st_index[AVMEDIA_TYPE_VIDEO] =
2752             av_find_best_stream(ic, AVMEDIA_TYPE_VIDEO,
2753                                 wanted_stream[AVMEDIA_TYPE_VIDEO], -1, NULL, 0);
2754     if (!audio_disable)
2755         st_index[AVMEDIA_TYPE_AUDIO] =
2756             av_find_best_stream(ic, AVMEDIA_TYPE_AUDIO,
2757                                 wanted_stream[AVMEDIA_TYPE_AUDIO],
2758                                 st_index[AVMEDIA_TYPE_VIDEO],
2759                                 NULL, 0);
2760     if (!video_disable && !subtitle_disable)
2761         st_index[AVMEDIA_TYPE_SUBTITLE] =
2762             av_find_best_stream(ic, AVMEDIA_TYPE_SUBTITLE,
2763                                 wanted_stream[AVMEDIA_TYPE_SUBTITLE],
2764                                 (st_index[AVMEDIA_TYPE_AUDIO] >= 0 ?
2765                                  st_index[AVMEDIA_TYPE_AUDIO] :
2766                                  st_index[AVMEDIA_TYPE_VIDEO]),
2767                                 NULL, 0);
2768     if (show_status) {
2769         av_dump_format(ic, 0, is->filename, 0);
2770     }
2771
2772     is->show_mode = show_mode;
2773
2774     /* open the streams */
2775     if (st_index[AVMEDIA_TYPE_AUDIO] >= 0) {
2776         stream_component_open(is, st_index[AVMEDIA_TYPE_AUDIO]);
2777     }
2778
2779     ret = -1;
2780     if (st_index[AVMEDIA_TYPE_VIDEO] >= 0) {
2781         ret = stream_component_open(is, st_index[AVMEDIA_TYPE_VIDEO]);
2782     }
2783     if (is->show_mode == SHOW_MODE_NONE)
2784         is->show_mode = ret >= 0 ? SHOW_MODE_VIDEO : SHOW_MODE_RDFT;
2785
2786     if (st_index[AVMEDIA_TYPE_SUBTITLE] >= 0) {
2787         stream_component_open(is, st_index[AVMEDIA_TYPE_SUBTITLE]);
2788     }
2789
2790     if (is->video_stream < 0 && is->audio_stream < 0) {
2791         fprintf(stderr, "%s: could not open codecs\n", is->filename);
2792         ret = -1;
2793         goto fail;
2794     }
2795
2796     if (infinite_buffer < 0 && is->realtime)
2797         infinite_buffer = 1;
2798
2799     for (;;) {
2800         if (is->abort_request)
2801             break;
2802         if (is->paused != is->last_paused) {
2803             is->last_paused = is->paused;
2804             if (is->paused)
2805                 is->read_pause_return = av_read_pause(ic);
2806             else
2807                 av_read_play(ic);
2808         }
2809 #if CONFIG_RTSP_DEMUXER || CONFIG_MMSH_PROTOCOL
2810         if (is->paused &&
2811                 (!strcmp(ic->iformat->name, "rtsp") ||
2812                  (ic->pb && !strncmp(input_filename, "mmsh:", 5)))) {
2813             /* wait 10 ms to avoid trying to get another packet */
2814             /* XXX: horrible */
2815             SDL_Delay(10);
2816             continue;
2817         }
2818 #endif
2819         if (is->seek_req) {
2820             int64_t seek_target = is->seek_pos;
2821             int64_t seek_min    = is->seek_rel > 0 ? seek_target - is->seek_rel + 2: INT64_MIN;
2822             int64_t seek_max    = is->seek_rel < 0 ? seek_target - is->seek_rel - 2: INT64_MAX;
2823 // FIXME the +-2 is due to rounding not being done in the correct direction when generating
2824 //      the seek_pos/seek_rel variables
2825
2826             ret = avformat_seek_file(is->ic, -1, seek_min, seek_target, seek_max, is->seek_flags);
2827             if (ret < 0) {
2828                 fprintf(stderr, "%s: error while seeking\n", is->ic->filename);
2829             } else {
2830                 if (is->audio_stream >= 0) {
2831                     packet_queue_flush(&is->audioq);
2832                     packet_queue_put(&is->audioq, &flush_pkt);
2833                 }
2834                 if (is->subtitle_stream >= 0) {
2835                     packet_queue_flush(&is->subtitleq);
2836                     packet_queue_put(&is->subtitleq, &flush_pkt);
2837                 }
2838                 if (is->video_stream >= 0) {
2839                     packet_queue_flush(&is->videoq);
2840                     packet_queue_put(&is->videoq, &flush_pkt);
2841                 }
2842                 if (is->seek_flags & AVSEEK_FLAG_BYTE) {
2843                    update_external_clock_pts(is, NAN);
2844                 } else {
2845                    update_external_clock_pts(is, seek_target / (double)AV_TIME_BASE);
2846                 }
2847             }
2848             is->seek_req = 0;
2849             is->queue_attachments_req = 1;
2850             eof = 0;
2851             if (is->paused)
2852                 step_to_next_frame(is);
2853         }
2854         if (is->queue_attachments_req) {
2855             if (is->video_st && is->video_st->disposition & AV_DISPOSITION_ATTACHED_PIC) {
2856                 AVPacket copy;
2857                 if ((ret = av_copy_packet(&copy, &is->video_st->attached_pic)) < 0)
2858                     goto fail;
2859                 packet_queue_put(&is->videoq, &copy);
2860             }
2861             is->queue_attachments_req = 0;
2862         }
2863
2864         /* if the queues are full, no need to read more */
2865         if (infinite_buffer<1 &&
2866               (is->audioq.size + is->videoq.size + is->subtitleq.size > MAX_QUEUE_SIZE
2867             || (   (is->audioq   .nb_packets > MIN_FRAMES || is->audio_stream < 0 || is->audioq.abort_request)
2868                 && (is->videoq   .nb_packets > MIN_FRAMES || is->video_stream < 0 || is->videoq.abort_request
2869                     || (is->video_st->disposition & AV_DISPOSITION_ATTACHED_PIC))
2870                 && (is->subtitleq.nb_packets > MIN_FRAMES || is->subtitle_stream < 0 || is->subtitleq.abort_request)))) {
2871             /* wait 10 ms */
2872             SDL_LockMutex(wait_mutex);
2873             SDL_CondWaitTimeout(is->continue_read_thread, wait_mutex, 10);
2874             SDL_UnlockMutex(wait_mutex);
2875             continue;
2876         }
2877         if (eof) {
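            /* on end of file, queue empty (NULL data) packets so the decoders
               can drain any frames they still have buffered */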
2878             if (is->video_stream >= 0) {
2879                 av_init_packet(pkt);
2880                 pkt->data = NULL;
2881                 pkt->size = 0;
2882                 pkt->stream_index = is->video_stream;
2883                 packet_queue_put(&is->videoq, pkt);
2884             }
2885             if (is->audio_stream >= 0 &&
2886                 is->audio_st->codec->codec->capabilities & CODEC_CAP_DELAY) {
2887                 av_init_packet(pkt);
2888                 pkt->data = NULL;
2889                 pkt->size = 0;
2890                 pkt->stream_index = is->audio_stream;
2891                 packet_queue_put(&is->audioq, pkt);
2892             }
2893             SDL_Delay(10);
2894             if (is->audioq.size + is->videoq.size + is->subtitleq.size == 0) {
2895                 if (loop != 1 && (!loop || --loop)) {
2896                     stream_seek(is, start_time != AV_NOPTS_VALUE ? start_time : 0, 0, 0);
2897                 } else if (autoexit) {
2898                     ret = AVERROR_EOF;
2899                     goto fail;
2900                 }
2901             }
2902             eof=0;
2903             continue;
2904         }
2905         ret = av_read_frame(ic, pkt);
2906         if (ret < 0) {
2907             if (ret == AVERROR_EOF || url_feof(ic->pb))
2908                 eof = 1;
2909             if (ic->pb && ic->pb->error)
2910                 break;
2911             SDL_LockMutex(wait_mutex);
2912             SDL_CondWaitTimeout(is->continue_read_thread, wait_mutex, 10);
2913             SDL_UnlockMutex(wait_mutex);
2914             continue;
2915         }
2916         /* check if packet is in play range specified by user, then queue, otherwise discard */
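        /* i.e. the packet pts, in seconds and relative to the stream start time
           (and to -ss if given), must not exceed the requested -t duration */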
2917         pkt_in_play_range = duration == AV_NOPTS_VALUE ||
2918                 (pkt->pts - ic->streams[pkt->stream_index]->start_time) *
2919                 av_q2d(ic->streams[pkt->stream_index]->time_base) -
2920                 (double)(start_time != AV_NOPTS_VALUE ? start_time : 0) / 1000000
2921                 <= ((double)duration / 1000000);
2922         if (pkt->stream_index == is->audio_stream && pkt_in_play_range) {
2923             packet_queue_put(&is->audioq, pkt);
2924         } else if (pkt->stream_index == is->video_stream && pkt_in_play_range
2925                    && !(is->video_st->disposition & AV_DISPOSITION_ATTACHED_PIC)) {
2926             packet_queue_put(&is->videoq, pkt);
2927         } else if (pkt->stream_index == is->subtitle_stream && pkt_in_play_range) {
2928             packet_queue_put(&is->subtitleq, pkt);
2929         } else {
2930             av_free_packet(pkt);
2931         }
2932     }
2933     /* wait until the end */
2934     while (!is->abort_request) {
2935         SDL_Delay(100);
2936     }
2937
2938     ret = 0;
2939  fail:
2940     /* close each stream */
2941     if (is->audio_stream >= 0)
2942         stream_component_close(is, is->audio_stream);
2943     if (is->video_stream >= 0)
2944         stream_component_close(is, is->video_stream);
2945     if (is->subtitle_stream >= 0)
2946         stream_component_close(is, is->subtitle_stream);
2947     if (is->ic) {
2948         avformat_close_input(&is->ic);
2949     }
2950
2951     if (ret != 0) {
2952         SDL_Event event;
2953
2954         event.type = FF_QUIT_EVENT;
2955         event.user.data1 = is;
2956         SDL_PushEvent(&event);
2957     }
2958     SDL_DestroyMutex(wait_mutex);
2959     return 0;
2960 }
2961
2962 static VideoState *stream_open(const char *filename, AVInputFormat *iformat)
2963 {
2964     VideoState *is;
2965
2966     is = av_mallocz(sizeof(VideoState));
2967     if (!is)
2968         return NULL;
2969     av_strlcpy(is->filename, filename, sizeof(is->filename));
2970     is->iformat = iformat;
2971     is->ytop    = 0;
2972     is->xleft   = 0;
2973
2974     /* start video display */
2975     is->pictq_mutex = SDL_CreateMutex();
2976     is->pictq_cond  = SDL_CreateCond();
2977
2978     is->subpq_mutex = SDL_CreateMutex();
2979     is->subpq_cond  = SDL_CreateCond();
2980
2981     packet_queue_init(&is->videoq);
2982     packet_queue_init(&is->audioq);
2983     packet_queue_init(&is->subtitleq);
2984
2985     is->continue_read_thread = SDL_CreateCond();
2986
2987     update_external_clock_pts(is, NAN);
2988     update_external_clock_speed(is, 1.0);
2989     is->audio_current_pts_drift = -av_gettime() / 1000000.0;
2990     is->video_current_pts_drift = is->audio_current_pts_drift;
2991     is->audio_clock_serial = -1;
2992     is->video_clock_serial = -1;
2993     is->audio_last_serial = -1;
2994     is->av_sync_type = av_sync_type;
2995     is->read_tid     = SDL_CreateThread(read_thread, is);
2996     if (!is->read_tid) {
2997         av_free(is);
2998         return NULL;
2999     }
3000     return is;
3001 }
3002
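     /* Switch to the next stream of the given type, starting after the last one
        used and wrapping around. For subtitles the search may also end with no
        stream at all (stream_index = -1), which turns subtitles off. */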
3003 static void stream_cycle_channel(VideoState *is, int codec_type)
3004 {
3005     AVFormatContext *ic = is->ic;
3006     int start_index, stream_index;
3007     int old_index;
3008     AVStream *st;
3009
3010     if (codec_type == AVMEDIA_TYPE_VIDEO) {
3011         start_index = is->last_video_stream;
3012         old_index = is->video_stream;
3013     } else if (codec_type == AVMEDIA_TYPE_AUDIO) {
3014         start_index = is->last_audio_stream;
3015         old_index = is->audio_stream;
3016     } else {
3017         start_index = is->last_subtitle_stream;
3018         old_index = is->subtitle_stream;
3019     }
3020     stream_index = start_index;
3021     for (;;) {
3022         if (++stream_index >= is->ic->nb_streams)
3023         {
3024             if (codec_type == AVMEDIA_TYPE_SUBTITLE)
3025             {
3026                 stream_index = -1;
3027                 is->last_subtitle_stream = -1;
3028                 goto the_end;
3029             }
3030             if (start_index == -1)
3031                 return;
3032             stream_index = 0;
3033         }
3034         if (stream_index == start_index)
3035             return;
3036         st = ic->streams[stream_index];
3037         if (st->codec->codec_type == codec_type) {
3038             /* check that parameters are OK */
3039             switch (codec_type) {
3040             case AVMEDIA_TYPE_AUDIO:
3041                 if (st->codec->sample_rate != 0 &&
3042                     st->codec->channels != 0)
3043                     goto the_end;
3044                 break;
3045             case AVMEDIA_TYPE_VIDEO:
3046             case AVMEDIA_TYPE_SUBTITLE:
3047                 goto the_end;
3048             default:
3049                 break;
3050             }
3051         }
3052     }
3053  the_end:
3054     stream_component_close(is, old_index);
3055     stream_component_open(is, stream_index);
3056 }
3057
3058
3059 static void toggle_full_screen(VideoState *is)
3060 {
3061 #if defined(__APPLE__) && SDL_VERSION_ATLEAST(1, 2, 14)
3062     /* OS X needs to reallocate the SDL overlays */
3063     int i;
3064     for (i = 0; i < VIDEO_PICTURE_QUEUE_SIZE; i++)
3065         is->pictq[i].reallocate = 1;
3066 #endif
3067     is_full_screen = !is_full_screen;
3068     video_open(is, 1, NULL);
3069 }
3070
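     /* Cycle the display mode (video / waves / RDFT), skipping modes whose
        source stream is not available, and force a refresh so the previous
        contents are cleared. */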
3071 static void toggle_audio_display(VideoState *is)
3072 {
3073     int bgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0x00);
3074     int next = is->show_mode;
3075     do {
3076         next = (next + 1) % SHOW_MODE_NB;
3077     } while (next != is->show_mode && ((next == SHOW_MODE_VIDEO && !is->video_st) || (next != SHOW_MODE_VIDEO && !is->audio_st)));
3078     if (is->show_mode != next) {
3079         fill_rectangle(screen,
3080                     is->xleft, is->ytop, is->width, is->height,
3081                     bgcolor, 1);
3082         is->force_refresh = 1;
3083         is->show_mode = next;
3084     }
3085 }
3086
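     /* Wait for the next SDL event while keeping the display alive: video_refresh()
        is polled at least every REFRESH_RATE seconds and the mouse cursor is hidden
        after CURSOR_HIDE_DELAY microseconds without movement. */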
3087 static void refresh_loop_wait_event(VideoState *is, SDL_Event *event) {
3088     double remaining_time = 0.0;
3089     SDL_PumpEvents();
3090     while (!SDL_PeepEvents(event, 1, SDL_GETEVENT, SDL_ALLEVENTS)) {
3091         if (!cursor_hidden && av_gettime() - cursor_last_shown > CURSOR_HIDE_DELAY) {
3092             SDL_ShowCursor(0);
3093             cursor_hidden = 1;
3094         }
3095         if (remaining_time > 0.0)
3096             av_usleep((int64_t)(remaining_time * 1000000.0));
3097         remaining_time = REFRESH_RATE;
3098         if (is->show_mode != SHOW_MODE_NONE && (!is->paused || is->force_refresh))
3099             video_refresh(is, &remaining_time);
3100         SDL_PumpEvents();
3101     }
3102 }
3103
3104 /* handle events sent by the GUI */
3105 static void event_loop(VideoState *cur_stream)
3106 {
3107     SDL_Event event;
3108     double incr, pos, frac;
3109
3110     for (;;) {
3111         double x;
3112         refresh_loop_wait_event(cur_stream, &event);
3113         switch (event.type) {
3114         case SDL_KEYDOWN:
3115             if (exit_on_keydown) {
3116                 do_exit(cur_stream);
3117                 break;
3118             }
3119             switch (event.key.keysym.sym) {
3120             case SDLK_ESCAPE:
3121             case SDLK_q:
3122                 do_exit(cur_stream);
3123                 break;
3124             case SDLK_f:
3125                 toggle_full_screen(cur_stream);
3126                 cur_stream->force_refresh = 1;
3127                 break;
3128             case SDLK_p:
3129             case SDLK_SPACE:
3130                 toggle_pause(cur_stream);
3131                 break;
3132             case SDLK_s: // S: Step to next frame
3133                 step_to_next_frame(cur_stream);
3134                 break;
3135             case SDLK_a:
3136                 stream_cycle_channel(cur_stream, AVMEDIA_TYPE_AUDIO);
3137                 break;
3138             case SDLK_v:
3139                 stream_cycle_channel(cur_stream, AVMEDIA_TYPE_VIDEO);
3140                 break;
3141             case SDLK_t:
3142                 stream_cycle_channel(cur_stream, AVMEDIA_TYPE_SUBTITLE);
3143                 break;
3144             case SDLK_w:
3145                 toggle_audio_display(cur_stream);
3146                 break;
3147             case SDLK_PAGEUP:
3148                 incr = 600.0;
3149                 goto do_seek;
3150             case SDLK_PAGEDOWN:
3151                 incr = -600.0;
3152                 goto do_seek;
3153             case SDLK_LEFT:
3154                 incr = -10.0;
3155                 goto do_seek;
3156             case SDLK_RIGHT:
3157                 incr = 10.0;
3158                 goto do_seek;
3159             case SDLK_UP:
3160                 incr = 60.0;
3161                 goto do_seek;
3162             case SDLK_DOWN:
3163                 incr = -60.0;
3164             do_seek:
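                         /* Relative seek: either by bytes, with the byte offset
                            estimated from the container bit rate (an assumed
                            180000 bytes/s when it is unknown), or by time on the
                            master clock. */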
3165                     if (seek_by_bytes) {
3166                         if (cur_stream->video_stream >= 0 && cur_stream->video_current_pos >= 0) {
3167                             pos = cur_stream->video_current_pos;
3168                         } else if (cur_stream->audio_stream >= 0 && cur_stream->audio_pkt.pos >= 0) {
3169                             pos = cur_stream->audio_pkt.pos;
3170                         } else
3171                             pos = avio_tell(cur_stream->ic->pb);
3172                         if (cur_stream->ic->bit_rate)
3173                             incr *= cur_stream->ic->bit_rate / 8.0;
3174                         else
3175                             incr *= 180000.0;
3176                         pos += incr;
3177                         stream_seek(cur_stream, pos, incr, 1);
3178                     } else {
3179                         pos = get_master_clock(cur_stream);
3180                         if (isnan(pos))
3181                             pos = (double)cur_stream->seek_pos / AV_TIME_BASE;
3182                         pos += incr;
3183                         if (cur_stream->ic->start_time != AV_NOPTS_VALUE && pos < cur_stream->ic->start_time / (double)AV_TIME_BASE)
3184                             pos = cur_stream->ic->start_time / (double)AV_TIME_BASE;
3185                         stream_seek(cur_stream, (int64_t)(pos * AV_TIME_BASE), (int64_t)(incr * AV_TIME_BASE), 0);
3186                     }
3187                 break;
3188             default:
3189                 break;
3190             }
3191             break;
3192         case SDL_VIDEOEXPOSE:
3193             cur_stream->force_refresh = 1;
3194             break;
3195         case SDL_MOUSEBUTTONDOWN:
3196             if (exit_on_mousedown) {
3197                 do_exit(cur_stream);
3198                 break;
3199             }
3200         case SDL_MOUSEMOTION:
3201             if (cursor_hidden) {
3202                 SDL_ShowCursor(1);
3203                 cursor_hidden = 0;
3204             }
3205             cursor_last_shown = av_gettime();
3206             if (event.type == SDL_MOUSEBUTTONDOWN) {
3207                 x = event.button.x;
3208             } else {
3209                 if (event.motion.state != SDL_PRESSED)
3210                     break;
3211                 x = event.motion.x;
3212             }
3213                 if (seek_by_bytes || cur_stream->ic->duration <= 0) {
3214                     uint64_t size = avio_size(cur_stream->ic->pb);
3215                     stream_seek(cur_stream, size * x / cur_stream->width, 0, 1);
3216                 } else {
3217                     int64_t ts;
3218                     int ns, hh, mm, ss;
3219                     int tns, thh, tmm, tss;
3220                     tns  = cur_stream->ic->duration / 1000000LL;
3221                     thh  = tns / 3600;
3222                     tmm  = (tns % 3600) / 60;
3223                     tss  = (tns % 60);
3224                     frac = x / cur_stream->width;
3225                     ns   = frac * tns;
3226                     hh   = ns / 3600;
3227                     mm   = (ns % 3600) / 60;
3228                     ss   = (ns % 60);
3229                     fprintf(stderr, "Seek to %2.0f%% (%2d:%02d:%02d) of total duration (%2d:%02d:%02d)       \n", frac*100,
3230                             hh, mm, ss, thh, tmm, tss);
3231                     ts = frac * cur_stream->ic->duration;
3232                     if (cur_stream->ic->start_time != AV_NOPTS_VALUE)
3233                         ts += cur_stream->ic->start_time;
3234                     stream_seek(cur_stream, ts, 0, 0);
3235                 }
3236             break;
3237         case SDL_VIDEORESIZE:
3238                 screen = SDL_SetVideoMode(event.resize.w, event.resize.h, 0,
3239                                           SDL_HWSURFACE|SDL_RESIZABLE|SDL_ASYNCBLIT|SDL_HWACCEL);
3240                 screen_width  = cur_stream->width  = event.resize.w;
3241                 screen_height = cur_stream->height = event.resize.h;
3242                 cur_stream->force_refresh = 1;
3243             break;
3244         case SDL_QUIT:
3245         case FF_QUIT_EVENT:
3246             do_exit(cur_stream);
3247             break;
3248         case FF_ALLOC_EVENT:
3249             alloc_picture(event.user.data1);
3250             break;
3251         default:
3252             break;
3253         }
3254     }
3255 }
3256
3257 static int opt_frame_size(void *optctx, const char *opt, const char *arg)
3258 {
3259     av_log(NULL, AV_LOG_WARNING, "Option -s is deprecated, use -video_size.\n");
3260     return opt_default(NULL, "video_size", arg);
3261 }
3262
3263 static int opt_width(void *optctx, const char *opt, const char *arg)
3264 {
3265     screen_width = parse_number_or_die(opt, arg, OPT_INT64, 1, INT_MAX);
3266     return 0;
3267 }
3268
3269 static int opt_height(void *optctx, const char *opt, const char *arg)
3270 {
3271     screen_height = parse_number_or_die(opt, arg, OPT_INT64, 1, INT_MAX);
3272     return 0;
3273 }
3274
3275 static int opt_format(void *optctx, const char *opt, const char *arg)
3276 {
3277     file_iformat = av_find_input_format(arg);
3278     if (!file_iformat) {
3279         fprintf(stderr, "Unknown input format: %s\n", arg);
3280         return AVERROR(EINVAL);
3281     }
3282     return 0;
3283 }
3284
3285 static int opt_frame_pix_fmt(void *optctx, const char *opt, const char *arg)
3286 {
3287     av_log(NULL, AV_LOG_WARNING, "Option -pix_fmt is deprecated, use -pixel_format.\n");
3288     return opt_default(NULL, "pixel_format", arg);
3289 }
3290
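     /* -sync selects the master clock the other streams are slaved to; for
        example, "ffplay -sync ext file.mkv" forces the external clock (the file
        name here is only illustrative). */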
3291 static int opt_sync(void *optctx, const char *opt, const char *arg)
3292 {
3293     if (!strcmp(arg, "audio"))
3294         av_sync_type = AV_SYNC_AUDIO_MASTER;
3295     else if (!strcmp(arg, "video"))
3296         av_sync_type = AV_SYNC_VIDEO_MASTER;
3297     else if (!strcmp(arg, "ext"))
3298         av_sync_type = AV_SYNC_EXTERNAL_CLOCK;
3299     else {
3300         fprintf(stderr, "Unknown value for %s: %s\n", opt, arg);
3301         exit(1);
3302     }
3303     return 0;
3304 }
3305
3306 static int opt_seek(void *optctx, const char *opt, const char *arg)
3307 {
3308     start_time = parse_time_or_die(opt, arg, 1);
3309     return 0;
3310 }
3311
3312 static int opt_duration(void *optctx, const char *opt, const char *arg)
3313 {
3314     duration = parse_time_or_die(opt, arg, 1);
3315     return 0;
3316 }
3317
3318 static int opt_show_mode(void *optctx, const char *opt, const char *arg)
3319 {
3320     show_mode = !strcmp(arg, "video") ? SHOW_MODE_VIDEO :
3321                 !strcmp(arg, "waves") ? SHOW_MODE_WAVES :
3322                 !strcmp(arg, "rdft" ) ? SHOW_MODE_RDFT  :
3323                 parse_number_or_die(opt, arg, OPT_INT, 0, SHOW_MODE_NB-1);
3324     return 0;
3325 }
3326
3327 static void opt_input_file(void *optctx, const char *filename)
3328 {
3329     if (input_filename) {
3330         fprintf(stderr, "Argument '%s' provided as input filename, but '%s' was already specified.\n",
3331                 filename, input_filename);
3332         exit(1);
3333     }
3334     if (!strcmp(filename, "-"))
3335         filename = "pipe:";
3336     input_filename = filename;
3337 }
3338
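     /* Handles "-codec:<spec> NAME" where <spec> is a, v or s; for instance
        "-codec:v mpeg4" would force the mpeg4 video decoder (example name only). */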
3339 static int opt_codec(void *optctx, const char *opt, const char *arg)
3340 {
3341    const char *spec = strchr(opt, ':');
3342    if (!spec) {
3343        fprintf(stderr, "No media specifier was specified in '%s' in option '%s'\n",
3344                arg, opt);
3345        return AVERROR(EINVAL);
3346    }
3347    spec++;
3348    switch (spec[0]) {
3349    case 'a' :    audio_codec_name = arg; break;
3350    case 's' : subtitle_codec_name = arg; break;
3351    case 'v' :    video_codec_name = arg; break;
3352    default:
3353        fprintf(stderr, "Invalid media specifier '%s' in option '%s'\n", spec, opt);
3354        return AVERROR(EINVAL);
3355    }
3356    return 0;
3357 }
3358
3359 static int dummy;
3360
3361 static const OptionDef options[] = {
3362 #include "cmdutils_common_opts.h"
3363     { "x", HAS_ARG, { .func_arg = opt_width }, "force displayed width", "width" },
3364     { "y", HAS_ARG, { .func_arg = opt_height }, "force displayed height", "height" },
3365     { "s", HAS_ARG | OPT_VIDEO, { .func_arg = opt_frame_size }, "set frame size (WxH or abbreviation)", "size" },
3366     { "fs", OPT_BOOL, { &is_full_screen }, "force full screen" },
3367     { "an", OPT_BOOL, { &audio_disable }, "disable audio" },
3368     { "vn", OPT_BOOL, { &video_disable }, "disable video" },
3369     { "sn", OPT_BOOL, { &subtitle_disable }, "disable subtitling" },
3370     { "ast", OPT_INT | HAS_ARG | OPT_EXPERT, { &wanted_stream[AVMEDIA_TYPE_AUDIO] }, "select desired audio stream", "stream_number" },
3371     { "vst", OPT_INT | HAS_ARG | OPT_EXPERT, { &wanted_stream[AVMEDIA_TYPE_VIDEO] }, "select desired video stream", "stream_number" },
3372     { "sst", OPT_INT | HAS_ARG | OPT_EXPERT, { &wanted_stream[AVMEDIA_TYPE_SUBTITLE] }, "select desired subtitle stream", "stream_number" },
3373     { "ss", HAS_ARG, { .func_arg = opt_seek }, "seek to a given position in seconds", "pos" },
3374     { "t", HAS_ARG, { .func_arg = opt_duration }, "play  \"duration\" seconds of audio/video", "duration" },
3375     { "bytes", OPT_INT | HAS_ARG, { &seek_by_bytes }, "seek by bytes 0=off 1=on -1=auto", "val" },
3376     { "nodisp", OPT_BOOL, { &display_disable }, "disable graphical display" },
3377     { "f", HAS_ARG, { .func_arg = opt_format }, "force format", "fmt" },
3378     { "pix_fmt", HAS_ARG | OPT_EXPERT | OPT_VIDEO, { .func_arg = opt_frame_pix_fmt }, "set pixel format", "format" },
3379     { "stats", OPT_BOOL | OPT_EXPERT, { &show_status }, "show status", "" },
3380     { "bug", OPT_INT | HAS_ARG | OPT_EXPERT, { &workaround_bugs }, "workaround bugs", "" },
3381     { "fast", OPT_BOOL | OPT_EXPERT, { &fast }, "non spec compliant optimizations", "" },
3382     { "genpts", OPT_BOOL | OPT_EXPERT, { &genpts }, "generate pts", "" },
3383     { "drp", OPT_INT | HAS_ARG | OPT_EXPERT, { &decoder_reorder_pts }, "let decoder reorder pts 0=off 1=on -1=auto", ""},
3384     { "lowres", OPT_INT | HAS_ARG | OPT_EXPERT, { &lowres }, "", "" },
3385     { "idct", OPT_INT | HAS_ARG | OPT_EXPERT, { &idct }, "set idct algo",  "algo" },
3386     { "ec", OPT_INT | HAS_ARG | OPT_EXPERT, { &error_concealment }, "set error concealment options",  "bit_mask" },
3387     { "sync", HAS_ARG | OPT_EXPERT, { .func_arg = opt_sync }, "set audio-video sync. type (type=audio/video/ext)", "type" },
3388     { "autoexit", OPT_BOOL | OPT_EXPERT, { &autoexit }, "exit at the end", "" },
3389     { "exitonkeydown", OPT_BOOL | OPT_EXPERT, { &exit_on_keydown }, "exit on key down", "" },
3390     { "exitonmousedown", OPT_BOOL | OPT_EXPERT, { &exit_on_mousedown }, "exit on mouse down", "" },
3391     { "loop", OPT_INT | HAS_ARG | OPT_EXPERT, { &loop }, "set number of times the playback shall be looped", "loop count" },
3392     { "framedrop", OPT_BOOL | OPT_EXPERT, { &framedrop }, "drop frames when cpu is too slow", "" },
3393     { "infbuf", OPT_BOOL | OPT_EXPERT, { &infinite_buffer }, "don't limit the input buffer size (useful with realtime streams)", "" },
3394     { "window_title", OPT_STRING | HAS_ARG, { &window_title }, "set window title", "window title" },
3395 #if CONFIG_AVFILTER
3396     { "vf", OPT_STRING | HAS_ARG, { &vfilters }, "set video filters", "filter_graph" },
3397     { "af", OPT_STRING | HAS_ARG, { &afilters }, "set audio filters", "filter_graph" },
3398 #endif
3399     { "rdftspeed", OPT_INT | HAS_ARG| OPT_AUDIO | OPT_EXPERT, { &rdftspeed }, "rdft speed", "msecs" },
3400     { "showmode", HAS_ARG, { .func_arg = opt_show_mode}, "select show mode (0 = video, 1 = waves, 2 = RDFT)", "mode" },
3401     { "default", HAS_ARG | OPT_AUDIO | OPT_VIDEO | OPT_EXPERT, { .func_arg = opt_default }, "generic catch all option", "" },
3402     { "i", OPT_BOOL, { &dummy}, "read specified file", "input_file"},
3403     { "codec", HAS_ARG, { .func_arg = opt_codec}, "force decoder", "decoder_name" },
3404     { "acodec", HAS_ARG | OPT_STRING | OPT_EXPERT, {    &audio_codec_name }, "force audio decoder",    "decoder_name" },
3405     { "scodec", HAS_ARG | OPT_STRING | OPT_EXPERT, { &subtitle_codec_name }, "force subtitle decoder", "decoder_name" },
3406     { "vcodec", HAS_ARG | OPT_STRING | OPT_EXPERT, {    &video_codec_name }, "force video decoder",    "decoder_name" },
3407     { NULL, },
3408 };
3409
3410 static void show_usage(void)
3411 {
3412     av_log(NULL, AV_LOG_INFO, "Simple media player\n");
3413     av_log(NULL, AV_LOG_INFO, "usage: %s [options] input_file\n", program_name);
3414     av_log(NULL, AV_LOG_INFO, "\n");
3415 }
3416
3417 void show_help_default(const char *opt, const char *arg)
3418 {
3419     av_log_set_callback(log_callback_help);
3420     show_usage();
3421     show_help_options(options, "Main options:", 0, OPT_EXPERT, 0);
3422     show_help_options(options, "Advanced options:", OPT_EXPERT, 0, 0);
3423     printf("\n");
3424     show_help_children(avcodec_get_class(), AV_OPT_FLAG_DECODING_PARAM);
3425     show_help_children(avformat_get_class(), AV_OPT_FLAG_DECODING_PARAM);
3426 #if !CONFIG_AVFILTER
3427     show_help_children(sws_get_class(), AV_OPT_FLAG_ENCODING_PARAM);
3428 #else
3429     show_help_children(avfilter_get_class(), AV_OPT_FLAG_FILTERING_PARAM);
3430 #endif
3431     printf("\nWhile playing:\n"
3432            "q, ESC              quit\n"
3433            "f                   toggle full screen\n"
3434            "p, SPC              pause\n"
3435            "a                   cycle audio channel\n"
3436            "v                   cycle video channel\n"
3437            "t                   cycle subtitle channel\n"
3438            "w                   show audio waves\n"
3439            "s                   activate frame-step mode\n"
3440            "left/right          seek backward/forward 10 seconds\n"
3441            "down/up             seek backward/forward 1 minute\n"
3442            "page down/page up   seek backward/forward 10 minutes\n"
3443            "mouse click         seek to percentage in file corresponding to fraction of width\n"
3444            );
3445 }
3446
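     /* Lock manager callback registered with av_lockmgr_register(): create,
        obtain, release or destroy a mutex depending on op, returning 0 on
        success and non-zero on failure. */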
3447 static int lockmgr(void **mtx, enum AVLockOp op)
3448 {
3449     switch (op) {
3450     case AV_LOCK_CREATE:
3451         *mtx = SDL_CreateMutex();
3452         if (!*mtx)
3453             return 1;
3454         return 0;
3455     case AV_LOCK_OBTAIN:
3456         return !!SDL_LockMutex(*mtx);
3457     case AV_LOCK_RELEASE:
3458         return !!SDL_UnlockMutex(*mtx);
3459     case AV_LOCK_DESTROY:
3460         SDL_DestroyMutex(*mtx);
3461         return 0;
3462     }
3463     return 1;
3464 }
3465
3466 /* program entry point */
3467 int main(int argc, char **argv)
3468 {
3469     int flags;
3470     VideoState *is;
3471     char dummy_videodriver[] = "SDL_VIDEODRIVER=dummy";
3472
3473     av_log_set_flags(AV_LOG_SKIP_REPEATED);
3474     parse_loglevel(argc, argv, options);
3475
3476     /* register all codecs, demuxers and protocols */
3477     avcodec_register_all();
3478 #if CONFIG_AVDEVICE
3479     avdevice_register_all();
3480 #endif
3481 #if CONFIG_AVFILTER
3482     avfilter_register_all();
3483 #endif
3484     av_register_all();
3485     avformat_network_init();
3486
3487     init_opts();
3488
3489     signal(SIGINT , sigterm_handler); /* Interrupt (ANSI).    */
3490     signal(SIGTERM, sigterm_handler); /* Termination (ANSI).  */
3491
3492     show_banner(argc, argv, options);
3493
3494     parse_options(NULL, argc, argv, options, opt_input_file);
3495
3496     if (!input_filename) {
3497         show_usage();
3498         fprintf(stderr, "An input file must be specified\n");
3499         fprintf(stderr, "Use -h to get full help or, even better, run 'man %s'\n", program_name);
3500         exit(1);
3501     }
3502
3503     if (display_disable) {
3504         video_disable = 1;
3505     }
3506     flags = SDL_INIT_VIDEO | SDL_INIT_AUDIO | SDL_INIT_TIMER;
3507     if (audio_disable)
3508         flags &= ~SDL_INIT_AUDIO;
3509     if (display_disable)
3510         SDL_putenv(dummy_videodriver); /* For the event queue, we always need a video driver. */
3511 #if !defined(__MINGW32__) && !defined(__APPLE__)
3512     flags |= SDL_INIT_EVENTTHREAD; /* Not supported on Windows or Mac OS X */
3513 #endif
3514     if (SDL_Init (flags)) {
3515         fprintf(stderr, "Could not initialize SDL - %s\n", SDL_GetError());
3516         fprintf(stderr, "(Did you set the DISPLAY variable?)\n");
3517         exit(1);
3518     }
3519
3520     if (!display_disable) {
3521         const SDL_VideoInfo *vi = SDL_GetVideoInfo();
3522         fs_screen_width = vi->current_w;
3523         fs_screen_height = vi->current_h;
3524     }
3525
3526     SDL_EventState(SDL_ACTIVEEVENT, SDL_IGNORE);
3527     SDL_EventState(SDL_SYSWMEVENT, SDL_IGNORE);
3528     SDL_EventState(SDL_USEREVENT, SDL_IGNORE);
3529
3530     if (av_lockmgr_register(lockmgr)) {
3531         fprintf(stderr, "Could not initialize lock manager!\n");
3532         do_exit(NULL);
3533     }
3534
3535     av_init_packet(&flush_pkt);
3536     flush_pkt.data = (char *)(intptr_t)"FLUSH";
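         /* The flush packet is recognised in the decoder threads by its data
            pointer; it makes them call avcodec_flush_buffers() and marks the
            start of a new packet serial after a seek. */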
3537
3538     is = stream_open(input_filename, file_iformat);
3539     if (!is) {
3540         fprintf(stderr, "Failed to initialize VideoState!\n");
3541         do_exit(NULL);
3542     }
3543
3544     event_loop(is);
3545
3546     /* never returns */
3547
3548     return 0;
3549 }