]> git.sesse.net Git - ffmpeg/blob - ffplay.c
ffplay: remove unused variable "codec"
[ffmpeg] / ffplay.c
1 /*
2  * Copyright (c) 2003 Fabrice Bellard
3  *
4  * This file is part of FFmpeg.
5  *
6  * FFmpeg is free software; you can redistribute it and/or
7  * modify it under the terms of the GNU Lesser General Public
8  * License as published by the Free Software Foundation; either
9  * version 2.1 of the License, or (at your option) any later version.
10  *
11  * FFmpeg is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
14  * Lesser General Public License for more details.
15  *
16  * You should have received a copy of the GNU Lesser General Public
17  * License along with FFmpeg; if not, write to the Free Software
18  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
19  */
20
21 /**
22  * @file
23  * simple media player based on the FFmpeg libraries
24  */
25
26 #include "config.h"
27 #include <inttypes.h>
28 #include <math.h>
29 #include <limits.h>
30 #include <signal.h>
31 #include "libavutil/avstring.h"
32 #include "libavutil/colorspace.h"
33 #include "libavutil/mathematics.h"
34 #include "libavutil/pixdesc.h"
35 #include "libavutil/imgutils.h"
36 #include "libavutil/dict.h"
37 #include "libavutil/parseutils.h"
38 #include "libavutil/samplefmt.h"
39 #include "libavutil/avassert.h"
40 #include "libavutil/time.h"
41 #include "libavformat/avformat.h"
42 #include "libavdevice/avdevice.h"
43 #include "libswscale/swscale.h"
44 #include "libavutil/opt.h"
45 #include "libavcodec/avfft.h"
46 #include "libswresample/swresample.h"
47
48 #if CONFIG_AVFILTER
49 # include "libavfilter/avcodec.h"
50 # include "libavfilter/avfilter.h"
51 # include "libavfilter/avfiltergraph.h"
52 # include "libavfilter/buffersink.h"
53 # include "libavfilter/buffersrc.h"
54 #endif
55
56 #include <SDL.h>
57 #include <SDL_thread.h>
58
59 #include "cmdutils.h"
60
61 #include <assert.h>
62
63 const char program_name[] = "ffplay";
64 const int program_birth_year = 2003;
65
66 #define MAX_QUEUE_SIZE (15 * 1024 * 1024)
67 #define MIN_FRAMES 5
68
69 /* SDL audio buffer size, in samples. Should be small to have precise
70    A/V sync as SDL does not have hardware buffer fullness info. */
71 #define SDL_AUDIO_BUFFER_SIZE 1024
72
73 /* no AV sync correction is done if below the AV sync threshold */
74 #define AV_SYNC_THRESHOLD 0.01
75 /* no AV correction is done if too big error */
76 #define AV_NOSYNC_THRESHOLD 10.0
77
78 /* maximum audio speed change to get correct sync */
79 #define SAMPLE_CORRECTION_PERCENT_MAX 10
80
81 /* external clock speed adjustment constants for realtime sources based on buffer fullness */
82 #define EXTERNAL_CLOCK_SPEED_MIN  0.900
83 #define EXTERNAL_CLOCK_SPEED_MAX  1.010
84 #define EXTERNAL_CLOCK_SPEED_STEP 0.001
85
86 /* we use about AUDIO_DIFF_AVG_NB A-V differences to make the average */
87 #define AUDIO_DIFF_AVG_NB   20
88
89 /* polls for possible required screen refresh at least this often, should be less than 1/fps */
90 #define REFRESH_RATE 0.01
91
92 /* NOTE: the size must be big enough to compensate the hardware audio buffersize size */
93 /* TODO: We assume that a decoded and resampled frame fits into this buffer */
94 #define SAMPLE_ARRAY_SIZE (8 * 65536)
95
96 #define CURSOR_HIDE_DELAY 1000000
97
98 static int64_t sws_flags = SWS_BICUBIC;
99
100 typedef struct MyAVPacketList {
101     AVPacket pkt;
102     struct MyAVPacketList *next;
103     int serial;
104 } MyAVPacketList;
105
106 typedef struct PacketQueue {
107     MyAVPacketList *first_pkt, *last_pkt;
108     int nb_packets;
109     int size;
110     int abort_request;
111     int serial;
112     SDL_mutex *mutex;
113     SDL_cond *cond;
114 } PacketQueue;
115
116 #define VIDEO_PICTURE_QUEUE_SIZE 4
117 #define SUBPICTURE_QUEUE_SIZE 4
118
119 typedef struct VideoPicture {
120     double pts;             // presentation timestamp for this picture
121     int64_t pos;            // byte position in file
122     SDL_Overlay *bmp;
123     int width, height; /* source height & width */
124     int allocated;
125     int reallocate;
126     int serial;
127
128     AVRational sar;
129 } VideoPicture;
130
131 typedef struct SubPicture {
132     double pts; /* presentation time stamp for this picture */
133     AVSubtitle sub;
134 } SubPicture;
135
136 typedef struct AudioParams {
137     int freq;
138     int channels;
139     int64_t channel_layout;
140     enum AVSampleFormat fmt;
141 } AudioParams;
142
143 enum {
144     AV_SYNC_AUDIO_MASTER, /* default choice */
145     AV_SYNC_VIDEO_MASTER,
146     AV_SYNC_EXTERNAL_CLOCK, /* synchronize to an external clock */
147 };
148
149 typedef struct VideoState {
150     SDL_Thread *read_tid;
151     SDL_Thread *video_tid;
152     AVInputFormat *iformat;
153     int no_background;
154     int abort_request;
155     int force_refresh;
156     int paused;
157     int last_paused;
158     int queue_attachments_req;
159     int seek_req;
160     int seek_flags;
161     int64_t seek_pos;
162     int64_t seek_rel;
163     int read_pause_return;
164     AVFormatContext *ic;
165     int realtime;
166
167     int audio_stream;
168
169     int av_sync_type;
170     double external_clock;                   ///< external clock base
171     double external_clock_drift;             ///< external clock base - time (av_gettime) at which we updated external_clock
172     int64_t external_clock_time;             ///< last reference time
173     double external_clock_speed;             ///< speed of the external clock
174
175     double audio_clock;
176     int audio_clock_serial;
177     double audio_diff_cum; /* used for AV difference average computation */
178     double audio_diff_avg_coef;
179     double audio_diff_threshold;
180     int audio_diff_avg_count;
181     AVStream *audio_st;
182     PacketQueue audioq;
183     int audio_hw_buf_size;
184     uint8_t silence_buf[SDL_AUDIO_BUFFER_SIZE];
185     uint8_t *audio_buf;
186     uint8_t *audio_buf1;
187     unsigned int audio_buf_size; /* in bytes */
188     unsigned int audio_buf1_size;
189     int audio_buf_index; /* in bytes */
190     int audio_write_buf_size;
191     AVPacket audio_pkt_temp;
192     AVPacket audio_pkt;
193     int audio_pkt_temp_serial;
194     int audio_last_serial;
195     struct AudioParams audio_src;
196 #if CONFIG_AVFILTER
197     struct AudioParams audio_filter_src;
198 #endif
199     struct AudioParams audio_tgt;
200     struct SwrContext *swr_ctx;
201     double audio_current_pts;
202     double audio_current_pts_drift;
203     int frame_drops_early;
204     int frame_drops_late;
205     AVFrame *frame;
206
207     enum ShowMode {
208         SHOW_MODE_NONE = -1, SHOW_MODE_VIDEO = 0, SHOW_MODE_WAVES, SHOW_MODE_RDFT, SHOW_MODE_NB
209     } show_mode;
210     int16_t sample_array[SAMPLE_ARRAY_SIZE];
211     int sample_array_index;
212     int last_i_start;
213     RDFTContext *rdft;
214     int rdft_bits;
215     FFTSample *rdft_data;
216     int xpos;
217     double last_vis_time;
218
219     SDL_Thread *subtitle_tid;
220     int subtitle_stream;
221     int subtitle_stream_changed;
222     AVStream *subtitle_st;
223     PacketQueue subtitleq;
224     SubPicture subpq[SUBPICTURE_QUEUE_SIZE];
225     int subpq_size, subpq_rindex, subpq_windex;
226     SDL_mutex *subpq_mutex;
227     SDL_cond *subpq_cond;
228
229     double frame_timer;
230     double frame_last_pts;
231     double frame_last_duration;
232     double frame_last_dropped_pts;
233     double frame_last_returned_time;
234     double frame_last_filter_delay;
235     int64_t frame_last_dropped_pos;
236     int video_stream;
237     AVStream *video_st;
238     PacketQueue videoq;
239     double video_current_pts;       // current displayed pts
240     double video_current_pts_drift; // video_current_pts - time (av_gettime) at which we updated video_current_pts - used to have running video pts
241     int64_t video_current_pos;      // current displayed file pos
242     double max_frame_duration;      // maximum duration of a frame - above this, we consider the jump a timestamp discontinuity
243     int video_clock_serial;
244     VideoPicture pictq[VIDEO_PICTURE_QUEUE_SIZE];
245     int pictq_size, pictq_rindex, pictq_windex;
246     SDL_mutex *pictq_mutex;
247     SDL_cond *pictq_cond;
248 #if !CONFIG_AVFILTER
249     struct SwsContext *img_convert_ctx;
250 #endif
251     SDL_Rect last_display_rect;
252
253     char filename[1024];
254     int width, height, xleft, ytop;
255     int step;
256
257 #if CONFIG_AVFILTER
258     AVFilterContext *in_video_filter;   // the first filter in the video chain
259     AVFilterContext *out_video_filter;  // the last filter in the video chain
260     AVFilterContext *in_audio_filter;   // the first filter in the audio chain
261     AVFilterContext *out_audio_filter;  // the last filter in the audio chain
262     AVFilterGraph *agraph;              // audio filter graph
263 #endif
264
265     int last_video_stream, last_audio_stream, last_subtitle_stream;
266
267     SDL_cond *continue_read_thread;
268 } VideoState;
269
270 /* options specified by the user */
271 static AVInputFormat *file_iformat;
272 static const char *input_filename;
273 static const char *window_title;
274 static int fs_screen_width;
275 static int fs_screen_height;
276 static int default_width  = 640;
277 static int default_height = 480;
278 static int screen_width  = 0;
279 static int screen_height = 0;
280 static int audio_disable;
281 static int video_disable;
282 static int subtitle_disable;
283 static int wanted_stream[AVMEDIA_TYPE_NB] = {
284     [AVMEDIA_TYPE_AUDIO]    = -1,
285     [AVMEDIA_TYPE_VIDEO]    = -1,
286     [AVMEDIA_TYPE_SUBTITLE] = -1,
287 };
288 static int seek_by_bytes = -1;
289 static int display_disable;
290 static int show_status = 1;
291 static int av_sync_type = AV_SYNC_AUDIO_MASTER;
292 static int64_t start_time = AV_NOPTS_VALUE;
293 static int64_t duration = AV_NOPTS_VALUE;
294 static int workaround_bugs = 1;
295 static int fast = 0;
296 static int genpts = 0;
297 static int lowres = 0;
298 static int idct = FF_IDCT_AUTO;
299 static enum AVDiscard skip_frame       = AVDISCARD_DEFAULT;
300 static enum AVDiscard skip_idct        = AVDISCARD_DEFAULT;
301 static enum AVDiscard skip_loop_filter = AVDISCARD_DEFAULT;
302 static int error_concealment = 3;
303 static int decoder_reorder_pts = -1;
304 static int autoexit;
305 static int exit_on_keydown;
306 static int exit_on_mousedown;
307 static int loop = 1;
308 static int framedrop = -1;
309 static int infinite_buffer = -1;
310 static enum ShowMode show_mode = SHOW_MODE_NONE;
311 static const char *audio_codec_name;
312 static const char *subtitle_codec_name;
313 static const char *video_codec_name;
314 double rdftspeed = 0.02;
315 static int64_t cursor_last_shown;
316 static int cursor_hidden = 0;
317 #if CONFIG_AVFILTER
318 static char *vfilters = NULL;
319 static char *afilters = NULL;
320 #endif
321
322 /* current context */
323 static int is_full_screen;
324 static int64_t audio_callback_time;
325
326 static AVPacket flush_pkt;
327
328 #define FF_ALLOC_EVENT   (SDL_USEREVENT)
329 #define FF_QUIT_EVENT    (SDL_USEREVENT + 2)
330
331 static SDL_Surface *screen;
332
333 static inline
334 int cmp_audio_fmts(enum AVSampleFormat fmt1, int64_t channel_count1,
335                    enum AVSampleFormat fmt2, int64_t channel_count2)
336 {
337     /* If channel count == 1, planar and non-planar formats are the same */
338     if (channel_count1 == 1 && channel_count2 == 1)
339         return av_get_packed_sample_fmt(fmt1) != av_get_packed_sample_fmt(fmt2);
340     else
341         return channel_count1 != channel_count2 || fmt1 != fmt2;
342 }
343
344 static inline
345 int64_t get_valid_channel_layout(int64_t channel_layout, int channels)
346 {
347     if (channel_layout && av_get_channel_layout_nb_channels(channel_layout) == channels)
348         return channel_layout;
349     else
350         return 0;
351 }
352
353 static int packet_queue_put(PacketQueue *q, AVPacket *pkt);
354
355 static int packet_queue_put_private(PacketQueue *q, AVPacket *pkt)
356 {
357     MyAVPacketList *pkt1;
358
359     if (q->abort_request)
360        return -1;
361
362     pkt1 = av_malloc(sizeof(MyAVPacketList));
363     if (!pkt1)
364         return -1;
365     pkt1->pkt = *pkt;
366     pkt1->next = NULL;
367     if (pkt == &flush_pkt)
368         q->serial++;
369     pkt1->serial = q->serial;
370
371     if (!q->last_pkt)
372         q->first_pkt = pkt1;
373     else
374         q->last_pkt->next = pkt1;
375     q->last_pkt = pkt1;
376     q->nb_packets++;
377     q->size += pkt1->pkt.size + sizeof(*pkt1);
378     /* XXX: should duplicate packet data in DV case */
379     SDL_CondSignal(q->cond);
380     return 0;
381 }
382
383 static int packet_queue_put(PacketQueue *q, AVPacket *pkt)
384 {
385     int ret;
386
387     /* duplicate the packet */
388     if (pkt != &flush_pkt && av_dup_packet(pkt) < 0)
389         return -1;
390
391     SDL_LockMutex(q->mutex);
392     ret = packet_queue_put_private(q, pkt);
393     SDL_UnlockMutex(q->mutex);
394
395     if (pkt != &flush_pkt && ret < 0)
396         av_free_packet(pkt);
397
398     return ret;
399 }
400
401 /* packet queue handling */
402 static void packet_queue_init(PacketQueue *q)
403 {
404     memset(q, 0, sizeof(PacketQueue));
405     q->mutex = SDL_CreateMutex();
406     q->cond = SDL_CreateCond();
407     q->abort_request = 1;
408 }
409
410 static void packet_queue_flush(PacketQueue *q)
411 {
412     MyAVPacketList *pkt, *pkt1;
413
414     SDL_LockMutex(q->mutex);
415     for (pkt = q->first_pkt; pkt != NULL; pkt = pkt1) {
416         pkt1 = pkt->next;
417         av_free_packet(&pkt->pkt);
418         av_freep(&pkt);
419     }
420     q->last_pkt = NULL;
421     q->first_pkt = NULL;
422     q->nb_packets = 0;
423     q->size = 0;
424     SDL_UnlockMutex(q->mutex);
425 }
426
427 static void packet_queue_destroy(PacketQueue *q)
428 {
429     packet_queue_flush(q);
430     SDL_DestroyMutex(q->mutex);
431     SDL_DestroyCond(q->cond);
432 }
433
434 static void packet_queue_abort(PacketQueue *q)
435 {
436     SDL_LockMutex(q->mutex);
437
438     q->abort_request = 1;
439
440     SDL_CondSignal(q->cond);
441
442     SDL_UnlockMutex(q->mutex);
443 }
444
445 static void packet_queue_start(PacketQueue *q)
446 {
447     SDL_LockMutex(q->mutex);
448     q->abort_request = 0;
449     packet_queue_put_private(q, &flush_pkt);
450     SDL_UnlockMutex(q->mutex);
451 }
452
453 /* return < 0 if aborted, 0 if no packet and > 0 if packet.  */
454 static int packet_queue_get(PacketQueue *q, AVPacket *pkt, int block, int *serial)
455 {
456     MyAVPacketList *pkt1;
457     int ret;
458
459     SDL_LockMutex(q->mutex);
460
461     for (;;) {
462         if (q->abort_request) {
463             ret = -1;
464             break;
465         }
466
467         pkt1 = q->first_pkt;
468         if (pkt1) {
469             q->first_pkt = pkt1->next;
470             if (!q->first_pkt)
471                 q->last_pkt = NULL;
472             q->nb_packets--;
473             q->size -= pkt1->pkt.size + sizeof(*pkt1);
474             *pkt = pkt1->pkt;
475             if (serial)
476                 *serial = pkt1->serial;
477             av_free(pkt1);
478             ret = 1;
479             break;
480         } else if (!block) {
481             ret = 0;
482             break;
483         } else {
484             SDL_CondWait(q->cond, q->mutex);
485         }
486     }
487     SDL_UnlockMutex(q->mutex);
488     return ret;
489 }
490
491 static inline void fill_rectangle(SDL_Surface *screen,
492                                   int x, int y, int w, int h, int color, int update)
493 {
494     SDL_Rect rect;
495     rect.x = x;
496     rect.y = y;
497     rect.w = w;
498     rect.h = h;
499     SDL_FillRect(screen, &rect, color);
500     if (update && w > 0 && h > 0)
501         SDL_UpdateRect(screen, x, y, w, h);
502 }
503
504 /* draw only the border of a rectangle */
505 static void fill_border(int xleft, int ytop, int width, int height, int x, int y, int w, int h, int color, int update)
506 {
507     int w1, w2, h1, h2;
508
509     /* fill the background */
510     w1 = x;
511     if (w1 < 0)
512         w1 = 0;
513     w2 = width - (x + w);
514     if (w2 < 0)
515         w2 = 0;
516     h1 = y;
517     if (h1 < 0)
518         h1 = 0;
519     h2 = height - (y + h);
520     if (h2 < 0)
521         h2 = 0;
522     fill_rectangle(screen,
523                    xleft, ytop,
524                    w1, height,
525                    color, update);
526     fill_rectangle(screen,
527                    xleft + width - w2, ytop,
528                    w2, height,
529                    color, update);
530     fill_rectangle(screen,
531                    xleft + w1, ytop,
532                    width - w1 - w2, h1,
533                    color, update);
534     fill_rectangle(screen,
535                    xleft + w1, ytop + height - h2,
536                    width - w1 - w2, h2,
537                    color, update);
538 }
539
540 #define ALPHA_BLEND(a, oldp, newp, s)\
541 ((((oldp << s) * (255 - (a))) + (newp * (a))) / (255 << s))
542
543 #define RGBA_IN(r, g, b, a, s)\
544 {\
545     unsigned int v = ((const uint32_t *)(s))[0];\
546     a = (v >> 24) & 0xff;\
547     r = (v >> 16) & 0xff;\
548     g = (v >> 8) & 0xff;\
549     b = v & 0xff;\
550 }
551
552 #define YUVA_IN(y, u, v, a, s, pal)\
553 {\
554     unsigned int val = ((const uint32_t *)(pal))[*(const uint8_t*)(s)];\
555     a = (val >> 24) & 0xff;\
556     y = (val >> 16) & 0xff;\
557     u = (val >> 8) & 0xff;\
558     v = val & 0xff;\
559 }
560
561 #define YUVA_OUT(d, y, u, v, a)\
562 {\
563     ((uint32_t *)(d))[0] = (a << 24) | (y << 16) | (u << 8) | v;\
564 }
565
566
567 #define BPP 1
568
569 static void blend_subrect(AVPicture *dst, const AVSubtitleRect *rect, int imgw, int imgh)
570 {
571     int wrap, wrap3, width2, skip2;
572     int y, u, v, a, u1, v1, a1, w, h;
573     uint8_t *lum, *cb, *cr;
574     const uint8_t *p;
575     const uint32_t *pal;
576     int dstx, dsty, dstw, dsth;
577
578     dstw = av_clip(rect->w, 0, imgw);
579     dsth = av_clip(rect->h, 0, imgh);
580     dstx = av_clip(rect->x, 0, imgw - dstw);
581     dsty = av_clip(rect->y, 0, imgh - dsth);
582     lum = dst->data[0] + dsty * dst->linesize[0];
583     cb  = dst->data[1] + (dsty >> 1) * dst->linesize[1];
584     cr  = dst->data[2] + (dsty >> 1) * dst->linesize[2];
585
586     width2 = ((dstw + 1) >> 1) + (dstx & ~dstw & 1);
587     skip2 = dstx >> 1;
588     wrap = dst->linesize[0];
589     wrap3 = rect->pict.linesize[0];
590     p = rect->pict.data[0];
591     pal = (const uint32_t *)rect->pict.data[1];  /* Now in YCrCb! */
592
593     if (dsty & 1) {
594         lum += dstx;
595         cb += skip2;
596         cr += skip2;
597
598         if (dstx & 1) {
599             YUVA_IN(y, u, v, a, p, pal);
600             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
601             cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
602             cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
603             cb++;
604             cr++;
605             lum++;
606             p += BPP;
607         }
608         for (w = dstw - (dstx & 1); w >= 2; w -= 2) {
609             YUVA_IN(y, u, v, a, p, pal);
610             u1 = u;
611             v1 = v;
612             a1 = a;
613             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
614
615             YUVA_IN(y, u, v, a, p + BPP, pal);
616             u1 += u;
617             v1 += v;
618             a1 += a;
619             lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
620             cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
621             cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
622             cb++;
623             cr++;
624             p += 2 * BPP;
625             lum += 2;
626         }
627         if (w) {
628             YUVA_IN(y, u, v, a, p, pal);
629             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
630             cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
631             cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
632             p++;
633             lum++;
634         }
635         p += wrap3 - dstw * BPP;
636         lum += wrap - dstw - dstx;
637         cb += dst->linesize[1] - width2 - skip2;
638         cr += dst->linesize[2] - width2 - skip2;
639     }
640     for (h = dsth - (dsty & 1); h >= 2; h -= 2) {
641         lum += dstx;
642         cb += skip2;
643         cr += skip2;
644
645         if (dstx & 1) {
646             YUVA_IN(y, u, v, a, p, pal);
647             u1 = u;
648             v1 = v;
649             a1 = a;
650             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
651             p += wrap3;
652             lum += wrap;
653             YUVA_IN(y, u, v, a, p, pal);
654             u1 += u;
655             v1 += v;
656             a1 += a;
657             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
658             cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
659             cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
660             cb++;
661             cr++;
662             p += -wrap3 + BPP;
663             lum += -wrap + 1;
664         }
665         for (w = dstw - (dstx & 1); w >= 2; w -= 2) {
666             YUVA_IN(y, u, v, a, p, pal);
667             u1 = u;
668             v1 = v;
669             a1 = a;
670             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
671
672             YUVA_IN(y, u, v, a, p + BPP, pal);
673             u1 += u;
674             v1 += v;
675             a1 += a;
676             lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
677             p += wrap3;
678             lum += wrap;
679
680             YUVA_IN(y, u, v, a, p, pal);
681             u1 += u;
682             v1 += v;
683             a1 += a;
684             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
685
686             YUVA_IN(y, u, v, a, p + BPP, pal);
687             u1 += u;
688             v1 += v;
689             a1 += a;
690             lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
691
692             cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 2);
693             cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 2);
694
695             cb++;
696             cr++;
697             p += -wrap3 + 2 * BPP;
698             lum += -wrap + 2;
699         }
700         if (w) {
701             YUVA_IN(y, u, v, a, p, pal);
702             u1 = u;
703             v1 = v;
704             a1 = a;
705             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
706             p += wrap3;
707             lum += wrap;
708             YUVA_IN(y, u, v, a, p, pal);
709             u1 += u;
710             v1 += v;
711             a1 += a;
712             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
713             cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
714             cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
715             cb++;
716             cr++;
717             p += -wrap3 + BPP;
718             lum += -wrap + 1;
719         }
720         p += wrap3 + (wrap3 - dstw * BPP);
721         lum += wrap + (wrap - dstw - dstx);
722         cb += dst->linesize[1] - width2 - skip2;
723         cr += dst->linesize[2] - width2 - skip2;
724     }
725     /* handle odd height */
726     if (h) {
727         lum += dstx;
728         cb += skip2;
729         cr += skip2;
730
731         if (dstx & 1) {
732             YUVA_IN(y, u, v, a, p, pal);
733             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
734             cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
735             cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
736             cb++;
737             cr++;
738             lum++;
739             p += BPP;
740         }
741         for (w = dstw - (dstx & 1); w >= 2; w -= 2) {
742             YUVA_IN(y, u, v, a, p, pal);
743             u1 = u;
744             v1 = v;
745             a1 = a;
746             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
747
748             YUVA_IN(y, u, v, a, p + BPP, pal);
749             u1 += u;
750             v1 += v;
751             a1 += a;
752             lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
753             cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u, 1);
754             cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v, 1);
755             cb++;
756             cr++;
757             p += 2 * BPP;
758             lum += 2;
759         }
760         if (w) {
761             YUVA_IN(y, u, v, a, p, pal);
762             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
763             cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
764             cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
765         }
766     }
767 }
768
769 static void free_subpicture(SubPicture *sp)
770 {
771     avsubtitle_free(&sp->sub);
772 }
773
774 static void calculate_display_rect(SDL_Rect *rect, int scr_xleft, int scr_ytop, int scr_width, int scr_height, VideoPicture *vp)
775 {
776     float aspect_ratio;
777     int width, height, x, y;
778
779     if (vp->sar.num == 0)
780         aspect_ratio = 0;
781     else
782         aspect_ratio = av_q2d(vp->sar);
783
784     if (aspect_ratio <= 0.0)
785         aspect_ratio = 1.0;
786     aspect_ratio *= (float)vp->width / (float)vp->height;
787
788     /* XXX: we suppose the screen has a 1.0 pixel ratio */
789     height = scr_height;
790     width = ((int)rint(height * aspect_ratio)) & ~1;
791     if (width > scr_width) {
792         width = scr_width;
793         height = ((int)rint(width / aspect_ratio)) & ~1;
794     }
795     x = (scr_width - width) / 2;
796     y = (scr_height - height) / 2;
797     rect->x = scr_xleft + x;
798     rect->y = scr_ytop  + y;
799     rect->w = FFMAX(width,  1);
800     rect->h = FFMAX(height, 1);
801 }
802
803 static void video_image_display(VideoState *is)
804 {
805     VideoPicture *vp;
806     SubPicture *sp;
807     AVPicture pict;
808     SDL_Rect rect;
809     int i;
810
811     vp = &is->pictq[is->pictq_rindex];
812     if (vp->bmp) {
813         if (is->subtitle_st) {
814             if (is->subpq_size > 0) {
815                 sp = &is->subpq[is->subpq_rindex];
816
817                 if (vp->pts >= sp->pts + ((float) sp->sub.start_display_time / 1000)) {
818                     SDL_LockYUVOverlay (vp->bmp);
819
820                     pict.data[0] = vp->bmp->pixels[0];
821                     pict.data[1] = vp->bmp->pixels[2];
822                     pict.data[2] = vp->bmp->pixels[1];
823
824                     pict.linesize[0] = vp->bmp->pitches[0];
825                     pict.linesize[1] = vp->bmp->pitches[2];
826                     pict.linesize[2] = vp->bmp->pitches[1];
827
828                     for (i = 0; i < sp->sub.num_rects; i++)
829                         blend_subrect(&pict, sp->sub.rects[i],
830                                       vp->bmp->w, vp->bmp->h);
831
832                     SDL_UnlockYUVOverlay (vp->bmp);
833                 }
834             }
835         }
836
837         calculate_display_rect(&rect, is->xleft, is->ytop, is->width, is->height, vp);
838
839         SDL_DisplayYUVOverlay(vp->bmp, &rect);
840
841         if (rect.x != is->last_display_rect.x || rect.y != is->last_display_rect.y || rect.w != is->last_display_rect.w || rect.h != is->last_display_rect.h || is->force_refresh) {
842             int bgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0x00);
843             fill_border(is->xleft, is->ytop, is->width, is->height, rect.x, rect.y, rect.w, rect.h, bgcolor, 1);
844             is->last_display_rect = rect;
845         }
846     }
847 }
848
849 static inline int compute_mod(int a, int b)
850 {
851     return a < 0 ? a%b + b : a%b;
852 }
853
854 static void video_audio_display(VideoState *s)
855 {
856     int i, i_start, x, y1, y, ys, delay, n, nb_display_channels;
857     int ch, channels, h, h2, bgcolor, fgcolor;
858     int64_t time_diff;
859     int rdft_bits, nb_freq;
860
861     for (rdft_bits = 1; (1 << rdft_bits) < 2 * s->height; rdft_bits++)
862         ;
863     nb_freq = 1 << (rdft_bits - 1);
864
865     /* compute display index : center on currently output samples */
866     channels = s->audio_tgt.channels;
867     nb_display_channels = channels;
868     if (!s->paused) {
869         int data_used= s->show_mode == SHOW_MODE_WAVES ? s->width : (2*nb_freq);
870         n = 2 * channels;
871         delay = s->audio_write_buf_size;
872         delay /= n;
873
874         /* to be more precise, we take into account the time spent since
875            the last buffer computation */
876         if (audio_callback_time) {
877             time_diff = av_gettime() - audio_callback_time;
878             delay -= (time_diff * s->audio_tgt.freq) / 1000000;
879         }
880
881         delay += 2 * data_used;
882         if (delay < data_used)
883             delay = data_used;
884
885         i_start= x = compute_mod(s->sample_array_index - delay * channels, SAMPLE_ARRAY_SIZE);
886         if (s->show_mode == SHOW_MODE_WAVES) {
887             h = INT_MIN;
888             for (i = 0; i < 1000; i += channels) {
889                 int idx = (SAMPLE_ARRAY_SIZE + x - i) % SAMPLE_ARRAY_SIZE;
890                 int a = s->sample_array[idx];
891                 int b = s->sample_array[(idx + 4 * channels) % SAMPLE_ARRAY_SIZE];
892                 int c = s->sample_array[(idx + 5 * channels) % SAMPLE_ARRAY_SIZE];
893                 int d = s->sample_array[(idx + 9 * channels) % SAMPLE_ARRAY_SIZE];
894                 int score = a - d;
895                 if (h < score && (b ^ c) < 0) {
896                     h = score;
897                     i_start = idx;
898                 }
899             }
900         }
901
902         s->last_i_start = i_start;
903     } else {
904         i_start = s->last_i_start;
905     }
906
907     bgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0x00);
908     if (s->show_mode == SHOW_MODE_WAVES) {
909         fill_rectangle(screen,
910                        s->xleft, s->ytop, s->width, s->height,
911                        bgcolor, 0);
912
913         fgcolor = SDL_MapRGB(screen->format, 0xff, 0xff, 0xff);
914
915         /* total height for one channel */
916         h = s->height / nb_display_channels;
917         /* graph height / 2 */
918         h2 = (h * 9) / 20;
919         for (ch = 0; ch < nb_display_channels; ch++) {
920             i = i_start + ch;
921             y1 = s->ytop + ch * h + (h / 2); /* position of center line */
922             for (x = 0; x < s->width; x++) {
923                 y = (s->sample_array[i] * h2) >> 15;
924                 if (y < 0) {
925                     y = -y;
926                     ys = y1 - y;
927                 } else {
928                     ys = y1;
929                 }
930                 fill_rectangle(screen,
931                                s->xleft + x, ys, 1, y,
932                                fgcolor, 0);
933                 i += channels;
934                 if (i >= SAMPLE_ARRAY_SIZE)
935                     i -= SAMPLE_ARRAY_SIZE;
936             }
937         }
938
939         fgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0xff);
940
941         for (ch = 1; ch < nb_display_channels; ch++) {
942             y = s->ytop + ch * h;
943             fill_rectangle(screen,
944                            s->xleft, y, s->width, 1,
945                            fgcolor, 0);
946         }
947         SDL_UpdateRect(screen, s->xleft, s->ytop, s->width, s->height);
948     } else {
949         nb_display_channels= FFMIN(nb_display_channels, 2);
950         if (rdft_bits != s->rdft_bits) {
951             av_rdft_end(s->rdft);
952             av_free(s->rdft_data);
953             s->rdft = av_rdft_init(rdft_bits, DFT_R2C);
954             s->rdft_bits = rdft_bits;
955             s->rdft_data = av_malloc(4 * nb_freq * sizeof(*s->rdft_data));
956         }
957         {
958             FFTSample *data[2];
959             for (ch = 0; ch < nb_display_channels; ch++) {
960                 data[ch] = s->rdft_data + 2 * nb_freq * ch;
961                 i = i_start + ch;
962                 for (x = 0; x < 2 * nb_freq; x++) {
963                     double w = (x-nb_freq) * (1.0 / nb_freq);
964                     data[ch][x] = s->sample_array[i] * (1.0 - w * w);
965                     i += channels;
966                     if (i >= SAMPLE_ARRAY_SIZE)
967                         i -= SAMPLE_ARRAY_SIZE;
968                 }
969                 av_rdft_calc(s->rdft, data[ch]);
970             }
971             // least efficient way to do this, we should of course directly access it but its more than fast enough
972             for (y = 0; y < s->height; y++) {
973                 double w = 1 / sqrt(nb_freq);
974                 int a = sqrt(w * sqrt(data[0][2 * y + 0] * data[0][2 * y + 0] + data[0][2 * y + 1] * data[0][2 * y + 1]));
975                 int b = (nb_display_channels == 2 ) ? sqrt(w * sqrt(data[1][2 * y + 0] * data[1][2 * y + 0]
976                        + data[1][2 * y + 1] * data[1][2 * y + 1])) : a;
977                 a = FFMIN(a, 255);
978                 b = FFMIN(b, 255);
979                 fgcolor = SDL_MapRGB(screen->format, a, b, (a + b) / 2);
980
981                 fill_rectangle(screen,
982                             s->xpos, s->height-y, 1, 1,
983                             fgcolor, 0);
984             }
985         }
986         SDL_UpdateRect(screen, s->xpos, s->ytop, 1, s->height);
987         if (!s->paused)
988             s->xpos++;
989         if (s->xpos >= s->width)
990             s->xpos= s->xleft;
991     }
992 }
993
994 static void stream_close(VideoState *is)
995 {
996     VideoPicture *vp;
997     int i;
998     /* XXX: use a special url_shutdown call to abort parse cleanly */
999     is->abort_request = 1;
1000     SDL_WaitThread(is->read_tid, NULL);
1001     packet_queue_destroy(&is->videoq);
1002     packet_queue_destroy(&is->audioq);
1003     packet_queue_destroy(&is->subtitleq);
1004
1005     /* free all pictures */
1006     for (i = 0; i < VIDEO_PICTURE_QUEUE_SIZE; i++) {
1007         vp = &is->pictq[i];
1008         if (vp->bmp) {
1009             SDL_FreeYUVOverlay(vp->bmp);
1010             vp->bmp = NULL;
1011         }
1012     }
1013     SDL_DestroyMutex(is->pictq_mutex);
1014     SDL_DestroyCond(is->pictq_cond);
1015     SDL_DestroyMutex(is->subpq_mutex);
1016     SDL_DestroyCond(is->subpq_cond);
1017     SDL_DestroyCond(is->continue_read_thread);
1018 #if !CONFIG_AVFILTER
1019     sws_freeContext(is->img_convert_ctx);
1020 #endif
1021     av_free(is);
1022 }
1023
1024 static void do_exit(VideoState *is)
1025 {
1026     if (is) {
1027         stream_close(is);
1028     }
1029     av_lockmgr_register(NULL);
1030     uninit_opts();
1031 #if CONFIG_AVFILTER
1032     avfilter_uninit();
1033     av_freep(&vfilters);
1034 #endif
1035     avformat_network_deinit();
1036     if (show_status)
1037         printf("\n");
1038     SDL_Quit();
1039     av_log(NULL, AV_LOG_QUIET, "%s", "");
1040     exit(0);
1041 }
1042
1043 static void sigterm_handler(int sig)
1044 {
1045     exit(123);
1046 }
1047
1048 static int video_open(VideoState *is, int force_set_video_mode, VideoPicture *vp)
1049 {
1050     int flags = SDL_HWSURFACE | SDL_ASYNCBLIT | SDL_HWACCEL;
1051     int w,h;
1052     SDL_Rect rect;
1053
1054     if (is_full_screen) flags |= SDL_FULLSCREEN;
1055     else                flags |= SDL_RESIZABLE;
1056
1057     if (vp && vp->width) {
1058         calculate_display_rect(&rect, 0, 0, INT_MAX, vp->height, vp);
1059         default_width  = rect.w;
1060         default_height = rect.h;
1061     }
1062
1063     if (is_full_screen && fs_screen_width) {
1064         w = fs_screen_width;
1065         h = fs_screen_height;
1066     } else if (!is_full_screen && screen_width) {
1067         w = screen_width;
1068         h = screen_height;
1069     } else {
1070         w = default_width;
1071         h = default_height;
1072     }
1073     if (screen && is->width == screen->w && screen->w == w
1074        && is->height== screen->h && screen->h == h && !force_set_video_mode)
1075         return 0;
1076     screen = SDL_SetVideoMode(w, h, 0, flags);
1077     if (!screen) {
1078         fprintf(stderr, "SDL: could not set video mode - exiting\n");
1079         do_exit(is);
1080     }
1081     if (!window_title)
1082         window_title = input_filename;
1083     SDL_WM_SetCaption(window_title, window_title);
1084
1085     is->width  = screen->w;
1086     is->height = screen->h;
1087
1088     return 0;
1089 }
1090
1091 /* display the current picture, if any */
1092 static void video_display(VideoState *is)
1093 {
1094     if (!screen)
1095         video_open(is, 0, NULL);
1096     if (is->audio_st && is->show_mode != SHOW_MODE_VIDEO)
1097         video_audio_display(is);
1098     else if (is->video_st)
1099         video_image_display(is);
1100 }
1101
1102 /* get the current audio clock value */
1103 static double get_audio_clock(VideoState *is)
1104 {
1105     if (is->audio_clock_serial != is->audioq.serial)
1106         return NAN;
1107     if (is->paused) {
1108         return is->audio_current_pts;
1109     } else {
1110         return is->audio_current_pts_drift + av_gettime() / 1000000.0;
1111     }
1112 }
1113
1114 /* get the current video clock value */
1115 static double get_video_clock(VideoState *is)
1116 {
1117     if (is->video_clock_serial != is->videoq.serial)
1118         return NAN;
1119     if (is->paused) {
1120         return is->video_current_pts;
1121     } else {
1122         return is->video_current_pts_drift + av_gettime() / 1000000.0;
1123     }
1124 }
1125
1126 /* get the current external clock value */
1127 static double get_external_clock(VideoState *is)
1128 {
1129     if (is->paused) {
1130         return is->external_clock;
1131     } else {
1132         double time = av_gettime() / 1000000.0;
1133         return is->external_clock_drift + time - (time - is->external_clock_time / 1000000.0) * (1.0 - is->external_clock_speed);
1134     }
1135 }
1136
1137 static int get_master_sync_type(VideoState *is) {
1138     if (is->av_sync_type == AV_SYNC_VIDEO_MASTER) {
1139         if (is->video_st)
1140             return AV_SYNC_VIDEO_MASTER;
1141         else
1142             return AV_SYNC_AUDIO_MASTER;
1143     } else if (is->av_sync_type == AV_SYNC_AUDIO_MASTER) {
1144         if (is->audio_st)
1145             return AV_SYNC_AUDIO_MASTER;
1146         else
1147             return AV_SYNC_EXTERNAL_CLOCK;
1148     } else {
1149         return AV_SYNC_EXTERNAL_CLOCK;
1150     }
1151 }
1152
1153 /* get the current master clock value */
1154 static double get_master_clock(VideoState *is)
1155 {
1156     double val;
1157
1158     switch (get_master_sync_type(is)) {
1159         case AV_SYNC_VIDEO_MASTER:
1160             val = get_video_clock(is);
1161             break;
1162         case AV_SYNC_AUDIO_MASTER:
1163             val = get_audio_clock(is);
1164             break;
1165         default:
1166             val = get_external_clock(is);
1167             break;
1168     }
1169     return val;
1170 }
1171
1172 static void update_external_clock_pts(VideoState *is, double pts)
1173 {
1174    is->external_clock_time = av_gettime();
1175    is->external_clock = pts;
1176    is->external_clock_drift = pts - is->external_clock_time / 1000000.0;
1177 }
1178
1179 static void check_external_clock_sync(VideoState *is, double pts) {
1180     double ext_clock = get_external_clock(is);
1181     if (isnan(ext_clock) || fabs(ext_clock - pts) > AV_NOSYNC_THRESHOLD) {
1182         update_external_clock_pts(is, pts);
1183     }
1184 }
1185
1186 static void update_external_clock_speed(VideoState *is, double speed) {
1187     update_external_clock_pts(is, get_external_clock(is));
1188     is->external_clock_speed = speed;
1189 }
1190
1191 static void check_external_clock_speed(VideoState *is) {
1192    if (is->video_stream >= 0 && is->videoq.nb_packets <= MIN_FRAMES / 2 ||
1193        is->audio_stream >= 0 && is->audioq.nb_packets <= MIN_FRAMES / 2) {
1194        update_external_clock_speed(is, FFMAX(EXTERNAL_CLOCK_SPEED_MIN, is->external_clock_speed - EXTERNAL_CLOCK_SPEED_STEP));
1195    } else if ((is->video_stream < 0 || is->videoq.nb_packets > MIN_FRAMES * 2) &&
1196               (is->audio_stream < 0 || is->audioq.nb_packets > MIN_FRAMES * 2)) {
1197        update_external_clock_speed(is, FFMIN(EXTERNAL_CLOCK_SPEED_MAX, is->external_clock_speed + EXTERNAL_CLOCK_SPEED_STEP));
1198    } else {
1199        double speed = is->external_clock_speed;
1200        if (speed != 1.0)
1201            update_external_clock_speed(is, speed + EXTERNAL_CLOCK_SPEED_STEP * (1.0 - speed) / fabs(1.0 - speed));
1202    }
1203 }
1204
1205 /* seek in the stream */
1206 static void stream_seek(VideoState *is, int64_t pos, int64_t rel, int seek_by_bytes)
1207 {
1208     if (!is->seek_req) {
1209         is->seek_pos = pos;
1210         is->seek_rel = rel;
1211         is->seek_flags &= ~AVSEEK_FLAG_BYTE;
1212         if (seek_by_bytes)
1213             is->seek_flags |= AVSEEK_FLAG_BYTE;
1214         is->seek_req = 1;
1215         SDL_CondSignal(is->continue_read_thread);
1216     }
1217 }
1218
1219 /* pause or resume the video */
1220 static void stream_toggle_pause(VideoState *is)
1221 {
1222     if (is->paused) {
1223         is->frame_timer += av_gettime() / 1000000.0 + is->video_current_pts_drift - is->video_current_pts;
1224         if (is->read_pause_return != AVERROR(ENOSYS)) {
1225             is->video_current_pts = is->video_current_pts_drift + av_gettime() / 1000000.0;
1226         }
1227         is->video_current_pts_drift = is->video_current_pts - av_gettime() / 1000000.0;
1228     }
1229     update_external_clock_pts(is, get_external_clock(is));
1230     is->paused = !is->paused;
1231 }
1232
1233 static void toggle_pause(VideoState *is)
1234 {
1235     stream_toggle_pause(is);
1236     is->step = 0;
1237 }
1238
1239 static void step_to_next_frame(VideoState *is)
1240 {
1241     /* if the stream is paused unpause it, then step */
1242     if (is->paused)
1243         stream_toggle_pause(is);
1244     is->step = 1;
1245 }
1246
1247 static double compute_target_delay(double delay, VideoState *is)
1248 {
1249     double sync_threshold, diff;
1250
1251     /* update delay to follow master synchronisation source */
1252     if (get_master_sync_type(is) != AV_SYNC_VIDEO_MASTER) {
1253         /* if video is slave, we try to correct big delays by
1254            duplicating or deleting a frame */
1255         diff = get_video_clock(is) - get_master_clock(is);
1256
1257         /* skip or repeat frame. We take into account the
1258            delay to compute the threshold. I still don't know
1259            if it is the best guess */
1260         sync_threshold = FFMAX(AV_SYNC_THRESHOLD, delay);
1261         if (!isnan(diff) && fabs(diff) < AV_NOSYNC_THRESHOLD) {
1262             if (diff <= -sync_threshold)
1263                 delay = 0;
1264             else if (diff >= sync_threshold)
1265                 delay = 2 * delay;
1266         }
1267     }
1268
1269     av_dlog(NULL, "video: delay=%0.3f A-V=%f\n",
1270             delay, -diff);
1271
1272     return delay;
1273 }
1274
1275 static void pictq_next_picture(VideoState *is) {
1276     /* update queue size and signal for next picture */
1277     if (++is->pictq_rindex == VIDEO_PICTURE_QUEUE_SIZE)
1278         is->pictq_rindex = 0;
1279
1280     SDL_LockMutex(is->pictq_mutex);
1281     is->pictq_size--;
1282     SDL_CondSignal(is->pictq_cond);
1283     SDL_UnlockMutex(is->pictq_mutex);
1284 }
1285
1286 static int pictq_prev_picture(VideoState *is) {
1287     VideoPicture *prevvp;
1288     int ret = 0;
1289     /* update queue size and signal for the previous picture */
1290     prevvp = &is->pictq[(is->pictq_rindex + VIDEO_PICTURE_QUEUE_SIZE - 1) % VIDEO_PICTURE_QUEUE_SIZE];
1291     if (prevvp->allocated && prevvp->serial == is->videoq.serial) {
1292         SDL_LockMutex(is->pictq_mutex);
1293         if (is->pictq_size < VIDEO_PICTURE_QUEUE_SIZE - 1) {
1294             if (--is->pictq_rindex == -1)
1295                 is->pictq_rindex = VIDEO_PICTURE_QUEUE_SIZE - 1;
1296             is->pictq_size++;
1297             ret = 1;
1298         }
1299         SDL_CondSignal(is->pictq_cond);
1300         SDL_UnlockMutex(is->pictq_mutex);
1301     }
1302     return ret;
1303 }
1304
1305 static void update_video_pts(VideoState *is, double pts, int64_t pos, int serial) {
1306     double time = av_gettime() / 1000000.0;
1307     /* update current video pts */
1308     is->video_current_pts = pts;
1309     is->video_current_pts_drift = is->video_current_pts - time;
1310     is->video_current_pos = pos;
1311     is->frame_last_pts = pts;
1312     is->video_clock_serial = serial;
1313     if (is->videoq.serial == serial)
1314         check_external_clock_sync(is, is->video_current_pts);
1315 }
1316
1317 /* called to display each frame */
1318 static void video_refresh(void *opaque, double *remaining_time)
1319 {
1320     VideoState *is = opaque;
1321     VideoPicture *vp;
1322     double time;
1323
1324     SubPicture *sp, *sp2;
1325
1326     if (!is->paused && get_master_sync_type(is) == AV_SYNC_EXTERNAL_CLOCK && is->realtime)
1327         check_external_clock_speed(is);
1328
1329     if (!display_disable && is->show_mode != SHOW_MODE_VIDEO && is->audio_st) {
1330         time = av_gettime() / 1000000.0;
1331         if (is->force_refresh || is->last_vis_time + rdftspeed < time) {
1332             video_display(is);
1333             is->last_vis_time = time;
1334         }
1335         *remaining_time = FFMIN(*remaining_time, is->last_vis_time + rdftspeed - time);
1336     }
1337
1338     if (is->video_st) {
1339         int redisplay = 0;
1340         if (is->force_refresh)
1341             redisplay = pictq_prev_picture(is);
1342 retry:
1343         if (is->pictq_size == 0) {
1344             SDL_LockMutex(is->pictq_mutex);
1345             if (is->frame_last_dropped_pts != AV_NOPTS_VALUE && is->frame_last_dropped_pts > is->frame_last_pts) {
1346                 update_video_pts(is, is->frame_last_dropped_pts, is->frame_last_dropped_pos, 0);
1347                 is->frame_last_dropped_pts = AV_NOPTS_VALUE;
1348             }
1349             SDL_UnlockMutex(is->pictq_mutex);
1350             // nothing to do, no picture to display in the queue
1351         } else {
1352             double last_duration, duration, delay;
1353             /* dequeue the picture */
1354             vp = &is->pictq[is->pictq_rindex];
1355
1356             if (vp->serial != is->videoq.serial) {
1357                 pictq_next_picture(is);
1358                 redisplay = 0;
1359                 goto retry;
1360             }
1361
1362             if (is->paused)
1363                 goto display;
1364
1365             /* compute nominal last_duration */
1366             last_duration = vp->pts - is->frame_last_pts;
1367             if (last_duration > 0 && last_duration < is->max_frame_duration) {
1368                 /* if duration of the last frame was sane, update last_duration in video state */
1369                 is->frame_last_duration = last_duration;
1370             }
1371             delay = compute_target_delay(is->frame_last_duration, is);
1372
1373             time= av_gettime()/1000000.0;
1374             if (time < is->frame_timer + delay) {
1375                 *remaining_time = FFMIN(is->frame_timer + delay - time, *remaining_time);
1376                 return;
1377             }
1378
1379             if (delay > 0)
1380                 is->frame_timer += delay * FFMAX(1, floor((time-is->frame_timer) / delay));
1381
1382             SDL_LockMutex(is->pictq_mutex);
1383             update_video_pts(is, vp->pts, vp->pos, vp->serial);
1384             SDL_UnlockMutex(is->pictq_mutex);
1385
1386             if (is->pictq_size > 1) {
1387                 VideoPicture *nextvp = &is->pictq[(is->pictq_rindex + 1) % VIDEO_PICTURE_QUEUE_SIZE];
1388                 duration = nextvp->pts - vp->pts;
1389                 if(!is->step && (redisplay || framedrop>0 || (framedrop && get_master_sync_type(is) != AV_SYNC_VIDEO_MASTER)) && time > is->frame_timer + duration){
1390                     if (!redisplay)
1391                         is->frame_drops_late++;
1392                     pictq_next_picture(is);
1393                     redisplay = 0;
1394                     goto retry;
1395                 }
1396             }
1397
1398             if (is->subtitle_st) {
1399                 if (is->subtitle_stream_changed) {
1400                     SDL_LockMutex(is->subpq_mutex);
1401
1402                     while (is->subpq_size) {
1403                         free_subpicture(&is->subpq[is->subpq_rindex]);
1404
1405                         /* update queue size and signal for next picture */
1406                         if (++is->subpq_rindex == SUBPICTURE_QUEUE_SIZE)
1407                             is->subpq_rindex = 0;
1408
1409                         is->subpq_size--;
1410                     }
1411                     is->subtitle_stream_changed = 0;
1412
1413                     SDL_CondSignal(is->subpq_cond);
1414                     SDL_UnlockMutex(is->subpq_mutex);
1415                 } else {
1416                     if (is->subpq_size > 0) {
1417                         sp = &is->subpq[is->subpq_rindex];
1418
1419                         if (is->subpq_size > 1)
1420                             sp2 = &is->subpq[(is->subpq_rindex + 1) % SUBPICTURE_QUEUE_SIZE];
1421                         else
1422                             sp2 = NULL;
1423
1424                         if ((is->video_current_pts > (sp->pts + ((float) sp->sub.end_display_time / 1000)))
1425                                 || (sp2 && is->video_current_pts > (sp2->pts + ((float) sp2->sub.start_display_time / 1000))))
1426                         {
1427                             free_subpicture(sp);
1428
1429                             /* update queue size and signal for next picture */
1430                             if (++is->subpq_rindex == SUBPICTURE_QUEUE_SIZE)
1431                                 is->subpq_rindex = 0;
1432
1433                             SDL_LockMutex(is->subpq_mutex);
1434                             is->subpq_size--;
1435                             SDL_CondSignal(is->subpq_cond);
1436                             SDL_UnlockMutex(is->subpq_mutex);
1437                         }
1438                     }
1439                 }
1440             }
1441
1442 display:
1443             /* display picture */
1444             if (!display_disable && is->show_mode == SHOW_MODE_VIDEO)
1445                 video_display(is);
1446
1447             pictq_next_picture(is);
1448
1449             if (is->step && !is->paused)
1450                 stream_toggle_pause(is);
1451         }
1452     }
1453     is->force_refresh = 0;
1454     if (show_status) {
1455         static int64_t last_time;
1456         int64_t cur_time;
1457         int aqsize, vqsize, sqsize;
1458         double av_diff;
1459
1460         cur_time = av_gettime();
1461         if (!last_time || (cur_time - last_time) >= 30000) {
1462             aqsize = 0;
1463             vqsize = 0;
1464             sqsize = 0;
1465             if (is->audio_st)
1466                 aqsize = is->audioq.size;
1467             if (is->video_st)
1468                 vqsize = is->videoq.size;
1469             if (is->subtitle_st)
1470                 sqsize = is->subtitleq.size;
1471             av_diff = 0;
1472             if (is->audio_st && is->video_st)
1473                 av_diff = get_audio_clock(is) - get_video_clock(is);
1474             printf("%7.2f A-V:%7.3f fd=%4d aq=%5dKB vq=%5dKB sq=%5dB f=%"PRId64"/%"PRId64"   \r",
1475                    get_master_clock(is),
1476                    av_diff,
1477                    is->frame_drops_early + is->frame_drops_late,
1478                    aqsize / 1024,
1479                    vqsize / 1024,
1480                    sqsize,
1481                    is->video_st ? is->video_st->codec->pts_correction_num_faulty_dts : 0,
1482                    is->video_st ? is->video_st->codec->pts_correction_num_faulty_pts : 0);
1483             fflush(stdout);
1484             last_time = cur_time;
1485         }
1486     }
1487 }
1488
1489 /* allocate a picture (needs to do that in main thread to avoid
1490    potential locking problems */
1491 static void alloc_picture(VideoState *is)
1492 {
1493     VideoPicture *vp;
1494
1495     vp = &is->pictq[is->pictq_windex];
1496
1497     if (vp->bmp)
1498         SDL_FreeYUVOverlay(vp->bmp);
1499
1500     video_open(is, 0, vp);
1501
1502     vp->bmp = SDL_CreateYUVOverlay(vp->width, vp->height,
1503                                    SDL_YV12_OVERLAY,
1504                                    screen);
1505     if (!vp->bmp || vp->bmp->pitches[0] < vp->width) {
1506         /* SDL allocates a buffer smaller than requested if the video
1507          * overlay hardware is unable to support the requested size. */
1508         fprintf(stderr, "Error: the video system does not support an image\n"
1509                         "size of %dx%d pixels. Try using -lowres or -vf \"scale=w:h\"\n"
1510                         "to reduce the image size.\n", vp->width, vp->height );
1511         do_exit(is);
1512     }
1513
1514     SDL_LockMutex(is->pictq_mutex);
1515     vp->allocated = 1;
1516     SDL_CondSignal(is->pictq_cond);
1517     SDL_UnlockMutex(is->pictq_mutex);
1518 }
1519
1520 static void duplicate_right_border_pixels(SDL_Overlay *bmp) {
1521     int i, width, height;
1522     Uint8 *p, *maxp;
1523     for (i = 0; i < 3; i++) {
1524         width  = bmp->w;
1525         height = bmp->h;
1526         if (i > 0) {
1527             width  >>= 1;
1528             height >>= 1;
1529         }
1530         if (bmp->pitches[i] > width) {
1531             maxp = bmp->pixels[i] + bmp->pitches[i] * height - 1;
1532             for (p = bmp->pixels[i] + width - 1; p < maxp; p += bmp->pitches[i])
1533                 *(p+1) = *p;
1534         }
1535     }
1536 }
1537
1538 static int queue_picture(VideoState *is, AVFrame *src_frame, double pts, int64_t pos, int serial)
1539 {
1540     VideoPicture *vp;
1541
1542 #if defined(DEBUG_SYNC) && 0
1543     printf("frame_type=%c pts=%0.3f\n",
1544            av_get_picture_type_char(src_frame->pict_type), pts);
1545 #endif
1546
1547     /* wait until we have space to put a new picture */
1548     SDL_LockMutex(is->pictq_mutex);
1549
1550     /* keep the last already displayed picture in the queue */
1551     while (is->pictq_size >= VIDEO_PICTURE_QUEUE_SIZE - 2 &&
1552            !is->videoq.abort_request) {
1553         SDL_CondWait(is->pictq_cond, is->pictq_mutex);
1554     }
1555     SDL_UnlockMutex(is->pictq_mutex);
1556
1557     if (is->videoq.abort_request)
1558         return -1;
1559
1560     vp = &is->pictq[is->pictq_windex];
1561
1562 #if CONFIG_AVFILTER
1563     vp->sar = src_frame->sample_aspect_ratio;
1564 #else
1565     vp->sar = av_guess_sample_aspect_ratio(is->ic, is->video_st, src_frame);
1566 #endif
1567
1568     /* alloc or resize hardware picture buffer */
1569     if (!vp->bmp || vp->reallocate || !vp->allocated ||
1570         vp->width  != src_frame->width ||
1571         vp->height != src_frame->height) {
1572         SDL_Event event;
1573
1574         vp->allocated  = 0;
1575         vp->reallocate = 0;
1576         vp->width = src_frame->width;
1577         vp->height = src_frame->height;
1578
1579         /* the allocation must be done in the main thread to avoid
1580            locking problems. */
1581         event.type = FF_ALLOC_EVENT;
1582         event.user.data1 = is;
1583         SDL_PushEvent(&event);
1584
1585         /* wait until the picture is allocated */
1586         SDL_LockMutex(is->pictq_mutex);
1587         while (!vp->allocated && !is->videoq.abort_request) {
1588             SDL_CondWait(is->pictq_cond, is->pictq_mutex);
1589         }
1590         /* if the queue is aborted, we have to pop the pending ALLOC event or wait for the allocation to complete */
1591         if (is->videoq.abort_request && SDL_PeepEvents(&event, 1, SDL_GETEVENT, SDL_EVENTMASK(FF_ALLOC_EVENT)) != 1) {
1592             while (!vp->allocated) {
1593                 SDL_CondWait(is->pictq_cond, is->pictq_mutex);
1594             }
1595         }
1596         SDL_UnlockMutex(is->pictq_mutex);
1597
1598         if (is->videoq.abort_request)
1599             return -1;
1600     }
1601
1602     /* if the frame is not skipped, then display it */
1603     if (vp->bmp) {
1604         AVPicture pict = { { 0 } };
1605
1606         /* get a pointer on the bitmap */
1607         SDL_LockYUVOverlay (vp->bmp);
1608
1609         pict.data[0] = vp->bmp->pixels[0];
1610         pict.data[1] = vp->bmp->pixels[2];
1611         pict.data[2] = vp->bmp->pixels[1];
1612
1613         pict.linesize[0] = vp->bmp->pitches[0];
1614         pict.linesize[1] = vp->bmp->pitches[2];
1615         pict.linesize[2] = vp->bmp->pitches[1];
1616
1617 #if CONFIG_AVFILTER
1618         // FIXME use direct rendering
1619         av_picture_copy(&pict, (AVPicture *)src_frame,
1620                         src_frame->format, vp->width, vp->height);
1621 #else
1622         av_opt_get_int(sws_opts, "sws_flags", 0, &sws_flags);
1623         is->img_convert_ctx = sws_getCachedContext(is->img_convert_ctx,
1624             vp->width, vp->height, src_frame->format, vp->width, vp->height,
1625             AV_PIX_FMT_YUV420P, sws_flags, NULL, NULL, NULL);
1626         if (is->img_convert_ctx == NULL) {
1627             fprintf(stderr, "Cannot initialize the conversion context\n");
1628             exit(1);
1629         }
1630         sws_scale(is->img_convert_ctx, src_frame->data, src_frame->linesize,
1631                   0, vp->height, pict.data, pict.linesize);
1632 #endif
1633         /* workaround SDL PITCH_WORKAROUND */
1634         duplicate_right_border_pixels(vp->bmp);
1635         /* update the bitmap content */
1636         SDL_UnlockYUVOverlay(vp->bmp);
1637
1638         vp->pts = pts;
1639         vp->pos = pos;
1640         vp->serial = serial;
1641
1642         /* now we can update the picture count */
1643         if (++is->pictq_windex == VIDEO_PICTURE_QUEUE_SIZE)
1644             is->pictq_windex = 0;
1645         SDL_LockMutex(is->pictq_mutex);
1646         is->pictq_size++;
1647         SDL_UnlockMutex(is->pictq_mutex);
1648     }
1649     return 0;
1650 }
1651
1652 static int get_video_frame(VideoState *is, AVFrame *frame, int64_t *pts, AVPacket *pkt, int *serial)
1653 {
1654     int got_picture;
1655
1656     if (packet_queue_get(&is->videoq, pkt, 1, serial) < 0)
1657         return -1;
1658
1659     if (pkt->data == flush_pkt.data) {
1660         avcodec_flush_buffers(is->video_st->codec);
1661
1662         SDL_LockMutex(is->pictq_mutex);
1663         // Make sure there are no long delay timers (ideally we should just flush the queue but that's harder)
1664         while (is->pictq_size && !is->videoq.abort_request) {
1665             SDL_CondWait(is->pictq_cond, is->pictq_mutex);
1666         }
1667         is->video_current_pos = -1;
1668         is->frame_last_pts = AV_NOPTS_VALUE;
1669         is->frame_last_duration = 0;
1670         is->frame_timer = (double)av_gettime() / 1000000.0;
1671         is->frame_last_dropped_pts = AV_NOPTS_VALUE;
1672         SDL_UnlockMutex(is->pictq_mutex);
1673         return 0;
1674     }
1675
1676     if(avcodec_decode_video2(is->video_st->codec, frame, &got_picture, pkt) < 0)
1677         return 0;
1678
1679     if (got_picture) {
1680         int ret = 1;
1681
1682         if (decoder_reorder_pts == -1) {
1683             *pts = av_frame_get_best_effort_timestamp(frame);
1684         } else if (decoder_reorder_pts) {
1685             *pts = frame->pkt_pts;
1686         } else {
1687             *pts = frame->pkt_dts;
1688         }
1689
1690         if (*pts == AV_NOPTS_VALUE) {
1691             *pts = 0;
1692         }
1693
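        /* Early frame drop: when video is not the master clock, drop the
         * frame before the expensive filter/convert/queue path if, even
         * after being displayed for its nominal duration (the pts advance
         * since the previous frame) and allowing for the last filtering
         * delay, the video clock would still trail the master clock. */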
1694         if (framedrop>0 || (framedrop && get_master_sync_type(is) != AV_SYNC_VIDEO_MASTER)) {
1695             SDL_LockMutex(is->pictq_mutex);
1696             if (is->frame_last_pts != AV_NOPTS_VALUE && *pts) {
1697                 double clockdiff = get_video_clock(is) - get_master_clock(is);
1698                 double dpts = av_q2d(is->video_st->time_base) * *pts;
1699                 double ptsdiff = dpts - is->frame_last_pts;
1700                 if (!isnan(clockdiff) && fabs(clockdiff) < AV_NOSYNC_THRESHOLD &&
1701                      ptsdiff > 0 && ptsdiff < AV_NOSYNC_THRESHOLD &&
1702                      clockdiff + ptsdiff - is->frame_last_filter_delay < 0) {
1703                     is->frame_last_dropped_pos = pkt->pos;
1704                     is->frame_last_dropped_pts = dpts;
1705                     is->frame_drops_early++;
1706                     ret = 0;
1707                 }
1708             }
1709             SDL_UnlockMutex(is->pictq_mutex);
1710         }
1711
1712         return ret;
1713     }
1714     return 0;
1715 }
1716
1717 #if CONFIG_AVFILTER
1718 static int configure_filtergraph(AVFilterGraph *graph, const char *filtergraph,
1719                                  AVFilterContext *source_ctx, AVFilterContext *sink_ctx)
1720 {
1721     int ret;
1722     AVFilterInOut *outputs = NULL, *inputs = NULL;
1723
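    /* Note the inverted naming: "outputs" describes the output pad of the
     * source filter (labelled "in") and "inputs" the input pad of the sink
     * (labelled "out"); these are the labels a textual filtergraph is
     * expected to connect to.  Without a user-supplied filtergraph the
     * source is linked straight to the sink. */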
1724     if (filtergraph) {
1725         outputs = avfilter_inout_alloc();
1726         inputs  = avfilter_inout_alloc();
1727         if (!outputs || !inputs) {
1728             ret = AVERROR(ENOMEM);
1729             goto fail;
1730         }
1731
1732         outputs->name       = av_strdup("in");
1733         outputs->filter_ctx = source_ctx;
1734         outputs->pad_idx    = 0;
1735         outputs->next       = NULL;
1736
1737         inputs->name        = av_strdup("out");
1738         inputs->filter_ctx  = sink_ctx;
1739         inputs->pad_idx     = 0;
1740         inputs->next        = NULL;
1741
1742         if ((ret = avfilter_graph_parse(graph, filtergraph, &inputs, &outputs, NULL)) < 0)
1743             goto fail;
1744     } else {
1745         if ((ret = avfilter_link(source_ctx, 0, sink_ctx, 0)) < 0)
1746             goto fail;
1747     }
1748
1749     ret = avfilter_graph_config(graph, NULL);
1750 fail:
1751     avfilter_inout_free(&outputs);
1752     avfilter_inout_free(&inputs);
1753     return ret;
1754 }
1755
1756 static int configure_video_filters(AVFilterGraph *graph, VideoState *is, const char *vfilters, AVFrame *frame)
1757 {
1758     static const enum AVPixelFormat pix_fmts[] = { AV_PIX_FMT_YUV420P, AV_PIX_FMT_NONE };
1759     char sws_flags_str[128];
1760     char buffersrc_args[256];
1761     int ret;
1762     AVBufferSinkParams *buffersink_params = av_buffersink_params_alloc();
1763     AVFilterContext *filt_src = NULL, *filt_out = NULL, *filt_crop;
1764     AVCodecContext *codec = is->video_st->codec;
1765
1766     if (!buffersink_params)
1767         return AVERROR(ENOMEM);
1768
1769     av_opt_get_int(sws_opts, "sws_flags", 0, &sws_flags);
1770     snprintf(sws_flags_str, sizeof(sws_flags_str), "flags=%"PRId64, sws_flags);
1771     graph->scale_sws_opts = av_strdup(sws_flags_str);
1772
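    /* The buffer source must be told the incoming frame geometry, pixel
     * format, stream time base and sample aspect ratio up front; the SAR
     * denominator is clamped to at least 1 so a 0/0 ratio is never passed. */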
1773     snprintf(buffersrc_args, sizeof(buffersrc_args),
1774              "video_size=%dx%d:pix_fmt=%d:time_base=%d/%d:pixel_aspect=%d/%d",
1775              frame->width, frame->height, frame->format,
1776              is->video_st->time_base.num, is->video_st->time_base.den,
1777              codec->sample_aspect_ratio.num, FFMAX(codec->sample_aspect_ratio.den, 1));
1778
1779     if ((ret = avfilter_graph_create_filter(&filt_src,
1780                                             avfilter_get_by_name("buffer"),
1781                                             "ffplay_buffer", buffersrc_args, NULL,
1782                                             graph)) < 0)
1783         goto fail;
1784
1785     buffersink_params->pixel_fmts = pix_fmts;
1786     ret = avfilter_graph_create_filter(&filt_out,
1787                                        avfilter_get_by_name("buffersink"),
1788                                        "ffplay_buffersink", NULL, buffersink_params, graph);
1789     if (ret < 0)
1790         goto fail;
1791
1792     /* The SDL YUV code does not handle odd width/height for some driver
1793      * combinations, therefore we crop the picture to an even width/height. */
1794     if ((ret = avfilter_graph_create_filter(&filt_crop,
1795                                             avfilter_get_by_name("crop"),
1796                                             "ffplay_crop", "floor(in_w/2)*2:floor(in_h/2)*2", NULL, graph)) < 0)
1797         goto fail;
1798     if ((ret = avfilter_link(filt_crop, 0, filt_out, 0)) < 0)
1799         goto fail;
1800
1801     if ((ret = configure_filtergraph(graph, vfilters, filt_src, filt_crop)) < 0)
1802         goto fail;
1803
1804     is->in_video_filter  = filt_src;
1805     is->out_video_filter = filt_out;
1806
1807 fail:
1808     av_freep(&buffersink_params);
1809     return ret;
1810 }
1811
1812 static int configure_audio_filters(VideoState *is, const char *afilters, int force_output_format)
1813 {
1814     static const enum AVSampleFormat sample_fmts[] = { AV_SAMPLE_FMT_S16, AV_SAMPLE_FMT_NONE };
1815     int sample_rates[2] = { 0, -1 };
1816     int64_t channel_layouts[2] = { 0, -1 };
1817     int channels[2] = { 0, -1 };
1818     AVFilterContext *filt_asrc = NULL, *filt_asink = NULL;
1819     char asrc_args[256];
1820     AVABufferSinkParams *asink_params = NULL;
1821     int ret;
1822
1823     avfilter_graph_free(&is->agraph);
1824     if (!(is->agraph = avfilter_graph_alloc()))
1825         return AVERROR(ENOMEM);
1826
1827     ret = snprintf(asrc_args, sizeof(asrc_args),
1828                    "sample_rate=%d:sample_fmt=%s:channels=%d",
1829                    is->audio_filter_src.freq, av_get_sample_fmt_name(is->audio_filter_src.fmt),
1830                    is->audio_filter_src.channels);
1831     if (is->audio_filter_src.channel_layout)
1832         snprintf(asrc_args + ret, sizeof(asrc_args) - ret,
1833                  ":channel_layout=0x%"PRIx64,  is->audio_filter_src.channel_layout);
1834
1835     ret = avfilter_graph_create_filter(&filt_asrc,
1836                                        avfilter_get_by_name("abuffer"), "ffplay_abuffer",
1837                                        asrc_args, NULL, is->agraph);
1838     if (ret < 0)
1839         goto end;
1840
1841     if (!(asink_params = av_abuffersink_params_alloc())) {
1842         ret = AVERROR(ENOMEM);
1843         goto end;
1844     }
1845     asink_params->sample_fmts = sample_fmts;
1846
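    /* By default let the sink accept any channel count.  When the output
     * format is forced (the SDL audio device is already open), constrain the
     * sink to the device parameters in audio_tgt so no further conversion is
     * needed after the filtergraph. */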
1847     asink_params->all_channel_counts = 1;
1848     if (force_output_format) {
1849         channel_layouts[0] = is->audio_tgt.channel_layout;
1850         asink_params->channel_layouts = channel_layouts;
1851         asink_params->all_channel_counts = 0;
1852         channels[0] = is->audio_tgt.channels;
1853         asink_params->channel_counts = channels;
1854         asink_params->all_channel_counts = 0;
1855         sample_rates[0] = is->audio_tgt.freq;
1856         asink_params->sample_rates = sample_rates;
1857     }
1858
1859     ret = avfilter_graph_create_filter(&filt_asink,
1860                                        avfilter_get_by_name("abuffersink"), "ffplay_abuffersink",
1861                                        NULL, asink_params, is->agraph);
1862     if (ret < 0)
1863         goto end;
1864
1865     if ((ret = configure_filtergraph(is->agraph, afilters, filt_asrc, filt_asink)) < 0)
1866         goto end;
1867
1868     is->in_audio_filter  = filt_asrc;
1869     is->out_audio_filter = filt_asink;
1870
1871 end:
1872     av_freep(&asink_params);
1873     if (ret < 0)
1874         avfilter_graph_free(&is->agraph);
1875     return ret;
1876 }
1877 #endif  /* CONFIG_AVFILTER */
1878
1879 static int video_thread(void *arg)
1880 {
1881     AVPacket pkt = { 0 };
1882     VideoState *is = arg;
1883     AVFrame *frame = av_frame_alloc();
1884     int64_t pts_int = AV_NOPTS_VALUE, pos = -1;
1885     double pts;
1886     int ret;
1887     int serial = 0;
1888
1889 #if CONFIG_AVFILTER
1890     AVFilterGraph *graph = avfilter_graph_alloc();
1891     AVFilterContext *filt_out = NULL, *filt_in = NULL;
1892     int last_w = 0;
1893     int last_h = 0;
1894     enum AVPixelFormat last_format = -2;
1895     int last_serial = -1;
1896 #endif
1897
1898     for (;;) {
1899 #if CONFIG_AVFILTER
1900         AVRational tb;
1901 #endif
1902         while (is->paused && !is->videoq.abort_request)
1903             SDL_Delay(10);
1904
1905         avcodec_get_frame_defaults(frame);
1906         av_free_packet(&pkt);
1907
1908         ret = get_video_frame(is, frame, &pts_int, &pkt, &serial);
1909         if (ret < 0)
1910             goto the_end;
1911
1912         if (!ret)
1913             continue;
1914
1915 #if CONFIG_AVFILTER
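        /* The buffer source parameters are fixed at creation time, so the
         * whole filtergraph is rebuilt whenever the decoded frame geometry
         * or pixel format changes, or the packet queue serial changes
         * (i.e. a seek/flush occurred). */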
1916         if (   last_w != frame->width
1917             || last_h != frame->height
1918             || last_format != frame->format
1919             || last_serial != serial) {
1920             av_log(NULL, AV_LOG_DEBUG,
1921                    "Video frame changed from size:%dx%d format:%s serial:%d to size:%dx%d format:%s serial:%d\n",
1922                    last_w, last_h,
1923                    (const char *)av_x_if_null(av_get_pix_fmt_name(last_format), "none"), last_serial,
1924                    frame->width, frame->height,
1925                    (const char *)av_x_if_null(av_get_pix_fmt_name(frame->format), "none"), serial);
1926             avfilter_graph_free(&graph);
1927             graph = avfilter_graph_alloc();
1928             if ((ret = configure_video_filters(graph, is, vfilters, frame)) < 0) {
1929                 SDL_Event event;
1930                 event.type = FF_QUIT_EVENT;
1931                 event.user.data1 = is;
1932                 SDL_PushEvent(&event);
1933                 av_free_packet(&pkt);
1934                 goto the_end;
1935             }
1936             filt_in  = is->in_video_filter;
1937             filt_out = is->out_video_filter;
1938             last_w = frame->width;
1939             last_h = frame->height;
1940             last_format = frame->format;
1941             last_serial = serial;
1942         }
1943
1944         frame->pts = pts_int;
1945         frame->sample_aspect_ratio = av_guess_sample_aspect_ratio(is->ic, is->video_st, frame);
1946         ret = av_buffersrc_add_frame(filt_in, frame);
1947         if (ret < 0)
1948             goto the_end;
1949         av_frame_unref(frame);
1950         avcodec_get_frame_defaults(frame);
1951         av_free_packet(&pkt);
1952
1953         while (ret >= 0) {
1954             is->frame_last_returned_time = av_gettime() / 1000000.0;
1955
1956             ret = av_buffersink_get_frame_flags(filt_out, frame, 0);
1957             if (ret < 0) {
1958                 ret = 0;
1959                 break;
1960             }
1961
1962             is->frame_last_filter_delay = av_gettime() / 1000000.0 - is->frame_last_returned_time;
1963             if (fabs(is->frame_last_filter_delay) > AV_NOSYNC_THRESHOLD / 10.0)
1964                 is->frame_last_filter_delay = 0;
1965
1966             pts_int = frame->pts;
1967             tb      = filt_out->inputs[0]->time_base;
1968             pos     = av_frame_get_pkt_pos(frame);
1969             if (av_cmp_q(tb, is->video_st->time_base)) {
1970                 av_unused int64_t pts1 = pts_int;
1971                 pts_int = av_rescale_q(pts_int, tb, is->video_st->time_base);
1972                 av_dlog(NULL, "video_thread(): "
1973                         "tb:%d/%d pts:%"PRId64" -> tb:%d/%d pts:%"PRId64"\n",
1974                         tb.num, tb.den, pts1,
1975                         is->video_st->time_base.num, is->video_st->time_base.den, pts_int);
1976             }
1977             pts = pts_int * av_q2d(is->video_st->time_base);
1978             ret = queue_picture(is, frame, pts, pos, serial);
1979             av_frame_unref(frame);
1980         }
1981 #else
1982         pts = pts_int * av_q2d(is->video_st->time_base);
1983         ret = queue_picture(is, frame, pts, pkt.pos, serial);
1984         av_frame_unref(frame);
1985 #endif
1986
1987         if (ret < 0)
1988             goto the_end;
1989     }
1990  the_end:
1991     avcodec_flush_buffers(is->video_st->codec);
1992 #if CONFIG_AVFILTER
1993     avfilter_graph_free(&graph);
1994 #endif
1995     av_free_packet(&pkt);
1996     av_frame_free(&frame);
1997     return 0;
1998 }
1999
2000 static int subtitle_thread(void *arg)
2001 {
2002     VideoState *is = arg;
2003     SubPicture *sp;
2004     AVPacket pkt1, *pkt = &pkt1;
2005     int got_subtitle;
2006     double pts;
2007     int i, j;
2008     int r, g, b, y, u, v, a;
2009
2010     for (;;) {
2011         while (is->paused && !is->subtitleq.abort_request) {
2012             SDL_Delay(10);
2013         }
2014         if (packet_queue_get(&is->subtitleq, pkt, 1, NULL) < 0)
2015             break;
2016
2017         if (pkt->data == flush_pkt.data) {
2018             avcodec_flush_buffers(is->subtitle_st->codec);
2019             continue;
2020         }
2021         SDL_LockMutex(is->subpq_mutex);
2022         while (is->subpq_size >= SUBPICTURE_QUEUE_SIZE &&
2023                !is->subtitleq.abort_request) {
2024             SDL_CondWait(is->subpq_cond, is->subpq_mutex);
2025         }
2026         SDL_UnlockMutex(is->subpq_mutex);
2027
2028         if (is->subtitleq.abort_request)
2029             return 0;
2030
2031         sp = &is->subpq[is->subpq_windex];
2032
2033         /* NOTE: pts here is the PTS of the _first_ picture beginning in
2034            this packet, if any */
2035         pts = 0;
2036         if (pkt->pts != AV_NOPTS_VALUE)
2037             pts = av_q2d(is->subtitle_st->time_base) * pkt->pts;
2038
2039         avcodec_decode_subtitle2(is->subtitle_st->codec, &sp->sub,
2040                                  &got_subtitle, pkt);
2041         if (got_subtitle && sp->sub.format == 0) {
2042             if (sp->sub.pts != AV_NOPTS_VALUE)
2043                 pts = sp->sub.pts / (double)AV_TIME_BASE;
2044             sp->pts = pts;
2045
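            /* Bitmap subtitles carry their palette as RGBA in
             * rects[i]->pict.data[1]; convert each palette entry to CCIR
             * YUVA in place so it can later be blended directly into the
             * YUV overlay. */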
2046             for (i = 0; i < sp->sub.num_rects; i++)
2047             {
2048                 for (j = 0; j < sp->sub.rects[i]->nb_colors; j++)
2049                 {
2050                     RGBA_IN(r, g, b, a, (uint32_t*)sp->sub.rects[i]->pict.data[1] + j);
2051                     y = RGB_TO_Y_CCIR(r, g, b);
2052                     u = RGB_TO_U_CCIR(r, g, b, 0);
2053                     v = RGB_TO_V_CCIR(r, g, b, 0);
2054                     YUVA_OUT((uint32_t*)sp->sub.rects[i]->pict.data[1] + j, y, u, v, a);
2055                 }
2056             }
2057
2058             /* now we can update the picture count */
2059             if (++is->subpq_windex == SUBPICTURE_QUEUE_SIZE)
2060                 is->subpq_windex = 0;
2061             SDL_LockMutex(is->subpq_mutex);
2062             is->subpq_size++;
2063             SDL_UnlockMutex(is->subpq_mutex);
2064         }
2065         av_free_packet(pkt);
2066     }
2067     return 0;
2068 }
2069
2070 /* copy samples into the circular buffer used for the audio display (waveform / spectrum) */
2071 static void update_sample_display(VideoState *is, short *samples, int samples_size)
2072 {
2073     int size, len;
2074
2075     size = samples_size / sizeof(short);
2076     while (size > 0) {
2077         len = SAMPLE_ARRAY_SIZE - is->sample_array_index;
2078         if (len > size)
2079             len = size;
2080         memcpy(is->sample_array + is->sample_array_index, samples, len * sizeof(short));
2081         samples += len;
2082         is->sample_array_index += len;
2083         if (is->sample_array_index >= SAMPLE_ARRAY_SIZE)
2084             is->sample_array_index = 0;
2085         size -= len;
2086     }
2087 }
2088
2089 /* return the wanted number of samples to get better sync if sync_type is video
2090  * or external master clock */
2091 static int synchronize_audio(VideoState *is, int nb_samples)
2092 {
2093     int wanted_nb_samples = nb_samples;
2094
2095     /* if not master, then we try to remove or add samples to correct the clock */
2096     if (get_master_sync_type(is) != AV_SYNC_AUDIO_MASTER) {
2097         double diff, avg_diff;
2098         int min_nb_samples, max_nb_samples;
2099
2100         diff = get_audio_clock(is) - get_master_clock(is);
2101
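        /* audio_diff_cum is the geometric series
         *     c_n = diff_n + coef * c_(n-1)
         * so c_n * (1 - coef) is an exponentially weighted average of
         * roughly the last AUDIO_DIFF_AVG_NB A-V differences.  Only when
         * that average exceeds audio_diff_threshold is the frame resized,
         * and the correction is clamped to +-SAMPLE_CORRECTION_PERCENT_MAX
         * percent of the original sample count. */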
2102         if (!isnan(diff) && fabs(diff) < AV_NOSYNC_THRESHOLD) {
2103             is->audio_diff_cum = diff + is->audio_diff_avg_coef * is->audio_diff_cum;
2104             if (is->audio_diff_avg_count < AUDIO_DIFF_AVG_NB) {
2105                 /* not enough measurements to have a reliable estimate */
2106                 is->audio_diff_avg_count++;
2107             } else {
2108                 /* estimate the A-V difference */
2109                 avg_diff = is->audio_diff_cum * (1.0 - is->audio_diff_avg_coef);
2110
2111                 if (fabs(avg_diff) >= is->audio_diff_threshold) {
2112                     wanted_nb_samples = nb_samples + (int)(diff * is->audio_src.freq);
2113                     min_nb_samples = ((nb_samples * (100 - SAMPLE_CORRECTION_PERCENT_MAX) / 100));
2114                     max_nb_samples = ((nb_samples * (100 + SAMPLE_CORRECTION_PERCENT_MAX) / 100));
2115                     wanted_nb_samples = FFMIN(FFMAX(wanted_nb_samples, min_nb_samples), max_nb_samples);
2116                 }
2117                 av_dlog(NULL, "diff=%f adiff=%f sample_diff=%d apts=%0.3f %f\n",
2118                         diff, avg_diff, wanted_nb_samples - nb_samples,
2119                         is->audio_clock, is->audio_diff_threshold);
2120             }
2121         } else {
2122             /* too big a difference: probably initial PTS errors, so
2123                reset the A-V filter */
2124             is->audio_diff_avg_count = 0;
2125             is->audio_diff_cum       = 0;
2126         }
2127     }
2128
2129     return wanted_nb_samples;
2130 }
2131
2132 /**
2133  * Decode one audio frame and return its uncompressed size.
2134  *
2135  * The processed audio frame is decoded, converted if required, and
2136  * stored in is->audio_buf, with size in bytes given by the return
2137  * value.
2138  */
2139 static int audio_decode_frame(VideoState *is)
2140 {
2141     AVPacket *pkt_temp = &is->audio_pkt_temp;
2142     AVPacket *pkt = &is->audio_pkt;
2143     AVCodecContext *dec = is->audio_st->codec;
2144     int len1, data_size, resampled_data_size;
2145     int64_t dec_channel_layout;
2146     int got_frame;
2147     av_unused double audio_clock0;
2148     int new_packet = 0;
2149     int flush_complete = 0;
2150     int wanted_nb_samples;
2151     AVRational tb;
2152
2153     for (;;) {
2154         /* NOTE: the audio packet can contain several frames */
2155         while (pkt_temp->size > 0 || (!pkt_temp->data && new_packet)) {
2156             if (!is->frame) {
2157                 if (!(is->frame = avcodec_alloc_frame()))
2158                     return AVERROR(ENOMEM);
2159             } else {
2160                 av_frame_unref(is->frame);
2161                 avcodec_get_frame_defaults(is->frame);
2162             }
2163
2164             if (is->audioq.serial != is->audio_pkt_temp_serial)
2165                 break;
2166
2167             if (is->paused)
2168                 return -1;
2169
2170             if (flush_complete)
2171                 break;
2172             new_packet = 0;
2173             len1 = avcodec_decode_audio4(dec, is->frame, &got_frame, pkt_temp);
2174             if (len1 < 0) {
2175                 /* if error, we skip the frame */
2176                 pkt_temp->size = 0;
2177                 break;
2178             }
2179
2180             pkt_temp->data += len1;
2181             pkt_temp->size -= len1;
2182
2183             if (!got_frame) {
2184                 /* stop sending empty packets if the decoder is finished */
2185                 if (!pkt_temp->data && dec->codec->capabilities & CODEC_CAP_DELAY)
2186                     flush_complete = 1;
2187                 continue;
2188             }
2189
2190             if (is->frame->pts == AV_NOPTS_VALUE && pkt_temp->pts != AV_NOPTS_VALUE)
2191                 is->frame->pts = av_rescale_q(pkt_temp->pts, is->audio_st->time_base, dec->time_base);
2192             if (pkt_temp->pts != AV_NOPTS_VALUE)
2193                 pkt_temp->pts += (double) is->frame->nb_samples / is->frame->sample_rate / av_q2d(is->audio_st->time_base);
2194             tb = dec->time_base;
2195
2196 #if CONFIG_AVFILTER
2197             {
2198                 int ret;
2199                 int reconfigure;
2200
2201                 dec_channel_layout = get_valid_channel_layout(is->frame->channel_layout, av_frame_get_channels(is->frame));
2202
2203                 reconfigure =
2204                     cmp_audio_fmts(is->audio_filter_src.fmt, is->audio_filter_src.channels,
2205                                    is->frame->format, av_frame_get_channels(is->frame))    ||
2206                     is->audio_filter_src.channel_layout != dec_channel_layout ||
2207                     is->audio_filter_src.freq           != is->frame->sample_rate ||
2208                     is->audio_pkt_temp_serial           != is->audio_last_serial;
2209
2210                 if (reconfigure) {
2211                     char buf1[1024], buf2[1024];
2212                     av_get_channel_layout_string(buf1, sizeof(buf1), -1, is->audio_filter_src.channel_layout);
2213                     av_get_channel_layout_string(buf2, sizeof(buf2), -1, dec_channel_layout);
2214                     av_log(NULL, AV_LOG_DEBUG,
2215                            "Audio frame changed from rate:%d ch:%d fmt:%s layout:%s serial:%d to rate:%d ch:%d fmt:%s layout:%s serial:%d\n",
2216                            is->audio_filter_src.freq, is->audio_filter_src.channels, av_get_sample_fmt_name(is->audio_filter_src.fmt), buf1, is->audio_last_serial,
2217                            is->frame->sample_rate, av_frame_get_channels(is->frame), av_get_sample_fmt_name(is->frame->format), buf2, is->audio_pkt_temp_serial);
2218
2219                     is->audio_filter_src.fmt            = is->frame->format;
2220                     is->audio_filter_src.channels       = av_frame_get_channels(is->frame);
2221                     is->audio_filter_src.channel_layout = dec_channel_layout;
2222                     is->audio_filter_src.freq           = is->frame->sample_rate;
2223                     is->audio_last_serial               = is->audio_pkt_temp_serial;
2224
2225                     if ((ret = configure_audio_filters(is, afilters, 1)) < 0)
2226                         return ret;
2227                 }
2228
2229                 if ((ret = av_buffersrc_add_frame(is->in_audio_filter, is->frame)) < 0)
2230                     return ret;
2231                 av_frame_unref(is->frame);
2232                 if ((ret = av_buffersink_get_frame_flags(is->out_audio_filter, is->frame, 0)) < 0)
2233                     return ret;
2234                 tb = is->out_audio_filter->inputs[0]->time_base;
2235             }
2236 #endif
2237
2238             data_size = av_samples_get_buffer_size(NULL, av_frame_get_channels(is->frame),
2239                                                    is->frame->nb_samples,
2240                                                    is->frame->format, 1);
2241
2242             dec_channel_layout =
2243                 (is->frame->channel_layout && av_frame_get_channels(is->frame) == av_get_channel_layout_nb_channels(is->frame->channel_layout)) ?
2244                 is->frame->channel_layout : av_get_default_channel_layout(av_frame_get_channels(is->frame));
2245             wanted_nb_samples = synchronize_audio(is, is->frame->nb_samples);
2246
2247             if (is->frame->format        != is->audio_src.fmt            ||
2248                 dec_channel_layout       != is->audio_src.channel_layout ||
2249                 is->frame->sample_rate   != is->audio_src.freq           ||
2250                 (wanted_nb_samples       != is->frame->nb_samples && !is->swr_ctx)) {
2251                 swr_free(&is->swr_ctx);
2252                 is->swr_ctx = swr_alloc_set_opts(NULL,
2253                                                  is->audio_tgt.channel_layout, is->audio_tgt.fmt, is->audio_tgt.freq,
2254                                                  dec_channel_layout,           is->frame->format, is->frame->sample_rate,
2255                                                  0, NULL);
2256                 if (!is->swr_ctx || swr_init(is->swr_ctx) < 0) {
2257                     fprintf(stderr, "Cannot create sample rate converter for conversion of %d Hz %s %d channels to %d Hz %s %d channels!\n",
2258                             is->frame->sample_rate, av_get_sample_fmt_name(is->frame->format), av_frame_get_channels(is->frame),
2259                             is->audio_tgt.freq, av_get_sample_fmt_name(is->audio_tgt.fmt), is->audio_tgt.channels);
2260                     break;
2261                 }
2262                 is->audio_src.channel_layout = dec_channel_layout;
2263                 is->audio_src.channels       = av_frame_get_channels(is->frame);
2264                 is->audio_src.freq = is->frame->sample_rate;
2265                 is->audio_src.fmt = is->frame->format;
2266             }
2267
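            /* When resampling, reserve some headroom in the output buffer
             * (+256 samples) for rate-conversion rounding, and pass the
             * sample delta computed by synchronize_audio() to
             * swr_set_compensation() so the stretch/squeeze is spread over
             * the whole output frame. */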
2268             if (is->swr_ctx) {
2269                 const uint8_t **in = (const uint8_t **)is->frame->extended_data;
2270                 uint8_t **out = &is->audio_buf1;
2271                 int out_count = (int64_t)wanted_nb_samples * is->audio_tgt.freq / is->frame->sample_rate + 256;
2272                 int out_size  = av_samples_get_buffer_size(NULL, is->audio_tgt.channels, out_count, is->audio_tgt.fmt, 0);
2273                 int len2;
2274                 if (wanted_nb_samples != is->frame->nb_samples) {
2275                     if (swr_set_compensation(is->swr_ctx, (wanted_nb_samples - is->frame->nb_samples) * is->audio_tgt.freq / is->frame->sample_rate,
2276                                                 wanted_nb_samples * is->audio_tgt.freq / is->frame->sample_rate) < 0) {
2277                         fprintf(stderr, "swr_set_compensation() failed\n");
2278                         break;
2279                     }
2280                 }
2281                 av_fast_malloc(&is->audio_buf1, &is->audio_buf1_size, out_size);
2282                 if (!is->audio_buf1)
2283                     return AVERROR(ENOMEM);
2284                 len2 = swr_convert(is->swr_ctx, out, out_count, in, is->frame->nb_samples);
2285                 if (len2 < 0) {
2286                     fprintf(stderr, "swr_convert() failed\n");
2287                     break;
2288                 }
2289                 if (len2 == out_count) {
2290                     fprintf(stderr, "warning: audio buffer is probably too small\n");
2291                     swr_init(is->swr_ctx);
2292                 }
2293                 is->audio_buf = is->audio_buf1;
2294                 resampled_data_size = len2 * is->audio_tgt.channels * av_get_bytes_per_sample(is->audio_tgt.fmt);
2295             } else {
2296                 is->audio_buf = is->frame->data[0];
2297                 resampled_data_size = data_size;
2298             }
2299
2300             audio_clock0 = is->audio_clock;
2301             /* update the audio clock with the pts */
2302             if (is->frame->pts != AV_NOPTS_VALUE) {
2303                 is->audio_clock = is->frame->pts * av_q2d(tb) + (double) is->frame->nb_samples / is->frame->sample_rate;
2304                 is->audio_clock_serial = is->audio_pkt_temp_serial;
2305             }
2306 #ifdef DEBUG
2307             {
2308                 static double last_clock;
2309                 printf("audio: delay=%0.3f clock=%0.3f clock0=%0.3f\n",
2310                        is->audio_clock - last_clock,
2311                        is->audio_clock, audio_clock0);
2312                 last_clock = is->audio_clock;
2313             }
2314 #endif
2315             return resampled_data_size;
2316         }
2317
2318         /* free the current packet */
2319         if (pkt->data)
2320             av_free_packet(pkt);
2321         memset(pkt_temp, 0, sizeof(*pkt_temp));
2322
2323         if (is->audioq.abort_request) {
2324             return -1;
2325         }
2326
2327         if (is->audioq.nb_packets == 0)
2328             SDL_CondSignal(is->continue_read_thread);
2329
2330         /* read next packet */
2331         if ((new_packet = packet_queue_get(&is->audioq, pkt, 1, &is->audio_pkt_temp_serial)) < 0)
2332             return -1;
2333
2334         if (pkt->data == flush_pkt.data) {
2335             avcodec_flush_buffers(dec);
2336             flush_complete = 0;
2337         }
2338
2339         *pkt_temp = *pkt;
2340     }
2341 }
2342
2343 /* prepare a new audio buffer */
2344 static void sdl_audio_callback(void *opaque, Uint8 *stream, int len)
2345 {
2346     VideoState *is = opaque;
2347     int audio_size, len1;
2348     int bytes_per_sec;
2349     int frame_size = av_samples_get_buffer_size(NULL, is->audio_tgt.channels, 1, is->audio_tgt.fmt, 1);
2350
2351     audio_callback_time = av_gettime();
2352
2353     while (len > 0) {
2354         if (is->audio_buf_index >= is->audio_buf_size) {
2355            audio_size = audio_decode_frame(is);
2356            if (audio_size < 0) {
2357                 /* if error, just output silence */
2358                is->audio_buf      = is->silence_buf;
2359                is->audio_buf_size = sizeof(is->silence_buf) / frame_size * frame_size;
2360            } else {
2361                if (is->show_mode != SHOW_MODE_VIDEO)
2362                    update_sample_display(is, (int16_t *)is->audio_buf, audio_size);
2363                is->audio_buf_size = audio_size;
2364            }
2365            is->audio_buf_index = 0;
2366         }
2367         len1 = is->audio_buf_size - is->audio_buf_index;
2368         if (len1 > len)
2369             len1 = len;
2370         memcpy(stream, (uint8_t *)is->audio_buf + is->audio_buf_index, len1);
2371         len -= len1;
2372         stream += len1;
2373         is->audio_buf_index += len1;
2374     }
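    /* Derive the pts of the audio currently being played: audio_clock marks
     * the end of the data decoded so far, so subtract everything still
     * buffered -- the unread tail of audio_buf plus the hardware buffers
     * (see the two-period assumption below) -- converted to seconds. */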
2375     bytes_per_sec = is->audio_tgt.freq * is->audio_tgt.channels * av_get_bytes_per_sample(is->audio_tgt.fmt);
2376     is->audio_write_buf_size = is->audio_buf_size - is->audio_buf_index;
2377     /* Let's assume the audio driver that is used by SDL has two periods. */
2378     is->audio_current_pts = is->audio_clock - (double)(2 * is->audio_hw_buf_size + is->audio_write_buf_size) / bytes_per_sec;
2379     is->audio_current_pts_drift = is->audio_current_pts - audio_callback_time / 1000000.0;
2380     if (is->audioq.serial == is->audio_clock_serial)
2381         check_external_clock_sync(is, is->audio_current_pts);
2382 }
2383
2384 static int audio_open(void *opaque, int64_t wanted_channel_layout, int wanted_nb_channels, int wanted_sample_rate, struct AudioParams *audio_hw_params)
2385 {
2386     SDL_AudioSpec wanted_spec, spec;
2387     const char *env;
2388     const int next_nb_channels[] = {0, 0, 1, 6, 2, 6, 4, 6};
2389
2390     env = SDL_getenv("SDL_AUDIO_CHANNELS");
2391     if (env) {
2392         wanted_nb_channels = atoi(env);
2393         wanted_channel_layout = av_get_default_channel_layout(wanted_nb_channels);
2394     }
2395     if (!wanted_channel_layout || wanted_nb_channels != av_get_channel_layout_nb_channels(wanted_channel_layout)) {
2396         wanted_channel_layout = av_get_default_channel_layout(wanted_nb_channels);
2397         wanted_channel_layout &= ~AV_CH_LAYOUT_STEREO_DOWNMIX;
2398     }
2399     wanted_spec.channels = av_get_channel_layout_nb_channels(wanted_channel_layout);
2400     wanted_spec.freq = wanted_sample_rate;
2401     if (wanted_spec.freq <= 0 || wanted_spec.channels <= 0) {
2402         fprintf(stderr, "Invalid sample rate or channel count!\n");
2403         return -1;
2404     }
2405     wanted_spec.format = AUDIO_S16SYS;
2406     wanted_spec.silence = 0;
2407     wanted_spec.samples = SDL_AUDIO_BUFFER_SIZE;
2408     wanted_spec.callback = sdl_audio_callback;
2409     wanted_spec.userdata = opaque;
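    /* SDL may refuse the requested channel count; retry with the fallback
     * counts from next_nb_channels[] (indexed by the count that just failed)
     * until a configuration is accepted or the table yields 0. */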
2410     while (SDL_OpenAudio(&wanted_spec, &spec) < 0) {
2411         fprintf(stderr, "SDL_OpenAudio (%d channels): %s\n", wanted_spec.channels, SDL_GetError());
2412         wanted_spec.channels = next_nb_channels[FFMIN(7, wanted_spec.channels)];
2413         if (!wanted_spec.channels) {
2414             fprintf(stderr, "No more channel combinations to try, audio open failed\n");
2415             return -1;
2416         }
2417         wanted_channel_layout = av_get_default_channel_layout(wanted_spec.channels);
2418     }
2419     if (spec.format != AUDIO_S16SYS) {
2420         fprintf(stderr, "SDL advised audio format %d is not supported!\n", spec.format);
2421         return -1;
2422     }
2423     if (spec.channels != wanted_spec.channels) {
2424         wanted_channel_layout = av_get_default_channel_layout(spec.channels);
2425         if (!wanted_channel_layout) {
2426             fprintf(stderr, "SDL advised channel count %d is not supported!\n", spec.channels);
2427             return -1;
2428         }
2429     }
2430
2431     audio_hw_params->fmt = AV_SAMPLE_FMT_S16;
2432     audio_hw_params->freq = spec.freq;
2433     audio_hw_params->channel_layout = wanted_channel_layout;
2434     audio_hw_params->channels =  spec.channels;
2435     return spec.size;
2436 }
2437
2438 /* open a given stream. Return 0 if OK */
2439 static int stream_component_open(VideoState *is, int stream_index)
2440 {
2441     AVFormatContext *ic = is->ic;
2442     AVCodecContext *avctx;
2443     AVCodec *codec;
2444     const char *forced_codec_name = NULL;
2445     AVDictionary *opts;
2446     AVDictionaryEntry *t = NULL;
2447     int sample_rate, nb_channels;
2448     int64_t channel_layout;
2449     int ret;
2450
2451     if (stream_index < 0 || stream_index >= ic->nb_streams)
2452         return -1;
2453     avctx = ic->streams[stream_index]->codec;
2454
2455     codec = avcodec_find_decoder(avctx->codec_id);
2456
2457     switch(avctx->codec_type){
2458         case AVMEDIA_TYPE_AUDIO   : is->last_audio_stream    = stream_index; forced_codec_name =    audio_codec_name; break;
2459         case AVMEDIA_TYPE_SUBTITLE: is->last_subtitle_stream = stream_index; forced_codec_name = subtitle_codec_name; break;
2460         case AVMEDIA_TYPE_VIDEO   : is->last_video_stream    = stream_index; forced_codec_name =    video_codec_name; break;
2461     }
2462     if (forced_codec_name)
2463         codec = avcodec_find_decoder_by_name(forced_codec_name);
2464     if (!codec) {
2465         if (forced_codec_name) fprintf(stderr, "No codec could be found with name '%s'\n", forced_codec_name);
2466         else                   fprintf(stderr, "No codec could be found with id %d\n", avctx->codec_id);
2467         return -1;
2468     }
2469
2470     avctx->codec_id = codec->id;
2471     avctx->workaround_bugs   = workaround_bugs;
2472     avctx->lowres            = lowres;
2473     if(avctx->lowres > codec->max_lowres){
2474         av_log(avctx, AV_LOG_WARNING, "The maximum value for lowres supported by the decoder is %d\n",
2475                 codec->max_lowres);
2476         avctx->lowres= codec->max_lowres;
2477     }
2478     avctx->idct_algo         = idct;
2479     avctx->skip_frame        = skip_frame;
2480     avctx->skip_idct         = skip_idct;
2481     avctx->skip_loop_filter  = skip_loop_filter;
2482     avctx->error_concealment = error_concealment;
2483
2484     if(avctx->lowres) avctx->flags |= CODEC_FLAG_EMU_EDGE;
2485     if (fast)   avctx->flags2 |= CODEC_FLAG2_FAST;
2486     if(codec->capabilities & CODEC_CAP_DR1)
2487         avctx->flags |= CODEC_FLAG_EMU_EDGE;
2488
2489     opts = filter_codec_opts(codec_opts, avctx->codec_id, ic, ic->streams[stream_index], codec);
2490     if (!av_dict_get(opts, "threads", NULL, 0))
2491         av_dict_set(&opts, "threads", "auto", 0);
2492     if (avctx->codec_type == AVMEDIA_TYPE_VIDEO || avctx->codec_type == AVMEDIA_TYPE_AUDIO)
2493         av_dict_set(&opts, "refcounted_frames", "1", 0);
2494     if (avcodec_open2(avctx, codec, &opts) < 0)
2495         return -1;
2496     if ((t = av_dict_get(opts, "", NULL, AV_DICT_IGNORE_SUFFIX))) {
2497         av_log(NULL, AV_LOG_ERROR, "Option %s not found.\n", t->key);
2498         return AVERROR_OPTION_NOT_FOUND;
2499     }
2500
2501     ic->streams[stream_index]->discard = AVDISCARD_DEFAULT;
2502     switch (avctx->codec_type) {
2503     case AVMEDIA_TYPE_AUDIO:
2504 #if CONFIG_AVFILTER
2505         {
2506             AVFilterLink *link;
2507
2508             is->audio_filter_src.freq           = avctx->sample_rate;
2509             is->audio_filter_src.channels       = avctx->channels;
2510             is->audio_filter_src.channel_layout = get_valid_channel_layout(avctx->channel_layout, avctx->channels);
2511             is->audio_filter_src.fmt            = avctx->sample_fmt;
2512             if ((ret = configure_audio_filters(is, afilters, 0)) < 0)
2513                 return ret;
2514             link = is->out_audio_filter->inputs[0];
2515             sample_rate    = link->sample_rate;
2516             nb_channels    = link->channels;
2517             channel_layout = link->channel_layout;
2518         }
2519 #else
2520         sample_rate    = avctx->sample_rate;
2521         nb_channels    = avctx->channels;
2522         channel_layout = avctx->channel_layout;
2523 #endif
2524
2525         /* prepare audio output */
2526         if ((ret = audio_open(is, channel_layout, nb_channels, sample_rate, &is->audio_tgt)) < 0)
2527             return ret;
2528         is->audio_hw_buf_size = ret;
2529         is->audio_src = is->audio_tgt;
2530         is->audio_buf_size  = 0;
2531         is->audio_buf_index = 0;
2532
2533         /* init averaging filter */
2534         is->audio_diff_avg_coef  = exp(log(0.01) / AUDIO_DIFF_AVG_NB);
2535         is->audio_diff_avg_count = 0;
2536         /* since we do not have a precise enough audio FIFO fullness measure,
2537            we only correct audio sync if the error is larger than this threshold */
2538         is->audio_diff_threshold = 2.0 * is->audio_hw_buf_size / av_samples_get_buffer_size(NULL, is->audio_tgt.channels, is->audio_tgt.freq, is->audio_tgt.fmt, 1);
2539
2540         memset(&is->audio_pkt, 0, sizeof(is->audio_pkt));
2541         memset(&is->audio_pkt_temp, 0, sizeof(is->audio_pkt_temp));
2542
2543         is->audio_stream = stream_index;
2544         is->audio_st = ic->streams[stream_index];
2545
2546         packet_queue_start(&is->audioq);
2547         SDL_PauseAudio(0);
2548         break;
2549     case AVMEDIA_TYPE_VIDEO:
2550         is->video_stream = stream_index;
2551         is->video_st = ic->streams[stream_index];
2552
2553         packet_queue_start(&is->videoq);
2554         is->video_tid = SDL_CreateThread(video_thread, is);
2555         break;
2556     case AVMEDIA_TYPE_SUBTITLE:
2557         is->subtitle_stream = stream_index;
2558         is->subtitle_st = ic->streams[stream_index];
2559         packet_queue_start(&is->subtitleq);
2560
2561         is->subtitle_tid = SDL_CreateThread(subtitle_thread, is);
2562         break;
2563     default:
2564         break;
2565     }
2566     return 0;
2567 }
2568
2569 static void stream_component_close(VideoState *is, int stream_index)
2570 {
2571     AVFormatContext *ic = is->ic;
2572     AVCodecContext *avctx;
2573
2574     if (stream_index < 0 || stream_index >= ic->nb_streams)
2575         return;
2576     avctx = ic->streams[stream_index]->codec;
2577
2578     switch (avctx->codec_type) {
2579     case AVMEDIA_TYPE_AUDIO:
2580         packet_queue_abort(&is->audioq);
2581
2582         SDL_CloseAudio();
2583
2584         packet_queue_flush(&is->audioq);
2585         av_free_packet(&is->audio_pkt);
2586         swr_free(&is->swr_ctx);
2587         av_freep(&is->audio_buf1);
2588         is->audio_buf1_size = 0;
2589         is->audio_buf = NULL;
2590         av_frame_free(&is->frame);
2591
2592         if (is->rdft) {
2593             av_rdft_end(is->rdft);
2594             av_freep(&is->rdft_data);
2595             is->rdft = NULL;
2596             is->rdft_bits = 0;
2597         }
2598 #if CONFIG_AVFILTER
2599         avfilter_graph_free(&is->agraph);
2600 #endif
2601         break;
2602     case AVMEDIA_TYPE_VIDEO:
2603         packet_queue_abort(&is->videoq);
2604
2605         /* note: we also signal this mutex to make sure we unblock the
2606            video thread in all cases */
2607         SDL_LockMutex(is->pictq_mutex);
2608         SDL_CondSignal(is->pictq_cond);
2609         SDL_UnlockMutex(is->pictq_mutex);
2610
2611         SDL_WaitThread(is->video_tid, NULL);
2612
2613         packet_queue_flush(&is->videoq);
2614         break;
2615     case AVMEDIA_TYPE_SUBTITLE:
2616         packet_queue_abort(&is->subtitleq);
2617
2618         /* note: we also signal this mutex to make sure we unblock the
2619            subtitle thread in all cases */
2620         SDL_LockMutex(is->subpq_mutex);
2621         is->subtitle_stream_changed = 1;
2622
2623         SDL_CondSignal(is->subpq_cond);
2624         SDL_UnlockMutex(is->subpq_mutex);
2625
2626         SDL_WaitThread(is->subtitle_tid, NULL);
2627
2628         packet_queue_flush(&is->subtitleq);
2629         break;
2630     default:
2631         break;
2632     }
2633
2634     ic->streams[stream_index]->discard = AVDISCARD_ALL;
2635     avcodec_close(avctx);
2636     switch (avctx->codec_type) {
2637     case AVMEDIA_TYPE_AUDIO:
2638         is->audio_st = NULL;
2639         is->audio_stream = -1;
2640         break;
2641     case AVMEDIA_TYPE_VIDEO:
2642         is->video_st = NULL;
2643         is->video_stream = -1;
2644         break;
2645     case AVMEDIA_TYPE_SUBTITLE:
2646         is->subtitle_st = NULL;
2647         is->subtitle_stream = -1;
2648         break;
2649     default:
2650         break;
2651     }
2652 }
2653
2654 static int decode_interrupt_cb(void *ctx)
2655 {
2656     VideoState *is = ctx;
2657     return is->abort_request;
2658 }
2659
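/* Heuristic: treat the rtp/rtsp/sdp demuxers and rtp:/udp: URLs as realtime
 * sources; this is used below to default to infinite buffering for such
 * streams (and, elsewhere, to enable the external clock speed adjustment). */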
2660 static int is_realtime(AVFormatContext *s)
2661 {
2662     if(   !strcmp(s->iformat->name, "rtp")
2663        || !strcmp(s->iformat->name, "rtsp")
2664        || !strcmp(s->iformat->name, "sdp")
2665     )
2666         return 1;
2667
2668     if(s->pb && (   !strncmp(s->filename, "rtp:", 4)
2669                  || !strncmp(s->filename, "udp:", 4)
2670                 )
2671     )
2672         return 1;
2673     return 0;
2674 }
2675
2676 /* this thread gets the stream from the disk or the network */
2677 static int read_thread(void *arg)
2678 {
2679     VideoState *is = arg;
2680     AVFormatContext *ic = NULL;
2681     int err, i, ret;
2682     int st_index[AVMEDIA_TYPE_NB];
2683     AVPacket pkt1, *pkt = &pkt1;
2684     int eof = 0;
2685     int pkt_in_play_range = 0;
2686     AVDictionaryEntry *t;
2687     AVDictionary **opts;
2688     int orig_nb_streams;
2689     SDL_mutex *wait_mutex = SDL_CreateMutex();
2690
2691     memset(st_index, -1, sizeof(st_index));
2692     is->last_video_stream = is->video_stream = -1;
2693     is->last_audio_stream = is->audio_stream = -1;
2694     is->last_subtitle_stream = is->subtitle_stream = -1;
2695
2696     ic = avformat_alloc_context();
2697     ic->interrupt_callback.callback = decode_interrupt_cb;
2698     ic->interrupt_callback.opaque = is;
2699     err = avformat_open_input(&ic, is->filename, is->iformat, &format_opts);
2700     if (err < 0) {
2701         print_error(is->filename, err);
2702         ret = -1;
2703         goto fail;
2704     }
2705     if ((t = av_dict_get(format_opts, "", NULL, AV_DICT_IGNORE_SUFFIX))) {
2706         av_log(NULL, AV_LOG_ERROR, "Option %s not found.\n", t->key);
2707         ret = AVERROR_OPTION_NOT_FOUND;
2708         goto fail;
2709     }
2710     is->ic = ic;
2711
2712     if (genpts)
2713         ic->flags |= AVFMT_FLAG_GENPTS;
2714
2715     opts = setup_find_stream_info_opts(ic, codec_opts);
2716     orig_nb_streams = ic->nb_streams;
2717
2718     err = avformat_find_stream_info(ic, opts);
2719     if (err < 0) {
2720         fprintf(stderr, "%s: could not find codec parameters\n", is->filename);
2721         ret = -1;
2722         goto fail;
2723     }
2724     for (i = 0; i < orig_nb_streams; i++)
2725         av_dict_free(&opts[i]);
2726     av_freep(&opts);
2727
2728     if (ic->pb)
2729         ic->pb->eof_reached = 0; // FIXME hack, ffplay maybe should not use url_feof() to test for the end
2730
2731     if (seek_by_bytes < 0)
2732         seek_by_bytes = !!(ic->iformat->flags & AVFMT_TS_DISCONT);
2733
2734     is->max_frame_duration = (ic->iformat->flags & AVFMT_TS_DISCONT) ? 10.0 : 3600.0;
2735
2736     /* if seeking was requested, execute it */
2737     if (start_time != AV_NOPTS_VALUE) {
2738         int64_t timestamp;
2739
2740         timestamp = start_time;
2741         /* add the stream start time */
2742         if (ic->start_time != AV_NOPTS_VALUE)
2743             timestamp += ic->start_time;
2744         ret = avformat_seek_file(ic, -1, INT64_MIN, timestamp, INT64_MAX, 0);
2745         if (ret < 0) {
2746             fprintf(stderr, "%s: could not seek to position %0.3f\n",
2747                     is->filename, (double)timestamp / AV_TIME_BASE);
2748         }
2749     }
2750
2751     is->realtime = is_realtime(ic);
2752
2753     for (i = 0; i < ic->nb_streams; i++)
2754         ic->streams[i]->discard = AVDISCARD_ALL;
2755     if (!video_disable)
2756         st_index[AVMEDIA_TYPE_VIDEO] =
2757             av_find_best_stream(ic, AVMEDIA_TYPE_VIDEO,
2758                                 wanted_stream[AVMEDIA_TYPE_VIDEO], -1, NULL, 0);
2759     if (!audio_disable)
2760         st_index[AVMEDIA_TYPE_AUDIO] =
2761             av_find_best_stream(ic, AVMEDIA_TYPE_AUDIO,
2762                                 wanted_stream[AVMEDIA_TYPE_AUDIO],
2763                                 st_index[AVMEDIA_TYPE_VIDEO],
2764                                 NULL, 0);
2765     if (!video_disable && !subtitle_disable)
2766         st_index[AVMEDIA_TYPE_SUBTITLE] =
2767             av_find_best_stream(ic, AVMEDIA_TYPE_SUBTITLE,
2768                                 wanted_stream[AVMEDIA_TYPE_SUBTITLE],
2769                                 (st_index[AVMEDIA_TYPE_AUDIO] >= 0 ?
2770                                  st_index[AVMEDIA_TYPE_AUDIO] :
2771                                  st_index[AVMEDIA_TYPE_VIDEO]),
2772                                 NULL, 0);
2773     if (show_status) {
2774         av_dump_format(ic, 0, is->filename, 0);
2775     }
2776
2777     is->show_mode = show_mode;
2778
2779     /* open the streams */
2780     if (st_index[AVMEDIA_TYPE_AUDIO] >= 0) {
2781         stream_component_open(is, st_index[AVMEDIA_TYPE_AUDIO]);
2782     }
2783
2784     ret = -1;
2785     if (st_index[AVMEDIA_TYPE_VIDEO] >= 0) {
2786         ret = stream_component_open(is, st_index[AVMEDIA_TYPE_VIDEO]);
2787     }
2788     if (is->show_mode == SHOW_MODE_NONE)
2789         is->show_mode = ret >= 0 ? SHOW_MODE_VIDEO : SHOW_MODE_RDFT;
2790
2791     if (st_index[AVMEDIA_TYPE_SUBTITLE] >= 0) {
2792         stream_component_open(is, st_index[AVMEDIA_TYPE_SUBTITLE]);
2793     }
2794
2795     if (is->video_stream < 0 && is->audio_stream < 0) {
2796         fprintf(stderr, "%s: could not open codecs\n", is->filename);
2797         ret = -1;
2798         goto fail;
2799     }
2800
2801     if (infinite_buffer < 0 && is->realtime)
2802         infinite_buffer = 1;
2803
2804     for (;;) {
2805         if (is->abort_request)
2806             break;
2807         if (is->paused != is->last_paused) {
2808             is->last_paused = is->paused;
2809             if (is->paused)
2810                 is->read_pause_return = av_read_pause(ic);
2811             else
2812                 av_read_play(ic);
2813         }
2814 #if CONFIG_RTSP_DEMUXER || CONFIG_MMSH_PROTOCOL
2815         if (is->paused &&
2816                 (!strcmp(ic->iformat->name, "rtsp") ||
2817                  (ic->pb && !strncmp(input_filename, "mmsh:", 5)))) {
2818             /* wait 10 ms to avoid trying to get another packet */
2819             /* XXX: horrible */
2820             SDL_Delay(10);
2821             continue;
2822         }
2823 #endif
2824         if (is->seek_req) {
2825             int64_t seek_target = is->seek_pos;
2826             int64_t seek_min    = is->seek_rel > 0 ? seek_target - is->seek_rel + 2: INT64_MIN;
2827             int64_t seek_max    = is->seek_rel < 0 ? seek_target - is->seek_rel - 2: INT64_MAX;
2828 // FIXME the +-2 is due to rounding being not done in the correct direction in generation
2829 //      of the seek_pos/seek_rel variables
2830
2831             ret = avformat_seek_file(is->ic, -1, seek_min, seek_target, seek_max, is->seek_flags);
2832             if (ret < 0) {
2833                 fprintf(stderr, "%s: error while seeking\n", is->ic->filename);
2834             } else {
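                /* After a successful seek, flush each packet queue and push
                 * flush_pkt so the decoder threads flush their codec buffers
                 * and the queue serial advances, invalidating data decoded
                 * before the seek. */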
2835                 if (is->audio_stream >= 0) {
2836                     packet_queue_flush(&is->audioq);
2837                     packet_queue_put(&is->audioq, &flush_pkt);
2838                 }
2839                 if (is->subtitle_stream >= 0) {
2840                     packet_queue_flush(&is->subtitleq);
2841                     packet_queue_put(&is->subtitleq, &flush_pkt);
2842                 }
2843                 if (is->video_stream >= 0) {
2844                     packet_queue_flush(&is->videoq);
2845                     packet_queue_put(&is->videoq, &flush_pkt);
2846                 }
2847                 if (is->seek_flags & AVSEEK_FLAG_BYTE) {
2848                    update_external_clock_pts(is, NAN);
2849                 } else {
2850                    update_external_clock_pts(is, seek_target / (double)AV_TIME_BASE);
2851                 }
2852             }
2853             is->seek_req = 0;
2854             eof = 0;
2855             if (is->paused)
2856                 step_to_next_frame(is);
2857         }
2858         if (is->queue_attachments_req) {
2859             avformat_queue_attached_pictures(ic);
2860             is->queue_attachments_req = 0;
2861         }
2862
2863         /* if the queues are full, no need to read more */
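        /* Reading is throttled once the queues hold more than MAX_QUEUE_SIZE
         * bytes in total, or every present (non-aborted) stream already has
         * more than MIN_FRAMES packets buffered; with infinite_buffer
         * (forced on above for realtime sources) this check is skipped. */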
2864         if (infinite_buffer<1 &&
2865               (is->audioq.size + is->videoq.size + is->subtitleq.size > MAX_QUEUE_SIZE
2866             || (   (is->audioq   .nb_packets > MIN_FRAMES || is->audio_stream < 0 || is->audioq.abort_request)
2867                 && (is->videoq   .nb_packets > MIN_FRAMES || is->video_stream < 0 || is->videoq.abort_request)
2868                 && (is->subtitleq.nb_packets > MIN_FRAMES || is->subtitle_stream < 0 || is->subtitleq.abort_request)))) {
2869             /* wait 10 ms */
2870             SDL_LockMutex(wait_mutex);
2871             SDL_CondWaitTimeout(is->continue_read_thread, wait_mutex, 10);
2872             SDL_UnlockMutex(wait_mutex);
2873             continue;
2874         }
2875         if (eof) {
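            /* At end of file, feed empty packets to the video decoder (and
             * to audio decoders with CODEC_CAP_DELAY) so buffered frames are
             * drained; once every queue is empty, either loop back to the
             * start or exit if -autoexit was given. */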
2876             if (is->video_stream >= 0) {
2877                 av_init_packet(pkt);
2878                 pkt->data = NULL;
2879                 pkt->size = 0;
2880                 pkt->stream_index = is->video_stream;
2881                 packet_queue_put(&is->videoq, pkt);
2882             }
2883             if (is->audio_stream >= 0 &&
2884                 is->audio_st->codec->codec->capabilities & CODEC_CAP_DELAY) {
2885                 av_init_packet(pkt);
2886                 pkt->data = NULL;
2887                 pkt->size = 0;
2888                 pkt->stream_index = is->audio_stream;
2889                 packet_queue_put(&is->audioq, pkt);
2890             }
2891             SDL_Delay(10);
2892             if (is->audioq.size + is->videoq.size + is->subtitleq.size == 0) {
2893                 if (loop != 1 && (!loop || --loop)) {
2894                     stream_seek(is, start_time != AV_NOPTS_VALUE ? start_time : 0, 0, 0);
2895                 } else if (autoexit) {
2896                     ret = AVERROR_EOF;
2897                     goto fail;
2898                 }
2899             }
2900             eof=0;
2901             continue;
2902         }
2903         ret = av_read_frame(ic, pkt);
2904         if (ret < 0) {
2905             if (ret == AVERROR_EOF || url_feof(ic->pb))
2906                 eof = 1;
2907             if (ic->pb && ic->pb->error)
2908                 break;
2909             SDL_LockMutex(wait_mutex);
2910             SDL_CondWaitTimeout(is->continue_read_thread, wait_mutex, 10);
2911             SDL_UnlockMutex(wait_mutex);
2912             continue;
2913         }
2914         /* check if packet is in play range specified by user, then queue, otherwise discard */
2915         pkt_in_play_range = duration == AV_NOPTS_VALUE ||
2916                 (pkt->pts - ic->streams[pkt->stream_index]->start_time) *
2917                 av_q2d(ic->streams[pkt->stream_index]->time_base) -
2918                 (double)(start_time != AV_NOPTS_VALUE ? start_time : 0) / 1000000
2919                 <= ((double)duration / 1000000);
2920         if (pkt->stream_index == is->audio_stream && pkt_in_play_range) {
2921             packet_queue_put(&is->audioq, pkt);
2922         } else if (pkt->stream_index == is->video_stream && pkt_in_play_range) {
2923             packet_queue_put(&is->videoq, pkt);
2924         } else if (pkt->stream_index == is->subtitle_stream && pkt_in_play_range) {
2925             packet_queue_put(&is->subtitleq, pkt);
2926         } else {
2927             av_free_packet(pkt);
2928         }
2929     }
2930     /* wait until the end */
2931     while (!is->abort_request) {
2932         SDL_Delay(100);
2933     }
2934
2935     ret = 0;
2936  fail:
2937     /* close each stream */
2938     if (is->audio_stream >= 0)
2939         stream_component_close(is, is->audio_stream);
2940     if (is->video_stream >= 0)
2941         stream_component_close(is, is->video_stream);
2942     if (is->subtitle_stream >= 0)
2943         stream_component_close(is, is->subtitle_stream);
2944     if (is->ic) {
2945         avformat_close_input(&is->ic);
2946     }
2947
2948     if (ret != 0) {
2949         SDL_Event event;
2950
2951         event.type = FF_QUIT_EVENT;
2952         event.user.data1 = is;
2953         SDL_PushEvent(&event);
2954     }
2955     SDL_DestroyMutex(wait_mutex);
2956     return 0;
2957 }
2958
2959 static VideoState *stream_open(const char *filename, AVInputFormat *iformat)
2960 {
2961     VideoState *is;
2962
2963     is = av_mallocz(sizeof(VideoState));
2964     if (!is)
2965         return NULL;
2966     av_strlcpy(is->filename, filename, sizeof(is->filename));
2967     is->iformat = iformat;
2968     is->ytop    = 0;
2969     is->xleft   = 0;
2970
2971     /* start video display */
2972     is->pictq_mutex = SDL_CreateMutex();
2973     is->pictq_cond  = SDL_CreateCond();
2974
2975     is->subpq_mutex = SDL_CreateMutex();
2976     is->subpq_cond  = SDL_CreateCond();
2977
2978     packet_queue_init(&is->videoq);
2979     packet_queue_init(&is->audioq);
2980     packet_queue_init(&is->subtitleq);
2981
2982     is->continue_read_thread = SDL_CreateCond();
2983
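    /*
     * Start with an "empty" clock state: the external clock has no pts yet
     * (NAN) and runs at the nominal speed 1.0, the current pts drifts are
     * measured against the current time, and the clock serials are set to -1
     * because nothing has been decoded yet.
     */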
2984     update_external_clock_pts(is, NAN);
2985     update_external_clock_speed(is, 1.0);
2986     is->audio_current_pts_drift = -av_gettime() / 1000000.0;
2987     is->video_current_pts_drift = is->audio_current_pts_drift;
2988     is->audio_clock_serial = -1;
2989     is->video_clock_serial = -1;
2990     is->audio_last_serial = -1;
2991     is->av_sync_type = av_sync_type;
2992     is->read_tid     = SDL_CreateThread(read_thread, is);
2993     if (!is->read_tid) {
2994         av_free(is);
2995         return NULL;
2996     }
2997     return is;
2998 }
2999
3000 static void stream_cycle_channel(VideoState *is, int codec_type)
3001 {
3002     AVFormatContext *ic = is->ic;
3003     int start_index, stream_index;
3004     int old_index;
3005     AVStream *st;
3006
3007     if (codec_type == AVMEDIA_TYPE_VIDEO) {
3008         start_index = is->last_video_stream;
3009         old_index = is->video_stream;
3010     } else if (codec_type == AVMEDIA_TYPE_AUDIO) {
3011         start_index = is->last_audio_stream;
3012         old_index = is->audio_stream;
3013     } else {
3014         start_index = is->last_subtitle_stream;
3015         old_index = is->subtitle_stream;
3016     }
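    /*
     * Scan the streams cyclically, starting just after the stream that was
     * used last, and select the next stream of the requested type with usable
     * parameters; subtitles may additionally cycle to "no stream" (-1) when
     * the scan wraps around.
     */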
3017     stream_index = start_index;
3018     for (;;) {
3019         if (++stream_index >= is->ic->nb_streams)
3020         {
3021             if (codec_type == AVMEDIA_TYPE_SUBTITLE)
3022             {
3023                 stream_index = -1;
3024                 is->last_subtitle_stream = -1;
3025                 goto the_end;
3026             }
3027             if (start_index == -1)
3028                 return;
3029             stream_index = 0;
3030         }
3031         if (stream_index == start_index)
3032             return;
3033         st = ic->streams[stream_index];
3034         if (st->codec->codec_type == codec_type) {
3035             /* check that parameters are OK */
3036             switch (codec_type) {
3037             case AVMEDIA_TYPE_AUDIO:
3038                 if (st->codec->sample_rate != 0 &&
3039                     st->codec->channels != 0)
3040                     goto the_end;
3041                 break;
3042             case AVMEDIA_TYPE_VIDEO:
3043             case AVMEDIA_TYPE_SUBTITLE:
3044                 goto the_end;
3045             default:
3046                 break;
3047             }
3048         }
3049     }
3050  the_end:
3051     stream_component_close(is, old_index);
3052     stream_component_open(is, stream_index);
3053     if (codec_type == AVMEDIA_TYPE_VIDEO)
3054         is->queue_attachments_req = 1;
3055 }
3056
3057
3058 static void toggle_full_screen(VideoState *is)
3059 {
3060 #if defined(__APPLE__) && SDL_VERSION_ATLEAST(1, 2, 14)
3061     /* OS X needs to reallocate the SDL overlays */
3062     int i;
3063     for (i = 0; i < VIDEO_PICTURE_QUEUE_SIZE; i++)
3064         is->pictq[i].reallocate = 1;
3065 #endif
3066     is_full_screen = !is_full_screen;
3067     video_open(is, 1, NULL);
3068 }
3069
3070 static void toggle_audio_display(VideoState *is)
3071 {
3072     int bgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0x00);
3073     int next = is->show_mode;
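    /*
     * Step through the display modes (video, waves, RDFT) modulo SHOW_MODE_NB,
     * skipping SHOW_MODE_VIDEO when there is no video stream and the audio
     * visualisations when there is no audio stream.
     */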
3074     do {
3075         next = (next + 1) % SHOW_MODE_NB;
3076     } while (next != is->show_mode && ((next == SHOW_MODE_VIDEO && !is->video_st) || (next != SHOW_MODE_VIDEO && !is->audio_st)));
3077     if (is->show_mode != next) {
3078         fill_rectangle(screen,
3079                     is->xleft, is->ytop, is->width, is->height,
3080                     bgcolor, 1);
3081         is->force_refresh = 1;
3082         is->show_mode = next;
3083     }
3084 }
3085
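/* Poll for an SDL event, refreshing the display while waiting: between polls,
   sleep for the remaining_time requested by video_refresh() (at most
   REFRESH_RATE seconds) and hide the mouse cursor after CURSOR_HIDE_DELAY
   microseconds of inactivity. */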
3086 static void refresh_loop_wait_event(VideoState *is, SDL_Event *event) {
3087     double remaining_time = 0.0;
3088     SDL_PumpEvents();
3089     while (!SDL_PeepEvents(event, 1, SDL_GETEVENT, SDL_ALLEVENTS)) {
3090         if (!cursor_hidden && av_gettime() - cursor_last_shown > CURSOR_HIDE_DELAY) {
3091             SDL_ShowCursor(0);
3092             cursor_hidden = 1;
3093         }
3094         if (remaining_time > 0.0)
3095             av_usleep((int64_t)(remaining_time * 1000000.0));
3096         remaining_time = REFRESH_RATE;
3097         if (is->show_mode != SHOW_MODE_NONE && (!is->paused || is->force_refresh))
3098             video_refresh(is, &remaining_time);
3099         SDL_PumpEvents();
3100     }
3101 }
3102
3103 /* handle an event sent by the GUI */
3104 static void event_loop(VideoState *cur_stream)
3105 {
3106     SDL_Event event;
3107     double incr, pos, frac;
3108
3109     for (;;) {
3110         double x;
3111         refresh_loop_wait_event(cur_stream, &event);
3112         switch (event.type) {
3113         case SDL_KEYDOWN:
3114             if (exit_on_keydown) {
3115                 do_exit(cur_stream);
3116                 break;
3117             }
3118             switch (event.key.keysym.sym) {
3119             case SDLK_ESCAPE:
3120             case SDLK_q:
3121                 do_exit(cur_stream);
3122                 break;
3123             case SDLK_f:
3124                 toggle_full_screen(cur_stream);
3125                 cur_stream->force_refresh = 1;
3126                 break;
3127             case SDLK_p:
3128             case SDLK_SPACE:
3129                 toggle_pause(cur_stream);
3130                 break;
3131             case SDLK_s: // S: Step to next frame
3132                 step_to_next_frame(cur_stream);
3133                 break;
3134             case SDLK_a:
3135                 stream_cycle_channel(cur_stream, AVMEDIA_TYPE_AUDIO);
3136                 break;
3137             case SDLK_v:
3138                 stream_cycle_channel(cur_stream, AVMEDIA_TYPE_VIDEO);
3139                 break;
3140             case SDLK_t:
3141                 stream_cycle_channel(cur_stream, AVMEDIA_TYPE_SUBTITLE);
3142                 break;
3143             case SDLK_w:
3144                 toggle_audio_display(cur_stream);
3145                 break;
3146             case SDLK_PAGEUP:
3147                 incr = 600.0;
3148                 goto do_seek;
3149             case SDLK_PAGEDOWN:
3150                 incr = -600.0;
3151                 goto do_seek;
3152             case SDLK_LEFT:
3153                 incr = -10.0;
3154                 goto do_seek;
3155             case SDLK_RIGHT:
3156                 incr = 10.0;
3157                 goto do_seek;
3158             case SDLK_UP:
3159                 incr = 60.0;
3160                 goto do_seek;
3161             case SDLK_DOWN:
3162                 incr = -60.0;
3163             do_seek:
3164                     if (seek_by_bytes) {
3165                         if (cur_stream->video_stream >= 0 && cur_stream->video_current_pos >= 0) {
3166                             pos = cur_stream->video_current_pos;
3167                         } else if (cur_stream->audio_stream >= 0 && cur_stream->audio_pkt.pos >= 0) {
3168                             pos = cur_stream->audio_pkt.pos;
3169                         } else
3170                             pos = avio_tell(cur_stream->ic->pb);
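                        /*
                         * Turn the time increment into a byte increment using
                         * the container bit rate: e.g. at 1 Mb/s a 10 s seek
                         * becomes 10 * (1000000 / 8) = 1250000 bytes; when the
                         * bit rate is unknown, 180000 bytes/s (about 1.44 Mb/s)
                         * is assumed.
                         */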
3171                         if (cur_stream->ic->bit_rate)
3172                             incr *= cur_stream->ic->bit_rate / 8.0;
3173                         else
3174                             incr *= 180000.0;
3175                         pos += incr;
3176                         stream_seek(cur_stream, pos, incr, 1);
3177                     } else {
3178                         pos = get_master_clock(cur_stream);
3179                         if (isnan(pos))
3180                             pos = (double)cur_stream->seek_pos / AV_TIME_BASE;
3181                         pos += incr;
3182                         if (cur_stream->ic->start_time != AV_NOPTS_VALUE && pos < cur_stream->ic->start_time / (double)AV_TIME_BASE)
3183                             pos = cur_stream->ic->start_time / (double)AV_TIME_BASE;
3184                         stream_seek(cur_stream, (int64_t)(pos * AV_TIME_BASE), (int64_t)(incr * AV_TIME_BASE), 0);
3185                     }
3186                 break;
3187             default:
3188                 break;
3189             }
3190             break;
3191         case SDL_VIDEOEXPOSE:
3192             cur_stream->force_refresh = 1;
3193             break;
3194         case SDL_MOUSEBUTTONDOWN:
3195             if (exit_on_mousedown) {
3196                 do_exit(cur_stream);
3197                 break;
3198             }
3199         case SDL_MOUSEMOTION:
3200             if (cursor_hidden) {
3201                 SDL_ShowCursor(1);
3202                 cursor_hidden = 0;
3203             }
3204             cursor_last_shown = av_gettime();
3205             if (event.type == SDL_MOUSEBUTTONDOWN) {
3206                 x = event.button.x;
3207             } else {
3208                 if (event.motion.state != SDL_PRESSED)
3209                     break;
3210                 x = event.motion.x;
3211             }
3212                 if (seek_by_bytes || cur_stream->ic->duration <= 0) {
3213                     uint64_t size = avio_size(cur_stream->ic->pb);
3214                     stream_seek(cur_stream, size * x / cur_stream->width, 0, 1);
3215                 } else {
3216                     int64_t ts;
3217                     int ns, hh, mm, ss;
3218                     int tns, thh, tmm, tss;
3219                     tns  = cur_stream->ic->duration / 1000000LL;
3220                     thh  = tns / 3600;
3221                     tmm  = (tns % 3600) / 60;
3222                     tss  = (tns % 60);
3223                     frac = x / cur_stream->width;
3224                     ns   = frac * tns;
3225                     hh   = ns / 3600;
3226                     mm   = (ns % 3600) / 60;
3227                     ss   = (ns % 60);
3228                     fprintf(stderr, "Seek to %2.0f%% (%2d:%02d:%02d) of total duration (%2d:%02d:%02d)       \n", frac*100,
3229                             hh, mm, ss, thh, tmm, tss);
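                    /*
                     * Map the click position to an absolute timestamp: e.g.
                     * clicking in the middle of the window (frac = 0.5) on a
                     * 2 h file gives ts = 0.5 * 7200 * AV_TIME_BASE, i.e. a
                     * seek to the 1 h mark (plus the stream start_time, if any).
                     */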
3230                     ts = frac * cur_stream->ic->duration;
3231                     if (cur_stream->ic->start_time != AV_NOPTS_VALUE)
3232                         ts += cur_stream->ic->start_time;
3233                     stream_seek(cur_stream, ts, 0, 0);
3234                 }
3235             break;
3236         case SDL_VIDEORESIZE:
3237                 screen = SDL_SetVideoMode(event.resize.w, event.resize.h, 0,
3238                                           SDL_HWSURFACE|SDL_RESIZABLE|SDL_ASYNCBLIT|SDL_HWACCEL);
3239                 screen_width  = cur_stream->width  = event.resize.w;
3240                 screen_height = cur_stream->height = event.resize.h;
3241                 cur_stream->force_refresh = 1;
3242             break;
3243         case SDL_QUIT:
3244         case FF_QUIT_EVENT:
3245             do_exit(cur_stream);
3246             break;
3247         case FF_ALLOC_EVENT:
3248             alloc_picture(event.user.data1);
3249             break;
3250         default:
3251             break;
3252         }
3253     }
3254 }
3255
3256 static int opt_frame_size(void *optctx, const char *opt, const char *arg)
3257 {
3258     av_log(NULL, AV_LOG_WARNING, "Option -s is deprecated, use -video_size.\n");
3259     return opt_default(NULL, "video_size", arg);
3260 }
3261
3262 static int opt_width(void *optctx, const char *opt, const char *arg)
3263 {
3264     screen_width = parse_number_or_die(opt, arg, OPT_INT64, 1, INT_MAX);
3265     return 0;
3266 }
3267
3268 static int opt_height(void *optctx, const char *opt, const char *arg)
3269 {
3270     screen_height = parse_number_or_die(opt, arg, OPT_INT64, 1, INT_MAX);
3271     return 0;
3272 }
3273
3274 static int opt_format(void *optctx, const char *opt, const char *arg)
3275 {
3276     file_iformat = av_find_input_format(arg);
3277     if (!file_iformat) {
3278         fprintf(stderr, "Unknown input format: %s\n", arg);
3279         return AVERROR(EINVAL);
3280     }
3281     return 0;
3282 }
3283
3284 static int opt_frame_pix_fmt(void *optctx, const char *opt, const char *arg)
3285 {
3286     av_log(NULL, AV_LOG_WARNING, "Option -pix_fmt is deprecated, use -pixel_format.\n");
3287     return opt_default(NULL, "pixel_format", arg);
3288 }
3289
3290 static int opt_sync(void *optctx, const char *opt, const char *arg)
3291 {
3292     if (!strcmp(arg, "audio"))
3293         av_sync_type = AV_SYNC_AUDIO_MASTER;
3294     else if (!strcmp(arg, "video"))
3295         av_sync_type = AV_SYNC_VIDEO_MASTER;
3296     else if (!strcmp(arg, "ext"))
3297         av_sync_type = AV_SYNC_EXTERNAL_CLOCK;
3298     else {
3299         fprintf(stderr, "Unknown value for %s: %s\n", opt, arg);
3300         exit(1);
3301     }
3302     return 0;
3303 }
3304
3305 static int opt_seek(void *optctx, const char *opt, const char *arg)
3306 {
3307     start_time = parse_time_or_die(opt, arg, 1);
3308     return 0;
3309 }
3310
3311 static int opt_duration(void *optctx, const char *opt, const char *arg)
3312 {
3313     duration = parse_time_or_die(opt, arg, 1);
3314     return 0;
3315 }
3316
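/* -showmode accepts the names "video", "waves" and "rdft", or a plain number
   in the range 0..SHOW_MODE_NB-1. */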
3317 static int opt_show_mode(void *optctx, const char *opt, const char *arg)
3318 {
3319     show_mode = !strcmp(arg, "video") ? SHOW_MODE_VIDEO :
3320                 !strcmp(arg, "waves") ? SHOW_MODE_WAVES :
3321                 !strcmp(arg, "rdft" ) ? SHOW_MODE_RDFT  :
3322                 parse_number_or_die(opt, arg, OPT_INT, 0, SHOW_MODE_NB-1);
3323     return 0;
3324 }
3325
3326 static void opt_input_file(void *optctx, const char *filename)
3327 {
3328     if (input_filename) {
3329         fprintf(stderr, "Argument '%s' provided as input filename, but '%s' was already specified.\n",
3330                 filename, input_filename);
3331         exit(1);
3332     }
3333     if (!strcmp(filename, "-"))
3334         filename = "pipe:";
3335     input_filename = filename;
3336 }
3337
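/* -codec requires a stream specifier, e.g. "-codec:a" to force the audio
   decoder; without an ":a", ":s" or ":v" suffix the option is rejected. */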
3338 static int opt_codec(void *optctx, const char *opt, const char *arg)
3339 {
3340     const char *spec = strchr(opt, ':');
3341     if (!spec) {
3342         fprintf(stderr, "No media specifier was specified in '%s' for option '%s'\n",
3343                 arg, opt);
3344         return AVERROR(EINVAL);
3345     }
3346     spec++;
3347     switch (spec[0]) {
3348     case 'a':    audio_codec_name = arg; break;
3349     case 's': subtitle_codec_name = arg; break;
3350     case 'v':    video_codec_name = arg; break;
3351     default:
3352         fprintf(stderr, "Invalid media specifier '%s' in option '%s'\n", spec, opt);
3353         return AVERROR(EINVAL);
3354     }
3355     return 0;
3356 }
3357
3358 static int dummy;
3359
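/* Command line options; a typical invocation looks like
   "ffplay -fs -ss 30 -t 10 input.mkv" (full screen, start 30 seconds in,
   play 10 seconds). */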
3360 static const OptionDef options[] = {
3361 #include "cmdutils_common_opts.h"
3362     { "x", HAS_ARG, { .func_arg = opt_width }, "force displayed width", "width" },
3363     { "y", HAS_ARG, { .func_arg = opt_height }, "force displayed height", "height" },
3364     { "s", HAS_ARG | OPT_VIDEO, { .func_arg = opt_frame_size }, "set frame size (WxH or abbreviation)", "size" },
3365     { "fs", OPT_BOOL, { &is_full_screen }, "force full screen" },
3366     { "an", OPT_BOOL, { &audio_disable }, "disable audio" },
3367     { "vn", OPT_BOOL, { &video_disable }, "disable video" },
3368     { "sn", OPT_BOOL, { &subtitle_disable }, "disable subtitling" },
3369     { "ast", OPT_INT | HAS_ARG | OPT_EXPERT, { &wanted_stream[AVMEDIA_TYPE_AUDIO] }, "select desired audio stream", "stream_number" },
3370     { "vst", OPT_INT | HAS_ARG | OPT_EXPERT, { &wanted_stream[AVMEDIA_TYPE_VIDEO] }, "select desired video stream", "stream_number" },
3371     { "sst", OPT_INT | HAS_ARG | OPT_EXPERT, { &wanted_stream[AVMEDIA_TYPE_SUBTITLE] }, "select desired subtitle stream", "stream_number" },
3372     { "ss", HAS_ARG, { .func_arg = opt_seek }, "seek to a given position in seconds", "pos" },
3373     { "t", HAS_ARG, { .func_arg = opt_duration }, "play  \"duration\" seconds of audio/video", "duration" },
3374     { "bytes", OPT_INT | HAS_ARG, { &seek_by_bytes }, "seek by bytes 0=off 1=on -1=auto", "val" },
3375     { "nodisp", OPT_BOOL, { &display_disable }, "disable graphical display" },
3376     { "f", HAS_ARG, { .func_arg = opt_format }, "force format", "fmt" },
3377     { "pix_fmt", HAS_ARG | OPT_EXPERT | OPT_VIDEO, { .func_arg = opt_frame_pix_fmt }, "set pixel format", "format" },
3378     { "stats", OPT_BOOL | OPT_EXPERT, { &show_status }, "show status", "" },
3379     { "bug", OPT_INT | HAS_ARG | OPT_EXPERT, { &workaround_bugs }, "workaround bugs", "" },
3380     { "fast", OPT_BOOL | OPT_EXPERT, { &fast }, "non spec compliant optimizations", "" },
3381     { "genpts", OPT_BOOL | OPT_EXPERT, { &genpts }, "generate pts", "" },
3382     { "drp", OPT_INT | HAS_ARG | OPT_EXPERT, { &decoder_reorder_pts }, "let decoder reorder pts 0=off 1=on -1=auto", ""},
3383     { "lowres", OPT_INT | HAS_ARG | OPT_EXPERT, { &lowres }, "", "" },
3384     { "skiploop", OPT_INT | HAS_ARG | OPT_EXPERT, { &skip_loop_filter }, "", "" },
3385     { "skipframe", OPT_INT | HAS_ARG | OPT_EXPERT, { &skip_frame }, "", "" },
3386     { "skipidct", OPT_INT | HAS_ARG | OPT_EXPERT, { &skip_idct }, "", "" },
3387     { "idct", OPT_INT | HAS_ARG | OPT_EXPERT, { &idct }, "set idct algo",  "algo" },
3388     { "ec", OPT_INT | HAS_ARG | OPT_EXPERT, { &error_concealment }, "set error concealment options",  "bit_mask" },
3389     { "sync", HAS_ARG | OPT_EXPERT, { .func_arg = opt_sync }, "set audio-video sync. type (type=audio/video/ext)", "type" },
3390     { "autoexit", OPT_BOOL | OPT_EXPERT, { &autoexit }, "exit at the end", "" },
3391     { "exitonkeydown", OPT_BOOL | OPT_EXPERT, { &exit_on_keydown }, "exit on key down", "" },
3392     { "exitonmousedown", OPT_BOOL | OPT_EXPERT, { &exit_on_mousedown }, "exit on mouse down", "" },
3393     { "loop", OPT_INT | HAS_ARG | OPT_EXPERT, { &loop }, "set number of times the playback shall be looped", "loop count" },
3394     { "framedrop", OPT_BOOL | OPT_EXPERT, { &framedrop }, "drop frames when cpu is too slow", "" },
3395     { "infbuf", OPT_BOOL | OPT_EXPERT, { &infinite_buffer }, "don't limit the input buffer size (useful with realtime streams)", "" },
3396     { "window_title", OPT_STRING | HAS_ARG, { &window_title }, "set window title", "window title" },
3397 #if CONFIG_AVFILTER
3398     { "vf", OPT_STRING | HAS_ARG, { &vfilters }, "set video filters", "filter_graph" },
3399     { "af", OPT_STRING | HAS_ARG, { &afilters }, "set audio filters", "filter_graph" },
3400 #endif
3401     { "rdftspeed", OPT_INT | HAS_ARG| OPT_AUDIO | OPT_EXPERT, { &rdftspeed }, "rdft speed", "msecs" },
3402     { "showmode", HAS_ARG, { .func_arg = opt_show_mode}, "select show mode (0 = video, 1 = waves, 2 = RDFT)", "mode" },
3403     { "default", HAS_ARG | OPT_AUDIO | OPT_VIDEO | OPT_EXPERT, { .func_arg = opt_default }, "generic catch all option", "" },
3404     { "i", OPT_BOOL, { &dummy}, "read specified file", "input_file"},
3405     { "codec", HAS_ARG, { .func_arg = opt_codec}, "force decoder", "decoder_name" },
3406     { "acodec", HAS_ARG | OPT_STRING | OPT_EXPERT, {    &audio_codec_name }, "force audio decoder",    "decoder_name" },
3407     { "scodec", HAS_ARG | OPT_STRING | OPT_EXPERT, { &subtitle_codec_name }, "force subtitle decoder", "decoder_name" },
3408     { "vcodec", HAS_ARG | OPT_STRING | OPT_EXPERT, {    &video_codec_name }, "force video decoder",    "decoder_name" },
3409     { NULL, },
3410 };
3411
3412 static void show_usage(void)
3413 {
3414     av_log(NULL, AV_LOG_INFO, "Simple media player\n");
3415     av_log(NULL, AV_LOG_INFO, "usage: %s [options] input_file\n", program_name);
3416     av_log(NULL, AV_LOG_INFO, "\n");
3417 }
3418
3419 void show_help_default(const char *opt, const char *arg)
3420 {
3421     av_log_set_callback(log_callback_help);
3422     show_usage();
3423     show_help_options(options, "Main options:", 0, OPT_EXPERT, 0);
3424     show_help_options(options, "Advanced options:", OPT_EXPERT, 0, 0);
3425     printf("\n");
3426     show_help_children(avcodec_get_class(), AV_OPT_FLAG_DECODING_PARAM);
3427     show_help_children(avformat_get_class(), AV_OPT_FLAG_DECODING_PARAM);
3428 #if !CONFIG_AVFILTER
3429     show_help_children(sws_get_class(), AV_OPT_FLAG_ENCODING_PARAM);
3430 #else
3431     show_help_children(avfilter_get_class(), AV_OPT_FLAG_FILTERING_PARAM);
3432 #endif
3433     printf("\nWhile playing:\n"
3434            "q, ESC              quit\n"
3435            "f                   toggle full screen\n"
3436            "p, SPC              pause\n"
3437            "a                   cycle audio channel\n"
3438            "v                   cycle video channel\n"
3439            "t                   cycle subtitle channel\n"
3440            "w                   show audio waves\n"
3441            "s                   activate frame-step mode\n"
3442            "left/right          seek backward/forward 10 seconds\n"
3443            "down/up             seek backward/forward 1 minute\n"
3444            "page down/page up   seek backward/forward 10 minutes\n"
3445            "mouse click         seek to percentage in file corresponding to fraction of width\n"
3446            );
3447 }
3448
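/* Lock manager callback registered with av_lockmgr_register(): libavcodec
   uses it to create, obtain, release and destroy the mutexes protecting
   operations that are not otherwise thread safe (such as opening and closing
   codecs). It must return 0 on success and non-zero on failure. */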
3449 static int lockmgr(void **mtx, enum AVLockOp op)
3450 {
3451     switch (op) {
3452     case AV_LOCK_CREATE:
3453         *mtx = SDL_CreateMutex();
3454         if (!*mtx)
3455             return 1;
3456         return 0;
3457     case AV_LOCK_OBTAIN:
3458         return !!SDL_LockMutex(*mtx);
3459     case AV_LOCK_RELEASE:
3460         return !!SDL_UnlockMutex(*mtx);
3461     case AV_LOCK_DESTROY:
3462         SDL_DestroyMutex(*mtx);
3463         return 0;
3464     }
3465     return 1;
3466 }
3467
3468 /* Called from the main */
3469 int main(int argc, char **argv)
3470 {
3471     int flags;
3472     VideoState *is;
3473     char dummy_videodriver[] = "SDL_VIDEODRIVER=dummy";
3474
3475     av_log_set_flags(AV_LOG_SKIP_REPEATED);
3476     parse_loglevel(argc, argv, options);
3477
3478     /* register all codecs, demuxers and protocols */
3479     avcodec_register_all();
3480 #if CONFIG_AVDEVICE
3481     avdevice_register_all();
3482 #endif
3483 #if CONFIG_AVFILTER
3484     avfilter_register_all();
3485 #endif
3486     av_register_all();
3487     avformat_network_init();
3488
3489     init_opts();
3490
3491     signal(SIGINT , sigterm_handler); /* Interrupt (ANSI).    */
3492     signal(SIGTERM, sigterm_handler); /* Termination (ANSI).  */
3493
3494     show_banner(argc, argv, options);
3495
3496     parse_options(NULL, argc, argv, options, opt_input_file);
3497
3498     if (!input_filename) {
3499         show_usage();
3500         fprintf(stderr, "An input file must be specified\n");
3501         fprintf(stderr, "Use -h to get full help or, even better, run 'man %s'\n", program_name);
3502         exit(1);
3503     }
3504
3505     if (display_disable) {
3506         video_disable = 1;
3507     }
3508     flags = SDL_INIT_VIDEO | SDL_INIT_AUDIO | SDL_INIT_TIMER;
3509     if (audio_disable)
3510         flags &= ~SDL_INIT_AUDIO;
3511     if (display_disable)
3512         SDL_putenv(dummy_videodriver); /* For the event queue, we always need a video driver. */
3513 #if !defined(__MINGW32__) && !defined(__APPLE__)
3514     flags |= SDL_INIT_EVENTTHREAD; /* Not supported on Windows or Mac OS X */
3515 #endif
3516     if (SDL_Init (flags)) {
3517         fprintf(stderr, "Could not initialize SDL - %s\n", SDL_GetError());
3518         fprintf(stderr, "(Did you set the DISPLAY variable?)\n");
3519         exit(1);
3520     }
3521
3522     if (!display_disable) {
3523         const SDL_VideoInfo *vi = SDL_GetVideoInfo();
3524         fs_screen_width = vi->current_w;
3525         fs_screen_height = vi->current_h;
3526     }
3527
3528     SDL_EventState(SDL_ACTIVEEVENT, SDL_IGNORE);
3529     SDL_EventState(SDL_SYSWMEVENT, SDL_IGNORE);
3530     SDL_EventState(SDL_USEREVENT, SDL_IGNORE);
3531
3532     if (av_lockmgr_register(lockmgr)) {
3533         fprintf(stderr, "Could not initialize lock manager!\n");
3534         do_exit(NULL);
3535     }
3536
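    /*
     * flush_pkt is a sentinel packet: its data pointer (the static "FLUSH"
     * string) is compared by address when packets are taken off the queues,
     * signalling the decoder threads that a seek happened and the codec
     * buffers must be flushed.
     */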
3537     av_init_packet(&flush_pkt);
3538     flush_pkt.data = (uint8_t *)(intptr_t)"FLUSH";
3539
3540     is = stream_open(input_filename, file_iformat);
3541     if (!is) {
3542         fprintf(stderr, "Failed to initialize VideoState!\n");
3543         do_exit(NULL);
3544     }
3545
3546     event_loop(is);
3547
3548     /* never reached: event_loop() does not return */
3549
3550     return 0;
3551 }