1 /*
2  * Copyright (c) 2003 Fabrice Bellard
3  *
4  * This file is part of FFmpeg.
5  *
6  * FFmpeg is free software; you can redistribute it and/or
7  * modify it under the terms of the GNU Lesser General Public
8  * License as published by the Free Software Foundation; either
9  * version 2.1 of the License, or (at your option) any later version.
10  *
11  * FFmpeg is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
14  * Lesser General Public License for more details.
15  *
16  * You should have received a copy of the GNU Lesser General Public
17  * License along with FFmpeg; if not, write to the Free Software
18  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
19  */
20
21 /**
22  * @file
23  * simple media player based on the FFmpeg libraries
24  */
25
26 #include "config.h"
27 #include <inttypes.h>
28 #include <math.h>
29 #include <limits.h>
30 #include <signal.h>
31 #include "libavutil/avstring.h"
32 #include "libavutil/colorspace.h"
33 #include "libavutil/mathematics.h"
34 #include "libavutil/pixdesc.h"
35 #include "libavutil/imgutils.h"
36 #include "libavutil/dict.h"
37 #include "libavutil/parseutils.h"
38 #include "libavutil/samplefmt.h"
39 #include "libavutil/avassert.h"
40 #include "libavutil/time.h"
41 #include "libavformat/avformat.h"
42 #include "libavdevice/avdevice.h"
43 #include "libswscale/swscale.h"
44 #include "libavutil/opt.h"
45 #include "libavcodec/avfft.h"
46 #include "libswresample/swresample.h"
47
48 #if CONFIG_AVFILTER
49 # include "libavfilter/avcodec.h"
50 # include "libavfilter/avfilter.h"
51 # include "libavfilter/avfiltergraph.h"
52 # include "libavfilter/buffersink.h"
53 # include "libavfilter/buffersrc.h"
54 #endif
55
56 #include <SDL.h>
57 #include <SDL_thread.h>
58
59 #include "cmdutils.h"
60
61 #include <assert.h>
62
63 const char program_name[] = "ffplay";
64 const int program_birth_year = 2003;
65
66 #define MAX_QUEUE_SIZE (15 * 1024 * 1024)
67 #define MIN_FRAMES 5
68
69 /* SDL audio buffer size, in samples. Should be small to have precise
70    A/V sync as SDL does not have hardware buffer fullness info. */
71 #define SDL_AUDIO_BUFFER_SIZE 1024
72
73 /* no AV sync correction is done if below the AV sync threshold */
74 #define AV_SYNC_THRESHOLD 0.01
75 /* no AV correction is done if too big error */
76 #define AV_NOSYNC_THRESHOLD 10.0
77
78 /* maximum audio speed change to get correct sync */
79 #define SAMPLE_CORRECTION_PERCENT_MAX 10
80
81 /* we use about AUDIO_DIFF_AVG_NB A-V differences to compute the average */
82 #define AUDIO_DIFF_AVG_NB   20
83
84 /* NOTE: the size must be big enough to compensate for the hardware audio buffer size */
85 /* TODO: We assume that a decoded and resampled frame fits into this buffer */
86 #define SAMPLE_ARRAY_SIZE (8 * 65536)
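/* Taken together: compute_target_delay() below only corrects the frame delay
   when the clock error reaches FFMAX(AV_SYNC_THRESHOLD, delay) while still
   being under AV_NOSYNC_THRESHOLD; larger errors are considered hopeless and
   left alone. SAMPLE_CORRECTION_PERCENT_MAX caps the audio speed change
   applied for sync, and SAMPLE_ARRAY_SIZE sizes the circular buffer of recent
   samples that feeds the waveform/RDFT display in video_audio_display(). */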
87
88 static int sws_flags = SWS_BICUBIC;
89
90 typedef struct PacketQueue {
91     AVPacketList *first_pkt, *last_pkt;
92     int nb_packets;
93     int size;
94     int abort_request;
95     SDL_mutex *mutex;
96     SDL_cond *cond;
97 } PacketQueue;
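/* The packet queue is a mutex/condition protected singly linked list of
   AVPacketList nodes: the demuxer appends at last_pkt, decoders pop from
   first_pkt, and 'size' counts payload bytes plus per-node overhead so total
   buffering can be bounded (cf. MAX_QUEUE_SIZE above). Setting abort_request
   wakes up any consumer blocked on the condition variable. */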
98
99 #define VIDEO_PICTURE_QUEUE_SIZE 4
100 #define SUBPICTURE_QUEUE_SIZE 4
101
102 typedef struct VideoPicture {
103     double pts;             // presentation timestamp for this picture
104     int64_t pos;            // byte position in file
105     int skip;
106     SDL_Overlay *bmp;
107     int width, height; /* source height & width */
108     AVRational sample_aspect_ratio;
109     int allocated;
110     int reallocate;
111
112 #if CONFIG_AVFILTER
113     AVFilterBufferRef *picref;
114 #endif
115 } VideoPicture;
116
117 typedef struct SubPicture {
118     double pts; /* presentation time stamp for this picture */
119     AVSubtitle sub;
120 } SubPicture;
121
122 typedef struct AudioParams {
123     int freq;
124     int channels;
125     int channel_layout;
126     enum AVSampleFormat fmt;
127 } AudioParams;
128
129 enum {
130     AV_SYNC_AUDIO_MASTER, /* default choice */
131     AV_SYNC_VIDEO_MASTER,
132     AV_SYNC_EXTERNAL_CLOCK, /* synchronize to an external clock */
133 };
134
135 typedef struct VideoState {
136     SDL_Thread *read_tid;
137     SDL_Thread *video_tid;
138     SDL_Thread *refresh_tid;
139     AVInputFormat *iformat;
140     int no_background;
141     int abort_request;
142     int force_refresh;
143     int paused;
144     int last_paused;
145     int que_attachments_req;
146     int seek_req;
147     int seek_flags;
148     int64_t seek_pos;
149     int64_t seek_rel;
150     int read_pause_return;
151     AVFormatContext *ic;
152
153     int audio_stream;
154
155     int av_sync_type;
156     double external_clock; /* external clock base */
157     int64_t external_clock_time;
158
159     double audio_clock;
160     double audio_diff_cum; /* used for AV difference average computation */
161     double audio_diff_avg_coef;
162     double audio_diff_threshold;
163     int audio_diff_avg_count;
164     AVStream *audio_st;
165     PacketQueue audioq;
166     int audio_hw_buf_size;
167     DECLARE_ALIGNED(16,uint8_t,audio_buf2)[AVCODEC_MAX_AUDIO_FRAME_SIZE * 4];
168     uint8_t silence_buf[SDL_AUDIO_BUFFER_SIZE];
169     uint8_t *audio_buf;
170     uint8_t *audio_buf1;
171     unsigned int audio_buf_size; /* in bytes */
172     int audio_buf_index; /* in bytes */
173     int audio_write_buf_size;
174     AVPacket audio_pkt_temp;
175     AVPacket audio_pkt;
176     struct AudioParams audio_src;
177     struct AudioParams audio_tgt;
178     struct SwrContext *swr_ctx;
179     double audio_current_pts;
180     double audio_current_pts_drift;
181     int frame_drops_early;
182     int frame_drops_late;
183     AVFrame *frame;
184
185     enum ShowMode {
186         SHOW_MODE_NONE = -1, SHOW_MODE_VIDEO = 0, SHOW_MODE_WAVES, SHOW_MODE_RDFT, SHOW_MODE_NB
187     } show_mode;
188     int16_t sample_array[SAMPLE_ARRAY_SIZE];
189     int sample_array_index;
190     int last_i_start;
191     RDFTContext *rdft;
192     int rdft_bits;
193     FFTSample *rdft_data;
194     int xpos;
195
196     SDL_Thread *subtitle_tid;
197     int subtitle_stream;
198     int subtitle_stream_changed;
199     AVStream *subtitle_st;
200     PacketQueue subtitleq;
201     SubPicture subpq[SUBPICTURE_QUEUE_SIZE];
202     int subpq_size, subpq_rindex, subpq_windex;
203     SDL_mutex *subpq_mutex;
204     SDL_cond *subpq_cond;
205
206     double frame_timer;
207     double frame_last_pts;
208     double frame_last_duration;
209     double frame_last_dropped_pts;
210     double frame_last_returned_time;
211     double frame_last_filter_delay;
212     int64_t frame_last_dropped_pos;
213     double video_clock;             // pts of last decoded frame / predicted pts of next decoded frame
214     int video_stream;
215     AVStream *video_st;
216     PacketQueue videoq;
217     double video_current_pts;       // current displayed pts (different from video_clock if frame fifos are used)
218     double video_current_pts_drift; // video_current_pts - time (av_gettime) at which we updated video_current_pts - used to have running video pts
219     int64_t video_current_pos;      // current displayed file pos
220     VideoPicture pictq[VIDEO_PICTURE_QUEUE_SIZE];
221     int pictq_size, pictq_rindex, pictq_windex;
222     SDL_mutex *pictq_mutex;
223     SDL_cond *pictq_cond;
224 #if !CONFIG_AVFILTER
225     struct SwsContext *img_convert_ctx;
226 #endif
227
228     char filename[1024];
229     int width, height, xleft, ytop;
230     int step;
231
232 #if CONFIG_AVFILTER
233     AVFilterContext *in_video_filter;   // the first filter in the video chain
234     AVFilterContext *out_video_filter;  // the last filter in the video chain
235     int use_dr1;
236     FrameBuffer *buffer_pool;
237 #endif
238
239     int refresh;
240     int last_video_stream, last_audio_stream, last_subtitle_stream;
241
242     SDL_cond *continue_read_thread;
243 } VideoState;
244
245 /* options specified by the user */
246 static AVInputFormat *file_iformat;
247 static const char *input_filename;
248 static const char *window_title;
249 static int fs_screen_width;
250 static int fs_screen_height;
251 static int screen_width  = 0;
252 static int screen_height = 0;
253 static int audio_disable;
254 static int video_disable;
255 static int wanted_stream[AVMEDIA_TYPE_NB] = {
256     [AVMEDIA_TYPE_AUDIO]    = -1,
257     [AVMEDIA_TYPE_VIDEO]    = -1,
258     [AVMEDIA_TYPE_SUBTITLE] = -1,
259 };
260 static int seek_by_bytes = -1;
261 static int display_disable;
262 static int show_status = 1;
263 static int av_sync_type = AV_SYNC_AUDIO_MASTER;
264 static int64_t start_time = AV_NOPTS_VALUE;
265 static int64_t duration = AV_NOPTS_VALUE;
266 static int workaround_bugs = 1;
267 static int fast = 0;
268 static int genpts = 0;
269 static int lowres = 0;
270 static int idct = FF_IDCT_AUTO;
271 static enum AVDiscard skip_frame       = AVDISCARD_DEFAULT;
272 static enum AVDiscard skip_idct        = AVDISCARD_DEFAULT;
273 static enum AVDiscard skip_loop_filter = AVDISCARD_DEFAULT;
274 static int error_concealment = 3;
275 static int decoder_reorder_pts = -1;
276 static int autoexit;
277 static int exit_on_keydown;
278 static int exit_on_mousedown;
279 static int loop = 1;
280 static int framedrop = -1;
281 static int infinite_buffer = -1;
282 static enum ShowMode show_mode = SHOW_MODE_NONE;
283 static const char *audio_codec_name;
284 static const char *subtitle_codec_name;
285 static const char *video_codec_name;
286 static int rdftspeed = 20;
287 #if CONFIG_AVFILTER
288 static char *vfilters = NULL;
289 #endif
290
291 /* current context */
292 static int is_full_screen;
293 static int64_t audio_callback_time;
294
295 static AVPacket flush_pkt;
296
297 #define FF_ALLOC_EVENT   (SDL_USEREVENT)
298 #define FF_REFRESH_EVENT (SDL_USEREVENT + 1)
299 #define FF_QUIT_EVENT    (SDL_USEREVENT + 2)
300
301 static SDL_Surface *screen;
302
303 static int packet_queue_put(PacketQueue *q, AVPacket *pkt);
304
305 static int packet_queue_put_private(PacketQueue *q, AVPacket *pkt)
306 {
307     AVPacketList *pkt1;
308
309     if (q->abort_request)
310        return -1;
311
312     pkt1 = av_malloc(sizeof(AVPacketList));
313     if (!pkt1)
314         return -1;
315     pkt1->pkt = *pkt;
316     pkt1->next = NULL;
317
318     if (!q->last_pkt)
319         q->first_pkt = pkt1;
320     else
321         q->last_pkt->next = pkt1;
322     q->last_pkt = pkt1;
323     q->nb_packets++;
324     q->size += pkt1->pkt.size + sizeof(*pkt1);
325     /* XXX: should duplicate packet data in DV case */
326     SDL_CondSignal(q->cond);
327     return 0;
328 }
329
330 static int packet_queue_put(PacketQueue *q, AVPacket *pkt)
331 {
332     int ret;
333
334     /* duplicate the packet */
335     if (pkt != &flush_pkt && av_dup_packet(pkt) < 0)
336         return -1;
337
338     SDL_LockMutex(q->mutex);
339     ret = packet_queue_put_private(q, pkt);
340     SDL_UnlockMutex(q->mutex);
341
342     if (pkt != &flush_pkt && ret < 0)
343         av_free_packet(pkt);
344
345     return ret;
346 }
347
348 /* packet queue handling */
349 static void packet_queue_init(PacketQueue *q)
350 {
351     memset(q, 0, sizeof(PacketQueue));
352     q->mutex = SDL_CreateMutex();
353     q->cond = SDL_CreateCond();
354     q->abort_request = 1;
355 }
356
357 static void packet_queue_flush(PacketQueue *q)
358 {
359     AVPacketList *pkt, *pkt1;
360
361     SDL_LockMutex(q->mutex);
362     for (pkt = q->first_pkt; pkt != NULL; pkt = pkt1) {
363         pkt1 = pkt->next;
364         av_free_packet(&pkt->pkt);
365         av_freep(&pkt);
366     }
367     q->last_pkt = NULL;
368     q->first_pkt = NULL;
369     q->nb_packets = 0;
370     q->size = 0;
371     SDL_UnlockMutex(q->mutex);
372 }
373
374 static void packet_queue_destroy(PacketQueue *q)
375 {
376     packet_queue_flush(q);
377     SDL_DestroyMutex(q->mutex);
378     SDL_DestroyCond(q->cond);
379 }
380
381 static void packet_queue_abort(PacketQueue *q)
382 {
383     SDL_LockMutex(q->mutex);
384
385     q->abort_request = 1;
386
387     SDL_CondSignal(q->cond);
388
389     SDL_UnlockMutex(q->mutex);
390 }
391
392 static void packet_queue_start(PacketQueue *q)
393 {
394     SDL_LockMutex(q->mutex);
395     q->abort_request = 0;
396     packet_queue_put_private(q, &flush_pkt);
397     SDL_UnlockMutex(q->mutex);
398 }
399
400 /* return < 0 if aborted, 0 if no packet and > 0 if packet.  */
401 static int packet_queue_get(PacketQueue *q, AVPacket *pkt, int block)
402 {
403     AVPacketList *pkt1;
404     int ret;
405
406     SDL_LockMutex(q->mutex);
407
408     for (;;) {
409         if (q->abort_request) {
410             ret = -1;
411             break;
412         }
413
414         pkt1 = q->first_pkt;
415         if (pkt1) {
416             q->first_pkt = pkt1->next;
417             if (!q->first_pkt)
418                 q->last_pkt = NULL;
419             q->nb_packets--;
420             q->size -= pkt1->pkt.size + sizeof(*pkt1);
421             *pkt = pkt1->pkt;
422             av_free(pkt1);
423             ret = 1;
424             break;
425         } else if (!block) {
426             ret = 0;
427             break;
428         } else {
429             SDL_CondWait(q->cond, q->mutex);
430         }
431     }
432     SDL_UnlockMutex(q->mutex);
433     return ret;
434 }
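/* Typical usage, as an illustrative sketch rather than a separate API: the
   read thread calls packet_queue_put() for each demuxed packet, a decoder
   loops on packet_queue_get(q, &pkt, 1) and treats a packet whose data equals
   flush_pkt.data as a request to flush its codec, packet_queue_abort()
   unblocks everything at shutdown, and packet_queue_start() re-arms an
   aborted queue and immediately enqueues a flush packet. */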
435
436 static inline void fill_rectangle(SDL_Surface *screen,
437                                   int x, int y, int w, int h, int color)
438 {
439     SDL_Rect rect;
440     rect.x = x;
441     rect.y = y;
442     rect.w = w;
443     rect.h = h;
444     SDL_FillRect(screen, &rect, color);
445 }
446
447 #define ALPHA_BLEND(a, oldp, newp, s)\
448 ((((oldp << s) * (255 - (a))) + (newp * (a))) / (255 << s))
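/* ALPHA_BLEND mixes newp over oldp with alpha a (0..255). The shift s allows
   newp to be a sum of 2^s samples: with s = 1 or 2 below, a chroma value is
   blended against the sum of two or four palette entries, which averages them
   within the same expression. */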
449
450 #define RGBA_IN(r, g, b, a, s)\
451 {\
452     unsigned int v = ((const uint32_t *)(s))[0];\
453     a = (v >> 24) & 0xff;\
454     r = (v >> 16) & 0xff;\
455     g = (v >> 8) & 0xff;\
456     b = v & 0xff;\
457 }
458
459 #define YUVA_IN(y, u, v, a, s, pal)\
460 {\
461     unsigned int val = ((const uint32_t *)(pal))[*(const uint8_t*)(s)];\
462     a = (val >> 24) & 0xff;\
463     y = (val >> 16) & 0xff;\
464     u = (val >> 8) & 0xff;\
465     v = val & 0xff;\
466 }
467
468 #define YUVA_OUT(d, y, u, v, a)\
469 {\
470     ((uint32_t *)(d))[0] = (a << 24) | (y << 16) | (u << 8) | v;\
471 }
472
473
474 #define BPP 1
475
476 static void blend_subrect(AVPicture *dst, const AVSubtitleRect *rect, int imgw, int imgh)
477 {
478     int wrap, wrap3, width2, skip2;
479     int y, u, v, a, u1, v1, a1, w, h;
480     uint8_t *lum, *cb, *cr;
481     const uint8_t *p;
482     const uint32_t *pal;
483     int dstx, dsty, dstw, dsth;
484
485     dstw = av_clip(rect->w, 0, imgw);
486     dsth = av_clip(rect->h, 0, imgh);
487     dstx = av_clip(rect->x, 0, imgw - dstw);
488     dsty = av_clip(rect->y, 0, imgh - dsth);
489     lum = dst->data[0] + dsty * dst->linesize[0];
490     cb  = dst->data[1] + (dsty >> 1) * dst->linesize[1];
491     cr  = dst->data[2] + (dsty >> 1) * dst->linesize[2];
492
493     width2 = ((dstw + 1) >> 1) + (dstx & ~dstw & 1);
494     skip2 = dstx >> 1;
495     wrap = dst->linesize[0];
496     wrap3 = rect->pict.linesize[0];
497     p = rect->pict.data[0];
498     pal = (const uint32_t *)rect->pict.data[1];  /* Now in YCrCb! */
499
500     if (dsty & 1) {
501         lum += dstx;
502         cb += skip2;
503         cr += skip2;
504
505         if (dstx & 1) {
506             YUVA_IN(y, u, v, a, p, pal);
507             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
508             cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
509             cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
510             cb++;
511             cr++;
512             lum++;
513             p += BPP;
514         }
515         for (w = dstw - (dstx & 1); w >= 2; w -= 2) {
516             YUVA_IN(y, u, v, a, p, pal);
517             u1 = u;
518             v1 = v;
519             a1 = a;
520             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
521
522             YUVA_IN(y, u, v, a, p + BPP, pal);
523             u1 += u;
524             v1 += v;
525             a1 += a;
526             lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
527             cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
528             cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
529             cb++;
530             cr++;
531             p += 2 * BPP;
532             lum += 2;
533         }
534         if (w) {
535             YUVA_IN(y, u, v, a, p, pal);
536             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
537             cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
538             cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
539             p++;
540             lum++;
541         }
542         p += wrap3 - dstw * BPP;
543         lum += wrap - dstw - dstx;
544         cb += dst->linesize[1] - width2 - skip2;
545         cr += dst->linesize[2] - width2 - skip2;
546     }
547     for (h = dsth - (dsty & 1); h >= 2; h -= 2) {
548         lum += dstx;
549         cb += skip2;
550         cr += skip2;
551
552         if (dstx & 1) {
553             YUVA_IN(y, u, v, a, p, pal);
554             u1 = u;
555             v1 = v;
556             a1 = a;
557             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
558             p += wrap3;
559             lum += wrap;
560             YUVA_IN(y, u, v, a, p, pal);
561             u1 += u;
562             v1 += v;
563             a1 += a;
564             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
565             cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
566             cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
567             cb++;
568             cr++;
569             p += -wrap3 + BPP;
570             lum += -wrap + 1;
571         }
572         for (w = dstw - (dstx & 1); w >= 2; w -= 2) {
573             YUVA_IN(y, u, v, a, p, pal);
574             u1 = u;
575             v1 = v;
576             a1 = a;
577             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
578
579             YUVA_IN(y, u, v, a, p + BPP, pal);
580             u1 += u;
581             v1 += v;
582             a1 += a;
583             lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
584             p += wrap3;
585             lum += wrap;
586
587             YUVA_IN(y, u, v, a, p, pal);
588             u1 += u;
589             v1 += v;
590             a1 += a;
591             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
592
593             YUVA_IN(y, u, v, a, p + BPP, pal);
594             u1 += u;
595             v1 += v;
596             a1 += a;
597             lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
598
599             cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 2);
600             cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 2);
601
602             cb++;
603             cr++;
604             p += -wrap3 + 2 * BPP;
605             lum += -wrap + 2;
606         }
607         if (w) {
608             YUVA_IN(y, u, v, a, p, pal);
609             u1 = u;
610             v1 = v;
611             a1 = a;
612             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
613             p += wrap3;
614             lum += wrap;
615             YUVA_IN(y, u, v, a, p, pal);
616             u1 += u;
617             v1 += v;
618             a1 += a;
619             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
620             cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
621             cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
622             cb++;
623             cr++;
624             p += -wrap3 + BPP;
625             lum += -wrap + 1;
626         }
627         p += wrap3 + (wrap3 - dstw * BPP);
628         lum += wrap + (wrap - dstw - dstx);
629         cb += dst->linesize[1] - width2 - skip2;
630         cr += dst->linesize[2] - width2 - skip2;
631     }
632     /* handle odd height */
633     if (h) {
634         lum += dstx;
635         cb += skip2;
636         cr += skip2;
637
638         if (dstx & 1) {
639             YUVA_IN(y, u, v, a, p, pal);
640             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
641             cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
642             cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
643             cb++;
644             cr++;
645             lum++;
646             p += BPP;
647         }
648         for (w = dstw - (dstx & 1); w >= 2; w -= 2) {
649             YUVA_IN(y, u, v, a, p, pal);
650             u1 = u;
651             v1 = v;
652             a1 = a;
653             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
654
655             YUVA_IN(y, u, v, a, p + BPP, pal);
656             u1 += u;
657             v1 += v;
658             a1 += a;
659             lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
660             cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u, 1);
661             cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v, 1);
662             cb++;
663             cr++;
664             p += 2 * BPP;
665             lum += 2;
666         }
667         if (w) {
668             YUVA_IN(y, u, v, a, p, pal);
669             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
670             cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
671             cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
672         }
673     }
674 }
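/* blend_subrect() alpha-blends one PAL8 subtitle rectangle onto the YUV420P
   target: each pixel is looked up in the palette (already converted to YCbCr,
   see the note above), luma is blended at full resolution while chroma is
   blended once per 2x2 block, and the extra branches handle rectangles whose
   x/y offset or width/height is odd. */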
675
676 static void free_subpicture(SubPicture *sp)
677 {
678     avsubtitle_free(&sp->sub);
679 }
680
681 static void calculate_display_rect(SDL_Rect *rect, int scr_xleft, int scr_ytop, int scr_width, int scr_height, VideoPicture *vp)
682 {
683     float aspect_ratio;
684     int width, height, x, y;
685
686     if (vp->sample_aspect_ratio.num == 0)
687         aspect_ratio = 0;
688     else
689         aspect_ratio = av_q2d(vp->sample_aspect_ratio);
690
691     if (aspect_ratio <= 0.0)
692         aspect_ratio = 1.0;
693     aspect_ratio *= (float)vp->width / (float)vp->height;
694
695     /* XXX: we assume the screen has a 1:1 pixel aspect ratio */
696     height = scr_height;
697     width = ((int)rint(height * aspect_ratio)) & ~1;
698     if (width > scr_width) {
699         width = scr_width;
700         height = ((int)rint(width / aspect_ratio)) & ~1;
701     }
702     x = (scr_width - width) / 2;
703     y = (scr_height - height) / 2;
704     rect->x = scr_xleft + x;
705     rect->y = scr_ytop  + y;
706     rect->w = FFMAX(width,  1);
707     rect->h = FFMAX(height, 1);
708 }
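/* Example: a 720x576 picture with sample_aspect_ratio 16:15 shown in a
   1280x720 area gives aspect_ratio = (16/15) * (720/576) = 4/3, so
   height = 720, width = rint(720 * 4/3) & ~1 = 960, and the rectangle is
   centered at x = (1280 - 960) / 2 = 160, y = 0. */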
709
710 static void video_image_display(VideoState *is)
711 {
712     VideoPicture *vp;
713     SubPicture *sp;
714     AVPicture pict;
715     SDL_Rect rect;
716     int i;
717
718     vp = &is->pictq[is->pictq_rindex];
719     if (vp->bmp) {
720         if (is->subtitle_st) {
721             if (is->subpq_size > 0) {
722                 sp = &is->subpq[is->subpq_rindex];
723
724                 if (vp->pts >= sp->pts + ((float) sp->sub.start_display_time / 1000)) {
725                     SDL_LockYUVOverlay (vp->bmp);
726
727                     pict.data[0] = vp->bmp->pixels[0];
728                     pict.data[1] = vp->bmp->pixels[2];
729                     pict.data[2] = vp->bmp->pixels[1];
730
731                     pict.linesize[0] = vp->bmp->pitches[0];
732                     pict.linesize[1] = vp->bmp->pitches[2];
733                     pict.linesize[2] = vp->bmp->pitches[1];
734
735                     for (i = 0; i < sp->sub.num_rects; i++)
736                         blend_subrect(&pict, sp->sub.rects[i],
737                                       vp->bmp->w, vp->bmp->h);
738
739                     SDL_UnlockYUVOverlay (vp->bmp);
740                 }
741             }
742         }
743
744         calculate_display_rect(&rect, is->xleft, is->ytop, is->width, is->height, vp);
745
746         SDL_DisplayYUVOverlay(vp->bmp, &rect);
747     }
748 }
749
750 static inline int compute_mod(int a, int b)
751 {
752     return a < 0 ? a%b + b : a%b;
753 }
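/* compute_mod() is a mathematical modulo that always lands in [0, b), e.g.
   compute_mod(-3, SAMPLE_ARRAY_SIZE) yields SAMPLE_ARRAY_SIZE - 3; it is used
   to step backwards through the circular sample_array from the write index. */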
754
755 static void video_audio_display(VideoState *s)
756 {
757     int i, i_start, x, y1, y, ys, delay, n, nb_display_channels;
758     int ch, channels, h, h2, bgcolor, fgcolor;
759     int64_t time_diff;
760     int rdft_bits, nb_freq;
761
762     for (rdft_bits = 1; (1 << rdft_bits) < 2 * s->height; rdft_bits++)
763         ;
764     nb_freq = 1 << (rdft_bits - 1);
765
766     /* compute display index: center on currently output samples */
767     channels = s->audio_tgt.channels;
768     nb_display_channels = channels;
769     if (!s->paused) {
770         int data_used= s->show_mode == SHOW_MODE_WAVES ? s->width : (2*nb_freq);
771         n = 2 * channels;
772         delay = s->audio_write_buf_size;
773         delay /= n;
774
775         /* to be more precise, we take into account the time spent since
776            the last buffer computation */
777         if (audio_callback_time) {
778             time_diff = av_gettime() - audio_callback_time;
779             delay -= (time_diff * s->audio_tgt.freq) / 1000000;
780         }
781
782         delay += 2 * data_used;
783         if (delay < data_used)
784             delay = data_used;
785
786         i_start= x = compute_mod(s->sample_array_index - delay * channels, SAMPLE_ARRAY_SIZE);
787         if (s->show_mode == SHOW_MODE_WAVES) {
788             h = INT_MIN;
789             for (i = 0; i < 1000; i += channels) {
790                 int idx = (SAMPLE_ARRAY_SIZE + x - i) % SAMPLE_ARRAY_SIZE;
791                 int a = s->sample_array[idx];
792                 int b = s->sample_array[(idx + 4 * channels) % SAMPLE_ARRAY_SIZE];
793                 int c = s->sample_array[(idx + 5 * channels) % SAMPLE_ARRAY_SIZE];
794                 int d = s->sample_array[(idx + 9 * channels) % SAMPLE_ARRAY_SIZE];
795                 int score = a - d;
796                 if (h < score && (b ^ c) < 0) {
797                     h = score;
798                     i_start = idx;
799                 }
800             }
801         }
802
803         s->last_i_start = i_start;
804     } else {
805         i_start = s->last_i_start;
806     }
807
808     bgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0x00);
809     if (s->show_mode == SHOW_MODE_WAVES) {
810         fill_rectangle(screen,
811                        s->xleft, s->ytop, s->width, s->height,
812                        bgcolor);
813
814         fgcolor = SDL_MapRGB(screen->format, 0xff, 0xff, 0xff);
815
816         /* total height for one channel */
817         h = s->height / nb_display_channels;
818         /* graph height / 2 */
819         h2 = (h * 9) / 20;
820         for (ch = 0; ch < nb_display_channels; ch++) {
821             i = i_start + ch;
822             y1 = s->ytop + ch * h + (h / 2); /* position of center line */
823             for (x = 0; x < s->width; x++) {
824                 y = (s->sample_array[i] * h2) >> 15;
825                 if (y < 0) {
826                     y = -y;
827                     ys = y1 - y;
828                 } else {
829                     ys = y1;
830                 }
831                 fill_rectangle(screen,
832                                s->xleft + x, ys, 1, y,
833                                fgcolor);
834                 i += channels;
835                 if (i >= SAMPLE_ARRAY_SIZE)
836                     i -= SAMPLE_ARRAY_SIZE;
837             }
838         }
839
840         fgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0xff);
841
842         for (ch = 1; ch < nb_display_channels; ch++) {
843             y = s->ytop + ch * h;
844             fill_rectangle(screen,
845                            s->xleft, y, s->width, 1,
846                            fgcolor);
847         }
848         SDL_UpdateRect(screen, s->xleft, s->ytop, s->width, s->height);
849     } else {
850         nb_display_channels= FFMIN(nb_display_channels, 2);
851         if (rdft_bits != s->rdft_bits) {
852             av_rdft_end(s->rdft);
853             av_free(s->rdft_data);
854             s->rdft = av_rdft_init(rdft_bits, DFT_R2C);
855             s->rdft_bits = rdft_bits;
856             s->rdft_data = av_malloc(4 * nb_freq * sizeof(*s->rdft_data));
857         }
858         {
859             FFTSample *data[2];
860             for (ch = 0; ch < nb_display_channels; ch++) {
861                 data[ch] = s->rdft_data + 2 * nb_freq * ch;
862                 i = i_start + ch;
863                 for (x = 0; x < 2 * nb_freq; x++) {
864                     double w = (x-nb_freq) * (1.0 / nb_freq);
865                     data[ch][x] = s->sample_array[i] * (1.0 - w * w);
866                     i += channels;
867                     if (i >= SAMPLE_ARRAY_SIZE)
868                         i -= SAMPLE_ARRAY_SIZE;
869                 }
870                 av_rdft_calc(s->rdft, data[ch]);
871             }
872             // least efficient way to do this; we should of course access the data directly, but it's more than fast enough
873             for (y = 0; y < s->height; y++) {
874                 double w = 1 / sqrt(nb_freq);
875                 int a = sqrt(w * sqrt(data[0][2 * y + 0] * data[0][2 * y + 0] + data[0][2 * y + 1] * data[0][2 * y + 1]));
876                 int b = (nb_display_channels == 2 ) ? sqrt(w * sqrt(data[1][2 * y + 0] * data[1][2 * y + 0]
877                        + data[1][2 * y + 1] * data[1][2 * y + 1])) : a;
878                 a = FFMIN(a, 255);
879                 b = FFMIN(b, 255);
880                 fgcolor = SDL_MapRGB(screen->format, a, b, (a + b) / 2);
881
882                 fill_rectangle(screen,
883                             s->xpos, s->height-y, 1, 1,
884                             fgcolor);
885             }
886         }
887         SDL_UpdateRect(screen, s->xpos, s->ytop, 1, s->height);
888         if (!s->paused)
889             s->xpos++;
890         if (s->xpos >= s->width)
891             s->xpos= s->xleft;
892     }
893 }
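/* Summary of the two audio visualizations: SHOW_MODE_WAVES draws one
   oscilloscope trace per channel, starting at an index backed off from the
   write position so that the drawn samples roughly match what is currently
   audible; SHOW_MODE_RDFT applies a (1 - w*w) window to 2*nb_freq samples per
   channel, runs a real FFT and paints one column of spectral magnitudes per
   refresh at xpos, scrolling across the window. */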
894
895 static void stream_close(VideoState *is)
896 {
897     VideoPicture *vp;
898     int i;
899     /* XXX: use a special url_shutdown call to abort parse cleanly */
900     is->abort_request = 1;
901     SDL_WaitThread(is->read_tid, NULL);
902     SDL_WaitThread(is->refresh_tid, NULL);
903     packet_queue_destroy(&is->videoq);
904     packet_queue_destroy(&is->audioq);
905     packet_queue_destroy(&is->subtitleq);
906
907     /* free all pictures */
908     for (i = 0; i < VIDEO_PICTURE_QUEUE_SIZE; i++) {
909         vp = &is->pictq[i];
910 #if CONFIG_AVFILTER
911         avfilter_unref_bufferp(&vp->picref);
912 #endif
913         if (vp->bmp) {
914             SDL_FreeYUVOverlay(vp->bmp);
915             vp->bmp = NULL;
916         }
917     }
918     SDL_DestroyMutex(is->pictq_mutex);
919     SDL_DestroyCond(is->pictq_cond);
920     SDL_DestroyMutex(is->subpq_mutex);
921     SDL_DestroyCond(is->subpq_cond);
922     SDL_DestroyCond(is->continue_read_thread);
923 #if !CONFIG_AVFILTER
924     if (is->img_convert_ctx)
925         sws_freeContext(is->img_convert_ctx);
926 #endif
927     av_free(is);
928 }
929
930 static void do_exit(VideoState *is)
931 {
932     if (is) {
933         stream_close(is);
934     }
935     av_lockmgr_register(NULL);
936     uninit_opts();
937 #if CONFIG_AVFILTER
938     avfilter_uninit();
939     av_freep(&vfilters);
940 #endif
941     avformat_network_deinit();
942     if (show_status)
943         printf("\n");
944     SDL_Quit();
945     av_log(NULL, AV_LOG_QUIET, "%s", "");
946     exit(0);
947 }
948
949 static void sigterm_handler(int sig)
950 {
951     exit(123);
952 }
953
954 static int video_open(VideoState *is, int force_set_video_mode)
955 {
956     int flags = SDL_HWSURFACE | SDL_ASYNCBLIT | SDL_HWACCEL;
957     int w,h;
958     VideoPicture *vp = &is->pictq[is->pictq_rindex];
959     SDL_Rect rect;
960
961     if (is_full_screen) flags |= SDL_FULLSCREEN;
962     else                flags |= SDL_RESIZABLE;
963
964     if (is_full_screen && fs_screen_width) {
965         w = fs_screen_width;
966         h = fs_screen_height;
967     } else if (!is_full_screen && screen_width) {
968         w = screen_width;
969         h = screen_height;
970     } else if (vp->width) {
971         calculate_display_rect(&rect, 0, 0, INT_MAX, vp->height, vp);
972         w = rect.w;
973         h = rect.h;
974     } else {
975         w = 640;
976         h = 480;
977     }
978     if (screen && is->width == screen->w && screen->w == w
979        && is->height== screen->h && screen->h == h && !force_set_video_mode)
980         return 0;
981     screen = SDL_SetVideoMode(w, h, 0, flags);
982     if (!screen) {
983         fprintf(stderr, "SDL: could not set video mode - exiting\n");
984         do_exit(is);
985     }
986     if (!window_title)
987         window_title = input_filename;
988     SDL_WM_SetCaption(window_title, window_title);
989
990     is->width  = screen->w;
991     is->height = screen->h;
992
993     return 0;
994 }
995
996 /* display the current picture, if any */
997 static void video_display(VideoState *is)
998 {
999     if (!screen)
1000         video_open(is, 0);
1001     if (is->audio_st && is->show_mode != SHOW_MODE_VIDEO)
1002         video_audio_display(is);
1003     else if (is->video_st)
1004         video_image_display(is);
1005 }
1006
1007 static int refresh_thread(void *opaque)
1008 {
1009     VideoState *is= opaque;
1010     while (!is->abort_request) {
1011         SDL_Event event;
1012         event.type = FF_REFRESH_EVENT;
1013         event.user.data1 = opaque;
1014         if (!is->refresh && (!is->paused || is->force_refresh)) {
1015             is->refresh = 1;
1016             SDL_PushEvent(&event);
1017         }
1018         //FIXME ideally we should wait for the correct time, but SDL's event passing is so slow it would be silly
1019         av_usleep(is->audio_st && is->show_mode != SHOW_MODE_VIDEO ? rdftspeed*1000 : 5000);
1020     }
1021     return 0;
1022 }
1023
1024 /* get the current audio clock value */
1025 static double get_audio_clock(VideoState *is)
1026 {
1027     if (is->paused) {
1028         return is->audio_current_pts;
1029     } else {
1030         return is->audio_current_pts_drift + av_gettime() / 1000000.0;
1031     }
1032 }
1033
1034 /* get the current video clock value */
1035 static double get_video_clock(VideoState *is)
1036 {
1037     if (is->paused) {
1038         return is->video_current_pts;
1039     } else {
1040         return is->video_current_pts_drift + av_gettime() / 1000000.0;
1041     }
1042 }
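/* Each *_pts_drift is the pts at the last update minus the system time of
   that update, so drift + av_gettime()/1e6 reconstructs the current clock
   without any per-frame bookkeeping; while paused the stored pts is returned
   unchanged. */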
1043
1044 /* get the current external clock value */
1045 static double get_external_clock(VideoState *is)
1046 {
1047     int64_t ti;
1048     ti = av_gettime();
1049     return is->external_clock + ((ti - is->external_clock_time) * 1e-6);
1050 }
1051
1052 /* get the current master clock value */
1053 static double get_master_clock(VideoState *is)
1054 {
1055     double val;
1056
1057     if (is->av_sync_type == AV_SYNC_VIDEO_MASTER) {
1058         if (is->video_st)
1059             val = get_video_clock(is);
1060         else
1061             val = get_audio_clock(is);
1062     } else if (is->av_sync_type == AV_SYNC_AUDIO_MASTER) {
1063         if (is->audio_st)
1064             val = get_audio_clock(is);
1065         else
1066             val = get_video_clock(is);
1067     } else {
1068         val = get_external_clock(is);
1069     }
1070     return val;
1071 }
1072
1073 /* seek in the stream */
1074 static void stream_seek(VideoState *is, int64_t pos, int64_t rel, int seek_by_bytes)
1075 {
1076     if (!is->seek_req) {
1077         is->seek_pos = pos;
1078         is->seek_rel = rel;
1079         is->seek_flags &= ~AVSEEK_FLAG_BYTE;
1080         if (seek_by_bytes)
1081             is->seek_flags |= AVSEEK_FLAG_BYTE;
1082         is->seek_req = 1;
1083     }
1084 }
1085
1086 /* pause or resume the video */
1087 static void stream_toggle_pause(VideoState *is)
1088 {
1089     if (is->paused) {
1090         is->frame_timer += av_gettime() / 1000000.0 + is->video_current_pts_drift - is->video_current_pts;
1091         if (is->read_pause_return != AVERROR(ENOSYS)) {
1092             is->video_current_pts = is->video_current_pts_drift + av_gettime() / 1000000.0;
1093         }
1094         is->video_current_pts_drift = is->video_current_pts - av_gettime() / 1000000.0;
1095     }
1096     is->paused = !is->paused;
1097 }
1098
1099 static double compute_target_delay(double delay, VideoState *is)
1100 {
1101     double sync_threshold, diff = 0;
1102
1103     /* update delay to follow master synchronisation source */
1104     if (((is->av_sync_type == AV_SYNC_AUDIO_MASTER && is->audio_st) ||
1105          is->av_sync_type == AV_SYNC_EXTERNAL_CLOCK)) {
1106         /* if video is slave, we try to correct big delays by
1107            duplicating or deleting a frame */
1108         diff = get_video_clock(is) - get_master_clock(is);
1109
1110         /* skip or repeat frame. We take into account the
1111            delay to compute the threshold. I still don't know
1112            if it is the best guess */
1113         sync_threshold = FFMAX(AV_SYNC_THRESHOLD, delay);
1114         if (fabs(diff) < AV_NOSYNC_THRESHOLD) {
1115             if (diff <= -sync_threshold)
1116                 delay = 0;
1117             else if (diff >= sync_threshold)
1118                 delay = 2 * delay;
1119         }
1120     }
1121
1122     av_dlog(NULL, "video: delay=%0.3f A-V=%f\n",
1123             delay, -diff);
1124
1125     return delay;
1126 }
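/* Example with audio as master and a nominal delay of 40 ms: if the video
   clock trails the master by 80 ms (diff = -0.08), diff <= -sync_threshold,
   so the returned delay is 0 and the next frame is shown immediately; if the
   video clock leads by 80 ms, the delay is doubled to 80 ms to let the master
   catch up. Errors of AV_NOSYNC_THRESHOLD (10 s) or more are left alone. */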
1127
1128 static void pictq_next_picture(VideoState *is) {
1129     /* update queue size and signal for next picture */
1130     if (++is->pictq_rindex == VIDEO_PICTURE_QUEUE_SIZE)
1131         is->pictq_rindex = 0;
1132
1133     SDL_LockMutex(is->pictq_mutex);
1134     is->pictq_size--;
1135     SDL_CondSignal(is->pictq_cond);
1136     SDL_UnlockMutex(is->pictq_mutex);
1137 }
1138
1139 static void pictq_prev_picture(VideoState *is) {
1140     VideoPicture *prevvp;
1141     /* update queue size and signal for the previous picture */
1142     prevvp = &is->pictq[(is->pictq_rindex + VIDEO_PICTURE_QUEUE_SIZE - 1) % VIDEO_PICTURE_QUEUE_SIZE];
1143     if (prevvp->allocated && !prevvp->skip) {
1144         SDL_LockMutex(is->pictq_mutex);
1145         if (is->pictq_size < VIDEO_PICTURE_QUEUE_SIZE - 1) {
1146             if (--is->pictq_rindex == -1)
1147                 is->pictq_rindex = VIDEO_PICTURE_QUEUE_SIZE - 1;
1148             is->pictq_size++;
1149         }
1150         SDL_CondSignal(is->pictq_cond);
1151         SDL_UnlockMutex(is->pictq_mutex);
1152     }
1153 }
1154
1155 static void update_video_pts(VideoState *is, double pts, int64_t pos) {
1156     double time = av_gettime() / 1000000.0;
1157     /* update current video pts */
1158     is->video_current_pts = pts;
1159     is->video_current_pts_drift = is->video_current_pts - time;
1160     is->video_current_pos = pos;
1161     is->frame_last_pts = pts;
1162 }
1163
1164 /* called to display each frame */
1165 static void video_refresh(void *opaque)
1166 {
1167     VideoState *is = opaque;
1168     VideoPicture *vp;
1169     double time;
1170
1171     SubPicture *sp, *sp2;
1172
1173     if (is->video_st) {
1174         if (is->force_refresh)
1175             pictq_prev_picture(is);
1176 retry:
1177         if (is->pictq_size == 0) {
1178             SDL_LockMutex(is->pictq_mutex);
1179             if (is->frame_last_dropped_pts != AV_NOPTS_VALUE && is->frame_last_dropped_pts > is->frame_last_pts) {
1180                 update_video_pts(is, is->frame_last_dropped_pts, is->frame_last_dropped_pos);
1181                 is->frame_last_dropped_pts = AV_NOPTS_VALUE;
1182             }
1183             SDL_UnlockMutex(is->pictq_mutex);
1184             // nothing to do, no picture to display in the queue
1185         } else {
1186             double last_duration, duration, delay;
1187             /* dequeue the picture */
1188             vp = &is->pictq[is->pictq_rindex];
1189
1190             if (vp->skip) {
1191                 pictq_next_picture(is);
1192                 goto retry;
1193             }
1194
1195             if (is->paused)
1196                 goto display;
1197
1198             /* compute nominal last_duration */
1199             last_duration = vp->pts - is->frame_last_pts;
1200             if (last_duration > 0 && last_duration < 10.0) {
1201                 /* if duration of the last frame was sane, update last_duration in video state */
1202                 is->frame_last_duration = last_duration;
1203             }
1204             delay = compute_target_delay(is->frame_last_duration, is);
1205
1206             time= av_gettime()/1000000.0;
1207             if (time < is->frame_timer + delay)
1208                 return;
1209
1210             if (delay > 0)
1211                 is->frame_timer += delay * FFMAX(1, floor((time-is->frame_timer) / delay));
1212
1213             SDL_LockMutex(is->pictq_mutex);
1214             update_video_pts(is, vp->pts, vp->pos);
1215             SDL_UnlockMutex(is->pictq_mutex);
1216
1217             if (is->pictq_size > 1) {
1218                 VideoPicture *nextvp = &is->pictq[(is->pictq_rindex + 1) % VIDEO_PICTURE_QUEUE_SIZE];
1219                 duration = nextvp->pts - vp->pts;
1220                 if((framedrop>0 || (framedrop && is->audio_st)) && time > is->frame_timer + duration){
1221                     is->frame_drops_late++;
1222                     pictq_next_picture(is);
1223                     goto retry;
1224                 }
1225             }
1226
1227             if (is->subtitle_st) {
1228                 if (is->subtitle_stream_changed) {
1229                     SDL_LockMutex(is->subpq_mutex);
1230
1231                     while (is->subpq_size) {
1232                         free_subpicture(&is->subpq[is->subpq_rindex]);
1233
1234                         /* update queue size and signal for next picture */
1235                         if (++is->subpq_rindex == SUBPICTURE_QUEUE_SIZE)
1236                             is->subpq_rindex = 0;
1237
1238                         is->subpq_size--;
1239                     }
1240                     is->subtitle_stream_changed = 0;
1241
1242                     SDL_CondSignal(is->subpq_cond);
1243                     SDL_UnlockMutex(is->subpq_mutex);
1244                 } else {
1245                     if (is->subpq_size > 0) {
1246                         sp = &is->subpq[is->subpq_rindex];
1247
1248                         if (is->subpq_size > 1)
1249                             sp2 = &is->subpq[(is->subpq_rindex + 1) % SUBPICTURE_QUEUE_SIZE];
1250                         else
1251                             sp2 = NULL;
1252
1253                         if ((is->video_current_pts > (sp->pts + ((float) sp->sub.end_display_time / 1000)))
1254                                 || (sp2 && is->video_current_pts > (sp2->pts + ((float) sp2->sub.start_display_time / 1000))))
1255                         {
1256                             free_subpicture(sp);
1257
1258                             /* update queue size and signal for next picture */
1259                             if (++is->subpq_rindex == SUBPICTURE_QUEUE_SIZE)
1260                                 is->subpq_rindex = 0;
1261
1262                             SDL_LockMutex(is->subpq_mutex);
1263                             is->subpq_size--;
1264                             SDL_CondSignal(is->subpq_cond);
1265                             SDL_UnlockMutex(is->subpq_mutex);
1266                         }
1267                     }
1268                 }
1269             }
1270
1271 display:
1272             /* display picture */
1273             if (!display_disable)
1274                 video_display(is);
1275
1276             pictq_next_picture(is);
1277         }
1278     } else if (is->audio_st) {
1279         /* draw the next audio frame */
1280
1281         /* if only audio stream, then display the audio bars (better
1282            than nothing, just to test the implementation) */
1283
1284         /* display picture */
1285         if (!display_disable)
1286             video_display(is);
1287     }
1288     is->force_refresh = 0;
1289     if (show_status) {
1290         static int64_t last_time;
1291         int64_t cur_time;
1292         int aqsize, vqsize, sqsize;
1293         double av_diff;
1294
1295         cur_time = av_gettime();
1296         if (!last_time || (cur_time - last_time) >= 30000) {
1297             aqsize = 0;
1298             vqsize = 0;
1299             sqsize = 0;
1300             if (is->audio_st)
1301                 aqsize = is->audioq.size;
1302             if (is->video_st)
1303                 vqsize = is->videoq.size;
1304             if (is->subtitle_st)
1305                 sqsize = is->subtitleq.size;
1306             av_diff = 0;
1307             if (is->audio_st && is->video_st)
1308                 av_diff = get_audio_clock(is) - get_video_clock(is);
1309             printf("%7.2f A-V:%7.3f fd=%4d aq=%5dKB vq=%5dKB sq=%5dB f=%"PRId64"/%"PRId64"   \r",
1310                    get_master_clock(is),
1311                    av_diff,
1312                    is->frame_drops_early + is->frame_drops_late,
1313                    aqsize / 1024,
1314                    vqsize / 1024,
1315                    sqsize,
1316                    is->video_st ? is->video_st->codec->pts_correction_num_faulty_dts : 0,
1317                    is->video_st ? is->video_st->codec->pts_correction_num_faulty_pts : 0);
1318             fflush(stdout);
1319             last_time = cur_time;
1320         }
1321     }
1322 }
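/* video_refresh() is driven by FF_REFRESH_EVENTs from refresh_thread(): it
   returns early while the current time is still before frame_timer + delay,
   drops a late frame (frame_drops_late) when framedrop allows it and the next
   queued picture is already due, retires expired subtitles, and finally
   repaints and advances the picture queue. When show_status is set, the
   status line printed at most every 30 ms shows the master clock, the A-V
   difference, the drop counter and the queue sizes. */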
1323
1324 /* allocate a picture (this needs to be done in the main thread to avoid
1325    potential locking problems) */
1326 static void alloc_picture(VideoState *is)
1327 {
1328     VideoPicture *vp;
1329
1330     vp = &is->pictq[is->pictq_windex];
1331
1332     if (vp->bmp)
1333         SDL_FreeYUVOverlay(vp->bmp);
1334
1335 #if CONFIG_AVFILTER
1336     avfilter_unref_bufferp(&vp->picref);
1337 #endif
1338
1339     video_open(is, 0);
1340
1341     vp->bmp = SDL_CreateYUVOverlay(vp->width, vp->height,
1342                                    SDL_YV12_OVERLAY,
1343                                    screen);
1344     if (!vp->bmp || vp->bmp->pitches[0] < vp->width) {
1345         /* SDL allocates a buffer smaller than requested if the video
1346          * overlay hardware is unable to support the requested size. */
1347         fprintf(stderr, "Error: the video system does not support an image\n"
1348                         "size of %dx%d pixels. Try using -lowres or -vf \"scale=w:h\"\n"
1349                         "to reduce the image size.\n", vp->width, vp->height );
1350         do_exit(is);
1351     }
1352
1353     SDL_LockMutex(is->pictq_mutex);
1354     vp->allocated = 1;
1355     SDL_CondSignal(is->pictq_cond);
1356     SDL_UnlockMutex(is->pictq_mutex);
1357 }
1358
1359 static int queue_picture(VideoState *is, AVFrame *src_frame, double pts1, int64_t pos)
1360 {
1361     VideoPicture *vp;
1362     double frame_delay, pts = pts1;
1363
1364     /* compute the exact PTS for the picture if it is omitted in the stream
1365      * pts1 is the dts of the pkt / pts of the frame */
1366     if (pts != 0) {
1367         /* update video clock with pts, if present */
1368         is->video_clock = pts;
1369     } else {
1370         pts = is->video_clock;
1371     }
1372     /* update video clock for next frame */
1373     frame_delay = av_q2d(is->video_st->codec->time_base);
1374     /* for MPEG2, the frame can be repeated, so we update the
1375        clock accordingly */
1376     frame_delay += src_frame->repeat_pict * (frame_delay * 0.5);
1377     is->video_clock += frame_delay;
1378
1379 #if defined(DEBUG_SYNC) && 0
1380     printf("frame_type=%c clock=%0.3f pts=%0.3f\n",
1381            av_get_picture_type_char(src_frame->pict_type), pts, pts1);
1382 #endif
1383
1384     /* wait until we have space to put a new picture */
1385     SDL_LockMutex(is->pictq_mutex);
1386
1387     /* keep the last already displayed picture in the queue */
1388     while (is->pictq_size >= VIDEO_PICTURE_QUEUE_SIZE - 2 &&
1389            !is->videoq.abort_request) {
1390         SDL_CondWait(is->pictq_cond, is->pictq_mutex);
1391     }
1392     SDL_UnlockMutex(is->pictq_mutex);
1393
1394     if (is->videoq.abort_request)
1395         return -1;
1396
1397     vp = &is->pictq[is->pictq_windex];
1398
1399 #if CONFIG_AVFILTER
1400     vp->sample_aspect_ratio = ((AVFilterBufferRef *)src_frame->opaque)->video->sample_aspect_ratio;
1401 #else
1402     vp->sample_aspect_ratio = av_guess_sample_aspect_ratio(is->ic, is->video_st, src_frame);
1403 #endif
1404
1405     /* alloc or resize hardware picture buffer */
1406     if (!vp->bmp || vp->reallocate || !vp->allocated ||
1407         vp->width  != src_frame->width ||
1408         vp->height != src_frame->height) {
1409         SDL_Event event;
1410
1411         vp->allocated  = 0;
1412         vp->reallocate = 0;
1413         vp->width = src_frame->width;
1414         vp->height = src_frame->height;
1415
1416         /* the allocation must be done in the main thread to avoid
1417            locking problems. */
1418         event.type = FF_ALLOC_EVENT;
1419         event.user.data1 = is;
1420         SDL_PushEvent(&event);
1421
1422         /* wait until the picture is allocated */
1423         SDL_LockMutex(is->pictq_mutex);
1424         while (!vp->allocated && !is->videoq.abort_request) {
1425             SDL_CondWait(is->pictq_cond, is->pictq_mutex);
1426         }
1427         /* if the queue is aborted, we have to pop the pending ALLOC event or wait for the allocation to complete */
1428         if (is->videoq.abort_request && SDL_PeepEvents(&event, 1, SDL_GETEVENT, SDL_EVENTMASK(FF_ALLOC_EVENT)) != 1) {
1429             while (!vp->allocated) {
1430                 SDL_CondWait(is->pictq_cond, is->pictq_mutex);
1431             }
1432         }
1433         SDL_UnlockMutex(is->pictq_mutex);
1434
1435         if (is->videoq.abort_request)
1436             return -1;
1437     }
1438
1439     /* if the frame is not skipped, then display it */
1440     if (vp->bmp) {
1441         AVPicture pict = { { 0 } };
1442 #if CONFIG_AVFILTER
1443         avfilter_unref_bufferp(&vp->picref);
1444         vp->picref = src_frame->opaque;
1445 #endif
1446
1447         /* get a pointer on the bitmap */
1448         SDL_LockYUVOverlay (vp->bmp);
1449
1450         pict.data[0] = vp->bmp->pixels[0];
1451         pict.data[1] = vp->bmp->pixels[2];
1452         pict.data[2] = vp->bmp->pixels[1];
1453
1454         pict.linesize[0] = vp->bmp->pitches[0];
1455         pict.linesize[1] = vp->bmp->pitches[2];
1456         pict.linesize[2] = vp->bmp->pitches[1];
1457
1458 #if CONFIG_AVFILTER
1459         // FIXME use direct rendering
1460         av_picture_copy(&pict, (AVPicture *)src_frame,
1461                         src_frame->format, vp->width, vp->height);
1462 #else
1463         sws_flags = av_get_int(sws_opts, "sws_flags", NULL);
1464         is->img_convert_ctx = sws_getCachedContext(is->img_convert_ctx,
1465             vp->width, vp->height, src_frame->format, vp->width, vp->height,
1466             AV_PIX_FMT_YUV420P, sws_flags, NULL, NULL, NULL);
1467         if (is->img_convert_ctx == NULL) {
1468             fprintf(stderr, "Cannot initialize the conversion context\n");
1469             exit(1);
1470         }
1471         sws_scale(is->img_convert_ctx, src_frame->data, src_frame->linesize,
1472                   0, vp->height, pict.data, pict.linesize);
1473 #endif
1474         /* update the bitmap content */
1475         SDL_UnlockYUVOverlay(vp->bmp);
1476
1477         vp->pts = pts;
1478         vp->pos = pos;
1479         vp->skip = 0;
1480
1481         /* now we can update the picture count */
1482         if (++is->pictq_windex == VIDEO_PICTURE_QUEUE_SIZE)
1483             is->pictq_windex = 0;
1484         SDL_LockMutex(is->pictq_mutex);
1485         is->pictq_size++;
1486         SDL_UnlockMutex(is->pictq_mutex);
1487     }
1488     return 0;
1489 }
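/* The video clock advances by one nominal frame duration per decoded frame,
   plus half a duration per repeat_pict unit, so e.g. a frame with
   repeat_pict = 1 (as produced by 3:2 pulldown) accounts for 1.5 frame
   intervals; frames without a usable pts inherit the extrapolated clock. */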
1490
1491 static int get_video_frame(VideoState *is, AVFrame *frame, int64_t *pts, AVPacket *pkt)
1492 {
1493     int got_picture, i;
1494
1495     if (packet_queue_get(&is->videoq, pkt, 1) < 0)
1496         return -1;
1497
1498     if (pkt->data == flush_pkt.data) {
1499         avcodec_flush_buffers(is->video_st->codec);
1500
1501         SDL_LockMutex(is->pictq_mutex);
1502         // Make sure there are no long delay timers (ideally we should just flush the queue, but that's harder)
1503         for (i = 0; i < VIDEO_PICTURE_QUEUE_SIZE; i++) {
1504             is->pictq[i].skip = 1;
1505         }
1506         while (is->pictq_size && !is->videoq.abort_request) {
1507             SDL_CondWait(is->pictq_cond, is->pictq_mutex);
1508         }
1509         is->video_current_pos = -1;
1510         is->frame_last_pts = AV_NOPTS_VALUE;
1511         is->frame_last_duration = 0;
1512         is->frame_timer = (double)av_gettime() / 1000000.0;
1513         is->frame_last_dropped_pts = AV_NOPTS_VALUE;
1514         SDL_UnlockMutex(is->pictq_mutex);
1515
1516         return 0;
1517     }
1518
1519     if(avcodec_decode_video2(is->video_st->codec, frame, &got_picture, pkt) < 0)
1520         return 0;
1521
1522     if (got_picture) {
1523         int ret = 1;
1524
1525         if (decoder_reorder_pts == -1) {
1526             *pts = av_frame_get_best_effort_timestamp(frame);
1527         } else if (decoder_reorder_pts) {
1528             *pts = frame->pkt_pts;
1529         } else {
1530             *pts = frame->pkt_dts;
1531         }
1532
1533         if (*pts == AV_NOPTS_VALUE) {
1534             *pts = 0;
1535         }
1536
1537         if (((is->av_sync_type == AV_SYNC_AUDIO_MASTER && is->audio_st) || is->av_sync_type == AV_SYNC_EXTERNAL_CLOCK) &&
1538              (framedrop>0 || (framedrop && is->audio_st))) {
1539             SDL_LockMutex(is->pictq_mutex);
1540             if (is->frame_last_pts != AV_NOPTS_VALUE && *pts) {
1541                 double clockdiff = get_video_clock(is) - get_master_clock(is);
1542                 double dpts = av_q2d(is->video_st->time_base) * *pts;
1543                 double ptsdiff = dpts - is->frame_last_pts;
1544                 if (fabs(clockdiff) < AV_NOSYNC_THRESHOLD &&
1545                      ptsdiff > 0 && ptsdiff < AV_NOSYNC_THRESHOLD &&
1546                      clockdiff + ptsdiff - is->frame_last_filter_delay < 0) {
1547                     is->frame_last_dropped_pos = pkt->pos;
1548                     is->frame_last_dropped_pts = dpts;
1549                     is->frame_drops_early++;
1550                     ret = 0;
1551                 }
1552             }
1553             SDL_UnlockMutex(is->pictq_mutex);
1554         }
1555
1556         return ret;
1557     }
1558     return 0;
1559 }
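/* Early frame dropping: when video is not the master clock and framedrop is
   enabled, a frame is discarded right after decoding if the video clock is
   behind the master and the frame's pts step, minus the last measured filter
   delay, would still leave it behind; its pts/pos are remembered in
   frame_last_dropped_* so video_refresh() can keep the displayed clock
   moving. */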
1560
1561 #if CONFIG_AVFILTER
1562 static int configure_filtergraph(AVFilterGraph *graph, const char *filtergraph,
1563                                  AVFilterContext *source_ctx, AVFilterContext *sink_ctx)
1564 {
1565     int ret;
1566     AVFilterInOut *outputs = NULL, *inputs = NULL;
1567
1568     if (filtergraph) {
1569         outputs = avfilter_inout_alloc();
1570         inputs  = avfilter_inout_alloc();
1571         if (!outputs || !inputs) {
1572             ret = AVERROR(ENOMEM);
1573             goto fail;
1574         }
1575
1576         outputs->name       = av_strdup("in");
1577         outputs->filter_ctx = source_ctx;
1578         outputs->pad_idx    = 0;
1579         outputs->next       = NULL;
1580
1581         inputs->name        = av_strdup("out");
1582         inputs->filter_ctx  = sink_ctx;
1583         inputs->pad_idx     = 0;
1584         inputs->next        = NULL;
1585
1586         if ((ret = avfilter_graph_parse(graph, filtergraph, &inputs, &outputs, NULL)) < 0)
1587             goto fail;
1588     } else {
1589         if ((ret = avfilter_link(source_ctx, 0, sink_ctx, 0)) < 0)
1590             goto fail;
1591     }
1592
1593     return avfilter_graph_config(graph, NULL);
1594 fail:
1595     avfilter_inout_free(&outputs);
1596     avfilter_inout_free(&inputs);
1597     return ret;
1598 }
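/* The labels mirror the implicit pads of a filtergraph string: the buffer
   source's output is attached under "in" and the sink's input under "out",
   so a user string such as "yadif,scale=640:-1" (purely an illustrative
   example) is parsed in between; with no string the source is linked directly
   to the sink. */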
1599
1600 static int configure_video_filters(AVFilterGraph *graph, VideoState *is, const char *vfilters)
1601 {
1602     static const enum AVPixelFormat pix_fmts[] = { AV_PIX_FMT_YUV420P, AV_PIX_FMT_NONE };
1603     char sws_flags_str[128];
1604     char buffersrc_args[256];
1605     int ret;
1606     AVBufferSinkParams *buffersink_params = av_buffersink_params_alloc();
1607     AVFilterContext *filt_src = NULL, *filt_out = NULL, *filt_format, *filt_crop;
1608     AVCodecContext *codec = is->video_st->codec;
1609
1610     snprintf(sws_flags_str, sizeof(sws_flags_str), "flags=%d", sws_flags);
1611     graph->scale_sws_opts = av_strdup(sws_flags_str);
1612
1613     snprintf(buffersrc_args, sizeof(buffersrc_args),
1614              "video_size=%dx%d:pix_fmt=%d:time_base=%d/%d:pixel_aspect=%d/%d",
1615              codec->width, codec->height, codec->pix_fmt,
1616              is->video_st->time_base.num, is->video_st->time_base.den,
1617              codec->sample_aspect_ratio.num, FFMAX(codec->sample_aspect_ratio.den, 1));
1618
1619     if ((ret = avfilter_graph_create_filter(&filt_src,
1620                                             avfilter_get_by_name("buffer"),
1621                                             "ffplay_buffer", buffersrc_args, NULL,
1622                                             graph)) < 0)
1623         return ret;
1624
1625     buffersink_params->pixel_fmts = pix_fmts;
1626     ret = avfilter_graph_create_filter(&filt_out,
1627                                        avfilter_get_by_name("ffbuffersink"),
1628                                        "ffplay_buffersink", NULL, buffersink_params, graph);
1629     av_freep(&buffersink_params);
1630     if (ret < 0)
1631         return ret;
1632
1633     /* The SDL YUV code does not handle odd width/height with some driver
1634      * combinations, so we crop the picture to an even width/height. */
1635     if ((ret = avfilter_graph_create_filter(&filt_crop,
1636                                             avfilter_get_by_name("crop"),
1637                                             "ffplay_crop", "floor(in_w/2)*2:floor(in_h/2)*2", NULL, graph)) < 0)
1638         return ret;
1639     if ((ret = avfilter_graph_create_filter(&filt_format,
1640                                             avfilter_get_by_name("format"),
1641                                             "format", "yuv420p", NULL, graph)) < 0)
1642         return ret;
1643     if ((ret = avfilter_link(filt_crop, 0, filt_format, 0)) < 0)
1644         return ret;
1645     if ((ret = avfilter_link(filt_format, 0, filt_out, 0)) < 0)
1646         return ret;
1647
1648     if ((ret = configure_filtergraph(graph, vfilters, filt_src, filt_crop)) < 0)
1649         return ret;
1650
1651     is->in_video_filter  = filt_src;
1652     is->out_video_filter = filt_out;
1653
1654     return ret;
1655 }
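/* Resulting filter chain built by configure_video_filters(), shown left to
 * right for the default case:
 *
 *   buffer ("ffplay_buffer")
 *     -> [optional user chain from -vf, spliced in by configure_filtergraph()]
 *     -> crop  (force even width/height for SDL)
 *     -> format (force yuv420p)
 *     -> ffbuffersink ("ffplay_buffersink")
 */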
1656
1657 #endif  /* CONFIG_AVFILTER */
1658
1659 static int video_thread(void *arg)
1660 {
1661     AVPacket pkt = { 0 };
1662     VideoState *is = arg;
1663     AVFrame *frame = avcodec_alloc_frame();
1664     int64_t pts_int = AV_NOPTS_VALUE, pos = -1;
1665     double pts;
1666     int ret;
1667
1668 #if CONFIG_AVFILTER
1669     AVCodecContext *codec = is->video_st->codec;
1670     AVFilterGraph *graph = avfilter_graph_alloc();
1671     AVFilterContext *filt_out = NULL, *filt_in = NULL;
1672     int last_w = 0;
1673     int last_h = 0;
1674     enum AVPixelFormat last_format = -2;
1675
1676     if (codec->codec->capabilities & CODEC_CAP_DR1) {
1677         is->use_dr1 = 1;
1678         codec->get_buffer     = codec_get_buffer;
1679         codec->release_buffer = codec_release_buffer;
1680         codec->opaque         = &is->buffer_pool;
1681     }
1682 #endif
1683
1684     for (;;) {
1685 #if CONFIG_AVFILTER
1686         AVFilterBufferRef *picref;
1687         AVRational tb;
1688 #endif
1689         while (is->paused && !is->videoq.abort_request)
1690             SDL_Delay(10);
1691
1692         avcodec_get_frame_defaults(frame);
1693         av_free_packet(&pkt);
1694
1695         ret = get_video_frame(is, frame, &pts_int, &pkt);
1696         if (ret < 0)
1697             goto the_end;
1698
1699         if (!ret)
1700             continue;
1701
1702 #if CONFIG_AVFILTER
1703         if (   last_w != is->video_st->codec->width
1704             || last_h != is->video_st->codec->height
1705             || last_format != is->video_st->codec->pix_fmt) {
1706             av_log(NULL, AV_LOG_INFO, "Frame changed from size:%dx%d to size:%dx%d\n",
1707                    last_w, last_h, is->video_st->codec->width, is->video_st->codec->height);
1708             avfilter_graph_free(&graph);
1709             graph = avfilter_graph_alloc();
1710             if ((ret = configure_video_filters(graph, is, vfilters)) < 0) {
1711                 SDL_Event event;
1712                 event.type = FF_QUIT_EVENT;
1713                 event.user.data1 = is;
1714                 SDL_PushEvent(&event);
1715                 av_free_packet(&pkt);
1716                 goto the_end;
1717             }
1718             filt_in  = is->in_video_filter;
1719             filt_out = is->out_video_filter;
1720             last_w = is->video_st->codec->width;
1721             last_h = is->video_st->codec->height;
1722             last_format = is->video_st->codec->pix_fmt;
1723         }
1724
1725         frame->pts = pts_int;
1726         frame->sample_aspect_ratio = av_guess_sample_aspect_ratio(is->ic, is->video_st, frame);
1727         if (is->use_dr1 && frame->opaque) {
1728             FrameBuffer      *buf = frame->opaque;
1729             AVFilterBufferRef *fb = avfilter_get_video_buffer_ref_from_arrays(
1730                                         frame->data, frame->linesize,
1731                                         AV_PERM_READ | AV_PERM_PRESERVE,
1732                                         frame->width, frame->height,
1733                                         frame->format);
1734
1735             avfilter_copy_frame_props(fb, frame);
1736             fb->buf->priv           = buf;
1737             fb->buf->free           = filter_release_buffer;
1738
1739             buf->refcount++;
1740             av_buffersrc_add_ref(filt_in, fb, AV_BUFFERSRC_FLAG_NO_COPY);
1741
1742         } else
1743             av_buffersrc_write_frame(filt_in, frame);
1744
1745         av_free_packet(&pkt);
1746
1747         while (ret >= 0) {
1748             is->frame_last_returned_time = av_gettime() / 1000000.0;
1749
1750             ret = av_buffersink_get_buffer_ref(filt_out, &picref, 0);
1751             if (ret < 0) {
1752                 ret = 0;
1753                 break;
1754             }
1755
1756             is->frame_last_filter_delay = av_gettime() / 1000000.0 - is->frame_last_returned_time;
1757             if (fabs(is->frame_last_filter_delay) > AV_NOSYNC_THRESHOLD / 10.0)
1758                 is->frame_last_filter_delay = 0;
1759
1760             avfilter_copy_buf_props(frame, picref);
1761
1762             pts_int = picref->pts;
1763             tb      = filt_out->inputs[0]->time_base;
1764             pos     = picref->pos;
1765             frame->opaque = picref;
1766
1767             if (av_cmp_q(tb, is->video_st->time_base)) {
1768                 av_unused int64_t pts1 = pts_int;
1769                 pts_int = av_rescale_q(pts_int, tb, is->video_st->time_base);
1770                 av_dlog(NULL, "video_thread(): "
1771                         "tb:%d/%d pts:%"PRId64" -> tb:%d/%d pts:%"PRId64"\n",
1772                         tb.num, tb.den, pts1,
1773                         is->video_st->time_base.num, is->video_st->time_base.den, pts_int);
1774             }
1775             pts = pts_int * av_q2d(is->video_st->time_base);
1776             ret = queue_picture(is, frame, pts, pos);
1777         }
1778 #else
1779         pts = pts_int * av_q2d(is->video_st->time_base);
1780         ret = queue_picture(is, frame, pts, pkt.pos);
1781 #endif
1782
1783         if (ret < 0)
1784             goto the_end;
1785
1786         if (is->step)
1787             stream_toggle_pause(is);
1788     }
1789  the_end:
1790     avcodec_flush_buffers(is->video_st->codec);
1791 #if CONFIG_AVFILTER
1792     avfilter_graph_free(&graph);
1793 #endif
1794     av_free_packet(&pkt);
1795     avcodec_free_frame(&frame);
1796     return 0;
1797 }
1798
1799 static int subtitle_thread(void *arg)
1800 {
1801     VideoState *is = arg;
1802     SubPicture *sp;
1803     AVPacket pkt1, *pkt = &pkt1;
1804     int got_subtitle;
1805     double pts;
1806     int i, j;
1807     int r, g, b, y, u, v, a;
1808
1809     for (;;) {
1810         while (is->paused && !is->subtitleq.abort_request) {
1811             SDL_Delay(10);
1812         }
1813         if (packet_queue_get(&is->subtitleq, pkt, 1) < 0)
1814             break;
1815
1816         if (pkt->data == flush_pkt.data) {
1817             avcodec_flush_buffers(is->subtitle_st->codec);
1818             continue;
1819         }
1820         SDL_LockMutex(is->subpq_mutex);
1821         while (is->subpq_size >= SUBPICTURE_QUEUE_SIZE &&
1822                !is->subtitleq.abort_request) {
1823             SDL_CondWait(is->subpq_cond, is->subpq_mutex);
1824         }
1825         SDL_UnlockMutex(is->subpq_mutex);
1826
1827         if (is->subtitleq.abort_request)
1828             return 0;
1829
1830         sp = &is->subpq[is->subpq_windex];
1831
1832         /* NOTE: pts is the PTS of the _first_ subtitle beginning in
1833            this packet, if any */
1834         pts = 0;
1835         if (pkt->pts != AV_NOPTS_VALUE)
1836             pts = av_q2d(is->subtitle_st->time_base) * pkt->pts;
1837
1838         avcodec_decode_subtitle2(is->subtitle_st->codec, &sp->sub,
1839                                  &got_subtitle, pkt);
1840         if (got_subtitle && sp->sub.format == 0) {
1841             if (sp->sub.pts != AV_NOPTS_VALUE)
1842                 pts = sp->sub.pts / (double)AV_TIME_BASE;
1843             sp->pts = pts;
1844
1845             for (i = 0; i < sp->sub.num_rects; i++)
1846             {
1847                 for (j = 0; j < sp->sub.rects[i]->nb_colors; j++)
1848                 {
1849                     RGBA_IN(r, g, b, a, (uint32_t*)sp->sub.rects[i]->pict.data[1] + j);
1850                     y = RGB_TO_Y_CCIR(r, g, b);
1851                     u = RGB_TO_U_CCIR(r, g, b, 0);
1852                     v = RGB_TO_V_CCIR(r, g, b, 0);
1853                     YUVA_OUT((uint32_t*)sp->sub.rects[i]->pict.data[1] + j, y, u, v, a);
1854                 }
1855             }
1856
1857             /* now we can update the picture count */
1858             if (++is->subpq_windex == SUBPICTURE_QUEUE_SIZE)
1859                 is->subpq_windex = 0;
1860             SDL_LockMutex(is->subpq_mutex);
1861             is->subpq_size++;
1862             SDL_UnlockMutex(is->subpq_mutex);
1863         }
1864         av_free_packet(pkt);
1865     }
1866     return 0;
1867 }
1868
1869 /* copy samples for viewing in the audio display (waveform/RDFT) window */
1870 static void update_sample_display(VideoState *is, short *samples, int samples_size)
1871 {
1872     int size, len;
1873
1874     size = samples_size / sizeof(short);
1875     while (size > 0) {
1876         len = SAMPLE_ARRAY_SIZE - is->sample_array_index;
1877         if (len > size)
1878             len = size;
1879         memcpy(is->sample_array + is->sample_array_index, samples, len * sizeof(short));
1880         samples += len;
1881         is->sample_array_index += len;
1882         if (is->sample_array_index >= SAMPLE_ARRAY_SIZE)
1883             is->sample_array_index = 0;
1884         size -= len;
1885     }
1886 }
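/* update_sample_display() treats sample_array as a circular buffer of
 * SAMPLE_ARRAY_SIZE shorts: sample_array_index marks where the next chunk is
 * written and wraps back to 0 at the end.  Worked example with hypothetical
 * numbers: with SAMPLE_ARRAY_SIZE = 8*65536 = 524288 and
 * sample_array_index = 524200, a 200-sample chunk is split into an 88-sample
 * copy at the tail of the array followed by a 112-sample copy at index 0.
 */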
1887
1888 /* return the wanted number of samples to get better sync if sync_type is video
1889  * or external master clock */
1890 static int synchronize_audio(VideoState *is, int nb_samples)
1891 {
1892     int wanted_nb_samples = nb_samples;
1893
1894     /* if not master, then we try to remove or add samples to correct the clock */
1895     if (((is->av_sync_type == AV_SYNC_VIDEO_MASTER && is->video_st) ||
1896          is->av_sync_type == AV_SYNC_EXTERNAL_CLOCK)) {
1897         double diff, avg_diff;
1898         int min_nb_samples, max_nb_samples;
1899
1900         diff = get_audio_clock(is) - get_master_clock(is);
1901
1902         if (fabs(diff) < AV_NOSYNC_THRESHOLD) {
1903             is->audio_diff_cum = diff + is->audio_diff_avg_coef * is->audio_diff_cum;
1904             if (is->audio_diff_avg_count < AUDIO_DIFF_AVG_NB) {
1905                 /* not enough measures to have a correct estimate */
1906                 is->audio_diff_avg_count++;
1907             } else {
1908                 /* estimate the A-V difference */
1909                 avg_diff = is->audio_diff_cum * (1.0 - is->audio_diff_avg_coef);
1910
1911                 if (fabs(avg_diff) >= is->audio_diff_threshold) {
1912                     wanted_nb_samples = nb_samples + (int)(diff * is->audio_src.freq);
1913                     min_nb_samples = ((nb_samples * (100 - SAMPLE_CORRECTION_PERCENT_MAX) / 100));
1914                     max_nb_samples = ((nb_samples * (100 + SAMPLE_CORRECTION_PERCENT_MAX) / 100));
1915                     wanted_nb_samples = FFMIN(FFMAX(wanted_nb_samples, min_nb_samples), max_nb_samples);
1916                 }
1917                 av_dlog(NULL, "diff=%f adiff=%f sample_diff=%d apts=%0.3f vpts=%0.3f %f\n",
1918                         diff, avg_diff, wanted_nb_samples - nb_samples,
1919                         is->audio_clock, is->video_clock, is->audio_diff_threshold);
1920             }
1921         } else {
1922             /* difference is too big: may be due to initial PTS errors, so
1923                reset the A-V filter */
1924             is->audio_diff_avg_count = 0;
1925             is->audio_diff_cum       = 0;
1926         }
1927     }
1928
1929     return wanted_nb_samples;
1930 }
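/* The averaging in synchronize_audio() is an exponentially weighted mean:
 * with coef = exp(log(0.01) / AUDIO_DIFF_AVG_NB) ~= 0.794 (set in
 * stream_component_open() below), audio_diff_cum accumulates
 *
 *     cum_n = diff_n + coef * cum_(n-1)
 *
 * so avg_diff = cum_n * (1 - coef) is a running mean in which a measurement
 * AUDIO_DIFF_AVG_NB = 20 frames old is weighted by coef^20 = 0.01 relative
 * to the newest.  Worked example with hypothetical numbers: a steady diff of
 * +30 ms gives avg_diff ~= +30 ms once past the warm-up; if that exceeds
 * audio_diff_threshold, 1024 decoded samples at 48 kHz are stretched to
 * 1024 + 0.030*48000 = 2464, then clamped to the +10% limit, i.e. 1126.
 */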
1931
1932 /* decode one audio frame and return its uncompressed size */
1933 static int audio_decode_frame(VideoState *is, double *pts_ptr)
1934 {
1935     AVPacket *pkt_temp = &is->audio_pkt_temp;
1936     AVPacket *pkt = &is->audio_pkt;
1937     AVCodecContext *dec = is->audio_st->codec;
1938     int len1, len2, data_size, resampled_data_size;
1939     int64_t dec_channel_layout;
1940     int got_frame;
1941     double pts;
1942     int new_packet = 0;
1943     int flush_complete = 0;
1944     int wanted_nb_samples;
1945
1946     for (;;) {
1947         /* NOTE: the audio packet can contain several frames */
1948         while (pkt_temp->size > 0 || (!pkt_temp->data && new_packet)) {
1949             if (!is->frame) {
1950                 if (!(is->frame = avcodec_alloc_frame()))
1951                     return AVERROR(ENOMEM);
1952             } else
1953                 avcodec_get_frame_defaults(is->frame);
1954
1955             if (is->paused)
1956                 return -1;
1957
1958             if (flush_complete)
1959                 break;
1960             new_packet = 0;
1961             len1 = avcodec_decode_audio4(dec, is->frame, &got_frame, pkt_temp);
1962             if (len1 < 0) {
1963                 /* if error, we skip the frame */
1964                 pkt_temp->size = 0;
1965                 break;
1966             }
1967
1968             pkt_temp->data += len1;
1969             pkt_temp->size -= len1;
1970
1971             if (!got_frame) {
1972                 /* stop sending empty packets if the decoder is finished */
1973                 if (!pkt_temp->data && dec->codec->capabilities & CODEC_CAP_DELAY)
1974                     flush_complete = 1;
1975                 continue;
1976             }
1977             data_size = av_samples_get_buffer_size(NULL, is->frame->channels,
1978                                                    is->frame->nb_samples,
1979                                                    is->frame->format, 1);
1980
1981             dec_channel_layout =
1982                 (is->frame->channel_layout && is->frame->channels == av_get_channel_layout_nb_channels(is->frame->channel_layout)) ?
1983                 is->frame->channel_layout : av_get_default_channel_layout(is->frame->channels);
1984             wanted_nb_samples = synchronize_audio(is, is->frame->nb_samples);
1985
1986             if (is->frame->format        != is->audio_src.fmt            ||
1987                 dec_channel_layout       != is->audio_src.channel_layout ||
1988                 is->frame->sample_rate   != is->audio_src.freq           ||
1989                 (wanted_nb_samples       != is->frame->nb_samples && !is->swr_ctx)) {
1990                 swr_free(&is->swr_ctx);
1991                 is->swr_ctx = swr_alloc_set_opts(NULL,
1992                                                  is->audio_tgt.channel_layout, is->audio_tgt.fmt, is->audio_tgt.freq,
1993                                                  dec_channel_layout,           is->frame->format, is->frame->sample_rate,
1994                                                  0, NULL);
1995                 if (!is->swr_ctx || swr_init(is->swr_ctx) < 0) {
1996                     fprintf(stderr, "Cannot create sample rate converter for conversion of %d Hz %s %d channels to %d Hz %s %d channels!\n",
1997                         is->frame->sample_rate,   av_get_sample_fmt_name(is->frame->format), (int)is->frame->channels,
1998                         is->audio_tgt.freq, av_get_sample_fmt_name(is->audio_tgt.fmt), is->audio_tgt.channels);
1999                     break;
2000                 }
2001                 is->audio_src.channel_layout = dec_channel_layout;
2002                 is->audio_src.channels = is->frame->channels;
2003                 is->audio_src.freq = is->frame->sample_rate;
2004                 is->audio_src.fmt = is->frame->format;
2005             }
2006
2007             if (is->swr_ctx) {
2008                 const uint8_t **in = (const uint8_t **)is->frame->extended_data;
2009                 uint8_t *out[] = {is->audio_buf2};
2010                 int out_count = sizeof(is->audio_buf2) / is->audio_tgt.channels / av_get_bytes_per_sample(is->audio_tgt.fmt);
2011                 if (wanted_nb_samples != is->frame->nb_samples) {
2012                     if (swr_set_compensation(is->swr_ctx, (wanted_nb_samples - is->frame->nb_samples) * is->audio_tgt.freq / is->frame->sample_rate,
2013                                                 wanted_nb_samples * is->audio_tgt.freq / is->frame->sample_rate) < 0) {
2014                         fprintf(stderr, "swr_set_compensation() failed\n");
2015                         break;
2016                     }
2017                 }
2018                 len2 = swr_convert(is->swr_ctx, out, out_count, in, is->frame->nb_samples);
2019                 if (len2 < 0) {
2020                     fprintf(stderr, "swr_convert() failed\n");
2021                     break;
2022                 }
2023                 if (len2 == out_count) {
2024                     fprintf(stderr, "warning: audio buffer is probably too small\n");
2025                     swr_init(is->swr_ctx);
2026                 }
2027                 is->audio_buf = is->audio_buf2;
2028                 resampled_data_size = len2 * is->audio_tgt.channels * av_get_bytes_per_sample(is->audio_tgt.fmt);
2029             } else {
2030                 is->audio_buf = is->frame->data[0];
2031                 resampled_data_size = data_size;
2032             }
2033
2034             /* if no pts, then compute it */
2035             pts = is->audio_clock;
2036             *pts_ptr = pts;
2037             is->audio_clock += (double)data_size /
2038                 (is->frame->channels * is->frame->sample_rate * av_get_bytes_per_sample(is->frame->format));
2039 #ifdef DEBUG
2040             {
2041                 static double last_clock;
2042                 printf("audio: delay=%0.3f clock=%0.3f pts=%0.3f\n",
2043                        is->audio_clock - last_clock,
2044                        is->audio_clock, pts);
2045                 last_clock = is->audio_clock;
2046             }
2047 #endif
2048             return resampled_data_size;
2049         }
2050
2051         /* free the current packet */
2052         if (pkt->data)
2053             av_free_packet(pkt);
2054         memset(pkt_temp, 0, sizeof(*pkt_temp));
2055
2056         if (is->paused || is->audioq.abort_request) {
2057             return -1;
2058         }
2059
2060         if (is->audioq.nb_packets == 0)
2061             SDL_CondSignal(is->continue_read_thread);
2062
2063         /* read next packet */
2064         if ((new_packet = packet_queue_get(&is->audioq, pkt, 1)) < 0)
2065             return -1;
2066
2067         if (pkt->data == flush_pkt.data) {
2068             avcodec_flush_buffers(dec);
2069             flush_complete = 0;
2070         }
2071
2072         *pkt_temp = *pkt;
2073
2074         /* update the audio clock with the packet pts, if available */
2075         if (pkt->pts != AV_NOPTS_VALUE) {
2076             is->audio_clock = av_q2d(is->audio_st->time_base)*pkt->pts;
2077         }
2078     }
2079 }
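/* When the wanted sample count differs from what the decoder produced,
 * audio_decode_frame() asks libswresample to stretch or squeeze the audio:
 * swr_set_compensation(ctx, sample_delta, compensation_distance) adds (or
 * drops) sample_delta output samples spread over compensation_distance
 * output samples.  Worked example with hypothetical numbers: a 1024-sample
 * frame decoded at 44.1 kHz, resampled to a 48 kHz target with
 * wanted_nb_samples = 1126, gives
 *
 *     sample_delta          = (1126 - 1024) * 48000 / 44100 = 111
 *     compensation_distance =  1126         * 48000 / 44100 = 1225
 *
 * i.e. roughly one extra sample inserted every 11 output samples.
 */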
2080
2081 /* prepare a new audio buffer */
2082 static void sdl_audio_callback(void *opaque, Uint8 *stream, int len)
2083 {
2084     VideoState *is = opaque;
2085     int audio_size, len1;
2086     int bytes_per_sec;
2087     int frame_size = av_samples_get_buffer_size(NULL, is->audio_tgt.channels, 1, is->audio_tgt.fmt, 1);
2088     double pts;
2089
2090     audio_callback_time = av_gettime();
2091
2092     while (len > 0) {
2093         if (is->audio_buf_index >= is->audio_buf_size) {
2094            audio_size = audio_decode_frame(is, &pts);
2095            if (audio_size < 0) {
2096                 /* if error, just output silence */
2097                is->audio_buf      = is->silence_buf;
2098                is->audio_buf_size = sizeof(is->silence_buf) / frame_size * frame_size;
2099            } else {
2100                if (is->show_mode != SHOW_MODE_VIDEO)
2101                    update_sample_display(is, (int16_t *)is->audio_buf, audio_size);
2102                is->audio_buf_size = audio_size;
2103            }
2104            is->audio_buf_index = 0;
2105         }
2106         len1 = is->audio_buf_size - is->audio_buf_index;
2107         if (len1 > len)
2108             len1 = len;
2109         memcpy(stream, (uint8_t *)is->audio_buf + is->audio_buf_index, len1);
2110         len -= len1;
2111         stream += len1;
2112         is->audio_buf_index += len1;
2113     }
2114     bytes_per_sec = is->audio_tgt.freq * is->audio_tgt.channels * av_get_bytes_per_sample(is->audio_tgt.fmt);
2115     is->audio_write_buf_size = is->audio_buf_size - is->audio_buf_index;
2116     /* Let's assume the audio driver that is used by SDL has two periods. */
2117     is->audio_current_pts = is->audio_clock - (double)(2 * is->audio_hw_buf_size + is->audio_write_buf_size) / bytes_per_sec;
2118     is->audio_current_pts_drift = is->audio_current_pts - audio_callback_time / 1000000.0;
2119 }
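/* The clock bookkeeping at the end of sdl_audio_callback() estimates which
 * PTS is currently being heard: audio_clock refers to the end of the data
 * already handed to SDL, so the bytes still sitting in the two assumed
 * driver periods plus the unread tail of audio_buf are subtracted.  Worked
 * example with hypothetical numbers: 48 kHz stereo S16 gives
 * bytes_per_sec = 48000 * 2 * 2 = 192000; with audio_hw_buf_size = 8192 and
 * audio_write_buf_size = 1024, the playing position lags audio_clock by
 * (2*8192 + 1024) / 192000 ~= 0.091 s.
 */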
2120
2121 static int audio_open(void *opaque, int64_t wanted_channel_layout, int wanted_nb_channels, int wanted_sample_rate, struct AudioParams *audio_hw_params)
2122 {
2123     SDL_AudioSpec wanted_spec, spec;
2124     const char *env;
2125     const int next_nb_channels[] = {0, 0, 1, 6, 2, 6, 4, 6};
2126
2127     env = SDL_getenv("SDL_AUDIO_CHANNELS");
2128     if (env) {
2129         wanted_nb_channels = atoi(env);
2130         wanted_channel_layout = av_get_default_channel_layout(wanted_nb_channels);
2131     }
2132     if (!wanted_channel_layout || wanted_nb_channels != av_get_channel_layout_nb_channels(wanted_channel_layout)) {
2133         wanted_channel_layout = av_get_default_channel_layout(wanted_nb_channels);
2134         wanted_channel_layout &= ~AV_CH_LAYOUT_STEREO_DOWNMIX;
2135     }
2136     wanted_spec.channels = av_get_channel_layout_nb_channels(wanted_channel_layout);
2137     wanted_spec.freq = wanted_sample_rate;
2138     if (wanted_spec.freq <= 0 || wanted_spec.channels <= 0) {
2139         fprintf(stderr, "Invalid sample rate or channel count!\n");
2140         return -1;
2141     }
2142     wanted_spec.format = AUDIO_S16SYS;
2143     wanted_spec.silence = 0;
2144     wanted_spec.samples = SDL_AUDIO_BUFFER_SIZE;
2145     wanted_spec.callback = sdl_audio_callback;
2146     wanted_spec.userdata = opaque;
2147     while (SDL_OpenAudio(&wanted_spec, &spec) < 0) {
2148         fprintf(stderr, "SDL_OpenAudio (%d channels): %s\n", wanted_spec.channels, SDL_GetError());
2149         wanted_spec.channels = next_nb_channels[FFMIN(7, wanted_spec.channels)];
2150         if (!wanted_spec.channels) {
2151             fprintf(stderr, "No more channel combinations to try, audio open failed\n");
2152             return -1;
2153         }
2154         wanted_channel_layout = av_get_default_channel_layout(wanted_spec.channels);
2155     }
2156     if (spec.format != AUDIO_S16SYS) {
2157         fprintf(stderr, "SDL advised audio format %d is not supported!\n", spec.format);
2158         return -1;
2159     }
2160     if (spec.channels != wanted_spec.channels) {
2161         wanted_channel_layout = av_get_default_channel_layout(spec.channels);
2162         if (!wanted_channel_layout) {
2163             fprintf(stderr, "SDL advised channel count %d is not supported!\n", spec.channels);
2164             return -1;
2165         }
2166     }
2167
2168     audio_hw_params->fmt = AV_SAMPLE_FMT_S16;
2169     audio_hw_params->freq = spec.freq;
2170     audio_hw_params->channel_layout = wanted_channel_layout;
2171     audio_hw_params->channels =  spec.channels;
2172     return spec.size;
2173 }
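/* The next_nb_channels[] table above drives the retry loop in audio_open():
 * indexed by the channel count SDL just refused (clamped to 7), it yields
 * the next layout to try, e.g. 8 channels -> 6 (5.1) -> 4 -> 2 (stereo) ->
 * 1 (mono) -> give up, while a refused 3- or 5-channel request jumps to 5.1
 * first.  A plain stereo request therefore only ever falls back to mono
 * before failing.
 */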
2174
2175 /* open a given stream. Return 0 if OK */
2176 static int stream_component_open(VideoState *is, int stream_index)
2177 {
2178     AVFormatContext *ic = is->ic;
2179     AVCodecContext *avctx;
2180     AVCodec *codec;
2181     AVDictionary *opts;
2182     AVDictionaryEntry *t = NULL;
2183
2184     if (stream_index < 0 || stream_index >= ic->nb_streams)
2185         return -1;
2186     avctx = ic->streams[stream_index]->codec;
2187
2188     codec = avcodec_find_decoder(avctx->codec_id);
2189     opts = filter_codec_opts(codec_opts, avctx->codec_id, ic, ic->streams[stream_index], codec);
2190
2191     switch(avctx->codec_type){
2192         case AVMEDIA_TYPE_AUDIO   : is->last_audio_stream    = stream_index; if(audio_codec_name   ) codec= avcodec_find_decoder_by_name(   audio_codec_name); break;
2193         case AVMEDIA_TYPE_SUBTITLE: is->last_subtitle_stream = stream_index; if(subtitle_codec_name) codec= avcodec_find_decoder_by_name(subtitle_codec_name); break;
2194         case AVMEDIA_TYPE_VIDEO   : is->last_video_stream    = stream_index; if(video_codec_name   ) codec= avcodec_find_decoder_by_name(   video_codec_name); break;
2195     }
2196     if (!codec)
2197         return -1;
2198
2199     avctx->workaround_bugs   = workaround_bugs;
2200     avctx->lowres            = lowres;
2201     if(avctx->lowres > codec->max_lowres){
2202         av_log(avctx, AV_LOG_WARNING, "The maximum value for lowres supported by the decoder is %d\n",
2203                 codec->max_lowres);
2204         avctx->lowres= codec->max_lowres;
2205     }
2206     avctx->idct_algo         = idct;
2207     avctx->skip_frame        = skip_frame;
2208     avctx->skip_idct         = skip_idct;
2209     avctx->skip_loop_filter  = skip_loop_filter;
2210     avctx->error_concealment = error_concealment;
2211
2212     if(avctx->lowres) avctx->flags |= CODEC_FLAG_EMU_EDGE;
2213     if (fast)   avctx->flags2 |= CODEC_FLAG2_FAST;
2214     if(codec->capabilities & CODEC_CAP_DR1)
2215         avctx->flags |= CODEC_FLAG_EMU_EDGE;
2216
2217     if (!av_dict_get(opts, "threads", NULL, 0))
2218         av_dict_set(&opts, "threads", "auto", 0);
2219     if (!codec ||
2220         avcodec_open2(avctx, codec, &opts) < 0)
2221         return -1;
2222     if ((t = av_dict_get(opts, "", NULL, AV_DICT_IGNORE_SUFFIX))) {
2223         av_log(NULL, AV_LOG_ERROR, "Option %s not found.\n", t->key);
2224         return AVERROR_OPTION_NOT_FOUND;
2225     }
2226
2227     /* prepare audio output */
2228     if (avctx->codec_type == AVMEDIA_TYPE_AUDIO) {
2229         int audio_hw_buf_size = audio_open(is, avctx->channel_layout, avctx->channels, avctx->sample_rate, &is->audio_src);
2230         if (audio_hw_buf_size < 0)
2231             return -1;
2232         is->audio_hw_buf_size = audio_hw_buf_size;
2233         is->audio_tgt = is->audio_src;
2234     }
2235
2236     ic->streams[stream_index]->discard = AVDISCARD_DEFAULT;
2237     switch (avctx->codec_type) {
2238     case AVMEDIA_TYPE_AUDIO:
2239         is->audio_stream = stream_index;
2240         is->audio_st = ic->streams[stream_index];
2241         is->audio_buf_size  = 0;
2242         is->audio_buf_index = 0;
2243
2244         /* init averaging filter */
2245         is->audio_diff_avg_coef  = exp(log(0.01) / AUDIO_DIFF_AVG_NB);
2246         is->audio_diff_avg_count = 0;
2247         /* since we do not have a precise enough audio FIFO fullness measure,
2248            we only correct audio sync if the error is larger than this threshold */
2249         is->audio_diff_threshold = 2.0 * is->audio_hw_buf_size / av_samples_get_buffer_size(NULL, is->audio_tgt.channels, is->audio_tgt.freq, is->audio_tgt.fmt, 1);
2250
2251         memset(&is->audio_pkt, 0, sizeof(is->audio_pkt));
2252         memset(&is->audio_pkt_temp, 0, sizeof(is->audio_pkt_temp));
2253         packet_queue_start(&is->audioq);
2254         SDL_PauseAudio(0);
2255         break;
2256     case AVMEDIA_TYPE_VIDEO:
2257         is->video_stream = stream_index;
2258         is->video_st = ic->streams[stream_index];
2259
2260         packet_queue_start(&is->videoq);
2261         is->video_tid = SDL_CreateThread(video_thread, is);
2262         break;
2263     case AVMEDIA_TYPE_SUBTITLE:
2264         is->subtitle_stream = stream_index;
2265         is->subtitle_st = ic->streams[stream_index];
2266         packet_queue_start(&is->subtitleq);
2267
2268         is->subtitle_tid = SDL_CreateThread(subtitle_thread, is);
2269         break;
2270     default:
2271         break;
2272     }
2273     return 0;
2274 }
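/* audio_diff_threshold above works out to roughly two hardware buffers
 * expressed in seconds: av_samples_get_buffer_size() called with
 * nb_samples = audio_tgt.freq returns the byte size of one second of output,
 * so the expression is 2 * audio_hw_buf_size / bytes_per_second.  Worked
 * example with hypothetical numbers: an 8192-byte SDL buffer at 48 kHz
 * stereo S16 (192000 bytes/s) gives a threshold of ~0.085 s, below which
 * synchronize_audio() leaves the stream alone.
 */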
2275
2276 static void stream_component_close(VideoState *is, int stream_index)
2277 {
2278     AVFormatContext *ic = is->ic;
2279     AVCodecContext *avctx;
2280
2281     if (stream_index < 0 || stream_index >= ic->nb_streams)
2282         return;
2283     avctx = ic->streams[stream_index]->codec;
2284
2285     switch (avctx->codec_type) {
2286     case AVMEDIA_TYPE_AUDIO:
2287         packet_queue_abort(&is->audioq);
2288
2289         SDL_CloseAudio();
2290
2291         packet_queue_flush(&is->audioq);
2292         av_free_packet(&is->audio_pkt);
2293         swr_free(&is->swr_ctx);
2294         av_freep(&is->audio_buf1);
2295         is->audio_buf = NULL;
2296         avcodec_free_frame(&is->frame);
2297
2298         if (is->rdft) {
2299             av_rdft_end(is->rdft);
2300             av_freep(&is->rdft_data);
2301             is->rdft = NULL;
2302             is->rdft_bits = 0;
2303         }
2304         break;
2305     case AVMEDIA_TYPE_VIDEO:
2306         packet_queue_abort(&is->videoq);
2307
2308         /* note: we also signal this mutex to make sure we unblock the
2309            video thread in all cases */
2310         SDL_LockMutex(is->pictq_mutex);
2311         SDL_CondSignal(is->pictq_cond);
2312         SDL_UnlockMutex(is->pictq_mutex);
2313
2314         SDL_WaitThread(is->video_tid, NULL);
2315
2316         packet_queue_flush(&is->videoq);
2317         break;
2318     case AVMEDIA_TYPE_SUBTITLE:
2319         packet_queue_abort(&is->subtitleq);
2320
2321         /* note: we also signal this mutex to make sure we unblock the
2322            subtitle thread in all cases */
2323         SDL_LockMutex(is->subpq_mutex);
2324         is->subtitle_stream_changed = 1;
2325
2326         SDL_CondSignal(is->subpq_cond);
2327         SDL_UnlockMutex(is->subpq_mutex);
2328
2329         SDL_WaitThread(is->subtitle_tid, NULL);
2330
2331         packet_queue_flush(&is->subtitleq);
2332         break;
2333     default:
2334         break;
2335     }
2336
2337     ic->streams[stream_index]->discard = AVDISCARD_ALL;
2338     avcodec_close(avctx);
2339 #if CONFIG_AVFILTER
2340     free_buffer_pool(&is->buffer_pool);
2341 #endif
2342     switch (avctx->codec_type) {
2343     case AVMEDIA_TYPE_AUDIO:
2344         is->audio_st = NULL;
2345         is->audio_stream = -1;
2346         break;
2347     case AVMEDIA_TYPE_VIDEO:
2348         is->video_st = NULL;
2349         is->video_stream = -1;
2350         break;
2351     case AVMEDIA_TYPE_SUBTITLE:
2352         is->subtitle_st = NULL;
2353         is->subtitle_stream = -1;
2354         break;
2355     default:
2356         break;
2357     }
2358 }
2359
2360 static int decode_interrupt_cb(void *ctx)
2361 {
2362     VideoState *is = ctx;
2363     return is->abort_request;
2364 }
2365
2366 static int is_realtime(AVFormatContext *s)
2367 {
2368     if(   !strcmp(s->iformat->name, "rtp")
2369        || !strcmp(s->iformat->name, "rtsp")
2370        || !strcmp(s->iformat->name, "sdp")
2371     )
2372         return 1;
2373
2374     if(s->pb && (   !strncmp(s->filename, "rtp:", 4)
2375                  || !strncmp(s->filename, "udp:", 4)
2376                 )
2377     )
2378         return 1;
2379     return 0;
2380 }
2381
2382 /* this thread gets the stream from the disk or the network */
2383 static int read_thread(void *arg)
2384 {
2385     VideoState *is = arg;
2386     AVFormatContext *ic = NULL;
2387     int err, i, ret;
2388     int st_index[AVMEDIA_TYPE_NB];
2389     AVPacket pkt1, *pkt = &pkt1;
2390     int eof = 0;
2391     int pkt_in_play_range = 0;
2392     AVDictionaryEntry *t;
2393     AVDictionary **opts;
2394     int orig_nb_streams;
2395     SDL_mutex *wait_mutex = SDL_CreateMutex();
2396
2397     memset(st_index, -1, sizeof(st_index));
2398     is->last_video_stream = is->video_stream = -1;
2399     is->last_audio_stream = is->audio_stream = -1;
2400     is->last_subtitle_stream = is->subtitle_stream = -1;
2401
2402     ic = avformat_alloc_context();
2403     ic->interrupt_callback.callback = decode_interrupt_cb;
2404     ic->interrupt_callback.opaque = is;
2405     err = avformat_open_input(&ic, is->filename, is->iformat, &format_opts);
2406     if (err < 0) {
2407         print_error(is->filename, err);
2408         ret = -1;
2409         goto fail;
2410     }
2411     if ((t = av_dict_get(format_opts, "", NULL, AV_DICT_IGNORE_SUFFIX))) {
2412         av_log(NULL, AV_LOG_ERROR, "Option %s not found.\n", t->key);
2413         ret = AVERROR_OPTION_NOT_FOUND;
2414         goto fail;
2415     }
2416     is->ic = ic;
2417
2418     if (genpts)
2419         ic->flags |= AVFMT_FLAG_GENPTS;
2420
2421     opts = setup_find_stream_info_opts(ic, codec_opts);
2422     orig_nb_streams = ic->nb_streams;
2423
2424     err = avformat_find_stream_info(ic, opts);
2425     if (err < 0) {
2426         fprintf(stderr, "%s: could not find codec parameters\n", is->filename);
2427         ret = -1;
2428         goto fail;
2429     }
2430     for (i = 0; i < orig_nb_streams; i++)
2431         av_dict_free(&opts[i]);
2432     av_freep(&opts);
2433
2434     if (ic->pb)
2435         ic->pb->eof_reached = 0; // FIXME hack, ffplay maybe should not use url_feof() to test for the end
2436
2437     if (seek_by_bytes < 0)
2438         seek_by_bytes = !!(ic->iformat->flags & AVFMT_TS_DISCONT);
2439
2440     /* if seeking was requested, execute it */
2441     if (start_time != AV_NOPTS_VALUE) {
2442         int64_t timestamp;
2443
2444         timestamp = start_time;
2445         /* add the stream start time */
2446         if (ic->start_time != AV_NOPTS_VALUE)
2447             timestamp += ic->start_time;
2448         ret = avformat_seek_file(ic, -1, INT64_MIN, timestamp, INT64_MAX, 0);
2449         if (ret < 0) {
2450             fprintf(stderr, "%s: could not seek to position %0.3f\n",
2451                     is->filename, (double)timestamp / AV_TIME_BASE);
2452         }
2453     }
2454
2455     for (i = 0; i < ic->nb_streams; i++)
2456         ic->streams[i]->discard = AVDISCARD_ALL;
2457     if (!video_disable)
2458         st_index[AVMEDIA_TYPE_VIDEO] =
2459             av_find_best_stream(ic, AVMEDIA_TYPE_VIDEO,
2460                                 wanted_stream[AVMEDIA_TYPE_VIDEO], -1, NULL, 0);
2461     if (!audio_disable)
2462         st_index[AVMEDIA_TYPE_AUDIO] =
2463             av_find_best_stream(ic, AVMEDIA_TYPE_AUDIO,
2464                                 wanted_stream[AVMEDIA_TYPE_AUDIO],
2465                                 st_index[AVMEDIA_TYPE_VIDEO],
2466                                 NULL, 0);
2467     if (!video_disable)
2468         st_index[AVMEDIA_TYPE_SUBTITLE] =
2469             av_find_best_stream(ic, AVMEDIA_TYPE_SUBTITLE,
2470                                 wanted_stream[AVMEDIA_TYPE_SUBTITLE],
2471                                 (st_index[AVMEDIA_TYPE_AUDIO] >= 0 ?
2472                                  st_index[AVMEDIA_TYPE_AUDIO] :
2473                                  st_index[AVMEDIA_TYPE_VIDEO]),
2474                                 NULL, 0);
2475     if (show_status) {
2476         av_dump_format(ic, 0, is->filename, 0);
2477     }
2478
2479     is->show_mode = show_mode;
2480
2481     /* open the streams */
2482     if (st_index[AVMEDIA_TYPE_AUDIO] >= 0) {
2483         stream_component_open(is, st_index[AVMEDIA_TYPE_AUDIO]);
2484     }
2485
2486     ret = -1;
2487     if (st_index[AVMEDIA_TYPE_VIDEO] >= 0) {
2488         ret = stream_component_open(is, st_index[AVMEDIA_TYPE_VIDEO]);
2489     }
2490     is->refresh_tid = SDL_CreateThread(refresh_thread, is);
2491     if (is->show_mode == SHOW_MODE_NONE)
2492         is->show_mode = ret >= 0 ? SHOW_MODE_VIDEO : SHOW_MODE_RDFT;
2493
2494     if (st_index[AVMEDIA_TYPE_SUBTITLE] >= 0) {
2495         stream_component_open(is, st_index[AVMEDIA_TYPE_SUBTITLE]);
2496     }
2497
2498     if (is->video_stream < 0 && is->audio_stream < 0) {
2499         fprintf(stderr, "%s: could not open codecs\n", is->filename);
2500         ret = -1;
2501         goto fail;
2502     }
2503
2504     if (infinite_buffer < 0 && is_realtime(ic))
2505         infinite_buffer = 1;
2506
2507     for (;;) {
2508         if (is->abort_request)
2509             break;
2510         if (is->paused != is->last_paused) {
2511             is->last_paused = is->paused;
2512             if (is->paused)
2513                 is->read_pause_return = av_read_pause(ic);
2514             else
2515                 av_read_play(ic);
2516         }
2517 #if CONFIG_RTSP_DEMUXER || CONFIG_MMSH_PROTOCOL
2518         if (is->paused &&
2519                 (!strcmp(ic->iformat->name, "rtsp") ||
2520                  (ic->pb && !strncmp(input_filename, "mmsh:", 5)))) {
2521             /* wait 10 ms to avoid trying to get another packet */
2522             /* XXX: horrible */
2523             SDL_Delay(10);
2524             continue;
2525         }
2526 #endif
2527         if (is->seek_req) {
2528             int64_t seek_target = is->seek_pos;
2529             int64_t seek_min    = is->seek_rel > 0 ? seek_target - is->seek_rel + 2: INT64_MIN;
2530             int64_t seek_max    = is->seek_rel < 0 ? seek_target - is->seek_rel - 2: INT64_MAX;
2531 // FIXME the +-2 is due to rounding not being done in the correct direction when
2532 //      generating the seek_pos/seek_rel variables
2533
2534             ret = avformat_seek_file(is->ic, -1, seek_min, seek_target, seek_max, is->seek_flags);
2535             if (ret < 0) {
2536                 fprintf(stderr, "%s: error while seeking\n", is->ic->filename);
2537             } else {
2538                 if (is->audio_stream >= 0) {
2539                     packet_queue_flush(&is->audioq);
2540                     packet_queue_put(&is->audioq, &flush_pkt);
2541                 }
2542                 if (is->subtitle_stream >= 0) {
2543                     packet_queue_flush(&is->subtitleq);
2544                     packet_queue_put(&is->subtitleq, &flush_pkt);
2545                 }
2546                 if (is->video_stream >= 0) {
2547                     packet_queue_flush(&is->videoq);
2548                     packet_queue_put(&is->videoq, &flush_pkt);
2549                 }
2550             }
2551             is->seek_req = 0;
2552             eof = 0;
2553         }
2554         if (is->que_attachments_req) {
2555             avformat_queue_attached_pictures(ic);
2556             is->que_attachments_req = 0;
2557         }
2558
2559         /* if the queues are full, no need to read more */
2560         if (infinite_buffer<1 &&
2561               (is->audioq.size + is->videoq.size + is->subtitleq.size > MAX_QUEUE_SIZE
2562             || (   (is->audioq   .nb_packets > MIN_FRAMES || is->audio_stream < 0 || is->audioq.abort_request)
2563                 && (is->videoq   .nb_packets > MIN_FRAMES || is->video_stream < 0 || is->videoq.abort_request)
2564                 && (is->subtitleq.nb_packets > MIN_FRAMES || is->subtitle_stream < 0 || is->subtitleq.abort_request)))) {
2565             /* wait 10 ms */
2566             SDL_LockMutex(wait_mutex);
2567             SDL_CondWaitTimeout(is->continue_read_thread, wait_mutex, 10);
2568             SDL_UnlockMutex(wait_mutex);
2569             continue;
2570         }
2571         if (eof) {
2572             if (is->video_stream >= 0) {
2573                 av_init_packet(pkt);
2574                 pkt->data = NULL;
2575                 pkt->size = 0;
2576                 pkt->stream_index = is->video_stream;
2577                 packet_queue_put(&is->videoq, pkt);
2578             }
2579             if (is->audio_stream >= 0 &&
2580                 is->audio_st->codec->codec->capabilities & CODEC_CAP_DELAY) {
2581                 av_init_packet(pkt);
2582                 pkt->data = NULL;
2583                 pkt->size = 0;
2584                 pkt->stream_index = is->audio_stream;
2585                 packet_queue_put(&is->audioq, pkt);
2586             }
2587             SDL_Delay(10);
2588             if (is->audioq.size + is->videoq.size + is->subtitleq.size == 0) {
2589                 if (loop != 1 && (!loop || --loop)) {
2590                     stream_seek(is, start_time != AV_NOPTS_VALUE ? start_time : 0, 0, 0);
2591                 } else if (autoexit) {
2592                     ret = AVERROR_EOF;
2593                     goto fail;
2594                 }
2595             }
2596             eof=0;
2597             continue;
2598         }
2599         ret = av_read_frame(ic, pkt);
2600         if (ret < 0) {
2601             if (ret == AVERROR_EOF || url_feof(ic->pb))
2602                 eof = 1;
2603             if (ic->pb && ic->pb->error)
2604                 break;
2605             SDL_LockMutex(wait_mutex);
2606             SDL_CondWaitTimeout(is->continue_read_thread, wait_mutex, 10);
2607             SDL_UnlockMutex(wait_mutex);
2608             continue;
2609         }
2610         /* check if packet is in play range specified by user, then queue, otherwise discard */
2611         pkt_in_play_range = duration == AV_NOPTS_VALUE ||
2612                 (pkt->pts - ic->streams[pkt->stream_index]->start_time) *
2613                 av_q2d(ic->streams[pkt->stream_index]->time_base) -
2614                 (double)(start_time != AV_NOPTS_VALUE ? start_time : 0) / 1000000
2615                 <= ((double)duration / 1000000);
2616         if (pkt->stream_index == is->audio_stream && pkt_in_play_range) {
2617             packet_queue_put(&is->audioq, pkt);
2618         } else if (pkt->stream_index == is->video_stream && pkt_in_play_range) {
2619             packet_queue_put(&is->videoq, pkt);
2620         } else if (pkt->stream_index == is->subtitle_stream && pkt_in_play_range) {
2621             packet_queue_put(&is->subtitleq, pkt);
2622         } else {
2623             av_free_packet(pkt);
2624         }
2625     }
2626     /* wait until the end */
2627     while (!is->abort_request) {
2628         SDL_Delay(100);
2629     }
2630
2631     ret = 0;
2632  fail:
2633     /* close each stream */
2634     if (is->audio_stream >= 0)
2635         stream_component_close(is, is->audio_stream);
2636     if (is->video_stream >= 0)
2637         stream_component_close(is, is->video_stream);
2638     if (is->subtitle_stream >= 0)
2639         stream_component_close(is, is->subtitle_stream);
2640     if (is->ic) {
2641         avformat_close_input(&is->ic);
2642     }
2643
2644     if (ret != 0) {
2645         SDL_Event event;
2646
2647         event.type = FF_QUIT_EVENT;
2648         event.user.data1 = is;
2649         SDL_PushEvent(&event);
2650     }
2651     SDL_DestroyMutex(wait_mutex);
2652     return 0;
2653 }
2654
2655 static VideoState *stream_open(const char *filename, AVInputFormat *iformat)
2656 {
2657     VideoState *is;
2658
2659     is = av_mallocz(sizeof(VideoState));
2660     if (!is)
2661         return NULL;
2662     av_strlcpy(is->filename, filename, sizeof(is->filename));
2663     is->iformat = iformat;
2664     is->ytop    = 0;
2665     is->xleft   = 0;
2666
2667     /* start video display */
2668     is->pictq_mutex = SDL_CreateMutex();
2669     is->pictq_cond  = SDL_CreateCond();
2670
2671     is->subpq_mutex = SDL_CreateMutex();
2672     is->subpq_cond  = SDL_CreateCond();
2673
2674     packet_queue_init(&is->videoq);
2675     packet_queue_init(&is->audioq);
2676     packet_queue_init(&is->subtitleq);
2677
2678     is->continue_read_thread = SDL_CreateCond();
2679
2680     is->av_sync_type = av_sync_type;
2681     is->read_tid     = SDL_CreateThread(read_thread, is);
2682     if (!is->read_tid) {
2683         av_free(is);
2684         return NULL;
2685     }
2686     return is;
2687 }
2688
2689 static void stream_cycle_channel(VideoState *is, int codec_type)
2690 {
2691     AVFormatContext *ic = is->ic;
2692     int start_index, stream_index;
2693     int old_index;
2694     AVStream *st;
2695
2696     if (codec_type == AVMEDIA_TYPE_VIDEO) {
2697         start_index = is->last_video_stream;
2698         old_index = is->video_stream;
2699     } else if (codec_type == AVMEDIA_TYPE_AUDIO) {
2700         start_index = is->last_audio_stream;
2701         old_index = is->audio_stream;
2702     } else {
2703         start_index = is->last_subtitle_stream;
2704         old_index = is->subtitle_stream;
2705     }
2706     stream_index = start_index;
2707     for (;;) {
2708         if (++stream_index >= is->ic->nb_streams)
2709         {
2710             if (codec_type == AVMEDIA_TYPE_SUBTITLE)
2711             {
2712                 stream_index = -1;
2713                 is->last_subtitle_stream = -1;
2714                 goto the_end;
2715             }
2716             if (start_index == -1)
2717                 return;
2718             stream_index = 0;
2719         }
2720         if (stream_index == start_index)
2721             return;
2722         st = ic->streams[stream_index];
2723         if (st->codec->codec_type == codec_type) {
2724             /* check that parameters are OK */
2725             switch (codec_type) {
2726             case AVMEDIA_TYPE_AUDIO:
2727                 if (st->codec->sample_rate != 0 &&
2728                     st->codec->channels != 0)
2729                     goto the_end;
2730                 break;
2731             case AVMEDIA_TYPE_VIDEO:
2732             case AVMEDIA_TYPE_SUBTITLE:
2733                 goto the_end;
2734             default:
2735                 break;
2736             }
2737         }
2738     }
2739  the_end:
2740     stream_component_close(is, old_index);
2741     stream_component_open(is, stream_index);
2742     if (codec_type == AVMEDIA_TYPE_VIDEO)
2743         is->que_attachments_req = 1;
2744 }
2745
2746
2747 static void toggle_full_screen(VideoState *is)
2748 {
2749 #if defined(__APPLE__) && SDL_VERSION_ATLEAST(1, 2, 14)
2750     /* OS X needs to reallocate the SDL overlays */
2751     int i;
2752     for (i = 0; i < VIDEO_PICTURE_QUEUE_SIZE; i++)
2753         is->pictq[i].reallocate = 1;
2754 #endif
2755     is_full_screen = !is_full_screen;
2756     video_open(is, 1);
2757 }
2758
2759 static void toggle_pause(VideoState *is)
2760 {
2761     stream_toggle_pause(is);
2762     is->step = 0;
2763 }
2764
2765 static void step_to_next_frame(VideoState *is)
2766 {
2767     /* if the stream is paused, unpause it, then step */
2768     if (is->paused)
2769         stream_toggle_pause(is);
2770     is->step = 1;
2771 }
2772
2773 static void toggle_audio_display(VideoState *is)
2774 {
2775     int bgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0x00);
2776     is->show_mode = (is->show_mode + 1) % SHOW_MODE_NB;
2777     fill_rectangle(screen,
2778                 is->xleft, is->ytop, is->width, is->height,
2779                 bgcolor);
2780     SDL_UpdateRect(screen, is->xleft, is->ytop, is->width, is->height);
2781 }
2782
2783 /* handle an event sent by the GUI */
2784 static void event_loop(VideoState *cur_stream)
2785 {
2786     SDL_Event event;
2787     double incr, pos, frac;
2788
2789     for (;;) {
2790         double x;
2791         SDL_WaitEvent(&event);
2792         switch (event.type) {
2793         case SDL_KEYDOWN:
2794             if (exit_on_keydown) {
2795                 do_exit(cur_stream);
2796                 break;
2797             }
2798             switch (event.key.keysym.sym) {
2799             case SDLK_ESCAPE:
2800             case SDLK_q:
2801                 do_exit(cur_stream);
2802                 break;
2803             case SDLK_f:
2804                 toggle_full_screen(cur_stream);
2805                 cur_stream->force_refresh = 1;
2806                 break;
2807             case SDLK_p:
2808             case SDLK_SPACE:
2809                 toggle_pause(cur_stream);
2810                 break;
2811             case SDLK_s: // S: Step to next frame
2812                 step_to_next_frame(cur_stream);
2813                 break;
2814             case SDLK_a:
2815                 stream_cycle_channel(cur_stream, AVMEDIA_TYPE_AUDIO);
2816                 break;
2817             case SDLK_v:
2818                 stream_cycle_channel(cur_stream, AVMEDIA_TYPE_VIDEO);
2819                 break;
2820             case SDLK_t:
2821                 stream_cycle_channel(cur_stream, AVMEDIA_TYPE_SUBTITLE);
2822                 break;
2823             case SDLK_w:
2824                 toggle_audio_display(cur_stream);
2825                 cur_stream->force_refresh = 1;
2826                 break;
2827             case SDLK_PAGEUP:
2828                 incr = 600.0;
2829                 goto do_seek;
2830             case SDLK_PAGEDOWN:
2831                 incr = -600.0;
2832                 goto do_seek;
2833             case SDLK_LEFT:
2834                 incr = -10.0;
2835                 goto do_seek;
2836             case SDLK_RIGHT:
2837                 incr = 10.0;
2838                 goto do_seek;
2839             case SDLK_UP:
2840                 incr = 60.0;
2841                 goto do_seek;
2842             case SDLK_DOWN:
2843                 incr = -60.0;
2844             do_seek:
2845                     if (seek_by_bytes) {
2846                         if (cur_stream->video_stream >= 0 && cur_stream->video_current_pos >= 0) {
2847                             pos = cur_stream->video_current_pos;
2848                         } else if (cur_stream->audio_stream >= 0 && cur_stream->audio_pkt.pos >= 0) {
2849                             pos = cur_stream->audio_pkt.pos;
2850                         } else
2851                             pos = avio_tell(cur_stream->ic->pb);
2852                         if (cur_stream->ic->bit_rate)
2853                             incr *= cur_stream->ic->bit_rate / 8.0;
2854                         else
2855                             incr *= 180000.0;
2856                         pos += incr;
2857                         stream_seek(cur_stream, pos, incr, 1);
2858                     } else {
2859                         pos = get_master_clock(cur_stream);
2860                         pos += incr;
2861                         stream_seek(cur_stream, (int64_t)(pos * AV_TIME_BASE), (int64_t)(incr * AV_TIME_BASE), 0);
2862                     }
2863                 break;
2864             default:
2865                 break;
2866             }
2867             break;
2868         case SDL_VIDEOEXPOSE:
2869             cur_stream->force_refresh = 1;
2870             break;
2871         case SDL_MOUSEBUTTONDOWN:
2872             if (exit_on_mousedown) {
2873                 do_exit(cur_stream);
2874                 break;
2875             }
2876         case SDL_MOUSEMOTION:
2877             if (event.type == SDL_MOUSEBUTTONDOWN) {
2878                 x = event.button.x;
2879             } else {
2880                 if (event.motion.state != SDL_PRESSED)
2881                     break;
2882                 x = event.motion.x;
2883             }
2884                 if (seek_by_bytes || cur_stream->ic->duration <= 0) {
2885                     uint64_t size =  avio_size(cur_stream->ic->pb);
2886                     stream_seek(cur_stream, size*x/cur_stream->width, 0, 1);
2887                 } else {
2888                     int64_t ts;
2889                     int ns, hh, mm, ss;
2890                     int tns, thh, tmm, tss;
2891                     tns  = cur_stream->ic->duration / 1000000LL;
2892                     thh  = tns / 3600;
2893                     tmm  = (tns % 3600) / 60;
2894                     tss  = (tns % 60);
2895                     frac = x / cur_stream->width;
2896                     ns   = frac * tns;
2897                     hh   = ns / 3600;
2898                     mm   = (ns % 3600) / 60;
2899                     ss   = (ns % 60);
2900                     fprintf(stderr, "Seek to %2.0f%% (%2d:%02d:%02d) of total duration (%2d:%02d:%02d)       \n", frac*100,
2901                             hh, mm, ss, thh, tmm, tss);
2902                     ts = frac * cur_stream->ic->duration;
2903                     if (cur_stream->ic->start_time != AV_NOPTS_VALUE)
2904                         ts += cur_stream->ic->start_time;
2905                     stream_seek(cur_stream, ts, 0, 0);
2906                 }
2907             break;
2908         case SDL_VIDEORESIZE:
2909                 screen = SDL_SetVideoMode(event.resize.w, event.resize.h, 0,
2910                                           SDL_HWSURFACE|SDL_RESIZABLE|SDL_ASYNCBLIT|SDL_HWACCEL);
2911                 screen_width  = cur_stream->width  = event.resize.w;
2912                 screen_height = cur_stream->height = event.resize.h;
2913                 cur_stream->force_refresh = 1;
2914             break;
2915         case SDL_QUIT:
2916         case FF_QUIT_EVENT:
2917             do_exit(cur_stream);
2918             break;
2919         case FF_ALLOC_EVENT:
2920             alloc_picture(event.user.data1);
2921             break;
2922         case FF_REFRESH_EVENT:
2923             video_refresh(event.user.data1);
2924             cur_stream->refresh = 0;
2925             break;
2926         default:
2927             break;
2928         }
2929     }
2930 }
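/* In the byte-based seek path of event_loop() the time increment chosen by
 * the arrow/page keys is converted to a byte offset: incr seconds times
 * bit_rate/8 bytes per second, or a flat 180000 bytes/s (~1.4 Mbit/s) when
 * the container reports no bit rate.  Worked example with hypothetical
 * numbers: a 10 s step in a 4 Mbit/s file advances the position by
 * 10 * 4000000/8 = 5,000,000 bytes.
 */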
2931
2932 static int opt_frame_size(void *optctx, const char *opt, const char *arg)
2933 {
2934     av_log(NULL, AV_LOG_WARNING, "Option -s is deprecated, use -video_size.\n");
2935     return opt_default(NULL, "video_size", arg);
2936 }
2937
2938 static int opt_width(void *optctx, const char *opt, const char *arg)
2939 {
2940     screen_width = parse_number_or_die(opt, arg, OPT_INT64, 1, INT_MAX);
2941     return 0;
2942 }
2943
2944 static int opt_height(void *optctx, const char *opt, const char *arg)
2945 {
2946     screen_height = parse_number_or_die(opt, arg, OPT_INT64, 1, INT_MAX);
2947     return 0;
2948 }
2949
2950 static int opt_format(void *optctx, const char *opt, const char *arg)
2951 {
2952     file_iformat = av_find_input_format(arg);
2953     if (!file_iformat) {
2954         fprintf(stderr, "Unknown input format: %s\n", arg);
2955         return AVERROR(EINVAL);
2956     }
2957     return 0;
2958 }
2959
2960 static int opt_frame_pix_fmt(void *optctx, const char *opt, const char *arg)
2961 {
2962     av_log(NULL, AV_LOG_WARNING, "Option -pix_fmt is deprecated, use -pixel_format.\n");
2963     return opt_default(NULL, "pixel_format", arg);
2964 }
2965
2966 static int opt_sync(void *optctx, const char *opt, const char *arg)
2967 {
2968     if (!strcmp(arg, "audio"))
2969         av_sync_type = AV_SYNC_AUDIO_MASTER;
2970     else if (!strcmp(arg, "video"))
2971         av_sync_type = AV_SYNC_VIDEO_MASTER;
2972     else if (!strcmp(arg, "ext"))
2973         av_sync_type = AV_SYNC_EXTERNAL_CLOCK;
2974     else {
2975         fprintf(stderr, "Unknown value for %s: %s\n", opt, arg);
2976         exit(1);
2977     }
2978     return 0;
2979 }
2980
2981 static int opt_seek(void *optctx, const char *opt, const char *arg)
2982 {
2983     start_time = parse_time_or_die(opt, arg, 1);
2984     return 0;
2985 }
2986
2987 static int opt_duration(void *optctx, const char *opt, const char *arg)
2988 {
2989     duration = parse_time_or_die(opt, arg, 1);
2990     return 0;
2991 }
2992
2993 static int opt_show_mode(void *optctx, const char *opt, const char *arg)
2994 {
2995     show_mode = !strcmp(arg, "video") ? SHOW_MODE_VIDEO :
2996                 !strcmp(arg, "waves") ? SHOW_MODE_WAVES :
2997                 !strcmp(arg, "rdft" ) ? SHOW_MODE_RDFT  :
2998                 parse_number_or_die(opt, arg, OPT_INT, 0, SHOW_MODE_NB-1);
2999     return 0;
3000 }
3001
3002 static void opt_input_file(void *optctx, const char *filename)
3003 {
3004     if (input_filename) {
3005         fprintf(stderr, "Argument '%s' provided as input filename, but '%s' was already specified.\n",
3006                 filename, input_filename);
3007         exit(1);
3008     }
3009     if (!strcmp(filename, "-"))
3010         filename = "pipe:";
3011     input_filename = filename;
3012 }
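
/* A lone "-" is rewritten to the "pipe:" protocol so that the player reads
 * from standard input, e.g. (illustrative) "cat movie.mpg | ffplay -".  Only
 * one input filename is accepted; a second one is rejected above. */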
3013
3014 static int opt_codec(void *o, const char *opt, const char *arg)
3015 {
3016     switch (opt[strlen(opt) - 1]) {
3017     case 'a':    audio_codec_name = arg; break;
3018     case 's': subtitle_codec_name = arg; break;
3019     case 'v':    video_codec_name = arg; break;
3020     }
3021     return 0;
3022 }
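
/* opt_codec() relies on how cmdutils matches option names: an option written
 * as "-codec:a", "-codec:v" or "-codec:s" still resolves to the "codec" entry
 * in the table below, and the full name including the ":a"/":v"/":s" suffix is
 * passed in as 'opt', so its last character selects the media type.  (This
 * describes the surrounding cmdutils behaviour as we understand it; that code
 * is outside this excerpt.)  Illustrative use:
 *
 *     ffplay -codec:v mpeg4 -codec:a mp2 clip.avi
 *
 * A bare "-codec name" matches none of the cases and is silently ignored. */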
3023
3024 static int dummy;
3025
3026 static const OptionDef options[] = {
3027 #include "cmdutils_common_opts.h"
3028     { "x", HAS_ARG, { .func_arg = opt_width }, "force displayed width", "width" },
3029     { "y", HAS_ARG, { .func_arg = opt_height }, "force displayed height", "height" },
3030     { "s", HAS_ARG | OPT_VIDEO, { .func_arg = opt_frame_size }, "set frame size (WxH or abbreviation)", "size" },
3031     { "fs", OPT_BOOL, { &is_full_screen }, "force full screen" },
3032     { "an", OPT_BOOL, { &audio_disable }, "disable audio" },
3033     { "vn", OPT_BOOL, { &video_disable }, "disable video" },
3034     { "ast", OPT_INT | HAS_ARG | OPT_EXPERT, { &wanted_stream[AVMEDIA_TYPE_AUDIO] }, "select desired audio stream", "stream_number" },
3035     { "vst", OPT_INT | HAS_ARG | OPT_EXPERT, { &wanted_stream[AVMEDIA_TYPE_VIDEO] }, "select desired video stream", "stream_number" },
3036     { "sst", OPT_INT | HAS_ARG | OPT_EXPERT, { &wanted_stream[AVMEDIA_TYPE_SUBTITLE] }, "select desired subtitle stream", "stream_number" },
3037     { "ss", HAS_ARG, { .func_arg = opt_seek }, "seek to a given position in seconds", "pos" },
3038     { "t", HAS_ARG, { .func_arg = opt_duration }, "play \"duration\" seconds of audio/video", "duration" },
3039     { "bytes", OPT_INT | HAS_ARG, { &seek_by_bytes }, "seek by bytes 0=off 1=on -1=auto", "val" },
3040     { "nodisp", OPT_BOOL, { &display_disable }, "disable graphical display" },
3041     { "f", HAS_ARG, { .func_arg = opt_format }, "force format", "fmt" },
3042     { "pix_fmt", HAS_ARG | OPT_EXPERT | OPT_VIDEO, { .func_arg = opt_frame_pix_fmt }, "set pixel format", "format" },
3043     { "stats", OPT_BOOL | OPT_EXPERT, { &show_status }, "show status", "" },
3044     { "bug", OPT_INT | HAS_ARG | OPT_EXPERT, { &workaround_bugs }, "work around bugs", "" },
3045     { "fast", OPT_BOOL | OPT_EXPERT, { &fast }, "enable non-spec-compliant optimizations", "" },
3046     { "genpts", OPT_BOOL | OPT_EXPERT, { &genpts }, "generate pts", "" },
3047     { "drp", OPT_INT | HAS_ARG | OPT_EXPERT, { &decoder_reorder_pts }, "let decoder reorder pts 0=off 1=on -1=auto", ""},
3048     { "lowres", OPT_INT | HAS_ARG | OPT_EXPERT, { &lowres }, "set low resolution decoding factor", "factor" },
3049     { "skiploop", OPT_INT | HAS_ARG | OPT_EXPERT, { &skip_loop_filter }, "set loop filter skip mode", "mode" },
3050     { "skipframe", OPT_INT | HAS_ARG | OPT_EXPERT, { &skip_frame }, "set frame skip mode", "mode" },
3051     { "skipidct", OPT_INT | HAS_ARG | OPT_EXPERT, { &skip_idct }, "set IDCT skip mode", "mode" },
3052     { "idct", OPT_INT | HAS_ARG | OPT_EXPERT, { &idct }, "set idct algo", "algo" },
3053     { "ec", OPT_INT | HAS_ARG | OPT_EXPERT, { &error_concealment }, "set error concealment options",  "bit_mask" },
3054     { "sync", HAS_ARG | OPT_EXPERT, { .func_arg = opt_sync }, "set audio-video sync type (type=audio/video/ext)", "type" },
3055     { "autoexit", OPT_BOOL | OPT_EXPERT, { &autoexit }, "exit at the end", "" },
3056     { "exitonkeydown", OPT_BOOL | OPT_EXPERT, { &exit_on_keydown }, "exit on key down", "" },
3057     { "exitonmousedown", OPT_BOOL | OPT_EXPERT, { &exit_on_mousedown }, "exit on mouse down", "" },
3058     { "loop", OPT_INT | HAS_ARG | OPT_EXPERT, { &loop }, "set number of times the playback shall be looped", "loop count" },
3059     { "framedrop", OPT_BOOL | OPT_EXPERT, { &framedrop }, "drop frames when cpu is too slow", "" },
3060     { "infbuf", OPT_BOOL | OPT_EXPERT, { &infinite_buffer }, "don't limit the input buffer size (useful with realtime streams)", "" },
3061     { "window_title", OPT_STRING | HAS_ARG, { &window_title }, "set window title", "window title" },
3062 #if CONFIG_AVFILTER
3063     { "vf", OPT_STRING | HAS_ARG, { &vfilters }, "video filters", "filter list" },
3064 #endif
3065     { "rdftspeed", OPT_INT | HAS_ARG | OPT_AUDIO | OPT_EXPERT, { &rdftspeed }, "rdft speed", "msecs" },
3066     { "showmode", HAS_ARG, { .func_arg = opt_show_mode }, "select show mode (0 = video, 1 = waves, 2 = RDFT)", "mode" },
3067     { "default", HAS_ARG | OPT_AUDIO | OPT_VIDEO | OPT_EXPERT, { .func_arg = opt_default }, "generic catch-all option", "" },
3068     { "i", OPT_BOOL, { &dummy }, "read specified file", "input_file" },
3069     { "codec", HAS_ARG, { .func_arg = opt_codec }, "force decoder", "decoder" },
3070     { NULL, },
3071 };
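
/* Illustrative command lines combining some of the options above (file names
 * are made up):
 *
 *     ffplay -fs -sync ext video.mkv        full screen, sync to external clock
 *     ffplay -ss 00:01:30 -t 20 video.mkv   start at 1:30, play 20 seconds
 *     ffplay -nodisp -autoexit track.mp3    no window, quit when playback ends
 */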
3072
3073 static void show_usage(void)
3074 {
3075     av_log(NULL, AV_LOG_INFO, "Simple media player\n");
3076     av_log(NULL, AV_LOG_INFO, "usage: %s [options] input_file\n", program_name);
3077     av_log(NULL, AV_LOG_INFO, "\n");
3078 }
3079
3080 void show_help_default(const char *opt, const char *arg)
3081 {
3082     av_log_set_callback(log_callback_help);
3083     show_usage();
3084     show_help_options(options, "Main options:", 0, OPT_EXPERT, 0);
3085     show_help_options(options, "Advanced options:", OPT_EXPERT, 0, 0);
3086     printf("\n");
3087     show_help_children(avcodec_get_class(), AV_OPT_FLAG_DECODING_PARAM);
3088     show_help_children(avformat_get_class(), AV_OPT_FLAG_DECODING_PARAM);
3089 #if !CONFIG_AVFILTER
3090     show_help_children(sws_get_class(), AV_OPT_FLAG_ENCODING_PARAM);
3091 #else
3092     show_help_children(avfilter_get_class(), AV_OPT_FLAG_FILTERING_PARAM);
3093 #endif
3094     printf("\nWhile playing:\n"
3095            "q, ESC              quit\n"
3096            "f                   toggle full screen\n"
3097            "p, SPC              pause\n"
3098            "a                   cycle audio channel\n"
3099            "v                   cycle video channel\n"
3100            "t                   cycle subtitle channel\n"
3101            "w                   show audio waves\n"
3102            "s                   activate frame-step mode\n"
3103            "left/right          seek backward/forward 10 seconds\n"
3104            "down/up             seek backward/forward 1 minute\n"
3105            "page down/page up   seek backward/forward 10 minutes\n"
3106            "mouse click         seek to percentage in file corresponding to fraction of width\n"
3107            );
3108 }
3109
3110 static int lockmgr(void **mtx, enum AVLockOp op)
3111 {
3112     switch (op) {
3113     case AV_LOCK_CREATE:
3114         *mtx = SDL_CreateMutex();
3115         if (!*mtx)
3116             return 1;
3117         return 0;
3118     case AV_LOCK_OBTAIN:
3119         return !!SDL_LockMutex(*mtx);
3120     case AV_LOCK_RELEASE:
3121         return !!SDL_UnlockMutex(*mtx);
3122     case AV_LOCK_DESTROY:
3123         SDL_DestroyMutex(*mtx);
3124         return 0;
3125     }
3126     return 1;
3127 }
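
/* lockmgr() implements the callback contract of av_lockmgr_register(), which
 * main() installs below: libavcodec invokes it to create, obtain, release and
 * destroy the mutexes guarding its non-thread-safe entry points, and a zero
 * return is treated as success, non-zero as failure.  For illustration only,
 * an equivalent lock manager built on POSIX threads instead of SDL could look
 * like this (assuming <pthread.h> and <stdlib.h> are available):
 */
#if 0
static int pthread_lockmgr(void **mtx, enum AVLockOp op)
{
    switch (op) {
    case AV_LOCK_CREATE:
        *mtx = malloc(sizeof(pthread_mutex_t));
        return !*mtx || pthread_mutex_init(*mtx, NULL);
    case AV_LOCK_OBTAIN:
        return !!pthread_mutex_lock(*mtx);
    case AV_LOCK_RELEASE:
        return !!pthread_mutex_unlock(*mtx);
    case AV_LOCK_DESTROY:
        pthread_mutex_destroy(*mtx);
        free(*mtx);
        return 0;
    }
    return 1;
}
#endif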
3128
3129 /* Program entry point */
3130 int main(int argc, char **argv)
3131 {
3132     int flags;
3133     VideoState *is;
3134     char dummy_videodriver[] = "SDL_VIDEODRIVER=dummy";
3135
3136     av_log_set_flags(AV_LOG_SKIP_REPEATED);
3137     parse_loglevel(argc, argv, options);
3138
3139     /* register all codecs, demuxers and protocols */
3140     avcodec_register_all();
3141 #if CONFIG_AVDEVICE
3142     avdevice_register_all();
3143 #endif
3144 #if CONFIG_AVFILTER
3145     avfilter_register_all();
3146 #endif
3147     av_register_all();
3148     avformat_network_init();
3149
3150     init_opts();
3151
3152     signal(SIGINT , sigterm_handler); /* Interrupt (ANSI).    */
3153     signal(SIGTERM, sigterm_handler); /* Termination (ANSI).  */
3154
3155     show_banner(argc, argv, options);
3156
3157     parse_options(NULL, argc, argv, options, opt_input_file);
3158
3159     if (!input_filename) {
3160         show_usage();
3161         fprintf(stderr, "An input file must be specified\n");
3162         fprintf(stderr, "Use -h to get full help or, even better, run 'man %s'\n", program_name);
3163         exit(1);
3164     }
3165
3166     if (display_disable) {
3167         video_disable = 1;
3168     }
3169     flags = SDL_INIT_VIDEO | SDL_INIT_AUDIO | SDL_INIT_TIMER;
3170     if (audio_disable)
3171         flags &= ~SDL_INIT_AUDIO;
3172     if (display_disable)
3173         SDL_putenv(dummy_videodriver); /* For the event queue, we always need a video driver. */
3174 #if !defined(__MINGW32__) && !defined(__APPLE__)
3175     flags |= SDL_INIT_EVENTTHREAD; /* Not supported on Windows or Mac OS X */
3176 #endif
3177     if (SDL_Init (flags)) {
3178         fprintf(stderr, "Could not initialize SDL - %s\n", SDL_GetError());
3179         fprintf(stderr, "(Did you set the DISPLAY variable?)\n");
3180         exit(1);
3181     }
3182
3183     if (!display_disable) {
3184 #if HAVE_SDL_VIDEO_SIZE
3185         const SDL_VideoInfo *vi = SDL_GetVideoInfo();
3186         fs_screen_width = vi->current_w;
3187         fs_screen_height = vi->current_h;
3188 #endif
3189     }
3190
3191     SDL_EventState(SDL_ACTIVEEVENT, SDL_IGNORE);
3192     SDL_EventState(SDL_SYSWMEVENT, SDL_IGNORE);
3193     SDL_EventState(SDL_USEREVENT, SDL_IGNORE);
3194
3195     if (av_lockmgr_register(lockmgr)) {
3196         fprintf(stderr, "Could not initialize lock manager!\n");
3197         do_exit(NULL);
3198     }
3199
3200     av_init_packet(&flush_pkt);
3201     flush_pkt.data = (uint8_t *)(intptr_t)"FLUSH";
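    /* flush_pkt is a sentinel packet: its data pointer is a value no real
     * packet can carry, so the decoder threads elsewhere in this file can
     * recognise it in their queues after a seek and call
     * avcodec_flush_buffers() to drop stale decoder state.  (Description
     * based on how the rest of the player uses it; that code is outside this
     * excerpt.) */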
3202
3203     is = stream_open(input_filename, file_iformat);
3204     if (!is) {
3205         fprintf(stderr, "Failed to initialize VideoState!\n");
3206         do_exit(NULL);
3207     }
3208
3209     event_loop(is);
3210
3211     /* never reached: event_loop() does not return */
3212
3213     return 0;
3214 }