1 /*
2  * Copyright (c) 2003 Fabrice Bellard
3  *
4  * This file is part of FFmpeg.
5  *
6  * FFmpeg is free software; you can redistribute it and/or
7  * modify it under the terms of the GNU Lesser General Public
8  * License as published by the Free Software Foundation; either
9  * version 2.1 of the License, or (at your option) any later version.
10  *
11  * FFmpeg is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
14  * Lesser General Public License for more details.
15  *
16  * You should have received a copy of the GNU Lesser General Public
17  * License along with FFmpeg; if not, write to the Free Software
18  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
19  */
20
21 /**
22  * @file
23  * simple media player based on the FFmpeg libraries
24  */
25
26 #include "config.h"
27 #include <inttypes.h>
28 #include <math.h>
29 #include <limits.h>
30 #include <signal.h>
31 #include "libavutil/avstring.h"
32 #include "libavutil/colorspace.h"
33 #include "libavutil/mathematics.h"
34 #include "libavutil/pixdesc.h"
35 #include "libavutil/imgutils.h"
36 #include "libavutil/dict.h"
37 #include "libavutil/parseutils.h"
38 #include "libavutil/samplefmt.h"
39 #include "libavutil/avassert.h"
40 #include "libavformat/avformat.h"
41 #include "libavdevice/avdevice.h"
42 #include "libswscale/swscale.h"
43 #include "libavcodec/audioconvert.h"
44 #include "libavutil/opt.h"
45 #include "libavcodec/avfft.h"
46 #include "libswresample/swresample.h"
47
48 #if CONFIG_AVFILTER
49 # include "libavfilter/avcodec.h"
50 # include "libavfilter/avfilter.h"
51 # include "libavfilter/avfiltergraph.h"
52 # include "libavfilter/buffersink.h"
53 #endif
54
55 #include <SDL.h>
56 #include <SDL_thread.h>
57
58 #include "cmdutils.h"
59
60 #include <unistd.h>
61 #include <assert.h>
62
63 const char program_name[] = "ffplay";
64 const int program_birth_year = 2003;
65
66 #define MAX_QUEUE_SIZE (15 * 1024 * 1024)
67 #define MIN_FRAMES 5
68
69 /* SDL audio buffer size, in samples. Should be small to have precise
70    A/V sync as SDL does not have hardware buffer fullness info. */
71 #define SDL_AUDIO_BUFFER_SIZE 1024
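/* e.g. at 44100 Hz this is 1024/44100 ≈ 23 ms of audio per callback, which is
   roughly the granularity of the audio clock used for A/V sync below */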
72
73 /* no AV sync correction is done if below the AV sync threshold */
74 #define AV_SYNC_THRESHOLD 0.01
75 /* no AV correction is done if too big error */
76 #define AV_NOSYNC_THRESHOLD 10.0
77
78 /* maximum audio speed change to get correct sync */
79 #define SAMPLE_CORRECTION_PERCENT_MAX 10
80
81 /* we use about AUDIO_DIFF_AVG_NB A-V differences to make the average */
82 #define AUDIO_DIFF_AVG_NB   20
83
84 /* NOTE: the size must be big enough to compensate for the hardware audio buffer size */
85 #define SAMPLE_ARRAY_SIZE (2 * 65536)
86
87 static int sws_flags = SWS_BICUBIC;
88
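/* A thread-safe FIFO of demuxed packets: the demuxer appends with
   packet_queue_put() while decoder threads block in packet_queue_get() on
   'cond' until a packet arrives or 'abort_request' is set; 'size' counts
   packet payload bytes plus list-node overhead and is used to bound buffering. */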
89 typedef struct PacketQueue {
90     AVPacketList *first_pkt, *last_pkt;
91     int nb_packets;
92     int size;
93     int abort_request;
94     SDL_mutex *mutex;
95     SDL_cond *cond;
96 } PacketQueue;
97
98 #define VIDEO_PICTURE_QUEUE_SIZE 2
99 #define SUBPICTURE_QUEUE_SIZE 4
100
101 typedef struct VideoPicture {
102     double pts;                                  ///< presentation time stamp for this picture
103     double duration;                             ///< expected duration of the frame
104     int64_t pos;                                 ///< byte position in file
105     int skip;
106     SDL_Overlay *bmp;
107     int width, height; /* source height & width */
108     int allocated;
109     int reallocate;
110     enum PixelFormat pix_fmt;
111
112 #if CONFIG_AVFILTER
113     AVFilterBufferRef *picref;
114 #endif
115 } VideoPicture;
116
117 typedef struct SubPicture {
118     double pts; /* presentation time stamp for this picture */
119     AVSubtitle sub;
120 } SubPicture;
121
122 enum {
123     AV_SYNC_AUDIO_MASTER, /* default choice */
124     AV_SYNC_VIDEO_MASTER,
125     AV_SYNC_EXTERNAL_CLOCK, /* synchronize to an external clock */
126 };
127
128 typedef struct VideoState {
129     SDL_Thread *read_tid;
130     SDL_Thread *video_tid;
131     SDL_Thread *refresh_tid;
132     AVInputFormat *iformat;
133     int no_background;
134     int abort_request;
135     int force_refresh;
136     int paused;
137     int last_paused;
138     int seek_req;
139     int seek_flags;
140     int64_t seek_pos;
141     int64_t seek_rel;
142     int read_pause_return;
143     AVFormatContext *ic;
144
145     int audio_stream;
146
147     int av_sync_type;
148     double external_clock; /* external clock base */
149     int64_t external_clock_time;
150
151     double audio_clock;
152     double audio_diff_cum; /* used for AV difference average computation */
153     double audio_diff_avg_coef;
154     double audio_diff_threshold;
155     int audio_diff_avg_count;
156     AVStream *audio_st;
157     PacketQueue audioq;
158     int audio_hw_buf_size;
159     DECLARE_ALIGNED(16,uint8_t,audio_buf2)[AVCODEC_MAX_AUDIO_FRAME_SIZE * 4];
160     uint8_t silence_buf[SDL_AUDIO_BUFFER_SIZE];
161     uint8_t *audio_buf;
162     uint8_t *audio_buf1;
163     unsigned int audio_buf_size; /* in bytes */
164     int audio_buf_index; /* in bytes */
165     int audio_write_buf_size;
166     AVPacket audio_pkt_temp;
167     AVPacket audio_pkt;
168     enum AVSampleFormat audio_src_fmt;
169     enum AVSampleFormat audio_tgt_fmt;
170     int audio_src_channels;
171     int audio_tgt_channels;
172     int64_t audio_src_channel_layout;
173     int64_t audio_tgt_channel_layout;
174     int audio_src_freq;
175     int audio_tgt_freq;
176     struct SwrContext *swr_ctx;
177     double audio_current_pts;
178     double audio_current_pts_drift;
179     int frame_drops_early;
180     int frame_drops_late;
181     AVFrame *frame;
182
183     enum ShowMode {
184         SHOW_MODE_NONE = -1, SHOW_MODE_VIDEO = 0, SHOW_MODE_WAVES, SHOW_MODE_RDFT, SHOW_MODE_NB
185     } show_mode;
186     int16_t sample_array[SAMPLE_ARRAY_SIZE];
187     int sample_array_index;
188     int last_i_start;
189     RDFTContext *rdft;
190     int rdft_bits;
191     FFTSample *rdft_data;
192     int xpos;
193
194     SDL_Thread *subtitle_tid;
195     int subtitle_stream;
196     int subtitle_stream_changed;
197     AVStream *subtitle_st;
198     PacketQueue subtitleq;
199     SubPicture subpq[SUBPICTURE_QUEUE_SIZE];
200     int subpq_size, subpq_rindex, subpq_windex;
201     SDL_mutex *subpq_mutex;
202     SDL_cond *subpq_cond;
203
204     double frame_timer;
205     double frame_last_pts;
206     double frame_last_duration;
207     double frame_last_dropped_pts;
208     double frame_last_returned_time;
209     double frame_last_filter_delay;
210     int64_t frame_last_dropped_pos;
211     double video_clock;                          ///< pts of last decoded frame / predicted pts of next decoded frame
212     int video_stream;
213     AVStream *video_st;
214     PacketQueue videoq;
215     double video_current_pts;                    ///< current displayed pts (different from video_clock if frame fifos are used)
216     double video_current_pts_drift;              ///< video_current_pts - time (av_gettime) at which we updated video_current_pts - used to have running video pts
217     int64_t video_current_pos;                   ///< current displayed file pos
218     VideoPicture pictq[VIDEO_PICTURE_QUEUE_SIZE];
219     int pictq_size, pictq_rindex, pictq_windex;
220     SDL_mutex *pictq_mutex;
221     SDL_cond *pictq_cond;
222 #if !CONFIG_AVFILTER
223     struct SwsContext *img_convert_ctx;
224 #endif
225
226     char filename[1024];
227     int width, height, xleft, ytop;
228     int step;
229
230 #if CONFIG_AVFILTER
231     AVFilterContext *out_video_filter;          ///< the last filter in the video chain
232 #endif
233
234     int refresh;
235 } VideoState;
236
237 static int opt_help(const char *opt, const char *arg);
238
239 /* options specified by the user */
240 static AVInputFormat *file_iformat;
241 static const char *input_filename;
242 static const char *window_title;
243 static int fs_screen_width;
244 static int fs_screen_height;
245 static int screen_width  = 0;
246 static int screen_height = 0;
247 static int audio_disable;
248 static int video_disable;
249 static int wanted_stream[AVMEDIA_TYPE_NB] = {
250     [AVMEDIA_TYPE_AUDIO]    = -1,
251     [AVMEDIA_TYPE_VIDEO]    = -1,
252     [AVMEDIA_TYPE_SUBTITLE] = -1,
253 };
254 static int seek_by_bytes = -1;
255 static int display_disable;
256 static int show_status = 1;
257 static int av_sync_type = AV_SYNC_AUDIO_MASTER;
258 static int64_t start_time = AV_NOPTS_VALUE;
259 static int64_t duration = AV_NOPTS_VALUE;
260 static int workaround_bugs = 1;
261 static int fast = 0;
262 static int genpts = 0;
263 static int lowres = 0;
264 static int idct = FF_IDCT_AUTO;
265 static enum AVDiscard skip_frame       = AVDISCARD_DEFAULT;
266 static enum AVDiscard skip_idct        = AVDISCARD_DEFAULT;
267 static enum AVDiscard skip_loop_filter = AVDISCARD_DEFAULT;
268 static int error_concealment = 3;
269 static int decoder_reorder_pts = -1;
270 static int autoexit;
271 static int exit_on_keydown;
272 static int exit_on_mousedown;
273 static int loop = 1;
274 static int framedrop = -1;
275 static enum ShowMode show_mode = SHOW_MODE_NONE;
276 static const char *audio_codec_name;
277 static const char *subtitle_codec_name;
278 static const char *video_codec_name;
279 static int rdftspeed = 20;
280 #if CONFIG_AVFILTER
281 static char *vfilters = NULL;
282 #endif
283
284 /* current context */
285 static int is_full_screen;
286 static int64_t audio_callback_time;
287
288 static AVPacket flush_pkt;
289
290 #define FF_ALLOC_EVENT   (SDL_USEREVENT)
291 #define FF_REFRESH_EVENT (SDL_USEREVENT + 1)
292 #define FF_QUIT_EVENT    (SDL_USEREVENT + 2)
293
294 static SDL_Surface *screen;
295
296 void av_noreturn exit_program(int ret)
297 {
298     exit(ret);
299 }
300
301 static int packet_queue_put(PacketQueue *q, AVPacket *pkt)
302 {
303     AVPacketList *pkt1;
304
305     /* duplicate the packet */
306     if (pkt != &flush_pkt && av_dup_packet(pkt) < 0)
307         return -1;
308
309     pkt1 = av_malloc(sizeof(AVPacketList));
310     if (!pkt1)
311         return -1;
312     pkt1->pkt = *pkt;
313     pkt1->next = NULL;
314
315
316     SDL_LockMutex(q->mutex);
317
318     if (!q->last_pkt)
319
320         q->first_pkt = pkt1;
321     else
322         q->last_pkt->next = pkt1;
323     q->last_pkt = pkt1;
324     q->nb_packets++;
325     q->size += pkt1->pkt.size + sizeof(*pkt1);
326     /* XXX: should duplicate packet data in DV case */
327     SDL_CondSignal(q->cond);
328
329     SDL_UnlockMutex(q->mutex);
330     return 0;
331 }
332
333 /* packet queue handling */
334 static void packet_queue_init(PacketQueue *q)
335 {
336     memset(q, 0, sizeof(PacketQueue));
337     q->mutex = SDL_CreateMutex();
338     q->cond = SDL_CreateCond();
339     packet_queue_put(q, &flush_pkt);
340 }
341
342 static void packet_queue_flush(PacketQueue *q)
343 {
344     AVPacketList *pkt, *pkt1;
345
346     SDL_LockMutex(q->mutex);
347     for (pkt = q->first_pkt; pkt != NULL; pkt = pkt1) {
348         pkt1 = pkt->next;
349         av_free_packet(&pkt->pkt);
350         av_freep(&pkt);
351     }
352     q->last_pkt = NULL;
353     q->first_pkt = NULL;
354     q->nb_packets = 0;
355     q->size = 0;
356     SDL_UnlockMutex(q->mutex);
357 }
358
359 static void packet_queue_end(PacketQueue *q)
360 {
361     packet_queue_flush(q);
362     SDL_DestroyMutex(q->mutex);
363     SDL_DestroyCond(q->cond);
364 }
365
366 static void packet_queue_abort(PacketQueue *q)
367 {
368     SDL_LockMutex(q->mutex);
369
370     q->abort_request = 1;
371
372     SDL_CondSignal(q->cond);
373
374     SDL_UnlockMutex(q->mutex);
375 }
376
377 /* return < 0 if aborted, 0 if no packet and > 0 if packet.  */
378 static int packet_queue_get(PacketQueue *q, AVPacket *pkt, int block)
379 {
380     AVPacketList *pkt1;
381     int ret;
382
383     SDL_LockMutex(q->mutex);
384
385     for (;;) {
386         if (q->abort_request) {
387             ret = -1;
388             break;
389         }
390
391         pkt1 = q->first_pkt;
392         if (pkt1) {
393             q->first_pkt = pkt1->next;
394             if (!q->first_pkt)
395                 q->last_pkt = NULL;
396             q->nb_packets--;
397             q->size -= pkt1->pkt.size + sizeof(*pkt1);
398             *pkt = pkt1->pkt;
399             av_free(pkt1);
400             ret = 1;
401             break;
402         } else if (!block) {
403             ret = 0;
404             break;
405         } else {
406             SDL_CondWait(q->cond, q->mutex);
407         }
408     }
409     SDL_UnlockMutex(q->mutex);
410     return ret;
411 }
412
413 static inline void fill_rectangle(SDL_Surface *screen,
414                                   int x, int y, int w, int h, int color)
415 {
416     SDL_Rect rect;
417     rect.x = x;
418     rect.y = y;
419     rect.w = w;
420     rect.h = h;
421     SDL_FillRect(screen, &rect, color);
422 }
423
424 #define ALPHA_BLEND(a, oldp, newp, s)\
425 ((((oldp << s) * (255 - (a))) + (newp * (a))) / (255 << s))
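/* i.e. (oldp*(255-a) + newp*a) / 255, with 's' compensating for the case where
   'newp' is the sum of 2^s accumulated (sub-sampled chroma) samples */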
426
427 #define RGBA_IN(r, g, b, a, s)\
428 {\
429     unsigned int v = ((const uint32_t *)(s))[0];\
430     a = (v >> 24) & 0xff;\
431     r = (v >> 16) & 0xff;\
432     g = (v >> 8) & 0xff;\
433     b = v & 0xff;\
434 }
435
436 #define YUVA_IN(y, u, v, a, s, pal)\
437 {\
438     unsigned int val = ((const uint32_t *)(pal))[*(const uint8_t*)(s)];\
439     a = (val >> 24) & 0xff;\
440     y = (val >> 16) & 0xff;\
441     u = (val >> 8) & 0xff;\
442     v = val & 0xff;\
443 }
444
445 #define YUVA_OUT(d, y, u, v, a)\
446 {\
447     ((uint32_t *)(d))[0] = (a << 24) | (y << 16) | (u << 8) | v;\
448 }
449
450
451 #define BPP 1
452
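/* Blend one subtitle rectangle (PAL8 pixels with a 32-bit AYUV palette, see the
   "Now in YCrCb" note below) onto a YUV420P destination picture: luma is blended
   per pixel, chroma is accumulated and blended per 2x2 block to match the 4:2:0
   subsampling, with the odd first/last rows and columns handled separately. */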
453 static void blend_subrect(AVPicture *dst, const AVSubtitleRect *rect, int imgw, int imgh)
454 {
455     int wrap, wrap3, width2, skip2;
456     int y, u, v, a, u1, v1, a1, w, h;
457     uint8_t *lum, *cb, *cr;
458     const uint8_t *p;
459     const uint32_t *pal;
460     int dstx, dsty, dstw, dsth;
461
462     dstw = av_clip(rect->w, 0, imgw);
463     dsth = av_clip(rect->h, 0, imgh);
464     dstx = av_clip(rect->x, 0, imgw - dstw);
465     dsty = av_clip(rect->y, 0, imgh - dsth);
466     lum = dst->data[0] + dsty * dst->linesize[0];
467     cb  = dst->data[1] + (dsty >> 1) * dst->linesize[1];
468     cr  = dst->data[2] + (dsty >> 1) * dst->linesize[2];
469
470     width2 = ((dstw + 1) >> 1) + (dstx & ~dstw & 1);
471     skip2 = dstx >> 1;
472     wrap = dst->linesize[0];
473     wrap3 = rect->pict.linesize[0];
474     p = rect->pict.data[0];
475     pal = (const uint32_t *)rect->pict.data[1];  /* Now in YCrCb! */
476
477     if (dsty & 1) {
478         lum += dstx;
479         cb += skip2;
480         cr += skip2;
481
482         if (dstx & 1) {
483             YUVA_IN(y, u, v, a, p, pal);
484             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
485             cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
486             cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
487             cb++;
488             cr++;
489             lum++;
490             p += BPP;
491         }
492         for (w = dstw - (dstx & 1); w >= 2; w -= 2) {
493             YUVA_IN(y, u, v, a, p, pal);
494             u1 = u;
495             v1 = v;
496             a1 = a;
497             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
498
499             YUVA_IN(y, u, v, a, p + BPP, pal);
500             u1 += u;
501             v1 += v;
502             a1 += a;
503             lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
504             cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
505             cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
506             cb++;
507             cr++;
508             p += 2 * BPP;
509             lum += 2;
510         }
511         if (w) {
512             YUVA_IN(y, u, v, a, p, pal);
513             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
514             cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
515             cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
516             p++;
517             lum++;
518         }
519         p += wrap3 - dstw * BPP;
520         lum += wrap - dstw - dstx;
521         cb += dst->linesize[1] - width2 - skip2;
522         cr += dst->linesize[2] - width2 - skip2;
523     }
524     for (h = dsth - (dsty & 1); h >= 2; h -= 2) {
525         lum += dstx;
526         cb += skip2;
527         cr += skip2;
528
529         if (dstx & 1) {
530             YUVA_IN(y, u, v, a, p, pal);
531             u1 = u;
532             v1 = v;
533             a1 = a;
534             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
535             p += wrap3;
536             lum += wrap;
537             YUVA_IN(y, u, v, a, p, pal);
538             u1 += u;
539             v1 += v;
540             a1 += a;
541             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
542             cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
543             cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
544             cb++;
545             cr++;
546             p += -wrap3 + BPP;
547             lum += -wrap + 1;
548         }
549         for (w = dstw - (dstx & 1); w >= 2; w -= 2) {
550             YUVA_IN(y, u, v, a, p, pal);
551             u1 = u;
552             v1 = v;
553             a1 = a;
554             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
555
556             YUVA_IN(y, u, v, a, p + BPP, pal);
557             u1 += u;
558             v1 += v;
559             a1 += a;
560             lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
561             p += wrap3;
562             lum += wrap;
563
564             YUVA_IN(y, u, v, a, p, pal);
565             u1 += u;
566             v1 += v;
567             a1 += a;
568             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
569
570             YUVA_IN(y, u, v, a, p + BPP, pal);
571             u1 += u;
572             v1 += v;
573             a1 += a;
574             lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
575
576             cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 2);
577             cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 2);
578
579             cb++;
580             cr++;
581             p += -wrap3 + 2 * BPP;
582             lum += -wrap + 2;
583         }
584         if (w) {
585             YUVA_IN(y, u, v, a, p, pal);
586             u1 = u;
587             v1 = v;
588             a1 = a;
589             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
590             p += wrap3;
591             lum += wrap;
592             YUVA_IN(y, u, v, a, p, pal);
593             u1 += u;
594             v1 += v;
595             a1 += a;
596             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
597             cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
598             cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
599             cb++;
600             cr++;
601             p += -wrap3 + BPP;
602             lum += -wrap + 1;
603         }
604         p += wrap3 + (wrap3 - dstw * BPP);
605         lum += wrap + (wrap - dstw - dstx);
606         cb += dst->linesize[1] - width2 - skip2;
607         cr += dst->linesize[2] - width2 - skip2;
608     }
609     /* handle odd height */
610     if (h) {
611         lum += dstx;
612         cb += skip2;
613         cr += skip2;
614
615         if (dstx & 1) {
616             YUVA_IN(y, u, v, a, p, pal);
617             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
618             cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
619             cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
620             cb++;
621             cr++;
622             lum++;
623             p += BPP;
624         }
625         for (w = dstw - (dstx & 1); w >= 2; w -= 2) {
626             YUVA_IN(y, u, v, a, p, pal);
627             u1 = u;
628             v1 = v;
629             a1 = a;
630             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
631
632             YUVA_IN(y, u, v, a, p + BPP, pal);
633             u1 += u;
634             v1 += v;
635             a1 += a;
636             lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
637             cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
638             cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
639             cb++;
640             cr++;
641             p += 2 * BPP;
642             lum += 2;
643         }
644         if (w) {
645             YUVA_IN(y, u, v, a, p, pal);
646             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
647             cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
648             cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
649         }
650     }
651 }
652
653 static void free_subpicture(SubPicture *sp)
654 {
655     avsubtitle_free(&sp->sub);
656 }
657
658 static void video_image_display(VideoState *is)
659 {
660     VideoPicture *vp;
661     SubPicture *sp;
662     AVPicture pict;
663     float aspect_ratio;
664     int width, height, x, y;
665     SDL_Rect rect;
666     int i;
667
668     vp = &is->pictq[is->pictq_rindex];
669     if (vp->bmp) {
670 #if CONFIG_AVFILTER
671          if (vp->picref->video->sample_aspect_ratio.num == 0)
672              aspect_ratio = 0;
673          else
674              aspect_ratio = av_q2d(vp->picref->video->sample_aspect_ratio);
675 #else
676
677         /* XXX: use variable in the frame */
678         if (is->video_st->sample_aspect_ratio.num)
679             aspect_ratio = av_q2d(is->video_st->sample_aspect_ratio);
680         else if (is->video_st->codec->sample_aspect_ratio.num)
681             aspect_ratio = av_q2d(is->video_st->codec->sample_aspect_ratio);
682         else
683             aspect_ratio = 0;
684 #endif
685         if (aspect_ratio <= 0.0)
686             aspect_ratio = 1.0;
687         aspect_ratio *= (float)vp->width / (float)vp->height;
688
689         if (is->subtitle_st) {
690             if (is->subpq_size > 0) {
691                 sp = &is->subpq[is->subpq_rindex];
692
693                 if (vp->pts >= sp->pts + ((float) sp->sub.start_display_time / 1000)) {
694                     SDL_LockYUVOverlay (vp->bmp);
695
696                     pict.data[0] = vp->bmp->pixels[0];
697                     pict.data[1] = vp->bmp->pixels[2];
698                     pict.data[2] = vp->bmp->pixels[1];
699
700                     pict.linesize[0] = vp->bmp->pitches[0];
701                     pict.linesize[1] = vp->bmp->pitches[2];
702                     pict.linesize[2] = vp->bmp->pitches[1];
703
704                     for (i = 0; i < sp->sub.num_rects; i++)
705                         blend_subrect(&pict, sp->sub.rects[i],
706                                       vp->bmp->w, vp->bmp->h);
707
708                     SDL_UnlockYUVOverlay (vp->bmp);
709                 }
710             }
711         }
712
713
714         /* XXX: we assume the screen has a 1:1 pixel aspect ratio */
715         height = is->height;
716         width = ((int)rint(height * aspect_ratio)) & ~1;
717         if (width > is->width) {
718             width = is->width;
719             height = ((int)rint(width / aspect_ratio)) & ~1;
720         }
721         x = (is->width - width) / 2;
722         y = (is->height - height) / 2;
723         is->no_background = 0;
724         rect.x = is->xleft + x;
725         rect.y = is->ytop  + y;
726         rect.w = FFMAX(width,  1);
727         rect.h = FFMAX(height, 1);
728         SDL_DisplayYUVOverlay(vp->bmp, &rect);
729     }
730 }
731
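/* positive modulo: maps a into [0, b) even when a is negative,
   e.g. compute_mod(-3, SAMPLE_ARRAY_SIZE) == SAMPLE_ARRAY_SIZE - 3 */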
732 static inline int compute_mod(int a, int b)
733 {
734     return a < 0 ? a%b + b : a%b;
735 }
736
737 static void video_audio_display(VideoState *s)
738 {
739     int i, i_start, x, y1, y, ys, delay, n, nb_display_channels;
740     int ch, channels, h, h2, bgcolor, fgcolor;
741     int64_t time_diff;
742     int rdft_bits, nb_freq;
743
744     for (rdft_bits = 1; (1 << rdft_bits) < 2 * s->height; rdft_bits++)
745         ;
746     nb_freq = 1 << (rdft_bits - 1);
747
748     /* compute display index: center on currently output samples */
749     channels = s->audio_tgt_channels;
750     nb_display_channels = channels;
751     if (!s->paused) {
752         int data_used= s->show_mode == SHOW_MODE_WAVES ? s->width : (2*nb_freq);
753         n = 2 * channels;
754         delay = s->audio_write_buf_size;
755         delay /= n;
756
757         /* to be more precise, we take into account the time spent since
758            the last buffer computation */
759         if (audio_callback_time) {
760             time_diff = av_gettime() - audio_callback_time;
761             delay -= (time_diff * s->audio_tgt_freq) / 1000000;
762         }
763
764         delay += 2 * data_used;
765         if (delay < data_used)
766             delay = data_used;
767
768         i_start= x = compute_mod(s->sample_array_index - delay * channels, SAMPLE_ARRAY_SIZE);
769         if (s->show_mode == SHOW_MODE_WAVES) {
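            /* trigger heuristic: scan up to 1000 samples for a point near a
               zero crossing (b and c have opposite signs) with the largest
               drop a - d, so successive refreshes start at a similar phase
               and the waveform looks stable on screen */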
770             h = INT_MIN;
771             for (i = 0; i < 1000; i += channels) {
772                 int idx = (SAMPLE_ARRAY_SIZE + x - i) % SAMPLE_ARRAY_SIZE;
773                 int a = s->sample_array[idx];
774                 int b = s->sample_array[(idx + 4 * channels) % SAMPLE_ARRAY_SIZE];
775                 int c = s->sample_array[(idx + 5 * channels) % SAMPLE_ARRAY_SIZE];
776                 int d = s->sample_array[(idx + 9 * channels) % SAMPLE_ARRAY_SIZE];
777                 int score = a - d;
778                 if (h < score && (b ^ c) < 0) {
779                     h = score;
780                     i_start = idx;
781                 }
782             }
783         }
784
785         s->last_i_start = i_start;
786     } else {
787         i_start = s->last_i_start;
788     }
789
790     bgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0x00);
791     if (s->show_mode == SHOW_MODE_WAVES) {
792         fill_rectangle(screen,
793                        s->xleft, s->ytop, s->width, s->height,
794                        bgcolor);
795
796         fgcolor = SDL_MapRGB(screen->format, 0xff, 0xff, 0xff);
797
798         /* total height for one channel */
799         h = s->height / nb_display_channels;
800         /* graph height / 2 */
801         h2 = (h * 9) / 20;
802         for (ch = 0; ch < nb_display_channels; ch++) {
803             i = i_start + ch;
804             y1 = s->ytop + ch * h + (h / 2); /* position of center line */
805             for (x = 0; x < s->width; x++) {
806                 y = (s->sample_array[i] * h2) >> 15;
807                 if (y < 0) {
808                     y = -y;
809                     ys = y1 - y;
810                 } else {
811                     ys = y1;
812                 }
813                 fill_rectangle(screen,
814                                s->xleft + x, ys, 1, y,
815                                fgcolor);
816                 i += channels;
817                 if (i >= SAMPLE_ARRAY_SIZE)
818                     i -= SAMPLE_ARRAY_SIZE;
819             }
820         }
821
822         fgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0xff);
823
824         for (ch = 1; ch < nb_display_channels; ch++) {
825             y = s->ytop + ch * h;
826             fill_rectangle(screen,
827                            s->xleft, y, s->width, 1,
828                            fgcolor);
829         }
830         SDL_UpdateRect(screen, s->xleft, s->ytop, s->width, s->height);
831     } else {
832         nb_display_channels= FFMIN(nb_display_channels, 2);
833         if (rdft_bits != s->rdft_bits) {
834             av_rdft_end(s->rdft);
835             av_free(s->rdft_data);
836             s->rdft = av_rdft_init(rdft_bits, DFT_R2C);
837             s->rdft_bits = rdft_bits;
838             s->rdft_data = av_malloc(4 * nb_freq * sizeof(*s->rdft_data));
839         }
840         {
841             FFTSample *data[2];
842             for (ch = 0; ch < nb_display_channels; ch++) {
843                 data[ch] = s->rdft_data + 2 * nb_freq * ch;
844                 i = i_start + ch;
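                /* fill the RDFT input with the most recent samples, weighted by
                   (1 - w*w): a simple parabolic window over [-1, 1] that tapers
                   the edges before the real FFT to reduce spectral leakage */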
845                 for (x = 0; x < 2 * nb_freq; x++) {
846                     double w = (x-nb_freq) * (1.0 / nb_freq);
847                     data[ch][x] = s->sample_array[i] * (1.0 - w * w);
848                     i += channels;
849                     if (i >= SAMPLE_ARRAY_SIZE)
850                         i -= SAMPLE_ARRAY_SIZE;
851                 }
852                 av_rdft_calc(s->rdft, data[ch]);
853             }
854             // least efficient way to do this; we should of course access the data directly, but it's more than fast enough
855             for (y = 0; y < s->height; y++) {
856                 double w = 1 / sqrt(nb_freq);
857                 int a = sqrt(w * sqrt(data[0][2 * y + 0] * data[0][2 * y + 0] + data[0][2 * y + 1] * data[0][2 * y + 1]));
858                 int b = (nb_display_channels == 2 ) ? sqrt(w * sqrt(data[1][2 * y + 0] * data[1][2 * y + 0]
859                        + data[1][2 * y + 1] * data[1][2 * y + 1])) : a;
860                 a = FFMIN(a, 255);
861                 b = FFMIN(b, 255);
862                 fgcolor = SDL_MapRGB(screen->format, a, b, (a + b) / 2);
863
864                 fill_rectangle(screen,
865                             s->xpos, s->height-y, 1, 1,
866                             fgcolor);
867             }
868         }
869         SDL_UpdateRect(screen, s->xpos, s->ytop, 1, s->height);
870         if (!s->paused)
871             s->xpos++;
872         if (s->xpos >= s->width)
873             s->xpos= s->xleft;
874     }
875 }
876
877 static void stream_close(VideoState *is)
878 {
879     VideoPicture *vp;
880     int i;
881     /* XXX: use a special url_shutdown call to abort parse cleanly */
882     is->abort_request = 1;
883     SDL_WaitThread(is->read_tid, NULL);
884     SDL_WaitThread(is->refresh_tid, NULL);
885
886     /* free all pictures */
887     for (i = 0; i < VIDEO_PICTURE_QUEUE_SIZE; i++) {
888         vp = &is->pictq[i];
889 #if CONFIG_AVFILTER
890         if (vp->picref) {
891             avfilter_unref_buffer(vp->picref);
892             vp->picref = NULL;
893         }
894 #endif
895         if (vp->bmp) {
896             SDL_FreeYUVOverlay(vp->bmp);
897             vp->bmp = NULL;
898         }
899     }
900     SDL_DestroyMutex(is->pictq_mutex);
901     SDL_DestroyCond(is->pictq_cond);
902     SDL_DestroyMutex(is->subpq_mutex);
903     SDL_DestroyCond(is->subpq_cond);
904 #if !CONFIG_AVFILTER
905     if (is->img_convert_ctx)
906         sws_freeContext(is->img_convert_ctx);
907 #endif
908     av_free(is);
909 }
910
911 static void do_exit(VideoState *is)
912 {
913     if (is) {
914         stream_close(is);
915     }
916     av_lockmgr_register(NULL);
917     uninit_opts();
918 #if CONFIG_AVFILTER
919     avfilter_uninit();
920 #endif
921     avformat_network_deinit();
922     if (show_status)
923         printf("\n");
924     SDL_Quit();
925     av_log(NULL, AV_LOG_QUIET, "%s", "");
926     exit(0);
927 }
928
929 static void sigterm_handler(int sig)
930 {
931     exit(123);
932 }
933
934 static int video_open(VideoState *is, int force_set_video_mode)
935 {
936     int flags = SDL_HWSURFACE | SDL_ASYNCBLIT | SDL_HWACCEL;
937     int w,h;
938
939     if (is_full_screen) flags |= SDL_FULLSCREEN;
940     else                flags |= SDL_RESIZABLE;
941
942     if (is_full_screen && fs_screen_width) {
943         w = fs_screen_width;
944         h = fs_screen_height;
945     } else if (!is_full_screen && screen_width) {
946         w = screen_width;
947         h = screen_height;
948 #if CONFIG_AVFILTER
949     } else if (is->out_video_filter && is->out_video_filter->inputs[0]) {
950         w = is->out_video_filter->inputs[0]->w;
951         h = is->out_video_filter->inputs[0]->h;
952 #else
953     } else if (is->video_st && is->video_st->codec->width) {
954         w = is->video_st->codec->width;
955         h = is->video_st->codec->height;
956 #endif
957     } else {
958         w = 640;
959         h = 480;
960     }
961     if (screen && is->width == screen->w && screen->w == w
962        && is->height== screen->h && screen->h == h && !force_set_video_mode)
963         return 0;
964     screen = SDL_SetVideoMode(w, h, 0, flags);
965     if (!screen) {
966         fprintf(stderr, "SDL: could not set video mode - exiting\n");
967         do_exit(is);
968     }
969     if (!window_title)
970         window_title = input_filename;
971     SDL_WM_SetCaption(window_title, window_title);
972
973     is->width  = screen->w;
974     is->height = screen->h;
975
976     return 0;
977 }
978
979 /* display the current picture, if any */
980 static void video_display(VideoState *is)
981 {
982     if (!screen)
983         video_open(is, 0);
984     if (is->audio_st && is->show_mode != SHOW_MODE_VIDEO)
985         video_audio_display(is);
986     else if (is->video_st)
987         video_image_display(is);
988 }
989
990 static int refresh_thread(void *opaque)
991 {
992     VideoState *is= opaque;
993     while (!is->abort_request) {
994         SDL_Event event;
995         event.type = FF_REFRESH_EVENT;
996         event.user.data1 = opaque;
997         if (!is->refresh && (!is->paused || is->force_refresh)) {
998             is->refresh = 1;
999             SDL_PushEvent(&event);
1000         }
1001         // FIXME: ideally we should wait for the correct time, but SDL's event passing is so slow it would be silly
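        /* rdftspeed is in milliseconds (20 by default) and usleep() takes
           microseconds; the 5 ms poll is used while a video stream is shown */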
1002         usleep(is->audio_st && is->show_mode != SHOW_MODE_VIDEO ? rdftspeed*1000 : 5000);
1003     }
1004     return 0;
1005 }
1006
1007 /* get the current audio clock value */
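/* The *_current_pts_drift fields store "pts minus the wall-clock time at which
   that pts was set", so drift + av_gettime()/1e6 extrapolates the clock forward
   between updates without having to touch it on every call. */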
1008 static double get_audio_clock(VideoState *is)
1009 {
1010     if (is->paused) {
1011         return is->audio_current_pts;
1012     } else {
1013         return is->audio_current_pts_drift + av_gettime() / 1000000.0;
1014     }
1015 }
1016
1017 /* get the current video clock value */
1018 static double get_video_clock(VideoState *is)
1019 {
1020     if (is->paused) {
1021         return is->video_current_pts;
1022     } else {
1023         return is->video_current_pts_drift + av_gettime() / 1000000.0;
1024     }
1025 }
1026
1027 /* get the current external clock value */
1028 static double get_external_clock(VideoState *is)
1029 {
1030     int64_t ti;
1031     ti = av_gettime();
1032     return is->external_clock + ((ti - is->external_clock_time) * 1e-6);
1033 }
1034
1035 /* get the current master clock value */
1036 static double get_master_clock(VideoState *is)
1037 {
1038     double val;
1039
1040     if (is->av_sync_type == AV_SYNC_VIDEO_MASTER) {
1041         if (is->video_st)
1042             val = get_video_clock(is);
1043         else
1044             val = get_audio_clock(is);
1045     } else if (is->av_sync_type == AV_SYNC_AUDIO_MASTER) {
1046         if (is->audio_st)
1047             val = get_audio_clock(is);
1048         else
1049             val = get_video_clock(is);
1050     } else {
1051         val = get_external_clock(is);
1052     }
1053     return val;
1054 }
1055
1056 /* seek in the stream */
1057 static void stream_seek(VideoState *is, int64_t pos, int64_t rel, int seek_by_bytes)
1058 {
1059     if (!is->seek_req) {
1060         is->seek_pos = pos;
1061         is->seek_rel = rel;
1062         is->seek_flags &= ~AVSEEK_FLAG_BYTE;
1063         if (seek_by_bytes)
1064             is->seek_flags |= AVSEEK_FLAG_BYTE;
1065         is->seek_req = 1;
1066     }
1067 }
1068
1069 /* pause or resume the video */
1070 static void stream_toggle_pause(VideoState *is)
1071 {
1072     if (is->paused) {
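        /* when resuming, now + drift - current_pts equals the time spent paused,
           so the frame timer is pushed forward by that amount and playback does
           not try to catch up on frames that fell due during the pause */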
1073         is->frame_timer += av_gettime() / 1000000.0 + is->video_current_pts_drift - is->video_current_pts;
1074         if (is->read_pause_return != AVERROR(ENOSYS)) {
1075             is->video_current_pts = is->video_current_pts_drift + av_gettime() / 1000000.0;
1076         }
1077         is->video_current_pts_drift = is->video_current_pts - av_gettime() / 1000000.0;
1078     }
1079     is->paused = !is->paused;
1080 }
1081
1082 static double compute_target_delay(double delay, VideoState *is)
1083 {
1084     double sync_threshold, diff = 0;
1085
1086     /* update delay to follow master synchronisation source */
1087     if (((is->av_sync_type == AV_SYNC_AUDIO_MASTER && is->audio_st) ||
1088          is->av_sync_type == AV_SYNC_EXTERNAL_CLOCK)) {
1089         /* if video is slave, we try to correct big delays by
1090            duplicating or deleting a frame */
1091         diff = get_video_clock(is) - get_master_clock(is);
1092
1093         /* skip or repeat frame. We take into account the
1094            delay to compute the threshold. I still don't know
1095            if it is the best guess */
1096         sync_threshold = FFMAX(AV_SYNC_THRESHOLD, delay);
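        /* e.g. at 25 fps (delay = 0.04) a video 60 ms behind the master clock
           (diff = -0.06) gets delay forced to 0 so the frame shows immediately,
           while 60 ms ahead doubles the delay to 0.08 and holds the frame back */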
1097         if (fabs(diff) < AV_NOSYNC_THRESHOLD) {
1098             if (diff <= -sync_threshold)
1099                 delay = 0;
1100             else if (diff >= sync_threshold)
1101                 delay = 2 * delay;
1102         }
1103     }
1104
1105     av_dlog(NULL, "video: delay=%0.3f A-V=%f\n",
1106             delay, -diff);
1107
1108     return delay;
1109 }
1110
1111 static void pictq_next_picture(VideoState *is) {
1112     /* update queue size and signal for next picture */
1113     if (++is->pictq_rindex == VIDEO_PICTURE_QUEUE_SIZE)
1114         is->pictq_rindex = 0;
1115
1116     SDL_LockMutex(is->pictq_mutex);
1117     is->pictq_size--;
1118     SDL_CondSignal(is->pictq_cond);
1119     SDL_UnlockMutex(is->pictq_mutex);
1120 }
1121
1122 static void update_video_pts(VideoState *is, double pts, int64_t pos) {
1123     double time = av_gettime() / 1000000.0;
1124     /* update current video pts */
1125     is->video_current_pts = pts;
1126     is->video_current_pts_drift = is->video_current_pts - time;
1127     is->video_current_pos = pos;
1128     is->frame_last_pts = pts;
1129 }
1130
1131 /* called to display each frame */
1132 static void video_refresh(void *opaque)
1133 {
1134     VideoState *is = opaque;
1135     VideoPicture *vp;
1136     double time;
1137
1138     SubPicture *sp, *sp2;
1139
1140     if (is->video_st) {
1141 retry:
1142         if (is->pictq_size == 0) {
1143             SDL_LockMutex(is->pictq_mutex);
1144             if (is->frame_last_dropped_pts != AV_NOPTS_VALUE && is->frame_last_dropped_pts > is->frame_last_pts) {
1145                 update_video_pts(is, is->frame_last_dropped_pts, is->frame_last_dropped_pos);
1146                 is->frame_last_dropped_pts = AV_NOPTS_VALUE;
1147             }
1148             SDL_UnlockMutex(is->pictq_mutex);
1149             // nothing to do, no picture to display in the queue
1150         } else {
1151             double last_duration, duration, delay;
1152             /* dequeue the picture */
1153             vp = &is->pictq[is->pictq_rindex];
1154
1155             if (vp->skip) {
1156                 pictq_next_picture(is);
1157                 goto retry;
1158             }
1159
1160             if (is->paused)
1161                 goto display;
1162
1163             /* compute nominal last_duration */
1164             last_duration = vp->pts - is->frame_last_pts;
1165             if (last_duration > 0 && last_duration < 10.0) {
1166                 /* if duration of the last frame was sane, update last_duration in video state */
1167                 is->frame_last_duration = last_duration;
1168             }
1169             delay = compute_target_delay(is->frame_last_duration, is);
1170
1171             time= av_gettime()/1000000.0;
1172             if (time < is->frame_timer + delay)
1173                 return;
1174
1175             if (delay > 0)
1176                 is->frame_timer += delay * FFMAX(1, floor((time-is->frame_timer) / delay));
1177
1178             SDL_LockMutex(is->pictq_mutex);
1179             update_video_pts(is, vp->pts, vp->pos);
1180             SDL_UnlockMutex(is->pictq_mutex);
1181
1182             if (is->pictq_size > 1) {
1183                 VideoPicture *nextvp = &is->pictq[(is->pictq_rindex + 1) % VIDEO_PICTURE_QUEUE_SIZE];
1184                 duration = nextvp->pts - vp->pts; // more accurate this way, as 1/time_base often does not reflect the FPS
1185             } else {
1186                 duration = vp->duration;
1187             }
1188
1189             if((framedrop>0 || (framedrop && is->audio_st)) && time > is->frame_timer + duration){
1190                 if(is->pictq_size > 1){
1191                     is->frame_drops_late++;
1192                     pictq_next_picture(is);
1193                     goto retry;
1194                 }
1195             }
1196
1197             if (is->subtitle_st) {
1198                 if (is->subtitle_stream_changed) {
1199                     SDL_LockMutex(is->subpq_mutex);
1200
1201                     while (is->subpq_size) {
1202                         free_subpicture(&is->subpq[is->subpq_rindex]);
1203
1204                         /* update queue size and signal for next picture */
1205                         if (++is->subpq_rindex == SUBPICTURE_QUEUE_SIZE)
1206                             is->subpq_rindex = 0;
1207
1208                         is->subpq_size--;
1209                     }
1210                     is->subtitle_stream_changed = 0;
1211
1212                     SDL_CondSignal(is->subpq_cond);
1213                     SDL_UnlockMutex(is->subpq_mutex);
1214                 } else {
1215                     if (is->subpq_size > 0) {
1216                         sp = &is->subpq[is->subpq_rindex];
1217
1218                         if (is->subpq_size > 1)
1219                             sp2 = &is->subpq[(is->subpq_rindex + 1) % SUBPICTURE_QUEUE_SIZE];
1220                         else
1221                             sp2 = NULL;
1222
1223                         if ((is->video_current_pts > (sp->pts + ((float) sp->sub.end_display_time / 1000)))
1224                                 || (sp2 && is->video_current_pts > (sp2->pts + ((float) sp2->sub.start_display_time / 1000))))
1225                         {
1226                             free_subpicture(sp);
1227
1228                             /* update queue size and signal for next picture */
1229                             if (++is->subpq_rindex == SUBPICTURE_QUEUE_SIZE)
1230                                 is->subpq_rindex = 0;
1231
1232                             SDL_LockMutex(is->subpq_mutex);
1233                             is->subpq_size--;
1234                             SDL_CondSignal(is->subpq_cond);
1235                             SDL_UnlockMutex(is->subpq_mutex);
1236                         }
1237                     }
1238                 }
1239             }
1240
1241 display:
1242             /* display picture */
1243             if (!display_disable)
1244                 video_display(is);
1245
1246             if (!is->paused)
1247                 pictq_next_picture(is);
1248         }
1249     } else if (is->audio_st) {
1250         /* draw the next audio frame */
1251
1252         /* if only audio stream, then display the audio bars (better
1253            than nothing, just to test the implementation) */
1254
1255         /* display picture */
1256         if (!display_disable)
1257             video_display(is);
1258     }
1259     is->force_refresh = 0;
1260     if (show_status) {
1261         static int64_t last_time;
1262         int64_t cur_time;
1263         int aqsize, vqsize, sqsize;
1264         double av_diff;
1265
1266         cur_time = av_gettime();
1267         if (!last_time || (cur_time - last_time) >= 30000) {
1268             aqsize = 0;
1269             vqsize = 0;
1270             sqsize = 0;
1271             if (is->audio_st)
1272                 aqsize = is->audioq.size;
1273             if (is->video_st)
1274                 vqsize = is->videoq.size;
1275             if (is->subtitle_st)
1276                 sqsize = is->subtitleq.size;
1277             av_diff = 0;
1278             if (is->audio_st && is->video_st)
1279                 av_diff = get_audio_clock(is) - get_video_clock(is);
1280             printf("%7.2f A-V:%7.3f fd=%4d aq=%5dKB vq=%5dKB sq=%5dB f=%"PRId64"/%"PRId64"   \r",
1281                    get_master_clock(is),
1282                    av_diff,
1283                    is->frame_drops_early + is->frame_drops_late,
1284                    aqsize / 1024,
1285                    vqsize / 1024,
1286                    sqsize,
1287                    is->video_st ? is->video_st->codec->pts_correction_num_faulty_dts : 0,
1288                    is->video_st ? is->video_st->codec->pts_correction_num_faulty_pts : 0);
1289             fflush(stdout);
1290             last_time = cur_time;
1291         }
1292     }
1293 }
1294
1295 /* allocate a picture (this needs to be done in the main thread to avoid
1296    potential locking problems) */
1297 static void alloc_picture(void *opaque)
1298 {
1299     VideoState *is = opaque;
1300     VideoPicture *vp;
1301
1302     vp = &is->pictq[is->pictq_windex];
1303
1304     if (vp->bmp)
1305         SDL_FreeYUVOverlay(vp->bmp);
1306
1307 #if CONFIG_AVFILTER
1308     if (vp->picref)
1309         avfilter_unref_buffer(vp->picref);
1310     vp->picref = NULL;
1311
1312     vp->width   = is->out_video_filter->inputs[0]->w;
1313     vp->height  = is->out_video_filter->inputs[0]->h;
1314     vp->pix_fmt = is->out_video_filter->inputs[0]->format;
1315 #else
1316     vp->width   = is->video_st->codec->width;
1317     vp->height  = is->video_st->codec->height;
1318     vp->pix_fmt = is->video_st->codec->pix_fmt;
1319 #endif
1320
1321     vp->bmp = SDL_CreateYUVOverlay(vp->width, vp->height,
1322                                    SDL_YV12_OVERLAY,
1323                                    screen);
1324     if (!vp->bmp || vp->bmp->pitches[0] < vp->width) {
1325         /* SDL allocates a buffer smaller than requested if the video
1326          * overlay hardware is unable to support the requested size. */
1327         fprintf(stderr, "Error: the video system does not support an image\n"
1328                         "size of %dx%d pixels. Try using -lowres or -vf \"scale=w:h\"\n"
1329                         "to reduce the image size.\n", vp->width, vp->height );
1330         do_exit(is);
1331     }
1332
1333     SDL_LockMutex(is->pictq_mutex);
1334     vp->allocated = 1;
1335     SDL_CondSignal(is->pictq_cond);
1336     SDL_UnlockMutex(is->pictq_mutex);
1337 }
1338
1339 static int queue_picture(VideoState *is, AVFrame *src_frame, double pts1, int64_t pos)
1340 {
1341     VideoPicture *vp;
1342     double frame_delay, pts = pts1;
1343
1344     /* compute the exact PTS for the picture if it is omitted in the stream
1345      * pts1 is the dts of the pkt / pts of the frame */
1346     if (pts != 0) {
1347         /* update video clock with pts, if present */
1348         is->video_clock = pts;
1349     } else {
1350         pts = is->video_clock;
1351     }
1352     /* update video clock for next frame */
1353     frame_delay = av_q2d(is->video_st->codec->time_base);
1354     /* for MPEG2, the frame can be repeated, so we update the
1355        clock accordingly */
1356     frame_delay += src_frame->repeat_pict * (frame_delay * 0.5);
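    /* i.e. total duration = time_base * (1 + repeat_pict / 2): each repeat_pict
       unit stands for one extra displayed field (half a frame) */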
1357     is->video_clock += frame_delay;
1358
1359 #if defined(DEBUG_SYNC) && 0
1360     printf("frame_type=%c clock=%0.3f pts=%0.3f\n",
1361            av_get_picture_type_char(src_frame->pict_type), pts, pts1);
1362 #endif
1363
1364     /* wait until we have space to put a new picture */
1365     SDL_LockMutex(is->pictq_mutex);
1366
1367     while (is->pictq_size >= VIDEO_PICTURE_QUEUE_SIZE &&
1368            !is->videoq.abort_request) {
1369         SDL_CondWait(is->pictq_cond, is->pictq_mutex);
1370     }
1371     SDL_UnlockMutex(is->pictq_mutex);
1372
1373     if (is->videoq.abort_request)
1374         return -1;
1375
1376     vp = &is->pictq[is->pictq_windex];
1377
1378     vp->duration = frame_delay;
1379
1380     /* alloc or resize hardware picture buffer */
1381     if (!vp->bmp || vp->reallocate ||
1382 #if CONFIG_AVFILTER
1383         vp->width  != is->out_video_filter->inputs[0]->w ||
1384         vp->height != is->out_video_filter->inputs[0]->h) {
1385 #else
1386         vp->width != is->video_st->codec->width ||
1387         vp->height != is->video_st->codec->height) {
1388 #endif
1389         SDL_Event event;
1390
1391         vp->allocated  = 0;
1392         vp->reallocate = 0;
1393
1394         /* the allocation must be done in the main thread to avoid
1395            locking problems */
1396         event.type = FF_ALLOC_EVENT;
1397         event.user.data1 = is;
1398         SDL_PushEvent(&event);
1399
1400         /* wait until the picture is allocated */
1401         SDL_LockMutex(is->pictq_mutex);
1402         while (!vp->allocated && !is->videoq.abort_request) {
1403             SDL_CondWait(is->pictq_cond, is->pictq_mutex);
1404         }
1405         /* if the queue is aborted, we have to pop the pending ALLOC event or wait for the allocation to complete */
1406         if (is->videoq.abort_request && SDL_PeepEvents(&event, 1, SDL_GETEVENT, SDL_EVENTMASK(FF_ALLOC_EVENT)) != 1) {
1407             while (!vp->allocated) {
1408                 SDL_CondWait(is->pictq_cond, is->pictq_mutex);
1409             }
1410         }
1411         SDL_UnlockMutex(is->pictq_mutex);
1412
1413         if (is->videoq.abort_request)
1414             return -1;
1415     }
1416
1417     /* if the frame is not skipped, then display it */
1418     if (vp->bmp) {
1419         AVPicture pict = { { 0 } };
1420 #if CONFIG_AVFILTER
1421         if (vp->picref)
1422             avfilter_unref_buffer(vp->picref);
1423         vp->picref = src_frame->opaque;
1424 #endif
1425
1426         /* get a pointer on the bitmap */
1427         SDL_LockYUVOverlay (vp->bmp);
1428
1429         pict.data[0] = vp->bmp->pixels[0];
1430         pict.data[1] = vp->bmp->pixels[2];
1431         pict.data[2] = vp->bmp->pixels[1];
1432
1433         pict.linesize[0] = vp->bmp->pitches[0];
1434         pict.linesize[1] = vp->bmp->pitches[2];
1435         pict.linesize[2] = vp->bmp->pitches[1];
1436
1437 #if CONFIG_AVFILTER
1438         // FIXME use direct rendering
1439         av_picture_copy(&pict, (AVPicture *)src_frame,
1440                         vp->pix_fmt, vp->width, vp->height);
1441 #else
1442         sws_flags = av_get_int(sws_opts, "sws_flags", NULL);
1443         is->img_convert_ctx = sws_getCachedContext(is->img_convert_ctx,
1444             vp->width, vp->height, vp->pix_fmt, vp->width, vp->height,
1445             PIX_FMT_YUV420P, sws_flags, NULL, NULL, NULL);
1446         if (is->img_convert_ctx == NULL) {
1447             fprintf(stderr, "Cannot initialize the conversion context\n");
1448             exit(1);
1449         }
1450         sws_scale(is->img_convert_ctx, src_frame->data, src_frame->linesize,
1451                   0, vp->height, pict.data, pict.linesize);
1452 #endif
1453         /* update the bitmap content */
1454         SDL_UnlockYUVOverlay(vp->bmp);
1455
1456         vp->pts = pts;
1457         vp->pos = pos;
1458         vp->skip = 0;
1459
1460         /* now we can update the picture count */
1461         if (++is->pictq_windex == VIDEO_PICTURE_QUEUE_SIZE)
1462             is->pictq_windex = 0;
1463         SDL_LockMutex(is->pictq_mutex);
1464         is->pictq_size++;
1465         SDL_UnlockMutex(is->pictq_mutex);
1466     }
1467     return 0;
1468 }
1469
1470 static int get_video_frame(VideoState *is, AVFrame *frame, int64_t *pts, AVPacket *pkt)
1471 {
1472     int got_picture, i;
1473
1474     if (packet_queue_get(&is->videoq, pkt, 1) < 0)
1475         return -1;
1476
1477     if (pkt->data == flush_pkt.data) {
1478         avcodec_flush_buffers(is->video_st->codec);
1479
1480         SDL_LockMutex(is->pictq_mutex);
1481         // Make sure there are no long delay timers (ideally we should just flush the queue, but that's harder)
1482         for (i = 0; i < VIDEO_PICTURE_QUEUE_SIZE; i++) {
1483             is->pictq[i].skip = 1;
1484         }
1485         while (is->pictq_size && !is->videoq.abort_request) {
1486             SDL_CondWait(is->pictq_cond, is->pictq_mutex);
1487         }
1488         is->video_current_pos = -1;
1489         is->frame_last_pts = AV_NOPTS_VALUE;
1490         is->frame_last_duration = 0;
1491         is->frame_timer = (double)av_gettime() / 1000000.0;
1492         is->frame_last_dropped_pts = AV_NOPTS_VALUE;
1493         SDL_UnlockMutex(is->pictq_mutex);
1494
1495         return 0;
1496     }
1497
1498     avcodec_decode_video2(is->video_st->codec, frame, &got_picture, pkt);
1499
1500     if (got_picture) {
1501         int ret = 1;
1502
1503         if (decoder_reorder_pts == -1) {
1504             *pts = *(int64_t*)av_opt_ptr(avcodec_get_frame_class(), frame, "best_effort_timestamp");
1505         } else if (decoder_reorder_pts) {
1506             *pts = frame->pkt_pts;
1507         } else {
1508             *pts = frame->pkt_dts;
1509         }
1510
1511         if (*pts == AV_NOPTS_VALUE) {
1512             *pts = 0;
1513         }
1514
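        /* early frame drop: when video is slaved to another clock and decoding
           lags, discard the frame here (before the costly convert/upload) if its
           predicted display time, corrected by the last filter delay, would
           already be behind the master clock */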
1515         if (((is->av_sync_type == AV_SYNC_AUDIO_MASTER && is->audio_st) || is->av_sync_type == AV_SYNC_EXTERNAL_CLOCK) &&
1516              (framedrop>0 || (framedrop && is->audio_st))) {
1517             SDL_LockMutex(is->pictq_mutex);
1518             if (is->frame_last_pts != AV_NOPTS_VALUE && *pts) {
1519                 double clockdiff = get_video_clock(is) - get_master_clock(is);
1520                 double dpts = av_q2d(is->video_st->time_base) * *pts;
1521                 double ptsdiff = dpts - is->frame_last_pts;
1522                 if (fabs(clockdiff) < AV_NOSYNC_THRESHOLD &&
1523                      ptsdiff > 0 && ptsdiff < AV_NOSYNC_THRESHOLD &&
1524                      clockdiff + ptsdiff - is->frame_last_filter_delay < 0) {
1525                     is->frame_last_dropped_pos = pkt->pos;
1526                     is->frame_last_dropped_pts = dpts;
1527                     is->frame_drops_early++;
1528                     ret = 0;
1529                 }
1530             }
1531             SDL_UnlockMutex(is->pictq_mutex);
1532         }
1533
1534         if (ret)
1535             is->frame_last_returned_time = av_gettime() / 1000000.0;
1536
1537         return ret;
1538     }
1539     return 0;
1540 }
1541
1542 #if CONFIG_AVFILTER
1543 typedef struct {
1544     VideoState *is;
1545     AVFrame *frame;
1546     int use_dr1;
1547 } FilterPriv;
1548
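/* Direct rendering for the filter-graph input: the decoder writes straight into
   a buffer obtained from the output link (avfilter_get_video_buffer), so frames
   enter the graph without an extra copy; the buffer is over-allocated by the
   codec edge width and the data pointers are offset past that padding. */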
1549 static int input_get_buffer(AVCodecContext *codec, AVFrame *pic)
1550 {
1551     AVFilterContext *ctx = codec->opaque;
1552     AVFilterBufferRef  *ref;
1553     int perms = AV_PERM_WRITE;
1554     int i, w, h, stride[AV_NUM_DATA_POINTERS];
1555     unsigned edge;
1556     int pixel_size;
1557
1558     av_assert0(codec->flags & CODEC_FLAG_EMU_EDGE);
1559
1560     if (codec->codec->capabilities & CODEC_CAP_NEG_LINESIZES)
1561         perms |= AV_PERM_NEG_LINESIZES;
1562
1563     if (pic->buffer_hints & FF_BUFFER_HINTS_VALID) {
1564         if (pic->buffer_hints & FF_BUFFER_HINTS_READABLE) perms |= AV_PERM_READ;
1565         if (pic->buffer_hints & FF_BUFFER_HINTS_PRESERVE) perms |= AV_PERM_PRESERVE;
1566         if (pic->buffer_hints & FF_BUFFER_HINTS_REUSABLE) perms |= AV_PERM_REUSE2;
1567     }
1568     if (pic->reference) perms |= AV_PERM_READ | AV_PERM_PRESERVE;
1569
1570     w = codec->width;
1571     h = codec->height;
1572
1573     if(av_image_check_size(w, h, 0, codec) || codec->pix_fmt<0)
1574         return -1;
1575
1576     avcodec_align_dimensions2(codec, &w, &h, stride);
1577     edge = codec->flags & CODEC_FLAG_EMU_EDGE ? 0 : avcodec_get_edge_width();
1578     w += edge << 1;
1579     h += edge << 1;
1580     if (codec->pix_fmt != ctx->outputs[0]->format) {
1581         av_log(codec, AV_LOG_ERROR, "Pixel format mismatches %d %d\n", codec->pix_fmt, ctx->outputs[0]->format);
1582         return -1;
1583     }
1584     if (!(ref = avfilter_get_video_buffer(ctx->outputs[0], perms, w, h)))
1585         return -1;
1586
1587     pixel_size = av_pix_fmt_descriptors[ref->format].comp[0].step_minus1 + 1;
1588     ref->video->w = codec->width;
1589     ref->video->h = codec->height;
1590     for (i = 0; i < 4; i ++) {
1591         unsigned hshift = (i == 1 || i == 2) ? av_pix_fmt_descriptors[ref->format].log2_chroma_w : 0;
1592         unsigned vshift = (i == 1 || i == 2) ? av_pix_fmt_descriptors[ref->format].log2_chroma_h : 0;
1593
1594         if (ref->data[i]) {
1595             ref->data[i]    += ((edge * pixel_size) >> hshift) + ((edge * ref->linesize[i]) >> vshift);
1596         }
1597         pic->data[i]     = ref->data[i];
1598         pic->linesize[i] = ref->linesize[i];
1599     }
1600     pic->opaque = ref;
1601     pic->type   = FF_BUFFER_TYPE_USER;
1602     pic->reordered_opaque = codec->reordered_opaque;
1603     pic->width               = codec->width;
1604     pic->height              = codec->height;
1605     pic->format              = codec->pix_fmt;
1606     pic->sample_aspect_ratio = codec->sample_aspect_ratio;
1607     if (codec->pkt) pic->pkt_pts = codec->pkt->pts;
1608     else            pic->pkt_pts = AV_NOPTS_VALUE;
1609     return 0;
1610 }
1611
1612 static void input_release_buffer(AVCodecContext *codec, AVFrame *pic)
1613 {
1614     memset(pic->data, 0, sizeof(pic->data));
1615     avfilter_unref_buffer(pic->opaque);
1616 }
1617
1618 static int input_reget_buffer(AVCodecContext *codec, AVFrame *pic)
1619 {
1620     AVFilterBufferRef *ref = pic->opaque;
1621
1622     if (pic->data[0] == NULL) {
1623         pic->buffer_hints |= FF_BUFFER_HINTS_READABLE;
1624         return codec->get_buffer(codec, pic);
1625     }
1626
1627     if ((codec->width != ref->video->w) || (codec->height != ref->video->h) ||
1628         (codec->pix_fmt != ref->format)) {
1629         av_log(codec, AV_LOG_ERROR, "Picture properties changed.\n");
1630         return -1;
1631     }
1632
1633     pic->reordered_opaque = codec->reordered_opaque;
1634     if (codec->pkt) pic->pkt_pts = codec->pkt->pts;
1635     else            pic->pkt_pts = AV_NOPTS_VALUE;
1636     return 0;
1637 }
1638
1639 static int input_init(AVFilterContext *ctx, const char *args, void *opaque)
1640 {
1641     FilterPriv *priv = ctx->priv;
1642     AVCodecContext *codec;
1643     if (!opaque) return -1;
1644
1645     priv->is = opaque;
1646     codec    = priv->is->video_st->codec;
1647     codec->opaque = ctx;
1648     if (codec->codec->capabilities & CODEC_CAP_DR1) {
1649         av_assert0(codec->flags & CODEC_FLAG_EMU_EDGE);
1650         priv->use_dr1 = 1;
1651         codec->get_buffer     = input_get_buffer;
1652         codec->release_buffer = input_release_buffer;
1653         codec->reget_buffer   = input_reget_buffer;
1654         codec->thread_safe_callbacks = 1;
1655     }
1656
1657     priv->frame = avcodec_alloc_frame();
1658
1659     return 0;
1660 }
1661
1662 static void input_uninit(AVFilterContext *ctx)
1663 {
1664     FilterPriv *priv = ctx->priv;
1665     av_free(priv->frame);
1666 }
1667
1668 static int input_request_frame(AVFilterLink *link)
1669 {
1670     FilterPriv *priv = link->src->priv;
1671     AVFilterBufferRef *picref;
1672     int64_t pts = 0;
1673     AVPacket pkt;
1674     int ret;
1675
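         /* decode packets until get_video_frame() returns a frame (ret > 0) or an
            error; packets that produced no frame are freed inside the loop */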
1676     while (!(ret = get_video_frame(priv->is, priv->frame, &pts, &pkt)))
1677         av_free_packet(&pkt);
1678     if (ret < 0)
1679         return -1;
1680
1681     if (priv->use_dr1 && priv->frame->opaque) {
1682         picref = avfilter_ref_buffer(priv->frame->opaque, ~0);
1683     } else {
1684         picref = avfilter_get_video_buffer(link, AV_PERM_WRITE, link->w, link->h);
1685         av_image_copy(picref->data, picref->linesize,
1686                       (const uint8_t **)(void **)priv->frame->data, priv->frame->linesize,
1687                       picref->format, link->w, link->h);
1688     }
1689     av_free_packet(&pkt);
1690
1691     avfilter_copy_frame_props(picref, priv->frame);
1692     picref->pts = pts;
1693
1694     avfilter_start_frame(link, picref);
1695     avfilter_draw_slice(link, 0, link->h, 1);
1696     avfilter_end_frame(link);
1697
1698     return 0;
1699 }
1700
1701 static int input_query_formats(AVFilterContext *ctx)
1702 {
1703     FilterPriv *priv = ctx->priv;
1704     enum PixelFormat pix_fmts[] = {
1705         priv->is->video_st->codec->pix_fmt, PIX_FMT_NONE
1706     };
1707
1708     avfilter_set_common_pixel_formats(ctx, avfilter_make_format_list(pix_fmts));
1709     return 0;
1710 }
1711
1712 static int input_config_props(AVFilterLink *link)
1713 {
1714     FilterPriv *priv  = link->src->priv;
1715     AVStream *s = priv->is->video_st;
1716
1717     link->w = s->codec->width;
1718     link->h = s->codec->height;
1719     link->sample_aspect_ratio = s->sample_aspect_ratio.num ?
1720         s->sample_aspect_ratio : s->codec->sample_aspect_ratio;
1721     link->time_base = s->time_base;
1722
1723     return 0;
1724 }
1725
1726 static AVFilter input_filter =
1727 {
1728     .name      = "ffplay_input",
1729
1730     .priv_size = sizeof(FilterPriv),
1731
1732     .init      = input_init,
1733     .uninit    = input_uninit,
1734
1735     .query_formats = input_query_formats,
1736
1737     .inputs    = (AVFilterPad[]) {{ .name = NULL }},
1738     .outputs   = (AVFilterPad[]) {{ .name = "default",
1739                                     .type = AVMEDIA_TYPE_VIDEO,
1740                                     .request_frame = input_request_frame,
1741                                     .config_props  = input_config_props, },
1742                                   { .name = NULL }},
1743 };
1744
1745 static int configure_video_filters(AVFilterGraph *graph, VideoState *is, const char *vfilters)
1746 {
1747     static const enum PixelFormat pix_fmts[] = { PIX_FMT_YUV420P, PIX_FMT_NONE };
1748     char sws_flags_str[128];
1749     int ret;
1750     AVBufferSinkParams *buffersink_params = av_buffersink_params_alloc();
1751     AVFilterContext *filt_src = NULL, *filt_out = NULL;
1752     snprintf(sws_flags_str, sizeof(sws_flags_str), "flags=%d", sws_flags);
1753     graph->scale_sws_opts = av_strdup(sws_flags_str);
1754
1755     if ((ret = avfilter_graph_create_filter(&filt_src, &input_filter, "src",
1756                                             NULL, is, graph)) < 0)
1757         return ret;
1758
1759 #if FF_API_OLD_VSINK_API
1760     ret = avfilter_graph_create_filter(&filt_out, avfilter_get_by_name("buffersink"), "out",
1761                                        NULL, pix_fmts, graph);
1762 #else
1763     buffersink_params->pixel_fmts = pix_fmts;
1764     ret = avfilter_graph_create_filter(&filt_out, avfilter_get_by_name("buffersink"), "out",
1765                                        NULL, buffersink_params, graph);
1766 #endif
1767     av_freep(&buffersink_params);
1768     if (ret < 0)
1769         return ret;
1770
1771     if (vfilters) {
1772         AVFilterInOut *outputs = avfilter_inout_alloc();
1773         AVFilterInOut *inputs  = avfilter_inout_alloc();
1774
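             /* label our source filter's output "[in]" and the buffersink's input
                "[out]" so the user-supplied filter string can connect to them */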
1775         outputs->name    = av_strdup("in");
1776         outputs->filter_ctx = filt_src;
1777         outputs->pad_idx = 0;
1778         outputs->next    = NULL;
1779
1780         inputs->name    = av_strdup("out");
1781         inputs->filter_ctx = filt_out;
1782         inputs->pad_idx = 0;
1783         inputs->next    = NULL;
1784
1785         if ((ret = avfilter_graph_parse(graph, vfilters, &inputs, &outputs, NULL)) < 0)
1786             return ret;
1787     } else {
1788         if ((ret = avfilter_link(filt_src, 0, filt_out, 0)) < 0)
1789             return ret;
1790     }
1791
1792     if ((ret = avfilter_graph_config(graph, NULL)) < 0)
1793         return ret;
1794
1795     is->out_video_filter = filt_out;
1796
1797     return ret;
1798 }
1799
1800 #endif  /* CONFIG_AVFILTER */
1801
1802 static int video_thread(void *arg)
1803 {
1804     VideoState *is = arg;
1805     AVFrame *frame = avcodec_alloc_frame();
1806     int64_t pts_int = AV_NOPTS_VALUE, pos = -1;
1807     double pts;
1808     int ret;
1809
1810 #if CONFIG_AVFILTER
1811     AVFilterGraph *graph = avfilter_graph_alloc();
1812     AVFilterContext *filt_out = NULL;
1813     int last_w = is->video_st->codec->width;
1814     int last_h = is->video_st->codec->height;
1815
1816     if ((ret = configure_video_filters(graph, is, vfilters)) < 0)
1817         goto the_end;
1818     filt_out = is->out_video_filter;
1819 #endif
1820
1821     for (;;) {
1822 #if !CONFIG_AVFILTER
1823         AVPacket pkt;
1824 #else
1825         AVFilterBufferRef *picref;
1826         AVRational tb = filt_out->inputs[0]->time_base;
1827 #endif
1828         while (is->paused && !is->videoq.abort_request)
1829             SDL_Delay(10);
1830 #if CONFIG_AVFILTER
1831         if (   last_w != is->video_st->codec->width
1832             || last_h != is->video_st->codec->height) {
1833             av_log(NULL, AV_LOG_INFO, "Frame changed from size:%dx%d to size:%dx%d\n",
1834                    last_w, last_h, is->video_st->codec->width, is->video_st->codec->height);
1835             avfilter_graph_free(&graph);
1836             graph = avfilter_graph_alloc();
1837             if ((ret = configure_video_filters(graph, is, vfilters)) < 0)
1838                 goto the_end;
1839             filt_out = is->out_video_filter;
1840             last_w = is->video_st->codec->width;
1841             last_h = is->video_st->codec->height;
1842         }
1843         ret = av_buffersink_get_buffer_ref(filt_out, &picref, 0);
1844         if (picref) {
1845             avfilter_fill_frame_from_video_buffer_ref(frame, picref);
1846             pts_int = picref->pts;
1847             pos     = picref->pos;
1848             frame->opaque = picref;
1849         }
1850
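             /* the filter graph may run in a different time base than the stream,
                so rescale the pts before the frame is queued for display */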
1851         if (ret >= 0 && av_cmp_q(tb, is->video_st->time_base)) {
1852             av_unused int64_t pts1 = pts_int;
1853             pts_int = av_rescale_q(pts_int, tb, is->video_st->time_base);
1854             av_dlog(NULL, "video_thread(): "
1855                     "tb:%d/%d pts:%"PRId64" -> tb:%d/%d pts:%"PRId64"\n",
1856                     tb.num, tb.den, pts1,
1857                     is->video_st->time_base.num, is->video_st->time_base.den, pts_int);
1858         }
1859 #else
1860         ret = get_video_frame(is, frame, &pts_int, &pkt);
1861         pos = pkt.pos;
1862         av_free_packet(&pkt);
1863 #endif
1864
1865         if (ret < 0)
1866             goto the_end;
1867
1868         is->frame_last_filter_delay = av_gettime() / 1000000.0 - is->frame_last_returned_time;
1869         if (fabs(is->frame_last_filter_delay) > AV_NOSYNC_THRESHOLD / 10.0)
1870             is->frame_last_filter_delay = 0;
1871
1872 #if CONFIG_AVFILTER
1873         if (!picref)
1874             continue;
1875 #endif
1876
1877         pts = pts_int * av_q2d(is->video_st->time_base);
1878
1879         ret = queue_picture(is, frame, pts, pos);
1880
1881         if (ret < 0)
1882             goto the_end;
1883
1884         if (is->step)
1885             stream_toggle_pause(is);
1886     }
1887  the_end:
1888 #if CONFIG_AVFILTER
1889     av_freep(&vfilters);
1890     avfilter_graph_free(&graph);
1891 #endif
1892     av_free(frame);
1893     return 0;
1894 }
1895
1896 static int subtitle_thread(void *arg)
1897 {
1898     VideoState *is = arg;
1899     SubPicture *sp;
1900     AVPacket pkt1, *pkt = &pkt1;
1901     int got_subtitle;
1902     double pts;
1903     int i, j;
1904     int r, g, b, y, u, v, a;
1905
1906     for (;;) {
1907         while (is->paused && !is->subtitleq.abort_request) {
1908             SDL_Delay(10);
1909         }
1910         if (packet_queue_get(&is->subtitleq, pkt, 1) < 0)
1911             break;
1912
1913         if (pkt->data == flush_pkt.data) {
1914             avcodec_flush_buffers(is->subtitle_st->codec);
1915             continue;
1916         }
1917         SDL_LockMutex(is->subpq_mutex);
1918         while (is->subpq_size >= SUBPICTURE_QUEUE_SIZE &&
1919                !is->subtitleq.abort_request) {
1920             SDL_CondWait(is->subpq_cond, is->subpq_mutex);
1921         }
1922         SDL_UnlockMutex(is->subpq_mutex);
1923
1924         if (is->subtitleq.abort_request)
1925             return 0;
1926
1927         sp = &is->subpq[is->subpq_windex];
1928
1929         /* NOTE: pts is the PTS of the _first_ picture beginning in
1930            this packet, if any */
1931         pts = 0;
1932         if (pkt->pts != AV_NOPTS_VALUE)
1933             pts = av_q2d(is->subtitle_st->time_base) * pkt->pts;
1934
1935         avcodec_decode_subtitle2(is->subtitle_st->codec, &sp->sub,
1936                                  &got_subtitle, pkt);
1937
1938         if (got_subtitle && sp->sub.format == 0) {
1939             sp->pts = pts;
1940
1941             for (i = 0; i < sp->sub.num_rects; i++)
1942             {
1943                 for (j = 0; j < sp->sub.rects[i]->nb_colors; j++)
1944                 {
1945                     RGBA_IN(r, g, b, a, (uint32_t*)sp->sub.rects[i]->pict.data[1] + j);
1946                     y = RGB_TO_Y_CCIR(r, g, b);
1947                     u = RGB_TO_U_CCIR(r, g, b, 0);
1948                     v = RGB_TO_V_CCIR(r, g, b, 0);
1949                     YUVA_OUT((uint32_t*)sp->sub.rects[i]->pict.data[1] + j, y, u, v, a);
1950                 }
1951             }
1952
1953             /* now we can update the picture count */
1954             if (++is->subpq_windex == SUBPICTURE_QUEUE_SIZE)
1955                 is->subpq_windex = 0;
1956             SDL_LockMutex(is->subpq_mutex);
1957             is->subpq_size++;
1958             SDL_UnlockMutex(is->subpq_mutex);
1959         }
1960         av_free_packet(pkt);
1961     }
1962     return 0;
1963 }
1964
1965 /* copy samples for viewing in the audio display window */
1966 static void update_sample_display(VideoState *is, short *samples, int samples_size)
1967 {
1968     int size, len;
1969
1970     size = samples_size / sizeof(short);
1971     while (size > 0) {
1972         len = SAMPLE_ARRAY_SIZE - is->sample_array_index;
1973         if (len > size)
1974             len = size;
1975         memcpy(is->sample_array + is->sample_array_index, samples, len * sizeof(short));
1976         samples += len;
1977         is->sample_array_index += len;
1978         if (is->sample_array_index >= SAMPLE_ARRAY_SIZE)
1979             is->sample_array_index = 0;
1980         size -= len;
1981     }
1982 }
1983
1984 /* return the wanted number of samples to get better sync if sync_type is video
1985  * or external master clock */
1986 static int synchronize_audio(VideoState *is, int nb_samples)
1987 {
1988     int wanted_nb_samples = nb_samples;
1989
1990     /* if not master, then we try to remove or add samples to correct the clock */
1991     if (((is->av_sync_type == AV_SYNC_VIDEO_MASTER && is->video_st) ||
1992          is->av_sync_type == AV_SYNC_EXTERNAL_CLOCK)) {
1993         double diff, avg_diff;
1994         int min_nb_samples, max_nb_samples;
1995
1996         diff = get_audio_clock(is) - get_master_clock(is);
1997
1998         if (diff < AV_NOSYNC_THRESHOLD) {
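                 /* keep an exponentially weighted running sum of the A-V differences;
                    its weighted average is recovered below as cum * (1 - coef) */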
1999             is->audio_diff_cum = diff + is->audio_diff_avg_coef * is->audio_diff_cum;
2000             if (is->audio_diff_avg_count < AUDIO_DIFF_AVG_NB) {
2001                 /* not enough measurements yet for a reliable estimate */
2002                 is->audio_diff_avg_count++;
2003             } else {
2004                 /* estimate the A-V difference */
2005                 avg_diff = is->audio_diff_cum * (1.0 - is->audio_diff_avg_coef);
2006
2007                 if (fabs(avg_diff) >= is->audio_diff_threshold) {
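                         /* correct by "diff" seconds worth of input-rate samples, then
                            clamp the change to +/- SAMPLE_CORRECTION_PERCENT_MAX percent */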
2008                     wanted_nb_samples = nb_samples + (int)(diff * is->audio_src_freq);
2009                     min_nb_samples = ((nb_samples * (100 - SAMPLE_CORRECTION_PERCENT_MAX) / 100));
2010                     max_nb_samples = ((nb_samples * (100 + SAMPLE_CORRECTION_PERCENT_MAX) / 100));
2011                     wanted_nb_samples = FFMIN(FFMAX(wanted_nb_samples, min_nb_samples), max_nb_samples);
2012                 }
2013                 av_dlog(NULL, "diff=%f adiff=%f sample_diff=%d apts=%0.3f vpts=%0.3f %f\n",
2014                         diff, avg_diff, wanted_nb_samples - nb_samples,
2015                         is->audio_clock, is->video_clock, is->audio_diff_threshold);
2016             }
2017         } else {
2018             /* the difference is too big: probably initial PTS errors, so
2019                reset the A-V filter */
2020             is->audio_diff_avg_count = 0;
2021             is->audio_diff_cum       = 0;
2022         }
2023     }
2024
2025     return wanted_nb_samples;
2026 }
2027
2028 /* decode one audio frame and return its uncompressed size */
2029 static int audio_decode_frame(VideoState *is, double *pts_ptr)
2030 {
2031     AVPacket *pkt_temp = &is->audio_pkt_temp;
2032     AVPacket *pkt = &is->audio_pkt;
2033     AVCodecContext *dec = is->audio_st->codec;
2034     int len1, len2, data_size, resampled_data_size;
2035     int64_t dec_channel_layout;
2036     int got_frame;
2037     double pts;
2038     int new_packet = 0;
2039     int flush_complete = 0;
2040     int wanted_nb_samples;
2041
2042     for (;;) {
2043         /* NOTE: the audio packet can contain several frames */
2044         while (pkt_temp->size > 0 || (!pkt_temp->data && new_packet)) {
2045             if (!is->frame) {
2046                 if (!(is->frame = avcodec_alloc_frame()))
2047                     return AVERROR(ENOMEM);
2048             } else
2049                 avcodec_get_frame_defaults(is->frame);
2050
2051             if (flush_complete)
2052                 break;
2053             new_packet = 0;
2054             len1 = avcodec_decode_audio4(dec, is->frame, &got_frame, pkt_temp);
2055             if (len1 < 0) {
2056                 /* if error, we skip the frame */
2057                 pkt_temp->size = 0;
2058                 break;
2059             }
2060
2061             pkt_temp->data += len1;
2062             pkt_temp->size -= len1;
2063
2064             if (!got_frame) {
2065                 /* stop sending empty packets if the decoder is finished */
2066                 if (!pkt_temp->data && dec->codec->capabilities & CODEC_CAP_DELAY)
2067                     flush_complete = 1;
2068                 continue;
2069             }
2070             data_size = av_samples_get_buffer_size(NULL, dec->channels,
2071                                                    is->frame->nb_samples,
2072                                                    dec->sample_fmt, 1);
2073
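                 /* trust the decoder's channel layout only if it matches its channel
                    count; otherwise fall back to the default layout for that count */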
2074             dec_channel_layout = (dec->channel_layout && dec->channels == av_get_channel_layout_nb_channels(dec->channel_layout)) ? dec->channel_layout : av_get_default_channel_layout(dec->channels);
2075             wanted_nb_samples = synchronize_audio(is, is->frame->nb_samples);
2076
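                 /* (re)build the resampler whenever the source format, layout or rate
                    changes, or when sample compensation is needed but no swr context
                    exists yet */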
2077             if (dec->sample_fmt != is->audio_src_fmt ||
2078                 dec_channel_layout != is->audio_src_channel_layout ||
2079                 dec->sample_rate != is->audio_src_freq ||
2080                 (wanted_nb_samples != is->frame->nb_samples && !is->swr_ctx)) {
2081                 if (is->swr_ctx)
2082                     swr_free(&is->swr_ctx);
2083                 is->swr_ctx = swr_alloc_set_opts(NULL,
2084                                                  is->audio_tgt_channel_layout, is->audio_tgt_fmt, is->audio_tgt_freq,
2085                                                  dec_channel_layout,           dec->sample_fmt,   dec->sample_rate,
2086                                                  0, NULL);
2087                 if (!is->swr_ctx || swr_init(is->swr_ctx) < 0) {
2088                     fprintf(stderr, "Cannot create sample rate converter for conversion of %d Hz %s %d channels to %d Hz %s %d channels!\n",
2089                         dec->sample_rate,
2090                         av_get_sample_fmt_name(dec->sample_fmt),
2091                         dec->channels,
2092                         is->audio_tgt_freq,
2093                         av_get_sample_fmt_name(is->audio_tgt_fmt),
2094                         is->audio_tgt_channels);
2095                     break;
2096                 }
2097                 is->audio_src_channel_layout = dec_channel_layout;
2098                 is->audio_src_channels = dec->channels;
2099                 is->audio_src_freq = dec->sample_rate;
2100                 is->audio_src_fmt = dec->sample_fmt;
2101             }
2102
2103             resampled_data_size = data_size;
2104             if (is->swr_ctx) {
2105                 const uint8_t *in[] = { is->frame->data[0] };
2106                 uint8_t *out[] = {is->audio_buf2};
2107                 if (wanted_nb_samples != is->frame->nb_samples) {
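                         /* ask swresample to add or drop the sample difference (rescaled
                            to the output rate), spread over the converted output samples */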
2108                     if (swr_set_compensation(is->swr_ctx, (wanted_nb_samples - is->frame->nb_samples) * is->audio_tgt_freq / dec->sample_rate,
2109                                                 wanted_nb_samples * is->audio_tgt_freq / dec->sample_rate) < 0) {
2110                         fprintf(stderr, "swr_set_compensation() failed\n");
2111                         break;
2112                     }
2113                 }
2114                 len2 = swr_convert(is->swr_ctx, out, sizeof(is->audio_buf2) / is->audio_tgt_channels / av_get_bytes_per_sample(is->audio_tgt_fmt),
2115                                                 in, is->frame->nb_samples);
2116                 if (len2 < 0) {
2117                     fprintf(stderr, "audio_resample() failed\n");
2118                     break;
2119                 }
2120                 if (len2 == sizeof(is->audio_buf2) / is->audio_tgt_channels / av_get_bytes_per_sample(is->audio_tgt_fmt)) {
2121                     fprintf(stderr, "warning: audio buffer is probably too small\n");
2122                     swr_init(is->swr_ctx);
2123                 }
2124                 is->audio_buf = is->audio_buf2;
2125                 resampled_data_size = len2 * is->audio_tgt_channels * av_get_bytes_per_sample(is->audio_tgt_fmt);
2126             } else {
2127                 is->audio_buf = is->frame->data[0];
2128             }
2129
2130             /* take this frame's pts from the running audio clock */
2131             pts = is->audio_clock;
2132             *pts_ptr = pts;
2133             is->audio_clock += (double)data_size / (dec->channels * dec->sample_rate * av_get_bytes_per_sample(dec->sample_fmt));
2134 #ifdef DEBUG
2135             {
2136                 static double last_clock;
2137                 printf("audio: delay=%0.3f clock=%0.3f pts=%0.3f\n",
2138                        is->audio_clock - last_clock,
2139                        is->audio_clock, pts);
2140                 last_clock = is->audio_clock;
2141             }
2142 #endif
2143             return resampled_data_size;
2144         }
2145
2146         /* free the current packet */
2147         if (pkt->data)
2148             av_free_packet(pkt);
2149         memset(pkt_temp, 0, sizeof(*pkt_temp));
2150
2151         if (is->paused || is->audioq.abort_request) {
2152             return -1;
2153         }
2154
2155         /* read next packet */
2156         if ((new_packet = packet_queue_get(&is->audioq, pkt, 1)) < 0)
2157             return -1;
2158
2159         if (pkt->data == flush_pkt.data) {
2160             avcodec_flush_buffers(dec);
2161             flush_complete = 0;
2162         }
2163
2164         *pkt_temp = *pkt;
2165
2166         /* update the audio clock with the packet pts, if available */
2167         if (pkt->pts != AV_NOPTS_VALUE) {
2168             is->audio_clock = av_q2d(is->audio_st->time_base)*pkt->pts;
2169         }
2170     }
2171 }
2172
2173 /* prepare a new audio buffer */
2174 static void sdl_audio_callback(void *opaque, Uint8 *stream, int len)
2175 {
2176     VideoState *is = opaque;
2177     int audio_size, len1;
2178     int bytes_per_sec;
2179     int frame_size = av_samples_get_buffer_size(NULL, is->audio_tgt_channels, 1, is->audio_tgt_fmt, 1);
2180     double pts;
2181
2182     audio_callback_time = av_gettime();
2183
2184     while (len > 0) {
2185         if (is->audio_buf_index >= is->audio_buf_size) {
2186             audio_size = audio_decode_frame(is, &pts);
2187             if (audio_size < 0) {
2188                 /* if error, just output silence */
2189                 is->audio_buf      = is->silence_buf;
2190                 is->audio_buf_size = sizeof(is->silence_buf) / frame_size * frame_size;
2191             } else {
2192                 if (is->show_mode != SHOW_MODE_VIDEO)
2193                     update_sample_display(is, (int16_t *)is->audio_buf, audio_size);
2194                 is->audio_buf_size = audio_size;
2195             }
2196             is->audio_buf_index = 0;
2197         }
2198         len1 = is->audio_buf_size - is->audio_buf_index;
2199         if (len1 > len)
2200             len1 = len;
2201         memcpy(stream, (uint8_t *)is->audio_buf + is->audio_buf_index, len1);
2202         len -= len1;
2203         stream += len1;
2204         is->audio_buf_index += len1;
2205     }
2206     bytes_per_sec = is->audio_tgt_freq * is->audio_tgt_channels * av_get_bytes_per_sample(is->audio_tgt_fmt);
2207     is->audio_write_buf_size = is->audio_buf_size - is->audio_buf_index;
2208     /* Let's assume the audio driver that is used by SDL has two periods. */
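         /* audio_current_pts_drift (set below) lets get_audio_clock() later
            extrapolate the audio clock from the current wall-clock time */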
2209     is->audio_current_pts = is->audio_clock - (double)(2 * is->audio_hw_buf_size + is->audio_write_buf_size) / bytes_per_sec;
2210     is->audio_current_pts_drift = is->audio_current_pts - audio_callback_time / 1000000.0;
2211 }
2212
2213 /* open a given stream. Return 0 if OK */
2214 static int stream_component_open(VideoState *is, int stream_index)
2215 {
2216     AVFormatContext *ic = is->ic;
2217     AVCodecContext *avctx;
2218     AVCodec *codec;
2219     SDL_AudioSpec wanted_spec, spec;
2220     AVDictionary *opts;
2221     AVDictionaryEntry *t = NULL;
2222     int64_t wanted_channel_layout = 0;
2223     int wanted_nb_channels;
2224     const char *env;
2225
2226     if (stream_index < 0 || stream_index >= ic->nb_streams)
2227         return -1;
2228     avctx = ic->streams[stream_index]->codec;
2229
2230     codec = avcodec_find_decoder(avctx->codec_id);
2231     opts = filter_codec_opts(codec_opts, codec, ic, ic->streams[stream_index]);
2232
2233     switch(avctx->codec_type){
2234         case AVMEDIA_TYPE_AUDIO   : if(audio_codec_name   ) codec= avcodec_find_decoder_by_name(   audio_codec_name); break;
2235         case AVMEDIA_TYPE_SUBTITLE: if(subtitle_codec_name) codec= avcodec_find_decoder_by_name(subtitle_codec_name); break;
2236         case AVMEDIA_TYPE_VIDEO   : if(video_codec_name   ) codec= avcodec_find_decoder_by_name(   video_codec_name); break;
2237     }
2238     if (!codec)
2239         return -1;
2240
2241     avctx->workaround_bugs   = workaround_bugs;
2242     avctx->lowres            = lowres;
2243     if(avctx->lowres > codec->max_lowres){
2244         av_log(avctx, AV_LOG_WARNING, "The maximum value for lowres supported by the decoder is %d\n",
2245                 codec->max_lowres);
2246         avctx->lowres= codec->max_lowres;
2247     }
2248     avctx->idct_algo         = idct;
2249     avctx->skip_frame        = skip_frame;
2250     avctx->skip_idct         = skip_idct;
2251     avctx->skip_loop_filter  = skip_loop_filter;
2252     avctx->error_concealment = error_concealment;
2253
2254     if(avctx->lowres) avctx->flags |= CODEC_FLAG_EMU_EDGE;
2255     if (fast)   avctx->flags2 |= CODEC_FLAG2_FAST;
2256     if(codec->capabilities & CODEC_CAP_DR1)
2257         avctx->flags |= CODEC_FLAG_EMU_EDGE;
2258
2259     if (avctx->codec_type == AVMEDIA_TYPE_AUDIO) {
2260         memset(&is->audio_pkt_temp, 0, sizeof(is->audio_pkt_temp));
2261         env = SDL_getenv("SDL_AUDIO_CHANNELS");
2262         if (env)
2263             wanted_channel_layout = av_get_default_channel_layout(SDL_atoi(env));
2264         if (!wanted_channel_layout) {
2265             wanted_channel_layout = (avctx->channel_layout && avctx->channels == av_get_channel_layout_nb_channels(avctx->channel_layout)) ? avctx->channel_layout : av_get_default_channel_layout(avctx->channels);
2266             wanted_channel_layout &= ~AV_CH_LAYOUT_STEREO_DOWNMIX;
2267             wanted_nb_channels = av_get_channel_layout_nb_channels(wanted_channel_layout);
2268             /* SDL only supports 1, 2, 4 or 6 channels at the moment, so we have to make sure not to request anything else. */
2269             while (wanted_nb_channels > 0 && (wanted_nb_channels == 3 || wanted_nb_channels == 5 || wanted_nb_channels > 6)) {
2270                 wanted_nb_channels--;
2271                 wanted_channel_layout = av_get_default_channel_layout(wanted_nb_channels);
2272             }
2273         }
2274         wanted_spec.channels = av_get_channel_layout_nb_channels(wanted_channel_layout);
2275         wanted_spec.freq = avctx->sample_rate;
2276         if (wanted_spec.freq <= 0 || wanted_spec.channels <= 0) {
2277             fprintf(stderr, "Invalid sample rate or channel count!\n");
2278             return -1;
2279         }
2280     }
2281
2282     if (!av_dict_get(opts, "threads", NULL, 0))
2283         av_dict_set(&opts, "threads", "auto", 0);
2284     if (!codec ||
2285         avcodec_open2(avctx, codec, &opts) < 0)
2286         return -1;
2287     if ((t = av_dict_get(opts, "", NULL, AV_DICT_IGNORE_SUFFIX))) {
2288         av_log(NULL, AV_LOG_ERROR, "Option %s not found.\n", t->key);
2289         return AVERROR_OPTION_NOT_FOUND;
2290     }
2291
2292     /* prepare audio output */
2293     if (avctx->codec_type == AVMEDIA_TYPE_AUDIO) {
2294         wanted_spec.format = AUDIO_S16SYS;
2295         wanted_spec.silence = 0;
2296         wanted_spec.samples = SDL_AUDIO_BUFFER_SIZE;
2297         wanted_spec.callback = sdl_audio_callback;
2298         wanted_spec.userdata = is;
2299         if (SDL_OpenAudio(&wanted_spec, &spec) < 0) {
2300             fprintf(stderr, "SDL_OpenAudio: %s\n", SDL_GetError());
2301             return -1;
2302         }
2303         is->audio_hw_buf_size = spec.size;
2304         if (spec.format != AUDIO_S16SYS) {
2305             fprintf(stderr, "SDL advised audio format %d is not supported!\n", spec.format);
2306             return -1;
2307         }
2308         if (spec.channels != wanted_spec.channels) {
2309             wanted_channel_layout = av_get_default_channel_layout(spec.channels);
2310             if (!wanted_channel_layout) {
2311                 fprintf(stderr, "SDL advised channel count %d is not supported!\n", spec.channels);
2312                 return -1;
2313             }
2314         }
2315         is->audio_src_fmt = is->audio_tgt_fmt = AV_SAMPLE_FMT_S16;
2316         is->audio_src_freq = is->audio_tgt_freq = spec.freq;
2317         is->audio_src_channel_layout = is->audio_tgt_channel_layout = wanted_channel_layout;
2318         is->audio_src_channels = is->audio_tgt_channels = spec.channels;
2319     }
2320
2321     ic->streams[stream_index]->discard = AVDISCARD_DEFAULT;
2322     switch (avctx->codec_type) {
2323     case AVMEDIA_TYPE_AUDIO:
2324         is->audio_stream = stream_index;
2325         is->audio_st = ic->streams[stream_index];
2326         is->audio_buf_size  = 0;
2327         is->audio_buf_index = 0;
2328
2329         /* init averaging filter */
2330         is->audio_diff_avg_coef  = exp(log(0.01) / AUDIO_DIFF_AVG_NB);
2331         is->audio_diff_avg_count = 0;
2332         /* since we do not have a precise enough measure of the audio fifo
2333            fullness, we correct audio sync only if the error exceeds this threshold */
2334         is->audio_diff_threshold = 2.0 * SDL_AUDIO_BUFFER_SIZE / wanted_spec.freq;
2335
2336         memset(&is->audio_pkt, 0, sizeof(is->audio_pkt));
2337         packet_queue_init(&is->audioq);
2338         SDL_PauseAudio(0);
2339         break;
2340     case AVMEDIA_TYPE_VIDEO:
2341         is->video_stream = stream_index;
2342         is->video_st = ic->streams[stream_index];
2343
2344         packet_queue_init(&is->videoq);
2345         is->video_tid = SDL_CreateThread(video_thread, is);
2346         break;
2347     case AVMEDIA_TYPE_SUBTITLE:
2348         is->subtitle_stream = stream_index;
2349         is->subtitle_st = ic->streams[stream_index];
2350         packet_queue_init(&is->subtitleq);
2351
2352         is->subtitle_tid = SDL_CreateThread(subtitle_thread, is);
2353         break;
2354     default:
2355         break;
2356     }
2357     return 0;
2358 }
2359
2360 static void stream_component_close(VideoState *is, int stream_index)
2361 {
2362     AVFormatContext *ic = is->ic;
2363     AVCodecContext *avctx;
2364
2365     if (stream_index < 0 || stream_index >= ic->nb_streams)
2366         return;
2367     avctx = ic->streams[stream_index]->codec;
2368
2369     switch (avctx->codec_type) {
2370     case AVMEDIA_TYPE_AUDIO:
2371         packet_queue_abort(&is->audioq);
2372
2373         SDL_CloseAudio();
2374
2375         packet_queue_end(&is->audioq);
2376         if (is->swr_ctx)
2377             swr_free(&is->swr_ctx);
2378         av_free_packet(&is->audio_pkt);
2379         av_freep(&is->audio_buf1);
2380         is->audio_buf = NULL;
2381         av_freep(&is->frame);
2382
2383         if (is->rdft) {
2384             av_rdft_end(is->rdft);
2385             av_freep(&is->rdft_data);
2386             is->rdft = NULL;
2387             is->rdft_bits = 0;
2388         }
2389         break;
2390     case AVMEDIA_TYPE_VIDEO:
2391         packet_queue_abort(&is->videoq);
2392
2393         /* note: we also signal this mutex to make sure we deblock the
2394            video thread in all cases */
2395         SDL_LockMutex(is->pictq_mutex);
2396         SDL_CondSignal(is->pictq_cond);
2397         SDL_UnlockMutex(is->pictq_mutex);
2398
2399         SDL_WaitThread(is->video_tid, NULL);
2400
2401         packet_queue_end(&is->videoq);
2402         break;
2403     case AVMEDIA_TYPE_SUBTITLE:
2404         packet_queue_abort(&is->subtitleq);
2405
2406         /* note: we also signal this mutex to make sure we deblock the
2407            subtitle thread in all cases */
2408         SDL_LockMutex(is->subpq_mutex);
2409         is->subtitle_stream_changed = 1;
2410
2411         SDL_CondSignal(is->subpq_cond);
2412         SDL_UnlockMutex(is->subpq_mutex);
2413
2414         SDL_WaitThread(is->subtitle_tid, NULL);
2415
2416         packet_queue_end(&is->subtitleq);
2417         break;
2418     default:
2419         break;
2420     }
2421
2422     ic->streams[stream_index]->discard = AVDISCARD_ALL;
2423     avcodec_close(avctx);
2424     switch (avctx->codec_type) {
2425     case AVMEDIA_TYPE_AUDIO:
2426         is->audio_st = NULL;
2427         is->audio_stream = -1;
2428         break;
2429     case AVMEDIA_TYPE_VIDEO:
2430         is->video_st = NULL;
2431         is->video_stream = -1;
2432         break;
2433     case AVMEDIA_TYPE_SUBTITLE:
2434         is->subtitle_st = NULL;
2435         is->subtitle_stream = -1;
2436         break;
2437     default:
2438         break;
2439     }
2440 }
2441
2442 static int decode_interrupt_cb(void *ctx)
2443 {
2444     VideoState *is = ctx;
2445     return is->abort_request;
2446 }
2447
2448 /* this thread gets the stream from the disk or the network */
2449 static int read_thread(void *arg)
2450 {
2451     VideoState *is = arg;
2452     AVFormatContext *ic = NULL;
2453     int err, i, ret;
2454     int st_index[AVMEDIA_TYPE_NB];
2455     AVPacket pkt1, *pkt = &pkt1;
2456     int eof = 0;
2457     int pkt_in_play_range = 0;
2458     AVDictionaryEntry *t;
2459     AVDictionary **opts;
2460     int orig_nb_streams;
2461
2462     memset(st_index, -1, sizeof(st_index));
2463     is->video_stream = -1;
2464     is->audio_stream = -1;
2465     is->subtitle_stream = -1;
2466
2467     ic = avformat_alloc_context();
2468     ic->interrupt_callback.callback = decode_interrupt_cb;
2469     ic->interrupt_callback.opaque = is;
2470     err = avformat_open_input(&ic, is->filename, is->iformat, &format_opts);
2471     if (err < 0) {
2472         print_error(is->filename, err);
2473         ret = -1;
2474         goto fail;
2475     }
2476     if ((t = av_dict_get(format_opts, "", NULL, AV_DICT_IGNORE_SUFFIX))) {
2477         av_log(NULL, AV_LOG_ERROR, "Option %s not found.\n", t->key);
2478         ret = AVERROR_OPTION_NOT_FOUND;
2479         goto fail;
2480     }
2481     is->ic = ic;
2482
2483     if (genpts)
2484         ic->flags |= AVFMT_FLAG_GENPTS;
2485
2486     opts = setup_find_stream_info_opts(ic, codec_opts);
2487     orig_nb_streams = ic->nb_streams;
2488
2489     err = avformat_find_stream_info(ic, opts);
2490     if (err < 0) {
2491         fprintf(stderr, "%s: could not find codec parameters\n", is->filename);
2492         ret = -1;
2493         goto fail;
2494     }
2495     for (i = 0; i < orig_nb_streams; i++)
2496         av_dict_free(&opts[i]);
2497     av_freep(&opts);
2498
2499     if (ic->pb)
2500         ic->pb->eof_reached = 0; // FIXME hack, ffplay maybe should not use url_feof() to test for the end
2501
2502     if (seek_by_bytes < 0)
2503         seek_by_bytes = !!(ic->iformat->flags & AVFMT_TS_DISCONT);
2504
2505     /* if seeking requested, we execute it */
2506     if (start_time != AV_NOPTS_VALUE) {
2507         int64_t timestamp;
2508
2509         timestamp = start_time;
2510         /* add the stream start time */
2511         if (ic->start_time != AV_NOPTS_VALUE)
2512             timestamp += ic->start_time;
2513         ret = avformat_seek_file(ic, -1, INT64_MIN, timestamp, INT64_MAX, 0);
2514         if (ret < 0) {
2515             fprintf(stderr, "%s: could not seek to position %0.3f\n",
2516                     is->filename, (double)timestamp / AV_TIME_BASE);
2517         }
2518     }
2519
2520     for (i = 0; i < ic->nb_streams; i++)
2521         ic->streams[i]->discard = AVDISCARD_ALL;
2522     if (!video_disable)
2523         st_index[AVMEDIA_TYPE_VIDEO] =
2524             av_find_best_stream(ic, AVMEDIA_TYPE_VIDEO,
2525                                 wanted_stream[AVMEDIA_TYPE_VIDEO], -1, NULL, 0);
2526     if (!audio_disable)
2527         st_index[AVMEDIA_TYPE_AUDIO] =
2528             av_find_best_stream(ic, AVMEDIA_TYPE_AUDIO,
2529                                 wanted_stream[AVMEDIA_TYPE_AUDIO],
2530                                 st_index[AVMEDIA_TYPE_VIDEO],
2531                                 NULL, 0);
2532     if (!video_disable)
2533         st_index[AVMEDIA_TYPE_SUBTITLE] =
2534             av_find_best_stream(ic, AVMEDIA_TYPE_SUBTITLE,
2535                                 wanted_stream[AVMEDIA_TYPE_SUBTITLE],
2536                                 (st_index[AVMEDIA_TYPE_AUDIO] >= 0 ?
2537                                  st_index[AVMEDIA_TYPE_AUDIO] :
2538                                  st_index[AVMEDIA_TYPE_VIDEO]),
2539                                 NULL, 0);
2540     if (show_status) {
2541         av_dump_format(ic, 0, is->filename, 0);
2542     }
2543
2544     is->show_mode = show_mode;
2545
2546     /* open the streams */
2547     if (st_index[AVMEDIA_TYPE_AUDIO] >= 0) {
2548         stream_component_open(is, st_index[AVMEDIA_TYPE_AUDIO]);
2549     }
2550
2551     ret = -1;
2552     if (st_index[AVMEDIA_TYPE_VIDEO] >= 0) {
2553         ret = stream_component_open(is, st_index[AVMEDIA_TYPE_VIDEO]);
2554     }
2555     is->refresh_tid = SDL_CreateThread(refresh_thread, is);
2556     if (is->show_mode == SHOW_MODE_NONE)
2557         is->show_mode = ret >= 0 ? SHOW_MODE_VIDEO : SHOW_MODE_RDFT;
2558
2559     if (st_index[AVMEDIA_TYPE_SUBTITLE] >= 0) {
2560         stream_component_open(is, st_index[AVMEDIA_TYPE_SUBTITLE]);
2561     }
2562
2563     if (is->video_stream < 0 && is->audio_stream < 0) {
2564         fprintf(stderr, "%s: could not open codecs\n", is->filename);
2565         ret = -1;
2566         goto fail;
2567     }
2568
2569     for (;;) {
2570         if (is->abort_request)
2571             break;
2572         if (is->paused != is->last_paused) {
2573             is->last_paused = is->paused;
2574             if (is->paused)
2575                 is->read_pause_return = av_read_pause(ic);
2576             else
2577                 av_read_play(ic);
2578         }
2579 #if CONFIG_RTSP_DEMUXER || CONFIG_MMSH_PROTOCOL
2580         if (is->paused &&
2581                 (!strcmp(ic->iformat->name, "rtsp") ||
2582                  (ic->pb && !strncmp(input_filename, "mmsh:", 5)))) {
2583             /* wait 10 ms to avoid trying to get another packet */
2584             /* XXX: horrible */
2585             SDL_Delay(10);
2586             continue;
2587         }
2588 #endif
2589         if (is->seek_req) {
2590             int64_t seek_target = is->seek_pos;
2591             int64_t seek_min    = is->seek_rel > 0 ? seek_target - is->seek_rel + 2: INT64_MIN;
2592             int64_t seek_max    = is->seek_rel < 0 ? seek_target - is->seek_rel - 2: INT64_MAX;
2593 // FIXME the +-2 is due to rounding being not done in the correct direction in generation
2594 //      of the seek_pos/seek_rel variables
2595
2596             ret = avformat_seek_file(is->ic, -1, seek_min, seek_target, seek_max, is->seek_flags);
2597             if (ret < 0) {
2598                 fprintf(stderr, "%s: error while seeking\n", is->ic->filename);
2599             } else {
2600                 if (is->audio_stream >= 0) {
2601                     packet_queue_flush(&is->audioq);
2602                     packet_queue_put(&is->audioq, &flush_pkt);
2603                 }
2604                 if (is->subtitle_stream >= 0) {
2605                     packet_queue_flush(&is->subtitleq);
2606                     packet_queue_put(&is->subtitleq, &flush_pkt);
2607                 }
2608                 if (is->video_stream >= 0) {
2609                     packet_queue_flush(&is->videoq);
2610                     packet_queue_put(&is->videoq, &flush_pkt);
2611                 }
2612             }
2613             is->seek_req = 0;
2614             eof = 0;
2615         }
2616
2617         /* if the queues are full, no need to read more */
2618         if (   is->audioq.size + is->videoq.size + is->subtitleq.size > MAX_QUEUE_SIZE
2619             || (   (is->audioq   .nb_packets > MIN_FRAMES || is->audio_stream < 0)
2620                 && (is->videoq   .nb_packets > MIN_FRAMES || is->video_stream < 0)
2621                 && (is->subtitleq.nb_packets > MIN_FRAMES || is->subtitle_stream < 0))) {
2622             /* wait 10 ms */
2623             SDL_Delay(10);
2624             continue;
2625         }
2626         if (eof) {
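                 /* at EOF, queue empty packets so decoders with CODEC_CAP_DELAY can
                    flush their buffered frames; once the queues drain, loop or exit */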
2627             if (is->video_stream >= 0) {
2628                 av_init_packet(pkt);
2629                 pkt->data = NULL;
2630                 pkt->size = 0;
2631                 pkt->stream_index = is->video_stream;
2632                 packet_queue_put(&is->videoq, pkt);
2633             }
2634             if (is->audio_stream >= 0 &&
2635                 is->audio_st->codec->codec->capabilities & CODEC_CAP_DELAY) {
2636                 av_init_packet(pkt);
2637                 pkt->data = NULL;
2638                 pkt->size = 0;
2639                 pkt->stream_index = is->audio_stream;
2640                 packet_queue_put(&is->audioq, pkt);
2641             }
2642             SDL_Delay(10);
2643             if (is->audioq.size + is->videoq.size + is->subtitleq.size == 0) {
2644                 if (loop != 1 && (!loop || --loop)) {
2645                     stream_seek(is, start_time != AV_NOPTS_VALUE ? start_time : 0, 0, 0);
2646                 } else if (autoexit) {
2647                     ret = AVERROR_EOF;
2648                     goto fail;
2649                 }
2650             }
2651             eof=0;
2652             continue;
2653         }
2654         ret = av_read_frame(ic, pkt);
2655         if (ret < 0) {
2656             if (ret == AVERROR_EOF || url_feof(ic->pb))
2657                 eof = 1;
2658             if (ic->pb && ic->pb->error)
2659                 break;
2660             SDL_Delay(100); /* wait for user event */
2661             continue;
2662         }
2663         /* check if packet is in play range specified by user, then queue, otherwise discard */
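             /* in-range means: the packet time relative to the stream start, minus the
                -ss offset, does not exceed the requested -t duration */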
2664         pkt_in_play_range = duration == AV_NOPTS_VALUE ||
2665                 (pkt->pts - ic->streams[pkt->stream_index]->start_time) *
2666                 av_q2d(ic->streams[pkt->stream_index]->time_base) -
2667                 (double)(start_time != AV_NOPTS_VALUE ? start_time : 0) / 1000000
2668                 <= ((double)duration / 1000000);
2669         if (pkt->stream_index == is->audio_stream && pkt_in_play_range) {
2670             packet_queue_put(&is->audioq, pkt);
2671         } else if (pkt->stream_index == is->video_stream && pkt_in_play_range) {
2672             packet_queue_put(&is->videoq, pkt);
2673         } else if (pkt->stream_index == is->subtitle_stream && pkt_in_play_range) {
2674             packet_queue_put(&is->subtitleq, pkt);
2675         } else {
2676             av_free_packet(pkt);
2677         }
2678     }
2679     /* wait until the end */
2680     while (!is->abort_request) {
2681         SDL_Delay(100);
2682     }
2683
2684     ret = 0;
2685  fail:
2686     /* close each stream */
2687     if (is->audio_stream >= 0)
2688         stream_component_close(is, is->audio_stream);
2689     if (is->video_stream >= 0)
2690         stream_component_close(is, is->video_stream);
2691     if (is->subtitle_stream >= 0)
2692         stream_component_close(is, is->subtitle_stream);
2693     if (is->ic) {
2694         avformat_close_input(&is->ic);
2695     }
2696
2697     if (ret != 0) {
2698         SDL_Event event;
2699
2700         event.type = FF_QUIT_EVENT;
2701         event.user.data1 = is;
2702         SDL_PushEvent(&event);
2703     }
2704     return 0;
2705 }
2706
2707 static VideoState *stream_open(const char *filename, AVInputFormat *iformat)
2708 {
2709     VideoState *is;
2710
2711     is = av_mallocz(sizeof(VideoState));
2712     if (!is)
2713         return NULL;
2714     av_strlcpy(is->filename, filename, sizeof(is->filename));
2715     is->iformat = iformat;
2716     is->ytop    = 0;
2717     is->xleft   = 0;
2718
2719     /* start video display */
2720     is->pictq_mutex = SDL_CreateMutex();
2721     is->pictq_cond  = SDL_CreateCond();
2722
2723     is->subpq_mutex = SDL_CreateMutex();
2724     is->subpq_cond  = SDL_CreateCond();
2725
2726     is->av_sync_type = av_sync_type;
2727     is->read_tid     = SDL_CreateThread(read_thread, is);
2728     if (!is->read_tid) {
2729         av_free(is);
2730         return NULL;
2731     }
2732     return is;
2733 }
2734
2735 static void stream_cycle_channel(VideoState *is, int codec_type)
2736 {
2737     AVFormatContext *ic = is->ic;
2738     int start_index, stream_index;
2739     AVStream *st;
2740
2741     if (codec_type == AVMEDIA_TYPE_VIDEO)
2742         start_index = is->video_stream;
2743     else if (codec_type == AVMEDIA_TYPE_AUDIO)
2744         start_index = is->audio_stream;
2745     else
2746         start_index = is->subtitle_stream;
2747     if (start_index < (codec_type == AVMEDIA_TYPE_SUBTITLE ? -1 : 0))
2748         return;
2749     stream_index = start_index;
2750     for (;;) {
2751         if (++stream_index >= is->ic->nb_streams)
2752         {
2753             if (codec_type == AVMEDIA_TYPE_SUBTITLE)
2754             {
2755                 stream_index = -1;
2756                 goto the_end;
2757             } else
2758                 stream_index = 0;
2759         }
2760         if (stream_index == start_index)
2761             return;
2762         st = ic->streams[stream_index];
2763         if (st->codec->codec_type == codec_type) {
2764             /* check that parameters are OK */
2765             switch (codec_type) {
2766             case AVMEDIA_TYPE_AUDIO:
2767                 if (st->codec->sample_rate != 0 &&
2768                     st->codec->channels != 0)
2769                     goto the_end;
2770                 break;
2771             case AVMEDIA_TYPE_VIDEO:
2772             case AVMEDIA_TYPE_SUBTITLE:
2773                 goto the_end;
2774             default:
2775                 break;
2776             }
2777         }
2778     }
2779  the_end:
2780     stream_component_close(is, start_index);
2781     stream_component_open(is, stream_index);
2782 }
2783
2784
2785 static void toggle_full_screen(VideoState *is)
2786 {
2787     av_unused int i;
2788     is_full_screen = !is_full_screen;
2789 #if defined(__APPLE__) && SDL_VERSION_ATLEAST(1, 2, 14)
2790     /* OS X needs to reallocate the SDL overlays */
2791     for (i = 0; i < VIDEO_PICTURE_QUEUE_SIZE; i++) {
2792         is->pictq[i].reallocate = 1;
2793     }
2794 #endif
2795     video_open(is, 1);
2796 }
2797
2798 static void toggle_pause(VideoState *is)
2799 {
2800     stream_toggle_pause(is);
2801     is->step = 0;
2802 }
2803
2804 static void step_to_next_frame(VideoState *is)
2805 {
2806     /* if the stream is paused, unpause it, then step */
2807     if (is->paused)
2808         stream_toggle_pause(is);
2809     is->step = 1;
2810 }
2811
2812 static void toggle_audio_display(VideoState *is)
2813 {
2814     int bgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0x00);
2815     is->show_mode = (is->show_mode + 1) % SHOW_MODE_NB;
2816     fill_rectangle(screen,
2817                 is->xleft, is->ytop, is->width, is->height,
2818                 bgcolor);
2819     SDL_UpdateRect(screen, is->xleft, is->ytop, is->width, is->height);
2820 }
2821
2822 /* handle an event sent by the GUI */
2823 static void event_loop(VideoState *cur_stream)
2824 {
2825     SDL_Event event;
2826     double incr, pos, frac;
2827
2828     for (;;) {
2829         double x;
2830         SDL_WaitEvent(&event);
2831         switch (event.type) {
2832         case SDL_KEYDOWN:
2833             if (exit_on_keydown) {
2834                 do_exit(cur_stream);
2835                 break;
2836             }
2837             switch (event.key.keysym.sym) {
2838             case SDLK_ESCAPE:
2839             case SDLK_q:
2840                 do_exit(cur_stream);
2841                 break;
2842             case SDLK_f:
2843                 toggle_full_screen(cur_stream);
2844                 cur_stream->force_refresh = 1;
2845                 break;
2846             case SDLK_p:
2847             case SDLK_SPACE:
2848                 toggle_pause(cur_stream);
2849                 break;
2850             case SDLK_s: // S: Step to next frame
2851                 step_to_next_frame(cur_stream);
2852                 break;
2853             case SDLK_a:
2854                 stream_cycle_channel(cur_stream, AVMEDIA_TYPE_AUDIO);
2855                 break;
2856             case SDLK_v:
2857                 stream_cycle_channel(cur_stream, AVMEDIA_TYPE_VIDEO);
2858                 break;
2859             case SDLK_t:
2860                 stream_cycle_channel(cur_stream, AVMEDIA_TYPE_SUBTITLE);
2861                 break;
2862             case SDLK_w:
2863                 toggle_audio_display(cur_stream);
2864                 cur_stream->force_refresh = 1;
2865                 break;
2866             case SDLK_PAGEUP:
2867                 incr = 600.0;
2868                 goto do_seek;
2869             case SDLK_PAGEDOWN:
2870                 incr = -600.0;
2871                 goto do_seek;
2872             case SDLK_LEFT:
2873                 incr = -10.0;
2874                 goto do_seek;
2875             case SDLK_RIGHT:
2876                 incr = 10.0;
2877                 goto do_seek;
2878             case SDLK_UP:
2879                 incr = 60.0;
2880                 goto do_seek;
2881             case SDLK_DOWN:
2882                 incr = -60.0;
2883             do_seek:
2884                     if (seek_by_bytes) {
2885                         if (cur_stream->video_stream >= 0 && cur_stream->video_current_pos >= 0) {
2886                             pos = cur_stream->video_current_pos;
2887                         } else if (cur_stream->audio_stream >= 0 && cur_stream->audio_pkt.pos >= 0) {
2888                             pos = cur_stream->audio_pkt.pos;
2889                         } else
2890                             pos = avio_tell(cur_stream->ic->pb);
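                             /* convert the seek increment from seconds to bytes using
                                the stream bitrate, or a rough default when unknown */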
2891                         if (cur_stream->ic->bit_rate)
2892                             incr *= cur_stream->ic->bit_rate / 8.0;
2893                         else
2894                             incr *= 180000.0;
2895                         pos += incr;
2896                         stream_seek(cur_stream, pos, incr, 1);
2897                     } else {
2898                         pos = get_master_clock(cur_stream);
2899                         pos += incr;
2900                         stream_seek(cur_stream, (int64_t)(pos * AV_TIME_BASE), (int64_t)(incr * AV_TIME_BASE), 0);
2901                     }
2902                 break;
2903             default:
2904                 break;
2905             }
2906             break;
2907         case SDL_VIDEOEXPOSE:
2908             cur_stream->force_refresh = 1;
2909             break;
2910         case SDL_MOUSEBUTTONDOWN:
2911             if (exit_on_mousedown) {
2912                 do_exit(cur_stream);
2913                 break;
2914             }
2915         case SDL_MOUSEMOTION:
2916             if (event.type == SDL_MOUSEBUTTONDOWN) {
2917                 x = event.button.x;
2918             } else {
2919                 if (event.motion.state != SDL_PRESSED)
2920                     break;
2921                 x = event.motion.x;
2922             }
2923                 if (seek_by_bytes || cur_stream->ic->duration <= 0) {
2924                     uint64_t size =  avio_size(cur_stream->ic->pb);
2925                     stream_seek(cur_stream, size*x/cur_stream->width, 0, 1);
2926                 } else {
2927                     int64_t ts;
2928                     int ns, hh, mm, ss;
2929                     int tns, thh, tmm, tss;
2930                     tns  = cur_stream->ic->duration / 1000000LL;
2931                     thh  = tns / 3600;
2932                     tmm  = (tns % 3600) / 60;
2933                     tss  = (tns % 60);
2934                     frac = x / cur_stream->width;
2935                     ns   = frac * tns;
2936                     hh   = ns / 3600;
2937                     mm   = (ns % 3600) / 60;
2938                     ss   = (ns % 60);
2939                     fprintf(stderr, "Seek to %2.0f%% (%2d:%02d:%02d) of total duration (%2d:%02d:%02d)       \n", frac*100,
2940                             hh, mm, ss, thh, tmm, tss);
2941                     ts = frac * cur_stream->ic->duration;
2942                     if (cur_stream->ic->start_time != AV_NOPTS_VALUE)
2943                         ts += cur_stream->ic->start_time;
2944                     stream_seek(cur_stream, ts, 0, 0);
2945                 }
2946             break;
2947         case SDL_VIDEORESIZE:
2948                 screen = SDL_SetVideoMode(event.resize.w, event.resize.h, 0,
2949                                           SDL_HWSURFACE|SDL_RESIZABLE|SDL_ASYNCBLIT|SDL_HWACCEL);
2950                 screen_width  = cur_stream->width  = event.resize.w;
2951                 screen_height = cur_stream->height = event.resize.h;
2952                 cur_stream->force_refresh = 1;
2953             break;
2954         case SDL_QUIT:
2955         case FF_QUIT_EVENT:
2956             do_exit(cur_stream);
2957             break;
2958         case FF_ALLOC_EVENT:
2959             video_open(event.user.data1, 0);
2960             alloc_picture(event.user.data1);
2961             break;
2962         case FF_REFRESH_EVENT:
2963             video_refresh(event.user.data1);
2964             cur_stream->refresh = 0;
2965             break;
2966         default:
2967             break;
2968         }
2969     }
2970 }
2971
2972 static int opt_frame_size(const char *opt, const char *arg)
2973 {
2974     av_log(NULL, AV_LOG_WARNING, "Option -s is deprecated, use -video_size.\n");
2975     return opt_default("video_size", arg);
2976 }
2977
2978 static int opt_width(const char *opt, const char *arg)
2979 {
2980     screen_width = parse_number_or_die(opt, arg, OPT_INT64, 1, INT_MAX);
2981     return 0;
2982 }
2983
2984 static int opt_height(const char *opt, const char *arg)
2985 {
2986     screen_height = parse_number_or_die(opt, arg, OPT_INT64, 1, INT_MAX);
2987     return 0;
2988 }
2989
2990 static int opt_format(const char *opt, const char *arg)
2991 {
2992     file_iformat = av_find_input_format(arg);
2993     if (!file_iformat) {
2994         fprintf(stderr, "Unknown input format: %s\n", arg);
2995         return AVERROR(EINVAL);
2996     }
2997     return 0;
2998 }
2999
3000 static int opt_frame_pix_fmt(const char *opt, const char *arg)
3001 {
3002     av_log(NULL, AV_LOG_WARNING, "Option -pix_fmt is deprecated, use -pixel_format.\n");
3003     return opt_default("pixel_format", arg);
3004 }
3005
3006 static int opt_sync(const char *opt, const char *arg)
3007 {
3008     if (!strcmp(arg, "audio"))
3009         av_sync_type = AV_SYNC_AUDIO_MASTER;
3010     else if (!strcmp(arg, "video"))
3011         av_sync_type = AV_SYNC_VIDEO_MASTER;
3012     else if (!strcmp(arg, "ext"))
3013         av_sync_type = AV_SYNC_EXTERNAL_CLOCK;
3014     else {
3015         fprintf(stderr, "Unknown value for %s: %s\n", opt, arg);
3016         exit(1);
3017     }
3018     return 0;
3019 }
3020
3021 static int opt_seek(const char *opt, const char *arg)
3022 {
3023     start_time = parse_time_or_die(opt, arg, 1);
3024     return 0;
3025 }
3026
3027 static int opt_duration(const char *opt, const char *arg)
3028 {
3029     duration = parse_time_or_die(opt, arg, 1);
3030     return 0;
3031 }
3032
3033 static int opt_show_mode(const char *opt, const char *arg)
3034 {
3035     show_mode = !strcmp(arg, "video") ? SHOW_MODE_VIDEO :
3036                 !strcmp(arg, "waves") ? SHOW_MODE_WAVES :
3037                 !strcmp(arg, "rdft" ) ? SHOW_MODE_RDFT  :
3038                 parse_number_or_die(opt, arg, OPT_INT, 0, SHOW_MODE_NB-1);
3039     return 0;
3040 }
3041
3042 static void opt_input_file(void *optctx, const char *filename)
3043 {
3044     if (input_filename) {
3045         fprintf(stderr, "Argument '%s' provided as input filename, but '%s' was already specified.\n",
3046                 filename, input_filename);
3047         exit_program(1);
3048     }
3049     if (!strcmp(filename, "-"))
3050         filename = "pipe:";
3051     input_filename = filename;
3052 }
3053
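/* Shared handler for -acodec, -scodec and -vcodec: the last character of
   the option name selects which forced codec name is set. */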
3054 static int opt_codec(void *o, const char *opt, const char *arg)
3055 {
3056     switch (opt[strlen(opt)-1]) {
3057     case 'a':    audio_codec_name = arg; break;
3058     case 's': subtitle_codec_name = arg; break;
3059     case 'v':    video_codec_name = arg; break;
3060     }
3061     return 0;
3062 }
3063
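/* dummy is only ever written, never read: the "-i" entry in the option table
   below exists so that an ffmpeg-style "-i input" command line is accepted,
   while the input file itself is taken from the positional argument. */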
3064 static int dummy;
3065
3066 static const OptionDef options[] = {
3067 #include "cmdutils_common_opts.h"
3068     { "x", HAS_ARG, { (void*)opt_width }, "force displayed width", "width" },
3069     { "y", HAS_ARG, { (void*)opt_height }, "force displayed height", "height" },
3070     { "s", HAS_ARG | OPT_VIDEO, { (void*)opt_frame_size }, "set frame size (WxH or abbreviation)", "size" },
3071     { "fs", OPT_BOOL, { (void*)&is_full_screen }, "force full screen" },
3072     { "an", OPT_BOOL, { (void*)&audio_disable }, "disable audio" },
3073     { "vn", OPT_BOOL, { (void*)&video_disable }, "disable video" },
3074     { "ast", OPT_INT | HAS_ARG | OPT_EXPERT, { (void*)&wanted_stream[AVMEDIA_TYPE_AUDIO] }, "select desired audio stream", "stream_number" },
3075     { "vst", OPT_INT | HAS_ARG | OPT_EXPERT, { (void*)&wanted_stream[AVMEDIA_TYPE_VIDEO] }, "select desired video stream", "stream_number" },
3076     { "sst", OPT_INT | HAS_ARG | OPT_EXPERT, { (void*)&wanted_stream[AVMEDIA_TYPE_SUBTITLE] }, "select desired subtitle stream", "stream_number" },
3077     { "ss", HAS_ARG, { (void*)&opt_seek }, "seek to a given position in seconds", "pos" },
3078     { "t", HAS_ARG, { (void*)&opt_duration }, "play \"duration\" seconds of audio/video", "duration" },
3079     { "bytes", OPT_INT | HAS_ARG, { (void*)&seek_by_bytes }, "seek by bytes 0=off 1=on -1=auto", "val" },
3080     { "nodisp", OPT_BOOL, { (void*)&display_disable }, "disable graphical display" },
3081     { "f", HAS_ARG, { (void*)opt_format }, "force format", "fmt" },
3082     { "pix_fmt", HAS_ARG | OPT_EXPERT | OPT_VIDEO, { (void*)opt_frame_pix_fmt }, "set pixel format", "format" },
3083     { "stats", OPT_BOOL | OPT_EXPERT, { (void*)&show_status }, "show status", "" },
3084     { "bug", OPT_INT | HAS_ARG | OPT_EXPERT, { (void*)&workaround_bugs }, "work around bugs", "" },
3085     { "fast", OPT_BOOL | OPT_EXPERT, { (void*)&fast }, "non-spec-compliant optimizations", "" },
3086     { "genpts", OPT_BOOL | OPT_EXPERT, { (void*)&genpts }, "generate pts", "" },
3087     { "drp", OPT_INT | HAS_ARG | OPT_EXPERT, { (void*)&decoder_reorder_pts }, "let decoder reorder pts 0=off 1=on -1=auto", ""},
3088     { "lowres", OPT_INT | HAS_ARG | OPT_EXPERT, { (void*)&lowres }, "", "" },
3089     { "skiploop", OPT_INT | HAS_ARG | OPT_EXPERT, { (void*)&skip_loop_filter }, "", "" },
3090     { "skipframe", OPT_INT | HAS_ARG | OPT_EXPERT, { (void*)&skip_frame }, "", "" },
3091     { "skipidct", OPT_INT | HAS_ARG | OPT_EXPERT, { (void*)&skip_idct }, "", "" },
3092     { "idct", OPT_INT | HAS_ARG | OPT_EXPERT, { (void*)&idct }, "set idct algo",  "algo" },
3093     { "ec", OPT_INT | HAS_ARG | OPT_EXPERT, { (void*)&error_concealment }, "set error concealment options",  "bit_mask" },
3094     { "sync", HAS_ARG | OPT_EXPERT, { (void*)opt_sync }, "set audio-video sync type (type=audio/video/ext)", "type" },
3095     { "autoexit", OPT_BOOL | OPT_EXPERT, { (void*)&autoexit }, "exit at the end", "" },
3096     { "exitonkeydown", OPT_BOOL | OPT_EXPERT, { (void*)&exit_on_keydown }, "exit on key down", "" },
3097     { "exitonmousedown", OPT_BOOL | OPT_EXPERT, { (void*)&exit_on_mousedown }, "exit on mouse down", "" },
3098     { "loop", OPT_INT | HAS_ARG | OPT_EXPERT, { (void*)&loop }, "set number of times the playback shall be looped", "loop count" },
3099     { "framedrop", OPT_BOOL | OPT_EXPERT, { (void*)&framedrop }, "drop frames when cpu is too slow", "" },
3100     { "window_title", OPT_STRING | HAS_ARG, { (void*)&window_title }, "set window title", "window title" },
3101 #if CONFIG_AVFILTER
3102     { "vf", OPT_STRING | HAS_ARG, { (void*)&vfilters }, "video filters", "filter list" },
3103 #endif
3104     { "rdftspeed", OPT_INT | HAS_ARG| OPT_AUDIO | OPT_EXPERT, { (void*)&rdftspeed }, "rdft speed", "msecs" },
3105     { "showmode", HAS_ARG, { (void*)opt_show_mode }, "select show mode (0 = video, 1 = waves, 2 = RDFT)", "mode" },
3106     { "default", HAS_ARG | OPT_AUDIO | OPT_VIDEO | OPT_EXPERT, { (void*)opt_default }, "generic catch all option", "" },
3107     { "i", OPT_BOOL, { (void *)&dummy }, "read specified file", "input_file" },
3108     { "codec", HAS_ARG | OPT_FUNC2, { (void*)opt_codec }, "force decoder", "decoder" },
3109     { NULL, },
3110 };
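/* Illustrative invocation (the file name is just an example) combining
   options from the table above:
       ffplay -sync ext -showmode waves -autoexit input.mkv */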
3111
3112 static void show_usage(void)
3113 {
3114     av_log(NULL, AV_LOG_INFO, "Simple media player\n");
3115     av_log(NULL, AV_LOG_INFO, "usage: %s [options] input_file\n", program_name);
3116     av_log(NULL, AV_LOG_INFO, "\n");
3117 }
3118
3119 static int opt_help(const char *opt, const char *arg)
3120 {
3121     av_log_set_callback(log_callback_help);
3122     show_usage();
3123     show_help_options(options, "Main options:\n",
3124                       OPT_EXPERT, 0);
3125     show_help_options(options, "\nAdvanced options:\n",
3126                       OPT_EXPERT, OPT_EXPERT);
3127     printf("\n");
3128     show_help_children(avcodec_get_class(), AV_OPT_FLAG_DECODING_PARAM);
3129     show_help_children(avformat_get_class(), AV_OPT_FLAG_DECODING_PARAM);
3130 #if !CONFIG_AVFILTER
3131     show_help_children(sws_get_class(), AV_OPT_FLAG_ENCODING_PARAM);
3132 #endif
3133     printf("\nWhile playing:\n"
3134            "q, ESC              quit\n"
3135            "f                   toggle full screen\n"
3136            "p, SPC              pause\n"
3137            "a                   cycle audio channel\n"
3138            "v                   cycle video channel\n"
3139            "t                   cycle subtitle channel\n"
3140            "w                   show audio waves\n"
3141            "s                   activate frame-step mode\n"
3142            "left/right          seek backward/forward 10 seconds\n"
3143            "down/up             seek backward/forward 1 minute\n"
3144            "page down/page up   seek backward/forward 10 minutes\n"
3145            "mouse click         seek to percentage in file corresponding to fraction of width\n"
3146            );
3147     return 0;
3148 }
3149
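/* Lock manager callback registered with av_lockmgr_register(): libavcodec
   calls it to create, obtain, release and destroy the mutexes that guard its
   global state, implemented here on top of SDL mutexes. */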
3150 static int lockmgr(void **mtx, enum AVLockOp op)
3151 {
3152     switch (op) {
3153     case AV_LOCK_CREATE:
3154         *mtx = SDL_CreateMutex();
3155         if (!*mtx)
3156             return 1;
3157         return 0;
3158     case AV_LOCK_OBTAIN:
3159         return !!SDL_LockMutex(*mtx);
3160     case AV_LOCK_RELEASE:
3161         return !!SDL_UnlockMutex(*mtx);
3162     case AV_LOCK_DESTROY:
3163         SDL_DestroyMutex(*mtx);
3164         return 0;
3165     }
3166     return 1;
3167 }
3168
3169 /* Program entry point */
3170 int main(int argc, char **argv)
3171 {
3172     int flags;
3173     VideoState *is;
3174
3175     av_log_set_flags(AV_LOG_SKIP_REPEATED);
3176     parse_loglevel(argc, argv, options);
3177
3178     /* register all codecs, demuxers and protocols */
3179     avcodec_register_all();
3180 #if CONFIG_AVDEVICE
3181     avdevice_register_all();
3182 #endif
3183 #if CONFIG_AVFILTER
3184     avfilter_register_all();
3185 #endif
3186     av_register_all();
3187     avformat_network_init();
3188
3189     init_opts();
3190
3191     signal(SIGINT , sigterm_handler); /* Interrupt (ANSI).    */
3192     signal(SIGTERM, sigterm_handler); /* Termination (ANSI).  */
3193
3194     show_banner(argc, argv, options);
3195
3196     parse_options(NULL, argc, argv, options, opt_input_file);
3197
3198     if (!input_filename) {
3199         show_usage();
3200         fprintf(stderr, "An input file must be specified\n");
3201         fprintf(stderr, "Use -h to get full help or, even better, run 'man %s'\n", program_name);
3202         exit(1);
3203     }
3204
3205     if (display_disable) {
3206         video_disable = 1;
3207     }
3208     flags = SDL_INIT_VIDEO | SDL_INIT_AUDIO | SDL_INIT_TIMER;
3209     if (audio_disable)
3210         flags &= ~SDL_INIT_AUDIO;
3211 #if !defined(__MINGW32__) && !defined(__APPLE__)
3212     flags |= SDL_INIT_EVENTTHREAD; /* Not supported on Windows or Mac OS X */
3213 #endif
3214     if (SDL_Init (flags)) {
3215         fprintf(stderr, "Could not initialize SDL - %s\n", SDL_GetError());
3216         fprintf(stderr, "(Did you set the DISPLAY variable?)\n");
3217         exit(1);
3218     }
3219
3220     if (!display_disable) {
3221 #if HAVE_SDL_VIDEO_SIZE
3222         const SDL_VideoInfo *vi = SDL_GetVideoInfo();
3223         fs_screen_width = vi->current_w;
3224         fs_screen_height = vi->current_h;
3225 #endif
3226     }
3227
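    /* Ignore event types that the event loop never reads, so they cannot
       accumulate in SDL's event queue. */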
3228     SDL_EventState(SDL_ACTIVEEVENT, SDL_IGNORE);
3229     SDL_EventState(SDL_SYSWMEVENT, SDL_IGNORE);
3230     SDL_EventState(SDL_USEREVENT, SDL_IGNORE);
3231
3232     if (av_lockmgr_register(lockmgr)) {
3233         fprintf(stderr, "Could not initialize lock manager!\n");
3234         do_exit(NULL);
3235     }
3236
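    /* flush_pkt is a sentinel packet: it is queued after a seek so that the
       decoder threads know to flush their codec buffers before packets from
       the new position arrive. */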
3237     av_init_packet(&flush_pkt);
3238     flush_pkt.data = (uint8_t *)(intptr_t)"FLUSH";
3239
3240     is = stream_open(input_filename, file_iformat);
3241     if (!is) {
3242         fprintf(stderr, "Failed to initialize VideoState!\n");
3243         do_exit(NULL);
3244     }
3245
3246     event_loop(is);
3247
3248     /* never reached: event_loop() does not return */
3249
3250     return 0;
3251 }