1 /*
2  * Copyright (c) 2003 Fabrice Bellard
3  *
4  * This file is part of FFmpeg.
5  *
6  * FFmpeg is free software; you can redistribute it and/or
7  * modify it under the terms of the GNU Lesser General Public
8  * License as published by the Free Software Foundation; either
9  * version 2.1 of the License, or (at your option) any later version.
10  *
11  * FFmpeg is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
14  * Lesser General Public License for more details.
15  *
16  * You should have received a copy of the GNU Lesser General Public
17  * License along with FFmpeg; if not, write to the Free Software
18  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
19  */
20
21 /**
22  * @file
23  * simple media player based on the FFmpeg libraries
24  */
25
26 #include "config.h"
27 #include <inttypes.h>
28 #include <math.h>
29 #include <limits.h>
30 #include <signal.h>
31 #include "libavutil/avstring.h"
32 #include "libavutil/colorspace.h"
33 #include "libavutil/mathematics.h"
34 #include "libavutil/pixdesc.h"
35 #include "libavutil/imgutils.h"
36 #include "libavutil/dict.h"
37 #include "libavutil/parseutils.h"
38 #include "libavutil/samplefmt.h"
39 #include "libavutil/avassert.h"
40 #include "libavutil/time.h"
41 #include "libavformat/avformat.h"
42 #include "libavdevice/avdevice.h"
43 #include "libswscale/swscale.h"
44 #include "libavutil/opt.h"
45 #include "libavcodec/avfft.h"
46 #include "libswresample/swresample.h"
47
48 #if CONFIG_AVFILTER
49 # include "libavfilter/avcodec.h"
50 # include "libavfilter/avfilter.h"
51 # include "libavfilter/avfiltergraph.h"
52 # include "libavfilter/buffersink.h"
53 # include "libavfilter/buffersrc.h"
54 #endif
55
56 #include <SDL.h>
57 #include <SDL_thread.h>
58
59 #include "cmdutils.h"
60
61 #include <assert.h>
62
63 const char program_name[] = "ffplay";
64 const int program_birth_year = 2003;
65
66 #define MAX_QUEUE_SIZE (15 * 1024 * 1024)
67 #define MIN_FRAMES 5
68
69 /* SDL audio buffer size, in samples. Should be small to have precise
70    A/V sync as SDL does not have hardware buffer fullness info. */
71 #define SDL_AUDIO_BUFFER_SIZE 1024
72
73 /* no AV sync correction is done if below the AV sync threshold */
74 #define AV_SYNC_THRESHOLD 0.01
75 /* no AV correction is done if too big error */
76 #define AV_NOSYNC_THRESHOLD 10.0
77
78 /* maximum audio speed change to get correct sync */
79 #define SAMPLE_CORRECTION_PERCENT_MAX 10
80
81 /* we use about AUDIO_DIFF_AVG_NB A-V differences to make the average */
82 #define AUDIO_DIFF_AVG_NB   20
83
84 /* NOTE: the size must be big enough to compensate for the hardware audio buffer size */
85 /* TODO: We assume that a decoded and resampled frame fits into this buffer */
86 #define SAMPLE_ARRAY_SIZE (8 * 65536)
87
88 static int sws_flags = SWS_BICUBIC;
89
90 typedef struct MyAVPacketList {
91     AVPacket pkt;
92     struct MyAVPacketList *next;
93     int serial;
94 } MyAVPacketList;
95
96 typedef struct PacketQueue {
97     MyAVPacketList *first_pkt, *last_pkt;
98     int nb_packets;
99     int size;
100     int abort_request;
101     int serial;
102     SDL_mutex *mutex;
103     SDL_cond *cond;
104 } PacketQueue;
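/* Note on the serial fields: packet_queue_put_private() bumps q->serial each
 * time the special flush_pkt passes through the queue and stamps every queued
 * packet with the current value, so consumers can tell packets queued before a
 * flush/seek apart from packets queued after it. */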
105
106 #define VIDEO_PICTURE_QUEUE_SIZE 4
107 #define SUBPICTURE_QUEUE_SIZE 4
108
109 typedef struct VideoPicture {
110     double pts;             // presentation timestamp for this picture
111     int64_t pos;            // byte position in file
112     int skip;
113     SDL_Overlay *bmp;
114     int width, height; /* source height & width */
115     AVRational sample_aspect_ratio;
116     int allocated;
117     int reallocate;
118     int serial;
119
120 #if CONFIG_AVFILTER
121     AVFilterBufferRef *picref;
122 #endif
123 } VideoPicture;
124
125 typedef struct SubPicture {
126     double pts; /* presentation time stamp for this picture */
127     AVSubtitle sub;
128 } SubPicture;
129
130 typedef struct AudioParams {
131     int freq;
132     int channels;
133     int64_t channel_layout;
134     enum AVSampleFormat fmt;
135 } AudioParams;
136
137 enum {
138     AV_SYNC_AUDIO_MASTER, /* default choice */
139     AV_SYNC_VIDEO_MASTER,
140     AV_SYNC_EXTERNAL_CLOCK, /* synchronize to an external clock */
141 };
142
143 typedef struct VideoState {
144     SDL_Thread *read_tid;
145     SDL_Thread *video_tid;
146     SDL_Thread *refresh_tid;
147     AVInputFormat *iformat;
148     int no_background;
149     int abort_request;
150     int force_refresh;
151     int paused;
152     int last_paused;
153     int queue_attachments_req;
154     int seek_req;
155     int seek_flags;
156     int64_t seek_pos;
157     int64_t seek_rel;
158     int read_pause_return;
159     AVFormatContext *ic;
160
161     int audio_stream;
162
163     int av_sync_type;
164     double external_clock;                   ///< external clock base
165     double external_clock_drift;             ///< external clock base - time (av_gettime) at which we updated external_clock
166     int64_t external_clock_time;             ///< last reference time
167
168     double audio_clock;
169     double audio_diff_cum; /* used for AV difference average computation */
170     double audio_diff_avg_coef;
171     double audio_diff_threshold;
172     int audio_diff_avg_count;
173     AVStream *audio_st;
174     PacketQueue audioq;
175     int audio_hw_buf_size;
176     DECLARE_ALIGNED(16,uint8_t,audio_buf2)[AVCODEC_MAX_AUDIO_FRAME_SIZE * 4];
177     uint8_t silence_buf[SDL_AUDIO_BUFFER_SIZE];
178     uint8_t *audio_buf;
179     uint8_t *audio_buf1;
180     unsigned int audio_buf_size; /* in bytes */
181     int audio_buf_index; /* in bytes */
182     int audio_write_buf_size;
183     AVPacket audio_pkt_temp;
184     AVPacket audio_pkt;
185     int audio_pkt_temp_serial;
186     struct AudioParams audio_src;
187     struct AudioParams audio_tgt;
188     struct SwrContext *swr_ctx;
189     double audio_current_pts;
190     double audio_current_pts_drift;
191     int frame_drops_early;
192     int frame_drops_late;
193     AVFrame *frame;
194
195     enum ShowMode {
196         SHOW_MODE_NONE = -1, SHOW_MODE_VIDEO = 0, SHOW_MODE_WAVES, SHOW_MODE_RDFT, SHOW_MODE_NB
197     } show_mode;
198     int16_t sample_array[SAMPLE_ARRAY_SIZE];
199     int sample_array_index;
200     int last_i_start;
201     RDFTContext *rdft;
202     int rdft_bits;
203     FFTSample *rdft_data;
204     int xpos;
205
206     SDL_Thread *subtitle_tid;
207     int subtitle_stream;
208     int subtitle_stream_changed;
209     AVStream *subtitle_st;
210     PacketQueue subtitleq;
211     SubPicture subpq[SUBPICTURE_QUEUE_SIZE];
212     int subpq_size, subpq_rindex, subpq_windex;
213     SDL_mutex *subpq_mutex;
214     SDL_cond *subpq_cond;
215
216     double frame_timer;
217     double frame_last_pts;
218     double frame_last_duration;
219     double frame_last_dropped_pts;
220     double frame_last_returned_time;
221     double frame_last_filter_delay;
222     int64_t frame_last_dropped_pos;
223     double video_clock;             // pts of last decoded frame / predicted pts of next decoded frame
224     int video_stream;
225     AVStream *video_st;
226     PacketQueue videoq;
227     double video_current_pts;       // current displayed pts (different from video_clock if frame fifos are used)
228     double video_current_pts_drift; // video_current_pts - time (av_gettime) at which we updated video_current_pts - used to have running video pts
229     int64_t video_current_pos;      // current displayed file pos
230     VideoPicture pictq[VIDEO_PICTURE_QUEUE_SIZE];
231     int pictq_size, pictq_rindex, pictq_windex;
232     SDL_mutex *pictq_mutex;
233     SDL_cond *pictq_cond;
234 #if !CONFIG_AVFILTER
235     struct SwsContext *img_convert_ctx;
236 #endif
237     SDL_Rect last_display_rect;
238
239     char filename[1024];
240     int width, height, xleft, ytop;
241     int step;
242
243 #if CONFIG_AVFILTER
244     AVFilterContext *in_video_filter;   // the first filter in the video chain
245     AVFilterContext *out_video_filter;  // the last filter in the video chain
246     int use_dr1;
247     FrameBuffer *buffer_pool;
248 #endif
249
250     int refresh;
251     int last_video_stream, last_audio_stream, last_subtitle_stream;
252
253     SDL_cond *continue_read_thread;
254 } VideoState;
255
256 /* options specified by the user */
257 static AVInputFormat *file_iformat;
258 static const char *input_filename;
259 static const char *window_title;
260 static int fs_screen_width;
261 static int fs_screen_height;
262 static int screen_width  = 0;
263 static int screen_height = 0;
264 static int audio_disable;
265 static int video_disable;
266 static int wanted_stream[AVMEDIA_TYPE_NB] = {
267     [AVMEDIA_TYPE_AUDIO]    = -1,
268     [AVMEDIA_TYPE_VIDEO]    = -1,
269     [AVMEDIA_TYPE_SUBTITLE] = -1,
270 };
271 static int seek_by_bytes = -1;
272 static int display_disable;
273 static int show_status = 1;
274 static int av_sync_type = AV_SYNC_AUDIO_MASTER;
275 static int64_t start_time = AV_NOPTS_VALUE;
276 static int64_t duration = AV_NOPTS_VALUE;
277 static int workaround_bugs = 1;
278 static int fast = 0;
279 static int genpts = 0;
280 static int lowres = 0;
281 static int idct = FF_IDCT_AUTO;
282 static enum AVDiscard skip_frame       = AVDISCARD_DEFAULT;
283 static enum AVDiscard skip_idct        = AVDISCARD_DEFAULT;
284 static enum AVDiscard skip_loop_filter = AVDISCARD_DEFAULT;
285 static int error_concealment = 3;
286 static int decoder_reorder_pts = -1;
287 static int autoexit;
288 static int exit_on_keydown;
289 static int exit_on_mousedown;
290 static int loop = 1;
291 static int framedrop = -1;
292 static int infinite_buffer = -1;
293 static enum ShowMode show_mode = SHOW_MODE_NONE;
294 static const char *audio_codec_name;
295 static const char *subtitle_codec_name;
296 static const char *video_codec_name;
297 static int rdftspeed = 20;
298 #if CONFIG_AVFILTER
299 static char *vfilters = NULL;
300 #endif
301
302 /* current context */
303 static int is_full_screen;
304 static int64_t audio_callback_time;
305
306 static AVPacket flush_pkt;
307
308 #define FF_ALLOC_EVENT   (SDL_USEREVENT)
309 #define FF_REFRESH_EVENT (SDL_USEREVENT + 1)
310 #define FF_QUIT_EVENT    (SDL_USEREVENT + 2)
311
312 static SDL_Surface *screen;
313
314 static int packet_queue_put(PacketQueue *q, AVPacket *pkt);
315
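/* Append a packet to the queue. Must be called with q->mutex held; the public
 * packet_queue_put() and packet_queue_start() take the lock before calling this. */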
316 static int packet_queue_put_private(PacketQueue *q, AVPacket *pkt)
317 {
318     MyAVPacketList *pkt1;
319
320     if (q->abort_request)
321        return -1;
322
323     pkt1 = av_malloc(sizeof(MyAVPacketList));
324     if (!pkt1)
325         return -1;
326     pkt1->pkt = *pkt;
327     pkt1->next = NULL;
328     if (pkt == &flush_pkt)
329         q->serial++;
330     pkt1->serial = q->serial;
331
332     if (!q->last_pkt)
333         q->first_pkt = pkt1;
334     else
335         q->last_pkt->next = pkt1;
336     q->last_pkt = pkt1;
337     q->nb_packets++;
338     q->size += pkt1->pkt.size + sizeof(*pkt1);
339     /* XXX: should duplicate packet data in DV case */
340     SDL_CondSignal(q->cond);
341     return 0;
342 }
343
344 static int packet_queue_put(PacketQueue *q, AVPacket *pkt)
345 {
346     int ret;
347
348     /* duplicate the packet */
349     if (pkt != &flush_pkt && av_dup_packet(pkt) < 0)
350         return -1;
351
352     SDL_LockMutex(q->mutex);
353     ret = packet_queue_put_private(q, pkt);
354     SDL_UnlockMutex(q->mutex);
355
356     if (pkt != &flush_pkt && ret < 0)
357         av_free_packet(pkt);
358
359     return ret;
360 }
361
362 /* packet queue handling */
363 static void packet_queue_init(PacketQueue *q)
364 {
365     memset(q, 0, sizeof(PacketQueue));
366     q->mutex = SDL_CreateMutex();
367     q->cond = SDL_CreateCond();
368     q->abort_request = 1;
369 }
370
371 static void packet_queue_flush(PacketQueue *q)
372 {
373     MyAVPacketList *pkt, *pkt1;
374
375     SDL_LockMutex(q->mutex);
376     for (pkt = q->first_pkt; pkt != NULL; pkt = pkt1) {
377         pkt1 = pkt->next;
378         av_free_packet(&pkt->pkt);
379         av_freep(&pkt);
380     }
381     q->last_pkt = NULL;
382     q->first_pkt = NULL;
383     q->nb_packets = 0;
384     q->size = 0;
385     SDL_UnlockMutex(q->mutex);
386 }
387
388 static void packet_queue_destroy(PacketQueue *q)
389 {
390     packet_queue_flush(q);
391     SDL_DestroyMutex(q->mutex);
392     SDL_DestroyCond(q->cond);
393 }
394
395 static void packet_queue_abort(PacketQueue *q)
396 {
397     SDL_LockMutex(q->mutex);
398
399     q->abort_request = 1;
400
401     SDL_CondSignal(q->cond);
402
403     SDL_UnlockMutex(q->mutex);
404 }
405
406 static void packet_queue_start(PacketQueue *q)
407 {
408     SDL_LockMutex(q->mutex);
409     q->abort_request = 0;
410     packet_queue_put_private(q, &flush_pkt);
411     SDL_UnlockMutex(q->mutex);
412 }
413
414 /* return < 0 if aborted, 0 if no packet and > 0 if packet.  */
415 static int packet_queue_get(PacketQueue *q, AVPacket *pkt, int block, int *serial)
416 {
417     MyAVPacketList *pkt1;
418     int ret;
419
420     SDL_LockMutex(q->mutex);
421
422     for (;;) {
423         if (q->abort_request) {
424             ret = -1;
425             break;
426         }
427
428         pkt1 = q->first_pkt;
429         if (pkt1) {
430             q->first_pkt = pkt1->next;
431             if (!q->first_pkt)
432                 q->last_pkt = NULL;
433             q->nb_packets--;
434             q->size -= pkt1->pkt.size + sizeof(*pkt1);
435             *pkt = pkt1->pkt;
436             if (serial)
437                 *serial = pkt1->serial;
438             av_free(pkt1);
439             ret = 1;
440             break;
441         } else if (!block) {
442             ret = 0;
443             break;
444         } else {
445             SDL_CondWait(q->cond, q->mutex);
446         }
447     }
448     SDL_UnlockMutex(q->mutex);
449     return ret;
450 }
451
452 static inline void fill_rectangle(SDL_Surface *screen,
453                                   int x, int y, int w, int h, int color, int update)
454 {
455     SDL_Rect rect;
456     rect.x = x;
457     rect.y = y;
458     rect.w = w;
459     rect.h = h;
460     SDL_FillRect(screen, &rect, color);
461     if (update && w > 0 && h > 0)
462         SDL_UpdateRect(screen, x, y, w, h);
463 }
464
465 /* draw only the border of a rectangle */
466 static void fill_border(int xleft, int ytop, int width, int height, int x, int y, int w, int h, int color, int update)
467 {
468     int w1, w2, h1, h2;
469
470     /* fill the background */
471     w1 = x;
472     if (w1 < 0)
473         w1 = 0;
474     w2 = width - (x + w);
475     if (w2 < 0)
476         w2 = 0;
477     h1 = y;
478     if (h1 < 0)
479         h1 = 0;
480     h2 = height - (y + h);
481     if (h2 < 0)
482         h2 = 0;
483     fill_rectangle(screen,
484                    xleft, ytop,
485                    w1, height,
486                    color, update);
487     fill_rectangle(screen,
488                    xleft + width - w2, ytop,
489                    w2, height,
490                    color, update);
491     fill_rectangle(screen,
492                    xleft + w1, ytop,
493                    width - w1 - w2, h1,
494                    color, update);
495     fill_rectangle(screen,
496                    xleft + w1, ytop + height - h2,
497                    width - w1 - w2, h2,
498                    color, update);
499 }
500
501 #define ALPHA_BLEND(a, oldp, newp, s)\
502 ((((oldp << s) * (255 - (a))) + (newp * (a))) / (255 << s))
503
504 #define RGBA_IN(r, g, b, a, s)\
505 {\
506     unsigned int v = ((const uint32_t *)(s))[0];\
507     a = (v >> 24) & 0xff;\
508     r = (v >> 16) & 0xff;\
509     g = (v >> 8) & 0xff;\
510     b = v & 0xff;\
511 }
512
513 #define YUVA_IN(y, u, v, a, s, pal)\
514 {\
515     unsigned int val = ((const uint32_t *)(pal))[*(const uint8_t*)(s)];\
516     a = (val >> 24) & 0xff;\
517     y = (val >> 16) & 0xff;\
518     u = (val >> 8) & 0xff;\
519     v = val & 0xff;\
520 }
521
522 #define YUVA_OUT(d, y, u, v, a)\
523 {\
524     ((uint32_t *)(d))[0] = (a << 24) | (y << 16) | (u << 8) | v;\
525 }
526
527
528 #define BPP 1
529
530 static void blend_subrect(AVPicture *dst, const AVSubtitleRect *rect, int imgw, int imgh)
531 {
532     int wrap, wrap3, width2, skip2;
533     int y, u, v, a, u1, v1, a1, w, h;
534     uint8_t *lum, *cb, *cr;
535     const uint8_t *p;
536     const uint32_t *pal;
537     int dstx, dsty, dstw, dsth;
538
539     dstw = av_clip(rect->w, 0, imgw);
540     dsth = av_clip(rect->h, 0, imgh);
541     dstx = av_clip(rect->x, 0, imgw - dstw);
542     dsty = av_clip(rect->y, 0, imgh - dsth);
543     lum = dst->data[0] + dsty * dst->linesize[0];
544     cb  = dst->data[1] + (dsty >> 1) * dst->linesize[1];
545     cr  = dst->data[2] + (dsty >> 1) * dst->linesize[2];
546
547     width2 = ((dstw + 1) >> 1) + (dstx & ~dstw & 1);
548     skip2 = dstx >> 1;
549     wrap = dst->linesize[0];
550     wrap3 = rect->pict.linesize[0];
551     p = rect->pict.data[0];
552     pal = (const uint32_t *)rect->pict.data[1];  /* Now in YCrCb! */
553
554     if (dsty & 1) {
555         lum += dstx;
556         cb += skip2;
557         cr += skip2;
558
559         if (dstx & 1) {
560             YUVA_IN(y, u, v, a, p, pal);
561             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
562             cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
563             cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
564             cb++;
565             cr++;
566             lum++;
567             p += BPP;
568         }
569         for (w = dstw - (dstx & 1); w >= 2; w -= 2) {
570             YUVA_IN(y, u, v, a, p, pal);
571             u1 = u;
572             v1 = v;
573             a1 = a;
574             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
575
576             YUVA_IN(y, u, v, a, p + BPP, pal);
577             u1 += u;
578             v1 += v;
579             a1 += a;
580             lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
581             cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
582             cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
583             cb++;
584             cr++;
585             p += 2 * BPP;
586             lum += 2;
587         }
588         if (w) {
589             YUVA_IN(y, u, v, a, p, pal);
590             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
591             cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
592             cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
593             p++;
594             lum++;
595         }
596         p += wrap3 - dstw * BPP;
597         lum += wrap - dstw - dstx;
598         cb += dst->linesize[1] - width2 - skip2;
599         cr += dst->linesize[2] - width2 - skip2;
600     }
601     for (h = dsth - (dsty & 1); h >= 2; h -= 2) {
602         lum += dstx;
603         cb += skip2;
604         cr += skip2;
605
606         if (dstx & 1) {
607             YUVA_IN(y, u, v, a, p, pal);
608             u1 = u;
609             v1 = v;
610             a1 = a;
611             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
612             p += wrap3;
613             lum += wrap;
614             YUVA_IN(y, u, v, a, p, pal);
615             u1 += u;
616             v1 += v;
617             a1 += a;
618             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
619             cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
620             cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
621             cb++;
622             cr++;
623             p += -wrap3 + BPP;
624             lum += -wrap + 1;
625         }
626         for (w = dstw - (dstx & 1); w >= 2; w -= 2) {
627             YUVA_IN(y, u, v, a, p, pal);
628             u1 = u;
629             v1 = v;
630             a1 = a;
631             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
632
633             YUVA_IN(y, u, v, a, p + BPP, pal);
634             u1 += u;
635             v1 += v;
636             a1 += a;
637             lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
638             p += wrap3;
639             lum += wrap;
640
641             YUVA_IN(y, u, v, a, p, pal);
642             u1 += u;
643             v1 += v;
644             a1 += a;
645             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
646
647             YUVA_IN(y, u, v, a, p + BPP, pal);
648             u1 += u;
649             v1 += v;
650             a1 += a;
651             lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
652
653             cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 2);
654             cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 2);
655
656             cb++;
657             cr++;
658             p += -wrap3 + 2 * BPP;
659             lum += -wrap + 2;
660         }
661         if (w) {
662             YUVA_IN(y, u, v, a, p, pal);
663             u1 = u;
664             v1 = v;
665             a1 = a;
666             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
667             p += wrap3;
668             lum += wrap;
669             YUVA_IN(y, u, v, a, p, pal);
670             u1 += u;
671             v1 += v;
672             a1 += a;
673             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
674             cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
675             cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
676             cb++;
677             cr++;
678             p += -wrap3 + BPP;
679             lum += -wrap + 1;
680         }
681         p += wrap3 + (wrap3 - dstw * BPP);
682         lum += wrap + (wrap - dstw - dstx);
683         cb += dst->linesize[1] - width2 - skip2;
684         cr += dst->linesize[2] - width2 - skip2;
685     }
686     /* handle odd height */
687     if (h) {
688         lum += dstx;
689         cb += skip2;
690         cr += skip2;
691
692         if (dstx & 1) {
693             YUVA_IN(y, u, v, a, p, pal);
694             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
695             cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
696             cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
697             cb++;
698             cr++;
699             lum++;
700             p += BPP;
701         }
702         for (w = dstw - (dstx & 1); w >= 2; w -= 2) {
703             YUVA_IN(y, u, v, a, p, pal);
704             u1 = u;
705             v1 = v;
706             a1 = a;
707             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
708
709             YUVA_IN(y, u, v, a, p + BPP, pal);
710             u1 += u;
711             v1 += v;
712             a1 += a;
713             lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
714             cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u, 1);
715             cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v, 1);
716             cb++;
717             cr++;
718             p += 2 * BPP;
719             lum += 2;
720         }
721         if (w) {
722             YUVA_IN(y, u, v, a, p, pal);
723             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
724             cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
725             cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
726         }
727     }
728 }
729
730 static void free_subpicture(SubPicture *sp)
731 {
732     avsubtitle_free(&sp->sub);
733 }
734
735 static void calculate_display_rect(SDL_Rect *rect, int scr_xleft, int scr_ytop, int scr_width, int scr_height, VideoPicture *vp)
736 {
737     float aspect_ratio;
738     int width, height, x, y;
739
740     if (vp->sample_aspect_ratio.num == 0)
741         aspect_ratio = 0;
742     else
743         aspect_ratio = av_q2d(vp->sample_aspect_ratio);
744
745     if (aspect_ratio <= 0.0)
746         aspect_ratio = 1.0;
747     aspect_ratio *= (float)vp->width / (float)vp->height;
748
749     /* XXX: we suppose the screen has a 1.0 pixel ratio */
750     height = scr_height;
751     width = ((int)rint(height * aspect_ratio)) & ~1;
752     if (width > scr_width) {
753         width = scr_width;
754         height = ((int)rint(width / aspect_ratio)) & ~1;
755     }
756     x = (scr_width - width) / 2;
757     y = (scr_height - height) / 2;
758     rect->x = scr_xleft + x;
759     rect->y = scr_ytop  + y;
760     rect->w = FFMAX(width,  1);
761     rect->h = FFMAX(height, 1);
762 }
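/* calculate_display_rect() picks the largest even-width rectangle that fits in
 * scr_width x scr_height while preserving the picture's display aspect ratio
 * (sample aspect ratio scaled by width/height), and centers it in that area. */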
763
764 static void video_image_display(VideoState *is)
765 {
766     VideoPicture *vp;
767     SubPicture *sp;
768     AVPicture pict;
769     SDL_Rect rect;
770     int i;
771
772     vp = &is->pictq[is->pictq_rindex];
773     if (vp->bmp) {
774         if (is->subtitle_st) {
775             if (is->subpq_size > 0) {
776                 sp = &is->subpq[is->subpq_rindex];
777
778                 if (vp->pts >= sp->pts + ((float) sp->sub.start_display_time / 1000)) {
779                     SDL_LockYUVOverlay (vp->bmp);
780
781                     pict.data[0] = vp->bmp->pixels[0];
782                     pict.data[1] = vp->bmp->pixels[2];
783                     pict.data[2] = vp->bmp->pixels[1];
784
785                     pict.linesize[0] = vp->bmp->pitches[0];
786                     pict.linesize[1] = vp->bmp->pitches[2];
787                     pict.linesize[2] = vp->bmp->pitches[1];
788
789                     for (i = 0; i < sp->sub.num_rects; i++)
790                         blend_subrect(&pict, sp->sub.rects[i],
791                                       vp->bmp->w, vp->bmp->h);
792
793                     SDL_UnlockYUVOverlay (vp->bmp);
794                 }
795             }
796         }
797
798         calculate_display_rect(&rect, is->xleft, is->ytop, is->width, is->height, vp);
799
800         SDL_DisplayYUVOverlay(vp->bmp, &rect);
801
802         if (rect.x != is->last_display_rect.x || rect.y != is->last_display_rect.y || rect.w != is->last_display_rect.w || rect.h != is->last_display_rect.h || is->force_refresh) {
803             int bgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0x00);
804             fill_border(is->xleft, is->ytop, is->width, is->height, rect.x, rect.y, rect.w, rect.h, bgcolor, 1);
805             is->last_display_rect = rect;
806         }
807     }
808 }
809
810 static inline int compute_mod(int a, int b)
811 {
812     return a < 0 ? a%b + b : a%b;
813 }
814
815 static void video_audio_display(VideoState *s)
816 {
817     int i, i_start, x, y1, y, ys, delay, n, nb_display_channels;
818     int ch, channels, h, h2, bgcolor, fgcolor;
819     int64_t time_diff;
820     int rdft_bits, nb_freq;
821
822     for (rdft_bits = 1; (1 << rdft_bits) < 2 * s->height; rdft_bits++)
823         ;
824     nb_freq = 1 << (rdft_bits - 1);
825
826     /* compute display index : center on currently output samples */
827     channels = s->audio_tgt.channels;
828     nb_display_channels = channels;
829     if (!s->paused) {
830         int data_used= s->show_mode == SHOW_MODE_WAVES ? s->width : (2*nb_freq);
831         n = 2 * channels;
832         delay = s->audio_write_buf_size;
833         delay /= n;
834
835         /* to be more precise, we take into account the time spent since
836            the last buffer computation */
837         if (audio_callback_time) {
838             time_diff = av_gettime() - audio_callback_time;
839             delay -= (time_diff * s->audio_tgt.freq) / 1000000;
840         }
841
842         delay += 2 * data_used;
843         if (delay < data_used)
844             delay = data_used;
845
846         i_start= x = compute_mod(s->sample_array_index - delay * channels, SAMPLE_ARRAY_SIZE);
847         if (s->show_mode == SHOW_MODE_WAVES) {
848             h = INT_MIN;
849             for (i = 0; i < 1000; i += channels) {
850                 int idx = (SAMPLE_ARRAY_SIZE + x - i) % SAMPLE_ARRAY_SIZE;
851                 int a = s->sample_array[idx];
852                 int b = s->sample_array[(idx + 4 * channels) % SAMPLE_ARRAY_SIZE];
853                 int c = s->sample_array[(idx + 5 * channels) % SAMPLE_ARRAY_SIZE];
854                 int d = s->sample_array[(idx + 9 * channels) % SAMPLE_ARRAY_SIZE];
855                 int score = a - d;
856                 if (h < score && (b ^ c) < 0) {
857                     h = score;
858                     i_start = idx;
859                 }
860             }
861         }
862
863         s->last_i_start = i_start;
864     } else {
865         i_start = s->last_i_start;
866     }
867
868     bgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0x00);
869     if (s->show_mode == SHOW_MODE_WAVES) {
870         fill_rectangle(screen,
871                        s->xleft, s->ytop, s->width, s->height,
872                        bgcolor, 0);
873
874         fgcolor = SDL_MapRGB(screen->format, 0xff, 0xff, 0xff);
875
876         /* total height for one channel */
877         h = s->height / nb_display_channels;
878         /* graph height / 2 */
879         h2 = (h * 9) / 20;
880         for (ch = 0; ch < nb_display_channels; ch++) {
881             i = i_start + ch;
882             y1 = s->ytop + ch * h + (h / 2); /* position of center line */
883             for (x = 0; x < s->width; x++) {
884                 y = (s->sample_array[i] * h2) >> 15;
885                 if (y < 0) {
886                     y = -y;
887                     ys = y1 - y;
888                 } else {
889                     ys = y1;
890                 }
891                 fill_rectangle(screen,
892                                s->xleft + x, ys, 1, y,
893                                fgcolor, 0);
894                 i += channels;
895                 if (i >= SAMPLE_ARRAY_SIZE)
896                     i -= SAMPLE_ARRAY_SIZE;
897             }
898         }
899
900         fgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0xff);
901
902         for (ch = 1; ch < nb_display_channels; ch++) {
903             y = s->ytop + ch * h;
904             fill_rectangle(screen,
905                            s->xleft, y, s->width, 1,
906                            fgcolor, 0);
907         }
908         SDL_UpdateRect(screen, s->xleft, s->ytop, s->width, s->height);
909     } else {
910         nb_display_channels= FFMIN(nb_display_channels, 2);
911         if (rdft_bits != s->rdft_bits) {
912             av_rdft_end(s->rdft);
913             av_free(s->rdft_data);
914             s->rdft = av_rdft_init(rdft_bits, DFT_R2C);
915             s->rdft_bits = rdft_bits;
916             s->rdft_data = av_malloc(4 * nb_freq * sizeof(*s->rdft_data));
917         }
918         {
919             FFTSample *data[2];
920             for (ch = 0; ch < nb_display_channels; ch++) {
921                 data[ch] = s->rdft_data + 2 * nb_freq * ch;
922                 i = i_start + ch;
923                 for (x = 0; x < 2 * nb_freq; x++) {
924                     double w = (x-nb_freq) * (1.0 / nb_freq);
925                     data[ch][x] = s->sample_array[i] * (1.0 - w * w);
926                     i += channels;
927                     if (i >= SAMPLE_ARRAY_SIZE)
928                         i -= SAMPLE_ARRAY_SIZE;
929                 }
930                 av_rdft_calc(s->rdft, data[ch]);
931             }
932             // least efficient way to do this, we should of course directly access it but it is more than fast enough
933             for (y = 0; y < s->height; y++) {
934                 double w = 1 / sqrt(nb_freq);
935                 int a = sqrt(w * sqrt(data[0][2 * y + 0] * data[0][2 * y + 0] + data[0][2 * y + 1] * data[0][2 * y + 1]));
936                 int b = (nb_display_channels == 2 ) ? sqrt(w * sqrt(data[1][2 * y + 0] * data[1][2 * y + 0]
937                        + data[1][2 * y + 1] * data[1][2 * y + 1])) : a;
938                 a = FFMIN(a, 255);
939                 b = FFMIN(b, 255);
940                 fgcolor = SDL_MapRGB(screen->format, a, b, (a + b) / 2);
941
942                 fill_rectangle(screen,
943                             s->xpos, s->height-y, 1, 1,
944                             fgcolor, 0);
945             }
946         }
947         SDL_UpdateRect(screen, s->xpos, s->ytop, 1, s->height);
948         if (!s->paused)
949             s->xpos++;
950         if (s->xpos >= s->width)
951             s->xpos= s->xleft;
952     }
953 }
954
955 static void stream_close(VideoState *is)
956 {
957     VideoPicture *vp;
958     int i;
959     /* XXX: use a special url_shutdown call to abort parse cleanly */
960     is->abort_request = 1;
961     SDL_WaitThread(is->read_tid, NULL);
962     SDL_WaitThread(is->refresh_tid, NULL);
963     packet_queue_destroy(&is->videoq);
964     packet_queue_destroy(&is->audioq);
965     packet_queue_destroy(&is->subtitleq);
966
967     /* free all pictures */
968     for (i = 0; i < VIDEO_PICTURE_QUEUE_SIZE; i++) {
969         vp = &is->pictq[i];
970 #if CONFIG_AVFILTER
971         avfilter_unref_bufferp(&vp->picref);
972 #endif
973         if (vp->bmp) {
974             SDL_FreeYUVOverlay(vp->bmp);
975             vp->bmp = NULL;
976         }
977     }
978     SDL_DestroyMutex(is->pictq_mutex);
979     SDL_DestroyCond(is->pictq_cond);
980     SDL_DestroyMutex(is->subpq_mutex);
981     SDL_DestroyCond(is->subpq_cond);
982     SDL_DestroyCond(is->continue_read_thread);
983 #if !CONFIG_AVFILTER
984     if (is->img_convert_ctx)
985         sws_freeContext(is->img_convert_ctx);
986 #endif
987     av_free(is);
988 }
989
990 static void do_exit(VideoState *is)
991 {
992     if (is) {
993         stream_close(is);
994     }
995     av_lockmgr_register(NULL);
996     uninit_opts();
997 #if CONFIG_AVFILTER
998     avfilter_uninit();
999     av_freep(&vfilters);
1000 #endif
1001     avformat_network_deinit();
1002     if (show_status)
1003         printf("\n");
1004     SDL_Quit();
1005     av_log(NULL, AV_LOG_QUIET, "%s", "");
1006     exit(0);
1007 }
1008
1009 static void sigterm_handler(int sig)
1010 {
1011     exit(123);
1012 }
1013
1014 static int video_open(VideoState *is, int force_set_video_mode)
1015 {
1016     int flags = SDL_HWSURFACE | SDL_ASYNCBLIT | SDL_HWACCEL;
1017     int w,h;
1018     VideoPicture *vp = &is->pictq[is->pictq_rindex];
1019     SDL_Rect rect;
1020
1021     if (is_full_screen) flags |= SDL_FULLSCREEN;
1022     else                flags |= SDL_RESIZABLE;
1023
1024     if (is_full_screen && fs_screen_width) {
1025         w = fs_screen_width;
1026         h = fs_screen_height;
1027     } else if (!is_full_screen && screen_width) {
1028         w = screen_width;
1029         h = screen_height;
1030     } else if (vp->width) {
1031         calculate_display_rect(&rect, 0, 0, INT_MAX, vp->height, vp);
1032         w = rect.w;
1033         h = rect.h;
1034     } else {
1035         w = 640;
1036         h = 480;
1037     }
1038     if (screen && is->width == screen->w && screen->w == w
1039        && is->height== screen->h && screen->h == h && !force_set_video_mode)
1040         return 0;
1041     screen = SDL_SetVideoMode(w, h, 0, flags);
1042     if (!screen) {
1043         fprintf(stderr, "SDL: could not set video mode - exiting\n");
1044         do_exit(is);
1045     }
1046     if (!window_title)
1047         window_title = input_filename;
1048     SDL_WM_SetCaption(window_title, window_title);
1049
1050     is->width  = screen->w;
1051     is->height = screen->h;
1052
1053     return 0;
1054 }
1055
1056 /* display the current picture, if any */
1057 static void video_display(VideoState *is)
1058 {
1059     if (!screen)
1060         video_open(is, 0);
1061     if (is->audio_st && is->show_mode != SHOW_MODE_VIDEO)
1062         video_audio_display(is);
1063     else if (is->video_st)
1064         video_image_display(is);
1065 }
1066
1067 static int refresh_thread(void *opaque)
1068 {
1069     VideoState *is= opaque;
1070     while (!is->abort_request) {
1071         SDL_Event event;
1072         event.type = FF_REFRESH_EVENT;
1073         event.user.data1 = opaque;
1074         if (!is->refresh && (!is->paused || is->force_refresh)) {
1075             is->refresh = 1;
1076             SDL_PushEvent(&event);
1077         }
1078         //FIXME ideally we should wait the correct time but SDL's event passing is so slow it would be silly
1079         av_usleep(is->audio_st && is->show_mode != SHOW_MODE_VIDEO ? rdftspeed*1000 : 5000);
1080     }
1081     return 0;
1082 }
1083
1084 /* get the current audio clock value */
1085 static double get_audio_clock(VideoState *is)
1086 {
1087     if (is->paused) {
1088         return is->audio_current_pts;
1089     } else {
1090         return is->audio_current_pts_drift + av_gettime() / 1000000.0;
1091     }
1092 }
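/* get_audio_clock() above and the video/external clocks below share one scheme:
 * each clock is stored as pts_drift = pts - wall_clock_at_update, so its current
 * value is pts_drift + av_gettime()/1e6 while running, or the frozen pts when
 * paused. */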
1093
1094 /* get the current video clock value */
1095 static double get_video_clock(VideoState *is)
1096 {
1097     if (is->paused) {
1098         return is->video_current_pts;
1099     } else {
1100         return is->video_current_pts_drift + av_gettime() / 1000000.0;
1101     }
1102 }
1103
1104 /* get the current external clock value */
1105 static double get_external_clock(VideoState *is)
1106 {
1107     if (is->paused) {
1108         return is->external_clock;
1109     } else {
1110         return is->external_clock_drift + av_gettime() / 1000000.0;
1111     }
1112 }
1113
1114 static int get_master_sync_type(VideoState *is) {
1115     if (is->av_sync_type == AV_SYNC_VIDEO_MASTER) {
1116         if (is->video_st)
1117             return AV_SYNC_VIDEO_MASTER;
1118         else
1119             return AV_SYNC_AUDIO_MASTER;
1120     } else if (is->av_sync_type == AV_SYNC_AUDIO_MASTER) {
1121         if (is->audio_st)
1122             return AV_SYNC_AUDIO_MASTER;
1123         else
1124             return AV_SYNC_EXTERNAL_CLOCK;
1125     } else {
1126         return AV_SYNC_EXTERNAL_CLOCK;
1127     }
1128 }
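/* get_master_sync_type() degrades gracefully: if the stream backing the
 * requested master clock is missing, video master falls back to audio, and
 * audio master falls back to the external clock. */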
1129
1130 /* get the current master clock value */
1131 static double get_master_clock(VideoState *is)
1132 {
1133     double val;
1134
1135     switch (get_master_sync_type(is)) {
1136         case AV_SYNC_VIDEO_MASTER:
1137             val = get_video_clock(is);
1138             break;
1139         case AV_SYNC_AUDIO_MASTER:
1140             val = get_audio_clock(is);
1141             break;
1142         default:
1143             val = get_external_clock(is);
1144             break;
1145     }
1146     return val;
1147 }
1148
1149 static void update_external_clock_pts(VideoState *is, double pts)
1150 {
1151    is->external_clock_time = av_gettime();
1152    is->external_clock = pts;
1153    is->external_clock_drift = pts - is->external_clock_time / 1000000.0;
1154 }
1155
1156 static void check_external_clock_sync(VideoState *is, double pts) {
1157     if (fabs(get_external_clock(is) - pts) > AV_NOSYNC_THRESHOLD) {
1158         update_external_clock_pts(is, pts);
1159     }
1160 }
1161
1162 /* seek in the stream */
1163 static void stream_seek(VideoState *is, int64_t pos, int64_t rel, int seek_by_bytes)
1164 {
1165     if (!is->seek_req) {
1166         is->seek_pos = pos;
1167         is->seek_rel = rel;
1168         is->seek_flags &= ~AVSEEK_FLAG_BYTE;
1169         if (seek_by_bytes)
1170             is->seek_flags |= AVSEEK_FLAG_BYTE;
1171         is->seek_req = 1;
1172     }
1173 }
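/* stream_seek() only records the seek request; the demuxing (read) thread is
 * expected to pick up seek_req and perform the actual seek. Requests arriving
 * while one is already pending are ignored. */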
1174
1175 /* pause or resume the video */
1176 static void stream_toggle_pause(VideoState *is)
1177 {
1178     if (is->paused) {
1179         is->frame_timer += av_gettime() / 1000000.0 + is->video_current_pts_drift - is->video_current_pts;
1180         if (is->read_pause_return != AVERROR(ENOSYS)) {
1181             is->video_current_pts = is->video_current_pts_drift + av_gettime() / 1000000.0;
1182         }
1183         is->video_current_pts_drift = is->video_current_pts - av_gettime() / 1000000.0;
1184     }
1185     update_external_clock_pts(is, get_external_clock(is));
1186     is->paused = !is->paused;
1187 }
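/* When resuming from pause, stream_toggle_pause() advances frame_timer by the
 * wall-clock time spent paused (recovered from the video clock drift), so frames
 * are not treated as late the instant playback restarts. */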
1188
1189 static double compute_target_delay(double delay, VideoState *is)
1190 {
1191     double sync_threshold, diff;
1192
1193     /* update delay to follow master synchronisation source */
1194     if (get_master_sync_type(is) != AV_SYNC_VIDEO_MASTER) {
1195         /* if video is slave, we try to correct big delays by
1196            duplicating or deleting a frame */
1197         diff = get_video_clock(is) - get_master_clock(is);
1198
1199         /* skip or repeat frame. We take into account the
1200            delay to compute the threshold. I still don't know
1201            if it is the best guess */
1202         sync_threshold = FFMAX(AV_SYNC_THRESHOLD, delay);
1203         if (fabs(diff) < AV_NOSYNC_THRESHOLD) {
1204             if (diff <= -sync_threshold)
1205                 delay = 0;
1206             else if (diff >= sync_threshold)
1207                 delay = 2 * delay;
1208         }
1209     }
1210
1211     av_dlog(NULL, "video: delay=%0.3f A-V=%f\n",
1212             delay, -diff);
1213
1214     return delay;
1215 }
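/* compute_target_delay(): when video is not the master clock, the nominal frame
 * delay is adjusted by the A-V difference. If video lags the master by more than
 * the sync threshold the delay drops to 0 (show the next frame immediately); if
 * it runs ahead by more than the threshold the delay is doubled. */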
1216
1217 static void pictq_next_picture(VideoState *is) {
1218     /* update queue size and signal for next picture */
1219     if (++is->pictq_rindex == VIDEO_PICTURE_QUEUE_SIZE)
1220         is->pictq_rindex = 0;
1221
1222     SDL_LockMutex(is->pictq_mutex);
1223     is->pictq_size--;
1224     SDL_CondSignal(is->pictq_cond);
1225     SDL_UnlockMutex(is->pictq_mutex);
1226 }
1227
1228 static void pictq_prev_picture(VideoState *is) {
1229     VideoPicture *prevvp;
1230     /* update queue size and signal for the previous picture */
1231     prevvp = &is->pictq[(is->pictq_rindex + VIDEO_PICTURE_QUEUE_SIZE - 1) % VIDEO_PICTURE_QUEUE_SIZE];
1232     if (prevvp->allocated && !prevvp->skip) {
1233         SDL_LockMutex(is->pictq_mutex);
1234         if (is->pictq_size < VIDEO_PICTURE_QUEUE_SIZE - 1) {
1235             if (--is->pictq_rindex == -1)
1236                 is->pictq_rindex = VIDEO_PICTURE_QUEUE_SIZE - 1;
1237             is->pictq_size++;
1238         }
1239         SDL_CondSignal(is->pictq_cond);
1240         SDL_UnlockMutex(is->pictq_mutex);
1241     }
1242 }
1243
1244 static void update_video_pts(VideoState *is, double pts, int64_t pos, int serial) {
1245     double time = av_gettime() / 1000000.0;
1246     /* update current video pts */
1247     is->video_current_pts = pts;
1248     is->video_current_pts_drift = is->video_current_pts - time;
1249     is->video_current_pos = pos;
1250     is->frame_last_pts = pts;
1251     if (is->videoq.serial == serial)
1252         check_external_clock_sync(is, is->video_current_pts);
1253 }
1254
1255 /* called to display each frame */
1256 static void video_refresh(void *opaque)
1257 {
1258     VideoState *is = opaque;
1259     VideoPicture *vp;
1260     double time;
1261
1262     SubPicture *sp, *sp2;
1263
1264     if (is->video_st) {
1265         if (is->force_refresh)
1266             pictq_prev_picture(is);
1267 retry:
1268         if (is->pictq_size == 0) {
1269             SDL_LockMutex(is->pictq_mutex);
1270             if (is->frame_last_dropped_pts != AV_NOPTS_VALUE && is->frame_last_dropped_pts > is->frame_last_pts) {
1271                 update_video_pts(is, is->frame_last_dropped_pts, is->frame_last_dropped_pos, 0);
1272                 is->frame_last_dropped_pts = AV_NOPTS_VALUE;
1273             }
1274             SDL_UnlockMutex(is->pictq_mutex);
1275             // nothing to do, no picture to display in the queue
1276         } else {
1277             double last_duration, duration, delay;
1278             /* dequeue the picture */
1279             vp = &is->pictq[is->pictq_rindex];
1280
1281             if (vp->skip) {
1282                 pictq_next_picture(is);
1283                 goto retry;
1284             }
1285
1286             if (is->paused)
1287                 goto display;
1288
1289             /* compute nominal last_duration */
1290             last_duration = vp->pts - is->frame_last_pts;
1291             if (last_duration > 0 && last_duration < 10.0) {
1292                 /* if duration of the last frame was sane, update last_duration in video state */
1293                 is->frame_last_duration = last_duration;
1294             }
1295             delay = compute_target_delay(is->frame_last_duration, is);
1296
1297             time= av_gettime()/1000000.0;
1298             if (time < is->frame_timer + delay)
1299                 return;
1300
1301             if (delay > 0)
1302                 is->frame_timer += delay * FFMAX(1, floor((time-is->frame_timer) / delay));
1303
1304             SDL_LockMutex(is->pictq_mutex);
1305             update_video_pts(is, vp->pts, vp->pos, vp->serial);
1306             SDL_UnlockMutex(is->pictq_mutex);
1307
1308             if (is->pictq_size > 1) {
1309                 VideoPicture *nextvp = &is->pictq[(is->pictq_rindex + 1) % VIDEO_PICTURE_QUEUE_SIZE];
1310                 duration = nextvp->pts - vp->pts;
1311                 if((framedrop>0 || (framedrop && get_master_sync_type(is) != AV_SYNC_VIDEO_MASTER)) && time > is->frame_timer + duration){
1312                     is->frame_drops_late++;
1313                     pictq_next_picture(is);
1314                     goto retry;
1315                 }
1316             }
1317
1318             if (is->subtitle_st) {
1319                 if (is->subtitle_stream_changed) {
1320                     SDL_LockMutex(is->subpq_mutex);
1321
1322                     while (is->subpq_size) {
1323                         free_subpicture(&is->subpq[is->subpq_rindex]);
1324
1325                         /* update queue size and signal for next picture */
1326                         if (++is->subpq_rindex == SUBPICTURE_QUEUE_SIZE)
1327                             is->subpq_rindex = 0;
1328
1329                         is->subpq_size--;
1330                     }
1331                     is->subtitle_stream_changed = 0;
1332
1333                     SDL_CondSignal(is->subpq_cond);
1334                     SDL_UnlockMutex(is->subpq_mutex);
1335                 } else {
1336                     if (is->subpq_size > 0) {
1337                         sp = &is->subpq[is->subpq_rindex];
1338
1339                         if (is->subpq_size > 1)
1340                             sp2 = &is->subpq[(is->subpq_rindex + 1) % SUBPICTURE_QUEUE_SIZE];
1341                         else
1342                             sp2 = NULL;
1343
1344                         if ((is->video_current_pts > (sp->pts + ((float) sp->sub.end_display_time / 1000)))
1345                                 || (sp2 && is->video_current_pts > (sp2->pts + ((float) sp2->sub.start_display_time / 1000))))
1346                         {
1347                             free_subpicture(sp);
1348
1349                             /* update queue size and signal for next picture */
1350                             if (++is->subpq_rindex == SUBPICTURE_QUEUE_SIZE)
1351                                 is->subpq_rindex = 0;
1352
1353                             SDL_LockMutex(is->subpq_mutex);
1354                             is->subpq_size--;
1355                             SDL_CondSignal(is->subpq_cond);
1356                             SDL_UnlockMutex(is->subpq_mutex);
1357                         }
1358                     }
1359                 }
1360             }
1361
1362 display:
1363             /* display picture */
1364             if (!display_disable)
1365                 video_display(is);
1366
1367             pictq_next_picture(is);
1368         }
1369     } else if (is->audio_st) {
1370         /* draw the next audio frame */
1371
1372         /* if only audio stream, then display the audio bars (better
1373            than nothing, just to test the implementation) */
1374
1375         /* display picture */
1376         if (!display_disable)
1377             video_display(is);
1378     }
1379     is->force_refresh = 0;
1380     if (show_status) {
1381         static int64_t last_time;
1382         int64_t cur_time;
1383         int aqsize, vqsize, sqsize;
1384         double av_diff;
1385
1386         cur_time = av_gettime();
1387         if (!last_time || (cur_time - last_time) >= 30000) {
1388             aqsize = 0;
1389             vqsize = 0;
1390             sqsize = 0;
1391             if (is->audio_st)
1392                 aqsize = is->audioq.size;
1393             if (is->video_st)
1394                 vqsize = is->videoq.size;
1395             if (is->subtitle_st)
1396                 sqsize = is->subtitleq.size;
1397             av_diff = 0;
1398             if (is->audio_st && is->video_st)
1399                 av_diff = get_audio_clock(is) - get_video_clock(is);
1400             printf("%7.2f A-V:%7.3f fd=%4d aq=%5dKB vq=%5dKB sq=%5dB f=%"PRId64"/%"PRId64"   \r",
1401                    get_master_clock(is),
1402                    av_diff,
1403                    is->frame_drops_early + is->frame_drops_late,
1404                    aqsize / 1024,
1405                    vqsize / 1024,
1406                    sqsize,
1407                    is->video_st ? is->video_st->codec->pts_correction_num_faulty_dts : 0,
1408                    is->video_st ? is->video_st->codec->pts_correction_num_faulty_pts : 0);
1409             fflush(stdout);
1410             last_time = cur_time;
1411         }
1412     }
1413 }
1414
1415 /* allocate a picture (needs to do that in main thread to avoid
1416    potential locking problems) */
1417 static void alloc_picture(VideoState *is)
1418 {
1419     VideoPicture *vp;
1420
1421     vp = &is->pictq[is->pictq_windex];
1422
1423     if (vp->bmp)
1424         SDL_FreeYUVOverlay(vp->bmp);
1425
1426 #if CONFIG_AVFILTER
1427     avfilter_unref_bufferp(&vp->picref);
1428 #endif
1429
1430     video_open(is, 0);
1431
1432     vp->bmp = SDL_CreateYUVOverlay(vp->width, vp->height,
1433                                    SDL_YV12_OVERLAY,
1434                                    screen);
1435     if (!vp->bmp || vp->bmp->pitches[0] < vp->width) {
1436         /* SDL allocates a buffer smaller than requested if the video
1437          * overlay hardware is unable to support the requested size. */
1438         fprintf(stderr, "Error: the video system does not support an image\n"
1439                         "size of %dx%d pixels. Try using -lowres or -vf \"scale=w:h\"\n"
1440                         "to reduce the image size.\n", vp->width, vp->height );
1441         do_exit(is);
1442     }
1443
1444     SDL_LockMutex(is->pictq_mutex);
1445     vp->allocated = 1;
1446     SDL_CondSignal(is->pictq_cond);
1447     SDL_UnlockMutex(is->pictq_mutex);
1448 }
1449
1450 static int queue_picture(VideoState *is, AVFrame *src_frame, double pts1, int64_t pos, int serial)
1451 {
1452     VideoPicture *vp;
1453     double frame_delay, pts = pts1;
1454
1455     /* compute the exact PTS for the picture if it is omitted in the stream
1456      * pts1 is the dts of the pkt / pts of the frame */
1457     if (pts != 0) {
1458         /* update video clock with pts, if present */
1459         is->video_clock = pts;
1460     } else {
1461         pts = is->video_clock;
1462     }
1463     /* update video clock for next frame */
1464     frame_delay = av_q2d(is->video_st->codec->time_base);
1465     /* for MPEG2, the frame can be repeated, so we update the
1466        clock accordingly */
1467     frame_delay += src_frame->repeat_pict * (frame_delay * 0.5);
1468     is->video_clock += frame_delay;
1469
1470 #if defined(DEBUG_SYNC) && 0
1471     printf("frame_type=%c clock=%0.3f pts=%0.3f\n",
1472            av_get_picture_type_char(src_frame->pict_type), pts, pts1);
1473 #endif
1474
1475     /* wait until we have space to put a new picture */
1476     SDL_LockMutex(is->pictq_mutex);
1477
1478     /* keep the last already displayed picture in the queue */
1479     while (is->pictq_size >= VIDEO_PICTURE_QUEUE_SIZE - 2 &&
1480            !is->videoq.abort_request) {
1481         SDL_CondWait(is->pictq_cond, is->pictq_mutex);
1482     }
1483     SDL_UnlockMutex(is->pictq_mutex);
1484
1485     if (is->videoq.abort_request)
1486         return -1;
1487
1488     vp = &is->pictq[is->pictq_windex];
1489
1490 #if CONFIG_AVFILTER
1491     vp->sample_aspect_ratio = ((AVFilterBufferRef *)src_frame->opaque)->video->sample_aspect_ratio;
1492 #else
1493     vp->sample_aspect_ratio = av_guess_sample_aspect_ratio(is->ic, is->video_st, src_frame);
1494 #endif
1495
1496     /* alloc or resize hardware picture buffer */
1497     if (!vp->bmp || vp->reallocate || !vp->allocated ||
1498         vp->width  != src_frame->width ||
1499         vp->height != src_frame->height) {
1500         SDL_Event event;
1501
1502         vp->allocated  = 0;
1503         vp->reallocate = 0;
1504         vp->width = src_frame->width;
1505         vp->height = src_frame->height;
1506
1507         /* the allocation must be done in the main thread to avoid
1508            locking problems. */
1509         event.type = FF_ALLOC_EVENT;
1510         event.user.data1 = is;
1511         SDL_PushEvent(&event);
1512
1513         /* wait until the picture is allocated */
1514         SDL_LockMutex(is->pictq_mutex);
1515         while (!vp->allocated && !is->videoq.abort_request) {
1516             SDL_CondWait(is->pictq_cond, is->pictq_mutex);
1517         }
1518         /* if the queue is aborted, we have to pop the pending ALLOC event or wait for the allocation to complete */
1519         if (is->videoq.abort_request && SDL_PeepEvents(&event, 1, SDL_GETEVENT, SDL_EVENTMASK(FF_ALLOC_EVENT)) != 1) {
1520             while (!vp->allocated) {
1521                 SDL_CondWait(is->pictq_cond, is->pictq_mutex);
1522             }
1523         }
1524         SDL_UnlockMutex(is->pictq_mutex);
1525
1526         if (is->videoq.abort_request)
1527             return -1;
1528     }
1529
1530     /* if the frame is not skipped, then display it */
1531     if (vp->bmp) {
1532         AVPicture pict = { { 0 } };
1533 #if CONFIG_AVFILTER
1534         avfilter_unref_bufferp(&vp->picref);
1535         vp->picref = src_frame->opaque;
1536 #endif
1537
1538         /* get a pointer on the bitmap */
1539         SDL_LockYUVOverlay (vp->bmp);
1540
1541         pict.data[0] = vp->bmp->pixels[0];
1542         pict.data[1] = vp->bmp->pixels[2];
1543         pict.data[2] = vp->bmp->pixels[1];
1544
1545         pict.linesize[0] = vp->bmp->pitches[0];
1546         pict.linesize[1] = vp->bmp->pitches[2];
1547         pict.linesize[2] = vp->bmp->pitches[1];
1548
1549 #if CONFIG_AVFILTER
1550         // FIXME use direct rendering
1551         av_picture_copy(&pict, (AVPicture *)src_frame,
1552                         src_frame->format, vp->width, vp->height);
1553 #else
1554         sws_flags = av_get_int(sws_opts, "sws_flags", NULL);
1555         is->img_convert_ctx = sws_getCachedContext(is->img_convert_ctx,
1556             vp->width, vp->height, src_frame->format, vp->width, vp->height,
1557             AV_PIX_FMT_YUV420P, sws_flags, NULL, NULL, NULL);
1558         if (is->img_convert_ctx == NULL) {
1559             fprintf(stderr, "Cannot initialize the conversion context\n");
1560             exit(1);
1561         }
1562         sws_scale(is->img_convert_ctx, src_frame->data, src_frame->linesize,
1563                   0, vp->height, pict.data, pict.linesize);
1564 #endif
1565         /* update the bitmap content */
1566         SDL_UnlockYUVOverlay(vp->bmp);
1567
1568         vp->pts = pts;
1569         vp->pos = pos;
1570         vp->skip = 0;
1571         vp->serial = serial;
1572
1573         /* now we can update the picture count */
1574         if (++is->pictq_windex == VIDEO_PICTURE_QUEUE_SIZE)
1575             is->pictq_windex = 0;
1576         SDL_LockMutex(is->pictq_mutex);
1577         is->pictq_size++;
1578         SDL_UnlockMutex(is->pictq_mutex);
1579     }
1580     return 0;
1581 }
1582
1583 static int get_video_frame(VideoState *is, AVFrame *frame, int64_t *pts, AVPacket *pkt, int *serial)
1584 {
1585     int got_picture, i;
1586
1587     if (packet_queue_get(&is->videoq, pkt, 1, serial) < 0)
1588         return -1;
1589
1590     if (pkt->data == flush_pkt.data) {
1591         avcodec_flush_buffers(is->video_st->codec);
1592
1593         SDL_LockMutex(is->pictq_mutex);
1594         // Make sure there are no long delay timers (ideally we should just flush the queue but that's harder)
1595         for (i = 0; i < VIDEO_PICTURE_QUEUE_SIZE; i++) {
1596             is->pictq[i].skip = 1;
1597         }
1598         while (is->pictq_size && !is->videoq.abort_request) {
1599             SDL_CondWait(is->pictq_cond, is->pictq_mutex);
1600         }
1601         is->video_current_pos = -1;
1602         is->frame_last_pts = AV_NOPTS_VALUE;
1603         is->frame_last_duration = 0;
1604         is->frame_timer = (double)av_gettime() / 1000000.0;
1605         is->frame_last_dropped_pts = AV_NOPTS_VALUE;
1606         SDL_UnlockMutex(is->pictq_mutex);
1607
1608         return 0;
1609     }
1610
1611     if(avcodec_decode_video2(is->video_st->codec, frame, &got_picture, pkt) < 0)
1612         return 0;
1613
1614     if (got_picture) {
1615         int ret = 1;
1616
1617         if (decoder_reorder_pts == -1) {
1618             *pts = av_frame_get_best_effort_timestamp(frame);
1619         } else if (decoder_reorder_pts) {
1620             *pts = frame->pkt_pts;
1621         } else {
1622             *pts = frame->pkt_dts;
1623         }
1624
1625         if (*pts == AV_NOPTS_VALUE) {
1626             *pts = 0;
1627         }
1628
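        /* early frame drop: when frame dropping is enabled, drop the frame before it
           is ever queued if, even after being displayed for one frame duration (minus
           the last filter delay), it would still lag behind the master clock */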
1629         if (framedrop>0 || (framedrop && get_master_sync_type(is) != AV_SYNC_VIDEO_MASTER)) {
1630             SDL_LockMutex(is->pictq_mutex);
1631             if (is->frame_last_pts != AV_NOPTS_VALUE && *pts) {
1632                 double clockdiff = get_video_clock(is) - get_master_clock(is);
1633                 double dpts = av_q2d(is->video_st->time_base) * *pts;
1634                 double ptsdiff = dpts - is->frame_last_pts;
1635                 if (fabs(clockdiff) < AV_NOSYNC_THRESHOLD &&
1636                      ptsdiff > 0 && ptsdiff < AV_NOSYNC_THRESHOLD &&
1637                      clockdiff + ptsdiff - is->frame_last_filter_delay < 0) {
1638                     is->frame_last_dropped_pos = pkt->pos;
1639                     is->frame_last_dropped_pts = dpts;
1640                     is->frame_drops_early++;
1641                     ret = 0;
1642                 }
1643             }
1644             SDL_UnlockMutex(is->pictq_mutex);
1645         }
1646
1647         return ret;
1648     }
1649     return 0;
1650 }
1651
1652 #if CONFIG_AVFILTER
1653 static int configure_filtergraph(AVFilterGraph *graph, const char *filtergraph,
1654                                  AVFilterContext *source_ctx, AVFilterContext *sink_ctx)
1655 {
1656     int ret;
1657     AVFilterInOut *outputs = NULL, *inputs = NULL;
1658
1659     if (filtergraph) {
1660         outputs = avfilter_inout_alloc();
1661         inputs  = avfilter_inout_alloc();
1662         if (!outputs || !inputs) {
1663             ret = AVERROR(ENOMEM);
1664             goto fail;
1665         }
1666
1667         outputs->name       = av_strdup("in");
1668         outputs->filter_ctx = source_ctx;
1669         outputs->pad_idx    = 0;
1670         outputs->next       = NULL;
1671
1672         inputs->name        = av_strdup("out");
1673         inputs->filter_ctx  = sink_ctx;
1674         inputs->pad_idx     = 0;
1675         inputs->next        = NULL;
1676
1677         if ((ret = avfilter_graph_parse(graph, filtergraph, &inputs, &outputs, NULL)) < 0)
1678             goto fail;
1679     } else {
1680         if ((ret = avfilter_link(source_ctx, 0, sink_ctx, 0)) < 0)
1681             goto fail;
1682     }
1683
1684     ret = avfilter_graph_config(graph, NULL);
1685 fail:
1686     avfilter_inout_free(&outputs);
1687     avfilter_inout_free(&inputs);
1688     return ret;
1689 }
1690
1691 static int configure_video_filters(AVFilterGraph *graph, VideoState *is, const char *vfilters)
1692 {
1693     static const enum AVPixelFormat pix_fmts[] = { AV_PIX_FMT_YUV420P, AV_PIX_FMT_NONE };
1694     char sws_flags_str[128];
1695     char buffersrc_args[256];
1696     int ret;
1697     AVBufferSinkParams *buffersink_params = av_buffersink_params_alloc();
1698     AVFilterContext *filt_src = NULL, *filt_out = NULL, *filt_crop;
1699     AVCodecContext *codec = is->video_st->codec;
1700
1701     if (!buffersink_params)
1702         return AVERROR(ENOMEM);
1703
1704     snprintf(sws_flags_str, sizeof(sws_flags_str), "flags=%d", sws_flags);
1705     graph->scale_sws_opts = av_strdup(sws_flags_str);
1706
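    /* the buffer source must be told the geometry, pixel format, time base and
       sample aspect ratio of the decoded frames that will be fed into the graph */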
1707     snprintf(buffersrc_args, sizeof(buffersrc_args),
1708              "video_size=%dx%d:pix_fmt=%d:time_base=%d/%d:pixel_aspect=%d/%d",
1709              codec->width, codec->height, codec->pix_fmt,
1710              is->video_st->time_base.num, is->video_st->time_base.den,
1711              codec->sample_aspect_ratio.num, FFMAX(codec->sample_aspect_ratio.den, 1));
1712
1713     if ((ret = avfilter_graph_create_filter(&filt_src,
1714                                             avfilter_get_by_name("buffer"),
1715                                             "ffplay_buffer", buffersrc_args, NULL,
1716                                             graph)) < 0)
1717         goto fail;
1718
1719     buffersink_params->pixel_fmts = pix_fmts;
1720     ret = avfilter_graph_create_filter(&filt_out,
1721                                        avfilter_get_by_name("ffbuffersink"),
1722                                        "ffplay_buffersink", NULL, buffersink_params, graph);
1723     if (ret < 0)
1724         goto fail;
1725
1726     /* The SDL YUV code does not handle odd width/height for some driver
1727      * combinations, therefore we crop the picture to an even width/height. */
1728     if ((ret = avfilter_graph_create_filter(&filt_crop,
1729                                             avfilter_get_by_name("crop"),
1730                                             "ffplay_crop", "floor(in_w/2)*2:floor(in_h/2)*2", NULL, graph)) < 0)
1731         goto fail;
1732     if ((ret = avfilter_link(filt_crop, 0, filt_out, 0)) < 0)
1733         goto fail;
1734
1735     if ((ret = configure_filtergraph(graph, vfilters, filt_src, filt_crop)) < 0)
1736         goto fail;
1737
1738     is->in_video_filter  = filt_src;
1739     is->out_video_filter = filt_out;
1740
1741 fail:
1742     av_freep(&buffersink_params);
1743     return ret;
1744 }
1745
1746 #endif  /* CONFIG_AVFILTER */
1747
1748 static int video_thread(void *arg)
1749 {
1750     AVPacket pkt = { 0 };
1751     VideoState *is = arg;
1752     AVFrame *frame = avcodec_alloc_frame();
1753     int64_t pts_int = AV_NOPTS_VALUE, pos = -1;
1754     double pts;
1755     int ret;
1756     int serial = 0;
1757
1758 #if CONFIG_AVFILTER
1759     AVCodecContext *codec = is->video_st->codec;
1760     AVFilterGraph *graph = avfilter_graph_alloc();
1761     AVFilterContext *filt_out = NULL, *filt_in = NULL;
1762     int last_w = 0;
1763     int last_h = 0;
1764     enum AVPixelFormat last_format = -2;
1765
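    /* with direct rendering (DR1) the decoder writes into buffers managed by the
       buffer pool (codec_get_buffer/codec_release_buffer), which lets decoded frames
       be passed to the filter graph by reference (see the AV_BUFFERSRC_FLAG_NO_COPY
       path below) instead of being copied */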
1766     if (codec->codec->capabilities & CODEC_CAP_DR1) {
1767         is->use_dr1 = 1;
1768         codec->get_buffer     = codec_get_buffer;
1769         codec->release_buffer = codec_release_buffer;
1770         codec->opaque         = &is->buffer_pool;
1771     }
1772 #endif
1773
1774     for (;;) {
1775 #if CONFIG_AVFILTER
1776         AVFilterBufferRef *picref;
1777         AVRational tb;
1778 #endif
1779         while (is->paused && !is->videoq.abort_request)
1780             SDL_Delay(10);
1781
1782         avcodec_get_frame_defaults(frame);
1783         av_free_packet(&pkt);
1784
1785         ret = get_video_frame(is, frame, &pts_int, &pkt, &serial);
1786         if (ret < 0)
1787             goto the_end;
1788
1789         if (!ret)
1790             continue;
1791
1792 #if CONFIG_AVFILTER
1793         if (   last_w != is->video_st->codec->width
1794             || last_h != is->video_st->codec->height
1795             || last_format != is->video_st->codec->pix_fmt) {
1796             av_log(NULL, AV_LOG_INFO, "Frame changed from size:%dx%d to size:%dx%d\n",
1797                    last_w, last_h, is->video_st->codec->width, is->video_st->codec->height);
1798             avfilter_graph_free(&graph);
1799             graph = avfilter_graph_alloc();
1800             if ((ret = configure_video_filters(graph, is, vfilters)) < 0) {
1801                 SDL_Event event;
1802                 event.type = FF_QUIT_EVENT;
1803                 event.user.data1 = is;
1804                 SDL_PushEvent(&event);
1805                 av_free_packet(&pkt);
1806                 goto the_end;
1807             }
1808             filt_in  = is->in_video_filter;
1809             filt_out = is->out_video_filter;
1810             last_w = is->video_st->codec->width;
1811             last_h = is->video_st->codec->height;
1812             last_format = is->video_st->codec->pix_fmt;
1813         }
1814
1815         frame->pts = pts_int;
1816         frame->sample_aspect_ratio = av_guess_sample_aspect_ratio(is->ic, is->video_st, frame);
1817         if (is->use_dr1 && frame->opaque) {
1818             FrameBuffer      *buf = frame->opaque;
1819             AVFilterBufferRef *fb = avfilter_get_video_buffer_ref_from_arrays(
1820                                         frame->data, frame->linesize,
1821                                         AV_PERM_READ | AV_PERM_PRESERVE,
1822                                         frame->width, frame->height,
1823                                         frame->format);
1824
1825             avfilter_copy_frame_props(fb, frame);
1826             fb->buf->priv           = buf;
1827             fb->buf->free           = filter_release_buffer;
1828
1829             buf->refcount++;
1830             av_buffersrc_add_ref(filt_in, fb, AV_BUFFERSRC_FLAG_NO_COPY);
1831
1832         } else
1833             av_buffersrc_write_frame(filt_in, frame);
1834
1835         av_free_packet(&pkt);
1836
1837         while (ret >= 0) {
1838             is->frame_last_returned_time = av_gettime() / 1000000.0;
1839
1840             ret = av_buffersink_get_buffer_ref(filt_out, &picref, 0);
1841             if (ret < 0) {
1842                 ret = 0;
1843                 break;
1844             }
1845
1846             is->frame_last_filter_delay = av_gettime() / 1000000.0 - is->frame_last_returned_time;
1847             if (fabs(is->frame_last_filter_delay) > AV_NOSYNC_THRESHOLD / 10.0)
1848                 is->frame_last_filter_delay = 0;
1849
1850             avfilter_copy_buf_props(frame, picref);
1851
1852             pts_int = picref->pts;
1853             tb      = filt_out->inputs[0]->time_base;
1854             pos     = picref->pos;
1855             frame->opaque = picref;
1856
1857             if (av_cmp_q(tb, is->video_st->time_base)) {
1858                 av_unused int64_t pts1 = pts_int;
1859                 pts_int = av_rescale_q(pts_int, tb, is->video_st->time_base);
1860                 av_dlog(NULL, "video_thread(): "
1861                         "tb:%d/%d pts:%"PRId64" -> tb:%d/%d pts:%"PRId64"\n",
1862                         tb.num, tb.den, pts1,
1863                         is->video_st->time_base.num, is->video_st->time_base.den, pts_int);
1864             }
1865             pts = pts_int * av_q2d(is->video_st->time_base);
1866             ret = queue_picture(is, frame, pts, pos, serial);
1867         }
1868 #else
1869         pts = pts_int * av_q2d(is->video_st->time_base);
1870         ret = queue_picture(is, frame, pts, pkt.pos, serial);
1871 #endif
1872
1873         if (ret < 0)
1874             goto the_end;
1875
1876         if (is->step)
1877             stream_toggle_pause(is);
1878     }
1879  the_end:
1880     avcodec_flush_buffers(is->video_st->codec);
1881 #if CONFIG_AVFILTER
1882     avfilter_graph_free(&graph);
1883 #endif
1884     av_free_packet(&pkt);
1885     avcodec_free_frame(&frame);
1886     return 0;
1887 }
1888
1889 static int subtitle_thread(void *arg)
1890 {
1891     VideoState *is = arg;
1892     SubPicture *sp;
1893     AVPacket pkt1, *pkt = &pkt1;
1894     int got_subtitle;
1895     double pts;
1896     int i, j;
1897     int r, g, b, y, u, v, a;
1898
1899     for (;;) {
1900         while (is->paused && !is->subtitleq.abort_request) {
1901             SDL_Delay(10);
1902         }
1903         if (packet_queue_get(&is->subtitleq, pkt, 1, NULL) < 0)
1904             break;
1905
1906         if (pkt->data == flush_pkt.data) {
1907             avcodec_flush_buffers(is->subtitle_st->codec);
1908             continue;
1909         }
1910         SDL_LockMutex(is->subpq_mutex);
1911         while (is->subpq_size >= SUBPICTURE_QUEUE_SIZE &&
1912                !is->subtitleq.abort_request) {
1913             SDL_CondWait(is->subpq_cond, is->subpq_mutex);
1914         }
1915         SDL_UnlockMutex(is->subpq_mutex);
1916
1917         if (is->subtitleq.abort_request)
1918             return 0;
1919
1920         sp = &is->subpq[is->subpq_windex];
1921
1922         /* NOTE: pts is the PTS of the _first_ subtitle picture beginning in
1923            this packet, if any */
1924         pts = 0;
1925         if (pkt->pts != AV_NOPTS_VALUE)
1926             pts = av_q2d(is->subtitle_st->time_base) * pkt->pts;
1927
1928         avcodec_decode_subtitle2(is->subtitle_st->codec, &sp->sub,
1929                                  &got_subtitle, pkt);
1930         if (got_subtitle && sp->sub.format == 0) {
1931             if (sp->sub.pts != AV_NOPTS_VALUE)
1932                 pts = sp->sub.pts / (double)AV_TIME_BASE;
1933             sp->pts = pts;
1934
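            /* convert the palette of each subtitle rect from RGBA to YUVA
               (CCIR 601) so it can later be blended onto the YUV overlay */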
1935             for (i = 0; i < sp->sub.num_rects; i++)
1936             {
1937                 for (j = 0; j < sp->sub.rects[i]->nb_colors; j++)
1938                 {
1939                     RGBA_IN(r, g, b, a, (uint32_t*)sp->sub.rects[i]->pict.data[1] + j);
1940                     y = RGB_TO_Y_CCIR(r, g, b);
1941                     u = RGB_TO_U_CCIR(r, g, b, 0);
1942                     v = RGB_TO_V_CCIR(r, g, b, 0);
1943                     YUVA_OUT((uint32_t*)sp->sub.rects[i]->pict.data[1] + j, y, u, v, a);
1944                 }
1945             }
1946
1947             /* now we can update the picture count */
1948             if (++is->subpq_windex == SUBPICTURE_QUEUE_SIZE)
1949                 is->subpq_windex = 0;
1950             SDL_LockMutex(is->subpq_mutex);
1951             is->subpq_size++;
1952             SDL_UnlockMutex(is->subpq_mutex);
1953         }
1954         av_free_packet(pkt);
1955     }
1956     return 0;
1957 }
1958
1959 /* copy samples into the circular buffer used by the audio display (waves/RDFT) */
1960 static void update_sample_display(VideoState *is, short *samples, int samples_size)
1961 {
1962     int size, len;
1963
1964     size = samples_size / sizeof(short);
1965     while (size > 0) {
1966         len = SAMPLE_ARRAY_SIZE - is->sample_array_index;
1967         if (len > size)
1968             len = size;
1969         memcpy(is->sample_array + is->sample_array_index, samples, len * sizeof(short));
1970         samples += len;
1971         is->sample_array_index += len;
1972         if (is->sample_array_index >= SAMPLE_ARRAY_SIZE)
1973             is->sample_array_index = 0;
1974         size -= len;
1975     }
1976 }
1977
1978 /* return the wanted number of samples to get better sync if sync_type is video
1979  * or external master clock */
1980 static int synchronize_audio(VideoState *is, int nb_samples)
1981 {
1982     int wanted_nb_samples = nb_samples;
1983
1984     /* if not master, then we try to remove or add samples to correct the clock */
1985     if (get_master_sync_type(is) != AV_SYNC_AUDIO_MASTER) {
1986         double diff, avg_diff;
1987         int min_nb_samples, max_nb_samples;
1988
1989         diff = get_audio_clock(is) - get_master_clock(is);
1990
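        /* audio_diff_cum is an exponentially weighted sum (cum = diff + coef * cum),
           so cum * (1 - coef) below is an exponential moving average of the recent
           A-V differences; with coef = exp(log(0.01) / AUDIO_DIFF_AVG_NB), the oldest
           of the last AUDIO_DIFF_AVG_NB terms is weighted at about 1% */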
1991         if (fabs(diff) < AV_NOSYNC_THRESHOLD) {
1992             is->audio_diff_cum = diff + is->audio_diff_avg_coef * is->audio_diff_cum;
1993             if (is->audio_diff_avg_count < AUDIO_DIFF_AVG_NB) {
1994                 /* not enough measurements yet for a reliable estimate */
1995                 is->audio_diff_avg_count++;
1996             } else {
1997                 /* estimate the A-V difference */
1998                 avg_diff = is->audio_diff_cum * (1.0 - is->audio_diff_avg_coef);
1999
2000                 if (fabs(avg_diff) >= is->audio_diff_threshold) {
2001                     wanted_nb_samples = nb_samples + (int)(diff * is->audio_src.freq);
2002                     min_nb_samples = ((nb_samples * (100 - SAMPLE_CORRECTION_PERCENT_MAX) / 100));
2003                     max_nb_samples = ((nb_samples * (100 + SAMPLE_CORRECTION_PERCENT_MAX) / 100));
2004                     wanted_nb_samples = FFMIN(FFMAX(wanted_nb_samples, min_nb_samples), max_nb_samples);
2005                 }
2006                 av_dlog(NULL, "diff=%f adiff=%f sample_diff=%d apts=%0.3f vpts=%0.3f %f\n",
2007                         diff, avg_diff, wanted_nb_samples - nb_samples,
2008                         is->audio_clock, is->video_clock, is->audio_diff_threshold);
2009             }
2010         } else {
2011             /* the difference is too large: it may be due to initial PTS errors, so
2012                reset the A-V filter */
2013             is->audio_diff_avg_count = 0;
2014             is->audio_diff_cum       = 0;
2015         }
2016     }
2017
2018     return wanted_nb_samples;
2019 }
2020
2021 /* decode one audio frame and return its uncompressed size */
2022 static int audio_decode_frame(VideoState *is, double *pts_ptr)
2023 {
2024     AVPacket *pkt_temp = &is->audio_pkt_temp;
2025     AVPacket *pkt = &is->audio_pkt;
2026     AVCodecContext *dec = is->audio_st->codec;
2027     int len1, len2, data_size, resampled_data_size;
2028     int64_t dec_channel_layout;
2029     int got_frame;
2030     double pts;
2031     int new_packet = 0;
2032     int flush_complete = 0;
2033     int wanted_nb_samples;
2034
2035     for (;;) {
2036         /* NOTE: the audio packet can contain several frames */
2037         while (pkt_temp->size > 0 || (!pkt_temp->data && new_packet)) {
2038             if (!is->frame) {
2039                 if (!(is->frame = avcodec_alloc_frame()))
2040                     return AVERROR(ENOMEM);
2041             } else
2042                 avcodec_get_frame_defaults(is->frame);
2043
2044             if (is->paused)
2045                 return -1;
2046
2047             if (flush_complete)
2048                 break;
2049             new_packet = 0;
2050             len1 = avcodec_decode_audio4(dec, is->frame, &got_frame, pkt_temp);
2051             if (len1 < 0) {
2052                 /* if error, we skip the frame */
2053                 pkt_temp->size = 0;
2054                 break;
2055             }
2056
2057             pkt_temp->data += len1;
2058             pkt_temp->size -= len1;
2059
2060             if (!got_frame) {
2061                 /* stop sending empty packets if the decoder is finished */
2062                 if (!pkt_temp->data && dec->codec->capabilities & CODEC_CAP_DELAY)
2063                     flush_complete = 1;
2064                 continue;
2065             }
2066             data_size = av_samples_get_buffer_size(NULL, is->frame->channels,
2067                                                    is->frame->nb_samples,
2068                                                    is->frame->format, 1);
2069
2070             dec_channel_layout =
2071                 (is->frame->channel_layout && is->frame->channels == av_get_channel_layout_nb_channels(is->frame->channel_layout)) ?
2072                 is->frame->channel_layout : av_get_default_channel_layout(is->frame->channels);
2073             wanted_nb_samples = synchronize_audio(is, is->frame->nb_samples);
2074
2075             if (is->frame->format        != is->audio_src.fmt            ||
2076                 dec_channel_layout       != is->audio_src.channel_layout ||
2077                 is->frame->sample_rate   != is->audio_src.freq           ||
2078                 (wanted_nb_samples       != is->frame->nb_samples && !is->swr_ctx)) {
2079                 swr_free(&is->swr_ctx);
2080                 is->swr_ctx = swr_alloc_set_opts(NULL,
2081                                                  is->audio_tgt.channel_layout, is->audio_tgt.fmt, is->audio_tgt.freq,
2082                                                  dec_channel_layout,           is->frame->format, is->frame->sample_rate,
2083                                                  0, NULL);
2084                 if (!is->swr_ctx || swr_init(is->swr_ctx) < 0) {
2085                     fprintf(stderr, "Cannot create sample rate converter for conversion of %d Hz %s %d channels to %d Hz %s %d channels!\n",
2086                         is->frame->sample_rate,   av_get_sample_fmt_name(is->frame->format), (int)is->frame->channels,
2087                         is->audio_tgt.freq, av_get_sample_fmt_name(is->audio_tgt.fmt), is->audio_tgt.channels);
2088                     break;
2089                 }
2090                 is->audio_src.channel_layout = dec_channel_layout;
2091                 is->audio_src.channels = is->frame->channels;
2092                 is->audio_src.freq = is->frame->sample_rate;
2093                 is->audio_src.fmt = is->frame->format;
2094             }
2095
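            /* resample into audio_buf2 using the audio device's sample format, channel
               layout and rate; if sync correction is needed, swr_set_compensation()
               stretches/shrinks the audio by (wanted_nb_samples - nb_samples) samples,
               scaled to the output rate */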
2096             if (is->swr_ctx) {
2097                 const uint8_t **in = (const uint8_t **)is->frame->extended_data;
2098                 uint8_t *out[] = {is->audio_buf2};
2099                 int out_count = sizeof(is->audio_buf2) / is->audio_tgt.channels / av_get_bytes_per_sample(is->audio_tgt.fmt);
2100                 if (wanted_nb_samples != is->frame->nb_samples) {
2101                     if (swr_set_compensation(is->swr_ctx, (wanted_nb_samples - is->frame->nb_samples) * is->audio_tgt.freq / is->frame->sample_rate,
2102                                                 wanted_nb_samples * is->audio_tgt.freq / is->frame->sample_rate) < 0) {
2103                         fprintf(stderr, "swr_set_compensation() failed\n");
2104                         break;
2105                     }
2106                 }
2107                 len2 = swr_convert(is->swr_ctx, out, out_count, in, is->frame->nb_samples);
2108                 if (len2 < 0) {
2109                     fprintf(stderr, "swr_convert() failed\n");
2110                     break;
2111                 }
2112                 if (len2 == out_count) {
2113                     fprintf(stderr, "warning: audio buffer is probably too small\n");
2114                     swr_init(is->swr_ctx);
2115                 }
2116                 is->audio_buf = is->audio_buf2;
2117                 resampled_data_size = len2 * is->audio_tgt.channels * av_get_bytes_per_sample(is->audio_tgt.fmt);
2118             } else {
2119                 is->audio_buf = is->frame->data[0];
2120                 resampled_data_size = data_size;
2121             }
2122
2123             /* derive the pts from the running audio clock, then advance the clock
2124                by the duration of the decoded data */
2124             pts = is->audio_clock;
2125             *pts_ptr = pts;
2126             is->audio_clock += (double)data_size /
2127                 (is->frame->channels * is->frame->sample_rate * av_get_bytes_per_sample(is->frame->format));
2128 #ifdef DEBUG
2129             {
2130                 static double last_clock;
2131                 printf("audio: delay=%0.3f clock=%0.3f pts=%0.3f\n",
2132                        is->audio_clock - last_clock,
2133                        is->audio_clock, pts);
2134                 last_clock = is->audio_clock;
2135             }
2136 #endif
2137             return resampled_data_size;
2138         }
2139
2140         /* free the current packet */
2141         if (pkt->data)
2142             av_free_packet(pkt);
2143         memset(pkt_temp, 0, sizeof(*pkt_temp));
2144
2145         if (is->paused || is->audioq.abort_request) {
2146             return -1;
2147         }
2148
2149         if (is->audioq.nb_packets == 0)
2150             SDL_CondSignal(is->continue_read_thread);
2151
2152         /* read next packet */
2153         if ((new_packet = packet_queue_get(&is->audioq, pkt, 1, &is->audio_pkt_temp_serial)) < 0)
2154             return -1;
2155
2156         if (pkt->data == flush_pkt.data) {
2157             avcodec_flush_buffers(dec);
2158             flush_complete = 0;
2159         }
2160
2161         *pkt_temp = *pkt;
2162
2163         /* update the audio clock with the packet pts, if available */
2164         if (pkt->pts != AV_NOPTS_VALUE) {
2165             is->audio_clock = av_q2d(is->audio_st->time_base)*pkt->pts;
2166         }
2167     }
2168 }
2169
2170 /* prepare a new audio buffer */
2171 static void sdl_audio_callback(void *opaque, Uint8 *stream, int len)
2172 {
2173     VideoState *is = opaque;
2174     int audio_size, len1;
2175     int bytes_per_sec;
2176     int frame_size = av_samples_get_buffer_size(NULL, is->audio_tgt.channels, 1, is->audio_tgt.fmt, 1);
2177     double pts;
2178
2179     audio_callback_time = av_gettime();
2180
2181     while (len > 0) {
2182         if (is->audio_buf_index >= is->audio_buf_size) {
2183            audio_size = audio_decode_frame(is, &pts);
2184            if (audio_size < 0) {
2185                 /* if error, just output silence */
2186                is->audio_buf      = is->silence_buf;
2187                is->audio_buf_size = sizeof(is->silence_buf) / frame_size * frame_size;
2188            } else {
2189                if (is->show_mode != SHOW_MODE_VIDEO)
2190                    update_sample_display(is, (int16_t *)is->audio_buf, audio_size);
2191                is->audio_buf_size = audio_size;
2192            }
2193            is->audio_buf_index = 0;
2194         }
2195         len1 = is->audio_buf_size - is->audio_buf_index;
2196         if (len1 > len)
2197             len1 = len;
2198         memcpy(stream, (uint8_t *)is->audio_buf + is->audio_buf_index, len1);
2199         len -= len1;
2200         stream += len1;
2201         is->audio_buf_index += len1;
2202     }
2203     bytes_per_sec = is->audio_tgt.freq * is->audio_tgt.channels * av_get_bytes_per_sample(is->audio_tgt.fmt);
2204     is->audio_write_buf_size = is->audio_buf_size - is->audio_buf_index;
2205     /* Let's assume the audio driver that is used by SDL has two periods. */
2206     is->audio_current_pts = is->audio_clock - (double)(2 * is->audio_hw_buf_size + is->audio_write_buf_size) / bytes_per_sec;
2207     is->audio_current_pts_drift = is->audio_current_pts - audio_callback_time / 1000000.0;
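    /* the drift ties audio_current_pts to the wall-clock time of this callback, so the
       audio clock can be extrapolated with av_gettime() between callbacks */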
2208     if (is->audioq.serial == is->audio_pkt_temp_serial)
2209         check_external_clock_sync(is, is->audio_current_pts);
2210 }
2211
2212 static int audio_open(void *opaque, int64_t wanted_channel_layout, int wanted_nb_channels, int wanted_sample_rate, struct AudioParams *audio_hw_params)
2213 {
2214     SDL_AudioSpec wanted_spec, spec;
2215     const char *env;
2216     const int next_nb_channels[] = {0, 0, 1, 6, 2, 6, 4, 6};
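    /* fallback table, indexed by the channel count that SDL just refused:
       e.g. 6 -> 4 -> 2 -> 1 -> 0, where 0 means give up */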
2217
2218     env = SDL_getenv("SDL_AUDIO_CHANNELS");
2219     if (env) {
2220         wanted_nb_channels = atoi(env);
2221         wanted_channel_layout = av_get_default_channel_layout(wanted_nb_channels);
2222     }
2223     if (!wanted_channel_layout || wanted_nb_channels != av_get_channel_layout_nb_channels(wanted_channel_layout)) {
2224         wanted_channel_layout = av_get_default_channel_layout(wanted_nb_channels);
2225         wanted_channel_layout &= ~AV_CH_LAYOUT_STEREO_DOWNMIX;
2226     }
2227     wanted_spec.channels = av_get_channel_layout_nb_channels(wanted_channel_layout);
2228     wanted_spec.freq = wanted_sample_rate;
2229     if (wanted_spec.freq <= 0 || wanted_spec.channels <= 0) {
2230         fprintf(stderr, "Invalid sample rate or channel count!\n");
2231         return -1;
2232     }
2233     wanted_spec.format = AUDIO_S16SYS;
2234     wanted_spec.silence = 0;
2235     wanted_spec.samples = SDL_AUDIO_BUFFER_SIZE;
2236     wanted_spec.callback = sdl_audio_callback;
2237     wanted_spec.userdata = opaque;
2238     while (SDL_OpenAudio(&wanted_spec, &spec) < 0) {
2239         fprintf(stderr, "SDL_OpenAudio (%d channels): %s\n", wanted_spec.channels, SDL_GetError());
2240         wanted_spec.channels = next_nb_channels[FFMIN(7, wanted_spec.channels)];
2241         if (!wanted_spec.channels) {
2242             fprintf(stderr, "No more channel combinations to try, audio open failed\n");
2243             return -1;
2244         }
2245         wanted_channel_layout = av_get_default_channel_layout(wanted_spec.channels);
2246     }
2247     if (spec.format != AUDIO_S16SYS) {
2248         fprintf(stderr, "SDL advised audio format %d is not supported!\n", spec.format);
2249         return -1;
2250     }
2251     if (spec.channels != wanted_spec.channels) {
2252         wanted_channel_layout = av_get_default_channel_layout(spec.channels);
2253         if (!wanted_channel_layout) {
2254             fprintf(stderr, "SDL advised channel count %d is not supported!\n", spec.channels);
2255             return -1;
2256         }
2257     }
2258
2259     audio_hw_params->fmt = AV_SAMPLE_FMT_S16;
2260     audio_hw_params->freq = spec.freq;
2261     audio_hw_params->channel_layout = wanted_channel_layout;
2262     audio_hw_params->channels =  spec.channels;
2263     return spec.size;
2264 }
2265
2266 /* open a given stream. Return 0 if OK */
2267 static int stream_component_open(VideoState *is, int stream_index)
2268 {
2269     AVFormatContext *ic = is->ic;
2270     AVCodecContext *avctx;
2271     AVCodec *codec;
2272     AVDictionary *opts;
2273     AVDictionaryEntry *t = NULL;
2274
2275     if (stream_index < 0 || stream_index >= ic->nb_streams)
2276         return -1;
2277     avctx = ic->streams[stream_index]->codec;
2278
2279     codec = avcodec_find_decoder(avctx->codec_id);
2280
2281     switch(avctx->codec_type){
2282         case AVMEDIA_TYPE_AUDIO   : is->last_audio_stream    = stream_index; if(audio_codec_name   ) codec= avcodec_find_decoder_by_name(   audio_codec_name); break;
2283         case AVMEDIA_TYPE_SUBTITLE: is->last_subtitle_stream = stream_index; if(subtitle_codec_name) codec= avcodec_find_decoder_by_name(subtitle_codec_name); break;
2284         case AVMEDIA_TYPE_VIDEO   : is->last_video_stream    = stream_index; if(video_codec_name   ) codec= avcodec_find_decoder_by_name(   video_codec_name); break;
2285     }
2286     if (!codec)
2287         return -1;
2288
2289     avctx->workaround_bugs   = workaround_bugs;
2290     avctx->lowres            = lowres;
2291     if(avctx->lowres > codec->max_lowres){
2292         av_log(avctx, AV_LOG_WARNING, "The maximum value for lowres supported by the decoder is %d\n",
2293                 codec->max_lowres);
2294         avctx->lowres= codec->max_lowres;
2295     }
2296     avctx->idct_algo         = idct;
2297     avctx->skip_frame        = skip_frame;
2298     avctx->skip_idct         = skip_idct;
2299     avctx->skip_loop_filter  = skip_loop_filter;
2300     avctx->error_concealment = error_concealment;
2301
2302     if(avctx->lowres) avctx->flags |= CODEC_FLAG_EMU_EDGE;
2303     if (fast)   avctx->flags2 |= CODEC_FLAG2_FAST;
2304     if(codec->capabilities & CODEC_CAP_DR1)
2305         avctx->flags |= CODEC_FLAG_EMU_EDGE;
2306
2307     opts = filter_codec_opts(codec_opts, avctx->codec_id, ic, ic->streams[stream_index], codec);
2308     if (!av_dict_get(opts, "threads", NULL, 0))
2309         av_dict_set(&opts, "threads", "auto", 0);
2310     if (avcodec_open2(avctx, codec, &opts) < 0)
2311         return -1;
2312     if ((t = av_dict_get(opts, "", NULL, AV_DICT_IGNORE_SUFFIX))) {
2313         av_log(NULL, AV_LOG_ERROR, "Option %s not found.\n", t->key);
2314         return AVERROR_OPTION_NOT_FOUND;
2315     }
2316
2317     /* prepare audio output */
2318     if (avctx->codec_type == AVMEDIA_TYPE_AUDIO) {
2319         int audio_hw_buf_size = audio_open(is, avctx->channel_layout, avctx->channels, avctx->sample_rate, &is->audio_src);
2320         if (audio_hw_buf_size < 0)
2321             return -1;
2322         is->audio_hw_buf_size = audio_hw_buf_size;
2323         is->audio_tgt = is->audio_src;
2324     }
2325
2326     ic->streams[stream_index]->discard = AVDISCARD_DEFAULT;
2327     switch (avctx->codec_type) {
2328     case AVMEDIA_TYPE_AUDIO:
2329         is->audio_stream = stream_index;
2330         is->audio_st = ic->streams[stream_index];
2331         is->audio_buf_size  = 0;
2332         is->audio_buf_index = 0;
2333
2334         /* init averaging filter */
2335         is->audio_diff_avg_coef  = exp(log(0.01) / AUDIO_DIFF_AVG_NB);
2336         is->audio_diff_avg_count = 0;
2337         /* since we do not have a precise enough measure of the audio FIFO fullness,
2338            we only correct audio sync if the error is larger than this threshold */
2339         is->audio_diff_threshold = 2.0 * is->audio_hw_buf_size / av_samples_get_buffer_size(NULL, is->audio_tgt.channels, is->audio_tgt.freq, is->audio_tgt.fmt, 1);
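        /* i.e. the duration, in seconds, of two hardware audio buffers */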
2340
2341         memset(&is->audio_pkt, 0, sizeof(is->audio_pkt));
2342         memset(&is->audio_pkt_temp, 0, sizeof(is->audio_pkt_temp));
2343         packet_queue_start(&is->audioq);
2344         SDL_PauseAudio(0);
2345         break;
2346     case AVMEDIA_TYPE_VIDEO:
2347         is->video_stream = stream_index;
2348         is->video_st = ic->streams[stream_index];
2349
2350         packet_queue_start(&is->videoq);
2351         is->video_tid = SDL_CreateThread(video_thread, is);
2352         break;
2353     case AVMEDIA_TYPE_SUBTITLE:
2354         is->subtitle_stream = stream_index;
2355         is->subtitle_st = ic->streams[stream_index];
2356         packet_queue_start(&is->subtitleq);
2357
2358         is->subtitle_tid = SDL_CreateThread(subtitle_thread, is);
2359         break;
2360     default:
2361         break;
2362     }
2363     return 0;
2364 }
2365
2366 static void stream_component_close(VideoState *is, int stream_index)
2367 {
2368     AVFormatContext *ic = is->ic;
2369     AVCodecContext *avctx;
2370
2371     if (stream_index < 0 || stream_index >= ic->nb_streams)
2372         return;
2373     avctx = ic->streams[stream_index]->codec;
2374
2375     switch (avctx->codec_type) {
2376     case AVMEDIA_TYPE_AUDIO:
2377         packet_queue_abort(&is->audioq);
2378
2379         SDL_CloseAudio();
2380
2381         packet_queue_flush(&is->audioq);
2382         av_free_packet(&is->audio_pkt);
2383         swr_free(&is->swr_ctx);
2384         av_freep(&is->audio_buf1);
2385         is->audio_buf = NULL;
2386         avcodec_free_frame(&is->frame);
2387
2388         if (is->rdft) {
2389             av_rdft_end(is->rdft);
2390             av_freep(&is->rdft_data);
2391             is->rdft = NULL;
2392             is->rdft_bits = 0;
2393         }
2394         break;
2395     case AVMEDIA_TYPE_VIDEO:
2396         packet_queue_abort(&is->videoq);
2397
2398         /* note: we also signal this condition variable to make sure we unblock the
2399            video thread in all cases */
2400         SDL_LockMutex(is->pictq_mutex);
2401         SDL_CondSignal(is->pictq_cond);
2402         SDL_UnlockMutex(is->pictq_mutex);
2403
2404         SDL_WaitThread(is->video_tid, NULL);
2405
2406         packet_queue_flush(&is->videoq);
2407         break;
2408     case AVMEDIA_TYPE_SUBTITLE:
2409         packet_queue_abort(&is->subtitleq);
2410
2411         /* note: we also signal this condition variable to make sure we unblock the
2412            subtitle thread in all cases */
2413         SDL_LockMutex(is->subpq_mutex);
2414         is->subtitle_stream_changed = 1;
2415
2416         SDL_CondSignal(is->subpq_cond);
2417         SDL_UnlockMutex(is->subpq_mutex);
2418
2419         SDL_WaitThread(is->subtitle_tid, NULL);
2420
2421         packet_queue_flush(&is->subtitleq);
2422         break;
2423     default:
2424         break;
2425     }
2426
2427     ic->streams[stream_index]->discard = AVDISCARD_ALL;
2428     avcodec_close(avctx);
2429 #if CONFIG_AVFILTER
2430     free_buffer_pool(&is->buffer_pool);
2431 #endif
2432     switch (avctx->codec_type) {
2433     case AVMEDIA_TYPE_AUDIO:
2434         is->audio_st = NULL;
2435         is->audio_stream = -1;
2436         break;
2437     case AVMEDIA_TYPE_VIDEO:
2438         is->video_st = NULL;
2439         is->video_stream = -1;
2440         break;
2441     case AVMEDIA_TYPE_SUBTITLE:
2442         is->subtitle_st = NULL;
2443         is->subtitle_stream = -1;
2444         break;
2445     default:
2446         break;
2447     }
2448 }
2449
2450 static int decode_interrupt_cb(void *ctx)
2451 {
2452     VideoState *is = ctx;
2453     return is->abort_request;
2454 }
2455
2456 static int is_realtime(AVFormatContext *s)
2457 {
2458     if(   !strcmp(s->iformat->name, "rtp")
2459        || !strcmp(s->iformat->name, "rtsp")
2460        || !strcmp(s->iformat->name, "sdp")
2461     )
2462         return 1;
2463
2464     if(s->pb && (   !strncmp(s->filename, "rtp:", 4)
2465                  || !strncmp(s->filename, "udp:", 4)
2466                 )
2467     )
2468         return 1;
2469     return 0;
2470 }
2471
2472 /* this thread gets the stream from the disk or the network */
2473 static int read_thread(void *arg)
2474 {
2475     VideoState *is = arg;
2476     AVFormatContext *ic = NULL;
2477     int err, i, ret;
2478     int st_index[AVMEDIA_TYPE_NB];
2479     AVPacket pkt1, *pkt = &pkt1;
2480     int eof = 0;
2481     int pkt_in_play_range = 0;
2482     AVDictionaryEntry *t;
2483     AVDictionary **opts;
2484     int orig_nb_streams;
2485     SDL_mutex *wait_mutex = SDL_CreateMutex();
2486
2487     memset(st_index, -1, sizeof(st_index));
2488     is->last_video_stream = is->video_stream = -1;
2489     is->last_audio_stream = is->audio_stream = -1;
2490     is->last_subtitle_stream = is->subtitle_stream = -1;
2491
2492     ic = avformat_alloc_context();
2493     ic->interrupt_callback.callback = decode_interrupt_cb;
2494     ic->interrupt_callback.opaque = is;
2495     err = avformat_open_input(&ic, is->filename, is->iformat, &format_opts);
2496     if (err < 0) {
2497         print_error(is->filename, err);
2498         ret = -1;
2499         goto fail;
2500     }
2501     if ((t = av_dict_get(format_opts, "", NULL, AV_DICT_IGNORE_SUFFIX))) {
2502         av_log(NULL, AV_LOG_ERROR, "Option %s not found.\n", t->key);
2503         ret = AVERROR_OPTION_NOT_FOUND;
2504         goto fail;
2505     }
2506     is->ic = ic;
2507
2508     if (genpts)
2509         ic->flags |= AVFMT_FLAG_GENPTS;
2510
2511     opts = setup_find_stream_info_opts(ic, codec_opts);
2512     orig_nb_streams = ic->nb_streams;
2513
2514     err = avformat_find_stream_info(ic, opts);
2515     if (err < 0) {
2516         fprintf(stderr, "%s: could not find codec parameters\n", is->filename);
2517         ret = -1;
2518         goto fail;
2519     }
2520     for (i = 0; i < orig_nb_streams; i++)
2521         av_dict_free(&opts[i]);
2522     av_freep(&opts);
2523
2524     if (ic->pb)
2525         ic->pb->eof_reached = 0; // FIXME hack: ffplay should probably not use url_feof() to test for end of file
2526
2527     if (seek_by_bytes < 0)
2528         seek_by_bytes = !!(ic->iformat->flags & AVFMT_TS_DISCONT);
2529
2530     /* if seeking was requested, execute it */
2531     if (start_time != AV_NOPTS_VALUE) {
2532         int64_t timestamp;
2533
2534         timestamp = start_time;
2535         /* add the stream start time */
2536         if (ic->start_time != AV_NOPTS_VALUE)
2537             timestamp += ic->start_time;
2538         ret = avformat_seek_file(ic, -1, INT64_MIN, timestamp, INT64_MAX, 0);
2539         if (ret < 0) {
2540             fprintf(stderr, "%s: could not seek to position %0.3f\n",
2541                     is->filename, (double)timestamp / AV_TIME_BASE);
2542         }
2543     }
2544
2545     for (i = 0; i < ic->nb_streams; i++)
2546         ic->streams[i]->discard = AVDISCARD_ALL;
2547     if (!video_disable)
2548         st_index[AVMEDIA_TYPE_VIDEO] =
2549             av_find_best_stream(ic, AVMEDIA_TYPE_VIDEO,
2550                                 wanted_stream[AVMEDIA_TYPE_VIDEO], -1, NULL, 0);
2551     if (!audio_disable)
2552         st_index[AVMEDIA_TYPE_AUDIO] =
2553             av_find_best_stream(ic, AVMEDIA_TYPE_AUDIO,
2554                                 wanted_stream[AVMEDIA_TYPE_AUDIO],
2555                                 st_index[AVMEDIA_TYPE_VIDEO],
2556                                 NULL, 0);
2557     if (!video_disable)
2558         st_index[AVMEDIA_TYPE_SUBTITLE] =
2559             av_find_best_stream(ic, AVMEDIA_TYPE_SUBTITLE,
2560                                 wanted_stream[AVMEDIA_TYPE_SUBTITLE],
2561                                 (st_index[AVMEDIA_TYPE_AUDIO] >= 0 ?
2562                                  st_index[AVMEDIA_TYPE_AUDIO] :
2563                                  st_index[AVMEDIA_TYPE_VIDEO]),
2564                                 NULL, 0);
2565     if (show_status) {
2566         av_dump_format(ic, 0, is->filename, 0);
2567     }
2568
2569     is->show_mode = show_mode;
2570
2571     /* open the streams */
2572     if (st_index[AVMEDIA_TYPE_AUDIO] >= 0) {
2573         stream_component_open(is, st_index[AVMEDIA_TYPE_AUDIO]);
2574     }
2575
2576     ret = -1;
2577     if (st_index[AVMEDIA_TYPE_VIDEO] >= 0) {
2578         ret = stream_component_open(is, st_index[AVMEDIA_TYPE_VIDEO]);
2579     }
2580     is->refresh_tid = SDL_CreateThread(refresh_thread, is);
2581     if (is->show_mode == SHOW_MODE_NONE)
2582         is->show_mode = ret >= 0 ? SHOW_MODE_VIDEO : SHOW_MODE_RDFT;
2583
2584     if (st_index[AVMEDIA_TYPE_SUBTITLE] >= 0) {
2585         stream_component_open(is, st_index[AVMEDIA_TYPE_SUBTITLE]);
2586     }
2587
2588     if (is->video_stream < 0 && is->audio_stream < 0) {
2589         fprintf(stderr, "%s: could not open codecs\n", is->filename);
2590         ret = -1;
2591         goto fail;
2592     }
2593
2594     if (infinite_buffer < 0 && is_realtime(ic))
2595         infinite_buffer = 1;
2596
2597     for (;;) {
2598         if (is->abort_request)
2599             break;
2600         if (is->paused != is->last_paused) {
2601             is->last_paused = is->paused;
2602             if (is->paused)
2603                 is->read_pause_return = av_read_pause(ic);
2604             else
2605                 av_read_play(ic);
2606         }
2607 #if CONFIG_RTSP_DEMUXER || CONFIG_MMSH_PROTOCOL
2608         if (is->paused &&
2609                 (!strcmp(ic->iformat->name, "rtsp") ||
2610                  (ic->pb && !strncmp(input_filename, "mmsh:", 5)))) {
2611             /* wait 10 ms to avoid trying to get another packet */
2612             /* XXX: horrible */
2613             SDL_Delay(10);
2614             continue;
2615         }
2616 #endif
2617         if (is->seek_req) {
2618             int64_t seek_target = is->seek_pos;
2619             int64_t seek_min    = is->seek_rel > 0 ? seek_target - is->seek_rel + 2: INT64_MIN;
2620             int64_t seek_max    = is->seek_rel < 0 ? seek_target - is->seek_rel - 2: INT64_MAX;
2621 // FIXME the +-2 is needed because rounding is not done in the correct direction when
2622 //      the seek_pos/seek_rel variables are generated
2623
2624             ret = avformat_seek_file(is->ic, -1, seek_min, seek_target, seek_max, is->seek_flags);
2625             if (ret < 0) {
2626                 fprintf(stderr, "%s: error while seeking\n", is->ic->filename);
2627             } else {
2628                 if (is->audio_stream >= 0) {
2629                     packet_queue_flush(&is->audioq);
2630                     packet_queue_put(&is->audioq, &flush_pkt);
2631                 }
2632                 if (is->subtitle_stream >= 0) {
2633                     packet_queue_flush(&is->subtitleq);
2634                     packet_queue_put(&is->subtitleq, &flush_pkt);
2635                 }
2636                 if (is->video_stream >= 0) {
2637                     packet_queue_flush(&is->videoq);
2638                     packet_queue_put(&is->videoq, &flush_pkt);
2639                 }
2640             }
2641             update_external_clock_pts(is, (seek_target + ic->start_time) / (double)AV_TIME_BASE);
2642             is->seek_req = 0;
2643             eof = 0;
2644         }
2645         if (is->que_attachments_req) {
2646             avformat_queue_attached_pictures(ic);
2647             is->que_attachments_req = 0;
2648         }
2649
2650         /* if the queues are full, no need to read more */
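        /* "full" means: the queues together hold more than MAX_QUEUE_SIZE bytes, or every
           open (non-aborted) stream already has more than MIN_FRAMES packets queued */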
2651         if (infinite_buffer<1 &&
2652               (is->audioq.size + is->videoq.size + is->subtitleq.size > MAX_QUEUE_SIZE
2653             || (   (is->audioq   .nb_packets > MIN_FRAMES || is->audio_stream < 0 || is->audioq.abort_request)
2654                 && (is->videoq   .nb_packets > MIN_FRAMES || is->video_stream < 0 || is->videoq.abort_request)
2655                 && (is->subtitleq.nb_packets > MIN_FRAMES || is->subtitle_stream < 0 || is->subtitleq.abort_request)))) {
2656             /* wait 10 ms */
2657             SDL_LockMutex(wait_mutex);
2658             SDL_CondWaitTimeout(is->continue_read_thread, wait_mutex, 10);
2659             SDL_UnlockMutex(wait_mutex);
2660             continue;
2661         }
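        /* at EOF, queue empty packets so the video decoder (and an audio decoder with
           CODEC_CAP_DELAY) can flush its buffered frames; then loop or exit once all
           queues drain */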
2662         if (eof) {
2663             if (is->video_stream >= 0) {
2664                 av_init_packet(pkt);
2665                 pkt->data = NULL;
2666                 pkt->size = 0;
2667                 pkt->stream_index = is->video_stream;
2668                 packet_queue_put(&is->videoq, pkt);
2669             }
2670             if (is->audio_stream >= 0 &&
2671                 is->audio_st->codec->codec->capabilities & CODEC_CAP_DELAY) {
2672                 av_init_packet(pkt);
2673                 pkt->data = NULL;
2674                 pkt->size = 0;
2675                 pkt->stream_index = is->audio_stream;
2676                 packet_queue_put(&is->audioq, pkt);
2677             }
2678             SDL_Delay(10);
2679             if (is->audioq.size + is->videoq.size + is->subtitleq.size == 0) {
2680                 if (loop != 1 && (!loop || --loop)) {
2681                     stream_seek(is, start_time != AV_NOPTS_VALUE ? start_time : 0, 0, 0);
2682                 } else if (autoexit) {
2683                     ret = AVERROR_EOF;
2684                     goto fail;
2685                 }
2686             }
2687             eof=0;
2688             continue;
2689         }
2690         ret = av_read_frame(ic, pkt);
2691         if (ret < 0) {
2692             if (ret == AVERROR_EOF || url_feof(ic->pb))
2693                 eof = 1;
2694             if (ic->pb && ic->pb->error)
2695                 break;
2696             SDL_LockMutex(wait_mutex);
2697             SDL_CondWaitTimeout(is->continue_read_thread, wait_mutex, 10);
2698             SDL_UnlockMutex(wait_mutex);
2699             continue;
2700         }
2701         /* check if the packet is in the play range specified by the user, then queue it, otherwise discard it */
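        /* in range <=> (pkt->pts - stream start time) * time_base - start_time <= duration,
           with start_time and duration converted from microseconds to seconds */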
2702         pkt_in_play_range = duration == AV_NOPTS_VALUE ||
2703                 (pkt->pts - ic->streams[pkt->stream_index]->start_time) *
2704                 av_q2d(ic->streams[pkt->stream_index]->time_base) -
2705                 (double)(start_time != AV_NOPTS_VALUE ? start_time : 0) / 1000000
2706                 <= ((double)duration / 1000000);
2707         if (pkt->stream_index == is->audio_stream && pkt_in_play_range) {
2708             packet_queue_put(&is->audioq, pkt);
2709         } else if (pkt->stream_index == is->video_stream && pkt_in_play_range) {
2710             packet_queue_put(&is->videoq, pkt);
2711         } else if (pkt->stream_index == is->subtitle_stream && pkt_in_play_range) {
2712             packet_queue_put(&is->subtitleq, pkt);
2713         } else {
2714             av_free_packet(pkt);
2715         }
2716     }
2717     /* wait until the end */
2718     while (!is->abort_request) {
2719         SDL_Delay(100);
2720     }
2721
2722     ret = 0;
2723  fail:
2724     /* close each stream */
2725     if (is->audio_stream >= 0)
2726         stream_component_close(is, is->audio_stream);
2727     if (is->video_stream >= 0)
2728         stream_component_close(is, is->video_stream);
2729     if (is->subtitle_stream >= 0)
2730         stream_component_close(is, is->subtitle_stream);
2731     if (is->ic) {
2732         avformat_close_input(&is->ic);
2733     }
2734
2735     if (ret != 0) {
2736         SDL_Event event;
2737
2738         event.type = FF_QUIT_EVENT;
2739         event.user.data1 = is;
2740         SDL_PushEvent(&event);
2741     }
2742     SDL_DestroyMutex(wait_mutex);
2743     return 0;
2744 }
2745
2746 static VideoState *stream_open(const char *filename, AVInputFormat *iformat)
2747 {
2748     VideoState *is;
2749
2750     is = av_mallocz(sizeof(VideoState));
2751     if (!is)
2752         return NULL;
2753     av_strlcpy(is->filename, filename, sizeof(is->filename));
2754     is->iformat = iformat;
2755     is->ytop    = 0;
2756     is->xleft   = 0;
2757
2758     /* start video display */
2759     is->pictq_mutex = SDL_CreateMutex();
2760     is->pictq_cond  = SDL_CreateCond();
2761
2762     is->subpq_mutex = SDL_CreateMutex();
2763     is->subpq_cond  = SDL_CreateCond();
2764
2765     packet_queue_init(&is->videoq);
2766     packet_queue_init(&is->audioq);
2767     packet_queue_init(&is->subtitleq);
2768
2769     is->continue_read_thread = SDL_CreateCond();
2770
2771     update_external_clock_pts(is, 0.0);
2772     is->audio_current_pts_drift = -av_gettime() / 1000000.0;
2773     is->video_current_pts_drift = is->audio_current_pts_drift;
2774     is->av_sync_type = av_sync_type;
2775     is->read_tid     = SDL_CreateThread(read_thread, is);
2776     if (!is->read_tid) {
2777         av_free(is);
2778         return NULL;
2779     }
2780     return is;
2781 }
2782
2783 static void stream_cycle_channel(VideoState *is, int codec_type)
2784 {
2785     AVFormatContext *ic = is->ic;
2786     int start_index, stream_index;
2787     int old_index;
2788     AVStream *st;
2789
2790     if (codec_type == AVMEDIA_TYPE_VIDEO) {
2791         start_index = is->last_video_stream;
2792         old_index = is->video_stream;
2793     } else if (codec_type == AVMEDIA_TYPE_AUDIO) {
2794         start_index = is->last_audio_stream;
2795         old_index = is->audio_stream;
2796     } else {
2797         start_index = is->last_subtitle_stream;
2798         old_index = is->subtitle_stream;
2799     }
2800     stream_index = start_index;
2801     for (;;) {
2802         if (++stream_index >= is->ic->nb_streams)
2803         {
2804             if (codec_type == AVMEDIA_TYPE_SUBTITLE)
2805             {
2806                 stream_index = -1;
2807                 is->last_subtitle_stream = -1;
2808                 goto the_end;
2809             }
2810             if (start_index == -1)
2811                 return;
2812             stream_index = 0;
2813         }
2814         if (stream_index == start_index)
2815             return;
2816         st = ic->streams[stream_index];
2817         if (st->codec->codec_type == codec_type) {
2818             /* check that parameters are OK */
2819             switch (codec_type) {
2820             case AVMEDIA_TYPE_AUDIO:
2821                 if (st->codec->sample_rate != 0 &&
2822                     st->codec->channels != 0)
2823                     goto the_end;
2824                 break;
2825             case AVMEDIA_TYPE_VIDEO:
2826             case AVMEDIA_TYPE_SUBTITLE:
2827                 goto the_end;
2828             default:
2829                 break;
2830             }
2831         }
2832     }
2833  the_end:
2834     stream_component_close(is, old_index);
2835     stream_component_open(is, stream_index);
2836     if (codec_type == AVMEDIA_TYPE_VIDEO)
2837         is->que_attachments_req = 1;
2838 }
2839
2840
2841 static void toggle_full_screen(VideoState *is)
2842 {
2843 #if defined(__APPLE__) && SDL_VERSION_ATLEAST(1, 2, 14)
2844     /* OS X needs to reallocate the SDL overlays */
2845     int i;
2846     for (i = 0; i < VIDEO_PICTURE_QUEUE_SIZE; i++)
2847         is->pictq[i].reallocate = 1;
2848 #endif
2849     is_full_screen = !is_full_screen;
2850     video_open(is, 1);
2851 }
2852
2853 static void toggle_pause(VideoState *is)
2854 {
2855     stream_toggle_pause(is);
2856     is->step = 0;
2857 }
2858
2859 static void step_to_next_frame(VideoState *is)
2860 {
2861     /* if the stream is paused, unpause it, then step */
2862     if (is->paused)
2863         stream_toggle_pause(is);
2864     is->step = 1;
2865 }
2866
2867 static void toggle_audio_display(VideoState *is)
2868 {
2869     int bgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0x00);
2870     is->show_mode = (is->show_mode + 1) % SHOW_MODE_NB;
2871     fill_rectangle(screen,
2872                 is->xleft, is->ytop, is->width, is->height,
2873                 bgcolor, 1);
2874 }
2875
2876 /* handle an event sent by the GUI */
2877 static void event_loop(VideoState *cur_stream)
2878 {
2879     SDL_Event event;
2880     double incr, pos, frac;
2881
2882     for (;;) {
2883         double x;
2884         SDL_WaitEvent(&event);
2885         switch (event.type) {
2886         case SDL_KEYDOWN:
2887             if (exit_on_keydown) {
2888                 do_exit(cur_stream);
2889                 break;
2890             }
2891             switch (event.key.keysym.sym) {
2892             case SDLK_ESCAPE:
2893             case SDLK_q:
2894                 do_exit(cur_stream);
2895                 break;
2896             case SDLK_f:
2897                 toggle_full_screen(cur_stream);
2898                 cur_stream->force_refresh = 1;
2899                 break;
2900             case SDLK_p:
2901             case SDLK_SPACE:
2902                 toggle_pause(cur_stream);
2903                 break;
2904             case SDLK_s: // S: Step to next frame
2905                 step_to_next_frame(cur_stream);
2906                 break;
2907             case SDLK_a:
2908                 stream_cycle_channel(cur_stream, AVMEDIA_TYPE_AUDIO);
2909                 break;
2910             case SDLK_v:
2911                 stream_cycle_channel(cur_stream, AVMEDIA_TYPE_VIDEO);
2912                 break;
2913             case SDLK_t:
2914                 stream_cycle_channel(cur_stream, AVMEDIA_TYPE_SUBTITLE);
2915                 break;
2916             case SDLK_w:
2917                 toggle_audio_display(cur_stream);
2918                 cur_stream->force_refresh = 1;
2919                 break;
2920             case SDLK_PAGEUP:
2921                 incr = 600.0;
2922                 goto do_seek;
2923             case SDLK_PAGEDOWN:
2924                 incr = -600.0;
2925                 goto do_seek;
2926             case SDLK_LEFT:
2927                 incr = -10.0;
2928                 goto do_seek;
2929             case SDLK_RIGHT:
2930                 incr = 10.0;
2931                 goto do_seek;
2932             case SDLK_UP:
2933                 incr = 60.0;
2934                 goto do_seek;
2935             case SDLK_DOWN:
2936                 incr = -60.0;
2937             do_seek:
2938                     if (seek_by_bytes) {
2939                         if (cur_stream->video_stream >= 0 && cur_stream->video_current_pos >= 0) {
2940                             pos = cur_stream->video_current_pos;
2941                         } else if (cur_stream->audio_stream >= 0 && cur_stream->audio_pkt.pos >= 0) {
2942                             pos = cur_stream->audio_pkt.pos;
2943                         } else
2944                             pos = avio_tell(cur_stream->ic->pb);
2945                         if (cur_stream->ic->bit_rate)
2946                             incr *= cur_stream->ic->bit_rate / 8.0;
2947                         else
2948                             incr *= 180000.0;
2949                         pos += incr;
2950                         stream_seek(cur_stream, pos, incr, 1);
2951                     } else {
2952                         pos = get_master_clock(cur_stream);
2953                         pos += incr;
2954                         stream_seek(cur_stream, (int64_t)(pos * AV_TIME_BASE), (int64_t)(incr * AV_TIME_BASE), 0);
2955                     }
2956                 break;
2957             default:
2958                 break;
2959             }
2960             break;
2961         case SDL_VIDEOEXPOSE:
2962             cur_stream->force_refresh = 1;
2963             break;
2964         case SDL_MOUSEBUTTONDOWN:
2965             if (exit_on_mousedown) {
2966                 do_exit(cur_stream);
2967                 break;
2968             }
2969         case SDL_MOUSEMOTION:
2970             if (event.type == SDL_MOUSEBUTTONDOWN) {
2971                 x = event.button.x;
2972             } else {
2973                 if (event.motion.state != SDL_PRESSED)
2974                     break;
2975                 x = event.motion.x;
2976             }
2977             if (seek_by_bytes || cur_stream->ic->duration <= 0) {
2978                 uint64_t size = avio_size(cur_stream->ic->pb);
2979                 stream_seek(cur_stream, size * x / cur_stream->width, 0, 1);
2980             } else {
2981                 int64_t ts;
2982                 int ns, hh, mm, ss;
2983                 int tns, thh, tmm, tss;
2984                 tns  = cur_stream->ic->duration / 1000000LL;
2985                 thh  = tns / 3600;
2986                 tmm  = (tns % 3600) / 60;
2987                 tss  = (tns % 60);
2988                 frac = x / cur_stream->width;
2989                 ns   = frac * tns;
2990                 hh   = ns / 3600;
2991                 mm   = (ns % 3600) / 60;
2992                 ss   = (ns % 60);
2993                 fprintf(stderr, "Seek to %2.0f%% (%2d:%02d:%02d) of total duration (%2d:%02d:%02d)       \n", frac * 100,
2994                         hh, mm, ss, thh, tmm, tss);
2995                 ts = frac * cur_stream->ic->duration;
2996                 if (cur_stream->ic->start_time != AV_NOPTS_VALUE)
2997                     ts += cur_stream->ic->start_time;
2998                 stream_seek(cur_stream, ts, 0, 0);
2999             }
3000             break;
3001         case SDL_VIDEORESIZE:
3002             screen = SDL_SetVideoMode(event.resize.w, event.resize.h, 0,
3003                                       SDL_HWSURFACE | SDL_RESIZABLE | SDL_ASYNCBLIT | SDL_HWACCEL);
3004             screen_width  = cur_stream->width  = event.resize.w;
3005             screen_height = cur_stream->height = event.resize.h;
3006             cur_stream->force_refresh = 1;
3007             break;
3008         case SDL_QUIT:
3009         case FF_QUIT_EVENT:
3010             do_exit(cur_stream);
3011             break;
3012         case FF_ALLOC_EVENT:
3013             alloc_picture(event.user.data1);
3014             break;
3015         case FF_REFRESH_EVENT:
3016             video_refresh(event.user.data1);
3017             cur_stream->refresh = 0;
3018             break;
3019         default:
3020             break;
3021         }
3022     }
3023 }
3024
3025 static int opt_frame_size(void *optctx, const char *opt, const char *arg)
3026 {
3027     av_log(NULL, AV_LOG_WARNING, "Option -s is deprecated, use -video_size.\n");
3028     return opt_default(NULL, "video_size", arg);
3029 }
3030
3031 static int opt_width(void *optctx, const char *opt, const char *arg)
3032 {
3033     screen_width = parse_number_or_die(opt, arg, OPT_INT64, 1, INT_MAX);
3034     return 0;
3035 }
3036
3037 static int opt_height(void *optctx, const char *opt, const char *arg)
3038 {
3039     screen_height = parse_number_or_die(opt, arg, OPT_INT64, 1, INT_MAX);
3040     return 0;
3041 }
3042
3043 static int opt_format(void *optctx, const char *opt, const char *arg)
3044 {
3045     file_iformat = av_find_input_format(arg);
3046     if (!file_iformat) {
3047         fprintf(stderr, "Unknown input format: %s\n", arg);
3048         return AVERROR(EINVAL);
3049     }
3050     return 0;
3051 }
3052
3053 static int opt_frame_pix_fmt(void *optctx, const char *opt, const char *arg)
3054 {
3055     av_log(NULL, AV_LOG_WARNING, "Option -pix_fmt is deprecated, use -pixel_format.\n");
3056     return opt_default(NULL, "pixel_format", arg);
3057 }
3058
3059 static int opt_sync(void *optctx, const char *opt, const char *arg)
3060 {
3061     if (!strcmp(arg, "audio"))
3062         av_sync_type = AV_SYNC_AUDIO_MASTER;
3063     else if (!strcmp(arg, "video"))
3064         av_sync_type = AV_SYNC_VIDEO_MASTER;
3065     else if (!strcmp(arg, "ext"))
3066         av_sync_type = AV_SYNC_EXTERNAL_CLOCK;
3067     else {
3068         fprintf(stderr, "Unknown value for %s: %s\n", opt, arg);
3069         exit(1);
3070     }
3071     return 0;
3072 }
3073
3074 static int opt_seek(void *optctx, const char *opt, const char *arg)
3075 {
3076     start_time = parse_time_or_die(opt, arg, 1);
3077     return 0;
3078 }
3079
3080 static int opt_duration(void *optctx, const char *opt, const char *arg)
3081 {
3082     duration = parse_time_or_die(opt, arg, 1);
3083     return 0;
3084 }
3085
3086 static int opt_show_mode(void *optctx, const char *opt, const char *arg)
3087 {
3088     show_mode = !strcmp(arg, "video") ? SHOW_MODE_VIDEO :
3089                 !strcmp(arg, "waves") ? SHOW_MODE_WAVES :
3090                 !strcmp(arg, "rdft" ) ? SHOW_MODE_RDFT  :
3091                 parse_number_or_die(opt, arg, OPT_INT, 0, SHOW_MODE_NB-1);
3092     return 0;
3093 }
3094
3095 static void opt_input_file(void *optctx, const char *filename)
3096 {
3097     if (input_filename) {
3098         fprintf(stderr, "Argument '%s' provided as input filename, but '%s' was already specified.\n",
3099                 filename, input_filename);
3100         exit(1);
3101     }
3102     if (!strcmp(filename, "-"))
3103         filename = "pipe:";
3104     input_filename = filename;
3105 }
3106
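/* Handler for -codec: the last character of the option name as typed on the
   command line (e.g. "codec:a", "codec:s", "codec:v") selects which decoder
   name is overridden. */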
3107 static int opt_codec(void *o, const char *opt, const char *arg)
3108 {
3109     switch (opt[strlen(opt)-1]) {
3110     case 'a':    audio_codec_name = arg; break;
3111     case 's': subtitle_codec_name = arg; break;
3112     case 'v':    video_codec_name = arg; break;
3113     }
3114     return 0;
3115 }
3116
3117 static int dummy;
3118
3119 static const OptionDef options[] = {
3120 #include "cmdutils_common_opts.h"
3121     { "x", HAS_ARG, { .func_arg = opt_width }, "force displayed width", "width" },
3122     { "y", HAS_ARG, { .func_arg = opt_height }, "force displayed height", "height" },
3123     { "s", HAS_ARG | OPT_VIDEO, { .func_arg = opt_frame_size }, "set frame size (WxH or abbreviation)", "size" },
3124     { "fs", OPT_BOOL, { &is_full_screen }, "force full screen" },
3125     { "an", OPT_BOOL, { &audio_disable }, "disable audio" },
3126     { "vn", OPT_BOOL, { &video_disable }, "disable video" },
3127     { "ast", OPT_INT | HAS_ARG | OPT_EXPERT, { &wanted_stream[AVMEDIA_TYPE_AUDIO] }, "select desired audio stream", "stream_number" },
3128     { "vst", OPT_INT | HAS_ARG | OPT_EXPERT, { &wanted_stream[AVMEDIA_TYPE_VIDEO] }, "select desired video stream", "stream_number" },
3129     { "sst", OPT_INT | HAS_ARG | OPT_EXPERT, { &wanted_stream[AVMEDIA_TYPE_SUBTITLE] }, "select desired subtitle stream", "stream_number" },
3130     { "ss", HAS_ARG, { .func_arg = opt_seek }, "seek to a given position in seconds", "pos" },
3131     { "t", HAS_ARG, { .func_arg = opt_duration }, "play \"duration\" seconds of audio/video", "duration" },
3132     { "bytes", OPT_INT | HAS_ARG, { &seek_by_bytes }, "seek by bytes 0=off 1=on -1=auto", "val" },
3133     { "nodisp", OPT_BOOL, { &display_disable }, "disable graphical display" },
3134     { "f", HAS_ARG, { .func_arg = opt_format }, "force format", "fmt" },
3135     { "pix_fmt", HAS_ARG | OPT_EXPERT | OPT_VIDEO, { .func_arg = opt_frame_pix_fmt }, "set pixel format", "format" },
3136     { "stats", OPT_BOOL | OPT_EXPERT, { &show_status }, "show status", "" },
3137     { "bug", OPT_INT | HAS_ARG | OPT_EXPERT, { &workaround_bugs }, "workaround bugs", "" },
3138     { "fast", OPT_BOOL | OPT_EXPERT, { &fast }, "non spec compliant optimizations", "" },
3139     { "genpts", OPT_BOOL | OPT_EXPERT, { &genpts }, "generate pts", "" },
3140     { "drp", OPT_INT | HAS_ARG | OPT_EXPERT, { &decoder_reorder_pts }, "let decoder reorder pts 0=off 1=on -1=auto", ""},
3141     { "lowres", OPT_INT | HAS_ARG | OPT_EXPERT, { &lowres }, "set low resolution decoding factor", "factor" },
3142     { "skiploop", OPT_INT | HAS_ARG | OPT_EXPERT, { &skip_loop_filter }, "set loop filter skipping", "value" },
3143     { "skipframe", OPT_INT | HAS_ARG | OPT_EXPERT, { &skip_frame }, "set frame skipping", "value" },
3144     { "skipidct", OPT_INT | HAS_ARG | OPT_EXPERT, { &skip_idct }, "set IDCT skipping", "value" },
3145     { "idct", OPT_INT | HAS_ARG | OPT_EXPERT, { &idct }, "set idct algo",  "algo" },
3146     { "ec", OPT_INT | HAS_ARG | OPT_EXPERT, { &error_concealment }, "set error concealment options",  "bit_mask" },
3147     { "sync", HAS_ARG | OPT_EXPERT, { .func_arg = opt_sync }, "set audio-video sync. type (type=audio/video/ext)", "type" },
3148     { "autoexit", OPT_BOOL | OPT_EXPERT, { &autoexit }, "exit at the end", "" },
3149     { "exitonkeydown", OPT_BOOL | OPT_EXPERT, { &exit_on_keydown }, "exit on key down", "" },
3150     { "exitonmousedown", OPT_BOOL | OPT_EXPERT, { &exit_on_mousedown }, "exit on mouse down", "" },
3151     { "loop", OPT_INT | HAS_ARG | OPT_EXPERT, { &loop }, "set number of times the playback shall be looped", "loop count" },
3152     { "framedrop", OPT_BOOL | OPT_EXPERT, { &framedrop }, "drop frames when cpu is too slow", "" },
3153     { "infbuf", OPT_BOOL | OPT_EXPERT, { &infinite_buffer }, "don't limit the input buffer size (useful with realtime streams)", "" },
3154     { "window_title", OPT_STRING | HAS_ARG, { &window_title }, "set window title", "window title" },
3155 #if CONFIG_AVFILTER
3156     { "vf", OPT_STRING | HAS_ARG, { &vfilters }, "video filters", "filter list" },
3157 #endif
3158     { "rdftspeed", OPT_INT | HAS_ARG| OPT_AUDIO | OPT_EXPERT, { &rdftspeed }, "rdft speed", "msecs" },
3159     { "showmode", HAS_ARG, { .func_arg = opt_show_mode}, "select show mode (0 = video, 1 = waves, 2 = RDFT)", "mode" },
3160     { "default", HAS_ARG | OPT_AUDIO | OPT_VIDEO | OPT_EXPERT, { .func_arg = opt_default }, "generic catch all option", "" },
3161     { "i", OPT_BOOL, { &dummy}, "read specified file", "input_file"},
3162     { "codec", HAS_ARG, { .func_arg = opt_codec}, "force decoder", "decoder" },
3163     { NULL, },
3164 };
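/* Illustrative command lines using the options above (file names are just
   examples):
       ffplay -ss 30 -t 10 input.mkv       start 30 seconds in, play 10 seconds
       ffplay -sync audio -framedrop in.ts slave A/V sync to the audio clock,
                                           dropping late video frames
       ffplay -showmode waves music.mp3    show the audio waveform instead of
                                           the video stream */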
3165
3166 static void show_usage(void)
3167 {
3168     av_log(NULL, AV_LOG_INFO, "Simple media player\n");
3169     av_log(NULL, AV_LOG_INFO, "usage: %s [options] input_file\n", program_name);
3170     av_log(NULL, AV_LOG_INFO, "\n");
3171 }
3172
3173 void show_help_default(const char *opt, const char *arg)
3174 {
3175     av_log_set_callback(log_callback_help);
3176     show_usage();
3177     show_help_options(options, "Main options:", 0, OPT_EXPERT, 0);
3178     show_help_options(options, "Advanced options:", OPT_EXPERT, 0, 0);
3179     printf("\n");
3180     show_help_children(avcodec_get_class(), AV_OPT_FLAG_DECODING_PARAM);
3181     show_help_children(avformat_get_class(), AV_OPT_FLAG_DECODING_PARAM);
3182 #if !CONFIG_AVFILTER
3183     show_help_children(sws_get_class(), AV_OPT_FLAG_ENCODING_PARAM);
3184 #else
3185     show_help_children(avfilter_get_class(), AV_OPT_FLAG_FILTERING_PARAM);
3186 #endif
3187     printf("\nWhile playing:\n"
3188            "q, ESC              quit\n"
3189            "f                   toggle full screen\n"
3190            "p, SPC              pause\n"
3191            "a                   cycle audio channel\n"
3192            "v                   cycle video channel\n"
3193            "t                   cycle subtitle channel\n"
3194            "w                   show audio waves\n"
3195            "s                   activate frame-step mode\n"
3196            "left/right          seek backward/forward 10 seconds\n"
3197            "down/up             seek backward/forward 1 minute\n"
3198            "page down/page up   seek backward/forward 10 minutes\n"
3199            "mouse click         seek to percentage in file corresponding to fraction of width\n"
3200            );
3201 }
3202
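/* Lock manager callback passed to av_lockmgr_register(): libavcodec invokes
   it to create, obtain, release and destroy mutexes around internals that
   are not thread safe.  It must return 0 on success and nonzero on error. */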
3203 static int lockmgr(void **mtx, enum AVLockOp op)
3204 {
3205     switch (op) {
3206     case AV_LOCK_CREATE:
3207         *mtx = SDL_CreateMutex();
3208         if (!*mtx)
3209             return 1;
3210         return 0;
3211     case AV_LOCK_OBTAIN:
3212         return !!SDL_LockMutex(*mtx);
3213     case AV_LOCK_RELEASE:
3214         return !!SDL_UnlockMutex(*mtx);
3215     case AV_LOCK_DESTROY:
3216         SDL_DestroyMutex(*mtx);
3217         return 0;
3218     }
3219     return 1;
3220 }
3221
3222 /* Program entry point */
3223 int main(int argc, char **argv)
3224 {
3225     int flags;
3226     VideoState *is;
3227     char dummy_videodriver[] = "SDL_VIDEODRIVER=dummy";
3228
3229     av_log_set_flags(AV_LOG_SKIP_REPEATED);
3230     parse_loglevel(argc, argv, options);
3231
3232     /* register all codecs, demuxers and protocols */
3233     avcodec_register_all();
3234 #if CONFIG_AVDEVICE
3235     avdevice_register_all();
3236 #endif
3237 #if CONFIG_AVFILTER
3238     avfilter_register_all();
3239 #endif
3240     av_register_all();
3241     avformat_network_init();
3242
3243     init_opts();
3244
3245     signal(SIGINT , sigterm_handler); /* Interrupt (ANSI).    */
3246     signal(SIGTERM, sigterm_handler); /* Termination (ANSI).  */
3247
3248     show_banner(argc, argv, options);
3249
3250     parse_options(NULL, argc, argv, options, opt_input_file);
3251
3252     if (!input_filename) {
3253         show_usage();
3254         fprintf(stderr, "An input file must be specified\n");
3255         fprintf(stderr, "Use -h to get full help or, even better, run 'man %s'\n", program_name);
3256         exit(1);
3257     }
3258
3259     if (display_disable) {
3260         video_disable = 1;
3261     }
3262     flags = SDL_INIT_VIDEO | SDL_INIT_AUDIO | SDL_INIT_TIMER;
3263     if (audio_disable)
3264         flags &= ~SDL_INIT_AUDIO;
3265     if (display_disable)
3266         SDL_putenv(dummy_videodriver); /* For the event queue, we always need a video driver. */
3267 #if !defined(__MINGW32__) && !defined(__APPLE__)
3268     flags |= SDL_INIT_EVENTTHREAD; /* Not supported on Windows or Mac OS X */
3269 #endif
3270     if (SDL_Init (flags)) {
3271         fprintf(stderr, "Could not initialize SDL - %s\n", SDL_GetError());
3272         fprintf(stderr, "(Did you set the DISPLAY variable?)\n");
3273         exit(1);
3274     }
3275
3276     if (!display_disable) {
3277 #if HAVE_SDL_VIDEO_SIZE
3278         const SDL_VideoInfo *vi = SDL_GetVideoInfo();
3279         fs_screen_width = vi->current_w;
3280         fs_screen_height = vi->current_h;
3281 #endif
3282     }
3283
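    /* Ignore event types the event loop never handles so they do not pile up
       in SDL's event queue. */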
3284     SDL_EventState(SDL_ACTIVEEVENT, SDL_IGNORE);
3285     SDL_EventState(SDL_SYSWMEVENT, SDL_IGNORE);
3286     SDL_EventState(SDL_USEREVENT, SDL_IGNORE);
3287
3288     if (av_lockmgr_register(lockmgr)) {
3289         fprintf(stderr, "Could not initialize lock manager!\n");
3290         do_exit(NULL);
3291     }
3292
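    /* Sentinel packet queued into the packet queues on seeks; decoder threads
       detect it by comparing the data pointer against flush_pkt.data and then
       flush their codec buffers.  The string itself is never decoded. */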
3293     av_init_packet(&flush_pkt);
3294     flush_pkt.data = (uint8_t *)(intptr_t)"FLUSH";
3295
3296     is = stream_open(input_filename, file_iformat);
3297     if (!is) {
3298         fprintf(stderr, "Failed to initialize VideoState!\n");
3299         do_exit(NULL);
3300     }
3301
3302     event_loop(is);
3303
3304     /* event_loop() never returns; playback ends via do_exit() */
3305
3306     return 0;
3307 }