1 /*
2  * Copyright (c) 2003 Fabrice Bellard
3  *
4  * This file is part of FFmpeg.
5  *
6  * FFmpeg is free software; you can redistribute it and/or
7  * modify it under the terms of the GNU Lesser General Public
8  * License as published by the Free Software Foundation; either
9  * version 2.1 of the License, or (at your option) any later version.
10  *
11  * FFmpeg is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
14  * Lesser General Public License for more details.
15  *
16  * You should have received a copy of the GNU Lesser General Public
17  * License along with FFmpeg; if not, write to the Free Software
18  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
19  */
20
21 /**
22  * @file
23  * simple media player based on the FFmpeg libraries
24  */
25
26 #include "config.h"
27 #include <inttypes.h>
28 #include <math.h>
29 #include <limits.h>
30 #include <signal.h>
31 #include "libavutil/avstring.h"
32 #include "libavutil/colorspace.h"
33 #include "libavutil/mathematics.h"
34 #include "libavutil/pixdesc.h"
35 #include "libavutil/imgutils.h"
36 #include "libavutil/dict.h"
37 #include "libavutil/parseutils.h"
38 #include "libavutil/samplefmt.h"
39 #include "libavutil/avassert.h"
40 #include "libavutil/time.h"
41 #include "libavformat/avformat.h"
42 #include "libavdevice/avdevice.h"
43 #include "libswscale/swscale.h"
44 #include "libavutil/opt.h"
45 #include "libavcodec/avfft.h"
46 #include "libswresample/swresample.h"
47
48 #if CONFIG_AVFILTER
49 # include "libavfilter/avcodec.h"
50 # include "libavfilter/avfilter.h"
51 # include "libavfilter/avfiltergraph.h"
52 # include "libavfilter/buffersink.h"
53 # include "libavfilter/buffersrc.h"
54 #endif
55
56 #include <SDL.h>
57 #include <SDL_thread.h>
58
59 #include "cmdutils.h"
60
61 #include <assert.h>
62
63 const char program_name[] = "ffplay";
64 const int program_birth_year = 2003;
65
66 #define MAX_QUEUE_SIZE (15 * 1024 * 1024)
67 #define MIN_FRAMES 5
68
69 /* SDL audio buffer size, in samples. Should be small to have precise
70    A/V sync as SDL does not have hardware buffer fullness info. */
71 #define SDL_AUDIO_BUFFER_SIZE 1024
72
73 /* no AV sync correction is done if below the AV sync threshold */
74 #define AV_SYNC_THRESHOLD 0.01
75 /* no AV correction is done if too big error */
76 #define AV_NOSYNC_THRESHOLD 10.0
77
78 /* maximum audio speed change to get correct sync */
79 #define SAMPLE_CORRECTION_PERCENT_MAX 10
80
81 /* we use about AUDIO_DIFF_AVG_NB A-V differences to compute the average */
82 #define AUDIO_DIFF_AVG_NB   20
83
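/*
 * Illustrative sketch (not part of ffplay): the "average of about
 * AUDIO_DIFF_AVG_NB differences" is kept as an exponentially weighted moving
 * average rather than a true window. With coef chosen so that
 * coef^AUDIO_DIFF_AVG_NB == 0.01, measurements older than roughly
 * AUDIO_DIFF_AVG_NB samples contribute less than 1% to the result; the audio
 * synchronisation code later in this file maintains its average along these lines.
 */
#if 0
static double audio_diff_avg_coef; /* = exp(log(0.01) / AUDIO_DIFF_AVG_NB) */
static double audio_diff_cum;      /* weighted running sum of A-V differences */

static double update_av_diff_average(double diff)
{
    audio_diff_cum = diff + audio_diff_avg_coef * audio_diff_cum;
    /* scale the weighted sum so it behaves like a plain average */
    return audio_diff_cum * (1.0 - audio_diff_avg_coef);
}
#endif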
84 /* NOTE: the size must be big enough to compensate for the hardware audio buffer size */
85 /* TODO: We assume that a decoded and resampled frame fits into this buffer */
86 #define SAMPLE_ARRAY_SIZE (8 * 65536)
87
88 static int sws_flags = SWS_BICUBIC;
89
90 typedef struct MyAVPacketList {
91     AVPacket pkt;
92     struct MyAVPacketList *next;
93     int serial;
94 } MyAVPacketList;
95
96 typedef struct PacketQueue {
97     MyAVPacketList *first_pkt, *last_pkt;
98     int nb_packets;
99     int size;
100     int abort_request;
101     int serial;
102     SDL_mutex *mutex;
103     SDL_cond *cond;
104 } PacketQueue;
105
106 #define VIDEO_PICTURE_QUEUE_SIZE 4
107 #define SUBPICTURE_QUEUE_SIZE 4
108
109 typedef struct VideoPicture {
110     double pts;             // presentation timestamp for this picture
111     int64_t pos;            // byte position in file
112     int skip;
113     SDL_Overlay *bmp;
114     int width, height; /* source width & height */
115     AVRational sample_aspect_ratio;
116     int allocated;
117     int reallocate;
118     int serial;
119
120 #if CONFIG_AVFILTER
121     AVFilterBufferRef *picref;
122 #endif
123 } VideoPicture;
124
125 typedef struct SubPicture {
126     double pts; /* presentation time stamp for this picture */
127     AVSubtitle sub;
128 } SubPicture;
129
130 typedef struct AudioParams {
131     int freq;
132     int channels;
133     int64_t channel_layout;
134     enum AVSampleFormat fmt;
135 } AudioParams;
136
137 enum {
138     AV_SYNC_AUDIO_MASTER, /* default choice */
139     AV_SYNC_VIDEO_MASTER,
140     AV_SYNC_EXTERNAL_CLOCK, /* synchronize to an external clock */
141 };
142
143 typedef struct VideoState {
144     SDL_Thread *read_tid;
145     SDL_Thread *video_tid;
146     SDL_Thread *refresh_tid;
147     AVInputFormat *iformat;
148     int no_background;
149     int abort_request;
150     int force_refresh;
151     int paused;
152     int last_paused;
153     int queue_attachments_req;
154     int seek_req;
155     int seek_flags;
156     int64_t seek_pos;
157     int64_t seek_rel;
158     int read_pause_return;
159     AVFormatContext *ic;
160
161     int audio_stream;
162
163     int av_sync_type;
164     double external_clock;                   ///< external clock base
165     double external_clock_drift;             ///< external clock base - time (av_gettime) at which we updated external_clock
166     int64_t external_clock_time;             ///< last reference time
167
168     double audio_clock;
169     double audio_diff_cum; /* used for AV difference average computation */
170     double audio_diff_avg_coef;
171     double audio_diff_threshold;
172     int audio_diff_avg_count;
173     AVStream *audio_st;
174     PacketQueue audioq;
175     int audio_hw_buf_size;
176     DECLARE_ALIGNED(16,uint8_t,audio_buf2)[AVCODEC_MAX_AUDIO_FRAME_SIZE * 4];
177     uint8_t silence_buf[SDL_AUDIO_BUFFER_SIZE];
178     uint8_t *audio_buf;
179     uint8_t *audio_buf1;
180     unsigned int audio_buf_size; /* in bytes */
181     int audio_buf_index; /* in bytes */
182     int audio_write_buf_size;
183     AVPacket audio_pkt_temp;
184     AVPacket audio_pkt;
185     int audio_pkt_temp_serial;
186     struct AudioParams audio_src;
187     struct AudioParams audio_tgt;
188     struct SwrContext *swr_ctx;
189     double audio_current_pts;
190     double audio_current_pts_drift;
191     int frame_drops_early;
192     int frame_drops_late;
193     AVFrame *frame;
194
195     enum ShowMode {
196         SHOW_MODE_NONE = -1, SHOW_MODE_VIDEO = 0, SHOW_MODE_WAVES, SHOW_MODE_RDFT, SHOW_MODE_NB
197     } show_mode;
198     int16_t sample_array[SAMPLE_ARRAY_SIZE];
199     int sample_array_index;
200     int last_i_start;
201     RDFTContext *rdft;
202     int rdft_bits;
203     FFTSample *rdft_data;
204     int xpos;
205
206     SDL_Thread *subtitle_tid;
207     int subtitle_stream;
208     int subtitle_stream_changed;
209     AVStream *subtitle_st;
210     PacketQueue subtitleq;
211     SubPicture subpq[SUBPICTURE_QUEUE_SIZE];
212     int subpq_size, subpq_rindex, subpq_windex;
213     SDL_mutex *subpq_mutex;
214     SDL_cond *subpq_cond;
215
216     double frame_timer;
217     double frame_last_pts;
218     double frame_last_duration;
219     double frame_last_dropped_pts;
220     double frame_last_returned_time;
221     double frame_last_filter_delay;
222     int64_t frame_last_dropped_pos;
223     double video_clock;             // pts of last decoded frame / predicted pts of next decoded frame
224     int video_stream;
225     AVStream *video_st;
226     PacketQueue videoq;
227     double video_current_pts;       // current displayed pts (different from video_clock if frame fifos are used)
228     double video_current_pts_drift; // video_current_pts - time (av_gettime) at which we updated video_current_pts - used to have running video pts
229     int64_t video_current_pos;      // current displayed file pos
230     VideoPicture pictq[VIDEO_PICTURE_QUEUE_SIZE];
231     int pictq_size, pictq_rindex, pictq_windex;
232     SDL_mutex *pictq_mutex;
233     SDL_cond *pictq_cond;
234 #if !CONFIG_AVFILTER
235     struct SwsContext *img_convert_ctx;
236 #endif
237
238     char filename[1024];
239     int width, height, xleft, ytop;
240     int step;
241
242 #if CONFIG_AVFILTER
243     AVFilterContext *in_video_filter;   // the first filter in the video chain
244     AVFilterContext *out_video_filter;  // the last filter in the video chain
245     int use_dr1;
246     FrameBuffer *buffer_pool;
247 #endif
248
249     int refresh;
250     int last_video_stream, last_audio_stream, last_subtitle_stream;
251
252     SDL_cond *continue_read_thread;
253 } VideoState;
254
255 /* options specified by the user */
256 static AVInputFormat *file_iformat;
257 static const char *input_filename;
258 static const char *window_title;
259 static int fs_screen_width;
260 static int fs_screen_height;
261 static int screen_width  = 0;
262 static int screen_height = 0;
263 static int audio_disable;
264 static int video_disable;
265 static int wanted_stream[AVMEDIA_TYPE_NB] = {
266     [AVMEDIA_TYPE_AUDIO]    = -1,
267     [AVMEDIA_TYPE_VIDEO]    = -1,
268     [AVMEDIA_TYPE_SUBTITLE] = -1,
269 };
270 static int seek_by_bytes = -1;
271 static int display_disable;
272 static int show_status = 1;
273 static int av_sync_type = AV_SYNC_AUDIO_MASTER;
274 static int64_t start_time = AV_NOPTS_VALUE;
275 static int64_t duration = AV_NOPTS_VALUE;
276 static int workaround_bugs = 1;
277 static int fast = 0;
278 static int genpts = 0;
279 static int lowres = 0;
280 static int idct = FF_IDCT_AUTO;
281 static enum AVDiscard skip_frame       = AVDISCARD_DEFAULT;
282 static enum AVDiscard skip_idct        = AVDISCARD_DEFAULT;
283 static enum AVDiscard skip_loop_filter = AVDISCARD_DEFAULT;
284 static int error_concealment = 3;
285 static int decoder_reorder_pts = -1;
286 static int autoexit;
287 static int exit_on_keydown;
288 static int exit_on_mousedown;
289 static int loop = 1;
290 static int framedrop = -1;
291 static int infinite_buffer = -1;
292 static enum ShowMode show_mode = SHOW_MODE_NONE;
293 static const char *audio_codec_name;
294 static const char *subtitle_codec_name;
295 static const char *video_codec_name;
296 static int rdftspeed = 20;
297 #if CONFIG_AVFILTER
298 static char *vfilters = NULL;
299 #endif
300
301 /* current context */
302 static int is_full_screen;
303 static int64_t audio_callback_time;
304
305 static AVPacket flush_pkt;
306
307 #define FF_ALLOC_EVENT   (SDL_USEREVENT)
308 #define FF_REFRESH_EVENT (SDL_USEREVENT + 1)
309 #define FF_QUIT_EVENT    (SDL_USEREVENT + 2)
310
311 static SDL_Surface *screen;
312
313 static int packet_queue_put(PacketQueue *q, AVPacket *pkt);
314
315 static int packet_queue_put_private(PacketQueue *q, AVPacket *pkt)
316 {
317     MyAVPacketList *pkt1;
318
319     if (q->abort_request)
320        return -1;
321
322     pkt1 = av_malloc(sizeof(MyAVPacketList));
323     if (!pkt1)
324         return -1;
325     pkt1->pkt = *pkt;
326     pkt1->next = NULL;
327     if (pkt == &flush_pkt)
328         q->serial++;
329     pkt1->serial = q->serial;
330
331     if (!q->last_pkt)
332         q->first_pkt = pkt1;
333     else
334         q->last_pkt->next = pkt1;
335     q->last_pkt = pkt1;
336     q->nb_packets++;
337     q->size += pkt1->pkt.size + sizeof(*pkt1);
338     /* XXX: should duplicate packet data in DV case */
339     SDL_CondSignal(q->cond);
340     return 0;
341 }
342
343 static int packet_queue_put(PacketQueue *q, AVPacket *pkt)
344 {
345     int ret;
346
347     /* duplicate the packet */
348     if (pkt != &flush_pkt && av_dup_packet(pkt) < 0)
349         return -1;
350
351     SDL_LockMutex(q->mutex);
352     ret = packet_queue_put_private(q, pkt);
353     SDL_UnlockMutex(q->mutex);
354
355     if (pkt != &flush_pkt && ret < 0)
356         av_free_packet(pkt);
357
358     return ret;
359 }
360
361 /* packet queue handling */
362 static void packet_queue_init(PacketQueue *q)
363 {
364     memset(q, 0, sizeof(PacketQueue));
365     q->mutex = SDL_CreateMutex();
366     q->cond = SDL_CreateCond();
367     q->abort_request = 1;
368 }
369
370 static void packet_queue_flush(PacketQueue *q)
371 {
372     MyAVPacketList *pkt, *pkt1;
373
374     SDL_LockMutex(q->mutex);
375     for (pkt = q->first_pkt; pkt != NULL; pkt = pkt1) {
376         pkt1 = pkt->next;
377         av_free_packet(&pkt->pkt);
378         av_freep(&pkt);
379     }
380     q->last_pkt = NULL;
381     q->first_pkt = NULL;
382     q->nb_packets = 0;
383     q->size = 0;
384     SDL_UnlockMutex(q->mutex);
385 }
386
387 static void packet_queue_destroy(PacketQueue *q)
388 {
389     packet_queue_flush(q);
390     SDL_DestroyMutex(q->mutex);
391     SDL_DestroyCond(q->cond);
392 }
393
394 static void packet_queue_abort(PacketQueue *q)
395 {
396     SDL_LockMutex(q->mutex);
397
398     q->abort_request = 1;
399
400     SDL_CondSignal(q->cond);
401
402     SDL_UnlockMutex(q->mutex);
403 }
404
405 static void packet_queue_start(PacketQueue *q)
406 {
407     SDL_LockMutex(q->mutex);
408     q->abort_request = 0;
409     packet_queue_put_private(q, &flush_pkt);
410     SDL_UnlockMutex(q->mutex);
411 }
412
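/*
 * Illustrative sketch (not part of ffplay): the intended lifecycle of a
 * PacketQueue. A queue is created in the aborted state, so packet_queue_start()
 * has to be called before it is used; packet_queue_abort() wakes up any
 * consumer blocked in packet_queue_get() before the queue is destroyed.
 */
#if 0
static void packet_queue_lifecycle_example(void)
{
    PacketQueue q;

    packet_queue_init(&q);    /* abort_request starts at 1: queue not usable yet */
    packet_queue_start(&q);   /* clears abort_request and queues flush_pkt */

    /* ... a demuxing thread calls packet_queue_put(&q, pkt),
       a decoding thread calls packet_queue_get(&q, &pkt, 1, &serial) ... */

    packet_queue_abort(&q);   /* wakes any thread blocked in packet_queue_get() */
    packet_queue_destroy(&q); /* flushes remaining packets, frees mutex and cond */
}
#endif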
413 /* return < 0 if aborted, 0 if no packet and > 0 if packet.  */
414 static int packet_queue_get(PacketQueue *q, AVPacket *pkt, int block, int *serial)
415 {
416     MyAVPacketList *pkt1;
417     int ret;
418
419     SDL_LockMutex(q->mutex);
420
421     for (;;) {
422         if (q->abort_request) {
423             ret = -1;
424             break;
425         }
426
427         pkt1 = q->first_pkt;
428         if (pkt1) {
429             q->first_pkt = pkt1->next;
430             if (!q->first_pkt)
431                 q->last_pkt = NULL;
432             q->nb_packets--;
433             q->size -= pkt1->pkt.size + sizeof(*pkt1);
434             *pkt = pkt1->pkt;
435             if (serial)
436                 *serial = pkt1->serial;
437             av_free(pkt1);
438             ret = 1;
439             break;
440         } else if (!block) {
441             ret = 0;
442             break;
443         } else {
444             SDL_CondWait(q->cond, q->mutex);
445         }
446     }
447     SDL_UnlockMutex(q->mutex);
448     return ret;
449 }
450
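/*
 * Illustrative sketch (not part of ffplay): a minimal consumer loop for the
 * queue above. It blocks until a packet is available, treats the special
 * flush_pkt as a "decoder reset" marker, and stops when the queue is aborted.
 * decode_one_packet() is a hypothetical stand-in for the real per-stream
 * decoding done elsewhere in this file.
 */
#if 0
static void drain_queue_example(PacketQueue *q)
{
    AVPacket pkt;
    int serial;

    for (;;) {
        if (packet_queue_get(q, &pkt, 1, &serial) < 0)
            break;                        /* the queue was aborted */
        if (pkt.data == flush_pkt.data)
            continue;                     /* a seek happened: reset the decoder here;
                                             'serial' now identifies post-flush packets */
        decode_one_packet(&pkt, serial);  /* hypothetical */
        av_free_packet(&pkt);
    }
}
#endif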
451 static inline void fill_rectangle(SDL_Surface *screen,
452                                   int x, int y, int w, int h, int color)
453 {
454     SDL_Rect rect;
455     rect.x = x;
456     rect.y = y;
457     rect.w = w;
458     rect.h = h;
459     SDL_FillRect(screen, &rect, color);
460 }
461
462 #define ALPHA_BLEND(a, oldp, newp, s)\
463 ((((oldp << s) * (255 - (a))) + (newp * (a))) / (255 << s))
464
465 #define RGBA_IN(r, g, b, a, s)\
466 {\
467     unsigned int v = ((const uint32_t *)(s))[0];\
468     a = (v >> 24) & 0xff;\
469     r = (v >> 16) & 0xff;\
470     g = (v >> 8) & 0xff;\
471     b = v & 0xff;\
472 }
473
474 #define YUVA_IN(y, u, v, a, s, pal)\
475 {\
476     unsigned int val = ((const uint32_t *)(pal))[*(const uint8_t*)(s)];\
477     a = (val >> 24) & 0xff;\
478     y = (val >> 16) & 0xff;\
479     u = (val >> 8) & 0xff;\
480     v = val & 0xff;\
481 }
482
483 #define YUVA_OUT(d, y, u, v, a)\
484 {\
485     ((uint32_t *)(d))[0] = (a << 24) | (y << 16) | (u << 8) | v;\
486 }
487
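/*
 * Worked example of the blending macros above: ALPHA_BLEND(a, oldp, newp, s)
 * mixes the old and new sample in proportion to the alpha a, where the shift s
 * accounts for a newp that is already the sum of 2^s samples:
 *   ALPHA_BLEND(0,   old, new, 0) == old                           (transparent)
 *   ALPHA_BLEND(255, old, new, 0) == new                           (opaque)
 *   ALPHA_BLEND(128, 0,   255, 0) == (0*127 + 255*128) / 255 == 128
 */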
488
489 #define BPP 1
490
491 static void blend_subrect(AVPicture *dst, const AVSubtitleRect *rect, int imgw, int imgh)
492 {
493     int wrap, wrap3, width2, skip2;
494     int y, u, v, a, u1, v1, a1, w, h;
495     uint8_t *lum, *cb, *cr;
496     const uint8_t *p;
497     const uint32_t *pal;
498     int dstx, dsty, dstw, dsth;
499
500     dstw = av_clip(rect->w, 0, imgw);
501     dsth = av_clip(rect->h, 0, imgh);
502     dstx = av_clip(rect->x, 0, imgw - dstw);
503     dsty = av_clip(rect->y, 0, imgh - dsth);
504     lum = dst->data[0] + dsty * dst->linesize[0];
505     cb  = dst->data[1] + (dsty >> 1) * dst->linesize[1];
506     cr  = dst->data[2] + (dsty >> 1) * dst->linesize[2];
507
508     width2 = ((dstw + 1) >> 1) + (dstx & ~dstw & 1);
509     skip2 = dstx >> 1;
510     wrap = dst->linesize[0];
511     wrap3 = rect->pict.linesize[0];
512     p = rect->pict.data[0];
513     pal = (const uint32_t *)rect->pict.data[1];  /* Now in YCrCb! */
514
515     if (dsty & 1) {
516         lum += dstx;
517         cb += skip2;
518         cr += skip2;
519
520         if (dstx & 1) {
521             YUVA_IN(y, u, v, a, p, pal);
522             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
523             cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
524             cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
525             cb++;
526             cr++;
527             lum++;
528             p += BPP;
529         }
530         for (w = dstw - (dstx & 1); w >= 2; w -= 2) {
531             YUVA_IN(y, u, v, a, p, pal);
532             u1 = u;
533             v1 = v;
534             a1 = a;
535             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
536
537             YUVA_IN(y, u, v, a, p + BPP, pal);
538             u1 += u;
539             v1 += v;
540             a1 += a;
541             lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
542             cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
543             cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
544             cb++;
545             cr++;
546             p += 2 * BPP;
547             lum += 2;
548         }
549         if (w) {
550             YUVA_IN(y, u, v, a, p, pal);
551             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
552             cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
553             cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
554             p++;
555             lum++;
556         }
557         p += wrap3 - dstw * BPP;
558         lum += wrap - dstw - dstx;
559         cb += dst->linesize[1] - width2 - skip2;
560         cr += dst->linesize[2] - width2 - skip2;
561     }
562     for (h = dsth - (dsty & 1); h >= 2; h -= 2) {
563         lum += dstx;
564         cb += skip2;
565         cr += skip2;
566
567         if (dstx & 1) {
568             YUVA_IN(y, u, v, a, p, pal);
569             u1 = u;
570             v1 = v;
571             a1 = a;
572             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
573             p += wrap3;
574             lum += wrap;
575             YUVA_IN(y, u, v, a, p, pal);
576             u1 += u;
577             v1 += v;
578             a1 += a;
579             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
580             cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
581             cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
582             cb++;
583             cr++;
584             p += -wrap3 + BPP;
585             lum += -wrap + 1;
586         }
587         for (w = dstw - (dstx & 1); w >= 2; w -= 2) {
588             YUVA_IN(y, u, v, a, p, pal);
589             u1 = u;
590             v1 = v;
591             a1 = a;
592             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
593
594             YUVA_IN(y, u, v, a, p + BPP, pal);
595             u1 += u;
596             v1 += v;
597             a1 += a;
598             lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
599             p += wrap3;
600             lum += wrap;
601
602             YUVA_IN(y, u, v, a, p, pal);
603             u1 += u;
604             v1 += v;
605             a1 += a;
606             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
607
608             YUVA_IN(y, u, v, a, p + BPP, pal);
609             u1 += u;
610             v1 += v;
611             a1 += a;
612             lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
613
614             cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 2);
615             cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 2);
616
617             cb++;
618             cr++;
619             p += -wrap3 + 2 * BPP;
620             lum += -wrap + 2;
621         }
622         if (w) {
623             YUVA_IN(y, u, v, a, p, pal);
624             u1 = u;
625             v1 = v;
626             a1 = a;
627             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
628             p += wrap3;
629             lum += wrap;
630             YUVA_IN(y, u, v, a, p, pal);
631             u1 += u;
632             v1 += v;
633             a1 += a;
634             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
635             cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
636             cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
637             cb++;
638             cr++;
639             p += -wrap3 + BPP;
640             lum += -wrap + 1;
641         }
642         p += wrap3 + (wrap3 - dstw * BPP);
643         lum += wrap + (wrap - dstw - dstx);
644         cb += dst->linesize[1] - width2 - skip2;
645         cr += dst->linesize[2] - width2 - skip2;
646     }
647     /* handle odd height */
648     if (h) {
649         lum += dstx;
650         cb += skip2;
651         cr += skip2;
652
653         if (dstx & 1) {
654             YUVA_IN(y, u, v, a, p, pal);
655             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
656             cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
657             cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
658             cb++;
659             cr++;
660             lum++;
661             p += BPP;
662         }
663         for (w = dstw - (dstx & 1); w >= 2; w -= 2) {
664             YUVA_IN(y, u, v, a, p, pal);
665             u1 = u;
666             v1 = v;
667             a1 = a;
668             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
669
670             YUVA_IN(y, u, v, a, p + BPP, pal);
671             u1 += u;
672             v1 += v;
673             a1 += a;
674             lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
675             cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
676             cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
677             cb++;
678             cr++;
679             p += 2 * BPP;
680             lum += 2;
681         }
682         if (w) {
683             YUVA_IN(y, u, v, a, p, pal);
684             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
685             cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
686             cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
687         }
688     }
689 }
690
691 static void free_subpicture(SubPicture *sp)
692 {
693     avsubtitle_free(&sp->sub);
694 }
695
696 static void calculate_display_rect(SDL_Rect *rect, int scr_xleft, int scr_ytop, int scr_width, int scr_height, VideoPicture *vp)
697 {
698     float aspect_ratio;
699     int width, height, x, y;
700
701     if (vp->sample_aspect_ratio.num == 0)
702         aspect_ratio = 0;
703     else
704         aspect_ratio = av_q2d(vp->sample_aspect_ratio);
705
706     if (aspect_ratio <= 0.0)
707         aspect_ratio = 1.0;
708     aspect_ratio *= (float)vp->width / (float)vp->height;
709
710     /* XXX: we suppose the screen has a 1.0 pixel ratio */
711     height = scr_height;
712     width = ((int)rint(height * aspect_ratio)) & ~1;
713     if (width > scr_width) {
714         width = scr_width;
715         height = ((int)rint(width / aspect_ratio)) & ~1;
716     }
717     x = (scr_width - width) / 2;
718     y = (scr_height - height) / 2;
719     rect->x = scr_xleft + x;
720     rect->y = scr_ytop  + y;
721     rect->w = FFMAX(width,  1);
722     rect->h = FFMAX(height, 1);
723 }
724
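/*
 * Worked example: for a 640x480 picture with a 1:1 sample aspect ratio shown
 * in a 1280x720 area, aspect_ratio becomes 640/480 = 4/3; the first guess
 * 720 * 4/3 = 960 already fits, so the display rectangle ends up 960x720,
 * centered at x = (1280 - 960) / 2 = 160, y = 0.
 */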
725 static void video_image_display(VideoState *is)
726 {
727     VideoPicture *vp;
728     SubPicture *sp;
729     AVPicture pict;
730     SDL_Rect rect;
731     int i;
732
733     vp = &is->pictq[is->pictq_rindex];
734     if (vp->bmp) {
735         if (is->subtitle_st) {
736             if (is->subpq_size > 0) {
737                 sp = &is->subpq[is->subpq_rindex];
738
739                 if (vp->pts >= sp->pts + ((float) sp->sub.start_display_time / 1000)) {
740                     SDL_LockYUVOverlay (vp->bmp);
741
742                     pict.data[0] = vp->bmp->pixels[0];
743                     pict.data[1] = vp->bmp->pixels[2];
744                     pict.data[2] = vp->bmp->pixels[1];
745
746                     pict.linesize[0] = vp->bmp->pitches[0];
747                     pict.linesize[1] = vp->bmp->pitches[2];
748                     pict.linesize[2] = vp->bmp->pitches[1];
749
750                     for (i = 0; i < sp->sub.num_rects; i++)
751                         blend_subrect(&pict, sp->sub.rects[i],
752                                       vp->bmp->w, vp->bmp->h);
753
754                     SDL_UnlockYUVOverlay (vp->bmp);
755                 }
756             }
757         }
758
759         calculate_display_rect(&rect, is->xleft, is->ytop, is->width, is->height, vp);
760
761         SDL_DisplayYUVOverlay(vp->bmp, &rect);
762     }
763 }
764
765 static inline int compute_mod(int a, int b)
766 {
767     return a < 0 ? a%b + b : a%b;
768 }
769
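/*
 * Note: compute_mod() is a mathematical modulo whose result is always in
 * [0, b) even for negative a, e.g. compute_mod(-3, 16) == 13, whereas the
 * plain C expression -3 % 16 yields -3.
 */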
770 static void video_audio_display(VideoState *s)
771 {
772     int i, i_start, x, y1, y, ys, delay, n, nb_display_channels;
773     int ch, channels, h, h2, bgcolor, fgcolor;
774     int64_t time_diff;
775     int rdft_bits, nb_freq;
776
777     for (rdft_bits = 1; (1 << rdft_bits) < 2 * s->height; rdft_bits++)
778         ;
779     nb_freq = 1 << (rdft_bits - 1);
780
781     /* compute display index : center on currently output samples */
782     channels = s->audio_tgt.channels;
783     nb_display_channels = channels;
784     if (!s->paused) {
785         int data_used= s->show_mode == SHOW_MODE_WAVES ? s->width : (2*nb_freq);
786         n = 2 * channels;
787         delay = s->audio_write_buf_size;
788         delay /= n;
789
790         /* to be more precise, we take into account the time spent since
791            the last buffer computation */
792         if (audio_callback_time) {
793             time_diff = av_gettime() - audio_callback_time;
794             delay -= (time_diff * s->audio_tgt.freq) / 1000000;
795         }
796
797         delay += 2 * data_used;
798         if (delay < data_used)
799             delay = data_used;
800
801         i_start= x = compute_mod(s->sample_array_index - delay * channels, SAMPLE_ARRAY_SIZE);
802         if (s->show_mode == SHOW_MODE_WAVES) {
803             h = INT_MIN;
804             for (i = 0; i < 1000; i += channels) {
805                 int idx = (SAMPLE_ARRAY_SIZE + x - i) % SAMPLE_ARRAY_SIZE;
806                 int a = s->sample_array[idx];
807                 int b = s->sample_array[(idx + 4 * channels) % SAMPLE_ARRAY_SIZE];
808                 int c = s->sample_array[(idx + 5 * channels) % SAMPLE_ARRAY_SIZE];
809                 int d = s->sample_array[(idx + 9 * channels) % SAMPLE_ARRAY_SIZE];
810                 int score = a - d;
811                 if (h < score && (b ^ c) < 0) {
812                     h = score;
813                     i_start = idx;
814                 }
815             }
816         }
817
818         s->last_i_start = i_start;
819     } else {
820         i_start = s->last_i_start;
821     }
822
823     bgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0x00);
824     if (s->show_mode == SHOW_MODE_WAVES) {
825         fill_rectangle(screen,
826                        s->xleft, s->ytop, s->width, s->height,
827                        bgcolor);
828
829         fgcolor = SDL_MapRGB(screen->format, 0xff, 0xff, 0xff);
830
831         /* total height for one channel */
832         h = s->height / nb_display_channels;
833         /* graph height / 2 */
834         h2 = (h * 9) / 20;
835         for (ch = 0; ch < nb_display_channels; ch++) {
836             i = i_start + ch;
837             y1 = s->ytop + ch * h + (h / 2); /* position of center line */
838             for (x = 0; x < s->width; x++) {
839                 y = (s->sample_array[i] * h2) >> 15;
840                 if (y < 0) {
841                     y = -y;
842                     ys = y1 - y;
843                 } else {
844                     ys = y1;
845                 }
846                 fill_rectangle(screen,
847                                s->xleft + x, ys, 1, y,
848                                fgcolor);
849                 i += channels;
850                 if (i >= SAMPLE_ARRAY_SIZE)
851                     i -= SAMPLE_ARRAY_SIZE;
852             }
853         }
854
855         fgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0xff);
856
857         for (ch = 1; ch < nb_display_channels; ch++) {
858             y = s->ytop + ch * h;
859             fill_rectangle(screen,
860                            s->xleft, y, s->width, 1,
861                            fgcolor);
862         }
863         SDL_UpdateRect(screen, s->xleft, s->ytop, s->width, s->height);
864     } else {
865         nb_display_channels= FFMIN(nb_display_channels, 2);
866         if (rdft_bits != s->rdft_bits) {
867             av_rdft_end(s->rdft);
868             av_free(s->rdft_data);
869             s->rdft = av_rdft_init(rdft_bits, DFT_R2C);
870             s->rdft_bits = rdft_bits;
871             s->rdft_data = av_malloc(4 * nb_freq * sizeof(*s->rdft_data));
872         }
873         {
874             FFTSample *data[2];
875             for (ch = 0; ch < nb_display_channels; ch++) {
876                 data[ch] = s->rdft_data + 2 * nb_freq * ch;
877                 i = i_start + ch;
878                 for (x = 0; x < 2 * nb_freq; x++) {
879                     double w = (x-nb_freq) * (1.0 / nb_freq);
880                     data[ch][x] = s->sample_array[i] * (1.0 - w * w);
881                     i += channels;
882                     if (i >= SAMPLE_ARRAY_SIZE)
883                         i -= SAMPLE_ARRAY_SIZE;
884                 }
885                 av_rdft_calc(s->rdft, data[ch]);
886             }
887             // least efficient way to do this; we should of course access it directly, but it's more than fast enough
888             for (y = 0; y < s->height; y++) {
889                 double w = 1 / sqrt(nb_freq);
890                 int a = sqrt(w * sqrt(data[0][2 * y + 0] * data[0][2 * y + 0] + data[0][2 * y + 1] * data[0][2 * y + 1]));
891                 int b = (nb_display_channels == 2 ) ? sqrt(w * sqrt(data[1][2 * y + 0] * data[1][2 * y + 0]
892                        + data[1][2 * y + 1] * data[1][2 * y + 1])) : a;
893                 a = FFMIN(a, 255);
894                 b = FFMIN(b, 255);
895                 fgcolor = SDL_MapRGB(screen->format, a, b, (a + b) / 2);
896
897                 fill_rectangle(screen,
898                             s->xpos, s->height-y, 1, 1,
899                             fgcolor);
900             }
901         }
902         SDL_UpdateRect(screen, s->xpos, s->ytop, 1, s->height);
903         if (!s->paused)
904             s->xpos++;
905         if (s->xpos >= s->width)
906             s->xpos= s->xleft;
907     }
908 }
909
910 static void stream_close(VideoState *is)
911 {
912     VideoPicture *vp;
913     int i;
914     /* XXX: use a special url_shutdown call to abort parse cleanly */
915     is->abort_request = 1;
916     SDL_WaitThread(is->read_tid, NULL);
917     SDL_WaitThread(is->refresh_tid, NULL);
918     packet_queue_destroy(&is->videoq);
919     packet_queue_destroy(&is->audioq);
920     packet_queue_destroy(&is->subtitleq);
921
922     /* free all pictures */
923     for (i = 0; i < VIDEO_PICTURE_QUEUE_SIZE; i++) {
924         vp = &is->pictq[i];
925 #if CONFIG_AVFILTER
926         avfilter_unref_bufferp(&vp->picref);
927 #endif
928         if (vp->bmp) {
929             SDL_FreeYUVOverlay(vp->bmp);
930             vp->bmp = NULL;
931         }
932     }
933     SDL_DestroyMutex(is->pictq_mutex);
934     SDL_DestroyCond(is->pictq_cond);
935     SDL_DestroyMutex(is->subpq_mutex);
936     SDL_DestroyCond(is->subpq_cond);
937     SDL_DestroyCond(is->continue_read_thread);
938 #if !CONFIG_AVFILTER
939     if (is->img_convert_ctx)
940         sws_freeContext(is->img_convert_ctx);
941 #endif
942     av_free(is);
943 }
944
945 static void do_exit(VideoState *is)
946 {
947     if (is) {
948         stream_close(is);
949     }
950     av_lockmgr_register(NULL);
951     uninit_opts();
952 #if CONFIG_AVFILTER
953     avfilter_uninit();
954     av_freep(&vfilters);
955 #endif
956     avformat_network_deinit();
957     if (show_status)
958         printf("\n");
959     SDL_Quit();
960     av_log(NULL, AV_LOG_QUIET, "%s", "");
961     exit(0);
962 }
963
964 static void sigterm_handler(int sig)
965 {
966     exit(123);
967 }
968
969 static int video_open(VideoState *is, int force_set_video_mode)
970 {
971     int flags = SDL_HWSURFACE | SDL_ASYNCBLIT | SDL_HWACCEL;
972     int w,h;
973     VideoPicture *vp = &is->pictq[is->pictq_rindex];
974     SDL_Rect rect;
975
976     if (is_full_screen) flags |= SDL_FULLSCREEN;
977     else                flags |= SDL_RESIZABLE;
978
979     if (is_full_screen && fs_screen_width) {
980         w = fs_screen_width;
981         h = fs_screen_height;
982     } else if (!is_full_screen && screen_width) {
983         w = screen_width;
984         h = screen_height;
985     } else if (vp->width) {
986         calculate_display_rect(&rect, 0, 0, INT_MAX, vp->height, vp);
987         w = rect.w;
988         h = rect.h;
989     } else {
990         w = 640;
991         h = 480;
992     }
993     if (screen && is->width == screen->w && screen->w == w
994        && is->height== screen->h && screen->h == h && !force_set_video_mode)
995         return 0;
996     screen = SDL_SetVideoMode(w, h, 0, flags);
997     if (!screen) {
998         fprintf(stderr, "SDL: could not set video mode - exiting\n");
999         do_exit(is);
1000     }
1001     if (!window_title)
1002         window_title = input_filename;
1003     SDL_WM_SetCaption(window_title, window_title);
1004
1005     is->width  = screen->w;
1006     is->height = screen->h;
1007
1008     return 0;
1009 }
1010
1011 /* display the current picture, if any */
1012 static void video_display(VideoState *is)
1013 {
1014     if (!screen)
1015         video_open(is, 0);
1016     if (is->audio_st && is->show_mode != SHOW_MODE_VIDEO)
1017         video_audio_display(is);
1018     else if (is->video_st)
1019         video_image_display(is);
1020 }
1021
1022 static int refresh_thread(void *opaque)
1023 {
1024     VideoState *is= opaque;
1025     while (!is->abort_request) {
1026         SDL_Event event;
1027         event.type = FF_REFRESH_EVENT;
1028         event.user.data1 = opaque;
1029         if (!is->refresh && (!is->paused || is->force_refresh)) {
1030             is->refresh = 1;
1031             SDL_PushEvent(&event);
1032         }
1033         //FIXME ideally we should wait for the correct time, but SDL's event passing is so slow it would be silly
1034         av_usleep(is->audio_st && is->show_mode != SHOW_MODE_VIDEO ? rdftspeed*1000 : 5000);
1035     }
1036     return 0;
1037 }
1038
1039 /* get the current audio clock value */
1040 static double get_audio_clock(VideoState *is)
1041 {
1042     if (is->paused) {
1043         return is->audio_current_pts;
1044     } else {
1045         return is->audio_current_pts_drift + av_gettime() / 1000000.0;
1046     }
1047 }
1048
1049 /* get the current video clock value */
1050 static double get_video_clock(VideoState *is)
1051 {
1052     if (is->paused) {
1053         return is->video_current_pts;
1054     } else {
1055         return is->video_current_pts_drift + av_gettime() / 1000000.0;
1056     }
1057 }
1058
1059 /* get the current external clock value */
1060 static double get_external_clock(VideoState *is)
1061 {
1062     if (is->paused) {
1063         return is->external_clock;
1064     } else {
1065         return is->external_clock_drift + av_gettime() / 1000000.0;
1066     }
1067 }
1068
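/*
 * Note on the *_drift clocks used above: each clock is stored as
 * "pts at the last update minus the wall-clock time of that update", so the
 * current value can be recovered at any moment as clock(now) = drift + now.
 * For example, if a pts of 10.0s was stored when av_gettime()/1e6 was 1000.0,
 * drift == -990.0, and half a second later the clock reads
 * -990.0 + 1000.5 == 10.5s.
 */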
1069 static int get_master_sync_type(VideoState *is) {
1070     if (is->av_sync_type == AV_SYNC_VIDEO_MASTER) {
1071         if (is->video_st)
1072             return AV_SYNC_VIDEO_MASTER;
1073         else
1074             return AV_SYNC_AUDIO_MASTER;
1075     } else if (is->av_sync_type == AV_SYNC_AUDIO_MASTER) {
1076         if (is->audio_st)
1077             return AV_SYNC_AUDIO_MASTER;
1078         else
1079             return AV_SYNC_EXTERNAL_CLOCK;
1080     } else {
1081         return AV_SYNC_EXTERNAL_CLOCK;
1082     }
1083 }
1084
1085 /* get the current master clock value */
1086 static double get_master_clock(VideoState *is)
1087 {
1088     double val;
1089
1090     switch (get_master_sync_type(is)) {
1091         case AV_SYNC_VIDEO_MASTER:
1092             val = get_video_clock(is);
1093             break;
1094         case AV_SYNC_AUDIO_MASTER:
1095             val = get_audio_clock(is);
1096             break;
1097         default:
1098             val = get_external_clock(is);
1099             break;
1100     }
1101     return val;
1102 }
1103
1104 static void update_external_clock_pts(VideoState *is, double pts)
1105 {
1106    is->external_clock_time = av_gettime();
1107    is->external_clock = pts;
1108    is->external_clock_drift = pts - is->external_clock_time / 1000000.0;
1109 }
1110
1111 static void check_external_clock_sync(VideoState *is, double pts) {
1112     if (fabs(get_external_clock(is) - pts) > AV_NOSYNC_THRESHOLD) {
1113         update_external_clock_pts(is, pts);
1114     }
1115 }
1116
1117 /* seek in the stream */
1118 static void stream_seek(VideoState *is, int64_t pos, int64_t rel, int seek_by_bytes)
1119 {
1120     if (!is->seek_req) {
1121         is->seek_pos = pos;
1122         is->seek_rel = rel;
1123         is->seek_flags &= ~AVSEEK_FLAG_BYTE;
1124         if (seek_by_bytes)
1125             is->seek_flags |= AVSEEK_FLAG_BYTE;
1126         is->seek_req = 1;
1127     }
1128 }
1129
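/*
 * Illustrative sketch (not part of ffplay): a relative seek of +10 seconds
 * from the current master clock, expressed in AV_TIME_BASE units. The keyboard
 * handling elsewhere in this file performs its seeks along these lines.
 */
#if 0
static void seek_forward_10s_example(VideoState *is)
{
    double pos = get_master_clock(is) + 10.0;
    stream_seek(is, (int64_t)(pos * AV_TIME_BASE), (int64_t)(10.0 * AV_TIME_BASE), 0);
}
#endif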
1130 /* pause or resume the video */
1131 static void stream_toggle_pause(VideoState *is)
1132 {
1133     if (is->paused) {
1134         is->frame_timer += av_gettime() / 1000000.0 + is->video_current_pts_drift - is->video_current_pts;
1135         if (is->read_pause_return != AVERROR(ENOSYS)) {
1136             is->video_current_pts = is->video_current_pts_drift + av_gettime() / 1000000.0;
1137         }
1138         is->video_current_pts_drift = is->video_current_pts - av_gettime() / 1000000.0;
1139     }
1140     update_external_clock_pts(is, get_external_clock(is));
1141     is->paused = !is->paused;
1142 }
1143
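/*
 * Note: the frame_timer adjustment above compensates for the time spent
 * paused. If playback was paused for 3.0s, the wall clock advanced while the
 * video clock did not, so frame_timer is pushed forward by roughly those
 * 3.0s; otherwise every queued frame would appear "late" on resume and be
 * displayed immediately.
 */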
1144 static double compute_target_delay(double delay, VideoState *is)
1145 {
1146     double sync_threshold, diff;
1147
1148     /* update delay to follow master synchronisation source */
1149     if (get_master_sync_type(is) != AV_SYNC_VIDEO_MASTER) {
1150         /* if video is slave, we try to correct big delays by
1151            duplicating or deleting a frame */
1152         diff = get_video_clock(is) - get_master_clock(is);
1153
1154         /* skip or repeat frame. We take into account the
1155            delay to compute the threshold. I still don't know
1156            if it is the best guess */
1157         sync_threshold = FFMAX(AV_SYNC_THRESHOLD, delay);
1158         if (fabs(diff) < AV_NOSYNC_THRESHOLD) {
1159             if (diff <= -sync_threshold)
1160                 delay = 0;
1161             else if (diff >= sync_threshold)
1162                 delay = 2 * delay;
1163         }
1164     }
1165
1166     av_dlog(NULL, "video: delay=%0.3f A-V=%f\n",
1167             delay, -diff);
1168
1169     return delay;
1170 }
1171
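/*
 * Worked example: with a nominal frame duration of 40ms the sync threshold is
 * FFMAX(0.01, 0.04) = 40ms. If the video clock is 50ms behind the master
 * clock (diff = -0.05 <= -0.04) the returned delay becomes 0 and the frame is
 * shown immediately; if it is 50ms ahead, the delay is doubled to 80ms so the
 * master clock can catch up.
 */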
1172 static void pictq_next_picture(VideoState *is) {
1173     /* update queue size and signal for next picture */
1174     if (++is->pictq_rindex == VIDEO_PICTURE_QUEUE_SIZE)
1175         is->pictq_rindex = 0;
1176
1177     SDL_LockMutex(is->pictq_mutex);
1178     is->pictq_size--;
1179     SDL_CondSignal(is->pictq_cond);
1180     SDL_UnlockMutex(is->pictq_mutex);
1181 }
1182
1183 static void pictq_prev_picture(VideoState *is) {
1184     VideoPicture *prevvp;
1185     /* update queue size and signal for the previous picture */
1186     prevvp = &is->pictq[(is->pictq_rindex + VIDEO_PICTURE_QUEUE_SIZE - 1) % VIDEO_PICTURE_QUEUE_SIZE];
1187     if (prevvp->allocated && !prevvp->skip) {
1188         SDL_LockMutex(is->pictq_mutex);
1189         if (is->pictq_size < VIDEO_PICTURE_QUEUE_SIZE - 1) {
1190             if (--is->pictq_rindex == -1)
1191                 is->pictq_rindex = VIDEO_PICTURE_QUEUE_SIZE - 1;
1192             is->pictq_size++;
1193         }
1194         SDL_CondSignal(is->pictq_cond);
1195         SDL_UnlockMutex(is->pictq_mutex);
1196     }
1197 }
1198
1199 static void update_video_pts(VideoState *is, double pts, int64_t pos, int serial) {
1200     double time = av_gettime() / 1000000.0;
1201     /* update current video pts */
1202     is->video_current_pts = pts;
1203     is->video_current_pts_drift = is->video_current_pts - time;
1204     is->video_current_pos = pos;
1205     is->frame_last_pts = pts;
1206     if (is->videoq.serial == serial)
1207         check_external_clock_sync(is, is->video_current_pts);
1208 }
1209
1210 /* called to display each frame */
1211 static void video_refresh(void *opaque)
1212 {
1213     VideoState *is = opaque;
1214     VideoPicture *vp;
1215     double time;
1216
1217     SubPicture *sp, *sp2;
1218
1219     if (is->video_st) {
1220         if (is->force_refresh)
1221             pictq_prev_picture(is);
1222 retry:
1223         if (is->pictq_size == 0) {
1224             SDL_LockMutex(is->pictq_mutex);
1225             if (is->frame_last_dropped_pts != AV_NOPTS_VALUE && is->frame_last_dropped_pts > is->frame_last_pts) {
1226                 update_video_pts(is, is->frame_last_dropped_pts, is->frame_last_dropped_pos, 0);
1227                 is->frame_last_dropped_pts = AV_NOPTS_VALUE;
1228             }
1229             SDL_UnlockMutex(is->pictq_mutex);
1230             // nothing to do, no picture to display in the queue
1231         } else {
1232             double last_duration, duration, delay;
1233             /* dequeue the picture */
1234             vp = &is->pictq[is->pictq_rindex];
1235
1236             if (vp->skip) {
1237                 pictq_next_picture(is);
1238                 goto retry;
1239             }
1240
1241             if (is->paused)
1242                 goto display;
1243
1244             /* compute nominal last_duration */
1245             last_duration = vp->pts - is->frame_last_pts;
1246             if (last_duration > 0 && last_duration < 10.0) {
1247                 /* if duration of the last frame was sane, update last_duration in video state */
1248                 is->frame_last_duration = last_duration;
1249             }
1250             delay = compute_target_delay(is->frame_last_duration, is);
1251
1252             time= av_gettime()/1000000.0;
1253             if (time < is->frame_timer + delay)
1254                 return;
1255
1256             if (delay > 0)
1257                 is->frame_timer += delay * FFMAX(1, floor((time-is->frame_timer) / delay));
1258
1259             SDL_LockMutex(is->pictq_mutex);
1260             update_video_pts(is, vp->pts, vp->pos, vp->serial);
1261             SDL_UnlockMutex(is->pictq_mutex);
1262
1263             if (is->pictq_size > 1) {
1264                 VideoPicture *nextvp = &is->pictq[(is->pictq_rindex + 1) % VIDEO_PICTURE_QUEUE_SIZE];
1265                 duration = nextvp->pts - vp->pts;
1266                 if((framedrop>0 || (framedrop && get_master_sync_type(is) != AV_SYNC_VIDEO_MASTER)) && time > is->frame_timer + duration){
1267                     is->frame_drops_late++;
1268                     pictq_next_picture(is);
1269                     goto retry;
1270                 }
1271             }
1272
1273             if (is->subtitle_st) {
1274                 if (is->subtitle_stream_changed) {
1275                     SDL_LockMutex(is->subpq_mutex);
1276
1277                     while (is->subpq_size) {
1278                         free_subpicture(&is->subpq[is->subpq_rindex]);
1279
1280                         /* update queue size and signal for next picture */
1281                         if (++is->subpq_rindex == SUBPICTURE_QUEUE_SIZE)
1282                             is->subpq_rindex = 0;
1283
1284                         is->subpq_size--;
1285                     }
1286                     is->subtitle_stream_changed = 0;
1287
1288                     SDL_CondSignal(is->subpq_cond);
1289                     SDL_UnlockMutex(is->subpq_mutex);
1290                 } else {
1291                     if (is->subpq_size > 0) {
1292                         sp = &is->subpq[is->subpq_rindex];
1293
1294                         if (is->subpq_size > 1)
1295                             sp2 = &is->subpq[(is->subpq_rindex + 1) % SUBPICTURE_QUEUE_SIZE];
1296                         else
1297                             sp2 = NULL;
1298
1299                         if ((is->video_current_pts > (sp->pts + ((float) sp->sub.end_display_time / 1000)))
1300                                 || (sp2 && is->video_current_pts > (sp2->pts + ((float) sp2->sub.start_display_time / 1000))))
1301                         {
1302                             free_subpicture(sp);
1303
1304                             /* update queue size and signal for next picture */
1305                             if (++is->subpq_rindex == SUBPICTURE_QUEUE_SIZE)
1306                                 is->subpq_rindex = 0;
1307
1308                             SDL_LockMutex(is->subpq_mutex);
1309                             is->subpq_size--;
1310                             SDL_CondSignal(is->subpq_cond);
1311                             SDL_UnlockMutex(is->subpq_mutex);
1312                         }
1313                     }
1314                 }
1315             }
1316
1317 display:
1318             /* display picture */
1319             if (!display_disable)
1320                 video_display(is);
1321
1322             pictq_next_picture(is);
1323         }
1324     } else if (is->audio_st) {
1325         /* draw the next audio frame */
1326
1327         /* if there is only an audio stream, then display the audio bars (better
1328            than nothing, just to test the implementation) */
1329
1330         /* display picture */
1331         if (!display_disable)
1332             video_display(is);
1333     }
1334     is->force_refresh = 0;
1335     if (show_status) {
1336         static int64_t last_time;
1337         int64_t cur_time;
1338         int aqsize, vqsize, sqsize;
1339         double av_diff;
1340
1341         cur_time = av_gettime();
1342         if (!last_time || (cur_time - last_time) >= 30000) {
1343             aqsize = 0;
1344             vqsize = 0;
1345             sqsize = 0;
1346             if (is->audio_st)
1347                 aqsize = is->audioq.size;
1348             if (is->video_st)
1349                 vqsize = is->videoq.size;
1350             if (is->subtitle_st)
1351                 sqsize = is->subtitleq.size;
1352             av_diff = 0;
1353             if (is->audio_st && is->video_st)
1354                 av_diff = get_audio_clock(is) - get_video_clock(is);
1355             printf("%7.2f A-V:%7.3f fd=%4d aq=%5dKB vq=%5dKB sq=%5dB f=%"PRId64"/%"PRId64"   \r",
1356                    get_master_clock(is),
1357                    av_diff,
1358                    is->frame_drops_early + is->frame_drops_late,
1359                    aqsize / 1024,
1360                    vqsize / 1024,
1361                    sqsize,
1362                    is->video_st ? is->video_st->codec->pts_correction_num_faulty_dts : 0,
1363                    is->video_st ? is->video_st->codec->pts_correction_num_faulty_pts : 0);
1364             fflush(stdout);
1365             last_time = cur_time;
1366         }
1367     }
1368 }
1369
1370 /* allocate a picture (this needs to be done in the main thread to avoid
1371    potential locking problems) */
1372 static void alloc_picture(VideoState *is)
1373 {
1374     VideoPicture *vp;
1375
1376     vp = &is->pictq[is->pictq_windex];
1377
1378     if (vp->bmp)
1379         SDL_FreeYUVOverlay(vp->bmp);
1380
1381 #if CONFIG_AVFILTER
1382     avfilter_unref_bufferp(&vp->picref);
1383 #endif
1384
1385     video_open(is, 0);
1386
1387     vp->bmp = SDL_CreateYUVOverlay(vp->width, vp->height,
1388                                    SDL_YV12_OVERLAY,
1389                                    screen);
1390     if (!vp->bmp || vp->bmp->pitches[0] < vp->width) {
1391         /* SDL allocates a buffer smaller than requested if the video
1392          * overlay hardware is unable to support the requested size. */
1393         fprintf(stderr, "Error: the video system does not support an image\n"
1394                         "size of %dx%d pixels. Try using -lowres or -vf \"scale=w:h\"\n"
1395                         "to reduce the image size.\n", vp->width, vp->height );
1396         do_exit(is);
1397     }
1398
1399     SDL_LockMutex(is->pictq_mutex);
1400     vp->allocated = 1;
1401     SDL_CondSignal(is->pictq_cond);
1402     SDL_UnlockMutex(is->pictq_mutex);
1403 }
1404
1405 static int queue_picture(VideoState *is, AVFrame *src_frame, double pts1, int64_t pos, int serial)
1406 {
1407     VideoPicture *vp;
1408     double frame_delay, pts = pts1;
1409
1410     /* compute the exact PTS for the picture if it is omitted in the stream
1411      * pts1 is the dts of the pkt / pts of the frame */
1412     if (pts != 0) {
1413         /* update video clock with pts, if present */
1414         is->video_clock = pts;
1415     } else {
1416         pts = is->video_clock;
1417     }
1418     /* update video clock for next frame */
1419     frame_delay = av_q2d(is->video_st->codec->time_base);
1420     /* for MPEG2, the frame can be repeated, so we update the
1421        clock accordingly */
1422     frame_delay += src_frame->repeat_pict * (frame_delay * 0.5);
1423     is->video_clock += frame_delay;
1424
1425 #if defined(DEBUG_SYNC) && 0
1426     printf("frame_type=%c clock=%0.3f pts=%0.3f\n",
1427            av_get_picture_type_char(src_frame->pict_type), pts, pts1);
1428 #endif
1429
1430     /* wait until we have space to put a new picture */
1431     SDL_LockMutex(is->pictq_mutex);
1432
1433     /* keep the last already displayed picture in the queue */
1434     while (is->pictq_size >= VIDEO_PICTURE_QUEUE_SIZE - 2 &&
1435            !is->videoq.abort_request) {
1436         SDL_CondWait(is->pictq_cond, is->pictq_mutex);
1437     }
1438     SDL_UnlockMutex(is->pictq_mutex);
1439
1440     if (is->videoq.abort_request)
1441         return -1;
1442
1443     vp = &is->pictq[is->pictq_windex];
1444
1445 #if CONFIG_AVFILTER
1446     vp->sample_aspect_ratio = ((AVFilterBufferRef *)src_frame->opaque)->video->sample_aspect_ratio;
1447 #else
1448     vp->sample_aspect_ratio = av_guess_sample_aspect_ratio(is->ic, is->video_st, src_frame);
1449 #endif
1450
1451     /* alloc or resize hardware picture buffer */
1452     if (!vp->bmp || vp->reallocate || !vp->allocated ||
1453         vp->width  != src_frame->width ||
1454         vp->height != src_frame->height) {
1455         SDL_Event event;
1456
1457         vp->allocated  = 0;
1458         vp->reallocate = 0;
1459         vp->width = src_frame->width;
1460         vp->height = src_frame->height;
1461
1462         /* the allocation must be done in the main thread to avoid
1463            locking problems. */
1464         event.type = FF_ALLOC_EVENT;
1465         event.user.data1 = is;
1466         SDL_PushEvent(&event);
1467
1468         /* wait until the picture is allocated */
1469         SDL_LockMutex(is->pictq_mutex);
1470         while (!vp->allocated && !is->videoq.abort_request) {
1471             SDL_CondWait(is->pictq_cond, is->pictq_mutex);
1472         }
1473         /* if the queue is aborted, we have to pop the pending ALLOC event or wait for the allocation to complete */
1474         if (is->videoq.abort_request && SDL_PeepEvents(&event, 1, SDL_GETEVENT, SDL_EVENTMASK(FF_ALLOC_EVENT)) != 1) {
1475             while (!vp->allocated) {
1476                 SDL_CondWait(is->pictq_cond, is->pictq_mutex);
1477             }
1478         }
1479         SDL_UnlockMutex(is->pictq_mutex);
1480
1481         if (is->videoq.abort_request)
1482             return -1;
1483     }
1484
1485     /* if the frame is not skipped, then display it */
1486     if (vp->bmp) {
1487         AVPicture pict = { { 0 } };
1488 #if CONFIG_AVFILTER
1489         avfilter_unref_bufferp(&vp->picref);
1490         vp->picref = src_frame->opaque;
1491 #endif
1492
1493         /* get a pointer to the bitmap */
1494         SDL_LockYUVOverlay (vp->bmp);
1495
1496         pict.data[0] = vp->bmp->pixels[0];
1497         pict.data[1] = vp->bmp->pixels[2];
1498         pict.data[2] = vp->bmp->pixels[1];
1499
1500         pict.linesize[0] = vp->bmp->pitches[0];
1501         pict.linesize[1] = vp->bmp->pitches[2];
1502         pict.linesize[2] = vp->bmp->pitches[1];
1503
1504 #if CONFIG_AVFILTER
1505         // FIXME use direct rendering
1506         av_picture_copy(&pict, (AVPicture *)src_frame,
1507                         src_frame->format, vp->width, vp->height);
1508 #else
1509         sws_flags = av_get_int(sws_opts, "sws_flags", NULL);
1510         is->img_convert_ctx = sws_getCachedContext(is->img_convert_ctx,
1511             vp->width, vp->height, src_frame->format, vp->width, vp->height,
1512             AV_PIX_FMT_YUV420P, sws_flags, NULL, NULL, NULL);
1513         if (is->img_convert_ctx == NULL) {
1514             fprintf(stderr, "Cannot initialize the conversion context\n");
1515             exit(1);
1516         }
1517         sws_scale(is->img_convert_ctx, src_frame->data, src_frame->linesize,
1518                   0, vp->height, pict.data, pict.linesize);
1519 #endif
1520         /* update the bitmap content */
1521         SDL_UnlockYUVOverlay(vp->bmp);
1522
1523         vp->pts = pts;
1524         vp->pos = pos;
1525         vp->skip = 0;
1526         vp->serial = serial;
1527
1528         /* now we can update the picture count */
1529         if (++is->pictq_windex == VIDEO_PICTURE_QUEUE_SIZE)
1530             is->pictq_windex = 0;
1531         SDL_LockMutex(is->pictq_mutex);
1532         is->pictq_size++;
1533         SDL_UnlockMutex(is->pictq_mutex);
1534     }
1535     return 0;
1536 }
1537
1538 static int get_video_frame(VideoState *is, AVFrame *frame, int64_t *pts, AVPacket *pkt, int *serial)
1539 {
1540     int got_picture, i;
1541
1542     if (packet_queue_get(&is->videoq, pkt, 1, serial) < 0)
1543         return -1;
1544
1545     if (pkt->data == flush_pkt.data) {
1546         avcodec_flush_buffers(is->video_st->codec);
1547
1548         SDL_LockMutex(is->pictq_mutex);
1549         // Make sure there are no long delay timers (ideally we should just flush the queue, but that's harder)
1550         for (i = 0; i < VIDEO_PICTURE_QUEUE_SIZE; i++) {
1551             is->pictq[i].skip = 1;
1552         }
1553         while (is->pictq_size && !is->videoq.abort_request) {
1554             SDL_CondWait(is->pictq_cond, is->pictq_mutex);
1555         }
1556         is->video_current_pos = -1;
1557         is->frame_last_pts = AV_NOPTS_VALUE;
1558         is->frame_last_duration = 0;
1559         is->frame_timer = (double)av_gettime() / 1000000.0;
1560         is->frame_last_dropped_pts = AV_NOPTS_VALUE;
1561         SDL_UnlockMutex(is->pictq_mutex);
1562
1563         return 0;
1564     }
1565
1566     if(avcodec_decode_video2(is->video_st->codec, frame, &got_picture, pkt) < 0)
1567         return 0;
1568
1569     if (got_picture) {
1570         int ret = 1;
1571
1572         if (decoder_reorder_pts == -1) {
1573             *pts = av_frame_get_best_effort_timestamp(frame);
1574         } else if (decoder_reorder_pts) {
1575             *pts = frame->pkt_pts;
1576         } else {
1577             *pts = frame->pkt_dts;
1578         }
1579
1580         if (*pts == AV_NOPTS_VALUE) {
1581             *pts = 0;
1582         }
1583
1584         if (framedrop>0 || (framedrop && get_master_sync_type(is) != AV_SYNC_VIDEO_MASTER)) {
1585             SDL_LockMutex(is->pictq_mutex);
1586             if (is->frame_last_pts != AV_NOPTS_VALUE && *pts) {
1587                 double clockdiff = get_video_clock(is) - get_master_clock(is);
1588                 double dpts = av_q2d(is->video_st->time_base) * *pts;
1589                 double ptsdiff = dpts - is->frame_last_pts;
1590                 if (fabs(clockdiff) < AV_NOSYNC_THRESHOLD &&
1591                      ptsdiff > 0 && ptsdiff < AV_NOSYNC_THRESHOLD &&
1592                      clockdiff + ptsdiff - is->frame_last_filter_delay < 0) {
1593                     is->frame_last_dropped_pos = pkt->pos;
1594                     is->frame_last_dropped_pts = dpts;
1595                     is->frame_drops_early++;
1596                     ret = 0;
1597                 }
1598             }
1599             SDL_UnlockMutex(is->pictq_mutex);
1600         }
1601
1602         return ret;
1603     }
1604     return 0;
1605 }
1606
1607 #if CONFIG_AVFILTER
1608 static int configure_filtergraph(AVFilterGraph *graph, const char *filtergraph,
1609                                  AVFilterContext *source_ctx, AVFilterContext *sink_ctx)
1610 {
1611     int ret;
1612     AVFilterInOut *outputs = NULL, *inputs = NULL;
1613
1614     if (filtergraph) {
1615         outputs = avfilter_inout_alloc();
1616         inputs  = avfilter_inout_alloc();
1617         if (!outputs || !inputs) {
1618             ret = AVERROR(ENOMEM);
1619             goto fail;
1620         }
1621
1622         outputs->name       = av_strdup("in");
1623         outputs->filter_ctx = source_ctx;
1624         outputs->pad_idx    = 0;
1625         outputs->next       = NULL;
1626
1627         inputs->name        = av_strdup("out");
1628         inputs->filter_ctx  = sink_ctx;
1629         inputs->pad_idx     = 0;
1630         inputs->next        = NULL;
1631
1632         if ((ret = avfilter_graph_parse(graph, filtergraph, &inputs, &outputs, NULL)) < 0)
1633             goto fail;
1634     } else {
1635         if ((ret = avfilter_link(source_ctx, 0, sink_ctx, 0)) < 0)
1636             goto fail;
1637     }
1638
1639     return avfilter_graph_config(graph, NULL);
1640 fail:
1641     avfilter_inout_free(&outputs);
1642     avfilter_inout_free(&inputs);
1643     return ret;
1644 }
1645
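     /* Build the video filter chain: a "buffer" source fed with the decoder and
      * stream parameters, the user-supplied -vf chain (if any), a crop filter
      * rounding width/height down to even values for SDL, a "format" filter
      * forcing yuv420p, and the ffplay buffer sink. */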
1646 static int configure_video_filters(AVFilterGraph *graph, VideoState *is, const char *vfilters)
1647 {
1648     static const enum AVPixelFormat pix_fmts[] = { AV_PIX_FMT_YUV420P, AV_PIX_FMT_NONE };
1649     char sws_flags_str[128];
1650     char buffersrc_args[256];
1651     int ret;
1652     AVBufferSinkParams *buffersink_params = av_buffersink_params_alloc();
1653     AVFilterContext *filt_src = NULL, *filt_out = NULL, *filt_format, *filt_crop;
1654     AVCodecContext *codec = is->video_st->codec;
1655
1656     snprintf(sws_flags_str, sizeof(sws_flags_str), "flags=%d", sws_flags);
1657     graph->scale_sws_opts = av_strdup(sws_flags_str);
1658
1659     snprintf(buffersrc_args, sizeof(buffersrc_args),
1660              "video_size=%dx%d:pix_fmt=%d:time_base=%d/%d:pixel_aspect=%d/%d",
1661              codec->width, codec->height, codec->pix_fmt,
1662              is->video_st->time_base.num, is->video_st->time_base.den,
1663              codec->sample_aspect_ratio.num, FFMAX(codec->sample_aspect_ratio.den, 1));
1664
1665     if ((ret = avfilter_graph_create_filter(&filt_src,
1666                                             avfilter_get_by_name("buffer"),
1667                                             "ffplay_buffer", buffersrc_args, NULL,
1668                                             graph)) < 0)
1669         return ret;
1670
1671     buffersink_params->pixel_fmts = pix_fmts;
1672     ret = avfilter_graph_create_filter(&filt_out,
1673                                        avfilter_get_by_name("ffbuffersink"),
1674                                        "ffplay_buffersink", NULL, buffersink_params, graph);
1675     av_freep(&buffersink_params);
1676     if (ret < 0)
1677         return ret;
1678
1679     /* The SDL YUV code does not handle odd width/height for some driver
1680      * combinations, therefore we crop the picture to an even width/height. */
1681     if ((ret = avfilter_graph_create_filter(&filt_crop,
1682                                             avfilter_get_by_name("crop"),
1683                                             "ffplay_crop", "floor(in_w/2)*2:floor(in_h/2)*2", NULL, graph)) < 0)
1684         return ret;
1685     if ((ret = avfilter_graph_create_filter(&filt_format,
1686                                             avfilter_get_by_name("format"),
1687                                             "format", "yuv420p", NULL, graph)) < 0)
1688         return ret;
1689     if ((ret = avfilter_link(filt_crop, 0, filt_format, 0)) < 0)
1690         return ret;
1691     if ((ret = avfilter_link(filt_format, 0, filt_out, 0)) < 0)
1692         return ret;
1693
1694     if ((ret = configure_filtergraph(graph, vfilters, filt_src, filt_crop)) < 0)
1695         return ret;
1696
1697     is->in_video_filter  = filt_src;
1698     is->out_video_filter = filt_out;
1699
1700     return ret;
1701 }
1702
1703 #endif  /* CONFIG_AVFILTER */
1704
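     /* Video decoding thread: pulls packets from the video queue, decodes them
      * into frames, optionally runs them through the libavfilter graph
      * (rebuilding it whenever the frame size or pixel format changes), and
      * queues the resulting pictures for display with their PTS converted to
      * seconds. */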
1705 static int video_thread(void *arg)
1706 {
1707     AVPacket pkt = { 0 };
1708     VideoState *is = arg;
1709     AVFrame *frame = avcodec_alloc_frame();
1710     int64_t pts_int = AV_NOPTS_VALUE, pos = -1;
1711     double pts;
1712     int ret;
1713     int serial = 0;
1714
1715 #if CONFIG_AVFILTER
1716     AVCodecContext *codec = is->video_st->codec;
1717     AVFilterGraph *graph = avfilter_graph_alloc();
1718     AVFilterContext *filt_out = NULL, *filt_in = NULL;
1719     int last_w = 0;
1720     int last_h = 0;
1721     enum AVPixelFormat last_format = -2;
1722
1723     if (codec->codec->capabilities & CODEC_CAP_DR1) {
1724         is->use_dr1 = 1;
1725         codec->get_buffer     = codec_get_buffer;
1726         codec->release_buffer = codec_release_buffer;
1727         codec->opaque         = &is->buffer_pool;
1728     }
1729 #endif
1730
1731     for (;;) {
1732 #if CONFIG_AVFILTER
1733         AVFilterBufferRef *picref;
1734         AVRational tb;
1735 #endif
1736         while (is->paused && !is->videoq.abort_request)
1737             SDL_Delay(10);
1738
1739         avcodec_get_frame_defaults(frame);
1740         av_free_packet(&pkt);
1741
1742         ret = get_video_frame(is, frame, &pts_int, &pkt, &serial);
1743         if (ret < 0)
1744             goto the_end;
1745
1746         if (!ret)
1747             continue;
1748
1749 #if CONFIG_AVFILTER
1750         if (   last_w != is->video_st->codec->width
1751             || last_h != is->video_st->codec->height
1752             || last_format != is->video_st->codec->pix_fmt) {
1753             av_log(NULL, AV_LOG_INFO, "Frame changed from size:%dx%d to size:%dx%d\n",
1754                    last_w, last_h, is->video_st->codec->width, is->video_st->codec->height);
1755             avfilter_graph_free(&graph);
1756             graph = avfilter_graph_alloc();
1757             if ((ret = configure_video_filters(graph, is, vfilters)) < 0) {
1758                 SDL_Event event;
1759                 event.type = FF_QUIT_EVENT;
1760                 event.user.data1 = is;
1761                 SDL_PushEvent(&event);
1762                 av_free_packet(&pkt);
1763                 goto the_end;
1764             }
1765             filt_in  = is->in_video_filter;
1766             filt_out = is->out_video_filter;
1767             last_w = is->video_st->codec->width;
1768             last_h = is->video_st->codec->height;
1769             last_format = is->video_st->codec->pix_fmt;
1770         }
1771
1772         frame->pts = pts_int;
1773         frame->sample_aspect_ratio = av_guess_sample_aspect_ratio(is->ic, is->video_st, frame);
1774         if (is->use_dr1 && frame->opaque) {
1775             FrameBuffer      *buf = frame->opaque;
1776             AVFilterBufferRef *fb = avfilter_get_video_buffer_ref_from_arrays(
1777                                         frame->data, frame->linesize,
1778                                         AV_PERM_READ | AV_PERM_PRESERVE,
1779                                         frame->width, frame->height,
1780                                         frame->format);
1781
1782             avfilter_copy_frame_props(fb, frame);
1783             fb->buf->priv           = buf;
1784             fb->buf->free           = filter_release_buffer;
1785
1786             buf->refcount++;
1787             av_buffersrc_add_ref(filt_in, fb, AV_BUFFERSRC_FLAG_NO_COPY);
1788
1789         } else
1790             av_buffersrc_write_frame(filt_in, frame);
1791
1792         av_free_packet(&pkt);
1793
1794         while (ret >= 0) {
1795             is->frame_last_returned_time = av_gettime() / 1000000.0;
1796
1797             ret = av_buffersink_get_buffer_ref(filt_out, &picref, 0);
1798             if (ret < 0) {
1799                 ret = 0;
1800                 break;
1801             }
1802
1803             is->frame_last_filter_delay = av_gettime() / 1000000.0 - is->frame_last_returned_time;
1804             if (fabs(is->frame_last_filter_delay) > AV_NOSYNC_THRESHOLD / 10.0)
1805                 is->frame_last_filter_delay = 0;
1806
1807             avfilter_copy_buf_props(frame, picref);
1808
1809             pts_int = picref->pts;
1810             tb      = filt_out->inputs[0]->time_base;
1811             pos     = picref->pos;
1812             frame->opaque = picref;
1813
1814             if (av_cmp_q(tb, is->video_st->time_base)) {
1815                 av_unused int64_t pts1 = pts_int;
1816                 pts_int = av_rescale_q(pts_int, tb, is->video_st->time_base);
1817                 av_dlog(NULL, "video_thread(): "
1818                         "tb:%d/%d pts:%"PRId64" -> tb:%d/%d pts:%"PRId64"\n",
1819                         tb.num, tb.den, pts1,
1820                         is->video_st->time_base.num, is->video_st->time_base.den, pts_int);
1821             }
1822             pts = pts_int * av_q2d(is->video_st->time_base);
1823             ret = queue_picture(is, frame, pts, pos, serial);
1824         }
1825 #else
1826         pts = pts_int * av_q2d(is->video_st->time_base);
1827         ret = queue_picture(is, frame, pts, pkt.pos, serial);
1828 #endif
1829
1830         if (ret < 0)
1831             goto the_end;
1832
1833         if (is->step)
1834             stream_toggle_pause(is);
1835     }
1836  the_end:
1837     avcodec_flush_buffers(is->video_st->codec);
1838 #if CONFIG_AVFILTER
1839     avfilter_graph_free(&graph);
1840 #endif
1841     av_free_packet(&pkt);
1842     avcodec_free_frame(&frame);
1843     return 0;
1844 }
1845
1846 static int subtitle_thread(void *arg)
1847 {
1848     VideoState *is = arg;
1849     SubPicture *sp;
1850     AVPacket pkt1, *pkt = &pkt1;
1851     int got_subtitle;
1852     double pts;
1853     int i, j;
1854     int r, g, b, y, u, v, a;
1855
1856     for (;;) {
1857         while (is->paused && !is->subtitleq.abort_request) {
1858             SDL_Delay(10);
1859         }
1860         if (packet_queue_get(&is->subtitleq, pkt, 1, NULL) < 0)
1861             break;
1862
1863         if (pkt->data == flush_pkt.data) {
1864             avcodec_flush_buffers(is->subtitle_st->codec);
1865             continue;
1866         }
1867         SDL_LockMutex(is->subpq_mutex);
1868         while (is->subpq_size >= SUBPICTURE_QUEUE_SIZE &&
1869                !is->subtitleq.abort_request) {
1870             SDL_CondWait(is->subpq_cond, is->subpq_mutex);
1871         }
1872         SDL_UnlockMutex(is->subpq_mutex);
1873
1874         if (is->subtitleq.abort_request)
1875             return 0;
1876
1877         sp = &is->subpq[is->subpq_windex];
1878
1879         /* NOTE: pts is the PTS of the _first_ picture beginning in
1880            this packet, if any */
1881         pts = 0;
1882         if (pkt->pts != AV_NOPTS_VALUE)
1883             pts = av_q2d(is->subtitle_st->time_base) * pkt->pts;
1884
1885         avcodec_decode_subtitle2(is->subtitle_st->codec, &sp->sub,
1886                                  &got_subtitle, pkt);
1887         if (got_subtitle && sp->sub.format == 0) {
1888             if (sp->sub.pts != AV_NOPTS_VALUE)
1889                 pts = sp->sub.pts / (double)AV_TIME_BASE;
1890             sp->pts = pts;
1891
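                 /* convert the bitmap subtitle palette in place from RGBA to
                  * YUVA (CCIR 601) so it can be blended onto the YUV overlay */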
1892             for (i = 0; i < sp->sub.num_rects; i++)
1893             {
1894                 for (j = 0; j < sp->sub.rects[i]->nb_colors; j++)
1895                 {
1896                     RGBA_IN(r, g, b, a, (uint32_t*)sp->sub.rects[i]->pict.data[1] + j);
1897                     y = RGB_TO_Y_CCIR(r, g, b);
1898                     u = RGB_TO_U_CCIR(r, g, b, 0);
1899                     v = RGB_TO_V_CCIR(r, g, b, 0);
1900                     YUVA_OUT((uint32_t*)sp->sub.rects[i]->pict.data[1] + j, y, u, v, a);
1901                 }
1902             }
1903
1904             /* now we can update the picture count */
1905             if (++is->subpq_windex == SUBPICTURE_QUEUE_SIZE)
1906                 is->subpq_windex = 0;
1907             SDL_LockMutex(is->subpq_mutex);
1908             is->subpq_size++;
1909             SDL_UnlockMutex(is->subpq_mutex);
1910         }
1911         av_free_packet(pkt);
1912     }
1913     return 0;
1914 }
1915
1916 /* copy decoded samples for the waveform / spectrum display */
1917 static void update_sample_display(VideoState *is, short *samples, int samples_size)
1918 {
1919     int size, len;
1920
1921     size = samples_size / sizeof(short);
1922     while (size > 0) {
1923         len = SAMPLE_ARRAY_SIZE - is->sample_array_index;
1924         if (len > size)
1925             len = size;
1926         memcpy(is->sample_array + is->sample_array_index, samples, len * sizeof(short));
1927         samples += len;
1928         is->sample_array_index += len;
1929         if (is->sample_array_index >= SAMPLE_ARRAY_SIZE)
1930             is->sample_array_index = 0;
1931         size -= len;
1932     }
1933 }
1934
1935 /* return the wanted number of samples to get better sync if sync_type is video
1936  * or external master clock */
1937 static int synchronize_audio(VideoState *is, int nb_samples)
1938 {
1939     int wanted_nb_samples = nb_samples;
1940
1941     /* if not master, then we try to remove or add samples to correct the clock */
1942     if (get_master_sync_type(is) != AV_SYNC_AUDIO_MASTER) {
1943         double diff, avg_diff;
1944         int min_nb_samples, max_nb_samples;
1945
1946         diff = get_audio_clock(is) - get_master_clock(is);
1947
1948         if (fabs(diff) < AV_NOSYNC_THRESHOLD) {
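                 /* accumulate an exponentially weighted sum of the A-V
                  * differences; avg_diff below rescales it by (1 - coef) to
                  * obtain the running average */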
1949             is->audio_diff_cum = diff + is->audio_diff_avg_coef * is->audio_diff_cum;
1950             if (is->audio_diff_avg_count < AUDIO_DIFF_AVG_NB) {
1951                 /* not enough measurements to have a reliable estimate */
1952                 is->audio_diff_avg_count++;
1953             } else {
1954                 /* estimate the A-V difference */
1955                 avg_diff = is->audio_diff_cum * (1.0 - is->audio_diff_avg_coef);
1956
1957                 if (fabs(avg_diff) >= is->audio_diff_threshold) {
1958                     wanted_nb_samples = nb_samples + (int)(diff * is->audio_src.freq);
1959                     min_nb_samples = ((nb_samples * (100 - SAMPLE_CORRECTION_PERCENT_MAX) / 100));
1960                     max_nb_samples = ((nb_samples * (100 + SAMPLE_CORRECTION_PERCENT_MAX) / 100));
1961                     wanted_nb_samples = FFMIN(FFMAX(wanted_nb_samples, min_nb_samples), max_nb_samples);
1962                 }
1963                 av_dlog(NULL, "diff=%f adiff=%f sample_diff=%d apts=%0.3f vpts=%0.3f %f\n",
1964                         diff, avg_diff, wanted_nb_samples - nb_samples,
1965                         is->audio_clock, is->video_clock, is->audio_diff_threshold);
1966             }
1967         } else {
1968             /* the difference is too big: it may be due to initial PTS errors,
1969                so reset the A-V averaging filter */
1970             is->audio_diff_avg_count = 0;
1971             is->audio_diff_cum       = 0;
1972         }
1973     }
1974
1975     return wanted_nb_samples;
1976 }
1977
1978 /* decode one audio frame and return its uncompressed size */
1979 static int audio_decode_frame(VideoState *is, double *pts_ptr)
1980 {
1981     AVPacket *pkt_temp = &is->audio_pkt_temp;
1982     AVPacket *pkt = &is->audio_pkt;
1983     AVCodecContext *dec = is->audio_st->codec;
1984     int len1, len2, data_size, resampled_data_size;
1985     int64_t dec_channel_layout;
1986     int got_frame;
1987     double pts;
1988     int new_packet = 0;
1989     int flush_complete = 0;
1990     int wanted_nb_samples;
1991
1992     for (;;) {
1993         /* NOTE: the audio packet can contain several frames */
1994         while (pkt_temp->size > 0 || (!pkt_temp->data && new_packet)) {
1995             if (!is->frame) {
1996                 if (!(is->frame = avcodec_alloc_frame()))
1997                     return AVERROR(ENOMEM);
1998             } else
1999                 avcodec_get_frame_defaults(is->frame);
2000
2001             if (is->paused)
2002                 return -1;
2003
2004             if (flush_complete)
2005                 break;
2006             new_packet = 0;
2007             len1 = avcodec_decode_audio4(dec, is->frame, &got_frame, pkt_temp);
2008             if (len1 < 0) {
2009                 /* if error, we skip the frame */
2010                 pkt_temp->size = 0;
2011                 break;
2012             }
2013
2014             pkt_temp->data += len1;
2015             pkt_temp->size -= len1;
2016
2017             if (!got_frame) {
2018                 /* stop sending empty packets if the decoder is finished */
2019                 if (!pkt_temp->data && dec->codec->capabilities & CODEC_CAP_DELAY)
2020                     flush_complete = 1;
2021                 continue;
2022             }
2023             data_size = av_samples_get_buffer_size(NULL, is->frame->channels,
2024                                                    is->frame->nb_samples,
2025                                                    is->frame->format, 1);
2026
2027             dec_channel_layout =
2028                 (is->frame->channel_layout && is->frame->channels == av_get_channel_layout_nb_channels(is->frame->channel_layout)) ?
2029                 is->frame->channel_layout : av_get_default_channel_layout(is->frame->channels);
2030             wanted_nb_samples = synchronize_audio(is, is->frame->nb_samples);
2031
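                 /* (re)create the resampler whenever the decoded frame no longer
                  * matches the last source format/layout/rate, or when sample
                  * compensation is requested and no context exists yet */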
2032             if (is->frame->format        != is->audio_src.fmt            ||
2033                 dec_channel_layout       != is->audio_src.channel_layout ||
2034                 is->frame->sample_rate   != is->audio_src.freq           ||
2035                 (wanted_nb_samples       != is->frame->nb_samples && !is->swr_ctx)) {
2036                 swr_free(&is->swr_ctx);
2037                 is->swr_ctx = swr_alloc_set_opts(NULL,
2038                                                  is->audio_tgt.channel_layout, is->audio_tgt.fmt, is->audio_tgt.freq,
2039                                                  dec_channel_layout,           is->frame->format, is->frame->sample_rate,
2040                                                  0, NULL);
2041                 if (!is->swr_ctx || swr_init(is->swr_ctx) < 0) {
2042                     fprintf(stderr, "Cannot create sample rate converter for conversion of %d Hz %s %d channels to %d Hz %s %d channels!\n",
2043                         is->frame->sample_rate,   av_get_sample_fmt_name(is->frame->format), (int)is->frame->channels,
2044                         is->audio_tgt.freq, av_get_sample_fmt_name(is->audio_tgt.fmt), is->audio_tgt.channels);
2045                     break;
2046                 }
2047                 is->audio_src.channel_layout = dec_channel_layout;
2048                 is->audio_src.channels = is->frame->channels;
2049                 is->audio_src.freq = is->frame->sample_rate;
2050                 is->audio_src.fmt = is->frame->format;
2051             }
2052
2053             if (is->swr_ctx) {
2054                 const uint8_t **in = (const uint8_t **)is->frame->extended_data;
2055                 uint8_t *out[] = {is->audio_buf2};
2056                 int out_count = sizeof(is->audio_buf2) / is->audio_tgt.channels / av_get_bytes_per_sample(is->audio_tgt.fmt);
2057                 if (wanted_nb_samples != is->frame->nb_samples) {
2058                     if (swr_set_compensation(is->swr_ctx, (wanted_nb_samples - is->frame->nb_samples) * is->audio_tgt.freq / is->frame->sample_rate,
2059                                                 wanted_nb_samples * is->audio_tgt.freq / is->frame->sample_rate) < 0) {
2060                         fprintf(stderr, "swr_set_compensation() failed\n");
2061                         break;
2062                     }
2063                 }
2064                 len2 = swr_convert(is->swr_ctx, out, out_count, in, is->frame->nb_samples);
2065                 if (len2 < 0) {
2066                     fprintf(stderr, "swr_convert() failed\n");
2067                     break;
2068                 }
2069                 if (len2 == out_count) {
2070                     fprintf(stderr, "warning: audio buffer is probably too small\n");
2071                     swr_init(is->swr_ctx);
2072                 }
2073                 is->audio_buf = is->audio_buf2;
2074                 resampled_data_size = len2 * is->audio_tgt.channels * av_get_bytes_per_sample(is->audio_tgt.fmt);
2075             } else {
2076                 is->audio_buf = is->frame->data[0];
2077                 resampled_data_size = data_size;
2078             }
2079
2080             /* derive the frame pts from the running audio clock */
2081             pts = is->audio_clock;
2082             *pts_ptr = pts;
2083             is->audio_clock += (double)data_size /
2084                 (is->frame->channels * is->frame->sample_rate * av_get_bytes_per_sample(is->frame->format));
2085 #ifdef DEBUG
2086             {
2087                 static double last_clock;
2088                 printf("audio: delay=%0.3f clock=%0.3f pts=%0.3f\n",
2089                        is->audio_clock - last_clock,
2090                        is->audio_clock, pts);
2091                 last_clock = is->audio_clock;
2092             }
2093 #endif
2094             return resampled_data_size;
2095         }
2096
2097         /* free the current packet */
2098         if (pkt->data)
2099             av_free_packet(pkt);
2100         memset(pkt_temp, 0, sizeof(*pkt_temp));
2101
2102         if (is->paused || is->audioq.abort_request) {
2103             return -1;
2104         }
2105
2106         if (is->audioq.nb_packets == 0)
2107             SDL_CondSignal(is->continue_read_thread);
2108
2109         /* read next packet */
2110         if ((new_packet = packet_queue_get(&is->audioq, pkt, 1, &is->audio_pkt_temp_serial)) < 0)
2111             return -1;
2112
2113         if (pkt->data == flush_pkt.data) {
2114             avcodec_flush_buffers(dec);
2115             flush_complete = 0;
2116         }
2117
2118         *pkt_temp = *pkt;
2119
2120         /* update the audio clock with the packet pts, if available */
2121         if (pkt->pts != AV_NOPTS_VALUE) {
2122             is->audio_clock = av_q2d(is->audio_st->time_base)*pkt->pts;
2123         }
2124     }
2125 }
2126
2127 /* prepare a new audio buffer */
2128 static void sdl_audio_callback(void *opaque, Uint8 *stream, int len)
2129 {
2130     VideoState *is = opaque;
2131     int audio_size, len1;
2132     int bytes_per_sec;
2133     int frame_size = av_samples_get_buffer_size(NULL, is->audio_tgt.channels, 1, is->audio_tgt.fmt, 1);
2134     double pts;
2135
2136     audio_callback_time = av_gettime();
2137
2138     while (len > 0) {
2139         if (is->audio_buf_index >= is->audio_buf_size) {
2140            audio_size = audio_decode_frame(is, &pts);
2141            if (audio_size < 0) {
2142                 /* if error, just output silence */
2143                is->audio_buf      = is->silence_buf;
2144                is->audio_buf_size = sizeof(is->silence_buf) / frame_size * frame_size;
2145            } else {
2146                if (is->show_mode != SHOW_MODE_VIDEO)
2147                    update_sample_display(is, (int16_t *)is->audio_buf, audio_size);
2148                is->audio_buf_size = audio_size;
2149            }
2150            is->audio_buf_index = 0;
2151         }
2152         len1 = is->audio_buf_size - is->audio_buf_index;
2153         if (len1 > len)
2154             len1 = len;
2155         memcpy(stream, (uint8_t *)is->audio_buf + is->audio_buf_index, len1);
2156         len -= len1;
2157         stream += len1;
2158         is->audio_buf_index += len1;
2159     }
2160     bytes_per_sec = is->audio_tgt.freq * is->audio_tgt.channels * av_get_bytes_per_sample(is->audio_tgt.fmt);
2161     is->audio_write_buf_size = is->audio_buf_size - is->audio_buf_index;
2162     /* Let's assume the audio driver that is used by SDL has two periods. */
2163     is->audio_current_pts = is->audio_clock - (double)(2 * is->audio_hw_buf_size + is->audio_write_buf_size) / bytes_per_sec;
2164     is->audio_current_pts_drift = is->audio_current_pts - audio_callback_time / 1000000.0;
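         /* only feed the external clock synchronization with audio timestamps
          * belonging to the current packet queue serial, i.e. not with stale
          * pre-seek data */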
2165     if (is->audioq.serial == is->audio_pkt_temp_serial)
2166         check_external_clock_sync(is, is->audio_current_pts);
2167 }
2168
2169 static int audio_open(void *opaque, int64_t wanted_channel_layout, int wanted_nb_channels, int wanted_sample_rate, struct AudioParams *audio_hw_params)
2170 {
2171     SDL_AudioSpec wanted_spec, spec;
2172     const char *env;
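         /* fallback table: if SDL_OpenAudio() fails with N channels, retry with
          * next_nb_channels[N] channels (0 means give up) */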
2173     const int next_nb_channels[] = {0, 0, 1, 6, 2, 6, 4, 6};
2174
2175     env = SDL_getenv("SDL_AUDIO_CHANNELS");
2176     if (env) {
2177         wanted_nb_channels = atoi(env);
2178         wanted_channel_layout = av_get_default_channel_layout(wanted_nb_channels);
2179     }
2180     if (!wanted_channel_layout || wanted_nb_channels != av_get_channel_layout_nb_channels(wanted_channel_layout)) {
2181         wanted_channel_layout = av_get_default_channel_layout(wanted_nb_channels);
2182         wanted_channel_layout &= ~AV_CH_LAYOUT_STEREO_DOWNMIX;
2183     }
2184     wanted_spec.channels = av_get_channel_layout_nb_channels(wanted_channel_layout);
2185     wanted_spec.freq = wanted_sample_rate;
2186     if (wanted_spec.freq <= 0 || wanted_spec.channels <= 0) {
2187         fprintf(stderr, "Invalid sample rate or channel count!\n");
2188         return -1;
2189     }
2190     wanted_spec.format = AUDIO_S16SYS;
2191     wanted_spec.silence = 0;
2192     wanted_spec.samples = SDL_AUDIO_BUFFER_SIZE;
2193     wanted_spec.callback = sdl_audio_callback;
2194     wanted_spec.userdata = opaque;
2195     while (SDL_OpenAudio(&wanted_spec, &spec) < 0) {
2196         fprintf(stderr, "SDL_OpenAudio (%d channels): %s\n", wanted_spec.channels, SDL_GetError());
2197         wanted_spec.channels = next_nb_channels[FFMIN(7, wanted_spec.channels)];
2198         if (!wanted_spec.channels) {
2199             fprintf(stderr, "No more channel combinations to try, audio open failed\n");
2200             return -1;
2201         }
2202         wanted_channel_layout = av_get_default_channel_layout(wanted_spec.channels);
2203     }
2204     if (spec.format != AUDIO_S16SYS) {
2205         fprintf(stderr, "SDL advised audio format %d is not supported!\n", spec.format);
2206         return -1;
2207     }
2208     if (spec.channels != wanted_spec.channels) {
2209         wanted_channel_layout = av_get_default_channel_layout(spec.channels);
2210         if (!wanted_channel_layout) {
2211             fprintf(stderr, "SDL advised channel count %d is not supported!\n", spec.channels);
2212             return -1;
2213         }
2214     }
2215
2216     audio_hw_params->fmt = AV_SAMPLE_FMT_S16;
2217     audio_hw_params->freq = spec.freq;
2218     audio_hw_params->channel_layout = wanted_channel_layout;
2219     audio_hw_params->channels =  spec.channels;
2220     return spec.size;
2221 }
2222
2223 /* open a given stream. Return 0 if OK */
2224 static int stream_component_open(VideoState *is, int stream_index)
2225 {
2226     AVFormatContext *ic = is->ic;
2227     AVCodecContext *avctx;
2228     AVCodec *codec;
2229     AVDictionary *opts;
2230     AVDictionaryEntry *t = NULL;
2231
2232     if (stream_index < 0 || stream_index >= ic->nb_streams)
2233         return -1;
2234     avctx = ic->streams[stream_index]->codec;
2235
2236     codec = avcodec_find_decoder(avctx->codec_id);
2237     opts = filter_codec_opts(codec_opts, avctx->codec_id, ic, ic->streams[stream_index], codec);
2238
2239     switch(avctx->codec_type){
2240         case AVMEDIA_TYPE_AUDIO   : is->last_audio_stream    = stream_index; if(audio_codec_name   ) codec= avcodec_find_decoder_by_name(   audio_codec_name); break;
2241         case AVMEDIA_TYPE_SUBTITLE: is->last_subtitle_stream = stream_index; if(subtitle_codec_name) codec= avcodec_find_decoder_by_name(subtitle_codec_name); break;
2242         case AVMEDIA_TYPE_VIDEO   : is->last_video_stream    = stream_index; if(video_codec_name   ) codec= avcodec_find_decoder_by_name(   video_codec_name); break;
2243     }
2244     if (!codec)
2245         return -1;
2246
2247     avctx->workaround_bugs   = workaround_bugs;
2248     avctx->lowres            = lowres;
2249     if(avctx->lowres > codec->max_lowres){
2250         av_log(avctx, AV_LOG_WARNING, "The maximum value for lowres supported by the decoder is %d\n",
2251                 codec->max_lowres);
2252         avctx->lowres= codec->max_lowres;
2253     }
2254     avctx->idct_algo         = idct;
2255     avctx->skip_frame        = skip_frame;
2256     avctx->skip_idct         = skip_idct;
2257     avctx->skip_loop_filter  = skip_loop_filter;
2258     avctx->error_concealment = error_concealment;
2259
2260     if(avctx->lowres) avctx->flags |= CODEC_FLAG_EMU_EDGE;
2261     if (fast)   avctx->flags2 |= CODEC_FLAG2_FAST;
2262     if(codec->capabilities & CODEC_CAP_DR1)
2263         avctx->flags |= CODEC_FLAG_EMU_EDGE;
2264
2265     if (!av_dict_get(opts, "threads", NULL, 0))
2266         av_dict_set(&opts, "threads", "auto", 0);
2267     if (!codec ||
2268         avcodec_open2(avctx, codec, &opts) < 0)
2269         return -1;
2270     if ((t = av_dict_get(opts, "", NULL, AV_DICT_IGNORE_SUFFIX))) {
2271         av_log(NULL, AV_LOG_ERROR, "Option %s not found.\n", t->key);
2272         return AVERROR_OPTION_NOT_FOUND;
2273     }
2274
2275     /* prepare audio output */
2276     if (avctx->codec_type == AVMEDIA_TYPE_AUDIO) {
2277         int audio_hw_buf_size = audio_open(is, avctx->channel_layout, avctx->channels, avctx->sample_rate, &is->audio_src);
2278         if (audio_hw_buf_size < 0)
2279             return -1;
2280         is->audio_hw_buf_size = audio_hw_buf_size;
2281         is->audio_tgt = is->audio_src;
2282     }
2283
2284     ic->streams[stream_index]->discard = AVDISCARD_DEFAULT;
2285     switch (avctx->codec_type) {
2286     case AVMEDIA_TYPE_AUDIO:
2287         is->audio_stream = stream_index;
2288         is->audio_st = ic->streams[stream_index];
2289         is->audio_buf_size  = 0;
2290         is->audio_buf_index = 0;
2291
2292         /* init averaging filter */
2293         is->audio_diff_avg_coef  = exp(log(0.01) / AUDIO_DIFF_AVG_NB);
2294         is->audio_diff_avg_count = 0;
2295         /* since we do not have a precise enough audio FIFO fullness measure,
2296            we correct audio sync only if the error is larger than this threshold */
2297         is->audio_diff_threshold = 2.0 * is->audio_hw_buf_size / av_samples_get_buffer_size(NULL, is->audio_tgt.channels, is->audio_tgt.freq, is->audio_tgt.fmt, 1);
2298
2299         memset(&is->audio_pkt, 0, sizeof(is->audio_pkt));
2300         memset(&is->audio_pkt_temp, 0, sizeof(is->audio_pkt_temp));
2301         packet_queue_start(&is->audioq);
2302         SDL_PauseAudio(0);
2303         break;
2304     case AVMEDIA_TYPE_VIDEO:
2305         is->video_stream = stream_index;
2306         is->video_st = ic->streams[stream_index];
2307
2308         packet_queue_start(&is->videoq);
2309         is->video_tid = SDL_CreateThread(video_thread, is);
2310         break;
2311     case AVMEDIA_TYPE_SUBTITLE:
2312         is->subtitle_stream = stream_index;
2313         is->subtitle_st = ic->streams[stream_index];
2314         packet_queue_start(&is->subtitleq);
2315
2316         is->subtitle_tid = SDL_CreateThread(subtitle_thread, is);
2317         break;
2318     default:
2319         break;
2320     }
2321     return 0;
2322 }
2323
2324 static void stream_component_close(VideoState *is, int stream_index)
2325 {
2326     AVFormatContext *ic = is->ic;
2327     AVCodecContext *avctx;
2328
2329     if (stream_index < 0 || stream_index >= ic->nb_streams)
2330         return;
2331     avctx = ic->streams[stream_index]->codec;
2332
2333     switch (avctx->codec_type) {
2334     case AVMEDIA_TYPE_AUDIO:
2335         packet_queue_abort(&is->audioq);
2336
2337         SDL_CloseAudio();
2338
2339         packet_queue_flush(&is->audioq);
2340         av_free_packet(&is->audio_pkt);
2341         swr_free(&is->swr_ctx);
2342         av_freep(&is->audio_buf1);
2343         is->audio_buf = NULL;
2344         avcodec_free_frame(&is->frame);
2345
2346         if (is->rdft) {
2347             av_rdft_end(is->rdft);
2348             av_freep(&is->rdft_data);
2349             is->rdft = NULL;
2350             is->rdft_bits = 0;
2351         }
2352         break;
2353     case AVMEDIA_TYPE_VIDEO:
2354         packet_queue_abort(&is->videoq);
2355
2356         /* note: we also signal this condition variable to make sure we
2357            unblock the video thread in all cases */
2358         SDL_LockMutex(is->pictq_mutex);
2359         SDL_CondSignal(is->pictq_cond);
2360         SDL_UnlockMutex(is->pictq_mutex);
2361
2362         SDL_WaitThread(is->video_tid, NULL);
2363
2364         packet_queue_flush(&is->videoq);
2365         break;
2366     case AVMEDIA_TYPE_SUBTITLE:
2367         packet_queue_abort(&is->subtitleq);
2368
2369         /* note: we also signal this condition variable to make sure we
2370            unblock the subtitle thread in all cases */
2371         SDL_LockMutex(is->subpq_mutex);
2372         is->subtitle_stream_changed = 1;
2373
2374         SDL_CondSignal(is->subpq_cond);
2375         SDL_UnlockMutex(is->subpq_mutex);
2376
2377         SDL_WaitThread(is->subtitle_tid, NULL);
2378
2379         packet_queue_flush(&is->subtitleq);
2380         break;
2381     default:
2382         break;
2383     }
2384
2385     ic->streams[stream_index]->discard = AVDISCARD_ALL;
2386     avcodec_close(avctx);
2387 #if CONFIG_AVFILTER
2388     free_buffer_pool(&is->buffer_pool);
2389 #endif
2390     switch (avctx->codec_type) {
2391     case AVMEDIA_TYPE_AUDIO:
2392         is->audio_st = NULL;
2393         is->audio_stream = -1;
2394         break;
2395     case AVMEDIA_TYPE_VIDEO:
2396         is->video_st = NULL;
2397         is->video_stream = -1;
2398         break;
2399     case AVMEDIA_TYPE_SUBTITLE:
2400         is->subtitle_st = NULL;
2401         is->subtitle_stream = -1;
2402         break;
2403     default:
2404         break;
2405     }
2406 }
2407
2408 static int decode_interrupt_cb(void *ctx)
2409 {
2410     VideoState *is = ctx;
2411     return is->abort_request;
2412 }
2413
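     /* Guess whether the input is a live/real-time source (rtp, rtsp or sdp
      * format, or an rtp:/udp: URL); for such sources the read thread switches
      * to unbounded buffering. */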
2414 static int is_realtime(AVFormatContext *s)
2415 {
2416     if(   !strcmp(s->iformat->name, "rtp")
2417        || !strcmp(s->iformat->name, "rtsp")
2418        || !strcmp(s->iformat->name, "sdp")
2419     )
2420         return 1;
2421
2422     if(s->pb && (   !strncmp(s->filename, "rtp:", 4)
2423                  || !strncmp(s->filename, "udp:", 4)
2424                 )
2425     )
2426         return 1;
2427     return 0;
2428 }
2429
2430 /* this thread gets the stream from the disk or the network */
2431 static int read_thread(void *arg)
2432 {
2433     VideoState *is = arg;
2434     AVFormatContext *ic = NULL;
2435     int err, i, ret;
2436     int st_index[AVMEDIA_TYPE_NB];
2437     AVPacket pkt1, *pkt = &pkt1;
2438     int eof = 0;
2439     int pkt_in_play_range = 0;
2440     AVDictionaryEntry *t;
2441     AVDictionary **opts;
2442     int orig_nb_streams;
2443     SDL_mutex *wait_mutex = SDL_CreateMutex();
2444
2445     memset(st_index, -1, sizeof(st_index));
2446     is->last_video_stream = is->video_stream = -1;
2447     is->last_audio_stream = is->audio_stream = -1;
2448     is->last_subtitle_stream = is->subtitle_stream = -1;
2449
2450     ic = avformat_alloc_context();
2451     ic->interrupt_callback.callback = decode_interrupt_cb;
2452     ic->interrupt_callback.opaque = is;
2453     err = avformat_open_input(&ic, is->filename, is->iformat, &format_opts);
2454     if (err < 0) {
2455         print_error(is->filename, err);
2456         ret = -1;
2457         goto fail;
2458     }
2459     if ((t = av_dict_get(format_opts, "", NULL, AV_DICT_IGNORE_SUFFIX))) {
2460         av_log(NULL, AV_LOG_ERROR, "Option %s not found.\n", t->key);
2461         ret = AVERROR_OPTION_NOT_FOUND;
2462         goto fail;
2463     }
2464     is->ic = ic;
2465
2466     if (genpts)
2467         ic->flags |= AVFMT_FLAG_GENPTS;
2468
2469     opts = setup_find_stream_info_opts(ic, codec_opts);
2470     orig_nb_streams = ic->nb_streams;
2471
2472     err = avformat_find_stream_info(ic, opts);
2473     if (err < 0) {
2474         fprintf(stderr, "%s: could not find codec parameters\n", is->filename);
2475         ret = -1;
2476         goto fail;
2477     }
2478     for (i = 0; i < orig_nb_streams; i++)
2479         av_dict_free(&opts[i]);
2480     av_freep(&opts);
2481
2482     if (ic->pb)
2483         ic->pb->eof_reached = 0; // FIXME hack, ffplay maybe should not use url_feof() to test for the end
2484
2485     if (seek_by_bytes < 0)
2486         seek_by_bytes = !!(ic->iformat->flags & AVFMT_TS_DISCONT);
2487
2488     /* if seeking requested, we execute it */
2489     if (start_time != AV_NOPTS_VALUE) {
2490         int64_t timestamp;
2491
2492         timestamp = start_time;
2493         /* add the stream start time */
2494         if (ic->start_time != AV_NOPTS_VALUE)
2495             timestamp += ic->start_time;
2496         ret = avformat_seek_file(ic, -1, INT64_MIN, timestamp, INT64_MAX, 0);
2497         if (ret < 0) {
2498             fprintf(stderr, "%s: could not seek to position %0.3f\n",
2499                     is->filename, (double)timestamp / AV_TIME_BASE);
2500         }
2501     }
2502
2503     for (i = 0; i < ic->nb_streams; i++)
2504         ic->streams[i]->discard = AVDISCARD_ALL;
2505     if (!video_disable)
2506         st_index[AVMEDIA_TYPE_VIDEO] =
2507             av_find_best_stream(ic, AVMEDIA_TYPE_VIDEO,
2508                                 wanted_stream[AVMEDIA_TYPE_VIDEO], -1, NULL, 0);
2509     if (!audio_disable)
2510         st_index[AVMEDIA_TYPE_AUDIO] =
2511             av_find_best_stream(ic, AVMEDIA_TYPE_AUDIO,
2512                                 wanted_stream[AVMEDIA_TYPE_AUDIO],
2513                                 st_index[AVMEDIA_TYPE_VIDEO],
2514                                 NULL, 0);
2515     if (!video_disable)
2516         st_index[AVMEDIA_TYPE_SUBTITLE] =
2517             av_find_best_stream(ic, AVMEDIA_TYPE_SUBTITLE,
2518                                 wanted_stream[AVMEDIA_TYPE_SUBTITLE],
2519                                 (st_index[AVMEDIA_TYPE_AUDIO] >= 0 ?
2520                                  st_index[AVMEDIA_TYPE_AUDIO] :
2521                                  st_index[AVMEDIA_TYPE_VIDEO]),
2522                                 NULL, 0);
2523     if (show_status) {
2524         av_dump_format(ic, 0, is->filename, 0);
2525     }
2526
2527     is->show_mode = show_mode;
2528
2529     /* open the streams */
2530     if (st_index[AVMEDIA_TYPE_AUDIO] >= 0) {
2531         stream_component_open(is, st_index[AVMEDIA_TYPE_AUDIO]);
2532     }
2533
2534     ret = -1;
2535     if (st_index[AVMEDIA_TYPE_VIDEO] >= 0) {
2536         ret = stream_component_open(is, st_index[AVMEDIA_TYPE_VIDEO]);
2537     }
2538     is->refresh_tid = SDL_CreateThread(refresh_thread, is);
2539     if (is->show_mode == SHOW_MODE_NONE)
2540         is->show_mode = ret >= 0 ? SHOW_MODE_VIDEO : SHOW_MODE_RDFT;
2541
2542     if (st_index[AVMEDIA_TYPE_SUBTITLE] >= 0) {
2543         stream_component_open(is, st_index[AVMEDIA_TYPE_SUBTITLE]);
2544     }
2545
2546     if (is->video_stream < 0 && is->audio_stream < 0) {
2547         fprintf(stderr, "%s: could not open codecs\n", is->filename);
2548         ret = -1;
2549         goto fail;
2550     }
2551
2552     if (infinite_buffer < 0 && is_realtime(ic))
2553         infinite_buffer = 1;
2554
2555     for (;;) {
2556         if (is->abort_request)
2557             break;
2558         if (is->paused != is->last_paused) {
2559             is->last_paused = is->paused;
2560             if (is->paused)
2561                 is->read_pause_return = av_read_pause(ic);
2562             else
2563                 av_read_play(ic);
2564         }
2565 #if CONFIG_RTSP_DEMUXER || CONFIG_MMSH_PROTOCOL
2566         if (is->paused &&
2567                 (!strcmp(ic->iformat->name, "rtsp") ||
2568                  (ic->pb && !strncmp(input_filename, "mmsh:", 5)))) {
2569             /* wait 10 ms to avoid trying to get another packet */
2570             /* XXX: horrible */
2571             SDL_Delay(10);
2572             continue;
2573         }
2574 #endif
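             /* a seek was requested: seek in the demuxer, then flush every packet
              * queue and push a flush packet so that the decoders get reset */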
2575         if (is->seek_req) {
2576             int64_t seek_target = is->seek_pos;
2577             int64_t seek_min    = is->seek_rel > 0 ? seek_target - is->seek_rel + 2: INT64_MIN;
2578             int64_t seek_max    = is->seek_rel < 0 ? seek_target - is->seek_rel - 2: INT64_MAX;
2579 // FIXME the +-2 is because rounding is not done in the correct direction when generating
2580 //      the seek_pos/seek_rel variables
2581
2582             ret = avformat_seek_file(is->ic, -1, seek_min, seek_target, seek_max, is->seek_flags);
2583             if (ret < 0) {
2584                 fprintf(stderr, "%s: error while seeking\n", is->ic->filename);
2585             } else {
2586                 if (is->audio_stream >= 0) {
2587                     packet_queue_flush(&is->audioq);
2588                     packet_queue_put(&is->audioq, &flush_pkt);
2589                 }
2590                 if (is->subtitle_stream >= 0) {
2591                     packet_queue_flush(&is->subtitleq);
2592                     packet_queue_put(&is->subtitleq, &flush_pkt);
2593                 }
2594                 if (is->video_stream >= 0) {
2595                     packet_queue_flush(&is->videoq);
2596                     packet_queue_put(&is->videoq, &flush_pkt);
2597                 }
2598             }
2599             update_external_clock_pts(is, (seek_target + ic->start_time) / (double)AV_TIME_BASE);
2600             is->seek_req = 0;
2601             eof = 0;
2602         }
2603         if (is->que_attachments_req) {
2604             avformat_queue_attached_pictures(ic);
2605             is->que_attachments_req = 0;
2606         }
2607
2608         /* if the queues are full, no need to read more */
2609         if (infinite_buffer<1 &&
2610               (is->audioq.size + is->videoq.size + is->subtitleq.size > MAX_QUEUE_SIZE
2611             || (   (is->audioq   .nb_packets > MIN_FRAMES || is->audio_stream < 0 || is->audioq.abort_request)
2612                 && (is->videoq   .nb_packets > MIN_FRAMES || is->video_stream < 0 || is->videoq.abort_request)
2613                 && (is->subtitleq.nb_packets > MIN_FRAMES || is->subtitle_stream < 0 || is->subtitleq.abort_request)))) {
2614             /* wait 10 ms */
2615             SDL_LockMutex(wait_mutex);
2616             SDL_CondWaitTimeout(is->continue_read_thread, wait_mutex, 10);
2617             SDL_UnlockMutex(wait_mutex);
2618             continue;
2619         }
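             /* at EOF, queue empty packets so that decoders with CODEC_CAP_DELAY
              * can drain; once every queue has been consumed, either loop back to
              * the start or exit if -autoexit was given */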
2620         if (eof) {
2621             if (is->video_stream >= 0) {
2622                 av_init_packet(pkt);
2623                 pkt->data = NULL;
2624                 pkt->size = 0;
2625                 pkt->stream_index = is->video_stream;
2626                 packet_queue_put(&is->videoq, pkt);
2627             }
2628             if (is->audio_stream >= 0 &&
2629                 is->audio_st->codec->codec->capabilities & CODEC_CAP_DELAY) {
2630                 av_init_packet(pkt);
2631                 pkt->data = NULL;
2632                 pkt->size = 0;
2633                 pkt->stream_index = is->audio_stream;
2634                 packet_queue_put(&is->audioq, pkt);
2635             }
2636             SDL_Delay(10);
2637             if (is->audioq.size + is->videoq.size + is->subtitleq.size == 0) {
2638                 if (loop != 1 && (!loop || --loop)) {
2639                     stream_seek(is, start_time != AV_NOPTS_VALUE ? start_time : 0, 0, 0);
2640                 } else if (autoexit) {
2641                     ret = AVERROR_EOF;
2642                     goto fail;
2643                 }
2644             }
2645             eof=0;
2646             continue;
2647         }
2648         ret = av_read_frame(ic, pkt);
2649         if (ret < 0) {
2650             if (ret == AVERROR_EOF || url_feof(ic->pb))
2651                 eof = 1;
2652             if (ic->pb && ic->pb->error)
2653                 break;
2654             SDL_LockMutex(wait_mutex);
2655             SDL_CondWaitTimeout(is->continue_read_thread, wait_mutex, 10);
2656             SDL_UnlockMutex(wait_mutex);
2657             continue;
2658         }
2659         /* check if packet is in play range specified by user, then queue, otherwise discard */
2660         pkt_in_play_range = duration == AV_NOPTS_VALUE ||
2661                 (pkt->pts - ic->streams[pkt->stream_index]->start_time) *
2662                 av_q2d(ic->streams[pkt->stream_index]->time_base) -
2663                 (double)(start_time != AV_NOPTS_VALUE ? start_time : 0) / 1000000
2664                 <= ((double)duration / 1000000);
2665         if (pkt->stream_index == is->audio_stream && pkt_in_play_range) {
2666             packet_queue_put(&is->audioq, pkt);
2667         } else if (pkt->stream_index == is->video_stream && pkt_in_play_range) {
2668             packet_queue_put(&is->videoq, pkt);
2669         } else if (pkt->stream_index == is->subtitle_stream && pkt_in_play_range) {
2670             packet_queue_put(&is->subtitleq, pkt);
2671         } else {
2672             av_free_packet(pkt);
2673         }
2674     }
2675     /* wait until the end */
2676     while (!is->abort_request) {
2677         SDL_Delay(100);
2678     }
2679
2680     ret = 0;
2681  fail:
2682     /* close each stream */
2683     if (is->audio_stream >= 0)
2684         stream_component_close(is, is->audio_stream);
2685     if (is->video_stream >= 0)
2686         stream_component_close(is, is->video_stream);
2687     if (is->subtitle_stream >= 0)
2688         stream_component_close(is, is->subtitle_stream);
2689     if (is->ic) {
2690         avformat_close_input(&is->ic);
2691     }
2692
2693     if (ret != 0) {
2694         SDL_Event event;
2695
2696         event.type = FF_QUIT_EVENT;
2697         event.user.data1 = is;
2698         SDL_PushEvent(&event);
2699     }
2700     SDL_DestroyMutex(wait_mutex);
2701     return 0;
2702 }
2703
2704 static VideoState *stream_open(const char *filename, AVInputFormat *iformat)
2705 {
2706     VideoState *is;
2707
2708     is = av_mallocz(sizeof(VideoState));
2709     if (!is)
2710         return NULL;
2711     av_strlcpy(is->filename, filename, sizeof(is->filename));
2712     is->iformat = iformat;
2713     is->ytop    = 0;
2714     is->xleft   = 0;
2715
2716     /* start video display */
2717     is->pictq_mutex = SDL_CreateMutex();
2718     is->pictq_cond  = SDL_CreateCond();
2719
2720     is->subpq_mutex = SDL_CreateMutex();
2721     is->subpq_cond  = SDL_CreateCond();
2722
2723     packet_queue_init(&is->videoq);
2724     packet_queue_init(&is->audioq);
2725     packet_queue_init(&is->subtitleq);
2726
2727     is->continue_read_thread = SDL_CreateCond();
2728
2729     update_external_clock_pts(is, 0.0);
2730     is->audio_current_pts_drift = -av_gettime() / 1000000.0;
2731     is->video_current_pts_drift = is->audio_current_pts_drift;
2732     is->av_sync_type = av_sync_type;
2733     is->read_tid     = SDL_CreateThread(read_thread, is);
2734     if (!is->read_tid) {
2735         av_free(is);
2736         return NULL;
2737     }
2738     return is;
2739 }
2740
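     /* Switch to the next stream of the given type, wrapping around; for
      * subtitles, wrapping past the last stream turns them off. */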
2741 static void stream_cycle_channel(VideoState *is, int codec_type)
2742 {
2743     AVFormatContext *ic = is->ic;
2744     int start_index, stream_index;
2745     int old_index;
2746     AVStream *st;
2747
2748     if (codec_type == AVMEDIA_TYPE_VIDEO) {
2749         start_index = is->last_video_stream;
2750         old_index = is->video_stream;
2751     } else if (codec_type == AVMEDIA_TYPE_AUDIO) {
2752         start_index = is->last_audio_stream;
2753         old_index = is->audio_stream;
2754     } else {
2755         start_index = is->last_subtitle_stream;
2756         old_index = is->subtitle_stream;
2757     }
2758     stream_index = start_index;
2759     for (;;) {
2760         if (++stream_index >= is->ic->nb_streams)
2761         {
2762             if (codec_type == AVMEDIA_TYPE_SUBTITLE)
2763             {
2764                 stream_index = -1;
2765                 is->last_subtitle_stream = -1;
2766                 goto the_end;
2767             }
2768             if (start_index == -1)
2769                 return;
2770             stream_index = 0;
2771         }
2772         if (stream_index == start_index)
2773             return;
2774         st = ic->streams[stream_index];
2775         if (st->codec->codec_type == codec_type) {
2776             /* check that parameters are OK */
2777             switch (codec_type) {
2778             case AVMEDIA_TYPE_AUDIO:
2779                 if (st->codec->sample_rate != 0 &&
2780                     st->codec->channels != 0)
2781                     goto the_end;
2782                 break;
2783             case AVMEDIA_TYPE_VIDEO:
2784             case AVMEDIA_TYPE_SUBTITLE:
2785                 goto the_end;
2786             default:
2787                 break;
2788             }
2789         }
2790     }
2791  the_end:
2792     stream_component_close(is, old_index);
2793     stream_component_open(is, stream_index);
2794     if (codec_type == AVMEDIA_TYPE_VIDEO)
2795         is->que_attachments_req = 1;
2796 }
2797
2798
2799 static void toggle_full_screen(VideoState *is)
2800 {
2801 #if defined(__APPLE__) && SDL_VERSION_ATLEAST(1, 2, 14)
2802     /* OS X needs to reallocate the SDL overlays */
2803     int i;
2804     for (i = 0; i < VIDEO_PICTURE_QUEUE_SIZE; i++)
2805         is->pictq[i].reallocate = 1;
2806 #endif
2807     is_full_screen = !is_full_screen;
2808     video_open(is, 1);
2809 }
2810
2811 static void toggle_pause(VideoState *is)
2812 {
2813     stream_toggle_pause(is);
2814     is->step = 0;
2815 }
2816
2817 static void step_to_next_frame(VideoState *is)
2818 {
2819     /* if the stream is paused, unpause it, then step */
2820     if (is->paused)
2821         stream_toggle_pause(is);
2822     is->step = 1;
2823 }
2824
2825 static void toggle_audio_display(VideoState *is)
2826 {
2827     int bgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0x00);
2828     is->show_mode = (is->show_mode + 1) % SHOW_MODE_NB;
2829     fill_rectangle(screen,
2830                 is->xleft, is->ytop, is->width, is->height,
2831                 bgcolor);
2832     SDL_UpdateRect(screen, is->xleft, is->ytop, is->width, is->height);
2833 }
2834
2835 /* handle an event sent by the GUI */
2836 static void event_loop(VideoState *cur_stream)
2837 {
2838     SDL_Event event;
2839     double incr, pos, frac;
2840
2841     for (;;) {
2842         double x;
2843         SDL_WaitEvent(&event);
2844         switch (event.type) {
2845         case SDL_KEYDOWN:
2846             if (exit_on_keydown) {
2847                 do_exit(cur_stream);
2848                 break;
2849             }
2850             switch (event.key.keysym.sym) {
2851             case SDLK_ESCAPE:
2852             case SDLK_q:
2853                 do_exit(cur_stream);
2854                 break;
2855             case SDLK_f:
2856                 toggle_full_screen(cur_stream);
2857                 cur_stream->force_refresh = 1;
2858                 break;
2859             case SDLK_p:
2860             case SDLK_SPACE:
2861                 toggle_pause(cur_stream);
2862                 break;
2863             case SDLK_s: // S: Step to next frame
2864                 step_to_next_frame(cur_stream);
2865                 break;
2866             case SDLK_a:
2867                 stream_cycle_channel(cur_stream, AVMEDIA_TYPE_AUDIO);
2868                 break;
2869             case SDLK_v:
2870                 stream_cycle_channel(cur_stream, AVMEDIA_TYPE_VIDEO);
2871                 break;
2872             case SDLK_t:
2873                 stream_cycle_channel(cur_stream, AVMEDIA_TYPE_SUBTITLE);
2874                 break;
2875             case SDLK_w:
2876                 toggle_audio_display(cur_stream);
2877                 cur_stream->force_refresh = 1;
2878                 break;
2879             case SDLK_PAGEUP:
2880                 incr = 600.0;
2881                 goto do_seek;
2882             case SDLK_PAGEDOWN:
2883                 incr = -600.0;
2884                 goto do_seek;
2885             case SDLK_LEFT:
2886                 incr = -10.0;
2887                 goto do_seek;
2888             case SDLK_RIGHT:
2889                 incr = 10.0;
2890                 goto do_seek;
2891             case SDLK_UP:
2892                 incr = 60.0;
2893                 goto do_seek;
2894             case SDLK_DOWN:
2895                 incr = -60.0;
2896             do_seek:
2897                     if (seek_by_bytes) {
2898                         if (cur_stream->video_stream >= 0 && cur_stream->video_current_pos >= 0) {
2899                             pos = cur_stream->video_current_pos;
2900                         } else if (cur_stream->audio_stream >= 0 && cur_stream->audio_pkt.pos >= 0) {
2901                             pos = cur_stream->audio_pkt.pos;
2902                         } else
2903                             pos = avio_tell(cur_stream->ic->pb);
2904                         if (cur_stream->ic->bit_rate)
2905                             incr *= cur_stream->ic->bit_rate / 8.0;
2906                         else
2907                             incr *= 180000.0;
2908                         pos += incr;
2909                         stream_seek(cur_stream, pos, incr, 1);
2910                     } else {
2911                         pos = get_master_clock(cur_stream);
2912                         pos += incr;
2913                         stream_seek(cur_stream, (int64_t)(pos * AV_TIME_BASE), (int64_t)(incr * AV_TIME_BASE), 0);
2914                     }
2915                 break;
2916             default:
2917                 break;
2918             }
2919             break;
2920         case SDL_VIDEOEXPOSE:
2921             cur_stream->force_refresh = 1;
2922             break;
2923         case SDL_MOUSEBUTTONDOWN:
2924             if (exit_on_mousedown) {
2925                 do_exit(cur_stream);
2926                 break;
2927             }
2928         case SDL_MOUSEMOTION:
2929             if (event.type == SDL_MOUSEBUTTONDOWN) {
2930                 x = event.button.x;
2931             } else {
2932                 if (event.motion.state != SDL_PRESSED)
2933                     break;
2934                 x = event.motion.x;
2935             }
2936                 if (seek_by_bytes || cur_stream->ic->duration <= 0) {
2937                     uint64_t size =  avio_size(cur_stream->ic->pb);
2938                     stream_seek(cur_stream, size*x/cur_stream->width, 0, 1);
2939                 } else {
2940                     int64_t ts;
2941                     int ns, hh, mm, ss;
2942                     int tns, thh, tmm, tss;
2943                     tns  = cur_stream->ic->duration / 1000000LL;
2944                     thh  = tns / 3600;
2945                     tmm  = (tns % 3600) / 60;
2946                     tss  = (tns % 60);
2947                     frac = x / cur_stream->width;
2948                     ns   = frac * tns;
2949                     hh   = ns / 3600;
2950                     mm   = (ns % 3600) / 60;
2951                     ss   = (ns % 60);
2952                     fprintf(stderr, "Seek to %2.0f%% (%2d:%02d:%02d) of total duration (%2d:%02d:%02d)       \n", frac*100,
2953                             hh, mm, ss, thh, tmm, tss);
2954                     ts = frac * cur_stream->ic->duration;
2955                     if (cur_stream->ic->start_time != AV_NOPTS_VALUE)
2956                         ts += cur_stream->ic->start_time;
2957                     stream_seek(cur_stream, ts, 0, 0);
2958                 }
2959             break;
2960         case SDL_VIDEORESIZE:
2961                 screen = SDL_SetVideoMode(event.resize.w, event.resize.h, 0,
2962                                           SDL_HWSURFACE|SDL_RESIZABLE|SDL_ASYNCBLIT|SDL_HWACCEL);
2963                 screen_width  = cur_stream->width  = event.resize.w;
2964                 screen_height = cur_stream->height = event.resize.h;
2965                 cur_stream->force_refresh = 1;
2966             break;
2967         case SDL_QUIT:
2968         case FF_QUIT_EVENT:
2969             do_exit(cur_stream);
2970             break;
2971         case FF_ALLOC_EVENT:
2972             alloc_picture(event.user.data1);
2973             break;
2974         case FF_REFRESH_EVENT:
2975             video_refresh(event.user.data1);
2976             cur_stream->refresh = 0;
2977             break;
2978         default:
2979             break;
2980         }
2981     }
2982 }
2983
2984 static int opt_frame_size(void *optctx, const char *opt, const char *arg)
2985 {
2986     av_log(NULL, AV_LOG_WARNING, "Option -s is deprecated, use -video_size.\n");
2987     return opt_default(NULL, "video_size", arg);
2988 }
2989
2990 static int opt_width(void *optctx, const char *opt, const char *arg)
2991 {
2992     screen_width = parse_number_or_die(opt, arg, OPT_INT64, 1, INT_MAX);
2993     return 0;
2994 }
2995
2996 static int opt_height(void *optctx, const char *opt, const char *arg)
2997 {
2998     screen_height = parse_number_or_die(opt, arg, OPT_INT64, 1, INT_MAX);
2999     return 0;
3000 }
3001
3002 static int opt_format(void *optctx, const char *opt, const char *arg)
3003 {
3004     file_iformat = av_find_input_format(arg);
3005     if (!file_iformat) {
3006         fprintf(stderr, "Unknown input format: %s\n", arg);
3007         return AVERROR(EINVAL);
3008     }
3009     return 0;
3010 }
3011
3012 static int opt_frame_pix_fmt(void *optctx, const char *opt, const char *arg)
3013 {
3014     av_log(NULL, AV_LOG_WARNING, "Option -pix_fmt is deprecated, use -pixel_format.\n");
3015     return opt_default(NULL, "pixel_format", arg);
3016 }
3017
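/* -sync selects which clock drives A/V synchronization: audio, video, or an
   external clock. */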
3018 static int opt_sync(void *optctx, const char *opt, const char *arg)
3019 {
3020     if (!strcmp(arg, "audio"))
3021         av_sync_type = AV_SYNC_AUDIO_MASTER;
3022     else if (!strcmp(arg, "video"))
3023         av_sync_type = AV_SYNC_VIDEO_MASTER;
3024     else if (!strcmp(arg, "ext"))
3025         av_sync_type = AV_SYNC_EXTERNAL_CLOCK;
3026     else {
3027         fprintf(stderr, "Unknown value for %s: %s\n", opt, arg);
3028         exit(1);
3029     }
3030     return 0;
3031 }
3032
3033 static int opt_seek(void *optctx, const char *opt, const char *arg)
3034 {
3035     start_time = parse_time_or_die(opt, arg, 1);
3036     return 0;
3037 }
3038
3039 static int opt_duration(void *optctx, const char *opt, const char *arg)
3040 {
3041     duration = parse_time_or_die(opt, arg, 1);
3042     return 0;
3043 }
3044
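/* -showmode accepts a mode name (video, waves, rdft) or the corresponding
   numeric value. */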
3045 static int opt_show_mode(void *optctx, const char *opt, const char *arg)
3046 {
3047     show_mode = !strcmp(arg, "video") ? SHOW_MODE_VIDEO :
3048                 !strcmp(arg, "waves") ? SHOW_MODE_WAVES :
3049                 !strcmp(arg, "rdft" ) ? SHOW_MODE_RDFT  :
3050                 parse_number_or_die(opt, arg, OPT_INT, 0, SHOW_MODE_NB-1);
3051     return 0;
3052 }
3053
3054 static void opt_input_file(void *optctx, const char *filename)
3055 {
3056     if (input_filename) {
3057         fprintf(stderr, "Argument '%s' provided as input filename, but '%s' was already specified.\n",
3058                 filename, input_filename);
3059         exit(1);
3060     }
3061     if (!strcmp(filename, "-"))
3062         filename = "pipe:";
3063     input_filename = filename;
3064 }
3065
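/* Handle -codec:a, -codec:s and -codec:v: the trailing stream specifier
   character selects which decoder name gets overridden. */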
3066 static int opt_codec(void *o, const char *opt, const char *arg)
3067 {
3068     switch (opt[strlen(opt)-1]) {
3069     case 'a' :    audio_codec_name = arg; break;
3070     case 's' : subtitle_codec_name = arg; break;
3071     case 'v' :    video_codec_name = arg; break;
3072     }
3073     return 0;
3074 }
3075
3076 static int dummy;
3077
3078 static const OptionDef options[] = {
3079 #include "cmdutils_common_opts.h"
3080     { "x", HAS_ARG, { .func_arg = opt_width }, "force displayed width", "width" },
3081     { "y", HAS_ARG, { .func_arg = opt_height }, "force displayed height", "height" },
3082     { "s", HAS_ARG | OPT_VIDEO, { .func_arg = opt_frame_size }, "set frame size (WxH or abbreviation)", "size" },
3083     { "fs", OPT_BOOL, { &is_full_screen }, "force full screen" },
3084     { "an", OPT_BOOL, { &audio_disable }, "disable audio" },
3085     { "vn", OPT_BOOL, { &video_disable }, "disable video" },
3086     { "ast", OPT_INT | HAS_ARG | OPT_EXPERT, { &wanted_stream[AVMEDIA_TYPE_AUDIO] }, "select desired audio stream", "stream_number" },
3087     { "vst", OPT_INT | HAS_ARG | OPT_EXPERT, { &wanted_stream[AVMEDIA_TYPE_VIDEO] }, "select desired video stream", "stream_number" },
3088     { "sst", OPT_INT | HAS_ARG | OPT_EXPERT, { &wanted_stream[AVMEDIA_TYPE_SUBTITLE] }, "select desired subtitle stream", "stream_number" },
3089     { "ss", HAS_ARG, { .func_arg = opt_seek }, "seek to a given position in seconds", "pos" },
3090     { "t", HAS_ARG, { .func_arg = opt_duration }, "play \"duration\" seconds of audio/video", "duration" },
3091     { "bytes", OPT_INT | HAS_ARG, { &seek_by_bytes }, "seek by bytes 0=off 1=on -1=auto", "val" },
3092     { "nodisp", OPT_BOOL, { &display_disable }, "disable graphical display" },
3093     { "f", HAS_ARG, { .func_arg = opt_format }, "force format", "fmt" },
3094     { "pix_fmt", HAS_ARG | OPT_EXPERT | OPT_VIDEO, { .func_arg = opt_frame_pix_fmt }, "set pixel format", "format" },
3095     { "stats", OPT_BOOL | OPT_EXPERT, { &show_status }, "show status", "" },
3096     { "bug", OPT_INT | HAS_ARG | OPT_EXPERT, { &workaround_bugs }, "work around bugs", "" },
3097     { "fast", OPT_BOOL | OPT_EXPERT, { &fast }, "enable non-spec-compliant optimizations", "" },
3098     { "genpts", OPT_BOOL | OPT_EXPERT, { &genpts }, "generate pts", "" },
3099     { "drp", OPT_INT | HAS_ARG | OPT_EXPERT, { &decoder_reorder_pts }, "let decoder reorder pts 0=off 1=on -1=auto", ""},
3100     { "lowres", OPT_INT | HAS_ARG | OPT_EXPERT, { &lowres }, "force low resolution decoding", "" },
3101     { "skiploop", OPT_INT | HAS_ARG | OPT_EXPERT, { &skip_loop_filter }, "skip loop filtering for the selected frames", "" },
3102     { "skipframe", OPT_INT | HAS_ARG | OPT_EXPERT, { &skip_frame }, "skip decoding of the selected frames", "" },
3103     { "skipidct", OPT_INT | HAS_ARG | OPT_EXPERT, { &skip_idct }, "skip IDCT/dequantization for the selected frames", "" },
3104     { "idct", OPT_INT | HAS_ARG | OPT_EXPERT, { &idct }, "set idct algo",  "algo" },
3105     { "ec", OPT_INT | HAS_ARG | OPT_EXPERT, { &error_concealment }, "set error concealment options",  "bit_mask" },
3106     { "sync", HAS_ARG | OPT_EXPERT, { .func_arg = opt_sync }, "set audio-video sync type (type=audio/video/ext)", "type" },
3107     { "autoexit", OPT_BOOL | OPT_EXPERT, { &autoexit }, "exit at the end", "" },
3108     { "exitonkeydown", OPT_BOOL | OPT_EXPERT, { &exit_on_keydown }, "exit on key down", "" },
3109     { "exitonmousedown", OPT_BOOL | OPT_EXPERT, { &exit_on_mousedown }, "exit on mouse down", "" },
3110     { "loop", OPT_INT | HAS_ARG | OPT_EXPERT, { &loop }, "set number of times the playback shall be looped", "loop count" },
3111     { "framedrop", OPT_BOOL | OPT_EXPERT, { &framedrop }, "drop frames when cpu is too slow", "" },
3112     { "infbuf", OPT_BOOL | OPT_EXPERT, { &infinite_buffer }, "don't limit the input buffer size (useful with realtime streams)", "" },
3113     { "window_title", OPT_STRING | HAS_ARG, { &window_title }, "set window title", "window title" },
3114 #if CONFIG_AVFILTER
3115     { "vf", OPT_STRING | HAS_ARG, { &vfilters }, "video filters", "filter list" },
3116 #endif
3117     { "rdftspeed", OPT_INT | HAS_ARG | OPT_AUDIO | OPT_EXPERT, { &rdftspeed }, "rdft speed", "msecs" },
3118     { "showmode", HAS_ARG, { .func_arg = opt_show_mode}, "select show mode (0 = video, 1 = waves, 2 = RDFT)", "mode" },
3119     { "default", HAS_ARG | OPT_AUDIO | OPT_VIDEO | OPT_EXPERT, { .func_arg = opt_default }, "generic catch all option", "" },
3120     { "i", OPT_BOOL, { &dummy }, "read specified file", "input_file" },
3121     { "codec", HAS_ARG, { .func_arg = opt_codec }, "force decoder", "decoder" },
3122     { NULL, },
3123 };
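
/*
 * A few illustrative invocations using the options defined above; the input
 * file names are placeholders, not part of the tool:
 *
 *   ffplay -autoexit -nodisp input.mp3   play audio only, exit when playback ends
 *   ffplay -ss 1:00 -t 30 input.mkv      seek to 1 minute, play 30 seconds
 *   ffplay -showmode 2 input.wav         visualize the audio as an RDFT spectrum
 *   ffplay -sync ext -an input.ts        external clock as master, audio disabled
 */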
3124
3125 static void show_usage(void)
3126 {
3127     av_log(NULL, AV_LOG_INFO, "Simple media player\n");
3128     av_log(NULL, AV_LOG_INFO, "usage: %s [options] input_file\n", program_name);
3129     av_log(NULL, AV_LOG_INFO, "\n");
3130 }
3131
3132 void show_help_default(const char *opt, const char *arg)
3133 {
3134     av_log_set_callback(log_callback_help);
3135     show_usage();
3136     show_help_options(options, "Main options:", 0, OPT_EXPERT, 0);
3137     show_help_options(options, "Advanced options:", OPT_EXPERT, 0, 0);
3138     printf("\n");
3139     show_help_children(avcodec_get_class(), AV_OPT_FLAG_DECODING_PARAM);
3140     show_help_children(avformat_get_class(), AV_OPT_FLAG_DECODING_PARAM);
3141 #if !CONFIG_AVFILTER
3142     show_help_children(sws_get_class(), AV_OPT_FLAG_ENCODING_PARAM);
3143 #else
3144     show_help_children(avfilter_get_class(), AV_OPT_FLAG_FILTERING_PARAM);
3145 #endif
3146     printf("\nWhile playing:\n"
3147            "q, ESC              quit\n"
3148            "f                   toggle full screen\n"
3149            "p, SPC              pause\n"
3150            "a                   cycle audio channel\n"
3151            "v                   cycle video channel\n"
3152            "t                   cycle subtitle channel\n"
3153            "w                   show audio waves\n"
3154            "s                   activate frame-step mode\n"
3155            "left/right          seek backward/forward 10 seconds\n"
3156            "down/up             seek backward/forward 1 minute\n"
3157            "page down/page up   seek backward/forward 10 minutes\n"
3158            "mouse click         seek to percentage in file corresponding to fraction of width\n"
3159            );
3160 }
3161
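/* libavcodec lock manager callback backed by SDL mutexes; av_lockmgr_register()
   expects 0 on success and nonzero on failure for every operation. */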
3162 static int lockmgr(void **mtx, enum AVLockOp op)
3163 {
3164     switch (op) {
3165     case AV_LOCK_CREATE:
3166         *mtx = SDL_CreateMutex();
3167         if (!*mtx)
3168             return 1;
3169         return 0;
3170     case AV_LOCK_OBTAIN:
3171         return !!SDL_LockMutex(*mtx);
3172     case AV_LOCK_RELEASE:
3173         return !!SDL_UnlockMutex(*mtx);
3174     case AV_LOCK_DESTROY:
3175         SDL_DestroyMutex(*mtx);
3176         return 0;
3177     }
3178     return 1;
3179 }
3180
3181 /* Program entry point */
3182 int main(int argc, char **argv)
3183 {
3184     int flags;
3185     VideoState *is;
3186     char dummy_videodriver[] = "SDL_VIDEODRIVER=dummy";
3187
3188     av_log_set_flags(AV_LOG_SKIP_REPEATED);
3189     parse_loglevel(argc, argv, options);
3190
3191     /* register all codecs, demuxers and protocols */
3192     avcodec_register_all();
3193 #if CONFIG_AVDEVICE
3194     avdevice_register_all();
3195 #endif
3196 #if CONFIG_AVFILTER
3197     avfilter_register_all();
3198 #endif
3199     av_register_all();
3200     avformat_network_init();
3201
3202     init_opts();
3203
3204     signal(SIGINT , sigterm_handler); /* Interrupt (ANSI).    */
3205     signal(SIGTERM, sigterm_handler); /* Termination (ANSI).  */
3206
3207     show_banner(argc, argv, options);
3208
3209     parse_options(NULL, argc, argv, options, opt_input_file);
3210
3211     if (!input_filename) {
3212         show_usage();
3213         fprintf(stderr, "An input file must be specified\n");
3214         fprintf(stderr, "Use -h to get full help or, even better, run 'man %s'\n", program_name);
3215         exit(1);
3216     }
3217
3218     if (display_disable) {
3219         video_disable = 1;
3220     }
3221     flags = SDL_INIT_VIDEO | SDL_INIT_AUDIO | SDL_INIT_TIMER;
3222     if (audio_disable)
3223         flags &= ~SDL_INIT_AUDIO;
3224     if (display_disable)
3225         SDL_putenv(dummy_videodriver); /* For the event queue, we always need a video driver. */
3226 #if !defined(__MINGW32__) && !defined(__APPLE__)
3227     flags |= SDL_INIT_EVENTTHREAD; /* Not supported on Windows or Mac OS X */
3228 #endif
3229     if (SDL_Init(flags)) {
3230         fprintf(stderr, "Could not initialize SDL - %s\n", SDL_GetError());
3231         fprintf(stderr, "(Did you set the DISPLAY variable?)\n");
3232         exit(1);
3233     }
3234
3235     if (!display_disable) {
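        /* remember the desktop resolution so toggling full screen can use it */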
3236 #if HAVE_SDL_VIDEO_SIZE
3237         const SDL_VideoInfo *vi = SDL_GetVideoInfo();
3238         fs_screen_width = vi->current_w;
3239         fs_screen_height = vi->current_h;
3240 #endif
3241     }
3242
3243     SDL_EventState(SDL_ACTIVEEVENT, SDL_IGNORE);
3244     SDL_EventState(SDL_SYSWMEVENT, SDL_IGNORE);
3245     SDL_EventState(SDL_USEREVENT, SDL_IGNORE);
3246
3247     if (av_lockmgr_register(lockmgr)) {
3248         fprintf(stderr, "Could not initialize lock manager!\n");
3249         do_exit(NULL);
3250     }
3251
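    /* The flush packet is a sentinel pushed into the packet queues on seeks so
       that the decoder threads know to flush their codec buffers. */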
3252     av_init_packet(&flush_pkt);
3253     flush_pkt.data = (uint8_t *)(intptr_t)"FLUSH";
3254
3255     is = stream_open(input_filename, file_iformat);
3256     if (!is) {
3257         fprintf(stderr, "Failed to initialize VideoState!\n");
3258         do_exit(NULL);
3259     }
3260
3261     event_loop(is);
3262
3263     /* never reached: event_loop() does not return */
3264
3265     return 0;
3266 }