ffplay.c
1 /*
2  * Copyright (c) 2003 Fabrice Bellard
3  *
4  * This file is part of FFmpeg.
5  *
6  * FFmpeg is free software; you can redistribute it and/or
7  * modify it under the terms of the GNU Lesser General Public
8  * License as published by the Free Software Foundation; either
9  * version 2.1 of the License, or (at your option) any later version.
10  *
11  * FFmpeg is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
14  * Lesser General Public License for more details.
15  *
16  * You should have received a copy of the GNU Lesser General Public
17  * License along with FFmpeg; if not, write to the Free Software
18  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
19  */
20
21 /**
22  * @file
23  * simple media player based on the FFmpeg libraries
24  */
25
26 #include "config.h"
27 #include <inttypes.h>
28 #include <math.h>
29 #include <limits.h>
30 #include <signal.h>
31 #include "libavutil/avstring.h"
32 #include "libavutil/colorspace.h"
33 #include "libavutil/mathematics.h"
34 #include "libavutil/pixdesc.h"
35 #include "libavutil/imgutils.h"
36 #include "libavutil/dict.h"
37 #include "libavutil/parseutils.h"
38 #include "libavutil/samplefmt.h"
39 #include "libavutil/avassert.h"
40 #include "libavutil/time.h"
41 #include "libavformat/avformat.h"
42 #include "libavdevice/avdevice.h"
43 #include "libswscale/swscale.h"
44 #include "libavutil/opt.h"
45 #include "libavcodec/avfft.h"
46 #include "libswresample/swresample.h"
47
48 #if CONFIG_AVFILTER
49 # include "libavfilter/avcodec.h"
50 # include "libavfilter/avfilter.h"
51 # include "libavfilter/avfiltergraph.h"
52 # include "libavfilter/buffersink.h"
53 # include "libavfilter/buffersrc.h"
54 #endif
55
56 #include <SDL.h>
57 #include <SDL_thread.h>
58
59 #include "cmdutils.h"
60
61 #include <assert.h>
62
63 const char program_name[] = "ffplay";
64 const int program_birth_year = 2003;
65
66 #define MAX_QUEUE_SIZE (15 * 1024 * 1024)
67 #define MIN_FRAMES 5
68
69 /* SDL audio buffer size, in samples. Should be small to have precise
70    A/V sync as SDL does not have hardware buffer fullness info. */
71 #define SDL_AUDIO_BUFFER_SIZE 1024
72
73 /* no AV sync correction is done if below the AV sync threshold */
74 #define AV_SYNC_THRESHOLD 0.01
75 /* no AV correction is done if too big error */
76 #define AV_NOSYNC_THRESHOLD 10.0
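/* both thresholds are in seconds: drift below 10 ms is left uncorrected, and drift
   above 10 s is treated as hopeless and also left uncorrected */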
77
78 /* maximum audio speed change to get correct sync */
79 #define SAMPLE_CORRECTION_PERCENT_MAX 10
80
81 /* we use about AUDIO_DIFF_AVG_NB A-V differences to compute the average */
82 #define AUDIO_DIFF_AVG_NB   20
83
84 /* NOTE: the size must be big enough to compensate for the hardware audio buffer size */
85 #define SAMPLE_ARRAY_SIZE (2 * 65536)
86
87 static int sws_flags = SWS_BICUBIC;
88
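/* Packets are queued by the read thread (packet_queue_put) and consumed by the
   decoder threads (packet_queue_get); mutex/cond protect the list, and setting
   abort_request wakes any blocked consumer so it can exit. */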
89 typedef struct PacketQueue {
90     AVPacketList *first_pkt, *last_pkt;
91     int nb_packets;
92     int size;
93     int abort_request;
94     SDL_mutex *mutex;
95     SDL_cond *cond;
96 } PacketQueue;
97
98 #define VIDEO_PICTURE_QUEUE_SIZE 2
99 #define SUBPICTURE_QUEUE_SIZE 4
100
101 typedef struct VideoPicture {
102     double pts;                                  ///< presentation time stamp for this picture
103     int64_t pos;                                 ///< byte position in file
104     int skip;
105     SDL_Overlay *bmp;
106     int width, height; /* source height & width */
107     AVRational sample_aspect_ratio;
108     int allocated;
109     int reallocate;
110
111 #if CONFIG_AVFILTER
112     AVFilterBufferRef *picref;
113 #endif
114 } VideoPicture;
115
116 typedef struct SubPicture {
117     double pts; /* presentation time stamp for this picture */
118     AVSubtitle sub;
119 } SubPicture;
120
121 typedef struct AudioParams {
122     int freq;
123     int channels;
124     int channel_layout;
125     enum AVSampleFormat fmt;
126 } AudioParams;
127
128 enum {
129     AV_SYNC_AUDIO_MASTER, /* default choice */
130     AV_SYNC_VIDEO_MASTER,
131     AV_SYNC_EXTERNAL_CLOCK, /* synchronize to an external clock */
132 };
133
134 typedef struct VideoState {
135     SDL_Thread *read_tid;
136     SDL_Thread *video_tid;
137     SDL_Thread *refresh_tid;
138     AVInputFormat *iformat;
139     int no_background;
140     int abort_request;
141     int force_refresh;
142     int paused;
143     int last_paused;
144     int que_attachments_req;
145     int seek_req;
146     int seek_flags;
147     int64_t seek_pos;
148     int64_t seek_rel;
149     int read_pause_return;
150     AVFormatContext *ic;
151
152     int audio_stream;
153
154     int av_sync_type;
155     double external_clock; /* external clock base */
156     int64_t external_clock_time;
157
158     double audio_clock;
159     double audio_diff_cum; /* used for AV difference average computation */
160     double audio_diff_avg_coef;
161     double audio_diff_threshold;
162     int audio_diff_avg_count;
163     AVStream *audio_st;
164     PacketQueue audioq;
165     int audio_hw_buf_size;
166     DECLARE_ALIGNED(16,uint8_t,audio_buf2)[AVCODEC_MAX_AUDIO_FRAME_SIZE * 4];
167     uint8_t silence_buf[SDL_AUDIO_BUFFER_SIZE];
168     uint8_t *audio_buf;
169     uint8_t *audio_buf1;
170     unsigned int audio_buf_size; /* in bytes */
171     int audio_buf_index; /* in bytes */
172     int audio_write_buf_size;
173     AVPacket audio_pkt_temp;
174     AVPacket audio_pkt;
175     struct AudioParams audio_src;
176     struct AudioParams audio_tgt;
177     struct SwrContext *swr_ctx;
178     double audio_current_pts;
179     double audio_current_pts_drift;
180     int frame_drops_early;
181     int frame_drops_late;
182     AVFrame *frame;
183
184     enum ShowMode {
185         SHOW_MODE_NONE = -1, SHOW_MODE_VIDEO = 0, SHOW_MODE_WAVES, SHOW_MODE_RDFT, SHOW_MODE_NB
186     } show_mode;
187     int16_t sample_array[SAMPLE_ARRAY_SIZE];
188     int sample_array_index;
189     int last_i_start;
190     RDFTContext *rdft;
191     int rdft_bits;
192     FFTSample *rdft_data;
193     int xpos;
194
195     SDL_Thread *subtitle_tid;
196     int subtitle_stream;
197     int subtitle_stream_changed;
198     AVStream *subtitle_st;
199     PacketQueue subtitleq;
200     SubPicture subpq[SUBPICTURE_QUEUE_SIZE];
201     int subpq_size, subpq_rindex, subpq_windex;
202     SDL_mutex *subpq_mutex;
203     SDL_cond *subpq_cond;
204
205     double frame_timer;
206     double frame_last_pts;
207     double frame_last_duration;
208     double frame_last_dropped_pts;
209     double frame_last_returned_time;
210     double frame_last_filter_delay;
211     int64_t frame_last_dropped_pos;
212     double video_clock;                          ///< pts of last decoded frame / predicted pts of next decoded frame
213     int video_stream;
214     AVStream *video_st;
215     PacketQueue videoq;
216     double video_current_pts;                    ///< current displayed pts (different from video_clock if frame fifos are used)
217     double video_current_pts_drift;              ///< video_current_pts - time (av_gettime) at which we updated video_current_pts - used to have running video pts
218     int64_t video_current_pos;                   ///< current displayed file pos
219     VideoPicture pictq[VIDEO_PICTURE_QUEUE_SIZE];
220     int pictq_size, pictq_rindex, pictq_windex;
221     SDL_mutex *pictq_mutex;
222     SDL_cond *pictq_cond;
223 #if !CONFIG_AVFILTER
224     struct SwsContext *img_convert_ctx;
225 #endif
226
227     char filename[1024];
228     int width, height, xleft, ytop;
229     int step;
230
231 #if CONFIG_AVFILTER
232     AVFilterContext *in_video_filter;           ///< the first filter in the video chain
233     AVFilterContext *out_video_filter;          ///< the last filter in the video chain
234     int use_dr1;
235     FrameBuffer *buffer_pool;
236 #endif
237
238     int refresh;
239     int last_video_stream, last_audio_stream, last_subtitle_stream;
240 } VideoState;
241
242 typedef struct AllocEventProps {
243     VideoState *is;
244     AVFrame *frame;
245 } AllocEventProps;
246
247 static int opt_help(const char *opt, const char *arg);
248
249 /* options specified by the user */
250 static AVInputFormat *file_iformat;
251 static const char *input_filename;
252 static const char *window_title;
253 static int fs_screen_width;
254 static int fs_screen_height;
255 static int screen_width  = 0;
256 static int screen_height = 0;
257 static int audio_disable;
258 static int video_disable;
259 static int wanted_stream[AVMEDIA_TYPE_NB] = {
260     [AVMEDIA_TYPE_AUDIO]    = -1,
261     [AVMEDIA_TYPE_VIDEO]    = -1,
262     [AVMEDIA_TYPE_SUBTITLE] = -1,
263 };
264 static int seek_by_bytes = -1;
265 static int display_disable;
266 static int show_status = 1;
267 static int av_sync_type = AV_SYNC_AUDIO_MASTER;
268 static int64_t start_time = AV_NOPTS_VALUE;
269 static int64_t duration = AV_NOPTS_VALUE;
270 static int workaround_bugs = 1;
271 static int fast = 0;
272 static int genpts = 0;
273 static int lowres = 0;
274 static int idct = FF_IDCT_AUTO;
275 static enum AVDiscard skip_frame       = AVDISCARD_DEFAULT;
276 static enum AVDiscard skip_idct        = AVDISCARD_DEFAULT;
277 static enum AVDiscard skip_loop_filter = AVDISCARD_DEFAULT;
278 static int error_concealment = 3;
279 static int decoder_reorder_pts = -1;
280 static int autoexit;
281 static int exit_on_keydown;
282 static int exit_on_mousedown;
283 static int loop = 1;
284 static int framedrop = -1;
285 static int infinite_buffer = 0;
286 static enum ShowMode show_mode = SHOW_MODE_NONE;
287 static const char *audio_codec_name;
288 static const char *subtitle_codec_name;
289 static const char *video_codec_name;
290 static int rdftspeed = 20;
291 #if CONFIG_AVFILTER
292 static char *vfilters = NULL;
293 #endif
294
295 /* current context */
296 static int is_full_screen;
297 static int64_t audio_callback_time;
298
299 static AVPacket flush_pkt;
300
301 #define FF_ALLOC_EVENT   (SDL_USEREVENT)
302 #define FF_REFRESH_EVENT (SDL_USEREVENT + 1)
303 #define FF_QUIT_EVENT    (SDL_USEREVENT + 2)
304
305 static SDL_Surface *screen;
306
307 void av_noreturn exit_program(int ret)
308 {
309     exit(ret);
310 }
311
312 static int packet_queue_put_private(PacketQueue *q, AVPacket *pkt)
313 {
314     AVPacketList *pkt1;
315
316     if (q->abort_request)
317        return -1;
318
319     pkt1 = av_malloc(sizeof(AVPacketList));
320     if (!pkt1)
321         return -1;
322     pkt1->pkt = *pkt;
323     pkt1->next = NULL;
324
325     if (!q->last_pkt)
326         q->first_pkt = pkt1;
327     else
328         q->last_pkt->next = pkt1;
329     q->last_pkt = pkt1;
330     q->nb_packets++;
331     q->size += pkt1->pkt.size + sizeof(*pkt1);
332     /* XXX: should duplicate packet data in DV case */
333     SDL_CondSignal(q->cond);
334     return 0;
335 }
336
337 static int packet_queue_put(PacketQueue *q, AVPacket *pkt)
338 {
339     int ret;
340
341     /* duplicate the packet */
342     if (pkt != &flush_pkt && av_dup_packet(pkt) < 0)
343         return -1;
344
345     SDL_LockMutex(q->mutex);
346     ret = packet_queue_put_private(q, pkt);
347     SDL_UnlockMutex(q->mutex);
348
349     if (pkt != &flush_pkt && ret < 0)
350         av_free_packet(pkt);
351
352     return ret;
353 }
354
355 /* packet queue handling */
356 static void packet_queue_init(PacketQueue *q)
357 {
358     memset(q, 0, sizeof(PacketQueue));
359     q->mutex = SDL_CreateMutex();
360     q->cond = SDL_CreateCond();
361     q->abort_request = 1;
362 }
363
364 static void packet_queue_flush(PacketQueue *q)
365 {
366     AVPacketList *pkt, *pkt1;
367
368     SDL_LockMutex(q->mutex);
369     for (pkt = q->first_pkt; pkt != NULL; pkt = pkt1) {
370         pkt1 = pkt->next;
371         av_free_packet(&pkt->pkt);
372         av_freep(&pkt);
373     }
374     q->last_pkt = NULL;
375     q->first_pkt = NULL;
376     q->nb_packets = 0;
377     q->size = 0;
378     SDL_UnlockMutex(q->mutex);
379 }
380
381 static void packet_queue_destroy(PacketQueue *q)
382 {
383     packet_queue_flush(q);
384     SDL_DestroyMutex(q->mutex);
385     SDL_DestroyCond(q->cond);
386 }
387
388 static void packet_queue_abort(PacketQueue *q)
389 {
390     SDL_LockMutex(q->mutex);
391
392     q->abort_request = 1;
393
394     SDL_CondSignal(q->cond);
395
396     SDL_UnlockMutex(q->mutex);
397 }
398
399 static void packet_queue_start(PacketQueue *q)
400 {
401     SDL_LockMutex(q->mutex);
402     q->abort_request = 0;
403     packet_queue_put_private(q, &flush_pkt);
404     SDL_UnlockMutex(q->mutex);
405 }
406
407 /* return < 0 if aborted, 0 if no packet and > 0 if packet.  */
408 static int packet_queue_get(PacketQueue *q, AVPacket *pkt, int block)
409 {
410     AVPacketList *pkt1;
411     int ret;
412
413     SDL_LockMutex(q->mutex);
414
415     for (;;) {
416         if (q->abort_request) {
417             ret = -1;
418             break;
419         }
420
421         pkt1 = q->first_pkt;
422         if (pkt1) {
423             q->first_pkt = pkt1->next;
424             if (!q->first_pkt)
425                 q->last_pkt = NULL;
426             q->nb_packets--;
427             q->size -= pkt1->pkt.size + sizeof(*pkt1);
428             *pkt = pkt1->pkt;
429             av_free(pkt1);
430             ret = 1;
431             break;
432         } else if (!block) {
433             ret = 0;
434             break;
435         } else {
436             SDL_CondWait(q->cond, q->mutex);
437         }
438     }
439     SDL_UnlockMutex(q->mutex);
440     return ret;
441 }
442
443 static inline void fill_rectangle(SDL_Surface *screen,
444                                   int x, int y, int w, int h, int color)
445 {
446     SDL_Rect rect;
447     rect.x = x;
448     rect.y = y;
449     rect.w = w;
450     rect.h = h;
451     SDL_FillRect(screen, &rect, color);
452 }
453
454 #define ALPHA_BLEND(a, oldp, newp, s)\
455 ((((oldp << s) * (255 - (a))) + (newp * (a))) / (255 << s))
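/* e.g. ALPHA_BLEND(128, 100, 200, 0) = (100*127 + 200*128) / 255 = 150,
   i.e. roughly a 50/50 mix of the old and new pixel values */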
456
457 #define RGBA_IN(r, g, b, a, s)\
458 {\
459     unsigned int v = ((const uint32_t *)(s))[0];\
460     a = (v >> 24) & 0xff;\
461     r = (v >> 16) & 0xff;\
462     g = (v >> 8) & 0xff;\
463     b = v & 0xff;\
464 }
465
466 #define YUVA_IN(y, u, v, a, s, pal)\
467 {\
468     unsigned int val = ((const uint32_t *)(pal))[*(const uint8_t*)(s)];\
469     a = (val >> 24) & 0xff;\
470     y = (val >> 16) & 0xff;\
471     u = (val >> 8) & 0xff;\
472     v = val & 0xff;\
473 }
474
475 #define YUVA_OUT(d, y, u, v, a)\
476 {\
477     ((uint32_t *)(d))[0] = (a << 24) | (y << 16) | (u << 8) | v;\
478 }
479
480
481 #define BPP 1
482
483 static void blend_subrect(AVPicture *dst, const AVSubtitleRect *rect, int imgw, int imgh)
484 {
485     int wrap, wrap3, width2, skip2;
486     int y, u, v, a, u1, v1, a1, w, h;
487     uint8_t *lum, *cb, *cr;
488     const uint8_t *p;
489     const uint32_t *pal;
490     int dstx, dsty, dstw, dsth;
491
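    /* the destination picture is YUV 4:2:0: the chroma planes (cb/cr) have half the
       luma resolution in both directions, hence the >> 1 on chroma offsets below and
       the accumulation of u1/v1/a1 over pairs of luma samples */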
492     dstw = av_clip(rect->w, 0, imgw);
493     dsth = av_clip(rect->h, 0, imgh);
494     dstx = av_clip(rect->x, 0, imgw - dstw);
495     dsty = av_clip(rect->y, 0, imgh - dsth);
496     lum = dst->data[0] + dsty * dst->linesize[0];
497     cb  = dst->data[1] + (dsty >> 1) * dst->linesize[1];
498     cr  = dst->data[2] + (dsty >> 1) * dst->linesize[2];
499
500     width2 = ((dstw + 1) >> 1) + (dstx & ~dstw & 1);
501     skip2 = dstx >> 1;
502     wrap = dst->linesize[0];
503     wrap3 = rect->pict.linesize[0];
504     p = rect->pict.data[0];
505     pal = (const uint32_t *)rect->pict.data[1];  /* Now in YCrCb! */
506
507     if (dsty & 1) {
508         lum += dstx;
509         cb += skip2;
510         cr += skip2;
511
512         if (dstx & 1) {
513             YUVA_IN(y, u, v, a, p, pal);
514             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
515             cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
516             cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
517             cb++;
518             cr++;
519             lum++;
520             p += BPP;
521         }
522         for (w = dstw - (dstx & 1); w >= 2; w -= 2) {
523             YUVA_IN(y, u, v, a, p, pal);
524             u1 = u;
525             v1 = v;
526             a1 = a;
527             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
528
529             YUVA_IN(y, u, v, a, p + BPP, pal);
530             u1 += u;
531             v1 += v;
532             a1 += a;
533             lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
534             cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
535             cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
536             cb++;
537             cr++;
538             p += 2 * BPP;
539             lum += 2;
540         }
541         if (w) {
542             YUVA_IN(y, u, v, a, p, pal);
543             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
544             cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
545             cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
546             p++;
547             lum++;
548         }
549         p += wrap3 - dstw * BPP;
550         lum += wrap - dstw - dstx;
551         cb += dst->linesize[1] - width2 - skip2;
552         cr += dst->linesize[2] - width2 - skip2;
553     }
554     for (h = dsth - (dsty & 1); h >= 2; h -= 2) {
555         lum += dstx;
556         cb += skip2;
557         cr += skip2;
558
559         if (dstx & 1) {
560             YUVA_IN(y, u, v, a, p, pal);
561             u1 = u;
562             v1 = v;
563             a1 = a;
564             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
565             p += wrap3;
566             lum += wrap;
567             YUVA_IN(y, u, v, a, p, pal);
568             u1 += u;
569             v1 += v;
570             a1 += a;
571             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
572             cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
573             cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
574             cb++;
575             cr++;
576             p += -wrap3 + BPP;
577             lum += -wrap + 1;
578         }
579         for (w = dstw - (dstx & 1); w >= 2; w -= 2) {
580             YUVA_IN(y, u, v, a, p, pal);
581             u1 = u;
582             v1 = v;
583             a1 = a;
584             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
585
586             YUVA_IN(y, u, v, a, p + BPP, pal);
587             u1 += u;
588             v1 += v;
589             a1 += a;
590             lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
591             p += wrap3;
592             lum += wrap;
593
594             YUVA_IN(y, u, v, a, p, pal);
595             u1 += u;
596             v1 += v;
597             a1 += a;
598             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
599
600             YUVA_IN(y, u, v, a, p + BPP, pal);
601             u1 += u;
602             v1 += v;
603             a1 += a;
604             lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
605
606             cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 2);
607             cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 2);
608
609             cb++;
610             cr++;
611             p += -wrap3 + 2 * BPP;
612             lum += -wrap + 2;
613         }
614         if (w) {
615             YUVA_IN(y, u, v, a, p, pal);
616             u1 = u;
617             v1 = v;
618             a1 = a;
619             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
620             p += wrap3;
621             lum += wrap;
622             YUVA_IN(y, u, v, a, p, pal);
623             u1 += u;
624             v1 += v;
625             a1 += a;
626             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
627             cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
628             cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
629             cb++;
630             cr++;
631             p += -wrap3 + BPP;
632             lum += -wrap + 1;
633         }
634         p += wrap3 + (wrap3 - dstw * BPP);
635         lum += wrap + (wrap - dstw - dstx);
636         cb += dst->linesize[1] - width2 - skip2;
637         cr += dst->linesize[2] - width2 - skip2;
638     }
639     /* handle odd height */
640     if (h) {
641         lum += dstx;
642         cb += skip2;
643         cr += skip2;
644
645         if (dstx & 1) {
646             YUVA_IN(y, u, v, a, p, pal);
647             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
648             cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
649             cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
650             cb++;
651             cr++;
652             lum++;
653             p += BPP;
654         }
655         for (w = dstw - (dstx & 1); w >= 2; w -= 2) {
656             YUVA_IN(y, u, v, a, p, pal);
657             u1 = u;
658             v1 = v;
659             a1 = a;
660             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
661
662             YUVA_IN(y, u, v, a, p + BPP, pal);
663             u1 += u;
664             v1 += v;
665             a1 += a;
666             lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
667             cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
668             cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
669             cb++;
670             cr++;
671             p += 2 * BPP;
672             lum += 2;
673         }
674         if (w) {
675             YUVA_IN(y, u, v, a, p, pal);
676             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
677             cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
678             cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
679         }
680     }
681 }
682
683 static void free_subpicture(SubPicture *sp)
684 {
685     avsubtitle_free(&sp->sub);
686 }
687
688 static void video_image_display(VideoState *is)
689 {
690     VideoPicture *vp;
691     SubPicture *sp;
692     AVPicture pict;
693     float aspect_ratio;
694     int width, height, x, y;
695     SDL_Rect rect;
696     int i;
697
698     vp = &is->pictq[is->pictq_rindex];
699     if (vp->bmp) {
700         if (vp->sample_aspect_ratio.num == 0)
701             aspect_ratio = 0;
702         else
703             aspect_ratio = av_q2d(vp->sample_aspect_ratio);
704
705         if (aspect_ratio <= 0.0)
706             aspect_ratio = 1.0;
707         aspect_ratio *= (float)vp->width / (float)vp->height;
708
709         if (is->subtitle_st) {
710             if (is->subpq_size > 0) {
711                 sp = &is->subpq[is->subpq_rindex];
712
713                 if (vp->pts >= sp->pts + ((float) sp->sub.start_display_time / 1000)) {
714                     SDL_LockYUVOverlay (vp->bmp);
715
716                     pict.data[0] = vp->bmp->pixels[0];
717                     pict.data[1] = vp->bmp->pixels[2];
718                     pict.data[2] = vp->bmp->pixels[1];
719
720                     pict.linesize[0] = vp->bmp->pitches[0];
721                     pict.linesize[1] = vp->bmp->pitches[2];
722                     pict.linesize[2] = vp->bmp->pitches[1];
723
724                     for (i = 0; i < sp->sub.num_rects; i++)
725                         blend_subrect(&pict, sp->sub.rects[i],
726                                       vp->bmp->w, vp->bmp->h);
727
728                     SDL_UnlockYUVOverlay (vp->bmp);
729                 }
730             }
731         }
732
733
734         /* XXX: we assume the screen has a 1:1 pixel aspect ratio */
735         height = is->height;
736         width = ((int)rint(height * aspect_ratio)) & ~1;
737         if (width > is->width) {
738             width = is->width;
739             height = ((int)rint(width / aspect_ratio)) & ~1;
740         }
741         x = (is->width - width) / 2;
742         y = (is->height - height) / 2;
743         is->no_background = 0;
744         rect.x = is->xleft + x;
745         rect.y = is->ytop  + y;
746         rect.w = FFMAX(width,  1);
747         rect.h = FFMAX(height, 1);
748         SDL_DisplayYUVOverlay(vp->bmp, &rect);
749     }
750 }
751
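/* modulo that always returns a result in [0, b): e.g. compute_mod(-3, 8) == 5,
   whereas -3 % 8 is -3 in C */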
752 static inline int compute_mod(int a, int b)
753 {
754     return a < 0 ? a%b + b : a%b;
755 }
756
757 static void video_audio_display(VideoState *s)
758 {
759     int i, i_start, x, y1, y, ys, delay, n, nb_display_channels;
760     int ch, channels, h, h2, bgcolor, fgcolor;
761     int64_t time_diff;
762     int rdft_bits, nb_freq;
763
764     for (rdft_bits = 1; (1 << rdft_bits) < 2 * s->height; rdft_bits++)
765         ;
766     nb_freq = 1 << (rdft_bits - 1);
767
768     /* compute display index: center on the currently output samples */
769     channels = s->audio_tgt.channels;
770     nb_display_channels = channels;
771     if (!s->paused) {
772         int data_used= s->show_mode == SHOW_MODE_WAVES ? s->width : (2*nb_freq);
773         n = 2 * channels;
774         delay = s->audio_write_buf_size;
775         delay /= n;
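        /* delay is now in sample frames still queued in the output buffer
           (n = bytes per frame, assuming 16-bit interleaved samples) */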
776
777         /* to be more precise, we take into account the time spent since
778            the last buffer computation */
779         if (audio_callback_time) {
780             time_diff = av_gettime() - audio_callback_time;
781             delay -= (time_diff * s->audio_tgt.freq) / 1000000;
782         }
783
784         delay += 2 * data_used;
785         if (delay < data_used)
786             delay = data_used;
787
788         i_start= x = compute_mod(s->sample_array_index - delay * channels, SAMPLE_ARRAY_SIZE);
789         if (s->show_mode == SHOW_MODE_WAVES) {
790             h = INT_MIN;
791             for (i = 0; i < 1000; i += channels) {
792                 int idx = (SAMPLE_ARRAY_SIZE + x - i) % SAMPLE_ARRAY_SIZE;
793                 int a = s->sample_array[idx];
794                 int b = s->sample_array[(idx + 4 * channels) % SAMPLE_ARRAY_SIZE];
795                 int c = s->sample_array[(idx + 5 * channels) % SAMPLE_ARRAY_SIZE];
796                 int d = s->sample_array[(idx + 9 * channels) % SAMPLE_ARRAY_SIZE];
797                 int score = a - d;
798                 if (h < score && (b ^ c) < 0) {
799                     h = score;
800                     i_start = idx;
801                 }
802             }
803         }
804
805         s->last_i_start = i_start;
806     } else {
807         i_start = s->last_i_start;
808     }
809
810     bgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0x00);
811     if (s->show_mode == SHOW_MODE_WAVES) {
812         fill_rectangle(screen,
813                        s->xleft, s->ytop, s->width, s->height,
814                        bgcolor);
815
816         fgcolor = SDL_MapRGB(screen->format, 0xff, 0xff, 0xff);
817
818         /* total height for one channel */
819         h = s->height / nb_display_channels;
820         /* graph height / 2 */
821         h2 = (h * 9) / 20;
822         for (ch = 0; ch < nb_display_channels; ch++) {
823             i = i_start + ch;
824             y1 = s->ytop + ch * h + (h / 2); /* position of center line */
825             for (x = 0; x < s->width; x++) {
826                 y = (s->sample_array[i] * h2) >> 15;
827                 if (y < 0) {
828                     y = -y;
829                     ys = y1 - y;
830                 } else {
831                     ys = y1;
832                 }
833                 fill_rectangle(screen,
834                                s->xleft + x, ys, 1, y,
835                                fgcolor);
836                 i += channels;
837                 if (i >= SAMPLE_ARRAY_SIZE)
838                     i -= SAMPLE_ARRAY_SIZE;
839             }
840         }
841
842         fgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0xff);
843
844         for (ch = 1; ch < nb_display_channels; ch++) {
845             y = s->ytop + ch * h;
846             fill_rectangle(screen,
847                            s->xleft, y, s->width, 1,
848                            fgcolor);
849         }
850         SDL_UpdateRect(screen, s->xleft, s->ytop, s->width, s->height);
851     } else {
852         nb_display_channels= FFMIN(nb_display_channels, 2);
853         if (rdft_bits != s->rdft_bits) {
854             av_rdft_end(s->rdft);
855             av_free(s->rdft_data);
856             s->rdft = av_rdft_init(rdft_bits, DFT_R2C);
857             s->rdft_bits = rdft_bits;
858             s->rdft_data = av_malloc(4 * nb_freq * sizeof(*s->rdft_data));
859         }
860         {
861             FFTSample *data[2];
862             for (ch = 0; ch < nb_display_channels; ch++) {
863                 data[ch] = s->rdft_data + 2 * nb_freq * ch;
864                 i = i_start + ch;
865                 for (x = 0; x < 2 * nb_freq; x++) {
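                    /* weight each sample with a Welch (parabolic) window, 1 - w^2,
                       before the RDFT to reduce spectral leakage */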
866                     double w = (x-nb_freq) * (1.0 / nb_freq);
867                     data[ch][x] = s->sample_array[i] * (1.0 - w * w);
868                     i += channels;
869                     if (i >= SAMPLE_ARRAY_SIZE)
870                         i -= SAMPLE_ARRAY_SIZE;
871                 }
872                 av_rdft_calc(s->rdft, data[ch]);
873             }
874             // least efficient way to do this; we should of course access it directly, but it's more than fast enough
875             for (y = 0; y < s->height; y++) {
876                 double w = 1 / sqrt(nb_freq);
877                 int a = sqrt(w * sqrt(data[0][2 * y + 0] * data[0][2 * y + 0] + data[0][2 * y + 1] * data[0][2 * y + 1]));
878                 int b = (nb_display_channels == 2 ) ? sqrt(w * sqrt(data[1][2 * y + 0] * data[1][2 * y + 0]
879                        + data[1][2 * y + 1] * data[1][2 * y + 1])) : a;
880                 a = FFMIN(a, 255);
881                 b = FFMIN(b, 255);
882                 fgcolor = SDL_MapRGB(screen->format, a, b, (a + b) / 2);
883
884                 fill_rectangle(screen,
885                             s->xpos, s->height-y, 1, 1,
886                             fgcolor);
887             }
888         }
889         SDL_UpdateRect(screen, s->xpos, s->ytop, 1, s->height);
890         if (!s->paused)
891             s->xpos++;
892         if (s->xpos >= s->width)
893             s->xpos= s->xleft;
894     }
895 }
896
897 static void stream_close(VideoState *is)
898 {
899     VideoPicture *vp;
900     int i;
901     /* XXX: use a special url_shutdown call to abort parse cleanly */
902     is->abort_request = 1;
903     SDL_WaitThread(is->read_tid, NULL);
904     SDL_WaitThread(is->refresh_tid, NULL);
905     packet_queue_destroy(&is->videoq);
906     packet_queue_destroy(&is->audioq);
907     packet_queue_destroy(&is->subtitleq);
908
909     /* free all pictures */
910     for (i = 0; i < VIDEO_PICTURE_QUEUE_SIZE; i++) {
911         vp = &is->pictq[i];
912 #if CONFIG_AVFILTER
913         avfilter_unref_bufferp(&vp->picref);
914 #endif
915         if (vp->bmp) {
916             SDL_FreeYUVOverlay(vp->bmp);
917             vp->bmp = NULL;
918         }
919     }
920     SDL_DestroyMutex(is->pictq_mutex);
921     SDL_DestroyCond(is->pictq_cond);
922     SDL_DestroyMutex(is->subpq_mutex);
923     SDL_DestroyCond(is->subpq_cond);
924 #if !CONFIG_AVFILTER
925     if (is->img_convert_ctx)
926         sws_freeContext(is->img_convert_ctx);
927 #endif
928     av_free(is);
929 }
930
931 static void do_exit(VideoState *is)
932 {
933     if (is) {
934         stream_close(is);
935     }
936     av_lockmgr_register(NULL);
937     uninit_opts();
938 #if CONFIG_AVFILTER
939     avfilter_uninit();
940 #endif
941     avformat_network_deinit();
942     if (show_status)
943         printf("\n");
944     SDL_Quit();
945     av_log(NULL, AV_LOG_QUIET, "%s", "");
946     exit(0);
947 }
948
949 static void sigterm_handler(int sig)
950 {
951     exit(123);
952 }
953
954 static int video_open(VideoState *is, int force_set_video_mode)
955 {
956     int flags = SDL_HWSURFACE | SDL_ASYNCBLIT | SDL_HWACCEL;
957     int w,h;
958     VideoPicture *vp = &is->pictq[is->pictq_rindex];
959
960     if (is_full_screen) flags |= SDL_FULLSCREEN;
961     else                flags |= SDL_RESIZABLE;
962
963     if (is_full_screen && fs_screen_width) {
964         w = fs_screen_width;
965         h = fs_screen_height;
966     } else if (!is_full_screen && screen_width) {
967         w = screen_width;
968         h = screen_height;
969     } else if (vp->width) {
970         w = vp->width;
971         h = vp->height;
972     } else {
973         w = 640;
974         h = 480;
975     }
976     if (screen && is->width == screen->w && screen->w == w
977        && is->height== screen->h && screen->h == h && !force_set_video_mode)
978         return 0;
979     screen = SDL_SetVideoMode(w, h, 0, flags);
980     if (!screen) {
981         fprintf(stderr, "SDL: could not set video mode - exiting\n");
982         do_exit(is);
983     }
984     if (!window_title)
985         window_title = input_filename;
986     SDL_WM_SetCaption(window_title, window_title);
987
988     is->width  = screen->w;
989     is->height = screen->h;
990
991     return 0;
992 }
993
994 /* display the current picture, if any */
995 static void video_display(VideoState *is)
996 {
997     if (!screen)
998         video_open(is, 0);
999     if (is->audio_st && is->show_mode != SHOW_MODE_VIDEO)
1000         video_audio_display(is);
1001     else if (is->video_st)
1002         video_image_display(is);
1003 }
1004
1005 static int refresh_thread(void *opaque)
1006 {
1007     VideoState *is= opaque;
1008     while (!is->abort_request) {
1009         SDL_Event event;
1010         event.type = FF_REFRESH_EVENT;
1011         event.user.data1 = opaque;
1012         if (!is->refresh && (!is->paused || is->force_refresh)) {
1013             is->refresh = 1;
1014             SDL_PushEvent(&event);
1015         }
1016         // FIXME: ideally we should wait for the correct time, but SDL's event passing is so slow it would be silly
1017         av_usleep(is->audio_st && is->show_mode != SHOW_MODE_VIDEO ? rdftspeed*1000 : 5000);
1018     }
1019     return 0;
1020 }
1021
1022 /* get the current audio clock value */
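/* audio_current_pts_drift is audio_current_pts minus the system time at which it was
   last updated, so adding the current system time extrapolates the clock while playing */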
1023 static double get_audio_clock(VideoState *is)
1024 {
1025     if (is->paused) {
1026         return is->audio_current_pts;
1027     } else {
1028         return is->audio_current_pts_drift + av_gettime() / 1000000.0;
1029     }
1030 }
1031
1032 /* get the current video clock value */
1033 static double get_video_clock(VideoState *is)
1034 {
1035     if (is->paused) {
1036         return is->video_current_pts;
1037     } else {
1038         return is->video_current_pts_drift + av_gettime() / 1000000.0;
1039     }
1040 }
1041
1042 /* get the current external clock value */
1043 static double get_external_clock(VideoState *is)
1044 {
1045     int64_t ti;
1046     ti = av_gettime();
1047     return is->external_clock + ((ti - is->external_clock_time) * 1e-6);
1048 }
1049
1050 /* get the current master clock value */
1051 static double get_master_clock(VideoState *is)
1052 {
1053     double val;
1054
1055     if (is->av_sync_type == AV_SYNC_VIDEO_MASTER) {
1056         if (is->video_st)
1057             val = get_video_clock(is);
1058         else
1059             val = get_audio_clock(is);
1060     } else if (is->av_sync_type == AV_SYNC_AUDIO_MASTER) {
1061         if (is->audio_st)
1062             val = get_audio_clock(is);
1063         else
1064             val = get_video_clock(is);
1065     } else {
1066         val = get_external_clock(is);
1067     }
1068     return val;
1069 }
1070
1071 /* seek in the stream */
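/* note: the actual avformat-level seek is performed by the read thread once it sees seek_req */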
1072 static void stream_seek(VideoState *is, int64_t pos, int64_t rel, int seek_by_bytes)
1073 {
1074     if (!is->seek_req) {
1075         is->seek_pos = pos;
1076         is->seek_rel = rel;
1077         is->seek_flags &= ~AVSEEK_FLAG_BYTE;
1078         if (seek_by_bytes)
1079             is->seek_flags |= AVSEEK_FLAG_BYTE;
1080         is->seek_req = 1;
1081     }
1082 }
1083
1084 /* pause or resume the video */
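/* on resume, frame_timer is pushed forward by the time spent paused so that frames
   queued during the pause are not all considered late at once */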
1085 static void stream_toggle_pause(VideoState *is)
1086 {
1087     if (is->paused) {
1088         is->frame_timer += av_gettime() / 1000000.0 + is->video_current_pts_drift - is->video_current_pts;
1089         if (is->read_pause_return != AVERROR(ENOSYS)) {
1090             is->video_current_pts = is->video_current_pts_drift + av_gettime() / 1000000.0;
1091         }
1092         is->video_current_pts_drift = is->video_current_pts - av_gettime() / 1000000.0;
1093     }
1094     is->paused = !is->paused;
1095 }
1096
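/* e.g. with a nominal delay of 40 ms: if the video clock is more than the sync threshold
   behind the master clock, the delay becomes 0 (show the frame immediately); if it is
   more than the threshold ahead, the delay is doubled (hold the frame longer) */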
1097 static double compute_target_delay(double delay, VideoState *is)
1098 {
1099     double sync_threshold, diff;
1100
1101     /* update delay to follow master synchronisation source */
1102     if (((is->av_sync_type == AV_SYNC_AUDIO_MASTER && is->audio_st) ||
1103          is->av_sync_type == AV_SYNC_EXTERNAL_CLOCK)) {
1104         /* if video is slave, we try to correct big delays by
1105            duplicating or deleting a frame */
1106         diff = get_video_clock(is) - get_master_clock(is);
1107
1108         /* skip or repeat the frame. We take the delay into account
1109            when computing the threshold. I still don't know
1110            if it is the best guess */
1111         sync_threshold = FFMAX(AV_SYNC_THRESHOLD, delay);
1112         if (fabs(diff) < AV_NOSYNC_THRESHOLD) {
1113             if (diff <= -sync_threshold)
1114                 delay = 0;
1115             else if (diff >= sync_threshold)
1116                 delay = 2 * delay;
1117         }
1118     }
1119
1120     av_dlog(NULL, "video: delay=%0.3f A-V=%f\n",
1121             delay, -diff);
1122
1123     return delay;
1124 }
1125
1126 static void pictq_next_picture(VideoState *is) {
1127     /* update queue size and signal for next picture */
1128     if (++is->pictq_rindex == VIDEO_PICTURE_QUEUE_SIZE)
1129         is->pictq_rindex = 0;
1130
1131     SDL_LockMutex(is->pictq_mutex);
1132     is->pictq_size--;
1133     SDL_CondSignal(is->pictq_cond);
1134     SDL_UnlockMutex(is->pictq_mutex);
1135 }
1136
1137 static void update_video_pts(VideoState *is, double pts, int64_t pos) {
1138     double time = av_gettime() / 1000000.0;
1139     /* update current video pts */
1140     is->video_current_pts = pts;
1141     is->video_current_pts_drift = is->video_current_pts - time;
1142     is->video_current_pos = pos;
1143     is->frame_last_pts = pts;
1144 }
1145
1146 /* called to display each frame */
1147 static void video_refresh(void *opaque)
1148 {
1149     VideoState *is = opaque;
1150     VideoPicture *vp;
1151     double time;
1152
1153     SubPicture *sp, *sp2;
1154
1155     if (is->video_st) {
1156 retry:
1157         if (is->pictq_size == 0) {
1158             SDL_LockMutex(is->pictq_mutex);
1159             if (is->frame_last_dropped_pts != AV_NOPTS_VALUE && is->frame_last_dropped_pts > is->frame_last_pts) {
1160                 update_video_pts(is, is->frame_last_dropped_pts, is->frame_last_dropped_pos);
1161                 is->frame_last_dropped_pts = AV_NOPTS_VALUE;
1162             }
1163             SDL_UnlockMutex(is->pictq_mutex);
1164             // nothing to do, no picture to display in the queue
1165         } else {
1166             double last_duration, duration, delay;
1167             /* dequeue the picture */
1168             vp = &is->pictq[is->pictq_rindex];
1169
1170             if (vp->skip) {
1171                 pictq_next_picture(is);
1172                 goto retry;
1173             }
1174
1175             if (is->paused)
1176                 goto display;
1177
1178             /* compute nominal last_duration */
1179             last_duration = vp->pts - is->frame_last_pts;
1180             if (last_duration > 0 && last_duration < 10.0) {
1181                 /* if duration of the last frame was sane, update last_duration in video state */
1182                 is->frame_last_duration = last_duration;
1183             }
1184             delay = compute_target_delay(is->frame_last_duration, is);
1185
1186             time= av_gettime()/1000000.0;
1187             if (time < is->frame_timer + delay)
1188                 return;
1189
1190             if (delay > 0)
1191                 is->frame_timer += delay * FFMAX(1, floor((time-is->frame_timer) / delay));
1192
1193             SDL_LockMutex(is->pictq_mutex);
1194             update_video_pts(is, vp->pts, vp->pos);
1195             SDL_UnlockMutex(is->pictq_mutex);
1196
1197             if (is->pictq_size > 1) {
1198                 VideoPicture *nextvp = &is->pictq[(is->pictq_rindex + 1) % VIDEO_PICTURE_QUEUE_SIZE];
1199                 duration = nextvp->pts - vp->pts;
1200                 if((framedrop>0 || (framedrop && is->audio_st)) && time > is->frame_timer + duration){
1201                     is->frame_drops_late++;
1202                     pictq_next_picture(is);
1203                     goto retry;
1204                 }
1205             }
1206
1207             if (is->subtitle_st) {
1208                 if (is->subtitle_stream_changed) {
1209                     SDL_LockMutex(is->subpq_mutex);
1210
1211                     while (is->subpq_size) {
1212                         free_subpicture(&is->subpq[is->subpq_rindex]);
1213
1214                         /* update queue size and signal for next picture */
1215                         if (++is->subpq_rindex == SUBPICTURE_QUEUE_SIZE)
1216                             is->subpq_rindex = 0;
1217
1218                         is->subpq_size--;
1219                     }
1220                     is->subtitle_stream_changed = 0;
1221
1222                     SDL_CondSignal(is->subpq_cond);
1223                     SDL_UnlockMutex(is->subpq_mutex);
1224                 } else {
1225                     if (is->subpq_size > 0) {
1226                         sp = &is->subpq[is->subpq_rindex];
1227
1228                         if (is->subpq_size > 1)
1229                             sp2 = &is->subpq[(is->subpq_rindex + 1) % SUBPICTURE_QUEUE_SIZE];
1230                         else
1231                             sp2 = NULL;
1232
1233                         if ((is->video_current_pts > (sp->pts + ((float) sp->sub.end_display_time / 1000)))
1234                                 || (sp2 && is->video_current_pts > (sp2->pts + ((float) sp2->sub.start_display_time / 1000))))
1235                         {
1236                             free_subpicture(sp);
1237
1238                             /* update queue size and signal for next picture */
1239                             if (++is->subpq_rindex == SUBPICTURE_QUEUE_SIZE)
1240                                 is->subpq_rindex = 0;
1241
1242                             SDL_LockMutex(is->subpq_mutex);
1243                             is->subpq_size--;
1244                             SDL_CondSignal(is->subpq_cond);
1245                             SDL_UnlockMutex(is->subpq_mutex);
1246                         }
1247                     }
1248                 }
1249             }
1250
1251 display:
1252             /* display picture */
1253             if (!display_disable)
1254                 video_display(is);
1255
1256             if (!is->paused)
1257                 pictq_next_picture(is);
1258         }
1259     } else if (is->audio_st) {
1260         /* draw the next audio frame */
1261
1262         /* if only audio stream, then display the audio bars (better
1263            than nothing, just to test the implementation) */
1264
1265         /* display picture */
1266         if (!display_disable)
1267             video_display(is);
1268     }
1269     is->force_refresh = 0;
1270     if (show_status) {
1271         static int64_t last_time;
1272         int64_t cur_time;
1273         int aqsize, vqsize, sqsize;
1274         double av_diff;
1275
1276         cur_time = av_gettime();
1277         if (!last_time || (cur_time - last_time) >= 30000) {
1278             aqsize = 0;
1279             vqsize = 0;
1280             sqsize = 0;
1281             if (is->audio_st)
1282                 aqsize = is->audioq.size;
1283             if (is->video_st)
1284                 vqsize = is->videoq.size;
1285             if (is->subtitle_st)
1286                 sqsize = is->subtitleq.size;
1287             av_diff = 0;
1288             if (is->audio_st && is->video_st)
1289                 av_diff = get_audio_clock(is) - get_video_clock(is);
1290             printf("%7.2f A-V:%7.3f fd=%4d aq=%5dKB vq=%5dKB sq=%5dB f=%"PRId64"/%"PRId64"   \r",
1291                    get_master_clock(is),
1292                    av_diff,
1293                    is->frame_drops_early + is->frame_drops_late,
1294                    aqsize / 1024,
1295                    vqsize / 1024,
1296                    sqsize,
1297                    is->video_st ? is->video_st->codec->pts_correction_num_faulty_dts : 0,
1298                    is->video_st ? is->video_st->codec->pts_correction_num_faulty_pts : 0);
1299             fflush(stdout);
1300             last_time = cur_time;
1301         }
1302     }
1303 }
1304
1305 /* allocate a picture (this needs to be done in the main thread to avoid
1306    potential locking problems) */
1307 static void alloc_picture(AllocEventProps *event_props)
1308 {
1309     VideoState *is = event_props->is;
1310     AVFrame *frame = event_props->frame;
1311     VideoPicture *vp;
1312
1313     vp = &is->pictq[is->pictq_windex];
1314
1315     if (vp->bmp)
1316         SDL_FreeYUVOverlay(vp->bmp);
1317
1318 #if CONFIG_AVFILTER
1319     avfilter_unref_bufferp(&vp->picref);
1320 #endif
1321
1322     vp->width   = frame->width;
1323     vp->height  = frame->height;
1324
1325     video_open(event_props->is, 0);
1326
1327     vp->bmp = SDL_CreateYUVOverlay(vp->width, vp->height,
1328                                    SDL_YV12_OVERLAY,
1329                                    screen);
1330     if (!vp->bmp || vp->bmp->pitches[0] < vp->width) {
1331         /* SDL allocates a buffer smaller than requested if the video
1332          * overlay hardware is unable to support the requested size. */
1333         fprintf(stderr, "Error: the video system does not support an image\n"
1334                         "size of %dx%d pixels. Try using -lowres or -vf \"scale=w:h\"\n"
1335                         "to reduce the image size.\n", vp->width, vp->height );
1336         do_exit(is);
1337     }
1338
1339     SDL_LockMutex(is->pictq_mutex);
1340     vp->allocated = 1;
1341     SDL_CondSignal(is->pictq_cond);
1342     SDL_UnlockMutex(is->pictq_mutex);
1343 }
1344
1345 static int queue_picture(VideoState *is, AVFrame *src_frame, double pts1, int64_t pos)
1346 {
1347     VideoPicture *vp;
1348     double frame_delay, pts = pts1;
1349
1350     /* compute the exact PTS for the picture if it is omitted in the stream
1351      * pts1 is the dts of the pkt / pts of the frame */
1352     if (pts != 0) {
1353         /* update video clock with pts, if present */
1354         is->video_clock = pts;
1355     } else {
1356         pts = is->video_clock;
1357     }
1358     /* update video clock for next frame */
1359     frame_delay = av_q2d(is->video_st->codec->time_base);
1360     /* for MPEG2, the frame can be repeated, so we update the
1361        clock accordingly */
1362     frame_delay += src_frame->repeat_pict * (frame_delay * 0.5);
1363     is->video_clock += frame_delay;
1364
1365 #if defined(DEBUG_SYNC) && 0
1366     printf("frame_type=%c clock=%0.3f pts=%0.3f\n",
1367            av_get_picture_type_char(src_frame->pict_type), pts, pts1);
1368 #endif
1369
1370     /* wait until we have space to put a new picture */
1371     SDL_LockMutex(is->pictq_mutex);
1372
1373     while (is->pictq_size >= VIDEO_PICTURE_QUEUE_SIZE &&
1374            !is->videoq.abort_request) {
1375         SDL_CondWait(is->pictq_cond, is->pictq_mutex);
1376     }
1377     SDL_UnlockMutex(is->pictq_mutex);
1378
1379     if (is->videoq.abort_request)
1380         return -1;
1381
1382     vp = &is->pictq[is->pictq_windex];
1383
1384     /* alloc or resize hardware picture buffer */
1385     if (!vp->bmp || vp->reallocate ||
1386         vp->width  != src_frame->width ||
1387         vp->height != src_frame->height) {
1388         SDL_Event event;
1389         AllocEventProps event_props;
1390
1391         event_props.frame = src_frame;
1392         event_props.is = is;
1393
1394         vp->allocated  = 0;
1395         vp->reallocate = 0;
1396
1397         /* the allocation must be done in the main thread to avoid
1398            locking problems. We wait in this block for the event to complete,
1399            so we can pass a pointer to event_props to it. */
1400         event.type = FF_ALLOC_EVENT;
1401         event.user.data1 = &event_props;
1402         SDL_PushEvent(&event);
1403
1404         /* wait until the picture is allocated */
1405         SDL_LockMutex(is->pictq_mutex);
1406         while (!vp->allocated && !is->videoq.abort_request) {
1407             SDL_CondWait(is->pictq_cond, is->pictq_mutex);
1408         }
1409         /* if the queue is aborted, we have to pop the pending ALLOC event or wait for the allocation to complete */
1410         if (is->videoq.abort_request && SDL_PeepEvents(&event, 1, SDL_GETEVENT, SDL_EVENTMASK(FF_ALLOC_EVENT)) != 1) {
1411             while (!vp->allocated) {
1412                 SDL_CondWait(is->pictq_cond, is->pictq_mutex);
1413             }
1414         }
1415         SDL_UnlockMutex(is->pictq_mutex);
1416
1417         if (is->videoq.abort_request)
1418             return -1;
1419     }
1420
1421     /* if the frame is not skipped, then display it */
1422     if (vp->bmp) {
1423         AVPicture pict = { { 0 } };
1424 #if CONFIG_AVFILTER
1425         avfilter_unref_bufferp(&vp->picref);
1426         vp->picref = src_frame->opaque;
1427 #endif
1428
1429         /* get a pointer on the bitmap */
1430         SDL_LockYUVOverlay (vp->bmp);
1431
1432         pict.data[0] = vp->bmp->pixels[0];
1433         pict.data[1] = vp->bmp->pixels[2];
1434         pict.data[2] = vp->bmp->pixels[1];
1435
1436         pict.linesize[0] = vp->bmp->pitches[0];
1437         pict.linesize[1] = vp->bmp->pitches[2];
1438         pict.linesize[2] = vp->bmp->pitches[1];
1439
1440 #if CONFIG_AVFILTER
1441         // FIXME use direct rendering
1442         av_picture_copy(&pict, (AVPicture *)src_frame,
1443                         src_frame->format, vp->width, vp->height);
1444         vp->sample_aspect_ratio = vp->picref->video->sample_aspect_ratio;
1445 #else
1446         sws_flags = av_get_int(sws_opts, "sws_flags", NULL);
1447         is->img_convert_ctx = sws_getCachedContext(is->img_convert_ctx,
1448             vp->width, vp->height, src_frame->format, vp->width, vp->height,
1449             PIX_FMT_YUV420P, sws_flags, NULL, NULL, NULL);
1450         if (is->img_convert_ctx == NULL) {
1451             fprintf(stderr, "Cannot initialize the conversion context\n");
1452             exit(1);
1453         }
1454         sws_scale(is->img_convert_ctx, src_frame->data, src_frame->linesize,
1455                   0, vp->height, pict.data, pict.linesize);
1456         vp->sample_aspect_ratio = av_guess_sample_aspect_ratio(is->ic, is->video_st, src_frame);
1457 #endif
1458         /* update the bitmap content */
1459         SDL_UnlockYUVOverlay(vp->bmp);
1460
1461         vp->pts = pts;
1462         vp->pos = pos;
1463         vp->skip = 0;
1464
1465         /* now we can update the picture count */
1466         if (++is->pictq_windex == VIDEO_PICTURE_QUEUE_SIZE)
1467             is->pictq_windex = 0;
1468         SDL_LockMutex(is->pictq_mutex);
1469         is->pictq_size++;
1470         SDL_UnlockMutex(is->pictq_mutex);
1471     }
1472     return 0;
1473 }
1474
1475 static int get_video_frame(VideoState *is, AVFrame *frame, int64_t *pts, AVPacket *pkt)
1476 {
1477     int got_picture, i;
1478
1479     if (packet_queue_get(&is->videoq, pkt, 1) < 0)
1480         return -1;
1481
1482     if (pkt->data == flush_pkt.data) {
1483         avcodec_flush_buffers(is->video_st->codec);
1484
1485         SDL_LockMutex(is->pictq_mutex);
1486         // Make sure there are no long-delay timers (ideally we should just flush the queue, but that's harder)
1487         for (i = 0; i < VIDEO_PICTURE_QUEUE_SIZE; i++) {
1488             is->pictq[i].skip = 1;
1489         }
1490         while (is->pictq_size && !is->videoq.abort_request) {
1491             SDL_CondWait(is->pictq_cond, is->pictq_mutex);
1492         }
1493         is->video_current_pos = -1;
1494         is->frame_last_pts = AV_NOPTS_VALUE;
1495         is->frame_last_duration = 0;
1496         is->frame_timer = (double)av_gettime() / 1000000.0;
1497         is->frame_last_dropped_pts = AV_NOPTS_VALUE;
1498         SDL_UnlockMutex(is->pictq_mutex);
1499
1500         return 0;
1501     }
1502
1503     if(avcodec_decode_video2(is->video_st->codec, frame, &got_picture, pkt) < 0)
1504         return 0;
1505
1506     if (got_picture) {
1507         int ret = 1;
1508
1509         if (decoder_reorder_pts == -1) {
1510             *pts = av_frame_get_best_effort_timestamp(frame);
1511         } else if (decoder_reorder_pts) {
1512             *pts = frame->pkt_pts;
1513         } else {
1514             *pts = frame->pkt_dts;
1515         }
1516
1517         if (*pts == AV_NOPTS_VALUE) {
1518             *pts = 0;
1519         }
1520
1521         if (((is->av_sync_type == AV_SYNC_AUDIO_MASTER && is->audio_st) || is->av_sync_type == AV_SYNC_EXTERNAL_CLOCK) &&
1522              (framedrop>0 || (framedrop && is->audio_st))) {
1523             SDL_LockMutex(is->pictq_mutex);
1524             if (is->frame_last_pts != AV_NOPTS_VALUE && *pts) {
1525                 double clockdiff = get_video_clock(is) - get_master_clock(is);
1526                 double dpts = av_q2d(is->video_st->time_base) * *pts;
1527                 double ptsdiff = dpts - is->frame_last_pts;
1528                 if (fabs(clockdiff) < AV_NOSYNC_THRESHOLD &&
1529                      ptsdiff > 0 && ptsdiff < AV_NOSYNC_THRESHOLD &&
1530                      clockdiff + ptsdiff - is->frame_last_filter_delay < 0) {
1531                     is->frame_last_dropped_pos = pkt->pos;
1532                     is->frame_last_dropped_pts = dpts;
1533                     is->frame_drops_early++;
1534                     ret = 0;
1535                 }
1536             }
1537             SDL_UnlockMutex(is->pictq_mutex);
1538         }
1539
1540         return ret;
1541     }
1542     return 0;
1543 }
1544
1545 #if CONFIG_AVFILTER
1546 static int configure_filtergraph(AVFilterGraph *graph, const char *filtergraph,
1547                                  AVFilterContext *source_ctx, AVFilterContext *sink_ctx)
1548 {
1549     int ret;
1550     AVFilterInOut *outputs = NULL, *inputs = NULL;
1551
1552     if (filtergraph) {
1553         outputs = avfilter_inout_alloc();
1554         inputs  = avfilter_inout_alloc();
1555         if (!outputs || !inputs) {
1556             ret = AVERROR(ENOMEM);
1557             goto fail;
1558         }
1559
1560         outputs->name       = av_strdup("in");
1561         outputs->filter_ctx = source_ctx;
1562         outputs->pad_idx    = 0;
1563         outputs->next       = NULL;
1564
1565         inputs->name        = av_strdup("out");
1566         inputs->filter_ctx  = sink_ctx;
1567         inputs->pad_idx     = 0;
1568         inputs->next        = NULL;
1569
1570         if ((ret = avfilter_graph_parse(graph, filtergraph, &inputs, &outputs, NULL)) < 0)
1571             goto fail;
1572     } else {
1573         if ((ret = avfilter_link(source_ctx, 0, sink_ctx, 0)) < 0)
1574             goto fail;
1575     }
1576
1577     return avfilter_graph_config(graph, NULL);
1578 fail:
1579     avfilter_inout_free(&outputs);
1580     avfilter_inout_free(&inputs);
1581     return ret;
1582 }
1583
1584 static int configure_video_filters(AVFilterGraph *graph, VideoState *is, const char *vfilters)
1585 {
1586     static const enum PixelFormat pix_fmts[] = { PIX_FMT_YUV420P, PIX_FMT_NONE };
1587     char sws_flags_str[128];
1588     char buffersrc_args[256];
1589     int ret;
1590     AVBufferSinkParams *buffersink_params = av_buffersink_params_alloc();
1591     AVFilterContext *filt_src = NULL, *filt_out = NULL, *filt_format;
1592     AVCodecContext *codec = is->video_st->codec;
1593
1594     snprintf(sws_flags_str, sizeof(sws_flags_str), "flags=%d", sws_flags);
1595     graph->scale_sws_opts = av_strdup(sws_flags_str);
1596
1597     snprintf(buffersrc_args, sizeof(buffersrc_args),
1598              "video_size=%dx%d:pix_fmt=%d:time_base=%d/%d:pixel_aspect=%d/%d",
1599              codec->width, codec->height, codec->pix_fmt,
1600              is->video_st->time_base.num, is->video_st->time_base.den,
1601              codec->sample_aspect_ratio.num, codec->sample_aspect_ratio.den);
1602
1603     if ((ret = avfilter_graph_create_filter(&filt_src,
1604                                             avfilter_get_by_name("buffer"),
1605                                             "ffplay_buffer", buffersrc_args, NULL,
1606                                             graph)) < 0)
1607         return ret;
1608
1609     buffersink_params->pixel_fmts = pix_fmts;
1610     ret = avfilter_graph_create_filter(&filt_out,
1611                                        avfilter_get_by_name("buffersink"),
1612                                        "ffplay_buffersink", NULL, buffersink_params, graph);
1613     av_freep(&buffersink_params);
1614     if (ret < 0)
1615         return ret;
1616
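         /* the display path copies frames into an SDL YV12 overlay, so
            constrain the graph output to yuv420p */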
1617     if ((ret = avfilter_graph_create_filter(&filt_format,
1618                                             avfilter_get_by_name("format"),
1619                                             "format", "yuv420p", NULL, graph)) < 0)
1620         return ret;
1621     if ((ret = avfilter_link(filt_format, 0, filt_out, 0)) < 0)
1622         return ret;
1623
1624     if ((ret = configure_filtergraph(graph, vfilters, filt_src, filt_format)) < 0)
1625         return ret;
1626
1627     is->in_video_filter  = filt_src;
1628     is->out_video_filter = filt_out;
1629
1630     return ret;
1631 }
1632
1633 #endif  /* CONFIG_AVFILTER */
1634
1635 static int video_thread(void *arg)
1636 {
1637     AVPacket pkt = { 0 };
1638     VideoState *is = arg;
1639     AVFrame *frame = avcodec_alloc_frame();
1640     int64_t pts_int = AV_NOPTS_VALUE, pos = -1;
1641     double pts;
1642     int ret;
1643
1644 #if CONFIG_AVFILTER
1645     AVCodecContext *codec = is->video_st->codec;
1646     AVFilterGraph *graph = avfilter_graph_alloc();
1647     AVFilterContext *filt_out = NULL, *filt_in = NULL;
1648     int last_w = 0;
1649     int last_h = 0;
1650     enum PixelFormat last_format = -2;
1651
1652     if (codec->codec->capabilities & CODEC_CAP_DR1) {
1653         is->use_dr1 = 1;
1654         codec->get_buffer     = codec_get_buffer;
1655         codec->release_buffer = codec_release_buffer;
1656         codec->opaque         = &is->buffer_pool;
1657     }
1658 #endif
1659
1660     for (;;) {
1661 #if CONFIG_AVFILTER
1662         AVFilterBufferRef *picref;
1663         AVRational tb;
1664 #endif
1665         while (is->paused && !is->videoq.abort_request)
1666             SDL_Delay(10);
1667
1668         avcodec_get_frame_defaults(frame);
1669         av_free_packet(&pkt);
1670
1671         ret = get_video_frame(is, frame, &pts_int, &pkt);
1672         if (ret < 0)
1673             goto the_end;
1674
1675         if (!ret)
1676             continue;
1677
1678 #if CONFIG_AVFILTER
1679         if (   last_w != is->video_st->codec->width
1680             || last_h != is->video_st->codec->height
1681             || last_format != is->video_st->codec->pix_fmt) {
1682             av_log(NULL, AV_LOG_INFO, "Frame size/format changed from %dx%d to %dx%d, reconfiguring filters\n",
1683                    last_w, last_h, is->video_st->codec->width, is->video_st->codec->height);
1684             avfilter_graph_free(&graph);
1685             graph = avfilter_graph_alloc();
1686             if ((ret = configure_video_filters(graph, is, vfilters)) < 0) {
1687                 SDL_Event event;
1688                 event.type = FF_QUIT_EVENT;
1689                 event.user.data1 = is;
1690                 SDL_PushEvent(&event);
1691                 av_free_packet(&pkt);
1692                 goto the_end;
1693             }
1694             filt_in  = is->in_video_filter;
1695             filt_out = is->out_video_filter;
1696             last_w = is->video_st->codec->width;
1697             last_h = is->video_st->codec->height;
1698             last_format = is->video_st->codec->pix_fmt;
1699         }
1700
1701         frame->pts = pts_int;
1702         frame->sample_aspect_ratio = av_guess_sample_aspect_ratio(is->ic, is->video_st, frame);
1703         if (is->use_dr1 && frame->opaque) {
1704             FrameBuffer      *buf = frame->opaque;
1705             AVFilterBufferRef *fb = avfilter_get_video_buffer_ref_from_arrays(
1706                                         frame->data, frame->linesize,
1707                                         AV_PERM_READ | AV_PERM_PRESERVE,
1708                                         frame->width, frame->height,
1709                                         frame->format);
1710
1711             avfilter_copy_frame_props(fb, frame);
1712             fb->buf->priv           = buf;
1713             fb->buf->free           = filter_release_buffer;
1714
1715             buf->refcount++;
1716             av_buffersrc_add_ref(filt_in, fb, AV_BUFFERSRC_FLAG_NO_COPY);
1717
1718         } else
1719             av_buffersrc_write_frame(filt_in, frame);
1720
1721         av_free_packet(&pkt);
1722
1723         while (ret >= 0) {
1724             is->frame_last_returned_time = av_gettime() / 1000000.0;
1725
1726             ret = av_buffersink_get_buffer_ref(filt_out, &picref, 0);
1727             if (ret < 0) {
1728                 ret = 0;
1729                 break;
1730             }
1731
1732             is->frame_last_filter_delay = av_gettime() / 1000000.0 - is->frame_last_returned_time;
1733             if (fabs(is->frame_last_filter_delay) > AV_NOSYNC_THRESHOLD / 10.0)
1734                 is->frame_last_filter_delay = 0;
1735
1736             avfilter_copy_buf_props(frame, picref);
1737
1738             pts_int = picref->pts;
1739             tb      = filt_out->inputs[0]->time_base;
1740             pos     = picref->pos;
1741             frame->opaque = picref;
1742
1743             if (av_cmp_q(tb, is->video_st->time_base)) {
1744                 av_unused int64_t pts1 = pts_int;
1745                 pts_int = av_rescale_q(pts_int, tb, is->video_st->time_base);
1746                 av_dlog(NULL, "video_thread(): "
1747                         "tb:%d/%d pts:%"PRId64" -> tb:%d/%d pts:%"PRId64"\n",
1748                         tb.num, tb.den, pts1,
1749                         is->video_st->time_base.num, is->video_st->time_base.den, pts_int);
1750             }
1751             pts = pts_int * av_q2d(is->video_st->time_base);
1752             ret = queue_picture(is, frame, pts, pos);
1753         }
1754 #else
1755         pts = pts_int * av_q2d(is->video_st->time_base);
1756         ret = queue_picture(is, frame, pts, pkt.pos);
1757 #endif
1758
1759         if (ret < 0)
1760             goto the_end;
1761
1762         if (is->step)
1763             stream_toggle_pause(is);
1764     }
1765  the_end:
1766     avcodec_flush_buffers(is->video_st->codec);
1767 #if CONFIG_AVFILTER
1768     av_freep(&vfilters);
1769     avfilter_graph_free(&graph);
1770 #endif
1771     av_free_packet(&pkt);
1772     av_free(frame);
1773     return 0;
1774 }
1775
1776 static int subtitle_thread(void *arg)
1777 {
1778     VideoState *is = arg;
1779     SubPicture *sp;
1780     AVPacket pkt1, *pkt = &pkt1;
1781     int got_subtitle;
1782     double pts;
1783     int i, j;
1784     int r, g, b, y, u, v, a;
1785
1786     for (;;) {
1787         while (is->paused && !is->subtitleq.abort_request) {
1788             SDL_Delay(10);
1789         }
1790         if (packet_queue_get(&is->subtitleq, pkt, 1) < 0)
1791             break;
1792
1793         if (pkt->data == flush_pkt.data) {
1794             avcodec_flush_buffers(is->subtitle_st->codec);
1795             continue;
1796         }
1797         SDL_LockMutex(is->subpq_mutex);
1798         while (is->subpq_size >= SUBPICTURE_QUEUE_SIZE &&
1799                !is->subtitleq.abort_request) {
1800             SDL_CondWait(is->subpq_cond, is->subpq_mutex);
1801         }
1802         SDL_UnlockMutex(is->subpq_mutex);
1803
1804         if (is->subtitleq.abort_request)
1805             return 0;
1806
1807         sp = &is->subpq[is->subpq_windex];
1808
1809         /* NOTE: pts is the PTS of the _first_ subtitle picture beginning in
1810            this packet, if any */
1811         pts = 0;
1812         if (pkt->pts != AV_NOPTS_VALUE)
1813             pts = av_q2d(is->subtitle_st->time_base) * pkt->pts;
1814
1815         avcodec_decode_subtitle2(is->subtitle_st->codec, &sp->sub,
1816                                  &got_subtitle, pkt);
1817
1818         if (got_subtitle && sp->sub.format == 0) {
1819             sp->pts = pts;
1820
1821             for (i = 0; i < sp->sub.num_rects; i++)
1822             {
1823                 for (j = 0; j < sp->sub.rects[i]->nb_colors; j++)
1824                 {
1825                     RGBA_IN(r, g, b, a, (uint32_t*)sp->sub.rects[i]->pict.data[1] + j);
1826                     y = RGB_TO_Y_CCIR(r, g, b);
1827                     u = RGB_TO_U_CCIR(r, g, b, 0);
1828                     v = RGB_TO_V_CCIR(r, g, b, 0);
1829                     YUVA_OUT((uint32_t*)sp->sub.rects[i]->pict.data[1] + j, y, u, v, a);
1830                 }
1831             }
1832
1833             /* now we can update the picture count */
1834             if (++is->subpq_windex == SUBPICTURE_QUEUE_SIZE)
1835                 is->subpq_windex = 0;
1836             SDL_LockMutex(is->subpq_mutex);
1837             is->subpq_size++;
1838             SDL_UnlockMutex(is->subpq_mutex);
1839         }
1840         av_free_packet(pkt);
1841     }
1842     return 0;
1843 }
1844
1845 /* copy samples for viewing in the audio display window */
1846 static void update_sample_display(VideoState *is, short *samples, int samples_size)
1847 {
1848     int size, len;
1849
1850     size = samples_size / sizeof(short);
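         /* sample_array is a fixed-size ring buffer: copy in chunks and wrap
            the write index at SAMPLE_ARRAY_SIZE */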
1851     while (size > 0) {
1852         len = SAMPLE_ARRAY_SIZE - is->sample_array_index;
1853         if (len > size)
1854             len = size;
1855         memcpy(is->sample_array + is->sample_array_index, samples, len * sizeof(short));
1856         samples += len;
1857         is->sample_array_index += len;
1858         if (is->sample_array_index >= SAMPLE_ARRAY_SIZE)
1859             is->sample_array_index = 0;
1860         size -= len;
1861     }
1862 }
1863
1864 /* return the wanted number of samples to get better sync if sync_type is video
1865  * or external master clock */
1866 static int synchronize_audio(VideoState *is, int nb_samples)
1867 {
1868     int wanted_nb_samples = nb_samples;
1869
1870     /* if not master, then we try to remove or add samples to correct the clock */
1871     if (((is->av_sync_type == AV_SYNC_VIDEO_MASTER && is->video_st) ||
1872          is->av_sync_type == AV_SYNC_EXTERNAL_CLOCK)) {
1873         double diff, avg_diff;
1874         int min_nb_samples, max_nb_samples;
1875
1876         diff = get_audio_clock(is) - get_master_clock(is);
1877
1878         if (fabs(diff) < AV_NOSYNC_THRESHOLD) {
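                 /* audio_diff_cum is an exponentially weighted sum of past A-V
                    differences; multiplying it by (1 - audio_diff_avg_coef)
                    below yields an average in which measurements older than
                    AUDIO_DIFF_AVG_NB samples contribute less than 1% */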
1879             is->audio_diff_cum = diff + is->audio_diff_avg_coef * is->audio_diff_cum;
1880             if (is->audio_diff_avg_count < AUDIO_DIFF_AVG_NB) {
1881                 /* not enough measures to have a correct estimate */
1882                 is->audio_diff_avg_count++;
1883             } else {
1884                 /* estimate the A-V difference */
1885                 avg_diff = is->audio_diff_cum * (1.0 - is->audio_diff_avg_coef);
1886
1887                 if (fabs(avg_diff) >= is->audio_diff_threshold) {
1888                     wanted_nb_samples = nb_samples + (int)(diff * is->audio_src.freq);
1889                     min_nb_samples = ((nb_samples * (100 - SAMPLE_CORRECTION_PERCENT_MAX) / 100));
1890                     max_nb_samples = ((nb_samples * (100 + SAMPLE_CORRECTION_PERCENT_MAX) / 100));
1891                     wanted_nb_samples = FFMIN(FFMAX(wanted_nb_samples, min_nb_samples), max_nb_samples);
1892                 }
1893                 av_dlog(NULL, "diff=%f adiff=%f sample_diff=%d apts=%0.3f vpts=%0.3f %f\n",
1894                         diff, avg_diff, wanted_nb_samples - nb_samples,
1895                         is->audio_clock, is->video_clock, is->audio_diff_threshold);
1896             }
1897         } else {
1898             /* the difference is too big: it is probably caused by initial
1899                PTS errors, so reset the A-V filter */
1900             is->audio_diff_avg_count = 0;
1901             is->audio_diff_cum       = 0;
1902         }
1903     }
1904
1905     return wanted_nb_samples;
1906 }
1907
1908 /* decode one audio frame and return its uncompressed size */
1909 static int audio_decode_frame(VideoState *is, double *pts_ptr)
1910 {
1911     AVPacket *pkt_temp = &is->audio_pkt_temp;
1912     AVPacket *pkt = &is->audio_pkt;
1913     AVCodecContext *dec = is->audio_st->codec;
1914     int len1, len2, data_size, resampled_data_size;
1915     int64_t dec_channel_layout;
1916     int got_frame;
1917     double pts;
1918     int new_packet = 0;
1919     int flush_complete = 0;
1920     int wanted_nb_samples;
1921
1922     for (;;) {
1923         /* NOTE: the audio packet can contain several frames */
1924         while (pkt_temp->size > 0 || (!pkt_temp->data && new_packet)) {
1925             if (!is->frame) {
1926                 if (!(is->frame = avcodec_alloc_frame()))
1927                     return AVERROR(ENOMEM);
1928             } else
1929                 avcodec_get_frame_defaults(is->frame);
1930
1931             if (is->paused)
1932                 return -1;
1933
1934             if (flush_complete)
1935                 break;
1936             new_packet = 0;
1937             len1 = avcodec_decode_audio4(dec, is->frame, &got_frame, pkt_temp);
1938             if (len1 < 0) {
1939                 /* if error, we skip the frame */
1940                 pkt_temp->size = 0;
1941                 break;
1942             }
1943
1944             pkt_temp->data += len1;
1945             pkt_temp->size -= len1;
1946
1947             if (!got_frame) {
1948                 /* stop sending empty packets if the decoder is finished */
1949                 if (!pkt_temp->data && dec->codec->capabilities & CODEC_CAP_DELAY)
1950                     flush_complete = 1;
1951                 continue;
1952             }
1953             data_size = av_samples_get_buffer_size(NULL, dec->channels,
1954                                                    is->frame->nb_samples,
1955                                                    dec->sample_fmt, 1);
1956
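                 /* trust the decoder's channel layout only if it matches its
                    channel count, otherwise derive a default layout from the count */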
1957             dec_channel_layout =
1958                 (dec->channel_layout && dec->channels == av_get_channel_layout_nb_channels(dec->channel_layout)) ?
1959                 dec->channel_layout : av_get_default_channel_layout(dec->channels);
1960             wanted_nb_samples = synchronize_audio(is, is->frame->nb_samples);
1961
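                 /* (re)build the resampler whenever the decoded audio no longer
                    matches the parameters the audio device was opened with
                    (audio_tgt), or when sample-count compensation is needed and
                    no resampler exists yet */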
1962             if (dec->sample_fmt    != is->audio_src.fmt            ||
1963                 dec_channel_layout != is->audio_src.channel_layout ||
1964                 dec->sample_rate   != is->audio_src.freq           ||
1965                 (wanted_nb_samples != is->frame->nb_samples && !is->swr_ctx)) {
1966                 swr_free(&is->swr_ctx);
1967                 is->swr_ctx = swr_alloc_set_opts(NULL,
1968                                                  is->audio_tgt.channel_layout, is->audio_tgt.fmt, is->audio_tgt.freq,
1969                                                  dec_channel_layout,           dec->sample_fmt,   dec->sample_rate,
1970                                                  0, NULL);
1971                 if (!is->swr_ctx || swr_init(is->swr_ctx) < 0) {
1972                     fprintf(stderr, "Cannot create sample rate converter for conversion of %d Hz %s %d channels to %d Hz %s %d channels!\n",
1973                         dec->sample_rate,   av_get_sample_fmt_name(dec->sample_fmt),   dec->channels,
1974                         is->audio_tgt.freq, av_get_sample_fmt_name(is->audio_tgt.fmt), is->audio_tgt.channels);
1975                     break;
1976                 }
1977                 is->audio_src.channel_layout = dec_channel_layout;
1978                 is->audio_src.channels = dec->channels;
1979                 is->audio_src.freq = dec->sample_rate;
1980                 is->audio_src.fmt = dec->sample_fmt;
1981             }
1982
1983             if (is->swr_ctx) {
1984                 const uint8_t **in = (const uint8_t **)is->frame->extended_data;
1985                 uint8_t *out[] = {is->audio_buf2};
1986                 int out_count = sizeof(is->audio_buf2) / is->audio_tgt.channels / av_get_bytes_per_sample(is->audio_tgt.fmt);
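                     /* ask the resampler to add or drop (wanted - actual) samples,
                        expressed at the output sample rate and spread over this
                        frame's worth of output samples */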
1987                 if (wanted_nb_samples != is->frame->nb_samples) {
1988                     if (swr_set_compensation(is->swr_ctx, (wanted_nb_samples - is->frame->nb_samples) * is->audio_tgt.freq / dec->sample_rate,
1989                                                 wanted_nb_samples * is->audio_tgt.freq / dec->sample_rate) < 0) {
1990                         fprintf(stderr, "swr_set_compensation() failed\n");
1991                         break;
1992                     }
1993                 }
1994                 len2 = swr_convert(is->swr_ctx, out, out_count, in, is->frame->nb_samples);
1995                 if (len2 < 0) {
1996                     fprintf(stderr, "swr_convert() failed\n");
1997                     break;
1998                 }
1999                 if (len2 == out_count) {
2000                     fprintf(stderr, "warning: audio buffer is probably too small\n");
2001                     swr_init(is->swr_ctx);
2002                 }
2003                 is->audio_buf = is->audio_buf2;
2004                 resampled_data_size = len2 * is->audio_tgt.channels * av_get_bytes_per_sample(is->audio_tgt.fmt);
2005             } else {
2006                 is->audio_buf = is->frame->data[0];
2007                 resampled_data_size = data_size;
2008             }
2009
2010             /* report the current audio clock as this frame's pts, then advance the clock by the frame's duration */
2011             pts = is->audio_clock;
2012             *pts_ptr = pts;
2013             is->audio_clock += (double)data_size /
2014                 (dec->channels * dec->sample_rate * av_get_bytes_per_sample(dec->sample_fmt));
2015 #ifdef DEBUG
2016             {
2017                 static double last_clock;
2018                 printf("audio: delay=%0.3f clock=%0.3f pts=%0.3f\n",
2019                        is->audio_clock - last_clock,
2020                        is->audio_clock, pts);
2021                 last_clock = is->audio_clock;
2022             }
2023 #endif
2024             return resampled_data_size;
2025         }
2026
2027         /* free the current packet */
2028         if (pkt->data)
2029             av_free_packet(pkt);
2030         memset(pkt_temp, 0, sizeof(*pkt_temp));
2031
2032         if (is->paused || is->audioq.abort_request) {
2033             return -1;
2034         }
2035
2036         /* read next packet */
2037         if ((new_packet = packet_queue_get(&is->audioq, pkt, 1)) < 0)
2038             return -1;
2039
2040         if (pkt->data == flush_pkt.data) {
2041             avcodec_flush_buffers(dec);
2042             flush_complete = 0;
2043         }
2044
2045         *pkt_temp = *pkt;
2046
2047         /* update the audio clock with the packet pts, if available */
2048         if (pkt->pts != AV_NOPTS_VALUE) {
2049             is->audio_clock = av_q2d(is->audio_st->time_base)*pkt->pts;
2050         }
2051     }
2052 }
2053
2054 /* prepare a new audio buffer */
2055 static void sdl_audio_callback(void *opaque, Uint8 *stream, int len)
2056 {
2057     VideoState *is = opaque;
2058     int audio_size, len1;
2059     int bytes_per_sec;
2060     int frame_size = av_samples_get_buffer_size(NULL, is->audio_tgt.channels, 1, is->audio_tgt.fmt, 1);
2061     double pts;
2062
2063     audio_callback_time = av_gettime();
2064
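         /* SDL wants 'len' more bytes: copy from the current decoded buffer,
            decoding a new frame (or falling back to silence on error) whenever
            the buffer runs out */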
2065     while (len > 0) {
2066         if (is->audio_buf_index >= is->audio_buf_size) {
2067             audio_size = audio_decode_frame(is, &pts);
2068             if (audio_size < 0) {
2069                 /* if error, just output silence */
2070                 is->audio_buf      = is->silence_buf;
2071                 is->audio_buf_size = sizeof(is->silence_buf) / frame_size * frame_size;
2072             } else {
2073                 if (is->show_mode != SHOW_MODE_VIDEO)
2074                     update_sample_display(is, (int16_t *)is->audio_buf, audio_size);
2075                 is->audio_buf_size = audio_size;
2076             }
2077             is->audio_buf_index = 0;
2078         }
2079         len1 = is->audio_buf_size - is->audio_buf_index;
2080         if (len1 > len)
2081             len1 = len;
2082         memcpy(stream, (uint8_t *)is->audio_buf + is->audio_buf_index, len1);
2083         len -= len1;
2084         stream += len1;
2085         is->audio_buf_index += len1;
2086     }
2087     bytes_per_sec = is->audio_tgt.freq * is->audio_tgt.channels * av_get_bytes_per_sample(is->audio_tgt.fmt);
2088     is->audio_write_buf_size = is->audio_buf_size - is->audio_buf_index;
2089     /* Let's assume the audio driver that is used by SDL has two periods. */
2090     is->audio_current_pts = is->audio_clock - (double)(2 * is->audio_hw_buf_size + is->audio_write_buf_size) / bytes_per_sec;
2091     is->audio_current_pts_drift = is->audio_current_pts - audio_callback_time / 1000000.0;
2092 }
2093
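     /* open the SDL audio device; on success, fill audio_hw_params with the
        parameters actually obtained and return the hardware buffer size in bytes */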
2094 static int audio_open(void *opaque, int64_t wanted_channel_layout, int wanted_nb_channels, int wanted_sample_rate, struct AudioParams *audio_hw_params)
2095 {
2096     SDL_AudioSpec wanted_spec, spec;
2097     const char *env;
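         /* fallback table: if SDL cannot open the device with N channels,
            retry with next_nb_channels[N] channels; 0 means give up */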
2098     const int next_nb_channels[] = {0, 0, 1, 6, 2, 6, 4, 6};
2099
2100     env = SDL_getenv("SDL_AUDIO_CHANNELS");
2101     if (env) {
2102         wanted_nb_channels = atoi(env);
2103         wanted_channel_layout = av_get_default_channel_layout(wanted_nb_channels);
2104     }
2105     if (!wanted_channel_layout || wanted_nb_channels != av_get_channel_layout_nb_channels(wanted_channel_layout)) {
2106         wanted_channel_layout = av_get_default_channel_layout(wanted_nb_channels);
2107         wanted_channel_layout &= ~AV_CH_LAYOUT_STEREO_DOWNMIX;
2108     }
2109     wanted_spec.channels = av_get_channel_layout_nb_channels(wanted_channel_layout);
2110     wanted_spec.freq = wanted_sample_rate;
2111     if (wanted_spec.freq <= 0 || wanted_spec.channels <= 0) {
2112         fprintf(stderr, "Invalid sample rate or channel count!\n");
2113         return -1;
2114     }
2115     wanted_spec.format = AUDIO_S16SYS;
2116     wanted_spec.silence = 0;
2117     wanted_spec.samples = SDL_AUDIO_BUFFER_SIZE;
2118     wanted_spec.callback = sdl_audio_callback;
2119     wanted_spec.userdata = opaque;
2120     while (SDL_OpenAudio(&wanted_spec, &spec) < 0) {
2121         fprintf(stderr, "SDL_OpenAudio (%d channels): %s\n", wanted_spec.channels, SDL_GetError());
2122         wanted_spec.channels = next_nb_channels[FFMIN(7, wanted_spec.channels)];
2123         if (!wanted_spec.channels) {
2124             fprintf(stderr, "No more channel combinations to try, audio open failed\n");
2125             return -1;
2126         }
2127         wanted_channel_layout = av_get_default_channel_layout(wanted_spec.channels);
2128     }
2129     if (spec.format != AUDIO_S16SYS) {
2130         fprintf(stderr, "SDL advised audio format %d is not supported!\n", spec.format);
2131         return -1;
2132     }
2133     if (spec.channels != wanted_spec.channels) {
2134         wanted_channel_layout = av_get_default_channel_layout(spec.channels);
2135         if (!wanted_channel_layout) {
2136             fprintf(stderr, "SDL advised channel count %d is not supported!\n", spec.channels);
2137             return -1;
2138         }
2139     }
2140
2141     audio_hw_params->fmt = AV_SAMPLE_FMT_S16;
2142     audio_hw_params->freq = spec.freq;
2143     audio_hw_params->channel_layout = wanted_channel_layout;
2144     audio_hw_params->channels =  spec.channels;
2145     return spec.size;
2146 }
2147
2148 /* open a given stream. Return 0 if OK */
2149 static int stream_component_open(VideoState *is, int stream_index)
2150 {
2151     AVFormatContext *ic = is->ic;
2152     AVCodecContext *avctx;
2153     AVCodec *codec;
2154     AVDictionary *opts;
2155     AVDictionaryEntry *t = NULL;
2156
2157     if (stream_index < 0 || stream_index >= ic->nb_streams)
2158         return -1;
2159     avctx = ic->streams[stream_index]->codec;
2160
2161     codec = avcodec_find_decoder(avctx->codec_id);
2162     opts = filter_codec_opts(codec_opts, avctx->codec_id, ic, ic->streams[stream_index], codec);
2163
2164     switch(avctx->codec_type){
2165         case AVMEDIA_TYPE_AUDIO   : is->last_audio_stream    = stream_index; if(audio_codec_name   ) codec= avcodec_find_decoder_by_name(   audio_codec_name); break;
2166         case AVMEDIA_TYPE_SUBTITLE: is->last_subtitle_stream = stream_index; if(subtitle_codec_name) codec= avcodec_find_decoder_by_name(subtitle_codec_name); break;
2167         case AVMEDIA_TYPE_VIDEO   : is->last_video_stream    = stream_index; if(video_codec_name   ) codec= avcodec_find_decoder_by_name(   video_codec_name); break;
2168     }
2169     if (!codec)
2170         return -1;
2171
2172     avctx->workaround_bugs   = workaround_bugs;
2173     avctx->lowres            = lowres;
2174     if(avctx->lowres > codec->max_lowres){
2175         av_log(avctx, AV_LOG_WARNING, "The maximum value for lowres supported by the decoder is %d\n",
2176                 codec->max_lowres);
2177         avctx->lowres= codec->max_lowres;
2178     }
2179     avctx->idct_algo         = idct;
2180     avctx->skip_frame        = skip_frame;
2181     avctx->skip_idct         = skip_idct;
2182     avctx->skip_loop_filter  = skip_loop_filter;
2183     avctx->error_concealment = error_concealment;
2184
2185     if(avctx->lowres) avctx->flags |= CODEC_FLAG_EMU_EDGE;
2186     if (fast)   avctx->flags2 |= CODEC_FLAG2_FAST;
2187     if(codec->capabilities & CODEC_CAP_DR1)
2188         avctx->flags |= CODEC_FLAG_EMU_EDGE;
2189
2190     if (!av_dict_get(opts, "threads", NULL, 0))
2191         av_dict_set(&opts, "threads", "auto", 0);
2192     if (!codec ||
2193         avcodec_open2(avctx, codec, &opts) < 0)
2194         return -1;
2195     if ((t = av_dict_get(opts, "", NULL, AV_DICT_IGNORE_SUFFIX))) {
2196         av_log(NULL, AV_LOG_ERROR, "Option %s not found.\n", t->key);
2197         return AVERROR_OPTION_NOT_FOUND;
2198     }
2199
2200     /* prepare audio output */
2201     if (avctx->codec_type == AVMEDIA_TYPE_AUDIO) {
2202         int audio_hw_buf_size = audio_open(is, avctx->channel_layout, avctx->channels, avctx->sample_rate, &is->audio_src);
2203         if (audio_hw_buf_size < 0)
2204             return -1;
2205         is->audio_hw_buf_size = audio_hw_buf_size;
2206         is->audio_tgt = is->audio_src;
2207     }
2208
2209     ic->streams[stream_index]->discard = AVDISCARD_DEFAULT;
2210     switch (avctx->codec_type) {
2211     case AVMEDIA_TYPE_AUDIO:
2212         is->audio_stream = stream_index;
2213         is->audio_st = ic->streams[stream_index];
2214         is->audio_buf_size  = 0;
2215         is->audio_buf_index = 0;
2216
2217         /* init averaging filter */
2218         is->audio_diff_avg_coef  = exp(log(0.01) / AUDIO_DIFF_AVG_NB);
2219         is->audio_diff_avg_count = 0;
2220         /* since we do not have a precise enough audio FIFO fullness measure,
2221            we correct audio sync only if the error is larger than this threshold */
2222         is->audio_diff_threshold = 2.0 * is->audio_hw_buf_size / av_samples_get_buffer_size(NULL, is->audio_tgt.channels, is->audio_tgt.freq, is->audio_tgt.fmt, 1);
2223
2224         memset(&is->audio_pkt, 0, sizeof(is->audio_pkt));
2225         memset(&is->audio_pkt_temp, 0, sizeof(is->audio_pkt_temp));
2226         packet_queue_start(&is->audioq);
2227         SDL_PauseAudio(0);
2228         break;
2229     case AVMEDIA_TYPE_VIDEO:
2230         is->video_stream = stream_index;
2231         is->video_st = ic->streams[stream_index];
2232
2233         packet_queue_start(&is->videoq);
2234         is->video_tid = SDL_CreateThread(video_thread, is);
2235         break;
2236     case AVMEDIA_TYPE_SUBTITLE:
2237         is->subtitle_stream = stream_index;
2238         is->subtitle_st = ic->streams[stream_index];
2239         packet_queue_start(&is->subtitleq);
2240
2241         is->subtitle_tid = SDL_CreateThread(subtitle_thread, is);
2242         break;
2243     default:
2244         break;
2245     }
2246     return 0;
2247 }
2248
2249 static void stream_component_close(VideoState *is, int stream_index)
2250 {
2251     AVFormatContext *ic = is->ic;
2252     AVCodecContext *avctx;
2253
2254     if (stream_index < 0 || stream_index >= ic->nb_streams)
2255         return;
2256     avctx = ic->streams[stream_index]->codec;
2257
2258     switch (avctx->codec_type) {
2259     case AVMEDIA_TYPE_AUDIO:
2260         packet_queue_abort(&is->audioq);
2261
2262         SDL_CloseAudio();
2263
2264         packet_queue_flush(&is->audioq);
2265         av_free_packet(&is->audio_pkt);
2266         swr_free(&is->swr_ctx);
2267         av_freep(&is->audio_buf1);
2268         is->audio_buf = NULL;
2269         av_freep(&is->frame);
2270
2271         if (is->rdft) {
2272             av_rdft_end(is->rdft);
2273             av_freep(&is->rdft_data);
2274             is->rdft = NULL;
2275             is->rdft_bits = 0;
2276         }
2277         break;
2278     case AVMEDIA_TYPE_VIDEO:
2279         packet_queue_abort(&is->videoq);
2280
2281         /* note: we also signal this mutex to make sure we deblock the
2282            video thread in all cases */
2283         SDL_LockMutex(is->pictq_mutex);
2284         SDL_CondSignal(is->pictq_cond);
2285         SDL_UnlockMutex(is->pictq_mutex);
2286
2287         SDL_WaitThread(is->video_tid, NULL);
2288
2289         packet_queue_flush(&is->videoq);
2290         break;
2291     case AVMEDIA_TYPE_SUBTITLE:
2292         packet_queue_abort(&is->subtitleq);
2293
2294         /* note: we also signal this mutex to make sure we deblock the
2295            subtitle thread in all cases */
2296         SDL_LockMutex(is->subpq_mutex);
2297         is->subtitle_stream_changed = 1;
2298
2299         SDL_CondSignal(is->subpq_cond);
2300         SDL_UnlockMutex(is->subpq_mutex);
2301
2302         SDL_WaitThread(is->subtitle_tid, NULL);
2303
2304         packet_queue_flush(&is->subtitleq);
2305         break;
2306     default:
2307         break;
2308     }
2309
2310     ic->streams[stream_index]->discard = AVDISCARD_ALL;
2311     avcodec_close(avctx);
2312 #if CONFIG_AVFILTER
2313     free_buffer_pool(&is->buffer_pool);
2314 #endif
2315     switch (avctx->codec_type) {
2316     case AVMEDIA_TYPE_AUDIO:
2317         is->audio_st = NULL;
2318         is->audio_stream = -1;
2319         break;
2320     case AVMEDIA_TYPE_VIDEO:
2321         is->video_st = NULL;
2322         is->video_stream = -1;
2323         break;
2324     case AVMEDIA_TYPE_SUBTITLE:
2325         is->subtitle_st = NULL;
2326         is->subtitle_stream = -1;
2327         break;
2328     default:
2329         break;
2330     }
2331 }
2332
2333 static int decode_interrupt_cb(void *ctx)
2334 {
2335     VideoState *is = ctx;
2336     return is->abort_request;
2337 }
2338
2339 /* this thread gets the stream from the disk or the network */
2340 static int read_thread(void *arg)
2341 {
2342     VideoState *is = arg;
2343     AVFormatContext *ic = NULL;
2344     int err, i, ret;
2345     int st_index[AVMEDIA_TYPE_NB];
2346     AVPacket pkt1, *pkt = &pkt1;
2347     int eof = 0;
2348     int pkt_in_play_range = 0;
2349     AVDictionaryEntry *t;
2350     AVDictionary **opts;
2351     int orig_nb_streams;
2352
2353     memset(st_index, -1, sizeof(st_index));
2354     is->last_video_stream = is->video_stream = -1;
2355     is->last_audio_stream = is->audio_stream = -1;
2356     is->last_subtitle_stream = is->subtitle_stream = -1;
2357
2358     ic = avformat_alloc_context();
2359     ic->interrupt_callback.callback = decode_interrupt_cb;
2360     ic->interrupt_callback.opaque = is;
2361     err = avformat_open_input(&ic, is->filename, is->iformat, &format_opts);
2362     if (err < 0) {
2363         print_error(is->filename, err);
2364         ret = -1;
2365         goto fail;
2366     }
2367     if ((t = av_dict_get(format_opts, "", NULL, AV_DICT_IGNORE_SUFFIX))) {
2368         av_log(NULL, AV_LOG_ERROR, "Option %s not found.\n", t->key);
2369         ret = AVERROR_OPTION_NOT_FOUND;
2370         goto fail;
2371     }
2372     is->ic = ic;
2373
2374     if (genpts)
2375         ic->flags |= AVFMT_FLAG_GENPTS;
2376
2377     opts = setup_find_stream_info_opts(ic, codec_opts);
2378     orig_nb_streams = ic->nb_streams;
2379
2380     err = avformat_find_stream_info(ic, opts);
2381     if (err < 0) {
2382         fprintf(stderr, "%s: could not find codec parameters\n", is->filename);
2383         ret = -1;
2384         goto fail;
2385     }
2386     for (i = 0; i < orig_nb_streams; i++)
2387         av_dict_free(&opts[i]);
2388     av_freep(&opts);
2389
2390     if (ic->pb)
2391         ic->pb->eof_reached = 0; // FIXME hack, ffplay maybe should not use url_feof() to test for the end
2392
2393     if (seek_by_bytes < 0)
2394         seek_by_bytes = !!(ic->iformat->flags & AVFMT_TS_DISCONT);
2395
2396     /* if seeking requested, we execute it */
2397     if (start_time != AV_NOPTS_VALUE) {
2398         int64_t timestamp;
2399
2400         timestamp = start_time;
2401         /* add the stream start time */
2402         if (ic->start_time != AV_NOPTS_VALUE)
2403             timestamp += ic->start_time;
2404         ret = avformat_seek_file(ic, -1, INT64_MIN, timestamp, INT64_MAX, 0);
2405         if (ret < 0) {
2406             fprintf(stderr, "%s: could not seek to position %0.3f\n",
2407                     is->filename, (double)timestamp / AV_TIME_BASE);
2408         }
2409     }
2410
2411     for (i = 0; i < ic->nb_streams; i++)
2412         ic->streams[i]->discard = AVDISCARD_ALL;
2413     if (!video_disable)
2414         st_index[AVMEDIA_TYPE_VIDEO] =
2415             av_find_best_stream(ic, AVMEDIA_TYPE_VIDEO,
2416                                 wanted_stream[AVMEDIA_TYPE_VIDEO], -1, NULL, 0);
2417     if (!audio_disable)
2418         st_index[AVMEDIA_TYPE_AUDIO] =
2419             av_find_best_stream(ic, AVMEDIA_TYPE_AUDIO,
2420                                 wanted_stream[AVMEDIA_TYPE_AUDIO],
2421                                 st_index[AVMEDIA_TYPE_VIDEO],
2422                                 NULL, 0);
2423     if (!video_disable)
2424         st_index[AVMEDIA_TYPE_SUBTITLE] =
2425             av_find_best_stream(ic, AVMEDIA_TYPE_SUBTITLE,
2426                                 wanted_stream[AVMEDIA_TYPE_SUBTITLE],
2427                                 (st_index[AVMEDIA_TYPE_AUDIO] >= 0 ?
2428                                  st_index[AVMEDIA_TYPE_AUDIO] :
2429                                  st_index[AVMEDIA_TYPE_VIDEO]),
2430                                 NULL, 0);
2431     if (show_status) {
2432         av_dump_format(ic, 0, is->filename, 0);
2433     }
2434
2435     is->show_mode = show_mode;
2436
2437     /* open the streams */
2438     if (st_index[AVMEDIA_TYPE_AUDIO] >= 0) {
2439         stream_component_open(is, st_index[AVMEDIA_TYPE_AUDIO]);
2440     }
2441
2442     ret = -1;
2443     if (st_index[AVMEDIA_TYPE_VIDEO] >= 0) {
2444         ret = stream_component_open(is, st_index[AVMEDIA_TYPE_VIDEO]);
2445     }
2446     is->refresh_tid = SDL_CreateThread(refresh_thread, is);
2447     if (is->show_mode == SHOW_MODE_NONE)
2448         is->show_mode = ret >= 0 ? SHOW_MODE_VIDEO : SHOW_MODE_RDFT;
2449
2450     if (st_index[AVMEDIA_TYPE_SUBTITLE] >= 0) {
2451         stream_component_open(is, st_index[AVMEDIA_TYPE_SUBTITLE]);
2452     }
2453
2454     if (is->video_stream < 0 && is->audio_stream < 0) {
2455         fprintf(stderr, "%s: could not open codecs\n", is->filename);
2456         ret = -1;
2457         goto fail;
2458     }
2459
2460     for (;;) {
2461         if (is->abort_request)
2462             break;
2463         if (is->paused != is->last_paused) {
2464             is->last_paused = is->paused;
2465             if (is->paused)
2466                 is->read_pause_return = av_read_pause(ic);
2467             else
2468                 av_read_play(ic);
2469         }
2470 #if CONFIG_RTSP_DEMUXER || CONFIG_MMSH_PROTOCOL
2471         if (is->paused &&
2472                 (!strcmp(ic->iformat->name, "rtsp") ||
2473                  (ic->pb && !strncmp(input_filename, "mmsh:", 5)))) {
2474             /* wait 10 ms to avoid trying to get another packet */
2475             /* XXX: horrible */
2476             SDL_Delay(10);
2477             continue;
2478         }
2479 #endif
2480         if (is->seek_req) {
2481             int64_t seek_target = is->seek_pos;
2482             int64_t seek_min    = is->seek_rel > 0 ? seek_target - is->seek_rel + 2: INT64_MIN;
2483             int64_t seek_max    = is->seek_rel < 0 ? seek_target - is->seek_rel - 2: INT64_MAX;
2484 // FIXME the +-2 is due to rounding being not done in the correct direction in generation
2485 //      of the seek_pos/seek_rel variables
2486
2487             ret = avformat_seek_file(is->ic, -1, seek_min, seek_target, seek_max, is->seek_flags);
2488             if (ret < 0) {
2489                 fprintf(stderr, "%s: error while seeking\n", is->ic->filename);
2490             } else {
2491                 if (is->audio_stream >= 0) {
2492                     packet_queue_flush(&is->audioq);
2493                     packet_queue_put(&is->audioq, &flush_pkt);
2494                 }
2495                 if (is->subtitle_stream >= 0) {
2496                     packet_queue_flush(&is->subtitleq);
2497                     packet_queue_put(&is->subtitleq, &flush_pkt);
2498                 }
2499                 if (is->video_stream >= 0) {
2500                     packet_queue_flush(&is->videoq);
2501                     packet_queue_put(&is->videoq, &flush_pkt);
2502                 }
2503             }
2504             is->seek_req = 0;
2505             eof = 0;
2506         }
2507         if (is->que_attachments_req) {
2508             avformat_queue_attached_pictures(ic);
2509             is->que_attachments_req = 0;
2510         }
2511
2512         /* if the queues are full (too many bytes in total, or enough packets per stream), no need to read more */
2513         if (!infinite_buffer &&
2514               (is->audioq.size + is->videoq.size + is->subtitleq.size > MAX_QUEUE_SIZE
2515             || (   (is->audioq   .nb_packets > MIN_FRAMES || is->audio_stream < 0 || is->audioq.abort_request)
2516                 && (is->videoq   .nb_packets > MIN_FRAMES || is->video_stream < 0 || is->videoq.abort_request)
2517                 && (is->subtitleq.nb_packets > MIN_FRAMES || is->subtitle_stream < 0 || is->subtitleq.abort_request)))) {
2518             /* wait 10 ms */
2519             SDL_Delay(10);
2520             continue;
2521         }
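             /* at EOF, queue empty packets so decoders that buffer frames can
                flush their remaining output; once all queues drain, loop the
                playback or exit if -autoexit is set */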
2522         if (eof) {
2523             if (is->video_stream >= 0) {
2524                 av_init_packet(pkt);
2525                 pkt->data = NULL;
2526                 pkt->size = 0;
2527                 pkt->stream_index = is->video_stream;
2528                 packet_queue_put(&is->videoq, pkt);
2529             }
2530             if (is->audio_stream >= 0 &&
2531                 is->audio_st->codec->codec->capabilities & CODEC_CAP_DELAY) {
2532                 av_init_packet(pkt);
2533                 pkt->data = NULL;
2534                 pkt->size = 0;
2535                 pkt->stream_index = is->audio_stream;
2536                 packet_queue_put(&is->audioq, pkt);
2537             }
2538             SDL_Delay(10);
2539             if (is->audioq.size + is->videoq.size + is->subtitleq.size == 0) {
2540                 if (loop != 1 && (!loop || --loop)) {
2541                     stream_seek(is, start_time != AV_NOPTS_VALUE ? start_time : 0, 0, 0);
2542                 } else if (autoexit) {
2543                     ret = AVERROR_EOF;
2544                     goto fail;
2545                 }
2546             }
2547             eof = 0;
2548             continue;
2549         }
2550         ret = av_read_frame(ic, pkt);
2551         if (ret < 0) {
2552             if (ret == AVERROR_EOF || url_feof(ic->pb))
2553                 eof = 1;
2554             if (ic->pb && ic->pb->error)
2555                 break;
2556             SDL_Delay(100); /* wait for user event */
2557             continue;
2558         }
2559         /* check if packet is in play range specified by user, then queue, otherwise discard */
2560         pkt_in_play_range = duration == AV_NOPTS_VALUE ||
2561                 (pkt->pts - ic->streams[pkt->stream_index]->start_time) *
2562                 av_q2d(ic->streams[pkt->stream_index]->time_base) -
2563                 (double)(start_time != AV_NOPTS_VALUE ? start_time : 0) / 1000000
2564                 <= ((double)duration / 1000000);
2565         if (pkt->stream_index == is->audio_stream && pkt_in_play_range) {
2566             packet_queue_put(&is->audioq, pkt);
2567         } else if (pkt->stream_index == is->video_stream && pkt_in_play_range) {
2568             packet_queue_put(&is->videoq, pkt);
2569         } else if (pkt->stream_index == is->subtitle_stream && pkt_in_play_range) {
2570             packet_queue_put(&is->subtitleq, pkt);
2571         } else {
2572             av_free_packet(pkt);
2573         }
2574     }
2575     /* wait until the end */
2576     while (!is->abort_request) {
2577         SDL_Delay(100);
2578     }
2579
2580     ret = 0;
2581  fail:
2582     /* close each stream */
2583     if (is->audio_stream >= 0)
2584         stream_component_close(is, is->audio_stream);
2585     if (is->video_stream >= 0)
2586         stream_component_close(is, is->video_stream);
2587     if (is->subtitle_stream >= 0)
2588         stream_component_close(is, is->subtitle_stream);
2589     if (is->ic) {
2590         avformat_close_input(&is->ic);
2591     }
2592
2593     if (ret != 0) {
2594         SDL_Event event;
2595
2596         event.type = FF_QUIT_EVENT;
2597         event.user.data1 = is;
2598         SDL_PushEvent(&event);
2599     }
2600     return 0;
2601 }
2602
2603 static VideoState *stream_open(const char *filename, AVInputFormat *iformat)
2604 {
2605     VideoState *is;
2606
2607     is = av_mallocz(sizeof(VideoState));
2608     if (!is)
2609         return NULL;
2610     av_strlcpy(is->filename, filename, sizeof(is->filename));
2611     is->iformat = iformat;
2612     is->ytop    = 0;
2613     is->xleft   = 0;
2614
2615     /* start video display */
2616     is->pictq_mutex = SDL_CreateMutex();
2617     is->pictq_cond  = SDL_CreateCond();
2618
2619     is->subpq_mutex = SDL_CreateMutex();
2620     is->subpq_cond  = SDL_CreateCond();
2621
2622     packet_queue_init(&is->videoq);
2623     packet_queue_init(&is->audioq);
2624     packet_queue_init(&is->subtitleq);
2625
2626     is->av_sync_type = av_sync_type;
2627     is->read_tid     = SDL_CreateThread(read_thread, is);
2628     if (!is->read_tid) {
2629         av_free(is);
2630         return NULL;
2631     }
2632     return is;
2633 }
2634
2635 static void stream_cycle_channel(VideoState *is, int codec_type)
2636 {
2637     AVFormatContext *ic = is->ic;
2638     int start_index, stream_index;
2639     int old_index;
2640     AVStream *st;
2641
2642     if (codec_type == AVMEDIA_TYPE_VIDEO) {
2643         start_index = is->last_video_stream;
2644         old_index = is->video_stream;
2645     } else if (codec_type == AVMEDIA_TYPE_AUDIO) {
2646         start_index = is->last_audio_stream;
2647         old_index = is->audio_stream;
2648     } else {
2649         start_index = is->last_subtitle_stream;
2650         old_index = is->subtitle_stream;
2651     }
2652     stream_index = start_index;
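         /* scan the streams circularly, starting just after the last selected
            one, until another usable stream of the requested type is found
            (subtitles may also cycle back to "no subtitle") */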
2653     for (;;) {
2654         if (++stream_index >= is->ic->nb_streams)
2655         {
2656             if (codec_type == AVMEDIA_TYPE_SUBTITLE)
2657             {
2658                 stream_index = -1;
2659                 is->last_subtitle_stream = -1;
2660                 goto the_end;
2661             }
2662             if (start_index == -1)
2663                 return;
2664             stream_index = 0;
2665         }
2666         if (stream_index == start_index)
2667             return;
2668         st = ic->streams[stream_index];
2669         if (st->codec->codec_type == codec_type) {
2670             /* check that parameters are OK */
2671             switch (codec_type) {
2672             case AVMEDIA_TYPE_AUDIO:
2673                 if (st->codec->sample_rate != 0 &&
2674                     st->codec->channels != 0)
2675                     goto the_end;
2676                 break;
2677             case AVMEDIA_TYPE_VIDEO:
2678             case AVMEDIA_TYPE_SUBTITLE:
2679                 goto the_end;
2680             default:
2681                 break;
2682             }
2683         }
2684     }
2685  the_end:
2686     stream_component_close(is, old_index);
2687     stream_component_open(is, stream_index);
2688     if (codec_type == AVMEDIA_TYPE_VIDEO)
2689         is->que_attachments_req = 1;
2690 }
2691
2692
2693 static void toggle_full_screen(VideoState *is)
2694 {
2695 #if defined(__APPLE__) && SDL_VERSION_ATLEAST(1, 2, 14)
2696     /* OS X needs to reallocate the SDL overlays */
2697     int i;
2698     for (i = 0; i < VIDEO_PICTURE_QUEUE_SIZE; i++)
2699         is->pictq[i].reallocate = 1;
2700 #endif
2701     is_full_screen = !is_full_screen;
2702     video_open(is, 1);
2703 }
2704
2705 static void toggle_pause(VideoState *is)
2706 {
2707     stream_toggle_pause(is);
2708     is->step = 0;
2709 }
2710
2711 static void step_to_next_frame(VideoState *is)
2712 {
2713     /* if the stream is paused, unpause it, then step */
2714     if (is->paused)
2715         stream_toggle_pause(is);
2716     is->step = 1;
2717 }
2718
2719 static void toggle_audio_display(VideoState *is)
2720 {
2721     int bgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0x00);
2722     is->show_mode = (is->show_mode + 1) % SHOW_MODE_NB;
2723     fill_rectangle(screen,
2724                 is->xleft, is->ytop, is->width, is->height,
2725                 bgcolor);
2726     SDL_UpdateRect(screen, is->xleft, is->ytop, is->width, is->height);
2727 }
2728
2729 /* handle an event sent by the GUI */
2730 static void event_loop(VideoState *cur_stream)
2731 {
2732     SDL_Event event;
2733     double incr, pos, frac;
2734
2735     for (;;) {
2736         double x;
2737         SDL_WaitEvent(&event);
2738         switch (event.type) {
2739         case SDL_KEYDOWN:
2740             if (exit_on_keydown) {
2741                 do_exit(cur_stream);
2742                 break;
2743             }
2744             switch (event.key.keysym.sym) {
2745             case SDLK_ESCAPE:
2746             case SDLK_q:
2747                 do_exit(cur_stream);
2748                 break;
2749             case SDLK_f:
2750                 toggle_full_screen(cur_stream);
2751                 cur_stream->force_refresh = 1;
2752                 break;
2753             case SDLK_p:
2754             case SDLK_SPACE:
2755                 toggle_pause(cur_stream);
2756                 break;
2757             case SDLK_s: // S: Step to next frame
2758                 step_to_next_frame(cur_stream);
2759                 break;
2760             case SDLK_a:
2761                 stream_cycle_channel(cur_stream, AVMEDIA_TYPE_AUDIO);
2762                 break;
2763             case SDLK_v:
2764                 stream_cycle_channel(cur_stream, AVMEDIA_TYPE_VIDEO);
2765                 break;
2766             case SDLK_t:
2767                 stream_cycle_channel(cur_stream, AVMEDIA_TYPE_SUBTITLE);
2768                 break;
2769             case SDLK_w:
2770                 toggle_audio_display(cur_stream);
2771                 cur_stream->force_refresh = 1;
2772                 break;
2773             case SDLK_PAGEUP:
2774                 incr = 600.0;
2775                 goto do_seek;
2776             case SDLK_PAGEDOWN:
2777                 incr = -600.0;
2778                 goto do_seek;
2779             case SDLK_LEFT:
2780                 incr = -10.0;
2781                 goto do_seek;
2782             case SDLK_RIGHT:
2783                 incr = 10.0;
2784                 goto do_seek;
2785             case SDLK_UP:
2786                 incr = 60.0;
2787                 goto do_seek;
2788             case SDLK_DOWN:
2789                 incr = -60.0;
2790             do_seek:
2791                     if (seek_by_bytes) {
2792                         if (cur_stream->video_stream >= 0 && cur_stream->video_current_pos >= 0) {
2793                             pos = cur_stream->video_current_pos;
2794                         } else if (cur_stream->audio_stream >= 0 && cur_stream->audio_pkt.pos >= 0) {
2795                             pos = cur_stream->audio_pkt.pos;
2796                         } else
2797                             pos = avio_tell(cur_stream->ic->pb);
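                             /* convert the seek increment from seconds to bytes
                                using the container bit rate, or a rough
                                180000 bytes/s default */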
2798                         if (cur_stream->ic->bit_rate)
2799                             incr *= cur_stream->ic->bit_rate / 8.0;
2800                         else
2801                             incr *= 180000.0;
2802                         pos += incr;
2803                         stream_seek(cur_stream, pos, incr, 1);
2804                     } else {
2805                         pos = get_master_clock(cur_stream);
2806                         pos += incr;
2807                         stream_seek(cur_stream, (int64_t)(pos * AV_TIME_BASE), (int64_t)(incr * AV_TIME_BASE), 0);
2808                     }
2809                 break;
2810             default:
2811                 break;
2812             }
2813             break;
2814         case SDL_VIDEOEXPOSE:
2815             cur_stream->force_refresh = 1;
2816             break;
2817         case SDL_MOUSEBUTTONDOWN:
2818             if (exit_on_mousedown) {
2819                 do_exit(cur_stream);
2820                 break;
2821             }
2822         case SDL_MOUSEMOTION:
2823             if (event.type == SDL_MOUSEBUTTONDOWN) {
2824                 x = event.button.x;
2825             } else {
2826                 if (event.motion.state != SDL_PRESSED)
2827                     break;
2828                 x = event.motion.x;
2829             }
2830                 if (seek_by_bytes || cur_stream->ic->duration <= 0) {
2831                     uint64_t size = avio_size(cur_stream->ic->pb);
2832                     stream_seek(cur_stream, size*x/cur_stream->width, 0, 1);
2833                 } else {
2834                     int64_t ts;
2835                     int ns, hh, mm, ss;
2836                     int tns, thh, tmm, tss;
2837                     tns  = cur_stream->ic->duration / 1000000LL;
2838                     thh  = tns / 3600;
2839                     tmm  = (tns % 3600) / 60;
2840                     tss  = (tns % 60);
2841                     frac = x / cur_stream->width;
2842                     ns   = frac * tns;
2843                     hh   = ns / 3600;
2844                     mm   = (ns % 3600) / 60;
2845                     ss   = (ns % 60);
2846                     fprintf(stderr, "Seek to %2.0f%% (%2d:%02d:%02d) of total duration (%2d:%02d:%02d)       \n", frac*100,
2847                             hh, mm, ss, thh, tmm, tss);
2848                     ts = frac * cur_stream->ic->duration;
2849                     if (cur_stream->ic->start_time != AV_NOPTS_VALUE)
2850                         ts += cur_stream->ic->start_time;
2851                     stream_seek(cur_stream, ts, 0, 0);
2852                 }
2853             break;
2854         case SDL_VIDEORESIZE:
2855                 screen = SDL_SetVideoMode(event.resize.w, event.resize.h, 0,
2856                                           SDL_HWSURFACE|SDL_RESIZABLE|SDL_ASYNCBLIT|SDL_HWACCEL);
2857                 screen_width  = cur_stream->width  = event.resize.w;
2858                 screen_height = cur_stream->height = event.resize.h;
2859                 cur_stream->force_refresh = 1;
2860             break;
2861         case SDL_QUIT:
2862         case FF_QUIT_EVENT:
2863             do_exit(cur_stream);
2864             break;
2865         case FF_ALLOC_EVENT:
2866             alloc_picture(event.user.data1);
2867             break;
2868         case FF_REFRESH_EVENT:
2869             video_refresh(event.user.data1);
2870             cur_stream->refresh = 0;
2871             break;
2872         default:
2873             break;
2874         }
2875     }
2876 }
2877
2878 static int opt_frame_size(const char *opt, const char *arg)
2879 {
2880     av_log(NULL, AV_LOG_WARNING, "Option -s is deprecated, use -video_size.\n");
2881     return opt_default("video_size", arg);
2882 }
2883
2884 static int opt_width(const char *opt, const char *arg)
2885 {
2886     screen_width = parse_number_or_die(opt, arg, OPT_INT64, 1, INT_MAX);
2887     return 0;
2888 }
2889
2890 static int opt_height(const char *opt, const char *arg)
2891 {
2892     screen_height = parse_number_or_die(opt, arg, OPT_INT64, 1, INT_MAX);
2893     return 0;
2894 }
2895
2896 static int opt_format(const char *opt, const char *arg)
2897 {
2898     file_iformat = av_find_input_format(arg);
2899     if (!file_iformat) {
2900         fprintf(stderr, "Unknown input format: %s\n", arg);
2901         return AVERROR(EINVAL);
2902     }
2903     return 0;
2904 }
2905
2906 static int opt_frame_pix_fmt(const char *opt, const char *arg)
2907 {
2908     av_log(NULL, AV_LOG_WARNING, "Option -pix_fmt is deprecated, use -pixel_format.\n");
2909     return opt_default("pixel_format", arg);
2910 }
2911
2912 static int opt_sync(const char *opt, const char *arg)
2913 {
2914     if (!strcmp(arg, "audio"))
2915         av_sync_type = AV_SYNC_AUDIO_MASTER;
2916     else if (!strcmp(arg, "video"))
2917         av_sync_type = AV_SYNC_VIDEO_MASTER;
2918     else if (!strcmp(arg, "ext"))
2919         av_sync_type = AV_SYNC_EXTERNAL_CLOCK;
2920     else {
2921         fprintf(stderr, "Unknown value for %s: %s\n", opt, arg);
2922         exit(1);
2923     }
2924     return 0;
2925 }
2926
2927 static int opt_seek(const char *opt, const char *arg)
2928 {
2929     start_time = parse_time_or_die(opt, arg, 1);
2930     return 0;
2931 }
2932
2933 static int opt_duration(const char *opt, const char *arg)
2934 {
2935     duration = parse_time_or_die(opt, arg, 1);
2936     return 0;
2937 }
2938
2939 static int opt_show_mode(const char *opt, const char *arg)
2940 {
2941     show_mode = !strcmp(arg, "video") ? SHOW_MODE_VIDEO :
2942                 !strcmp(arg, "waves") ? SHOW_MODE_WAVES :
2943                 !strcmp(arg, "rdft" ) ? SHOW_MODE_RDFT  :
2944                 parse_number_or_die(opt, arg, OPT_INT, 0, SHOW_MODE_NB-1);
2945     return 0;
2946 }
2947
2948 static void opt_input_file(void *optctx, const char *filename)
2949 {
2950     if (input_filename) {
2951         fprintf(stderr, "Argument '%s' provided as input filename, but '%s' was already specified.\n",
2952                 filename, input_filename);
2953         exit_program(1);
2954     }
2955     if (!strcmp(filename, "-"))
2956         filename = "pipe:";
2957     input_filename = filename;
2958 }
2959
2960 static int opt_codec(void *o, const char *opt, const char *arg)
2961 {
2962     switch(opt[strlen(opt)-1]){
2963     case 'a' :    audio_codec_name = arg; break;
2964     case 's' : subtitle_codec_name = arg; break;
2965     case 'v' :    video_codec_name = arg; break;
2966     }
2967     return 0;
2968 }
2969
2970 static int dummy;
2971
2972 static const OptionDef options[] = {
2973 #include "cmdutils_common_opts.h"
2974     { "x", HAS_ARG, { (void*)opt_width }, "force displayed width", "width" },
2975     { "y", HAS_ARG, { (void*)opt_height }, "force displayed height", "height" },
2976     { "s", HAS_ARG | OPT_VIDEO, { (void*)opt_frame_size }, "set frame size (WxH or abbreviation)", "size" },
2977     { "fs", OPT_BOOL, { (void*)&is_full_screen }, "force full screen" },
2978     { "an", OPT_BOOL, { (void*)&audio_disable }, "disable audio" },
2979     { "vn", OPT_BOOL, { (void*)&video_disable }, "disable video" },
2980     { "ast", OPT_INT | HAS_ARG | OPT_EXPERT, { (void*)&wanted_stream[AVMEDIA_TYPE_AUDIO] }, "select desired audio stream", "stream_number" },
2981     { "vst", OPT_INT | HAS_ARG | OPT_EXPERT, { (void*)&wanted_stream[AVMEDIA_TYPE_VIDEO] }, "select desired video stream", "stream_number" },
2982     { "sst", OPT_INT | HAS_ARG | OPT_EXPERT, { (void*)&wanted_stream[AVMEDIA_TYPE_SUBTITLE] }, "select desired subtitle stream", "stream_number" },
2983     { "ss", HAS_ARG, { (void*)&opt_seek }, "seek to a given position in seconds", "pos" },
2984     { "t", HAS_ARG, { (void*)&opt_duration }, "play \"duration\" seconds of audio/video", "duration" },
2985     { "bytes", OPT_INT | HAS_ARG, { (void*)&seek_by_bytes }, "seek by bytes 0=off 1=on -1=auto", "val" },
2986     { "nodisp", OPT_BOOL, { (void*)&display_disable }, "disable graphical display" },
2987     { "f", HAS_ARG, { (void*)opt_format }, "force format", "fmt" },
2988     { "pix_fmt", HAS_ARG | OPT_EXPERT | OPT_VIDEO, { (void*)opt_frame_pix_fmt }, "set pixel format", "format" },
2989     { "stats", OPT_BOOL | OPT_EXPERT, { (void*)&show_status }, "show status", "" },
2990     { "bug", OPT_INT | HAS_ARG | OPT_EXPERT, { (void*)&workaround_bugs }, "workaround bugs", "" },
2991     { "fast", OPT_BOOL | OPT_EXPERT, { (void*)&fast }, "non spec compliant optimizations", "" },
2992     { "genpts", OPT_BOOL | OPT_EXPERT, { (void*)&genpts }, "generate pts", "" },
2993     { "drp", OPT_INT | HAS_ARG | OPT_EXPERT, { (void*)&decoder_reorder_pts }, "let decoder reorder pts 0=off 1=on -1=auto", ""},
2994     { "lowres", OPT_INT | HAS_ARG | OPT_EXPERT, { (void*)&lowres }, "set low resolution decoding factor", "factor" },
2995     { "skiploop", OPT_INT | HAS_ARG | OPT_EXPERT, { (void*)&skip_loop_filter }, "set loop filter skipping mode", "mode" },
2996     { "skipframe", OPT_INT | HAS_ARG | OPT_EXPERT, { (void*)&skip_frame }, "set frame skipping mode", "mode" },
2997     { "skipidct", OPT_INT | HAS_ARG | OPT_EXPERT, { (void*)&skip_idct }, "set IDCT skipping mode", "mode" },
2998     { "idct", OPT_INT | HAS_ARG | OPT_EXPERT, { (void*)&idct }, "set idct algo",  "algo" },
2999     { "ec", OPT_INT | HAS_ARG | OPT_EXPERT, { (void*)&error_concealment }, "set error concealment options",  "bit_mask" },
3000     { "sync", HAS_ARG | OPT_EXPERT, { (void*)opt_sync }, "set audio-video sync type (type=audio/video/ext)", "type" },
3001     { "autoexit", OPT_BOOL | OPT_EXPERT, { (void*)&autoexit }, "exit at the end", "" },
3002     { "exitonkeydown", OPT_BOOL | OPT_EXPERT, { (void*)&exit_on_keydown }, "exit on key down", "" },
3003     { "exitonmousedown", OPT_BOOL | OPT_EXPERT, { (void*)&exit_on_mousedown }, "exit on mouse down", "" },
3004     { "loop", OPT_INT | HAS_ARG | OPT_EXPERT, { (void*)&loop }, "set number of times the playback shall be looped", "loop count" },
3005     { "framedrop", OPT_BOOL | OPT_EXPERT, { (void*)&framedrop }, "drop frames when cpu is too slow", "" },
3006     { "infbuf", OPT_BOOL | OPT_EXPERT, { (void*)&infinite_buffer }, "don't limit the input buffer size (useful with realtime streams)", "" },
3007     { "window_title", OPT_STRING | HAS_ARG, { (void*)&window_title }, "set window title", "window title" },
3008 #if CONFIG_AVFILTER
3009     { "vf", OPT_STRING | HAS_ARG, { (void*)&vfilters }, "video filters", "filter list" },
3010 #endif
3011     { "rdftspeed", OPT_INT | HAS_ARG| OPT_AUDIO | OPT_EXPERT, { (void*)&rdftspeed }, "rdft speed", "msecs" },
3012     { "showmode", HAS_ARG, {(void*)opt_show_mode}, "select show mode (0 = video, 1 = waves, 2 = RDFT)", "mode" },
3013     { "default", HAS_ARG | OPT_AUDIO | OPT_VIDEO | OPT_EXPERT, { (void*)opt_default }, "generic catch-all option", "" },
3014     { "i", OPT_BOOL, {(void *)&dummy}, "read specified file", "input_file"},
3015     { "codec", HAS_ARG | OPT_FUNC2, {(void*)opt_codec}, "force decoder", "decoder" },
3016     { NULL, },
3017 };
3018
3019 static void show_usage(void)
3020 {
3021     av_log(NULL, AV_LOG_INFO, "Simple media player\n");
3022     av_log(NULL, AV_LOG_INFO, "usage: %s [options] input_file\n", program_name);
3023     av_log(NULL, AV_LOG_INFO, "\n");
3024 }
3025
3026 static int opt_help(const char *opt, const char *arg)
3027 {
3028     av_log_set_callback(log_callback_help);
3029     show_usage();
3030     show_help_options(options, "Main options:\n",
3031                       OPT_EXPERT, 0);
3032     show_help_options(options, "\nAdvanced options:\n",
3033                       OPT_EXPERT, OPT_EXPERT);
3034     printf("\n");
3035     show_help_children(avcodec_get_class(), AV_OPT_FLAG_DECODING_PARAM);
3036     show_help_children(avformat_get_class(), AV_OPT_FLAG_DECODING_PARAM);
3037 #if !CONFIG_AVFILTER
3038     show_help_children(sws_get_class(), AV_OPT_FLAG_ENCODING_PARAM);
3039 #endif
3040     printf("\nWhile playing:\n"
3041            "q, ESC              quit\n"
3042            "f                   toggle full screen\n"
3043            "p, SPC              pause\n"
3044            "a                   cycle audio channel\n"
3045            "v                   cycle video channel\n"
3046            "t                   cycle subtitle channel\n"
3047            "w                   show audio waves\n"
3048            "s                   activate frame-step mode\n"
3049            "left/right          seek backward/forward 10 seconds\n"
3050            "down/up             seek backward/forward 1 minute\n"
3051            "page down/page up   seek backward/forward 10 minutes\n"
3052            "mouse click         seek to percentage in file corresponding to fraction of width\n"
3053            );
3054     return 0;
3055 }
3056
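/* Lock manager callback registered with av_lockmgr_register() below: backs
 * libavcodec's internal locking with SDL mutexes. It must return 0 on success
 * and nonzero on failure. */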
3057 static int lockmgr(void **mtx, enum AVLockOp op)
3058 {
3059     switch (op) {
3060     case AV_LOCK_CREATE:
3061         *mtx = SDL_CreateMutex();
3062         if (!*mtx)
3063             return 1;
3064         return 0;
3065     case AV_LOCK_OBTAIN:
3066         return !!SDL_LockMutex(*mtx);
3067     case AV_LOCK_RELEASE:
3068         return !!SDL_UnlockMutex(*mtx);
3069     case AV_LOCK_DESTROY:
3070         SDL_DestroyMutex(*mtx);
3071         return 0;
3072     }
3073     return 1;
3074 }
3075
3076 /* Program entry point */
3077 int main(int argc, char **argv)
3078 {
3079     int flags;
3080     VideoState *is;
3081     char dummy_videodriver[] = "SDL_VIDEODRIVER=dummy";
3082
3083     av_log_set_flags(AV_LOG_SKIP_REPEATED);
3084     parse_loglevel(argc, argv, options);
3085
3086     /* register all codecs, demuxers and protocols */
3087     avcodec_register_all();
3088 #if CONFIG_AVDEVICE
3089     avdevice_register_all();
3090 #endif
3091 #if CONFIG_AVFILTER
3092     avfilter_register_all();
3093 #endif
3094     av_register_all();
3095     avformat_network_init();
3096
3097     init_opts();
3098
3099     signal(SIGINT , sigterm_handler); /* Interrupt (ANSI).    */
3100     signal(SIGTERM, sigterm_handler); /* Termination (ANSI).  */
3101
3102     show_banner(argc, argv, options);
3103
3104     parse_options(NULL, argc, argv, options, opt_input_file);
3105
3106     if (!input_filename) {
3107         show_usage();
3108         fprintf(stderr, "An input file must be specified\n");
3109         fprintf(stderr, "Use -h to get full help or, even better, run 'man %s'\n", program_name);
3110         exit(1);
3111     }
3112
3113     if (display_disable) {
3114         video_disable = 1;
3115     }
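    /* Build the SDL init mask: drop the audio subsystem when audio is
     * disabled, and use SDL's dummy video driver when the display is disabled
     * so the event queue keeps working. */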
3116     flags = SDL_INIT_VIDEO | SDL_INIT_AUDIO | SDL_INIT_TIMER;
3117     if (audio_disable)
3118         flags &= ~SDL_INIT_AUDIO;
3119     if (display_disable)
3120         SDL_putenv(dummy_videodriver); /* For the event queue, we always need a video driver. */
3121 #if !defined(__MINGW32__) && !defined(__APPLE__)
3122     flags |= SDL_INIT_EVENTTHREAD; /* Not supported on Windows or Mac OS X */
3123 #endif
3124     if (SDL_Init (flags)) {
3125         fprintf(stderr, "Could not initialize SDL - %s\n", SDL_GetError());
3126         fprintf(stderr, "(Did you set the DISPLAY variable?)\n");
3127         exit(1);
3128     }
3129
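    /* Remember the desktop resolution so toggling full screen can use it. */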
3130     if (!display_disable) {
3131 #if HAVE_SDL_VIDEO_SIZE
3132         const SDL_VideoInfo *vi = SDL_GetVideoInfo();
3133         fs_screen_width = vi->current_w;
3134         fs_screen_height = vi->current_h;
3135 #endif
3136     }
3137
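    /* Ignore SDL event types that ffplay never handles so they do not
     * accumulate in the event queue. */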
3138     SDL_EventState(SDL_ACTIVEEVENT, SDL_IGNORE);
3139     SDL_EventState(SDL_SYSWMEVENT, SDL_IGNORE);
3140     SDL_EventState(SDL_USEREVENT, SDL_IGNORE);
3141
3142     if (av_lockmgr_register(lockmgr)) {
3143         fprintf(stderr, "Could not initialize lock manager!\n");
3144         do_exit(NULL);
3145     }
3146
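    /* flush_pkt is a sentinel packet: it is pushed into the packet queues on
     * seeks so the decoding threads know to flush their decoder state. */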
3147     av_init_packet(&flush_pkt);
3148     flush_pkt.data = (uint8_t *)(intptr_t)"FLUSH";
3149
3150     is = stream_open(input_filename, file_iformat);
3151     if (!is) {
3152         fprintf(stderr, "Failed to initialize VideoState!\n");
3153         do_exit(NULL);
3154     }
3155
3156     event_loop(is);
3157
3158     /* event_loop() never returns */
3159
3160     return 0;
3161 }