1 /*
2  * Copyright (c) 2003 Fabrice Bellard
3  *
4  * This file is part of FFmpeg.
5  *
6  * FFmpeg is free software; you can redistribute it and/or
7  * modify it under the terms of the GNU Lesser General Public
8  * License as published by the Free Software Foundation; either
9  * version 2.1 of the License, or (at your option) any later version.
10  *
11  * FFmpeg is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
14  * Lesser General Public License for more details.
15  *
16  * You should have received a copy of the GNU Lesser General Public
17  * License along with FFmpeg; if not, write to the Free Software
18  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
19  */
20
21 /**
22  * @file
23  * simple media player based on the FFmpeg libraries
24  */
25
26 #include "config.h"
27 #include <inttypes.h>
28 #include <math.h>
29 #include <limits.h>
30 #include <signal.h>
31 #include "libavutil/avstring.h"
32 #include "libavutil/colorspace.h"
33 #include "libavutil/mathematics.h"
34 #include "libavutil/pixdesc.h"
35 #include "libavutil/imgutils.h"
36 #include "libavutil/dict.h"
37 #include "libavutil/parseutils.h"
38 #include "libavutil/samplefmt.h"
39 #include "libavutil/avassert.h"
40 #include "libavutil/time.h"
41 #include "libavformat/avformat.h"
42 #include "libavdevice/avdevice.h"
43 #include "libswscale/swscale.h"
44 #include "libavutil/opt.h"
45 #include "libavcodec/avfft.h"
46 #include "libswresample/swresample.h"
47
48 #if CONFIG_AVFILTER
49 # include "libavfilter/avcodec.h"
50 # include "libavfilter/avfilter.h"
51 # include "libavfilter/avfiltergraph.h"
52 # include "libavfilter/buffersink.h"
53 # include "libavfilter/buffersrc.h"
54 #endif
55
56 #include <SDL.h>
57 #include <SDL_thread.h>
58
59 #include "cmdutils.h"
60
61 #include <assert.h>
62
63 const char program_name[] = "ffplay";
64 const int program_birth_year = 2003;
65
66 #define MAX_QUEUE_SIZE (15 * 1024 * 1024)
67 #define MIN_FRAMES 5
68
69 /* SDL audio buffer size, in samples. Should be small to have precise
70    A/V sync as SDL does not have hardware buffer fullness info. */
71 #define SDL_AUDIO_BUFFER_SIZE 1024
72
73 /* no AV sync correction is done if below the AV sync threshold */
74 #define AV_SYNC_THRESHOLD 0.01
75 /* no AV correction is done if too big error */
76 #define AV_NOSYNC_THRESHOLD 10.0
77
78 /* maximum audio speed change to get correct sync */
79 #define SAMPLE_CORRECTION_PERCENT_MAX 10
80
81 /* we use about AUDIO_DIFF_AVG_NB A-V differences to make the average */
82 #define AUDIO_DIFF_AVG_NB   20
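/*
 * Informally, how these constants interact: AV_SYNC_THRESHOLD and
 * AV_NOSYNC_THRESHOLD bound when video timing is nudged towards the master
 * clock (see compute_target_delay() below); errors below the sync threshold
 * are left alone, and errors beyond the no-sync threshold are treated as a
 * clock jump and are not corrected by duplicating/dropping frames either.
 * On the audio side the A-V difference is smoothed over roughly
 * AUDIO_DIFF_AVG_NB measurements (audio_diff_cum / audio_diff_avg_coef in
 * VideoState), and the resulting sample-rate correction is clamped to
 * SAMPLE_CORRECTION_PERCENT_MAX percent so it stays inaudible.
 */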
83
84 /* NOTE: the size must be big enough to compensate for the hardware audio buffer size */
85 #define SAMPLE_ARRAY_SIZE (2 * 65536)
86
87 static int sws_flags = SWS_BICUBIC;
88
89 typedef struct PacketQueue {
90     AVPacketList *first_pkt, *last_pkt;
91     int nb_packets;
92     int size;
93     int abort_request;
94     SDL_mutex *mutex;
95     SDL_cond *cond;
96 } PacketQueue;
97
98 #define VIDEO_PICTURE_QUEUE_SIZE 2
99 #define SUBPICTURE_QUEUE_SIZE 4
100
101 typedef struct VideoPicture {
102     double pts;                                  ///< presentation time stamp for this picture
103     int64_t pos;                                 ///< byte position in file
104     int skip;
105     SDL_Overlay *bmp;
106     int width, height; /* source width & height */
107     AVRational sample_aspect_ratio;
108     int allocated;
109     int reallocate;
110
111 #if CONFIG_AVFILTER
112     AVFilterBufferRef *picref;
113 #endif
114 } VideoPicture;
115
116 typedef struct SubPicture {
117     double pts; /* presentation time stamp for this picture */
118     AVSubtitle sub;
119 } SubPicture;
120
121 typedef struct AudioParams {
122     int freq;
123     int channels;
124     int channel_layout;
125     enum AVSampleFormat fmt;
126 } AudioParams;
127
128 enum {
129     AV_SYNC_AUDIO_MASTER, /* default choice */
130     AV_SYNC_VIDEO_MASTER,
131     AV_SYNC_EXTERNAL_CLOCK, /* synchronize to an external clock */
132 };
133
134 typedef struct VideoState {
135     SDL_Thread *read_tid;
136     SDL_Thread *video_tid;
137     SDL_Thread *refresh_tid;
138     AVInputFormat *iformat;
139     int no_background;
140     int abort_request;
141     int force_refresh;
142     int paused;
143     int last_paused;
144     int seek_req;
145     int seek_flags;
146     int64_t seek_pos;
147     int64_t seek_rel;
148     int read_pause_return;
149     AVFormatContext *ic;
150
151     int audio_stream;
152
153     int av_sync_type;
154     double external_clock; /* external clock base */
155     int64_t external_clock_time;
156
157     double audio_clock;
158     double audio_diff_cum; /* used for AV difference average computation */
159     double audio_diff_avg_coef;
160     double audio_diff_threshold;
161     int audio_diff_avg_count;
162     AVStream *audio_st;
163     PacketQueue audioq;
164     int audio_hw_buf_size;
165     DECLARE_ALIGNED(16,uint8_t,audio_buf2)[AVCODEC_MAX_AUDIO_FRAME_SIZE * 4];
166     uint8_t silence_buf[SDL_AUDIO_BUFFER_SIZE];
167     uint8_t *audio_buf;
168     uint8_t *audio_buf1;
169     unsigned int audio_buf_size; /* in bytes */
170     int audio_buf_index; /* in bytes */
171     int audio_write_buf_size;
172     AVPacket audio_pkt_temp;
173     AVPacket audio_pkt;
174     struct AudioParams audio_src;
175     struct AudioParams audio_tgt;
176     struct SwrContext *swr_ctx;
177     double audio_current_pts;
178     double audio_current_pts_drift;
179     int frame_drops_early;
180     int frame_drops_late;
181     AVFrame *frame;
182
183     enum ShowMode {
184         SHOW_MODE_NONE = -1, SHOW_MODE_VIDEO = 0, SHOW_MODE_WAVES, SHOW_MODE_RDFT, SHOW_MODE_NB
185     } show_mode;
186     int16_t sample_array[SAMPLE_ARRAY_SIZE];
187     int sample_array_index;
188     int last_i_start;
189     RDFTContext *rdft;
190     int rdft_bits;
191     FFTSample *rdft_data;
192     int xpos;
193
194     SDL_Thread *subtitle_tid;
195     int subtitle_stream;
196     int subtitle_stream_changed;
197     AVStream *subtitle_st;
198     PacketQueue subtitleq;
199     SubPicture subpq[SUBPICTURE_QUEUE_SIZE];
200     int subpq_size, subpq_rindex, subpq_windex;
201     SDL_mutex *subpq_mutex;
202     SDL_cond *subpq_cond;
203
204     double frame_timer;
205     double frame_last_pts;
206     double frame_last_duration;
207     double frame_last_dropped_pts;
208     double frame_last_returned_time;
209     double frame_last_filter_delay;
210     int64_t frame_last_dropped_pos;
211     double video_clock;                          ///< pts of last decoded frame / predicted pts of next decoded frame
212     int video_stream;
213     AVStream *video_st;
214     PacketQueue videoq;
215     double video_current_pts;                    ///< current displayed pts (different from video_clock if frame fifos are used)
216     double video_current_pts_drift;              ///< video_current_pts - time (av_gettime) at which we updated video_current_pts - used to have running video pts
217     int64_t video_current_pos;                   ///< current displayed file pos
218     VideoPicture pictq[VIDEO_PICTURE_QUEUE_SIZE];
219     int pictq_size, pictq_rindex, pictq_windex;
220     SDL_mutex *pictq_mutex;
221     SDL_cond *pictq_cond;
222 #if !CONFIG_AVFILTER
223     struct SwsContext *img_convert_ctx;
224 #endif
225
226     char filename[1024];
227     int width, height, xleft, ytop;
228     int step;
229
230 #if CONFIG_AVFILTER
231     AVFilterContext *in_video_filter;           ///< the first filter in the video chain
232     AVFilterContext *out_video_filter;          ///< the last filter in the video chain
233     int use_dr1;
234     FrameBuffer *buffer_pool;
235 #endif
236
237     int refresh;
238     int last_video_stream, last_audio_stream, last_subtitle_stream;
239 } VideoState;
240
241 typedef struct AllocEventProps {
242     VideoState *is;
243     AVFrame *frame;
244 } AllocEventProps;
245
246 static int opt_help(const char *opt, const char *arg);
247
248 /* options specified by the user */
249 static AVInputFormat *file_iformat;
250 static const char *input_filename;
251 static const char *window_title;
252 static int fs_screen_width;
253 static int fs_screen_height;
254 static int screen_width  = 0;
255 static int screen_height = 0;
256 static int audio_disable;
257 static int video_disable;
258 static int wanted_stream[AVMEDIA_TYPE_NB] = {
259     [AVMEDIA_TYPE_AUDIO]    = -1,
260     [AVMEDIA_TYPE_VIDEO]    = -1,
261     [AVMEDIA_TYPE_SUBTITLE] = -1,
262 };
263 static int seek_by_bytes = -1;
264 static int display_disable;
265 static int show_status = 1;
266 static int av_sync_type = AV_SYNC_AUDIO_MASTER;
267 static int64_t start_time = AV_NOPTS_VALUE;
268 static int64_t duration = AV_NOPTS_VALUE;
269 static int workaround_bugs = 1;
270 static int fast = 0;
271 static int genpts = 0;
272 static int lowres = 0;
273 static int idct = FF_IDCT_AUTO;
274 static enum AVDiscard skip_frame       = AVDISCARD_DEFAULT;
275 static enum AVDiscard skip_idct        = AVDISCARD_DEFAULT;
276 static enum AVDiscard skip_loop_filter = AVDISCARD_DEFAULT;
277 static int error_concealment = 3;
278 static int decoder_reorder_pts = -1;
279 static int autoexit;
280 static int exit_on_keydown;
281 static int exit_on_mousedown;
282 static int loop = 1;
283 static int framedrop = -1;
284 static enum ShowMode show_mode = SHOW_MODE_NONE;
285 static const char *audio_codec_name;
286 static const char *subtitle_codec_name;
287 static const char *video_codec_name;
288 static int rdftspeed = 20;
289 #if CONFIG_AVFILTER
290 static char *vfilters = NULL;
291 #endif
292
293 /* current context */
294 static int is_full_screen;
295 static int64_t audio_callback_time;
296
297 static AVPacket flush_pkt;
298
299 #define FF_ALLOC_EVENT   (SDL_USEREVENT)
300 #define FF_REFRESH_EVENT (SDL_USEREVENT + 1)
301 #define FF_QUIT_EVENT    (SDL_USEREVENT + 2)
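/*
 * These SDL user events are the cross-thread plumbing: queue_picture() (on
 * the video thread) pushes FF_ALLOC_EVENT so the YUV overlay is (re)allocated
 * on the main thread in alloc_picture(), refresh_thread() pushes
 * FF_REFRESH_EVENT to drive video_refresh() from the main event loop, and
 * FF_QUIT_EVENT is pushed on fatal errors (e.g. when the filter graph cannot
 * be configured) so the event loop can shut everything down.
 */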
302
303 static SDL_Surface *screen;
304
305 void av_noreturn exit_program(int ret)
306 {
307     exit(ret);
308 }
309
310 static int packet_queue_put_private(PacketQueue *q, AVPacket *pkt)
311 {
312     AVPacketList *pkt1;
313
314     if (q->abort_request)
315        return -1;
316
317     pkt1 = av_malloc(sizeof(AVPacketList));
318     if (!pkt1)
319         return -1;
320     pkt1->pkt = *pkt;
321     pkt1->next = NULL;
322
323     if (!q->last_pkt)
324         q->first_pkt = pkt1;
325     else
326         q->last_pkt->next = pkt1;
327     q->last_pkt = pkt1;
328     q->nb_packets++;
329     q->size += pkt1->pkt.size + sizeof(*pkt1);
330     /* XXX: should duplicate packet data in DV case */
331     SDL_CondSignal(q->cond);
332     return 0;
333 }
334
335 static int packet_queue_put(PacketQueue *q, AVPacket *pkt)
336 {
337     int ret;
338
339     /* duplicate the packet */
340     if (pkt != &flush_pkt && av_dup_packet(pkt) < 0)
341         return -1;
342
343     SDL_LockMutex(q->mutex);
344     ret = packet_queue_put_private(q, pkt);
345     SDL_UnlockMutex(q->mutex);
346
347     if (pkt != &flush_pkt && ret < 0)
348         av_free_packet(pkt);
349
350     return ret;
351 }
352
353 /* packet queue handling */
354 static void packet_queue_init(PacketQueue *q)
355 {
356     memset(q, 0, sizeof(PacketQueue));
357     q->mutex = SDL_CreateMutex();
358     q->cond = SDL_CreateCond();
359     q->abort_request = 1;
360 }
361
362 static void packet_queue_flush(PacketQueue *q)
363 {
364     AVPacketList *pkt, *pkt1;
365
366     SDL_LockMutex(q->mutex);
367     for (pkt = q->first_pkt; pkt != NULL; pkt = pkt1) {
368         pkt1 = pkt->next;
369         av_free_packet(&pkt->pkt);
370         av_freep(&pkt);
371     }
372     q->last_pkt = NULL;
373     q->first_pkt = NULL;
374     q->nb_packets = 0;
375     q->size = 0;
376     SDL_UnlockMutex(q->mutex);
377 }
378
379 static void packet_queue_destroy(PacketQueue *q)
380 {
381     packet_queue_flush(q);
382     SDL_DestroyMutex(q->mutex);
383     SDL_DestroyCond(q->cond);
384 }
385
386 static void packet_queue_abort(PacketQueue *q)
387 {
388     SDL_LockMutex(q->mutex);
389
390     q->abort_request = 1;
391
392     SDL_CondSignal(q->cond);
393
394     SDL_UnlockMutex(q->mutex);
395 }
396
397 static void packet_queue_start(PacketQueue *q)
398 {
399     SDL_LockMutex(q->mutex);
400     q->abort_request = 0;
401     packet_queue_put_private(q, &flush_pkt);
402     SDL_UnlockMutex(q->mutex);
403 }
404
405 /* return < 0 if aborted, 0 if no packet and > 0 if packet.  */
406 static int packet_queue_get(PacketQueue *q, AVPacket *pkt, int block)
407 {
408     AVPacketList *pkt1;
409     int ret;
410
411     SDL_LockMutex(q->mutex);
412
413     for (;;) {
414         if (q->abort_request) {
415             ret = -1;
416             break;
417         }
418
419         pkt1 = q->first_pkt;
420         if (pkt1) {
421             q->first_pkt = pkt1->next;
422             if (!q->first_pkt)
423                 q->last_pkt = NULL;
424             q->nb_packets--;
425             q->size -= pkt1->pkt.size + sizeof(*pkt1);
426             *pkt = pkt1->pkt;
427             av_free(pkt1);
428             ret = 1;
429             break;
430         } else if (!block) {
431             ret = 0;
432             break;
433         } else {
434             SDL_CondWait(q->cond, q->mutex);
435         }
436     }
437     SDL_UnlockMutex(q->mutex);
438     return ret;
439 }
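/*
 * Typical consumer pattern for this queue (a sketch; the real decoder loops
 * below add pause handling and more state resets on flush):
 *
 *     AVPacket pkt;
 *     for (;;) {
 *         if (packet_queue_get(&q, &pkt, 1) < 0)
 *             break;                       // queue was aborted
 *         if (pkt.data == flush_pkt.data) {
 *             avcodec_flush_buffers(ctx);  // a seek happened, drop codec state
 *             continue;
 *         }
 *         ...decode pkt...
 *         av_free_packet(&pkt);
 *     }
 */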
440
441 static inline void fill_rectangle(SDL_Surface *screen,
442                                   int x, int y, int w, int h, int color)
443 {
444     SDL_Rect rect;
445     rect.x = x;
446     rect.y = y;
447     rect.w = w;
448     rect.h = h;
449     SDL_FillRect(screen, &rect, color);
450 }
451
452 #define ALPHA_BLEND(a, oldp, newp, s)\
453 ((((oldp << s) * (255 - (a))) + (newp * (a))) / (255 << s))
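/*
 * With s == 0 this is the usual integer alpha blend,
 * (oldp*(255-a) + newp*a) / 255; e.g. ALPHA_BLEND(128, 16, 235, 0) gives
 * (16*127 + 235*128) / 255 = 125, roughly half way between 16 and 235.
 * A non-zero shift s is used where newp holds the sum of 2^s neighbouring
 * chroma samples, so dividing by (255 << s) brings the result back into range.
 */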
454
455 #define RGBA_IN(r, g, b, a, s)\
456 {\
457     unsigned int v = ((const uint32_t *)(s))[0];\
458     a = (v >> 24) & 0xff;\
459     r = (v >> 16) & 0xff;\
460     g = (v >> 8) & 0xff;\
461     b = v & 0xff;\
462 }
463
464 #define YUVA_IN(y, u, v, a, s, pal)\
465 {\
466     unsigned int val = ((const uint32_t *)(pal))[*(const uint8_t*)(s)];\
467     a = (val >> 24) & 0xff;\
468     y = (val >> 16) & 0xff;\
469     u = (val >> 8) & 0xff;\
470     v = val & 0xff;\
471 }
472
473 #define YUVA_OUT(d, y, u, v, a)\
474 {\
475     ((uint32_t *)(d))[0] = (a << 24) | (y << 16) | (u << 8) | v;\
476 }
477
478
479 #define BPP 1
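/* Subtitle bitmaps arrive as 8-bit palettized pixels (hence BPP == 1): each
 * source byte in rect->pict.data[0] indexes a packed 32-bit AYUV entry in the
 * palette stored in rect->pict.data[1], which YUVA_IN() above unpacks. */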
480
481 static void blend_subrect(AVPicture *dst, const AVSubtitleRect *rect, int imgw, int imgh)
482 {
483     int wrap, wrap3, width2, skip2;
484     int y, u, v, a, u1, v1, a1, w, h;
485     uint8_t *lum, *cb, *cr;
486     const uint8_t *p;
487     const uint32_t *pal;
488     int dstx, dsty, dstw, dsth;
489
490     dstw = av_clip(rect->w, 0, imgw);
491     dsth = av_clip(rect->h, 0, imgh);
492     dstx = av_clip(rect->x, 0, imgw - dstw);
493     dsty = av_clip(rect->y, 0, imgh - dsth);
494     lum = dst->data[0] + dsty * dst->linesize[0];
495     cb  = dst->data[1] + (dsty >> 1) * dst->linesize[1];
496     cr  = dst->data[2] + (dsty >> 1) * dst->linesize[2];
497
498     width2 = ((dstw + 1) >> 1) + (dstx & ~dstw & 1);
499     skip2 = dstx >> 1;
500     wrap = dst->linesize[0];
501     wrap3 = rect->pict.linesize[0];
502     p = rect->pict.data[0];
503     pal = (const uint32_t *)rect->pict.data[1];  /* Now in YCrCb! */
504
505     if (dsty & 1) {
506         lum += dstx;
507         cb += skip2;
508         cr += skip2;
509
510         if (dstx & 1) {
511             YUVA_IN(y, u, v, a, p, pal);
512             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
513             cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
514             cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
515             cb++;
516             cr++;
517             lum++;
518             p += BPP;
519         }
520         for (w = dstw - (dstx & 1); w >= 2; w -= 2) {
521             YUVA_IN(y, u, v, a, p, pal);
522             u1 = u;
523             v1 = v;
524             a1 = a;
525             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
526
527             YUVA_IN(y, u, v, a, p + BPP, pal);
528             u1 += u;
529             v1 += v;
530             a1 += a;
531             lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
532             cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
533             cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
534             cb++;
535             cr++;
536             p += 2 * BPP;
537             lum += 2;
538         }
539         if (w) {
540             YUVA_IN(y, u, v, a, p, pal);
541             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
542             cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
543             cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
544             p++;
545             lum++;
546         }
547         p += wrap3 - dstw * BPP;
548         lum += wrap - dstw - dstx;
549         cb += dst->linesize[1] - width2 - skip2;
550         cr += dst->linesize[2] - width2 - skip2;
551     }
552     for (h = dsth - (dsty & 1); h >= 2; h -= 2) {
553         lum += dstx;
554         cb += skip2;
555         cr += skip2;
556
557         if (dstx & 1) {
558             YUVA_IN(y, u, v, a, p, pal);
559             u1 = u;
560             v1 = v;
561             a1 = a;
562             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
563             p += wrap3;
564             lum += wrap;
565             YUVA_IN(y, u, v, a, p, pal);
566             u1 += u;
567             v1 += v;
568             a1 += a;
569             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
570             cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
571             cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
572             cb++;
573             cr++;
574             p += -wrap3 + BPP;
575             lum += -wrap + 1;
576         }
577         for (w = dstw - (dstx & 1); w >= 2; w -= 2) {
578             YUVA_IN(y, u, v, a, p, pal);
579             u1 = u;
580             v1 = v;
581             a1 = a;
582             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
583
584             YUVA_IN(y, u, v, a, p + BPP, pal);
585             u1 += u;
586             v1 += v;
587             a1 += a;
588             lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
589             p += wrap3;
590             lum += wrap;
591
592             YUVA_IN(y, u, v, a, p, pal);
593             u1 += u;
594             v1 += v;
595             a1 += a;
596             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
597
598             YUVA_IN(y, u, v, a, p + BPP, pal);
599             u1 += u;
600             v1 += v;
601             a1 += a;
602             lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
603
604             cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 2);
605             cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 2);
606
607             cb++;
608             cr++;
609             p += -wrap3 + 2 * BPP;
610             lum += -wrap + 2;
611         }
612         if (w) {
613             YUVA_IN(y, u, v, a, p, pal);
614             u1 = u;
615             v1 = v;
616             a1 = a;
617             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
618             p += wrap3;
619             lum += wrap;
620             YUVA_IN(y, u, v, a, p, pal);
621             u1 += u;
622             v1 += v;
623             a1 += a;
624             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
625             cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
626             cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
627             cb++;
628             cr++;
629             p += -wrap3 + BPP;
630             lum += -wrap + 1;
631         }
632         p += wrap3 + (wrap3 - dstw * BPP);
633         lum += wrap + (wrap - dstw - dstx);
634         cb += dst->linesize[1] - width2 - skip2;
635         cr += dst->linesize[2] - width2 - skip2;
636     }
637     /* handle odd height */
638     if (h) {
639         lum += dstx;
640         cb += skip2;
641         cr += skip2;
642
643         if (dstx & 1) {
644             YUVA_IN(y, u, v, a, p, pal);
645             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
646             cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
647             cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
648             cb++;
649             cr++;
650             lum++;
651             p += BPP;
652         }
653         for (w = dstw - (dstx & 1); w >= 2; w -= 2) {
654             YUVA_IN(y, u, v, a, p, pal);
655             u1 = u;
656             v1 = v;
657             a1 = a;
658             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
659
660             YUVA_IN(y, u, v, a, p + BPP, pal);
661             u1 += u;
662             v1 += v;
663             a1 += a;
664             lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
665             cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u, 1);
666             cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v, 1);
667             cb++;
668             cr++;
669             p += 2 * BPP;
670             lum += 2;
671         }
672         if (w) {
673             YUVA_IN(y, u, v, a, p, pal);
674             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
675             cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
676             cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
677         }
678     }
679 }
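/*
 * blend_subrect() assumes the destination picture is YUV 4:2:0: one luma
 * sample per pixel but only one Cb/Cr sample per 2x2 block, which is why the
 * main loops advance two rows and two columns at a time, accumulate u1/v1
 * over the block before blending, and why the odd first/last row and column
 * need the special cases above.
 */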
680
681 static void free_subpicture(SubPicture *sp)
682 {
683     avsubtitle_free(&sp->sub);
684 }
685
686 static void video_image_display(VideoState *is)
687 {
688     VideoPicture *vp;
689     SubPicture *sp;
690     AVPicture pict;
691     float aspect_ratio;
692     int width, height, x, y;
693     SDL_Rect rect;
694     int i;
695
696     vp = &is->pictq[is->pictq_rindex];
697     if (vp->bmp) {
698         if (vp->sample_aspect_ratio.num == 0)
699             aspect_ratio = 0;
700         else
701             aspect_ratio = av_q2d(vp->sample_aspect_ratio);
702
703         if (aspect_ratio <= 0.0)
704             aspect_ratio = 1.0;
705         aspect_ratio *= (float)vp->width / (float)vp->height;
706
707         if (is->subtitle_st) {
708             if (is->subpq_size > 0) {
709                 sp = &is->subpq[is->subpq_rindex];
710
711                 if (vp->pts >= sp->pts + ((float) sp->sub.start_display_time / 1000)) {
712                     SDL_LockYUVOverlay (vp->bmp);
713
714                     pict.data[0] = vp->bmp->pixels[0];
715                     pict.data[1] = vp->bmp->pixels[2];
716                     pict.data[2] = vp->bmp->pixels[1];
717
718                     pict.linesize[0] = vp->bmp->pitches[0];
719                     pict.linesize[1] = vp->bmp->pitches[2];
720                     pict.linesize[2] = vp->bmp->pitches[1];
721
722                     for (i = 0; i < sp->sub.num_rects; i++)
723                         blend_subrect(&pict, sp->sub.rects[i],
724                                       vp->bmp->w, vp->bmp->h);
725
726                     SDL_UnlockYUVOverlay (vp->bmp);
727                 }
728             }
729         }
730
731
732         /* XXX: we assume the screen has a 1:1 pixel aspect ratio */
733         height = is->height;
734         width = ((int)rint(height * aspect_ratio)) & ~1;
735         if (width > is->width) {
736             width = is->width;
737             height = ((int)rint(width / aspect_ratio)) & ~1;
738         }
739         x = (is->width - width) / 2;
740         y = (is->height - height) / 2;
741         is->no_background = 0;
742         rect.x = is->xleft + x;
743         rect.y = is->ytop  + y;
744         rect.w = FFMAX(width,  1);
745         rect.h = FFMAX(height, 1);
746         SDL_DisplayYUVOverlay(vp->bmp, &rect);
747     }
748 }
749
750 static inline int compute_mod(int a, int b)
751 {
752     return a < 0 ? a%b + b : a%b;
753 }
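/* compute_mod() is a true mathematical modulo, i.e. the result is always in
 * [0, b): compute_mod(-3, 10) == 7, whereas -3 % 10 == -3 in C.  It is used
 * to wrap indexes into the circular sample_array. */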
754
755 static void video_audio_display(VideoState *s)
756 {
757     int i, i_start, x, y1, y, ys, delay, n, nb_display_channels;
758     int ch, channels, h, h2, bgcolor, fgcolor;
759     int64_t time_diff;
760     int rdft_bits, nb_freq;
761
762     for (rdft_bits = 1; (1 << rdft_bits) < 2 * s->height; rdft_bits++)
763         ;
764     nb_freq = 1 << (rdft_bits - 1);
765
766     /* compute display index : center on currently output samples */
767     channels = s->audio_tgt.channels;
768     nb_display_channels = channels;
769     if (!s->paused) {
770         int data_used= s->show_mode == SHOW_MODE_WAVES ? s->width : (2*nb_freq);
771         n = 2 * channels;
772         delay = s->audio_write_buf_size;
773         delay /= n;
774
775         /* to be more precise, we take into account the time spent since
776            the last buffer computation */
777         if (audio_callback_time) {
778             time_diff = av_gettime() - audio_callback_time;
779             delay -= (time_diff * s->audio_tgt.freq) / 1000000;
780         }
781
782         delay += 2 * data_used;
783         if (delay < data_used)
784             delay = data_used;
785
786         i_start= x = compute_mod(s->sample_array_index - delay * channels, SAMPLE_ARRAY_SIZE);
787         if (s->show_mode == SHOW_MODE_WAVES) {
788             h = INT_MIN;
789             for (i = 0; i < 1000; i += channels) {
790                 int idx = (SAMPLE_ARRAY_SIZE + x - i) % SAMPLE_ARRAY_SIZE;
791                 int a = s->sample_array[idx];
792                 int b = s->sample_array[(idx + 4 * channels) % SAMPLE_ARRAY_SIZE];
793                 int c = s->sample_array[(idx + 5 * channels) % SAMPLE_ARRAY_SIZE];
794                 int d = s->sample_array[(idx + 9 * channels) % SAMPLE_ARRAY_SIZE];
795                 int score = a - d;
796                 if (h < score && (b ^ c) < 0) {
797                     h = score;
798                     i_start = idx;
799                 }
800             }
801         }
802
803         s->last_i_start = i_start;
804     } else {
805         i_start = s->last_i_start;
806     }
807
808     bgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0x00);
809     if (s->show_mode == SHOW_MODE_WAVES) {
810         fill_rectangle(screen,
811                        s->xleft, s->ytop, s->width, s->height,
812                        bgcolor);
813
814         fgcolor = SDL_MapRGB(screen->format, 0xff, 0xff, 0xff);
815
816         /* total height for one channel */
817         h = s->height / nb_display_channels;
818         /* graph height / 2 */
819         h2 = (h * 9) / 20;
820         for (ch = 0; ch < nb_display_channels; ch++) {
821             i = i_start + ch;
822             y1 = s->ytop + ch * h + (h / 2); /* position of center line */
823             for (x = 0; x < s->width; x++) {
824                 y = (s->sample_array[i] * h2) >> 15;
825                 if (y < 0) {
826                     y = -y;
827                     ys = y1 - y;
828                 } else {
829                     ys = y1;
830                 }
831                 fill_rectangle(screen,
832                                s->xleft + x, ys, 1, y,
833                                fgcolor);
834                 i += channels;
835                 if (i >= SAMPLE_ARRAY_SIZE)
836                     i -= SAMPLE_ARRAY_SIZE;
837             }
838         }
839
840         fgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0xff);
841
842         for (ch = 1; ch < nb_display_channels; ch++) {
843             y = s->ytop + ch * h;
844             fill_rectangle(screen,
845                            s->xleft, y, s->width, 1,
846                            fgcolor);
847         }
848         SDL_UpdateRect(screen, s->xleft, s->ytop, s->width, s->height);
849     } else {
850         nb_display_channels= FFMIN(nb_display_channels, 2);
851         if (rdft_bits != s->rdft_bits) {
852             av_rdft_end(s->rdft);
853             av_free(s->rdft_data);
854             s->rdft = av_rdft_init(rdft_bits, DFT_R2C);
855             s->rdft_bits = rdft_bits;
856             s->rdft_data = av_malloc(4 * nb_freq * sizeof(*s->rdft_data));
857         }
858         {
859             FFTSample *data[2];
860             for (ch = 0; ch < nb_display_channels; ch++) {
861                 data[ch] = s->rdft_data + 2 * nb_freq * ch;
862                 i = i_start + ch;
863                 for (x = 0; x < 2 * nb_freq; x++) {
864                     double w = (x-nb_freq) * (1.0 / nb_freq);
865                     data[ch][x] = s->sample_array[i] * (1.0 - w * w);
866                     i += channels;
867                     if (i >= SAMPLE_ARRAY_SIZE)
868                         i -= SAMPLE_ARRAY_SIZE;
869                 }
870                 av_rdft_calc(s->rdft, data[ch]);
871             }
872             // Least efficient way to do this; we should of course access the FFT output directly, but it's more than fast enough
873             for (y = 0; y < s->height; y++) {
874                 double w = 1 / sqrt(nb_freq);
875                 int a = sqrt(w * sqrt(data[0][2 * y + 0] * data[0][2 * y + 0] + data[0][2 * y + 1] * data[0][2 * y + 1]));
876                 int b = (nb_display_channels == 2 ) ? sqrt(w * sqrt(data[1][2 * y + 0] * data[1][2 * y + 0]
877                        + data[1][2 * y + 1] * data[1][2 * y + 1])) : a;
878                 a = FFMIN(a, 255);
879                 b = FFMIN(b, 255);
880                 fgcolor = SDL_MapRGB(screen->format, a, b, (a + b) / 2);
881
882                 fill_rectangle(screen,
883                             s->xpos, s->height-y, 1, 1,
884                             fgcolor);
885             }
886         }
887         SDL_UpdateRect(screen, s->xpos, s->ytop, 1, s->height);
888         if (!s->paused)
889             s->xpos++;
890         if (s->xpos >= s->width)
891             s->xpos= s->xleft;
892     }
893 }
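/*
 * Notes on the two audio visualizations above. In SHOW_MODE_WAVES the code
 * first estimates how many samples are still sitting in the SDL/driver
 * buffers (the `delay` computation) so the drawn window is centred on what is
 * currently audible, then looks for a suitable zero crossing (the
 * (b ^ c) < 0 test) so the waveform stays roughly stable between refreshes.
 * In the RDFT mode each displayed column is one transform of 2*nb_freq recent
 * samples, weighted by the parabolic window (1 - w*w) before av_rdft_calc(),
 * with the magnitudes square-root compressed before being mapped to colours.
 */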
894
895 static void stream_close(VideoState *is)
896 {
897     VideoPicture *vp;
898     int i;
899     /* XXX: use a special url_shutdown call to abort parse cleanly */
900     is->abort_request = 1;
901     SDL_WaitThread(is->read_tid, NULL);
902     SDL_WaitThread(is->refresh_tid, NULL);
903     packet_queue_destroy(&is->videoq);
904     packet_queue_destroy(&is->audioq);
905     packet_queue_destroy(&is->subtitleq);
906
907     /* free all pictures */
908     for (i = 0; i < VIDEO_PICTURE_QUEUE_SIZE; i++) {
909         vp = &is->pictq[i];
910 #if CONFIG_AVFILTER
911         if (vp->picref) {
912             avfilter_unref_buffer(vp->picref);
913             vp->picref = NULL;
914         }
915 #endif
916         if (vp->bmp) {
917             SDL_FreeYUVOverlay(vp->bmp);
918             vp->bmp = NULL;
919         }
920     }
921     SDL_DestroyMutex(is->pictq_mutex);
922     SDL_DestroyCond(is->pictq_cond);
923     SDL_DestroyMutex(is->subpq_mutex);
924     SDL_DestroyCond(is->subpq_cond);
925 #if !CONFIG_AVFILTER
926     if (is->img_convert_ctx)
927         sws_freeContext(is->img_convert_ctx);
928 #endif
929     av_free(is);
930 }
931
932 static void do_exit(VideoState *is)
933 {
934     if (is) {
935         stream_close(is);
936     }
937     av_lockmgr_register(NULL);
938     uninit_opts();
939 #if CONFIG_AVFILTER
940     avfilter_uninit();
941 #endif
942     avformat_network_deinit();
943     if (show_status)
944         printf("\n");
945     SDL_Quit();
946     av_log(NULL, AV_LOG_QUIET, "%s", "");
947     exit(0);
948 }
949
950 static void sigterm_handler(int sig)
951 {
952     exit(123);
953 }
954
955 static int video_open(VideoState *is, int force_set_video_mode)
956 {
957     int flags = SDL_HWSURFACE | SDL_ASYNCBLIT | SDL_HWACCEL;
958     int w,h;
959     VideoPicture *vp = &is->pictq[is->pictq_rindex];
960
961     if (is_full_screen) flags |= SDL_FULLSCREEN;
962     else                flags |= SDL_RESIZABLE;
963
964     if (is_full_screen && fs_screen_width) {
965         w = fs_screen_width;
966         h = fs_screen_height;
967     } else if (!is_full_screen && screen_width) {
968         w = screen_width;
969         h = screen_height;
970     } else if (vp->width) {
971         w = vp->width;
972         h = vp->height;
973     } else {
974         w = 640;
975         h = 480;
976     }
977     if (screen && is->width == screen->w && screen->w == w
978        && is->height== screen->h && screen->h == h && !force_set_video_mode)
979         return 0;
980     screen = SDL_SetVideoMode(w, h, 0, flags);
981     if (!screen) {
982         fprintf(stderr, "SDL: could not set video mode - exiting\n");
983         do_exit(is);
984     }
985     if (!window_title)
986         window_title = input_filename;
987     SDL_WM_SetCaption(window_title, window_title);
988
989     is->width  = screen->w;
990     is->height = screen->h;
991
992     return 0;
993 }
994
995 /* display the current picture, if any */
996 static void video_display(VideoState *is)
997 {
998     if (!screen)
999         video_open(is, 0);
1000     if (is->audio_st && is->show_mode != SHOW_MODE_VIDEO)
1001         video_audio_display(is);
1002     else if (is->video_st)
1003         video_image_display(is);
1004 }
1005
1006 static int refresh_thread(void *opaque)
1007 {
1008     VideoState *is= opaque;
1009     while (!is->abort_request) {
1010         SDL_Event event;
1011         event.type = FF_REFRESH_EVENT;
1012         event.user.data1 = opaque;
1013         if (!is->refresh && (!is->paused || is->force_refresh)) {
1014             is->refresh = 1;
1015             SDL_PushEvent(&event);
1016         }
1017         // FIXME: ideally we should wait for the correct time, but SDL's event passing is so slow it would be silly
1018         av_usleep(is->audio_st && is->show_mode != SHOW_MODE_VIDEO ? rdftspeed*1000 : 5000);
1019     }
1020     return 0;
1021 }
1022
1023 /* get the current audio clock value */
1024 static double get_audio_clock(VideoState *is)
1025 {
1026     if (is->paused) {
1027         return is->audio_current_pts;
1028     } else {
1029         return is->audio_current_pts_drift + av_gettime() / 1000000.0;
1030     }
1031 }
1032
1033 /* get the current video clock value */
1034 static double get_video_clock(VideoState *is)
1035 {
1036     if (is->paused) {
1037         return is->video_current_pts;
1038     } else {
1039         return is->video_current_pts_drift + av_gettime() / 1000000.0;
1040     }
1041 }
1042
1043 /* get the current external clock value */
1044 static double get_external_clock(VideoState *is)
1045 {
1046     int64_t ti;
1047     ti = av_gettime();
1048     return is->external_clock + ((ti - is->external_clock_time) * 1e-6);
1049 }
1050
1051 /* get the current master clock value */
1052 static double get_master_clock(VideoState *is)
1053 {
1054     double val;
1055
1056     if (is->av_sync_type == AV_SYNC_VIDEO_MASTER) {
1057         if (is->video_st)
1058             val = get_video_clock(is);
1059         else
1060             val = get_audio_clock(is);
1061     } else if (is->av_sync_type == AV_SYNC_AUDIO_MASTER) {
1062         if (is->audio_st)
1063             val = get_audio_clock(is);
1064         else
1065             val = get_video_clock(is);
1066     } else {
1067         val = get_external_clock(is);
1068     }
1069     return val;
1070 }
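/*
 * The *_pts_drift fields implement a clock that keeps running between updates
 * without any timer: at update time the code stores
 *     drift = pts - now
 * so that later
 *     clock(now') = drift + now' = pts + (now' - now),
 * i.e. the last known pts advanced by the real time elapsed since it was
 * stored.  While paused, the raw pts is returned instead, so the clock
 * freezes.
 */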
1071
1072 /* seek in the stream */
1073 static void stream_seek(VideoState *is, int64_t pos, int64_t rel, int seek_by_bytes)
1074 {
1075     if (!is->seek_req) {
1076         is->seek_pos = pos;
1077         is->seek_rel = rel;
1078         is->seek_flags &= ~AVSEEK_FLAG_BYTE;
1079         if (seek_by_bytes)
1080             is->seek_flags |= AVSEEK_FLAG_BYTE;
1081         is->seek_req = 1;
1082     }
1083 }
1084
1085 /* pause or resume the video */
1086 static void stream_toggle_pause(VideoState *is)
1087 {
1088     if (is->paused) {
1089         is->frame_timer += av_gettime() / 1000000.0 + is->video_current_pts_drift - is->video_current_pts;
1090         if (is->read_pause_return != AVERROR(ENOSYS)) {
1091             is->video_current_pts = is->video_current_pts_drift + av_gettime() / 1000000.0;
1092         }
1093         is->video_current_pts_drift = is->video_current_pts - av_gettime() / 1000000.0;
1094     }
1095     is->paused = !is->paused;
1096 }
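/*
 * On resume, `now + video_current_pts_drift - video_current_pts` is the
 * wall-clock time elapsed since the video clock was last updated (the pts
 * terms cancel), which for a pause is essentially the pause duration; adding
 * it to frame_timer keeps the queued frames from all being considered late
 * when playback restarts.  The drift is then re-anchored to the current time
 * so the video clock resumes from where it stopped.
 */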
1097
1098 static double compute_target_delay(double delay, VideoState *is)
1099 {
1100     double sync_threshold, diff;
1101
1102     /* update delay to follow master synchronisation source */
1103     if (((is->av_sync_type == AV_SYNC_AUDIO_MASTER && is->audio_st) ||
1104          is->av_sync_type == AV_SYNC_EXTERNAL_CLOCK)) {
1105         /* if video is slave, we try to correct big delays by
1106            duplicating or deleting a frame */
1107         diff = get_video_clock(is) - get_master_clock(is);
1108
1109         /* skip or repeat frame. We take into account the
1110            delay to compute the threshold. I still don't know
1111            if it is the best guess */
1112         sync_threshold = FFMAX(AV_SYNC_THRESHOLD, delay);
1113         if (fabs(diff) < AV_NOSYNC_THRESHOLD) {
1114             if (diff <= -sync_threshold)
1115                 delay = 0;
1116             else if (diff >= sync_threshold)
1117                 delay = 2 * delay;
1118         }
1119     }
1120
1121     av_dlog(NULL, "video: delay=%0.3f A-V=%f\n",
1122             delay, -diff);
1123
1124     return delay;
1125 }
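/*
 * Example with a 25 fps stream (delay == 0.040s, so sync_threshold == 0.040s):
 * if the video clock is 0.1s behind the master clock (diff <= -sync_threshold)
 * the returned delay collapses to 0 and the next frame is shown immediately;
 * if it is 0.1s ahead, the delay is doubled to 0.080s so the current frame
 * stays on screen longer.  Errors larger than AV_NOSYNC_THRESHOLD are
 * deliberately not corrected here.
 */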
1126
1127 static void pictq_next_picture(VideoState *is) {
1128     /* update queue size and signal for next picture */
1129     if (++is->pictq_rindex == VIDEO_PICTURE_QUEUE_SIZE)
1130         is->pictq_rindex = 0;
1131
1132     SDL_LockMutex(is->pictq_mutex);
1133     is->pictq_size--;
1134     SDL_CondSignal(is->pictq_cond);
1135     SDL_UnlockMutex(is->pictq_mutex);
1136 }
1137
1138 static void update_video_pts(VideoState *is, double pts, int64_t pos) {
1139     double time = av_gettime() / 1000000.0;
1140     /* update current video pts */
1141     is->video_current_pts = pts;
1142     is->video_current_pts_drift = is->video_current_pts - time;
1143     is->video_current_pos = pos;
1144     is->frame_last_pts = pts;
1145 }
1146
1147 /* called to display each frame */
1148 static void video_refresh(void *opaque)
1149 {
1150     VideoState *is = opaque;
1151     VideoPicture *vp;
1152     double time;
1153
1154     SubPicture *sp, *sp2;
1155
1156     if (is->video_st) {
1157 retry:
1158         if (is->pictq_size == 0) {
1159             SDL_LockMutex(is->pictq_mutex);
1160             if (is->frame_last_dropped_pts != AV_NOPTS_VALUE && is->frame_last_dropped_pts > is->frame_last_pts) {
1161                 update_video_pts(is, is->frame_last_dropped_pts, is->frame_last_dropped_pos);
1162                 is->frame_last_dropped_pts = AV_NOPTS_VALUE;
1163             }
1164             SDL_UnlockMutex(is->pictq_mutex);
1165             // nothing to do, no picture to display in the queue
1166         } else {
1167             double last_duration, duration, delay;
1168             /* dequeue the picture */
1169             vp = &is->pictq[is->pictq_rindex];
1170
1171             if (vp->skip) {
1172                 pictq_next_picture(is);
1173                 goto retry;
1174             }
1175
1176             if (is->paused)
1177                 goto display;
1178
1179             /* compute nominal last_duration */
1180             last_duration = vp->pts - is->frame_last_pts;
1181             if (last_duration > 0 && last_duration < 10.0) {
1182                 /* if duration of the last frame was sane, update last_duration in video state */
1183                 is->frame_last_duration = last_duration;
1184             }
1185             delay = compute_target_delay(is->frame_last_duration, is);
1186
1187             time= av_gettime()/1000000.0;
1188             if (time < is->frame_timer + delay)
1189                 return;
1190
1191             if (delay > 0)
1192                 is->frame_timer += delay * FFMAX(1, floor((time-is->frame_timer) / delay));
1193
1194             SDL_LockMutex(is->pictq_mutex);
1195             update_video_pts(is, vp->pts, vp->pos);
1196             SDL_UnlockMutex(is->pictq_mutex);
1197
1198             if (is->pictq_size > 1) {
1199                 VideoPicture *nextvp = &is->pictq[(is->pictq_rindex + 1) % VIDEO_PICTURE_QUEUE_SIZE];
1200                 duration = nextvp->pts - vp->pts;
1201                 if((framedrop>0 || (framedrop && is->audio_st)) && time > is->frame_timer + duration){
1202                     is->frame_drops_late++;
1203                     pictq_next_picture(is);
1204                     goto retry;
1205                 }
1206             }
1207
1208             if (is->subtitle_st) {
1209                 if (is->subtitle_stream_changed) {
1210                     SDL_LockMutex(is->subpq_mutex);
1211
1212                     while (is->subpq_size) {
1213                         free_subpicture(&is->subpq[is->subpq_rindex]);
1214
1215                         /* update queue size and signal for next picture */
1216                         if (++is->subpq_rindex == SUBPICTURE_QUEUE_SIZE)
1217                             is->subpq_rindex = 0;
1218
1219                         is->subpq_size--;
1220                     }
1221                     is->subtitle_stream_changed = 0;
1222
1223                     SDL_CondSignal(is->subpq_cond);
1224                     SDL_UnlockMutex(is->subpq_mutex);
1225                 } else {
1226                     if (is->subpq_size > 0) {
1227                         sp = &is->subpq[is->subpq_rindex];
1228
1229                         if (is->subpq_size > 1)
1230                             sp2 = &is->subpq[(is->subpq_rindex + 1) % SUBPICTURE_QUEUE_SIZE];
1231                         else
1232                             sp2 = NULL;
1233
1234                         if ((is->video_current_pts > (sp->pts + ((float) sp->sub.end_display_time / 1000)))
1235                                 || (sp2 && is->video_current_pts > (sp2->pts + ((float) sp2->sub.start_display_time / 1000))))
1236                         {
1237                             free_subpicture(sp);
1238
1239                             /* update queue size and signal for next picture */
1240                             if (++is->subpq_rindex == SUBPICTURE_QUEUE_SIZE)
1241                                 is->subpq_rindex = 0;
1242
1243                             SDL_LockMutex(is->subpq_mutex);
1244                             is->subpq_size--;
1245                             SDL_CondSignal(is->subpq_cond);
1246                             SDL_UnlockMutex(is->subpq_mutex);
1247                         }
1248                     }
1249                 }
1250             }
1251
1252 display:
1253             /* display picture */
1254             if (!display_disable)
1255                 video_display(is);
1256
1257             if (!is->paused)
1258                 pictq_next_picture(is);
1259         }
1260     } else if (is->audio_st) {
1261         /* draw the next audio frame */
1262
1263         /* if only audio stream, then display the audio bars (better
1264            than nothing, just to test the implementation) */
1265
1266         /* display picture */
1267         if (!display_disable)
1268             video_display(is);
1269     }
1270     is->force_refresh = 0;
1271     if (show_status) {
1272         static int64_t last_time;
1273         int64_t cur_time;
1274         int aqsize, vqsize, sqsize;
1275         double av_diff;
1276
1277         cur_time = av_gettime();
1278         if (!last_time || (cur_time - last_time) >= 30000) {
1279             aqsize = 0;
1280             vqsize = 0;
1281             sqsize = 0;
1282             if (is->audio_st)
1283                 aqsize = is->audioq.size;
1284             if (is->video_st)
1285                 vqsize = is->videoq.size;
1286             if (is->subtitle_st)
1287                 sqsize = is->subtitleq.size;
1288             av_diff = 0;
1289             if (is->audio_st && is->video_st)
1290                 av_diff = get_audio_clock(is) - get_video_clock(is);
1291             printf("%7.2f A-V:%7.3f fd=%4d aq=%5dKB vq=%5dKB sq=%5dB f=%"PRId64"/%"PRId64"   \r",
1292                    get_master_clock(is),
1293                    av_diff,
1294                    is->frame_drops_early + is->frame_drops_late,
1295                    aqsize / 1024,
1296                    vqsize / 1024,
1297                    sqsize,
1298                    is->video_st ? is->video_st->codec->pts_correction_num_faulty_dts : 0,
1299                    is->video_st ? is->video_st->codec->pts_correction_num_faulty_pts : 0);
1300             fflush(stdout);
1301             last_time = cur_time;
1302         }
1303     }
1304 }
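/*
 * Key to the status line printed above: the first column is the current
 * master clock, "A-V" is the audio clock minus the video clock, "fd" counts
 * frames dropped before and after decoding (frame_drops_early +
 * frame_drops_late), "aq"/"vq"/"sq" are the audio/video/subtitle packet queue
 * sizes, and "f" shows the decoder's counters of faulty DTS/PTS values used
 * for timestamp correction.
 */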
1305
1306 /* allocate a picture (needs to be done in the main thread to avoid
1307    potential locking problems) */
1308 static void alloc_picture(AllocEventProps *event_props)
1309 {
1310     VideoState *is = event_props->is;
1311     AVFrame *frame = event_props->frame;
1312     VideoPicture *vp;
1313
1314     vp = &is->pictq[is->pictq_windex];
1315
1316     if (vp->bmp)
1317         SDL_FreeYUVOverlay(vp->bmp);
1318
1319 #if CONFIG_AVFILTER
1320     if (vp->picref)
1321         avfilter_unref_buffer(vp->picref);
1322     vp->picref = NULL;
1323 #endif
1324
1325     vp->width   = frame->width;
1326     vp->height  = frame->height;
1327
1328     video_open(event_props->is, 0);
1329
1330     vp->bmp = SDL_CreateYUVOverlay(vp->width, vp->height,
1331                                    SDL_YV12_OVERLAY,
1332                                    screen);
1333     if (!vp->bmp || vp->bmp->pitches[0] < vp->width) {
1334         /* SDL allocates a buffer smaller than requested if the video
1335          * overlay hardware is unable to support the requested size. */
1336         fprintf(stderr, "Error: the video system does not support an image\n"
1337                         "size of %dx%d pixels. Try using -lowres or -vf \"scale=w:h\"\n"
1338                         "to reduce the image size.\n", vp->width, vp->height );
1339         do_exit(is);
1340     }
1341
1342     SDL_LockMutex(is->pictq_mutex);
1343     vp->allocated = 1;
1344     SDL_CondSignal(is->pictq_cond);
1345     SDL_UnlockMutex(is->pictq_mutex);
1346 }
1347
1348 static int queue_picture(VideoState *is, AVFrame *src_frame, double pts1, int64_t pos)
1349 {
1350     VideoPicture *vp;
1351     double frame_delay, pts = pts1;
1352
1353     /* compute the exact PTS for the picture if it is omitted in the stream
1354      * pts1 is the dts of the pkt / pts of the frame */
1355     if (pts != 0) {
1356         /* update video clock with pts, if present */
1357         is->video_clock = pts;
1358     } else {
1359         pts = is->video_clock;
1360     }
1361     /* update video clock for next frame */
1362     frame_delay = av_q2d(is->video_st->codec->time_base);
1363     /* for MPEG2, the frame can be repeated, so we update the
1364        clock accordingly */
1365     frame_delay += src_frame->repeat_pict * (frame_delay * 0.5);
1366     is->video_clock += frame_delay;
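    /* Example: with a 25 fps time base (frame_delay == 0.040s), a frame with
     * repeat_pict == 1 (soft telecine "repeat first field") advances the
     * clock by 0.040 + 0.020 = 0.060s, i.e. one and a half frame durations. */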
1367
1368 #if defined(DEBUG_SYNC) && 0
1369     printf("frame_type=%c clock=%0.3f pts=%0.3f\n",
1370            av_get_picture_type_char(src_frame->pict_type), pts, pts1);
1371 #endif
1372
1373     /* wait until we have space to put a new picture */
1374     SDL_LockMutex(is->pictq_mutex);
1375
1376     while (is->pictq_size >= VIDEO_PICTURE_QUEUE_SIZE &&
1377            !is->videoq.abort_request) {
1378         SDL_CondWait(is->pictq_cond, is->pictq_mutex);
1379     }
1380     SDL_UnlockMutex(is->pictq_mutex);
1381
1382     if (is->videoq.abort_request)
1383         return -1;
1384
1385     vp = &is->pictq[is->pictq_windex];
1386
1387     /* alloc or resize hardware picture buffer */
1388     if (!vp->bmp || vp->reallocate ||
1389         vp->width  != src_frame->width ||
1390         vp->height != src_frame->height) {
1391         SDL_Event event;
1392         AllocEventProps event_props;
1393
1394         event_props.frame = src_frame;
1395         event_props.is = is;
1396
1397         vp->allocated  = 0;
1398         vp->reallocate = 0;
1399
1400         /* the allocation must be done in the main thread to avoid
1401            locking problems. We wait in this block for the event to complete,
1402            so we can pass a pointer to event_props to it. */
1403         event.type = FF_ALLOC_EVENT;
1404         event.user.data1 = &event_props;
1405         SDL_PushEvent(&event);
1406
1407         /* wait until the picture is allocated */
1408         SDL_LockMutex(is->pictq_mutex);
1409         while (!vp->allocated && !is->videoq.abort_request) {
1410             SDL_CondWait(is->pictq_cond, is->pictq_mutex);
1411         }
1412         /* if the queue is aborted, we have to pop the pending ALLOC event or wait for the allocation to complete */
1413         if (is->videoq.abort_request && SDL_PeepEvents(&event, 1, SDL_GETEVENT, SDL_EVENTMASK(FF_ALLOC_EVENT)) != 1) {
1414             while (!vp->allocated) {
1415                 SDL_CondWait(is->pictq_cond, is->pictq_mutex);
1416             }
1417         }
1418         SDL_UnlockMutex(is->pictq_mutex);
1419
1420         if (is->videoq.abort_request)
1421             return -1;
1422     }
1423
1424     /* if the frame is not skipped, then display it */
1425     if (vp->bmp) {
1426         AVPicture pict = { { 0 } };
1427 #if CONFIG_AVFILTER
1428         if (vp->picref)
1429             avfilter_unref_buffer(vp->picref);
1430         vp->picref = src_frame->opaque;
1431 #endif
1432
1433         /* get a pointer to the bitmap */
1434         SDL_LockYUVOverlay (vp->bmp);
1435
1436         pict.data[0] = vp->bmp->pixels[0];
1437         pict.data[1] = vp->bmp->pixels[2];
1438         pict.data[2] = vp->bmp->pixels[1];
1439
1440         pict.linesize[0] = vp->bmp->pitches[0];
1441         pict.linesize[1] = vp->bmp->pitches[2];
1442         pict.linesize[2] = vp->bmp->pitches[1];
1443
1444 #if CONFIG_AVFILTER
1445         // FIXME use direct rendering
1446         av_picture_copy(&pict, (AVPicture *)src_frame,
1447                         src_frame->format, vp->width, vp->height);
1448         vp->sample_aspect_ratio = vp->picref->video->sample_aspect_ratio;
1449 #else
1450         sws_flags = av_get_int(sws_opts, "sws_flags", NULL);
1451         is->img_convert_ctx = sws_getCachedContext(is->img_convert_ctx,
1452             vp->width, vp->height, src_frame->format, vp->width, vp->height,
1453             PIX_FMT_YUV420P, sws_flags, NULL, NULL, NULL);
1454         if (is->img_convert_ctx == NULL) {
1455             fprintf(stderr, "Cannot initialize the conversion context\n");
1456             exit(1);
1457         }
1458         sws_scale(is->img_convert_ctx, src_frame->data, src_frame->linesize,
1459                   0, vp->height, pict.data, pict.linesize);
1460         vp->sample_aspect_ratio = av_guess_sample_aspect_ratio(is->ic, is->video_st, src_frame);
1461 #endif
1462         /* update the bitmap content */
1463         SDL_UnlockYUVOverlay(vp->bmp);
1464
1465         vp->pts = pts;
1466         vp->pos = pos;
1467         vp->skip = 0;
1468
1469         /* now we can update the picture count */
1470         if (++is->pictq_windex == VIDEO_PICTURE_QUEUE_SIZE)
1471             is->pictq_windex = 0;
1472         SDL_LockMutex(is->pictq_mutex);
1473         is->pictq_size++;
1474         SDL_UnlockMutex(is->pictq_mutex);
1475     }
1476     return 0;
1477 }
1478
1479 static int get_video_frame(VideoState *is, AVFrame *frame, int64_t *pts, AVPacket *pkt)
1480 {
1481     int got_picture, i;
1482
1483     if (packet_queue_get(&is->videoq, pkt, 1) < 0)
1484         return -1;
1485
1486     if (pkt->data == flush_pkt.data) {
1487         avcodec_flush_buffers(is->video_st->codec);
1488
1489         SDL_LockMutex(is->pictq_mutex);
1490         // Make sure there are no long delay timers (ideally we should just flush the queue but that's harder)
1491         for (i = 0; i < VIDEO_PICTURE_QUEUE_SIZE; i++) {
1492             is->pictq[i].skip = 1;
1493         }
1494         while (is->pictq_size && !is->videoq.abort_request) {
1495             SDL_CondWait(is->pictq_cond, is->pictq_mutex);
1496         }
1497         is->video_current_pos = -1;
1498         is->frame_last_pts = AV_NOPTS_VALUE;
1499         is->frame_last_duration = 0;
1500         is->frame_timer = (double)av_gettime() / 1000000.0;
1501         is->frame_last_dropped_pts = AV_NOPTS_VALUE;
1502         SDL_UnlockMutex(is->pictq_mutex);
1503
1504         return 0;
1505     }
1506
1507     if(avcodec_decode_video2(is->video_st->codec, frame, &got_picture, pkt) < 0)
1508         return 0;
1509
1510     if (got_picture) {
1511         int ret = 1;
1512
1513         if (decoder_reorder_pts == -1) {
1514             *pts = av_frame_get_best_effort_timestamp(frame);
1515         } else if (decoder_reorder_pts) {
1516             *pts = frame->pkt_pts;
1517         } else {
1518             *pts = frame->pkt_dts;
1519         }
1520
1521         if (*pts == AV_NOPTS_VALUE) {
1522             *pts = 0;
1523         }
1524
1525         if (((is->av_sync_type == AV_SYNC_AUDIO_MASTER && is->audio_st) || is->av_sync_type == AV_SYNC_EXTERNAL_CLOCK) &&
1526              (framedrop>0 || (framedrop && is->audio_st))) {
1527             SDL_LockMutex(is->pictq_mutex);
1528             if (is->frame_last_pts != AV_NOPTS_VALUE && *pts) {
1529                 double clockdiff = get_video_clock(is) - get_master_clock(is);
1530                 double dpts = av_q2d(is->video_st->time_base) * *pts;
1531                 double ptsdiff = dpts - is->frame_last_pts;
1532                 if (fabs(clockdiff) < AV_NOSYNC_THRESHOLD &&
1533                      ptsdiff > 0 && ptsdiff < AV_NOSYNC_THRESHOLD &&
1534                      clockdiff + ptsdiff - is->frame_last_filter_delay < 0) {
1535                     is->frame_last_dropped_pos = pkt->pos;
1536                     is->frame_last_dropped_pts = dpts;
1537                     is->frame_drops_early++;
1538                     ret = 0;
1539                 }
1540             }
1541             SDL_UnlockMutex(is->pictq_mutex);
1542         }
1543
1544         return ret;
1545     }
1546     return 0;
1547 }
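/*
 * The block above implements "early" frame dropping: when video is the slave
 * clock, already running behind (clockdiff < 0), and even after adding the
 * frame's pts increment and the measured filter delay it would still be late,
 * the decoded frame is discarded before ever being queued (frame_drops_early).
 * That saves the colour conversion, overlay upload and display work a late
 * frame would otherwise cost before being dropped in video_refresh() anyway
 * (frame_drops_late).
 */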
1548
1549 #if CONFIG_AVFILTER
1550 static int configure_filtergraph(AVFilterGraph *graph, const char *filtergraph,
1551                                  AVFilterContext *source_ctx, AVFilterContext *sink_ctx)
1552 {
1553     int ret;
1554     AVFilterInOut *outputs = NULL, *inputs = NULL;
1555
1556     if (filtergraph) {
1557         outputs = avfilter_inout_alloc();
1558         inputs  = avfilter_inout_alloc();
1559         if (!outputs || !inputs) {
1560             ret = AVERROR(ENOMEM);
1561             goto fail;
1562         }
1563
1564         outputs->name       = av_strdup("in");
1565         outputs->filter_ctx = source_ctx;
1566         outputs->pad_idx    = 0;
1567         outputs->next       = NULL;
1568
1569         inputs->name        = av_strdup("out");
1570         inputs->filter_ctx  = sink_ctx;
1571         inputs->pad_idx     = 0;
1572         inputs->next        = NULL;
1573
1574         if ((ret = avfilter_graph_parse(graph, filtergraph, &inputs, &outputs, NULL)) < 0)
1575             goto fail;
1576     } else {
1577         if ((ret = avfilter_link(source_ctx, 0, sink_ctx, 0)) < 0)
1578             goto fail;
1579     }
1580
1581     return avfilter_graph_config(graph, NULL);
1582 fail:
1583     avfilter_inout_free(&outputs);
1584     avfilter_inout_free(&inputs);
1585     return ret;
1586 }
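/*
 * Roughly, the user-supplied filter string is spliced between the two fixed
 * endpoints: its input side is attached to the "in" label (our buffer
 * source) and its output side to "out" (our sink), so e.g. a vfilters string
 * of "hflip,scale=640:-1" ends up as buffer -> hflip -> scale -> format ->
 * buffersink once configure_video_filters() below adds its format filter.
 * With no filter string the source is linked straight to the sink.
 */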
1587
1588 static int configure_video_filters(AVFilterGraph *graph, VideoState *is, const char *vfilters)
1589 {
1590     static const enum PixelFormat pix_fmts[] = { PIX_FMT_YUV420P, PIX_FMT_NONE };
1591     char sws_flags_str[128];
1592     char buffersrc_args[256];
1593     int ret;
1594     AVBufferSinkParams *buffersink_params = av_buffersink_params_alloc();
1595     AVFilterContext *filt_src = NULL, *filt_out = NULL, *filt_format;
1596     AVCodecContext *codec = is->video_st->codec;
1597
1598     snprintf(sws_flags_str, sizeof(sws_flags_str), "flags=%d", sws_flags);
1599     graph->scale_sws_opts = av_strdup(sws_flags_str);
1600
1601     snprintf(buffersrc_args, sizeof(buffersrc_args),
1602              "video_size=%dx%d:pix_fmt=%d:time_base=%d/%d:pixel_aspect=%d/%d",
1603              codec->width, codec->height, codec->pix_fmt,
1604              is->video_st->time_base.num, is->video_st->time_base.den,
1605              codec->sample_aspect_ratio.num, codec->sample_aspect_ratio.den);
1606
1607     if ((ret = avfilter_graph_create_filter(&filt_src,
1608                                             avfilter_get_by_name("buffer"),
1609                                             "ffplay_buffer", buffersrc_args, NULL,
1610                                             graph)) < 0)
1611         return ret;
1612
1613     buffersink_params->pixel_fmts = pix_fmts;
1614     ret = avfilter_graph_create_filter(&filt_out,
1615                                        avfilter_get_by_name("buffersink"),
1616                                        "ffplay_buffersink", NULL, buffersink_params, graph);
1617     av_freep(&buffersink_params);
1618     if (ret < 0)
1619         return ret;
1620
1621     if ((ret = avfilter_graph_create_filter(&filt_format,
1622                                             avfilter_get_by_name("format"),
1623                                             "format", "yuv420p", NULL, graph)) < 0)
1624         return ret;
1625     if ((ret = avfilter_link(filt_format, 0, filt_out, 0)) < 0)
1626         return ret;
1627
1628     if ((ret = configure_filtergraph(graph, vfilters, filt_src, filt_format)) < 0)
1629         return ret;
1630
1631     is->in_video_filter  = filt_src;
1632     is->out_video_filter = filt_out;
1633
1634     if (codec->codec->capabilities & CODEC_CAP_DR1) {
1635         is->use_dr1 = 1;
1636         codec->get_buffer     = codec_get_buffer;
1637         codec->release_buffer = codec_release_buffer;
1638         codec->opaque         = &is->buffer_pool;
1639     }
1640
1641     return ret;
1642 }
1643
1644 #endif  /* CONFIG_AVFILTER */
1645
1646 static int video_thread(void *arg)
1647 {
1648     VideoState *is = arg;
1649     AVFrame *frame = avcodec_alloc_frame();
1650     int64_t pts_int = AV_NOPTS_VALUE, pos = -1;
1651     double pts;
1652     int ret;
1653
1654 #if CONFIG_AVFILTER
1655     AVFilterGraph *graph = avfilter_graph_alloc();
1656     AVFilterContext *filt_out = NULL, *filt_in = NULL;
1657     int last_w = is->video_st->codec->width;
1658     int last_h = is->video_st->codec->height;
1659     enum PixelFormat last_format = is->video_st->codec->pix_fmt;
1660
1661     if ((ret = configure_video_filters(graph, is, vfilters)) < 0) {
1662         SDL_Event event;
1663         event.type = FF_QUIT_EVENT;
1664         event.user.data1 = is;
1665         SDL_PushEvent(&event);
1666         goto the_end;
1667     }
1668     filt_in  = is->in_video_filter;
1669     filt_out = is->out_video_filter;
1670 #endif
1671
1672     for (;;) {
1673         AVPacket pkt;
1674 #if CONFIG_AVFILTER
1675         AVFilterBufferRef *picref;
1676         AVRational tb = filt_out->inputs[0]->time_base;
1677 #endif
1678         while (is->paused && !is->videoq.abort_request)
1679             SDL_Delay(10);
1680
1681         avcodec_get_frame_defaults(frame);
1682         ret = get_video_frame(is, frame, &pts_int, &pkt);
1683         if (ret < 0)
1684             goto the_end;
1685
1686         if (!ret) {
1687             av_free_packet(&pkt);
1688             continue;
1689         }
1690
1691 #if CONFIG_AVFILTER
1692         if (   last_w != is->video_st->codec->width
1693             || last_h != is->video_st->codec->height
1694             || last_format != is->video_st->codec->pix_fmt) {
1695             av_log(NULL, AV_LOG_INFO, "Video frame changed from size:%dx%d format:%s to size:%dx%d format:%s\n",
1696                    last_w, last_h, av_get_pix_fmt_name(last_format), is->video_st->codec->width, is->video_st->codec->height, av_get_pix_fmt_name(is->video_st->codec->pix_fmt));
1697             avfilter_graph_free(&graph);
1698             graph = avfilter_graph_alloc();
1699             if ((ret = configure_video_filters(graph, is, vfilters)) < 0) {
1700                 av_free_packet(&pkt);
1701                 goto the_end;
1702             }
1703             filt_in  = is->in_video_filter;
1704             filt_out = is->out_video_filter;
1705             last_w = is->video_st->codec->width;
1706             last_h = is->video_st->codec->height;
1707             last_format = is->video_st->codec->pix_fmt;
1708         }
1709
1710         frame->pts = pts_int;
1711         frame->sample_aspect_ratio = av_guess_sample_aspect_ratio(is->ic, is->video_st, frame);
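        /* with direct rendering (DR1) the decoder's buffer is wrapped into a
           filter buffer reference and handed to the source filter without a
           copy; otherwise av_buffersrc_write_frame() copies the frame data */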
1712         if (is->use_dr1 && frame->opaque) {
1713             FrameBuffer      *buf = frame->opaque;
1714             AVFilterBufferRef *fb = avfilter_get_video_buffer_ref_from_arrays(
1715                                         frame->data, frame->linesize,
1716                                         AV_PERM_READ | AV_PERM_PRESERVE,
1717                                         frame->width, frame->height,
1718                                         frame->format);
1719
1720             avfilter_copy_frame_props(fb, frame);
1721             fb->buf->priv           = buf;
1722             fb->buf->free           = filter_release_buffer;
1723
1724             buf->refcount++;
1725             av_buffersrc_buffer(filt_in, fb);
1726
1727         } else
1728             av_buffersrc_write_frame(filt_in, frame);
1729
1730         av_free_packet(&pkt);
1731
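        /* pull every frame the filtergraph can deliver for this input; the loop
           ends once the buffersink has nothing more to return */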
1732         while (ret >= 0) {
1733             is->frame_last_returned_time = av_gettime() / 1000000.0;
1734
1735             ret = av_buffersink_get_buffer_ref(filt_out, &picref, 0);
1736             if (ret < 0) {
1737                 ret = 0;
1738                 break;
1739             }
1740
1741             is->frame_last_filter_delay = av_gettime() / 1000000.0 - is->frame_last_returned_time;
1742             if (fabs(is->frame_last_filter_delay) > AV_NOSYNC_THRESHOLD / 10.0)
1743                 is->frame_last_filter_delay = 0;
1744
1745             avfilter_copy_buf_props(frame, picref);
1746
1747             pts_int = picref->pts;
1748             tb      = filt_out->inputs[0]->time_base;
1749             pos     = picref->pos;
1750             frame->opaque = picref;
1751
1752             if (av_cmp_q(tb, is->video_st->time_base)) {
1753                 av_unused int64_t pts1 = pts_int;
1754                 pts_int = av_rescale_q(pts_int, tb, is->video_st->time_base);
1755                 av_dlog(NULL, "video_thread(): "
1756                         "tb:%d/%d pts:%"PRId64" -> tb:%d/%d pts:%"PRId64"\n",
1757                         tb.num, tb.den, pts1,
1758                         is->video_st->time_base.num, is->video_st->time_base.den, pts_int);
1759             }
1760             pts = pts_int * av_q2d(is->video_st->time_base);
1761             ret = queue_picture(is, frame, pts, pos);
1762         }
1763 #else
1764         pts = pts_int * av_q2d(is->video_st->time_base);
1765         ret = queue_picture(is, frame, pts, pkt.pos);
1766 #endif
1767
1768         if (ret < 0)
1769             goto the_end;
1770
1771         if (is->step)
1772             stream_toggle_pause(is);
1773     }
1774  the_end:
1775     avcodec_flush_buffers(is->video_st->codec);
1776 #if CONFIG_AVFILTER
1777     av_freep(&vfilters);
1778     avfilter_graph_free(&graph);
1779 #endif
1780     av_free(frame);
1781     return 0;
1782 }
1783
1784 static int subtitle_thread(void *arg)
1785 {
1786     VideoState *is = arg;
1787     SubPicture *sp;
1788     AVPacket pkt1, *pkt = &pkt1;
1789     int got_subtitle;
1790     double pts;
1791     int i, j;
1792     int r, g, b, y, u, v, a;
1793
1794     for (;;) {
1795         while (is->paused && !is->subtitleq.abort_request) {
1796             SDL_Delay(10);
1797         }
1798         if (packet_queue_get(&is->subtitleq, pkt, 1) < 0)
1799             break;
1800
1801         if (pkt->data == flush_pkt.data) {
1802             avcodec_flush_buffers(is->subtitle_st->codec);
1803             continue;
1804         }
1805         SDL_LockMutex(is->subpq_mutex);
1806         while (is->subpq_size >= SUBPICTURE_QUEUE_SIZE &&
1807                !is->subtitleq.abort_request) {
1808             SDL_CondWait(is->subpq_cond, is->subpq_mutex);
1809         }
1810         SDL_UnlockMutex(is->subpq_mutex);
1811
1812         if (is->subtitleq.abort_request)
1813             return 0;
1814
1815         sp = &is->subpq[is->subpq_windex];
1816
1817         /* NOTE: pts is the PTS of the _first_ subtitle beginning in
1818            this packet, if any */
1819         pts = 0;
1820         if (pkt->pts != AV_NOPTS_VALUE)
1821             pts = av_q2d(is->subtitle_st->time_base) * pkt->pts;
1822
1823         avcodec_decode_subtitle2(is->subtitle_st->codec, &sp->sub,
1824                                  &got_subtitle, pkt);
1825
1826         if (got_subtitle && sp->sub.format == 0) {
1827             sp->pts = pts;
1828
1829             for (i = 0; i < sp->sub.num_rects; i++)
1830             {
1831                 for (j = 0; j < sp->sub.rects[i]->nb_colors; j++)
1832                 {
1833                     RGBA_IN(r, g, b, a, (uint32_t*)sp->sub.rects[i]->pict.data[1] + j);
1834                     y = RGB_TO_Y_CCIR(r, g, b);
1835                     u = RGB_TO_U_CCIR(r, g, b, 0);
1836                     v = RGB_TO_V_CCIR(r, g, b, 0);
1837                     YUVA_OUT((uint32_t*)sp->sub.rects[i]->pict.data[1] + j, y, u, v, a);
1838                 }
1839             }
1840
1841             /* now we can update the picture count */
1842             if (++is->subpq_windex == SUBPICTURE_QUEUE_SIZE)
1843                 is->subpq_windex = 0;
1844             SDL_LockMutex(is->subpq_mutex);
1845             is->subpq_size++;
1846             SDL_UnlockMutex(is->subpq_mutex);
1847         }
1848         av_free_packet(pkt);
1849     }
1850     return 0;
1851 }
1852
1853 /* copy samples into the circular buffer used for the audio visualization (waves/RDFT) display */
1854 static void update_sample_display(VideoState *is, short *samples, int samples_size)
1855 {
1856     int size, len;
1857
1858     size = samples_size / sizeof(short);
1859     while (size > 0) {
1860         len = SAMPLE_ARRAY_SIZE - is->sample_array_index;
1861         if (len > size)
1862             len = size;
1863         memcpy(is->sample_array + is->sample_array_index, samples, len * sizeof(short));
1864         samples += len;
1865         is->sample_array_index += len;
1866         if (is->sample_array_index >= SAMPLE_ARRAY_SIZE)
1867             is->sample_array_index = 0;
1868         size -= len;
1869     }
1870 }
1871
1872 /* return the wanted number of samples to get better sync when the master
1873  * clock is the video or an external clock */
1874 static int synchronize_audio(VideoState *is, int nb_samples)
1875 {
1876     int wanted_nb_samples = nb_samples;
1877
1878     /* if not master, then we try to remove or add samples to correct the clock */
1879     if (((is->av_sync_type == AV_SYNC_VIDEO_MASTER && is->video_st) ||
1880          is->av_sync_type == AV_SYNC_EXTERNAL_CLOCK)) {
1881         double diff, avg_diff;
1882         int min_nb_samples, max_nb_samples;
1883
1884         diff = get_audio_clock(is) - get_master_clock(is);
1885
1886         if (fabs(diff) < AV_NOSYNC_THRESHOLD) {
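            /* audio_diff_cum is an exponentially weighted sum of past A-V
               differences; scaling it by (1 - audio_diff_avg_coef) below yields
               an average dominated by roughly the last AUDIO_DIFF_AVG_NB values */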
1887             is->audio_diff_cum = diff + is->audio_diff_avg_coef * is->audio_diff_cum;
1888             if (is->audio_diff_avg_count < AUDIO_DIFF_AVG_NB) {
1889                 /* not enough measurements to have a correct estimate yet */
1890                 is->audio_diff_avg_count++;
1891             } else {
1892                 /* estimate the A-V difference */
1893                 avg_diff = is->audio_diff_cum * (1.0 - is->audio_diff_avg_coef);
1894
1895                 if (fabs(avg_diff) >= is->audio_diff_threshold) {
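                    /* convert the error from seconds into samples and clamp the
                       correction to at most SAMPLE_CORRECTION_PERCENT_MAX percent
                       of the frame size */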
1896                     wanted_nb_samples = nb_samples + (int)(diff * is->audio_src.freq);
1897                     min_nb_samples = ((nb_samples * (100 - SAMPLE_CORRECTION_PERCENT_MAX) / 100));
1898                     max_nb_samples = ((nb_samples * (100 + SAMPLE_CORRECTION_PERCENT_MAX) / 100));
1899                     wanted_nb_samples = FFMIN(FFMAX(wanted_nb_samples, min_nb_samples), max_nb_samples);
1900                 }
1901                 av_dlog(NULL, "diff=%f adiff=%f sample_diff=%d apts=%0.3f vpts=%0.3f %f\n",
1902                         diff, avg_diff, wanted_nb_samples - nb_samples,
1903                         is->audio_clock, is->video_clock, is->audio_diff_threshold);
1904             }
1905         } else {
1906             /* the difference is too big: it may be caused by initial PTS
1907                errors, so reset the A-V filter */
1908             is->audio_diff_avg_count = 0;
1909             is->audio_diff_cum       = 0;
1910         }
1911     }
1912
1913     return wanted_nb_samples;
1914 }
1915
1916 /* decode one audio frame and return its uncompressed size */
1917 static int audio_decode_frame(VideoState *is, double *pts_ptr)
1918 {
1919     AVPacket *pkt_temp = &is->audio_pkt_temp;
1920     AVPacket *pkt = &is->audio_pkt;
1921     AVCodecContext *dec = is->audio_st->codec;
1922     int len1, len2, data_size, resampled_data_size;
1923     int64_t dec_channel_layout;
1924     int got_frame;
1925     double pts;
1926     int new_packet = 0;
1927     int flush_complete = 0;
1928     int wanted_nb_samples;
1929
1930     for (;;) {
1931         /* NOTE: the audio packet can contain several frames */
1932         while (pkt_temp->size > 0 || (!pkt_temp->data && new_packet)) {
1933             if (!is->frame) {
1934                 if (!(is->frame = avcodec_alloc_frame()))
1935                     return AVERROR(ENOMEM);
1936             } else
1937                 avcodec_get_frame_defaults(is->frame);
1938
1939             if (is->paused)
1940                 return -1;
1941
1942             if (flush_complete)
1943                 break;
1944             new_packet = 0;
1945             len1 = avcodec_decode_audio4(dec, is->frame, &got_frame, pkt_temp);
1946             if (len1 < 0) {
1947                 /* if error, we skip the frame */
1948                 pkt_temp->size = 0;
1949                 break;
1950             }
1951
1952             pkt_temp->data += len1;
1953             pkt_temp->size -= len1;
1954
1955             if (!got_frame) {
1956                 /* stop sending empty packets if the decoder is finished */
1957                 if (!pkt_temp->data && dec->codec->capabilities & CODEC_CAP_DELAY)
1958                     flush_complete = 1;
1959                 continue;
1960             }
1961             data_size = av_samples_get_buffer_size(NULL, dec->channels,
1962                                                    is->frame->nb_samples,
1963                                                    dec->sample_fmt, 1);
1964
1965             dec_channel_layout = (dec->channel_layout && dec->channels == av_get_channel_layout_nb_channels(dec->channel_layout)) ? dec->channel_layout : av_get_default_channel_layout(dec->channels);
1966             wanted_nb_samples = synchronize_audio(is, is->frame->nb_samples);
1967
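            /* (re)create the resampler whenever the decoder output no longer
               matches the negotiated audio_src parameters, or when per-sample
               sync compensation is needed but no resampler exists yet */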
1968             if (dec->sample_fmt != is->audio_src.fmt ||
1969                 dec_channel_layout != is->audio_src.channel_layout ||
1970                 dec->sample_rate != is->audio_src.freq ||
1971                 (wanted_nb_samples != is->frame->nb_samples && !is->swr_ctx)) {
1972                 if (is->swr_ctx)
1973                     swr_free(&is->swr_ctx);
1974                 is->swr_ctx = swr_alloc_set_opts(NULL,
1975                                                  is->audio_tgt.channel_layout, is->audio_tgt.fmt, is->audio_tgt.freq,
1976                                                  dec_channel_layout,           dec->sample_fmt,   dec->sample_rate,
1977                                                  0, NULL);
1978                 if (!is->swr_ctx || swr_init(is->swr_ctx) < 0) {
1979                     fprintf(stderr, "Cannot create sample rate converter for conversion of %d Hz %s %d channels to %d Hz %s %d channels!\n",
1980                         dec->sample_rate,
1981                         av_get_sample_fmt_name(dec->sample_fmt),
1982                         dec->channels,
1983                         is->audio_tgt.freq,
1984                         av_get_sample_fmt_name(is->audio_tgt.fmt),
1985                         is->audio_tgt.channels);
1986                     break;
1987                 }
1988                 is->audio_src.channel_layout = dec_channel_layout;
1989                 is->audio_src.channels = dec->channels;
1990                 is->audio_src.freq = dec->sample_rate;
1991                 is->audio_src.fmt = dec->sample_fmt;
1992             }
1993
1994             resampled_data_size = data_size;
1995             if (is->swr_ctx) {
1996                 const uint8_t *in[] = { is->frame->data[0] };
1997                 uint8_t *out[] = {is->audio_buf2};
1998                 if (wanted_nb_samples != is->frame->nb_samples) {
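                    /* sample delta and compensation distance are expressed in output
                       samples, hence the audio_tgt.freq / sample_rate scaling */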
1999                     if (swr_set_compensation(is->swr_ctx, (wanted_nb_samples - is->frame->nb_samples) * is->audio_tgt.freq / dec->sample_rate,
2000                                                 wanted_nb_samples * is->audio_tgt.freq / dec->sample_rate) < 0) {
2001                         fprintf(stderr, "swr_set_compensation() failed\n");
2002                         break;
2003                     }
2004                 }
2005                 len2 = swr_convert(is->swr_ctx, out, sizeof(is->audio_buf2) / is->audio_tgt.channels / av_get_bytes_per_sample(is->audio_tgt.fmt),
2006                                                 in, is->frame->nb_samples);
2007                 if (len2 < 0) {
2008                     fprintf(stderr, "audio_resample() failed\n");
2009                     break;
2010                 }
2011                 if (len2 == sizeof(is->audio_buf2) / is->audio_tgt.channels / av_get_bytes_per_sample(is->audio_tgt.fmt)) {
2012                     fprintf(stderr, "warning: audio buffer is probably too small\n");
2013                     swr_init(is->swr_ctx);
2014                 }
2015                 is->audio_buf = is->audio_buf2;
2016                 resampled_data_size = len2 * is->audio_tgt.channels * av_get_bytes_per_sample(is->audio_tgt.fmt);
2017             } else {
2018                 is->audio_buf = is->frame->data[0];
2019             }
2020
2021             /* use the running audio clock as the pts; it was updated from the packet pts when one was available */
2022             pts = is->audio_clock;
2023             *pts_ptr = pts;
2024             is->audio_clock += (double)data_size /
2025                 (dec->channels * dec->sample_rate * av_get_bytes_per_sample(dec->sample_fmt));
2026 #ifdef DEBUG
2027             {
2028                 static double last_clock;
2029                 printf("audio: delay=%0.3f clock=%0.3f pts=%0.3f\n",
2030                        is->audio_clock - last_clock,
2031                        is->audio_clock, pts);
2032                 last_clock = is->audio_clock;
2033             }
2034 #endif
2035             return resampled_data_size;
2036         }
2037
2038         /* free the current packet */
2039         if (pkt->data)
2040             av_free_packet(pkt);
2041         memset(pkt_temp, 0, sizeof(*pkt_temp));
2042
2043         if (is->paused || is->audioq.abort_request) {
2044             return -1;
2045         }
2046
2047         /* read next packet */
2048         if ((new_packet = packet_queue_get(&is->audioq, pkt, 1)) < 0)
2049             return -1;
2050
2051         if (pkt->data == flush_pkt.data) {
2052             avcodec_flush_buffers(dec);
2053             flush_complete = 0;
2054         }
2055
2056         *pkt_temp = *pkt;
2057
2058         /* update the audio clock with the packet pts, if available */
2059         if (pkt->pts != AV_NOPTS_VALUE) {
2060             is->audio_clock = av_q2d(is->audio_st->time_base)*pkt->pts;
2061         }
2062     }
2063 }
2064
2065 /* prepare a new audio buffer */
2066 static void sdl_audio_callback(void *opaque, Uint8 *stream, int len)
2067 {
2068     VideoState *is = opaque;
2069     int audio_size, len1;
2070     int bytes_per_sec;
2071     int frame_size = av_samples_get_buffer_size(NULL, is->audio_tgt.channels, 1, is->audio_tgt.fmt, 1);
2072     double pts;
2073
2074     audio_callback_time = av_gettime();
2075
2076     while (len > 0) {
2077         if (is->audio_buf_index >= is->audio_buf_size) {
2078            audio_size = audio_decode_frame(is, &pts);
2079            if (audio_size < 0) {
2080                 /* if error, just output silence */
2081                is->audio_buf      = is->silence_buf;
2082                is->audio_buf_size = sizeof(is->silence_buf) / frame_size * frame_size;
2083            } else {
2084                if (is->show_mode != SHOW_MODE_VIDEO)
2085                    update_sample_display(is, (int16_t *)is->audio_buf, audio_size);
2086                is->audio_buf_size = audio_size;
2087            }
2088            is->audio_buf_index = 0;
2089         }
2090         len1 = is->audio_buf_size - is->audio_buf_index;
2091         if (len1 > len)
2092             len1 = len;
2093         memcpy(stream, (uint8_t *)is->audio_buf + is->audio_buf_index, len1);
2094         len -= len1;
2095         stream += len1;
2096         is->audio_buf_index += len1;
2097     }
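    /* estimate the pts of the audio actually reaching the speaker: take the
       clock of the last decoded sample and subtract the data still queued in
       the SDL and internal buffers, converted to seconds */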
2098     bytes_per_sec = is->audio_tgt.freq * is->audio_tgt.channels * av_get_bytes_per_sample(is->audio_tgt.fmt);
2099     is->audio_write_buf_size = is->audio_buf_size - is->audio_buf_index;
2100     /* Let's assume the audio driver that is used by SDL has two periods. */
2101     is->audio_current_pts = is->audio_clock - (double)(2 * is->audio_hw_buf_size + is->audio_write_buf_size) / bytes_per_sec;
2102     is->audio_current_pts_drift = is->audio_current_pts - audio_callback_time / 1000000.0;
2103 }
2104
2105 static int audio_open(void *opaque, int64_t wanted_channel_layout, int wanted_nb_channels, int wanted_sample_rate, struct AudioParams *audio_hw_params)
2106 {
2107     SDL_AudioSpec wanted_spec, spec;
2108     const char *env;
2109     const int next_nb_channels[] = {0, 0, 1, 6, 2, 6, 4, 6};
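    /* fallback table used when SDL rejects a channel count: the index is the
       count that failed, the value is the next count to try, 0 gives up */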
2110
2111     env = SDL_getenv("SDL_AUDIO_CHANNELS");
2112     if (env) {
2113         wanted_nb_channels = SDL_atoi(env);
2114         wanted_channel_layout = av_get_default_channel_layout(wanted_nb_channels);
2115     }
2116     if (!wanted_channel_layout || wanted_nb_channels != av_get_channel_layout_nb_channels(wanted_channel_layout)) {
2117         wanted_channel_layout = av_get_default_channel_layout(wanted_nb_channels);
2118         wanted_channel_layout &= ~AV_CH_LAYOUT_STEREO_DOWNMIX;
2119     }
2120     wanted_spec.channels = av_get_channel_layout_nb_channels(wanted_channel_layout);
2121     wanted_spec.freq = wanted_sample_rate;
2122     if (wanted_spec.freq <= 0 || wanted_spec.channels <= 0) {
2123         fprintf(stderr, "Invalid sample rate or channel count!\n");
2124         return -1;
2125     }
2126     wanted_spec.format = AUDIO_S16SYS;
2127     wanted_spec.silence = 0;
2128     wanted_spec.samples = SDL_AUDIO_BUFFER_SIZE;
2129     wanted_spec.callback = sdl_audio_callback;
2130     wanted_spec.userdata = opaque;
2131     while (SDL_OpenAudio(&wanted_spec, &spec) < 0) {
2132         fprintf(stderr, "SDL_OpenAudio (%d channels): %s\n", wanted_spec.channels, SDL_GetError());
2133         wanted_spec.channels = next_nb_channels[FFMIN(7, wanted_spec.channels)];
2134         if (!wanted_spec.channels) {
2135             fprintf(stderr, "No more channel combinations to try, audio open failed\n");
2136             return -1;
2137         }
2138         wanted_channel_layout = av_get_default_channel_layout(wanted_spec.channels);
2139     }
2140     if (spec.format != AUDIO_S16SYS) {
2141         fprintf(stderr, "SDL advised audio format %d is not supported!\n", spec.format);
2142         return -1;
2143     }
2144     if (spec.channels != wanted_spec.channels) {
2145         wanted_channel_layout = av_get_default_channel_layout(spec.channels);
2146         if (!wanted_channel_layout) {
2147             fprintf(stderr, "SDL advised channel count %d is not supported!\n", spec.channels);
2148             return -1;
2149         }
2150     }
2151
2152     audio_hw_params->fmt = AV_SAMPLE_FMT_S16;
2153     audio_hw_params->freq = spec.freq;
2154     audio_hw_params->channel_layout = wanted_channel_layout;
2155     audio_hw_params->channels =  spec.channels;
2156     return spec.size;
2157 }
2158
2159 /* open a given stream. Return 0 if OK */
2160 static int stream_component_open(VideoState *is, int stream_index)
2161 {
2162     AVFormatContext *ic = is->ic;
2163     AVCodecContext *avctx;
2164     AVCodec *codec;
2165     AVDictionary *opts;
2166     AVDictionaryEntry *t = NULL;
2167
2168     if (stream_index < 0 || stream_index >= ic->nb_streams)
2169         return -1;
2170     avctx = ic->streams[stream_index]->codec;
2171
2172     codec = avcodec_find_decoder(avctx->codec_id);
2173     opts = filter_codec_opts(codec_opts, codec, ic, ic->streams[stream_index]);
2174
2175     switch(avctx->codec_type){
2176         case AVMEDIA_TYPE_AUDIO   : is->last_audio_stream    = stream_index; if(audio_codec_name   ) codec= avcodec_find_decoder_by_name(   audio_codec_name); break;
2177         case AVMEDIA_TYPE_SUBTITLE: is->last_subtitle_stream = stream_index; if(subtitle_codec_name) codec= avcodec_find_decoder_by_name(subtitle_codec_name); break;
2178         case AVMEDIA_TYPE_VIDEO   : is->last_video_stream    = stream_index; if(video_codec_name   ) codec= avcodec_find_decoder_by_name(   video_codec_name); break;
2179     }
2180     if (!codec)
2181         return -1;
2182
2183     avctx->workaround_bugs   = workaround_bugs;
2184     avctx->lowres            = lowres;
2185     if(avctx->lowres > codec->max_lowres){
2186         av_log(avctx, AV_LOG_WARNING, "The maximum value for lowres supported by the decoder is %d\n",
2187                 codec->max_lowres);
2188         avctx->lowres= codec->max_lowres;
2189     }
2190     avctx->idct_algo         = idct;
2191     avctx->skip_frame        = skip_frame;
2192     avctx->skip_idct         = skip_idct;
2193     avctx->skip_loop_filter  = skip_loop_filter;
2194     avctx->error_concealment = error_concealment;
2195
2196     if(avctx->lowres) avctx->flags |= CODEC_FLAG_EMU_EDGE;
2197     if (fast)   avctx->flags2 |= CODEC_FLAG2_FAST;
2198     if(codec->capabilities & CODEC_CAP_DR1)
2199         avctx->flags |= CODEC_FLAG_EMU_EDGE;
2200
2201     if (!av_dict_get(opts, "threads", NULL, 0))
2202         av_dict_set(&opts, "threads", "auto", 0);
2203     if (!codec ||
2204         avcodec_open2(avctx, codec, &opts) < 0)
2205         return -1;
2206     if ((t = av_dict_get(opts, "", NULL, AV_DICT_IGNORE_SUFFIX))) {
2207         av_log(NULL, AV_LOG_ERROR, "Option %s not found.\n", t->key);
2208         return AVERROR_OPTION_NOT_FOUND;
2209     }
2210
2211     /* prepare audio output */
2212     if (avctx->codec_type == AVMEDIA_TYPE_AUDIO) {
2213         int audio_hw_buf_size = audio_open(is, avctx->channel_layout, avctx->channels, avctx->sample_rate, &is->audio_src);
2214         if (audio_hw_buf_size < 0)
2215             return -1;
2216         is->audio_hw_buf_size = audio_hw_buf_size;
2217         is->audio_tgt = is->audio_src;
2218     }
2219
2220     ic->streams[stream_index]->discard = AVDISCARD_DEFAULT;
2221     switch (avctx->codec_type) {
2222     case AVMEDIA_TYPE_AUDIO:
2223         is->audio_stream = stream_index;
2224         is->audio_st = ic->streams[stream_index];
2225         is->audio_buf_size  = 0;
2226         is->audio_buf_index = 0;
2227
2228         /* init averaging filter */
2229         is->audio_diff_avg_coef  = exp(log(0.01) / AUDIO_DIFF_AVG_NB);
2230         is->audio_diff_avg_count = 0;
2231         /* since we do not have a precise enough audio FIFO fullness measure,
2232            we only correct audio sync when the error is larger than this threshold */
2233         is->audio_diff_threshold = 2.0 * is->audio_hw_buf_size / av_samples_get_buffer_size(NULL, is->audio_tgt.channels, is->audio_tgt.freq, is->audio_tgt.fmt, 1);
2234
2235         memset(&is->audio_pkt, 0, sizeof(is->audio_pkt));
2236         memset(&is->audio_pkt_temp, 0, sizeof(is->audio_pkt_temp));
2237         packet_queue_start(&is->audioq);
2238         SDL_PauseAudio(0);
2239         break;
2240     case AVMEDIA_TYPE_VIDEO:
2241         is->video_stream = stream_index;
2242         is->video_st = ic->streams[stream_index];
2243
2244         packet_queue_start(&is->videoq);
2245         is->video_tid = SDL_CreateThread(video_thread, is);
2246         break;
2247     case AVMEDIA_TYPE_SUBTITLE:
2248         is->subtitle_stream = stream_index;
2249         is->subtitle_st = ic->streams[stream_index];
2250         packet_queue_start(&is->subtitleq);
2251
2252         is->subtitle_tid = SDL_CreateThread(subtitle_thread, is);
2253         break;
2254     default:
2255         break;
2256     }
2257     return 0;
2258 }
2259
2260 static void stream_component_close(VideoState *is, int stream_index)
2261 {
2262     AVFormatContext *ic = is->ic;
2263     AVCodecContext *avctx;
2264
2265     if (stream_index < 0 || stream_index >= ic->nb_streams)
2266         return;
2267     avctx = ic->streams[stream_index]->codec;
2268
2269     switch (avctx->codec_type) {
2270     case AVMEDIA_TYPE_AUDIO:
2271         packet_queue_abort(&is->audioq);
2272
2273         SDL_CloseAudio();
2274
2275         packet_queue_flush(&is->audioq);
2276         av_free_packet(&is->audio_pkt);
2277         if (is->swr_ctx)
2278             swr_free(&is->swr_ctx);
2279         av_freep(&is->audio_buf1);
2280         is->audio_buf = NULL;
2281         av_freep(&is->frame);
2282
2283         if (is->rdft) {
2284             av_rdft_end(is->rdft);
2285             av_freep(&is->rdft_data);
2286             is->rdft = NULL;
2287             is->rdft_bits = 0;
2288         }
2289         break;
2290     case AVMEDIA_TYPE_VIDEO:
2291         packet_queue_abort(&is->videoq);
2292
2293         /* note: we also signal the condition variable to make sure we unblock
2294            the video thread in all cases */
2295         SDL_LockMutex(is->pictq_mutex);
2296         SDL_CondSignal(is->pictq_cond);
2297         SDL_UnlockMutex(is->pictq_mutex);
2298
2299         SDL_WaitThread(is->video_tid, NULL);
2300
2301         packet_queue_flush(&is->videoq);
2302         break;
2303     case AVMEDIA_TYPE_SUBTITLE:
2304         packet_queue_abort(&is->subtitleq);
2305
2306         /* note: we also signal the condition variable to make sure we unblock
2307            the subtitle thread in all cases */
2308         SDL_LockMutex(is->subpq_mutex);
2309         is->subtitle_stream_changed = 1;
2310
2311         SDL_CondSignal(is->subpq_cond);
2312         SDL_UnlockMutex(is->subpq_mutex);
2313
2314         SDL_WaitThread(is->subtitle_tid, NULL);
2315
2316         packet_queue_flush(&is->subtitleq);
2317         break;
2318     default:
2319         break;
2320     }
2321
2322     ic->streams[stream_index]->discard = AVDISCARD_ALL;
2323     avcodec_close(avctx);
2324 #if CONFIG_AVFILTER
2325     free_buffer_pool(&is->buffer_pool);
2326 #endif
2327     switch (avctx->codec_type) {
2328     case AVMEDIA_TYPE_AUDIO:
2329         is->audio_st = NULL;
2330         is->audio_stream = -1;
2331         break;
2332     case AVMEDIA_TYPE_VIDEO:
2333         is->video_st = NULL;
2334         is->video_stream = -1;
2335         break;
2336     case AVMEDIA_TYPE_SUBTITLE:
2337         is->subtitle_st = NULL;
2338         is->subtitle_stream = -1;
2339         break;
2340     default:
2341         break;
2342     }
2343 }
2344
2345 static int decode_interrupt_cb(void *ctx)
2346 {
2347     VideoState *is = ctx;
2348     return is->abort_request;
2349 }
2350
2351 /* this thread gets the stream from the disk or the network */
2352 static int read_thread(void *arg)
2353 {
2354     VideoState *is = arg;
2355     AVFormatContext *ic = NULL;
2356     int err, i, ret;
2357     int st_index[AVMEDIA_TYPE_NB];
2358     AVPacket pkt1, *pkt = &pkt1;
2359     int eof = 0;
2360     int pkt_in_play_range = 0;
2361     AVDictionaryEntry *t;
2362     AVDictionary **opts;
2363     int orig_nb_streams;
2364
2365     memset(st_index, -1, sizeof(st_index));
2366     is->last_video_stream = is->video_stream = -1;
2367     is->last_audio_stream = is->audio_stream = -1;
2368     is->last_subtitle_stream = is->subtitle_stream = -1;
2369
2370     ic = avformat_alloc_context();
2371     ic->interrupt_callback.callback = decode_interrupt_cb;
2372     ic->interrupt_callback.opaque = is;
2373     err = avformat_open_input(&ic, is->filename, is->iformat, &format_opts);
2374     if (err < 0) {
2375         print_error(is->filename, err);
2376         ret = -1;
2377         goto fail;
2378     }
2379     if ((t = av_dict_get(format_opts, "", NULL, AV_DICT_IGNORE_SUFFIX))) {
2380         av_log(NULL, AV_LOG_ERROR, "Option %s not found.\n", t->key);
2381         ret = AVERROR_OPTION_NOT_FOUND;
2382         goto fail;
2383     }
2384     is->ic = ic;
2385
2386     if (genpts)
2387         ic->flags |= AVFMT_FLAG_GENPTS;
2388
2389     opts = setup_find_stream_info_opts(ic, codec_opts);
2390     orig_nb_streams = ic->nb_streams;
2391
2392     err = avformat_find_stream_info(ic, opts);
2393     if (err < 0) {
2394         fprintf(stderr, "%s: could not find codec parameters\n", is->filename);
2395         ret = -1;
2396         goto fail;
2397     }
2398     for (i = 0; i < orig_nb_streams; i++)
2399         av_dict_free(&opts[i]);
2400     av_freep(&opts);
2401
2402     if (ic->pb)
2403         ic->pb->eof_reached = 0; // FIXME hack, ffplay maybe should not use url_feof() to test for the end
2404
2405     if (seek_by_bytes < 0)
2406         seek_by_bytes = !!(ic->iformat->flags & AVFMT_TS_DISCONT);
2407
2408     /* if seeking was requested, execute it now */
2409     if (start_time != AV_NOPTS_VALUE) {
2410         int64_t timestamp;
2411
2412         timestamp = start_time;
2413         /* add the stream start time */
2414         if (ic->start_time != AV_NOPTS_VALUE)
2415             timestamp += ic->start_time;
2416         ret = avformat_seek_file(ic, -1, INT64_MIN, timestamp, INT64_MAX, 0);
2417         if (ret < 0) {
2418             fprintf(stderr, "%s: could not seek to position %0.3f\n",
2419                     is->filename, (double)timestamp / AV_TIME_BASE);
2420         }
2421     }
2422
2423     for (i = 0; i < ic->nb_streams; i++)
2424         ic->streams[i]->discard = AVDISCARD_ALL;
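    /* pick the "best" stream of each type; the audio choice is biased towards
       a stream related to the chosen video stream, and the subtitle choice
       towards the chosen audio (or, failing that, video) stream */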
2425     if (!video_disable)
2426         st_index[AVMEDIA_TYPE_VIDEO] =
2427             av_find_best_stream(ic, AVMEDIA_TYPE_VIDEO,
2428                                 wanted_stream[AVMEDIA_TYPE_VIDEO], -1, NULL, 0);
2429     if (!audio_disable)
2430         st_index[AVMEDIA_TYPE_AUDIO] =
2431             av_find_best_stream(ic, AVMEDIA_TYPE_AUDIO,
2432                                 wanted_stream[AVMEDIA_TYPE_AUDIO],
2433                                 st_index[AVMEDIA_TYPE_VIDEO],
2434                                 NULL, 0);
2435     if (!video_disable)
2436         st_index[AVMEDIA_TYPE_SUBTITLE] =
2437             av_find_best_stream(ic, AVMEDIA_TYPE_SUBTITLE,
2438                                 wanted_stream[AVMEDIA_TYPE_SUBTITLE],
2439                                 (st_index[AVMEDIA_TYPE_AUDIO] >= 0 ?
2440                                  st_index[AVMEDIA_TYPE_AUDIO] :
2441                                  st_index[AVMEDIA_TYPE_VIDEO]),
2442                                 NULL, 0);
2443     if (show_status) {
2444         av_dump_format(ic, 0, is->filename, 0);
2445     }
2446
2447     is->show_mode = show_mode;
2448
2449     /* open the streams */
2450     if (st_index[AVMEDIA_TYPE_AUDIO] >= 0) {
2451         stream_component_open(is, st_index[AVMEDIA_TYPE_AUDIO]);
2452     }
2453
2454     ret = -1;
2455     if (st_index[AVMEDIA_TYPE_VIDEO] >= 0) {
2456         ret = stream_component_open(is, st_index[AVMEDIA_TYPE_VIDEO]);
2457     }
2458     is->refresh_tid = SDL_CreateThread(refresh_thread, is);
2459     if (is->show_mode == SHOW_MODE_NONE)
2460         is->show_mode = ret >= 0 ? SHOW_MODE_VIDEO : SHOW_MODE_RDFT;
2461
2462     if (st_index[AVMEDIA_TYPE_SUBTITLE] >= 0) {
2463         stream_component_open(is, st_index[AVMEDIA_TYPE_SUBTITLE]);
2464     }
2465
2466     if (is->video_stream < 0 && is->audio_stream < 0) {
2467         fprintf(stderr, "%s: could not open codecs\n", is->filename);
2468         ret = -1;
2469         goto fail;
2470     }
2471
2472     for (;;) {
2473         if (is->abort_request)
2474             break;
2475         if (is->paused != is->last_paused) {
2476             is->last_paused = is->paused;
2477             if (is->paused)
2478                 is->read_pause_return = av_read_pause(ic);
2479             else
2480                 av_read_play(ic);
2481         }
2482 #if CONFIG_RTSP_DEMUXER || CONFIG_MMSH_PROTOCOL
2483         if (is->paused &&
2484                 (!strcmp(ic->iformat->name, "rtsp") ||
2485                  (ic->pb && !strncmp(input_filename, "mmsh:", 5)))) {
2486             /* wait 10 ms to avoid trying to get another packet */
2487             /* XXX: horrible */
2488             SDL_Delay(10);
2489             continue;
2490         }
2491 #endif
2492         if (is->seek_req) {
2493             int64_t seek_target = is->seek_pos;
2494             int64_t seek_min    = is->seek_rel > 0 ? seek_target - is->seek_rel + 2: INT64_MIN;
2495             int64_t seek_max    = is->seek_rel < 0 ? seek_target - is->seek_rel - 2: INT64_MAX;
2496 // FIXME the +-2 is due to rounding being not done in the correct direction in generation
2497 //      of the seek_pos/seek_rel variables
2498
2499             ret = avformat_seek_file(is->ic, -1, seek_min, seek_target, seek_max, is->seek_flags);
2500             if (ret < 0) {
2501                 fprintf(stderr, "%s: error while seeking\n", is->ic->filename);
2502             } else {
2503                 if (is->audio_stream >= 0) {
2504                     packet_queue_flush(&is->audioq);
2505                     packet_queue_put(&is->audioq, &flush_pkt);
2506                 }
2507                 if (is->subtitle_stream >= 0) {
2508                     packet_queue_flush(&is->subtitleq);
2509                     packet_queue_put(&is->subtitleq, &flush_pkt);
2510                 }
2511                 if (is->video_stream >= 0) {
2512                     packet_queue_flush(&is->videoq);
2513                     packet_queue_put(&is->videoq, &flush_pkt);
2514                 }
2515             }
2516             is->seek_req = 0;
2517             eof = 0;
2518         }
2519
2520         /* if the queues are full, no need to read more */
2521         if (   is->audioq.size + is->videoq.size + is->subtitleq.size > MAX_QUEUE_SIZE
2522             || (   (is->audioq   .nb_packets > MIN_FRAMES || is->audio_stream < 0 || is->audioq.abort_request)
2523                 && (is->videoq   .nb_packets > MIN_FRAMES || is->video_stream < 0 || is->videoq.abort_request)
2524                 && (is->subtitleq.nb_packets > MIN_FRAMES || is->subtitle_stream < 0 || is->subtitleq.abort_request))) {
2525             /* wait 10 ms */
2526             SDL_Delay(10);
2527             continue;
2528         }
2529         if (eof) {
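            /* at EOF, queue empty packets so that decoders with CODEC_CAP_DELAY can
               drain their buffered frames; once all queues run dry, either loop from
               the start or exit if -autoexit was given */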
2530             if (is->video_stream >= 0) {
2531                 av_init_packet(pkt);
2532                 pkt->data = NULL;
2533                 pkt->size = 0;
2534                 pkt->stream_index = is->video_stream;
2535                 packet_queue_put(&is->videoq, pkt);
2536             }
2537             if (is->audio_stream >= 0 &&
2538                 is->audio_st->codec->codec->capabilities & CODEC_CAP_DELAY) {
2539                 av_init_packet(pkt);
2540                 pkt->data = NULL;
2541                 pkt->size = 0;
2542                 pkt->stream_index = is->audio_stream;
2543                 packet_queue_put(&is->audioq, pkt);
2544             }
2545             SDL_Delay(10);
2546             if (is->audioq.size + is->videoq.size + is->subtitleq.size == 0) {
2547                 if (loop != 1 && (!loop || --loop)) {
2548                     stream_seek(is, start_time != AV_NOPTS_VALUE ? start_time : 0, 0, 0);
2549                 } else if (autoexit) {
2550                     ret = AVERROR_EOF;
2551                     goto fail;
2552                 }
2553             }
2554             eof=0;
2555             continue;
2556         }
2557         ret = av_read_frame(ic, pkt);
2558         if (ret < 0) {
2559             if (ret == AVERROR_EOF || url_feof(ic->pb))
2560                 eof = 1;
2561             if (ic->pb && ic->pb->error)
2562                 break;
2563             SDL_Delay(100); /* wait for user event */
2564             continue;
2565         }
2566         /* check if the packet is in the play range specified by the user, then queue it, otherwise discard it */
2567         pkt_in_play_range = duration == AV_NOPTS_VALUE ||
2568                 (pkt->pts - ic->streams[pkt->stream_index]->start_time) *
2569                 av_q2d(ic->streams[pkt->stream_index]->time_base) -
2570                 (double)(start_time != AV_NOPTS_VALUE ? start_time : 0) / 1000000
2571                 <= ((double)duration / 1000000);
2572         if (pkt->stream_index == is->audio_stream && pkt_in_play_range) {
2573             packet_queue_put(&is->audioq, pkt);
2574         } else if (pkt->stream_index == is->video_stream && pkt_in_play_range) {
2575             packet_queue_put(&is->videoq, pkt);
2576         } else if (pkt->stream_index == is->subtitle_stream && pkt_in_play_range) {
2577             packet_queue_put(&is->subtitleq, pkt);
2578         } else {
2579             av_free_packet(pkt);
2580         }
2581     }
2582     /* wait until the end */
2583     while (!is->abort_request) {
2584         SDL_Delay(100);
2585     }
2586
2587     ret = 0;
2588  fail:
2589     /* close each stream */
2590     if (is->audio_stream >= 0)
2591         stream_component_close(is, is->audio_stream);
2592     if (is->video_stream >= 0)
2593         stream_component_close(is, is->video_stream);
2594     if (is->subtitle_stream >= 0)
2595         stream_component_close(is, is->subtitle_stream);
2596     if (is->ic) {
2597         avformat_close_input(&is->ic);
2598     }
2599
2600     if (ret != 0) {
2601         SDL_Event event;
2602
2603         event.type = FF_QUIT_EVENT;
2604         event.user.data1 = is;
2605         SDL_PushEvent(&event);
2606     }
2607     return 0;
2608 }
2609
2610 static VideoState *stream_open(const char *filename, AVInputFormat *iformat)
2611 {
2612     VideoState *is;
2613
2614     is = av_mallocz(sizeof(VideoState));
2615     if (!is)
2616         return NULL;
2617     av_strlcpy(is->filename, filename, sizeof(is->filename));
2618     is->iformat = iformat;
2619     is->ytop    = 0;
2620     is->xleft   = 0;
2621
2622     /* start video display */
2623     is->pictq_mutex = SDL_CreateMutex();
2624     is->pictq_cond  = SDL_CreateCond();
2625
2626     is->subpq_mutex = SDL_CreateMutex();
2627     is->subpq_cond  = SDL_CreateCond();
2628
2629     packet_queue_init(&is->videoq);
2630     packet_queue_init(&is->audioq);
2631     packet_queue_init(&is->subtitleq);
2632
2633     is->av_sync_type = av_sync_type;
2634     is->read_tid     = SDL_CreateThread(read_thread, is);
2635     if (!is->read_tid) {
2636         av_free(is);
2637         return NULL;
2638     }
2639     return is;
2640 }
2641
2642 static void stream_cycle_channel(VideoState *is, int codec_type)
2643 {
2644     AVFormatContext *ic = is->ic;
2645     int start_index, stream_index;
2646     int old_index;
2647     AVStream *st;
2648
2649     if (codec_type == AVMEDIA_TYPE_VIDEO) {
2650         start_index = is->last_video_stream;
2651         old_index = is->video_stream;
2652     } else if (codec_type == AVMEDIA_TYPE_AUDIO) {
2653         start_index = is->last_audio_stream;
2654         old_index = is->audio_stream;
2655     } else {
2656         start_index = is->last_subtitle_stream;
2657         old_index = is->subtitle_stream;
2658     }
2659     stream_index = start_index;
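    /* walk the stream list starting just after the previous choice, wrapping
       around, until another usable stream of the requested type is found */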
2660     for (;;) {
2661         if (++stream_index >= is->ic->nb_streams)
2662         {
2663             if (codec_type == AVMEDIA_TYPE_SUBTITLE)
2664             {
2665                 stream_index = -1;
2666                 is->last_subtitle_stream = -1;
2667                 goto the_end;
2668             }
2669             if (start_index == -1)
2670                 return;
2671             stream_index = 0;
2672         }
2673         if (stream_index == start_index)
2674             return;
2675         st = ic->streams[stream_index];
2676         if (st->codec->codec_type == codec_type) {
2677             /* check that parameters are OK */
2678             switch (codec_type) {
2679             case AVMEDIA_TYPE_AUDIO:
2680                 if (st->codec->sample_rate != 0 &&
2681                     st->codec->channels != 0)
2682                     goto the_end;
2683                 break;
2684             case AVMEDIA_TYPE_VIDEO:
2685             case AVMEDIA_TYPE_SUBTITLE:
2686                 goto the_end;
2687             default:
2688                 break;
2689             }
2690         }
2691     }
2692  the_end:
2693     stream_component_close(is, old_index);
2694     stream_component_open(is, stream_index);
2695 }
2696
2697
2698 static void toggle_full_screen(VideoState *is)
2699 {
2700 #if defined(__APPLE__) && SDL_VERSION_ATLEAST(1, 2, 14)
2701     /* OS X needs to reallocate the SDL overlays */
2702     int i;
2703     for (i = 0; i < VIDEO_PICTURE_QUEUE_SIZE; i++)
2704         is->pictq[i].reallocate = 1;
2705 #endif
2706     is_full_screen = !is_full_screen;
2707     video_open(is, 1);
2708 }
2709
2710 static void toggle_pause(VideoState *is)
2711 {
2712     stream_toggle_pause(is);
2713     is->step = 0;
2714 }
2715
2716 static void step_to_next_frame(VideoState *is)
2717 {
2718     /* if the stream is paused, unpause it, then step */
2719     if (is->paused)
2720         stream_toggle_pause(is);
2721     is->step = 1;
2722 }
2723
2724 static void toggle_audio_display(VideoState *is)
2725 {
2726     int bgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0x00);
2727     is->show_mode = (is->show_mode + 1) % SHOW_MODE_NB;
2728     fill_rectangle(screen,
2729                 is->xleft, is->ytop, is->width, is->height,
2730                 bgcolor);
2731     SDL_UpdateRect(screen, is->xleft, is->ytop, is->width, is->height);
2732 }
2733
2734 /* handle an event sent by the GUI */
2735 static void event_loop(VideoState *cur_stream)
2736 {
2737     SDL_Event event;
2738     double incr, pos, frac;
2739
2740     for (;;) {
2741         double x;
2742         SDL_WaitEvent(&event);
2743         switch (event.type) {
2744         case SDL_KEYDOWN:
2745             if (exit_on_keydown) {
2746                 do_exit(cur_stream);
2747                 break;
2748             }
2749             switch (event.key.keysym.sym) {
2750             case SDLK_ESCAPE:
2751             case SDLK_q:
2752                 do_exit(cur_stream);
2753                 break;
2754             case SDLK_f:
2755                 toggle_full_screen(cur_stream);
2756                 cur_stream->force_refresh = 1;
2757                 break;
2758             case SDLK_p:
2759             case SDLK_SPACE:
2760                 toggle_pause(cur_stream);
2761                 break;
2762             case SDLK_s: // S: Step to next frame
2763                 step_to_next_frame(cur_stream);
2764                 break;
2765             case SDLK_a:
2766                 stream_cycle_channel(cur_stream, AVMEDIA_TYPE_AUDIO);
2767                 break;
2768             case SDLK_v:
2769                 stream_cycle_channel(cur_stream, AVMEDIA_TYPE_VIDEO);
2770                 break;
2771             case SDLK_t:
2772                 stream_cycle_channel(cur_stream, AVMEDIA_TYPE_SUBTITLE);
2773                 break;
2774             case SDLK_w:
2775                 toggle_audio_display(cur_stream);
2776                 cur_stream->force_refresh = 1;
2777                 break;
2778             case SDLK_PAGEUP:
2779                 incr = 600.0;
2780                 goto do_seek;
2781             case SDLK_PAGEDOWN:
2782                 incr = -600.0;
2783                 goto do_seek;
2784             case SDLK_LEFT:
2785                 incr = -10.0;
2786                 goto do_seek;
2787             case SDLK_RIGHT:
2788                 incr = 10.0;
2789                 goto do_seek;
2790             case SDLK_UP:
2791                 incr = 60.0;
2792                 goto do_seek;
2793             case SDLK_DOWN:
2794                 incr = -60.0;
2795             do_seek:
2796                     if (seek_by_bytes) {
2797                         if (cur_stream->video_stream >= 0 && cur_stream->video_current_pos >= 0) {
2798                             pos = cur_stream->video_current_pos;
2799                         } else if (cur_stream->audio_stream >= 0 && cur_stream->audio_pkt.pos >= 0) {
2800                             pos = cur_stream->audio_pkt.pos;
2801                         } else
2802                             pos = avio_tell(cur_stream->ic->pb);
2803                         if (cur_stream->ic->bit_rate)
2804                             incr *= cur_stream->ic->bit_rate / 8.0;
2805                         else
2806                             incr *= 180000.0;
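                        /* in byte-seek mode the increment in seconds is turned into a
                           byte offset using the container bit rate, falling back to
                           180000 bytes/s (~1.44 Mbit/s) when the rate is unknown */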
2807                         pos += incr;
2808                         stream_seek(cur_stream, pos, incr, 1);
2809                     } else {
2810                         pos = get_master_clock(cur_stream);
2811                         pos += incr;
2812                         stream_seek(cur_stream, (int64_t)(pos * AV_TIME_BASE), (int64_t)(incr * AV_TIME_BASE), 0);
2813                     }
2814                 break;
2815             default:
2816                 break;
2817             }
2818             break;
2819         case SDL_VIDEOEXPOSE:
2820             cur_stream->force_refresh = 1;
2821             break;
2822         case SDL_MOUSEBUTTONDOWN:
2823             if (exit_on_mousedown) {
2824                 do_exit(cur_stream);
2825                 break;
2826             }
2827         case SDL_MOUSEMOTION:
2828             if (event.type == SDL_MOUSEBUTTONDOWN) {
2829                 x = event.button.x;
2830             } else {
2831                 if (event.motion.state != SDL_PRESSED)
2832                     break;
2833                 x = event.motion.x;
2834             }
2835                 if (seek_by_bytes || cur_stream->ic->duration <= 0) {
2836                     uint64_t size =  avio_size(cur_stream->ic->pb);
2837                     stream_seek(cur_stream, size*x/cur_stream->width, 0, 1);
2838                 } else {
2839                     int64_t ts;
2840                     int ns, hh, mm, ss;
2841                     int tns, thh, tmm, tss;
2842                     tns  = cur_stream->ic->duration / 1000000LL;
2843                     thh  = tns / 3600;
2844                     tmm  = (tns % 3600) / 60;
2845                     tss  = (tns % 60);
2846                     frac = x / cur_stream->width;
2847                     ns   = frac * tns;
2848                     hh   = ns / 3600;
2849                     mm   = (ns % 3600) / 60;
2850                     ss   = (ns % 60);
2851                     fprintf(stderr, "Seek to %2.0f%% (%2d:%02d:%02d) of total duration (%2d:%02d:%02d)       \n", frac*100,
2852                             hh, mm, ss, thh, tmm, tss);
2853                     ts = frac * cur_stream->ic->duration;
2854                     if (cur_stream->ic->start_time != AV_NOPTS_VALUE)
2855                         ts += cur_stream->ic->start_time;
2856                     stream_seek(cur_stream, ts, 0, 0);
2857                 }
2858             break;
2859         case SDL_VIDEORESIZE:
2860                 screen = SDL_SetVideoMode(event.resize.w, event.resize.h, 0,
2861                                           SDL_HWSURFACE|SDL_RESIZABLE|SDL_ASYNCBLIT|SDL_HWACCEL);
2862                 screen_width  = cur_stream->width  = event.resize.w;
2863                 screen_height = cur_stream->height = event.resize.h;
2864                 cur_stream->force_refresh = 1;
2865             break;
2866         case SDL_QUIT:
2867         case FF_QUIT_EVENT:
2868             do_exit(cur_stream);
2869             break;
2870         case FF_ALLOC_EVENT:
2871             alloc_picture(event.user.data1);
2872             break;
2873         case FF_REFRESH_EVENT:
2874             video_refresh(event.user.data1);
2875             cur_stream->refresh = 0;
2876             break;
2877         default:
2878             break;
2879         }
2880     }
2881 }
2882
2883 static int opt_frame_size(const char *opt, const char *arg)
2884 {
2885     av_log(NULL, AV_LOG_WARNING, "Option -s is deprecated, use -video_size.\n");
2886     return opt_default("video_size", arg);
2887 }
2888
2889 static int opt_width(const char *opt, const char *arg)
2890 {
2891     screen_width = parse_number_or_die(opt, arg, OPT_INT64, 1, INT_MAX);
2892     return 0;
2893 }
2894
2895 static int opt_height(const char *opt, const char *arg)
2896 {
2897     screen_height = parse_number_or_die(opt, arg, OPT_INT64, 1, INT_MAX);
2898     return 0;
2899 }
2900
2901 static int opt_format(const char *opt, const char *arg)
2902 {
2903     file_iformat = av_find_input_format(arg);
2904     if (!file_iformat) {
2905         fprintf(stderr, "Unknown input format: %s\n", arg);
2906         return AVERROR(EINVAL);
2907     }
2908     return 0;
2909 }
2910
2911 static int opt_frame_pix_fmt(const char *opt, const char *arg)
2912 {
2913     av_log(NULL, AV_LOG_WARNING, "Option -pix_fmt is deprecated, use -pixel_format.\n");
2914     return opt_default("pixel_format", arg);
2915 }
2916
2917 static int opt_sync(const char *opt, const char *arg)
2918 {
2919     if (!strcmp(arg, "audio"))
2920         av_sync_type = AV_SYNC_AUDIO_MASTER;
2921     else if (!strcmp(arg, "video"))
2922         av_sync_type = AV_SYNC_VIDEO_MASTER;
2923     else if (!strcmp(arg, "ext"))
2924         av_sync_type = AV_SYNC_EXTERNAL_CLOCK;
2925     else {
2926         fprintf(stderr, "Unknown value for %s: %s\n", opt, arg);
2927         exit(1);
2928     }
2929     return 0;
2930 }
2931
2932 static int opt_seek(const char *opt, const char *arg)
2933 {
2934     start_time = parse_time_or_die(opt, arg, 1);
2935     return 0;
2936 }
2937
2938 static int opt_duration(const char *opt, const char *arg)
2939 {
2940     duration = parse_time_or_die(opt, arg, 1);
2941     return 0;
2942 }
2943
2944 static int opt_show_mode(const char *opt, const char *arg)
2945 {
2946     show_mode = !strcmp(arg, "video") ? SHOW_MODE_VIDEO :
2947                 !strcmp(arg, "waves") ? SHOW_MODE_WAVES :
2948                 !strcmp(arg, "rdft" ) ? SHOW_MODE_RDFT  :
2949                 parse_number_or_die(opt, arg, OPT_INT, 0, SHOW_MODE_NB-1);
2950     return 0;
2951 }
2952
2953 static void opt_input_file(void *optctx, const char *filename)
2954 {
2955     if (input_filename) {
2956         fprintf(stderr, "Argument '%s' provided as input filename, but '%s' was already specified.\n",
2957                 filename, input_filename);
2958         exit_program(1);
2959     }
2960     if (!strcmp(filename, "-"))
2961         filename = "pipe:";
2962     input_filename = filename;
2963 }
2964
2965 static int opt_codec(void *o, const char *opt, const char *arg)
2966 {
2967     switch(opt[strlen(opt)-1]){
2968     case 'a' :    audio_codec_name = arg; break;
2969     case 's' : subtitle_codec_name = arg; break;
2970     case 'v' :    video_codec_name = arg; break;
2971     }
2972     return 0;
2973 }
2974
2975 static int dummy;
2976
2977 static const OptionDef options[] = {
2978 #include "cmdutils_common_opts.h"
2979     { "x", HAS_ARG, { (void*)opt_width }, "force displayed width", "width" },
2980     { "y", HAS_ARG, { (void*)opt_height }, "force displayed height", "height" },
2981     { "s", HAS_ARG | OPT_VIDEO, { (void*)opt_frame_size }, "set frame size (WxH or abbreviation)", "size" },
2982     { "fs", OPT_BOOL, { (void*)&is_full_screen }, "force full screen" },
2983     { "an", OPT_BOOL, { (void*)&audio_disable }, "disable audio" },
2984     { "vn", OPT_BOOL, { (void*)&video_disable }, "disable video" },
2985     { "ast", OPT_INT | HAS_ARG | OPT_EXPERT, { (void*)&wanted_stream[AVMEDIA_TYPE_AUDIO] }, "select desired audio stream", "stream_number" },
2986     { "vst", OPT_INT | HAS_ARG | OPT_EXPERT, { (void*)&wanted_stream[AVMEDIA_TYPE_VIDEO] }, "select desired video stream", "stream_number" },
2987     { "sst", OPT_INT | HAS_ARG | OPT_EXPERT, { (void*)&wanted_stream[AVMEDIA_TYPE_SUBTITLE] }, "select desired subtitle stream", "stream_number" },
2988     { "ss", HAS_ARG, { (void*)&opt_seek }, "seek to a given position in seconds", "pos" },
2989     { "t", HAS_ARG, { (void*)&opt_duration }, "play \"duration\" seconds of audio/video", "duration" },
2990     { "bytes", OPT_INT | HAS_ARG, { (void*)&seek_by_bytes }, "seek by bytes 0=off 1=on -1=auto", "val" },
2991     { "nodisp", OPT_BOOL, { (void*)&display_disable }, "disable graphical display" },
2992     { "f", HAS_ARG, { (void*)opt_format }, "force format", "fmt" },
2993     { "pix_fmt", HAS_ARG | OPT_EXPERT | OPT_VIDEO, { (void*)opt_frame_pix_fmt }, "set pixel format", "format" },
2994     { "stats", OPT_BOOL | OPT_EXPERT, { (void*)&show_status }, "show status", "" },
2995     { "bug", OPT_INT | HAS_ARG | OPT_EXPERT, { (void*)&workaround_bugs }, "workaround bugs", "" },
2996     { "fast", OPT_BOOL | OPT_EXPERT, { (void*)&fast }, "non spec compliant optimizations", "" },
2997     { "genpts", OPT_BOOL | OPT_EXPERT, { (void*)&genpts }, "generate pts", "" },
2998     { "drp", OPT_INT | HAS_ARG | OPT_EXPERT, { (void*)&decoder_reorder_pts }, "let decoder reorder pts 0=off 1=on -1=auto", ""},
2999     { "lowres", OPT_INT | HAS_ARG | OPT_EXPERT, { (void*)&lowres }, "set low resolution decoding factor", "factor" },
3000     { "skiploop", OPT_INT | HAS_ARG | OPT_EXPERT, { (void*)&skip_loop_filter }, "set loop filter skipping mode", "mode" },
3001     { "skipframe", OPT_INT | HAS_ARG | OPT_EXPERT, { (void*)&skip_frame }, "set frame skipping mode", "mode" },
3002     { "skipidct", OPT_INT | HAS_ARG | OPT_EXPERT, { (void*)&skip_idct }, "set IDCT skipping mode", "mode" },
3003     { "idct", OPT_INT | HAS_ARG | OPT_EXPERT, { (void*)&idct }, "set idct algo",  "algo" },
3004     { "ec", OPT_INT | HAS_ARG | OPT_EXPERT, { (void*)&error_concealment }, "set error concealment options",  "bit_mask" },
3005     { "sync", HAS_ARG | OPT_EXPERT, { (void*)opt_sync }, "set audio-video sync type (type=audio/video/ext)", "type" },
3006     { "autoexit", OPT_BOOL | OPT_EXPERT, { (void*)&autoexit }, "exit at the end", "" },
3007     { "exitonkeydown", OPT_BOOL | OPT_EXPERT, { (void*)&exit_on_keydown }, "exit on key down", "" },
3008     { "exitonmousedown", OPT_BOOL | OPT_EXPERT, { (void*)&exit_on_mousedown }, "exit on mouse down", "" },
3009     { "loop", OPT_INT | HAS_ARG | OPT_EXPERT, { (void*)&loop }, "set number of times the playback shall be looped", "loop count" },
3010     { "framedrop", OPT_BOOL | OPT_EXPERT, { (void*)&framedrop }, "drop frames when cpu is too slow", "" },
3011     { "window_title", OPT_STRING | HAS_ARG, { (void*)&window_title }, "set window title", "window title" },
3012 #if CONFIG_AVFILTER
3013     { "vf", OPT_STRING | HAS_ARG, { (void*)&vfilters }, "video filters", "filter list" },
3014 #endif
3015     { "rdftspeed", OPT_INT | HAS_ARG | OPT_AUDIO | OPT_EXPERT, { (void*)&rdftspeed }, "rdft speed", "msecs" },
3016     { "showmode", HAS_ARG, { (void*)opt_show_mode }, "select show mode (0 = video, 1 = waves, 2 = RDFT)", "mode" },
3017     { "default", HAS_ARG | OPT_AUDIO | OPT_VIDEO | OPT_EXPERT, { (void*)opt_default }, "generic catch all option", "" },
3018     { "i", OPT_BOOL, { (void*)&dummy }, "read specified file", "input_file" },
3019     { "codec", HAS_ARG | OPT_FUNC2, { (void*)opt_codec }, "force decoder", "decoder" },
3020     { NULL, },
3021 };
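/*
 * Illustrative command line exercising a few of the entries above (the -vf
 * option only exists when ffplay is built with libavfilter):
 *
 *     ffplay -fs -an -ss 10 -t 30 -sync video -vf "crop=640:480" input.mkv
 *
 * Options that match no entry in this table are routed through the "default"
 * catch-all entry to opt_default(), which tries them as libavcodec/libavformat
 * private options.
 */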
3022
3023 static void show_usage(void)
3024 {
3025     av_log(NULL, AV_LOG_INFO, "Simple media player\n");
3026     av_log(NULL, AV_LOG_INFO, "usage: %s [options] input_file\n", program_name);
3027     av_log(NULL, AV_LOG_INFO, "\n");
3028 }
3029
3030 static int opt_help(const char *opt, const char *arg)
3031 {
3032     av_log_set_callback(log_callback_help);
3033     show_usage();
3034     show_help_options(options, "Main options:\n",
3035                       OPT_EXPERT, 0);
3036     show_help_options(options, "\nAdvanced options:\n",
3037                       OPT_EXPERT, OPT_EXPERT);
3038     printf("\n");
3039     show_help_children(avcodec_get_class(), AV_OPT_FLAG_DECODING_PARAM);
3040     show_help_children(avformat_get_class(), AV_OPT_FLAG_DECODING_PARAM);
3041 #if !CONFIG_AVFILTER
3042     show_help_children(sws_get_class(), AV_OPT_FLAG_ENCODING_PARAM);
3043 #endif
3044     printf("\nWhile playing:\n"
3045            "q, ESC              quit\n"
3046            "f                   toggle full screen\n"
3047            "p, SPC              pause\n"
3048            "a                   cycle audio channel\n"
3049            "v                   cycle video channel\n"
3050            "t                   cycle subtitle channel\n"
3051            "w                   show audio waves\n"
3052            "s                   activate frame-step mode\n"
3053            "left/right          seek backward/forward 10 seconds\n"
3054            "down/up             seek backward/forward 1 minute\n"
3055            "page down/page up   seek backward/forward 10 minutes\n"
3056            "mouse click         seek to percentage in file corresponding to fraction of width\n"
3057            );
3058     return 0;
3059 }
3060
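/*
 * Lock manager callback handed to av_lockmgr_register() in main() below.
 * libavcodec expects 0 on success and nonzero on failure for every operation;
 * SDL_LockMutex()/SDL_UnlockMutex() return 0 on success, hence the !!
 * conversions.
 */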
3061 static int lockmgr(void **mtx, enum AVLockOp op)
3062 {
3063     switch (op) {
3064     case AV_LOCK_CREATE:
3065         *mtx = SDL_CreateMutex();
3066         if (!*mtx)
3067             return 1;
3068         return 0;
3069     case AV_LOCK_OBTAIN:
3070         return !!SDL_LockMutex(*mtx);
3071     case AV_LOCK_RELEASE:
3072         return !!SDL_UnlockMutex(*mtx);
3073     case AV_LOCK_DESTROY:
3074         SDL_DestroyMutex(*mtx);
3075         return 0;
3076     }
3077     return 1;
3078 }
3079
3080 /* Program entry point */
3081 int main(int argc, char **argv)
3082 {
3083     int flags;
3084     VideoState *is;
3085     char dummy_videodriver[] = "SDL_VIDEODRIVER=dummy";
3086
3087     av_log_set_flags(AV_LOG_SKIP_REPEATED);
3088     parse_loglevel(argc, argv, options);
3089
3090     /* register all codecs, demuxers and protocols */
3091     avcodec_register_all();
3092 #if CONFIG_AVDEVICE
3093     avdevice_register_all();
3094 #endif
3095 #if CONFIG_AVFILTER
3096     avfilter_register_all();
3097 #endif
3098     av_register_all();
3099     avformat_network_init();
3100
3101     init_opts();
3102
3103     signal(SIGINT , sigterm_handler); /* Interrupt (ANSI).    */
3104     signal(SIGTERM, sigterm_handler); /* Termination (ANSI).  */
3105
3106     show_banner(argc, argv, options);
3107
3108     parse_options(NULL, argc, argv, options, opt_input_file);
3109
3110     if (!input_filename) {
3111         show_usage();
3112         fprintf(stderr, "An input file must be specified\n");
3113         fprintf(stderr, "Use -h to get full help or, even better, run 'man %s'\n", program_name);
3114         exit(1);
3115     }
3116
3117     if (display_disable) {
3118         video_disable = 1;
3119     }
3120     flags = SDL_INIT_VIDEO | SDL_INIT_AUDIO | SDL_INIT_TIMER;
3121     if (audio_disable)
3122         flags &= ~SDL_INIT_AUDIO;
3123     if (display_disable)
3124         SDL_putenv(dummy_videodriver); /* For the event queue, we always need a video driver. */
3125 #if !defined(__MINGW32__) && !defined(__APPLE__)
3126     flags |= SDL_INIT_EVENTTHREAD; /* Not supported on Windows or Mac OS X */
3127 #endif
3128     if (SDL_Init (flags)) {
3129         fprintf(stderr, "Could not initialize SDL - %s\n", SDL_GetError());
3130         fprintf(stderr, "(Did you set the DISPLAY variable?)\n");
3131         exit(1);
3132     }
3133
3134     if (!display_disable) {
3135 #if HAVE_SDL_VIDEO_SIZE
3136         const SDL_VideoInfo *vi = SDL_GetVideoInfo();
3137         fs_screen_width = vi->current_w;
3138         fs_screen_height = vi->current_h;
3139 #endif
3140     }
3141
3142     SDL_EventState(SDL_ACTIVEEVENT, SDL_IGNORE);
3143     SDL_EventState(SDL_SYSWMEVENT, SDL_IGNORE);
3144     SDL_EventState(SDL_USEREVENT, SDL_IGNORE);
3145
3146     if (av_lockmgr_register(lockmgr)) {
3147         fprintf(stderr, "Could not initialize lock manager!\n");
3148         do_exit(NULL);
3149     }
3150
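    /* flush_pkt is a sentinel packet: it is queued on seeks, and the decoder
       threads recognize it by comparing pkt->data against flush_pkt.data
       before flushing their codec buffers. */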
3151     av_init_packet(&flush_pkt);
3152     flush_pkt.data = (uint8_t *)(intptr_t)"FLUSH";
3153
3154     is = stream_open(input_filename, file_iformat);
3155     if (!is) {
3156         fprintf(stderr, "Failed to initialize VideoState!\n");
3157         do_exit(NULL);
3158     }
3159
3160     event_loop(is);
3161
3162     /* event_loop() never returns */
3163
3164     return 0;
3165 }