1 /*
2  * Copyright (c) 2003 Fabrice Bellard
3  *
4  * This file is part of FFmpeg.
5  *
6  * FFmpeg is free software; you can redistribute it and/or
7  * modify it under the terms of the GNU Lesser General Public
8  * License as published by the Free Software Foundation; either
9  * version 2.1 of the License, or (at your option) any later version.
10  *
11  * FFmpeg is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
14  * Lesser General Public License for more details.
15  *
16  * You should have received a copy of the GNU Lesser General Public
17  * License along with FFmpeg; if not, write to the Free Software
18  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
19  */
20
21 /**
22  * @file
23  * simple media player based on the FFmpeg libraries
24  */
25
26 #include "config.h"
27 #include <inttypes.h>
28 #include <math.h>
29 #include <limits.h>
30 #include <signal.h>
31 #include "libavutil/avstring.h"
32 #include "libavutil/colorspace.h"
33 #include "libavutil/mathematics.h"
34 #include "libavutil/pixdesc.h"
35 #include "libavutil/imgutils.h"
36 #include "libavutil/dict.h"
37 #include "libavutil/parseutils.h"
38 #include "libavutil/samplefmt.h"
39 #include "libavutil/avassert.h"
40 #include "libavutil/time.h"
41 #include "libavformat/avformat.h"
42 #include "libavdevice/avdevice.h"
43 #include "libswscale/swscale.h"
44 #include "libavutil/opt.h"
45 #include "libavcodec/avfft.h"
46 #include "libswresample/swresample.h"
47
48 #if CONFIG_AVFILTER
49 # include "libavfilter/avcodec.h"
50 # include "libavfilter/avfilter.h"
51 # include "libavfilter/avfiltergraph.h"
52 # include "libavfilter/buffersink.h"
53 # include "libavfilter/buffersrc.h"
54 #endif
55
56 #include <SDL.h>
57 #include <SDL_thread.h>
58
59 #include "cmdutils.h"
60
61 #include <assert.h>
62
63 const char program_name[] = "ffplay";
64 const int program_birth_year = 2003;
65
66 #define MAX_QUEUE_SIZE (15 * 1024 * 1024)
67 #define MIN_FRAMES 5
68
69 /* SDL audio buffer size, in samples. Should be small to have precise
70    A/V sync as SDL does not have hardware buffer fullness info. */
71 #define SDL_AUDIO_BUFFER_SIZE 1024
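/* For example, at 44100 Hz a 1024-sample buffer corresponds to roughly
   1024 / 44100 ≈ 23 ms of audio per callback; keeping it small keeps the
   uncertainty in the audio clock correspondingly small. */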
72
73 /* no AV sync correction is done if below the AV sync threshold */
74 #define AV_SYNC_THRESHOLD 0.01
75 /* no AV correction is done if too big error */
76 #define AV_NOSYNC_THRESHOLD 10.0
77
78 /* maximum audio speed change to get correct sync */
79 #define SAMPLE_CORRECTION_PERCENT_MAX 10
80
81 /* the average is computed over roughly AUDIO_DIFF_AVG_NB A-V differences */
82 #define AUDIO_DIFF_AVG_NB   20
83
84 /* NOTE: the size must be big enough to compensate for the hardware audio buffer size */
85 #define SAMPLE_ARRAY_SIZE (2 * 65536)
86
87 static int sws_flags = SWS_BICUBIC;
88
89 typedef struct PacketQueue {
90     AVPacketList *first_pkt, *last_pkt;
91     int nb_packets;
92     int size;
93     int abort_request;
94     SDL_mutex *mutex;
95     SDL_cond *cond;
96 } PacketQueue;
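/* The packet queue is the hand-off point between the demuxing (read) thread
 * and the decoder threads.  A minimal sketch of the producer/consumer usage
 * found later in this file (names as used there):
 *
 *     packet_queue_put(&is->audioq, &pkt);             // producer: read thread
 *
 *     if (packet_queue_get(&is->videoq, &pkt, 1) < 0)  // consumer: decoder thread,
 *         return -1;                                    // blocking; < 0 means aborted
 *
 * All fields are protected by 'mutex'; 'cond' wakes blocked consumers when a
 * packet arrives or the queue is aborted. */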
97
98 #define VIDEO_PICTURE_QUEUE_SIZE 2
99 #define SUBPICTURE_QUEUE_SIZE 4
100
101 typedef struct VideoPicture {
102     double pts;                                  ///< presentation time stamp for this picture
103     int64_t pos;                                 ///< byte position in file
104     int skip;
105     SDL_Overlay *bmp;
106     int width, height; /* source width & height */
107     AVRational sample_aspect_ratio;
108     int allocated;
109     int reallocate;
110
111 #if CONFIG_AVFILTER
112     AVFilterBufferRef *picref;
113 #endif
114 } VideoPicture;
115
116 typedef struct SubPicture {
117     double pts; /* presentation time stamp for this picture */
118     AVSubtitle sub;
119 } SubPicture;
120
121 typedef struct AudioParams {
122     int freq;
123     int channels;
124     int channel_layout;
125     enum AVSampleFormat fmt;
126 } AudioParams;
127
128 enum {
129     AV_SYNC_AUDIO_MASTER, /* default choice */
130     AV_SYNC_VIDEO_MASTER,
131     AV_SYNC_EXTERNAL_CLOCK, /* synchronize to an external clock */
132 };
133
134 typedef struct VideoState {
135     SDL_Thread *read_tid;
136     SDL_Thread *video_tid;
137     SDL_Thread *refresh_tid;
138     AVInputFormat *iformat;
139     int no_background;
140     int abort_request;
141     int force_refresh;
142     int paused;
143     int last_paused;
144     int seek_req;
145     int seek_flags;
146     int64_t seek_pos;
147     int64_t seek_rel;
148     int read_pause_return;
149     AVFormatContext *ic;
150
151     int audio_stream;
152
153     int av_sync_type;
154     double external_clock; /* external clock base */
155     int64_t external_clock_time;
156
157     double audio_clock;
158     double audio_diff_cum; /* used for AV difference average computation */
159     double audio_diff_avg_coef;
160     double audio_diff_threshold;
161     int audio_diff_avg_count;
162     AVStream *audio_st;
163     PacketQueue audioq;
164     int audio_hw_buf_size;
165     DECLARE_ALIGNED(16,uint8_t,audio_buf2)[AVCODEC_MAX_AUDIO_FRAME_SIZE * 4];
166     uint8_t silence_buf[SDL_AUDIO_BUFFER_SIZE];
167     uint8_t *audio_buf;
168     uint8_t *audio_buf1;
169     unsigned int audio_buf_size; /* in bytes */
170     int audio_buf_index; /* in bytes */
171     int audio_write_buf_size;
172     AVPacket audio_pkt_temp;
173     AVPacket audio_pkt;
174     struct AudioParams audio_src;
175     struct AudioParams audio_tgt;
176     struct SwrContext *swr_ctx;
177     double audio_current_pts;
178     double audio_current_pts_drift;
179     int frame_drops_early;
180     int frame_drops_late;
181     AVFrame *frame;
182
183     enum ShowMode {
184         SHOW_MODE_NONE = -1, SHOW_MODE_VIDEO = 0, SHOW_MODE_WAVES, SHOW_MODE_RDFT, SHOW_MODE_NB
185     } show_mode;
186     int16_t sample_array[SAMPLE_ARRAY_SIZE];
187     int sample_array_index;
188     int last_i_start;
189     RDFTContext *rdft;
190     int rdft_bits;
191     FFTSample *rdft_data;
192     int xpos;
193
194     SDL_Thread *subtitle_tid;
195     int subtitle_stream;
196     int subtitle_stream_changed;
197     AVStream *subtitle_st;
198     PacketQueue subtitleq;
199     SubPicture subpq[SUBPICTURE_QUEUE_SIZE];
200     int subpq_size, subpq_rindex, subpq_windex;
201     SDL_mutex *subpq_mutex;
202     SDL_cond *subpq_cond;
203
204     double frame_timer;
205     double frame_last_pts;
206     double frame_last_duration;
207     double frame_last_dropped_pts;
208     double frame_last_returned_time;
209     double frame_last_filter_delay;
210     int64_t frame_last_dropped_pos;
211     double video_clock;                          ///< pts of last decoded frame / predicted pts of next decoded frame
212     int video_stream;
213     AVStream *video_st;
214     PacketQueue videoq;
215     double video_current_pts;                    ///< current displayed pts (different from video_clock if frame fifos are used)
216     double video_current_pts_drift;              ///< video_current_pts - time (av_gettime) at which we updated video_current_pts - used to have running video pts
217     int64_t video_current_pos;                   ///< current displayed file pos
218     VideoPicture pictq[VIDEO_PICTURE_QUEUE_SIZE];
219     int pictq_size, pictq_rindex, pictq_windex;
220     SDL_mutex *pictq_mutex;
221     SDL_cond *pictq_cond;
222 #if !CONFIG_AVFILTER
223     struct SwsContext *img_convert_ctx;
224 #endif
225
226     char filename[1024];
227     int width, height, xleft, ytop;
228     int step;
229
230 #if CONFIG_AVFILTER
231     AVFilterContext *in_video_filter;           ///< the first filter in the video chain
232     AVFilterContext *out_video_filter;          ///< the last filter in the video chain
233     int use_dr1;
234     FrameBuffer *buffer_pool;
235 #endif
236
237     int refresh;
238     int last_video_stream, last_audio_stream, last_subtitle_stream;
239 } VideoState;
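/* Rough thread layout around VideoState: read_tid demuxes the input into
 * audioq/videoq/subtitleq; video_tid decodes videoq into pictq; subtitle_tid
 * decodes subtitleq into subpq; refresh_tid periodically pushes
 * FF_REFRESH_EVENT so the main SDL thread redraws; the SDL audio callback
 * (defined further down in the file) drains audioq and drives the audio
 * clock. */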
240
241 typedef struct AllocEventProps {
242     VideoState *is;
243     AVFrame *frame;
244 } AllocEventProps;
245
246 static int opt_help(const char *opt, const char *arg);
247
248 /* options specified by the user */
249 static AVInputFormat *file_iformat;
250 static const char *input_filename;
251 static const char *window_title;
252 static int fs_screen_width;
253 static int fs_screen_height;
254 static int screen_width  = 0;
255 static int screen_height = 0;
256 static int audio_disable;
257 static int video_disable;
258 static int wanted_stream[AVMEDIA_TYPE_NB] = {
259     [AVMEDIA_TYPE_AUDIO]    = -1,
260     [AVMEDIA_TYPE_VIDEO]    = -1,
261     [AVMEDIA_TYPE_SUBTITLE] = -1,
262 };
263 static int seek_by_bytes = -1;
264 static int display_disable;
265 static int show_status = 1;
266 static int av_sync_type = AV_SYNC_AUDIO_MASTER;
267 static int64_t start_time = AV_NOPTS_VALUE;
268 static int64_t duration = AV_NOPTS_VALUE;
269 static int workaround_bugs = 1;
270 static int fast = 0;
271 static int genpts = 0;
272 static int lowres = 0;
273 static int idct = FF_IDCT_AUTO;
274 static enum AVDiscard skip_frame       = AVDISCARD_DEFAULT;
275 static enum AVDiscard skip_idct        = AVDISCARD_DEFAULT;
276 static enum AVDiscard skip_loop_filter = AVDISCARD_DEFAULT;
277 static int error_concealment = 3;
278 static int decoder_reorder_pts = -1;
279 static int autoexit;
280 static int exit_on_keydown;
281 static int exit_on_mousedown;
282 static int loop = 1;
283 static int framedrop = -1;
284 static int infinite_buffer = 0;
285 static enum ShowMode show_mode = SHOW_MODE_NONE;
286 static const char *audio_codec_name;
287 static const char *subtitle_codec_name;
288 static const char *video_codec_name;
289 static int rdftspeed = 20;
290 #if CONFIG_AVFILTER
291 static char *vfilters = NULL;
292 #endif
293
294 /* current context */
295 static int is_full_screen;
296 static int64_t audio_callback_time;
297
298 static AVPacket flush_pkt;
299
300 #define FF_ALLOC_EVENT   (SDL_USEREVENT)
301 #define FF_REFRESH_EVENT (SDL_USEREVENT + 1)
302 #define FF_QUIT_EVENT    (SDL_USEREVENT + 2)
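/* SDL user events used for cross-thread signalling: the video thread pushes
 * FF_ALLOC_EVENT so that the YUV overlay is (re)allocated in the main thread
 * (see alloc_picture/queue_picture), refresh_thread pushes FF_REFRESH_EVENT
 * to request a redraw, and FF_QUIT_EVENT asks the main event loop to shut
 * down. */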
303
304 static SDL_Surface *screen;
305
306 void av_noreturn exit_program(int ret)
307 {
308     exit(ret);
309 }
310
311 static int packet_queue_put_private(PacketQueue *q, AVPacket *pkt)
312 {
313     AVPacketList *pkt1;
314
315     if (q->abort_request)
316        return -1;
317
318     pkt1 = av_malloc(sizeof(AVPacketList));
319     if (!pkt1)
320         return -1;
321     pkt1->pkt = *pkt;
322     pkt1->next = NULL;
323
324     if (!q->last_pkt)
325         q->first_pkt = pkt1;
326     else
327         q->last_pkt->next = pkt1;
328     q->last_pkt = pkt1;
329     q->nb_packets++;
330     q->size += pkt1->pkt.size + sizeof(*pkt1);
331     /* XXX: should duplicate packet data in DV case */
332     SDL_CondSignal(q->cond);
333     return 0;
334 }
335
336 static int packet_queue_put(PacketQueue *q, AVPacket *pkt)
337 {
338     int ret;
339
340     /* duplicate the packet */
341     if (pkt != &flush_pkt && av_dup_packet(pkt) < 0)
342         return -1;
343
344     SDL_LockMutex(q->mutex);
345     ret = packet_queue_put_private(q, pkt);
346     SDL_UnlockMutex(q->mutex);
347
348     if (pkt != &flush_pkt && ret < 0)
349         av_free_packet(pkt);
350
351     return ret;
352 }
353
354 /* packet queue handling */
355 static void packet_queue_init(PacketQueue *q)
356 {
357     memset(q, 0, sizeof(PacketQueue));
358     q->mutex = SDL_CreateMutex();
359     q->cond = SDL_CreateCond();
360     q->abort_request = 1;
361 }
362
363 static void packet_queue_flush(PacketQueue *q)
364 {
365     AVPacketList *pkt, *pkt1;
366
367     SDL_LockMutex(q->mutex);
368     for (pkt = q->first_pkt; pkt != NULL; pkt = pkt1) {
369         pkt1 = pkt->next;
370         av_free_packet(&pkt->pkt);
371         av_freep(&pkt);
372     }
373     q->last_pkt = NULL;
374     q->first_pkt = NULL;
375     q->nb_packets = 0;
376     q->size = 0;
377     SDL_UnlockMutex(q->mutex);
378 }
379
380 static void packet_queue_destroy(PacketQueue *q)
381 {
382     packet_queue_flush(q);
383     SDL_DestroyMutex(q->mutex);
384     SDL_DestroyCond(q->cond);
385 }
386
387 static void packet_queue_abort(PacketQueue *q)
388 {
389     SDL_LockMutex(q->mutex);
390
391     q->abort_request = 1;
392
393     SDL_CondSignal(q->cond);
394
395     SDL_UnlockMutex(q->mutex);
396 }
397
398 static void packet_queue_start(PacketQueue *q)
399 {
400     SDL_LockMutex(q->mutex);
401     q->abort_request = 0;
402     packet_queue_put_private(q, &flush_pkt);
403     SDL_UnlockMutex(q->mutex);
404 }
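/* packet_queue_start() primes the queue with the global flush_pkt sentinel.
 * A consumer that dequeues a packet whose data pointer equals flush_pkt.data
 * (see get_video_frame() below) calls avcodec_flush_buffers() instead of
 * decoding it; the read thread queues the same sentinel again after seeks. */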
405
406 /* return < 0 if aborted, 0 if no packet and > 0 if packet.  */
407 static int packet_queue_get(PacketQueue *q, AVPacket *pkt, int block)
408 {
409     AVPacketList *pkt1;
410     int ret;
411
412     SDL_LockMutex(q->mutex);
413
414     for (;;) {
415         if (q->abort_request) {
416             ret = -1;
417             break;
418         }
419
420         pkt1 = q->first_pkt;
421         if (pkt1) {
422             q->first_pkt = pkt1->next;
423             if (!q->first_pkt)
424                 q->last_pkt = NULL;
425             q->nb_packets--;
426             q->size -= pkt1->pkt.size + sizeof(*pkt1);
427             *pkt = pkt1->pkt;
428             av_free(pkt1);
429             ret = 1;
430             break;
431         } else if (!block) {
432             ret = 0;
433             break;
434         } else {
435             SDL_CondWait(q->cond, q->mutex);
436         }
437     }
438     SDL_UnlockMutex(q->mutex);
439     return ret;
440 }
441
442 static inline void fill_rectangle(SDL_Surface *screen,
443                                   int x, int y, int w, int h, int color)
444 {
445     SDL_Rect rect;
446     rect.x = x;
447     rect.y = y;
448     rect.w = w;
449     rect.h = h;
450     SDL_FillRect(screen, &rect, color);
451 }
452
453 #define ALPHA_BLEND(a, oldp, newp, s)\
454 ((((oldp << s) * (255 - (a))) + (newp * (a))) / (255 << s))
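/* ALPHA_BLEND() mixes oldp and newp in fixed point with an 8-bit alpha:
 * with s == 0, a == 0 keeps oldp and a == 255 yields newp.  The shift s is
 * used when newp is a *sum* of 2^s chroma samples: e.g. in the 2x2 loop of
 * blend_subrect() below, u1 holds the sum of 4 samples and the alpha sum is
 * pre-averaged as a1 >> 2, so s = 2 keeps numerator and denominator in the
 * same scale. */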
455
456 #define RGBA_IN(r, g, b, a, s)\
457 {\
458     unsigned int v = ((const uint32_t *)(s))[0];\
459     a = (v >> 24) & 0xff;\
460     r = (v >> 16) & 0xff;\
461     g = (v >> 8) & 0xff;\
462     b = v & 0xff;\
463 }
464
465 #define YUVA_IN(y, u, v, a, s, pal)\
466 {\
467     unsigned int val = ((const uint32_t *)(pal))[*(const uint8_t*)(s)];\
468     a = (val >> 24) & 0xff;\
469     y = (val >> 16) & 0xff;\
470     u = (val >> 8) & 0xff;\
471     v = val & 0xff;\
472 }
473
474 #define YUVA_OUT(d, y, u, v, a)\
475 {\
476     ((uint32_t *)(d))[0] = (a << 24) | (y << 16) | (u << 8) | v;\
477 }
478
479
480 #define BPP 1
481
482 static void blend_subrect(AVPicture *dst, const AVSubtitleRect *rect, int imgw, int imgh)
483 {
484     int wrap, wrap3, width2, skip2;
485     int y, u, v, a, u1, v1, a1, w, h;
486     uint8_t *lum, *cb, *cr;
487     const uint8_t *p;
488     const uint32_t *pal;
489     int dstx, dsty, dstw, dsth;
490
491     dstw = av_clip(rect->w, 0, imgw);
492     dsth = av_clip(rect->h, 0, imgh);
493     dstx = av_clip(rect->x, 0, imgw - dstw);
494     dsty = av_clip(rect->y, 0, imgh - dsth);
495     lum = dst->data[0] + dsty * dst->linesize[0];
496     cb  = dst->data[1] + (dsty >> 1) * dst->linesize[1];
497     cr  = dst->data[2] + (dsty >> 1) * dst->linesize[2];
498
499     width2 = ((dstw + 1) >> 1) + (dstx & ~dstw & 1);
500     skip2 = dstx >> 1;
501     wrap = dst->linesize[0];
502     wrap3 = rect->pict.linesize[0];
503     p = rect->pict.data[0];
504     pal = (const uint32_t *)rect->pict.data[1];  /* Now in YCrCb! */
505
506     if (dsty & 1) {
507         lum += dstx;
508         cb += skip2;
509         cr += skip2;
510
511         if (dstx & 1) {
512             YUVA_IN(y, u, v, a, p, pal);
513             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
514             cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
515             cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
516             cb++;
517             cr++;
518             lum++;
519             p += BPP;
520         }
521         for (w = dstw - (dstx & 1); w >= 2; w -= 2) {
522             YUVA_IN(y, u, v, a, p, pal);
523             u1 = u;
524             v1 = v;
525             a1 = a;
526             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
527
528             YUVA_IN(y, u, v, a, p + BPP, pal);
529             u1 += u;
530             v1 += v;
531             a1 += a;
532             lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
533             cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
534             cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
535             cb++;
536             cr++;
537             p += 2 * BPP;
538             lum += 2;
539         }
540         if (w) {
541             YUVA_IN(y, u, v, a, p, pal);
542             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
543             cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
544             cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
545             p++;
546             lum++;
547         }
548         p += wrap3 - dstw * BPP;
549         lum += wrap - dstw - dstx;
550         cb += dst->linesize[1] - width2 - skip2;
551         cr += dst->linesize[2] - width2 - skip2;
552     }
553     for (h = dsth - (dsty & 1); h >= 2; h -= 2) {
554         lum += dstx;
555         cb += skip2;
556         cr += skip2;
557
558         if (dstx & 1) {
559             YUVA_IN(y, u, v, a, p, pal);
560             u1 = u;
561             v1 = v;
562             a1 = a;
563             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
564             p += wrap3;
565             lum += wrap;
566             YUVA_IN(y, u, v, a, p, pal);
567             u1 += u;
568             v1 += v;
569             a1 += a;
570             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
571             cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
572             cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
573             cb++;
574             cr++;
575             p += -wrap3 + BPP;
576             lum += -wrap + 1;
577         }
578         for (w = dstw - (dstx & 1); w >= 2; w -= 2) {
579             YUVA_IN(y, u, v, a, p, pal);
580             u1 = u;
581             v1 = v;
582             a1 = a;
583             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
584
585             YUVA_IN(y, u, v, a, p + BPP, pal);
586             u1 += u;
587             v1 += v;
588             a1 += a;
589             lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
590             p += wrap3;
591             lum += wrap;
592
593             YUVA_IN(y, u, v, a, p, pal);
594             u1 += u;
595             v1 += v;
596             a1 += a;
597             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
598
599             YUVA_IN(y, u, v, a, p + BPP, pal);
600             u1 += u;
601             v1 += v;
602             a1 += a;
603             lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
604
605             cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 2);
606             cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 2);
607
608             cb++;
609             cr++;
610             p += -wrap3 + 2 * BPP;
611             lum += -wrap + 2;
612         }
613         if (w) {
614             YUVA_IN(y, u, v, a, p, pal);
615             u1 = u;
616             v1 = v;
617             a1 = a;
618             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
619             p += wrap3;
620             lum += wrap;
621             YUVA_IN(y, u, v, a, p, pal);
622             u1 += u;
623             v1 += v;
624             a1 += a;
625             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
626             cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
627             cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
628             cb++;
629             cr++;
630             p += -wrap3 + BPP;
631             lum += -wrap + 1;
632         }
633         p += wrap3 + (wrap3 - dstw * BPP);
634         lum += wrap + (wrap - dstw - dstx);
635         cb += dst->linesize[1] - width2 - skip2;
636         cr += dst->linesize[2] - width2 - skip2;
637     }
638     /* handle odd height */
639     if (h) {
640         lum += dstx;
641         cb += skip2;
642         cr += skip2;
643
644         if (dstx & 1) {
645             YUVA_IN(y, u, v, a, p, pal);
646             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
647             cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
648             cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
649             cb++;
650             cr++;
651             lum++;
652             p += BPP;
653         }
654         for (w = dstw - (dstx & 1); w >= 2; w -= 2) {
655             YUVA_IN(y, u, v, a, p, pal);
656             u1 = u;
657             v1 = v;
658             a1 = a;
659             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
660
661             YUVA_IN(y, u, v, a, p + BPP, pal);
662             u1 += u;
663             v1 += v;
664             a1 += a;
665             lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
666             cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
667             cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
668             cb++;
669             cr++;
670             p += 2 * BPP;
671             lum += 2;
672         }
673         if (w) {
674             YUVA_IN(y, u, v, a, p, pal);
675             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
676             cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
677             cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
678         }
679     }
680 }
681
682 static void free_subpicture(SubPicture *sp)
683 {
684     avsubtitle_free(&sp->sub);
685 }
686
687 static void video_image_display(VideoState *is)
688 {
689     VideoPicture *vp;
690     SubPicture *sp;
691     AVPicture pict;
692     float aspect_ratio;
693     int width, height, x, y;
694     SDL_Rect rect;
695     int i;
696
697     vp = &is->pictq[is->pictq_rindex];
698     if (vp->bmp) {
699         if (vp->sample_aspect_ratio.num == 0)
700             aspect_ratio = 0;
701         else
702             aspect_ratio = av_q2d(vp->sample_aspect_ratio);
703
704         if (aspect_ratio <= 0.0)
705             aspect_ratio = 1.0;
706         aspect_ratio *= (float)vp->width / (float)vp->height;
707
708         if (is->subtitle_st) {
709             if (is->subpq_size > 0) {
710                 sp = &is->subpq[is->subpq_rindex];
711
712                 if (vp->pts >= sp->pts + ((float) sp->sub.start_display_time / 1000)) {
713                     SDL_LockYUVOverlay (vp->bmp);
714
715                     pict.data[0] = vp->bmp->pixels[0];
716                     pict.data[1] = vp->bmp->pixels[2];
717                     pict.data[2] = vp->bmp->pixels[1];
718
719                     pict.linesize[0] = vp->bmp->pitches[0];
720                     pict.linesize[1] = vp->bmp->pitches[2];
721                     pict.linesize[2] = vp->bmp->pitches[1];
722
723                     for (i = 0; i < sp->sub.num_rects; i++)
724                         blend_subrect(&pict, sp->sub.rects[i],
725                                       vp->bmp->w, vp->bmp->h);
726
727                     SDL_UnlockYUVOverlay (vp->bmp);
728                 }
729             }
730         }
731
732
733         /* XXX: we assume the screen has a 1:1 pixel aspect ratio */
734         height = is->height;
735         width = ((int)rint(height * aspect_ratio)) & ~1;
736         if (width > is->width) {
737             width = is->width;
738             height = ((int)rint(width / aspect_ratio)) & ~1;
739         }
740         x = (is->width - width) / 2;
741         y = (is->height - height) / 2;
742         is->no_background = 0;
743         rect.x = is->xleft + x;
744         rect.y = is->ytop  + y;
745         rect.w = FFMAX(width,  1);
746         rect.h = FFMAX(height, 1);
747         SDL_DisplayYUVOverlay(vp->bmp, &rect);
748     }
749 }
750
751 static inline int compute_mod(int a, int b)
752 {
753     return a < 0 ? a%b + b : a%b;
754 }
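/* compute_mod() wraps negative indices back into the sample buffer:
 * e.g. compute_mod(-3, 10) == 7, whereas the C expression -3 % 10 is -3. */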
755
756 static void video_audio_display(VideoState *s)
757 {
758     int i, i_start, x, y1, y, ys, delay, n, nb_display_channels;
759     int ch, channels, h, h2, bgcolor, fgcolor;
760     int64_t time_diff;
761     int rdft_bits, nb_freq;
762
763     for (rdft_bits = 1; (1 << rdft_bits) < 2 * s->height; rdft_bits++)
764         ;
765     nb_freq = 1 << (rdft_bits - 1);
766
767     /* compute display index : center on currently output samples */
768     channels = s->audio_tgt.channels;
769     nb_display_channels = channels;
770     if (!s->paused) {
771         int data_used= s->show_mode == SHOW_MODE_WAVES ? s->width : (2*nb_freq);
772         n = 2 * channels;
773         delay = s->audio_write_buf_size;
774         delay /= n;
775
776         /* to be more precise, we take into account the time spent since
777            the last buffer computation */
778         if (audio_callback_time) {
779             time_diff = av_gettime() - audio_callback_time;
780             delay -= (time_diff * s->audio_tgt.freq) / 1000000;
781         }
782
783         delay += 2 * data_used;
784         if (delay < data_used)
785             delay = data_used;
786
787         i_start= x = compute_mod(s->sample_array_index - delay * channels, SAMPLE_ARRAY_SIZE);
788         if (s->show_mode == SHOW_MODE_WAVES) {
789             h = INT_MIN;
790             for (i = 0; i < 1000; i += channels) {
791                 int idx = (SAMPLE_ARRAY_SIZE + x - i) % SAMPLE_ARRAY_SIZE;
792                 int a = s->sample_array[idx];
793                 int b = s->sample_array[(idx + 4 * channels) % SAMPLE_ARRAY_SIZE];
794                 int c = s->sample_array[(idx + 5 * channels) % SAMPLE_ARRAY_SIZE];
795                 int d = s->sample_array[(idx + 9 * channels) % SAMPLE_ARRAY_SIZE];
796                 int score = a - d;
797                 if (h < score && (b ^ c) < 0) {
798                     h = score;
799                     i_start = idx;
800                 }
801             }
802         }
803
804         s->last_i_start = i_start;
805     } else {
806         i_start = s->last_i_start;
807     }
808
809     bgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0x00);
810     if (s->show_mode == SHOW_MODE_WAVES) {
811         fill_rectangle(screen,
812                        s->xleft, s->ytop, s->width, s->height,
813                        bgcolor);
814
815         fgcolor = SDL_MapRGB(screen->format, 0xff, 0xff, 0xff);
816
817         /* total height for one channel */
818         h = s->height / nb_display_channels;
819         /* graph height / 2 */
820         h2 = (h * 9) / 20;
821         for (ch = 0; ch < nb_display_channels; ch++) {
822             i = i_start + ch;
823             y1 = s->ytop + ch * h + (h / 2); /* position of center line */
824             for (x = 0; x < s->width; x++) {
825                 y = (s->sample_array[i] * h2) >> 15;
826                 if (y < 0) {
827                     y = -y;
828                     ys = y1 - y;
829                 } else {
830                     ys = y1;
831                 }
832                 fill_rectangle(screen,
833                                s->xleft + x, ys, 1, y,
834                                fgcolor);
835                 i += channels;
836                 if (i >= SAMPLE_ARRAY_SIZE)
837                     i -= SAMPLE_ARRAY_SIZE;
838             }
839         }
840
841         fgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0xff);
842
843         for (ch = 1; ch < nb_display_channels; ch++) {
844             y = s->ytop + ch * h;
845             fill_rectangle(screen,
846                            s->xleft, y, s->width, 1,
847                            fgcolor);
848         }
849         SDL_UpdateRect(screen, s->xleft, s->ytop, s->width, s->height);
850     } else {
851         nb_display_channels= FFMIN(nb_display_channels, 2);
852         if (rdft_bits != s->rdft_bits) {
853             av_rdft_end(s->rdft);
854             av_free(s->rdft_data);
855             s->rdft = av_rdft_init(rdft_bits, DFT_R2C);
856             s->rdft_bits = rdft_bits;
857             s->rdft_data = av_malloc(4 * nb_freq * sizeof(*s->rdft_data));
858         }
859         {
860             FFTSample *data[2];
861             for (ch = 0; ch < nb_display_channels; ch++) {
862                 data[ch] = s->rdft_data + 2 * nb_freq * ch;
863                 i = i_start + ch;
864                 for (x = 0; x < 2 * nb_freq; x++) {
865                     double w = (x-nb_freq) * (1.0 / nb_freq);
866                     data[ch][x] = s->sample_array[i] * (1.0 - w * w);
867                     i += channels;
868                     if (i >= SAMPLE_ARRAY_SIZE)
869                         i -= SAMPLE_ARRAY_SIZE;
870                 }
871                 av_rdft_calc(s->rdft, data[ch]);
872             }
873             // least efficient way to do this; we should of course access it directly, but it's more than fast enough
874             for (y = 0; y < s->height; y++) {
875                 double w = 1 / sqrt(nb_freq);
876                 int a = sqrt(w * sqrt(data[0][2 * y + 0] * data[0][2 * y + 0] + data[0][2 * y + 1] * data[0][2 * y + 1]));
877                 int b = (nb_display_channels == 2 ) ? sqrt(w * sqrt(data[1][2 * y + 0] * data[1][2 * y + 0]
878                        + data[1][2 * y + 1] * data[1][2 * y + 1])) : a;
879                 a = FFMIN(a, 255);
880                 b = FFMIN(b, 255);
881                 fgcolor = SDL_MapRGB(screen->format, a, b, (a + b) / 2);
882
883                 fill_rectangle(screen,
884                             s->xpos, s->height-y, 1, 1,
885                             fgcolor);
886             }
887         }
888         SDL_UpdateRect(screen, s->xpos, s->ytop, 1, s->height);
889         if (!s->paused)
890             s->xpos++;
891         if (s->xpos >= s->width)
892             s->xpos= s->xleft;
893     }
894 }
895
896 static void stream_close(VideoState *is)
897 {
898     VideoPicture *vp;
899     int i;
900     /* XXX: use a special url_shutdown call to abort parse cleanly */
901     is->abort_request = 1;
902     SDL_WaitThread(is->read_tid, NULL);
903     SDL_WaitThread(is->refresh_tid, NULL);
904     packet_queue_destroy(&is->videoq);
905     packet_queue_destroy(&is->audioq);
906     packet_queue_destroy(&is->subtitleq);
907
908     /* free all pictures */
909     for (i = 0; i < VIDEO_PICTURE_QUEUE_SIZE; i++) {
910         vp = &is->pictq[i];
911 #if CONFIG_AVFILTER
912         avfilter_unref_bufferp(&vp->picref);
913 #endif
914         if (vp->bmp) {
915             SDL_FreeYUVOverlay(vp->bmp);
916             vp->bmp = NULL;
917         }
918     }
919     SDL_DestroyMutex(is->pictq_mutex);
920     SDL_DestroyCond(is->pictq_cond);
921     SDL_DestroyMutex(is->subpq_mutex);
922     SDL_DestroyCond(is->subpq_cond);
923 #if !CONFIG_AVFILTER
924     if (is->img_convert_ctx)
925         sws_freeContext(is->img_convert_ctx);
926 #endif
927     av_free(is);
928 }
929
930 static void do_exit(VideoState *is)
931 {
932     if (is) {
933         stream_close(is);
934     }
935     av_lockmgr_register(NULL);
936     uninit_opts();
937 #if CONFIG_AVFILTER
938     avfilter_uninit();
939 #endif
940     avformat_network_deinit();
941     if (show_status)
942         printf("\n");
943     SDL_Quit();
944     av_log(NULL, AV_LOG_QUIET, "%s", "");
945     exit(0);
946 }
947
948 static void sigterm_handler(int sig)
949 {
950     exit(123);
951 }
952
953 static int video_open(VideoState *is, int force_set_video_mode)
954 {
955     int flags = SDL_HWSURFACE | SDL_ASYNCBLIT | SDL_HWACCEL;
956     int w,h;
957     VideoPicture *vp = &is->pictq[is->pictq_rindex];
958
959     if (is_full_screen) flags |= SDL_FULLSCREEN;
960     else                flags |= SDL_RESIZABLE;
961
962     if (is_full_screen && fs_screen_width) {
963         w = fs_screen_width;
964         h = fs_screen_height;
965     } else if (!is_full_screen && screen_width) {
966         w = screen_width;
967         h = screen_height;
968     } else if (vp->width) {
969         w = vp->width;
970         h = vp->height;
971     } else {
972         w = 640;
973         h = 480;
974     }
975     if (screen && is->width == screen->w && screen->w == w
976        && is->height== screen->h && screen->h == h && !force_set_video_mode)
977         return 0;
978     screen = SDL_SetVideoMode(w, h, 0, flags);
979     if (!screen) {
980         fprintf(stderr, "SDL: could not set video mode - exiting\n");
981         do_exit(is);
982     }
983     if (!window_title)
984         window_title = input_filename;
985     SDL_WM_SetCaption(window_title, window_title);
986
987     is->width  = screen->w;
988     is->height = screen->h;
989
990     return 0;
991 }
992
993 /* display the current picture, if any */
994 static void video_display(VideoState *is)
995 {
996     if (!screen)
997         video_open(is, 0);
998     if (is->audio_st && is->show_mode != SHOW_MODE_VIDEO)
999         video_audio_display(is);
1000     else if (is->video_st)
1001         video_image_display(is);
1002 }
1003
1004 static int refresh_thread(void *opaque)
1005 {
1006     VideoState *is= opaque;
1007     while (!is->abort_request) {
1008         SDL_Event event;
1009         event.type = FF_REFRESH_EVENT;
1010         event.user.data1 = opaque;
1011         if (!is->refresh && (!is->paused || is->force_refresh)) {
1012             is->refresh = 1;
1013             SDL_PushEvent(&event);
1014         }
1015         // FIXME: ideally we should wait for the correct time, but SDL's event passing is so slow that it would be pointless
1016         av_usleep(is->audio_st && is->show_mode != SHOW_MODE_VIDEO ? rdftspeed*1000 : 5000);
1017     }
1018     return 0;
1019 }
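/* refresh_thread only paces the display: the actual drawing happens in the
 * main thread when it handles FF_REFRESH_EVENT (video_refresh).  The sleep
 * interval is rdftspeed milliseconds (av_usleep takes microseconds) when an
 * audio visualisation is shown, otherwise a fixed 5 ms. */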
1020
1021 /* get the current audio clock value */
1022 static double get_audio_clock(VideoState *is)
1023 {
1024     if (is->paused) {
1025         return is->audio_current_pts;
1026     } else {
1027         return is->audio_current_pts_drift + av_gettime() / 1000000.0;
1028     }
1029 }
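/* The *_pts_drift members store "pts minus the wall-clock time at which the
 * pts was sampled", so adding the current wall clock back yields a clock that
 * keeps running between updates.  Example: if the audio pts was 10.0 s when
 * av_gettime() read 1000.0 s, the drift is -990.0; queried 0.5 s later the
 * clock reads -990.0 + 1000.5 = 10.5 s.  The same scheme is used for the
 * video clock below. */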
1030
1031 /* get the current video clock value */
1032 static double get_video_clock(VideoState *is)
1033 {
1034     if (is->paused) {
1035         return is->video_current_pts;
1036     } else {
1037         return is->video_current_pts_drift + av_gettime() / 1000000.0;
1038     }
1039 }
1040
1041 /* get the current external clock value */
1042 static double get_external_clock(VideoState *is)
1043 {
1044     int64_t ti;
1045     ti = av_gettime();
1046     return is->external_clock + ((ti - is->external_clock_time) * 1e-6);
1047 }
1048
1049 /* get the current master clock value */
1050 static double get_master_clock(VideoState *is)
1051 {
1052     double val;
1053
1054     if (is->av_sync_type == AV_SYNC_VIDEO_MASTER) {
1055         if (is->video_st)
1056             val = get_video_clock(is);
1057         else
1058             val = get_audio_clock(is);
1059     } else if (is->av_sync_type == AV_SYNC_AUDIO_MASTER) {
1060         if (is->audio_st)
1061             val = get_audio_clock(is);
1062         else
1063             val = get_video_clock(is);
1064     } else {
1065         val = get_external_clock(is);
1066     }
1067     return val;
1068 }
1069
1070 /* seek in the stream */
1071 static void stream_seek(VideoState *is, int64_t pos, int64_t rel, int seek_by_bytes)
1072 {
1073     if (!is->seek_req) {
1074         is->seek_pos = pos;
1075         is->seek_rel = rel;
1076         is->seek_flags &= ~AVSEEK_FLAG_BYTE;
1077         if (seek_by_bytes)
1078             is->seek_flags |= AVSEEK_FLAG_BYTE;
1079         is->seek_req = 1;
1080     }
1081 }
1082
1083 /* pause or resume the video */
1084 static void stream_toggle_pause(VideoState *is)
1085 {
1086     if (is->paused) {
1087         is->frame_timer += av_gettime() / 1000000.0 + is->video_current_pts_drift - is->video_current_pts;
1088         if (is->read_pause_return != AVERROR(ENOSYS)) {
1089             is->video_current_pts = is->video_current_pts_drift + av_gettime() / 1000000.0;
1090         }
1091         is->video_current_pts_drift = is->video_current_pts - av_gettime() / 1000000.0;
1092     }
1093     is->paused = !is->paused;
1094 }
1095
1096 static double compute_target_delay(double delay, VideoState *is)
1097 {
1098     double sync_threshold, diff = 0;
1099
1100     /* update delay to follow master synchronisation source */
1101     if (((is->av_sync_type == AV_SYNC_AUDIO_MASTER && is->audio_st) ||
1102          is->av_sync_type == AV_SYNC_EXTERNAL_CLOCK)) {
1103         /* if video is slave, we try to correct big delays by
1104            duplicating or deleting a frame */
1105         diff = get_video_clock(is) - get_master_clock(is);
1106
1107         /* skip or repeat frame. We take into account the
1108            delay to compute the threshold. I still don't know
1109            if it is the best guess */
1110         sync_threshold = FFMAX(AV_SYNC_THRESHOLD, delay);
1111         if (fabs(diff) < AV_NOSYNC_THRESHOLD) {
1112             if (diff <= -sync_threshold)
1113                 delay = 0;
1114             else if (diff >= sync_threshold)
1115                 delay = 2 * delay;
1116         }
1117     }
1118
1119     av_dlog(NULL, "video: delay=%0.3f A-V=%f\n",
1120             delay, -diff);
1121
1122     return delay;
1123 }
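/* Worked example: at 25 fps the nominal delay is 40 ms, so sync_threshold is
 * max(AV_SYNC_THRESHOLD, 0.040) = 40 ms.  If the video clock is more than
 * 40 ms behind the master clock the returned delay becomes 0 (show the frame
 * immediately to catch up); if it is more than 40 ms ahead the delay is
 * doubled to 80 ms.  Differences larger than AV_NOSYNC_THRESHOLD (10 s) are
 * considered beyond repair and left uncorrected. */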
1124
1125 static void pictq_next_picture(VideoState *is) {
1126     /* update queue size and signal for next picture */
1127     if (++is->pictq_rindex == VIDEO_PICTURE_QUEUE_SIZE)
1128         is->pictq_rindex = 0;
1129
1130     SDL_LockMutex(is->pictq_mutex);
1131     is->pictq_size--;
1132     SDL_CondSignal(is->pictq_cond);
1133     SDL_UnlockMutex(is->pictq_mutex);
1134 }
1135
1136 static void update_video_pts(VideoState *is, double pts, int64_t pos) {
1137     double time = av_gettime() / 1000000.0;
1138     /* update current video pts */
1139     is->video_current_pts = pts;
1140     is->video_current_pts_drift = is->video_current_pts - time;
1141     is->video_current_pos = pos;
1142     is->frame_last_pts = pts;
1143 }
1144
1145 /* called to display each frame */
1146 static void video_refresh(void *opaque)
1147 {
1148     VideoState *is = opaque;
1149     VideoPicture *vp;
1150     double time;
1151
1152     SubPicture *sp, *sp2;
1153
1154     if (is->video_st) {
1155 retry:
1156         if (is->pictq_size == 0) {
1157             SDL_LockMutex(is->pictq_mutex);
1158             if (is->frame_last_dropped_pts != AV_NOPTS_VALUE && is->frame_last_dropped_pts > is->frame_last_pts) {
1159                 update_video_pts(is, is->frame_last_dropped_pts, is->frame_last_dropped_pos);
1160                 is->frame_last_dropped_pts = AV_NOPTS_VALUE;
1161             }
1162             SDL_UnlockMutex(is->pictq_mutex);
1163             // nothing to do, no picture to display in the queue
1164         } else {
1165             double last_duration, duration, delay;
1166             /* dequeue the picture */
1167             vp = &is->pictq[is->pictq_rindex];
1168
1169             if (vp->skip) {
1170                 pictq_next_picture(is);
1171                 goto retry;
1172             }
1173
1174             if (is->paused)
1175                 goto display;
1176
1177             /* compute nominal last_duration */
1178             last_duration = vp->pts - is->frame_last_pts;
1179             if (last_duration > 0 && last_duration < 10.0) {
1180                 /* if duration of the last frame was sane, update last_duration in video state */
1181                 is->frame_last_duration = last_duration;
1182             }
1183             delay = compute_target_delay(is->frame_last_duration, is);
1184
1185             time= av_gettime()/1000000.0;
1186             if (time < is->frame_timer + delay)
1187                 return;
1188
1189             if (delay > 0)
1190                 is->frame_timer += delay * FFMAX(1, floor((time-is->frame_timer) / delay));
1191
1192             SDL_LockMutex(is->pictq_mutex);
1193             update_video_pts(is, vp->pts, vp->pos);
1194             SDL_UnlockMutex(is->pictq_mutex);
1195
1196             if (is->pictq_size > 1) {
1197                 VideoPicture *nextvp = &is->pictq[(is->pictq_rindex + 1) % VIDEO_PICTURE_QUEUE_SIZE];
1198                 duration = nextvp->pts - vp->pts;
1199                 if((framedrop>0 || (framedrop && is->audio_st)) && time > is->frame_timer + duration){
1200                     is->frame_drops_late++;
1201                     pictq_next_picture(is);
1202                     goto retry;
1203                 }
1204             }
1205
1206             if (is->subtitle_st) {
1207                 if (is->subtitle_stream_changed) {
1208                     SDL_LockMutex(is->subpq_mutex);
1209
1210                     while (is->subpq_size) {
1211                         free_subpicture(&is->subpq[is->subpq_rindex]);
1212
1213                         /* update queue size and signal for next picture */
1214                         if (++is->subpq_rindex == SUBPICTURE_QUEUE_SIZE)
1215                             is->subpq_rindex = 0;
1216
1217                         is->subpq_size--;
1218                     }
1219                     is->subtitle_stream_changed = 0;
1220
1221                     SDL_CondSignal(is->subpq_cond);
1222                     SDL_UnlockMutex(is->subpq_mutex);
1223                 } else {
1224                     if (is->subpq_size > 0) {
1225                         sp = &is->subpq[is->subpq_rindex];
1226
1227                         if (is->subpq_size > 1)
1228                             sp2 = &is->subpq[(is->subpq_rindex + 1) % SUBPICTURE_QUEUE_SIZE];
1229                         else
1230                             sp2 = NULL;
1231
1232                         if ((is->video_current_pts > (sp->pts + ((float) sp->sub.end_display_time / 1000)))
1233                                 || (sp2 && is->video_current_pts > (sp2->pts + ((float) sp2->sub.start_display_time / 1000))))
1234                         {
1235                             free_subpicture(sp);
1236
1237                             /* update queue size and signal for next picture */
1238                             if (++is->subpq_rindex == SUBPICTURE_QUEUE_SIZE)
1239                                 is->subpq_rindex = 0;
1240
1241                             SDL_LockMutex(is->subpq_mutex);
1242                             is->subpq_size--;
1243                             SDL_CondSignal(is->subpq_cond);
1244                             SDL_UnlockMutex(is->subpq_mutex);
1245                         }
1246                     }
1247                 }
1248             }
1249
1250 display:
1251             /* display picture */
1252             if (!display_disable)
1253                 video_display(is);
1254
1255             if (!is->paused)
1256                 pictq_next_picture(is);
1257         }
1258     } else if (is->audio_st) {
1259         /* draw the next audio frame */
1260
1261         /* if there is only an audio stream, then display the audio bars (better
1262            than nothing, just to test the implementation) */
1263
1264         /* display picture */
1265         if (!display_disable)
1266             video_display(is);
1267     }
1268     is->force_refresh = 0;
1269     if (show_status) {
1270         static int64_t last_time;
1271         int64_t cur_time;
1272         int aqsize, vqsize, sqsize;
1273         double av_diff;
1274
1275         cur_time = av_gettime();
1276         if (!last_time || (cur_time - last_time) >= 30000) {
1277             aqsize = 0;
1278             vqsize = 0;
1279             sqsize = 0;
1280             if (is->audio_st)
1281                 aqsize = is->audioq.size;
1282             if (is->video_st)
1283                 vqsize = is->videoq.size;
1284             if (is->subtitle_st)
1285                 sqsize = is->subtitleq.size;
1286             av_diff = 0;
1287             if (is->audio_st && is->video_st)
1288                 av_diff = get_audio_clock(is) - get_video_clock(is);
1289             printf("%7.2f A-V:%7.3f fd=%4d aq=%5dKB vq=%5dKB sq=%5dB f=%"PRId64"/%"PRId64"   \r",
1290                    get_master_clock(is),
1291                    av_diff,
1292                    is->frame_drops_early + is->frame_drops_late,
1293                    aqsize / 1024,
1294                    vqsize / 1024,
1295                    sqsize,
1296                    is->video_st ? is->video_st->codec->pts_correction_num_faulty_dts : 0,
1297                    is->video_st ? is->video_st->codec->pts_correction_num_faulty_pts : 0);
1298             fflush(stdout);
1299             last_time = cur_time;
1300         }
1301     }
1302 }
1303
1304 /* allocate a picture (this needs to be done in the main thread to avoid
1305    potential locking problems) */
1306 static void alloc_picture(AllocEventProps *event_props)
1307 {
1308     VideoState *is = event_props->is;
1309     AVFrame *frame = event_props->frame;
1310     VideoPicture *vp;
1311
1312     vp = &is->pictq[is->pictq_windex];
1313
1314     if (vp->bmp)
1315         SDL_FreeYUVOverlay(vp->bmp);
1316
1317 #if CONFIG_AVFILTER
1318     avfilter_unref_bufferp(&vp->picref);
1319 #endif
1320
1321     vp->width   = frame->width;
1322     vp->height  = frame->height;
1323
1324     video_open(event_props->is, 0);
1325
1326     vp->bmp = SDL_CreateYUVOverlay(vp->width, vp->height,
1327                                    SDL_YV12_OVERLAY,
1328                                    screen);
1329     if (!vp->bmp || vp->bmp->pitches[0] < vp->width) {
1330         /* SDL allocates a buffer smaller than requested if the video
1331          * overlay hardware is unable to support the requested size. */
1332         fprintf(stderr, "Error: the video system does not support an image\n"
1333                         "size of %dx%d pixels. Try using -lowres or -vf \"scale=w:h\"\n"
1334                         "to reduce the image size.\n", vp->width, vp->height );
1335         do_exit(is);
1336     }
1337
1338     SDL_LockMutex(is->pictq_mutex);
1339     vp->allocated = 1;
1340     SDL_CondSignal(is->pictq_cond);
1341     SDL_UnlockMutex(is->pictq_mutex);
1342 }
1343
1344 static int queue_picture(VideoState *is, AVFrame *src_frame, double pts1, int64_t pos)
1345 {
1346     VideoPicture *vp;
1347     double frame_delay, pts = pts1;
1348
1349     /* compute the exact PTS for the picture if it is omitted in the stream
1350      * pts1 is the dts of the pkt / pts of the frame */
1351     if (pts != 0) {
1352         /* update video clock with pts, if present */
1353         is->video_clock = pts;
1354     } else {
1355         pts = is->video_clock;
1356     }
1357     /* update video clock for next frame */
1358     frame_delay = av_q2d(is->video_st->codec->time_base);
1359     /* for MPEG2, the frame can be repeated, so we update the
1360        clock accordingly */
1361     frame_delay += src_frame->repeat_pict * (frame_delay * 0.5);
1362     is->video_clock += frame_delay;
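    /* e.g. with a 1/50 s time_base and repeat_pict == 1 (soft telecine) the
       frame covers 0.02 + 0.01 = 0.03 s, i.e. 1.5 nominal frame periods */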
1363
1364 #if defined(DEBUG_SYNC) && 0
1365     printf("frame_type=%c clock=%0.3f pts=%0.3f\n",
1366            av_get_picture_type_char(src_frame->pict_type), pts, pts1);
1367 #endif
1368
1369     /* wait until we have space to put a new picture */
1370     SDL_LockMutex(is->pictq_mutex);
1371
1372     while (is->pictq_size >= VIDEO_PICTURE_QUEUE_SIZE &&
1373            !is->videoq.abort_request) {
1374         SDL_CondWait(is->pictq_cond, is->pictq_mutex);
1375     }
1376     SDL_UnlockMutex(is->pictq_mutex);
1377
1378     if (is->videoq.abort_request)
1379         return -1;
1380
1381     vp = &is->pictq[is->pictq_windex];
1382
1383     /* alloc or resize hardware picture buffer */
1384     if (!vp->bmp || vp->reallocate ||
1385         vp->width  != src_frame->width ||
1386         vp->height != src_frame->height) {
1387         SDL_Event event;
1388         AllocEventProps event_props;
1389
1390         event_props.frame = src_frame;
1391         event_props.is = is;
1392
1393         vp->allocated  = 0;
1394         vp->reallocate = 0;
1395
1396         /* the allocation must be done in the main thread to avoid
1397            locking problems. We wait in this block for the event to complete,
1398            so we can pass a pointer to event_props to it. */
1399         event.type = FF_ALLOC_EVENT;
1400         event.user.data1 = &event_props;
1401         SDL_PushEvent(&event);
1402
1403         /* wait until the picture is allocated */
1404         SDL_LockMutex(is->pictq_mutex);
1405         while (!vp->allocated && !is->videoq.abort_request) {
1406             SDL_CondWait(is->pictq_cond, is->pictq_mutex);
1407         }
1408         /* if the queue is aborted, we have to pop the pending ALLOC event or wait for the allocation to complete */
1409         if (is->videoq.abort_request && SDL_PeepEvents(&event, 1, SDL_GETEVENT, SDL_EVENTMASK(FF_ALLOC_EVENT)) != 1) {
1410             while (!vp->allocated) {
1411                 SDL_CondWait(is->pictq_cond, is->pictq_mutex);
1412             }
1413         }
1414         SDL_UnlockMutex(is->pictq_mutex);
1415
1416         if (is->videoq.abort_request)
1417             return -1;
1418     }
1419
1420     /* if the frame is not skipped, then display it */
1421     if (vp->bmp) {
1422         AVPicture pict = { { 0 } };
1423 #if CONFIG_AVFILTER
1424         avfilter_unref_bufferp(&vp->picref);
1425         vp->picref = src_frame->opaque;
1426 #endif
1427
1428         /* get a pointer on the bitmap */
1429         SDL_LockYUVOverlay (vp->bmp);
1430
1431         pict.data[0] = vp->bmp->pixels[0];
1432         pict.data[1] = vp->bmp->pixels[2];
1433         pict.data[2] = vp->bmp->pixels[1];
1434
1435         pict.linesize[0] = vp->bmp->pitches[0];
1436         pict.linesize[1] = vp->bmp->pitches[2];
1437         pict.linesize[2] = vp->bmp->pitches[1];
1438
1439 #if CONFIG_AVFILTER
1440         // FIXME use direct rendering
1441         av_picture_copy(&pict, (AVPicture *)src_frame,
1442                         src_frame->format, vp->width, vp->height);
1443         vp->sample_aspect_ratio = vp->picref->video->sample_aspect_ratio;
1444 #else
1445         sws_flags = av_get_int(sws_opts, "sws_flags", NULL);
1446         is->img_convert_ctx = sws_getCachedContext(is->img_convert_ctx,
1447             vp->width, vp->height, src_frame->format, vp->width, vp->height,
1448             PIX_FMT_YUV420P, sws_flags, NULL, NULL, NULL);
1449         if (is->img_convert_ctx == NULL) {
1450             fprintf(stderr, "Cannot initialize the conversion context\n");
1451             exit(1);
1452         }
1453         sws_scale(is->img_convert_ctx, src_frame->data, src_frame->linesize,
1454                   0, vp->height, pict.data, pict.linesize);
1455         vp->sample_aspect_ratio = av_guess_sample_aspect_ratio(is->ic, is->video_st, src_frame);
1456 #endif
1457         /* update the bitmap content */
1458         SDL_UnlockYUVOverlay(vp->bmp);
1459
1460         vp->pts = pts;
1461         vp->pos = pos;
1462         vp->skip = 0;
1463
1464         /* now we can update the picture count */
1465         if (++is->pictq_windex == VIDEO_PICTURE_QUEUE_SIZE)
1466             is->pictq_windex = 0;
1467         SDL_LockMutex(is->pictq_mutex);
1468         is->pictq_size++;
1469         SDL_UnlockMutex(is->pictq_mutex);
1470     }
1471     return 0;
1472 }
1473
1474 static int get_video_frame(VideoState *is, AVFrame *frame, int64_t *pts, AVPacket *pkt)
1475 {
1476     int got_picture, i;
1477
1478     if (packet_queue_get(&is->videoq, pkt, 1) < 0)
1479         return -1;
1480
1481     if (pkt->data == flush_pkt.data) {
1482         avcodec_flush_buffers(is->video_st->codec);
1483
1484         SDL_LockMutex(is->pictq_mutex);
1485         // Make sure there are no long-delay timers (ideally we should just flush the queue, but that's harder)
1486         for (i = 0; i < VIDEO_PICTURE_QUEUE_SIZE; i++) {
1487             is->pictq[i].skip = 1;
1488         }
1489         while (is->pictq_size && !is->videoq.abort_request) {
1490             SDL_CondWait(is->pictq_cond, is->pictq_mutex);
1491         }
1492         is->video_current_pos = -1;
1493         is->frame_last_pts = AV_NOPTS_VALUE;
1494         is->frame_last_duration = 0;
1495         is->frame_timer = (double)av_gettime() / 1000000.0;
1496         is->frame_last_dropped_pts = AV_NOPTS_VALUE;
1497         SDL_UnlockMutex(is->pictq_mutex);
1498
1499         return 0;
1500     }
1501
1502     if(avcodec_decode_video2(is->video_st->codec, frame, &got_picture, pkt) < 0)
1503         return 0;
1504
1505     if (got_picture) {
1506         int ret = 1;
1507
1508         if (decoder_reorder_pts == -1) {
1509             *pts = av_frame_get_best_effort_timestamp(frame);
1510         } else if (decoder_reorder_pts) {
1511             *pts = frame->pkt_pts;
1512         } else {
1513             *pts = frame->pkt_dts;
1514         }
1515
1516         if (*pts == AV_NOPTS_VALUE) {
1517             *pts = 0;
1518         }
1519
1520         if (((is->av_sync_type == AV_SYNC_AUDIO_MASTER && is->audio_st) || is->av_sync_type == AV_SYNC_EXTERNAL_CLOCK) &&
1521              (framedrop>0 || (framedrop && is->audio_st))) {
1522             SDL_LockMutex(is->pictq_mutex);
1523             if (is->frame_last_pts != AV_NOPTS_VALUE && *pts) {
1524                 double clockdiff = get_video_clock(is) - get_master_clock(is);
1525                 double dpts = av_q2d(is->video_st->time_base) * *pts;
1526                 double ptsdiff = dpts - is->frame_last_pts;
1527                 if (fabs(clockdiff) < AV_NOSYNC_THRESHOLD &&
1528                      ptsdiff > 0 && ptsdiff < AV_NOSYNC_THRESHOLD &&
1529                      clockdiff + ptsdiff - is->frame_last_filter_delay < 0) {
1530                     is->frame_last_dropped_pos = pkt->pos;
1531                     is->frame_last_dropped_pts = dpts;
1532                     is->frame_drops_early++;
1533                     ret = 0;
1534                 }
1535             }
1536             SDL_UnlockMutex(is->pictq_mutex);
1537         }
1538
1539         return ret;
1540     }
1541     return 0;
1542 }
1543
1544 #if CONFIG_AVFILTER
1545 static int configure_filtergraph(AVFilterGraph *graph, const char *filtergraph,
1546                                  AVFilterContext *source_ctx, AVFilterContext *sink_ctx)
1547 {
1548     int ret;
1549     AVFilterInOut *outputs = NULL, *inputs = NULL;
1550
1551     if (filtergraph) {
1552         outputs = avfilter_inout_alloc();
1553         inputs  = avfilter_inout_alloc();
1554         if (!outputs || !inputs) {
1555             ret = AVERROR(ENOMEM);
1556             goto fail;
1557         }
1558
1559         outputs->name       = av_strdup("in");
1560         outputs->filter_ctx = source_ctx;
1561         outputs->pad_idx    = 0;
1562         outputs->next       = NULL;
1563
1564         inputs->name        = av_strdup("out");
1565         inputs->filter_ctx  = sink_ctx;
1566         inputs->pad_idx     = 0;
1567         inputs->next        = NULL;
1568
1569         if ((ret = avfilter_graph_parse(graph, filtergraph, &inputs, &outputs, NULL)) < 0)
1570             goto fail;
1571     } else {
1572         if ((ret = avfilter_link(source_ctx, 0, sink_ctx, 0)) < 0)
1573             goto fail;
1574     }
1575
1576     return avfilter_graph_config(graph, NULL);
1577 fail:
1578     avfilter_inout_free(&outputs);
1579     avfilter_inout_free(&inputs);
1580     return ret;
1581 }
1582
1583 static int configure_video_filters(AVFilterGraph *graph, VideoState *is, const char *vfilters)
1584 {
1585     static const enum PixelFormat pix_fmts[] = { PIX_FMT_YUV420P, PIX_FMT_NONE };
1586     char sws_flags_str[128];
1587     char buffersrc_args[256];
1588     int ret;
1589     AVBufferSinkParams *buffersink_params = av_buffersink_params_alloc();
1590     AVFilterContext *filt_src = NULL, *filt_out = NULL, *filt_format;
1591     AVCodecContext *codec = is->video_st->codec;
1592
1593     snprintf(sws_flags_str, sizeof(sws_flags_str), "flags=%d", sws_flags);
1594     graph->scale_sws_opts = av_strdup(sws_flags_str);
1595
1596     snprintf(buffersrc_args, sizeof(buffersrc_args),
1597              "video_size=%dx%d:pix_fmt=%d:time_base=%d/%d:pixel_aspect=%d/%d",
1598              codec->width, codec->height, codec->pix_fmt,
1599              is->video_st->time_base.num, is->video_st->time_base.den,
1600              codec->sample_aspect_ratio.num, codec->sample_aspect_ratio.den);
1601
1602     if ((ret = avfilter_graph_create_filter(&filt_src,
1603                                             avfilter_get_by_name("buffer"),
1604                                             "ffplay_buffer", buffersrc_args, NULL,
1605                                             graph)) < 0)
1606         return ret;
1607
1608     buffersink_params->pixel_fmts = pix_fmts;
1609     ret = avfilter_graph_create_filter(&filt_out,
1610                                        avfilter_get_by_name("buffersink"),
1611                                        "ffplay_buffersink", NULL, buffersink_params, graph);
1612     av_freep(&buffersink_params);
1613     if (ret < 0)
1614         return ret;
1615
1616     if ((ret = avfilter_graph_create_filter(&filt_format,
1617                                             avfilter_get_by_name("format"),
1618                                             "format", "yuv420p", NULL, graph)) < 0)
1619         return ret;
1620     if ((ret = avfilter_link(filt_format, 0, filt_out, 0)) < 0)
1621         return ret;
1622
1623     if ((ret = configure_filtergraph(graph, vfilters, filt_src, filt_format)) < 0)
1624         return ret;
1625
1626     is->in_video_filter  = filt_src;
1627     is->out_video_filter = filt_out;
1628
1629     return ret;
1630 }
1631
1632 #endif  /* CONFIG_AVFILTER */
1633
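/* Video decoding thread: pulls packets from the video queue, decodes them,
 * optionally pushes the frames through the filtergraph, and hands the result
 * to queue_picture() together with its pts converted to seconds. */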
1634 static int video_thread(void *arg)
1635 {
1636     AVPacket pkt = { 0 };
1637     VideoState *is = arg;
1638     AVFrame *frame = avcodec_alloc_frame();
1639     int64_t pts_int = AV_NOPTS_VALUE, pos = -1;
1640     double pts;
1641     int ret;
1642
1643 #if CONFIG_AVFILTER
1644     AVCodecContext *codec = is->video_st->codec;
1645     AVFilterGraph *graph = avfilter_graph_alloc();
1646     AVFilterContext *filt_out = NULL, *filt_in = NULL;
1647     int last_w = 0;
1648     int last_h = 0;
1649     enum PixelFormat last_format = -2;
1650
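    /* if the decoder supports direct rendering (DR1), install our own
       get/release_buffer callbacks so decoded frames live in a reusable buffer
       pool and can later be handed to the filtergraph without a copy */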
1651     if (codec->codec->capabilities & CODEC_CAP_DR1) {
1652         is->use_dr1 = 1;
1653         codec->get_buffer     = codec_get_buffer;
1654         codec->release_buffer = codec_release_buffer;
1655         codec->opaque         = &is->buffer_pool;
1656     }
1657 #endif
1658
1659     for (;;) {
1660 #if CONFIG_AVFILTER
1661         AVFilterBufferRef *picref;
1662         AVRational tb;
1663 #endif
1664         while (is->paused && !is->videoq.abort_request)
1665             SDL_Delay(10);
1666
1667         avcodec_get_frame_defaults(frame);
1668         av_free_packet(&pkt);
1669
1670         ret = get_video_frame(is, frame, &pts_int, &pkt);
1671         if (ret < 0)
1672             goto the_end;
1673
1674         if (!ret)
1675             continue;
1676
1677 #if CONFIG_AVFILTER
1678         if (   last_w != is->video_st->codec->width
1679             || last_h != is->video_st->codec->height
1680             || last_format != is->video_st->codec->pix_fmt) {
1681                 av_log(NULL, AV_LOG_INFO, "Frame changed from size:%dx%d fmt:%d to size:%dx%d fmt:%d\n",
1682                        last_w, last_h, last_format, is->video_st->codec->width, is->video_st->codec->height, is->video_st->codec->pix_fmt);
1683             avfilter_graph_free(&graph);
1684             graph = avfilter_graph_alloc();
1685             if ((ret = configure_video_filters(graph, is, vfilters)) < 0) {
1686                 SDL_Event event;
1687                 event.type = FF_QUIT_EVENT;
1688                 event.user.data1 = is;
1689                 SDL_PushEvent(&event);
1690                 av_free_packet(&pkt);
1691                 goto the_end;
1692             }
1693             filt_in  = is->in_video_filter;
1694             filt_out = is->out_video_filter;
1695             last_w = is->video_st->codec->width;
1696             last_h = is->video_st->codec->height;
1697             last_format = is->video_st->codec->pix_fmt;
1698         }
1699
1700         frame->pts = pts_int;
1701         frame->sample_aspect_ratio = av_guess_sample_aspect_ratio(is->ic, is->video_st, frame);
1702         if (is->use_dr1 && frame->opaque) {
1703             FrameBuffer      *buf = frame->opaque;
1704             AVFilterBufferRef *fb = avfilter_get_video_buffer_ref_from_arrays(
1705                                         frame->data, frame->linesize,
1706                                         AV_PERM_READ | AV_PERM_PRESERVE,
1707                                         frame->width, frame->height,
1708                                         frame->format);
1709
1710             avfilter_copy_frame_props(fb, frame);
1711             fb->buf->priv           = buf;
1712             fb->buf->free           = filter_release_buffer;
1713
1714             buf->refcount++;
1715             av_buffersrc_add_ref(filt_in, fb, AV_BUFFERSRC_FLAG_NO_COPY);
1716
1717         } else
1718             av_buffersrc_write_frame(filt_in, frame);
1719
1720         av_free_packet(&pkt);
1721
1722         while (ret >= 0) {
1723             is->frame_last_returned_time = av_gettime() / 1000000.0;
1724
1725             ret = av_buffersink_get_buffer_ref(filt_out, &picref, 0);
1726             if (ret < 0) {
1727                 ret = 0;
1728                 break;
1729             }
1730
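            /* frame_last_filter_delay measures how long the frame spent in the
               filtergraph; implausibly large values are discarded so they do not
               distort the early frame-drop heuristic in get_video_frame() */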
1731             is->frame_last_filter_delay = av_gettime() / 1000000.0 - is->frame_last_returned_time;
1732             if (fabs(is->frame_last_filter_delay) > AV_NOSYNC_THRESHOLD / 10.0)
1733                 is->frame_last_filter_delay = 0;
1734
1735             avfilter_copy_buf_props(frame, picref);
1736
1737             pts_int = picref->pts;
1738             tb      = filt_out->inputs[0]->time_base;
1739             pos     = picref->pos;
1740             frame->opaque = picref;
1741
1742             if (av_cmp_q(tb, is->video_st->time_base)) {
1743                 av_unused int64_t pts1 = pts_int;
1744                 pts_int = av_rescale_q(pts_int, tb, is->video_st->time_base);
1745                 av_dlog(NULL, "video_thread(): "
1746                         "tb:%d/%d pts:%"PRId64" -> tb:%d/%d pts:%"PRId64"\n",
1747                         tb.num, tb.den, pts1,
1748                         is->video_st->time_base.num, is->video_st->time_base.den, pts_int);
1749             }
1750             pts = pts_int * av_q2d(is->video_st->time_base);
1751             ret = queue_picture(is, frame, pts, pos);
1752         }
1753 #else
1754         pts = pts_int * av_q2d(is->video_st->time_base);
1755         ret = queue_picture(is, frame, pts, pkt.pos);
1756 #endif
1757
1758         if (ret < 0)
1759             goto the_end;
1760
1761         if (is->step)
1762             stream_toggle_pause(is);
1763     }
1764  the_end:
1765     avcodec_flush_buffers(is->video_st->codec);
1766 #if CONFIG_AVFILTER
1767     av_freep(&vfilters);
1768     avfilter_graph_free(&graph);
1769 #endif
1770     av_free_packet(&pkt);
1771     av_free(frame);
1772     return 0;
1773 }
1774
1775 static int subtitle_thread(void *arg)
1776 {
1777     VideoState *is = arg;
1778     SubPicture *sp;
1779     AVPacket pkt1, *pkt = &pkt1;
1780     int got_subtitle;
1781     double pts;
1782     int i, j;
1783     int r, g, b, y, u, v, a;
1784
1785     for (;;) {
1786         while (is->paused && !is->subtitleq.abort_request) {
1787             SDL_Delay(10);
1788         }
1789         if (packet_queue_get(&is->subtitleq, pkt, 1) < 0)
1790             break;
1791
1792         if (pkt->data == flush_pkt.data) {
1793             avcodec_flush_buffers(is->subtitle_st->codec);
1794             continue;
1795         }
1796         SDL_LockMutex(is->subpq_mutex);
1797         while (is->subpq_size >= SUBPICTURE_QUEUE_SIZE &&
1798                !is->subtitleq.abort_request) {
1799             SDL_CondWait(is->subpq_cond, is->subpq_mutex);
1800         }
1801         SDL_UnlockMutex(is->subpq_mutex);
1802
1803         if (is->subtitleq.abort_request)
1804             return 0;
1805
1806         sp = &is->subpq[is->subpq_windex];
1807
1808         /* NOTE: pts is the PTS of the _first_ subtitle picture beginning in
1809            this packet, if any */
1810         pts = 0;
1811         if (pkt->pts != AV_NOPTS_VALUE)
1812             pts = av_q2d(is->subtitle_st->time_base) * pkt->pts;
1813
1814         avcodec_decode_subtitle2(is->subtitle_st->codec, &sp->sub,
1815                                  &got_subtitle, pkt);
1816
1817         if (got_subtitle && sp->sub.format == 0) {
1818             sp->pts = pts;
1819
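            /* convert each palette entry of the bitmap subtitle from RGBA to
               YUVA (CCIR coefficients) so it can later be blended directly onto
               the YUV overlay */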
1820             for (i = 0; i < sp->sub.num_rects; i++)
1821             {
1822                 for (j = 0; j < sp->sub.rects[i]->nb_colors; j++)
1823                 {
1824                     RGBA_IN(r, g, b, a, (uint32_t*)sp->sub.rects[i]->pict.data[1] + j);
1825                     y = RGB_TO_Y_CCIR(r, g, b);
1826                     u = RGB_TO_U_CCIR(r, g, b, 0);
1827                     v = RGB_TO_V_CCIR(r, g, b, 0);
1828                     YUVA_OUT((uint32_t*)sp->sub.rects[i]->pict.data[1] + j, y, u, v, a);
1829                 }
1830             }
1831
1832             /* now we can update the picture count */
1833             if (++is->subpq_windex == SUBPICTURE_QUEUE_SIZE)
1834                 is->subpq_windex = 0;
1835             SDL_LockMutex(is->subpq_mutex);
1836             is->subpq_size++;
1837             SDL_UnlockMutex(is->subpq_mutex);
1838         }
1839         av_free_packet(pkt);
1840     }
1841     return 0;
1842 }
1843
1844 /* copy samples for viewing in the audio display (wave/RDFT) window */
1845 static void update_sample_display(VideoState *is, short *samples, int samples_size)
1846 {
1847     int size, len;
1848
1849     size = samples_size / sizeof(short);
1850     while (size > 0) {
1851         len = SAMPLE_ARRAY_SIZE - is->sample_array_index;
1852         if (len > size)
1853             len = size;
1854         memcpy(is->sample_array + is->sample_array_index, samples, len * sizeof(short));
1855         samples += len;
1856         is->sample_array_index += len;
1857         if (is->sample_array_index >= SAMPLE_ARRAY_SIZE)
1858             is->sample_array_index = 0;
1859         size -= len;
1860     }
1861 }
1862
1863 /* return the wanted number of samples to get better sync if sync_type is video
1864  * or external master clock */
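/* The A-V difference is smoothed with an exponential average (audio_diff_cum,
 * audio_diff_avg_coef) and, once it exceeds audio_diff_threshold, turned into
 * a sample-count correction clamped to SAMPLE_CORRECTION_PERCENT_MAX. For
 * example, with nb_samples = 1024 at 44100 Hz and diff = +0.02 s the raw
 * request of 1024 + 0.02 * 44100 = 1906 samples is clamped to the +10% bound
 * of 1126 samples. */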
1865 static int synchronize_audio(VideoState *is, int nb_samples)
1866 {
1867     int wanted_nb_samples = nb_samples;
1868
1869     /* if not master, then we try to remove or add samples to correct the clock */
1870     if (((is->av_sync_type == AV_SYNC_VIDEO_MASTER && is->video_st) ||
1871          is->av_sync_type == AV_SYNC_EXTERNAL_CLOCK)) {
1872         double diff, avg_diff;
1873         int min_nb_samples, max_nb_samples;
1874
1875         diff = get_audio_clock(is) - get_master_clock(is);
1876
1877         if (fabs(diff) < AV_NOSYNC_THRESHOLD) {
1878             is->audio_diff_cum = diff + is->audio_diff_avg_coef * is->audio_diff_cum;
1879             if (is->audio_diff_avg_count < AUDIO_DIFF_AVG_NB) {
1880                 /* not enough measurements yet for a reliable estimate */
1881                 is->audio_diff_avg_count++;
1882             } else {
1883                 /* estimate the A-V difference */
1884                 avg_diff = is->audio_diff_cum * (1.0 - is->audio_diff_avg_coef);
1885
1886                 if (fabs(avg_diff) >= is->audio_diff_threshold) {
1887                     wanted_nb_samples = nb_samples + (int)(diff * is->audio_src.freq);
1888                     min_nb_samples = ((nb_samples * (100 - SAMPLE_CORRECTION_PERCENT_MAX) / 100));
1889                     max_nb_samples = ((nb_samples * (100 + SAMPLE_CORRECTION_PERCENT_MAX) / 100));
1890                     wanted_nb_samples = FFMIN(FFMAX(wanted_nb_samples, min_nb_samples), max_nb_samples);
1891                 }
1892                 av_dlog(NULL, "diff=%f adiff=%f sample_diff=%d apts=%0.3f vpts=%0.3f %f\n",
1893                         diff, avg_diff, wanted_nb_samples - nb_samples,
1894                         is->audio_clock, is->video_clock, is->audio_diff_threshold);
1895             }
1896         } else {
1897             /* the difference is too large: it may be due to initial PTS
1898                errors, so reset the A-V filter */
1899             is->audio_diff_avg_count = 0;
1900             is->audio_diff_cum       = 0;
1901         }
1902     }
1903
1904     return wanted_nb_samples;
1905 }
1906
1907 /* decode one audio frame and return its uncompressed size */
1908 static int audio_decode_frame(VideoState *is, double *pts_ptr)
1909 {
1910     AVPacket *pkt_temp = &is->audio_pkt_temp;
1911     AVPacket *pkt = &is->audio_pkt;
1912     AVCodecContext *dec = is->audio_st->codec;
1913     int len1, len2, data_size, resampled_data_size;
1914     int64_t dec_channel_layout;
1915     int got_frame;
1916     double pts;
1917     int new_packet = 0;
1918     int flush_complete = 0;
1919     int wanted_nb_samples;
1920
1921     for (;;) {
1922         /* NOTE: the audio packet can contain several frames */
1923         while (pkt_temp->size > 0 || (!pkt_temp->data && new_packet)) {
1924             if (!is->frame) {
1925                 if (!(is->frame = avcodec_alloc_frame()))
1926                     return AVERROR(ENOMEM);
1927             } else
1928                 avcodec_get_frame_defaults(is->frame);
1929
1930             if (is->paused)
1931                 return -1;
1932
1933             if (flush_complete)
1934                 break;
1935             new_packet = 0;
1936             len1 = avcodec_decode_audio4(dec, is->frame, &got_frame, pkt_temp);
1937             if (len1 < 0) {
1938                 /* if error, we skip the frame */
1939                 pkt_temp->size = 0;
1940                 break;
1941             }
1942
1943             pkt_temp->data += len1;
1944             pkt_temp->size -= len1;
1945
1946             if (!got_frame) {
1947                 /* stop sending empty packets if the decoder is finished */
1948                 if (!pkt_temp->data && dec->codec->capabilities & CODEC_CAP_DELAY)
1949                     flush_complete = 1;
1950                 continue;
1951             }
1952             data_size = av_samples_get_buffer_size(NULL, dec->channels,
1953                                                    is->frame->nb_samples,
1954                                                    dec->sample_fmt, 1);
1955
1956             dec_channel_layout =
1957                 (dec->channel_layout && dec->channels == av_get_channel_layout_nb_channels(dec->channel_layout)) ?
1958                 dec->channel_layout : av_get_default_channel_layout(dec->channels);
1959             wanted_nb_samples = synchronize_audio(is, is->frame->nb_samples);
1960
1961             if (dec->sample_fmt    != is->audio_src.fmt            ||
1962                 dec_channel_layout != is->audio_src.channel_layout ||
1963                 dec->sample_rate   != is->audio_src.freq           ||
1964                 (wanted_nb_samples != is->frame->nb_samples && !is->swr_ctx)) {
1965                 swr_free(&is->swr_ctx);
1966                 is->swr_ctx = swr_alloc_set_opts(NULL,
1967                                                  is->audio_tgt.channel_layout, is->audio_tgt.fmt, is->audio_tgt.freq,
1968                                                  dec_channel_layout,           dec->sample_fmt,   dec->sample_rate,
1969                                                  0, NULL);
1970                 if (!is->swr_ctx || swr_init(is->swr_ctx) < 0) {
1971                     fprintf(stderr, "Cannot create sample rate converter for conversion of %d Hz %s %d channels to %d Hz %s %d channels!\n",
1972                         dec->sample_rate,   av_get_sample_fmt_name(dec->sample_fmt),   dec->channels,
1973                         is->audio_tgt.freq, av_get_sample_fmt_name(is->audio_tgt.fmt), is->audio_tgt.channels);
1974                     break;
1975                 }
1976                 is->audio_src.channel_layout = dec_channel_layout;
1977                 is->audio_src.channels = dec->channels;
1978                 is->audio_src.freq = dec->sample_rate;
1979                 is->audio_src.fmt = dec->sample_fmt;
1980             }
1981
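            /* convert the decoded frame to the output format with swresample;
               when synchronize_audio() requested a different sample count,
               swr_set_compensation() stretches or squeezes the audio by
               (wanted - actual) samples, expressed at the output rate, spread
               over the wanted frame length */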
1982             if (is->swr_ctx) {
1983                 const uint8_t **in = (const uint8_t **)is->frame->extended_data;
1984                 uint8_t *out[] = {is->audio_buf2};
1985                 int out_count = sizeof(is->audio_buf2) / is->audio_tgt.channels / av_get_bytes_per_sample(is->audio_tgt.fmt);
1986                 if (wanted_nb_samples != is->frame->nb_samples) {
1987                     if (swr_set_compensation(is->swr_ctx, (wanted_nb_samples - is->frame->nb_samples) * is->audio_tgt.freq / dec->sample_rate,
1988                                                 wanted_nb_samples * is->audio_tgt.freq / dec->sample_rate) < 0) {
1989                         fprintf(stderr, "swr_set_compensation() failed\n");
1990                         break;
1991                     }
1992                 }
1993                 len2 = swr_convert(is->swr_ctx, out, out_count, in, is->frame->nb_samples);
1994                 if (len2 < 0) {
1995                     fprintf(stderr, "swr_convert() failed\n");
1996                     break;
1997                 }
1998                 if (len2 == out_count) {
1999                     fprintf(stderr, "warning: audio buffer is probably too small\n");
2000                     swr_init(is->swr_ctx);
2001                 }
2002                 is->audio_buf = is->audio_buf2;
2003                 resampled_data_size = len2 * is->audio_tgt.channels * av_get_bytes_per_sample(is->audio_tgt.fmt);
2004             } else {
2005                 is->audio_buf = is->frame->data[0];
2006                 resampled_data_size = data_size;
2007             }
2008
2009             /* if the packets carry no pts, the incrementally maintained audio clock serves as the pts */
2010             pts = is->audio_clock;
2011             *pts_ptr = pts;
2012             is->audio_clock += (double)data_size /
2013                 (dec->channels * dec->sample_rate * av_get_bytes_per_sample(dec->sample_fmt));
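            /* i.e. the clock advances by the duration of the decoded frame; for
               example a 1152-sample stereo S16 frame at 44100 Hz is 4608 bytes,
               which advances the clock by 4608 / (2 * 44100 * 2) ~ 26.1 ms */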
2014 #ifdef DEBUG
2015             {
2016                 static double last_clock;
2017                 printf("audio: delay=%0.3f clock=%0.3f pts=%0.3f\n",
2018                        is->audio_clock - last_clock,
2019                        is->audio_clock, pts);
2020                 last_clock = is->audio_clock;
2021             }
2022 #endif
2023             return resampled_data_size;
2024         }
2025
2026         /* free the current packet */
2027         if (pkt->data)
2028             av_free_packet(pkt);
2029         memset(pkt_temp, 0, sizeof(*pkt_temp));
2030
2031         if (is->paused || is->audioq.abort_request) {
2032             return -1;
2033         }
2034
2035         /* read next packet */
2036         if ((new_packet = packet_queue_get(&is->audioq, pkt, 1)) < 0)
2037             return -1;
2038
2039         if (pkt->data == flush_pkt.data) {
2040             avcodec_flush_buffers(dec);
2041             flush_complete = 0;
2042         }
2043
2044         *pkt_temp = *pkt;
2045
2046         /* if the packet has a pts, update the audio clock with it */
2047         if (pkt->pts != AV_NOPTS_VALUE) {
2048             is->audio_clock = av_q2d(is->audio_st->time_base)*pkt->pts;
2049         }
2050     }
2051 }
2052
2053 /* prepare a new audio buffer */
2054 static void sdl_audio_callback(void *opaque, Uint8 *stream, int len)
2055 {
2056     VideoState *is = opaque;
2057     int audio_size, len1;
2058     int bytes_per_sec;
2059     int frame_size = av_samples_get_buffer_size(NULL, is->audio_tgt.channels, 1, is->audio_tgt.fmt, 1);
2060     double pts;
2061
2062     audio_callback_time = av_gettime();
2063
2064     while (len > 0) {
2065         if (is->audio_buf_index >= is->audio_buf_size) {
2066             audio_size = audio_decode_frame(is, &pts);
2067             if (audio_size < 0) {
2068                 /* if error, just output silence */
2069                 is->audio_buf      = is->silence_buf;
2070                 is->audio_buf_size = sizeof(is->silence_buf) / frame_size * frame_size;
2071             } else {
2072                 if (is->show_mode != SHOW_MODE_VIDEO)
2073                     update_sample_display(is, (int16_t *)is->audio_buf, audio_size);
2074                 is->audio_buf_size = audio_size;
2075             }
2076             is->audio_buf_index = 0;
2077         }
2078         len1 = is->audio_buf_size - is->audio_buf_index;
2079         if (len1 > len)
2080             len1 = len;
2081         memcpy(stream, (uint8_t *)is->audio_buf + is->audio_buf_index, len1);
2082         len -= len1;
2083         stream += len1;
2084         is->audio_buf_index += len1;
2085     }
2086     bytes_per_sec = is->audio_tgt.freq * is->audio_tgt.channels * av_get_bytes_per_sample(is->audio_tgt.fmt);
2087     is->audio_write_buf_size = is->audio_buf_size - is->audio_buf_index;
2088     /* Let's assume the audio driver that is used by SDL has two periods. */
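    /* estimate the pts of the sample currently being played: take the clock of
       the last decoded data and subtract what is still buffered, i.e. two
       hardware periods plus the part of our own buffer not yet copied out; the
       drift against the wall clock allows the audio clock to be extrapolated
       between callbacks */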
2089     is->audio_current_pts = is->audio_clock - (double)(2 * is->audio_hw_buf_size + is->audio_write_buf_size) / bytes_per_sec;
2090     is->audio_current_pts_drift = is->audio_current_pts - audio_callback_time / 1000000.0;
2091 }
2092
2093 static int audio_open(void *opaque, int64_t wanted_channel_layout, int wanted_nb_channels, int wanted_sample_rate, struct AudioParams *audio_hw_params)
2094 {
2095     SDL_AudioSpec wanted_spec, spec;
2096     const char *env;
2097     const int next_nb_channels[] = {0, 0, 1, 6, 2, 6, 4, 6};
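    /* fallback table indexed by the channel count that SDL rejected:
       7 -> 6, 6 -> 4, 5 -> 6, 4 -> 2, 3 -> 6, 2 -> 1, 1 -> 0 (give up);
       counts above 7 are clamped to 7 by the FFMIN() below */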
2098
2099     env = SDL_getenv("SDL_AUDIO_CHANNELS");
2100     if (env) {
2101         wanted_nb_channels = SDL_atoi(env);
2102         wanted_channel_layout = av_get_default_channel_layout(wanted_nb_channels);
2103     }
2104     if (!wanted_channel_layout || wanted_nb_channels != av_get_channel_layout_nb_channels(wanted_channel_layout)) {
2105         wanted_channel_layout = av_get_default_channel_layout(wanted_nb_channels);
2106         wanted_channel_layout &= ~AV_CH_LAYOUT_STEREO_DOWNMIX;
2107     }
2108     wanted_spec.channels = av_get_channel_layout_nb_channels(wanted_channel_layout);
2109     wanted_spec.freq = wanted_sample_rate;
2110     if (wanted_spec.freq <= 0 || wanted_spec.channels <= 0) {
2111         fprintf(stderr, "Invalid sample rate or channel count!\n");
2112         return -1;
2113     }
2114     wanted_spec.format = AUDIO_S16SYS;
2115     wanted_spec.silence = 0;
2116     wanted_spec.samples = SDL_AUDIO_BUFFER_SIZE;
2117     wanted_spec.callback = sdl_audio_callback;
2118     wanted_spec.userdata = opaque;
2119     while (SDL_OpenAudio(&wanted_spec, &spec) < 0) {
2120         fprintf(stderr, "SDL_OpenAudio (%d channels): %s\n", wanted_spec.channels, SDL_GetError());
2121         wanted_spec.channels = next_nb_channels[FFMIN(7, wanted_spec.channels)];
2122         if (!wanted_spec.channels) {
2123             fprintf(stderr, "No more channel combinations to try, audio open failed\n");
2124             return -1;
2125         }
2126         wanted_channel_layout = av_get_default_channel_layout(wanted_spec.channels);
2127     }
2128     if (spec.format != AUDIO_S16SYS) {
2129         fprintf(stderr, "SDL advised audio format %d is not supported!\n", spec.format);
2130         return -1;
2131     }
2132     if (spec.channels != wanted_spec.channels) {
2133         wanted_channel_layout = av_get_default_channel_layout(spec.channels);
2134         if (!wanted_channel_layout) {
2135             fprintf(stderr, "SDL advised channel count %d is not supported!\n", spec.channels);
2136             return -1;
2137         }
2138     }
2139
2140     audio_hw_params->fmt = AV_SAMPLE_FMT_S16;
2141     audio_hw_params->freq = spec.freq;
2142     audio_hw_params->channel_layout = wanted_channel_layout;
2143     audio_hw_params->channels =  spec.channels;
2144     return spec.size;
2145 }
2146
2147 /* open a given stream. Return 0 if OK */
2148 static int stream_component_open(VideoState *is, int stream_index)
2149 {
2150     AVFormatContext *ic = is->ic;
2151     AVCodecContext *avctx;
2152     AVCodec *codec;
2153     AVDictionary *opts;
2154     AVDictionaryEntry *t = NULL;
2155
2156     if (stream_index < 0 || stream_index >= ic->nb_streams)
2157         return -1;
2158     avctx = ic->streams[stream_index]->codec;
2159
2160     codec = avcodec_find_decoder(avctx->codec_id);
2161     opts = filter_codec_opts(codec_opts, avctx->codec_id, ic, ic->streams[stream_index], codec);
2162
2163     switch(avctx->codec_type){
2164         case AVMEDIA_TYPE_AUDIO   : is->last_audio_stream    = stream_index; if(audio_codec_name   ) codec= avcodec_find_decoder_by_name(   audio_codec_name); break;
2165         case AVMEDIA_TYPE_SUBTITLE: is->last_subtitle_stream = stream_index; if(subtitle_codec_name) codec= avcodec_find_decoder_by_name(subtitle_codec_name); break;
2166         case AVMEDIA_TYPE_VIDEO   : is->last_video_stream    = stream_index; if(video_codec_name   ) codec= avcodec_find_decoder_by_name(   video_codec_name); break;
2167     }
2168     if (!codec)
2169         return -1;
2170
2171     avctx->workaround_bugs   = workaround_bugs;
2172     avctx->lowres            = lowres;
2173     if(avctx->lowres > codec->max_lowres){
2174         av_log(avctx, AV_LOG_WARNING, "The maximum value for lowres supported by the decoder is %d\n",
2175                 codec->max_lowres);
2176         avctx->lowres= codec->max_lowres;
2177     }
2178     avctx->idct_algo         = idct;
2179     avctx->skip_frame        = skip_frame;
2180     avctx->skip_idct         = skip_idct;
2181     avctx->skip_loop_filter  = skip_loop_filter;
2182     avctx->error_concealment = error_concealment;
2183
2184     if(avctx->lowres) avctx->flags |= CODEC_FLAG_EMU_EDGE;
2185     if (fast)   avctx->flags2 |= CODEC_FLAG2_FAST;
2186     if(codec->capabilities & CODEC_CAP_DR1)
2187         avctx->flags |= CODEC_FLAG_EMU_EDGE;
2188
2189     if (!av_dict_get(opts, "threads", NULL, 0))
2190         av_dict_set(&opts, "threads", "auto", 0);
2191     if (!codec ||
2192         avcodec_open2(avctx, codec, &opts) < 0)
2193         return -1;
2194     if ((t = av_dict_get(opts, "", NULL, AV_DICT_IGNORE_SUFFIX))) {
2195         av_log(NULL, AV_LOG_ERROR, "Option %s not found.\n", t->key);
2196         return AVERROR_OPTION_NOT_FOUND;
2197     }
2198
2199     /* prepare audio output */
2200     if (avctx->codec_type == AVMEDIA_TYPE_AUDIO) {
2201         int audio_hw_buf_size = audio_open(is, avctx->channel_layout, avctx->channels, avctx->sample_rate, &is->audio_src);
2202         if (audio_hw_buf_size < 0)
2203             return -1;
2204         is->audio_hw_buf_size = audio_hw_buf_size;
2205         is->audio_tgt = is->audio_src;
2206     }
2207
2208     ic->streams[stream_index]->discard = AVDISCARD_DEFAULT;
2209     switch (avctx->codec_type) {
2210     case AVMEDIA_TYPE_AUDIO:
2211         is->audio_stream = stream_index;
2212         is->audio_st = ic->streams[stream_index];
2213         is->audio_buf_size  = 0;
2214         is->audio_buf_index = 0;
2215
2216         /* init averaging filter */
2217         is->audio_diff_avg_coef  = exp(log(0.01) / AUDIO_DIFF_AVG_NB);
2218         is->audio_diff_avg_count = 0;
2219         /* since we do not have a precise enough audio FIFO fullness measure,
2220            we correct audio sync only if the error is larger than this threshold */
2221         is->audio_diff_threshold = 2.0 * is->audio_hw_buf_size / av_samples_get_buffer_size(NULL, is->audio_tgt.channels, is->audio_tgt.freq, is->audio_tgt.fmt, 1);
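        /* the threshold equals the duration of two hardware buffers, since
           av_samples_get_buffer_size(..., freq, ...) is the byte size of one
           second of output audio; for example 44100 Hz stereo S16 gives
           176400 bytes/s, so a 4096-byte SDL buffer yields a threshold of
           about 46 ms */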
2222
2223         memset(&is->audio_pkt, 0, sizeof(is->audio_pkt));
2224         memset(&is->audio_pkt_temp, 0, sizeof(is->audio_pkt_temp));
2225         packet_queue_start(&is->audioq);
2226         SDL_PauseAudio(0);
2227         break;
2228     case AVMEDIA_TYPE_VIDEO:
2229         is->video_stream = stream_index;
2230         is->video_st = ic->streams[stream_index];
2231
2232         packet_queue_start(&is->videoq);
2233         is->video_tid = SDL_CreateThread(video_thread, is);
2234         break;
2235     case AVMEDIA_TYPE_SUBTITLE:
2236         is->subtitle_stream = stream_index;
2237         is->subtitle_st = ic->streams[stream_index];
2238         packet_queue_start(&is->subtitleq);
2239
2240         is->subtitle_tid = SDL_CreateThread(subtitle_thread, is);
2241         break;
2242     default:
2243         break;
2244     }
2245     return 0;
2246 }
2247
2248 static void stream_component_close(VideoState *is, int stream_index)
2249 {
2250     AVFormatContext *ic = is->ic;
2251     AVCodecContext *avctx;
2252
2253     if (stream_index < 0 || stream_index >= ic->nb_streams)
2254         return;
2255     avctx = ic->streams[stream_index]->codec;
2256
2257     switch (avctx->codec_type) {
2258     case AVMEDIA_TYPE_AUDIO:
2259         packet_queue_abort(&is->audioq);
2260
2261         SDL_CloseAudio();
2262
2263         packet_queue_flush(&is->audioq);
2264         av_free_packet(&is->audio_pkt);
2265         swr_free(&is->swr_ctx);
2266         av_freep(&is->audio_buf1);
2267         is->audio_buf = NULL;
2268         av_freep(&is->frame);
2269
2270         if (is->rdft) {
2271             av_rdft_end(is->rdft);
2272             av_freep(&is->rdft_data);
2273             is->rdft = NULL;
2274             is->rdft_bits = 0;
2275         }
2276         break;
2277     case AVMEDIA_TYPE_VIDEO:
2278         packet_queue_abort(&is->videoq);
2279
2280         /* note: we also signal this mutex to make sure we deblock the
2281            video thread in all cases */
2282         SDL_LockMutex(is->pictq_mutex);
2283         SDL_CondSignal(is->pictq_cond);
2284         SDL_UnlockMutex(is->pictq_mutex);
2285
2286         SDL_WaitThread(is->video_tid, NULL);
2287
2288         packet_queue_flush(&is->videoq);
2289         break;
2290     case AVMEDIA_TYPE_SUBTITLE:
2291         packet_queue_abort(&is->subtitleq);
2292
2293         /* note: we also signal this mutex to make sure we deblock the
2294            subtitle thread in all cases */
2295         SDL_LockMutex(is->subpq_mutex);
2296         is->subtitle_stream_changed = 1;
2297
2298         SDL_CondSignal(is->subpq_cond);
2299         SDL_UnlockMutex(is->subpq_mutex);
2300
2301         SDL_WaitThread(is->subtitle_tid, NULL);
2302
2303         packet_queue_flush(&is->subtitleq);
2304         break;
2305     default:
2306         break;
2307     }
2308
2309     ic->streams[stream_index]->discard = AVDISCARD_ALL;
2310     avcodec_close(avctx);
2311 #if CONFIG_AVFILTER
2312     free_buffer_pool(&is->buffer_pool);
2313 #endif
2314     switch (avctx->codec_type) {
2315     case AVMEDIA_TYPE_AUDIO:
2316         is->audio_st = NULL;
2317         is->audio_stream = -1;
2318         break;
2319     case AVMEDIA_TYPE_VIDEO:
2320         is->video_st = NULL;
2321         is->video_stream = -1;
2322         break;
2323     case AVMEDIA_TYPE_SUBTITLE:
2324         is->subtitle_st = NULL;
2325         is->subtitle_stream = -1;
2326         break;
2327     default:
2328         break;
2329     }
2330 }
2331
2332 static int decode_interrupt_cb(void *ctx)
2333 {
2334     VideoState *is = ctx;
2335     return is->abort_request;
2336 }
2337
2338 /* this thread gets the stream from the disk or the network */
2339 static int read_thread(void *arg)
2340 {
2341     VideoState *is = arg;
2342     AVFormatContext *ic = NULL;
2343     int err, i, ret;
2344     int st_index[AVMEDIA_TYPE_NB];
2345     AVPacket pkt1, *pkt = &pkt1;
2346     int eof = 0;
2347     int pkt_in_play_range = 0;
2348     AVDictionaryEntry *t;
2349     AVDictionary **opts;
2350     int orig_nb_streams;
2351
2352     memset(st_index, -1, sizeof(st_index));
2353     is->last_video_stream = is->video_stream = -1;
2354     is->last_audio_stream = is->audio_stream = -1;
2355     is->last_subtitle_stream = is->subtitle_stream = -1;
2356
2357     ic = avformat_alloc_context();
2358     ic->interrupt_callback.callback = decode_interrupt_cb;
2359     ic->interrupt_callback.opaque = is;
2360     err = avformat_open_input(&ic, is->filename, is->iformat, &format_opts);
2361     if (err < 0) {
2362         print_error(is->filename, err);
2363         ret = -1;
2364         goto fail;
2365     }
2366     if ((t = av_dict_get(format_opts, "", NULL, AV_DICT_IGNORE_SUFFIX))) {
2367         av_log(NULL, AV_LOG_ERROR, "Option %s not found.\n", t->key);
2368         ret = AVERROR_OPTION_NOT_FOUND;
2369         goto fail;
2370     }
2371     is->ic = ic;
2372
2373     if (genpts)
2374         ic->flags |= AVFMT_FLAG_GENPTS;
2375
2376     opts = setup_find_stream_info_opts(ic, codec_opts);
2377     orig_nb_streams = ic->nb_streams;
2378
2379     err = avformat_find_stream_info(ic, opts);
2380     if (err < 0) {
2381         fprintf(stderr, "%s: could not find codec parameters\n", is->filename);
2382         ret = -1;
2383         goto fail;
2384     }
2385     for (i = 0; i < orig_nb_streams; i++)
2386         av_dict_free(&opts[i]);
2387     av_freep(&opts);
2388
2389     if (ic->pb)
2390         ic->pb->eof_reached = 0; // FIXME hack, ffplay should probably not use url_feof() to test for the end of file
2391
2392     if (seek_by_bytes < 0)
2393         seek_by_bytes = !!(ic->iformat->flags & AVFMT_TS_DISCONT);
2394
2395     /* if seeking requested, we execute it */
2396     if (start_time != AV_NOPTS_VALUE) {
2397         int64_t timestamp;
2398
2399         timestamp = start_time;
2400         /* add the stream start time */
2401         if (ic->start_time != AV_NOPTS_VALUE)
2402             timestamp += ic->start_time;
2403         ret = avformat_seek_file(ic, -1, INT64_MIN, timestamp, INT64_MAX, 0);
2404         if (ret < 0) {
2405             fprintf(stderr, "%s: could not seek to position %0.3f\n",
2406                     is->filename, (double)timestamp / AV_TIME_BASE);
2407         }
2408     }
2409
2410     for (i = 0; i < ic->nb_streams; i++)
2411         ic->streams[i]->discard = AVDISCARD_ALL;
2412     if (!video_disable)
2413         st_index[AVMEDIA_TYPE_VIDEO] =
2414             av_find_best_stream(ic, AVMEDIA_TYPE_VIDEO,
2415                                 wanted_stream[AVMEDIA_TYPE_VIDEO], -1, NULL, 0);
2416     if (!audio_disable)
2417         st_index[AVMEDIA_TYPE_AUDIO] =
2418             av_find_best_stream(ic, AVMEDIA_TYPE_AUDIO,
2419                                 wanted_stream[AVMEDIA_TYPE_AUDIO],
2420                                 st_index[AVMEDIA_TYPE_VIDEO],
2421                                 NULL, 0);
2422     if (!video_disable)
2423         st_index[AVMEDIA_TYPE_SUBTITLE] =
2424             av_find_best_stream(ic, AVMEDIA_TYPE_SUBTITLE,
2425                                 wanted_stream[AVMEDIA_TYPE_SUBTITLE],
2426                                 (st_index[AVMEDIA_TYPE_AUDIO] >= 0 ?
2427                                  st_index[AVMEDIA_TYPE_AUDIO] :
2428                                  st_index[AVMEDIA_TYPE_VIDEO]),
2429                                 NULL, 0);
2430     if (show_status) {
2431         av_dump_format(ic, 0, is->filename, 0);
2432     }
2433
2434     is->show_mode = show_mode;
2435
2436     /* open the streams */
2437     if (st_index[AVMEDIA_TYPE_AUDIO] >= 0) {
2438         stream_component_open(is, st_index[AVMEDIA_TYPE_AUDIO]);
2439     }
2440
2441     ret = -1;
2442     if (st_index[AVMEDIA_TYPE_VIDEO] >= 0) {
2443         ret = stream_component_open(is, st_index[AVMEDIA_TYPE_VIDEO]);
2444     }
2445     is->refresh_tid = SDL_CreateThread(refresh_thread, is);
2446     if (is->show_mode == SHOW_MODE_NONE)
2447         is->show_mode = ret >= 0 ? SHOW_MODE_VIDEO : SHOW_MODE_RDFT;
2448
2449     if (st_index[AVMEDIA_TYPE_SUBTITLE] >= 0) {
2450         stream_component_open(is, st_index[AVMEDIA_TYPE_SUBTITLE]);
2451     }
2452
2453     if (is->video_stream < 0 && is->audio_stream < 0) {
2454         fprintf(stderr, "%s: could not open codecs\n", is->filename);
2455         ret = -1;
2456         goto fail;
2457     }
2458
2459     for (;;) {
2460         if (is->abort_request)
2461             break;
2462         if (is->paused != is->last_paused) {
2463             is->last_paused = is->paused;
2464             if (is->paused)
2465                 is->read_pause_return = av_read_pause(ic);
2466             else
2467                 av_read_play(ic);
2468         }
2469 #if CONFIG_RTSP_DEMUXER || CONFIG_MMSH_PROTOCOL
2470         if (is->paused &&
2471                 (!strcmp(ic->iformat->name, "rtsp") ||
2472                  (ic->pb && !strncmp(input_filename, "mmsh:", 5)))) {
2473             /* wait 10 ms to avoid trying to get another packet */
2474             /* XXX: horrible */
2475             SDL_Delay(10);
2476             continue;
2477         }
2478 #endif
2479         if (is->seek_req) {
2480             int64_t seek_target = is->seek_pos;
2481             int64_t seek_min    = is->seek_rel > 0 ? seek_target - is->seek_rel + 2: INT64_MIN;
2482             int64_t seek_max    = is->seek_rel < 0 ? seek_target - is->seek_rel - 2: INT64_MAX;
2483 // FIXME the +-2 is needed because rounding is not done in the correct direction
2484 //      when the seek_pos/seek_rel variables are generated
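            /* the seek_min/seek_max window keeps avformat_seek_file() from
               landing on the wrong side of the current position, e.g. a small
               forward seek snapping back to an earlier keyframe */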
2485
2486             ret = avformat_seek_file(is->ic, -1, seek_min, seek_target, seek_max, is->seek_flags);
2487             if (ret < 0) {
2488                 fprintf(stderr, "%s: error while seeking\n", is->ic->filename);
2489             } else {
2490                 if (is->audio_stream >= 0) {
2491                     packet_queue_flush(&is->audioq);
2492                     packet_queue_put(&is->audioq, &flush_pkt);
2493                 }
2494                 if (is->subtitle_stream >= 0) {
2495                     packet_queue_flush(&is->subtitleq);
2496                     packet_queue_put(&is->subtitleq, &flush_pkt);
2497                 }
2498                 if (is->video_stream >= 0) {
2499                     packet_queue_flush(&is->videoq);
2500                     packet_queue_put(&is->videoq, &flush_pkt);
2501                 }
2502             }
2503             is->seek_req = 0;
2504             eof = 0;
2505         }
2506
2507         /* if the queues are full, no need to read more */
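        /* reading is throttled once the queued packets exceed MAX_QUEUE_SIZE
           bytes in total, or once every active queue already holds more than
           MIN_FRAMES packets (missing or aborted streams do not block) */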
2508         if (!infinite_buffer &&
2509               (is->audioq.size + is->videoq.size + is->subtitleq.size > MAX_QUEUE_SIZE
2510             || (   (is->audioq   .nb_packets > MIN_FRAMES || is->audio_stream < 0 || is->audioq.abort_request)
2511                 && (is->videoq   .nb_packets > MIN_FRAMES || is->video_stream < 0 || is->videoq.abort_request)
2512                 && (is->subtitleq.nb_packets > MIN_FRAMES || is->subtitle_stream < 0 || is->subtitleq.abort_request)))) {
2513             /* wait 10 ms */
2514             SDL_Delay(10);
2515             continue;
2516         }
2517         if (eof) {
2518             if (is->video_stream >= 0) {
2519                 av_init_packet(pkt);
2520                 pkt->data = NULL;
2521                 pkt->size = 0;
2522                 pkt->stream_index = is->video_stream;
2523                 packet_queue_put(&is->videoq, pkt);
2524             }
2525             if (is->audio_stream >= 0 &&
2526                 is->audio_st->codec->codec->capabilities & CODEC_CAP_DELAY) {
2527                 av_init_packet(pkt);
2528                 pkt->data = NULL;
2529                 pkt->size = 0;
2530                 pkt->stream_index = is->audio_stream;
2531                 packet_queue_put(&is->audioq, pkt);
2532             }
2533             SDL_Delay(10);
2534             if (is->audioq.size + is->videoq.size + is->subtitleq.size == 0) {
2535                 if (loop != 1 && (!loop || --loop)) {
2536                     stream_seek(is, start_time != AV_NOPTS_VALUE ? start_time : 0, 0, 0);
2537                 } else if (autoexit) {
2538                     ret = AVERROR_EOF;
2539                     goto fail;
2540                 }
2541             }
2542             eof=0;
2543             continue;
2544         }
2545         ret = av_read_frame(ic, pkt);
2546         if (ret < 0) {
2547             if (ret == AVERROR_EOF || url_feof(ic->pb))
2548                 eof = 1;
2549             if (ic->pb && ic->pb->error)
2550                 break;
2551             SDL_Delay(100); /* wait for user event */
2552             continue;
2553         }
2554         /* check if the packet is in the play range specified by the user; if so queue it, otherwise discard it */
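        /* i.e. the packet pts, converted to seconds relative to the stream
           start time and shifted by the -ss start offset, must not exceed the
           -t duration; without -t every packet is in range */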
2555         pkt_in_play_range = duration == AV_NOPTS_VALUE ||
2556                 (pkt->pts - ic->streams[pkt->stream_index]->start_time) *
2557                 av_q2d(ic->streams[pkt->stream_index]->time_base) -
2558                 (double)(start_time != AV_NOPTS_VALUE ? start_time : 0) / 1000000
2559                 <= ((double)duration / 1000000);
2560         if (pkt->stream_index == is->audio_stream && pkt_in_play_range) {
2561             packet_queue_put(&is->audioq, pkt);
2562         } else if (pkt->stream_index == is->video_stream && pkt_in_play_range) {
2563             packet_queue_put(&is->videoq, pkt);
2564         } else if (pkt->stream_index == is->subtitle_stream && pkt_in_play_range) {
2565             packet_queue_put(&is->subtitleq, pkt);
2566         } else {
2567             av_free_packet(pkt);
2568         }
2569     }
2570     /* wait until the end */
2571     while (!is->abort_request) {
2572         SDL_Delay(100);
2573     }
2574
2575     ret = 0;
2576  fail:
2577     /* close each stream */
2578     if (is->audio_stream >= 0)
2579         stream_component_close(is, is->audio_stream);
2580     if (is->video_stream >= 0)
2581         stream_component_close(is, is->video_stream);
2582     if (is->subtitle_stream >= 0)
2583         stream_component_close(is, is->subtitle_stream);
2584     if (is->ic) {
2585         avformat_close_input(&is->ic);
2586     }
2587
2588     if (ret != 0) {
2589         SDL_Event event;
2590
2591         event.type = FF_QUIT_EVENT;
2592         event.user.data1 = is;
2593         SDL_PushEvent(&event);
2594     }
2595     return 0;
2596 }
2597
2598 static VideoState *stream_open(const char *filename, AVInputFormat *iformat)
2599 {
2600     VideoState *is;
2601
2602     is = av_mallocz(sizeof(VideoState));
2603     if (!is)
2604         return NULL;
2605     av_strlcpy(is->filename, filename, sizeof(is->filename));
2606     is->iformat = iformat;
2607     is->ytop    = 0;
2608     is->xleft   = 0;
2609
2610     /* start video display */
2611     is->pictq_mutex = SDL_CreateMutex();
2612     is->pictq_cond  = SDL_CreateCond();
2613
2614     is->subpq_mutex = SDL_CreateMutex();
2615     is->subpq_cond  = SDL_CreateCond();
2616
2617     packet_queue_init(&is->videoq);
2618     packet_queue_init(&is->audioq);
2619     packet_queue_init(&is->subtitleq);
2620
2621     is->av_sync_type = av_sync_type;
2622     is->read_tid     = SDL_CreateThread(read_thread, is);
2623     if (!is->read_tid) {
2624         av_free(is);
2625         return NULL;
2626     }
2627     return is;
2628 }
2629
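/* Switch to the next stream of the given type: scan forward from the last
 * selected index, wrapping around, and open the first stream whose parameters
 * look usable; for subtitles the search may also end at "no stream" (-1),
 * which turns subtitles off. */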
2630 static void stream_cycle_channel(VideoState *is, int codec_type)
2631 {
2632     AVFormatContext *ic = is->ic;
2633     int start_index, stream_index;
2634     int old_index;
2635     AVStream *st;
2636
2637     if (codec_type == AVMEDIA_TYPE_VIDEO) {
2638         start_index = is->last_video_stream;
2639         old_index = is->video_stream;
2640     } else if (codec_type == AVMEDIA_TYPE_AUDIO) {
2641         start_index = is->last_audio_stream;
2642         old_index = is->audio_stream;
2643     } else {
2644         start_index = is->last_subtitle_stream;
2645         old_index = is->subtitle_stream;
2646     }
2647     stream_index = start_index;
2648     for (;;) {
2649         if (++stream_index >= is->ic->nb_streams)
2650         {
2651             if (codec_type == AVMEDIA_TYPE_SUBTITLE)
2652             {
2653                 stream_index = -1;
2654                 is->last_subtitle_stream = -1;
2655                 goto the_end;
2656             }
2657             if (start_index == -1)
2658                 return;
2659             stream_index = 0;
2660         }
2661         if (stream_index == start_index)
2662             return;
2663         st = ic->streams[stream_index];
2664         if (st->codec->codec_type == codec_type) {
2665             /* check that parameters are OK */
2666             switch (codec_type) {
2667             case AVMEDIA_TYPE_AUDIO:
2668                 if (st->codec->sample_rate != 0 &&
2669                     st->codec->channels != 0)
2670                     goto the_end;
2671                 break;
2672             case AVMEDIA_TYPE_VIDEO:
2673             case AVMEDIA_TYPE_SUBTITLE:
2674                 goto the_end;
2675             default:
2676                 break;
2677             }
2678         }
2679     }
2680  the_end:
2681     stream_component_close(is, old_index);
2682     stream_component_open(is, stream_index);
2683 }
2684
2685
2686 static void toggle_full_screen(VideoState *is)
2687 {
2688 #if defined(__APPLE__) && SDL_VERSION_ATLEAST(1, 2, 14)
2689     /* OS X needs to reallocate the SDL overlays */
2690     int i;
2691     for (i = 0; i < VIDEO_PICTURE_QUEUE_SIZE; i++)
2692         is->pictq[i].reallocate = 1;
2693 #endif
2694     is_full_screen = !is_full_screen;
2695     video_open(is, 1);
2696 }
2697
2698 static void toggle_pause(VideoState *is)
2699 {
2700     stream_toggle_pause(is);
2701     is->step = 0;
2702 }
2703
2704 static void step_to_next_frame(VideoState *is)
2705 {
2706     /* if the stream is paused, unpause it, then step */
2707     if (is->paused)
2708         stream_toggle_pause(is);
2709     is->step = 1;
2710 }
2711
2712 static void toggle_audio_display(VideoState *is)
2713 {
2714     int bgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0x00);
2715     is->show_mode = (is->show_mode + 1) % SHOW_MODE_NB;
2716     fill_rectangle(screen,
2717                 is->xleft, is->ytop, is->width, is->height,
2718                 bgcolor);
2719     SDL_UpdateRect(screen, is->xleft, is->ytop, is->width, is->height);
2720 }
2721
2722 /* handle an event sent by the GUI */
2723 static void event_loop(VideoState *cur_stream)
2724 {
2725     SDL_Event event;
2726     double incr, pos, frac;
2727
2728     for (;;) {
2729         double x;
2730         SDL_WaitEvent(&event);
2731         switch (event.type) {
2732         case SDL_KEYDOWN:
2733             if (exit_on_keydown) {
2734                 do_exit(cur_stream);
2735                 break;
2736             }
2737             switch (event.key.keysym.sym) {
2738             case SDLK_ESCAPE:
2739             case SDLK_q:
2740                 do_exit(cur_stream);
2741                 break;
2742             case SDLK_f:
2743                 toggle_full_screen(cur_stream);
2744                 cur_stream->force_refresh = 1;
2745                 break;
2746             case SDLK_p:
2747             case SDLK_SPACE:
2748                 toggle_pause(cur_stream);
2749                 break;
2750             case SDLK_s: // S: Step to next frame
2751                 step_to_next_frame(cur_stream);
2752                 break;
2753             case SDLK_a:
2754                 stream_cycle_channel(cur_stream, AVMEDIA_TYPE_AUDIO);
2755                 break;
2756             case SDLK_v:
2757                 stream_cycle_channel(cur_stream, AVMEDIA_TYPE_VIDEO);
2758                 break;
2759             case SDLK_t:
2760                 stream_cycle_channel(cur_stream, AVMEDIA_TYPE_SUBTITLE);
2761                 break;
2762             case SDLK_w:
2763                 toggle_audio_display(cur_stream);
2764                 cur_stream->force_refresh = 1;
2765                 break;
2766             case SDLK_PAGEUP:
2767                 incr = 600.0;
2768                 goto do_seek;
2769             case SDLK_PAGEDOWN:
2770                 incr = -600.0;
2771                 goto do_seek;
2772             case SDLK_LEFT:
2773                 incr = -10.0;
2774                 goto do_seek;
2775             case SDLK_RIGHT:
2776                 incr = 10.0;
2777                 goto do_seek;
2778             case SDLK_UP:
2779                 incr = 60.0;
2780                 goto do_seek;
2781             case SDLK_DOWN:
2782                 incr = -60.0;
2783             do_seek:
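                    /* when seeking by bytes, the increment in seconds is turned
                       into a byte offset using the container bit rate (bits/s
                       divided by 8), falling back to 180000 bytes/s when the
                       bit rate is unknown */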
2784                     if (seek_by_bytes) {
2785                         if (cur_stream->video_stream >= 0 && cur_stream->video_current_pos >= 0) {
2786                             pos = cur_stream->video_current_pos;
2787                         } else if (cur_stream->audio_stream >= 0 && cur_stream->audio_pkt.pos >= 0) {
2788                             pos = cur_stream->audio_pkt.pos;
2789                         } else
2790                             pos = avio_tell(cur_stream->ic->pb);
2791                         if (cur_stream->ic->bit_rate)
2792                             incr *= cur_stream->ic->bit_rate / 8.0;
2793                         else
2794                             incr *= 180000.0;
2795                         pos += incr;
2796                         stream_seek(cur_stream, pos, incr, 1);
2797                     } else {
2798                         pos = get_master_clock(cur_stream);
2799                         pos += incr;
2800                         stream_seek(cur_stream, (int64_t)(pos * AV_TIME_BASE), (int64_t)(incr * AV_TIME_BASE), 0);
2801                     }
2802                 break;
2803             default:
2804                 break;
2805             }
2806             break;
2807         case SDL_VIDEOEXPOSE:
2808             cur_stream->force_refresh = 1;
2809             break;
2810         case SDL_MOUSEBUTTONDOWN:
2811             if (exit_on_mousedown) {
2812                 do_exit(cur_stream);
2813                 break;
2814             }
2815         case SDL_MOUSEMOTION:
2816             if (event.type == SDL_MOUSEBUTTONDOWN) {
2817                 x = event.button.x;
2818             } else {
2819                 if (event.motion.state != SDL_PRESSED)
2820                     break;
2821                 x = event.motion.x;
2822             }
2823                 if (seek_by_bytes || cur_stream->ic->duration <= 0) {
2824                     uint64_t size =  avio_size(cur_stream->ic->pb);
2825                     stream_seek(cur_stream, size*x/cur_stream->width, 0, 1);
2826                 } else {
2827                     int64_t ts;
2828                     int ns, hh, mm, ss;
2829                     int tns, thh, tmm, tss;
2830                     tns  = cur_stream->ic->duration / 1000000LL;
2831                     thh  = tns / 3600;
2832                     tmm  = (tns % 3600) / 60;
2833                     tss  = (tns % 60);
2834                     frac = x / cur_stream->width;
2835                     ns   = frac * tns;
2836                     hh   = ns / 3600;
2837                     mm   = (ns % 3600) / 60;
2838                     ss   = (ns % 60);
2839                     fprintf(stderr, "Seek to %2.0f%% (%2d:%02d:%02d) of total duration (%2d:%02d:%02d)       \n", frac*100,
2840                             hh, mm, ss, thh, tmm, tss);
2841                     ts = frac * cur_stream->ic->duration;
2842                     if (cur_stream->ic->start_time != AV_NOPTS_VALUE)
2843                         ts += cur_stream->ic->start_time;
2844                     stream_seek(cur_stream, ts, 0, 0);
2845                 }
2846             break;
2847         case SDL_VIDEORESIZE:
2848                 screen = SDL_SetVideoMode(event.resize.w, event.resize.h, 0,
2849                                           SDL_HWSURFACE|SDL_RESIZABLE|SDL_ASYNCBLIT|SDL_HWACCEL);
2850                 screen_width  = cur_stream->width  = event.resize.w;
2851                 screen_height = cur_stream->height = event.resize.h;
2852                 cur_stream->force_refresh = 1;
2853             break;
2854         case SDL_QUIT:
2855         case FF_QUIT_EVENT:
2856             do_exit(cur_stream);
2857             break;
2858         case FF_ALLOC_EVENT:
2859             alloc_picture(event.user.data1);
2860             break;
2861         case FF_REFRESH_EVENT:
2862             video_refresh(event.user.data1);
2863             cur_stream->refresh = 0;
2864             break;
2865         default:
2866             break;
2867         }
2868     }
2869 }
2870
2871 static int opt_frame_size(const char *opt, const char *arg)
2872 {
2873     av_log(NULL, AV_LOG_WARNING, "Option -s is deprecated, use -video_size.\n");
2874     return opt_default("video_size", arg);
2875 }
2876
2877 static int opt_width(const char *opt, const char *arg)
2878 {
2879     screen_width = parse_number_or_die(opt, arg, OPT_INT64, 1, INT_MAX);
2880     return 0;
2881 }
2882
2883 static int opt_height(const char *opt, const char *arg)
2884 {
2885     screen_height = parse_number_or_die(opt, arg, OPT_INT64, 1, INT_MAX);
2886     return 0;
2887 }
2888
2889 static int opt_format(const char *opt, const char *arg)
2890 {
2891     file_iformat = av_find_input_format(arg);
2892     if (!file_iformat) {
2893         fprintf(stderr, "Unknown input format: %s\n", arg);
2894         return AVERROR(EINVAL);
2895     }
2896     return 0;
2897 }
2898
2899 static int opt_frame_pix_fmt(const char *opt, const char *arg)
2900 {
2901     av_log(NULL, AV_LOG_WARNING, "Option -pix_fmt is deprecated, use -pixel_format.\n");
2902     return opt_default("pixel_format", arg);
2903 }
2904
2905 static int opt_sync(const char *opt, const char *arg)
2906 {
2907     if (!strcmp(arg, "audio"))
2908         av_sync_type = AV_SYNC_AUDIO_MASTER;
2909     else if (!strcmp(arg, "video"))
2910         av_sync_type = AV_SYNC_VIDEO_MASTER;
2911     else if (!strcmp(arg, "ext"))
2912         av_sync_type = AV_SYNC_EXTERNAL_CLOCK;
2913     else {
2914         fprintf(stderr, "Unknown value for %s: %s\n", opt, arg);
2915         exit(1);
2916     }
2917     return 0;
2918 }
2919
2920 static int opt_seek(const char *opt, const char *arg)
2921 {
2922     start_time = parse_time_or_die(opt, arg, 1);
2923     return 0;
2924 }
2925
2926 static int opt_duration(const char *opt, const char *arg)
2927 {
2928     duration = parse_time_or_die(opt, arg, 1);
2929     return 0;
2930 }
2931
2932 static int opt_show_mode(const char *opt, const char *arg)
2933 {
2934     show_mode = !strcmp(arg, "video") ? SHOW_MODE_VIDEO :
2935                 !strcmp(arg, "waves") ? SHOW_MODE_WAVES :
2936                 !strcmp(arg, "rdft" ) ? SHOW_MODE_RDFT  :
2937                 parse_number_or_die(opt, arg, OPT_INT, 0, SHOW_MODE_NB-1);
2938     return 0;
2939 }
2940
2941 static void opt_input_file(void *optctx, const char *filename)
2942 {
2943     if (input_filename) {
2944         fprintf(stderr, "Argument '%s' provided as input filename, but '%s' was already specified.\n",
2945                 filename, input_filename);
2946         exit_program(1);
2947     }
2948     if (!strcmp(filename, "-"))
2949         filename = "pipe:";
2950     input_filename = filename;
2951 }
2952
2953 static int opt_codec(void *o, const char *opt, const char *arg)
2954 {
2955     switch(opt[strlen(opt)-1]){
2956     case 'a' :    audio_codec_name = arg; break;
2957     case 's' : subtitle_codec_name = arg; break;
2958     case 'v' :    video_codec_name = arg; break;
2959     }
2960     return 0;
2961 }
2962
2963 static int dummy;
2964
2965 static const OptionDef options[] = {
2966 #include "cmdutils_common_opts.h"
2967     { "x", HAS_ARG, { (void*)opt_width }, "force displayed width", "width" },
2968     { "y", HAS_ARG, { (void*)opt_height }, "force displayed height", "height" },
2969     { "s", HAS_ARG | OPT_VIDEO, { (void*)opt_frame_size }, "set frame size (WxH or abbreviation)", "size" },
2970     { "fs", OPT_BOOL, { (void*)&is_full_screen }, "force full screen" },
2971     { "an", OPT_BOOL, { (void*)&audio_disable }, "disable audio" },
2972     { "vn", OPT_BOOL, { (void*)&video_disable }, "disable video" },
2973     { "ast", OPT_INT | HAS_ARG | OPT_EXPERT, { (void*)&wanted_stream[AVMEDIA_TYPE_AUDIO] }, "select desired audio stream", "stream_number" },
2974     { "vst", OPT_INT | HAS_ARG | OPT_EXPERT, { (void*)&wanted_stream[AVMEDIA_TYPE_VIDEO] }, "select desired video stream", "stream_number" },
2975     { "sst", OPT_INT | HAS_ARG | OPT_EXPERT, { (void*)&wanted_stream[AVMEDIA_TYPE_SUBTITLE] }, "select desired subtitle stream", "stream_number" },
2976     { "ss", HAS_ARG, { (void*)&opt_seek }, "seek to a given position in seconds", "pos" },
2977     { "t", HAS_ARG, { (void*)&opt_duration }, "play \"duration\" seconds of audio/video", "duration" },
2978     { "bytes", OPT_INT | HAS_ARG, { (void*)&seek_by_bytes }, "seek by bytes 0=off 1=on -1=auto", "val" },
2979     { "nodisp", OPT_BOOL, { (void*)&display_disable }, "disable graphical display" },
2980     { "f", HAS_ARG, { (void*)opt_format }, "force format", "fmt" },
2981     { "pix_fmt", HAS_ARG | OPT_EXPERT | OPT_VIDEO, { (void*)opt_frame_pix_fmt }, "set pixel format", "format" },
2982     { "stats", OPT_BOOL | OPT_EXPERT, { (void*)&show_status }, "show status", "" },
2983     { "bug", OPT_INT | HAS_ARG | OPT_EXPERT, { (void*)&workaround_bugs }, "workaround bugs", "" },
2984     { "fast", OPT_BOOL | OPT_EXPERT, { (void*)&fast }, "non-spec-compliant optimizations", "" },
2985     { "genpts", OPT_BOOL | OPT_EXPERT, { (void*)&genpts }, "generate pts", "" },
2986     { "drp", OPT_INT | HAS_ARG | OPT_EXPERT, { (void*)&decoder_reorder_pts }, "let decoder reorder pts 0=off 1=on -1=auto", ""},
2987     { "lowres", OPT_INT | HAS_ARG | OPT_EXPERT, { (void*)&lowres }, "decode at lowered resolution (1=half, 2=quarter, ...)", "factor" },
2988     { "skiploop", OPT_INT | HAS_ARG | OPT_EXPERT, { (void*)&skip_loop_filter }, "set loop-filter skipping mode (AVDiscard value)", "mode" },
2989     { "skipframe", OPT_INT | HAS_ARG | OPT_EXPERT, { (void*)&skip_frame }, "set frame skipping mode (AVDiscard value)", "mode" },
2990     { "skipidct", OPT_INT | HAS_ARG | OPT_EXPERT, { (void*)&skip_idct }, "set IDCT skipping mode (AVDiscard value)", "mode" },
2991     { "idct", OPT_INT | HAS_ARG | OPT_EXPERT, { (void*)&idct }, "set idct algo",  "algo" },
2992     { "ec", OPT_INT | HAS_ARG | OPT_EXPERT, { (void*)&error_concealment }, "set error concealment options",  "bit_mask" },
2993     { "sync", HAS_ARG | OPT_EXPERT, { (void*)opt_sync }, "set audio-video sync. type (type=audio/video/ext)", "type" },
2994     { "autoexit", OPT_BOOL | OPT_EXPERT, { (void*)&autoexit }, "exit at the end", "" },
2995     { "exitonkeydown", OPT_BOOL | OPT_EXPERT, { (void*)&exit_on_keydown }, "exit on key down", "" },
2996     { "exitonmousedown", OPT_BOOL | OPT_EXPERT, { (void*)&exit_on_mousedown }, "exit on mouse down", "" },
2997     { "loop", OPT_INT | HAS_ARG | OPT_EXPERT, { (void*)&loop }, "set number of times the playback shall be looped", "loop count" },
2998     { "framedrop", OPT_BOOL | OPT_EXPERT, { (void*)&framedrop }, "drop frames when cpu is too slow", "" },
2999     { "infbuf", OPT_BOOL | OPT_EXPERT, { (void*)&infinite_buffer }, "don't limit the input buffer size (useful with realtime streams)", "" },
3000     { "window_title", OPT_STRING | HAS_ARG, { (void*)&window_title }, "set window title", "window title" },
3001 #if CONFIG_AVFILTER
3002     { "vf", OPT_STRING | HAS_ARG, { (void*)&vfilters }, "video filters", "filter list" },
3003 #endif
3004     { "rdftspeed", OPT_INT | HAS_ARG| OPT_AUDIO | OPT_EXPERT, { (void*)&rdftspeed }, "rdft speed", "msecs" },
3005     { "showmode", HAS_ARG, {(void*)opt_show_mode}, "select show mode (0 = video, 1 = waves, 2 = RDFT)", "mode" },
3006     { "default", HAS_ARG | OPT_AUDIO | OPT_VIDEO | OPT_EXPERT, { (void*)opt_default }, "generic catch all option", "" },
3007     { "i", OPT_BOOL, {(void *)&dummy}, "read specified file", "input_file"},
3008     { "codec", HAS_ARG | OPT_FUNC2, {(void*)opt_codec}, "force decoder", "decoder" },
3009     { NULL, },
3010 };
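/* Editor's note: a few illustrative command lines for the table above
 * (file names are hypothetical, examples added by the editor):
 *
 *   ffplay -an -ss 30 -t 10 input.mkv      no audio, seek to 30 s, play 10 s
 *   ffplay -showmode 2 music.flac          show an RDFT spectrum instead of video
 *   ffplay -codec:v mpeg4 -fs clip.avi     force the MPEG-4 video decoder, full screen
 */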
3011
3012 static void show_usage(void)
3013 {
3014     av_log(NULL, AV_LOG_INFO, "Simple media player\n");
3015     av_log(NULL, AV_LOG_INFO, "usage: %s [options] input_file\n", program_name);
3016     av_log(NULL, AV_LOG_INFO, "\n");
3017 }
3018
3019 static int opt_help(const char *opt, const char *arg)
3020 {
3021     av_log_set_callback(log_callback_help);
3022     show_usage();
3023     show_help_options(options, "Main options:\n",
3024                       OPT_EXPERT, 0);
3025     show_help_options(options, "\nAdvanced options:\n",
3026                       OPT_EXPERT, OPT_EXPERT);
3027     printf("\n");
3028     show_help_children(avcodec_get_class(), AV_OPT_FLAG_DECODING_PARAM);
3029     show_help_children(avformat_get_class(), AV_OPT_FLAG_DECODING_PARAM);
3030 #if !CONFIG_AVFILTER
3031     show_help_children(sws_get_class(), AV_OPT_FLAG_ENCODING_PARAM);
3032 #endif
3033     printf("\nWhile playing:\n"
3034            "q, ESC              quit\n"
3035            "f                   toggle full screen\n"
3036            "p, SPC              pause\n"
3037            "a                   cycle audio channel\n"
3038            "v                   cycle video channel\n"
3039            "t                   cycle subtitle channel\n"
3040            "w                   show audio waves\n"
3041            "s                   activate frame-step mode\n"
3042            "left/right          seek backward/forward 10 seconds\n"
3043            "down/up             seek backward/forward 1 minute\n"
3044            "page down/page up   seek backward/forward 10 minutes\n"
3045            "mouse click         seek to percentage in file corresponding to fraction of width\n"
3046            );
3047     return 0;
3048 }
3049
3050 static int lockmgr(void **mtx, enum AVLockOp op)
3051 {
3052     switch (op) {
3053     case AV_LOCK_CREATE:
3054         *mtx = SDL_CreateMutex();
3055         if (!*mtx)
3056             return 1;
3057         return 0;
3058     case AV_LOCK_OBTAIN:
3059         return !!SDL_LockMutex(*mtx);
3060     case AV_LOCK_RELEASE:
3061         return !!SDL_UnlockMutex(*mtx);
3062     case AV_LOCK_DESTROY:
3063         SDL_DestroyMutex(*mtx);
3064         return 0;
3065     }
3066     return 1;
3067 }
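/* Editor's note: lockmgr() is registered with av_lockmgr_register() in main()
 * so that libavcodec can protect its non-thread-safe open/close paths.  The
 * callback contract is to return 0 on success and nonzero on failure, which is
 * why the SDL_LockMutex()/SDL_UnlockMutex() results are normalized with "!!".
 * (Comment added by the editor.) */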
3068
3069 /* program entry point */
3070 int main(int argc, char **argv)
3071 {
3072     int flags;
3073     VideoState *is;
3074     char dummy_videodriver[] = "SDL_VIDEODRIVER=dummy";
3075
3076     av_log_set_flags(AV_LOG_SKIP_REPEATED);
3077     parse_loglevel(argc, argv, options);
3078
3079     /* register all codecs, demuxers and protocols */
3080     avcodec_register_all();
3081 #if CONFIG_AVDEVICE
3082     avdevice_register_all();
3083 #endif
3084 #if CONFIG_AVFILTER
3085     avfilter_register_all();
3086 #endif
3087     av_register_all();
3088     avformat_network_init();
3089
3090     init_opts();
3091
3092     signal(SIGINT , sigterm_handler); /* Interrupt (ANSI).    */
3093     signal(SIGTERM, sigterm_handler); /* Termination (ANSI).  */
3094
3095     show_banner(argc, argv, options);
3096
3097     parse_options(NULL, argc, argv, options, opt_input_file);
3098
3099     if (!input_filename) {
3100         show_usage();
3101         fprintf(stderr, "An input file must be specified\n");
3102         fprintf(stderr, "Use -h to get full help or, even better, run 'man %s'\n", program_name);
3103         exit(1);
3104     }
3105
3106     if (display_disable) {
3107         video_disable = 1;
3108     }
3109     flags = SDL_INIT_VIDEO | SDL_INIT_AUDIO | SDL_INIT_TIMER;
3110     if (audio_disable)
3111         flags &= ~SDL_INIT_AUDIO;
3112     if (display_disable)
3113         SDL_putenv(dummy_videodriver); /* For the event queue, we always need a video driver. */
3114 #if !defined(__MINGW32__) && !defined(__APPLE__)
3115     flags |= SDL_INIT_EVENTTHREAD; /* Not supported on Windows or Mac OS X */
3116 #endif
3117     if (SDL_Init (flags)) {
3118         fprintf(stderr, "Could not initialize SDL - %s\n", SDL_GetError());
3119         fprintf(stderr, "(Did you set the DISPLAY variable?)\n");
3120         exit(1);
3121     }
3122
3123     if (!display_disable) {
3124 #if HAVE_SDL_VIDEO_SIZE
3125         const SDL_VideoInfo *vi = SDL_GetVideoInfo();
3126         fs_screen_width = vi->current_w;
3127         fs_screen_height = vi->current_h;
3128 #endif
3129     }
3130
3131     SDL_EventState(SDL_ACTIVEEVENT, SDL_IGNORE);
3132     SDL_EventState(SDL_SYSWMEVENT, SDL_IGNORE);
3133     SDL_EventState(SDL_USEREVENT, SDL_IGNORE);
3134
3135     if (av_lockmgr_register(lockmgr)) {
3136         fprintf(stderr, "Could not initialize lock manager!\n");
3137         do_exit(NULL);
3138     }
3139
3140     av_init_packet(&flush_pkt);
3141     flush_pkt.data = (uint8_t *)(intptr_t)"FLUSH";
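    /* Editor's note: flush_pkt is a sentinel; after a seek it is queued and the
     * decoder threads, on seeing a packet whose data pointer equals
     * flush_pkt.data, call avcodec_flush_buffers() to discard stale frames.
     * (Comment added by the editor.) */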
3142
3143     is = stream_open(input_filename, file_iformat);
3144     if (!is) {
3145         fprintf(stderr, "Failed to initialize VideoState!\n");
3146         do_exit(NULL);
3147     }
3148
3149     event_loop(is);
3150
3151     /* event_loop() never returns; the return below only silences the compiler */
3152
3153     return 0;
3154 }