1 /*
2  * Copyright (c) 2003 Fabrice Bellard
3  *
4  * This file is part of FFmpeg.
5  *
6  * FFmpeg is free software; you can redistribute it and/or
7  * modify it under the terms of the GNU Lesser General Public
8  * License as published by the Free Software Foundation; either
9  * version 2.1 of the License, or (at your option) any later version.
10  *
11  * FFmpeg is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
14  * Lesser General Public License for more details.
15  *
16  * You should have received a copy of the GNU Lesser General Public
17  * License along with FFmpeg; if not, write to the Free Software
18  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
19  */
20
21 /**
22  * @file
23  * simple media player based on the FFmpeg libraries
24  */
25
26 #include "config.h"
27 #include <inttypes.h>
28 #include <math.h>
29 #include <limits.h>
30 #include <signal.h>
31 #include "libavutil/avstring.h"
32 #include "libavutil/colorspace.h"
33 #include "libavutil/mathematics.h"
34 #include "libavutil/pixdesc.h"
35 #include "libavutil/imgutils.h"
36 #include "libavutil/dict.h"
37 #include "libavutil/parseutils.h"
38 #include "libavutil/samplefmt.h"
39 #include "libavutil/avassert.h"
40 #include "libavutil/time.h"
41 #include "libavformat/avformat.h"
42 #include "libavdevice/avdevice.h"
43 #include "libswscale/swscale.h"
44 #include "libavutil/opt.h"
45 #include "libavcodec/avfft.h"
46 #include "libswresample/swresample.h"
47
48 #if CONFIG_AVFILTER
49 # include "libavfilter/avcodec.h"
50 # include "libavfilter/avfilter.h"
51 # include "libavfilter/avfiltergraph.h"
52 # include "libavfilter/buffersink.h"
53 # include "libavfilter/buffersrc.h"
54 #endif
55
56 #include <SDL.h>
57 #include <SDL_thread.h>
58
59 #include "cmdutils.h"
60
61 #include <assert.h>
62
63 const char program_name[] = "ffplay";
64 const int program_birth_year = 2003;
65
66 #define MAX_QUEUE_SIZE (15 * 1024 * 1024)
67 #define MIN_FRAMES 5
68
69 /* SDL audio buffer size, in samples. Should be small to have precise
70    A/V sync as SDL does not have hardware buffer fullness info. */
71 #define SDL_AUDIO_BUFFER_SIZE 1024
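/* For intuition (assuming a 44.1 kHz output, which this file does not fix):
   1024 samples is roughly 23 ms of audio (1024 / 44100 ~= 0.023 s), which
   bounds how precisely the audio clock derived from the SDL callback can
   track the hardware. */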
72
73 /* no AV sync correction is done if below the AV sync threshold */
74 #define AV_SYNC_THRESHOLD 0.01
75 /* no AV correction is done if too big error */
76 #define AV_NOSYNC_THRESHOLD 10.0
77
78 /* maximum audio speed change to get correct sync */
79 #define SAMPLE_CORRECTION_PERCENT_MAX 10
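/* Illustrative numbers (not taken from this file): at 48000 Hz a 10% bound
   allows roughly 4800 samples per second of audio to be added or dropped
   while resynchronizing, enough to catch up quickly without long desync. */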
80
81 /* we use about AUDIO_DIFF_AVG_NB A-V differences to compute the average */
82 #define AUDIO_DIFF_AVG_NB   20
83
84 /* NOTE: the size must be big enough to compensate for the hardware audio buffer size */
85 /* TODO: We assume that a decoded and resampled frame fits into this buffer */
86 #define SAMPLE_ARRAY_SIZE (8 * 65536)
87
88 static int sws_flags = SWS_BICUBIC;
89
90 typedef struct PacketQueue {
91     AVPacketList *first_pkt, *last_pkt;
92     int nb_packets;
93     int size;
94     int abort_request;
95     SDL_mutex *mutex;
96     SDL_cond *cond;
97 } PacketQueue;
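/* Usage sketch, based on the queue functions defined below: the producer
   (the read thread) appends packets with packet_queue_put(), the decoder
   threads consume them with packet_queue_get(q, pkt, 1) (blocking), and
   packet_queue_abort() sets abort_request and signals cond so that blocked
   consumers wake up and return -1. */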
98
99 #define VIDEO_PICTURE_QUEUE_SIZE 4
100 #define SUBPICTURE_QUEUE_SIZE 4
101
102 typedef struct VideoPicture {
103     double pts;                                  ///< presentation time stamp for this picture
104     int64_t pos;                                 ///< byte position in file
105     int skip;
106     SDL_Overlay *bmp;
107     int width, height; /* source height & width */
108     AVRational sample_aspect_ratio;
109     int allocated;
110     int reallocate;
111
112 #if CONFIG_AVFILTER
113     AVFilterBufferRef *picref;
114 #endif
115 } VideoPicture;
116
117 typedef struct SubPicture {
118     double pts; /* presentation time stamp for this picture */
119     AVSubtitle sub;
120 } SubPicture;
121
122 typedef struct AudioParams {
123     int freq;
124     int channels;
125     int channel_layout;
126     enum AVSampleFormat fmt;
127 } AudioParams;
128
129 enum {
130     AV_SYNC_AUDIO_MASTER, /* default choice */
131     AV_SYNC_VIDEO_MASTER,
132     AV_SYNC_EXTERNAL_CLOCK, /* synchronize to an external clock */
133 };
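/* The selected master clock decides which get_*_clock() value drives A/V
   sync; AV_SYNC_AUDIO_MASTER is the default (see av_sync_type below and
   get_master_clock()). */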
134
135 typedef struct VideoState {
136     SDL_Thread *read_tid;
137     SDL_Thread *video_tid;
138     SDL_Thread *refresh_tid;
139     AVInputFormat *iformat;
140     int no_background;
141     int abort_request;
142     int force_refresh;
143     int paused;
144     int last_paused;
145     int que_attachments_req;
146     int seek_req;
147     int seek_flags;
148     int64_t seek_pos;
149     int64_t seek_rel;
150     int read_pause_return;
151     AVFormatContext *ic;
152
153     int audio_stream;
154
155     int av_sync_type;
156     double external_clock; /* external clock base */
157     int64_t external_clock_time;
158
159     double audio_clock;
160     double audio_diff_cum; /* used for AV difference average computation */
161     double audio_diff_avg_coef;
162     double audio_diff_threshold;
163     int audio_diff_avg_count;
164     AVStream *audio_st;
165     PacketQueue audioq;
166     int audio_hw_buf_size;
167     DECLARE_ALIGNED(16,uint8_t,audio_buf2)[AVCODEC_MAX_AUDIO_FRAME_SIZE * 4];
168     uint8_t silence_buf[SDL_AUDIO_BUFFER_SIZE];
169     uint8_t *audio_buf;
170     uint8_t *audio_buf1;
171     unsigned int audio_buf_size; /* in bytes */
172     int audio_buf_index; /* in bytes */
173     int audio_write_buf_size;
174     AVPacket audio_pkt_temp;
175     AVPacket audio_pkt;
176     struct AudioParams audio_src;
177     struct AudioParams audio_tgt;
178     struct SwrContext *swr_ctx;
179     double audio_current_pts;
180     double audio_current_pts_drift;
181     int frame_drops_early;
182     int frame_drops_late;
183     AVFrame *frame;
184
185     enum ShowMode {
186         SHOW_MODE_NONE = -1, SHOW_MODE_VIDEO = 0, SHOW_MODE_WAVES, SHOW_MODE_RDFT, SHOW_MODE_NB
187     } show_mode;
188     int16_t sample_array[SAMPLE_ARRAY_SIZE];
189     int sample_array_index;
190     int last_i_start;
191     RDFTContext *rdft;
192     int rdft_bits;
193     FFTSample *rdft_data;
194     int xpos;
195
196     SDL_Thread *subtitle_tid;
197     int subtitle_stream;
198     int subtitle_stream_changed;
199     AVStream *subtitle_st;
200     PacketQueue subtitleq;
201     SubPicture subpq[SUBPICTURE_QUEUE_SIZE];
202     int subpq_size, subpq_rindex, subpq_windex;
203     SDL_mutex *subpq_mutex;
204     SDL_cond *subpq_cond;
205
206     double frame_timer;
207     double frame_last_pts;
208     double frame_last_duration;
209     double frame_last_dropped_pts;
210     double frame_last_returned_time;
211     double frame_last_filter_delay;
212     int64_t frame_last_dropped_pos;
213     double video_clock;                          ///< pts of last decoded frame / predicted pts of next decoded frame
214     int video_stream;
215     AVStream *video_st;
216     PacketQueue videoq;
217     double video_current_pts;                    ///< current displayed pts (different from video_clock if frame fifos are used)
218     double video_current_pts_drift;              ///< video_current_pts - time (av_gettime) at which we updated video_current_pts - used to have running video pts
219     int64_t video_current_pos;                   ///< current displayed file pos
220     VideoPicture pictq[VIDEO_PICTURE_QUEUE_SIZE];
221     int pictq_size, pictq_rindex, pictq_windex;
222     SDL_mutex *pictq_mutex;
223     SDL_cond *pictq_cond;
224 #if !CONFIG_AVFILTER
225     struct SwsContext *img_convert_ctx;
226 #endif
227
228     char filename[1024];
229     int width, height, xleft, ytop;
230     int step;
231
232 #if CONFIG_AVFILTER
233     AVFilterContext *in_video_filter;           ///< the first filter in the video chain
234     AVFilterContext *out_video_filter;          ///< the last filter in the video chain
235     int use_dr1;
236     FrameBuffer *buffer_pool;
237 #endif
238
239     int refresh;
240     int last_video_stream, last_audio_stream, last_subtitle_stream;
241
242     SDL_cond *continue_read_thread;
243 } VideoState;
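/* Threading overview, as far as it is visible in this file: read_tid demuxes
   packets into audioq/videoq/subtitleq, video_tid and subtitle_tid decode
   them, refresh_tid periodically pushes FF_REFRESH_EVENT so the event loop
   can call video_refresh(), and decoded audio is presumably pulled by SDL's
   audio callback (defined later in the file). The pictq/subpq mutex+cond
   pairs hand decoded frames over to the display side. */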
244
245 /* options specified by the user */
246 static AVInputFormat *file_iformat;
247 static const char *input_filename;
248 static const char *window_title;
249 static int fs_screen_width;
250 static int fs_screen_height;
251 static int screen_width  = 0;
252 static int screen_height = 0;
253 static int audio_disable;
254 static int video_disable;
255 static int wanted_stream[AVMEDIA_TYPE_NB] = {
256     [AVMEDIA_TYPE_AUDIO]    = -1,
257     [AVMEDIA_TYPE_VIDEO]    = -1,
258     [AVMEDIA_TYPE_SUBTITLE] = -1,
259 };
260 static int seek_by_bytes = -1;
261 static int display_disable;
262 static int show_status = 1;
263 static int av_sync_type = AV_SYNC_AUDIO_MASTER;
264 static int64_t start_time = AV_NOPTS_VALUE;
265 static int64_t duration = AV_NOPTS_VALUE;
266 static int workaround_bugs = 1;
267 static int fast = 0;
268 static int genpts = 0;
269 static int lowres = 0;
270 static int idct = FF_IDCT_AUTO;
271 static enum AVDiscard skip_frame       = AVDISCARD_DEFAULT;
272 static enum AVDiscard skip_idct        = AVDISCARD_DEFAULT;
273 static enum AVDiscard skip_loop_filter = AVDISCARD_DEFAULT;
274 static int error_concealment = 3;
275 static int decoder_reorder_pts = -1;
276 static int autoexit;
277 static int exit_on_keydown;
278 static int exit_on_mousedown;
279 static int loop = 1;
280 static int framedrop = -1;
281 static int infinite_buffer = 0;
282 static enum ShowMode show_mode = SHOW_MODE_NONE;
283 static const char *audio_codec_name;
284 static const char *subtitle_codec_name;
285 static const char *video_codec_name;
286 static int rdftspeed = 20;
287 #if CONFIG_AVFILTER
288 static char *vfilters = NULL;
289 #endif
290
291 /* current context */
292 static int is_full_screen;
293 static int64_t audio_callback_time;
294
295 static AVPacket flush_pkt;
296
297 #define FF_ALLOC_EVENT   (SDL_USEREVENT)
298 #define FF_REFRESH_EVENT (SDL_USEREVENT + 1)
299 #define FF_QUIT_EVENT    (SDL_USEREVENT + 2)
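/* FF_ALLOC_EVENT asks the main thread to (re)allocate the SDL YUV overlay
   (queue_picture() pushes it, alloc_picture() services it); FF_REFRESH_EVENT
   is pushed by refresh_thread() to trigger video_refresh(); FF_QUIT_EVENT
   requests shutdown and is handled in the event loop (not shown in this
   excerpt). */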
300
301 static SDL_Surface *screen;
302
303 void av_noreturn exit_program(int ret)
304 {
305     exit(ret);
306 }
307
308 static int packet_queue_put_private(PacketQueue *q, AVPacket *pkt)
309 {
310     AVPacketList *pkt1;
311
312     if (q->abort_request)
313        return -1;
314
315     pkt1 = av_malloc(sizeof(AVPacketList));
316     if (!pkt1)
317         return -1;
318     pkt1->pkt = *pkt;
319     pkt1->next = NULL;
320
321     if (!q->last_pkt)
322         q->first_pkt = pkt1;
323     else
324         q->last_pkt->next = pkt1;
325     q->last_pkt = pkt1;
326     q->nb_packets++;
327     q->size += pkt1->pkt.size + sizeof(*pkt1);
328     /* XXX: should duplicate packet data in DV case */
329     SDL_CondSignal(q->cond);
330     return 0;
331 }
332
333 static int packet_queue_put(PacketQueue *q, AVPacket *pkt)
334 {
335     int ret;
336
337     /* duplicate the packet */
338     if (pkt != &flush_pkt && av_dup_packet(pkt) < 0)
339         return -1;
340
341     SDL_LockMutex(q->mutex);
342     ret = packet_queue_put_private(q, pkt);
343     SDL_UnlockMutex(q->mutex);
344
345     if (pkt != &flush_pkt && ret < 0)
346         av_free_packet(pkt);
347
348     return ret;
349 }
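/* Note on the special flush_pkt: packet_queue_put() deliberately does not
   duplicate or free it, packet_queue_start() enqueues it, and
   get_video_frame() recognizes it by comparing pkt->data == flush_pkt.data
   and then calls avcodec_flush_buffers(). */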
350
351 /* packet queue handling */
352 static void packet_queue_init(PacketQueue *q)
353 {
354     memset(q, 0, sizeof(PacketQueue));
355     q->mutex = SDL_CreateMutex();
356     q->cond = SDL_CreateCond();
357     q->abort_request = 1;
358 }
359
360 static void packet_queue_flush(PacketQueue *q)
361 {
362     AVPacketList *pkt, *pkt1;
363
364     SDL_LockMutex(q->mutex);
365     for (pkt = q->first_pkt; pkt != NULL; pkt = pkt1) {
366         pkt1 = pkt->next;
367         av_free_packet(&pkt->pkt);
368         av_freep(&pkt);
369     }
370     q->last_pkt = NULL;
371     q->first_pkt = NULL;
372     q->nb_packets = 0;
373     q->size = 0;
374     SDL_UnlockMutex(q->mutex);
375 }
376
377 static void packet_queue_destroy(PacketQueue *q)
378 {
379     packet_queue_flush(q);
380     SDL_DestroyMutex(q->mutex);
381     SDL_DestroyCond(q->cond);
382 }
383
384 static void packet_queue_abort(PacketQueue *q)
385 {
386     SDL_LockMutex(q->mutex);
387
388     q->abort_request = 1;
389
390     SDL_CondSignal(q->cond);
391
392     SDL_UnlockMutex(q->mutex);
393 }
394
395 static void packet_queue_start(PacketQueue *q)
396 {
397     SDL_LockMutex(q->mutex);
398     q->abort_request = 0;
399     packet_queue_put_private(q, &flush_pkt);
400     SDL_UnlockMutex(q->mutex);
401 }
402
403 /* return < 0 if aborted, 0 if no packet and > 0 if a packet was returned. */
404 static int packet_queue_get(PacketQueue *q, AVPacket *pkt, int block)
405 {
406     AVPacketList *pkt1;
407     int ret;
408
409     SDL_LockMutex(q->mutex);
410
411     for (;;) {
412         if (q->abort_request) {
413             ret = -1;
414             break;
415         }
416
417         pkt1 = q->first_pkt;
418         if (pkt1) {
419             q->first_pkt = pkt1->next;
420             if (!q->first_pkt)
421                 q->last_pkt = NULL;
422             q->nb_packets--;
423             q->size -= pkt1->pkt.size + sizeof(*pkt1);
424             *pkt = pkt1->pkt;
425             av_free(pkt1);
426             ret = 1;
427             break;
428         } else if (!block) {
429             ret = 0;
430             break;
431         } else {
432             SDL_CondWait(q->cond, q->mutex);
433         }
434     }
435     SDL_UnlockMutex(q->mutex);
436     return ret;
437 }
438
439 static inline void fill_rectangle(SDL_Surface *screen,
440                                   int x, int y, int w, int h, int color)
441 {
442     SDL_Rect rect;
443     rect.x = x;
444     rect.y = y;
445     rect.w = w;
446     rect.h = h;
447     SDL_FillRect(screen, &rect, color);
448 }
449
450 #define ALPHA_BLEND(a, oldp, newp, s)\
451 ((((oldp << s) * (255 - (a))) + (newp * (a))) / (255 << s))
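/* ALPHA_BLEND is a fixed-point blend: result = (oldp*(255-a) + newp*a) / 255,
   with s extra fraction bits so that chroma values accumulated over two or
   four pixels (the s = 1 and s = 2 uses below) are scaled back by the same
   division. Example: ALPHA_BLEND(255, 100, 200, 0) == 200. */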
452
453 #define RGBA_IN(r, g, b, a, s)\
454 {\
455     unsigned int v = ((const uint32_t *)(s))[0];\
456     a = (v >> 24) & 0xff;\
457     r = (v >> 16) & 0xff;\
458     g = (v >> 8) & 0xff;\
459     b = v & 0xff;\
460 }
461
462 #define YUVA_IN(y, u, v, a, s, pal)\
463 {\
464     unsigned int val = ((const uint32_t *)(pal))[*(const uint8_t*)(s)];\
465     a = (val >> 24) & 0xff;\
466     y = (val >> 16) & 0xff;\
467     u = (val >> 8) & 0xff;\
468     v = val & 0xff;\
469 }
470
471 #define YUVA_OUT(d, y, u, v, a)\
472 {\
473     ((uint32_t *)(d))[0] = (a << 24) | (y << 16) | (u << 8) | v;\
474 }
475
476
477 #define BPP 1
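/* BPP is 1 because the subtitle rectangles are 8-bit palette indices; the
   palette itself (rect->pict.data[1]) holds 32-bit AYUV entries, which is
   what YUVA_IN() reads. */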
478
479 static void blend_subrect(AVPicture *dst, const AVSubtitleRect *rect, int imgw, int imgh)
480 {
481     int wrap, wrap3, width2, skip2;
482     int y, u, v, a, u1, v1, a1, w, h;
483     uint8_t *lum, *cb, *cr;
484     const uint8_t *p;
485     const uint32_t *pal;
486     int dstx, dsty, dstw, dsth;
487
488     dstw = av_clip(rect->w, 0, imgw);
489     dsth = av_clip(rect->h, 0, imgh);
490     dstx = av_clip(rect->x, 0, imgw - dstw);
491     dsty = av_clip(rect->y, 0, imgh - dsth);
492     lum = dst->data[0] + dsty * dst->linesize[0];
493     cb  = dst->data[1] + (dsty >> 1) * dst->linesize[1];
494     cr  = dst->data[2] + (dsty >> 1) * dst->linesize[2];
495
496     width2 = ((dstw + 1) >> 1) + (dstx & ~dstw & 1);
497     skip2 = dstx >> 1;
498     wrap = dst->linesize[0];
499     wrap3 = rect->pict.linesize[0];
500     p = rect->pict.data[0];
501     pal = (const uint32_t *)rect->pict.data[1];  /* Now in YCrCb! */
502
503     if (dsty & 1) {
504         lum += dstx;
505         cb += skip2;
506         cr += skip2;
507
508         if (dstx & 1) {
509             YUVA_IN(y, u, v, a, p, pal);
510             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
511             cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
512             cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
513             cb++;
514             cr++;
515             lum++;
516             p += BPP;
517         }
518         for (w = dstw - (dstx & 1); w >= 2; w -= 2) {
519             YUVA_IN(y, u, v, a, p, pal);
520             u1 = u;
521             v1 = v;
522             a1 = a;
523             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
524
525             YUVA_IN(y, u, v, a, p + BPP, pal);
526             u1 += u;
527             v1 += v;
528             a1 += a;
529             lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
530             cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
531             cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
532             cb++;
533             cr++;
534             p += 2 * BPP;
535             lum += 2;
536         }
537         if (w) {
538             YUVA_IN(y, u, v, a, p, pal);
539             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
540             cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
541             cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
542             p++;
543             lum++;
544         }
545         p += wrap3 - dstw * BPP;
546         lum += wrap - dstw - dstx;
547         cb += dst->linesize[1] - width2 - skip2;
548         cr += dst->linesize[2] - width2 - skip2;
549     }
550     for (h = dsth - (dsty & 1); h >= 2; h -= 2) {
551         lum += dstx;
552         cb += skip2;
553         cr += skip2;
554
555         if (dstx & 1) {
556             YUVA_IN(y, u, v, a, p, pal);
557             u1 = u;
558             v1 = v;
559             a1 = a;
560             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
561             p += wrap3;
562             lum += wrap;
563             YUVA_IN(y, u, v, a, p, pal);
564             u1 += u;
565             v1 += v;
566             a1 += a;
567             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
568             cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
569             cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
570             cb++;
571             cr++;
572             p += -wrap3 + BPP;
573             lum += -wrap + 1;
574         }
575         for (w = dstw - (dstx & 1); w >= 2; w -= 2) {
576             YUVA_IN(y, u, v, a, p, pal);
577             u1 = u;
578             v1 = v;
579             a1 = a;
580             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
581
582             YUVA_IN(y, u, v, a, p + BPP, pal);
583             u1 += u;
584             v1 += v;
585             a1 += a;
586             lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
587             p += wrap3;
588             lum += wrap;
589
590             YUVA_IN(y, u, v, a, p, pal);
591             u1 += u;
592             v1 += v;
593             a1 += a;
594             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
595
596             YUVA_IN(y, u, v, a, p + BPP, pal);
597             u1 += u;
598             v1 += v;
599             a1 += a;
600             lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
601
602             cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 2);
603             cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 2);
604
605             cb++;
606             cr++;
607             p += -wrap3 + 2 * BPP;
608             lum += -wrap + 2;
609         }
610         if (w) {
611             YUVA_IN(y, u, v, a, p, pal);
612             u1 = u;
613             v1 = v;
614             a1 = a;
615             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
616             p += wrap3;
617             lum += wrap;
618             YUVA_IN(y, u, v, a, p, pal);
619             u1 += u;
620             v1 += v;
621             a1 += a;
622             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
623             cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
624             cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
625             cb++;
626             cr++;
627             p += -wrap3 + BPP;
628             lum += -wrap + 1;
629         }
630         p += wrap3 + (wrap3 - dstw * BPP);
631         lum += wrap + (wrap - dstw - dstx);
632         cb += dst->linesize[1] - width2 - skip2;
633         cr += dst->linesize[2] - width2 - skip2;
634     }
635     /* handle odd height */
636     if (h) {
637         lum += dstx;
638         cb += skip2;
639         cr += skip2;
640
641         if (dstx & 1) {
642             YUVA_IN(y, u, v, a, p, pal);
643             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
644             cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
645             cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
646             cb++;
647             cr++;
648             lum++;
649             p += BPP;
650         }
651         for (w = dstw - (dstx & 1); w >= 2; w -= 2) {
652             YUVA_IN(y, u, v, a, p, pal);
653             u1 = u;
654             v1 = v;
655             a1 = a;
656             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
657
658             YUVA_IN(y, u, v, a, p + BPP, pal);
659             u1 += u;
660             v1 += v;
661             a1 += a;
662             lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
663             cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u, 1);
664             cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v, 1);
665             cb++;
666             cr++;
667             p += 2 * BPP;
668             lum += 2;
669         }
670         if (w) {
671             YUVA_IN(y, u, v, a, p, pal);
672             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
673             cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
674             cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
675         }
676     }
677 }
678
679 static void free_subpicture(SubPicture *sp)
680 {
681     avsubtitle_free(&sp->sub);
682 }
683
684 static void calculate_display_rect(SDL_Rect *rect, int scr_xleft, int scr_ytop, int scr_width, int scr_height, VideoPicture *vp)
685 {
686     float aspect_ratio;
687     int width, height, x, y;
688
689     if (vp->sample_aspect_ratio.num == 0)
690         aspect_ratio = 0;
691     else
692         aspect_ratio = av_q2d(vp->sample_aspect_ratio);
693
694     if (aspect_ratio <= 0.0)
695         aspect_ratio = 1.0;
696     aspect_ratio *= (float)vp->width / (float)vp->height;
697
698     /* XXX: we assume the screen has a 1:1 pixel aspect ratio */
699     height = scr_height;
700     width = ((int)rint(height * aspect_ratio)) & ~1;
701     if (width > scr_width) {
702         width = scr_width;
703         height = ((int)rint(width / aspect_ratio)) & ~1;
704     }
705     x = (scr_width - width) / 2;
706     y = (scr_height - height) / 2;
707     rect->x = scr_xleft + x;
708     rect->y = scr_ytop  + y;
709     rect->w = FFMAX(width,  1);
710     rect->h = FFMAX(height, 1);
711 }
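/* Worked example with illustrative values: a 1440x1080 picture with a 4:3
   sample aspect ratio has display aspect 4/3 * 1440/1080 = 16/9; on a
   1280x720 screen the code first tries width = rint(720 * 16/9) = 1280,
   which fits, so the final rectangle is the full 1280x720 with no
   letterboxing. */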
712
713 static void video_image_display(VideoState *is)
714 {
715     VideoPicture *vp;
716     SubPicture *sp;
717     AVPicture pict;
718     SDL_Rect rect;
719     int i;
720
721     vp = &is->pictq[is->pictq_rindex];
722     if (vp->bmp) {
723         if (is->subtitle_st) {
724             if (is->subpq_size > 0) {
725                 sp = &is->subpq[is->subpq_rindex];
726
727                 if (vp->pts >= sp->pts + ((float) sp->sub.start_display_time / 1000)) {
728                     SDL_LockYUVOverlay (vp->bmp);
729
730                     pict.data[0] = vp->bmp->pixels[0];
731                     pict.data[1] = vp->bmp->pixels[2];
732                     pict.data[2] = vp->bmp->pixels[1];
733
734                     pict.linesize[0] = vp->bmp->pitches[0];
735                     pict.linesize[1] = vp->bmp->pitches[2];
736                     pict.linesize[2] = vp->bmp->pitches[1];
737
738                     for (i = 0; i < sp->sub.num_rects; i++)
739                         blend_subrect(&pict, sp->sub.rects[i],
740                                       vp->bmp->w, vp->bmp->h);
741
742                     SDL_UnlockYUVOverlay (vp->bmp);
743                 }
744             }
745         }
746
747         calculate_display_rect(&rect, is->xleft, is->ytop, is->width, is->height, vp);
748
749         SDL_DisplayYUVOverlay(vp->bmp, &rect);
750     }
751 }
752
753 static inline int compute_mod(int a, int b)
754 {
755     return a < 0 ? a%b + b : a%b;
756 }
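/* e.g. compute_mod(-3, SAMPLE_ARRAY_SIZE) maps a negative index back into
   [0, SAMPLE_ARRAY_SIZE), which video_audio_display() relies on when it
   steps backwards through sample_array. */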
757
758 static void video_audio_display(VideoState *s)
759 {
760     int i, i_start, x, y1, y, ys, delay, n, nb_display_channels;
761     int ch, channels, h, h2, bgcolor, fgcolor;
762     int64_t time_diff;
763     int rdft_bits, nb_freq;
764
765     for (rdft_bits = 1; (1 << rdft_bits) < 2 * s->height; rdft_bits++)
766         ;
767     nb_freq = 1 << (rdft_bits - 1);
768
769     /* compute display index: center on currently output samples */
770     channels = s->audio_tgt.channels;
771     nb_display_channels = channels;
772     if (!s->paused) {
773         int data_used= s->show_mode == SHOW_MODE_WAVES ? s->width : (2*nb_freq);
774         n = 2 * channels;
775         delay = s->audio_write_buf_size;
776         delay /= n;
777
778         /* to be more precise, we take into account the time spent since
779            the last buffer computation */
780         if (audio_callback_time) {
781             time_diff = av_gettime() - audio_callback_time;
782             delay -= (time_diff * s->audio_tgt.freq) / 1000000;
783         }
784
785         delay += 2 * data_used;
786         if (delay < data_used)
787             delay = data_used;
788
789         i_start= x = compute_mod(s->sample_array_index - delay * channels, SAMPLE_ARRAY_SIZE);
790         if (s->show_mode == SHOW_MODE_WAVES) {
791             h = INT_MIN;
792             for (i = 0; i < 1000; i += channels) {
793                 int idx = (SAMPLE_ARRAY_SIZE + x - i) % SAMPLE_ARRAY_SIZE;
794                 int a = s->sample_array[idx];
795                 int b = s->sample_array[(idx + 4 * channels) % SAMPLE_ARRAY_SIZE];
796                 int c = s->sample_array[(idx + 5 * channels) % SAMPLE_ARRAY_SIZE];
797                 int d = s->sample_array[(idx + 9 * channels) % SAMPLE_ARRAY_SIZE];
798                 int score = a - d;
799                 if (h < score && (b ^ c) < 0) {
800                     h = score;
801                     i_start = idx;
802                 }
803             }
804         }
805
806         s->last_i_start = i_start;
807     } else {
808         i_start = s->last_i_start;
809     }
810
811     bgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0x00);
812     if (s->show_mode == SHOW_MODE_WAVES) {
813         fill_rectangle(screen,
814                        s->xleft, s->ytop, s->width, s->height,
815                        bgcolor);
816
817         fgcolor = SDL_MapRGB(screen->format, 0xff, 0xff, 0xff);
818
819         /* total height for one channel */
820         h = s->height / nb_display_channels;
821         /* graph height / 2 */
822         h2 = (h * 9) / 20;
823         for (ch = 0; ch < nb_display_channels; ch++) {
824             i = i_start + ch;
825             y1 = s->ytop + ch * h + (h / 2); /* position of center line */
826             for (x = 0; x < s->width; x++) {
827                 y = (s->sample_array[i] * h2) >> 15;
828                 if (y < 0) {
829                     y = -y;
830                     ys = y1 - y;
831                 } else {
832                     ys = y1;
833                 }
834                 fill_rectangle(screen,
835                                s->xleft + x, ys, 1, y,
836                                fgcolor);
837                 i += channels;
838                 if (i >= SAMPLE_ARRAY_SIZE)
839                     i -= SAMPLE_ARRAY_SIZE;
840             }
841         }
842
843         fgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0xff);
844
845         for (ch = 1; ch < nb_display_channels; ch++) {
846             y = s->ytop + ch * h;
847             fill_rectangle(screen,
848                            s->xleft, y, s->width, 1,
849                            fgcolor);
850         }
851         SDL_UpdateRect(screen, s->xleft, s->ytop, s->width, s->height);
852     } else {
853         nb_display_channels= FFMIN(nb_display_channels, 2);
854         if (rdft_bits != s->rdft_bits) {
855             av_rdft_end(s->rdft);
856             av_free(s->rdft_data);
857             s->rdft = av_rdft_init(rdft_bits, DFT_R2C);
858             s->rdft_bits = rdft_bits;
859             s->rdft_data = av_malloc(4 * nb_freq * sizeof(*s->rdft_data));
860         }
861         {
862             FFTSample *data[2];
863             for (ch = 0; ch < nb_display_channels; ch++) {
864                 data[ch] = s->rdft_data + 2 * nb_freq * ch;
865                 i = i_start + ch;
866                 for (x = 0; x < 2 * nb_freq; x++) {
867                     double w = (x-nb_freq) * (1.0 / nb_freq);
868                     data[ch][x] = s->sample_array[i] * (1.0 - w * w);
869                     i += channels;
870                     if (i >= SAMPLE_ARRAY_SIZE)
871                         i -= SAMPLE_ARRAY_SIZE;
872                 }
873                 av_rdft_calc(s->rdft, data[ch]);
874             }
875             // least efficient way to do this; we could access the data directly, but it's more than fast enough
876             for (y = 0; y < s->height; y++) {
877                 double w = 1 / sqrt(nb_freq);
878                 int a = sqrt(w * sqrt(data[0][2 * y + 0] * data[0][2 * y + 0] + data[0][2 * y + 1] * data[0][2 * y + 1]));
879                 int b = (nb_display_channels == 2 ) ? sqrt(w * sqrt(data[1][2 * y + 0] * data[1][2 * y + 0]
880                        + data[1][2 * y + 1] * data[1][2 * y + 1])) : a;
881                 a = FFMIN(a, 255);
882                 b = FFMIN(b, 255);
883                 fgcolor = SDL_MapRGB(screen->format, a, b, (a + b) / 2);
884
885                 fill_rectangle(screen,
886                             s->xpos, s->height-y, 1, 1,
887                             fgcolor);
888             }
889         }
890         SDL_UpdateRect(screen, s->xpos, s->ytop, 1, s->height);
891         if (!s->paused)
892             s->xpos++;
893         if (s->xpos >= s->width)
894             s->xpos= s->xleft;
895     }
896 }
897
898 static void stream_close(VideoState *is)
899 {
900     VideoPicture *vp;
901     int i;
902     /* XXX: use a special url_shutdown call to abort parse cleanly */
903     is->abort_request = 1;
904     SDL_WaitThread(is->read_tid, NULL);
905     SDL_WaitThread(is->refresh_tid, NULL);
906     packet_queue_destroy(&is->videoq);
907     packet_queue_destroy(&is->audioq);
908     packet_queue_destroy(&is->subtitleq);
909
910     /* free all pictures */
911     for (i = 0; i < VIDEO_PICTURE_QUEUE_SIZE; i++) {
912         vp = &is->pictq[i];
913 #if CONFIG_AVFILTER
914         avfilter_unref_bufferp(&vp->picref);
915 #endif
916         if (vp->bmp) {
917             SDL_FreeYUVOverlay(vp->bmp);
918             vp->bmp = NULL;
919         }
920     }
921     SDL_DestroyMutex(is->pictq_mutex);
922     SDL_DestroyCond(is->pictq_cond);
923     SDL_DestroyMutex(is->subpq_mutex);
924     SDL_DestroyCond(is->subpq_cond);
925     SDL_DestroyCond(is->continue_read_thread);
926 #if !CONFIG_AVFILTER
927     if (is->img_convert_ctx)
928         sws_freeContext(is->img_convert_ctx);
929 #endif
930     av_free(is);
931 }
932
933 static void do_exit(VideoState *is)
934 {
935     if (is) {
936         stream_close(is);
937     }
938     av_lockmgr_register(NULL);
939     uninit_opts();
940 #if CONFIG_AVFILTER
941     avfilter_uninit();
942     av_freep(&vfilters);
943 #endif
944     avformat_network_deinit();
945     if (show_status)
946         printf("\n");
947     SDL_Quit();
948     av_log(NULL, AV_LOG_QUIET, "%s", "");
949     exit(0);
950 }
951
952 static void sigterm_handler(int sig)
953 {
954     exit(123);
955 }
956
957 static int video_open(VideoState *is, int force_set_video_mode)
958 {
959     int flags = SDL_HWSURFACE | SDL_ASYNCBLIT | SDL_HWACCEL;
960     int w,h;
961     VideoPicture *vp = &is->pictq[is->pictq_rindex];
962     SDL_Rect rect;
963
964     if (is_full_screen) flags |= SDL_FULLSCREEN;
965     else                flags |= SDL_RESIZABLE;
966
967     if (is_full_screen && fs_screen_width) {
968         w = fs_screen_width;
969         h = fs_screen_height;
970     } else if (!is_full_screen && screen_width) {
971         w = screen_width;
972         h = screen_height;
973     } else if (vp->width) {
974         calculate_display_rect(&rect, 0, 0, INT_MAX, vp->height, vp);
975         w = rect.w;
976         h = rect.h;
977     } else {
978         w = 640;
979         h = 480;
980     }
981     if (screen && is->width == screen->w && screen->w == w
982        && is->height== screen->h && screen->h == h && !force_set_video_mode)
983         return 0;
984     screen = SDL_SetVideoMode(w, h, 0, flags);
985     if (!screen) {
986         fprintf(stderr, "SDL: could not set video mode - exiting\n");
987         do_exit(is);
988     }
989     if (!window_title)
990         window_title = input_filename;
991     SDL_WM_SetCaption(window_title, window_title);
992
993     is->width  = screen->w;
994     is->height = screen->h;
995
996     return 0;
997 }
998
999 /* display the current picture, if any */
1000 static void video_display(VideoState *is)
1001 {
1002     if (!screen)
1003         video_open(is, 0);
1004     if (is->audio_st && is->show_mode != SHOW_MODE_VIDEO)
1005         video_audio_display(is);
1006     else if (is->video_st)
1007         video_image_display(is);
1008 }
1009
1010 static int refresh_thread(void *opaque)
1011 {
1012     VideoState *is= opaque;
1013     while (!is->abort_request) {
1014         SDL_Event event;
1015         event.type = FF_REFRESH_EVENT;
1016         event.user.data1 = opaque;
1017         if (!is->refresh && (!is->paused || is->force_refresh)) {
1018             is->refresh = 1;
1019             SDL_PushEvent(&event);
1020         }
1021             // FIXME: ideally we should wait for the correct time, but SDL's event passing is so slow it would be silly
1022         av_usleep(is->audio_st && is->show_mode != SHOW_MODE_VIDEO ? rdftspeed*1000 : 5000);
1023     }
1024     return 0;
1025 }
1026
1027 /* get the current audio clock value */
1028 static double get_audio_clock(VideoState *is)
1029 {
1030     if (is->paused) {
1031         return is->audio_current_pts;
1032     } else {
1033         return is->audio_current_pts_drift + av_gettime() / 1000000.0;
1034     }
1035 }
1036
1037 /* get the current video clock value */
1038 static double get_video_clock(VideoState *is)
1039 {
1040     if (is->paused) {
1041         return is->video_current_pts;
1042     } else {
1043         return is->video_current_pts_drift + av_gettime() / 1000000.0;
1044     }
1045 }
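/* Both clocks use the same trick: *_current_pts_drift stores pts minus the
   wallclock time of the last update (see update_video_pts(); the audio side
   is updated analogously later in the file), so adding the current wallclock
   back extrapolates the clock between updates without any timer. */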
1046
1047 /* get the current external clock value */
1048 static double get_external_clock(VideoState *is)
1049 {
1050     int64_t ti;
1051     ti = av_gettime();
1052     return is->external_clock + ((ti - is->external_clock_time) * 1e-6);
1053 }
1054
1055 /* get the current master clock value */
1056 static double get_master_clock(VideoState *is)
1057 {
1058     double val;
1059
1060     if (is->av_sync_type == AV_SYNC_VIDEO_MASTER) {
1061         if (is->video_st)
1062             val = get_video_clock(is);
1063         else
1064             val = get_audio_clock(is);
1065     } else if (is->av_sync_type == AV_SYNC_AUDIO_MASTER) {
1066         if (is->audio_st)
1067             val = get_audio_clock(is);
1068         else
1069             val = get_video_clock(is);
1070     } else {
1071         val = get_external_clock(is);
1072     }
1073     return val;
1074 }
1075
1076 /* seek in the stream */
1077 static void stream_seek(VideoState *is, int64_t pos, int64_t rel, int seek_by_bytes)
1078 {
1079     if (!is->seek_req) {
1080         is->seek_pos = pos;
1081         is->seek_rel = rel;
1082         is->seek_flags &= ~AVSEEK_FLAG_BYTE;
1083         if (seek_by_bytes)
1084             is->seek_flags |= AVSEEK_FLAG_BYTE;
1085         is->seek_req = 1;
1086     }
1087 }
1088
1089 /* pause or resume the video */
1090 static void stream_toggle_pause(VideoState *is)
1091 {
1092     if (is->paused) {
1093         is->frame_timer += av_gettime() / 1000000.0 + is->video_current_pts_drift - is->video_current_pts;
1094         if (is->read_pause_return != AVERROR(ENOSYS)) {
1095             is->video_current_pts = is->video_current_pts_drift + av_gettime() / 1000000.0;
1096         }
1097         is->video_current_pts_drift = is->video_current_pts - av_gettime() / 1000000.0;
1098     }
1099     is->paused = !is->paused;
1100 }
1101
1102 static double compute_target_delay(double delay, VideoState *is)
1103 {
1104     double sync_threshold, diff;
1105
1106     /* update delay to follow master synchronisation source */
1107     if (((is->av_sync_type == AV_SYNC_AUDIO_MASTER && is->audio_st) ||
1108          is->av_sync_type == AV_SYNC_EXTERNAL_CLOCK)) {
1109         /* if video is slave, we try to correct big delays by
1110            duplicating or deleting a frame */
1111         diff = get_video_clock(is) - get_master_clock(is);
1112
1113         /* skip or repeat frame. We take into account the
1114            delay to compute the threshold. I still don't know
1115            if it is the best guess */
1116         sync_threshold = FFMAX(AV_SYNC_THRESHOLD, delay);
1117         if (fabs(diff) < AV_NOSYNC_THRESHOLD) {
1118             if (diff <= -sync_threshold)
1119                 delay = 0;
1120             else if (diff >= sync_threshold)
1121                 delay = 2 * delay;
1122         }
1123     }
1124
1125     av_dlog(NULL, "video: delay=%0.3f A-V=%f\n",
1126             delay, -diff);
1127
1128     return delay;
1129 }
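/* Example of the policy above, with made-up numbers: for a nominal delay of
   0.040 s and a video clock 0.1 s behind the master clock (diff = -0.1),
   sync_threshold = FFMAX(0.01, 0.040) = 0.040, so diff <= -sync_threshold
   and the returned delay becomes 0 (show the next frame immediately); if the
   video were 0.1 s ahead instead, the delay would double to 0.080 s. */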
1130
1131 static void pictq_next_picture(VideoState *is) {
1132     /* update queue size and signal for next picture */
1133     if (++is->pictq_rindex == VIDEO_PICTURE_QUEUE_SIZE)
1134         is->pictq_rindex = 0;
1135
1136     SDL_LockMutex(is->pictq_mutex);
1137     is->pictq_size--;
1138     SDL_CondSignal(is->pictq_cond);
1139     SDL_UnlockMutex(is->pictq_mutex);
1140 }
1141
1142 static void pictq_prev_picture(VideoState *is) {
1143     VideoPicture *prevvp;
1144     /* update queue size and signal for the previous picture */
1145     prevvp = &is->pictq[(is->pictq_rindex + VIDEO_PICTURE_QUEUE_SIZE - 1) % VIDEO_PICTURE_QUEUE_SIZE];
1146     if (prevvp->allocated && !prevvp->skip) {
1147         SDL_LockMutex(is->pictq_mutex);
1148         if (is->pictq_size < VIDEO_PICTURE_QUEUE_SIZE - 1) {
1149             if (--is->pictq_rindex == -1)
1150                 is->pictq_rindex = VIDEO_PICTURE_QUEUE_SIZE - 1;
1151             is->pictq_size++;
1152         }
1153         SDL_CondSignal(is->pictq_cond);
1154         SDL_UnlockMutex(is->pictq_mutex);
1155     }
1156 }
1157
1158 static void update_video_pts(VideoState *is, double pts, int64_t pos) {
1159     double time = av_gettime() / 1000000.0;
1160     /* update current video pts */
1161     is->video_current_pts = pts;
1162     is->video_current_pts_drift = is->video_current_pts - time;
1163     is->video_current_pos = pos;
1164     is->frame_last_pts = pts;
1165 }
1166
1167 /* called to display each frame */
1168 static void video_refresh(void *opaque)
1169 {
1170     VideoState *is = opaque;
1171     VideoPicture *vp;
1172     double time;
1173
1174     SubPicture *sp, *sp2;
1175
1176     if (is->video_st) {
1177         if (is->force_refresh)
1178             pictq_prev_picture(is);
1179 retry:
1180         if (is->pictq_size == 0) {
1181             SDL_LockMutex(is->pictq_mutex);
1182             if (is->frame_last_dropped_pts != AV_NOPTS_VALUE && is->frame_last_dropped_pts > is->frame_last_pts) {
1183                 update_video_pts(is, is->frame_last_dropped_pts, is->frame_last_dropped_pos);
1184                 is->frame_last_dropped_pts = AV_NOPTS_VALUE;
1185             }
1186             SDL_UnlockMutex(is->pictq_mutex);
1187             // nothing to do, no picture to display in the queue
1188         } else {
1189             double last_duration, duration, delay;
1190             /* dequeue the picture */
1191             vp = &is->pictq[is->pictq_rindex];
1192
1193             if (vp->skip) {
1194                 pictq_next_picture(is);
1195                 goto retry;
1196             }
1197
1198             if (is->paused)
1199                 goto display;
1200
1201             /* compute nominal last_duration */
1202             last_duration = vp->pts - is->frame_last_pts;
1203             if (last_duration > 0 && last_duration < 10.0) {
1204                 /* if duration of the last frame was sane, update last_duration in video state */
1205                 is->frame_last_duration = last_duration;
1206             }
1207             delay = compute_target_delay(is->frame_last_duration, is);
1208
1209             time= av_gettime()/1000000.0;
1210             if (time < is->frame_timer + delay)
1211                 return;
1212
1213             if (delay > 0)
1214                 is->frame_timer += delay * FFMAX(1, floor((time-is->frame_timer) / delay));
1215
1216             SDL_LockMutex(is->pictq_mutex);
1217             update_video_pts(is, vp->pts, vp->pos);
1218             SDL_UnlockMutex(is->pictq_mutex);
1219
1220             if (is->pictq_size > 1) {
1221                 VideoPicture *nextvp = &is->pictq[(is->pictq_rindex + 1) % VIDEO_PICTURE_QUEUE_SIZE];
1222                 duration = nextvp->pts - vp->pts;
1223                 if((framedrop>0 || (framedrop && is->audio_st)) && time > is->frame_timer + duration){
1224                     is->frame_drops_late++;
1225                     pictq_next_picture(is);
1226                     goto retry;
1227                 }
1228             }
1229
1230             if (is->subtitle_st) {
1231                 if (is->subtitle_stream_changed) {
1232                     SDL_LockMutex(is->subpq_mutex);
1233
1234                     while (is->subpq_size) {
1235                         free_subpicture(&is->subpq[is->subpq_rindex]);
1236
1237                         /* update queue size and signal for next picture */
1238                         if (++is->subpq_rindex == SUBPICTURE_QUEUE_SIZE)
1239                             is->subpq_rindex = 0;
1240
1241                         is->subpq_size--;
1242                     }
1243                     is->subtitle_stream_changed = 0;
1244
1245                     SDL_CondSignal(is->subpq_cond);
1246                     SDL_UnlockMutex(is->subpq_mutex);
1247                 } else {
1248                     if (is->subpq_size > 0) {
1249                         sp = &is->subpq[is->subpq_rindex];
1250
1251                         if (is->subpq_size > 1)
1252                             sp2 = &is->subpq[(is->subpq_rindex + 1) % SUBPICTURE_QUEUE_SIZE];
1253                         else
1254                             sp2 = NULL;
1255
1256                         if ((is->video_current_pts > (sp->pts + ((float) sp->sub.end_display_time / 1000)))
1257                                 || (sp2 && is->video_current_pts > (sp2->pts + ((float) sp2->sub.start_display_time / 1000))))
1258                         {
1259                             free_subpicture(sp);
1260
1261                             /* update queue size and signal for next picture */
1262                             if (++is->subpq_rindex == SUBPICTURE_QUEUE_SIZE)
1263                                 is->subpq_rindex = 0;
1264
1265                             SDL_LockMutex(is->subpq_mutex);
1266                             is->subpq_size--;
1267                             SDL_CondSignal(is->subpq_cond);
1268                             SDL_UnlockMutex(is->subpq_mutex);
1269                         }
1270                     }
1271                 }
1272             }
1273
1274 display:
1275             /* display picture */
1276             if (!display_disable)
1277                 video_display(is);
1278
1279             pictq_next_picture(is);
1280         }
1281     } else if (is->audio_st) {
1282         /* draw the next audio frame */
1283
1284         /* if there is only an audio stream, then display the audio bars (better
1285            than nothing, just to test the implementation) */
1286
1287         /* display picture */
1288         if (!display_disable)
1289             video_display(is);
1290     }
1291     is->force_refresh = 0;
1292     if (show_status) {
1293         static int64_t last_time;
1294         int64_t cur_time;
1295         int aqsize, vqsize, sqsize;
1296         double av_diff;
1297
1298         cur_time = av_gettime();
1299         if (!last_time || (cur_time - last_time) >= 30000) {
1300             aqsize = 0;
1301             vqsize = 0;
1302             sqsize = 0;
1303             if (is->audio_st)
1304                 aqsize = is->audioq.size;
1305             if (is->video_st)
1306                 vqsize = is->videoq.size;
1307             if (is->subtitle_st)
1308                 sqsize = is->subtitleq.size;
1309             av_diff = 0;
1310             if (is->audio_st && is->video_st)
1311                 av_diff = get_audio_clock(is) - get_video_clock(is);
1312             printf("%7.2f A-V:%7.3f fd=%4d aq=%5dKB vq=%5dKB sq=%5dB f=%"PRId64"/%"PRId64"   \r",
1313                    get_master_clock(is),
1314                    av_diff,
1315                    is->frame_drops_early + is->frame_drops_late,
1316                    aqsize / 1024,
1317                    vqsize / 1024,
1318                    sqsize,
1319                    is->video_st ? is->video_st->codec->pts_correction_num_faulty_dts : 0,
1320                    is->video_st ? is->video_st->codec->pts_correction_num_faulty_pts : 0);
1321             fflush(stdout);
1322             last_time = cur_time;
1323         }
1324     }
1325 }
1326
1327 /* allocate a picture (this needs to be done in the main thread to avoid
1328    potential locking problems) */
1329 static void alloc_picture(VideoState *is)
1330 {
1331     VideoPicture *vp;
1332
1333     vp = &is->pictq[is->pictq_windex];
1334
1335     if (vp->bmp)
1336         SDL_FreeYUVOverlay(vp->bmp);
1337
1338 #if CONFIG_AVFILTER
1339     avfilter_unref_bufferp(&vp->picref);
1340 #endif
1341
1342     video_open(is, 0);
1343
1344     vp->bmp = SDL_CreateYUVOverlay(vp->width, vp->height,
1345                                    SDL_YV12_OVERLAY,
1346                                    screen);
1347     if (!vp->bmp || vp->bmp->pitches[0] < vp->width) {
1348         /* SDL allocates a buffer smaller than requested if the video
1349          * overlay hardware is unable to support the requested size. */
1350         fprintf(stderr, "Error: the video system does not support an image\n"
1351                         "size of %dx%d pixels. Try using -lowres or -vf \"scale=w:h\"\n"
1352                         "to reduce the image size.\n", vp->width, vp->height );
1353         do_exit(is);
1354     }
1355
1356     SDL_LockMutex(is->pictq_mutex);
1357     vp->allocated = 1;
1358     SDL_CondSignal(is->pictq_cond);
1359     SDL_UnlockMutex(is->pictq_mutex);
1360 }
1361
1362 static int queue_picture(VideoState *is, AVFrame *src_frame, double pts1, int64_t pos)
1363 {
1364     VideoPicture *vp;
1365     double frame_delay, pts = pts1;
1366
1367     /* compute the exact PTS for the picture if it is omitted in the stream
1368      * pts1 is the dts of the pkt / pts of the frame */
1369     if (pts != 0) {
1370         /* update video clock with pts, if present */
1371         is->video_clock = pts;
1372     } else {
1373         pts = is->video_clock;
1374     }
1375     /* update video clock for next frame */
1376     frame_delay = av_q2d(is->video_st->codec->time_base);
1377     /* for MPEG2, the frame can be repeated, so we update the
1378        clock accordingly */
1379     frame_delay += src_frame->repeat_pict * (frame_delay * 0.5);
1380     is->video_clock += frame_delay;
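    /* e.g. with a 1/25 time_base and repeat_pict = 1 (soft-telecine style
     * repeat), frame_delay = 0.04 + 1 * 0.02 = 0.06 s, i.e. the frame is
     * held for one and a half frame periods. */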
1381
1382 #if defined(DEBUG_SYNC) && 0
1383     printf("frame_type=%c clock=%0.3f pts=%0.3f\n",
1384            av_get_picture_type_char(src_frame->pict_type), pts, pts1);
1385 #endif
1386
1387     /* wait until we have space to put a new picture */
1388     SDL_LockMutex(is->pictq_mutex);
1389
1390     /* keep the last already displayed picture in the queue */
1391     while (is->pictq_size >= VIDEO_PICTURE_QUEUE_SIZE - 2 &&
1392            !is->videoq.abort_request) {
1393         SDL_CondWait(is->pictq_cond, is->pictq_mutex);
1394     }
1395     SDL_UnlockMutex(is->pictq_mutex);
1396
1397     if (is->videoq.abort_request)
1398         return -1;
1399
1400     vp = &is->pictq[is->pictq_windex];
1401
1402 #if CONFIG_AVFILTER
1403     vp->sample_aspect_ratio = ((AVFilterBufferRef *)src_frame->opaque)->video->sample_aspect_ratio;
1404 #else
1405     vp->sample_aspect_ratio = av_guess_sample_aspect_ratio(is->ic, is->video_st, src_frame);
1406 #endif
1407
1408     /* alloc or resize hardware picture buffer */
1409     if (!vp->bmp || vp->reallocate || !vp->allocated ||
1410         vp->width  != src_frame->width ||
1411         vp->height != src_frame->height) {
1412         SDL_Event event;
1413
1414         vp->allocated  = 0;
1415         vp->reallocate = 0;
1416         vp->width = src_frame->width;
1417         vp->height = src_frame->height;
1418
1419         /* the allocation must be done in the main thread to avoid
1420            locking problems. */
1421         event.type = FF_ALLOC_EVENT;
1422         event.user.data1 = is;
1423         SDL_PushEvent(&event);
1424
1425         /* wait until the picture is allocated */
1426         SDL_LockMutex(is->pictq_mutex);
1427         while (!vp->allocated && !is->videoq.abort_request) {
1428             SDL_CondWait(is->pictq_cond, is->pictq_mutex);
1429         }
1430         /* if the queue is aborted, we have to pop the pending ALLOC event or wait for the allocation to complete */
1431         if (is->videoq.abort_request && SDL_PeepEvents(&event, 1, SDL_GETEVENT, SDL_EVENTMASK(FF_ALLOC_EVENT)) != 1) {
1432             while (!vp->allocated) {
1433                 SDL_CondWait(is->pictq_cond, is->pictq_mutex);
1434             }
1435         }
1436         SDL_UnlockMutex(is->pictq_mutex);
1437
1438         if (is->videoq.abort_request)
1439             return -1;
1440     }
1441
1442     /* if the frame is not skipped, then display it */
1443     if (vp->bmp) {
1444         AVPicture pict = { { 0 } };
1445 #if CONFIG_AVFILTER
1446         avfilter_unref_bufferp(&vp->picref);
1447         vp->picref = src_frame->opaque;
1448 #endif
1449
1450         /* get a pointer on the bitmap */
1451         SDL_LockYUVOverlay (vp->bmp);
1452
1453         pict.data[0] = vp->bmp->pixels[0];
1454         pict.data[1] = vp->bmp->pixels[2];
1455         pict.data[2] = vp->bmp->pixels[1];
1456
1457         pict.linesize[0] = vp->bmp->pitches[0];
1458         pict.linesize[1] = vp->bmp->pitches[2];
1459         pict.linesize[2] = vp->bmp->pitches[1];
1460
1461 #if CONFIG_AVFILTER
1462         // FIXME use direct rendering
1463         av_picture_copy(&pict, (AVPicture *)src_frame,
1464                         src_frame->format, vp->width, vp->height);
1465 #else
1466         sws_flags = av_get_int(sws_opts, "sws_flags", NULL);
1467         is->img_convert_ctx = sws_getCachedContext(is->img_convert_ctx,
1468             vp->width, vp->height, src_frame->format, vp->width, vp->height,
1469             PIX_FMT_YUV420P, sws_flags, NULL, NULL, NULL);
1470         if (is->img_convert_ctx == NULL) {
1471             fprintf(stderr, "Cannot initialize the conversion context\n");
1472             exit(1);
1473         }
1474         sws_scale(is->img_convert_ctx, src_frame->data, src_frame->linesize,
1475                   0, vp->height, pict.data, pict.linesize);
1476 #endif
1477         /* update the bitmap content */
1478         SDL_UnlockYUVOverlay(vp->bmp);
1479
1480         vp->pts = pts;
1481         vp->pos = pos;
1482         vp->skip = 0;
1483
1484         /* now we can update the picture count */
1485         if (++is->pictq_windex == VIDEO_PICTURE_QUEUE_SIZE)
1486             is->pictq_windex = 0;
1487         SDL_LockMutex(is->pictq_mutex);
1488         is->pictq_size++;
1489         SDL_UnlockMutex(is->pictq_mutex);
1490     }
1491     return 0;
1492 }
1493
1494 static int get_video_frame(VideoState *is, AVFrame *frame, int64_t *pts, AVPacket *pkt)
1495 {
1496     int got_picture, i;
1497
1498     if (packet_queue_get(&is->videoq, pkt, 1) < 0)
1499         return -1;
1500
1501     if (pkt->data == flush_pkt.data) {
1502         avcodec_flush_buffers(is->video_st->codec);
1503
1504         SDL_LockMutex(is->pictq_mutex);
1505         // Make sure there are no long delay timers (ideally we should just flush the queue, but that's harder)
1506         for (i = 0; i < VIDEO_PICTURE_QUEUE_SIZE; i++) {
1507             is->pictq[i].skip = 1;
1508         }
1509         while (is->pictq_size && !is->videoq.abort_request) {
1510             SDL_CondWait(is->pictq_cond, is->pictq_mutex);
1511         }
1512         is->video_current_pos = -1;
1513         is->frame_last_pts = AV_NOPTS_VALUE;
1514         is->frame_last_duration = 0;
1515         is->frame_timer = (double)av_gettime() / 1000000.0;
1516         is->frame_last_dropped_pts = AV_NOPTS_VALUE;
1517         SDL_UnlockMutex(is->pictq_mutex);
1518
1519         return 0;
1520     }
1521
1522     if(avcodec_decode_video2(is->video_st->codec, frame, &got_picture, pkt) < 0)
1523         return 0;
1524
1525     if (got_picture) {
1526         int ret = 1;
1527
1528         if (decoder_reorder_pts == -1) {
1529             *pts = av_frame_get_best_effort_timestamp(frame);
1530         } else if (decoder_reorder_pts) {
1531             *pts = frame->pkt_pts;
1532         } else {
1533             *pts = frame->pkt_dts;
1534         }
1535
1536         if (*pts == AV_NOPTS_VALUE) {
1537             *pts = 0;
1538         }
1539
1540         if (((is->av_sync_type == AV_SYNC_AUDIO_MASTER && is->audio_st) || is->av_sync_type == AV_SYNC_EXTERNAL_CLOCK) &&
1541              (framedrop>0 || (framedrop && is->audio_st))) {
1542             SDL_LockMutex(is->pictq_mutex);
1543             if (is->frame_last_pts != AV_NOPTS_VALUE && *pts) {
1544                 double clockdiff = get_video_clock(is) - get_master_clock(is);
1545                 double dpts = av_q2d(is->video_st->time_base) * *pts;
1546                 double ptsdiff = dpts - is->frame_last_pts;
1547                 if (fabs(clockdiff) < AV_NOSYNC_THRESHOLD &&
1548                      ptsdiff > 0 && ptsdiff < AV_NOSYNC_THRESHOLD &&
1549                      clockdiff + ptsdiff - is->frame_last_filter_delay < 0) {
1550                     is->frame_last_dropped_pos = pkt->pos;
1551                     is->frame_last_dropped_pts = dpts;
1552                     is->frame_drops_early++;
1553                     ret = 0;
1554                 }
1555             }
1556             SDL_UnlockMutex(is->pictq_mutex);
1557         }
1558
1559         return ret;
1560     }
1561     return 0;
1562 }
1563
1564 #if CONFIG_AVFILTER
1565 static int configure_filtergraph(AVFilterGraph *graph, const char *filtergraph,
1566                                  AVFilterContext *source_ctx, AVFilterContext *sink_ctx)
1567 {
1568     int ret;
1569     AVFilterInOut *outputs = NULL, *inputs = NULL;
1570
1571     if (filtergraph) {
1572         outputs = avfilter_inout_alloc();
1573         inputs  = avfilter_inout_alloc();
1574         if (!outputs || !inputs) {
1575             ret = AVERROR(ENOMEM);
1576             goto fail;
1577         }
1578
1579         outputs->name       = av_strdup("in");
1580         outputs->filter_ctx = source_ctx;
1581         outputs->pad_idx    = 0;
1582         outputs->next       = NULL;
1583
1584         inputs->name        = av_strdup("out");
1585         inputs->filter_ctx  = sink_ctx;
1586         inputs->pad_idx     = 0;
1587         inputs->next        = NULL;
1588
1589         if ((ret = avfilter_graph_parse(graph, filtergraph, &inputs, &outputs, NULL)) < 0)
1590             goto fail;
1591     } else {
1592         if ((ret = avfilter_link(source_ctx, 0, sink_ctx, 0)) < 0)
1593             goto fail;
1594     }
1595
1596     return avfilter_graph_config(graph, NULL);
1597 fail:
1598     avfilter_inout_free(&outputs);
1599     avfilter_inout_free(&inputs);
1600     return ret;
1601 }
1602
1603 static int configure_video_filters(AVFilterGraph *graph, VideoState *is, const char *vfilters)
1604 {
1605     static const enum PixelFormat pix_fmts[] = { PIX_FMT_YUV420P, PIX_FMT_NONE };
1606     char sws_flags_str[128];
1607     char buffersrc_args[256];
1608     int ret;
1609     AVBufferSinkParams *buffersink_params = av_buffersink_params_alloc();
1610     AVFilterContext *filt_src = NULL, *filt_out = NULL, *filt_format, *filt_crop;
1611     AVCodecContext *codec = is->video_st->codec;
1612
1613     snprintf(sws_flags_str, sizeof(sws_flags_str), "flags=%d", sws_flags);
1614     graph->scale_sws_opts = av_strdup(sws_flags_str);
1615
1616     snprintf(buffersrc_args, sizeof(buffersrc_args),
1617              "video_size=%dx%d:pix_fmt=%d:time_base=%d/%d:pixel_aspect=%d/%d",
1618              codec->width, codec->height, codec->pix_fmt,
1619              is->video_st->time_base.num, is->video_st->time_base.den,
1620              codec->sample_aspect_ratio.num, codec->sample_aspect_ratio.den);
1621
1622     if ((ret = avfilter_graph_create_filter(&filt_src,
1623                                             avfilter_get_by_name("buffer"),
1624                                             "ffplay_buffer", buffersrc_args, NULL,
1625                                             graph)) < 0)
1626         return ret;
1627
1628     buffersink_params->pixel_fmts = pix_fmts;
1629     ret = avfilter_graph_create_filter(&filt_out,
1630                                        avfilter_get_by_name("ffbuffersink"),
1631                                        "ffplay_buffersink", NULL, buffersink_params, graph);
1632     av_freep(&buffersink_params);
1633     if (ret < 0)
1634         return ret;
1635
1636     /* The SDL YUV code does not handle odd widths/heights with some driver
1637      * combinations, therefore we crop the picture to an even width/height. */
1638     if ((ret = avfilter_graph_create_filter(&filt_crop,
1639                                             avfilter_get_by_name("crop"),
1640                                             "ffplay_crop", "floor(in_w/2)*2:floor(in_h/2)*2", NULL, graph)) < 0)
1641         return ret;
1642     if ((ret = avfilter_graph_create_filter(&filt_format,
1643                                             avfilter_get_by_name("format"),
1644                                             "format", "yuv420p", NULL, graph)) < 0)
1645         return ret;
1646     if ((ret = avfilter_link(filt_crop, 0, filt_format, 0)) < 0)
1647         return ret;
1648     if ((ret = avfilter_link(filt_format, 0, filt_out, 0)) < 0)
1649         return ret;
1650
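         /* insert the user-supplied filter chain (if any) between the buffer
            source and the crop -> format -> sink chain built above */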
1651     if ((ret = configure_filtergraph(graph, vfilters, filt_src, filt_crop)) < 0)
1652         return ret;
1653
1654     is->in_video_filter  = filt_src;
1655     is->out_video_filter = filt_out;
1656
1657     return ret;
1658 }
1659
1660 #endif  /* CONFIG_AVFILTER */
1661
1662 static int video_thread(void *arg)
1663 {
1664     AVPacket pkt = { 0 };
1665     VideoState *is = arg;
1666     AVFrame *frame = avcodec_alloc_frame();
1667     int64_t pts_int = AV_NOPTS_VALUE, pos = -1;
1668     double pts;
1669     int ret;
1670
1671 #if CONFIG_AVFILTER
1672     AVCodecContext *codec = is->video_st->codec;
1673     AVFilterGraph *graph = avfilter_graph_alloc();
1674     AVFilterContext *filt_out = NULL, *filt_in = NULL;
1675     int last_w = 0;
1676     int last_h = 0;
1677     enum PixelFormat last_format = -2;
1678
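         /* with direct rendering (DR1) the decoder writes into buffers from our
            own pool, so decoded frames can later be handed to the filter graph
            without copying the pixel data */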
1679     if (codec->codec->capabilities & CODEC_CAP_DR1) {
1680         is->use_dr1 = 1;
1681         codec->get_buffer     = codec_get_buffer;
1682         codec->release_buffer = codec_release_buffer;
1683         codec->opaque         = &is->buffer_pool;
1684     }
1685 #endif
1686
1687     for (;;) {
1688 #if CONFIG_AVFILTER
1689         AVFilterBufferRef *picref;
1690         AVRational tb;
1691 #endif
1692         while (is->paused && !is->videoq.abort_request)
1693             SDL_Delay(10);
1694
1695         avcodec_get_frame_defaults(frame);
1696         av_free_packet(&pkt);
1697
1698         ret = get_video_frame(is, frame, &pts_int, &pkt);
1699         if (ret < 0)
1700             goto the_end;
1701
1702         if (!ret)
1703             continue;
1704
1705 #if CONFIG_AVFILTER
1706         if (   last_w != is->video_st->codec->width
1707             || last_h != is->video_st->codec->height
1708             || last_format != is->video_st->codec->pix_fmt) {
1709             av_log(NULL, AV_LOG_INFO, "Video frame changed from size:%dx%d format:%d to size:%dx%d format:%d\n",
1710                    last_w, last_h, last_format, is->video_st->codec->width, is->video_st->codec->height, is->video_st->codec->pix_fmt);
1711             avfilter_graph_free(&graph);
1712             graph = avfilter_graph_alloc();
1713             if ((ret = configure_video_filters(graph, is, vfilters)) < 0) {
1714                 SDL_Event event;
1715                 event.type = FF_QUIT_EVENT;
1716                 event.user.data1 = is;
1717                 SDL_PushEvent(&event);
1718                 av_free_packet(&pkt);
1719                 goto the_end;
1720             }
1721             filt_in  = is->in_video_filter;
1722             filt_out = is->out_video_filter;
1723             last_w = is->video_st->codec->width;
1724             last_h = is->video_st->codec->height;
1725             last_format = is->video_st->codec->pix_fmt;
1726         }
1727
1728         frame->pts = pts_int;
1729         frame->sample_aspect_ratio = av_guess_sample_aspect_ratio(is->ic, is->video_st, frame);
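             /* DR1 path: wrap the decoder's buffer directly into a filter buffer
                reference (no copy); otherwise let the buffer source copy the frame */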
1730         if (is->use_dr1 && frame->opaque) {
1731             FrameBuffer      *buf = frame->opaque;
1732             AVFilterBufferRef *fb = avfilter_get_video_buffer_ref_from_arrays(
1733                                         frame->data, frame->linesize,
1734                                         AV_PERM_READ | AV_PERM_PRESERVE,
1735                                         frame->width, frame->height,
1736                                         frame->format);
1737
1738             avfilter_copy_frame_props(fb, frame);
1739             fb->buf->priv           = buf;
1740             fb->buf->free           = filter_release_buffer;
1741
1742             buf->refcount++;
1743             av_buffersrc_add_ref(filt_in, fb, AV_BUFFERSRC_FLAG_NO_COPY);
1744
1745         } else
1746             av_buffersrc_write_frame(filt_in, frame);
1747
1748         av_free_packet(&pkt);
1749
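             /* pull every frame the filter graph has ready and queue it for display */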
1750         while (ret >= 0) {
1751             is->frame_last_returned_time = av_gettime() / 1000000.0;
1752
1753             ret = av_buffersink_get_buffer_ref(filt_out, &picref, 0);
1754             if (ret < 0) {
1755                 ret = 0;
1756                 break;
1757             }
1758
1759             is->frame_last_filter_delay = av_gettime() / 1000000.0 - is->frame_last_returned_time;
1760             if (fabs(is->frame_last_filter_delay) > AV_NOSYNC_THRESHOLD / 10.0)
1761                 is->frame_last_filter_delay = 0;
1762
1763             avfilter_copy_buf_props(frame, picref);
1764
1765             pts_int = picref->pts;
1766             tb      = filt_out->inputs[0]->time_base;
1767             pos     = picref->pos;
1768             frame->opaque = picref;
1769
1770             if (av_cmp_q(tb, is->video_st->time_base)) {
1771                 av_unused int64_t pts1 = pts_int;
1772                 pts_int = av_rescale_q(pts_int, tb, is->video_st->time_base);
1773                 av_dlog(NULL, "video_thread(): "
1774                         "tb:%d/%d pts:%"PRId64" -> tb:%d/%d pts:%"PRId64"\n",
1775                         tb.num, tb.den, pts1,
1776                         is->video_st->time_base.num, is->video_st->time_base.den, pts_int);
1777             }
1778             pts = pts_int * av_q2d(is->video_st->time_base);
1779             ret = queue_picture(is, frame, pts, pos);
1780         }
1781 #else
1782         pts = pts_int * av_q2d(is->video_st->time_base);
1783         ret = queue_picture(is, frame, pts, pkt.pos);
1784 #endif
1785
1786         if (ret < 0)
1787             goto the_end;
1788
1789         if (is->step)
1790             stream_toggle_pause(is);
1791     }
1792  the_end:
1793     avcodec_flush_buffers(is->video_st->codec);
1794 #if CONFIG_AVFILTER
1795     avfilter_graph_free(&graph);
1796 #endif
1797     av_free_packet(&pkt);
1798     avcodec_free_frame(&frame);
1799     return 0;
1800 }
1801
1802 static int subtitle_thread(void *arg)
1803 {
1804     VideoState *is = arg;
1805     SubPicture *sp;
1806     AVPacket pkt1, *pkt = &pkt1;
1807     int got_subtitle;
1808     double pts;
1809     int i, j;
1810     int r, g, b, y, u, v, a;
1811
1812     for (;;) {
1813         while (is->paused && !is->subtitleq.abort_request) {
1814             SDL_Delay(10);
1815         }
1816         if (packet_queue_get(&is->subtitleq, pkt, 1) < 0)
1817             break;
1818
1819         if (pkt->data == flush_pkt.data) {
1820             avcodec_flush_buffers(is->subtitle_st->codec);
1821             continue;
1822         }
1823         SDL_LockMutex(is->subpq_mutex);
1824         while (is->subpq_size >= SUBPICTURE_QUEUE_SIZE &&
1825                !is->subtitleq.abort_request) {
1826             SDL_CondWait(is->subpq_cond, is->subpq_mutex);
1827         }
1828         SDL_UnlockMutex(is->subpq_mutex);
1829
1830         if (is->subtitleq.abort_request)
1831             return 0;
1832
1833         sp = &is->subpq[is->subpq_windex];
1834
1835         /* NOTE: pts is the PTS of the _first_ subtitle picture beginning in
1836            this packet, if any */
1837         pts = 0;
1838         if (pkt->pts != AV_NOPTS_VALUE)
1839             pts = av_q2d(is->subtitle_st->time_base) * pkt->pts;
1840
1841         avcodec_decode_subtitle2(is->subtitle_st->codec, &sp->sub,
1842                                  &got_subtitle, pkt);
1843         if (got_subtitle && sp->sub.format == 0) {
1844             if (sp->sub.pts != AV_NOPTS_VALUE)
1845                 pts = sp->sub.pts / (double)AV_TIME_BASE;
1846             sp->pts = pts;
1847
1848             for (i = 0; i < sp->sub.num_rects; i++)
1849             {
1850                 for (j = 0; j < sp->sub.rects[i]->nb_colors; j++)
1851                 {
1852                     RGBA_IN(r, g, b, a, (uint32_t*)sp->sub.rects[i]->pict.data[1] + j);
1853                     y = RGB_TO_Y_CCIR(r, g, b);
1854                     u = RGB_TO_U_CCIR(r, g, b, 0);
1855                     v = RGB_TO_V_CCIR(r, g, b, 0);
1856                     YUVA_OUT((uint32_t*)sp->sub.rects[i]->pict.data[1] + j, y, u, v, a);
1857                 }
1858             }
1859
1860             /* now we can update the picture count */
1861             if (++is->subpq_windex == SUBPICTURE_QUEUE_SIZE)
1862                 is->subpq_windex = 0;
1863             SDL_LockMutex(is->subpq_mutex);
1864             is->subpq_size++;
1865             SDL_UnlockMutex(is->subpq_mutex);
1866         }
1867         av_free_packet(pkt);
1868     }
1869     return 0;
1870 }
1871
1872 /* copy decoded samples into the sample array used by the audio visualization (waves/RDFT display) */
1873 static void update_sample_display(VideoState *is, short *samples, int samples_size)
1874 {
1875     int size, len;
1876
1877     size = samples_size / sizeof(short);
1878     while (size > 0) {
1879         len = SAMPLE_ARRAY_SIZE - is->sample_array_index;
1880         if (len > size)
1881             len = size;
1882         memcpy(is->sample_array + is->sample_array_index, samples, len * sizeof(short));
1883         samples += len;
1884         is->sample_array_index += len;
1885         if (is->sample_array_index >= SAMPLE_ARRAY_SIZE)
1886             is->sample_array_index = 0;
1887         size -= len;
1888     }
1889 }
1890
1891 /* return the wanted number of samples to get better sync if the sync type is
1892  * video master or external clock master */
1893 static int synchronize_audio(VideoState *is, int nb_samples)
1894 {
1895     int wanted_nb_samples = nb_samples;
1896
1897     /* if not master, then we try to remove or add samples to correct the clock */
1898     if (((is->av_sync_type == AV_SYNC_VIDEO_MASTER && is->video_st) ||
1899          is->av_sync_type == AV_SYNC_EXTERNAL_CLOCK)) {
1900         double diff, avg_diff;
1901         int min_nb_samples, max_nb_samples;
1902
1903         diff = get_audio_clock(is) - get_master_clock(is);
1904
1905         if (fabs(diff) < AV_NOSYNC_THRESHOLD) {
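                 /* exponentially weighted running sum: cum = diff + coef * cum, so
                    cum * (1 - coef) approximates an average over roughly the last
                    AUDIO_DIFF_AVG_NB measurements */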
1906             is->audio_diff_cum = diff + is->audio_diff_avg_coef * is->audio_diff_cum;
1907             if (is->audio_diff_avg_count < AUDIO_DIFF_AVG_NB) {
1908                 /* not enough measures to have a correct estimate */
1909                 is->audio_diff_avg_count++;
1910             } else {
1911                 /* estimate the A-V difference */
1912                 avg_diff = is->audio_diff_cum * (1.0 - is->audio_diff_avg_coef);
1913
1914                 if (fabs(avg_diff) >= is->audio_diff_threshold) {
1915                     wanted_nb_samples = nb_samples + (int)(diff * is->audio_src.freq);
1916                     min_nb_samples = ((nb_samples * (100 - SAMPLE_CORRECTION_PERCENT_MAX) / 100));
1917                     max_nb_samples = ((nb_samples * (100 + SAMPLE_CORRECTION_PERCENT_MAX) / 100));
1918                     wanted_nb_samples = FFMIN(FFMAX(wanted_nb_samples, min_nb_samples), max_nb_samples);
1919                 }
1920                 av_dlog(NULL, "diff=%f adiff=%f sample_diff=%d apts=%0.3f vpts=%0.3f %f\n",
1921                         diff, avg_diff, wanted_nb_samples - nb_samples,
1922                         is->audio_clock, is->video_clock, is->audio_diff_threshold);
1923             }
1924         } else {
1925             /* difference is too big: probably initial PTS errors, so
1926                reset the A-V filter */
1927             is->audio_diff_avg_count = 0;
1928             is->audio_diff_cum       = 0;
1929         }
1930     }
1931
1932     return wanted_nb_samples;
1933 }
1934
1935 /* decode one audio frame and return its uncompressed size */
1936 static int audio_decode_frame(VideoState *is, double *pts_ptr)
1937 {
1938     AVPacket *pkt_temp = &is->audio_pkt_temp;
1939     AVPacket *pkt = &is->audio_pkt;
1940     AVCodecContext *dec = is->audio_st->codec;
1941     int len1, len2, data_size, resampled_data_size;
1942     int64_t dec_channel_layout;
1943     int got_frame;
1944     double pts;
1945     int new_packet = 0;
1946     int flush_complete = 0;
1947     int wanted_nb_samples;
1948
1949     for (;;) {
1950         /* NOTE: the audio packet can contain several frames */
1951         while (pkt_temp->size > 0 || (!pkt_temp->data && new_packet)) {
1952             if (!is->frame) {
1953                 if (!(is->frame = avcodec_alloc_frame()))
1954                     return AVERROR(ENOMEM);
1955             } else
1956                 avcodec_get_frame_defaults(is->frame);
1957
1958             if (is->paused)
1959                 return -1;
1960
1961             if (flush_complete)
1962                 break;
1963             new_packet = 0;
1964             len1 = avcodec_decode_audio4(dec, is->frame, &got_frame, pkt_temp);
1965             if (len1 < 0) {
1966                 /* on decode error, skip the rest of the packet */
1967                 pkt_temp->size = 0;
1968                 break;
1969             }
1970
1971             pkt_temp->data += len1;
1972             pkt_temp->size -= len1;
1973
1974             if (!got_frame) {
1975                 /* stop sending empty packets if the decoder is finished */
1976                 if (!pkt_temp->data && dec->codec->capabilities & CODEC_CAP_DELAY)
1977                     flush_complete = 1;
1978                 continue;
1979             }
1980             data_size = av_samples_get_buffer_size(NULL, dec->channels,
1981                                                    is->frame->nb_samples,
1982                                                    dec->sample_fmt, 1);
1983
1984             dec_channel_layout =
1985                 (dec->channel_layout && dec->channels == av_get_channel_layout_nb_channels(dec->channel_layout)) ?
1986                 dec->channel_layout : av_get_default_channel_layout(dec->channels);
1987             wanted_nb_samples = synchronize_audio(is, is->frame->nb_samples);
1988
1989             if (dec->sample_fmt    != is->audio_src.fmt            ||
1990                 dec_channel_layout != is->audio_src.channel_layout ||
1991                 dec->sample_rate   != is->audio_src.freq           ||
1992                 (wanted_nb_samples != is->frame->nb_samples && !is->swr_ctx)) {
1993                 swr_free(&is->swr_ctx);
1994                 is->swr_ctx = swr_alloc_set_opts(NULL,
1995                                                  is->audio_tgt.channel_layout, is->audio_tgt.fmt, is->audio_tgt.freq,
1996                                                  dec_channel_layout,           dec->sample_fmt,   dec->sample_rate,
1997                                                  0, NULL);
1998                 if (!is->swr_ctx || swr_init(is->swr_ctx) < 0) {
1999                     fprintf(stderr, "Cannot create sample rate converter for conversion of %d Hz %s %d channels to %d Hz %s %d channels!\n",
2000                         dec->sample_rate,   av_get_sample_fmt_name(dec->sample_fmt),   dec->channels,
2001                         is->audio_tgt.freq, av_get_sample_fmt_name(is->audio_tgt.fmt), is->audio_tgt.channels);
2002                     break;
2003                 }
2004                 is->audio_src.channel_layout = dec_channel_layout;
2005                 is->audio_src.channels = dec->channels;
2006                 is->audio_src.freq = dec->sample_rate;
2007                 is->audio_src.fmt = dec->sample_fmt;
2008             }
2009
2010             if (is->swr_ctx) {
2011                 const uint8_t **in = (const uint8_t **)is->frame->extended_data;
2012                 uint8_t *out[] = {is->audio_buf2};
2013                 int out_count = sizeof(is->audio_buf2) / is->audio_tgt.channels / av_get_bytes_per_sample(is->audio_tgt.fmt);
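                     /* sync correction: ask the resampler to stretch or squeeze the
                        audio by the sample delta requested by synchronize_audio(),
                        scaled to the output sample rate */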
2014                 if (wanted_nb_samples != is->frame->nb_samples) {
2015                     if (swr_set_compensation(is->swr_ctx, (wanted_nb_samples - is->frame->nb_samples) * is->audio_tgt.freq / dec->sample_rate,
2016                                                 wanted_nb_samples * is->audio_tgt.freq / dec->sample_rate) < 0) {
2017                         fprintf(stderr, "swr_set_compensation() failed\n");
2018                         break;
2019                     }
2020                 }
2021                 len2 = swr_convert(is->swr_ctx, out, out_count, in, is->frame->nb_samples);
2022                 if (len2 < 0) {
2023                     fprintf(stderr, "swr_convert() failed\n");
2024                     break;
2025                 }
2026                 if (len2 == out_count) {
2027                     fprintf(stderr, "warning: audio buffer is probably too small\n");
2028                     swr_init(is->swr_ctx);
2029                 }
2030                 is->audio_buf = is->audio_buf2;
2031                 resampled_data_size = len2 * is->audio_tgt.channels * av_get_bytes_per_sample(is->audio_tgt.fmt);
2032             } else {
2033                 is->audio_buf = is->frame->data[0];
2034                 resampled_data_size = data_size;
2035             }
2036
2037             /* use the running audio clock as the pts and advance it by the duration of the decoded data */
2038             pts = is->audio_clock;
2039             *pts_ptr = pts;
2040             is->audio_clock += (double)data_size /
2041                 (dec->channels * dec->sample_rate * av_get_bytes_per_sample(dec->sample_fmt));
2042 #ifdef DEBUG
2043             {
2044                 static double last_clock;
2045                 printf("audio: delay=%0.3f clock=%0.3f pts=%0.3f\n",
2046                        is->audio_clock - last_clock,
2047                        is->audio_clock, pts);
2048                 last_clock = is->audio_clock;
2049             }
2050 #endif
2051             return resampled_data_size;
2052         }
2053
2054         /* free the current packet */
2055         if (pkt->data)
2056             av_free_packet(pkt);
2057         memset(pkt_temp, 0, sizeof(*pkt_temp));
2058
2059         if (is->paused || is->audioq.abort_request) {
2060             return -1;
2061         }
2062
2063         if (is->audioq.nb_packets == 0)
2064             SDL_CondSignal(is->continue_read_thread);
2065
2066         /* read next packet */
2067         if ((new_packet = packet_queue_get(&is->audioq, pkt, 1)) < 0)
2068             return -1;
2069
2070         if (pkt->data == flush_pkt.data) {
2071             avcodec_flush_buffers(dec);
2072             flush_complete = 0;
2073         }
2074
2075         *pkt_temp = *pkt;
2076
2077         /* update the audio clock with the packet pts, if available */
2078         if (pkt->pts != AV_NOPTS_VALUE) {
2079             is->audio_clock = av_q2d(is->audio_st->time_base)*pkt->pts;
2080         }
2081     }
2082 }
2083
2084 /* prepare a new audio buffer */
2085 static void sdl_audio_callback(void *opaque, Uint8 *stream, int len)
2086 {
2087     VideoState *is = opaque;
2088     int audio_size, len1;
2089     int bytes_per_sec;
2090     int frame_size = av_samples_get_buffer_size(NULL, is->audio_tgt.channels, 1, is->audio_tgt.fmt, 1);
2091     double pts;
2092
2093     audio_callback_time = av_gettime();
2094
2095     while (len > 0) {
2096         if (is->audio_buf_index >= is->audio_buf_size) {
2097            audio_size = audio_decode_frame(is, &pts);
2098            if (audio_size < 0) {
2099                 /* if error, just output silence */
2100                is->audio_buf      = is->silence_buf;
2101                is->audio_buf_size = sizeof(is->silence_buf) / frame_size * frame_size;
2102            } else {
2103                if (is->show_mode != SHOW_MODE_VIDEO)
2104                    update_sample_display(is, (int16_t *)is->audio_buf, audio_size);
2105                is->audio_buf_size = audio_size;
2106            }
2107            is->audio_buf_index = 0;
2108         }
2109         len1 = is->audio_buf_size - is->audio_buf_index;
2110         if (len1 > len)
2111             len1 = len;
2112         memcpy(stream, (uint8_t *)is->audio_buf + is->audio_buf_index, len1);
2113         len -= len1;
2114         stream += len1;
2115         is->audio_buf_index += len1;
2116     }
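         /* estimate the pts of the audio currently being heard: the clock of the
            last decoded data minus what is still queued in the SDL buffer and in
            our own buffer */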
2117     bytes_per_sec = is->audio_tgt.freq * is->audio_tgt.channels * av_get_bytes_per_sample(is->audio_tgt.fmt);
2118     is->audio_write_buf_size = is->audio_buf_size - is->audio_buf_index;
2119     /* Let's assume the audio driver that is used by SDL has two periods. */
2120     is->audio_current_pts = is->audio_clock - (double)(2 * is->audio_hw_buf_size + is->audio_write_buf_size) / bytes_per_sec;
2121     is->audio_current_pts_drift = is->audio_current_pts - audio_callback_time / 1000000.0;
2122 }
2123
2124 static int audio_open(void *opaque, int64_t wanted_channel_layout, int wanted_nb_channels, int wanted_sample_rate, struct AudioParams *audio_hw_params)
2125 {
2126     SDL_AudioSpec wanted_spec, spec;
2127     const char *env;
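         /* fallback table: if SDL refuses to open the device with N channels,
            try next_nb_channels[N] channels next (0 means give up) */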
2128     const int next_nb_channels[] = {0, 0, 1, 6, 2, 6, 4, 6};
2129
2130     env = SDL_getenv("SDL_AUDIO_CHANNELS");
2131     if (env) {
2132         wanted_nb_channels = atoi(env);
2133         wanted_channel_layout = av_get_default_channel_layout(wanted_nb_channels);
2134     }
2135     if (!wanted_channel_layout || wanted_nb_channels != av_get_channel_layout_nb_channels(wanted_channel_layout)) {
2136         wanted_channel_layout = av_get_default_channel_layout(wanted_nb_channels);
2137         wanted_channel_layout &= ~AV_CH_LAYOUT_STEREO_DOWNMIX;
2138     }
2139     wanted_spec.channels = av_get_channel_layout_nb_channels(wanted_channel_layout);
2140     wanted_spec.freq = wanted_sample_rate;
2141     if (wanted_spec.freq <= 0 || wanted_spec.channels <= 0) {
2142         fprintf(stderr, "Invalid sample rate or channel count!\n");
2143         return -1;
2144     }
2145     wanted_spec.format = AUDIO_S16SYS;
2146     wanted_spec.silence = 0;
2147     wanted_spec.samples = SDL_AUDIO_BUFFER_SIZE;
2148     wanted_spec.callback = sdl_audio_callback;
2149     wanted_spec.userdata = opaque;
2150     while (SDL_OpenAudio(&wanted_spec, &spec) < 0) {
2151         fprintf(stderr, "SDL_OpenAudio (%d channels): %s\n", wanted_spec.channels, SDL_GetError());
2152         wanted_spec.channels = next_nb_channels[FFMIN(7, wanted_spec.channels)];
2153         if (!wanted_spec.channels) {
2154             fprintf(stderr, "No more channel combinations to try, audio open failed\n");
2155             return -1;
2156         }
2157         wanted_channel_layout = av_get_default_channel_layout(wanted_spec.channels);
2158     }
2159     if (spec.format != AUDIO_S16SYS) {
2160         fprintf(stderr, "SDL advised audio format %d is not supported!\n", spec.format);
2161         return -1;
2162     }
2163     if (spec.channels != wanted_spec.channels) {
2164         wanted_channel_layout = av_get_default_channel_layout(spec.channels);
2165         if (!wanted_channel_layout) {
2166             fprintf(stderr, "SDL advised channel count %d is not supported!\n", spec.channels);
2167             return -1;
2168         }
2169     }
2170
2171     audio_hw_params->fmt = AV_SAMPLE_FMT_S16;
2172     audio_hw_params->freq = spec.freq;
2173     audio_hw_params->channel_layout = wanted_channel_layout;
2174     audio_hw_params->channels = spec.channels;
2175     return spec.size;
2176 }
2177
2178 /* open a given stream. Return 0 if OK */
2179 static int stream_component_open(VideoState *is, int stream_index)
2180 {
2181     AVFormatContext *ic = is->ic;
2182     AVCodecContext *avctx;
2183     AVCodec *codec;
2184     AVDictionary *opts;
2185     AVDictionaryEntry *t = NULL;
2186
2187     if (stream_index < 0 || stream_index >= ic->nb_streams)
2188         return -1;
2189     avctx = ic->streams[stream_index]->codec;
2190
2191     codec = avcodec_find_decoder(avctx->codec_id);
2192     opts = filter_codec_opts(codec_opts, avctx->codec_id, ic, ic->streams[stream_index], codec);
2193
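         /* remember the last stream of each type and honour a user-forced decoder,
            if one was given on the command line */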
2194     switch(avctx->codec_type){
2195         case AVMEDIA_TYPE_AUDIO   : is->last_audio_stream    = stream_index; if(audio_codec_name   ) codec= avcodec_find_decoder_by_name(   audio_codec_name); break;
2196         case AVMEDIA_TYPE_SUBTITLE: is->last_subtitle_stream = stream_index; if(subtitle_codec_name) codec= avcodec_find_decoder_by_name(subtitle_codec_name); break;
2197         case AVMEDIA_TYPE_VIDEO   : is->last_video_stream    = stream_index; if(video_codec_name   ) codec= avcodec_find_decoder_by_name(   video_codec_name); break;
2198     }
2199     if (!codec)
2200         return -1;
2201
2202     avctx->workaround_bugs   = workaround_bugs;
2203     avctx->lowres            = lowres;
2204     if (avctx->lowres > codec->max_lowres) {
2205         av_log(avctx, AV_LOG_WARNING, "The maximum value for lowres supported by the decoder is %d\n",
2206                 codec->max_lowres);
2207         avctx->lowres = codec->max_lowres;
2208     }
2209     avctx->idct_algo         = idct;
2210     avctx->skip_frame        = skip_frame;
2211     avctx->skip_idct         = skip_idct;
2212     avctx->skip_loop_filter  = skip_loop_filter;
2213     avctx->error_concealment = error_concealment;
2214
2215     if (avctx->lowres) avctx->flags |= CODEC_FLAG_EMU_EDGE;
2216     if (fast)          avctx->flags2 |= CODEC_FLAG2_FAST;
2217     if (codec->capabilities & CODEC_CAP_DR1)
2218         avctx->flags |= CODEC_FLAG_EMU_EDGE;
2219
2220     if (!av_dict_get(opts, "threads", NULL, 0))
2221         av_dict_set(&opts, "threads", "auto", 0);
2222     if (!codec ||
2223         avcodec_open2(avctx, codec, &opts) < 0)
2224         return -1;
2225     if ((t = av_dict_get(opts, "", NULL, AV_DICT_IGNORE_SUFFIX))) {
2226         av_log(NULL, AV_LOG_ERROR, "Option %s not found.\n", t->key);
2227         return AVERROR_OPTION_NOT_FOUND;
2228     }
2229
2230     /* prepare audio output */
2231     if (avctx->codec_type == AVMEDIA_TYPE_AUDIO) {
2232         int audio_hw_buf_size = audio_open(is, avctx->channel_layout, avctx->channels, avctx->sample_rate, &is->audio_src);
2233         if (audio_hw_buf_size < 0)
2234             return -1;
2235         is->audio_hw_buf_size = audio_hw_buf_size;
2236         is->audio_tgt = is->audio_src;
2237     }
2238
2239     ic->streams[stream_index]->discard = AVDISCARD_DEFAULT;
2240     switch (avctx->codec_type) {
2241     case AVMEDIA_TYPE_AUDIO:
2242         is->audio_stream = stream_index;
2243         is->audio_st = ic->streams[stream_index];
2244         is->audio_buf_size  = 0;
2245         is->audio_buf_index = 0;
2246
2247         /* init averaging filter */
2248         is->audio_diff_avg_coef  = exp(log(0.01) / AUDIO_DIFF_AVG_NB);
2249         is->audio_diff_avg_count = 0;
2250         /* since we do not have a precise enough measure of the audio FIFO fullness,
2251            we only correct audio sync if the error is larger than this threshold */
2252         is->audio_diff_threshold = 2.0 * is->audio_hw_buf_size / av_samples_get_buffer_size(NULL, is->audio_tgt.channels, is->audio_tgt.freq, is->audio_tgt.fmt, 1);
2253
2254         memset(&is->audio_pkt, 0, sizeof(is->audio_pkt));
2255         memset(&is->audio_pkt_temp, 0, sizeof(is->audio_pkt_temp));
2256         packet_queue_start(&is->audioq);
2257         SDL_PauseAudio(0);
2258         break;
2259     case AVMEDIA_TYPE_VIDEO:
2260         is->video_stream = stream_index;
2261         is->video_st = ic->streams[stream_index];
2262
2263         packet_queue_start(&is->videoq);
2264         is->video_tid = SDL_CreateThread(video_thread, is);
2265         break;
2266     case AVMEDIA_TYPE_SUBTITLE:
2267         is->subtitle_stream = stream_index;
2268         is->subtitle_st = ic->streams[stream_index];
2269         packet_queue_start(&is->subtitleq);
2270
2271         is->subtitle_tid = SDL_CreateThread(subtitle_thread, is);
2272         break;
2273     default:
2274         break;
2275     }
2276     return 0;
2277 }
2278
2279 static void stream_component_close(VideoState *is, int stream_index)
2280 {
2281     AVFormatContext *ic = is->ic;
2282     AVCodecContext *avctx;
2283
2284     if (stream_index < 0 || stream_index >= ic->nb_streams)
2285         return;
2286     avctx = ic->streams[stream_index]->codec;
2287
2288     switch (avctx->codec_type) {
2289     case AVMEDIA_TYPE_AUDIO:
2290         packet_queue_abort(&is->audioq);
2291
2292         SDL_CloseAudio();
2293
2294         packet_queue_flush(&is->audioq);
2295         av_free_packet(&is->audio_pkt);
2296         swr_free(&is->swr_ctx);
2297         av_freep(&is->audio_buf1);
2298         is->audio_buf = NULL;
2299         avcodec_free_frame(&is->frame);
2300
2301         if (is->rdft) {
2302             av_rdft_end(is->rdft);
2303             av_freep(&is->rdft_data);
2304             is->rdft = NULL;
2305             is->rdft_bits = 0;
2306         }
2307         break;
2308     case AVMEDIA_TYPE_VIDEO:
2309         packet_queue_abort(&is->videoq);
2310
2311         /* note: we also signal this condition to make sure we unblock the
2312            video thread in all cases */
2313         SDL_LockMutex(is->pictq_mutex);
2314         SDL_CondSignal(is->pictq_cond);
2315         SDL_UnlockMutex(is->pictq_mutex);
2316
2317         SDL_WaitThread(is->video_tid, NULL);
2318
2319         packet_queue_flush(&is->videoq);
2320         break;
2321     case AVMEDIA_TYPE_SUBTITLE:
2322         packet_queue_abort(&is->subtitleq);
2323
2324         /* note: we also signal this condition to make sure we unblock the
2325            subtitle thread in all cases */
2326         SDL_LockMutex(is->subpq_mutex);
2327         is->subtitle_stream_changed = 1;
2328
2329         SDL_CondSignal(is->subpq_cond);
2330         SDL_UnlockMutex(is->subpq_mutex);
2331
2332         SDL_WaitThread(is->subtitle_tid, NULL);
2333
2334         packet_queue_flush(&is->subtitleq);
2335         break;
2336     default:
2337         break;
2338     }
2339
2340     ic->streams[stream_index]->discard = AVDISCARD_ALL;
2341     avcodec_close(avctx);
2342 #if CONFIG_AVFILTER
2343     free_buffer_pool(&is->buffer_pool);
2344 #endif
2345     switch (avctx->codec_type) {
2346     case AVMEDIA_TYPE_AUDIO:
2347         is->audio_st = NULL;
2348         is->audio_stream = -1;
2349         break;
2350     case AVMEDIA_TYPE_VIDEO:
2351         is->video_st = NULL;
2352         is->video_stream = -1;
2353         break;
2354     case AVMEDIA_TYPE_SUBTITLE:
2355         is->subtitle_st = NULL;
2356         is->subtitle_stream = -1;
2357         break;
2358     default:
2359         break;
2360     }
2361 }
2362
2363 static int decode_interrupt_cb(void *ctx)
2364 {
2365     VideoState *is = ctx;
2366     return is->abort_request;
2367 }
2368
2369 /* this thread gets the stream from the disk or the network */
2370 static int read_thread(void *arg)
2371 {
2372     VideoState *is = arg;
2373     AVFormatContext *ic = NULL;
2374     int err, i, ret;
2375     int st_index[AVMEDIA_TYPE_NB];
2376     AVPacket pkt1, *pkt = &pkt1;
2377     int eof = 0;
2378     int pkt_in_play_range = 0;
2379     AVDictionaryEntry *t;
2380     AVDictionary **opts;
2381     int orig_nb_streams;
2382     SDL_mutex *wait_mutex = SDL_CreateMutex();
2383
2384     memset(st_index, -1, sizeof(st_index));
2385     is->last_video_stream = is->video_stream = -1;
2386     is->last_audio_stream = is->audio_stream = -1;
2387     is->last_subtitle_stream = is->subtitle_stream = -1;
2388
2389     ic = avformat_alloc_context();
2390     ic->interrupt_callback.callback = decode_interrupt_cb;
2391     ic->interrupt_callback.opaque = is;
2392     err = avformat_open_input(&ic, is->filename, is->iformat, &format_opts);
2393     if (err < 0) {
2394         print_error(is->filename, err);
2395         ret = -1;
2396         goto fail;
2397     }
2398     if ((t = av_dict_get(format_opts, "", NULL, AV_DICT_IGNORE_SUFFIX))) {
2399         av_log(NULL, AV_LOG_ERROR, "Option %s not found.\n", t->key);
2400         ret = AVERROR_OPTION_NOT_FOUND;
2401         goto fail;
2402     }
2403     is->ic = ic;
2404
2405     if (genpts)
2406         ic->flags |= AVFMT_FLAG_GENPTS;
2407
2408     opts = setup_find_stream_info_opts(ic, codec_opts);
2409     orig_nb_streams = ic->nb_streams;
2410
2411     err = avformat_find_stream_info(ic, opts);
2412     if (err < 0) {
2413         fprintf(stderr, "%s: could not find codec parameters\n", is->filename);
2414         ret = -1;
2415         goto fail;
2416     }
2417     for (i = 0; i < orig_nb_streams; i++)
2418         av_dict_free(&opts[i]);
2419     av_freep(&opts);
2420
2421     if (ic->pb)
2422         ic->pb->eof_reached = 0; // FIXME hack, ffplay maybe should not use url_feof() to test for the end
2423
2424     if (seek_by_bytes < 0)
2425         seek_by_bytes = !!(ic->iformat->flags & AVFMT_TS_DISCONT);
2426
2427     /* if seeking was requested, execute it */
2428     if (start_time != AV_NOPTS_VALUE) {
2429         int64_t timestamp;
2430
2431         timestamp = start_time;
2432         /* add the stream start time */
2433         if (ic->start_time != AV_NOPTS_VALUE)
2434             timestamp += ic->start_time;
2435         ret = avformat_seek_file(ic, -1, INT64_MIN, timestamp, INT64_MAX, 0);
2436         if (ret < 0) {
2437             fprintf(stderr, "%s: could not seek to position %0.3f\n",
2438                     is->filename, (double)timestamp / AV_TIME_BASE);
2439         }
2440     }
2441
2442     for (i = 0; i < ic->nb_streams; i++)
2443         ic->streams[i]->discard = AVDISCARD_ALL;
2444     if (!video_disable)
2445         st_index[AVMEDIA_TYPE_VIDEO] =
2446             av_find_best_stream(ic, AVMEDIA_TYPE_VIDEO,
2447                                 wanted_stream[AVMEDIA_TYPE_VIDEO], -1, NULL, 0);
2448     if (!audio_disable)
2449         st_index[AVMEDIA_TYPE_AUDIO] =
2450             av_find_best_stream(ic, AVMEDIA_TYPE_AUDIO,
2451                                 wanted_stream[AVMEDIA_TYPE_AUDIO],
2452                                 st_index[AVMEDIA_TYPE_VIDEO],
2453                                 NULL, 0);
2454     if (!video_disable)
2455         st_index[AVMEDIA_TYPE_SUBTITLE] =
2456             av_find_best_stream(ic, AVMEDIA_TYPE_SUBTITLE,
2457                                 wanted_stream[AVMEDIA_TYPE_SUBTITLE],
2458                                 (st_index[AVMEDIA_TYPE_AUDIO] >= 0 ?
2459                                  st_index[AVMEDIA_TYPE_AUDIO] :
2460                                  st_index[AVMEDIA_TYPE_VIDEO]),
2461                                 NULL, 0);
2462     if (show_status) {
2463         av_dump_format(ic, 0, is->filename, 0);
2464     }
2465
2466     is->show_mode = show_mode;
2467
2468     /* open the streams */
2469     if (st_index[AVMEDIA_TYPE_AUDIO] >= 0) {
2470         stream_component_open(is, st_index[AVMEDIA_TYPE_AUDIO]);
2471     }
2472
2473     ret = -1;
2474     if (st_index[AVMEDIA_TYPE_VIDEO] >= 0) {
2475         ret = stream_component_open(is, st_index[AVMEDIA_TYPE_VIDEO]);
2476     }
2477     is->refresh_tid = SDL_CreateThread(refresh_thread, is);
2478     if (is->show_mode == SHOW_MODE_NONE)
2479         is->show_mode = ret >= 0 ? SHOW_MODE_VIDEO : SHOW_MODE_RDFT;
2480
2481     if (st_index[AVMEDIA_TYPE_SUBTITLE] >= 0) {
2482         stream_component_open(is, st_index[AVMEDIA_TYPE_SUBTITLE]);
2483     }
2484
2485     if (is->video_stream < 0 && is->audio_stream < 0) {
2486         fprintf(stderr, "%s: could not open codecs\n", is->filename);
2487         ret = -1;
2488         goto fail;
2489     }
2490
2491     for (;;) {
2492         if (is->abort_request)
2493             break;
2494         if (is->paused != is->last_paused) {
2495             is->last_paused = is->paused;
2496             if (is->paused)
2497                 is->read_pause_return = av_read_pause(ic);
2498             else
2499                 av_read_play(ic);
2500         }
2501 #if CONFIG_RTSP_DEMUXER || CONFIG_MMSH_PROTOCOL
2502         if (is->paused &&
2503                 (!strcmp(ic->iformat->name, "rtsp") ||
2504                  (ic->pb && !strncmp(input_filename, "mmsh:", 5)))) {
2505             /* wait 10 ms to avoid trying to get another packet */
2506             /* XXX: horrible */
2507             SDL_Delay(10);
2508             continue;
2509         }
2510 #endif
2511         if (is->seek_req) {
2512             int64_t seek_target = is->seek_pos;
2513             int64_t seek_min    = is->seek_rel > 0 ? seek_target - is->seek_rel + 2: INT64_MIN;
2514             int64_t seek_max    = is->seek_rel < 0 ? seek_target - is->seek_rel - 2: INT64_MAX;
2515 // FIXME the +-2 is due to rounding being not done in the correct direction in generation
2516 //      of the seek_pos/seek_rel variables
2517
2518             ret = avformat_seek_file(is->ic, -1, seek_min, seek_target, seek_max, is->seek_flags);
2519             if (ret < 0) {
2520                 fprintf(stderr, "%s: error while seeking\n", is->ic->filename);
2521             } else {
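                     /* after a successful seek, flush every packet queue and push a
                        flush packet so the decoders get reset */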
2522                 if (is->audio_stream >= 0) {
2523                     packet_queue_flush(&is->audioq);
2524                     packet_queue_put(&is->audioq, &flush_pkt);
2525                 }
2526                 if (is->subtitle_stream >= 0) {
2527                     packet_queue_flush(&is->subtitleq);
2528                     packet_queue_put(&is->subtitleq, &flush_pkt);
2529                 }
2530                 if (is->video_stream >= 0) {
2531                     packet_queue_flush(&is->videoq);
2532                     packet_queue_put(&is->videoq, &flush_pkt);
2533                 }
2534             }
2535             is->seek_req = 0;
2536             eof = 0;
2537         }
2538         if (is->que_attachments_req) {
2539             avformat_queue_attached_pictures(ic);
2540             is->que_attachments_req = 0;
2541         }
2542
2543         /* if the queues are full, no need to read more */
2544         if (!infinite_buffer &&
2545               (is->audioq.size + is->videoq.size + is->subtitleq.size > MAX_QUEUE_SIZE
2546             || (   (is->audioq   .nb_packets > MIN_FRAMES || is->audio_stream < 0 || is->audioq.abort_request)
2547                 && (is->videoq   .nb_packets > MIN_FRAMES || is->video_stream < 0 || is->videoq.abort_request)
2548                 && (is->subtitleq.nb_packets > MIN_FRAMES || is->subtitle_stream < 0 || is->subtitleq.abort_request)))) {
2549             /* wait 10 ms */
2550             SDL_LockMutex(wait_mutex);
2551             SDL_CondWaitTimeout(is->continue_read_thread, wait_mutex, 10);
2552             SDL_UnlockMutex(wait_mutex);
2553             continue;
2554         }
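             /* at end of file, feed empty packets to drain decoders that buffer
                frames internally, then loop or exit once all queues are empty */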
2555         if (eof) {
2556             if (is->video_stream >= 0) {
2557                 av_init_packet(pkt);
2558                 pkt->data = NULL;
2559                 pkt->size = 0;
2560                 pkt->stream_index = is->video_stream;
2561                 packet_queue_put(&is->videoq, pkt);
2562             }
2563             if (is->audio_stream >= 0 &&
2564                 is->audio_st->codec->codec->capabilities & CODEC_CAP_DELAY) {
2565                 av_init_packet(pkt);
2566                 pkt->data = NULL;
2567                 pkt->size = 0;
2568                 pkt->stream_index = is->audio_stream;
2569                 packet_queue_put(&is->audioq, pkt);
2570             }
2571             SDL_Delay(10);
2572             if (is->audioq.size + is->videoq.size + is->subtitleq.size == 0) {
2573                 if (loop != 1 && (!loop || --loop)) {
2574                     stream_seek(is, start_time != AV_NOPTS_VALUE ? start_time : 0, 0, 0);
2575                 } else if (autoexit) {
2576                     ret = AVERROR_EOF;
2577                     goto fail;
2578                 }
2579             }
2580             eof = 0;
2581             continue;
2582         }
2583         ret = av_read_frame(ic, pkt);
2584         if (ret < 0) {
2585             if (ret == AVERROR_EOF || url_feof(ic->pb))
2586                 eof = 1;
2587             if (ic->pb && ic->pb->error)
2588                 break;
2589             SDL_LockMutex(wait_mutex);
2590             SDL_CondWaitTimeout(is->continue_read_thread, wait_mutex, 10);
2591             SDL_UnlockMutex(wait_mutex);
2592             continue;
2593         }
2594         /* check if the packet is in the play range specified by the user, then queue it, otherwise discard it */
2595         pkt_in_play_range = duration == AV_NOPTS_VALUE ||
2596                 (pkt->pts - ic->streams[pkt->stream_index]->start_time) *
2597                 av_q2d(ic->streams[pkt->stream_index]->time_base) -
2598                 (double)(start_time != AV_NOPTS_VALUE ? start_time : 0) / 1000000
2599                 <= ((double)duration / 1000000);
2600         if (pkt->stream_index == is->audio_stream && pkt_in_play_range) {
2601             packet_queue_put(&is->audioq, pkt);
2602         } else if (pkt->stream_index == is->video_stream && pkt_in_play_range) {
2603             packet_queue_put(&is->videoq, pkt);
2604         } else if (pkt->stream_index == is->subtitle_stream && pkt_in_play_range) {
2605             packet_queue_put(&is->subtitleq, pkt);
2606         } else {
2607             av_free_packet(pkt);
2608         }
2609     }
2610     /* wait until the end */
2611     while (!is->abort_request) {
2612         SDL_Delay(100);
2613     }
2614
2615     ret = 0;
2616  fail:
2617     /* close each stream */
2618     if (is->audio_stream >= 0)
2619         stream_component_close(is, is->audio_stream);
2620     if (is->video_stream >= 0)
2621         stream_component_close(is, is->video_stream);
2622     if (is->subtitle_stream >= 0)
2623         stream_component_close(is, is->subtitle_stream);
2624     if (is->ic) {
2625         avformat_close_input(&is->ic);
2626     }
2627
2628     if (ret != 0) {
2629         SDL_Event event;
2630
2631         event.type = FF_QUIT_EVENT;
2632         event.user.data1 = is;
2633         SDL_PushEvent(&event);
2634     }
2635     SDL_DestroyMutex(wait_mutex);
2636     return 0;
2637 }
2638
2639 static VideoState *stream_open(const char *filename, AVInputFormat *iformat)
2640 {
2641     VideoState *is;
2642
2643     is = av_mallocz(sizeof(VideoState));
2644     if (!is)
2645         return NULL;
2646     av_strlcpy(is->filename, filename, sizeof(is->filename));
2647     is->iformat = iformat;
2648     is->ytop    = 0;
2649     is->xleft   = 0;
2650
2651     /* start video display */
2652     is->pictq_mutex = SDL_CreateMutex();
2653     is->pictq_cond  = SDL_CreateCond();
2654
2655     is->subpq_mutex = SDL_CreateMutex();
2656     is->subpq_cond  = SDL_CreateCond();
2657
2658     packet_queue_init(&is->videoq);
2659     packet_queue_init(&is->audioq);
2660     packet_queue_init(&is->subtitleq);
2661
2662     is->continue_read_thread = SDL_CreateCond();
2663
2664     is->av_sync_type = av_sync_type;
2665     is->read_tid     = SDL_CreateThread(read_thread, is);
2666     if (!is->read_tid) {
2667         av_free(is);
2668         return NULL;
2669     }
2670     return is;
2671 }
2672
2673 static void stream_cycle_channel(VideoState *is, int codec_type)
2674 {
2675     AVFormatContext *ic = is->ic;
2676     int start_index, stream_index;
2677     int old_index;
2678     AVStream *st;
2679
2680     if (codec_type == AVMEDIA_TYPE_VIDEO) {
2681         start_index = is->last_video_stream;
2682         old_index = is->video_stream;
2683     } else if (codec_type == AVMEDIA_TYPE_AUDIO) {
2684         start_index = is->last_audio_stream;
2685         old_index = is->audio_stream;
2686     } else {
2687         start_index = is->last_subtitle_stream;
2688         old_index = is->subtitle_stream;
2689     }
2690     stream_index = start_index;
2691     for (;;) {
2692         if (++stream_index >= is->ic->nb_streams)
2693         {
2694             if (codec_type == AVMEDIA_TYPE_SUBTITLE)
2695             {
2696                 stream_index = -1;
2697                 is->last_subtitle_stream = -1;
2698                 goto the_end;
2699             }
2700             if (start_index == -1)
2701                 return;
2702             stream_index = 0;
2703         }
2704         if (stream_index == start_index)
2705             return;
2706         st = ic->streams[stream_index];
2707         if (st->codec->codec_type == codec_type) {
2708             /* check that parameters are OK */
2709             switch (codec_type) {
2710             case AVMEDIA_TYPE_AUDIO:
2711                 if (st->codec->sample_rate != 0 &&
2712                     st->codec->channels != 0)
2713                     goto the_end;
2714                 break;
2715             case AVMEDIA_TYPE_VIDEO:
2716             case AVMEDIA_TYPE_SUBTITLE:
2717                 goto the_end;
2718             default:
2719                 break;
2720             }
2721         }
2722     }
2723  the_end:
2724     stream_component_close(is, old_index);
2725     stream_component_open(is, stream_index);
2726     if (codec_type == AVMEDIA_TYPE_VIDEO)
2727         is->que_attachments_req = 1;
2728 }
2729
2730
2731 static void toggle_full_screen(VideoState *is)
2732 {
2733 #if defined(__APPLE__) && SDL_VERSION_ATLEAST(1, 2, 14)
2734     /* OS X needs to reallocate the SDL overlays */
2735     int i;
2736     for (i = 0; i < VIDEO_PICTURE_QUEUE_SIZE; i++)
2737         is->pictq[i].reallocate = 1;
2738 #endif
2739     is_full_screen = !is_full_screen;
2740     video_open(is, 1);
2741 }
2742
2743 static void toggle_pause(VideoState *is)
2744 {
2745     stream_toggle_pause(is);
2746     is->step = 0;
2747 }
2748
2749 static void step_to_next_frame(VideoState *is)
2750 {
2751     /* if the stream is paused, unpause it, then step */
2752     if (is->paused)
2753         stream_toggle_pause(is);
2754     is->step = 1;
2755 }
2756
2757 static void toggle_audio_display(VideoState *is)
2758 {
2759     int bgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0x00);
2760     is->show_mode = (is->show_mode + 1) % SHOW_MODE_NB;
2761     fill_rectangle(screen,
2762                 is->xleft, is->ytop, is->width, is->height,
2763                 bgcolor);
2764     SDL_UpdateRect(screen, is->xleft, is->ytop, is->width, is->height);
2765 }
2766
2767 /* handle an event sent by the GUI */
2768 static void event_loop(VideoState *cur_stream)
2769 {
2770     SDL_Event event;
2771     double incr, pos, frac;
2772
2773     for (;;) {
2774         double x;
2775         SDL_WaitEvent(&event);
2776         switch (event.type) {
2777         case SDL_KEYDOWN:
2778             if (exit_on_keydown) {
2779                 do_exit(cur_stream);
2780                 break;
2781             }
2782             switch (event.key.keysym.sym) {
2783             case SDLK_ESCAPE:
2784             case SDLK_q:
2785                 do_exit(cur_stream);
2786                 break;
2787             case SDLK_f:
2788                 toggle_full_screen(cur_stream);
2789                 cur_stream->force_refresh = 1;
2790                 break;
2791             case SDLK_p:
2792             case SDLK_SPACE:
2793                 toggle_pause(cur_stream);
2794                 break;
2795             case SDLK_s: // S: Step to next frame
2796                 step_to_next_frame(cur_stream);
2797                 break;
2798             case SDLK_a:
2799                 stream_cycle_channel(cur_stream, AVMEDIA_TYPE_AUDIO);
2800                 break;
2801             case SDLK_v:
2802                 stream_cycle_channel(cur_stream, AVMEDIA_TYPE_VIDEO);
2803                 break;
2804             case SDLK_t:
2805                 stream_cycle_channel(cur_stream, AVMEDIA_TYPE_SUBTITLE);
2806                 break;
2807             case SDLK_w:
2808                 toggle_audio_display(cur_stream);
2809                 cur_stream->force_refresh = 1;
2810                 break;
2811             case SDLK_PAGEUP:
2812                 incr = 600.0;
2813                 goto do_seek;
2814             case SDLK_PAGEDOWN:
2815                 incr = -600.0;
2816                 goto do_seek;
2817             case SDLK_LEFT:
2818                 incr = -10.0;
2819                 goto do_seek;
2820             case SDLK_RIGHT:
2821                 incr = 10.0;
2822                 goto do_seek;
2823             case SDLK_UP:
2824                 incr = 60.0;
2825                 goto do_seek;
2826             case SDLK_DOWN:
2827                 incr = -60.0;
2828             do_seek:
2829                     if (seek_by_bytes) {
2830                         if (cur_stream->video_stream >= 0 && cur_stream->video_current_pos >= 0) {
2831                             pos = cur_stream->video_current_pos;
2832                         } else if (cur_stream->audio_stream >= 0 && cur_stream->audio_pkt.pos >= 0) {
2833                             pos = cur_stream->audio_pkt.pos;
2834                         } else
2835                             pos = avio_tell(cur_stream->ic->pb);
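                             /* convert the seek increment from seconds to bytes using
                                the container bit rate, or a rough default of 180000
                                bytes per second if it is unknown */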
2836                         if (cur_stream->ic->bit_rate)
2837                             incr *= cur_stream->ic->bit_rate / 8.0;
2838                         else
2839                             incr *= 180000.0;
2840                         pos += incr;
2841                         stream_seek(cur_stream, pos, incr, 1);
2842                     } else {
2843                         pos = get_master_clock(cur_stream);
2844                         pos += incr;
2845                         stream_seek(cur_stream, (int64_t)(pos * AV_TIME_BASE), (int64_t)(incr * AV_TIME_BASE), 0);
2846                     }
2847                 break;
2848             default:
2849                 break;
2850             }
2851             break;
2852         case SDL_VIDEOEXPOSE:
2853             cur_stream->force_refresh = 1;
2854             break;
2855         case SDL_MOUSEBUTTONDOWN:
2856             if (exit_on_mousedown) {
2857                 do_exit(cur_stream);
2858                 break;
2859             }
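                 /* fall through to the mouse-motion handling below */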
2860         case SDL_MOUSEMOTION:
2861             if (event.type == SDL_MOUSEBUTTONDOWN) {
2862                 x = event.button.x;
2863             } else {
2864                 if (event.motion.state != SDL_PRESSED)
2865                     break;
2866                 x = event.motion.x;
2867             }
2868                 if (seek_by_bytes || cur_stream->ic->duration <= 0) {
2869                     uint64_t size =  avio_size(cur_stream->ic->pb);
2870                     stream_seek(cur_stream, size*x/cur_stream->width, 0, 1);
2871                 } else {
2872                     int64_t ts;
2873                     int ns, hh, mm, ss;
2874                     int tns, thh, tmm, tss;
2875                     tns  = cur_stream->ic->duration / 1000000LL;
2876                     thh  = tns / 3600;
2877                     tmm  = (tns % 3600) / 60;
2878                     tss  = (tns % 60);
2879                     frac = x / cur_stream->width;
2880                     ns   = frac * tns;
2881                     hh   = ns / 3600;
2882                     mm   = (ns % 3600) / 60;
2883                     ss   = (ns % 60);
2884                     fprintf(stderr, "Seek to %2.0f%% (%2d:%02d:%02d) of total duration (%2d:%02d:%02d)       \n", frac*100,
2885                             hh, mm, ss, thh, tmm, tss);
2886                     ts = frac * cur_stream->ic->duration;
2887                     if (cur_stream->ic->start_time != AV_NOPTS_VALUE)
2888                         ts += cur_stream->ic->start_time;
2889                     stream_seek(cur_stream, ts, 0, 0);
2890                 }
2891             break;
2892         case SDL_VIDEORESIZE:
2893                 screen = SDL_SetVideoMode(event.resize.w, event.resize.h, 0,
2894                                           SDL_HWSURFACE|SDL_RESIZABLE|SDL_ASYNCBLIT|SDL_HWACCEL);
2895                 screen_width  = cur_stream->width  = event.resize.w;
2896                 screen_height = cur_stream->height = event.resize.h;
2897                 cur_stream->force_refresh = 1;
2898             break;
2899         case SDL_QUIT:
2900         case FF_QUIT_EVENT:
2901             do_exit(cur_stream);
2902             break;
2903         case FF_ALLOC_EVENT:
2904             alloc_picture(event.user.data1);
2905             break;
2906         case FF_REFRESH_EVENT:
2907             video_refresh(event.user.data1);
2908             cur_stream->refresh = 0;
2909             break;
2910         default:
2911             break;
2912         }
2913     }
2914 }
2915
2916 static int opt_frame_size(void *optctx, const char *opt, const char *arg)
2917 {
2918     av_log(NULL, AV_LOG_WARNING, "Option -s is deprecated, use -video_size.\n");
2919     return opt_default(NULL, "video_size", arg);
2920 }
2921
2922 static int opt_width(void *optctx, const char *opt, const char *arg)
2923 {
2924     screen_width = parse_number_or_die(opt, arg, OPT_INT64, 1, INT_MAX);
2925     return 0;
2926 }
2927
2928 static int opt_height(void *optctx, const char *opt, const char *arg)
2929 {
2930     screen_height = parse_number_or_die(opt, arg, OPT_INT64, 1, INT_MAX);
2931     return 0;
2932 }
2933
2934 static int opt_format(void *optctx, const char *opt, const char *arg)
2935 {
2936     file_iformat = av_find_input_format(arg);
2937     if (!file_iformat) {
2938         fprintf(stderr, "Unknown input format: %s\n", arg);
2939         return AVERROR(EINVAL);
2940     }
2941     return 0;
2942 }
2943
2944 static int opt_frame_pix_fmt(void *optctx, const char *opt, const char *arg)
2945 {
2946     av_log(NULL, AV_LOG_WARNING, "Option -pix_fmt is deprecated, use -pixel_format.\n");
2947     return opt_default(NULL, "pixel_format", arg);
2948 }
2949
2950 static int opt_sync(void *optctx, const char *opt, const char *arg)
2951 {
2952     if (!strcmp(arg, "audio"))
2953         av_sync_type = AV_SYNC_AUDIO_MASTER;
2954     else if (!strcmp(arg, "video"))
2955         av_sync_type = AV_SYNC_VIDEO_MASTER;
2956     else if (!strcmp(arg, "ext"))
2957         av_sync_type = AV_SYNC_EXTERNAL_CLOCK;
2958     else {
2959         fprintf(stderr, "Unknown value for %s: %s\n", opt, arg);
2960         exit(1);
2961     }
2962     return 0;
2963 }
2964
2965 static int opt_seek(void *optctx, const char *opt, const char *arg)
2966 {
2967     start_time = parse_time_or_die(opt, arg, 1);
2968     return 0;
2969 }
2970
2971 static int opt_duration(void *optctx, const char *opt, const char *arg)
2972 {
2973     duration = parse_time_or_die(opt, arg, 1);
2974     return 0;
2975 }
2976
2977 static int opt_show_mode(void *optctx, const char *opt, const char *arg)
2978 {
2979     show_mode = !strcmp(arg, "video") ? SHOW_MODE_VIDEO :
2980                 !strcmp(arg, "waves") ? SHOW_MODE_WAVES :
2981                 !strcmp(arg, "rdft" ) ? SHOW_MODE_RDFT  :
2982                 parse_number_or_die(opt, arg, OPT_INT, 0, SHOW_MODE_NB-1);
2983     return 0;
2984 }
2985
2986 static void opt_input_file(void *optctx, const char *filename)
2987 {
2988     if (input_filename) {
2989         fprintf(stderr, "Argument '%s' provided as input filename, but '%s' was already specified.\n",
2990                 filename, input_filename);
2991         exit_program(1);
2992     }
2993     if (!strcmp(filename, "-"))
2994         filename = "pipe:";
2995     input_filename = filename;
2996 }
2997
2998 static int opt_codec(void *o, const char *opt, const char *arg)
2999 {
3000     switch(opt[strlen(opt)-1]){
3001     case 'a' :    audio_codec_name = arg; break;
3002     case 's' : subtitle_codec_name = arg; break;
3003     case 'v' :    video_codec_name = arg; break;
3004     }
3005     return 0;
3006 }
3007
3008 static int dummy;
3009
3010 static const OptionDef options[] = {
3011 #include "cmdutils_common_opts.h"
3012     { "x", HAS_ARG, { .func_arg = opt_width }, "force displayed width", "width" },
3013     { "y", HAS_ARG, { .func_arg = opt_height }, "force displayed height", "height" },
3014     { "s", HAS_ARG | OPT_VIDEO, { .func_arg = opt_frame_size }, "set frame size (WxH or abbreviation)", "size" },
3015     { "fs", OPT_BOOL, { &is_full_screen }, "force full screen" },
3016     { "an", OPT_BOOL, { &audio_disable }, "disable audio" },
3017     { "vn", OPT_BOOL, { &video_disable }, "disable video" },
3018     { "ast", OPT_INT | HAS_ARG | OPT_EXPERT, { &wanted_stream[AVMEDIA_TYPE_AUDIO] }, "select desired audio stream", "stream_number" },
3019     { "vst", OPT_INT | HAS_ARG | OPT_EXPERT, { &wanted_stream[AVMEDIA_TYPE_VIDEO] }, "select desired video stream", "stream_number" },
3020     { "sst", OPT_INT | HAS_ARG | OPT_EXPERT, { &wanted_stream[AVMEDIA_TYPE_SUBTITLE] }, "select desired subtitle stream", "stream_number" },
3021     { "ss", HAS_ARG, { .func_arg = opt_seek }, "seek to a given position in seconds", "pos" },
3022     { "t", HAS_ARG, { .func_arg = opt_duration }, "play \"duration\" seconds of audio/video", "duration" },
3023     { "bytes", OPT_INT | HAS_ARG, { &seek_by_bytes }, "seek by bytes 0=off 1=on -1=auto", "val" },
3024     { "nodisp", OPT_BOOL, { &display_disable }, "disable graphical display" },
3025     { "f", HAS_ARG, { .func_arg = opt_format }, "force format", "fmt" },
3026     { "pix_fmt", HAS_ARG | OPT_EXPERT | OPT_VIDEO, { .func_arg = opt_frame_pix_fmt }, "set pixel format", "format" },
3027     { "stats", OPT_BOOL | OPT_EXPERT, { &show_status }, "show status", "" },
3028     { "bug", OPT_INT | HAS_ARG | OPT_EXPERT, { &workaround_bugs }, "workaround bugs", "" },
3029     { "fast", OPT_BOOL | OPT_EXPERT, { &fast }, "non spec compliant optimizations", "" },
3030     { "genpts", OPT_BOOL | OPT_EXPERT, { &genpts }, "generate pts", "" },
3031     { "drp", OPT_INT | HAS_ARG | OPT_EXPERT, { &decoder_reorder_pts }, "let decoder reorder pts 0=off 1=on -1=auto", ""},
3032     { "lowres", OPT_INT | HAS_ARG | OPT_EXPERT, { &lowres }, "force low resolution decoding (1=1/2 size, 2=1/4 size, ...)", "level" },
3033     { "skiploop", OPT_INT | HAS_ARG | OPT_EXPERT, { &skip_loop_filter }, "skip loop filtering for selected frames (AVDiscard value)", "value" },
3034     { "skipframe", OPT_INT | HAS_ARG | OPT_EXPERT, { &skip_frame }, "skip decoding of selected frames (AVDiscard value)", "value" },
3035     { "skipidct", OPT_INT | HAS_ARG | OPT_EXPERT, { &skip_idct }, "skip the IDCT for selected frames (AVDiscard value)", "value" },
3036     { "idct", OPT_INT | HAS_ARG | OPT_EXPERT, { &idct }, "set idct algo",  "algo" },
3037     { "ec", OPT_INT | HAS_ARG | OPT_EXPERT, { &error_concealment }, "set error concealment options",  "bit_mask" },
3038     { "sync", HAS_ARG | OPT_EXPERT, { .func_arg = opt_sync }, "set audio-video sync type (type=audio/video/ext)", "type" },
3039     { "autoexit", OPT_BOOL | OPT_EXPERT, { &autoexit }, "exit at the end", "" },
3040     { "exitonkeydown", OPT_BOOL | OPT_EXPERT, { &exit_on_keydown }, "exit on key down", "" },
3041     { "exitonmousedown", OPT_BOOL | OPT_EXPERT, { &exit_on_mousedown }, "exit on mouse down", "" },
3042     { "loop", OPT_INT | HAS_ARG | OPT_EXPERT, { &loop }, "set number of times the playback shall be looped", "loop count" },
3043     { "framedrop", OPT_BOOL | OPT_EXPERT, { &framedrop }, "drop frames when cpu is too slow", "" },
3044     { "infbuf", OPT_BOOL | OPT_EXPERT, { &infinite_buffer }, "don't limit the input buffer size (useful with realtime streams)", "" },
3045     { "window_title", OPT_STRING | HAS_ARG, { &window_title }, "set window title", "window title" },
3046 #if CONFIG_AVFILTER
3047     { "vf", OPT_STRING | HAS_ARG, { &vfilters }, "video filters", "filter list" },
3048 #endif
3049     { "rdftspeed", OPT_INT | HAS_ARG | OPT_AUDIO | OPT_EXPERT, { &rdftspeed }, "rdft speed", "msecs" },
3050     { "showmode", HAS_ARG, { .func_arg = opt_show_mode }, "select show mode (0 = video, 1 = waves, 2 = RDFT)", "mode" },
3051     { "default", HAS_ARG | OPT_AUDIO | OPT_VIDEO | OPT_EXPERT, { .func_arg = opt_default }, "generic catch all option", "" },
3052     { "i", OPT_BOOL, { &dummy }, "read specified file", "input_file" },
3053     { "codec", HAS_ARG, { .func_arg = opt_codec }, "force decoder", "decoder" },
3054     { NULL, },
3055 };
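
/* Illustrative invocations exercising some of the options above (file names
 * are placeholders, not an exhaustive reference):
 *
 *   ffplay -autoexit -nodisp input.mp3     audio only, quit at end of file
 *   ffplay -ss 30 -t 10 input.mkv          start 30 seconds in, play 10 seconds
 *   ffplay -sync ext -fs input.ts          external clock as master, full screen
 */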
3056
3057 static void show_usage(void)
3058 {
3059     av_log(NULL, AV_LOG_INFO, "Simple media player\n");
3060     av_log(NULL, AV_LOG_INFO, "usage: %s [options] input_file\n", program_name);
3061     av_log(NULL, AV_LOG_INFO, "\n");
3062 }
3063
3064 void show_help_default(const char *opt, const char *arg)
3065 {
3066     av_log_set_callback(log_callback_help);
3067     show_usage();
3068     show_help_options(options, "Main options:", 0, OPT_EXPERT, 0);
3069     show_help_options(options, "Advanced options:", OPT_EXPERT, 0, 0);
3070     printf("\n");
3071     show_help_children(avcodec_get_class(), AV_OPT_FLAG_DECODING_PARAM);
3072     show_help_children(avformat_get_class(), AV_OPT_FLAG_DECODING_PARAM);
3073 #if !CONFIG_AVFILTER
3074     show_help_children(sws_get_class(), AV_OPT_FLAG_ENCODING_PARAM);
3075 #else
3076     show_help_children(avfilter_get_class(), AV_OPT_FLAG_FILTERING_PARAM);
3077 #endif
3078     printf("\nWhile playing:\n"
3079            "q, ESC              quit\n"
3080            "f                   toggle full screen\n"
3081            "p, SPC              pause\n"
3082            "a                   cycle audio channel\n"
3083            "v                   cycle video channel\n"
3084            "t                   cycle subtitle channel\n"
3085            "w                   show audio waves\n"
3086            "s                   activate frame-step mode\n"
3087            "left/right          seek backward/forward 10 seconds\n"
3088            "down/up             seek backward/forward 1 minute\n"
3089            "page down/page up   seek backward/forward 10 minutes\n"
3090            "mouse click         seek to the position corresponding to the fraction of the window width clicked\n"
3091            );
3092 }
3093
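/* Lock manager callback registered with av_lockmgr_register(); libavcodec
 * uses it to serialize operations that are not thread safe (such as
 * avcodec_open2()). It must return 0 on success and nonzero on failure.
 * The lifecycle, roughly:
 *
 *     void *mtx = NULL;
 *     lockmgr(&mtx, AV_LOCK_CREATE);   // allocate the mutex
 *     lockmgr(&mtx, AV_LOCK_OBTAIN);   // lock
 *     lockmgr(&mtx, AV_LOCK_RELEASE);  // unlock
 *     lockmgr(&mtx, AV_LOCK_DESTROY);  // free the mutex
 */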
3094 static int lockmgr(void **mtx, enum AVLockOp op)
3095 {
3096     switch (op) {
3097     case AV_LOCK_CREATE:
3098         *mtx = SDL_CreateMutex();
3099         if (!*mtx)
3100             return 1;
3101         return 0;
3102     case AV_LOCK_OBTAIN:
3103         return !!SDL_LockMutex(*mtx);
3104     case AV_LOCK_RELEASE:
3105         return !!SDL_UnlockMutex(*mtx);
3106     case AV_LOCK_DESTROY:
3107         SDL_DestroyMutex(*mtx);
3108         return 0;
3109     }
3110     return 1;
3111 }
3112
3113 /* Program entry point */
3114 int main(int argc, char **argv)
3115 {
3116     int flags;
3117     VideoState *is;
3118     char dummy_videodriver[] = "SDL_VIDEODRIVER=dummy";
3119
3120     av_log_set_flags(AV_LOG_SKIP_REPEATED);
3121     parse_loglevel(argc, argv, options);
3122
3123     /* register all codecs, demuxers, devices, filters and protocols */
3124     avcodec_register_all();
3125 #if CONFIG_AVDEVICE
3126     avdevice_register_all();
3127 #endif
3128 #if CONFIG_AVFILTER
3129     avfilter_register_all();
3130 #endif
3131     av_register_all();
3132     avformat_network_init();
3133
3134     init_opts();
3135
3136     signal(SIGINT , sigterm_handler); /* Interrupt (ANSI).    */
3137     signal(SIGTERM, sigterm_handler); /* Termination (ANSI).  */
3138
3139     show_banner(argc, argv, options);
3140
3141     parse_options(NULL, argc, argv, options, opt_input_file);
3142
3143     if (!input_filename) {
3144         show_usage();
3145         fprintf(stderr, "An input file must be specified\n");
3146         fprintf(stderr, "Use -h to get full help or, even better, run 'man %s'\n", program_name);
3147         exit(1);
3148     }
3149
3150     if (display_disable) {
3151         video_disable = 1;
3152     }
3153     flags = SDL_INIT_VIDEO | SDL_INIT_AUDIO | SDL_INIT_TIMER;
3154     if (audio_disable)
3155         flags &= ~SDL_INIT_AUDIO;
3156     if (display_disable)
3157         SDL_putenv(dummy_videodriver); /* For the event queue, we always need a video driver. */
3158 #if !defined(__MINGW32__) && !defined(__APPLE__)
3159     flags |= SDL_INIT_EVENTTHREAD; /* Not supported on Windows or Mac OS X */
3160 #endif
3161     if (SDL_Init (flags)) {
3162         fprintf(stderr, "Could not initialize SDL - %s\n", SDL_GetError());
3163         fprintf(stderr, "(Did you set the DISPLAY variable?)\n");
3164         exit(1);
3165     }
3166
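    /* Remember the desktop resolution (when SDL can report it) so that
       switching to full screen can size the window accordingly. */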
3167     if (!display_disable) {
3168 #if HAVE_SDL_VIDEO_SIZE
3169         const SDL_VideoInfo *vi = SDL_GetVideoInfo();
3170         fs_screen_width = vi->current_w;
3171         fs_screen_height = vi->current_h;
3172 #endif
3173     }
3174
3175     SDL_EventState(SDL_ACTIVEEVENT, SDL_IGNORE);
3176     SDL_EventState(SDL_SYSWMEVENT, SDL_IGNORE);
3177     SDL_EventState(SDL_USEREVENT, SDL_IGNORE);
3178
3179     if (av_lockmgr_register(lockmgr)) {
3180         fprintf(stderr, "Could not initialize lock manager!\n");
3181         do_exit(NULL);
3182     }
3183
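    /* flush_pkt is a sentinel packet: it is put on the packet queues after a
       seek so the decoding threads know to call avcodec_flush_buffers()
       before continuing. */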
3184     av_init_packet(&flush_pkt);
3185     flush_pkt.data = (uint8_t *)(intptr_t)"FLUSH";
3186
3187     is = stream_open(input_filename, file_iformat);
3188     if (!is) {
3189         fprintf(stderr, "Failed to initialize VideoState!\n");
3190         do_exit(NULL);
3191     }
3192
3193     event_loop(is);
3194
3195     /* event_loop() never returns; do_exit() terminates the process via exit(). */
3196
3197     return 0;
3198 }