1 /*
2  * ffplay : Simple Media Player based on the Libav libraries
3  * Copyright (c) 2003 Fabrice Bellard
4  *
5  * This file is part of Libav.
6  *
7  * Libav is free software; you can redistribute it and/or
8  * modify it under the terms of the GNU Lesser General Public
9  * License as published by the Free Software Foundation; either
10  * version 2.1 of the License, or (at your option) any later version.
11  *
12  * Libav is distributed in the hope that it will be useful,
13  * but WITHOUT ANY WARRANTY; without even the implied warranty of
14  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
15  * Lesser General Public License for more details.
16  *
17  * You should have received a copy of the GNU Lesser General Public
18  * License along with Libav; if not, write to the Free Software
19  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
20  */
21
22 #include "config.h"
23 #include <inttypes.h>
24 #include <math.h>
25 #include <limits.h>
26 #include "libavutil/avstring.h"
27 #include "libavutil/colorspace.h"
28 #include "libavutil/mathematics.h"
29 #include "libavutil/pixdesc.h"
30 #include "libavutil/imgutils.h"
31 #include "libavutil/dict.h"
32 #include "libavutil/parseutils.h"
33 #include "libavutil/samplefmt.h"
34 #include "libavformat/avformat.h"
35 #include "libavdevice/avdevice.h"
36 #include "libswscale/swscale.h"
37 #include "libavcodec/audioconvert.h"
38 #include "libavutil/opt.h"
39 #include "libavcodec/avfft.h"
40
41 #if CONFIG_AVFILTER
42 # include "libavfilter/avfilter.h"
43 # include "libavfilter/avfiltergraph.h"
44 #endif
45
46 #include "cmdutils.h"
47
48 #include <SDL.h>
49 #include <SDL_thread.h>
50
51 #ifdef __MINGW32__
52 #undef main /* We don't want SDL to override our main() */
53 #endif
54
55 #include <unistd.h>
56 #include <assert.h>
57
58 const char program_name[] = "ffplay";
59 const int program_birth_year = 2003;
60
61 #define MAX_QUEUE_SIZE (15 * 1024 * 1024)
62 #define MIN_AUDIOQ_SIZE (20 * 16 * 1024)
63 #define MIN_FRAMES 5
64
65 /* SDL audio buffer size, in samples. Should be small to have precise
66    A/V sync as SDL does not have hardware buffer fullness info. */
67 #define SDL_AUDIO_BUFFER_SIZE 1024
68
69 /* no AV sync correction is done if below the AV sync threshold */
70 #define AV_SYNC_THRESHOLD 0.01
71 /* no AV correction is done if the error is too big */
72 #define AV_NOSYNC_THRESHOLD 10.0
73
74 #define FRAME_SKIP_FACTOR 0.05
75
76 /* maximum audio speed change to get correct sync */
77 #define SAMPLE_CORRECTION_PERCENT_MAX 10
78
79 /* we use about AUDIO_DIFF_AVG_NB A-V differences to make the average */
80 #define AUDIO_DIFF_AVG_NB   20
81
82 /* NOTE: the size must be big enough to compensate for the hardware audio buffer size */
83 #define SAMPLE_ARRAY_SIZE (2*65536)
84
85 static int sws_flags = SWS_BICUBIC;
86
87 typedef struct PacketQueue {
88     AVPacketList *first_pkt, *last_pkt;
89     int nb_packets;
90     int size;
91     int abort_request;
92     SDL_mutex *mutex;
93     SDL_cond *cond;
94 } PacketQueue;
95
96 #define VIDEO_PICTURE_QUEUE_SIZE 2
97 #define SUBPICTURE_QUEUE_SIZE 4
98
99 typedef struct VideoPicture {
100     double pts;                                  ///<presentation time stamp for this picture
101     double target_clock;                         ///<av_gettime() time at which this should be displayed ideally
102     int64_t pos;                                 ///<byte position in file
103     SDL_Overlay *bmp;
104     int width, height; /* source height & width */
105     int allocated;
106     enum PixelFormat pix_fmt;
107
108 #if CONFIG_AVFILTER
109     AVFilterBufferRef *picref;
110 #endif
111 } VideoPicture;
112
113 typedef struct SubPicture {
114     double pts; /* presentation time stamp for this picture */
115     AVSubtitle sub;
116 } SubPicture;
117
118 enum {
119     AV_SYNC_AUDIO_MASTER, /* default choice */
120     AV_SYNC_VIDEO_MASTER,
121     AV_SYNC_EXTERNAL_CLOCK, /* synchronize to an external clock */
122 };
123
124 typedef struct VideoState {
125     SDL_Thread *parse_tid;
126     SDL_Thread *video_tid;
127     SDL_Thread *refresh_tid;
128     AVInputFormat *iformat;
129     int no_background;
130     int abort_request;
131     int paused;
132     int last_paused;
133     int seek_req;
134     int seek_flags;
135     int64_t seek_pos;
136     int64_t seek_rel;
137     int read_pause_return;
138     AVFormatContext *ic;
139     int dtg_active_format;
140
141     int audio_stream;
142
143     int av_sync_type;
144     double external_clock; /* external clock base */
145     int64_t external_clock_time;
146
147     double audio_clock;
148     double audio_diff_cum; /* used for AV difference average computation */
149     double audio_diff_avg_coef;
150     double audio_diff_threshold;
151     int audio_diff_avg_count;
152     AVStream *audio_st;
153     PacketQueue audioq;
154     int audio_hw_buf_size;
155     /* samples output by the codec. we reserve more space for avsync
156        compensation */
157     DECLARE_ALIGNED(16,uint8_t,audio_buf1)[(AVCODEC_MAX_AUDIO_FRAME_SIZE * 3) / 2];
158     DECLARE_ALIGNED(16,uint8_t,audio_buf2)[(AVCODEC_MAX_AUDIO_FRAME_SIZE * 3) / 2];
159     uint8_t *audio_buf;
160     unsigned int audio_buf_size; /* in bytes */
161     int audio_buf_index; /* in bytes */
162     AVPacket audio_pkt_temp;
163     AVPacket audio_pkt;
164     enum AVSampleFormat audio_src_fmt;
165     AVAudioConvert *reformat_ctx;
166
167     int show_audio; /* if true, display audio samples */
168     int16_t sample_array[SAMPLE_ARRAY_SIZE];
169     int sample_array_index;
170     int last_i_start;
171     RDFTContext *rdft;
172     int rdft_bits;
173     FFTSample *rdft_data;
174     int xpos;
175
176     SDL_Thread *subtitle_tid;
177     int subtitle_stream;
178     int subtitle_stream_changed;
179     AVStream *subtitle_st;
180     PacketQueue subtitleq;
181     SubPicture subpq[SUBPICTURE_QUEUE_SIZE];
182     int subpq_size, subpq_rindex, subpq_windex;
183     SDL_mutex *subpq_mutex;
184     SDL_cond *subpq_cond;
185
186     double frame_timer;
187     double frame_last_pts;
188     double frame_last_delay;
189     double video_clock;                          ///<pts of last decoded frame / predicted pts of next decoded frame
190     int video_stream;
191     AVStream *video_st;
192     PacketQueue videoq;
193     double video_current_pts;                    ///<current displayed pts (different from video_clock if frame fifos are used)
194     double video_current_pts_drift;              ///<video_current_pts - time (av_gettime) at which we updated video_current_pts - used to have running video pts
195     int64_t video_current_pos;                   ///<current displayed file pos
196     VideoPicture pictq[VIDEO_PICTURE_QUEUE_SIZE];
197     int pictq_size, pictq_rindex, pictq_windex;
198     SDL_mutex *pictq_mutex;
199     SDL_cond *pictq_cond;
200 #if !CONFIG_AVFILTER
201     struct SwsContext *img_convert_ctx;
202 #endif
203
204     //    QETimer *video_timer;
205     char filename[1024];
206     int width, height, xleft, ytop;
207
208     PtsCorrectionContext pts_ctx;
209
210 #if CONFIG_AVFILTER
211     AVFilterContext *out_video_filter;          ///<the last filter in the video chain
212 #endif
213
214     float skip_frames;
215     float skip_frames_index;
216     int refresh;
217 } VideoState;
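/* Thread layout: parse_tid runs the demuxing loop that fills audioq, videoq
   and subtitleq; video_tid and subtitle_tid decode from those queues;
   refresh_tid (refresh_thread below) paces the display by posting
   FF_REFRESH_EVENTs; audio is pulled directly by the SDL audio callback. */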
218
219 static void show_help(void);
220
221 /* options specified by the user */
222 static AVInputFormat *file_iformat;
223 static const char *input_filename;
224 static const char *window_title;
225 static int fs_screen_width;
226 static int fs_screen_height;
227 static int screen_width = 0;
228 static int screen_height = 0;
229 static int frame_width = 0;
230 static int frame_height = 0;
231 static enum PixelFormat frame_pix_fmt = PIX_FMT_NONE;
232 static int audio_disable;
233 static int video_disable;
234 static int wanted_stream[AVMEDIA_TYPE_NB]={
235     [AVMEDIA_TYPE_AUDIO]=-1,
236     [AVMEDIA_TYPE_VIDEO]=-1,
237     [AVMEDIA_TYPE_SUBTITLE]=-1,
238 };
239 static int seek_by_bytes=-1;
240 static int display_disable;
241 static int show_status = 1;
242 static int av_sync_type = AV_SYNC_AUDIO_MASTER;
243 static int64_t start_time = AV_NOPTS_VALUE;
244 static int64_t duration = AV_NOPTS_VALUE;
245 static int debug = 0;
246 static int debug_mv = 0;
247 static int step = 0;
248 static int thread_count = 1;
249 static int workaround_bugs = 1;
250 static int fast = 0;
251 static int genpts = 0;
252 static int lowres = 0;
253 static int idct = FF_IDCT_AUTO;
254 static enum AVDiscard skip_frame= AVDISCARD_DEFAULT;
255 static enum AVDiscard skip_idct= AVDISCARD_DEFAULT;
256 static enum AVDiscard skip_loop_filter= AVDISCARD_DEFAULT;
257 static int error_recognition = FF_ER_CAREFUL;
258 static int error_concealment = 3;
259 static int decoder_reorder_pts= -1;
260 static int autoexit;
261 static int exit_on_keydown;
262 static int exit_on_mousedown;
263 static int loop=1;
264 static int framedrop=1;
265
266 static int rdftspeed=20;
267 #if CONFIG_AVFILTER
268 static char *vfilters = NULL;
269 #endif
270
271 /* current context */
272 static int is_full_screen;
273 static VideoState *cur_stream;
274 static int64_t audio_callback_time;
275
276 static AVPacket flush_pkt;
277
278 #define FF_ALLOC_EVENT   (SDL_USEREVENT)
279 #define FF_REFRESH_EVENT (SDL_USEREVENT + 1)
280 #define FF_QUIT_EVENT    (SDL_USEREVENT + 2)
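/* Custom SDL events used by the main loop: FF_ALLOC_EVENT asks the main
   thread to (re)allocate a YUV overlay (alloc_picture), FF_REFRESH_EVENT
   triggers video_refresh_timer for the next display update, and
   FF_QUIT_EVENT requests that the player shut down. */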
281
282 static SDL_Surface *screen;
283
284 static int packet_queue_put(PacketQueue *q, AVPacket *pkt);
285
286 /* packet queue handling */
287 static void packet_queue_init(PacketQueue *q)
288 {
289     memset(q, 0, sizeof(PacketQueue));
290     q->mutex = SDL_CreateMutex();
291     q->cond = SDL_CreateCond();
292     packet_queue_put(q, &flush_pkt);
293 }
294
295 static void packet_queue_flush(PacketQueue *q)
296 {
297     AVPacketList *pkt, *pkt1;
298
299     SDL_LockMutex(q->mutex);
300     for(pkt = q->first_pkt; pkt != NULL; pkt = pkt1) {
301         pkt1 = pkt->next;
302         av_free_packet(&pkt->pkt);
303         av_freep(&pkt);
304     }
305     q->last_pkt = NULL;
306     q->first_pkt = NULL;
307     q->nb_packets = 0;
308     q->size = 0;
309     SDL_UnlockMutex(q->mutex);
310 }
311
312 static void packet_queue_end(PacketQueue *q)
313 {
314     packet_queue_flush(q);
315     SDL_DestroyMutex(q->mutex);
316     SDL_DestroyCond(q->cond);
317 }
318
319 static int packet_queue_put(PacketQueue *q, AVPacket *pkt)
320 {
321     AVPacketList *pkt1;
322
323     /* duplicate the packet */
324     if (pkt!=&flush_pkt && av_dup_packet(pkt) < 0)
325         return -1;
326
327     pkt1 = av_malloc(sizeof(AVPacketList));
328     if (!pkt1)
329         return -1;
330     pkt1->pkt = *pkt;
331     pkt1->next = NULL;
332
333
334     SDL_LockMutex(q->mutex);
335
336     if (!q->last_pkt)
337
338         q->first_pkt = pkt1;
339     else
340         q->last_pkt->next = pkt1;
341     q->last_pkt = pkt1;
342     q->nb_packets++;
343     q->size += pkt1->pkt.size + sizeof(*pkt1);
344     /* XXX: should duplicate packet data in DV case */
345     SDL_CondSignal(q->cond);
346
347     SDL_UnlockMutex(q->mutex);
348     return 0;
349 }
350
351 static void packet_queue_abort(PacketQueue *q)
352 {
353     SDL_LockMutex(q->mutex);
354
355     q->abort_request = 1;
356
357     SDL_CondSignal(q->cond);
358
359     SDL_UnlockMutex(q->mutex);
360 }
361
362 /* return < 0 if aborted, 0 if no packet and > 0 if packet.  */
363 static int packet_queue_get(PacketQueue *q, AVPacket *pkt, int block)
364 {
365     AVPacketList *pkt1;
366     int ret;
367
368     SDL_LockMutex(q->mutex);
369
370     for(;;) {
371         if (q->abort_request) {
372             ret = -1;
373             break;
374         }
375
376         pkt1 = q->first_pkt;
377         if (pkt1) {
378             q->first_pkt = pkt1->next;
379             if (!q->first_pkt)
380                 q->last_pkt = NULL;
381             q->nb_packets--;
382             q->size -= pkt1->pkt.size + sizeof(*pkt1);
383             *pkt = pkt1->pkt;
384             av_free(pkt1);
385             ret = 1;
386             break;
387         } else if (!block) {
388             ret = 0;
389             break;
390         } else {
391             SDL_CondWait(q->cond, q->mutex);
392         }
393     }
394     SDL_UnlockMutex(q->mutex);
395     return ret;
396 }
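/* Typical consumer pattern (this is what the decoding loops below do): block
   on packet_queue_get() until a packet arrives or the queue is aborted.

       AVPacket pkt;
       for (;;) {
           if (packet_queue_get(&is->videoq, &pkt, 1) < 0)
               break;                  // aborted: stop decoding
           // ... feed pkt to the decoder ...
           av_free_packet(&pkt);
       }
*/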
397
398 static inline void fill_rectangle(SDL_Surface *screen,
399                                   int x, int y, int w, int h, int color)
400 {
401     SDL_Rect rect;
402     rect.x = x;
403     rect.y = y;
404     rect.w = w;
405     rect.h = h;
406     SDL_FillRect(screen, &rect, color);
407 }
408
409 #define ALPHA_BLEND(a, oldp, newp, s)\
410 ((((oldp << s) * (255 - (a))) + (newp * (a))) / (255 << s))
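/* With s == 0 this reduces to (oldp*(255-a) + newp*a) / 255, a plain 8-bit
   alpha blend, e.g. ALPHA_BLEND(128, 0, 255, 0) == 128. A non-zero s lets
   blend_subrect() pass newp values that are sums of 2^s chroma samples:
   oldp is pre-scaled by << s and the result is divided by 255 << s. */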
411
412 #define RGBA_IN(r, g, b, a, s)\
413 {\
414     unsigned int v = ((const uint32_t *)(s))[0];\
415     a = (v >> 24) & 0xff;\
416     r = (v >> 16) & 0xff;\
417     g = (v >> 8) & 0xff;\
418     b = v & 0xff;\
419 }
420
421 #define YUVA_IN(y, u, v, a, s, pal)\
422 {\
423     unsigned int val = ((const uint32_t *)(pal))[*(const uint8_t*)(s)];\
424     a = (val >> 24) & 0xff;\
425     y = (val >> 16) & 0xff;\
426     u = (val >> 8) & 0xff;\
427     v = val & 0xff;\
428 }
429
430 #define YUVA_OUT(d, y, u, v, a)\
431 {\
432     ((uint32_t *)(d))[0] = (a << 24) | (y << 16) | (u << 8) | v;\
433 }
434
435
436 #define BPP 1
437
438 static void blend_subrect(AVPicture *dst, const AVSubtitleRect *rect, int imgw, int imgh)
439 {
440     int wrap, wrap3, width2, skip2;
441     int y, u, v, a, u1, v1, a1, w, h;
442     uint8_t *lum, *cb, *cr;
443     const uint8_t *p;
444     const uint32_t *pal;
445     int dstx, dsty, dstw, dsth;
446
447     dstw = av_clip(rect->w, 0, imgw);
448     dsth = av_clip(rect->h, 0, imgh);
449     dstx = av_clip(rect->x, 0, imgw - dstw);
450     dsty = av_clip(rect->y, 0, imgh - dsth);
451     lum = dst->data[0] + dsty * dst->linesize[0];
452     cb = dst->data[1] + (dsty >> 1) * dst->linesize[1];
453     cr = dst->data[2] + (dsty >> 1) * dst->linesize[2];
454
455     width2 = ((dstw + 1) >> 1) + (dstx & ~dstw & 1);
456     skip2 = dstx >> 1;
457     wrap = dst->linesize[0];
458     wrap3 = rect->pict.linesize[0];
459     p = rect->pict.data[0];
460     pal = (const uint32_t *)rect->pict.data[1];  /* Now in YCrCb! */
461
462     if (dsty & 1) {
463         lum += dstx;
464         cb += skip2;
465         cr += skip2;
466
467         if (dstx & 1) {
468             YUVA_IN(y, u, v, a, p, pal);
469             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
470             cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
471             cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
472             cb++;
473             cr++;
474             lum++;
475             p += BPP;
476         }
477         for(w = dstw - (dstx & 1); w >= 2; w -= 2) {
478             YUVA_IN(y, u, v, a, p, pal);
479             u1 = u;
480             v1 = v;
481             a1 = a;
482             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
483
484             YUVA_IN(y, u, v, a, p + BPP, pal);
485             u1 += u;
486             v1 += v;
487             a1 += a;
488             lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
489             cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
490             cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
491             cb++;
492             cr++;
493             p += 2 * BPP;
494             lum += 2;
495         }
496         if (w) {
497             YUVA_IN(y, u, v, a, p, pal);
498             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
499             cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
500             cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
501             p++;
502             lum++;
503         }
504         p += wrap3 - dstw * BPP;
505         lum += wrap - dstw - dstx;
506         cb += dst->linesize[1] - width2 - skip2;
507         cr += dst->linesize[2] - width2 - skip2;
508     }
509     for(h = dsth - (dsty & 1); h >= 2; h -= 2) {
510         lum += dstx;
511         cb += skip2;
512         cr += skip2;
513
514         if (dstx & 1) {
515             YUVA_IN(y, u, v, a, p, pal);
516             u1 = u;
517             v1 = v;
518             a1 = a;
519             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
520             p += wrap3;
521             lum += wrap;
522             YUVA_IN(y, u, v, a, p, pal);
523             u1 += u;
524             v1 += v;
525             a1 += a;
526             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
527             cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
528             cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
529             cb++;
530             cr++;
531             p += -wrap3 + BPP;
532             lum += -wrap + 1;
533         }
534         for(w = dstw - (dstx & 1); w >= 2; w -= 2) {
535             YUVA_IN(y, u, v, a, p, pal);
536             u1 = u;
537             v1 = v;
538             a1 = a;
539             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
540
541             YUVA_IN(y, u, v, a, p + BPP, pal);
542             u1 += u;
543             v1 += v;
544             a1 += a;
545             lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
546             p += wrap3;
547             lum += wrap;
548
549             YUVA_IN(y, u, v, a, p, pal);
550             u1 += u;
551             v1 += v;
552             a1 += a;
553             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
554
555             YUVA_IN(y, u, v, a, p + BPP, pal);
556             u1 += u;
557             v1 += v;
558             a1 += a;
559             lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
560
561             cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 2);
562             cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 2);
563
564             cb++;
565             cr++;
566             p += -wrap3 + 2 * BPP;
567             lum += -wrap + 2;
568         }
569         if (w) {
570             YUVA_IN(y, u, v, a, p, pal);
571             u1 = u;
572             v1 = v;
573             a1 = a;
574             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
575             p += wrap3;
576             lum += wrap;
577             YUVA_IN(y, u, v, a, p, pal);
578             u1 += u;
579             v1 += v;
580             a1 += a;
581             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
582             cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
583             cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
584             cb++;
585             cr++;
586             p += -wrap3 + BPP;
587             lum += -wrap + 1;
588         }
589         p += wrap3 + (wrap3 - dstw * BPP);
590         lum += wrap + (wrap - dstw - dstx);
591         cb += dst->linesize[1] - width2 - skip2;
592         cr += dst->linesize[2] - width2 - skip2;
593     }
594     /* handle odd height */
595     if (h) {
596         lum += dstx;
597         cb += skip2;
598         cr += skip2;
599
600         if (dstx & 1) {
601             YUVA_IN(y, u, v, a, p, pal);
602             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
603             cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
604             cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
605             cb++;
606             cr++;
607             lum++;
608             p += BPP;
609         }
610         for(w = dstw - (dstx & 1); w >= 2; w -= 2) {
611             YUVA_IN(y, u, v, a, p, pal);
612             u1 = u;
613             v1 = v;
614             a1 = a;
615             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
616
617             YUVA_IN(y, u, v, a, p + BPP, pal);
618             u1 += u;
619             v1 += v;
620             a1 += a;
621             lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
622             cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u, 1);
623             cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v, 1);
624             cb++;
625             cr++;
626             p += 2 * BPP;
627             lum += 2;
628         }
629         if (w) {
630             YUVA_IN(y, u, v, a, p, pal);
631             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
632             cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
633             cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
634         }
635     }
636 }
637
638 static void free_subpicture(SubPicture *sp)
639 {
640     avsubtitle_free(&sp->sub);
641 }
642
643 static void video_image_display(VideoState *is)
644 {
645     VideoPicture *vp;
646     SubPicture *sp;
647     AVPicture pict;
648     float aspect_ratio;
649     int width, height, x, y;
650     SDL_Rect rect;
651     int i;
652
653     vp = &is->pictq[is->pictq_rindex];
654     if (vp->bmp) {
655 #if CONFIG_AVFILTER
656          if (vp->picref->video->pixel_aspect.num == 0)
657              aspect_ratio = 0;
658          else
659              aspect_ratio = av_q2d(vp->picref->video->pixel_aspect);
660 #else
661
662         /* XXX: use variable in the frame */
663         if (is->video_st->sample_aspect_ratio.num)
664             aspect_ratio = av_q2d(is->video_st->sample_aspect_ratio);
665         else if (is->video_st->codec->sample_aspect_ratio.num)
666             aspect_ratio = av_q2d(is->video_st->codec->sample_aspect_ratio);
667         else
668             aspect_ratio = 0;
669 #endif
670         if (aspect_ratio <= 0.0)
671             aspect_ratio = 1.0;
672         aspect_ratio *= (float)vp->width / (float)vp->height;
673
674         if (is->subtitle_st)
675         {
676             if (is->subpq_size > 0)
677             {
678                 sp = &is->subpq[is->subpq_rindex];
679
680                 if (vp->pts >= sp->pts + ((float) sp->sub.start_display_time / 1000))
681                 {
682                     SDL_LockYUVOverlay (vp->bmp);
683
684                     pict.data[0] = vp->bmp->pixels[0];
685                     pict.data[1] = vp->bmp->pixels[2];
686                     pict.data[2] = vp->bmp->pixels[1];
687
688                     pict.linesize[0] = vp->bmp->pitches[0];
689                     pict.linesize[1] = vp->bmp->pitches[2];
690                     pict.linesize[2] = vp->bmp->pitches[1];
691
692                     for (i = 0; i < sp->sub.num_rects; i++)
693                         blend_subrect(&pict, sp->sub.rects[i],
694                                       vp->bmp->w, vp->bmp->h);
695
696                     SDL_UnlockYUVOverlay (vp->bmp);
697                 }
698             }
699         }
700
701
702         /* XXX: we assume the screen has a 1:1 pixel aspect ratio */
703         height = is->height;
704         width = ((int)rint(height * aspect_ratio)) & ~1;
705         if (width > is->width) {
706             width = is->width;
707             height = ((int)rint(width / aspect_ratio)) & ~1;
708         }
709         x = (is->width - width) / 2;
710         y = (is->height - height) / 2;
711         is->no_background = 0;
712         rect.x = is->xleft + x;
713         rect.y = is->ytop  + y;
714         rect.w = width;
715         rect.h = height;
716         SDL_DisplayYUVOverlay(vp->bmp, &rect);
717     }
718 }
719
720 /* get the current audio output buffer size, in bytes. With SDL, we
721    cannot get precise information about the hardware buffer fullness */
722 static int audio_write_get_buf_size(VideoState *is)
723 {
724     return is->audio_buf_size - is->audio_buf_index;
725 }
726
727 static inline int compute_mod(int a, int b)
728 {
729     a = a % b;
730     if (a >= 0)
731         return a;
732     else
733         return a + b;
734 }
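/* compute_mod() returns a remainder normalized into [0, b): e.g.
   compute_mod(-3, SAMPLE_ARRAY_SIZE) == SAMPLE_ARRAY_SIZE - 3, so indices
   stepped backwards from sample_array_index wrap around the circular
   sample buffer. */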
735
736 static void video_audio_display(VideoState *s)
737 {
738     int i, i_start, x, y1, y, ys, delay, n, nb_display_channels;
739     int ch, channels, h, h2, bgcolor, fgcolor;
740     int64_t time_diff;
741     int rdft_bits, nb_freq;
742
743     for(rdft_bits=1; (1<<rdft_bits)<2*s->height; rdft_bits++)
744         ;
745     nb_freq= 1<<(rdft_bits-1);
746
747     /* compute display index : center on currently output samples */
748     channels = s->audio_st->codec->channels;
749     nb_display_channels = channels;
750     if (!s->paused) {
751         int data_used= s->show_audio==1 ? s->width : (2*nb_freq);
752         n = 2 * channels;
753         delay = audio_write_get_buf_size(s);
754         delay /= n;
755
756         /* to be more precise, we take into account the time spent since
757            the last buffer computation */
758         if (audio_callback_time) {
759             time_diff = av_gettime() - audio_callback_time;
760             delay -= (time_diff * s->audio_st->codec->sample_rate) / 1000000;
761         }
762
763         delay += 2*data_used;
764         if (delay < data_used)
765             delay = data_used;
766
767         i_start= x = compute_mod(s->sample_array_index - delay * channels, SAMPLE_ARRAY_SIZE);
768         if(s->show_audio==1){
769             h= INT_MIN;
770             for(i=0; i<1000; i+=channels){
771                 int idx= (SAMPLE_ARRAY_SIZE + x - i) % SAMPLE_ARRAY_SIZE;
772                 int a= s->sample_array[idx];
773                 int b= s->sample_array[(idx + 4*channels)%SAMPLE_ARRAY_SIZE];
774                 int c= s->sample_array[(idx + 5*channels)%SAMPLE_ARRAY_SIZE];
775                 int d= s->sample_array[(idx + 9*channels)%SAMPLE_ARRAY_SIZE];
776                 int score= a-d;
777                 if(h<score && (b^c)<0){
778                     h= score;
779                     i_start= idx;
780                 }
781             }
782         }
783
784         s->last_i_start = i_start;
785     } else {
786         i_start = s->last_i_start;
787     }
788
789     bgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0x00);
790     if(s->show_audio==1){
791         fill_rectangle(screen,
792                        s->xleft, s->ytop, s->width, s->height,
793                        bgcolor);
794
795         fgcolor = SDL_MapRGB(screen->format, 0xff, 0xff, 0xff);
796
797         /* total height for one channel */
798         h = s->height / nb_display_channels;
799         /* graph height / 2 */
800         h2 = (h * 9) / 20;
801         for(ch = 0;ch < nb_display_channels; ch++) {
802             i = i_start + ch;
803             y1 = s->ytop + ch * h + (h / 2); /* position of center line */
804             for(x = 0; x < s->width; x++) {
805                 y = (s->sample_array[i] * h2) >> 15;
806                 if (y < 0) {
807                     y = -y;
808                     ys = y1 - y;
809                 } else {
810                     ys = y1;
811                 }
812                 fill_rectangle(screen,
813                                s->xleft + x, ys, 1, y,
814                                fgcolor);
815                 i += channels;
816                 if (i >= SAMPLE_ARRAY_SIZE)
817                     i -= SAMPLE_ARRAY_SIZE;
818             }
819         }
820
821         fgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0xff);
822
823         for(ch = 1;ch < nb_display_channels; ch++) {
824             y = s->ytop + ch * h;
825             fill_rectangle(screen,
826                            s->xleft, y, s->width, 1,
827                            fgcolor);
828         }
829         SDL_UpdateRect(screen, s->xleft, s->ytop, s->width, s->height);
830     }else{
831         nb_display_channels= FFMIN(nb_display_channels, 2);
832         if(rdft_bits != s->rdft_bits){
833             av_rdft_end(s->rdft);
834             av_free(s->rdft_data);
835             s->rdft = av_rdft_init(rdft_bits, DFT_R2C);
836             s->rdft_bits= rdft_bits;
837             s->rdft_data= av_malloc(4*nb_freq*sizeof(*s->rdft_data));
838         }
839         {
840             FFTSample *data[2];
841             for(ch = 0;ch < nb_display_channels; ch++) {
842                 data[ch] = s->rdft_data + 2*nb_freq*ch;
843                 i = i_start + ch;
844                 for(x = 0; x < 2*nb_freq; x++) {
845                     double w= (x-nb_freq)*(1.0/nb_freq);
846                     data[ch][x]= s->sample_array[i]*(1.0-w*w);
847                     i += channels;
848                     if (i >= SAMPLE_ARRAY_SIZE)
849                         i -= SAMPLE_ARRAY_SIZE;
850                 }
851                 av_rdft_calc(s->rdft, data[ch]);
852             }
853             //least efficient way to do this; we should of course access the pixels directly, but it's more than fast enough
854             for(y=0; y<s->height; y++){
855                 double w= 1/sqrt(nb_freq);
856                 int a= sqrt(w*sqrt(data[0][2*y+0]*data[0][2*y+0] + data[0][2*y+1]*data[0][2*y+1]));
857                 int b= (nb_display_channels == 2 ) ? sqrt(w*sqrt(data[1][2*y+0]*data[1][2*y+0]
858                        + data[1][2*y+1]*data[1][2*y+1])) : a;
859                 a= FFMIN(a,255);
860                 b= FFMIN(b,255);
861                 fgcolor = SDL_MapRGB(screen->format, a, b, (a+b)/2);
862
863                 fill_rectangle(screen,
864                             s->xpos, s->height-y, 1, 1,
865                             fgcolor);
866             }
867         }
868         SDL_UpdateRect(screen, s->xpos, s->ytop, 1, s->height);
869         s->xpos++;
870         if(s->xpos >= s->width)
871             s->xpos= s->xleft;
872     }
873 }
874
875 static int video_open(VideoState *is){
876     int flags = SDL_HWSURFACE|SDL_ASYNCBLIT|SDL_HWACCEL;
877     int w,h;
878
879     if(is_full_screen) flags |= SDL_FULLSCREEN;
880     else               flags |= SDL_RESIZABLE;
881
882     if (is_full_screen && fs_screen_width) {
883         w = fs_screen_width;
884         h = fs_screen_height;
885     } else if(!is_full_screen && screen_width){
886         w = screen_width;
887         h = screen_height;
888 #if CONFIG_AVFILTER
889     }else if (is->out_video_filter && is->out_video_filter->inputs[0]){
890         w = is->out_video_filter->inputs[0]->w;
891         h = is->out_video_filter->inputs[0]->h;
892 #else
893     }else if (is->video_st && is->video_st->codec->width){
894         w = is->video_st->codec->width;
895         h = is->video_st->codec->height;
896 #endif
897     } else {
898         w = 640;
899         h = 480;
900     }
901     if(screen && is->width == screen->w && screen->w == w
902        && is->height== screen->h && screen->h == h)
903         return 0;
904
905 #ifndef __APPLE__
906     screen = SDL_SetVideoMode(w, h, 0, flags);
907 #else
908     /* setting bits_per_pixel = 0 or 32 causes blank video on OS X */
909     screen = SDL_SetVideoMode(w, h, 24, flags);
910 #endif
911     if (!screen) {
912         fprintf(stderr, "SDL: could not set video mode - exiting\n");
913         return -1;
914     }
915     if (!window_title)
916         window_title = input_filename;
917     SDL_WM_SetCaption(window_title, window_title);
918
919     is->width = screen->w;
920     is->height = screen->h;
921
922     return 0;
923 }
924
925 /* display the current picture, if any */
926 static void video_display(VideoState *is)
927 {
928     if(!screen)
929         video_open(cur_stream);
930     if (is->audio_st && is->show_audio)
931         video_audio_display(is);
932     else if (is->video_st)
933         video_image_display(is);
934 }
935
936 static int refresh_thread(void *opaque)
937 {
938     VideoState *is= opaque;
939     while(!is->abort_request){
940         SDL_Event event;
941         event.type = FF_REFRESH_EVENT;
942         event.user.data1 = opaque;
943         if(!is->refresh){
944             is->refresh=1;
945             SDL_PushEvent(&event);
946         }
947         usleep(is->audio_st && is->show_audio ? rdftspeed*1000 : 5000); //FIXME ideally we should wait the correct time but SDL's event passing is so slow it would be silly
948     }
949     return 0;
950 }
951
952 /* get the current audio clock value */
953 static double get_audio_clock(VideoState *is)
954 {
955     double pts;
956     int hw_buf_size, bytes_per_sec;
957     pts = is->audio_clock;
958     hw_buf_size = audio_write_get_buf_size(is);
959     bytes_per_sec = 0;
960     if (is->audio_st) {
961         bytes_per_sec = is->audio_st->codec->sample_rate *
962             2 * is->audio_st->codec->channels;
963     }
964     if (bytes_per_sec)
965         pts -= (double)hw_buf_size / bytes_per_sec;
966     return pts;
967 }
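/* Example: for 44100 Hz stereo 16-bit audio, bytes_per_sec is
   44100 * 2 * 2 = 176400, so 4096 bytes still waiting in the output buffer
   pull the returned clock back by roughly 23 ms. */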
968
969 /* get the current video clock value */
970 static double get_video_clock(VideoState *is)
971 {
972     if (is->paused) {
973         return is->video_current_pts;
974     } else {
975         return is->video_current_pts_drift + av_gettime() / 1000000.0;
976     }
977 }
978
979 /* get the current external clock value */
980 static double get_external_clock(VideoState *is)
981 {
982     int64_t ti;
983     ti = av_gettime();
984     return is->external_clock + ((ti - is->external_clock_time) * 1e-6);
985 }
986
987 /* get the current master clock value */
988 static double get_master_clock(VideoState *is)
989 {
990     double val;
991
992     if (is->av_sync_type == AV_SYNC_VIDEO_MASTER) {
993         if (is->video_st)
994             val = get_video_clock(is);
995         else
996             val = get_audio_clock(is);
997     } else if (is->av_sync_type == AV_SYNC_AUDIO_MASTER) {
998         if (is->audio_st)
999             val = get_audio_clock(is);
1000         else
1001             val = get_video_clock(is);
1002     } else {
1003         val = get_external_clock(is);
1004     }
1005     return val;
1006 }
1007
1008 /* seek in the stream */
1009 static void stream_seek(VideoState *is, int64_t pos, int64_t rel, int seek_by_bytes)
1010 {
1011     if (!is->seek_req) {
1012         is->seek_pos = pos;
1013         is->seek_rel = rel;
1014         is->seek_flags &= ~AVSEEK_FLAG_BYTE;
1015         if (seek_by_bytes)
1016             is->seek_flags |= AVSEEK_FLAG_BYTE;
1017         is->seek_req = 1;
1018     }
1019 }
1020
1021 /* pause or resume the video */
1022 static void stream_pause(VideoState *is)
1023 {
1024     if (is->paused) {
1025         is->frame_timer += av_gettime() / 1000000.0 + is->video_current_pts_drift - is->video_current_pts;
1026         if(is->read_pause_return != AVERROR(ENOSYS)){
1027             is->video_current_pts = is->video_current_pts_drift + av_gettime() / 1000000.0;
1028         }
1029         is->video_current_pts_drift = is->video_current_pts - av_gettime() / 1000000.0;
1030     }
1031     is->paused = !is->paused;
1032 }
1033
1034 static double compute_target_time(double frame_current_pts, VideoState *is)
1035 {
1036     double delay, sync_threshold, diff;
1037
1038     /* compute nominal delay */
1039     delay = frame_current_pts - is->frame_last_pts;
1040     if (delay <= 0 || delay >= 10.0) {
1041         /* if incorrect delay, use previous one */
1042         delay = is->frame_last_delay;
1043     } else {
1044         is->frame_last_delay = delay;
1045     }
1046     is->frame_last_pts = frame_current_pts;
1047
1048     /* update delay to follow master synchronisation source */
1049     if (((is->av_sync_type == AV_SYNC_AUDIO_MASTER && is->audio_st) ||
1050          is->av_sync_type == AV_SYNC_EXTERNAL_CLOCK)) {
1051         /* if video is slave, we try to correct big delays by
1052            duplicating or deleting a frame */
1053         diff = get_video_clock(is) - get_master_clock(is);
1054
1055         /* skip or repeat frame. We take into account the
1056            delay to compute the threshold. I still don't know
1057            if it is the best guess */
1058         sync_threshold = FFMAX(AV_SYNC_THRESHOLD, delay);
1059         if (fabs(diff) < AV_NOSYNC_THRESHOLD) {
1060             if (diff <= -sync_threshold)
1061                 delay = 0;
1062             else if (diff >= sync_threshold)
1063                 delay = 2 * delay;
1064         }
1065     }
1066     is->frame_timer += delay;
1067
1068     av_dlog(NULL, "video: delay=%0.3f pts=%0.3f A-V=%f\n",
1069             delay, frame_current_pts, -diff);
1070
1071     return is->frame_timer;
1072 }
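/* Example: with a nominal 40 ms frame delay (25 fps) and audio as master,
   sync_threshold is 40 ms; if the video clock is 50 ms behind the master the
   delay collapses to 0 and the late frame is shown immediately, while a video
   clock 50 ms ahead doubles the delay to 80 ms. */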
1073
1074 /* called to display each frame */
1075 static void video_refresh_timer(void *opaque)
1076 {
1077     VideoState *is = opaque;
1078     VideoPicture *vp;
1079
1080     SubPicture *sp, *sp2;
1081
1082     if (is->video_st) {
1083 retry:
1084         if (is->pictq_size == 0) {
1085             //nothing to do, no picture to display in the queue
1086         } else {
1087             double time= av_gettime()/1000000.0;
1088             double next_target;
1089             /* dequeue the picture */
1090             vp = &is->pictq[is->pictq_rindex];
1091
1092             if(time < vp->target_clock)
1093                 return;
1094             /* update current video pts */
1095             is->video_current_pts = vp->pts;
1096             is->video_current_pts_drift = is->video_current_pts - time;
1097             is->video_current_pos = vp->pos;
1098             if(is->pictq_size > 1){
1099                 VideoPicture *nextvp= &is->pictq[(is->pictq_rindex+1)%VIDEO_PICTURE_QUEUE_SIZE];
1100                 assert(nextvp->target_clock >= vp->target_clock);
1101                 next_target= nextvp->target_clock;
1102             }else{
1103                 next_target= vp->target_clock + is->video_clock - vp->pts; //FIXME pass durations cleanly
1104             }
1105             if(framedrop && time > next_target){
1106                 is->skip_frames *= 1.0 + FRAME_SKIP_FACTOR;
1107                 if(is->pictq_size > 1 || time > next_target + 0.5){
1108                     /* update queue size and signal for next picture */
1109                     if (++is->pictq_rindex == VIDEO_PICTURE_QUEUE_SIZE)
1110                         is->pictq_rindex = 0;
1111
1112                     SDL_LockMutex(is->pictq_mutex);
1113                     is->pictq_size--;
1114                     SDL_CondSignal(is->pictq_cond);
1115                     SDL_UnlockMutex(is->pictq_mutex);
1116                     goto retry;
1117                 }
1118             }
1119
1120             if(is->subtitle_st) {
1121                 if (is->subtitle_stream_changed) {
1122                     SDL_LockMutex(is->subpq_mutex);
1123
1124                     while (is->subpq_size) {
1125                         free_subpicture(&is->subpq[is->subpq_rindex]);
1126
1127                         /* update queue size and signal for next picture */
1128                         if (++is->subpq_rindex == SUBPICTURE_QUEUE_SIZE)
1129                             is->subpq_rindex = 0;
1130
1131                         is->subpq_size--;
1132                     }
1133                     is->subtitle_stream_changed = 0;
1134
1135                     SDL_CondSignal(is->subpq_cond);
1136                     SDL_UnlockMutex(is->subpq_mutex);
1137                 } else {
1138                     if (is->subpq_size > 0) {
1139                         sp = &is->subpq[is->subpq_rindex];
1140
1141                         if (is->subpq_size > 1)
1142                             sp2 = &is->subpq[(is->subpq_rindex + 1) % SUBPICTURE_QUEUE_SIZE];
1143                         else
1144                             sp2 = NULL;
1145
1146                         if ((is->video_current_pts > (sp->pts + ((float) sp->sub.end_display_time / 1000)))
1147                                 || (sp2 && is->video_current_pts > (sp2->pts + ((float) sp2->sub.start_display_time / 1000))))
1148                         {
1149                             free_subpicture(sp);
1150
1151                             /* update queue size and signal for next picture */
1152                             if (++is->subpq_rindex == SUBPICTURE_QUEUE_SIZE)
1153                                 is->subpq_rindex = 0;
1154
1155                             SDL_LockMutex(is->subpq_mutex);
1156                             is->subpq_size--;
1157                             SDL_CondSignal(is->subpq_cond);
1158                             SDL_UnlockMutex(is->subpq_mutex);
1159                         }
1160                     }
1161                 }
1162             }
1163
1164             /* display picture */
1165             if (!display_disable)
1166                 video_display(is);
1167
1168             /* update queue size and signal for next picture */
1169             if (++is->pictq_rindex == VIDEO_PICTURE_QUEUE_SIZE)
1170                 is->pictq_rindex = 0;
1171
1172             SDL_LockMutex(is->pictq_mutex);
1173             is->pictq_size--;
1174             SDL_CondSignal(is->pictq_cond);
1175             SDL_UnlockMutex(is->pictq_mutex);
1176         }
1177     } else if (is->audio_st) {
1178         /* draw the next audio frame */
1179
1180         /* if only audio stream, then display the audio bars (better
1181            than nothing, just to test the implementation) */
1182
1183         /* display picture */
1184         if (!display_disable)
1185             video_display(is);
1186     }
1187     if (show_status) {
1188         static int64_t last_time;
1189         int64_t cur_time;
1190         int aqsize, vqsize, sqsize;
1191         double av_diff;
1192
1193         cur_time = av_gettime();
1194         if (!last_time || (cur_time - last_time) >= 30000) {
1195             aqsize = 0;
1196             vqsize = 0;
1197             sqsize = 0;
1198             if (is->audio_st)
1199                 aqsize = is->audioq.size;
1200             if (is->video_st)
1201                 vqsize = is->videoq.size;
1202             if (is->subtitle_st)
1203                 sqsize = is->subtitleq.size;
1204             av_diff = 0;
1205             if (is->audio_st && is->video_st)
1206                 av_diff = get_audio_clock(is) - get_video_clock(is);
1207             printf("%7.2f A-V:%7.3f s:%3.1f aq=%5dKB vq=%5dKB sq=%5dB f=%"PRId64"/%"PRId64"   \r",
1208                    get_master_clock(is), av_diff, FFMAX(is->skip_frames-1, 0), aqsize / 1024, vqsize / 1024, sqsize, is->pts_ctx.num_faulty_dts, is->pts_ctx.num_faulty_pts);
1209             fflush(stdout);
1210             last_time = cur_time;
1211         }
1212     }
1213 }
1214
1215 static void stream_close(VideoState *is)
1216 {
1217     VideoPicture *vp;
1218     int i;
1219     /* XXX: use a special url_shutdown call to abort parse cleanly */
1220     is->abort_request = 1;
1221     SDL_WaitThread(is->parse_tid, NULL);
1222     SDL_WaitThread(is->refresh_tid, NULL);
1223
1224     /* free all pictures */
1225     for(i=0;i<VIDEO_PICTURE_QUEUE_SIZE; i++) {
1226         vp = &is->pictq[i];
1227 #if CONFIG_AVFILTER
1228         if (vp->picref) {
1229             avfilter_unref_buffer(vp->picref);
1230             vp->picref = NULL;
1231         }
1232 #endif
1233         if (vp->bmp) {
1234             SDL_FreeYUVOverlay(vp->bmp);
1235             vp->bmp = NULL;
1236         }
1237     }
1238     SDL_DestroyMutex(is->pictq_mutex);
1239     SDL_DestroyCond(is->pictq_cond);
1240     SDL_DestroyMutex(is->subpq_mutex);
1241     SDL_DestroyCond(is->subpq_cond);
1242 #if !CONFIG_AVFILTER
1243     if (is->img_convert_ctx)
1244         sws_freeContext(is->img_convert_ctx);
1245 #endif
1246     av_free(is);
1247 }
1248
1249 static void do_exit(void)
1250 {
1251     if (cur_stream) {
1252         stream_close(cur_stream);
1253         cur_stream = NULL;
1254     }
1255     uninit_opts();
1256 #if CONFIG_AVFILTER
1257     avfilter_uninit();
1258 #endif
1259     if (show_status)
1260         printf("\n");
1261     SDL_Quit();
1262     av_log(NULL, AV_LOG_QUIET, "");
1263     exit(0);
1264 }
1265
1266 /* allocate a picture (needs to do that in main thread to avoid
1267    potential locking problems) */
1268 static void alloc_picture(void *opaque)
1269 {
1270     VideoState *is = opaque;
1271     VideoPicture *vp;
1272
1273     vp = &is->pictq[is->pictq_windex];
1274
1275     if (vp->bmp)
1276         SDL_FreeYUVOverlay(vp->bmp);
1277
1278 #if CONFIG_AVFILTER
1279     if (vp->picref)
1280         avfilter_unref_buffer(vp->picref);
1281     vp->picref = NULL;
1282
1283     vp->width   = is->out_video_filter->inputs[0]->w;
1284     vp->height  = is->out_video_filter->inputs[0]->h;
1285     vp->pix_fmt = is->out_video_filter->inputs[0]->format;
1286 #else
1287     vp->width   = is->video_st->codec->width;
1288     vp->height  = is->video_st->codec->height;
1289     vp->pix_fmt = is->video_st->codec->pix_fmt;
1290 #endif
1291
1292     vp->bmp = SDL_CreateYUVOverlay(vp->width, vp->height,
1293                                    SDL_YV12_OVERLAY,
1294                                    screen);
1295     if (!vp->bmp || vp->bmp->pitches[0] < vp->width) {
1296         /* SDL allocates a buffer smaller than requested if the video
1297          * overlay hardware is unable to support the requested size. */
1298         fprintf(stderr, "Error: the video system does not support an image\n"
1299                         "size of %dx%d pixels. Try using -lowres or -vf \"scale=w:h\"\n"
1300                         "to reduce the image size.\n", vp->width, vp->height );
1301         do_exit();
1302     }
1303
1304     SDL_LockMutex(is->pictq_mutex);
1305     vp->allocated = 1;
1306     SDL_CondSignal(is->pictq_cond);
1307     SDL_UnlockMutex(is->pictq_mutex);
1308 }
1309
1310 /**
1311  *
1312  * @param pts the dts of the pkt / pts of the frame and guessed if not known
1313  */
1314 static int queue_picture(VideoState *is, AVFrame *src_frame, double pts, int64_t pos)
1315 {
1316     VideoPicture *vp;
1317 #if CONFIG_AVFILTER
1318     AVPicture pict_src;
1319 #else
1320     int dst_pix_fmt = PIX_FMT_YUV420P;
1321 #endif
1322     /* wait until we have space to put a new picture */
1323     SDL_LockMutex(is->pictq_mutex);
1324
1325     if(is->pictq_size>=VIDEO_PICTURE_QUEUE_SIZE && !is->refresh)
1326         is->skip_frames= FFMAX(1.0 - FRAME_SKIP_FACTOR, is->skip_frames * (1.0-FRAME_SKIP_FACTOR));
1327
1328     while (is->pictq_size >= VIDEO_PICTURE_QUEUE_SIZE &&
1329            !is->videoq.abort_request) {
1330         SDL_CondWait(is->pictq_cond, is->pictq_mutex);
1331     }
1332     SDL_UnlockMutex(is->pictq_mutex);
1333
1334     if (is->videoq.abort_request)
1335         return -1;
1336
1337     vp = &is->pictq[is->pictq_windex];
1338
1339     /* alloc or resize hardware picture buffer */
1340     if (!vp->bmp ||
1341 #if CONFIG_AVFILTER
1342         vp->width  != is->out_video_filter->inputs[0]->w ||
1343         vp->height != is->out_video_filter->inputs[0]->h) {
1344 #else
1345         vp->width != is->video_st->codec->width ||
1346         vp->height != is->video_st->codec->height) {
1347 #endif
1348         SDL_Event event;
1349
1350         vp->allocated = 0;
1351
1352         /* the allocation must be done in the main thread to avoid
1353            locking problems */
1354         event.type = FF_ALLOC_EVENT;
1355         event.user.data1 = is;
1356         SDL_PushEvent(&event);
1357
1358         /* wait until the picture is allocated */
1359         SDL_LockMutex(is->pictq_mutex);
1360         while (!vp->allocated && !is->videoq.abort_request) {
1361             SDL_CondWait(is->pictq_cond, is->pictq_mutex);
1362         }
1363         SDL_UnlockMutex(is->pictq_mutex);
1364
1365         if (is->videoq.abort_request)
1366             return -1;
1367     }
1368
1369     /* if the frame is not skipped, then display it */
1370     if (vp->bmp) {
1371         AVPicture pict;
1372 #if CONFIG_AVFILTER
1373         if(vp->picref)
1374             avfilter_unref_buffer(vp->picref);
1375         vp->picref = src_frame->opaque;
1376 #endif
1377
1378         /* get a pointer to the bitmap */
1379         SDL_LockYUVOverlay (vp->bmp);
1380
1381         memset(&pict,0,sizeof(AVPicture));
1382         pict.data[0] = vp->bmp->pixels[0];
1383         pict.data[1] = vp->bmp->pixels[2];
1384         pict.data[2] = vp->bmp->pixels[1];
1385
1386         pict.linesize[0] = vp->bmp->pitches[0];
1387         pict.linesize[1] = vp->bmp->pitches[2];
1388         pict.linesize[2] = vp->bmp->pitches[1];
1389
1390 #if CONFIG_AVFILTER
1391         pict_src.data[0] = src_frame->data[0];
1392         pict_src.data[1] = src_frame->data[1];
1393         pict_src.data[2] = src_frame->data[2];
1394
1395         pict_src.linesize[0] = src_frame->linesize[0];
1396         pict_src.linesize[1] = src_frame->linesize[1];
1397         pict_src.linesize[2] = src_frame->linesize[2];
1398
1399         //FIXME use direct rendering
1400         av_picture_copy(&pict, &pict_src,
1401                         vp->pix_fmt, vp->width, vp->height);
1402 #else
1403         sws_flags = av_get_int(sws_opts, "sws_flags", NULL);
1404         is->img_convert_ctx = sws_getCachedContext(is->img_convert_ctx,
1405             vp->width, vp->height, vp->pix_fmt, vp->width, vp->height,
1406             dst_pix_fmt, sws_flags, NULL, NULL, NULL);
1407         if (is->img_convert_ctx == NULL) {
1408             fprintf(stderr, "Cannot initialize the conversion context\n");
1409             exit(1);
1410         }
1411         sws_scale(is->img_convert_ctx, src_frame->data, src_frame->linesize,
1412                   0, vp->height, pict.data, pict.linesize);
1413 #endif
1414         /* update the bitmap content */
1415         SDL_UnlockYUVOverlay(vp->bmp);
1416
1417         vp->pts = pts;
1418         vp->pos = pos;
1419
1420         /* now we can update the picture count */
1421         if (++is->pictq_windex == VIDEO_PICTURE_QUEUE_SIZE)
1422             is->pictq_windex = 0;
1423         SDL_LockMutex(is->pictq_mutex);
1424         vp->target_clock= compute_target_time(vp->pts, is);
1425
1426         is->pictq_size++;
1427         SDL_UnlockMutex(is->pictq_mutex);
1428     }
1429     return 0;
1430 }
1431
1432 /**
1433  * compute the exact PTS for the picture if it is omitted in the stream
1434  * @param pts1 the dts of the pkt / pts of the frame
1435  */
1436 static int output_picture2(VideoState *is, AVFrame *src_frame, double pts1, int64_t pos)
1437 {
1438     double frame_delay, pts;
1439
1440     pts = pts1;
1441
1442     if (pts != 0) {
1443         /* update video clock with pts, if present */
1444         is->video_clock = pts;
1445     } else {
1446         pts = is->video_clock;
1447     }
1448     /* update video clock for next frame */
1449     frame_delay = av_q2d(is->video_st->codec->time_base);
1450     /* for MPEG2, the frame can be repeated, so we update the
1451        clock accordingly */
1452     frame_delay += src_frame->repeat_pict * (frame_delay * 0.5);
1453     is->video_clock += frame_delay;
1454
1455     return queue_picture(is, src_frame, pts, pos);
1456 }
1457
1458 static int get_video_frame(VideoState *is, AVFrame *frame, int64_t *pts, AVPacket *pkt)
1459 {
1460     int got_picture, i;
1461
1462     if (packet_queue_get(&is->videoq, pkt, 1) < 0)
1463         return -1;
1464
1465     if (pkt->data == flush_pkt.data) {
1466         avcodec_flush_buffers(is->video_st->codec);
1467
1468         SDL_LockMutex(is->pictq_mutex);
1469         //Make sure there are no long delay timers (ideally we should just flush the queue but that's harder)
1470         for (i = 0; i < VIDEO_PICTURE_QUEUE_SIZE; i++) {
1471             is->pictq[i].target_clock= 0;
1472         }
1473         while (is->pictq_size && !is->videoq.abort_request) {
1474             SDL_CondWait(is->pictq_cond, is->pictq_mutex);
1475         }
1476         is->video_current_pos = -1;
1477         SDL_UnlockMutex(is->pictq_mutex);
1478
1479         init_pts_correction(&is->pts_ctx);
1480         is->frame_last_pts = AV_NOPTS_VALUE;
1481         is->frame_last_delay = 0;
1482         is->frame_timer = (double)av_gettime() / 1000000.0;
1483         is->skip_frames = 1;
1484         is->skip_frames_index = 0;
1485         return 0;
1486     }
1487
1488     avcodec_decode_video2(is->video_st->codec, frame, &got_picture, pkt);
1489
1490     if (got_picture) {
1491         if (decoder_reorder_pts == -1) {
1492             *pts = guess_correct_pts(&is->pts_ctx, frame->pkt_pts, frame->pkt_dts);
1493         } else if (decoder_reorder_pts) {
1494             *pts = frame->pkt_pts;
1495         } else {
1496             *pts = frame->pkt_dts;
1497         }
1498
1499         if (*pts == AV_NOPTS_VALUE) {
1500             *pts = 0;
1501         }
1502
1503         is->skip_frames_index += 1;
1504         if(is->skip_frames_index >= is->skip_frames){
1505             is->skip_frames_index -= FFMAX(is->skip_frames, 1.0);
1506             return 1;
1507         }
1508
1509     }
1510     return 0;
1511 }
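/* skip_frames acts as a decode-side frame-drop ratio: video_refresh_timer()
   grows it by FRAME_SKIP_FACTOR whenever display falls behind,
   queue_picture() decays it again when the picture queue fills up, and the
   counter above only lets roughly every skip_frames-th decoded picture
   through (return 1); the rest are dropped (return 0). */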
1512
1513 #if CONFIG_AVFILTER
1514 typedef struct {
1515     VideoState *is;
1516     AVFrame *frame;
1517     int use_dr1;
1518 } FilterPriv;
1519
1520 static int input_get_buffer(AVCodecContext *codec, AVFrame *pic)
1521 {
1522     AVFilterContext *ctx = codec->opaque;
1523     AVFilterBufferRef  *ref;
1524     int perms = AV_PERM_WRITE;
1525     int i, w, h, stride[4];
1526     unsigned edge;
1527     int pixel_size;
1528
1529     if (codec->codec->capabilities & CODEC_CAP_NEG_LINESIZES)
1530         perms |= AV_PERM_NEG_LINESIZES;
1531
1532     if(pic->buffer_hints & FF_BUFFER_HINTS_VALID) {
1533         if(pic->buffer_hints & FF_BUFFER_HINTS_READABLE) perms |= AV_PERM_READ;
1534         if(pic->buffer_hints & FF_BUFFER_HINTS_PRESERVE) perms |= AV_PERM_PRESERVE;
1535         if(pic->buffer_hints & FF_BUFFER_HINTS_REUSABLE) perms |= AV_PERM_REUSE2;
1536     }
1537     if(pic->reference) perms |= AV_PERM_READ | AV_PERM_PRESERVE;
1538
1539     w = codec->width;
1540     h = codec->height;
1541     avcodec_align_dimensions2(codec, &w, &h, stride);
1542     edge = codec->flags & CODEC_FLAG_EMU_EDGE ? 0 : avcodec_get_edge_width();
1543     w += edge << 1;
1544     h += edge << 1;
1545
1546     if(!(ref = avfilter_get_video_buffer(ctx->outputs[0], perms, w, h)))
1547         return -1;
1548
1549     pixel_size = av_pix_fmt_descriptors[ref->format].comp[0].step_minus1+1;
1550     ref->video->w = codec->width;
1551     ref->video->h = codec->height;
1552     for(i = 0; i < 4; i ++) {
1553         unsigned hshift = (i == 1 || i == 2) ? av_pix_fmt_descriptors[ref->format].log2_chroma_w : 0;
1554         unsigned vshift = (i == 1 || i == 2) ? av_pix_fmt_descriptors[ref->format].log2_chroma_h : 0;
1555
1556         if (ref->data[i]) {
1557             ref->data[i]    += ((edge * pixel_size) >> hshift) + ((edge * ref->linesize[i]) >> vshift);
1558         }
1559         pic->data[i]     = ref->data[i];
1560         pic->linesize[i] = ref->linesize[i];
1561     }
1562     pic->opaque = ref;
1563     pic->age    = INT_MAX;
1564     pic->type   = FF_BUFFER_TYPE_USER;
1565     pic->reordered_opaque = codec->reordered_opaque;
1566     if(codec->pkt) pic->pkt_pts = codec->pkt->pts;
1567     else           pic->pkt_pts = AV_NOPTS_VALUE;
1568     return 0;
1569 }
1570
1571 static void input_release_buffer(AVCodecContext *codec, AVFrame *pic)
1572 {
1573     memset(pic->data, 0, sizeof(pic->data));
1574     avfilter_unref_buffer(pic->opaque);
1575 }
1576
1577 static int input_reget_buffer(AVCodecContext *codec, AVFrame *pic)
1578 {
1579     AVFilterBufferRef *ref = pic->opaque;
1580
1581     if (pic->data[0] == NULL) {
1582         pic->buffer_hints |= FF_BUFFER_HINTS_READABLE;
1583         return codec->get_buffer(codec, pic);
1584     }
1585
1586     if ((codec->width != ref->video->w) || (codec->height != ref->video->h) ||
1587         (codec->pix_fmt != ref->format)) {
1588         av_log(codec, AV_LOG_ERROR, "Picture properties changed.\n");
1589         return -1;
1590     }
1591
1592     pic->reordered_opaque = codec->reordered_opaque;
1593     if(codec->pkt) pic->pkt_pts = codec->pkt->pts;
1594     else           pic->pkt_pts = AV_NOPTS_VALUE;
1595     return 0;
1596 }
1597
1598 static int input_init(AVFilterContext *ctx, const char *args, void *opaque)
1599 {
1600     FilterPriv *priv = ctx->priv;
1601     AVCodecContext *codec;
1602     if(!opaque) return -1;
1603
1604     priv->is = opaque;
1605     codec    = priv->is->video_st->codec;
1606     codec->opaque = ctx;
1607     if(codec->codec->capabilities & CODEC_CAP_DR1) {
1608         priv->use_dr1 = 1;
1609         codec->get_buffer     = input_get_buffer;
1610         codec->release_buffer = input_release_buffer;
1611         codec->reget_buffer   = input_reget_buffer;
1612         codec->thread_safe_callbacks = 1;
1613     }
1614
1615     priv->frame = avcodec_alloc_frame();
1616
1617     return 0;
1618 }
1619
1620 static void input_uninit(AVFilterContext *ctx)
1621 {
1622     FilterPriv *priv = ctx->priv;
1623     av_free(priv->frame);
1624 }
1625
1626 static int input_request_frame(AVFilterLink *link)
1627 {
1628     FilterPriv *priv = link->src->priv;
1629     AVFilterBufferRef *picref;
1630     int64_t pts = 0;
1631     AVPacket pkt;
1632     int ret;
1633
1634     while (!(ret = get_video_frame(priv->is, priv->frame, &pts, &pkt)))
1635         av_free_packet(&pkt);
1636     if (ret < 0)
1637         return -1;
1638
1639     if(priv->use_dr1) {
1640         picref = avfilter_ref_buffer(priv->frame->opaque, ~0);
1641     } else {
1642         picref = avfilter_get_video_buffer(link, AV_PERM_WRITE, link->w, link->h);
1643         av_image_copy(picref->data, picref->linesize,
1644                       priv->frame->data, priv->frame->linesize,
1645                       picref->format, link->w, link->h);
1646     }
1647     av_free_packet(&pkt);
1648
1649     picref->pts = pts;
1650     picref->pos = pkt.pos;
1651     picref->video->pixel_aspect = priv->is->video_st->codec->sample_aspect_ratio;
1652     avfilter_start_frame(link, picref);
1653     avfilter_draw_slice(link, 0, link->h, 1);
1654     avfilter_end_frame(link);
1655
1656     return 0;
1657 }
1658
1659 static int input_query_formats(AVFilterContext *ctx)
1660 {
1661     FilterPriv *priv = ctx->priv;
1662     enum PixelFormat pix_fmts[] = {
1663         priv->is->video_st->codec->pix_fmt, PIX_FMT_NONE
1664     };
1665
1666     avfilter_set_common_formats(ctx, avfilter_make_format_list(pix_fmts));
1667     return 0;
1668 }
1669
1670 static int input_config_props(AVFilterLink *link)
1671 {
1672     FilterPriv *priv  = link->src->priv;
1673     AVCodecContext *c = priv->is->video_st->codec;
1674
1675     link->w = c->width;
1676     link->h = c->height;
1677     link->time_base = priv->is->video_st->time_base;
1678
1679     return 0;
1680 }
1681
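     /* source filter that feeds decoded frames from the video decoder into
        the filter graph: it has no inputs and a single video output pad whose
        request_frame callback pulls the next picture from the decoder */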
1682 static AVFilter input_filter =
1683 {
1684     .name      = "ffplay_input",
1685
1686     .priv_size = sizeof(FilterPriv),
1687
1688     .init      = input_init,
1689     .uninit    = input_uninit,
1690
1691     .query_formats = input_query_formats,
1692
1693     .inputs    = (AVFilterPad[]) {{ .name = NULL }},
1694     .outputs   = (AVFilterPad[]) {{ .name = "default",
1695                                     .type = AVMEDIA_TYPE_VIDEO,
1696                                     .request_frame = input_request_frame,
1697                                     .config_props  = input_config_props, },
1698                                   { .name = NULL }},
1699 };
1700
1701 static int configure_video_filters(AVFilterGraph *graph, VideoState *is, const char *vfilters)
1702 {
1703     char sws_flags_str[128];
1704     int ret;
1705     FFSinkContext ffsink_ctx = { .pix_fmt = PIX_FMT_YUV420P };
1706     AVFilterContext *filt_src = NULL, *filt_out = NULL;
1707     snprintf(sws_flags_str, sizeof(sws_flags_str), "flags=%d", sws_flags);
1708     graph->scale_sws_opts = av_strdup(sws_flags_str);
1709
1710     if ((ret = avfilter_graph_create_filter(&filt_src, &input_filter, "src",
1711                                             NULL, is, graph)) < 0)
1712         return ret;
1713     if ((ret = avfilter_graph_create_filter(&filt_out, &ffsink, "out",
1714                                             NULL, &ffsink_ctx, graph)) < 0)
1715         return ret;
1716
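         /* if the user supplied a filter description, parse it between the
            labels "in" (the output of our source filter) and "out" (the input
            of the ffsink); otherwise link the source directly to the sink */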
1717     if(vfilters) {
1718         AVFilterInOut *outputs = av_malloc(sizeof(AVFilterInOut));
1719         AVFilterInOut *inputs  = av_malloc(sizeof(AVFilterInOut));
1720
1721         outputs->name    = av_strdup("in");
1722         outputs->filter_ctx = filt_src;
1723         outputs->pad_idx = 0;
1724         outputs->next    = NULL;
1725
1726         inputs->name    = av_strdup("out");
1727         inputs->filter_ctx = filt_out;
1728         inputs->pad_idx = 0;
1729         inputs->next    = NULL;
1730
1731         if ((ret = avfilter_graph_parse(graph, vfilters, inputs, outputs, NULL)) < 0)
1732             return ret;
1733         av_freep(&vfilters);
1734     } else {
1735         if ((ret = avfilter_link(filt_src, 0, filt_out, 0)) < 0)
1736             return ret;
1737     }
1738
1739     if ((ret = avfilter_graph_config(graph, NULL)) < 0)
1740         return ret;
1741
1742     is->out_video_filter = filt_out;
1743
1744     return ret;
1745 }
1746
1747 #endif  /* CONFIG_AVFILTER */
1748
1749 static int video_thread(void *arg)
1750 {
1751     VideoState *is = arg;
1752     AVFrame *frame= avcodec_alloc_frame();
1753     int64_t pts_int;
1754     double pts;
1755     int ret;
1756
1757 #if CONFIG_AVFILTER
1758     AVFilterGraph *graph = avfilter_graph_alloc();
1759     AVFilterContext *filt_out = NULL;
1760     int64_t pos;
1761
1762     if ((ret = configure_video_filters(graph, is, vfilters)) < 0)
1763         goto the_end;
1764     filt_out = is->out_video_filter;
1765 #endif
1766
1767     for(;;) {
1768 #if !CONFIG_AVFILTER
1769         AVPacket pkt;
1770 #else
1771         AVFilterBufferRef *picref;
1772         AVRational tb;
1773 #endif
1774         while (is->paused && !is->videoq.abort_request)
1775             SDL_Delay(10);
1776 #if CONFIG_AVFILTER
1777         ret = get_filtered_video_frame(filt_out, frame, &picref, &tb);
1778         if (picref) {
1779             pts_int = picref->pts;
1780             pos     = picref->pos;
1781             frame->opaque = picref;
1782         }
1783
1784         if (av_cmp_q(tb, is->video_st->time_base)) {
1785             av_unused int64_t pts1 = pts_int;
1786             pts_int = av_rescale_q(pts_int, tb, is->video_st->time_base);
1787             av_dlog(NULL, "video_thread(): "
1788                     "tb:%d/%d pts:%"PRId64" -> tb:%d/%d pts:%"PRId64"\n",
1789                     tb.num, tb.den, pts1,
1790                     is->video_st->time_base.num, is->video_st->time_base.den, pts_int);
1791         }
1792 #else
1793         ret = get_video_frame(is, frame, &pts_int, &pkt);
1794 #endif
1795
1796         if (ret < 0) goto the_end;
1797
1798         if (!ret)
1799             continue;
1800
1801         pts = pts_int*av_q2d(is->video_st->time_base);
1802
1803 #if CONFIG_AVFILTER
1804         ret = output_picture2(is, frame, pts, pos);
1805 #else
1806         ret = output_picture2(is, frame, pts,  pkt.pos);
1807         av_free_packet(&pkt);
1808 #endif
1809         if (ret < 0)
1810             goto the_end;
1811
1812         if (step)
1813             if (cur_stream)
1814                 stream_pause(cur_stream);
1815     }
1816  the_end:
1817 #if CONFIG_AVFILTER
1818     avfilter_graph_free(&graph);
1819 #endif
1820     av_free(frame);
1821     return 0;
1822 }
1823
1824 static int subtitle_thread(void *arg)
1825 {
1826     VideoState *is = arg;
1827     SubPicture *sp;
1828     AVPacket pkt1, *pkt = &pkt1;
1829     int got_subtitle;
1830     double pts;
1831     int i, j;
1832     int r, g, b, y, u, v, a;
1833
1834     for(;;) {
1835         while (is->paused && !is->subtitleq.abort_request) {
1836             SDL_Delay(10);
1837         }
1838         if (packet_queue_get(&is->subtitleq, pkt, 1) < 0)
1839             break;
1840
1841         if(pkt->data == flush_pkt.data){
1842             avcodec_flush_buffers(is->subtitle_st->codec);
1843             continue;
1844         }
1845         SDL_LockMutex(is->subpq_mutex);
1846         while (is->subpq_size >= SUBPICTURE_QUEUE_SIZE &&
1847                !is->subtitleq.abort_request) {
1848             SDL_CondWait(is->subpq_cond, is->subpq_mutex);
1849         }
1850         SDL_UnlockMutex(is->subpq_mutex);
1851
1852         if (is->subtitleq.abort_request)
1853             return 0;
1854
1855         sp = &is->subpq[is->subpq_windex];
1856
1857         /* NOTE: pts is the PTS of the _first_ subtitle picture beginning in
1858            this packet, if any */
1859         pts = 0;
1860         if (pkt->pts != AV_NOPTS_VALUE)
1861             pts = av_q2d(is->subtitle_st->time_base)*pkt->pts;
1862
1863         avcodec_decode_subtitle2(is->subtitle_st->codec, &sp->sub,
1864                                  &got_subtitle, pkt);
1865
1866         if (got_subtitle && sp->sub.format == 0) {
1867             sp->pts = pts;
1868
1869             for (i = 0; i < sp->sub.num_rects; i++)
1870             {
1871                 for (j = 0; j < sp->sub.rects[i]->nb_colors; j++)
1872                 {
1873                     RGBA_IN(r, g, b, a, (uint32_t*)sp->sub.rects[i]->pict.data[1] + j);
1874                     y = RGB_TO_Y_CCIR(r, g, b);
1875                     u = RGB_TO_U_CCIR(r, g, b, 0);
1876                     v = RGB_TO_V_CCIR(r, g, b, 0);
1877                     YUVA_OUT((uint32_t*)sp->sub.rects[i]->pict.data[1] + j, y, u, v, a);
1878                 }
1879             }
1880
1881             /* now we can update the picture count */
1882             if (++is->subpq_windex == SUBPICTURE_QUEUE_SIZE)
1883                 is->subpq_windex = 0;
1884             SDL_LockMutex(is->subpq_mutex);
1885             is->subpq_size++;
1886             SDL_UnlockMutex(is->subpq_mutex);
1887         }
1888         av_free_packet(pkt);
1889     }
1890     return 0;
1891 }
1892
1893 /* copy samples for the audio visualization (wave / spectrum display) */
1894 static void update_sample_display(VideoState *is, short *samples, int samples_size)
1895 {
1896     int size, len;
1897
1898     size = samples_size / sizeof(short);
1899     while (size > 0) {
1900         len = SAMPLE_ARRAY_SIZE - is->sample_array_index;
1901         if (len > size)
1902             len = size;
1903         memcpy(is->sample_array + is->sample_array_index, samples, len * sizeof(short));
1904         samples += len;
1905         is->sample_array_index += len;
1906         if (is->sample_array_index >= SAMPLE_ARRAY_SIZE)
1907             is->sample_array_index = 0;
1908         size -= len;
1909     }
1910 }
1911
1912 /* return the new audio buffer size (samples can be added or deleted to get
1913    better sync when video or the external clock is the master) */
1914 static int synchronize_audio(VideoState *is, short *samples,
1915                              int samples_size1, double pts)
1916 {
1917     int n, samples_size;
1918     double ref_clock;
1919
1920     n = 2 * is->audio_st->codec->channels;
1921     samples_size = samples_size1;
1922
1923     /* if not master, then we try to remove or add samples to correct the clock */
1924     if (((is->av_sync_type == AV_SYNC_VIDEO_MASTER && is->video_st) ||
1925          is->av_sync_type == AV_SYNC_EXTERNAL_CLOCK)) {
1926         double diff, avg_diff;
1927         int wanted_size, min_size, max_size, nb_samples;
1928
1929         ref_clock = get_master_clock(is);
1930         diff = get_audio_clock(is) - ref_clock;
1931
1932         if (diff < AV_NOSYNC_THRESHOLD) {
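                 /* audio_diff_cum is a running geometric sum
                    d_k + coef*d_(k-1) + coef^2*d_(k-2) + ..., whose weights
                    sum to 1/(1 - coef); multiplying by (1 - coef) below thus
                    yields a weighted average in which the oldest of the last
                    AUDIO_DIFF_AVG_NB differences contributes about 1% */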
1933             is->audio_diff_cum = diff + is->audio_diff_avg_coef * is->audio_diff_cum;
1934             if (is->audio_diff_avg_count < AUDIO_DIFF_AVG_NB) {
1935                 /* not enough measures to have a correct estimate */
1936                 is->audio_diff_avg_count++;
1937             } else {
1938                 /* estimate the A-V difference */
1939                 avg_diff = is->audio_diff_cum * (1.0 - is->audio_diff_avg_coef);
1940
1941                 if (fabs(avg_diff) >= is->audio_diff_threshold) {
1942                     wanted_size = samples_size + ((int)(diff * is->audio_st->codec->sample_rate) * n);
1943                     nb_samples = samples_size / n;
1944
1945                     min_size = ((nb_samples * (100 - SAMPLE_CORRECTION_PERCENT_MAX)) / 100) * n;
1946                     max_size = ((nb_samples * (100 + SAMPLE_CORRECTION_PERCENT_MAX)) / 100) * n;
1947                     if (wanted_size < min_size)
1948                         wanted_size = min_size;
1949                     else if (wanted_size > max_size)
1950                         wanted_size = max_size;
1951
1952                         /* add or remove samples to correct the sync */
1953                     if (wanted_size < samples_size) {
1954                         /* remove samples */
1955                         samples_size = wanted_size;
1956                     } else if (wanted_size > samples_size) {
1957                         uint8_t *samples_end, *q;
1958                         int nb;
1959
1960                         /* add samples by duplicating the last frame */
1961                         nb = (wanted_size - samples_size);
1962                         samples_end = (uint8_t *)samples + samples_size - n;
1963                         q = samples_end + n;
1964                         while (nb > 0) {
1965                             memcpy(q, samples_end, n);
1966                             q += n;
1967                             nb -= n;
1968                         }
1969                         samples_size = wanted_size;
1970                     }
1971                 }
1972                 av_dlog(NULL, "diff=%f adiff=%f sample_diff=%d apts=%0.3f vpts=%0.3f %f\n",
1973                         diff, avg_diff, samples_size - samples_size1,
1974                         is->audio_clock, is->video_clock, is->audio_diff_threshold);
1975             }
1976         } else {
1977             /* the difference is too large: it may come from initial PTS
1978                errors, so reset the A-V filter */
1979             is->audio_diff_avg_count = 0;
1980             is->audio_diff_cum = 0;
1981         }
1982     }
1983
1984     return samples_size;
1985 }
1986
1987 /* decode one audio frame and return its uncompressed size */
1988 static int audio_decode_frame(VideoState *is, double *pts_ptr)
1989 {
1990     AVPacket *pkt_temp = &is->audio_pkt_temp;
1991     AVPacket *pkt = &is->audio_pkt;
1992     AVCodecContext *dec= is->audio_st->codec;
1993     int n, len1, data_size;
1994     double pts;
1995
1996     for(;;) {
1997         /* NOTE: the audio packet can contain several frames */
1998         while (pkt_temp->size > 0) {
1999             data_size = sizeof(is->audio_buf1);
2000             len1 = avcodec_decode_audio3(dec,
2001                                         (int16_t *)is->audio_buf1, &data_size,
2002                                         pkt_temp);
2003             if (len1 < 0) {
2004                 /* if error, we skip the frame */
2005                 pkt_temp->size = 0;
2006                 break;
2007             }
2008
2009             pkt_temp->data += len1;
2010             pkt_temp->size -= len1;
2011             if (data_size <= 0)
2012                 continue;
2013
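                 /* SDL audio was opened for signed 16-bit native-endian
                    samples, so if the decoder outputs any other sample format,
                    (re)create a converter to AV_SAMPLE_FMT_S16 */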
2014             if (dec->sample_fmt != is->audio_src_fmt) {
2015                 if (is->reformat_ctx)
2016                     av_audio_convert_free(is->reformat_ctx);
2017                 is->reformat_ctx= av_audio_convert_alloc(AV_SAMPLE_FMT_S16, 1,
2018                                                          dec->sample_fmt, 1, NULL, 0);
2019                 if (!is->reformat_ctx) {
2020                     fprintf(stderr, "Cannot convert %s sample format to %s sample format\n",
2021                         av_get_sample_fmt_name(dec->sample_fmt),
2022                         av_get_sample_fmt_name(AV_SAMPLE_FMT_S16));
2023                         break;
2024                 }
2025                 is->audio_src_fmt= dec->sample_fmt;
2026             }
2027
2028             if (is->reformat_ctx) {
2029                 const void *ibuf[6]= {is->audio_buf1};
2030                 void *obuf[6]= {is->audio_buf2};
2031                 int istride[6]= {av_get_bytes_per_sample(dec->sample_fmt)};
2032                 int ostride[6]= {2};
2033                 int len= data_size/istride[0];
2034                 if (av_audio_convert(is->reformat_ctx, obuf, ostride, ibuf, istride, len)<0) {
2035                     printf("av_audio_convert() failed\n");
2036                     break;
2037                 }
2038                 is->audio_buf= is->audio_buf2;
2039                 /* FIXME: existing code assumes that data_size equals framesize*channels*2;
2040                           remove this legacy cruft */
2041                 data_size= len*2;
2042             }else{
2043                 is->audio_buf= is->audio_buf1;
2044             }
2045
2046             /* if no pts, then compute it */
2047             pts = is->audio_clock;
2048             *pts_ptr = pts;
2049             n = 2 * dec->channels;
2050             is->audio_clock += (double)data_size /
2051                 (double)(n * dec->sample_rate);
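                 /* the decoded chunk lasts data_size / (n * sample_rate)
                    seconds (n = 2 bytes * channels per sample frame), so the
                    audio clock now points at the end of the decoded data */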
2052 #ifdef DEBUG
2053             {
2054                 static double last_clock;
2055                 printf("audio: delay=%0.3f clock=%0.3f pts=%0.3f\n",
2056                        is->audio_clock - last_clock,
2057                        is->audio_clock, pts);
2058                 last_clock = is->audio_clock;
2059             }
2060 #endif
2061             return data_size;
2062         }
2063
2064         /* free the current packet */
2065         if (pkt->data)
2066             av_free_packet(pkt);
2067
2068         if (is->paused || is->audioq.abort_request) {
2069             return -1;
2070         }
2071
2072         /* read next packet */
2073         if (packet_queue_get(&is->audioq, pkt, 1) < 0)
2074             return -1;
2075         if(pkt->data == flush_pkt.data){
2076             avcodec_flush_buffers(dec);
2077             continue;
2078         }
2079
2080         pkt_temp->data = pkt->data;
2081         pkt_temp->size = pkt->size;
2082
2083         /* update the audio clock with the packet pts, if available */
2084         if (pkt->pts != AV_NOPTS_VALUE) {
2085             is->audio_clock = av_q2d(is->audio_st->time_base)*pkt->pts;
2086         }
2087     }
2088 }
2089
2090 /* prepare a new audio buffer */
2091 static void sdl_audio_callback(void *opaque, Uint8 *stream, int len)
2092 {
2093     VideoState *is = opaque;
2094     int audio_size, len1;
2095     double pts;
2096
2097     audio_callback_time = av_gettime();
2098
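         /* SDL requests exactly 'len' bytes: whenever the internal buffer is
            used up, decode (and sync-adjust) another frame, then copy as much
            as fits and loop until the callback buffer is full */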
2099     while (len > 0) {
2100         if (is->audio_buf_index >= is->audio_buf_size) {
2101            audio_size = audio_decode_frame(is, &pts);
2102            if (audio_size < 0) {
2103                 /* if error, just output silence */
2104                is->audio_buf = is->audio_buf1;
2105                is->audio_buf_size = 1024;
2106                memset(is->audio_buf, 0, is->audio_buf_size);
2107            } else {
2108                if (is->show_audio)
2109                    update_sample_display(is, (int16_t *)is->audio_buf, audio_size);
2110                audio_size = synchronize_audio(is, (int16_t *)is->audio_buf, audio_size,
2111                                               pts);
2112                is->audio_buf_size = audio_size;
2113            }
2114            is->audio_buf_index = 0;
2115         }
2116         len1 = is->audio_buf_size - is->audio_buf_index;
2117         if (len1 > len)
2118             len1 = len;
2119         memcpy(stream, (uint8_t *)is->audio_buf + is->audio_buf_index, len1);
2120         len -= len1;
2121         stream += len1;
2122         is->audio_buf_index += len1;
2123     }
2124 }
2125
2126 /* open a given stream. Return 0 if OK */
2127 static int stream_component_open(VideoState *is, int stream_index)
2128 {
2129     AVFormatContext *ic = is->ic;
2130     AVCodecContext *avctx;
2131     AVCodec *codec;
2132     SDL_AudioSpec wanted_spec, spec;
2133     AVDictionary *opts;
2134     AVDictionaryEntry *t = NULL;
2135
2136     if (stream_index < 0 || stream_index >= ic->nb_streams)
2137         return -1;
2138     avctx = ic->streams[stream_index]->codec;
2139
2140     opts = filter_codec_opts(codec_opts, avctx->codec_id, 0);
2141
2142     /* request at most 2 channels from the audio decoder */
2143     if (avctx->codec_type == AVMEDIA_TYPE_AUDIO) {
2144         if (avctx->channels > 0) {
2145             avctx->request_channels = FFMIN(2, avctx->channels);
2146         } else {
2147             avctx->request_channels = 2;
2148         }
2149     }
2150
2151     codec = avcodec_find_decoder(avctx->codec_id);
2152     avctx->debug_mv = debug_mv;
2153     avctx->debug = debug;
2154     avctx->workaround_bugs = workaround_bugs;
2155     avctx->lowres = lowres;
2156     if(lowres) avctx->flags |= CODEC_FLAG_EMU_EDGE;
2157     avctx->idct_algo= idct;
2158     if(fast) avctx->flags2 |= CODEC_FLAG2_FAST;
2159     avctx->skip_frame= skip_frame;
2160     avctx->skip_idct= skip_idct;
2161     avctx->skip_loop_filter= skip_loop_filter;
2162     avctx->error_recognition= error_recognition;
2163     avctx->error_concealment= error_concealment;
2164     avctx->thread_count= thread_count;
2165
2166     if (!codec ||
2167         avcodec_open2(avctx, codec, &opts) < 0)
2168         return -1;
2169     if ((t = av_dict_get(opts, "", NULL, AV_DICT_IGNORE_SUFFIX))) {
2170         av_log(NULL, AV_LOG_ERROR, "Option %s not found.\n", t->key);
2171         return AVERROR_OPTION_NOT_FOUND;
2172     }
2173
2174     /* prepare audio output */
2175     if (avctx->codec_type == AVMEDIA_TYPE_AUDIO) {
2176         wanted_spec.freq = avctx->sample_rate;
2177         wanted_spec.format = AUDIO_S16SYS;
2178         wanted_spec.channels = avctx->channels;
2179         wanted_spec.silence = 0;
2180         wanted_spec.samples = SDL_AUDIO_BUFFER_SIZE;
2181         wanted_spec.callback = sdl_audio_callback;
2182         wanted_spec.userdata = is;
2183         if (SDL_OpenAudio(&wanted_spec, &spec) < 0) {
2184             fprintf(stderr, "SDL_OpenAudio: %s\n", SDL_GetError());
2185             return -1;
2186         }
2187         is->audio_hw_buf_size = spec.size;
2188         is->audio_src_fmt= AV_SAMPLE_FMT_S16;
2189     }
2190
2191     ic->streams[stream_index]->discard = AVDISCARD_DEFAULT;
2192     switch(avctx->codec_type) {
2193     case AVMEDIA_TYPE_AUDIO:
2194         is->audio_stream = stream_index;
2195         is->audio_st = ic->streams[stream_index];
2196         is->audio_buf_size = 0;
2197         is->audio_buf_index = 0;
2198
2199         /* init averaging filter */
2200         is->audio_diff_avg_coef = exp(log(0.01) / AUDIO_DIFF_AVG_NB);
2201         is->audio_diff_avg_count = 0;
2202         /* since we do not have a precise enough audio fifo fullness measure,
2203            we correct audio sync only if the error exceeds this threshold */
2204         is->audio_diff_threshold = 2.0 * SDL_AUDIO_BUFFER_SIZE / avctx->sample_rate;
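             /* i.e. the duration of two SDL audio buffers; with 1024 samples
                at 44100 Hz this is roughly 46 ms */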
2205
2206         memset(&is->audio_pkt, 0, sizeof(is->audio_pkt));
2207         packet_queue_init(&is->audioq);
2208         SDL_PauseAudio(0);
2209         break;
2210     case AVMEDIA_TYPE_VIDEO:
2211         is->video_stream = stream_index;
2212         is->video_st = ic->streams[stream_index];
2213
2214         packet_queue_init(&is->videoq);
2215         is->video_tid = SDL_CreateThread(video_thread, is);
2216         break;
2217     case AVMEDIA_TYPE_SUBTITLE:
2218         is->subtitle_stream = stream_index;
2219         is->subtitle_st = ic->streams[stream_index];
2220         packet_queue_init(&is->subtitleq);
2221
2222         is->subtitle_tid = SDL_CreateThread(subtitle_thread, is);
2223         break;
2224     default:
2225         break;
2226     }
2227     return 0;
2228 }
2229
2230 static void stream_component_close(VideoState *is, int stream_index)
2231 {
2232     AVFormatContext *ic = is->ic;
2233     AVCodecContext *avctx;
2234
2235     if (stream_index < 0 || stream_index >= ic->nb_streams)
2236         return;
2237     avctx = ic->streams[stream_index]->codec;
2238
2239     switch(avctx->codec_type) {
2240     case AVMEDIA_TYPE_AUDIO:
2241         packet_queue_abort(&is->audioq);
2242
2243         SDL_CloseAudio();
2244
2245         packet_queue_end(&is->audioq);
2246         if (is->reformat_ctx)
2247             av_audio_convert_free(is->reformat_ctx);
2248         is->reformat_ctx = NULL;
2249         break;
2250     case AVMEDIA_TYPE_VIDEO:
2251         packet_queue_abort(&is->videoq);
2252
2253         /* note: we also signal this mutex to make sure we deblock the
2254            video thread in all cases */
2255         SDL_LockMutex(is->pictq_mutex);
2256         SDL_CondSignal(is->pictq_cond);
2257         SDL_UnlockMutex(is->pictq_mutex);
2258
2259         SDL_WaitThread(is->video_tid, NULL);
2260
2261         packet_queue_end(&is->videoq);
2262         break;
2263     case AVMEDIA_TYPE_SUBTITLE:
2264         packet_queue_abort(&is->subtitleq);
2265
2266         /* note: we also signal this mutex to make sure we deblock the
2267            subtitle thread in all cases */
2268         SDL_LockMutex(is->subpq_mutex);
2269         is->subtitle_stream_changed = 1;
2270
2271         SDL_CondSignal(is->subpq_cond);
2272         SDL_UnlockMutex(is->subpq_mutex);
2273
2274         SDL_WaitThread(is->subtitle_tid, NULL);
2275
2276         packet_queue_end(&is->subtitleq);
2277         break;
2278     default:
2279         break;
2280     }
2281
2282     ic->streams[stream_index]->discard = AVDISCARD_ALL;
2283     avcodec_close(avctx);
2284     switch(avctx->codec_type) {
2285     case AVMEDIA_TYPE_AUDIO:
2286         is->audio_st = NULL;
2287         is->audio_stream = -1;
2288         break;
2289     case AVMEDIA_TYPE_VIDEO:
2290         is->video_st = NULL;
2291         is->video_stream = -1;
2292         break;
2293     case AVMEDIA_TYPE_SUBTITLE:
2294         is->subtitle_st = NULL;
2295         is->subtitle_stream = -1;
2296         break;
2297     default:
2298         break;
2299     }
2300 }
2301
2302 /* since we have only one decoding thread, we can use a global
2303    variable instead of a thread local variable */
2304 static VideoState *global_video_state;
2305
2306 static int decode_interrupt_cb(void)
2307 {
2308     return (global_video_state && global_video_state->abort_request);
2309 }
2310
2311 /* this thread gets the stream from the disk or the network */
2312 static int decode_thread(void *arg)
2313 {
2314     VideoState *is = arg;
2315     AVFormatContext *ic = NULL;
2316     int err, i, ret;
2317     int st_index[AVMEDIA_TYPE_NB];
2318     AVPacket pkt1, *pkt = &pkt1;
2319     int eof=0;
2320     int pkt_in_play_range = 0;
2321     AVDictionaryEntry *t;
2322     AVDictionary **opts;
2323     int orig_nb_streams;
2324
2325     memset(st_index, -1, sizeof(st_index));
2326     is->video_stream = -1;
2327     is->audio_stream = -1;
2328     is->subtitle_stream = -1;
2329
2330     global_video_state = is;
2331     avio_set_interrupt_cb(decode_interrupt_cb);
2332
2333     err = avformat_open_input(&ic, is->filename, is->iformat, &format_opts);
2334     if (err < 0) {
2335         print_error(is->filename, err);
2336         ret = -1;
2337         goto fail;
2338     }
2339     if ((t = av_dict_get(format_opts, "", NULL, AV_DICT_IGNORE_SUFFIX))) {
2340         av_log(NULL, AV_LOG_ERROR, "Option %s not found.\n", t->key);
2341         ret = AVERROR_OPTION_NOT_FOUND;
2342         goto fail;
2343     }
2344     is->ic = ic;
2345
2346     if(genpts)
2347         ic->flags |= AVFMT_FLAG_GENPTS;
2348
2349     opts = setup_find_stream_info_opts(ic);
2350     orig_nb_streams = ic->nb_streams;
2351
2352     err = avformat_find_stream_info(ic, opts);
2353     if (err < 0) {
2354         fprintf(stderr, "%s: could not find codec parameters\n", is->filename);
2355         ret = -1;
2356         goto fail;
2357     }
2358     for (i = 0; i < orig_nb_streams; i++)
2359         av_dict_free(&opts[i]);
2360     av_freep(&opts);
2361
2362     if(ic->pb)
2363         ic->pb->eof_reached= 0; //FIXME hack, ffplay maybe should not use url_feof() to test for the end
2364
2365     if(seek_by_bytes<0)
2366         seek_by_bytes= !!(ic->iformat->flags & AVFMT_TS_DISCONT);
2367
2368     /* if seeking requested, we execute it */
2369     if (start_time != AV_NOPTS_VALUE) {
2370         int64_t timestamp;
2371
2372         timestamp = start_time;
2373         /* add the stream start time */
2374         if (ic->start_time != AV_NOPTS_VALUE)
2375             timestamp += ic->start_time;
2376         ret = avformat_seek_file(ic, -1, INT64_MIN, timestamp, INT64_MAX, 0);
2377         if (ret < 0) {
2378             fprintf(stderr, "%s: could not seek to position %0.3f\n",
2379                     is->filename, (double)timestamp / AV_TIME_BASE);
2380         }
2381     }
2382
2383     for (i = 0; i < ic->nb_streams; i++)
2384         ic->streams[i]->discard = AVDISCARD_ALL;
2385     if (!video_disable)
2386         st_index[AVMEDIA_TYPE_VIDEO] =
2387             av_find_best_stream(ic, AVMEDIA_TYPE_VIDEO,
2388                                 wanted_stream[AVMEDIA_TYPE_VIDEO], -1, NULL, 0);
2389     if (!audio_disable)
2390         st_index[AVMEDIA_TYPE_AUDIO] =
2391             av_find_best_stream(ic, AVMEDIA_TYPE_AUDIO,
2392                                 wanted_stream[AVMEDIA_TYPE_AUDIO],
2393                                 st_index[AVMEDIA_TYPE_VIDEO],
2394                                 NULL, 0);
2395     if (!video_disable)
2396         st_index[AVMEDIA_TYPE_SUBTITLE] =
2397             av_find_best_stream(ic, AVMEDIA_TYPE_SUBTITLE,
2398                                 wanted_stream[AVMEDIA_TYPE_SUBTITLE],
2399                                 (st_index[AVMEDIA_TYPE_AUDIO] >= 0 ?
2400                                  st_index[AVMEDIA_TYPE_AUDIO] :
2401                                  st_index[AVMEDIA_TYPE_VIDEO]),
2402                                 NULL, 0);
2403     if (show_status) {
2404         av_dump_format(ic, 0, is->filename, 0);
2405     }
2406
2407     /* open the streams */
2408     if (st_index[AVMEDIA_TYPE_AUDIO] >= 0) {
2409         stream_component_open(is, st_index[AVMEDIA_TYPE_AUDIO]);
2410     }
2411
2412     ret=-1;
2413     if (st_index[AVMEDIA_TYPE_VIDEO] >= 0) {
2414         ret= stream_component_open(is, st_index[AVMEDIA_TYPE_VIDEO]);
2415     }
2416     is->refresh_tid = SDL_CreateThread(refresh_thread, is);
2417     if(ret<0) {
2418         if (!display_disable)
2419             is->show_audio = 2;
2420     }
2421
2422     if (st_index[AVMEDIA_TYPE_SUBTITLE] >= 0) {
2423         stream_component_open(is, st_index[AVMEDIA_TYPE_SUBTITLE]);
2424     }
2425
2426     if (is->video_stream < 0 && is->audio_stream < 0) {
2427         fprintf(stderr, "%s: could not open codecs\n", is->filename);
2428         ret = -1;
2429         goto fail;
2430     }
2431
2432     for(;;) {
2433         if (is->abort_request)
2434             break;
2435         if (is->paused != is->last_paused) {
2436             is->last_paused = is->paused;
2437             if (is->paused)
2438                 is->read_pause_return= av_read_pause(ic);
2439             else
2440                 av_read_play(ic);
2441         }
2442 #if CONFIG_RTSP_DEMUXER
2443         if (is->paused && !strcmp(ic->iformat->name, "rtsp")) {
2444             /* wait 10 ms to avoid trying to get another packet */
2445             /* XXX: horrible */
2446             SDL_Delay(10);
2447             continue;
2448         }
2449 #endif
2450         if (is->seek_req) {
2451             int64_t seek_target= is->seek_pos;
2452             int64_t seek_min= is->seek_rel > 0 ? seek_target - is->seek_rel + 2: INT64_MIN;
2453             int64_t seek_max= is->seek_rel < 0 ? seek_target - is->seek_rel - 2: INT64_MAX;
2454 //FIXME the +-2 is due to rounding being not done in the correct direction in generation
2455 //      of the seek_pos/seek_rel variables
2456
2457             ret = avformat_seek_file(is->ic, -1, seek_min, seek_target, seek_max, is->seek_flags);
2458             if (ret < 0) {
2459                 fprintf(stderr, "%s: error while seeking\n", is->ic->filename);
2460             }else{
2461                 if (is->audio_stream >= 0) {
2462                     packet_queue_flush(&is->audioq);
2463                     packet_queue_put(&is->audioq, &flush_pkt);
2464                 }
2465                 if (is->subtitle_stream >= 0) {
2466                     packet_queue_flush(&is->subtitleq);
2467                     packet_queue_put(&is->subtitleq, &flush_pkt);
2468                 }
2469                 if (is->video_stream >= 0) {
2470                     packet_queue_flush(&is->videoq);
2471                     packet_queue_put(&is->videoq, &flush_pkt);
2472                 }
2473             }
2474             is->seek_req = 0;
2475             eof= 0;
2476         }
2477
2478         /* if the queues are full, no need to read more */
2479         if (   is->audioq.size + is->videoq.size + is->subtitleq.size > MAX_QUEUE_SIZE
2480             || (   (is->audioq   .size  > MIN_AUDIOQ_SIZE || is->audio_stream<0)
2481                 && (is->videoq   .nb_packets > MIN_FRAMES || is->video_stream<0)
2482                 && (is->subtitleq.nb_packets > MIN_FRAMES || is->subtitle_stream<0))) {
2483             /* wait 10 ms */
2484             SDL_Delay(10);
2485             continue;
2486         }
2487         if(eof) {
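                 /* at end of file, queue an empty packet on the video stream
                    so that decoders holding buffered (delayed) frames get
                    flushed */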
2488             if(is->video_stream >= 0){
2489                 av_init_packet(pkt);
2490                 pkt->data=NULL;
2491                 pkt->size=0;
2492                 pkt->stream_index= is->video_stream;
2493                 packet_queue_put(&is->videoq, pkt);
2494             }
2495             SDL_Delay(10);
2496             if(is->audioq.size + is->videoq.size + is->subtitleq.size ==0){
2497                 if(loop!=1 && (!loop || --loop)){
2498                     stream_seek(cur_stream, start_time != AV_NOPTS_VALUE ? start_time : 0, 0, 0);
2499                 }else if(autoexit){
2500                     ret=AVERROR_EOF;
2501                     goto fail;
2502                 }
2503             }
2504             continue;
2505         }
2506         ret = av_read_frame(ic, pkt);
2507         if (ret < 0) {
2508             if (ret == AVERROR_EOF || (ic->pb && ic->pb->eof_reached))
2509                 eof=1;
2510             if (ic->pb && ic->pb->error)
2511                 break;
2512             SDL_Delay(100); /* wait for user event */
2513             continue;
2514         }
2515         /* check if packet is in play range specified by user, then queue, otherwise discard */
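             /* i.e. keep the packet when no play range was given, or when its
                pts, taken relative to the stream start time and the requested
                -ss position, still falls within the requested -t duration */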
2516         pkt_in_play_range = duration == AV_NOPTS_VALUE ||
2517                 (pkt->pts - ic->streams[pkt->stream_index]->start_time) *
2518                 av_q2d(ic->streams[pkt->stream_index]->time_base) -
2519                 (double)(start_time != AV_NOPTS_VALUE ? start_time : 0)/1000000
2520                 <= ((double)duration/1000000);
2521         if (pkt->stream_index == is->audio_stream && pkt_in_play_range) {
2522             packet_queue_put(&is->audioq, pkt);
2523         } else if (pkt->stream_index == is->video_stream && pkt_in_play_range) {
2524             packet_queue_put(&is->videoq, pkt);
2525         } else if (pkt->stream_index == is->subtitle_stream && pkt_in_play_range) {
2526             packet_queue_put(&is->subtitleq, pkt);
2527         } else {
2528             av_free_packet(pkt);
2529         }
2530     }
2531     /* wait until the end */
2532     while (!is->abort_request) {
2533         SDL_Delay(100);
2534     }
2535
2536     ret = 0;
2537  fail:
2538     /* disable interrupting */
2539     global_video_state = NULL;
2540
2541     /* close each stream */
2542     if (is->audio_stream >= 0)
2543         stream_component_close(is, is->audio_stream);
2544     if (is->video_stream >= 0)
2545         stream_component_close(is, is->video_stream);
2546     if (is->subtitle_stream >= 0)
2547         stream_component_close(is, is->subtitle_stream);
2548     if (is->ic) {
2549         av_close_input_file(is->ic);
2550         is->ic = NULL; /* safety */
2551     }
2552     avio_set_interrupt_cb(NULL);
2553
2554     if (ret != 0) {
2555         SDL_Event event;
2556
2557         event.type = FF_QUIT_EVENT;
2558         event.user.data1 = is;
2559         SDL_PushEvent(&event);
2560     }
2561     return 0;
2562 }
2563
2564 static VideoState *stream_open(const char *filename, AVInputFormat *iformat)
2565 {
2566     VideoState *is;
2567
2568     is = av_mallocz(sizeof(VideoState));
2569     if (!is)
2570         return NULL;
2571     av_strlcpy(is->filename, filename, sizeof(is->filename));
2572     is->iformat = iformat;
2573     is->ytop = 0;
2574     is->xleft = 0;
2575
2576     /* start video display */
2577     is->pictq_mutex = SDL_CreateMutex();
2578     is->pictq_cond = SDL_CreateCond();
2579
2580     is->subpq_mutex = SDL_CreateMutex();
2581     is->subpq_cond = SDL_CreateCond();
2582
2583     is->av_sync_type = av_sync_type;
2584     is->parse_tid = SDL_CreateThread(decode_thread, is);
2585     if (!is->parse_tid) {
2586         av_free(is);
2587         return NULL;
2588     }
2589     return is;
2590 }
2591
2592 static void stream_cycle_channel(VideoState *is, int codec_type)
2593 {
2594     AVFormatContext *ic = is->ic;
2595     int start_index, stream_index;
2596     AVStream *st;
2597
2598     if (codec_type == AVMEDIA_TYPE_VIDEO)
2599         start_index = is->video_stream;
2600     else if (codec_type == AVMEDIA_TYPE_AUDIO)
2601         start_index = is->audio_stream;
2602     else
2603         start_index = is->subtitle_stream;
2604     if (start_index < (codec_type == AVMEDIA_TYPE_SUBTITLE ? -1 : 0))
2605         return;
2606     stream_index = start_index;
2607     for(;;) {
2608         if (++stream_index >= is->ic->nb_streams)
2609         {
2610             if (codec_type == AVMEDIA_TYPE_SUBTITLE)
2611             {
2612                 stream_index = -1;
2613                 goto the_end;
2614             } else
2615                 stream_index = 0;
2616         }
2617         if (stream_index == start_index)
2618             return;
2619         st = ic->streams[stream_index];
2620         if (st->codec->codec_type == codec_type) {
2621             /* check that parameters are OK */
2622             switch(codec_type) {
2623             case AVMEDIA_TYPE_AUDIO:
2624                 if (st->codec->sample_rate != 0 &&
2625                     st->codec->channels != 0)
2626                     goto the_end;
2627                 break;
2628             case AVMEDIA_TYPE_VIDEO:
2629             case AVMEDIA_TYPE_SUBTITLE:
2630                 goto the_end;
2631             default:
2632                 break;
2633             }
2634         }
2635     }
2636  the_end:
2637     stream_component_close(is, start_index);
2638     stream_component_open(is, stream_index);
2639 }
2640
2641
2642 static void toggle_full_screen(void)
2643 {
2644     is_full_screen = !is_full_screen;
2645     video_open(cur_stream);
2646 }
2647
2648 static void toggle_pause(void)
2649 {
2650     if (cur_stream)
2651         stream_pause(cur_stream);
2652     step = 0;
2653 }
2654
2655 static void step_to_next_frame(void)
2656 {
2657     if (cur_stream) {
2658         /* if the stream is paused, unpause it, then step */
2659         if (cur_stream->paused)
2660             stream_pause(cur_stream);
2661     }
2662     step = 1;
2663 }
2664
2665 static void toggle_audio_display(void)
2666 {
2667     if (cur_stream) {
2668         int bgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0x00);
2669         cur_stream->show_audio = (cur_stream->show_audio + 1) % 3;
2670         fill_rectangle(screen,
2671                     cur_stream->xleft, cur_stream->ytop, cur_stream->width, cur_stream->height,
2672                     bgcolor);
2673         SDL_UpdateRect(screen, cur_stream->xleft, cur_stream->ytop, cur_stream->width, cur_stream->height);
2674     }
2675 }
2676
2677 /* handle an event sent by the GUI */
2678 static void event_loop(void)
2679 {
2680     SDL_Event event;
2681     double incr, pos, frac;
2682
2683     for(;;) {
2684         double x;
2685         SDL_WaitEvent(&event);
2686         switch(event.type) {
2687         case SDL_KEYDOWN:
2688             if (exit_on_keydown) {
2689                 do_exit();
2690                 break;
2691             }
2692             switch(event.key.keysym.sym) {
2693             case SDLK_ESCAPE:
2694             case SDLK_q:
2695                 do_exit();
2696                 break;
2697             case SDLK_f:
2698                 toggle_full_screen();
2699                 break;
2700             case SDLK_p:
2701             case SDLK_SPACE:
2702                 toggle_pause();
2703                 break;
2704             case SDLK_s: //S: Step to next frame
2705                 step_to_next_frame();
2706                 break;
2707             case SDLK_a:
2708                 if (cur_stream)
2709                     stream_cycle_channel(cur_stream, AVMEDIA_TYPE_AUDIO);
2710                 break;
2711             case SDLK_v:
2712                 if (cur_stream)
2713                     stream_cycle_channel(cur_stream, AVMEDIA_TYPE_VIDEO);
2714                 break;
2715             case SDLK_t:
2716                 if (cur_stream)
2717                     stream_cycle_channel(cur_stream, AVMEDIA_TYPE_SUBTITLE);
2718                 break;
2719             case SDLK_w:
2720                 toggle_audio_display();
2721                 break;
2722             case SDLK_LEFT:
2723                 incr = -10.0;
2724                 goto do_seek;
2725             case SDLK_RIGHT:
2726                 incr = 10.0;
2727                 goto do_seek;
2728             case SDLK_UP:
2729                 incr = 60.0;
2730                 goto do_seek;
2731             case SDLK_DOWN:
2732                 incr = -60.0;
2733             do_seek:
2734                 if (cur_stream) {
2735                     if (seek_by_bytes) {
2736                         if (cur_stream->video_stream >= 0 && cur_stream->video_current_pos>=0){
2737                             pos= cur_stream->video_current_pos;
2738                         }else if(cur_stream->audio_stream >= 0 && cur_stream->audio_pkt.pos>=0){
2739                             pos= cur_stream->audio_pkt.pos;
2740                         }else
2741                             pos = avio_tell(cur_stream->ic->pb);
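                             /* convert the seek step from seconds to bytes
                                using the container bit rate (bytes/s =
                                bit_rate / 8), or assume roughly 1.44 Mbit/s
                                when it is unknown */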
2742                         if (cur_stream->ic->bit_rate)
2743                             incr *= cur_stream->ic->bit_rate / 8.0;
2744                         else
2745                             incr *= 180000.0;
2746                         pos += incr;
2747                         stream_seek(cur_stream, pos, incr, 1);
2748                     } else {
2749                         pos = get_master_clock(cur_stream);
2750                         pos += incr;
2751                         stream_seek(cur_stream, (int64_t)(pos * AV_TIME_BASE), (int64_t)(incr * AV_TIME_BASE), 0);
2752                     }
2753                 }
2754                 break;
2755             default:
2756                 break;
2757             }
2758             break;
2759         case SDL_MOUSEBUTTONDOWN:
2760             if (exit_on_mousedown) {
2761                 do_exit();
2762                 break;
2763             }
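                 /* fall through: a mouse click is handled like a motion event
                    with the button pressed, seeking to the clicked position */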
2764         case SDL_MOUSEMOTION:
2765             if(event.type ==SDL_MOUSEBUTTONDOWN){
2766                 x= event.button.x;
2767             }else{
2768                 if(event.motion.state != SDL_PRESSED)
2769                     break;
2770                 x= event.motion.x;
2771             }
2772             if (cur_stream) {
2773                 if(seek_by_bytes || cur_stream->ic->duration<=0){
2774                     uint64_t size=  avio_size(cur_stream->ic->pb);
2775                     stream_seek(cur_stream, size*x/cur_stream->width, 0, 1);
2776                 }else{
2777                     int64_t ts;
2778                     int ns, hh, mm, ss;
2779                     int tns, thh, tmm, tss;
2780                     tns = cur_stream->ic->duration/1000000LL;
2781                     thh = tns/3600;
2782                     tmm = (tns%3600)/60;
2783                     tss = (tns%60);
2784                     frac = x/cur_stream->width;
2785                     ns = frac*tns;
2786                     hh = ns/3600;
2787                     mm = (ns%3600)/60;
2788                     ss = (ns%60);
2789                     fprintf(stderr, "Seek to %2.0f%% (%2d:%02d:%02d) of total duration (%2d:%02d:%02d)       \n", frac*100,
2790                             hh, mm, ss, thh, tmm, tss);
2791                     ts = frac*cur_stream->ic->duration;
2792                     if (cur_stream->ic->start_time != AV_NOPTS_VALUE)
2793                         ts += cur_stream->ic->start_time;
2794                     stream_seek(cur_stream, ts, 0, 0);
2795                 }
2796             }
2797             break;
2798         case SDL_VIDEORESIZE:
2799             if (cur_stream) {
2800                 screen = SDL_SetVideoMode(event.resize.w, event.resize.h, 0,
2801                                           SDL_HWSURFACE|SDL_RESIZABLE|SDL_ASYNCBLIT|SDL_HWACCEL);
2802                 screen_width = cur_stream->width = event.resize.w;
2803                 screen_height= cur_stream->height= event.resize.h;
2804             }
2805             break;
2806         case SDL_QUIT:
2807         case FF_QUIT_EVENT:
2808             do_exit();
2809             break;
2810         case FF_ALLOC_EVENT:
2811             video_open(event.user.data1);
2812             alloc_picture(event.user.data1);
2813             break;
2814         case FF_REFRESH_EVENT:
2815             video_refresh_timer(event.user.data1);
2816             cur_stream->refresh=0;
2817             break;
2818         default:
2819             break;
2820         }
2821     }
2822 }
2823
2824 static int opt_frame_size(const char *opt, const char *arg)
2825 {
2826     if (av_parse_video_size(&frame_width, &frame_height, arg) < 0) {
2827         fprintf(stderr, "Incorrect frame size\n");
2828         return AVERROR(EINVAL);
2829     }
2830     if ((frame_width % 2) != 0 || (frame_height % 2) != 0) {
2831         fprintf(stderr, "Frame size must be a multiple of 2\n");
2832         return AVERROR(EINVAL);
2833     }
2834     return 0;
2835 }
2836
2837 static int opt_width(const char *opt, const char *arg)
2838 {
2839     screen_width = parse_number_or_die(opt, arg, OPT_INT64, 1, INT_MAX);
2840     return 0;
2841 }
2842
2843 static int opt_height(const char *opt, const char *arg)
2844 {
2845     screen_height = parse_number_or_die(opt, arg, OPT_INT64, 1, INT_MAX);
2846     return 0;
2847 }
2848
2849 static int opt_format(const char *opt, const char *arg)
2850 {
2851     file_iformat = av_find_input_format(arg);
2852     if (!file_iformat) {
2853         fprintf(stderr, "Unknown input format: %s\n", arg);
2854         return AVERROR(EINVAL);
2855     }
2856     return 0;
2857 }
2858
2859 static int opt_frame_pix_fmt(const char *opt, const char *arg)
2860 {
2861     frame_pix_fmt = av_get_pix_fmt(arg);
2862     return 0;
2863 }
2864
2865 static int opt_sync(const char *opt, const char *arg)
2866 {
2867     if (!strcmp(arg, "audio"))
2868         av_sync_type = AV_SYNC_AUDIO_MASTER;
2869     else if (!strcmp(arg, "video"))
2870         av_sync_type = AV_SYNC_VIDEO_MASTER;
2871     else if (!strcmp(arg, "ext"))
2872         av_sync_type = AV_SYNC_EXTERNAL_CLOCK;
2873     else {
2874         fprintf(stderr, "Unknown value for %s: %s\n", opt, arg);
2875         exit(1);
2876     }
2877     return 0;
2878 }
2879
2880 static int opt_seek(const char *opt, const char *arg)
2881 {
2882     start_time = parse_time_or_die(opt, arg, 1);
2883     return 0;
2884 }
2885
2886 static int opt_duration(const char *opt, const char *arg)
2887 {
2888     duration = parse_time_or_die(opt, arg, 1);
2889     return 0;
2890 }
2891
2892 static int opt_debug(const char *opt, const char *arg)
2893 {
2894     av_log_set_level(99);
2895     debug = parse_number_or_die(opt, arg, OPT_INT64, 0, INT_MAX);
2896     return 0;
2897 }
2898
2899 static int opt_vismv(const char *opt, const char *arg)
2900 {
2901     debug_mv = parse_number_or_die(opt, arg, OPT_INT64, INT_MIN, INT_MAX);
2902     return 0;
2903 }
2904
2905 static int opt_thread_count(const char *opt, const char *arg)
2906 {
2907     thread_count= parse_number_or_die(opt, arg, OPT_INT64, 0, INT_MAX);
2908 #if !HAVE_THREADS
2909     fprintf(stderr, "Warning: not compiled with thread support, using thread emulation\n");
2910 #endif
2911     return 0;
2912 }
2913
2914 static const OptionDef options[] = {
2915 #include "cmdutils_common_opts.h"
2916     { "x", HAS_ARG, {(void*)opt_width}, "force displayed width", "width" },
2917     { "y", HAS_ARG, {(void*)opt_height}, "force displayed height", "height" },
2918     { "s", HAS_ARG | OPT_VIDEO, {(void*)opt_frame_size}, "set frame size (WxH or abbreviation)", "size" },
2919     { "fs", OPT_BOOL, {(void*)&is_full_screen}, "force full screen" },
2920     { "an", OPT_BOOL, {(void*)&audio_disable}, "disable audio" },
2921     { "vn", OPT_BOOL, {(void*)&video_disable}, "disable video" },
2922     { "ast", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&wanted_stream[AVMEDIA_TYPE_AUDIO]}, "select desired audio stream", "stream_number" },
2923     { "vst", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&wanted_stream[AVMEDIA_TYPE_VIDEO]}, "select desired video stream", "stream_number" },
2924     { "sst", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&wanted_stream[AVMEDIA_TYPE_SUBTITLE]}, "select desired subtitle stream", "stream_number" },
2925     { "ss", HAS_ARG, {(void*)&opt_seek}, "seek to a given position in seconds", "pos" },
2926     { "t", HAS_ARG, {(void*)&opt_duration}, "play \"duration\" seconds of audio/video", "duration" },
2927     { "bytes", OPT_INT | HAS_ARG, {(void*)&seek_by_bytes}, "seek by bytes 0=off 1=on -1=auto", "val" },
2928     { "nodisp", OPT_BOOL, {(void*)&display_disable}, "disable graphical display" },
2929     { "f", HAS_ARG, {(void*)opt_format}, "force format", "fmt" },
2930     { "pix_fmt", HAS_ARG | OPT_EXPERT | OPT_VIDEO, {(void*)opt_frame_pix_fmt}, "set pixel format", "format" },
2931     { "stats", OPT_BOOL | OPT_EXPERT, {(void*)&show_status}, "show status", "" },
2932     { "debug", HAS_ARG | OPT_EXPERT, {(void*)opt_debug}, "print specific debug info", "" },
2933     { "bug", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&workaround_bugs}, "workaround bugs", "" },
2934     { "vismv", HAS_ARG | OPT_EXPERT, {(void*)opt_vismv}, "visualize motion vectors", "" },
2935     { "fast", OPT_BOOL | OPT_EXPERT, {(void*)&fast}, "non spec compliant optimizations", "" },
2936     { "genpts", OPT_BOOL | OPT_EXPERT, {(void*)&genpts}, "generate pts", "" },
2937     { "drp", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&decoder_reorder_pts}, "let decoder reorder pts 0=off 1=on -1=auto", ""},
2938     { "lowres", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&lowres}, "", "" },
2939     { "skiploop", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&skip_loop_filter}, "", "" },
2940     { "skipframe", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&skip_frame}, "", "" },
2941     { "skipidct", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&skip_idct}, "", "" },
2942     { "idct", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&idct}, "set idct algo",  "algo" },
2943     { "er", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&error_recognition}, "set error detection threshold (0-4)",  "threshold" },
2944     { "ec", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&error_concealment}, "set error concealment options",  "bit_mask" },
2945     { "sync", HAS_ARG | OPT_EXPERT, {(void*)opt_sync}, "set audio-video sync. type (type=audio/video/ext)", "type" },
2946     { "threads", HAS_ARG | OPT_EXPERT, {(void*)opt_thread_count}, "thread count", "count" },
2947     { "autoexit", OPT_BOOL | OPT_EXPERT, {(void*)&autoexit}, "exit at the end", "" },
2948     { "exitonkeydown", OPT_BOOL | OPT_EXPERT, {(void*)&exit_on_keydown}, "exit on key down", "" },
2949     { "exitonmousedown", OPT_BOOL | OPT_EXPERT, {(void*)&exit_on_mousedown}, "exit on mouse down", "" },
2950     { "loop", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&loop}, "set number of times the playback shall be looped", "loop count" },
2951     { "framedrop", OPT_BOOL | OPT_EXPERT, {(void*)&framedrop}, "drop frames when cpu is too slow", "" },
2952     { "window_title", OPT_STRING | HAS_ARG, {(void*)&window_title}, "set window title", "window title" },
2953 #if CONFIG_AVFILTER
2954     { "vf", OPT_STRING | HAS_ARG, {(void*)&vfilters}, "video filters", "filter list" },
2955 #endif
2956     { "rdftspeed", OPT_INT | HAS_ARG| OPT_AUDIO | OPT_EXPERT, {(void*)&rdftspeed}, "rdft speed", "msecs" },
2957     { "default", HAS_ARG | OPT_AUDIO | OPT_VIDEO | OPT_EXPERT, {(void*)opt_default}, "generic catch all option", "" },
2958     { "i", 0, {NULL}, "ffmpeg compatibility dummy option", ""},
2959     { NULL, },
2960 };
2961
2962 static void show_usage(void)
2963 {
2964     printf("Simple media player\n");
2965     printf("usage: ffplay [options] input_file\n");
2966     printf("\n");
2967 }
2968
2969 static void show_help(void)
2970 {
2971     av_log_set_callback(log_callback_help);
2972     show_usage();
2973     show_help_options(options, "Main options:\n",
2974                       OPT_EXPERT, 0);
2975     show_help_options(options, "\nAdvanced options:\n",
2976                       OPT_EXPERT, OPT_EXPERT);
2977     printf("\n");
2978     av_opt_show2(avcodec_opts[0], NULL,
2979                  AV_OPT_FLAG_DECODING_PARAM, 0);
2980     printf("\n");
2981     av_opt_show2(avformat_opts, NULL,
2982                  AV_OPT_FLAG_DECODING_PARAM, 0);
2983 #if !CONFIG_AVFILTER
2984     printf("\n");
2985     av_opt_show2(sws_opts, NULL,
2986                  AV_OPT_FLAG_ENCODING_PARAM, 0);
2987 #endif
2988     printf("\nWhile playing:\n"
2989            "q, ESC              quit\n"
2990            "f                   toggle full screen\n"
2991            "p, SPC              pause\n"
2992            "a                   cycle audio channel\n"
2993            "v                   cycle video channel\n"
2994            "t                   cycle subtitle channel\n"
2995            "w                   show audio waves\n"
2996            "s                   activate frame-step mode\n"
2997            "left/right          seek backward/forward 10 seconds\n"
2998            "down/up             seek backward/forward 1 minute\n"
2999            "mouse click         seek to percentage in file corresponding to fraction of width\n"
3000            );
3001 }
3002
3003 static void opt_input_file(const char *filename)
3004 {
3005     if (input_filename) {
3006         fprintf(stderr, "Argument '%s' provided as input filename, but '%s' was already specified.\n",
3007                 filename, input_filename);
3008         exit(1);
3009     }
3010     if (!strcmp(filename, "-"))
3011         filename = "pipe:";
3012     input_filename = filename;
3013 }
3014
3015 /* program entry point */
3016 int main(int argc, char **argv)
3017 {
3018     int flags;
3019
3020     av_log_set_flags(AV_LOG_SKIP_REPEATED);
3021
3022     /* register all codecs, demuxers and protocols */
3023     avcodec_register_all();
3024 #if CONFIG_AVDEVICE
3025     avdevice_register_all();
3026 #endif
3027 #if CONFIG_AVFILTER
3028     avfilter_register_all();
3029 #endif
3030     av_register_all();
3031
3032     init_opts();
3033
3034     show_banner();
3035
3036     parse_options(argc, argv, options, opt_input_file);
3037
3038     if (!input_filename) {
3039         show_usage();
3040         fprintf(stderr, "An input file must be specified\n");
3041         fprintf(stderr, "Use -h to get full help or, even better, run 'man ffplay'\n");
3042         exit(1);
3043     }
3044
3045     if (display_disable) {
3046         video_disable = 1;
3047     }
3048     flags = SDL_INIT_VIDEO | SDL_INIT_AUDIO | SDL_INIT_TIMER;
3049 #if !defined(__MINGW32__) && !defined(__APPLE__)
3050     flags |= SDL_INIT_EVENTTHREAD; /* Not supported on Windows or Mac OS X */
3051 #endif
3052     if (SDL_Init (flags)) {
3053         fprintf(stderr, "Could not initialize SDL - %s\n", SDL_GetError());
3054         exit(1);
3055     }
3056
3057     if (!display_disable) {
3058 #if HAVE_SDL_VIDEO_SIZE
3059         const SDL_VideoInfo *vi = SDL_GetVideoInfo();
3060         fs_screen_width = vi->current_w;
3061         fs_screen_height = vi->current_h;
3062 #endif
3063     }
3064
3065     SDL_EventState(SDL_ACTIVEEVENT, SDL_IGNORE);
3066     SDL_EventState(SDL_SYSWMEVENT, SDL_IGNORE);
3067     SDL_EventState(SDL_USEREVENT, SDL_IGNORE);
3068
3069     av_init_packet(&flush_pkt);
3070     flush_pkt.data= "FLUSH";
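         /* the flush packet is recognised by its data pointer in the decoding
            threads and makes them call avcodec_flush_buffers() after a seek */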
3071
3072     cur_stream = stream_open(input_filename, file_iformat);
3073
3074     event_loop();
3075
3076     /* never returns */
3077
3078     return 0;
3079 }