]> git.sesse.net Git - ffmpeg/blob - ffplay.c
ffplay: warn that -s is no longer working, suggest alternative
[ffmpeg] / ffplay.c
1 /*
2  * ffplay : Simple Media Player based on the Libav libraries
3  * Copyright (c) 2003 Fabrice Bellard
4  *
5  * This file is part of Libav.
6  *
7  * Libav is free software; you can redistribute it and/or
8  * modify it under the terms of the GNU Lesser General Public
9  * License as published by the Free Software Foundation; either
10  * version 2.1 of the License, or (at your option) any later version.
11  *
12  * Libav is distributed in the hope that it will be useful,
13  * but WITHOUT ANY WARRANTY; without even the implied warranty of
14  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
15  * Lesser General Public License for more details.
16  *
17  * You should have received a copy of the GNU Lesser General Public
18  * License along with Libav; if not, write to the Free Software
19  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
20  */
21
22 #include "config.h"
23 #include <inttypes.h>
24 #include <math.h>
25 #include <limits.h>
26 #include "libavutil/avstring.h"
27 #include "libavutil/colorspace.h"
28 #include "libavutil/mathematics.h"
29 #include "libavutil/pixdesc.h"
30 #include "libavutil/imgutils.h"
31 #include "libavutil/dict.h"
32 #include "libavutil/parseutils.h"
33 #include "libavutil/samplefmt.h"
34 #include "libavformat/avformat.h"
35 #include "libavdevice/avdevice.h"
36 #include "libswscale/swscale.h"
37 #include "libavcodec/audioconvert.h"
38 #include "libavutil/opt.h"
39 #include "libavcodec/avfft.h"
40
41 #if CONFIG_AVFILTER
42 # include "libavfilter/avfilter.h"
43 # include "libavfilter/avfiltergraph.h"
44 #endif
45
46 #include "cmdutils.h"
47
48 #include <SDL.h>
49 #include <SDL_thread.h>
50
51 #ifdef __MINGW32__
52 #undef main /* We don't want SDL to override our main() */
53 #endif
54
55 #include <unistd.h>
56 #include <assert.h>
57
const char program_name[] = "ffplay";
const int program_birth_year = 2003;

/* demux-ahead buffering limits, in bytes (consumed by the read thread,
   which lies outside this excerpt) */
#define MAX_QUEUE_SIZE (15 * 1024 * 1024)
#define MIN_AUDIOQ_SIZE (20 * 16 * 1024)
#define MIN_FRAMES 5

/* SDL audio buffer size, in samples. Should be small to have precise
   A/V sync as SDL does not have hardware buffer fullness info. */
#define SDL_AUDIO_BUFFER_SIZE 1024

/* no AV sync correction is done if below the AV sync threshold */
#define AV_SYNC_THRESHOLD 0.01
/* no AV correction is done if too big error */
#define AV_NOSYNC_THRESHOLD 10.0

#define FRAME_SKIP_FACTOR 0.05

/* maximum audio speed change to get correct sync */
#define SAMPLE_CORRECTION_PERCENT_MAX 10

/* we use about AUDIO_DIFF_AVG_NB A-V differences to make the average */
#define AUDIO_DIFF_AVG_NB   20

/* NOTE: the size must be big enough to compensate the hardware audio buffersize size */
#define SAMPLE_ARRAY_SIZE (2*65536)

/* libswscale flags used when converting frames for display */
static int sws_flags = SWS_BICUBIC;
86
/* Thread-safe FIFO of demuxed packets, shared between the demux thread
 * (producer) and a decoder thread (consumer).  All fields are protected
 * by 'mutex'; waiters on an empty queue block on 'cond'. */
typedef struct PacketQueue {
    AVPacketList *first_pkt, *last_pkt; /* singly linked list head/tail */
    int nb_packets;                     /* number of queued packets */
    int size;                           /* payload bytes + per-node overhead */
    int abort_request;                  /* set to make blocked readers return */
    SDL_mutex *mutex;
    SDL_cond *cond;
} PacketQueue;
95
96 #define VIDEO_PICTURE_QUEUE_SIZE 2
97 #define SUBPICTURE_QUEUE_SIZE 4
98
/* One decoded video frame queued for display, with its timing info. */
typedef struct VideoPicture {
    double pts;                                  ///<presentation time stamp for this picture
    double target_clock;                         ///<av_gettime() time at which this should be displayed ideally
    int64_t pos;                                 ///<byte position in file
    SDL_Overlay *bmp;                            ///<SDL YUV overlay holding the pixel data
    int width, height; /* source height & width */
    int allocated;     /* true once 'bmp' has been created */
    enum PixelFormat pix_fmt;

#if CONFIG_AVFILTER
    AVFilterBufferRef *picref;                   ///<filtered buffer this picture was made from
#endif
} VideoPicture;
112
/* A decoded subtitle together with the pts at which it becomes visible. */
typedef struct SubPicture {
    double pts; /* presentation time stamp for this picture */
    AVSubtitle sub;
} SubPicture;
117
/* Master-clock selection: which clock the other streams are slaved to. */
enum {
    AV_SYNC_AUDIO_MASTER, /* default choice */
    AV_SYNC_VIDEO_MASTER,
    AV_SYNC_EXTERNAL_CLOCK, /* synchronize to an external clock */
};
123
/* Global player state: one instance per opened media file, shared by the
 * demux, video-decode, subtitle-decode, refresh and SDL audio callback
 * threads. */
typedef struct VideoState {
    SDL_Thread *parse_tid;     /* demux (read) thread */
    SDL_Thread *video_tid;     /* video decoding thread */
    SDL_Thread *refresh_tid;   /* thread posting FF_REFRESH_EVENTs */
    AVInputFormat *iformat;
    int no_background;
    int abort_request;         /* tells all threads to quit */
    int paused;
    int last_paused;
    int seek_req;              /* a seek was requested */
    int seek_flags;
    int64_t seek_pos;
    int64_t seek_rel;
    int read_pause_return;
    AVFormatContext *ic;
    int dtg_active_format;

    int audio_stream;          /* index of the selected audio stream */

    int av_sync_type;          /* one of the AV_SYNC_* values above */
    double external_clock; /* external clock base */
    int64_t external_clock_time;

    double audio_clock;
    double audio_diff_cum; /* used for AV difference average computation */
    double audio_diff_avg_coef;
    double audio_diff_threshold;
    int audio_diff_avg_count;
    AVStream *audio_st;
    PacketQueue audioq;
    int audio_hw_buf_size;
    /* samples output by the codec. we reserve more space for avsync
       compensation */
    DECLARE_ALIGNED(16,uint8_t,audio_buf1)[(AVCODEC_MAX_AUDIO_FRAME_SIZE * 3) / 2];
    DECLARE_ALIGNED(16,uint8_t,audio_buf2)[(AVCODEC_MAX_AUDIO_FRAME_SIZE * 3) / 2];
    uint8_t *audio_buf;
    unsigned int audio_buf_size; /* in bytes */
    int audio_buf_index; /* in bytes */
    AVPacket audio_pkt_temp;
    AVPacket audio_pkt;
    enum AVSampleFormat audio_src_fmt;
    AVAudioConvert *reformat_ctx;

    int show_audio; /* if true, display audio samples */
    int16_t sample_array[SAMPLE_ARRAY_SIZE]; /* ring buffer of recent samples for visualization */
    int sample_array_index;
    int last_i_start;
    RDFTContext *rdft;         /* RDFT context for the spectrum display */
    int rdft_bits;
    FFTSample *rdft_data;
    int xpos;                  /* current spectrogram column */

    SDL_Thread *subtitle_tid;  /* subtitle decoding thread */
    int subtitle_stream;
    int subtitle_stream_changed;
    AVStream *subtitle_st;
    PacketQueue subtitleq;
    SubPicture subpq[SUBPICTURE_QUEUE_SIZE];
    int subpq_size, subpq_rindex, subpq_windex;
    SDL_mutex *subpq_mutex;
    SDL_cond *subpq_cond;

    double frame_timer;
    double frame_last_pts;
    double frame_last_delay;
    double video_clock;                          ///<pts of last decoded frame / predicted pts of next decoded frame
    int video_stream;
    AVStream *video_st;
    PacketQueue videoq;
    double video_current_pts;                    ///<current displayed pts (different from video_clock if frame fifos are used)
    double video_current_pts_drift;              ///<video_current_pts - time (av_gettime) at which we updated video_current_pts - used to have running video pts
    int64_t video_current_pos;                   ///<current displayed file pos
    VideoPicture pictq[VIDEO_PICTURE_QUEUE_SIZE];
    int pictq_size, pictq_rindex, pictq_windex;
    SDL_mutex *pictq_mutex;
    SDL_cond *pictq_cond;
#if !CONFIG_AVFILTER
    struct SwsContext *img_convert_ctx;
#endif

    //    QETimer *video_timer;
    char filename[1024];
    int width, height, xleft, ytop; /* output window geometry */

    PtsCorrectionContext pts_ctx;

#if CONFIG_AVFILTER
    AVFilterContext *out_video_filter;          ///<the last filter in the video chain
#endif

    float skip_frames;
    float skip_frames_index;
    int refresh;               /* a refresh event is already pending */
} VideoState;
218
static void show_help(void);

/* options specified by the user */
static AVInputFormat *file_iformat;
static const char *input_filename;
static const char *window_title;
static int fs_screen_width;
static int fs_screen_height;
static int screen_width = 0;
static int screen_height = 0;
static enum PixelFormat frame_pix_fmt = PIX_FMT_NONE;
static int audio_disable;
static int video_disable;
static int wanted_stream[AVMEDIA_TYPE_NB]={   /* -1 = auto-select (presumably "best" stream; selection code not shown here) */
    [AVMEDIA_TYPE_AUDIO]=-1,
    [AVMEDIA_TYPE_VIDEO]=-1,
    [AVMEDIA_TYPE_SUBTITLE]=-1,
};
static int seek_by_bytes=-1;                  /* -1 = decide automatically */
static int display_disable;
static int show_status = 1;
static int av_sync_type = AV_SYNC_AUDIO_MASTER;
static int64_t start_time = AV_NOPTS_VALUE;
static int64_t duration = AV_NOPTS_VALUE;
static int debug = 0;
static int debug_mv = 0;
static int step = 0;
static int thread_count = 1;
static int workaround_bugs = 1;
static int fast = 0;
static int genpts = 0;
static int lowres = 0;
static int idct = FF_IDCT_AUTO;
static enum AVDiscard skip_frame= AVDISCARD_DEFAULT;
static enum AVDiscard skip_idct= AVDISCARD_DEFAULT;
static enum AVDiscard skip_loop_filter= AVDISCARD_DEFAULT;
static int error_recognition = FF_ER_CAREFUL;
static int error_concealment = 3;
static int decoder_reorder_pts= -1;
static int autoexit;
static int exit_on_keydown;
static int exit_on_mousedown;
static int loop=1;
static int framedrop=1;

static int rdftspeed=20;                      /* spectrum refresh period, in ms (see refresh_thread) */
#if CONFIG_AVFILTER
static char *vfilters = NULL;
#endif

/* current context */
static int is_full_screen;
static VideoState *cur_stream;
static int64_t audio_callback_time;           /* av_gettime() at the last audio callback */

static AVPacket flush_pkt;                    /* sentinel packet telling decoders to flush */

/* custom SDL events used to talk to the main event loop */
#define FF_ALLOC_EVENT   (SDL_USEREVENT)
#define FF_REFRESH_EVENT (SDL_USEREVENT + 1)
#define FF_QUIT_EVENT    (SDL_USEREVENT + 2)

static SDL_Surface *screen;

static int packet_queue_put(PacketQueue *q, AVPacket *pkt);
283
284 /* packet queue handling */
/* Initialize a packet queue and prime it with the global flush packet so
 * the consuming decoder flushes its state before the first real packet.
 * NOTE(review): SDL_CreateMutex/SDL_CreateCond results are not checked;
 * a NULL return would crash on first lock — confirm this is acceptable. */
static void packet_queue_init(PacketQueue *q)
{
    memset(q, 0, sizeof(PacketQueue));
    q->mutex = SDL_CreateMutex();
    q->cond = SDL_CreateCond();
    packet_queue_put(q, &flush_pkt);
}
292
293 static void packet_queue_flush(PacketQueue *q)
294 {
295     AVPacketList *pkt, *pkt1;
296
297     SDL_LockMutex(q->mutex);
298     for(pkt = q->first_pkt; pkt != NULL; pkt = pkt1) {
299         pkt1 = pkt->next;
300         av_free_packet(&pkt->pkt);
301         av_freep(&pkt);
302     }
303     q->last_pkt = NULL;
304     q->first_pkt = NULL;
305     q->nb_packets = 0;
306     q->size = 0;
307     SDL_UnlockMutex(q->mutex);
308 }
309
/* Destroy a packet queue: free all queued packets and its SDL primitives. */
static void packet_queue_end(PacketQueue *q)
{
    packet_queue_flush(q);
    SDL_DestroyMutex(q->mutex);
    SDL_DestroyCond(q->cond);
}
316
317 static int packet_queue_put(PacketQueue *q, AVPacket *pkt)
318 {
319     AVPacketList *pkt1;
320
321     /* duplicate the packet */
322     if (pkt!=&flush_pkt && av_dup_packet(pkt) < 0)
323         return -1;
324
325     pkt1 = av_malloc(sizeof(AVPacketList));
326     if (!pkt1)
327         return -1;
328     pkt1->pkt = *pkt;
329     pkt1->next = NULL;
330
331
332     SDL_LockMutex(q->mutex);
333
334     if (!q->last_pkt)
335
336         q->first_pkt = pkt1;
337     else
338         q->last_pkt->next = pkt1;
339     q->last_pkt = pkt1;
340     q->nb_packets++;
341     q->size += pkt1->pkt.size + sizeof(*pkt1);
342     /* XXX: should duplicate packet data in DV case */
343     SDL_CondSignal(q->cond);
344
345     SDL_UnlockMutex(q->mutex);
346     return 0;
347 }
348
/* Ask readers blocked in packet_queue_get() to give up and return -1. */
static void packet_queue_abort(PacketQueue *q)
{
    SDL_LockMutex(q->mutex);

    q->abort_request = 1;

    SDL_CondSignal(q->cond);

    SDL_UnlockMutex(q->mutex);
}
359
360 /* return < 0 if aborted, 0 if no packet and > 0 if packet.  */
361 static int packet_queue_get(PacketQueue *q, AVPacket *pkt, int block)
362 {
363     AVPacketList *pkt1;
364     int ret;
365
366     SDL_LockMutex(q->mutex);
367
368     for(;;) {
369         if (q->abort_request) {
370             ret = -1;
371             break;
372         }
373
374         pkt1 = q->first_pkt;
375         if (pkt1) {
376             q->first_pkt = pkt1->next;
377             if (!q->first_pkt)
378                 q->last_pkt = NULL;
379             q->nb_packets--;
380             q->size -= pkt1->pkt.size + sizeof(*pkt1);
381             *pkt = pkt1->pkt;
382             av_free(pkt1);
383             ret = 1;
384             break;
385         } else if (!block) {
386             ret = 0;
387             break;
388         } else {
389             SDL_CondWait(q->cond, q->mutex);
390         }
391     }
392     SDL_UnlockMutex(q->mutex);
393     return ret;
394 }
395
396 static inline void fill_rectangle(SDL_Surface *screen,
397                                   int x, int y, int w, int h, int color)
398 {
399     SDL_Rect rect;
400     rect.x = x;
401     rect.y = y;
402     rect.w = w;
403     rect.h = h;
404     SDL_FillRect(screen, &rect, color);
405 }
406
/* Blend component 'newp' over 'oldp' with alpha 'a' (0-255); 's' is an
 * extra left shift applied when the caller has pre-accumulated 2^s
 * source samples (chroma averaging in blend_subrect).
 * Fix: every macro parameter is now parenthesized — previously an
 * argument containing an operator of lower precedence than '<<' or '*'
 * (e.g. 'x & 1') would bind incorrectly inside the expansion. */
#define ALPHA_BLEND(a, oldp, newp, s)\
(((((oldp) << (s)) * (255 - (a))) + ((newp) * (a))) / (255 << (s)))

/* Unpack an ARGB pixel stored at 's' into its four 8-bit components. */
#define RGBA_IN(r, g, b, a, s)\
{\
    unsigned int v = ((const uint32_t *)(s))[0];\
    a = (v >> 24) & 0xff;\
    r = (v >> 16) & 0xff;\
    g = (v >> 8) & 0xff;\
    b = v & 0xff;\
}

/* Look up the palette entry for the 8-bit index at 's' and unpack it into
 * AYUV components (the palette is in YCrCb — see blend_subrect). */
#define YUVA_IN(y, u, v, a, s, pal)\
{\
    unsigned int val = ((const uint32_t *)(pal))[*(const uint8_t*)(s)];\
    a = (val >> 24) & 0xff;\
    y = (val >> 16) & 0xff;\
    u = (val >> 8) & 0xff;\
    v = val & 0xff;\
}

/* Pack AYUV components into the 32-bit word at 'd'. */
#define YUVA_OUT(d, y, u, v, a)\
{\
    ((uint32_t *)(d))[0] = (a << 24) | (y << 16) | (u << 8) | v;\
}


/* bytes per pixel of the palettized subtitle bitmap */
#define BPP 1
435
436 static void blend_subrect(AVPicture *dst, const AVSubtitleRect *rect, int imgw, int imgh)
437 {
438     int wrap, wrap3, width2, skip2;
439     int y, u, v, a, u1, v1, a1, w, h;
440     uint8_t *lum, *cb, *cr;
441     const uint8_t *p;
442     const uint32_t *pal;
443     int dstx, dsty, dstw, dsth;
444
445     dstw = av_clip(rect->w, 0, imgw);
446     dsth = av_clip(rect->h, 0, imgh);
447     dstx = av_clip(rect->x, 0, imgw - dstw);
448     dsty = av_clip(rect->y, 0, imgh - dsth);
449     lum = dst->data[0] + dsty * dst->linesize[0];
450     cb = dst->data[1] + (dsty >> 1) * dst->linesize[1];
451     cr = dst->data[2] + (dsty >> 1) * dst->linesize[2];
452
453     width2 = ((dstw + 1) >> 1) + (dstx & ~dstw & 1);
454     skip2 = dstx >> 1;
455     wrap = dst->linesize[0];
456     wrap3 = rect->pict.linesize[0];
457     p = rect->pict.data[0];
458     pal = (const uint32_t *)rect->pict.data[1];  /* Now in YCrCb! */
459
460     if (dsty & 1) {
461         lum += dstx;
462         cb += skip2;
463         cr += skip2;
464
465         if (dstx & 1) {
466             YUVA_IN(y, u, v, a, p, pal);
467             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
468             cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
469             cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
470             cb++;
471             cr++;
472             lum++;
473             p += BPP;
474         }
475         for(w = dstw - (dstx & 1); w >= 2; w -= 2) {
476             YUVA_IN(y, u, v, a, p, pal);
477             u1 = u;
478             v1 = v;
479             a1 = a;
480             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
481
482             YUVA_IN(y, u, v, a, p + BPP, pal);
483             u1 += u;
484             v1 += v;
485             a1 += a;
486             lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
487             cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
488             cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
489             cb++;
490             cr++;
491             p += 2 * BPP;
492             lum += 2;
493         }
494         if (w) {
495             YUVA_IN(y, u, v, a, p, pal);
496             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
497             cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
498             cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
499             p++;
500             lum++;
501         }
502         p += wrap3 - dstw * BPP;
503         lum += wrap - dstw - dstx;
504         cb += dst->linesize[1] - width2 - skip2;
505         cr += dst->linesize[2] - width2 - skip2;
506     }
507     for(h = dsth - (dsty & 1); h >= 2; h -= 2) {
508         lum += dstx;
509         cb += skip2;
510         cr += skip2;
511
512         if (dstx & 1) {
513             YUVA_IN(y, u, v, a, p, pal);
514             u1 = u;
515             v1 = v;
516             a1 = a;
517             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
518             p += wrap3;
519             lum += wrap;
520             YUVA_IN(y, u, v, a, p, pal);
521             u1 += u;
522             v1 += v;
523             a1 += a;
524             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
525             cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
526             cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
527             cb++;
528             cr++;
529             p += -wrap3 + BPP;
530             lum += -wrap + 1;
531         }
532         for(w = dstw - (dstx & 1); w >= 2; w -= 2) {
533             YUVA_IN(y, u, v, a, p, pal);
534             u1 = u;
535             v1 = v;
536             a1 = a;
537             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
538
539             YUVA_IN(y, u, v, a, p + BPP, pal);
540             u1 += u;
541             v1 += v;
542             a1 += a;
543             lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
544             p += wrap3;
545             lum += wrap;
546
547             YUVA_IN(y, u, v, a, p, pal);
548             u1 += u;
549             v1 += v;
550             a1 += a;
551             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
552
553             YUVA_IN(y, u, v, a, p + BPP, pal);
554             u1 += u;
555             v1 += v;
556             a1 += a;
557             lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
558
559             cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 2);
560             cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 2);
561
562             cb++;
563             cr++;
564             p += -wrap3 + 2 * BPP;
565             lum += -wrap + 2;
566         }
567         if (w) {
568             YUVA_IN(y, u, v, a, p, pal);
569             u1 = u;
570             v1 = v;
571             a1 = a;
572             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
573             p += wrap3;
574             lum += wrap;
575             YUVA_IN(y, u, v, a, p, pal);
576             u1 += u;
577             v1 += v;
578             a1 += a;
579             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
580             cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
581             cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
582             cb++;
583             cr++;
584             p += -wrap3 + BPP;
585             lum += -wrap + 1;
586         }
587         p += wrap3 + (wrap3 - dstw * BPP);
588         lum += wrap + (wrap - dstw - dstx);
589         cb += dst->linesize[1] - width2 - skip2;
590         cr += dst->linesize[2] - width2 - skip2;
591     }
592     /* handle odd height */
593     if (h) {
594         lum += dstx;
595         cb += skip2;
596         cr += skip2;
597
598         if (dstx & 1) {
599             YUVA_IN(y, u, v, a, p, pal);
600             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
601             cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
602             cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
603             cb++;
604             cr++;
605             lum++;
606             p += BPP;
607         }
608         for(w = dstw - (dstx & 1); w >= 2; w -= 2) {
609             YUVA_IN(y, u, v, a, p, pal);
610             u1 = u;
611             v1 = v;
612             a1 = a;
613             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
614
615             YUVA_IN(y, u, v, a, p + BPP, pal);
616             u1 += u;
617             v1 += v;
618             a1 += a;
619             lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
620             cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u, 1);
621             cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v, 1);
622             cb++;
623             cr++;
624             p += 2 * BPP;
625             lum += 2;
626         }
627         if (w) {
628             YUVA_IN(y, u, v, a, p, pal);
629             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
630             cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
631             cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
632         }
633     }
634 }
635
/* Release the decoded subtitle data held by a subtitle queue slot. */
static void free_subpicture(SubPicture *sp)
{
    avsubtitle_free(&sp->sub);
}
640
/* Display the picture at the read index of the picture queue, blending
 * any currently visible subtitle into the YUV overlay first.  The
 * destination rectangle is letterboxed to preserve the aspect ratio. */
static void video_image_display(VideoState *is)
{
    VideoPicture *vp;
    SubPicture *sp;
    AVPicture pict;
    float aspect_ratio;
    int width, height, x, y;
    SDL_Rect rect;
    int i;

    vp = &is->pictq[is->pictq_rindex];
    if (vp->bmp) {
#if CONFIG_AVFILTER
         if (vp->picref->video->pixel_aspect.num == 0)
             aspect_ratio = 0;
         else
             aspect_ratio = av_q2d(vp->picref->video->pixel_aspect);
#else

        /* XXX: use variable in the frame */
        if (is->video_st->sample_aspect_ratio.num)
            aspect_ratio = av_q2d(is->video_st->sample_aspect_ratio);
        else if (is->video_st->codec->sample_aspect_ratio.num)
            aspect_ratio = av_q2d(is->video_st->codec->sample_aspect_ratio);
        else
            aspect_ratio = 0;
#endif
        /* unknown/invalid SAR: assume square pixels */
        if (aspect_ratio <= 0.0)
            aspect_ratio = 1.0;
        aspect_ratio *= (float)vp->width / (float)vp->height;

        if (is->subtitle_st)
        {
            if (is->subpq_size > 0)
            {
                sp = &is->subpq[is->subpq_rindex];

                /* blend only once the subtitle's display time has come */
                if (vp->pts >= sp->pts + ((float) sp->sub.start_display_time / 1000))
                {
                    SDL_LockYUVOverlay (vp->bmp);

                    /* note the swapped chroma plane indices (1 <-> 2):
                       the SDL overlay stores its planes in Y/V/U order */
                    pict.data[0] = vp->bmp->pixels[0];
                    pict.data[1] = vp->bmp->pixels[2];
                    pict.data[2] = vp->bmp->pixels[1];

                    pict.linesize[0] = vp->bmp->pitches[0];
                    pict.linesize[1] = vp->bmp->pitches[2];
                    pict.linesize[2] = vp->bmp->pitches[1];

                    for (i = 0; i < sp->sub.num_rects; i++)
                        blend_subrect(&pict, sp->sub.rects[i],
                                      vp->bmp->w, vp->bmp->h);

                    SDL_UnlockYUVOverlay (vp->bmp);
                }
            }
        }


        /* XXX: we suppose the screen has a 1.0 pixel ratio */
        height = is->height;
        width = ((int)rint(height * aspect_ratio)) & ~1;
        if (width > is->width) {
            width = is->width;
            height = ((int)rint(width / aspect_ratio)) & ~1;
        }
        /* center the (even-sized) destination rectangle in the window */
        x = (is->width - width) / 2;
        y = (is->height - height) / 2;
        is->no_background = 0;
        rect.x = is->xleft + x;
        rect.y = is->ytop  + y;
        rect.w = width;
        rect.h = height;
        SDL_DisplayYUVOverlay(vp->bmp, &rect);
    }
}
717
/* get the current audio output buffer size, in samples. With SDL, we
   cannot have a precise information */
static int audio_write_get_buf_size(VideoState *is)
{
    /* NOTE(review): despite the comment above, both fields are documented
       as byte counts, so this value is in bytes — callers divide by the
       byte rate accordingly */
    return is->audio_buf_size - is->audio_buf_index;
}
724
/* Mathematical modulo: like a % b, but the result is always in [0, b)
 * for positive b, even when a is negative. */
static inline int compute_mod(int a, int b)
{
    int r = a % b;
    return r >= 0 ? r : r + b;
}
733
734 static void video_audio_display(VideoState *s)
735 {
736     int i, i_start, x, y1, y, ys, delay, n, nb_display_channels;
737     int ch, channels, h, h2, bgcolor, fgcolor;
738     int16_t time_diff;
739     int rdft_bits, nb_freq;
740
741     for(rdft_bits=1; (1<<rdft_bits)<2*s->height; rdft_bits++)
742         ;
743     nb_freq= 1<<(rdft_bits-1);
744
745     /* compute display index : center on currently output samples */
746     channels = s->audio_st->codec->channels;
747     nb_display_channels = channels;
748     if (!s->paused) {
749         int data_used= s->show_audio==1 ? s->width : (2*nb_freq);
750         n = 2 * channels;
751         delay = audio_write_get_buf_size(s);
752         delay /= n;
753
754         /* to be more precise, we take into account the time spent since
755            the last buffer computation */
756         if (audio_callback_time) {
757             time_diff = av_gettime() - audio_callback_time;
758             delay -= (time_diff * s->audio_st->codec->sample_rate) / 1000000;
759         }
760
761         delay += 2*data_used;
762         if (delay < data_used)
763             delay = data_used;
764
765         i_start= x = compute_mod(s->sample_array_index - delay * channels, SAMPLE_ARRAY_SIZE);
766         if(s->show_audio==1){
767             h= INT_MIN;
768             for(i=0; i<1000; i+=channels){
769                 int idx= (SAMPLE_ARRAY_SIZE + x - i) % SAMPLE_ARRAY_SIZE;
770                 int a= s->sample_array[idx];
771                 int b= s->sample_array[(idx + 4*channels)%SAMPLE_ARRAY_SIZE];
772                 int c= s->sample_array[(idx + 5*channels)%SAMPLE_ARRAY_SIZE];
773                 int d= s->sample_array[(idx + 9*channels)%SAMPLE_ARRAY_SIZE];
774                 int score= a-d;
775                 if(h<score && (b^c)<0){
776                     h= score;
777                     i_start= idx;
778                 }
779             }
780         }
781
782         s->last_i_start = i_start;
783     } else {
784         i_start = s->last_i_start;
785     }
786
787     bgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0x00);
788     if(s->show_audio==1){
789         fill_rectangle(screen,
790                        s->xleft, s->ytop, s->width, s->height,
791                        bgcolor);
792
793         fgcolor = SDL_MapRGB(screen->format, 0xff, 0xff, 0xff);
794
795         /* total height for one channel */
796         h = s->height / nb_display_channels;
797         /* graph height / 2 */
798         h2 = (h * 9) / 20;
799         for(ch = 0;ch < nb_display_channels; ch++) {
800             i = i_start + ch;
801             y1 = s->ytop + ch * h + (h / 2); /* position of center line */
802             for(x = 0; x < s->width; x++) {
803                 y = (s->sample_array[i] * h2) >> 15;
804                 if (y < 0) {
805                     y = -y;
806                     ys = y1 - y;
807                 } else {
808                     ys = y1;
809                 }
810                 fill_rectangle(screen,
811                                s->xleft + x, ys, 1, y,
812                                fgcolor);
813                 i += channels;
814                 if (i >= SAMPLE_ARRAY_SIZE)
815                     i -= SAMPLE_ARRAY_SIZE;
816             }
817         }
818
819         fgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0xff);
820
821         for(ch = 1;ch < nb_display_channels; ch++) {
822             y = s->ytop + ch * h;
823             fill_rectangle(screen,
824                            s->xleft, y, s->width, 1,
825                            fgcolor);
826         }
827         SDL_UpdateRect(screen, s->xleft, s->ytop, s->width, s->height);
828     }else{
829         nb_display_channels= FFMIN(nb_display_channels, 2);
830         if(rdft_bits != s->rdft_bits){
831             av_rdft_end(s->rdft);
832             av_free(s->rdft_data);
833             s->rdft = av_rdft_init(rdft_bits, DFT_R2C);
834             s->rdft_bits= rdft_bits;
835             s->rdft_data= av_malloc(4*nb_freq*sizeof(*s->rdft_data));
836         }
837         {
838             FFTSample *data[2];
839             for(ch = 0;ch < nb_display_channels; ch++) {
840                 data[ch] = s->rdft_data + 2*nb_freq*ch;
841                 i = i_start + ch;
842                 for(x = 0; x < 2*nb_freq; x++) {
843                     double w= (x-nb_freq)*(1.0/nb_freq);
844                     data[ch][x]= s->sample_array[i]*(1.0-w*w);
845                     i += channels;
846                     if (i >= SAMPLE_ARRAY_SIZE)
847                         i -= SAMPLE_ARRAY_SIZE;
848                 }
849                 av_rdft_calc(s->rdft, data[ch]);
850             }
851             //least efficient way to do this, we should of course directly access it but its more than fast enough
852             for(y=0; y<s->height; y++){
853                 double w= 1/sqrt(nb_freq);
854                 int a= sqrt(w*sqrt(data[0][2*y+0]*data[0][2*y+0] + data[0][2*y+1]*data[0][2*y+1]));
855                 int b= (nb_display_channels == 2 ) ? sqrt(w*sqrt(data[1][2*y+0]*data[1][2*y+0]
856                        + data[1][2*y+1]*data[1][2*y+1])) : a;
857                 a= FFMIN(a,255);
858                 b= FFMIN(b,255);
859                 fgcolor = SDL_MapRGB(screen->format, a, b, (a+b)/2);
860
861                 fill_rectangle(screen,
862                             s->xpos, s->height-y, 1, 1,
863                             fgcolor);
864             }
865         }
866         SDL_UpdateRect(screen, s->xpos, s->ytop, 1, s->height);
867         s->xpos++;
868         if(s->xpos >= s->width)
869             s->xpos= s->xleft;
870     }
871 }
872
/* Create (or resize) the SDL output surface.  The size comes from, in
 * order of preference: the fullscreen mode, the user-supplied window
 * size, the video stream (or filter output) dimensions, or a 640x480
 * fallback.  Returns 0 on success, -1 if SDL cannot set the mode. */
static int video_open(VideoState *is){
    int flags = SDL_HWSURFACE|SDL_ASYNCBLIT|SDL_HWACCEL;
    int w,h;

    if(is_full_screen) flags |= SDL_FULLSCREEN;
    else               flags |= SDL_RESIZABLE;

    if (is_full_screen && fs_screen_width) {
        w = fs_screen_width;
        h = fs_screen_height;
    } else if(!is_full_screen && screen_width){
        w = screen_width;
        h = screen_height;
#if CONFIG_AVFILTER
    }else if (is->out_video_filter && is->out_video_filter->inputs[0]){
        w = is->out_video_filter->inputs[0]->w;
        h = is->out_video_filter->inputs[0]->h;
#else
    }else if (is->video_st && is->video_st->codec->width){
        w = is->video_st->codec->width;
        h = is->video_st->codec->height;
#endif
    } else {
        w = 640;
        h = 480;
    }
    /* nothing to do if the surface already has the requested size */
    if(screen && is->width == screen->w && screen->w == w
       && is->height== screen->h && screen->h == h)
        return 0;

#ifndef __APPLE__
    screen = SDL_SetVideoMode(w, h, 0, flags);
#else
    /* setting bits_per_pixel = 0 or 32 causes blank video on OS X */
    screen = SDL_SetVideoMode(w, h, 24, flags);
#endif
    if (!screen) {
        fprintf(stderr, "SDL: could not set video mode - exiting\n");
        return -1;
    }
    if (!window_title)
        window_title = input_filename;
    SDL_WM_SetCaption(window_title, window_title);

    is->width = screen->w;
    is->height = screen->h;

    return 0;
}
922
923 /* display the current picture, if any */
924 static void video_display(VideoState *is)
925 {
926     if(!screen)
927         video_open(cur_stream);
928     if (is->audio_st && is->show_audio)
929         video_audio_display(is);
930     else if (is->video_st)
931         video_image_display(is);
932 }
933
/* Thread body: periodically post FF_REFRESH_EVENT so the main event loop
 * redraws — every rdftspeed ms for the audio visualization, else every
 * 5 ms.  The 'refresh' flag avoids flooding the SDL event queue with
 * events the main loop has not consumed yet. */
static int refresh_thread(void *opaque)
{
    VideoState *is= opaque;
    while(!is->abort_request){
        SDL_Event event;
        event.type = FF_REFRESH_EVENT;
        event.user.data1 = opaque;
        if(!is->refresh){
            is->refresh=1;
            SDL_PushEvent(&event);
        }
        usleep(is->audio_st && is->show_audio ? rdftspeed*1000 : 5000); //FIXME ideally we should wait the correct time but SDLs event passing is so slow it would be silly
    }
    return 0;
}
949
950 /* get the current audio clock value */
951 static double get_audio_clock(VideoState *is)
952 {
953     double pts;
954     int hw_buf_size, bytes_per_sec;
955     pts = is->audio_clock;
956     hw_buf_size = audio_write_get_buf_size(is);
957     bytes_per_sec = 0;
958     if (is->audio_st) {
959         bytes_per_sec = is->audio_st->codec->sample_rate *
960             2 * is->audio_st->codec->channels;
961     }
962     if (bytes_per_sec)
963         pts -= (double)hw_buf_size / bytes_per_sec;
964     return pts;
965 }
966
967 /* get the current video clock value */
968 static double get_video_clock(VideoState *is)
969 {
970     if (is->paused) {
971         return is->video_current_pts;
972     } else {
973         return is->video_current_pts_drift + av_gettime() / 1000000.0;
974     }
975 }
976
977 /* get the current external clock value */
978 static double get_external_clock(VideoState *is)
979 {
980     int64_t ti;
981     ti = av_gettime();
982     return is->external_clock + ((ti - is->external_clock_time) * 1e-6);
983 }
984
985 /* get the current master clock value */
986 static double get_master_clock(VideoState *is)
987 {
988     double val;
989
990     if (is->av_sync_type == AV_SYNC_VIDEO_MASTER) {
991         if (is->video_st)
992             val = get_video_clock(is);
993         else
994             val = get_audio_clock(is);
995     } else if (is->av_sync_type == AV_SYNC_AUDIO_MASTER) {
996         if (is->audio_st)
997             val = get_audio_clock(is);
998         else
999             val = get_video_clock(is);
1000     } else {
1001         val = get_external_clock(is);
1002     }
1003     return val;
1004 }
1005
1006 /* seek in the stream */
1007 static void stream_seek(VideoState *is, int64_t pos, int64_t rel, int seek_by_bytes)
1008 {
1009     if (!is->seek_req) {
1010         is->seek_pos = pos;
1011         is->seek_rel = rel;
1012         is->seek_flags &= ~AVSEEK_FLAG_BYTE;
1013         if (seek_by_bytes)
1014             is->seek_flags |= AVSEEK_FLAG_BYTE;
1015         is->seek_req = 1;
1016     }
1017 }
1018
/* Toggle pause/resume.  On resume, the frame timer and the video clock
 * drift are rebased to "now" so playback does not jump ahead by the
 * time spent paused. */
static void stream_pause(VideoState *is)
{
    if (is->paused) {
        /* account for the wall-clock time that elapsed while paused */
        is->frame_timer += av_gettime() / 1000000.0 + is->video_current_pts_drift - is->video_current_pts;
        if(is->read_pause_return != AVERROR(ENOSYS)){
            /* the demuxer honoured av_read_pause(): resync current pts too */
            is->video_current_pts = is->video_current_pts_drift + av_gettime() / 1000000.0;
        }
        /* re-anchor the drift so get_video_clock() stays continuous */
        is->video_current_pts_drift = is->video_current_pts - av_gettime() / 1000000.0;
    }
    is->paused = !is->paused;
}
1031
1032 static double compute_target_time(double frame_current_pts, VideoState *is)
1033 {
1034     double delay, sync_threshold, diff;
1035
1036     /* compute nominal delay */
1037     delay = frame_current_pts - is->frame_last_pts;
1038     if (delay <= 0 || delay >= 10.0) {
1039         /* if incorrect delay, use previous one */
1040         delay = is->frame_last_delay;
1041     } else {
1042         is->frame_last_delay = delay;
1043     }
1044     is->frame_last_pts = frame_current_pts;
1045
1046     /* update delay to follow master synchronisation source */
1047     if (((is->av_sync_type == AV_SYNC_AUDIO_MASTER && is->audio_st) ||
1048          is->av_sync_type == AV_SYNC_EXTERNAL_CLOCK)) {
1049         /* if video is slave, we try to correct big delays by
1050            duplicating or deleting a frame */
1051         diff = get_video_clock(is) - get_master_clock(is);
1052
1053         /* skip or repeat frame. We take into account the
1054            delay to compute the threshold. I still don't know
1055            if it is the best guess */
1056         sync_threshold = FFMAX(AV_SYNC_THRESHOLD, delay);
1057         if (fabs(diff) < AV_NOSYNC_THRESHOLD) {
1058             if (diff <= -sync_threshold)
1059                 delay = 0;
1060             else if (diff >= sync_threshold)
1061                 delay = 2 * delay;
1062         }
1063     }
1064     is->frame_timer += delay;
1065
1066     av_dlog(NULL, "video: delay=%0.3f pts=%0.3f A-V=%f\n",
1067             delay, frame_current_pts, -diff);
1068
1069     return is->frame_timer;
1070 }
1071
/* Handler for FF_REFRESH_EVENT: display the next due picture (and any
 * due subtitle) from the queues, dropping late frames when -framedrop
 * is enabled, and print the periodic status line. */
static void video_refresh_timer(void *opaque)
{
    VideoState *is = opaque;
    VideoPicture *vp;

    SubPicture *sp, *sp2;

    if (is->video_st) {
retry:
        if (is->pictq_size == 0) {
            //nothing to do, no picture to display in the que
        } else {
            double time= av_gettime()/1000000.0;
            double next_target;
            /* dequeue the picture */
            vp = &is->pictq[is->pictq_rindex];

            /* not yet time to show this picture */
            if(time < vp->target_clock)
                return;
            /* update current video pts */
            is->video_current_pts = vp->pts;
            is->video_current_pts_drift = is->video_current_pts - time;
            is->video_current_pos = vp->pos;
            if(is->pictq_size > 1){
                VideoPicture *nextvp= &is->pictq[(is->pictq_rindex+1)%VIDEO_PICTURE_QUEUE_SIZE];
                assert(nextvp->target_clock >= vp->target_clock);
                next_target= nextvp->target_clock;
            }else{
                /* only one picture queued: estimate the next deadline
                   from the running video clock */
                next_target= vp->target_clock + is->video_clock - vp->pts; //FIXME pass durations cleanly
            }
            /* frame dropping: if we are already past the next deadline,
               raise the decoder-side skip ratio and maybe drop this frame */
            if(framedrop && time > next_target){
                is->skip_frames *= 1.0 + FRAME_SKIP_FACTOR;
                if(is->pictq_size > 1 || time > next_target + 0.5){
                    /* update queue size and signal for next picture */
                    if (++is->pictq_rindex == VIDEO_PICTURE_QUEUE_SIZE)
                        is->pictq_rindex = 0;

                    SDL_LockMutex(is->pictq_mutex);
                    is->pictq_size--;
                    SDL_CondSignal(is->pictq_cond);
                    SDL_UnlockMutex(is->pictq_mutex);
                    goto retry;
                }
            }

            if(is->subtitle_st) {
                if (is->subtitle_stream_changed) {
                    /* subtitle stream switched: drop every queued subpicture */
                    SDL_LockMutex(is->subpq_mutex);

                    while (is->subpq_size) {
                        free_subpicture(&is->subpq[is->subpq_rindex]);

                        /* update queue size and signal for next picture */
                        if (++is->subpq_rindex == SUBPICTURE_QUEUE_SIZE)
                            is->subpq_rindex = 0;

                        is->subpq_size--;
                    }
                    is->subtitle_stream_changed = 0;

                    SDL_CondSignal(is->subpq_cond);
                    SDL_UnlockMutex(is->subpq_mutex);
                } else {
                    if (is->subpq_size > 0) {
                        sp = &is->subpq[is->subpq_rindex];

                        if (is->subpq_size > 1)
                            sp2 = &is->subpq[(is->subpq_rindex + 1) % SUBPICTURE_QUEUE_SIZE];
                        else
                            sp2 = NULL;

                        /* free the current subpicture once its display window
                           has ended, or once the next one is due to start */
                        if ((is->video_current_pts > (sp->pts + ((float) sp->sub.end_display_time / 1000)))
                                || (sp2 && is->video_current_pts > (sp2->pts + ((float) sp2->sub.start_display_time / 1000))))
                        {
                            free_subpicture(sp);

                            /* update queue size and signal for next picture */
                            if (++is->subpq_rindex == SUBPICTURE_QUEUE_SIZE)
                                is->subpq_rindex = 0;

                            SDL_LockMutex(is->subpq_mutex);
                            is->subpq_size--;
                            SDL_CondSignal(is->subpq_cond);
                            SDL_UnlockMutex(is->subpq_mutex);
                        }
                    }
                }
            }

            /* display picture */
            if (!display_disable)
                video_display(is);

            /* update queue size and signal for next picture */
            if (++is->pictq_rindex == VIDEO_PICTURE_QUEUE_SIZE)
                is->pictq_rindex = 0;

            SDL_LockMutex(is->pictq_mutex);
            is->pictq_size--;
            SDL_CondSignal(is->pictq_cond);
            SDL_UnlockMutex(is->pictq_mutex);
        }
    } else if (is->audio_st) {
        /* draw the next audio frame */

        /* if only audio stream, then display the audio bars (better
           than nothing, just to test the implementation */

        /* display picture */
        if (!display_disable)
            video_display(is);
    }
    if (show_status) {
        static int64_t last_time;
        int64_t cur_time;
        int aqsize, vqsize, sqsize;
        double av_diff;

        cur_time = av_gettime();
        /* refresh the status line at most every 30 ms */
        if (!last_time || (cur_time - last_time) >= 30000) {
            aqsize = 0;
            vqsize = 0;
            sqsize = 0;
            if (is->audio_st)
                aqsize = is->audioq.size;
            if (is->video_st)
                vqsize = is->videoq.size;
            if (is->subtitle_st)
                sqsize = is->subtitleq.size;
            av_diff = 0;
            if (is->audio_st && is->video_st)
                av_diff = get_audio_clock(is) - get_video_clock(is);
            printf("%7.2f A-V:%7.3f s:%3.1f aq=%5dKB vq=%5dKB sq=%5dB f=%"PRId64"/%"PRId64"   \r",
                   get_master_clock(is), av_diff, FFMAX(is->skip_frames-1, 0), aqsize / 1024, vqsize / 1024, sqsize, is->pts_ctx.num_faulty_dts, is->pts_ctx.num_faulty_pts);
            fflush(stdout);
            last_time = cur_time;
        }
    }
}
1212
/* Stop the demux and refresh threads, then free every resource owned
 * by the VideoState: queued pictures/overlays, mutexes/conds and (in
 * the non-avfilter build) the scaler context. */
static void stream_close(VideoState *is)
{
    VideoPicture *vp;
    int i;
    /* XXX: use a special url_shutdown call to abort parse cleanly */
    is->abort_request = 1;
    SDL_WaitThread(is->parse_tid, NULL);
    SDL_WaitThread(is->refresh_tid, NULL);

    /* free all pictures */
    for(i=0;i<VIDEO_PICTURE_QUEUE_SIZE; i++) {
        vp = &is->pictq[i];
#if CONFIG_AVFILTER
        if (vp->picref) {
            avfilter_unref_buffer(vp->picref);
            vp->picref = NULL;
        }
#endif
        if (vp->bmp) {
            SDL_FreeYUVOverlay(vp->bmp);
            vp->bmp = NULL;
        }
    }
    SDL_DestroyMutex(is->pictq_mutex);
    SDL_DestroyCond(is->pictq_cond);
    SDL_DestroyMutex(is->subpq_mutex);
    SDL_DestroyCond(is->subpq_cond);
#if !CONFIG_AVFILTER
    if (is->img_convert_ctx)
        sws_freeContext(is->img_convert_ctx);
#endif
    av_free(is);
}
1246
/* Global teardown: close the current stream, release option/filter/SDL
 * state and terminate the process.  Never returns. */
static void do_exit(void)
{
    if (cur_stream) {
        stream_close(cur_stream);
        cur_stream = NULL;
    }
    uninit_opts();
#if CONFIG_AVFILTER
    avfilter_uninit();
#endif
    if (show_status)
        printf("\n");
    SDL_Quit();
    /* zero-length message at AV_LOG_QUIET: flushes pending log output */
    av_log(NULL, AV_LOG_QUIET, "");
    exit(0);
}
1263
/* allocate a picture (needs to do that in main thread to avoid
   potential locking problems).  Runs in response to FF_ALLOC_EVENT;
   queue_picture() blocks on pictq_cond until vp->allocated is set. */
static void alloc_picture(void *opaque)
{
    VideoState *is = opaque;
    VideoPicture *vp;

    vp = &is->pictq[is->pictq_windex];

    if (vp->bmp)
        SDL_FreeYUVOverlay(vp->bmp);

#if CONFIG_AVFILTER
    if (vp->picref)
        avfilter_unref_buffer(vp->picref);
    vp->picref = NULL;

    /* size the overlay after the filter chain's output, not the codec */
    vp->width   = is->out_video_filter->inputs[0]->w;
    vp->height  = is->out_video_filter->inputs[0]->h;
    vp->pix_fmt = is->out_video_filter->inputs[0]->format;
#else
    vp->width   = is->video_st->codec->width;
    vp->height  = is->video_st->codec->height;
    vp->pix_fmt = is->video_st->codec->pix_fmt;
#endif

    vp->bmp = SDL_CreateYUVOverlay(vp->width, vp->height,
                                   SDL_YV12_OVERLAY,
                                   screen);
    if (!vp->bmp || vp->bmp->pitches[0] < vp->width) {
        /* SDL allocates a buffer smaller than requested if the video
         * overlay hardware is unable to support the requested size. */
        fprintf(stderr, "Error: the video system does not support an image\n"
                        "size of %dx%d pixels. Try using -lowres or -vf \"scale=w:h\"\n"
                        "to reduce the image size.\n", vp->width, vp->height );
        do_exit();
    }

    /* mark the slot ready and wake the waiting queue_picture() */
    SDL_LockMutex(is->pictq_mutex);
    vp->allocated = 1;
    SDL_CondSignal(is->pictq_cond);
    SDL_UnlockMutex(is->pictq_mutex);
}
1307
/**
 * Queue src_frame for display: wait for a free slot in the picture
 * queue, (re)allocate the SDL overlay if the geometry changed, copy or
 * convert the frame into it and stamp its display deadline.
 *
 * @param pts the dts of the pkt / pts of the frame and guessed if not known
 * @param pos byte position of the frame's packet in the input file
 * @return 0 on success, -1 if the video queue was aborted
 */
static int queue_picture(VideoState *is, AVFrame *src_frame, double pts, int64_t pos)
{
    VideoPicture *vp;
#if CONFIG_AVFILTER
    AVPicture pict_src;
#else
    int dst_pix_fmt = PIX_FMT_YUV420P;
#endif
    /* wait until we have space to put a new picture */
    SDL_LockMutex(is->pictq_mutex);

    /* queue is full while display is idle: decay the skip ratio */
    if(is->pictq_size>=VIDEO_PICTURE_QUEUE_SIZE && !is->refresh)
        is->skip_frames= FFMAX(1.0 - FRAME_SKIP_FACTOR, is->skip_frames * (1.0-FRAME_SKIP_FACTOR));

    while (is->pictq_size >= VIDEO_PICTURE_QUEUE_SIZE &&
           !is->videoq.abort_request) {
        SDL_CondWait(is->pictq_cond, is->pictq_mutex);
    }
    SDL_UnlockMutex(is->pictq_mutex);

    if (is->videoq.abort_request)
        return -1;

    vp = &is->pictq[is->pictq_windex];

    /* alloc or resize hardware picture buffer */
    if (!vp->bmp ||
#if CONFIG_AVFILTER
        vp->width  != is->out_video_filter->inputs[0]->w ||
        vp->height != is->out_video_filter->inputs[0]->h) {
#else
        vp->width != is->video_st->codec->width ||
        vp->height != is->video_st->codec->height) {
#endif
        SDL_Event event;

        vp->allocated = 0;

        /* the allocation must be done in the main thread to avoid
           locking problems */
        event.type = FF_ALLOC_EVENT;
        event.user.data1 = is;
        SDL_PushEvent(&event);

        /* wait until the picture is allocated */
        SDL_LockMutex(is->pictq_mutex);
        while (!vp->allocated && !is->videoq.abort_request) {
            SDL_CondWait(is->pictq_cond, is->pictq_mutex);
        }
        SDL_UnlockMutex(is->pictq_mutex);

        if (is->videoq.abort_request)
            return -1;
    }

    /* if the frame is not skipped, then display it */
    if (vp->bmp) {
        AVPicture pict;
#if CONFIG_AVFILTER
        if(vp->picref)
            avfilter_unref_buffer(vp->picref);
        vp->picref = src_frame->opaque;
#endif

        /* get a pointer on the bitmap */
        SDL_LockYUVOverlay (vp->bmp);

        /* SDL's YV12 layout stores V before U, hence planes 1/2 swapped */
        memset(&pict,0,sizeof(AVPicture));
        pict.data[0] = vp->bmp->pixels[0];
        pict.data[1] = vp->bmp->pixels[2];
        pict.data[2] = vp->bmp->pixels[1];

        pict.linesize[0] = vp->bmp->pitches[0];
        pict.linesize[1] = vp->bmp->pitches[2];
        pict.linesize[2] = vp->bmp->pitches[1];

#if CONFIG_AVFILTER
        pict_src.data[0] = src_frame->data[0];
        pict_src.data[1] = src_frame->data[1];
        pict_src.data[2] = src_frame->data[2];

        pict_src.linesize[0] = src_frame->linesize[0];
        pict_src.linesize[1] = src_frame->linesize[1];
        pict_src.linesize[2] = src_frame->linesize[2];

        //FIXME use direct rendering
        av_picture_copy(&pict, &pict_src,
                        vp->pix_fmt, vp->width, vp->height);
#else
        sws_flags = av_get_int(sws_opts, "sws_flags", NULL);
        is->img_convert_ctx = sws_getCachedContext(is->img_convert_ctx,
            vp->width, vp->height, vp->pix_fmt, vp->width, vp->height,
            dst_pix_fmt, sws_flags, NULL, NULL, NULL);
        if (is->img_convert_ctx == NULL) {
            fprintf(stderr, "Cannot initialize the conversion context\n");
            exit(1);
        }
        sws_scale(is->img_convert_ctx, src_frame->data, src_frame->linesize,
                  0, vp->height, pict.data, pict.linesize);
#endif
        /* update the bitmap content */
        SDL_UnlockYUVOverlay(vp->bmp);

        vp->pts = pts;
        vp->pos = pos;

        /* now we can update the picture count */
        if (++is->pictq_windex == VIDEO_PICTURE_QUEUE_SIZE)
            is->pictq_windex = 0;
        SDL_LockMutex(is->pictq_mutex);
        vp->target_clock= compute_target_time(vp->pts, is);

        is->pictq_size++;
        SDL_UnlockMutex(is->pictq_mutex);
    }
    return 0;
}
1429
1430 /**
1431  * compute the exact PTS for the picture if it is omitted in the stream
1432  * @param pts1 the dts of the pkt / pts of the frame
1433  */
1434 static int output_picture2(VideoState *is, AVFrame *src_frame, double pts1, int64_t pos)
1435 {
1436     double frame_delay, pts;
1437
1438     pts = pts1;
1439
1440     if (pts != 0) {
1441         /* update video clock with pts, if present */
1442         is->video_clock = pts;
1443     } else {
1444         pts = is->video_clock;
1445     }
1446     /* update video clock for next frame */
1447     frame_delay = av_q2d(is->video_st->codec->time_base);
1448     /* for MPEG2, the frame can be repeated, so we update the
1449        clock accordingly */
1450     frame_delay += src_frame->repeat_pict * (frame_delay * 0.5);
1451     is->video_clock += frame_delay;
1452
1453     return queue_picture(is, src_frame, pts, pos);
1454 }
1455
/* Pull the next packet from the video queue and decode it.  A flush
 * packet resets decoder and timing state.
 * Returns 1 when a displayable frame was produced, 0 when not (flush,
 * no picture decoded, or frame skipped for frame dropping), -1 on
 * queue abort. */
static int get_video_frame(VideoState *is, AVFrame *frame, int64_t *pts, AVPacket *pkt)
{
    int got_picture, i;

    if (packet_queue_get(&is->videoq, pkt, 1) < 0)
        return -1;

    if (pkt->data == flush_pkt.data) {
        avcodec_flush_buffers(is->video_st->codec);

        SDL_LockMutex(is->pictq_mutex);
        //Make sure there are no long delay timers (ideally we should just flush the que but thats harder)
        for (i = 0; i < VIDEO_PICTURE_QUEUE_SIZE; i++) {
            is->pictq[i].target_clock= 0;
        }
        /* wait until the display side has consumed the stale pictures */
        while (is->pictq_size && !is->videoq.abort_request) {
            SDL_CondWait(is->pictq_cond, is->pictq_mutex);
        }
        is->video_current_pos = -1;
        SDL_UnlockMutex(is->pictq_mutex);

        /* reset pts correction and frame timing after the seek/flush */
        init_pts_correction(&is->pts_ctx);
        is->frame_last_pts = AV_NOPTS_VALUE;
        is->frame_last_delay = 0;
        is->frame_timer = (double)av_gettime() / 1000000.0;
        is->skip_frames = 1;
        is->skip_frames_index = 0;
        return 0;
    }

    avcodec_decode_video2(is->video_st->codec, frame, &got_picture, pkt);

    if (got_picture) {
        /* timestamp source per -drp option:
           -1 = heuristic guess, nonzero = decoder pts, 0 = packet dts */
        if (decoder_reorder_pts == -1) {
            *pts = guess_correct_pts(&is->pts_ctx, frame->pkt_pts, frame->pkt_dts);
        } else if (decoder_reorder_pts) {
            *pts = frame->pkt_pts;
        } else {
            *pts = frame->pkt_dts;
        }

        if (*pts == AV_NOPTS_VALUE) {
            *pts = 0;
        }

        /* frame dropping: only let every skip_frames-th frame through */
        is->skip_frames_index += 1;
        if(is->skip_frames_index >= is->skip_frames){
            is->skip_frames_index -= FFMAX(is->skip_frames, 1.0);
            return 1;
        }

    }
    return 0;
}
1510
1511 #if CONFIG_AVFILTER
/* Private state of the "ffplay_input" source filter. */
typedef struct {
    VideoState *is;     /* player this filter pulls decoded frames from */
    AVFrame *frame;     /* scratch frame reused for each decode */
    int use_dr1;        /* nonzero when direct-rendering callbacks are installed */
} FilterPriv;
1517
/* get_buffer() callback for direct rendering: hands the decoder a
 * buffer owned by the filter graph, aligned and padded for the codec's
 * edge requirements. Returns 0 on success, -1 if no buffer could be
 * obtained. */
static int input_get_buffer(AVCodecContext *codec, AVFrame *pic)
{
    AVFilterContext *ctx = codec->opaque;
    AVFilterBufferRef  *ref;
    int perms = AV_PERM_WRITE;
    int i, w, h, stride[4];
    unsigned edge;
    int pixel_size;

    if (codec->codec->capabilities & CODEC_CAP_NEG_LINESIZES)
        perms |= AV_PERM_NEG_LINESIZES;

    /* translate the decoder's buffer hints into filter permissions */
    if(pic->buffer_hints & FF_BUFFER_HINTS_VALID) {
        if(pic->buffer_hints & FF_BUFFER_HINTS_READABLE) perms |= AV_PERM_READ;
        if(pic->buffer_hints & FF_BUFFER_HINTS_PRESERVE) perms |= AV_PERM_PRESERVE;
        if(pic->buffer_hints & FF_BUFFER_HINTS_REUSABLE) perms |= AV_PERM_REUSE2;
    }
    if(pic->reference) perms |= AV_PERM_READ | AV_PERM_PRESERVE;

    /* request a buffer enlarged by the codec's edge padding on all sides */
    w = codec->width;
    h = codec->height;
    avcodec_align_dimensions2(codec, &w, &h, stride);
    edge = codec->flags & CODEC_FLAG_EMU_EDGE ? 0 : avcodec_get_edge_width();
    w += edge << 1;
    h += edge << 1;

    if(!(ref = avfilter_get_video_buffer(ctx->outputs[0], perms, w, h)))
        return -1;

    pixel_size = av_pix_fmt_descriptors[ref->format].comp[0].step_minus1+1;
    ref->video->w = codec->width;
    ref->video->h = codec->height;
    for(i = 0; i < 4; i ++) {
        /* planes 1 and 2 are chroma and may be subsampled */
        unsigned hshift = (i == 1 || i == 2) ? av_pix_fmt_descriptors[ref->format].log2_chroma_w : 0;
        unsigned vshift = (i == 1 || i == 2) ? av_pix_fmt_descriptors[ref->format].log2_chroma_h : 0;

        if (ref->data[i]) {
            /* advance past the edge padding so the decoder writes inside it */
            ref->data[i]    += ((edge * pixel_size) >> hshift) + ((edge * ref->linesize[i]) >> vshift);
        }
        pic->data[i]     = ref->data[i];
        pic->linesize[i] = ref->linesize[i];
    }
    pic->opaque = ref;
    pic->age    = INT_MAX;
    pic->type   = FF_BUFFER_TYPE_USER;
    pic->reordered_opaque = codec->reordered_opaque;
    if(codec->pkt) pic->pkt_pts = codec->pkt->pts;
    else           pic->pkt_pts = AV_NOPTS_VALUE;
    return 0;
}
1568
/* release_buffer() callback: drop the filter-graph reference backing
 * this decoder frame and clear its data pointers. */
static void input_release_buffer(AVCodecContext *codec, AVFrame *pic)
{
    memset(pic->data, 0, sizeof(pic->data));
    avfilter_unref_buffer(pic->opaque);
}
1574
/* reget_buffer() callback: reuse the frame's existing filter buffer,
 * or fall back to get_buffer() when it has none yet.  Fails when the
 * picture geometry or pixel format changed since allocation. */
static int input_reget_buffer(AVCodecContext *codec, AVFrame *pic)
{
    AVFilterBufferRef *ref = pic->opaque;

    if (pic->data[0] == NULL) {
        pic->buffer_hints |= FF_BUFFER_HINTS_READABLE;
        return codec->get_buffer(codec, pic);
    }

    if ((codec->width != ref->video->w) || (codec->height != ref->video->h) ||
        (codec->pix_fmt != ref->format)) {
        av_log(codec, AV_LOG_ERROR, "Picture properties changed.\n");
        return -1;
    }

    pic->reordered_opaque = codec->reordered_opaque;
    if(codec->pkt) pic->pkt_pts = codec->pkt->pts;
    else           pic->pkt_pts = AV_NOPTS_VALUE;
    return 0;
}
1595
1596 static int input_init(AVFilterContext *ctx, const char *args, void *opaque)
1597 {
1598     FilterPriv *priv = ctx->priv;
1599     AVCodecContext *codec;
1600     if(!opaque) return -1;
1601
1602     priv->is = opaque;
1603     codec    = priv->is->video_st->codec;
1604     codec->opaque = ctx;
1605     if(codec->codec->capabilities & CODEC_CAP_DR1) {
1606         priv->use_dr1 = 1;
1607         codec->get_buffer     = input_get_buffer;
1608         codec->release_buffer = input_release_buffer;
1609         codec->reget_buffer   = input_reget_buffer;
1610         codec->thread_safe_callbacks = 1;
1611     }
1612
1613     priv->frame = avcodec_alloc_frame();
1614
1615     return 0;
1616 }
1617
/* uninit callback: free the scratch frame allocated in input_init(). */
static void input_uninit(AVFilterContext *ctx)
{
    FilterPriv *priv = ctx->priv;
    av_free(priv->frame);
}
1623
1624 static int input_request_frame(AVFilterLink *link)
1625 {
1626     FilterPriv *priv = link->src->priv;
1627     AVFilterBufferRef *picref;
1628     int64_t pts = 0;
1629     AVPacket pkt;
1630     int ret;
1631
1632     while (!(ret = get_video_frame(priv->is, priv->frame, &pts, &pkt)))
1633         av_free_packet(&pkt);
1634     if (ret < 0)
1635         return -1;
1636
1637     if(priv->use_dr1) {
1638         picref = avfilter_ref_buffer(priv->frame->opaque, ~0);
1639     } else {
1640         picref = avfilter_get_video_buffer(link, AV_PERM_WRITE, link->w, link->h);
1641         av_image_copy(picref->data, picref->linesize,
1642                       priv->frame->data, priv->frame->linesize,
1643                       picref->format, link->w, link->h);
1644     }
1645     av_free_packet(&pkt);
1646
1647     picref->pts = pts;
1648     picref->pos = pkt.pos;
1649     picref->video->pixel_aspect = priv->is->video_st->codec->sample_aspect_ratio;
1650     avfilter_start_frame(link, picref);
1651     avfilter_draw_slice(link, 0, link->h, 1);
1652     avfilter_end_frame(link);
1653
1654     return 0;
1655 }
1656
/* query_formats callback: restrict the source filter's output to the
 * decoder's own pixel format so no implicit conversion is inserted. */
static int input_query_formats(AVFilterContext *ctx)
{
    FilterPriv *priv = ctx->priv;
    enum PixelFormat pix_fmts[] = {
        priv->is->video_st->codec->pix_fmt, PIX_FMT_NONE
    };

    avfilter_set_common_formats(ctx, avfilter_make_format_list(pix_fmts));
    return 0;
}
1667
1668 static int input_config_props(AVFilterLink *link)
1669 {
1670     FilterPriv *priv  = link->src->priv;
1671     AVCodecContext *c = priv->is->video_st->codec;
1672
1673     link->w = c->width;
1674     link->h = c->height;
1675     link->time_base = priv->is->video_st->time_base;
1676
1677     return 0;
1678 }
1679
/* Source filter feeding decoded frames from the player into the user's
 * filter graph; it has no inputs and one video output pad. */
static AVFilter input_filter =
{
    .name      = "ffplay_input",

    .priv_size = sizeof(FilterPriv),

    .init      = input_init,
    .uninit    = input_uninit,

    .query_formats = input_query_formats,

    .inputs    = (AVFilterPad[]) {{ .name = NULL }},
    .outputs   = (AVFilterPad[]) {{ .name = "default",
                                    .type = AVMEDIA_TYPE_VIDEO,
                                    .request_frame = input_request_frame,
                                    .config_props  = input_config_props, },
                                  { .name = NULL }},
};
1698
/* Build the video filter graph: ffplay_input -> [user vfilters] ->
 * ffsink.  On success is->out_video_filter points at the sink the video
 * thread pulls frames from.  Returns 0 or a negative AVERROR code.
 * NOTE(review): the two av_malloc() results are used unchecked, and
 * av_freep(&vfilters) releases the caller-supplied string through a
 * local copy of the pointer — both look fragile; verify ownership
 * against avfilter_graph_parse() for this libavfilter version before
 * changing. */
static int configure_video_filters(AVFilterGraph *graph, VideoState *is, const char *vfilters)
{
    char sws_flags_str[128];
    int ret;
    FFSinkContext ffsink_ctx = { .pix_fmt = PIX_FMT_YUV420P };
    AVFilterContext *filt_src = NULL, *filt_out = NULL;
    /* propagate the global scaler flags to auto-inserted scale filters */
    snprintf(sws_flags_str, sizeof(sws_flags_str), "flags=%d", sws_flags);
    graph->scale_sws_opts = av_strdup(sws_flags_str);

    if ((ret = avfilter_graph_create_filter(&filt_src, &input_filter, "src",
                                            NULL, is, graph)) < 0)
        return ret;
    if ((ret = avfilter_graph_create_filter(&filt_out, &ffsink, "out",
                                            NULL, &ffsink_ctx, graph)) < 0)
        return ret;

    if(vfilters) {
        AVFilterInOut *outputs = av_malloc(sizeof(AVFilterInOut));
        AVFilterInOut *inputs  = av_malloc(sizeof(AVFilterInOut));

        /* "in" = the user chain's input, wired to our source filter */
        outputs->name    = av_strdup("in");
        outputs->filter_ctx = filt_src;
        outputs->pad_idx = 0;
        outputs->next    = NULL;

        /* "out" = the user chain's output, wired to the sink */
        inputs->name    = av_strdup("out");
        inputs->filter_ctx = filt_out;
        inputs->pad_idx = 0;
        inputs->next    = NULL;

        if ((ret = avfilter_graph_parse(graph, vfilters, inputs, outputs, NULL)) < 0)
            return ret;
        av_freep(&vfilters);
    } else {
        /* no user filters: connect source directly to the sink */
        if ((ret = avfilter_link(filt_src, 0, filt_out, 0)) < 0)
            return ret;
    }

    if ((ret = avfilter_graph_config(graph, NULL)) < 0)
        return ret;

    is->out_video_filter = filt_out;

    return ret;
}
1744
1745 #endif  /* CONFIG_AVFILTER */
1746
/* Video decoding thread: pulls decoded frames (through the filter graph
 * when avfilter is enabled, straight from the packet queue otherwise),
 * converts the pts to seconds and queues each picture for display. */
static int video_thread(void *arg)
{
    VideoState *is = arg;
    AVFrame *frame= avcodec_alloc_frame();
    int64_t pts_int;
    double pts;
    int ret;

#if CONFIG_AVFILTER
    AVFilterGraph *graph = avfilter_graph_alloc();
    AVFilterContext *filt_out = NULL;
    int64_t pos;

    if ((ret = configure_video_filters(graph, is, vfilters)) < 0)
        goto the_end;
    filt_out = is->out_video_filter;
#endif

    for(;;) {
#if !CONFIG_AVFILTER
        AVPacket pkt;
#else
        AVFilterBufferRef *picref;
        AVRational tb;
#endif
        /* poll in 10 ms steps while paused */
        while (is->paused && !is->videoq.abort_request)
            SDL_Delay(10);
#if CONFIG_AVFILTER
        ret = get_filtered_video_frame(filt_out, frame, &picref, &tb);
        if (picref) {
            pts_int = picref->pts;
            pos     = picref->pos;
            frame->opaque = picref;
        }

        /* rescale from the filter link's time base to the stream's */
        if (av_cmp_q(tb, is->video_st->time_base)) {
            av_unused int64_t pts1 = pts_int;
            pts_int = av_rescale_q(pts_int, tb, is->video_st->time_base);
            av_dlog(NULL, "video_thread(): "
                    "tb:%d/%d pts:%"PRId64" -> tb:%d/%d pts:%"PRId64"\n",
                    tb.num, tb.den, pts1,
                    is->video_st->time_base.num, is->video_st->time_base.den, pts_int);
        }
#else
        ret = get_video_frame(is, frame, &pts_int, &pkt);
#endif

        if (ret < 0) goto the_end;

        /* ret == 0: no displayable frame (flush packet or skipped frame) */
        if (!ret)
            continue;

        pts = pts_int*av_q2d(is->video_st->time_base);

#if CONFIG_AVFILTER
        ret = output_picture2(is, frame, pts, pos);
#else
        ret = output_picture2(is, frame, pts,  pkt.pos);
        av_free_packet(&pkt);
#endif
        if (ret < 0)
            goto the_end;

        /* in single-step mode, pause again after every frame */
        if (step)
            if (cur_stream)
                stream_pause(cur_stream);
    }
 the_end:
#if CONFIG_AVFILTER
    avfilter_graph_free(&graph);
#endif
    av_free(frame);
    return 0;
}
1821
/* Subtitle decoding thread: decodes queued subtitle packets and stores
 * bitmap subtitles (format 0) in the subpicture queue after converting
 * their palettes from RGBA to CCIR YUVA in place. */
static int subtitle_thread(void *arg)
{
    VideoState *is = arg;
    SubPicture *sp;
    AVPacket pkt1, *pkt = &pkt1;
    int got_subtitle;
    double pts;
    int i, j;
    int r, g, b, y, u, v, a;

    for(;;) {
        /* poll in 10 ms steps while paused */
        while (is->paused && !is->subtitleq.abort_request) {
            SDL_Delay(10);
        }
        if (packet_queue_get(&is->subtitleq, pkt, 1) < 0)
            break;

        if(pkt->data == flush_pkt.data){
            avcodec_flush_buffers(is->subtitle_st->codec);
            continue;
        }
        /* wait for a free slot in the subpicture queue */
        SDL_LockMutex(is->subpq_mutex);
        while (is->subpq_size >= SUBPICTURE_QUEUE_SIZE &&
               !is->subtitleq.abort_request) {
            SDL_CondWait(is->subpq_cond, is->subpq_mutex);
        }
        SDL_UnlockMutex(is->subpq_mutex);

        if (is->subtitleq.abort_request)
            return 0;

        sp = &is->subpq[is->subpq_windex];

       /* NOTE: ipts is the PTS of the _first_ picture beginning in
           this packet, if any */
        pts = 0;
        if (pkt->pts != AV_NOPTS_VALUE)
            pts = av_q2d(is->subtitle_st->time_base)*pkt->pts;

        avcodec_decode_subtitle2(is->subtitle_st->codec, &sp->sub,
                                 &got_subtitle, pkt);

        if (got_subtitle && sp->sub.format == 0) {
            sp->pts = pts;

            /* convert each rect's palette entries from RGBA to YUVA */
            for (i = 0; i < sp->sub.num_rects; i++)
            {
                for (j = 0; j < sp->sub.rects[i]->nb_colors; j++)
                {
                    RGBA_IN(r, g, b, a, (uint32_t*)sp->sub.rects[i]->pict.data[1] + j);
                    y = RGB_TO_Y_CCIR(r, g, b);
                    u = RGB_TO_U_CCIR(r, g, b, 0);
                    v = RGB_TO_V_CCIR(r, g, b, 0);
                    YUVA_OUT((uint32_t*)sp->sub.rects[i]->pict.data[1] + j, y, u, v, a);
                }
            }

            /* now we can update the picture count */
            if (++is->subpq_windex == SUBPICTURE_QUEUE_SIZE)
                is->subpq_windex = 0;
            SDL_LockMutex(is->subpq_mutex);
            is->subpq_size++;
            SDL_UnlockMutex(is->subpq_mutex);
        }
        av_free_packet(pkt);
    }
    return 0;
}
1890
1891 /* copy samples for viewing in editor window */
1892 static void update_sample_display(VideoState *is, short *samples, int samples_size)
1893 {
1894     int size, len;
1895
1896     size = samples_size / sizeof(short);
1897     while (size > 0) {
1898         len = SAMPLE_ARRAY_SIZE - is->sample_array_index;
1899         if (len > size)
1900             len = size;
1901         memcpy(is->sample_array + is->sample_array_index, samples, len * sizeof(short));
1902         samples += len;
1903         is->sample_array_index += len;
1904         if (is->sample_array_index >= SAMPLE_ARRAY_SIZE)
1905             is->sample_array_index = 0;
1906         size -= len;
1907     }
1908 }
1909
1910 /* return the new audio buffer size (samples can be added or deleted
1911    to get better sync if video or external master clock) */
1912 static int synchronize_audio(VideoState *is, short *samples,
1913                              int samples_size1, double pts)
1914 {
1915     int n, samples_size;
1916     double ref_clock;
1917
1918     n = 2 * is->audio_st->codec->channels;
1919     samples_size = samples_size1;
1920
1921     /* if not master, then we try to remove or add samples to correct the clock */
1922     if (((is->av_sync_type == AV_SYNC_VIDEO_MASTER && is->video_st) ||
1923          is->av_sync_type == AV_SYNC_EXTERNAL_CLOCK)) {
1924         double diff, avg_diff;
1925         int wanted_size, min_size, max_size, nb_samples;
1926
1927         ref_clock = get_master_clock(is);
1928         diff = get_audio_clock(is) - ref_clock;
1929
1930         if (diff < AV_NOSYNC_THRESHOLD) {
1931             is->audio_diff_cum = diff + is->audio_diff_avg_coef * is->audio_diff_cum;
1932             if (is->audio_diff_avg_count < AUDIO_DIFF_AVG_NB) {
1933                 /* not enough measures to have a correct estimate */
1934                 is->audio_diff_avg_count++;
1935             } else {
1936                 /* estimate the A-V difference */
1937                 avg_diff = is->audio_diff_cum * (1.0 - is->audio_diff_avg_coef);
1938
1939                 if (fabs(avg_diff) >= is->audio_diff_threshold) {
1940                     wanted_size = samples_size + ((int)(diff * is->audio_st->codec->sample_rate) * n);
1941                     nb_samples = samples_size / n;
1942
1943                     min_size = ((nb_samples * (100 - SAMPLE_CORRECTION_PERCENT_MAX)) / 100) * n;
1944                     max_size = ((nb_samples * (100 + SAMPLE_CORRECTION_PERCENT_MAX)) / 100) * n;
1945                     if (wanted_size < min_size)
1946                         wanted_size = min_size;
1947                     else if (wanted_size > max_size)
1948                         wanted_size = max_size;
1949
1950                     /* add or remove samples to correction the synchro */
1951                     if (wanted_size < samples_size) {
1952                         /* remove samples */
1953                         samples_size = wanted_size;
1954                     } else if (wanted_size > samples_size) {
1955                         uint8_t *samples_end, *q;
1956                         int nb;
1957
1958                         /* add samples */
1959                         nb = (samples_size - wanted_size);
1960                         samples_end = (uint8_t *)samples + samples_size - n;
1961                         q = samples_end + n;
1962                         while (nb > 0) {
1963                             memcpy(q, samples_end, n);
1964                             q += n;
1965                             nb -= n;
1966                         }
1967                         samples_size = wanted_size;
1968                     }
1969                 }
1970                 av_dlog(NULL, "diff=%f adiff=%f sample_diff=%d apts=%0.3f vpts=%0.3f %f\n",
1971                         diff, avg_diff, samples_size - samples_size1,
1972                         is->audio_clock, is->video_clock, is->audio_diff_threshold);
1973             }
1974         } else {
1975             /* too big difference : may be initial PTS errors, so
1976                reset A-V filter */
1977             is->audio_diff_avg_count = 0;
1978             is->audio_diff_cum = 0;
1979         }
1980     }
1981
1982     return samples_size;
1983 }
1984
/* decode one audio frame and returns its uncompressed size */
/* Decodes from is->audio_pkt (pkt_temp tracks the unconsumed remainder),
 * converts to S16 when the decoder output format differs, points
 * is->audio_buf at the result and advances is->audio_clock.
 * Returns the byte size of the decoded data, or -1 on pause/abort. */
static int audio_decode_frame(VideoState *is, double *pts_ptr)
{
    AVPacket *pkt_temp = &is->audio_pkt_temp;  /* consumption cursor into pkt */
    AVPacket *pkt = &is->audio_pkt;
    AVCodecContext *dec= is->audio_st->codec;
    int n, len1, data_size;
    double pts;

    for(;;) {
        /* NOTE: the audio packet can contain several frames */
        while (pkt_temp->size > 0) {
            data_size = sizeof(is->audio_buf1);
            len1 = avcodec_decode_audio3(dec,
                                        (int16_t *)is->audio_buf1, &data_size,
                                        pkt_temp);
            if (len1 < 0) {
                /* if error, we skip the frame */
                pkt_temp->size = 0;
                break;
            }

            /* advance past the bytes the decoder consumed */
            pkt_temp->data += len1;
            pkt_temp->size -= len1;
            if (data_size <= 0)
                continue;

            /* (re)create the converter when the decoder output format
               changed since the last frame we configured for */
            if (dec->sample_fmt != is->audio_src_fmt) {
                if (is->reformat_ctx)
                    av_audio_convert_free(is->reformat_ctx);
                is->reformat_ctx= av_audio_convert_alloc(AV_SAMPLE_FMT_S16, 1,
                                                         dec->sample_fmt, 1, NULL, 0);
                if (!is->reformat_ctx) {
                    fprintf(stderr, "Cannot convert %s sample format to %s sample format\n",
                        av_get_sample_fmt_name(dec->sample_fmt),
                        av_get_sample_fmt_name(AV_SAMPLE_FMT_S16));
                        break;
                }
                is->audio_src_fmt= dec->sample_fmt;
            }

            if (is->reformat_ctx) {
                /* convert audio_buf1 into audio_buf2 as interleaved S16 */
                const void *ibuf[6]= {is->audio_buf1};
                void *obuf[6]= {is->audio_buf2};
                int istride[6]= {av_get_bytes_per_sample(dec->sample_fmt)};
                int ostride[6]= {2};
                int len= data_size/istride[0];
                if (av_audio_convert(is->reformat_ctx, obuf, ostride, ibuf, istride, len)<0) {
                    printf("av_audio_convert() failed\n");
                    break;
                }
                is->audio_buf= is->audio_buf2;
                /* FIXME: existing code assume that data_size equals framesize*channels*2
                          remove this legacy cruft */
                data_size= len*2;
            }else{
                is->audio_buf= is->audio_buf1;
            }

            /* if no pts, then compute it */
            pts = is->audio_clock;
            *pts_ptr = pts;
            n = 2 * dec->channels;
            /* advance the audio clock by the playing time of this data */
            is->audio_clock += (double)data_size /
                (double)(n * dec->sample_rate);
#ifdef DEBUG
            {
                static double last_clock;
                printf("audio: delay=%0.3f clock=%0.3f pts=%0.3f\n",
                       is->audio_clock - last_clock,
                       is->audio_clock, pts);
                last_clock = is->audio_clock;
            }
#endif
            return data_size;
        }

        /* free the current packet */
        if (pkt->data)
            av_free_packet(pkt);

        if (is->paused || is->audioq.abort_request) {
            return -1;
        }

        /* read next packet */
        if (packet_queue_get(&is->audioq, pkt, 1) < 0)
            return -1;
        /* a flush packet marks a seek: reset the decoder state */
        if(pkt->data == flush_pkt.data){
            avcodec_flush_buffers(dec);
            continue;
        }

        pkt_temp->data = pkt->data;
        pkt_temp->size = pkt->size;

        /* if update the audio clock with the pts */
        if (pkt->pts != AV_NOPTS_VALUE) {
            is->audio_clock = av_q2d(is->audio_st->time_base)*pkt->pts;
        }
    }
}
2087
2088 /* prepare a new audio buffer */
2089 static void sdl_audio_callback(void *opaque, Uint8 *stream, int len)
2090 {
2091     VideoState *is = opaque;
2092     int audio_size, len1;
2093     double pts;
2094
2095     audio_callback_time = av_gettime();
2096
2097     while (len > 0) {
2098         if (is->audio_buf_index >= is->audio_buf_size) {
2099            audio_size = audio_decode_frame(is, &pts);
2100            if (audio_size < 0) {
2101                 /* if error, just output silence */
2102                is->audio_buf = is->audio_buf1;
2103                is->audio_buf_size = 1024;
2104                memset(is->audio_buf, 0, is->audio_buf_size);
2105            } else {
2106                if (is->show_audio)
2107                    update_sample_display(is, (int16_t *)is->audio_buf, audio_size);
2108                audio_size = synchronize_audio(is, (int16_t *)is->audio_buf, audio_size,
2109                                               pts);
2110                is->audio_buf_size = audio_size;
2111            }
2112            is->audio_buf_index = 0;
2113         }
2114         len1 = is->audio_buf_size - is->audio_buf_index;
2115         if (len1 > len)
2116             len1 = len;
2117         memcpy(stream, (uint8_t *)is->audio_buf + is->audio_buf_index, len1);
2118         len -= len1;
2119         stream += len1;
2120         is->audio_buf_index += len1;
2121     }
2122 }
2123
2124 /* open a given stream. Return 0 if OK */
2125 static int stream_component_open(VideoState *is, int stream_index)
2126 {
2127     AVFormatContext *ic = is->ic;
2128     AVCodecContext *avctx;
2129     AVCodec *codec;
2130     SDL_AudioSpec wanted_spec, spec;
2131     AVDictionary *opts;
2132     AVDictionaryEntry *t = NULL;
2133
2134     if (stream_index < 0 || stream_index >= ic->nb_streams)
2135         return -1;
2136     avctx = ic->streams[stream_index]->codec;
2137
2138     opts = filter_codec_opts(codec_opts, avctx->codec_id, 0);
2139
2140     /* prepare audio output */
2141     if (avctx->codec_type == AVMEDIA_TYPE_AUDIO) {
2142         if (avctx->channels > 0) {
2143             avctx->request_channels = FFMIN(2, avctx->channels);
2144         } else {
2145             avctx->request_channels = 2;
2146         }
2147     }
2148
2149     codec = avcodec_find_decoder(avctx->codec_id);
2150     avctx->debug_mv = debug_mv;
2151     avctx->debug = debug;
2152     avctx->workaround_bugs = workaround_bugs;
2153     avctx->lowres = lowres;
2154     if(lowres) avctx->flags |= CODEC_FLAG_EMU_EDGE;
2155     avctx->idct_algo= idct;
2156     if(fast) avctx->flags2 |= CODEC_FLAG2_FAST;
2157     avctx->skip_frame= skip_frame;
2158     avctx->skip_idct= skip_idct;
2159     avctx->skip_loop_filter= skip_loop_filter;
2160     avctx->error_recognition= error_recognition;
2161     avctx->error_concealment= error_concealment;
2162     avctx->thread_count= thread_count;
2163
2164     if (!codec ||
2165         avcodec_open2(avctx, codec, &opts) < 0)
2166         return -1;
2167     if ((t = av_dict_get(opts, "", NULL, AV_DICT_IGNORE_SUFFIX))) {
2168         av_log(NULL, AV_LOG_ERROR, "Option %s not found.\n", t->key);
2169         return AVERROR_OPTION_NOT_FOUND;
2170     }
2171
2172     /* prepare audio output */
2173     if (avctx->codec_type == AVMEDIA_TYPE_AUDIO) {
2174         wanted_spec.freq = avctx->sample_rate;
2175         wanted_spec.format = AUDIO_S16SYS;
2176         wanted_spec.channels = avctx->channels;
2177         wanted_spec.silence = 0;
2178         wanted_spec.samples = SDL_AUDIO_BUFFER_SIZE;
2179         wanted_spec.callback = sdl_audio_callback;
2180         wanted_spec.userdata = is;
2181         if (SDL_OpenAudio(&wanted_spec, &spec) < 0) {
2182             fprintf(stderr, "SDL_OpenAudio: %s\n", SDL_GetError());
2183             return -1;
2184         }
2185         is->audio_hw_buf_size = spec.size;
2186         is->audio_src_fmt= AV_SAMPLE_FMT_S16;
2187     }
2188
2189     ic->streams[stream_index]->discard = AVDISCARD_DEFAULT;
2190     switch(avctx->codec_type) {
2191     case AVMEDIA_TYPE_AUDIO:
2192         is->audio_stream = stream_index;
2193         is->audio_st = ic->streams[stream_index];
2194         is->audio_buf_size = 0;
2195         is->audio_buf_index = 0;
2196
2197         /* init averaging filter */
2198         is->audio_diff_avg_coef = exp(log(0.01) / AUDIO_DIFF_AVG_NB);
2199         is->audio_diff_avg_count = 0;
2200         /* since we do not have a precise anough audio fifo fullness,
2201            we correct audio sync only if larger than this threshold */
2202         is->audio_diff_threshold = 2.0 * SDL_AUDIO_BUFFER_SIZE / avctx->sample_rate;
2203
2204         memset(&is->audio_pkt, 0, sizeof(is->audio_pkt));
2205         packet_queue_init(&is->audioq);
2206         SDL_PauseAudio(0);
2207         break;
2208     case AVMEDIA_TYPE_VIDEO:
2209         is->video_stream = stream_index;
2210         is->video_st = ic->streams[stream_index];
2211
2212         packet_queue_init(&is->videoq);
2213         is->video_tid = SDL_CreateThread(video_thread, is);
2214         break;
2215     case AVMEDIA_TYPE_SUBTITLE:
2216         is->subtitle_stream = stream_index;
2217         is->subtitle_st = ic->streams[stream_index];
2218         packet_queue_init(&is->subtitleq);
2219
2220         is->subtitle_tid = SDL_CreateThread(subtitle_thread, is);
2221         break;
2222     default:
2223         break;
2224     }
2225     return 0;
2226 }
2227
/* Shut down the decoder and consumer for one stream: abort its packet
 * queue, wake and join the worker thread (or close the SDL audio
 * device), then close the codec and clear the VideoState references. */
static void stream_component_close(VideoState *is, int stream_index)
{
    AVFormatContext *ic = is->ic;
    AVCodecContext *avctx;

    if (stream_index < 0 || stream_index >= ic->nb_streams)
        return;
    avctx = ic->streams[stream_index]->codec;

    switch(avctx->codec_type) {
    case AVMEDIA_TYPE_AUDIO:
        packet_queue_abort(&is->audioq);

        /* stops the SDL callback, so no thread join is needed here */
        SDL_CloseAudio();

        packet_queue_end(&is->audioq);
        if (is->reformat_ctx)
            av_audio_convert_free(is->reformat_ctx);
        is->reformat_ctx = NULL;
        break;
    case AVMEDIA_TYPE_VIDEO:
        packet_queue_abort(&is->videoq);

        /* note: we also signal this mutex to make sure we deblock the
           video thread in all cases */
        SDL_LockMutex(is->pictq_mutex);
        SDL_CondSignal(is->pictq_cond);
        SDL_UnlockMutex(is->pictq_mutex);

        SDL_WaitThread(is->video_tid, NULL);

        packet_queue_end(&is->videoq);
        break;
    case AVMEDIA_TYPE_SUBTITLE:
        packet_queue_abort(&is->subtitleq);

        /* note: we also signal this mutex to make sure we deblock the
           video thread in all cases */
        SDL_LockMutex(is->subpq_mutex);
        is->subtitle_stream_changed = 1;

        SDL_CondSignal(is->subpq_cond);
        SDL_UnlockMutex(is->subpq_mutex);

        SDL_WaitThread(is->subtitle_tid, NULL);

        packet_queue_end(&is->subtitleq);
        break;
    default:
        break;
    }

    /* only now, with the worker stopped, is it safe to close the codec */
    ic->streams[stream_index]->discard = AVDISCARD_ALL;
    avcodec_close(avctx);
    switch(avctx->codec_type) {
    case AVMEDIA_TYPE_AUDIO:
        is->audio_st = NULL;
        is->audio_stream = -1;
        break;
    case AVMEDIA_TYPE_VIDEO:
        is->video_st = NULL;
        is->video_stream = -1;
        break;
    case AVMEDIA_TYPE_SUBTITLE:
        is->subtitle_st = NULL;
        is->subtitle_stream = -1;
        break;
    default:
        break;
    }
}
2299
/* since we have only one decoding thread, we can use a global
   variable instead of a thread local variable */
static VideoState *global_video_state;

/* avio interrupt callback: a non-zero return makes blocking libavformat
 * I/O abort; installed/cleared by decode_thread via avio_set_interrupt_cb() */
static int decode_interrupt_cb(void)
{
    return (global_video_state && global_video_state->abort_request);
}
2308
/* this thread gets the stream from the disk or the network */
/* Demuxer thread: opens the input, selects and opens the best
 * audio/video/subtitle streams, then loops reading packets into the
 * per-stream queues, handling pause, seek requests, queue back-pressure,
 * EOF/looping, and the user-specified play range. Returns 0. */
static int decode_thread(void *arg)
{
    VideoState *is = arg;
    AVFormatContext *ic = NULL;
    int err, i, ret;
    int st_index[AVMEDIA_TYPE_NB];
    AVPacket pkt1, *pkt = &pkt1;
    int eof=0;
    int pkt_in_play_range = 0;
    AVDictionaryEntry *t;
    AVDictionary **opts;
    int orig_nb_streams;

    /* no stream selected yet for any media type */
    memset(st_index, -1, sizeof(st_index));
    is->video_stream = -1;
    is->audio_stream = -1;
    is->subtitle_stream = -1;

    /* let blocking libavformat I/O be interrupted on abort_request */
    global_video_state = is;
    avio_set_interrupt_cb(decode_interrupt_cb);

    err = avformat_open_input(&ic, is->filename, is->iformat, &format_opts);
    if (err < 0) {
        print_error(is->filename, err);
        ret = -1;
        goto fail;
    }
    /* any options left in format_opts were not recognized by the demuxer */
    if ((t = av_dict_get(format_opts, "", NULL, AV_DICT_IGNORE_SUFFIX))) {
        av_log(NULL, AV_LOG_ERROR, "Option %s not found.\n", t->key);
        ret = AVERROR_OPTION_NOT_FOUND;
        goto fail;
    }
    is->ic = ic;

    if(genpts)
        ic->flags |= AVFMT_FLAG_GENPTS;

    /* per-stream codec options for probing; nb_streams may grow during
       avformat_find_stream_info(), so remember the original count */
    opts = setup_find_stream_info_opts(ic);
    orig_nb_streams = ic->nb_streams;

    err = avformat_find_stream_info(ic, opts);
    if (err < 0) {
        fprintf(stderr, "%s: could not find codec parameters\n", is->filename);
        ret = -1;
        goto fail;
    }
    for (i = 0; i < orig_nb_streams; i++)
        av_dict_free(&opts[i]);
    av_freep(&opts);

    if(ic->pb)
        ic->pb->eof_reached= 0; //FIXME hack, ffplay maybe should not use url_feof() to test for the end

    if(seek_by_bytes<0)
        seek_by_bytes= !!(ic->iformat->flags & AVFMT_TS_DISCONT);

    /* if seeking requested, we execute it */
    if (start_time != AV_NOPTS_VALUE) {
        int64_t timestamp;

        timestamp = start_time;
        /* add the stream start time */
        if (ic->start_time != AV_NOPTS_VALUE)
            timestamp += ic->start_time;
        ret = avformat_seek_file(ic, -1, INT64_MIN, timestamp, INT64_MAX, 0);
        if (ret < 0) {
            fprintf(stderr, "%s: could not seek to position %0.3f\n",
                    is->filename, (double)timestamp / AV_TIME_BASE);
        }
    }

    /* discard everything by default; stream_component_open() re-enables
       the streams we actually pick */
    for (i = 0; i < ic->nb_streams; i++)
        ic->streams[i]->discard = AVDISCARD_ALL;
    if (!video_disable)
        st_index[AVMEDIA_TYPE_VIDEO] =
            av_find_best_stream(ic, AVMEDIA_TYPE_VIDEO,
                                wanted_stream[AVMEDIA_TYPE_VIDEO], -1, NULL, 0);
    if (!audio_disable)
        st_index[AVMEDIA_TYPE_AUDIO] =
            av_find_best_stream(ic, AVMEDIA_TYPE_AUDIO,
                                wanted_stream[AVMEDIA_TYPE_AUDIO],
                                st_index[AVMEDIA_TYPE_VIDEO],
                                NULL, 0);
    if (!video_disable)
        st_index[AVMEDIA_TYPE_SUBTITLE] =
            av_find_best_stream(ic, AVMEDIA_TYPE_SUBTITLE,
                                wanted_stream[AVMEDIA_TYPE_SUBTITLE],
                                (st_index[AVMEDIA_TYPE_AUDIO] >= 0 ?
                                 st_index[AVMEDIA_TYPE_AUDIO] :
                                 st_index[AVMEDIA_TYPE_VIDEO]),
                                NULL, 0);
    if (show_status) {
        av_dump_format(ic, 0, is->filename, 0);
    }

    /* open the streams */
    if (st_index[AVMEDIA_TYPE_AUDIO] >= 0) {
        stream_component_open(is, st_index[AVMEDIA_TYPE_AUDIO]);
    }

    ret=-1;
    if (st_index[AVMEDIA_TYPE_VIDEO] >= 0) {
        ret= stream_component_open(is, st_index[AVMEDIA_TYPE_VIDEO]);
    }
    is->refresh_tid = SDL_CreateThread(refresh_thread, is);
    /* no video: fall back to the audio visualization display */
    if(ret<0) {
        if (!display_disable)
            is->show_audio = 2;
    }

    if (st_index[AVMEDIA_TYPE_SUBTITLE] >= 0) {
        stream_component_open(is, st_index[AVMEDIA_TYPE_SUBTITLE]);
    }

    if (is->video_stream < 0 && is->audio_stream < 0) {
        fprintf(stderr, "%s: could not open codecs\n", is->filename);
        ret = -1;
        goto fail;
    }

    /* main demux loop */
    for(;;) {
        if (is->abort_request)
            break;
        if (is->paused != is->last_paused) {
            is->last_paused = is->paused;
            if (is->paused)
                is->read_pause_return= av_read_pause(ic);
            else
                av_read_play(ic);
        }
#if CONFIG_RTSP_DEMUXER
        if (is->paused && !strcmp(ic->iformat->name, "rtsp")) {
            /* wait 10 ms to avoid trying to get another packet */
            /* XXX: horrible */
            SDL_Delay(10);
            continue;
        }
#endif
        if (is->seek_req) {
            int64_t seek_target= is->seek_pos;
            int64_t seek_min= is->seek_rel > 0 ? seek_target - is->seek_rel + 2: INT64_MIN;
            int64_t seek_max= is->seek_rel < 0 ? seek_target - is->seek_rel - 2: INT64_MAX;
//FIXME the +-2 is due to rounding being not done in the correct direction in generation
//      of the seek_pos/seek_rel variables

            ret = avformat_seek_file(is->ic, -1, seek_min, seek_target, seek_max, is->seek_flags);
            if (ret < 0) {
                fprintf(stderr, "%s: error while seeking\n", is->ic->filename);
            }else{
                /* on success, drop all queued packets and tell each
                   decoder thread to flush via a flush packet */
                if (is->audio_stream >= 0) {
                    packet_queue_flush(&is->audioq);
                    packet_queue_put(&is->audioq, &flush_pkt);
                }
                if (is->subtitle_stream >= 0) {
                    packet_queue_flush(&is->subtitleq);
                    packet_queue_put(&is->subtitleq, &flush_pkt);
                }
                if (is->video_stream >= 0) {
                    packet_queue_flush(&is->videoq);
                    packet_queue_put(&is->videoq, &flush_pkt);
                }
            }
            is->seek_req = 0;
            eof= 0;
        }

        /* if the queue are full, no need to read more */
        if (   is->audioq.size + is->videoq.size + is->subtitleq.size > MAX_QUEUE_SIZE
            || (   (is->audioq   .size  > MIN_AUDIOQ_SIZE || is->audio_stream<0)
                && (is->videoq   .nb_packets > MIN_FRAMES || is->video_stream<0)
                && (is->subtitleq.nb_packets > MIN_FRAMES || is->subtitle_stream<0))) {
            /* wait 10 ms */
            SDL_Delay(10);
            continue;
        }
        if(eof) {
            /* queue a null packet so the video decoder can drain */
            if(is->video_stream >= 0){
                av_init_packet(pkt);
                pkt->data=NULL;
                pkt->size=0;
                pkt->stream_index= is->video_stream;
                packet_queue_put(&is->videoq, pkt);
            }
            SDL_Delay(10);
            /* all queues drained: loop the file or exit if requested */
            if(is->audioq.size + is->videoq.size + is->subtitleq.size ==0){
                if(loop!=1 && (!loop || --loop)){
                    stream_seek(cur_stream, start_time != AV_NOPTS_VALUE ? start_time : 0, 0, 0);
                }else if(autoexit){
                    ret=AVERROR_EOF;
                    goto fail;
                }
            }
            continue;
        }
        ret = av_read_frame(ic, pkt);
        if (ret < 0) {
            if (ret == AVERROR_EOF || (ic->pb && ic->pb->eof_reached))
                eof=1;
            if (ic->pb && ic->pb->error)
                break;
            SDL_Delay(100); /* wait for user event */
            continue;
        }
        /* check if packet is in play range specified by user, then queue, otherwise discard */
        pkt_in_play_range = duration == AV_NOPTS_VALUE ||
                (pkt->pts - ic->streams[pkt->stream_index]->start_time) *
                av_q2d(ic->streams[pkt->stream_index]->time_base) -
                (double)(start_time != AV_NOPTS_VALUE ? start_time : 0)/1000000
                <= ((double)duration/1000000);
        if (pkt->stream_index == is->audio_stream && pkt_in_play_range) {
            packet_queue_put(&is->audioq, pkt);
        } else if (pkt->stream_index == is->video_stream && pkt_in_play_range) {
            packet_queue_put(&is->videoq, pkt);
        } else if (pkt->stream_index == is->subtitle_stream && pkt_in_play_range) {
            packet_queue_put(&is->subtitleq, pkt);
        } else {
            av_free_packet(pkt);
        }
    }
    /* wait until the end */
    while (!is->abort_request) {
        SDL_Delay(100);
    }

    ret = 0;
 fail:
    /* disable interrupting */
    global_video_state = NULL;

    /* close each stream */
    if (is->audio_stream >= 0)
        stream_component_close(is, is->audio_stream);
    if (is->video_stream >= 0)
        stream_component_close(is, is->video_stream);
    if (is->subtitle_stream >= 0)
        stream_component_close(is, is->subtitle_stream);
    if (is->ic) {
        av_close_input_file(is->ic);
        is->ic = NULL; /* safety */
    }
    avio_set_interrupt_cb(NULL);

    /* notify the event loop that playback ended abnormally */
    if (ret != 0) {
        SDL_Event event;

        event.type = FF_QUIT_EVENT;
        event.user.data1 = is;
        SDL_PushEvent(&event);
    }
    return 0;
}
2561
2562 static VideoState *stream_open(const char *filename, AVInputFormat *iformat)
2563 {
2564     VideoState *is;
2565
2566     is = av_mallocz(sizeof(VideoState));
2567     if (!is)
2568         return NULL;
2569     av_strlcpy(is->filename, filename, sizeof(is->filename));
2570     is->iformat = iformat;
2571     is->ytop = 0;
2572     is->xleft = 0;
2573
2574     /* start video display */
2575     is->pictq_mutex = SDL_CreateMutex();
2576     is->pictq_cond = SDL_CreateCond();
2577
2578     is->subpq_mutex = SDL_CreateMutex();
2579     is->subpq_cond = SDL_CreateCond();
2580
2581     is->av_sync_type = av_sync_type;
2582     is->parse_tid = SDL_CreateThread(decode_thread, is);
2583     if (!is->parse_tid) {
2584         av_free(is);
2585         return NULL;
2586     }
2587     return is;
2588 }
2589
2590 static void stream_cycle_channel(VideoState *is, int codec_type)
2591 {
2592     AVFormatContext *ic = is->ic;
2593     int start_index, stream_index;
2594     AVStream *st;
2595
2596     if (codec_type == AVMEDIA_TYPE_VIDEO)
2597         start_index = is->video_stream;
2598     else if (codec_type == AVMEDIA_TYPE_AUDIO)
2599         start_index = is->audio_stream;
2600     else
2601         start_index = is->subtitle_stream;
2602     if (start_index < (codec_type == AVMEDIA_TYPE_SUBTITLE ? -1 : 0))
2603         return;
2604     stream_index = start_index;
2605     for(;;) {
2606         if (++stream_index >= is->ic->nb_streams)
2607         {
2608             if (codec_type == AVMEDIA_TYPE_SUBTITLE)
2609             {
2610                 stream_index = -1;
2611                 goto the_end;
2612             } else
2613                 stream_index = 0;
2614         }
2615         if (stream_index == start_index)
2616             return;
2617         st = ic->streams[stream_index];
2618         if (st->codec->codec_type == codec_type) {
2619             /* check that parameters are OK */
2620             switch(codec_type) {
2621             case AVMEDIA_TYPE_AUDIO:
2622                 if (st->codec->sample_rate != 0 &&
2623                     st->codec->channels != 0)
2624                     goto the_end;
2625                 break;
2626             case AVMEDIA_TYPE_VIDEO:
2627             case AVMEDIA_TYPE_SUBTITLE:
2628                 goto the_end;
2629             default:
2630                 break;
2631             }
2632         }
2633     }
2634  the_end:
2635     stream_component_close(is, start_index);
2636     stream_component_open(is, stream_index);
2637 }
2638
2639
2640 static void toggle_full_screen(void)
2641 {
2642     is_full_screen = !is_full_screen;
2643     video_open(cur_stream);
2644 }
2645
2646 static void toggle_pause(void)
2647 {
2648     if (cur_stream)
2649         stream_pause(cur_stream);
2650     step = 0;
2651 }
2652
2653 static void step_to_next_frame(void)
2654 {
2655     if (cur_stream) {
2656         /* if the stream is paused unpause it, then step */
2657         if (cur_stream->paused)
2658             stream_pause(cur_stream);
2659     }
2660     step = 1;
2661 }
2662
2663 static void toggle_audio_display(void)
2664 {
2665     if (cur_stream) {
2666         int bgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0x00);
2667         cur_stream->show_audio = (cur_stream->show_audio + 1) % 3;
2668         fill_rectangle(screen,
2669                     cur_stream->xleft, cur_stream->ytop, cur_stream->width, cur_stream->height,
2670                     bgcolor);
2671         SDL_UpdateRect(screen, cur_stream->xleft, cur_stream->ytop, cur_stream->width, cur_stream->height);
2672     }
2673 }
2674
/* Main GUI event loop: blocks on SDL events and dispatches keyboard,
 * mouse, resize, quit and the application-defined FF_* events.
 * Never returns; termination goes through do_exit(). */
static void event_loop(void)
{
    SDL_Event event;
    double incr, pos, frac;

    for(;;) {
        double x;
        SDL_WaitEvent(&event);
        switch(event.type) {
        case SDL_KEYDOWN:
            if (exit_on_keydown) {
                do_exit();
                break;
            }
            switch(event.key.keysym.sym) {
            case SDLK_ESCAPE:
            case SDLK_q:
                do_exit();
                break;
            case SDLK_f:
                toggle_full_screen();
                break;
            case SDLK_p:
            case SDLK_SPACE:
                toggle_pause();
                break;
            case SDLK_s: //S: Step to next frame
                step_to_next_frame();
                break;
            case SDLK_a:
                if (cur_stream)
                    stream_cycle_channel(cur_stream, AVMEDIA_TYPE_AUDIO);
                break;
            case SDLK_v:
                if (cur_stream)
                    stream_cycle_channel(cur_stream, AVMEDIA_TYPE_VIDEO);
                break;
            case SDLK_t:
                if (cur_stream)
                    stream_cycle_channel(cur_stream, AVMEDIA_TYPE_SUBTITLE);
                break;
            case SDLK_w:
                toggle_audio_display();
                break;
            /* the four arrow keys share the relative-seek code below;
             * incr is the seek amount in seconds */
            case SDLK_LEFT:
                incr = -10.0;
                goto do_seek;
            case SDLK_RIGHT:
                incr = 10.0;
                goto do_seek;
            case SDLK_UP:
                incr = 60.0;
                goto do_seek;
            case SDLK_DOWN:
                incr = -60.0;
            do_seek:
                if (cur_stream) {
                    if (seek_by_bytes) {
                        /* derive the current byte position from the last
                         * video frame, else the last audio packet, else
                         * the raw file offset */
                        if (cur_stream->video_stream >= 0 && cur_stream->video_current_pos>=0){
                            pos= cur_stream->video_current_pos;
                        }else if(cur_stream->audio_stream >= 0 && cur_stream->audio_pkt.pos>=0){
                            pos= cur_stream->audio_pkt.pos;
                        }else
                            pos = avio_tell(cur_stream->ic->pb);
                        /* convert the seconds increment into bytes using the
                         * container bit rate; 180000 bytes/s is the fallback
                         * when the bit rate is unknown */
                        if (cur_stream->ic->bit_rate)
                            incr *= cur_stream->ic->bit_rate / 8.0;
                        else
                            incr *= 180000.0;
                        pos += incr;
                        stream_seek(cur_stream, pos, incr, 1);
                    } else {
                        /* time-based seek relative to the master clock,
                         * scaled to AV_TIME_BASE units */
                        pos = get_master_clock(cur_stream);
                        pos += incr;
                        stream_seek(cur_stream, (int64_t)(pos * AV_TIME_BASE), (int64_t)(incr * AV_TIME_BASE), 0);
                    }
                }
                break;
            default:
                break;
            }
            break;
        case SDL_MOUSEBUTTONDOWN:
            if (exit_on_mousedown) {
                do_exit();
                break;
            }
            /* fallthrough: a click is handled like a drag to that x position */
        case SDL_MOUSEMOTION:
            if(event.type ==SDL_MOUSEBUTTONDOWN){
                x= event.button.x;
            }else{
                /* only track motion while a button is held (drag-to-seek) */
                if(event.motion.state != SDL_PRESSED)
                    break;
                x= event.motion.x;
            }
            if (cur_stream) {
                /* map the horizontal position to a fraction of the file:
                 * by bytes when requested or when the duration is unknown,
                 * otherwise by time */
                if(seek_by_bytes || cur_stream->ic->duration<=0){
                    uint64_t size=  avio_size(cur_stream->ic->pb);
                    stream_seek(cur_stream, size*x/cur_stream->width, 0, 1);
                }else{
                    int64_t ts;
                    int ns, hh, mm, ss;
                    int tns, thh, tmm, tss;
                    tns = cur_stream->ic->duration/1000000LL;
                    thh = tns/3600;
                    tmm = (tns%3600)/60;
                    tss = (tns%60);
                    frac = x/cur_stream->width;
                    ns = frac*tns;
                    hh = ns/3600;
                    mm = (ns%3600)/60;
                    ss = (ns%60);
                    fprintf(stderr, "Seek to %2.0f%% (%2d:%02d:%02d) of total duration (%2d:%02d:%02d)       \n", frac*100,
                            hh, mm, ss, thh, tmm, tss);
                    ts = frac*cur_stream->ic->duration;
                    /* seek targets are absolute, so offset by the start time */
                    if (cur_stream->ic->start_time != AV_NOPTS_VALUE)
                        ts += cur_stream->ic->start_time;
                    stream_seek(cur_stream, ts, 0, 0);
                }
            }
            break;
        case SDL_VIDEORESIZE:
            if (cur_stream) {
                screen = SDL_SetVideoMode(event.resize.w, event.resize.h, 0,
                                          SDL_HWSURFACE|SDL_RESIZABLE|SDL_ASYNCBLIT|SDL_HWACCEL);
                screen_width = cur_stream->width = event.resize.w;
                screen_height= cur_stream->height= event.resize.h;
            }
            break;
        case SDL_QUIT:
        case FF_QUIT_EVENT:
            do_exit();
            break;
        case FF_ALLOC_EVENT:
            /* posted by the video thread: (re)open the window and allocate
             * the picture buffers on the main (SDL) thread */
            video_open(event.user.data1);
            alloc_picture(event.user.data1);
            break;
        case FF_REFRESH_EVENT:
            video_refresh_timer(event.user.data1);
            cur_stream->refresh=0;
            break;
        default:
            break;
        }
    }
}
2821
2822 static int opt_frame_size(const char *opt, const char *arg)
2823 {
2824     av_log(NULL, AV_LOG_ERROR,
2825            "Option '%s' has been removed, use private format options instead\n", opt);
2826     return AVERROR(EINVAL);
2827 }
2828
2829 static int opt_width(const char *opt, const char *arg)
2830 {
2831     screen_width = parse_number_or_die(opt, arg, OPT_INT64, 1, INT_MAX);
2832     return 0;
2833 }
2834
2835 static int opt_height(const char *opt, const char *arg)
2836 {
2837     screen_height = parse_number_or_die(opt, arg, OPT_INT64, 1, INT_MAX);
2838     return 0;
2839 }
2840
2841 static int opt_format(const char *opt, const char *arg)
2842 {
2843     file_iformat = av_find_input_format(arg);
2844     if (!file_iformat) {
2845         fprintf(stderr, "Unknown input format: %s\n", arg);
2846         return AVERROR(EINVAL);
2847     }
2848     return 0;
2849 }
2850
2851 static int opt_frame_pix_fmt(const char *opt, const char *arg)
2852 {
2853     frame_pix_fmt = av_get_pix_fmt(arg);
2854     return 0;
2855 }
2856
2857 static int opt_sync(const char *opt, const char *arg)
2858 {
2859     if (!strcmp(arg, "audio"))
2860         av_sync_type = AV_SYNC_AUDIO_MASTER;
2861     else if (!strcmp(arg, "video"))
2862         av_sync_type = AV_SYNC_VIDEO_MASTER;
2863     else if (!strcmp(arg, "ext"))
2864         av_sync_type = AV_SYNC_EXTERNAL_CLOCK;
2865     else {
2866         fprintf(stderr, "Unknown value for %s: %s\n", opt, arg);
2867         exit(1);
2868     }
2869     return 0;
2870 }
2871
2872 static int opt_seek(const char *opt, const char *arg)
2873 {
2874     start_time = parse_time_or_die(opt, arg, 1);
2875     return 0;
2876 }
2877
2878 static int opt_duration(const char *opt, const char *arg)
2879 {
2880     duration = parse_time_or_die(opt, arg, 1);
2881     return 0;
2882 }
2883
2884 static int opt_debug(const char *opt, const char *arg)
2885 {
2886     av_log_set_level(99);
2887     debug = parse_number_or_die(opt, arg, OPT_INT64, 0, INT_MAX);
2888     return 0;
2889 }
2890
2891 static int opt_vismv(const char *opt, const char *arg)
2892 {
2893     debug_mv = parse_number_or_die(opt, arg, OPT_INT64, INT_MIN, INT_MAX);
2894     return 0;
2895 }
2896
2897 static int opt_thread_count(const char *opt, const char *arg)
2898 {
2899     thread_count= parse_number_or_die(opt, arg, OPT_INT64, 0, INT_MAX);
2900 #if !HAVE_THREADS
2901     fprintf(stderr, "Warning: not compiled with thread support, using thread emulation\n");
2902 #endif
2903     return 0;
2904 }
2905
/* Command-line option table consumed by cmdutils' parse_options().
 * Each entry: name, flags (HAS_ARG / OPT_* type and category bits),
 * either a callback or a pointer to the variable to set, help text,
 * and the argument placeholder shown in -h output. */
static const OptionDef options[] = {
#include "cmdutils_common_opts.h"
    { "x", HAS_ARG, {(void*)opt_width}, "force displayed width", "width" },
    { "y", HAS_ARG, {(void*)opt_height}, "force displayed height", "height" },
    { "s", HAS_ARG | OPT_VIDEO, {(void*)opt_frame_size}, "set frame size (WxH or abbreviation)", "size" },
    { "fs", OPT_BOOL, {(void*)&is_full_screen}, "force full screen" },
    { "an", OPT_BOOL, {(void*)&audio_disable}, "disable audio" },
    { "vn", OPT_BOOL, {(void*)&video_disable}, "disable video" },
    { "ast", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&wanted_stream[AVMEDIA_TYPE_AUDIO]}, "select desired audio stream", "stream_number" },
    { "vst", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&wanted_stream[AVMEDIA_TYPE_VIDEO]}, "select desired video stream", "stream_number" },
    { "sst", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&wanted_stream[AVMEDIA_TYPE_SUBTITLE]}, "select desired subtitle stream", "stream_number" },
    { "ss", HAS_ARG, {(void*)&opt_seek}, "seek to a given position in seconds", "pos" },
    { "t", HAS_ARG, {(void*)&opt_duration}, "play  \"duration\" seconds of audio/video", "duration" },
    { "bytes", OPT_INT | HAS_ARG, {(void*)&seek_by_bytes}, "seek by bytes 0=off 1=on -1=auto", "val" },
    { "nodisp", OPT_BOOL, {(void*)&display_disable}, "disable graphical display" },
    { "f", HAS_ARG, {(void*)opt_format}, "force format", "fmt" },
    { "pix_fmt", HAS_ARG | OPT_EXPERT | OPT_VIDEO, {(void*)opt_frame_pix_fmt}, "set pixel format", "format" },
    { "stats", OPT_BOOL | OPT_EXPERT, {(void*)&show_status}, "show status", "" },
    { "debug", HAS_ARG | OPT_EXPERT, {(void*)opt_debug}, "print specific debug info", "" },
    { "bug", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&workaround_bugs}, "workaround bugs", "" },
    { "vismv", HAS_ARG | OPT_EXPERT, {(void*)opt_vismv}, "visualize motion vectors", "" },
    { "fast", OPT_BOOL | OPT_EXPERT, {(void*)&fast}, "non spec compliant optimizations", "" },
    { "genpts", OPT_BOOL | OPT_EXPERT, {(void*)&genpts}, "generate pts", "" },
    { "drp", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&decoder_reorder_pts}, "let decoder reorder pts 0=off 1=on -1=auto", ""},
    { "lowres", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&lowres}, "", "" },
    { "skiploop", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&skip_loop_filter}, "", "" },
    { "skipframe", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&skip_frame}, "", "" },
    { "skipidct", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&skip_idct}, "", "" },
    { "idct", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&idct}, "set idct algo",  "algo" },
    { "er", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&error_recognition}, "set error detection threshold (0-4)",  "threshold" },
    { "ec", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&error_concealment}, "set error concealment options",  "bit_mask" },
    { "sync", HAS_ARG | OPT_EXPERT, {(void*)opt_sync}, "set audio-video sync. type (type=audio/video/ext)", "type" },
    { "threads", HAS_ARG | OPT_EXPERT, {(void*)opt_thread_count}, "thread count", "count" },
    { "autoexit", OPT_BOOL | OPT_EXPERT, {(void*)&autoexit}, "exit at the end", "" },
    { "exitonkeydown", OPT_BOOL | OPT_EXPERT, {(void*)&exit_on_keydown}, "exit on key down", "" },
    { "exitonmousedown", OPT_BOOL | OPT_EXPERT, {(void*)&exit_on_mousedown}, "exit on mouse down", "" },
    { "loop", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&loop}, "set number of times the playback shall be looped", "loop count" },
    { "framedrop", OPT_BOOL | OPT_EXPERT, {(void*)&framedrop}, "drop frames when cpu is too slow", "" },
    { "window_title", OPT_STRING | HAS_ARG, {(void*)&window_title}, "set window title", "window title" },
#if CONFIG_AVFILTER
    { "vf", OPT_STRING | HAS_ARG, {(void*)&vfilters}, "video filters", "filter list" },
#endif
    { "rdftspeed", OPT_INT | HAS_ARG| OPT_AUDIO | OPT_EXPERT, {(void*)&rdftspeed}, "rdft speed", "msecs" },
    { "default", HAS_ARG | OPT_AUDIO | OPT_VIDEO | OPT_EXPERT, {(void*)opt_default}, "generic catch all option", "" },
    { "i", 0, {NULL}, "ffmpeg compatibility dummy option", ""},
    { NULL, },
};
2953
/* Print the short one-screen usage summary. */
static void show_usage(void)
{
    printf("Simple media player\n"
           "usage: ffplay [options] input_file\n"
           "\n");
}
2960
/* Print the full -h help: usage line, ffplay's own options (main and
 * advanced), the generic AVCodec/AVFormat decoding options, the swscale
 * options when filtering is not compiled in, and the runtime key bindings. */
static void show_help(void)
{
    av_log_set_callback(log_callback_help);
    show_usage();
    show_help_options(options, "Main options:\n",
                      OPT_EXPERT, 0);
    show_help_options(options, "\nAdvanced options:\n",
                      OPT_EXPERT, OPT_EXPERT);
    printf("\n");
    av_opt_show2(avcodec_opts[0], NULL,
                 AV_OPT_FLAG_DECODING_PARAM, 0);
    printf("\n");
    av_opt_show2(avformat_opts, NULL,
                 AV_OPT_FLAG_DECODING_PARAM, 0);
#if !CONFIG_AVFILTER
    /* without libavfilter, scaling is done directly via swscale, so its
     * options are user-visible */
    printf("\n");
    av_opt_show2(sws_opts, NULL,
                 AV_OPT_FLAG_ENCODING_PARAM, 0);
#endif
    printf("\nWhile playing:\n"
           "q, ESC              quit\n"
           "f                   toggle full screen\n"
           "p, SPC              pause\n"
           "a                   cycle audio channel\n"
           "v                   cycle video channel\n"
           "t                   cycle subtitle channel\n"
           "w                   show audio waves\n"
           "s                   activate frame-step mode\n"
           "left/right          seek backward/forward 10 seconds\n"
           "down/up             seek backward/forward 1 minute\n"
           "mouse click         seek to percentage in file corresponding to fraction of width\n"
           );
}
2994
2995 static void opt_input_file(const char *filename)
2996 {
2997     if (input_filename) {
2998         fprintf(stderr, "Argument '%s' provided as input filename, but '%s' was already specified.\n",
2999                 filename, input_filename);
3000         exit(1);
3001     }
3002     if (!strcmp(filename, "-"))
3003         filename = "pipe:";
3004     input_filename = filename;
3005 }
3006
3007 /* Called from the main */
3008 int main(int argc, char **argv)
3009 {
3010     int flags;
3011
3012     av_log_set_flags(AV_LOG_SKIP_REPEATED);
3013
3014     /* register all codecs, demux and protocols */
3015     avcodec_register_all();
3016 #if CONFIG_AVDEVICE
3017     avdevice_register_all();
3018 #endif
3019 #if CONFIG_AVFILTER
3020     avfilter_register_all();
3021 #endif
3022     av_register_all();
3023
3024     init_opts();
3025
3026     show_banner();
3027
3028     parse_options(argc, argv, options, opt_input_file);
3029
3030     if (!input_filename) {
3031         show_usage();
3032         fprintf(stderr, "An input file must be specified\n");
3033         fprintf(stderr, "Use -h to get full help or, even better, run 'man ffplay'\n");
3034         exit(1);
3035     }
3036
3037     if (display_disable) {
3038         video_disable = 1;
3039     }
3040     flags = SDL_INIT_VIDEO | SDL_INIT_AUDIO | SDL_INIT_TIMER;
3041 #if !defined(__MINGW32__) && !defined(__APPLE__)
3042     flags |= SDL_INIT_EVENTTHREAD; /* Not supported on Windows or Mac OS X */
3043 #endif
3044     if (SDL_Init (flags)) {
3045         fprintf(stderr, "Could not initialize SDL - %s\n", SDL_GetError());
3046         exit(1);
3047     }
3048
3049     if (!display_disable) {
3050 #if HAVE_SDL_VIDEO_SIZE
3051         const SDL_VideoInfo *vi = SDL_GetVideoInfo();
3052         fs_screen_width = vi->current_w;
3053         fs_screen_height = vi->current_h;
3054 #endif
3055     }
3056
3057     SDL_EventState(SDL_ACTIVEEVENT, SDL_IGNORE);
3058     SDL_EventState(SDL_SYSWMEVENT, SDL_IGNORE);
3059     SDL_EventState(SDL_USEREVENT, SDL_IGNORE);
3060
3061     av_init_packet(&flush_pkt);
3062     flush_pkt.data= "FLUSH";
3063
3064     cur_stream = stream_open(input_filename, file_iformat);
3065
3066     event_loop();
3067
3068     /* never returns */
3069
3070     return 0;
3071 }