[ffmpeg] / ffplay.c
1 /*
2  * ffplay : Simple Media Player based on the Libav libraries
3  * Copyright (c) 2003 Fabrice Bellard
4  *
5  * This file is part of Libav.
6  *
7  * Libav is free software; you can redistribute it and/or
8  * modify it under the terms of the GNU Lesser General Public
9  * License as published by the Free Software Foundation; either
10  * version 2.1 of the License, or (at your option) any later version.
11  *
12  * Libav is distributed in the hope that it will be useful,
13  * but WITHOUT ANY WARRANTY; without even the implied warranty of
14  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
15  * Lesser General Public License for more details.
16  *
17  * You should have received a copy of the GNU Lesser General Public
18  * License along with Libav; if not, write to the Free Software
19  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
20  */
21
22 #include "config.h"
23 #include <inttypes.h>
24 #include <math.h>
25 #include <limits.h>
26 #include "libavutil/avstring.h"
27 #include "libavutil/colorspace.h"
28 #include "libavutil/pixdesc.h"
29 #include "libavutil/imgutils.h"
30 #include "libavutil/dict.h"
31 #include "libavutil/parseutils.h"
32 #include "libavutil/samplefmt.h"
33 #include "libavformat/avformat.h"
34 #include "libavdevice/avdevice.h"
35 #include "libswscale/swscale.h"
36 #include "libavcodec/audioconvert.h"
37 #include "libavutil/opt.h"
38 #include "libavcodec/avfft.h"
39
40 #if CONFIG_AVFILTER
41 # include "libavfilter/avfilter.h"
42 # include "libavfilter/avfiltergraph.h"
43 #endif
44
45 #include "cmdutils.h"
46
47 #include <SDL.h>
48 #include <SDL_thread.h>
49
50 #ifdef __MINGW32__
51 #undef main /* We don't want SDL to override our main() */
52 #endif
53
54 #include <unistd.h>
55 #include <assert.h>
56
57 const char program_name[] = "ffplay";
58 const int program_birth_year = 2003;
59
60 #define MAX_QUEUE_SIZE (15 * 1024 * 1024)
61 #define MIN_AUDIOQ_SIZE (20 * 16 * 1024)
62 #define MIN_FRAMES 5
63
64 /* SDL audio buffer size, in samples. Should be small to have precise
65    A/V sync as SDL does not have hardware buffer fullness info. */
66 #define SDL_AUDIO_BUFFER_SIZE 1024
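/* Illustrative latency: at a 44100 Hz sample rate, 1024 samples correspond to
   roughly 23 ms per SDL callback. */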
67
68 /* no AV sync correction is done if below the AV sync threshold */
69 #define AV_SYNC_THRESHOLD 0.01
70 /* no AV correction is done if too big error */
71 #define AV_NOSYNC_THRESHOLD 10.0
72
73 #define FRAME_SKIP_FACTOR 0.05
74
75 /* maximum audio speed change to get correct sync */
76 #define SAMPLE_CORRECTION_PERCENT_MAX 10
77
78 /* we use about AUDIO_DIFF_AVG_NB A-V differences to make the average */
79 #define AUDIO_DIFF_AVG_NB   20
80
81 /* NOTE: the size must be big enough to compensate for the hardware audio buffer size */
82 #define SAMPLE_ARRAY_SIZE (2*65536)
83
84 static int sws_flags = SWS_BICUBIC;
85
86 typedef struct PacketQueue {
87     AVPacketList *first_pkt, *last_pkt;
88     int nb_packets;
89     int size;
90     int abort_request;
91     SDL_mutex *mutex;
92     SDL_cond *cond;
93 } PacketQueue;
94
95 #define VIDEO_PICTURE_QUEUE_SIZE 2
96 #define SUBPICTURE_QUEUE_SIZE 4
97
98 typedef struct VideoPicture {
99     double pts;                                  ///<presentation time stamp for this picture
100     double target_clock;                         ///<av_gettime() time at which this should be displayed ideally
101     int64_t pos;                                 ///<byte position in file
102     SDL_Overlay *bmp;
103     int width, height; /* source height & width */
104     int allocated;
105     enum PixelFormat pix_fmt;
106
107 #if CONFIG_AVFILTER
108     AVFilterBufferRef *picref;
109 #endif
110 } VideoPicture;
111
112 typedef struct SubPicture {
113     double pts; /* presentation time stamp for this picture */
114     AVSubtitle sub;
115 } SubPicture;
116
117 enum {
118     AV_SYNC_AUDIO_MASTER, /* default choice */
119     AV_SYNC_VIDEO_MASTER,
120     AV_SYNC_EXTERNAL_CLOCK, /* synchronize to an external clock */
121 };
122
123 typedef struct VideoState {
124     SDL_Thread *parse_tid;
125     SDL_Thread *video_tid;
126     SDL_Thread *refresh_tid;
127     AVInputFormat *iformat;
128     int no_background;
129     int abort_request;
130     int paused;
131     int last_paused;
132     int seek_req;
133     int seek_flags;
134     int64_t seek_pos;
135     int64_t seek_rel;
136     int read_pause_return;
137     AVFormatContext *ic;
138     int dtg_active_format;
139
140     int audio_stream;
141
142     int av_sync_type;
143     double external_clock; /* external clock base */
144     int64_t external_clock_time;
145
146     double audio_clock;
147     double audio_diff_cum; /* used for AV difference average computation */
148     double audio_diff_avg_coef;
149     double audio_diff_threshold;
150     int audio_diff_avg_count;
151     AVStream *audio_st;
152     PacketQueue audioq;
153     int audio_hw_buf_size;
154     /* samples output by the codec. we reserve more space for avsync
155        compensation */
156     DECLARE_ALIGNED(16,uint8_t,audio_buf1)[(AVCODEC_MAX_AUDIO_FRAME_SIZE * 3) / 2];
157     DECLARE_ALIGNED(16,uint8_t,audio_buf2)[(AVCODEC_MAX_AUDIO_FRAME_SIZE * 3) / 2];
158     uint8_t *audio_buf;
159     unsigned int audio_buf_size; /* in bytes */
160     int audio_buf_index; /* in bytes */
161     AVPacket audio_pkt_temp;
162     AVPacket audio_pkt;
163     enum AVSampleFormat audio_src_fmt;
164     AVAudioConvert *reformat_ctx;
165
166     int show_audio; /* if true, display audio samples */
167     int16_t sample_array[SAMPLE_ARRAY_SIZE];
168     int sample_array_index;
169     int last_i_start;
170     RDFTContext *rdft;
171     int rdft_bits;
172     FFTSample *rdft_data;
173     int xpos;
174
175     SDL_Thread *subtitle_tid;
176     int subtitle_stream;
177     int subtitle_stream_changed;
178     AVStream *subtitle_st;
179     PacketQueue subtitleq;
180     SubPicture subpq[SUBPICTURE_QUEUE_SIZE];
181     int subpq_size, subpq_rindex, subpq_windex;
182     SDL_mutex *subpq_mutex;
183     SDL_cond *subpq_cond;
184
185     double frame_timer;
186     double frame_last_pts;
187     double frame_last_delay;
188     double video_clock;                          ///<pts of last decoded frame / predicted pts of next decoded frame
189     int video_stream;
190     AVStream *video_st;
191     PacketQueue videoq;
192     double video_current_pts;                    ///<current displayed pts (different from video_clock if frame fifos are used)
193     double video_current_pts_drift;              ///<video_current_pts - time (av_gettime) at which we updated video_current_pts - used to have running video pts
194     int64_t video_current_pos;                   ///<current displayed file pos
195     VideoPicture pictq[VIDEO_PICTURE_QUEUE_SIZE];
196     int pictq_size, pictq_rindex, pictq_windex;
197     SDL_mutex *pictq_mutex;
198     SDL_cond *pictq_cond;
199 #if !CONFIG_AVFILTER
200     struct SwsContext *img_convert_ctx;
201 #endif
202
203     //    QETimer *video_timer;
204     char filename[1024];
205     int width, height, xleft, ytop;
206
207     PtsCorrectionContext pts_ctx;
208
209 #if CONFIG_AVFILTER
210     AVFilterContext *out_video_filter;          ///<the last filter in the video chain
211 #endif
212
213     float skip_frames;
214     float skip_frames_index;
215     int refresh;
216 } VideoState;
217
218 static void show_help(void);
219
220 /* options specified by the user */
221 static AVInputFormat *file_iformat;
222 static const char *input_filename;
223 static const char *window_title;
224 static int fs_screen_width;
225 static int fs_screen_height;
226 static int screen_width = 0;
227 static int screen_height = 0;
228 static int frame_width = 0;
229 static int frame_height = 0;
230 static enum PixelFormat frame_pix_fmt = PIX_FMT_NONE;
231 static int audio_disable;
232 static int video_disable;
233 static int wanted_stream[AVMEDIA_TYPE_NB]={
234     [AVMEDIA_TYPE_AUDIO]=-1,
235     [AVMEDIA_TYPE_VIDEO]=-1,
236     [AVMEDIA_TYPE_SUBTITLE]=-1,
237 };
238 static int seek_by_bytes=-1;
239 static int display_disable;
240 static int show_status = 1;
241 static int av_sync_type = AV_SYNC_AUDIO_MASTER;
242 static int64_t start_time = AV_NOPTS_VALUE;
243 static int64_t duration = AV_NOPTS_VALUE;
244 static int debug = 0;
245 static int debug_mv = 0;
246 static int step = 0;
247 static int thread_count = 1;
248 static int workaround_bugs = 1;
249 static int fast = 0;
250 static int genpts = 0;
251 static int lowres = 0;
252 static int idct = FF_IDCT_AUTO;
253 static enum AVDiscard skip_frame= AVDISCARD_DEFAULT;
254 static enum AVDiscard skip_idct= AVDISCARD_DEFAULT;
255 static enum AVDiscard skip_loop_filter= AVDISCARD_DEFAULT;
256 static int error_recognition = FF_ER_CAREFUL;
257 static int error_concealment = 3;
258 static int decoder_reorder_pts= -1;
259 static int autoexit;
260 static int exit_on_keydown;
261 static int exit_on_mousedown;
262 static int loop=1;
263 static int framedrop=1;
264
265 static int rdftspeed=20;
266 #if CONFIG_AVFILTER
267 static char *vfilters = NULL;
268 #endif
269
270 /* current context */
271 static int is_full_screen;
272 static VideoState *cur_stream;
273 static int64_t audio_callback_time;
274
275 static AVPacket flush_pkt;
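/* flush_pkt acts as a sentinel: packet_queue_init() pushes it into every new
   queue (and it is pushed again around seeks), and the decoders detect it by
   comparing pkt->data against flush_pkt.data so they know when to call
   avcodec_flush_buffers() and reset their timing state (see get_video_frame()). */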
276
277 #define FF_ALLOC_EVENT   (SDL_USEREVENT)
278 #define FF_REFRESH_EVENT (SDL_USEREVENT + 1)
279 #define FF_QUIT_EVENT    (SDL_USEREVENT + 2)
280
281 static SDL_Surface *screen;
282
283 static int packet_queue_put(PacketQueue *q, AVPacket *pkt);
284
285 /* packet queue handling */
286 static void packet_queue_init(PacketQueue *q)
287 {
288     memset(q, 0, sizeof(PacketQueue));
289     q->mutex = SDL_CreateMutex();
290     q->cond = SDL_CreateCond();
291     packet_queue_put(q, &flush_pkt);
292 }
293
294 static void packet_queue_flush(PacketQueue *q)
295 {
296     AVPacketList *pkt, *pkt1;
297
298     SDL_LockMutex(q->mutex);
299     for(pkt = q->first_pkt; pkt != NULL; pkt = pkt1) {
300         pkt1 = pkt->next;
301         av_free_packet(&pkt->pkt);
302         av_freep(&pkt);
303     }
304     q->last_pkt = NULL;
305     q->first_pkt = NULL;
306     q->nb_packets = 0;
307     q->size = 0;
308     SDL_UnlockMutex(q->mutex);
309 }
310
311 static void packet_queue_end(PacketQueue *q)
312 {
313     packet_queue_flush(q);
314     SDL_DestroyMutex(q->mutex);
315     SDL_DestroyCond(q->cond);
316 }
317
318 static int packet_queue_put(PacketQueue *q, AVPacket *pkt)
319 {
320     AVPacketList *pkt1;
321
322     /* duplicate the packet */
323     if (pkt!=&flush_pkt && av_dup_packet(pkt) < 0)
324         return -1;
325
326     pkt1 = av_malloc(sizeof(AVPacketList));
327     if (!pkt1)
328         return -1;
329     pkt1->pkt = *pkt;
330     pkt1->next = NULL;
331
332
333     SDL_LockMutex(q->mutex);
334
335     if (!q->last_pkt)
336
337         q->first_pkt = pkt1;
338     else
339         q->last_pkt->next = pkt1;
340     q->last_pkt = pkt1;
341     q->nb_packets++;
342     q->size += pkt1->pkt.size + sizeof(*pkt1);
343     /* XXX: should duplicate packet data in DV case */
344     SDL_CondSignal(q->cond);
345
346     SDL_UnlockMutex(q->mutex);
347     return 0;
348 }
349
350 static void packet_queue_abort(PacketQueue *q)
351 {
352     SDL_LockMutex(q->mutex);
353
354     q->abort_request = 1;
355
356     SDL_CondSignal(q->cond);
357
358     SDL_UnlockMutex(q->mutex);
359 }
360
361 /* return < 0 if aborted, 0 if no packet and > 0 if packet.  */
362 static int packet_queue_get(PacketQueue *q, AVPacket *pkt, int block)
363 {
364     AVPacketList *pkt1;
365     int ret;
366
367     SDL_LockMutex(q->mutex);
368
369     for(;;) {
370         if (q->abort_request) {
371             ret = -1;
372             break;
373         }
374
375         pkt1 = q->first_pkt;
376         if (pkt1) {
377             q->first_pkt = pkt1->next;
378             if (!q->first_pkt)
379                 q->last_pkt = NULL;
380             q->nb_packets--;
381             q->size -= pkt1->pkt.size + sizeof(*pkt1);
382             *pkt = pkt1->pkt;
383             av_free(pkt1);
384             ret = 1;
385             break;
386         } else if (!block) {
387             ret = 0;
388             break;
389         } else {
390             SDL_CondWait(q->cond, q->mutex);
391         }
392     }
393     SDL_UnlockMutex(q->mutex);
394     return ret;
395 }
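/* Typical usage: the read/parse thread acts as producer via packet_queue_put(),
   each decoder acts as consumer via packet_queue_get() with block=1, and
   packet_queue_abort() wakes any blocked consumer so the threads can exit. */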
396
397 static inline void fill_rectangle(SDL_Surface *screen,
398                                   int x, int y, int w, int h, int color)
399 {
400     SDL_Rect rect;
401     rect.x = x;
402     rect.y = y;
403     rect.w = w;
404     rect.h = h;
405     SDL_FillRect(screen, &rect, color);
406 }
407
408 #define ALPHA_BLEND(a, oldp, newp, s)\
409 ((((oldp << s) * (255 - (a))) + (newp * (a))) / (255 << s))
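/* Illustrative: with s == 0, a == 255 yields newp and a == 0 yields oldp.
   The shift s lets callers pass a sum of 2 (s == 1) or 4 (s == 2) chroma
   samples as newp, so blending and averaging of the subsampled chroma happen
   in one step. */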
410
411 #define RGBA_IN(r, g, b, a, s)\
412 {\
413     unsigned int v = ((const uint32_t *)(s))[0];\
414     a = (v >> 24) & 0xff;\
415     r = (v >> 16) & 0xff;\
416     g = (v >> 8) & 0xff;\
417     b = v & 0xff;\
418 }
419
420 #define YUVA_IN(y, u, v, a, s, pal)\
421 {\
422     unsigned int val = ((const uint32_t *)(pal))[*(const uint8_t*)(s)];\
423     a = (val >> 24) & 0xff;\
424     y = (val >> 16) & 0xff;\
425     u = (val >> 8) & 0xff;\
426     v = val & 0xff;\
427 }
428
429 #define YUVA_OUT(d, y, u, v, a)\
430 {\
431     ((uint32_t *)(d))[0] = (a << 24) | (y << 16) | (u << 8) | v;\
432 }
433
434
435 #define BPP 1
436
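/* Blend one palettized (PAL8 data + 32-bit palette) subtitle rectangle onto a
   YUV 4:2:0 destination picture.  Luma is blended per pixel; chroma is
   subsampled 2x2, which is why the code handles odd first rows/columns
   separately and otherwise accumulates u/v/alpha over 2x2 blocks before
   blending. */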
437 static void blend_subrect(AVPicture *dst, const AVSubtitleRect *rect, int imgw, int imgh)
438 {
439     int wrap, wrap3, width2, skip2;
440     int y, u, v, a, u1, v1, a1, w, h;
441     uint8_t *lum, *cb, *cr;
442     const uint8_t *p;
443     const uint32_t *pal;
444     int dstx, dsty, dstw, dsth;
445
446     dstw = av_clip(rect->w, 0, imgw);
447     dsth = av_clip(rect->h, 0, imgh);
448     dstx = av_clip(rect->x, 0, imgw - dstw);
449     dsty = av_clip(rect->y, 0, imgh - dsth);
450     lum = dst->data[0] + dsty * dst->linesize[0];
451     cb = dst->data[1] + (dsty >> 1) * dst->linesize[1];
452     cr = dst->data[2] + (dsty >> 1) * dst->linesize[2];
453
454     width2 = ((dstw + 1) >> 1) + (dstx & ~dstw & 1);
455     skip2 = dstx >> 1;
456     wrap = dst->linesize[0];
457     wrap3 = rect->pict.linesize[0];
458     p = rect->pict.data[0];
459     pal = (const uint32_t *)rect->pict.data[1];  /* Now in YCrCb! */
460
461     if (dsty & 1) {
462         lum += dstx;
463         cb += skip2;
464         cr += skip2;
465
466         if (dstx & 1) {
467             YUVA_IN(y, u, v, a, p, pal);
468             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
469             cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
470             cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
471             cb++;
472             cr++;
473             lum++;
474             p += BPP;
475         }
476         for(w = dstw - (dstx & 1); w >= 2; w -= 2) {
477             YUVA_IN(y, u, v, a, p, pal);
478             u1 = u;
479             v1 = v;
480             a1 = a;
481             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
482
483             YUVA_IN(y, u, v, a, p + BPP, pal);
484             u1 += u;
485             v1 += v;
486             a1 += a;
487             lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
488             cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
489             cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
490             cb++;
491             cr++;
492             p += 2 * BPP;
493             lum += 2;
494         }
495         if (w) {
496             YUVA_IN(y, u, v, a, p, pal);
497             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
498             cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
499             cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
500             p++;
501             lum++;
502         }
503         p += wrap3 - dstw * BPP;
504         lum += wrap - dstw - dstx;
505         cb += dst->linesize[1] - width2 - skip2;
506         cr += dst->linesize[2] - width2 - skip2;
507     }
508     for(h = dsth - (dsty & 1); h >= 2; h -= 2) {
509         lum += dstx;
510         cb += skip2;
511         cr += skip2;
512
513         if (dstx & 1) {
514             YUVA_IN(y, u, v, a, p, pal);
515             u1 = u;
516             v1 = v;
517             a1 = a;
518             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
519             p += wrap3;
520             lum += wrap;
521             YUVA_IN(y, u, v, a, p, pal);
522             u1 += u;
523             v1 += v;
524             a1 += a;
525             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
526             cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
527             cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
528             cb++;
529             cr++;
530             p += -wrap3 + BPP;
531             lum += -wrap + 1;
532         }
533         for(w = dstw - (dstx & 1); w >= 2; w -= 2) {
534             YUVA_IN(y, u, v, a, p, pal);
535             u1 = u;
536             v1 = v;
537             a1 = a;
538             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
539
540             YUVA_IN(y, u, v, a, p + BPP, pal);
541             u1 += u;
542             v1 += v;
543             a1 += a;
544             lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
545             p += wrap3;
546             lum += wrap;
547
548             YUVA_IN(y, u, v, a, p, pal);
549             u1 += u;
550             v1 += v;
551             a1 += a;
552             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
553
554             YUVA_IN(y, u, v, a, p + BPP, pal);
555             u1 += u;
556             v1 += v;
557             a1 += a;
558             lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
559
560             cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 2);
561             cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 2);
562
563             cb++;
564             cr++;
565             p += -wrap3 + 2 * BPP;
566             lum += -wrap + 2;
567         }
568         if (w) {
569             YUVA_IN(y, u, v, a, p, pal);
570             u1 = u;
571             v1 = v;
572             a1 = a;
573             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
574             p += wrap3;
575             lum += wrap;
576             YUVA_IN(y, u, v, a, p, pal);
577             u1 += u;
578             v1 += v;
579             a1 += a;
580             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
581             cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
582             cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
583             cb++;
584             cr++;
585             p += -wrap3 + BPP;
586             lum += -wrap + 1;
587         }
588         p += wrap3 + (wrap3 - dstw * BPP);
589         lum += wrap + (wrap - dstw - dstx);
590         cb += dst->linesize[1] - width2 - skip2;
591         cr += dst->linesize[2] - width2 - skip2;
592     }
593     /* handle odd height */
594     if (h) {
595         lum += dstx;
596         cb += skip2;
597         cr += skip2;
598
599         if (dstx & 1) {
600             YUVA_IN(y, u, v, a, p, pal);
601             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
602             cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
603             cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
604             cb++;
605             cr++;
606             lum++;
607             p += BPP;
608         }
609         for(w = dstw - (dstx & 1); w >= 2; w -= 2) {
610             YUVA_IN(y, u, v, a, p, pal);
611             u1 = u;
612             v1 = v;
613             a1 = a;
614             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
615
616             YUVA_IN(y, u, v, a, p + BPP, pal);
617             u1 += u;
618             v1 += v;
619             a1 += a;
620             lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
621             cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
622             cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
623             cb++;
624             cr++;
625             p += 2 * BPP;
626             lum += 2;
627         }
628         if (w) {
629             YUVA_IN(y, u, v, a, p, pal);
630             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
631             cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
632             cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
633         }
634     }
635 }
636
637 static void free_subpicture(SubPicture *sp)
638 {
639     avsubtitle_free(&sp->sub);
640 }
641
642 static void video_image_display(VideoState *is)
643 {
644     VideoPicture *vp;
645     SubPicture *sp;
646     AVPicture pict;
647     float aspect_ratio;
648     int width, height, x, y;
649     SDL_Rect rect;
650     int i;
651
652     vp = &is->pictq[is->pictq_rindex];
653     if (vp->bmp) {
654 #if CONFIG_AVFILTER
655          if (vp->picref->video->pixel_aspect.num == 0)
656              aspect_ratio = 0;
657          else
658              aspect_ratio = av_q2d(vp->picref->video->pixel_aspect);
659 #else
660
661         /* XXX: use variable in the frame */
662         if (is->video_st->sample_aspect_ratio.num)
663             aspect_ratio = av_q2d(is->video_st->sample_aspect_ratio);
664         else if (is->video_st->codec->sample_aspect_ratio.num)
665             aspect_ratio = av_q2d(is->video_st->codec->sample_aspect_ratio);
666         else
667             aspect_ratio = 0;
668 #endif
669         if (aspect_ratio <= 0.0)
670             aspect_ratio = 1.0;
671         aspect_ratio *= (float)vp->width / (float)vp->height;
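        /* aspect_ratio started out as the sample (pixel) aspect ratio;
           multiplying by the stored width/height turns it into the display
           aspect ratio used to size the letterboxed rectangle below. */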
672
673         if (is->subtitle_st)
674         {
675             if (is->subpq_size > 0)
676             {
677                 sp = &is->subpq[is->subpq_rindex];
678
679                 if (vp->pts >= sp->pts + ((float) sp->sub.start_display_time / 1000))
680                 {
681                     SDL_LockYUVOverlay (vp->bmp);
682
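                    /* SDL's YV12 overlay keeps the V plane before the U plane,
                       while AVPicture uses Y, U, V order, hence the swapped
                       [1]/[2] indices below. */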
683                     pict.data[0] = vp->bmp->pixels[0];
684                     pict.data[1] = vp->bmp->pixels[2];
685                     pict.data[2] = vp->bmp->pixels[1];
686
687                     pict.linesize[0] = vp->bmp->pitches[0];
688                     pict.linesize[1] = vp->bmp->pitches[2];
689                     pict.linesize[2] = vp->bmp->pitches[1];
690
691                     for (i = 0; i < sp->sub.num_rects; i++)
692                         blend_subrect(&pict, sp->sub.rects[i],
693                                       vp->bmp->w, vp->bmp->h);
694
695                     SDL_UnlockYUVOverlay (vp->bmp);
696                 }
697             }
698         }
699
700
701         /* XXX: we assume the screen has a 1:1 pixel aspect ratio */
702         height = is->height;
703         width = ((int)rint(height * aspect_ratio)) & ~1;
704         if (width > is->width) {
705             width = is->width;
706             height = ((int)rint(width / aspect_ratio)) & ~1;
707         }
708         x = (is->width - width) / 2;
709         y = (is->height - height) / 2;
710         is->no_background = 0;
711         rect.x = is->xleft + x;
712         rect.y = is->ytop  + y;
713         rect.w = width;
714         rect.h = height;
715         SDL_DisplayYUVOverlay(vp->bmp, &rect);
716     }
717 }
718
719 /* get the current audio output buffer size, in bytes. With SDL, we
720    cannot have precise information */
721 static int audio_write_get_buf_size(VideoState *is)
722 {
723     return is->audio_buf_size - is->audio_buf_index;
724 }
725
726 static inline int compute_mod(int a, int b)
727 {
728     a = a % b;
729     if (a >= 0)
730         return a;
731     else
732         return a + b;
733 }
734
735 static void video_audio_display(VideoState *s)
736 {
737     int i, i_start, x, y1, y, ys, delay, n, nb_display_channels;
738     int ch, channels, h, h2, bgcolor, fgcolor;
739     int16_t time_diff;
740     int rdft_bits, nb_freq;
741
742     for(rdft_bits=1; (1<<rdft_bits)<2*s->height; rdft_bits++)
743         ;
744     nb_freq= 1<<(rdft_bits-1);
745
746     /* compute display index: center on currently output samples */
747     channels = s->audio_st->codec->channels;
748     nb_display_channels = channels;
749     if (!s->paused) {
750         int data_used= s->show_audio==1 ? s->width : (2*nb_freq);
751         n = 2 * channels;
752         delay = audio_write_get_buf_size(s);
753         delay /= n;
754
755         /* to be more precise, we take into account the time spent since
756            the last buffer computation */
757         if (audio_callback_time) {
758             time_diff = av_gettime() - audio_callback_time;
759             delay -= (time_diff * s->audio_st->codec->sample_rate) / 1000000;
760         }
761
762         delay += 2*data_used;
763         if (delay < data_used)
764             delay = data_used;
765
766         i_start= x = compute_mod(s->sample_array_index - delay * channels, SAMPLE_ARRAY_SIZE);
767         if(s->show_audio==1){
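            /* Heuristic, roughly: among the last ~1000 samples, pick the start
               index where the waveform crosses zero a few frames ahead
               ((b^c) < 0) and the drop a-d over 9 frames is steepest, so each
               refresh starts drawing near a similar phase of the waveform. */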
768             h= INT_MIN;
769             for(i=0; i<1000; i+=channels){
770                 int idx= (SAMPLE_ARRAY_SIZE + x - i) % SAMPLE_ARRAY_SIZE;
771                 int a= s->sample_array[idx];
772                 int b= s->sample_array[(idx + 4*channels)%SAMPLE_ARRAY_SIZE];
773                 int c= s->sample_array[(idx + 5*channels)%SAMPLE_ARRAY_SIZE];
774                 int d= s->sample_array[(idx + 9*channels)%SAMPLE_ARRAY_SIZE];
775                 int score= a-d;
776                 if(h<score && (b^c)<0){
777                     h= score;
778                     i_start= idx;
779                 }
780             }
781         }
782
783         s->last_i_start = i_start;
784     } else {
785         i_start = s->last_i_start;
786     }
787
788     bgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0x00);
789     if(s->show_audio==1){
790         fill_rectangle(screen,
791                        s->xleft, s->ytop, s->width, s->height,
792                        bgcolor);
793
794         fgcolor = SDL_MapRGB(screen->format, 0xff, 0xff, 0xff);
795
796         /* total height for one channel */
797         h = s->height / nb_display_channels;
798         /* graph height / 2 */
799         h2 = (h * 9) / 20;
800         for(ch = 0;ch < nb_display_channels; ch++) {
801             i = i_start + ch;
802             y1 = s->ytop + ch * h + (h / 2); /* position of center line */
803             for(x = 0; x < s->width; x++) {
804                 y = (s->sample_array[i] * h2) >> 15;
805                 if (y < 0) {
806                     y = -y;
807                     ys = y1 - y;
808                 } else {
809                     ys = y1;
810                 }
811                 fill_rectangle(screen,
812                                s->xleft + x, ys, 1, y,
813                                fgcolor);
814                 i += channels;
815                 if (i >= SAMPLE_ARRAY_SIZE)
816                     i -= SAMPLE_ARRAY_SIZE;
817             }
818         }
819
820         fgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0xff);
821
822         for(ch = 1;ch < nb_display_channels; ch++) {
823             y = s->ytop + ch * h;
824             fill_rectangle(screen,
825                            s->xleft, y, s->width, 1,
826                            fgcolor);
827         }
828         SDL_UpdateRect(screen, s->xleft, s->ytop, s->width, s->height);
829     }else{
830         nb_display_channels= FFMIN(nb_display_channels, 2);
831         if(rdft_bits != s->rdft_bits){
832             av_rdft_end(s->rdft);
833             av_free(s->rdft_data);
834             s->rdft = av_rdft_init(rdft_bits, DFT_R2C);
835             s->rdft_bits= rdft_bits;
836             s->rdft_data= av_malloc(4*nb_freq*sizeof(*s->rdft_data));
837         }
838         {
839             FFTSample *data[2];
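            /* Spectrogram path: per displayed channel, copy 2*nb_freq samples,
               apply a parabolic (Welch-style) window 1 - w*w, run the real
               FFT, then draw one column of magnitudes per refresh while xpos
               advances across the window. */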
840             for(ch = 0;ch < nb_display_channels; ch++) {
841                 data[ch] = s->rdft_data + 2*nb_freq*ch;
842                 i = i_start + ch;
843                 for(x = 0; x < 2*nb_freq; x++) {
844                     double w= (x-nb_freq)*(1.0/nb_freq);
845                     data[ch][x]= s->sample_array[i]*(1.0-w*w);
846                     i += channels;
847                     if (i >= SAMPLE_ARRAY_SIZE)
848                         i -= SAMPLE_ARRAY_SIZE;
849                 }
850                 av_rdft_calc(s->rdft, data[ch]);
851             }
852             // least efficient way to do this; we should of course directly access it, but it's more than fast enough
853             for(y=0; y<s->height; y++){
854                 double w= 1/sqrt(nb_freq);
855                 int a= sqrt(w*sqrt(data[0][2*y+0]*data[0][2*y+0] + data[0][2*y+1]*data[0][2*y+1]));
856                 int b= (nb_display_channels == 2 ) ? sqrt(w*sqrt(data[1][2*y+0]*data[1][2*y+0]
857                        + data[1][2*y+1]*data[1][2*y+1])) : a;
858                 a= FFMIN(a,255);
859                 b= FFMIN(b,255);
860                 fgcolor = SDL_MapRGB(screen->format, a, b, (a+b)/2);
861
862                 fill_rectangle(screen,
863                             s->xpos, s->height-y, 1, 1,
864                             fgcolor);
865             }
866         }
867         SDL_UpdateRect(screen, s->xpos, s->ytop, 1, s->height);
868         s->xpos++;
869         if(s->xpos >= s->width)
870             s->xpos= s->xleft;
871     }
872 }
873
874 static int video_open(VideoState *is){
875     int flags = SDL_HWSURFACE|SDL_ASYNCBLIT|SDL_HWACCEL;
876     int w,h;
877
878     if(is_full_screen) flags |= SDL_FULLSCREEN;
879     else               flags |= SDL_RESIZABLE;
880
881     if (is_full_screen && fs_screen_width) {
882         w = fs_screen_width;
883         h = fs_screen_height;
884     } else if(!is_full_screen && screen_width){
885         w = screen_width;
886         h = screen_height;
887 #if CONFIG_AVFILTER
888     }else if (is->out_video_filter && is->out_video_filter->inputs[0]){
889         w = is->out_video_filter->inputs[0]->w;
890         h = is->out_video_filter->inputs[0]->h;
891 #else
892     }else if (is->video_st && is->video_st->codec->width){
893         w = is->video_st->codec->width;
894         h = is->video_st->codec->height;
895 #endif
896     } else {
897         w = 640;
898         h = 480;
899     }
900     if(screen && is->width == screen->w && screen->w == w
901        && is->height== screen->h && screen->h == h)
902         return 0;
903
904 #ifndef __APPLE__
905     screen = SDL_SetVideoMode(w, h, 0, flags);
906 #else
907     /* setting bits_per_pixel = 0 or 32 causes blank video on OS X */
908     screen = SDL_SetVideoMode(w, h, 24, flags);
909 #endif
910     if (!screen) {
911         fprintf(stderr, "SDL: could not set video mode - exiting\n");
912         return -1;
913     }
914     if (!window_title)
915         window_title = input_filename;
916     SDL_WM_SetCaption(window_title, window_title);
917
918     is->width = screen->w;
919     is->height = screen->h;
920
921     return 0;
922 }
923
924 /* display the current picture, if any */
925 static void video_display(VideoState *is)
926 {
927     if(!screen)
928         video_open(cur_stream);
929     if (is->audio_st && is->show_audio)
930         video_audio_display(is);
931     else if (is->video_st)
932         video_image_display(is);
933 }
934
935 static int refresh_thread(void *opaque)
936 {
937     VideoState *is= opaque;
938     while(!is->abort_request){
939         SDL_Event event;
940         event.type = FF_REFRESH_EVENT;
941         event.user.data1 = opaque;
942         if(!is->refresh){
943             is->refresh=1;
944             SDL_PushEvent(&event);
945         }
946         usleep(is->audio_st && is->show_audio ? rdftspeed*1000 : 5000); // FIXME: ideally we should wait for the correct time, but SDL's event passing is so slow it would be silly
947     }
948     return 0;
949 }
950
951 /* get the current audio clock value */
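/* audio_clock tracks the pts at the end of the data decoded so far; subtracting
   the bytes still waiting in our output buffer (converted to seconds assuming
   16-bit samples: sample_rate * 2 * channels bytes per second) gives,
   approximately, the pts of the sample currently being played. */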
952 static double get_audio_clock(VideoState *is)
953 {
954     double pts;
955     int hw_buf_size, bytes_per_sec;
956     pts = is->audio_clock;
957     hw_buf_size = audio_write_get_buf_size(is);
958     bytes_per_sec = 0;
959     if (is->audio_st) {
960         bytes_per_sec = is->audio_st->codec->sample_rate *
961             2 * is->audio_st->codec->channels;
962     }
963     if (bytes_per_sec)
964         pts -= (double)hw_buf_size / bytes_per_sec;
965     return pts;
966 }
967
968 /* get the current video clock value */
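/* video_current_pts_drift is (displayed pts - wall-clock time at the moment it
   was displayed), so adding the current wall-clock time extrapolates the video
   clock between refreshes; while paused, the frozen pts is returned instead. */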
969 static double get_video_clock(VideoState *is)
970 {
971     if (is->paused) {
972         return is->video_current_pts;
973     } else {
974         return is->video_current_pts_drift + av_gettime() / 1000000.0;
975     }
976 }
977
978 /* get the current external clock value */
979 static double get_external_clock(VideoState *is)
980 {
981     int64_t ti;
982     ti = av_gettime();
983     return is->external_clock + ((ti - is->external_clock_time) * 1e-6);
984 }
985
986 /* get the current master clock value */
987 static double get_master_clock(VideoState *is)
988 {
989     double val;
990
991     if (is->av_sync_type == AV_SYNC_VIDEO_MASTER) {
992         if (is->video_st)
993             val = get_video_clock(is);
994         else
995             val = get_audio_clock(is);
996     } else if (is->av_sync_type == AV_SYNC_AUDIO_MASTER) {
997         if (is->audio_st)
998             val = get_audio_clock(is);
999         else
1000             val = get_video_clock(is);
1001     } else {
1002         val = get_external_clock(is);
1003     }
1004     return val;
1005 }
1006
1007 /* seek in the stream */
1008 static void stream_seek(VideoState *is, int64_t pos, int64_t rel, int seek_by_bytes)
1009 {
1010     if (!is->seek_req) {
1011         is->seek_pos = pos;
1012         is->seek_rel = rel;
1013         is->seek_flags &= ~AVSEEK_FLAG_BYTE;
1014         if (seek_by_bytes)
1015             is->seek_flags |= AVSEEK_FLAG_BYTE;
1016         is->seek_req = 1;
1017     }
1018 }
1019
1020 /* pause or resume the video */
1021 static void stream_pause(VideoState *is)
1022 {
1023     if (is->paused) {
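        /* resuming: advance frame_timer by the wall-clock time that passed
           since the last displayed frame (i.e. roughly the time spent paused)
           so the refresh logic does not try to catch up on that interval. */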
1024         is->frame_timer += av_gettime() / 1000000.0 + is->video_current_pts_drift - is->video_current_pts;
1025         if(is->read_pause_return != AVERROR(ENOSYS)){
1026             is->video_current_pts = is->video_current_pts_drift + av_gettime() / 1000000.0;
1027         }
1028         is->video_current_pts_drift = is->video_current_pts - av_gettime() / 1000000.0;
1029     }
1030     is->paused = !is->paused;
1031 }
1032
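/* Rough sketch of the policy below: start from the nominal delay (pts
   difference to the previous frame); when video is not the master clock,
   compare the video clock with the master.  If video is late by more than the
   threshold, the delay collapses to 0 (catch up); if it is early, the delay is
   doubled (effectively repeating the frame).  frame_timer accumulates these
   delays and is the absolute time at which the frame should be shown. */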
1033 static double compute_target_time(double frame_current_pts, VideoState *is)
1034 {
1035     double delay, sync_threshold, diff;
1036
1037     /* compute nominal delay */
1038     delay = frame_current_pts - is->frame_last_pts;
1039     if (delay <= 0 || delay >= 10.0) {
1040         /* if incorrect delay, use previous one */
1041         delay = is->frame_last_delay;
1042     } else {
1043         is->frame_last_delay = delay;
1044     }
1045     is->frame_last_pts = frame_current_pts;
1046
1047     /* update delay to follow master synchronisation source */
1048     if (((is->av_sync_type == AV_SYNC_AUDIO_MASTER && is->audio_st) ||
1049          is->av_sync_type == AV_SYNC_EXTERNAL_CLOCK)) {
1050         /* if video is slave, we try to correct big delays by
1051            duplicating or deleting a frame */
1052         diff = get_video_clock(is) - get_master_clock(is);
1053
1054         /* skip or repeat frame. We take into account the
1055            delay to compute the threshold. I still don't know
1056            if it is the best guess */
1057         sync_threshold = FFMAX(AV_SYNC_THRESHOLD, delay);
1058         if (fabs(diff) < AV_NOSYNC_THRESHOLD) {
1059             if (diff <= -sync_threshold)
1060                 delay = 0;
1061             else if (diff >= sync_threshold)
1062                 delay = 2 * delay;
1063         }
1064     }
1065     is->frame_timer += delay;
1066
1067     av_dlog(NULL, "video: delay=%0.3f pts=%0.3f A-V=%f\n",
1068             delay, frame_current_pts, -diff);
1069
1070     return is->frame_timer;
1071 }
1072
1073 /* called to display each frame */
1074 static void video_refresh_timer(void *opaque)
1075 {
1076     VideoState *is = opaque;
1077     VideoPicture *vp;
1078
1079     SubPicture *sp, *sp2;
1080
1081     if (is->video_st) {
1082 retry:
1083         if (is->pictq_size == 0) {
1084             // nothing to do, no picture to display in the queue
1085         } else {
1086             double time= av_gettime()/1000000.0;
1087             double next_target;
1088             /* dequeue the picture */
1089             vp = &is->pictq[is->pictq_rindex];
1090
1091             if(time < vp->target_clock)
1092                 return;
1093             /* update current video pts */
1094             is->video_current_pts = vp->pts;
1095             is->video_current_pts_drift = is->video_current_pts - time;
1096             is->video_current_pos = vp->pos;
1097             if(is->pictq_size > 1){
1098                 VideoPicture *nextvp= &is->pictq[(is->pictq_rindex+1)%VIDEO_PICTURE_QUEUE_SIZE];
1099                 assert(nextvp->target_clock >= vp->target_clock);
1100                 next_target= nextvp->target_clock;
1101             }else{
1102                 next_target= vp->target_clock + is->video_clock - vp->pts; //FIXME pass durations cleanly
1103             }
1104             if(framedrop && time > next_target){
1105                 is->skip_frames *= 1.0 + FRAME_SKIP_FACTOR;
1106                 if(is->pictq_size > 1 || time > next_target + 0.5){
1107                     /* update queue size and signal for next picture */
1108                     if (++is->pictq_rindex == VIDEO_PICTURE_QUEUE_SIZE)
1109                         is->pictq_rindex = 0;
1110
1111                     SDL_LockMutex(is->pictq_mutex);
1112                     is->pictq_size--;
1113                     SDL_CondSignal(is->pictq_cond);
1114                     SDL_UnlockMutex(is->pictq_mutex);
1115                     goto retry;
1116                 }
1117             }
1118
1119             if(is->subtitle_st) {
1120                 if (is->subtitle_stream_changed) {
1121                     SDL_LockMutex(is->subpq_mutex);
1122
1123                     while (is->subpq_size) {
1124                         free_subpicture(&is->subpq[is->subpq_rindex]);
1125
1126                         /* update queue size and signal for next picture */
1127                         if (++is->subpq_rindex == SUBPICTURE_QUEUE_SIZE)
1128                             is->subpq_rindex = 0;
1129
1130                         is->subpq_size--;
1131                     }
1132                     is->subtitle_stream_changed = 0;
1133
1134                     SDL_CondSignal(is->subpq_cond);
1135                     SDL_UnlockMutex(is->subpq_mutex);
1136                 } else {
1137                     if (is->subpq_size > 0) {
1138                         sp = &is->subpq[is->subpq_rindex];
1139
1140                         if (is->subpq_size > 1)
1141                             sp2 = &is->subpq[(is->subpq_rindex + 1) % SUBPICTURE_QUEUE_SIZE];
1142                         else
1143                             sp2 = NULL;
1144
1145                         if ((is->video_current_pts > (sp->pts + ((float) sp->sub.end_display_time / 1000)))
1146                                 || (sp2 && is->video_current_pts > (sp2->pts + ((float) sp2->sub.start_display_time / 1000))))
1147                         {
1148                             free_subpicture(sp);
1149
1150                             /* update queue size and signal for next picture */
1151                             if (++is->subpq_rindex == SUBPICTURE_QUEUE_SIZE)
1152                                 is->subpq_rindex = 0;
1153
1154                             SDL_LockMutex(is->subpq_mutex);
1155                             is->subpq_size--;
1156                             SDL_CondSignal(is->subpq_cond);
1157                             SDL_UnlockMutex(is->subpq_mutex);
1158                         }
1159                     }
1160                 }
1161             }
1162
1163             /* display picture */
1164             if (!display_disable)
1165                 video_display(is);
1166
1167             /* update queue size and signal for next picture */
1168             if (++is->pictq_rindex == VIDEO_PICTURE_QUEUE_SIZE)
1169                 is->pictq_rindex = 0;
1170
1171             SDL_LockMutex(is->pictq_mutex);
1172             is->pictq_size--;
1173             SDL_CondSignal(is->pictq_cond);
1174             SDL_UnlockMutex(is->pictq_mutex);
1175         }
1176     } else if (is->audio_st) {
1177         /* draw the next audio frame */
1178
1179         /* if there is only an audio stream, then display the audio bars
1180            (better than nothing, just to test the implementation) */
1181
1182         /* display picture */
1183         if (!display_disable)
1184             video_display(is);
1185     }
1186     if (show_status) {
1187         static int64_t last_time;
1188         int64_t cur_time;
1189         int aqsize, vqsize, sqsize;
1190         double av_diff;
1191
1192         cur_time = av_gettime();
1193         if (!last_time || (cur_time - last_time) >= 30000) {
1194             aqsize = 0;
1195             vqsize = 0;
1196             sqsize = 0;
1197             if (is->audio_st)
1198                 aqsize = is->audioq.size;
1199             if (is->video_st)
1200                 vqsize = is->videoq.size;
1201             if (is->subtitle_st)
1202                 sqsize = is->subtitleq.size;
1203             av_diff = 0;
1204             if (is->audio_st && is->video_st)
1205                 av_diff = get_audio_clock(is) - get_video_clock(is);
1206             printf("%7.2f A-V:%7.3f s:%3.1f aq=%5dKB vq=%5dKB sq=%5dB f=%"PRId64"/%"PRId64"   \r",
1207                    get_master_clock(is), av_diff, FFMAX(is->skip_frames-1, 0), aqsize / 1024, vqsize / 1024, sqsize, is->pts_ctx.num_faulty_dts, is->pts_ctx.num_faulty_pts);
1208             fflush(stdout);
1209             last_time = cur_time;
1210         }
1211     }
1212 }
1213
1214 static void stream_close(VideoState *is)
1215 {
1216     VideoPicture *vp;
1217     int i;
1218     /* XXX: use a special url_shutdown call to abort parse cleanly */
1219     is->abort_request = 1;
1220     SDL_WaitThread(is->parse_tid, NULL);
1221     SDL_WaitThread(is->refresh_tid, NULL);
1222
1223     /* free all pictures */
1224     for(i=0;i<VIDEO_PICTURE_QUEUE_SIZE; i++) {
1225         vp = &is->pictq[i];
1226 #if CONFIG_AVFILTER
1227         if (vp->picref) {
1228             avfilter_unref_buffer(vp->picref);
1229             vp->picref = NULL;
1230         }
1231 #endif
1232         if (vp->bmp) {
1233             SDL_FreeYUVOverlay(vp->bmp);
1234             vp->bmp = NULL;
1235         }
1236     }
1237     SDL_DestroyMutex(is->pictq_mutex);
1238     SDL_DestroyCond(is->pictq_cond);
1239     SDL_DestroyMutex(is->subpq_mutex);
1240     SDL_DestroyCond(is->subpq_cond);
1241 #if !CONFIG_AVFILTER
1242     if (is->img_convert_ctx)
1243         sws_freeContext(is->img_convert_ctx);
1244 #endif
1245     av_free(is);
1246 }
1247
1248 static void do_exit(void)
1249 {
1250     if (cur_stream) {
1251         stream_close(cur_stream);
1252         cur_stream = NULL;
1253     }
1254     uninit_opts();
1255 #if CONFIG_AVFILTER
1256     avfilter_uninit();
1257 #endif
1258     if (show_status)
1259         printf("\n");
1260     SDL_Quit();
1261     av_log(NULL, AV_LOG_QUIET, "");
1262     exit(0);
1263 }
1264
1265 /* allocate a picture (this needs to be done in the main thread to avoid
1266    potential locking problems) */
1267 static void alloc_picture(void *opaque)
1268 {
1269     VideoState *is = opaque;
1270     VideoPicture *vp;
1271
1272     vp = &is->pictq[is->pictq_windex];
1273
1274     if (vp->bmp)
1275         SDL_FreeYUVOverlay(vp->bmp);
1276
1277 #if CONFIG_AVFILTER
1278     if (vp->picref)
1279         avfilter_unref_buffer(vp->picref);
1280     vp->picref = NULL;
1281
1282     vp->width   = is->out_video_filter->inputs[0]->w;
1283     vp->height  = is->out_video_filter->inputs[0]->h;
1284     vp->pix_fmt = is->out_video_filter->inputs[0]->format;
1285 #else
1286     vp->width   = is->video_st->codec->width;
1287     vp->height  = is->video_st->codec->height;
1288     vp->pix_fmt = is->video_st->codec->pix_fmt;
1289 #endif
1290
1291     vp->bmp = SDL_CreateYUVOverlay(vp->width, vp->height,
1292                                    SDL_YV12_OVERLAY,
1293                                    screen);
1294     if (!vp->bmp || vp->bmp->pitches[0] < vp->width) {
1295         /* SDL allocates a buffer smaller than requested if the video
1296          * overlay hardware is unable to support the requested size. */
1297         fprintf(stderr, "Error: the video system does not support an image\n"
1298                         "size of %dx%d pixels. Try using -lowres or -vf \"scale=w:h\"\n"
1299                         "to reduce the image size.\n", vp->width, vp->height );
1300         do_exit();
1301     }
1302
1303     SDL_LockMutex(is->pictq_mutex);
1304     vp->allocated = 1;
1305     SDL_CondSignal(is->pictq_cond);
1306     SDL_UnlockMutex(is->pictq_mutex);
1307 }
1308
1309 /**
1310  * Queue a decoded frame for display.
1311  * @param pts the pts of the frame (or the dts of the packet), guessed if not known
1312  */
1313 static int queue_picture(VideoState *is, AVFrame *src_frame, double pts, int64_t pos)
1314 {
1315     VideoPicture *vp;
1316 #if CONFIG_AVFILTER
1317     AVPicture pict_src;
1318 #else
1319     int dst_pix_fmt = PIX_FMT_YUV420P;
1320 #endif
1321     /* wait until we have space to put a new picture */
1322     SDL_LockMutex(is->pictq_mutex);
1323
1324     if(is->pictq_size>=VIDEO_PICTURE_QUEUE_SIZE && !is->refresh)
1325         is->skip_frames= FFMAX(1.0 - FRAME_SKIP_FACTOR, is->skip_frames * (1.0-FRAME_SKIP_FACTOR));
1326
1327     while (is->pictq_size >= VIDEO_PICTURE_QUEUE_SIZE &&
1328            !is->videoq.abort_request) {
1329         SDL_CondWait(is->pictq_cond, is->pictq_mutex);
1330     }
1331     SDL_UnlockMutex(is->pictq_mutex);
1332
1333     if (is->videoq.abort_request)
1334         return -1;
1335
1336     vp = &is->pictq[is->pictq_windex];
1337
1338     /* alloc or resize hardware picture buffer */
1339     if (!vp->bmp ||
1340 #if CONFIG_AVFILTER
1341         vp->width  != is->out_video_filter->inputs[0]->w ||
1342         vp->height != is->out_video_filter->inputs[0]->h) {
1343 #else
1344         vp->width != is->video_st->codec->width ||
1345         vp->height != is->video_st->codec->height) {
1346 #endif
1347         SDL_Event event;
1348
1349         vp->allocated = 0;
1350
1351         /* the allocation must be done in the main thread to avoid
1352            locking problems */
1353         event.type = FF_ALLOC_EVENT;
1354         event.user.data1 = is;
1355         SDL_PushEvent(&event);
1356
1357         /* wait until the picture is allocated */
1358         SDL_LockMutex(is->pictq_mutex);
1359         while (!vp->allocated && !is->videoq.abort_request) {
1360             SDL_CondWait(is->pictq_cond, is->pictq_mutex);
1361         }
1362         SDL_UnlockMutex(is->pictq_mutex);
1363
1364         if (is->videoq.abort_request)
1365             return -1;
1366     }
1367
1368     /* if the frame is not skipped, then display it */
1369     if (vp->bmp) {
1370         AVPicture pict;
1371 #if CONFIG_AVFILTER
1372         if(vp->picref)
1373             avfilter_unref_buffer(vp->picref);
1374         vp->picref = src_frame->opaque;
1375 #endif
1376
1377         /* get a pointer to the bitmap */
1378         SDL_LockYUVOverlay (vp->bmp);
1379
1380         memset(&pict,0,sizeof(AVPicture));
1381         pict.data[0] = vp->bmp->pixels[0];
1382         pict.data[1] = vp->bmp->pixels[2];
1383         pict.data[2] = vp->bmp->pixels[1];
1384
1385         pict.linesize[0] = vp->bmp->pitches[0];
1386         pict.linesize[1] = vp->bmp->pitches[2];
1387         pict.linesize[2] = vp->bmp->pitches[1];
1388
1389 #if CONFIG_AVFILTER
1390         pict_src.data[0] = src_frame->data[0];
1391         pict_src.data[1] = src_frame->data[1];
1392         pict_src.data[2] = src_frame->data[2];
1393
1394         pict_src.linesize[0] = src_frame->linesize[0];
1395         pict_src.linesize[1] = src_frame->linesize[1];
1396         pict_src.linesize[2] = src_frame->linesize[2];
1397
1398         //FIXME use direct rendering
1399         av_picture_copy(&pict, &pict_src,
1400                         vp->pix_fmt, vp->width, vp->height);
1401 #else
1402         sws_flags = av_get_int(sws_opts, "sws_flags", NULL);
1403         is->img_convert_ctx = sws_getCachedContext(is->img_convert_ctx,
1404             vp->width, vp->height, vp->pix_fmt, vp->width, vp->height,
1405             dst_pix_fmt, sws_flags, NULL, NULL, NULL);
1406         if (is->img_convert_ctx == NULL) {
1407             fprintf(stderr, "Cannot initialize the conversion context\n");
1408             exit(1);
1409         }
1410         sws_scale(is->img_convert_ctx, src_frame->data, src_frame->linesize,
1411                   0, vp->height, pict.data, pict.linesize);
1412 #endif
1413         /* update the bitmap content */
1414         SDL_UnlockYUVOverlay(vp->bmp);
1415
1416         vp->pts = pts;
1417         vp->pos = pos;
1418
1419         /* now we can update the picture count */
1420         if (++is->pictq_windex == VIDEO_PICTURE_QUEUE_SIZE)
1421             is->pictq_windex = 0;
1422         SDL_LockMutex(is->pictq_mutex);
1423         vp->target_clock= compute_target_time(vp->pts, is);
1424
1425         is->pictq_size++;
1426         SDL_UnlockMutex(is->pictq_mutex);
1427     }
1428     return 0;
1429 }
1430
1431 /**
1432  * compute the exact PTS for the picture if it is omitted in the stream
1433  * @param pts1 the dts of the pkt / pts of the frame
1434  */
1435 static int output_picture2(VideoState *is, AVFrame *src_frame, double pts1, int64_t pos)
1436 {
1437     double frame_delay, pts;
1438
1439     pts = pts1;
1440
1441     if (pts != 0) {
1442         /* update video clock with pts, if present */
1443         is->video_clock = pts;
1444     } else {
1445         pts = is->video_clock;
1446     }
1447     /* update video clock for next frame */
1448     frame_delay = av_q2d(is->video_st->codec->time_base);
1449     /* for MPEG2, the frame can be repeated, so we update the
1450        clock accordingly */
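    /* e.g. repeat_pict == 1 (typical for soft telecine) stretches the frame to
       1.5 frame periods, repeat_pict == 2 to two full periods. */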
1451     frame_delay += src_frame->repeat_pict * (frame_delay * 0.5);
1452     is->video_clock += frame_delay;
1453
1454     return queue_picture(is, src_frame, pts, pos);
1455 }
1456
1457 static int get_video_frame(VideoState *is, AVFrame *frame, int64_t *pts, AVPacket *pkt)
1458 {
1459     int len1, got_picture, i;
1460
1461     if (packet_queue_get(&is->videoq, pkt, 1) < 0)
1462         return -1;
1463
1464     if (pkt->data == flush_pkt.data) {
1465         avcodec_flush_buffers(is->video_st->codec);
1466
1467         SDL_LockMutex(is->pictq_mutex);
1468         // Make sure there are no long-delay timers (ideally we should just flush the queue, but that's harder)
1469         for (i = 0; i < VIDEO_PICTURE_QUEUE_SIZE; i++) {
1470             is->pictq[i].target_clock= 0;
1471         }
1472         while (is->pictq_size && !is->videoq.abort_request) {
1473             SDL_CondWait(is->pictq_cond, is->pictq_mutex);
1474         }
1475         is->video_current_pos = -1;
1476         SDL_UnlockMutex(is->pictq_mutex);
1477
1478         init_pts_correction(&is->pts_ctx);
1479         is->frame_last_pts = AV_NOPTS_VALUE;
1480         is->frame_last_delay = 0;
1481         is->frame_timer = (double)av_gettime() / 1000000.0;
1482         is->skip_frames = 1;
1483         is->skip_frames_index = 0;
1484         return 0;
1485     }
1486
1487     len1 = avcodec_decode_video2(is->video_st->codec,
1488                                  frame, &got_picture,
1489                                  pkt);
1490
1491     if (got_picture) {
1492         if (decoder_reorder_pts == -1) {
1493             *pts = guess_correct_pts(&is->pts_ctx, frame->pkt_pts, frame->pkt_dts);
1494         } else if (decoder_reorder_pts) {
1495             *pts = frame->pkt_pts;
1496         } else {
1497             *pts = frame->pkt_dts;
1498         }
1499
1500         if (*pts == AV_NOPTS_VALUE) {
1501             *pts = 0;
1502         }
1503
1504         is->skip_frames_index += 1;
1505         if(is->skip_frames_index >= is->skip_frames){
1506             is->skip_frames_index -= FFMAX(is->skip_frames, 1.0);
1507             return 1;
1508         }
1509
1510     }
1511     return 0;
1512 }
1513
1514 #if CONFIG_AVFILTER
1515 typedef struct {
1516     VideoState *is;
1517     AVFrame *frame;
1518     int use_dr1;
1519 } FilterPriv;
1520
1521 static int input_get_buffer(AVCodecContext *codec, AVFrame *pic)
1522 {
1523     AVFilterContext *ctx = codec->opaque;
1524     AVFilterBufferRef  *ref;
1525     int perms = AV_PERM_WRITE;
1526     int i, w, h, stride[4];
1527     unsigned edge;
1528     int pixel_size;
1529
1530     if (codec->codec->capabilities & CODEC_CAP_NEG_LINESIZES)
1531         perms |= AV_PERM_NEG_LINESIZES;
1532
1533     if(pic->buffer_hints & FF_BUFFER_HINTS_VALID) {
1534         if(pic->buffer_hints & FF_BUFFER_HINTS_READABLE) perms |= AV_PERM_READ;
1535         if(pic->buffer_hints & FF_BUFFER_HINTS_PRESERVE) perms |= AV_PERM_PRESERVE;
1536         if(pic->buffer_hints & FF_BUFFER_HINTS_REUSABLE) perms |= AV_PERM_REUSE2;
1537     }
1538     if(pic->reference) perms |= AV_PERM_READ | AV_PERM_PRESERVE;
1539
1540     w = codec->width;
1541     h = codec->height;
1542     avcodec_align_dimensions2(codec, &w, &h, stride);
1543     edge = codec->flags & CODEC_FLAG_EMU_EDGE ? 0 : avcodec_get_edge_width();
1544     w += edge << 1;
1545     h += edge << 1;
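    /* The buffer is requested with an extra 'edge' pixels of border on every
       side; the data pointers are then offset past that border below, so the
       decoder sees the visible area while still being able to write the
       emulated edges around it. */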
1546
1547     if(!(ref = avfilter_get_video_buffer(ctx->outputs[0], perms, w, h)))
1548         return -1;
1549
1550     pixel_size = av_pix_fmt_descriptors[ref->format].comp[0].step_minus1+1;
1551     ref->video->w = codec->width;
1552     ref->video->h = codec->height;
1553     for(i = 0; i < 4; i ++) {
1554         unsigned hshift = (i == 1 || i == 2) ? av_pix_fmt_descriptors[ref->format].log2_chroma_w : 0;
1555         unsigned vshift = (i == 1 || i == 2) ? av_pix_fmt_descriptors[ref->format].log2_chroma_h : 0;
1556
1557         if (ref->data[i]) {
1558             ref->data[i]    += ((edge * pixel_size) >> hshift) + ((edge * ref->linesize[i]) >> vshift);
1559         }
1560         pic->data[i]     = ref->data[i];
1561         pic->linesize[i] = ref->linesize[i];
1562     }
1563     pic->opaque = ref;
1564     pic->age    = INT_MAX;
1565     pic->type   = FF_BUFFER_TYPE_USER;
1566     pic->reordered_opaque = codec->reordered_opaque;
1567     if(codec->pkt) pic->pkt_pts = codec->pkt->pts;
1568     else           pic->pkt_pts = AV_NOPTS_VALUE;
1569     return 0;
1570 }
1571
1572 static void input_release_buffer(AVCodecContext *codec, AVFrame *pic)
1573 {
1574     memset(pic->data, 0, sizeof(pic->data));
1575     avfilter_unref_buffer(pic->opaque);
1576 }
1577
1578 static int input_reget_buffer(AVCodecContext *codec, AVFrame *pic)
1579 {
1580     AVFilterBufferRef *ref = pic->opaque;
1581
1582     if (pic->data[0] == NULL) {
1583         pic->buffer_hints |= FF_BUFFER_HINTS_READABLE;
1584         return codec->get_buffer(codec, pic);
1585     }
1586
1587     if ((codec->width != ref->video->w) || (codec->height != ref->video->h) ||
1588         (codec->pix_fmt != ref->format)) {
1589         av_log(codec, AV_LOG_ERROR, "Picture properties changed.\n");
1590         return -1;
1591     }
1592
1593     pic->reordered_opaque = codec->reordered_opaque;
1594     if(codec->pkt) pic->pkt_pts = codec->pkt->pts;
1595     else           pic->pkt_pts = AV_NOPTS_VALUE;
1596     return 0;
1597 }
1598
1599 static int input_init(AVFilterContext *ctx, const char *args, void *opaque)
1600 {
1601     FilterPriv *priv = ctx->priv;
1602     AVCodecContext *codec;
1603     if(!opaque) return -1;
1604
1605     priv->is = opaque;
1606     codec    = priv->is->video_st->codec;
1607     codec->opaque = ctx;
1608     if(codec->codec->capabilities & CODEC_CAP_DR1) {
1609         priv->use_dr1 = 1;
1610         codec->get_buffer     = input_get_buffer;
1611         codec->release_buffer = input_release_buffer;
1612         codec->reget_buffer   = input_reget_buffer;
1613         codec->thread_safe_callbacks = 1;
1614     }
1615
1616     priv->frame = avcodec_alloc_frame();
1617
1618     return 0;
1619 }
1620
1621 static void input_uninit(AVFilterContext *ctx)
1622 {
1623     FilterPriv *priv = ctx->priv;
1624     av_free(priv->frame);
1625 }
1626
1627 static int input_request_frame(AVFilterLink *link)
1628 {
1629     FilterPriv *priv = link->src->priv;
1630     AVFilterBufferRef *picref;
1631     int64_t pts = 0;
1632     AVPacket pkt;
1633     int ret;
1634
1635     while (!(ret = get_video_frame(priv->is, priv->frame, &pts, &pkt)))
1636         av_free_packet(&pkt);
1637     if (ret < 0)
1638         return -1;
1639
1640     if(priv->use_dr1) {
1641         picref = avfilter_ref_buffer(priv->frame->opaque, ~0);
1642     } else {
1643         picref = avfilter_get_video_buffer(link, AV_PERM_WRITE, link->w, link->h);
1644         av_image_copy(picref->data, picref->linesize,
1645                       priv->frame->data, priv->frame->linesize,
1646                       picref->format, link->w, link->h);
1647     }
1648     av_free_packet(&pkt);
1649
1650     picref->pts = pts;
1651     picref->pos = pkt.pos;
1652     picref->video->pixel_aspect = priv->is->video_st->codec->sample_aspect_ratio;
1653     avfilter_start_frame(link, picref);
1654     avfilter_draw_slice(link, 0, link->h, 1);
1655     avfilter_end_frame(link);
1656
1657     return 0;
1658 }
1659
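/* The source filter offers only the decoder's current pixel format to the
   rest of the graph. */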
1660 static int input_query_formats(AVFilterContext *ctx)
1661 {
1662     FilterPriv *priv = ctx->priv;
1663     enum PixelFormat pix_fmts[] = {
1664         priv->is->video_st->codec->pix_fmt, PIX_FMT_NONE
1665     };
1666
1667     avfilter_set_common_formats(ctx, avfilter_make_format_list(pix_fmts));
1668     return 0;
1669 }
1670
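/* Propagate the decoder's frame dimensions and the stream time base to the
   source filter's output link. */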
1671 static int input_config_props(AVFilterLink *link)
1672 {
1673     FilterPriv *priv  = link->src->priv;
1674     AVCodecContext *c = priv->is->video_st->codec;
1675
1676     link->w = c->width;
1677     link->h = c->height;
1678     link->time_base = priv->is->video_st->time_base;
1679
1680     return 0;
1681 }
1682
1683 static AVFilter input_filter =
1684 {
1685     .name      = "ffplay_input",
1686
1687     .priv_size = sizeof(FilterPriv),
1688
1689     .init      = input_init,
1690     .uninit    = input_uninit,
1691
1692     .query_formats = input_query_formats,
1693
1694     .inputs    = (AVFilterPad[]) {{ .name = NULL }},
1695     .outputs   = (AVFilterPad[]) {{ .name = "default",
1696                                     .type = AVMEDIA_TYPE_VIDEO,
1697                                     .request_frame = input_request_frame,
1698                                     .config_props  = input_config_props, },
1699                                   { .name = NULL }},
1700 };
1701
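/* Build the video filter graph:
 *     ffplay_input ("src") -> [optional user filter chain] -> ffsink ("out")
 * The user chain comes from the -vf option string; when it is absent the
 * source is linked directly to the sink.  sws_flags is passed along so that
 * any automatically inserted scalers use the same scaling algorithm. */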
1702 static int configure_video_filters(AVFilterGraph *graph, VideoState *is, const char *vfilters)
1703 {
1704     char sws_flags_str[128];
1705     int ret;
1706     FFSinkContext ffsink_ctx = { .pix_fmt = PIX_FMT_YUV420P };
1707     AVFilterContext *filt_src = NULL, *filt_out = NULL;
1708     snprintf(sws_flags_str, sizeof(sws_flags_str), "flags=%d", sws_flags);
1709     graph->scale_sws_opts = av_strdup(sws_flags_str);
1710
1711     if ((ret = avfilter_graph_create_filter(&filt_src, &input_filter, "src",
1712                                             NULL, is, graph)) < 0)
1713         goto the_end;
1714     if ((ret = avfilter_graph_create_filter(&filt_out, &ffsink, "out",
1715                                             NULL, &ffsink_ctx, graph)) < 0)
1716         goto the_end;
1717
1718     if(vfilters) {
1719         AVFilterInOut *outputs = av_malloc(sizeof(AVFilterInOut));
1720         AVFilterInOut *inputs  = av_malloc(sizeof(AVFilterInOut));
1721
1722         outputs->name    = av_strdup("in");
1723         outputs->filter_ctx = filt_src;
1724         outputs->pad_idx = 0;
1725         outputs->next    = NULL;
1726
1727         inputs->name    = av_strdup("out");
1728         inputs->filter_ctx = filt_out;
1729         inputs->pad_idx = 0;
1730         inputs->next    = NULL;
1731
1732         if ((ret = avfilter_graph_parse(graph, vfilters, inputs, outputs, NULL)) < 0)
1733             goto the_end;
1734         av_freep(&vfilters);
1735     } else {
1736         if ((ret = avfilter_link(filt_src, 0, filt_out, 0)) < 0)
1737             goto the_end;
1738     }
1739
1740     if ((ret = avfilter_graph_config(graph, NULL)) < 0)
1741         goto the_end;
1742
1743     is->out_video_filter = filt_out;
1744 the_end:
1745     return ret;
1746 }
1747
1748 #endif  /* CONFIG_AVFILTER */
1749
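/* Video decoding thread: repeatedly fetch decoded frames (directly from the
   decoder, or through the filter graph when avfilter is enabled), convert
   their PTS to seconds using the stream time base and hand them to the
   picture queue via output_picture2().  When single-stepping, the stream is
   paused again after each frame. */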
1750 static int video_thread(void *arg)
1751 {
1752     VideoState *is = arg;
1753     AVFrame *frame= avcodec_alloc_frame();
1754     int64_t pts_int;
1755     double pts;
1756     int ret;
1757
1758 #if CONFIG_AVFILTER
1759     AVFilterGraph *graph = avfilter_graph_alloc();
1760     AVFilterContext *filt_out = NULL;
1761     int64_t pos;
1762
1763     if ((ret = configure_video_filters(graph, is, vfilters)) < 0)
1764         goto the_end;
1765     filt_out = is->out_video_filter;
1766 #endif
1767
1768     for(;;) {
1769 #if !CONFIG_AVFILTER
1770         AVPacket pkt;
1771 #else
1772         AVFilterBufferRef *picref;
1773         AVRational tb;
1774 #endif
1775         while (is->paused && !is->videoq.abort_request)
1776             SDL_Delay(10);
1777 #if CONFIG_AVFILTER
1778         ret = get_filtered_video_frame(filt_out, frame, &picref, &tb);
1779         if (picref) {
1780             pts_int = picref->pts;
1781             pos     = picref->pos;
1782             frame->opaque = picref;
1783         }
1784
1785         if (av_cmp_q(tb, is->video_st->time_base)) {
1786             av_unused int64_t pts1 = pts_int;
1787             pts_int = av_rescale_q(pts_int, tb, is->video_st->time_base);
1788             av_dlog(NULL, "video_thread(): "
1789                     "tb:%d/%d pts:%"PRId64" -> tb:%d/%d pts:%"PRId64"\n",
1790                     tb.num, tb.den, pts1,
1791                     is->video_st->time_base.num, is->video_st->time_base.den, pts_int);
1792         }
1793 #else
1794         ret = get_video_frame(is, frame, &pts_int, &pkt);
1795 #endif
1796
1797         if (ret < 0) goto the_end;
1798
1799         if (!ret)
1800             continue;
1801
1802         pts = pts_int*av_q2d(is->video_st->time_base);
1803
1804 #if CONFIG_AVFILTER
1805         ret = output_picture2(is, frame, pts, pos);
1806 #else
1807         ret = output_picture2(is, frame, pts,  pkt.pos);
1808         av_free_packet(&pkt);
1809 #endif
1810         if (ret < 0)
1811             goto the_end;
1812
1813         if (step)
1814             if (cur_stream)
1815                 stream_pause(cur_stream);
1816     }
1817  the_end:
1818 #if CONFIG_AVFILTER
1819     avfilter_graph_free(&graph);
1820 #endif
1821     av_free(frame);
1822     return 0;
1823 }
1824
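/* Subtitle decoding thread: wait for room in the subpicture queue, decode
   each subtitle packet and, for bitmap subtitles (format 0), convert the
   RGBA palette entries to YUVA so they can later be blended into the YUV
   overlay. */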
1825 static int subtitle_thread(void *arg)
1826 {
1827     VideoState *is = arg;
1828     SubPicture *sp;
1829     AVPacket pkt1, *pkt = &pkt1;
1830     int len1, got_subtitle;
1831     double pts;
1832     int i, j;
1833     int r, g, b, y, u, v, a;
1834
1835     for(;;) {
1836         while (is->paused && !is->subtitleq.abort_request) {
1837             SDL_Delay(10);
1838         }
1839         if (packet_queue_get(&is->subtitleq, pkt, 1) < 0)
1840             break;
1841
1842         if(pkt->data == flush_pkt.data){
1843             avcodec_flush_buffers(is->subtitle_st->codec);
1844             continue;
1845         }
1846         SDL_LockMutex(is->subpq_mutex);
1847         while (is->subpq_size >= SUBPICTURE_QUEUE_SIZE &&
1848                !is->subtitleq.abort_request) {
1849             SDL_CondWait(is->subpq_cond, is->subpq_mutex);
1850         }
1851         SDL_UnlockMutex(is->subpq_mutex);
1852
1853         if (is->subtitleq.abort_request)
1854             goto the_end;
1855
1856         sp = &is->subpq[is->subpq_windex];
1857
1858         /* NOTE: pts is the PTS of the _first_ subtitle beginning in
1859            this packet, if any */
1860         pts = 0;
1861         if (pkt->pts != AV_NOPTS_VALUE)
1862             pts = av_q2d(is->subtitle_st->time_base)*pkt->pts;
1863
1864         len1 = avcodec_decode_subtitle2(is->subtitle_st->codec,
1865                                     &sp->sub, &got_subtitle,
1866                                     pkt);
1867         if (got_subtitle && sp->sub.format == 0) {
1868             sp->pts = pts;
1869
1870             for (i = 0; i < sp->sub.num_rects; i++)
1871             {
1872                 for (j = 0; j < sp->sub.rects[i]->nb_colors; j++)
1873                 {
1874                     RGBA_IN(r, g, b, a, (uint32_t*)sp->sub.rects[i]->pict.data[1] + j);
1875                     y = RGB_TO_Y_CCIR(r, g, b);
1876                     u = RGB_TO_U_CCIR(r, g, b, 0);
1877                     v = RGB_TO_V_CCIR(r, g, b, 0);
1878                     YUVA_OUT((uint32_t*)sp->sub.rects[i]->pict.data[1] + j, y, u, v, a);
1879                 }
1880             }
1881
1882             /* now we can update the subpicture count */
1883             if (++is->subpq_windex == SUBPICTURE_QUEUE_SIZE)
1884                 is->subpq_windex = 0;
1885             SDL_LockMutex(is->subpq_mutex);
1886             is->subpq_size++;
1887             SDL_UnlockMutex(is->subpq_mutex);
1888         }
1889         av_free_packet(pkt);
1890     }
1891  the_end:
1892     return 0;
1893 }
1894
1895 /* copy samples for the audio visualization (waveform / spectrum) display */
1896 static void update_sample_display(VideoState *is, short *samples, int samples_size)
1897 {
1898     int size, len;
1899
1900     size = samples_size / sizeof(short);
1901     while (size > 0) {
1902         len = SAMPLE_ARRAY_SIZE - is->sample_array_index;
1903         if (len > size)
1904             len = size;
1905         memcpy(is->sample_array + is->sample_array_index, samples, len * sizeof(short));
1906         samples += len;
1907         is->sample_array_index += len;
1908         if (is->sample_array_index >= SAMPLE_ARRAY_SIZE)
1909             is->sample_array_index = 0;
1910         size -= len;
1911     }
1912 }
1913
1914 /* return the new audio buffer size (samples can be added or deleted
1915    to get better sync when the video or external clock is the master) */
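/* The correction is proportional to the averaged clock error:
 *     wanted_size = samples_size + diff * sample_rate * n
 * where n (= 2 * channels) is the size in bytes of one S16 sample frame,
 * clamped to +/- SAMPLE_CORRECTION_PERCENT_MAX percent of the buffer.  For
 * example, a 20 ms error on a stereo 44100 Hz S16 stream corresponds to
 * about 0.02 * 44100 * 4 = 3528 bytes of correction, subject to the 10%
 * clamp. */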
1916 static int synchronize_audio(VideoState *is, short *samples,
1917                              int samples_size1, double pts)
1918 {
1919     int n, samples_size;
1920     double ref_clock;
1921
1922     n = 2 * is->audio_st->codec->channels;
1923     samples_size = samples_size1;
1924
1925     /* if not master, then we try to remove or add samples to correct the clock */
1926     if (((is->av_sync_type == AV_SYNC_VIDEO_MASTER && is->video_st) ||
1927          is->av_sync_type == AV_SYNC_EXTERNAL_CLOCK)) {
1928         double diff, avg_diff;
1929         int wanted_size, min_size, max_size, nb_samples;
1930
1931         ref_clock = get_master_clock(is);
1932         diff = get_audio_clock(is) - ref_clock;
1933
1934         if (diff < AV_NOSYNC_THRESHOLD) {
1935             is->audio_diff_cum = diff + is->audio_diff_avg_coef * is->audio_diff_cum;
1936             if (is->audio_diff_avg_count < AUDIO_DIFF_AVG_NB) {
1937                 /* not enough measures to have a correct estimate */
1938                 is->audio_diff_avg_count++;
1939             } else {
1940                 /* estimate the A-V difference */
1941                 avg_diff = is->audio_diff_cum * (1.0 - is->audio_diff_avg_coef);
1942
1943                 if (fabs(avg_diff) >= is->audio_diff_threshold) {
1944                     wanted_size = samples_size + ((int)(diff * is->audio_st->codec->sample_rate) * n);
1945                     nb_samples = samples_size / n;
1946
1947                     min_size = ((nb_samples * (100 - SAMPLE_CORRECTION_PERCENT_MAX)) / 100) * n;
1948                     max_size = ((nb_samples * (100 + SAMPLE_CORRECTION_PERCENT_MAX)) / 100) * n;
1949                     if (wanted_size < min_size)
1950                         wanted_size = min_size;
1951                     else if (wanted_size > max_size)
1952                         wanted_size = max_size;
1953
1954                     /* add or remove samples to correct the sync */
1955                     if (wanted_size < samples_size) {
1956                         /* remove samples */
1957                         samples_size = wanted_size;
1958                     } else if (wanted_size > samples_size) {
1959                         uint8_t *samples_end, *q;
1960                         int nb;
1961
1962                         /* add samples by duplicating the last sample frame */
1963                         nb = (wanted_size - samples_size);
1964                         samples_end = (uint8_t *)samples + samples_size - n;
1965                         q = samples_end + n;
1966                         while (nb > 0) {
1967                             memcpy(q, samples_end, n);
1968                             q += n;
1969                             nb -= n;
1970                         }
1971                         samples_size = wanted_size;
1972                     }
1973                 }
1974                 av_dlog(NULL, "diff=%f adiff=%f sample_diff=%d apts=%0.3f vpts=%0.3f %f\n",
1975                         diff, avg_diff, samples_size - samples_size1,
1976                         is->audio_clock, is->video_clock, is->audio_diff_threshold);
1977             }
1978         } else {
1979             /* too big difference : may be initial PTS errors, so
1980                reset A-V filter */
1981             is->audio_diff_avg_count = 0;
1982             is->audio_diff_cum = 0;
1983         }
1984     }
1985
1986     return samples_size;
1987 }
1988
1989 /* decode one audio frame and return its uncompressed size */
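/* The decoder output is converted to signed 16-bit with av_audio_convert()
   whenever the codec produces another sample format, so the SDL callback
   always receives interleaved S16 data.  The audio clock is advanced by the
   duration of each decoded buffer and resynchronized from the packet pts
   whenever one is available. */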
1990 static int audio_decode_frame(VideoState *is, double *pts_ptr)
1991 {
1992     AVPacket *pkt_temp = &is->audio_pkt_temp;
1993     AVPacket *pkt = &is->audio_pkt;
1994     AVCodecContext *dec= is->audio_st->codec;
1995     int n, len1, data_size;
1996     double pts;
1997
1998     for(;;) {
1999         /* NOTE: the audio packet can contain several frames */
2000         while (pkt_temp->size > 0) {
2001             data_size = sizeof(is->audio_buf1);
2002             len1 = avcodec_decode_audio3(dec,
2003                                         (int16_t *)is->audio_buf1, &data_size,
2004                                         pkt_temp);
2005             if (len1 < 0) {
2006                 /* if error, we skip the frame */
2007                 pkt_temp->size = 0;
2008                 break;
2009             }
2010
2011             pkt_temp->data += len1;
2012             pkt_temp->size -= len1;
2013             if (data_size <= 0)
2014                 continue;
2015
2016             if (dec->sample_fmt != is->audio_src_fmt) {
2017                 if (is->reformat_ctx)
2018                     av_audio_convert_free(is->reformat_ctx);
2019                 is->reformat_ctx= av_audio_convert_alloc(AV_SAMPLE_FMT_S16, 1,
2020                                                          dec->sample_fmt, 1, NULL, 0);
2021                 if (!is->reformat_ctx) {
2022                     fprintf(stderr, "Cannot convert %s sample format to %s sample format\n",
2023                         av_get_sample_fmt_name(dec->sample_fmt),
2024                         av_get_sample_fmt_name(AV_SAMPLE_FMT_S16));
2025                         break;
2026                 }
2027                 is->audio_src_fmt= dec->sample_fmt;
2028             }
2029
2030             if (is->reformat_ctx) {
2031                 const void *ibuf[6]= {is->audio_buf1};
2032                 void *obuf[6]= {is->audio_buf2};
2033                 int istride[6]= {av_get_bytes_per_sample(dec->sample_fmt)};
2034                 int ostride[6]= {2};
2035                 int len= data_size/istride[0];
2036                 if (av_audio_convert(is->reformat_ctx, obuf, ostride, ibuf, istride, len)<0) {
2037                     printf("av_audio_convert() failed\n");
2038                     break;
2039                 }
2040                 is->audio_buf= is->audio_buf2;
2041                 /* FIXME: existing code assumes that data_size equals framesize*channels*2;
2042                           remove this legacy cruft */
2043                 data_size= len*2;
2044             }else{
2045                 is->audio_buf= is->audio_buf1;
2046             }
2047
2048             /* the pts is derived from the running audio clock */
2049             pts = is->audio_clock;
2050             *pts_ptr = pts;
2051             n = 2 * dec->channels;
2052             is->audio_clock += (double)data_size /
2053                 (double)(n * dec->sample_rate);
2054 #ifdef DEBUG
2055             {
2056                 static double last_clock;
2057                 printf("audio: delay=%0.3f clock=%0.3f pts=%0.3f\n",
2058                        is->audio_clock - last_clock,
2059                        is->audio_clock, pts);
2060                 last_clock = is->audio_clock;
2061             }
2062 #endif
2063             return data_size;
2064         }
2065
2066         /* free the current packet */
2067         if (pkt->data)
2068             av_free_packet(pkt);
2069
2070         if (is->paused || is->audioq.abort_request) {
2071             return -1;
2072         }
2073
2074         /* read next packet */
2075         if (packet_queue_get(&is->audioq, pkt, 1) < 0)
2076             return -1;
2077         if(pkt->data == flush_pkt.data){
2078             avcodec_flush_buffers(dec);
2079             continue;
2080         }
2081
2082         pkt_temp->data = pkt->data;
2083         pkt_temp->size = pkt->size;
2084
2085         /* update the audio clock with the packet pts, if available */
2086         if (pkt->pts != AV_NOPTS_VALUE) {
2087             is->audio_clock = av_q2d(is->audio_st->time_base)*pkt->pts;
2088         }
2089     }
2090 }
2091
2092 /* prepare a new audio buffer */
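/* SDL calls this from its own audio thread whenever it needs 'len' more
   bytes.  The buffer is refilled from audio_decode_frame(), passed through
   synchronize_audio() for clock correction, and silence is emitted when
   decoding fails so the device always receives data. */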
2093 static void sdl_audio_callback(void *opaque, Uint8 *stream, int len)
2094 {
2095     VideoState *is = opaque;
2096     int audio_size, len1;
2097     double pts;
2098
2099     audio_callback_time = av_gettime();
2100
2101     while (len > 0) {
2102         if (is->audio_buf_index >= is->audio_buf_size) {
2103            audio_size = audio_decode_frame(is, &pts);
2104            if (audio_size < 0) {
2105                 /* if error, just output silence */
2106                is->audio_buf = is->audio_buf1;
2107                is->audio_buf_size = 1024;
2108                memset(is->audio_buf, 0, is->audio_buf_size);
2109            } else {
2110                if (is->show_audio)
2111                    update_sample_display(is, (int16_t *)is->audio_buf, audio_size);
2112                audio_size = synchronize_audio(is, (int16_t *)is->audio_buf, audio_size,
2113                                               pts);
2114                is->audio_buf_size = audio_size;
2115            }
2116            is->audio_buf_index = 0;
2117         }
2118         len1 = is->audio_buf_size - is->audio_buf_index;
2119         if (len1 > len)
2120             len1 = len;
2121         memcpy(stream, (uint8_t *)is->audio_buf + is->audio_buf_index, len1);
2122         len -= len1;
2123         stream += len1;
2124         is->audio_buf_index += len1;
2125     }
2126 }
2127
2128 /* open a given stream. Return 0 if OK */
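/* For audio streams this also opens the SDL audio device and starts the
   callback; for video and subtitle streams it spawns the corresponding
   decoding thread.  Each opened component gets its own packet queue. */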
2129 static int stream_component_open(VideoState *is, int stream_index)
2130 {
2131     AVFormatContext *ic = is->ic;
2132     AVCodecContext *avctx;
2133     AVCodec *codec;
2134     SDL_AudioSpec wanted_spec, spec;
2135
2136     if (stream_index < 0 || stream_index >= ic->nb_streams)
2137         return -1;
2138     avctx = ic->streams[stream_index]->codec;
2139
2140     /* limit the decoder to at most two channels for the audio output */
2141     if (avctx->codec_type == AVMEDIA_TYPE_AUDIO) {
2142         if (avctx->channels > 0) {
2143             avctx->request_channels = FFMIN(2, avctx->channels);
2144         } else {
2145             avctx->request_channels = 2;
2146         }
2147     }
2148
2149     codec = avcodec_find_decoder(avctx->codec_id);
2150     avctx->debug_mv = debug_mv;
2151     avctx->debug = debug;
2152     avctx->workaround_bugs = workaround_bugs;
2153     avctx->lowres = lowres;
2154     if(lowres) avctx->flags |= CODEC_FLAG_EMU_EDGE;
2155     avctx->idct_algo= idct;
2156     if(fast) avctx->flags2 |= CODEC_FLAG2_FAST;
2157     avctx->skip_frame= skip_frame;
2158     avctx->skip_idct= skip_idct;
2159     avctx->skip_loop_filter= skip_loop_filter;
2160     avctx->error_recognition= error_recognition;
2161     avctx->error_concealment= error_concealment;
2162     avctx->thread_count= thread_count;
2163
2164     set_context_opts(avctx, avcodec_opts[avctx->codec_type], 0, codec);
2165
2166     if (!codec ||
2167         avcodec_open(avctx, codec) < 0)
2168         return -1;
2169
2170     /* prepare audio output */
2171     if (avctx->codec_type == AVMEDIA_TYPE_AUDIO) {
2172         wanted_spec.freq = avctx->sample_rate;
2173         wanted_spec.format = AUDIO_S16SYS;
2174         wanted_spec.channels = avctx->channels;
2175         wanted_spec.silence = 0;
2176         wanted_spec.samples = SDL_AUDIO_BUFFER_SIZE;
2177         wanted_spec.callback = sdl_audio_callback;
2178         wanted_spec.userdata = is;
2179         if (SDL_OpenAudio(&wanted_spec, &spec) < 0) {
2180             fprintf(stderr, "SDL_OpenAudio: %s\n", SDL_GetError());
2181             return -1;
2182         }
2183         is->audio_hw_buf_size = spec.size;
2184         is->audio_src_fmt= AV_SAMPLE_FMT_S16;
2185     }
2186
2187     ic->streams[stream_index]->discard = AVDISCARD_DEFAULT;
2188     switch(avctx->codec_type) {
2189     case AVMEDIA_TYPE_AUDIO:
2190         is->audio_stream = stream_index;
2191         is->audio_st = ic->streams[stream_index];
2192         is->audio_buf_size = 0;
2193         is->audio_buf_index = 0;
2194
2195         /* init averaging filter */
2196         is->audio_diff_avg_coef = exp(log(0.01) / AUDIO_DIFF_AVG_NB);
2197         is->audio_diff_avg_count = 0;
2198         /* since we do not have a precise enough audio FIFO fullness measure,
2199            we only correct audio sync if the error is larger than this threshold */
2200         is->audio_diff_threshold = 2.0 * SDL_AUDIO_BUFFER_SIZE / avctx->sample_rate;
2201
2202         memset(&is->audio_pkt, 0, sizeof(is->audio_pkt));
2203         packet_queue_init(&is->audioq);
2204         SDL_PauseAudio(0);
2205         break;
2206     case AVMEDIA_TYPE_VIDEO:
2207         is->video_stream = stream_index;
2208         is->video_st = ic->streams[stream_index];
2209
2210         packet_queue_init(&is->videoq);
2211         is->video_tid = SDL_CreateThread(video_thread, is);
2212         break;
2213     case AVMEDIA_TYPE_SUBTITLE:
2214         is->subtitle_stream = stream_index;
2215         is->subtitle_st = ic->streams[stream_index];
2216         packet_queue_init(&is->subtitleq);
2217
2218         is->subtitle_tid = SDL_CreateThread(subtitle_thread, is);
2219         break;
2220     default:
2221         break;
2222     }
2223     return 0;
2224 }
2225
2226 static void stream_component_close(VideoState *is, int stream_index)
2227 {
2228     AVFormatContext *ic = is->ic;
2229     AVCodecContext *avctx;
2230
2231     if (stream_index < 0 || stream_index >= ic->nb_streams)
2232         return;
2233     avctx = ic->streams[stream_index]->codec;
2234
2235     switch(avctx->codec_type) {
2236     case AVMEDIA_TYPE_AUDIO:
2237         packet_queue_abort(&is->audioq);
2238
2239         SDL_CloseAudio();
2240
2241         packet_queue_end(&is->audioq);
2242         if (is->reformat_ctx)
2243             av_audio_convert_free(is->reformat_ctx);
2244         is->reformat_ctx = NULL;
2245         break;
2246     case AVMEDIA_TYPE_VIDEO:
2247         packet_queue_abort(&is->videoq);
2248
2249         /* note: we also signal this condition to make sure we unblock the
2250            video thread in all cases */
2251         SDL_LockMutex(is->pictq_mutex);
2252         SDL_CondSignal(is->pictq_cond);
2253         SDL_UnlockMutex(is->pictq_mutex);
2254
2255         SDL_WaitThread(is->video_tid, NULL);
2256
2257         packet_queue_end(&is->videoq);
2258         break;
2259     case AVMEDIA_TYPE_SUBTITLE:
2260         packet_queue_abort(&is->subtitleq);
2261
2262         /* note: we also signal this condition to make sure we unblock the
2263            subtitle thread in all cases */
2264         SDL_LockMutex(is->subpq_mutex);
2265         is->subtitle_stream_changed = 1;
2266
2267         SDL_CondSignal(is->subpq_cond);
2268         SDL_UnlockMutex(is->subpq_mutex);
2269
2270         SDL_WaitThread(is->subtitle_tid, NULL);
2271
2272         packet_queue_end(&is->subtitleq);
2273         break;
2274     default:
2275         break;
2276     }
2277
2278     ic->streams[stream_index]->discard = AVDISCARD_ALL;
2279     avcodec_close(avctx);
2280     switch(avctx->codec_type) {
2281     case AVMEDIA_TYPE_AUDIO:
2282         is->audio_st = NULL;
2283         is->audio_stream = -1;
2284         break;
2285     case AVMEDIA_TYPE_VIDEO:
2286         is->video_st = NULL;
2287         is->video_stream = -1;
2288         break;
2289     case AVMEDIA_TYPE_SUBTITLE:
2290         is->subtitle_st = NULL;
2291         is->subtitle_stream = -1;
2292         break;
2293     default:
2294         break;
2295     }
2296 }
2297
2298 /* since we have only one decoding thread, we can use a global
2299    variable instead of a thread local variable */
2300 static VideoState *global_video_state;
2301
2302 static int decode_interrupt_cb(void)
2303 {
2304     return (global_video_state && global_video_state->abort_request);
2305 }
2306
2307 /* this thread gets the stream from the disk or the network */
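/* It opens the input, selects the "best" audio, video and subtitle streams,
   opens the corresponding components, then loops reading packets and
   dispatching them to the per-stream packet queues.  Seek requests,
   pause/resume and end-of-file looping are handled here, so the decoding
   threads only ever deal with their packet queues. */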
2308 static int decode_thread(void *arg)
2309 {
2310     VideoState *is = arg;
2311     AVFormatContext *ic = NULL;
2312     int err, i, ret;
2313     int st_index[AVMEDIA_TYPE_NB];
2314     AVPacket pkt1, *pkt = &pkt1;
2315     int eof=0;
2316     int pkt_in_play_range = 0;
2317     AVDictionaryEntry *t;
2318
2319     memset(st_index, -1, sizeof(st_index));
2320     is->video_stream = -1;
2321     is->audio_stream = -1;
2322     is->subtitle_stream = -1;
2323
2324     global_video_state = is;
2325     avio_set_interrupt_cb(decode_interrupt_cb);
2326
2327     err = avformat_open_input(&ic, is->filename, is->iformat, &format_opts);
2328     if (err < 0) {
2329         print_error(is->filename, err);
2330         ret = -1;
2331         goto fail;
2332     }
2333     if ((t = av_dict_get(format_opts, "", NULL, AV_DICT_IGNORE_SUFFIX))) {
2334         av_log(NULL, AV_LOG_ERROR, "Option %s not found.\n", t->key);
2335         ret = AVERROR_OPTION_NOT_FOUND;
2336         goto fail;
2337     }
2338     is->ic = ic;
2339
2340     if(genpts)
2341         ic->flags |= AVFMT_FLAG_GENPTS;
2342
2343     /* Set AVCodecContext options so they will be seen by av_find_stream_info() */
2344     for (i = 0; i < ic->nb_streams; i++) {
2345         AVCodecContext *dec = ic->streams[i]->codec;
2346         switch (dec->codec_type) {
2347         case AVMEDIA_TYPE_AUDIO:
2348             set_context_opts(dec, avcodec_opts[AVMEDIA_TYPE_AUDIO],
2349                              AV_OPT_FLAG_AUDIO_PARAM | AV_OPT_FLAG_DECODING_PARAM,
2350                              NULL);
2351             break;
2352         case AVMEDIA_TYPE_VIDEO:
2353             set_context_opts(dec, avcodec_opts[AVMEDIA_TYPE_VIDEO],
2354                              AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_DECODING_PARAM,
2355                              NULL);
2356             break;
2357         }
2358     }
2359
2360     err = av_find_stream_info(ic);
2361     if (err < 0) {
2362         fprintf(stderr, "%s: could not find codec parameters\n", is->filename);
2363         ret = -1;
2364         goto fail;
2365     }
2366     if(ic->pb)
2367         ic->pb->eof_reached= 0; //FIXME hack, ffplay maybe should not use url_feof() to test for the end
2368
2369     if(seek_by_bytes<0)
2370         seek_by_bytes= !!(ic->iformat->flags & AVFMT_TS_DISCONT);
2371
2372     /* if a start time was requested, seek to it */
2373     if (start_time != AV_NOPTS_VALUE) {
2374         int64_t timestamp;
2375
2376         timestamp = start_time;
2377         /* add the stream start time */
2378         if (ic->start_time != AV_NOPTS_VALUE)
2379             timestamp += ic->start_time;
2380         ret = avformat_seek_file(ic, -1, INT64_MIN, timestamp, INT64_MAX, 0);
2381         if (ret < 0) {
2382             fprintf(stderr, "%s: could not seek to position %0.3f\n",
2383                     is->filename, (double)timestamp / AV_TIME_BASE);
2384         }
2385     }
2386
2387     for (i = 0; i < ic->nb_streams; i++)
2388         ic->streams[i]->discard = AVDISCARD_ALL;
2389     if (!video_disable)
2390         st_index[AVMEDIA_TYPE_VIDEO] =
2391             av_find_best_stream(ic, AVMEDIA_TYPE_VIDEO,
2392                                 wanted_stream[AVMEDIA_TYPE_VIDEO], -1, NULL, 0);
2393     if (!audio_disable)
2394         st_index[AVMEDIA_TYPE_AUDIO] =
2395             av_find_best_stream(ic, AVMEDIA_TYPE_AUDIO,
2396                                 wanted_stream[AVMEDIA_TYPE_AUDIO],
2397                                 st_index[AVMEDIA_TYPE_VIDEO],
2398                                 NULL, 0);
2399     if (!video_disable)
2400         st_index[AVMEDIA_TYPE_SUBTITLE] =
2401             av_find_best_stream(ic, AVMEDIA_TYPE_SUBTITLE,
2402                                 wanted_stream[AVMEDIA_TYPE_SUBTITLE],
2403                                 (st_index[AVMEDIA_TYPE_AUDIO] >= 0 ?
2404                                  st_index[AVMEDIA_TYPE_AUDIO] :
2405                                  st_index[AVMEDIA_TYPE_VIDEO]),
2406                                 NULL, 0);
2407     if (show_status) {
2408         av_dump_format(ic, 0, is->filename, 0);
2409     }
2410
2411     /* open the streams */
2412     if (st_index[AVMEDIA_TYPE_AUDIO] >= 0) {
2413         stream_component_open(is, st_index[AVMEDIA_TYPE_AUDIO]);
2414     }
2415
2416     ret=-1;
2417     if (st_index[AVMEDIA_TYPE_VIDEO] >= 0) {
2418         ret= stream_component_open(is, st_index[AVMEDIA_TYPE_VIDEO]);
2419     }
2420     is->refresh_tid = SDL_CreateThread(refresh_thread, is);
2421     if(ret<0) {
2422         if (!display_disable)
2423             is->show_audio = 2;
2424     }
2425
2426     if (st_index[AVMEDIA_TYPE_SUBTITLE] >= 0) {
2427         stream_component_open(is, st_index[AVMEDIA_TYPE_SUBTITLE]);
2428     }
2429
2430     if (is->video_stream < 0 && is->audio_stream < 0) {
2431         fprintf(stderr, "%s: could not open codecs\n", is->filename);
2432         ret = -1;
2433         goto fail;
2434     }
2435
2436     for(;;) {
2437         if (is->abort_request)
2438             break;
2439         if (is->paused != is->last_paused) {
2440             is->last_paused = is->paused;
2441             if (is->paused)
2442                 is->read_pause_return= av_read_pause(ic);
2443             else
2444                 av_read_play(ic);
2445         }
2446 #if CONFIG_RTSP_DEMUXER
2447         if (is->paused && !strcmp(ic->iformat->name, "rtsp")) {
2448             /* wait 10 ms to avoid trying to get another packet */
2449             /* XXX: horrible */
2450             SDL_Delay(10);
2451             continue;
2452         }
2453 #endif
2454         if (is->seek_req) {
2455             int64_t seek_target= is->seek_pos;
2456             int64_t seek_min= is->seek_rel > 0 ? seek_target - is->seek_rel + 2: INT64_MIN;
2457             int64_t seek_max= is->seek_rel < 0 ? seek_target - is->seek_rel - 2: INT64_MAX;
2458 //FIXME the +-2 is due to rounding being not done in the correct direction in generation
2459 //      of the seek_pos/seek_rel variables
2460
2461             ret = avformat_seek_file(is->ic, -1, seek_min, seek_target, seek_max, is->seek_flags);
2462             if (ret < 0) {
2463                 fprintf(stderr, "%s: error while seeking\n", is->ic->filename);
2464             }else{
2465                 if (is->audio_stream >= 0) {
2466                     packet_queue_flush(&is->audioq);
2467                     packet_queue_put(&is->audioq, &flush_pkt);
2468                 }
2469                 if (is->subtitle_stream >= 0) {
2470                     packet_queue_flush(&is->subtitleq);
2471                     packet_queue_put(&is->subtitleq, &flush_pkt);
2472                 }
2473                 if (is->video_stream >= 0) {
2474                     packet_queue_flush(&is->videoq);
2475                     packet_queue_put(&is->videoq, &flush_pkt);
2476                 }
2477             }
2478             is->seek_req = 0;
2479             eof= 0;
2480         }
2481
2482         /* if the queues are full, no need to read more */
2483         if (   is->audioq.size + is->videoq.size + is->subtitleq.size > MAX_QUEUE_SIZE
2484             || (   (is->audioq   .size  > MIN_AUDIOQ_SIZE || is->audio_stream<0)
2485                 && (is->videoq   .nb_packets > MIN_FRAMES || is->video_stream<0)
2486                 && (is->subtitleq.nb_packets > MIN_FRAMES || is->subtitle_stream<0))) {
2487             /* wait 10 ms */
2488             SDL_Delay(10);
2489             continue;
2490         }
2491         if(eof) {
2492             if(is->video_stream >= 0){
2493                 av_init_packet(pkt);
2494                 pkt->data=NULL;
2495                 pkt->size=0;
2496                 pkt->stream_index= is->video_stream;
2497                 packet_queue_put(&is->videoq, pkt);
2498             }
2499             SDL_Delay(10);
2500             if(is->audioq.size + is->videoq.size + is->subtitleq.size ==0){
2501                 if(loop!=1 && (!loop || --loop)){
2502                     stream_seek(cur_stream, start_time != AV_NOPTS_VALUE ? start_time : 0, 0, 0);
2503                 }else if(autoexit){
2504                     ret=AVERROR_EOF;
2505                     goto fail;
2506                 }
2507             }
2508             continue;
2509         }
2510         ret = av_read_frame(ic, pkt);
2511         if (ret < 0) {
2512             if (ret == AVERROR_EOF || (ic->pb && ic->pb->eof_reached))
2513                 eof=1;
2514             if (ic->pb && ic->pb->error)
2515                 break;
2516             SDL_Delay(100); /* wait for user event */
2517             continue;
2518         }
2519         /* check if the packet is within the play range specified by the user; queue it if so, otherwise discard it */
2520         pkt_in_play_range = duration == AV_NOPTS_VALUE ||
2521                 (pkt->pts - ic->streams[pkt->stream_index]->start_time) *
2522                 av_q2d(ic->streams[pkt->stream_index]->time_base) -
2523                 (double)(start_time != AV_NOPTS_VALUE ? start_time : 0)/1000000
2524                 <= ((double)duration/1000000);
2525         if (pkt->stream_index == is->audio_stream && pkt_in_play_range) {
2526             packet_queue_put(&is->audioq, pkt);
2527         } else if (pkt->stream_index == is->video_stream && pkt_in_play_range) {
2528             packet_queue_put(&is->videoq, pkt);
2529         } else if (pkt->stream_index == is->subtitle_stream && pkt_in_play_range) {
2530             packet_queue_put(&is->subtitleq, pkt);
2531         } else {
2532             av_free_packet(pkt);
2533         }
2534     }
2535     /* wait until the end */
2536     while (!is->abort_request) {
2537         SDL_Delay(100);
2538     }
2539
2540     ret = 0;
2541  fail:
2542     /* disable interrupting */
2543     global_video_state = NULL;
2544
2545     /* close each stream */
2546     if (is->audio_stream >= 0)
2547         stream_component_close(is, is->audio_stream);
2548     if (is->video_stream >= 0)
2549         stream_component_close(is, is->video_stream);
2550     if (is->subtitle_stream >= 0)
2551         stream_component_close(is, is->subtitle_stream);
2552     if (is->ic) {
2553         av_close_input_file(is->ic);
2554         is->ic = NULL; /* safety */
2555     }
2556     avio_set_interrupt_cb(NULL);
2557
2558     if (ret != 0) {
2559         SDL_Event event;
2560
2561         event.type = FF_QUIT_EVENT;
2562         event.user.data1 = is;
2563         SDL_PushEvent(&event);
2564     }
2565     return 0;
2566 }
2567
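/* Allocate a VideoState for the given input, create the picture and
   subpicture synchronization primitives and start the demuxing thread
   (decode_thread).  Returns NULL on allocation or thread creation failure. */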
2568 static VideoState *stream_open(const char *filename, AVInputFormat *iformat)
2569 {
2570     VideoState *is;
2571
2572     is = av_mallocz(sizeof(VideoState));
2573     if (!is)
2574         return NULL;
2575     av_strlcpy(is->filename, filename, sizeof(is->filename));
2576     is->iformat = iformat;
2577     is->ytop = 0;
2578     is->xleft = 0;
2579
2580     /* start video display */
2581     is->pictq_mutex = SDL_CreateMutex();
2582     is->pictq_cond = SDL_CreateCond();
2583
2584     is->subpq_mutex = SDL_CreateMutex();
2585     is->subpq_cond = SDL_CreateCond();
2586
2587     is->av_sync_type = av_sync_type;
2588     is->parse_tid = SDL_CreateThread(decode_thread, is);
2589     if (!is->parse_tid) {
2590         av_free(is);
2591         return NULL;
2592     }
2593     return is;
2594 }
2595
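/* Switch to the next stream of the given type, wrapping around the stream
   list; for subtitles, wrapping past the last stream disables subtitles
   (index -1).  The old component is closed before the new one is opened. */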
2596 static void stream_cycle_channel(VideoState *is, int codec_type)
2597 {
2598     AVFormatContext *ic = is->ic;
2599     int start_index, stream_index;
2600     AVStream *st;
2601
2602     if (codec_type == AVMEDIA_TYPE_VIDEO)
2603         start_index = is->video_stream;
2604     else if (codec_type == AVMEDIA_TYPE_AUDIO)
2605         start_index = is->audio_stream;
2606     else
2607         start_index = is->subtitle_stream;
2608     if (start_index < (codec_type == AVMEDIA_TYPE_SUBTITLE ? -1 : 0))
2609         return;
2610     stream_index = start_index;
2611     for(;;) {
2612         if (++stream_index >= is->ic->nb_streams)
2613         {
2614             if (codec_type == AVMEDIA_TYPE_SUBTITLE)
2615             {
2616                 stream_index = -1;
2617                 goto the_end;
2618             } else
2619                 stream_index = 0;
2620         }
2621         if (stream_index == start_index)
2622             return;
2623         st = ic->streams[stream_index];
2624         if (st->codec->codec_type == codec_type) {
2625             /* check that parameters are OK */
2626             switch(codec_type) {
2627             case AVMEDIA_TYPE_AUDIO:
2628                 if (st->codec->sample_rate != 0 &&
2629                     st->codec->channels != 0)
2630                     goto the_end;
2631                 break;
2632             case AVMEDIA_TYPE_VIDEO:
2633             case AVMEDIA_TYPE_SUBTITLE:
2634                 goto the_end;
2635             default:
2636                 break;
2637             }
2638         }
2639     }
2640  the_end:
2641     stream_component_close(is, start_index);
2642     stream_component_open(is, stream_index);
2643 }
2644
2645
2646 static void toggle_full_screen(void)
2647 {
2648     is_full_screen = !is_full_screen;
2649     video_open(cur_stream);
2650 }
2651
2652 static void toggle_pause(void)
2653 {
2654     if (cur_stream)
2655         stream_pause(cur_stream);
2656     step = 0;
2657 }
2658
2659 static void step_to_next_frame(void)
2660 {
2661     if (cur_stream) {
2662         /* if the stream is paused unpause it, then step */
2663         if (cur_stream->paused)
2664             stream_pause(cur_stream);
2665     }
2666     step = 1;
2667 }
2668
2669 static void toggle_audio_display(void)
2670 {
2671     if (cur_stream) {
2672         int bgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0x00);
2673         cur_stream->show_audio = (cur_stream->show_audio + 1) % 3;
2674         fill_rectangle(screen,
2675                     cur_stream->xleft, cur_stream->ytop, cur_stream->width, cur_stream->height,
2676                     bgcolor);
2677         SDL_UpdateRect(screen, cur_stream->xleft, cur_stream->ytop, cur_stream->width, cur_stream->height);
2678     }
2679 }
2680
2681 /* handle an event sent by the GUI */
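/* The keyboard shortcuts match the list printed by show_help(): left/right
   seek by 10 seconds, up/down by one minute (or by an estimated byte amount
   when seeking by bytes), and a mouse click seeks to the fraction of the
   file corresponding to the click position. */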
2682 static void event_loop(void)
2683 {
2684     SDL_Event event;
2685     double incr, pos, frac;
2686
2687     for(;;) {
2688         double x;
2689         SDL_WaitEvent(&event);
2690         switch(event.type) {
2691         case SDL_KEYDOWN:
2692             if (exit_on_keydown) {
2693                 do_exit();
2694                 break;
2695             }
2696             switch(event.key.keysym.sym) {
2697             case SDLK_ESCAPE:
2698             case SDLK_q:
2699                 do_exit();
2700                 break;
2701             case SDLK_f:
2702                 toggle_full_screen();
2703                 break;
2704             case SDLK_p:
2705             case SDLK_SPACE:
2706                 toggle_pause();
2707                 break;
2708             case SDLK_s: //S: Step to next frame
2709                 step_to_next_frame();
2710                 break;
2711             case SDLK_a:
2712                 if (cur_stream)
2713                     stream_cycle_channel(cur_stream, AVMEDIA_TYPE_AUDIO);
2714                 break;
2715             case SDLK_v:
2716                 if (cur_stream)
2717                     stream_cycle_channel(cur_stream, AVMEDIA_TYPE_VIDEO);
2718                 break;
2719             case SDLK_t:
2720                 if (cur_stream)
2721                     stream_cycle_channel(cur_stream, AVMEDIA_TYPE_SUBTITLE);
2722                 break;
2723             case SDLK_w:
2724                 toggle_audio_display();
2725                 break;
2726             case SDLK_LEFT:
2727                 incr = -10.0;
2728                 goto do_seek;
2729             case SDLK_RIGHT:
2730                 incr = 10.0;
2731                 goto do_seek;
2732             case SDLK_UP:
2733                 incr = 60.0;
2734                 goto do_seek;
2735             case SDLK_DOWN:
2736                 incr = -60.0;
2737             do_seek:
2738                 if (cur_stream) {
2739                     if (seek_by_bytes) {
2740                         if (cur_stream->video_stream >= 0 && cur_stream->video_current_pos>=0){
2741                             pos= cur_stream->video_current_pos;
2742                         }else if(cur_stream->audio_stream >= 0 && cur_stream->audio_pkt.pos>=0){
2743                             pos= cur_stream->audio_pkt.pos;
2744                         }else
2745                             pos = avio_tell(cur_stream->ic->pb);
2746                         if (cur_stream->ic->bit_rate)
2747                             incr *= cur_stream->ic->bit_rate / 8.0;
2748                         else
2749                             incr *= 180000.0;
2750                         pos += incr;
2751                         stream_seek(cur_stream, pos, incr, 1);
2752                     } else {
2753                         pos = get_master_clock(cur_stream);
2754                         pos += incr;
2755                         stream_seek(cur_stream, (int64_t)(pos * AV_TIME_BASE), (int64_t)(incr * AV_TIME_BASE), 0);
2756                     }
2757                 }
2758                 break;
2759             default:
2760                 break;
2761             }
2762             break;
2763         case SDL_MOUSEBUTTONDOWN:
2764             if (exit_on_mousedown) {
2765                 do_exit();
2766                 break;
2767             }
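            /* fall through: the click position is handled by the motion code below */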
2768         case SDL_MOUSEMOTION:
2769             if(event.type ==SDL_MOUSEBUTTONDOWN){
2770                 x= event.button.x;
2771             }else{
2772                 if(event.motion.state != SDL_PRESSED)
2773                     break;
2774                 x= event.motion.x;
2775             }
2776             if (cur_stream) {
2777                 if(seek_by_bytes || cur_stream->ic->duration<=0){
2778                     uint64_t size=  avio_size(cur_stream->ic->pb);
2779                     stream_seek(cur_stream, size*x/cur_stream->width, 0, 1);
2780                 }else{
2781                     int64_t ts;
2782                     int ns, hh, mm, ss;
2783                     int tns, thh, tmm, tss;
2784                     tns = cur_stream->ic->duration/1000000LL;
2785                     thh = tns/3600;
2786                     tmm = (tns%3600)/60;
2787                     tss = (tns%60);
2788                     frac = x/cur_stream->width;
2789                     ns = frac*tns;
2790                     hh = ns/3600;
2791                     mm = (ns%3600)/60;
2792                     ss = (ns%60);
2793                     fprintf(stderr, "Seek to %2.0f%% (%2d:%02d:%02d) of total duration (%2d:%02d:%02d)       \n", frac*100,
2794                             hh, mm, ss, thh, tmm, tss);
2795                     ts = frac*cur_stream->ic->duration;
2796                     if (cur_stream->ic->start_time != AV_NOPTS_VALUE)
2797                         ts += cur_stream->ic->start_time;
2798                     stream_seek(cur_stream, ts, 0, 0);
2799                 }
2800             }
2801             break;
2802         case SDL_VIDEORESIZE:
2803             if (cur_stream) {
2804                 screen = SDL_SetVideoMode(event.resize.w, event.resize.h, 0,
2805                                           SDL_HWSURFACE|SDL_RESIZABLE|SDL_ASYNCBLIT|SDL_HWACCEL);
2806                 screen_width = cur_stream->width = event.resize.w;
2807                 screen_height= cur_stream->height= event.resize.h;
2808             }
2809             break;
2810         case SDL_QUIT:
2811         case FF_QUIT_EVENT:
2812             do_exit();
2813             break;
2814         case FF_ALLOC_EVENT:
2815             video_open(event.user.data1);
2816             alloc_picture(event.user.data1);
2817             break;
2818         case FF_REFRESH_EVENT:
2819             video_refresh_timer(event.user.data1);
2820             cur_stream->refresh=0;
2821             break;
2822         default:
2823             break;
2824         }
2825     }
2826 }
2827
2828 static int opt_frame_size(const char *opt, const char *arg)
2829 {
2830     if (av_parse_video_size(&frame_width, &frame_height, arg) < 0) {
2831         fprintf(stderr, "Incorrect frame size\n");
2832         return AVERROR(EINVAL);
2833     }
2834     if ((frame_width % 2) != 0 || (frame_height % 2) != 0) {
2835         fprintf(stderr, "Frame size must be a multiple of 2\n");
2836         return AVERROR(EINVAL);
2837     }
2838     return 0;
2839 }
2840
2841 static int opt_width(const char *opt, const char *arg)
2842 {
2843     screen_width = parse_number_or_die(opt, arg, OPT_INT64, 1, INT_MAX);
2844     return 0;
2845 }
2846
2847 static int opt_height(const char *opt, const char *arg)
2848 {
2849     screen_height = parse_number_or_die(opt, arg, OPT_INT64, 1, INT_MAX);
2850     return 0;
2851 }
2852
2853 static int opt_format(const char *opt, const char *arg)
2854 {
2855     file_iformat = av_find_input_format(arg);
2856     if (!file_iformat) {
2857         fprintf(stderr, "Unknown input format: %s\n", arg);
2858         return AVERROR(EINVAL);
2859     }
2860     return 0;
2861 }
2862
2863 static int opt_frame_pix_fmt(const char *opt, const char *arg)
2864 {
2865     frame_pix_fmt = av_get_pix_fmt(arg);
2866     return 0;
2867 }
2868
2869 static int opt_sync(const char *opt, const char *arg)
2870 {
2871     if (!strcmp(arg, "audio"))
2872         av_sync_type = AV_SYNC_AUDIO_MASTER;
2873     else if (!strcmp(arg, "video"))
2874         av_sync_type = AV_SYNC_VIDEO_MASTER;
2875     else if (!strcmp(arg, "ext"))
2876         av_sync_type = AV_SYNC_EXTERNAL_CLOCK;
2877     else {
2878         fprintf(stderr, "Unknown value for %s: %s\n", opt, arg);
2879         exit(1);
2880     }
2881     return 0;
2882 }
2883
2884 static int opt_seek(const char *opt, const char *arg)
2885 {
2886     start_time = parse_time_or_die(opt, arg, 1);
2887     return 0;
2888 }
2889
2890 static int opt_duration(const char *opt, const char *arg)
2891 {
2892     duration = parse_time_or_die(opt, arg, 1);
2893     return 0;
2894 }
2895
2896 static int opt_debug(const char *opt, const char *arg)
2897 {
2898     av_log_set_level(99);
2899     debug = parse_number_or_die(opt, arg, OPT_INT64, 0, INT_MAX);
2900     return 0;
2901 }
2902
2903 static int opt_vismv(const char *opt, const char *arg)
2904 {
2905     debug_mv = parse_number_or_die(opt, arg, OPT_INT64, INT_MIN, INT_MAX);
2906     return 0;
2907 }
2908
2909 static int opt_thread_count(const char *opt, const char *arg)
2910 {
2911     thread_count= parse_number_or_die(opt, arg, OPT_INT64, 0, INT_MAX);
2912 #if !HAVE_THREADS
2913     fprintf(stderr, "Warning: not compiled with thread support, using thread emulation\n");
2914 #endif
2915     return 0;
2916 }
2917
2918 static const OptionDef options[] = {
2919 #include "cmdutils_common_opts.h"
2920     { "x", HAS_ARG, {(void*)opt_width}, "force displayed width", "width" },
2921     { "y", HAS_ARG, {(void*)opt_height}, "force displayed height", "height" },
2922     { "s", HAS_ARG | OPT_VIDEO, {(void*)opt_frame_size}, "set frame size (WxH or abbreviation)", "size" },
2923     { "fs", OPT_BOOL, {(void*)&is_full_screen}, "force full screen" },
2924     { "an", OPT_BOOL, {(void*)&audio_disable}, "disable audio" },
2925     { "vn", OPT_BOOL, {(void*)&video_disable}, "disable video" },
2926     { "ast", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&wanted_stream[AVMEDIA_TYPE_AUDIO]}, "select desired audio stream", "stream_number" },
2927     { "vst", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&wanted_stream[AVMEDIA_TYPE_VIDEO]}, "select desired video stream", "stream_number" },
2928     { "sst", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&wanted_stream[AVMEDIA_TYPE_SUBTITLE]}, "select desired subtitle stream", "stream_number" },
2929     { "ss", HAS_ARG, {(void*)&opt_seek}, "seek to a given position in seconds", "pos" },
2930     { "t", HAS_ARG, {(void*)&opt_duration}, "play \"duration\" seconds of audio/video", "duration" },
2931     { "bytes", OPT_INT | HAS_ARG, {(void*)&seek_by_bytes}, "seek by bytes 0=off 1=on -1=auto", "val" },
2932     { "nodisp", OPT_BOOL, {(void*)&display_disable}, "disable graphical display" },
2933     { "f", HAS_ARG, {(void*)opt_format}, "force format", "fmt" },
2934     { "pix_fmt", HAS_ARG | OPT_EXPERT | OPT_VIDEO, {(void*)opt_frame_pix_fmt}, "set pixel format", "format" },
2935     { "stats", OPT_BOOL | OPT_EXPERT, {(void*)&show_status}, "show status", "" },
2936     { "debug", HAS_ARG | OPT_EXPERT, {(void*)opt_debug}, "print specific debug info", "" },
2937     { "bug", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&workaround_bugs}, "workaround bugs", "" },
2938     { "vismv", HAS_ARG | OPT_EXPERT, {(void*)opt_vismv}, "visualize motion vectors", "" },
2939     { "fast", OPT_BOOL | OPT_EXPERT, {(void*)&fast}, "non spec compliant optimizations", "" },
2940     { "genpts", OPT_BOOL | OPT_EXPERT, {(void*)&genpts}, "generate pts", "" },
2941     { "drp", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&decoder_reorder_pts}, "let decoder reorder pts 0=off 1=on -1=auto", ""},
2942     { "lowres", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&lowres}, "", "" },
2943     { "skiploop", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&skip_loop_filter}, "", "" },
2944     { "skipframe", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&skip_frame}, "", "" },
2945     { "skipidct", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&skip_idct}, "", "" },
2946     { "idct", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&idct}, "set idct algo",  "algo" },
2947     { "er", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&error_recognition}, "set error detection threshold (0-4)",  "threshold" },
2948     { "ec", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&error_concealment}, "set error concealment options",  "bit_mask" },
2949     { "sync", HAS_ARG | OPT_EXPERT, {(void*)opt_sync}, "set audio-video sync. type (type=audio/video/ext)", "type" },
2950     { "threads", HAS_ARG | OPT_EXPERT, {(void*)opt_thread_count}, "thread count", "count" },
2951     { "autoexit", OPT_BOOL | OPT_EXPERT, {(void*)&autoexit}, "exit at the end", "" },
2952     { "exitonkeydown", OPT_BOOL | OPT_EXPERT, {(void*)&exit_on_keydown}, "exit on key down", "" },
2953     { "exitonmousedown", OPT_BOOL | OPT_EXPERT, {(void*)&exit_on_mousedown}, "exit on mouse down", "" },
2954     { "loop", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&loop}, "set number of times the playback shall be looped", "loop count" },
2955     { "framedrop", OPT_BOOL | OPT_EXPERT, {(void*)&framedrop}, "drop frames when cpu is too slow", "" },
2956     { "window_title", OPT_STRING | HAS_ARG, {(void*)&window_title}, "set window title", "window title" },
2957 #if CONFIG_AVFILTER
2958     { "vf", OPT_STRING | HAS_ARG, {(void*)&vfilters}, "video filters", "filter list" },
2959 #endif
2960     { "rdftspeed", OPT_INT | HAS_ARG| OPT_AUDIO | OPT_EXPERT, {(void*)&rdftspeed}, "rdft speed", "msecs" },
2961     { "default", HAS_ARG | OPT_AUDIO | OPT_VIDEO | OPT_EXPERT, {(void*)opt_default}, "generic catch all option", "" },
2962     { "i", 0, {NULL}, "ffmpeg compatibility dummy option", ""},
2963     { NULL, },
2964 };
2965
2966 static void show_usage(void)
2967 {
2968     printf("Simple media player\n");
2969     printf("usage: ffplay [options] input_file\n");
2970     printf("\n");
2971 }
2972
2973 static void show_help(void)
2974 {
2975     av_log_set_callback(log_callback_help);
2976     show_usage();
2977     show_help_options(options, "Main options:\n",
2978                       OPT_EXPERT, 0);
2979     show_help_options(options, "\nAdvanced options:\n",
2980                       OPT_EXPERT, OPT_EXPERT);
2981     printf("\n");
2982     av_opt_show2(avcodec_opts[0], NULL,
2983                  AV_OPT_FLAG_DECODING_PARAM, 0);
2984     printf("\n");
2985     av_opt_show2(avformat_opts, NULL,
2986                  AV_OPT_FLAG_DECODING_PARAM, 0);
2987 #if !CONFIG_AVFILTER
2988     printf("\n");
2989     av_opt_show2(sws_opts, NULL,
2990                  AV_OPT_FLAG_ENCODING_PARAM, 0);
2991 #endif
2992     printf("\nWhile playing:\n"
2993            "q, ESC              quit\n"
2994            "f                   toggle full screen\n"
2995            "p, SPC              pause\n"
2996            "a                   cycle audio channel\n"
2997            "v                   cycle video channel\n"
2998            "t                   cycle subtitle channel\n"
2999            "w                   show audio waves\n"
3000            "s                   activate frame-step mode\n"
3001            "left/right          seek backward/forward 10 seconds\n"
3002            "down/up             seek backward/forward 1 minute\n"
3003            "mouse click         seek to percentage in file corresponding to fraction of width\n"
3004            );
3005 }
3006
3007 static void opt_input_file(const char *filename)
3008 {
3009     if (input_filename) {
3010         fprintf(stderr, "Argument '%s' provided as input filename, but '%s' was already specified.\n",
3011                 filename, input_filename);
3012         exit(1);
3013     }
3014     if (!strcmp(filename, "-"))
3015         filename = "pipe:";
3016     input_filename = filename;
3017 }
3018
3019 /* Called from the main */
3020 int main(int argc, char **argv)
3021 {
3022     int flags;
3023
3024     av_log_set_flags(AV_LOG_SKIP_REPEATED);
3025
3026     /* register all codecs, demuxers and protocols */
3027     avcodec_register_all();
3028 #if CONFIG_AVDEVICE
3029     avdevice_register_all();
3030 #endif
3031 #if CONFIG_AVFILTER
3032     avfilter_register_all();
3033 #endif
3034     av_register_all();
3035
3036     init_opts();
3037
3038     show_banner();
3039
3040     parse_options(argc, argv, options, opt_input_file);
3041
3042     if (!input_filename) {
3043         show_usage();
3044         fprintf(stderr, "An input file must be specified\n");
3045         fprintf(stderr, "Use -h to get full help or, even better, run 'man ffplay'\n");
3046         exit(1);
3047     }
3048
3049     if (display_disable) {
3050         video_disable = 1;
3051     }
3052     flags = SDL_INIT_VIDEO | SDL_INIT_AUDIO | SDL_INIT_TIMER;
3053 #if !defined(__MINGW32__) && !defined(__APPLE__)
3054     flags |= SDL_INIT_EVENTTHREAD; /* Not supported on Windows or Mac OS X */
3055 #endif
3056     if (SDL_Init (flags)) {
3057         fprintf(stderr, "Could not initialize SDL - %s\n", SDL_GetError());
3058         exit(1);
3059     }
3060
3061     if (!display_disable) {
3062 #if HAVE_SDL_VIDEO_SIZE
3063         const SDL_VideoInfo *vi = SDL_GetVideoInfo();
3064         fs_screen_width = vi->current_w;
3065         fs_screen_height = vi->current_h;
3066 #endif
3067     }
3068
3069     SDL_EventState(SDL_ACTIVEEVENT, SDL_IGNORE);
3070     SDL_EventState(SDL_SYSWMEVENT, SDL_IGNORE);
3071     SDL_EventState(SDL_USEREVENT, SDL_IGNORE);
3072
3073     av_init_packet(&flush_pkt);
3074     flush_pkt.data= (uint8_t *)"FLUSH";
3075
3076     cur_stream = stream_open(input_filename, file_iformat);
3077
3078     event_loop();
3079
3080     /* never returns */
3081
3082     return 0;
3083 }