1 /*
2  * ffplay : Simple Media Player based on the FFmpeg libraries
3  * Copyright (c) 2003 Fabrice Bellard
4  *
5  * This file is part of FFmpeg.
6  *
7  * FFmpeg is free software; you can redistribute it and/or
8  * modify it under the terms of the GNU Lesser General Public
9  * License as published by the Free Software Foundation; either
10  * version 2.1 of the License, or (at your option) any later version.
11  *
12  * FFmpeg is distributed in the hope that it will be useful,
13  * but WITHOUT ANY WARRANTY; without even the implied warranty of
14  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
15  * Lesser General Public License for more details.
16  *
17  * You should have received a copy of the GNU Lesser General Public
18  * License along with FFmpeg; if not, write to the Free Software
19  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
20  */
21
22 #include "config.h"
23 #include <inttypes.h>
24 #include <math.h>
25 #include <limits.h>
26 #include "libavutil/avstring.h"
27 #include "libavutil/colorspace.h"
28 #include "libavutil/mathematics.h"
29 #include "libavutil/pixdesc.h"
30 #include "libavutil/imgutils.h"
31 #include "libavutil/dict.h"
32 #include "libavutil/parseutils.h"
33 #include "libavutil/samplefmt.h"
34 #include "libavutil/avassert.h"
35 #include "libavformat/avformat.h"
36 #include "libavdevice/avdevice.h"
37 #include "libswscale/swscale.h"
38 #include "libavcodec/audioconvert.h"
39 #include "libavutil/opt.h"
40 #include "libavcodec/avfft.h"
41
42 #if CONFIG_AVFILTER
43 # include "libavfilter/avcodec.h"
44 # include "libavfilter/avfilter.h"
45 # include "libavfilter/avfiltergraph.h"
46 # include "libavfilter/vsink_buffer.h"
47 #endif
48
49 #include <SDL.h>
50 #include <SDL_thread.h>
51
52 #include "cmdutils.h"
53
54 #include <unistd.h>
55 #include <assert.h>
56
57 const char program_name[] = "ffplay";
58 const int program_birth_year = 2003;
59
60 #define MAX_QUEUE_SIZE (15 * 1024 * 1024)
61 #define MIN_AUDIOQ_SIZE (20 * 16 * 1024)
62 #define MIN_FRAMES 5
63
64 /* SDL audio buffer size, in samples. Should be small to have precise
65    A/V sync as SDL does not have hardware buffer fullness info. */
66 #define SDL_AUDIO_BUFFER_SIZE 1024
67
68 /* no AV sync correction is done if below the AV sync threshold */
69 #define AV_SYNC_THRESHOLD 0.01
70 /* no AV correction is done if the error is too big */
71 #define AV_NOSYNC_THRESHOLD 10.0
72
73 #define FRAME_SKIP_FACTOR 0.05
74
75 /* maximum audio speed change to get correct sync */
76 #define SAMPLE_CORRECTION_PERCENT_MAX 10
77
78 /* we use about AUDIO_DIFF_AVG_NB A-V differences to make the average */
79 #define AUDIO_DIFF_AVG_NB   20
80
81 /* NOTE: the size must be big enough to compensate for the hardware audio buffer size */
82 #define SAMPLE_ARRAY_SIZE (2*65536)
83
84 static int sws_flags = SWS_BICUBIC;
85
86 typedef struct PacketQueue {
87     AVPacketList *first_pkt, *last_pkt;
88     int nb_packets;
89     int size;
90     int abort_request;
91     SDL_mutex *mutex;
92     SDL_cond *cond;
93 } PacketQueue;
94
95 #define VIDEO_PICTURE_QUEUE_SIZE 2
96 #define SUBPICTURE_QUEUE_SIZE 4
97
98 typedef struct VideoPicture {
99     double pts;                                  ///<presentation time stamp for this picture
100     double target_clock;                         ///<av_gettime() time at which this should be displayed ideally
101     int64_t pos;                                 ///<byte position in file
102     SDL_Overlay *bmp;
103     int width, height; /* source height & width */
104     int allocated;
105     enum PixelFormat pix_fmt;
106
107 #if CONFIG_AVFILTER
108     AVFilterBufferRef *picref;
109 #endif
110 } VideoPicture;
111
112 typedef struct SubPicture {
113     double pts; /* presentation time stamp for this picture */
114     AVSubtitle sub;
115 } SubPicture;
116
117 enum {
118     AV_SYNC_AUDIO_MASTER, /* default choice */
119     AV_SYNC_VIDEO_MASTER,
120     AV_SYNC_EXTERNAL_CLOCK, /* synchronize to an external clock */
121 };
122
123 typedef struct VideoState {
124     SDL_Thread *read_tid;
125     SDL_Thread *video_tid;
126     SDL_Thread *refresh_tid;
127     AVInputFormat *iformat;
128     int no_background;
129     int abort_request;
130     int paused;
131     int last_paused;
132     int seek_req;
133     int seek_flags;
134     int64_t seek_pos;
135     int64_t seek_rel;
136     int read_pause_return;
137     AVFormatContext *ic;
138
139     int audio_stream;
140
141     int av_sync_type;
142     double external_clock; /* external clock base */
143     int64_t external_clock_time;
144
145     double audio_clock;
146     double audio_diff_cum; /* used for AV difference average computation */
147     double audio_diff_avg_coef;
148     double audio_diff_threshold;
149     int audio_diff_avg_count;
150     AVStream *audio_st;
151     PacketQueue audioq;
152     int audio_hw_buf_size;
153     /* samples output by the codec. We reserve more space for A/V sync
154        compensation */
155     DECLARE_ALIGNED(16,uint8_t,audio_buf1)[(AVCODEC_MAX_AUDIO_FRAME_SIZE * 3) / 2];
156     DECLARE_ALIGNED(16,uint8_t,audio_buf2)[(AVCODEC_MAX_AUDIO_FRAME_SIZE * 3) / 2];
157     uint8_t *audio_buf;
158     unsigned int audio_buf_size; /* in bytes */
159     int audio_buf_index; /* in bytes */
160     AVPacket audio_pkt_temp;
161     AVPacket audio_pkt;
162     enum AVSampleFormat audio_src_fmt;
163     AVAudioConvert *reformat_ctx;
164
165     enum ShowMode {
166         SHOW_MODE_NONE = -1, SHOW_MODE_VIDEO = 0, SHOW_MODE_WAVES, SHOW_MODE_RDFT, SHOW_MODE_NB
167     } show_mode;
168     int16_t sample_array[SAMPLE_ARRAY_SIZE];
169     int sample_array_index;
170     int last_i_start;
171     RDFTContext *rdft;
172     int rdft_bits;
173     FFTSample *rdft_data;
174     int xpos;
175
176     SDL_Thread *subtitle_tid;
177     int subtitle_stream;
178     int subtitle_stream_changed;
179     AVStream *subtitle_st;
180     PacketQueue subtitleq;
181     SubPicture subpq[SUBPICTURE_QUEUE_SIZE];
182     int subpq_size, subpq_rindex, subpq_windex;
183     SDL_mutex *subpq_mutex;
184     SDL_cond *subpq_cond;
185
186     double frame_timer;
187     double frame_last_pts;
188     double frame_last_delay;
189     double video_clock;                          ///<pts of last decoded frame / predicted pts of next decoded frame
190     int video_stream;
191     AVStream *video_st;
192     PacketQueue videoq;
193     double video_current_pts;                    ///<current displayed pts (different from video_clock if frame fifos are used)
194     double video_current_pts_drift;              ///<video_current_pts - time (av_gettime) at which we updated video_current_pts - used to have running video pts
195     int64_t video_current_pos;                   ///<current displayed file pos
196     VideoPicture pictq[VIDEO_PICTURE_QUEUE_SIZE];
197     int pictq_size, pictq_rindex, pictq_windex;
198     SDL_mutex *pictq_mutex;
199     SDL_cond *pictq_cond;
200 #if !CONFIG_AVFILTER
201     struct SwsContext *img_convert_ctx;
202 #endif
203
204     char filename[1024];
205     int width, height, xleft, ytop;
206
207 #if CONFIG_AVFILTER
208     AVFilterContext *out_video_filter;          ///<the last filter in the video chain
209 #endif
210
211     float skip_frames;
212     float skip_frames_index;
213     int refresh;
214 } VideoState;
215
216 static int opt_help(const char *opt, const char *arg);
217
218 /* options specified by the user */
219 static AVInputFormat *file_iformat;
220 static const char *input_filename;
221 static const char *window_title;
222 static int fs_screen_width;
223 static int fs_screen_height;
224 static int screen_width = 0;
225 static int screen_height = 0;
226 static int audio_disable;
227 static int video_disable;
228 static int wanted_stream[AVMEDIA_TYPE_NB]={
229     [AVMEDIA_TYPE_AUDIO]=-1,
230     [AVMEDIA_TYPE_VIDEO]=-1,
231     [AVMEDIA_TYPE_SUBTITLE]=-1,
232 };
233 static int seek_by_bytes=-1;
234 static int display_disable;
235 static int show_status = 1;
236 static int av_sync_type = AV_SYNC_AUDIO_MASTER;
237 static int64_t start_time = AV_NOPTS_VALUE;
238 static int64_t duration = AV_NOPTS_VALUE;
239 static int step = 0;
240 static int thread_count = 1;
241 static int workaround_bugs = 1;
242 static int fast = 0;
243 static int genpts = 0;
244 static int lowres = 0;
245 static int idct = FF_IDCT_AUTO;
246 static enum AVDiscard skip_frame= AVDISCARD_DEFAULT;
247 static enum AVDiscard skip_idct= AVDISCARD_DEFAULT;
248 static enum AVDiscard skip_loop_filter= AVDISCARD_DEFAULT;
249 static int error_recognition = FF_ER_CAREFUL;
250 static int error_concealment = 3;
251 static int decoder_reorder_pts= -1;
252 static int autoexit;
253 static int exit_on_keydown;
254 static int exit_on_mousedown;
255 static int loop=1;
256 static int framedrop=-1;
257 static enum ShowMode show_mode = SHOW_MODE_NONE;
258
259 static int rdftspeed=20;
260 #if CONFIG_AVFILTER
261 static char *vfilters = NULL;
262 #endif
263
264 /* current context */
265 static int is_full_screen;
266 static VideoState *cur_stream;
267 static int64_t audio_callback_time;
268
269 static AVPacket flush_pkt;
270
271 #define FF_ALLOC_EVENT   (SDL_USEREVENT)
272 #define FF_REFRESH_EVENT (SDL_USEREVENT + 1)
273 #define FF_QUIT_EVENT    (SDL_USEREVENT + 2)
274
275 static SDL_Surface *screen;
276
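/* Append one packet to the queue (duplicating its data unless it is the
   special flush packet) and wake up any reader blocked in packet_queue_get(). */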
277 static int packet_queue_put(PacketQueue *q, AVPacket *pkt)
278 {
279     AVPacketList *pkt1;
280
281     /* duplicate the packet */
282     if (pkt!=&flush_pkt && av_dup_packet(pkt) < 0)
283         return -1;
284
285     pkt1 = av_malloc(sizeof(AVPacketList));
286     if (!pkt1)
287         return -1;
288     pkt1->pkt = *pkt;
289     pkt1->next = NULL;
290
291
292     SDL_LockMutex(q->mutex);
293
294     if (!q->last_pkt)
295
296         q->first_pkt = pkt1;
297     else
298         q->last_pkt->next = pkt1;
299     q->last_pkt = pkt1;
300     q->nb_packets++;
301     q->size += pkt1->pkt.size + sizeof(*pkt1);
302     /* XXX: should duplicate packet data in DV case */
303     SDL_CondSignal(q->cond);
304
305     SDL_UnlockMutex(q->mutex);
306     return 0;
307 }
308
309 /* packet queue handling */
310 static void packet_queue_init(PacketQueue *q)
311 {
312     memset(q, 0, sizeof(PacketQueue));
313     q->mutex = SDL_CreateMutex();
314     q->cond = SDL_CreateCond();
315     packet_queue_put(q, &flush_pkt);
316 }
317
318 static void packet_queue_flush(PacketQueue *q)
319 {
320     AVPacketList *pkt, *pkt1;
321
322     SDL_LockMutex(q->mutex);
323     for(pkt = q->first_pkt; pkt != NULL; pkt = pkt1) {
324         pkt1 = pkt->next;
325         av_free_packet(&pkt->pkt);
326         av_freep(&pkt);
327     }
328     q->last_pkt = NULL;
329     q->first_pkt = NULL;
330     q->nb_packets = 0;
331     q->size = 0;
332     SDL_UnlockMutex(q->mutex);
333 }
334
335 static void packet_queue_end(PacketQueue *q)
336 {
337     packet_queue_flush(q);
338     SDL_DestroyMutex(q->mutex);
339     SDL_DestroyCond(q->cond);
340 }
341
342 static void packet_queue_abort(PacketQueue *q)
343 {
344     SDL_LockMutex(q->mutex);
345
346     q->abort_request = 1;
347
348     SDL_CondSignal(q->cond);
349
350     SDL_UnlockMutex(q->mutex);
351 }
352
353 /* return < 0 if aborted, 0 if no packet and > 0 if packet.  */
354 static int packet_queue_get(PacketQueue *q, AVPacket *pkt, int block)
355 {
356     AVPacketList *pkt1;
357     int ret;
358
359     SDL_LockMutex(q->mutex);
360
361     for(;;) {
362         if (q->abort_request) {
363             ret = -1;
364             break;
365         }
366
367         pkt1 = q->first_pkt;
368         if (pkt1) {
369             q->first_pkt = pkt1->next;
370             if (!q->first_pkt)
371                 q->last_pkt = NULL;
372             q->nb_packets--;
373             q->size -= pkt1->pkt.size + sizeof(*pkt1);
374             *pkt = pkt1->pkt;
375             av_free(pkt1);
376             ret = 1;
377             break;
378         } else if (!block) {
379             ret = 0;
380             break;
381         } else {
382             SDL_CondWait(q->cond, q->mutex);
383         }
384     }
385     SDL_UnlockMutex(q->mutex);
386     return ret;
387 }
388
389 static inline void fill_rectangle(SDL_Surface *screen,
390                                   int x, int y, int w, int h, int color)
391 {
392     SDL_Rect rect;
393     rect.x = x;
394     rect.y = y;
395     rect.w = w;
396     rect.h = h;
397     SDL_FillRect(screen, &rect, color);
398 }
399
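/* Alpha-blending helpers for subtitle overlay: ALPHA_BLEND mixes an old and a
   new sample using an 8-bit alpha value; the shift 's' lets callers pass a sum
   of 2^s chroma samples, so averaging and blending happen in one step. */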
400 #define ALPHA_BLEND(a, oldp, newp, s)\
401 ((((oldp << s) * (255 - (a))) + (newp * (a))) / (255 << s))
402
403 #define RGBA_IN(r, g, b, a, s)\
404 {\
405     unsigned int v = ((const uint32_t *)(s))[0];\
406     a = (v >> 24) & 0xff;\
407     r = (v >> 16) & 0xff;\
408     g = (v >> 8) & 0xff;\
409     b = v & 0xff;\
410 }
411
412 #define YUVA_IN(y, u, v, a, s, pal)\
413 {\
414     unsigned int val = ((const uint32_t *)(pal))[*(const uint8_t*)(s)];\
415     a = (val >> 24) & 0xff;\
416     y = (val >> 16) & 0xff;\
417     u = (val >> 8) & 0xff;\
418     v = val & 0xff;\
419 }
420
421 #define YUVA_OUT(d, y, u, v, a)\
422 {\
423     ((uint32_t *)(d))[0] = (a << 24) | (y << 16) | (u << 8) | v;\
424 }
425
426
427 #define BPP 1
428
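/* Blend one palettized subtitle rectangle onto the YUV420P destination picture:
   rect->pict.data[0] holds 8-bit palette indices and data[1] the YUVA palette.
   Luma is blended per pixel, chroma per 2x2 block, hence the paired rows and
   columns and the separate handling of the odd edges. */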
429 static void blend_subrect(AVPicture *dst, const AVSubtitleRect *rect, int imgw, int imgh)
430 {
431     int wrap, wrap3, width2, skip2;
432     int y, u, v, a, u1, v1, a1, w, h;
433     uint8_t *lum, *cb, *cr;
434     const uint8_t *p;
435     const uint32_t *pal;
436     int dstx, dsty, dstw, dsth;
437
438     dstw = av_clip(rect->w, 0, imgw);
439     dsth = av_clip(rect->h, 0, imgh);
440     dstx = av_clip(rect->x, 0, imgw - dstw);
441     dsty = av_clip(rect->y, 0, imgh - dsth);
442     lum = dst->data[0] + dsty * dst->linesize[0];
443     cb = dst->data[1] + (dsty >> 1) * dst->linesize[1];
444     cr = dst->data[2] + (dsty >> 1) * dst->linesize[2];
445
446     width2 = ((dstw + 1) >> 1) + (dstx & ~dstw & 1);
447     skip2 = dstx >> 1;
448     wrap = dst->linesize[0];
449     wrap3 = rect->pict.linesize[0];
450     p = rect->pict.data[0];
451     pal = (const uint32_t *)rect->pict.data[1];  /* Now in YCrCb! */
452
453     if (dsty & 1) {
454         lum += dstx;
455         cb += skip2;
456         cr += skip2;
457
458         if (dstx & 1) {
459             YUVA_IN(y, u, v, a, p, pal);
460             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
461             cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
462             cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
463             cb++;
464             cr++;
465             lum++;
466             p += BPP;
467         }
468         for(w = dstw - (dstx & 1); w >= 2; w -= 2) {
469             YUVA_IN(y, u, v, a, p, pal);
470             u1 = u;
471             v1 = v;
472             a1 = a;
473             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
474
475             YUVA_IN(y, u, v, a, p + BPP, pal);
476             u1 += u;
477             v1 += v;
478             a1 += a;
479             lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
480             cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
481             cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
482             cb++;
483             cr++;
484             p += 2 * BPP;
485             lum += 2;
486         }
487         if (w) {
488             YUVA_IN(y, u, v, a, p, pal);
489             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
490             cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
491             cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
492             p++;
493             lum++;
494         }
495         p += wrap3 - dstw * BPP;
496         lum += wrap - dstw - dstx;
497         cb += dst->linesize[1] - width2 - skip2;
498         cr += dst->linesize[2] - width2 - skip2;
499     }
500     for(h = dsth - (dsty & 1); h >= 2; h -= 2) {
501         lum += dstx;
502         cb += skip2;
503         cr += skip2;
504
505         if (dstx & 1) {
506             YUVA_IN(y, u, v, a, p, pal);
507             u1 = u;
508             v1 = v;
509             a1 = a;
510             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
511             p += wrap3;
512             lum += wrap;
513             YUVA_IN(y, u, v, a, p, pal);
514             u1 += u;
515             v1 += v;
516             a1 += a;
517             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
518             cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
519             cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
520             cb++;
521             cr++;
522             p += -wrap3 + BPP;
523             lum += -wrap + 1;
524         }
525         for(w = dstw - (dstx & 1); w >= 2; w -= 2) {
526             YUVA_IN(y, u, v, a, p, pal);
527             u1 = u;
528             v1 = v;
529             a1 = a;
530             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
531
532             YUVA_IN(y, u, v, a, p + BPP, pal);
533             u1 += u;
534             v1 += v;
535             a1 += a;
536             lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
537             p += wrap3;
538             lum += wrap;
539
540             YUVA_IN(y, u, v, a, p, pal);
541             u1 += u;
542             v1 += v;
543             a1 += a;
544             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
545
546             YUVA_IN(y, u, v, a, p + BPP, pal);
547             u1 += u;
548             v1 += v;
549             a1 += a;
550             lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
551
552             cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 2);
553             cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 2);
554
555             cb++;
556             cr++;
557             p += -wrap3 + 2 * BPP;
558             lum += -wrap + 2;
559         }
560         if (w) {
561             YUVA_IN(y, u, v, a, p, pal);
562             u1 = u;
563             v1 = v;
564             a1 = a;
565             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
566             p += wrap3;
567             lum += wrap;
568             YUVA_IN(y, u, v, a, p, pal);
569             u1 += u;
570             v1 += v;
571             a1 += a;
572             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
573             cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
574             cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
575             cb++;
576             cr++;
577             p += -wrap3 + BPP;
578             lum += -wrap + 1;
579         }
580         p += wrap3 + (wrap3 - dstw * BPP);
581         lum += wrap + (wrap - dstw - dstx);
582         cb += dst->linesize[1] - width2 - skip2;
583         cr += dst->linesize[2] - width2 - skip2;
584     }
585     /* handle odd height */
586     if (h) {
587         lum += dstx;
588         cb += skip2;
589         cr += skip2;
590
591         if (dstx & 1) {
592             YUVA_IN(y, u, v, a, p, pal);
593             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
594             cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
595             cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
596             cb++;
597             cr++;
598             lum++;
599             p += BPP;
600         }
601         for(w = dstw - (dstx & 1); w >= 2; w -= 2) {
602             YUVA_IN(y, u, v, a, p, pal);
603             u1 = u;
604             v1 = v;
605             a1 = a;
606             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
607
608             YUVA_IN(y, u, v, a, p + BPP, pal);
609             u1 += u;
610             v1 += v;
611             a1 += a;
612             lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
613             cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
614             cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
615             cb++;
616             cr++;
617             p += 2 * BPP;
618             lum += 2;
619         }
620         if (w) {
621             YUVA_IN(y, u, v, a, p, pal);
622             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
623             cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
624             cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
625         }
626     }
627 }
628
629 static void free_subpicture(SubPicture *sp)
630 {
631     avsubtitle_free(&sp->sub);
632 }
633
634 static void video_image_display(VideoState *is)
635 {
636     VideoPicture *vp;
637     SubPicture *sp;
638     AVPicture pict;
639     float aspect_ratio;
640     int width, height, x, y;
641     SDL_Rect rect;
642     int i;
643
644     vp = &is->pictq[is->pictq_rindex];
645     if (vp->bmp) {
646 #if CONFIG_AVFILTER
647          if (vp->picref->video->sample_aspect_ratio.num == 0)
648              aspect_ratio = 0;
649          else
650              aspect_ratio = av_q2d(vp->picref->video->sample_aspect_ratio);
651 #else
652
653         /* XXX: use variable in the frame */
654         if (is->video_st->sample_aspect_ratio.num)
655             aspect_ratio = av_q2d(is->video_st->sample_aspect_ratio);
656         else if (is->video_st->codec->sample_aspect_ratio.num)
657             aspect_ratio = av_q2d(is->video_st->codec->sample_aspect_ratio);
658         else
659             aspect_ratio = 0;
660 #endif
661         if (aspect_ratio <= 0.0)
662             aspect_ratio = 1.0;
663         aspect_ratio *= (float)vp->width / (float)vp->height;
664
665         if (is->subtitle_st) {
666             if (is->subpq_size > 0) {
667                 sp = &is->subpq[is->subpq_rindex];
668
669                 if (vp->pts >= sp->pts + ((float) sp->sub.start_display_time / 1000)) {
670                     SDL_LockYUVOverlay (vp->bmp);
671
672                     pict.data[0] = vp->bmp->pixels[0];
673                     pict.data[1] = vp->bmp->pixels[2];
674                     pict.data[2] = vp->bmp->pixels[1];
675
676                     pict.linesize[0] = vp->bmp->pitches[0];
677                     pict.linesize[1] = vp->bmp->pitches[2];
678                     pict.linesize[2] = vp->bmp->pitches[1];
679
680                     for (i = 0; i < sp->sub.num_rects; i++)
681                         blend_subrect(&pict, sp->sub.rects[i],
682                                       vp->bmp->w, vp->bmp->h);
683
684                     SDL_UnlockYUVOverlay (vp->bmp);
685                 }
686             }
687         }
688
689
690         /* XXX: we assume the screen has a 1.0 pixel aspect ratio */
691         height = is->height;
692         width = ((int)rint(height * aspect_ratio)) & ~1;
693         if (width > is->width) {
694             width = is->width;
695             height = ((int)rint(width / aspect_ratio)) & ~1;
696         }
697         x = (is->width - width) / 2;
698         y = (is->height - height) / 2;
699         is->no_background = 0;
700         rect.x = is->xleft + x;
701         rect.y = is->ytop  + y;
702         rect.w = FFMAX(width,  1);
703         rect.h = FFMAX(height, 1);
704         SDL_DisplayYUVOverlay(vp->bmp, &rect);
705     }
706 }
707
708 /* get the current audio output buffer size, in bytes. With SDL, we
709    cannot have precise information */
710 static int audio_write_get_buf_size(VideoState *is)
711 {
712     return is->audio_buf_size - is->audio_buf_index;
713 }
714
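/* Modulo that always returns a value in [0, b) even for negative a,
   used to wrap indices into the circular sample_array. */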
715 static inline int compute_mod(int a, int b)
716 {
717     return a < 0 ? a%b + b : a%b;
718 }
719
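/* Draw the audio-only visualisation: either one waveform per channel
   (SHOW_MODE_WAVES) or a scrolling spectrogram computed with an RDFT of the
   most recently played samples. */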
720 static void video_audio_display(VideoState *s)
721 {
722     int i, i_start, x, y1, y, ys, delay, n, nb_display_channels;
723     int ch, channels, h, h2, bgcolor, fgcolor;
724     int64_t time_diff;
725     int rdft_bits, nb_freq;
726
727     for(rdft_bits=1; (1<<rdft_bits)<2*s->height; rdft_bits++)
728         ;
729     nb_freq= 1<<(rdft_bits-1);
730
731     /* compute display index : center on currently output samples */
732     channels = s->audio_st->codec->channels;
733     nb_display_channels = channels;
734     if (!s->paused) {
735         int data_used= s->show_mode == SHOW_MODE_WAVES ? s->width : (2*nb_freq);
736         n = 2 * channels;
737         delay = audio_write_get_buf_size(s);
738         delay /= n;
739
740         /* to be more precise, we take into account the time spent since
741            the last buffer computation */
742         if (audio_callback_time) {
743             time_diff = av_gettime() - audio_callback_time;
744             delay -= (time_diff * s->audio_st->codec->sample_rate) / 1000000;
745         }
746
747         delay += 2*data_used;
748         if (delay < data_used)
749             delay = data_used;
750
751         i_start= x = compute_mod(s->sample_array_index - delay * channels, SAMPLE_ARRAY_SIZE);
752         if (s->show_mode == SHOW_MODE_WAVES) {
753             h= INT_MIN;
754             for(i=0; i<1000; i+=channels){
755                 int idx= (SAMPLE_ARRAY_SIZE + x - i) % SAMPLE_ARRAY_SIZE;
756                 int a= s->sample_array[idx];
757                 int b= s->sample_array[(idx + 4*channels)%SAMPLE_ARRAY_SIZE];
758                 int c= s->sample_array[(idx + 5*channels)%SAMPLE_ARRAY_SIZE];
759                 int d= s->sample_array[(idx + 9*channels)%SAMPLE_ARRAY_SIZE];
760                 int score= a-d;
761                 if(h<score && (b^c)<0){
762                     h= score;
763                     i_start= idx;
764                 }
765             }
766         }
767
768         s->last_i_start = i_start;
769     } else {
770         i_start = s->last_i_start;
771     }
772
773     bgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0x00);
774     if (s->show_mode == SHOW_MODE_WAVES) {
775         fill_rectangle(screen,
776                        s->xleft, s->ytop, s->width, s->height,
777                        bgcolor);
778
779         fgcolor = SDL_MapRGB(screen->format, 0xff, 0xff, 0xff);
780
781         /* total height for one channel */
782         h = s->height / nb_display_channels;
783         /* graph height / 2 */
784         h2 = (h * 9) / 20;
785         for(ch = 0;ch < nb_display_channels; ch++) {
786             i = i_start + ch;
787             y1 = s->ytop + ch * h + (h / 2); /* position of center line */
788             for(x = 0; x < s->width; x++) {
789                 y = (s->sample_array[i] * h2) >> 15;
790                 if (y < 0) {
791                     y = -y;
792                     ys = y1 - y;
793                 } else {
794                     ys = y1;
795                 }
796                 fill_rectangle(screen,
797                                s->xleft + x, ys, 1, y,
798                                fgcolor);
799                 i += channels;
800                 if (i >= SAMPLE_ARRAY_SIZE)
801                     i -= SAMPLE_ARRAY_SIZE;
802             }
803         }
804
805         fgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0xff);
806
807         for(ch = 1;ch < nb_display_channels; ch++) {
808             y = s->ytop + ch * h;
809             fill_rectangle(screen,
810                            s->xleft, y, s->width, 1,
811                            fgcolor);
812         }
813         SDL_UpdateRect(screen, s->xleft, s->ytop, s->width, s->height);
814     }else{
815         nb_display_channels= FFMIN(nb_display_channels, 2);
816         if(rdft_bits != s->rdft_bits){
817             av_rdft_end(s->rdft);
818             av_free(s->rdft_data);
819             s->rdft = av_rdft_init(rdft_bits, DFT_R2C);
820             s->rdft_bits= rdft_bits;
821             s->rdft_data= av_malloc(4*nb_freq*sizeof(*s->rdft_data));
822         }
823         {
824             FFTSample *data[2];
825             for(ch = 0;ch < nb_display_channels; ch++) {
826                 data[ch] = s->rdft_data + 2*nb_freq*ch;
827                 i = i_start + ch;
828                 for(x = 0; x < 2*nb_freq; x++) {
829                     double w= (x-nb_freq)*(1.0/nb_freq);
830                     data[ch][x]= s->sample_array[i]*(1.0-w*w);
831                     i += channels;
832                     if (i >= SAMPLE_ARRAY_SIZE)
833                         i -= SAMPLE_ARRAY_SIZE;
834                 }
835                 av_rdft_calc(s->rdft, data[ch]);
836             }
837             // least efficient way to do this; we should of course access the data directly, but it's more than fast enough
838             for(y=0; y<s->height; y++){
839                 double w= 1/sqrt(nb_freq);
840                 int a= sqrt(w*sqrt(data[0][2*y+0]*data[0][2*y+0] + data[0][2*y+1]*data[0][2*y+1]));
841                 int b= (nb_display_channels == 2 ) ? sqrt(w*sqrt(data[1][2*y+0]*data[1][2*y+0]
842                        + data[1][2*y+1]*data[1][2*y+1])) : a;
843                 a= FFMIN(a,255);
844                 b= FFMIN(b,255);
845                 fgcolor = SDL_MapRGB(screen->format, a, b, (a+b)/2);
846
847                 fill_rectangle(screen,
848                             s->xpos, s->height-y, 1, 1,
849                             fgcolor);
850             }
851         }
852         SDL_UpdateRect(screen, s->xpos, s->ytop, 1, s->height);
853         s->xpos++;
854         if(s->xpos >= s->width)
855             s->xpos= s->xleft;
856     }
857 }
858
859 static void stream_close(VideoState *is)
860 {
861     VideoPicture *vp;
862     int i;
863     /* XXX: use a special url_shutdown call to abort parse cleanly */
864     is->abort_request = 1;
865     SDL_WaitThread(is->read_tid, NULL);
866     SDL_WaitThread(is->refresh_tid, NULL);
867
868     /* free all pictures */
869     for(i=0;i<VIDEO_PICTURE_QUEUE_SIZE; i++) {
870         vp = &is->pictq[i];
871 #if CONFIG_AVFILTER
872         if (vp->picref) {
873             avfilter_unref_buffer(vp->picref);
874             vp->picref = NULL;
875         }
876 #endif
877         if (vp->bmp) {
878             SDL_FreeYUVOverlay(vp->bmp);
879             vp->bmp = NULL;
880         }
881     }
882     SDL_DestroyMutex(is->pictq_mutex);
883     SDL_DestroyCond(is->pictq_cond);
884     SDL_DestroyMutex(is->subpq_mutex);
885     SDL_DestroyCond(is->subpq_cond);
886 #if !CONFIG_AVFILTER
887     if (is->img_convert_ctx)
888         sws_freeContext(is->img_convert_ctx);
889 #endif
890     av_free(is);
891 }
892
893 static void do_exit(void)
894 {
895     if (cur_stream) {
896         stream_close(cur_stream);
897         cur_stream = NULL;
898     }
899     uninit_opts();
900 #if CONFIG_AVFILTER
901     avfilter_uninit();
902 #endif
903     if (show_status)
904         printf("\n");
905     SDL_Quit();
906     av_log(NULL, AV_LOG_QUIET, "%s", "");
907     exit(0);
908 }
909
910 static int video_open(VideoState *is){
911     int flags = SDL_HWSURFACE|SDL_ASYNCBLIT|SDL_HWACCEL;
912     int w,h;
913
914     if(is_full_screen) flags |= SDL_FULLSCREEN;
915     else               flags |= SDL_RESIZABLE;
916
917     if (is_full_screen && fs_screen_width) {
918         w = fs_screen_width;
919         h = fs_screen_height;
920     } else if(!is_full_screen && screen_width){
921         w = screen_width;
922         h = screen_height;
923 #if CONFIG_AVFILTER
924     }else if (is->out_video_filter && is->out_video_filter->inputs[0]){
925         w = is->out_video_filter->inputs[0]->w;
926         h = is->out_video_filter->inputs[0]->h;
927 #else
928     }else if (is->video_st && is->video_st->codec->width){
929         w = is->video_st->codec->width;
930         h = is->video_st->codec->height;
931 #endif
932     } else {
933         w = 640;
934         h = 480;
935     }
936     if(screen && is->width == screen->w && screen->w == w
937        && is->height== screen->h && screen->h == h)
938         return 0;
939
940 #ifndef __APPLE__
941     screen = SDL_SetVideoMode(w, h, 0, flags);
942 #else
943     /* setting bits_per_pixel = 0 or 32 causes blank video on OS X */
944     screen = SDL_SetVideoMode(w, h, 24, flags);
945 #endif
946     if (!screen) {
947         fprintf(stderr, "SDL: could not set video mode - exiting\n");
948         do_exit();
949     }
950     if (!window_title)
951         window_title = input_filename;
952     SDL_WM_SetCaption(window_title, window_title);
953
954     is->width = screen->w;
955     is->height = screen->h;
956
957     return 0;
958 }
959
960 /* display the current picture, if any */
961 static void video_display(VideoState *is)
962 {
963     if(!screen)
964         video_open(cur_stream);
965     if (is->audio_st && is->show_mode != SHOW_MODE_VIDEO)
966         video_audio_display(is);
967     else if (is->video_st)
968         video_image_display(is);
969 }
970
971 static int refresh_thread(void *opaque)
972 {
973     VideoState *is= opaque;
974     while(!is->abort_request){
975         SDL_Event event;
976         event.type = FF_REFRESH_EVENT;
977         event.user.data1 = opaque;
978         if(!is->refresh){
979             is->refresh=1;
980             SDL_PushEvent(&event);
981         }
982         // FIXME: ideally we should wait for the correct time, but SDL's event passing is so slow it would be silly
983         usleep(is->audio_st && is->show_mode != SHOW_MODE_VIDEO ? rdftspeed*1000 : 5000);
984     }
985     return 0;
986 }
987
988 /* get the current audio clock value */
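/* audio_clock is the pts at the end of the last decoded chunk; subtracting the
   time represented by data not yet handed to SDL approximates what is heard now. */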
989 static double get_audio_clock(VideoState *is)
990 {
991     double pts;
992     int hw_buf_size, bytes_per_sec;
993     pts = is->audio_clock;
994     hw_buf_size = audio_write_get_buf_size(is);
995     bytes_per_sec = 0;
996     if (is->audio_st) {
997         bytes_per_sec = is->audio_st->codec->sample_rate *
998             2 * is->audio_st->codec->channels;
999     }
1000     if (bytes_per_sec)
1001         pts -= (double)hw_buf_size / bytes_per_sec;
1002     return pts;
1003 }
1004
1005 /* get the current video clock value */
1006 static double get_video_clock(VideoState *is)
1007 {
1008     if (is->paused) {
1009         return is->video_current_pts;
1010     } else {
1011         return is->video_current_pts_drift + av_gettime() / 1000000.0;
1012     }
1013 }
1014
1015 /* get the current external clock value */
1016 static double get_external_clock(VideoState *is)
1017 {
1018     int64_t ti;
1019     ti = av_gettime();
1020     return is->external_clock + ((ti - is->external_clock_time) * 1e-6);
1021 }
1022
1023 /* get the current master clock value */
1024 static double get_master_clock(VideoState *is)
1025 {
1026     double val;
1027
1028     if (is->av_sync_type == AV_SYNC_VIDEO_MASTER) {
1029         if (is->video_st)
1030             val = get_video_clock(is);
1031         else
1032             val = get_audio_clock(is);
1033     } else if (is->av_sync_type == AV_SYNC_AUDIO_MASTER) {
1034         if (is->audio_st)
1035             val = get_audio_clock(is);
1036         else
1037             val = get_video_clock(is);
1038     } else {
1039         val = get_external_clock(is);
1040     }
1041     return val;
1042 }
1043
1044 /* seek in the stream */
1045 static void stream_seek(VideoState *is, int64_t pos, int64_t rel, int seek_by_bytes)
1046 {
1047     if (!is->seek_req) {
1048         is->seek_pos = pos;
1049         is->seek_rel = rel;
1050         is->seek_flags &= ~AVSEEK_FLAG_BYTE;
1051         if (seek_by_bytes)
1052             is->seek_flags |= AVSEEK_FLAG_BYTE;
1053         is->seek_req = 1;
1054     }
1055 }
1056
1057 /* pause or resume the video */
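/* When resuming from pause, frame_timer and the video clock drift are re-based
   on the current time so the time spent paused does not count as lateness. */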
1058 static void stream_toggle_pause(VideoState *is)
1059 {
1060     if (is->paused) {
1061         is->frame_timer += av_gettime() / 1000000.0 + is->video_current_pts_drift - is->video_current_pts;
1062         if(is->read_pause_return != AVERROR(ENOSYS)){
1063             is->video_current_pts = is->video_current_pts_drift + av_gettime() / 1000000.0;
1064         }
1065         is->video_current_pts_drift = is->video_current_pts - av_gettime() / 1000000.0;
1066     }
1067     is->paused = !is->paused;
1068 }
1069
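/* Estimate when the frame with the given pts should be shown: start from the
   nominal inter-frame delay and, when video is not the master clock, stretch
   or shrink that delay (drop to 0 or double it) so the video clock tracks the
   master clock. The returned value is an absolute time in seconds. */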
1070 static double compute_target_time(double frame_current_pts, VideoState *is)
1071 {
1072     double delay, sync_threshold, diff;
1073
1074     /* compute nominal delay */
1075     delay = frame_current_pts - is->frame_last_pts;
1076     if (delay <= 0 || delay >= 10.0) {
1077         /* if incorrect delay, use previous one */
1078         delay = is->frame_last_delay;
1079     } else {
1080         is->frame_last_delay = delay;
1081     }
1082     is->frame_last_pts = frame_current_pts;
1083
1084     /* update delay to follow master synchronisation source */
1085     if (((is->av_sync_type == AV_SYNC_AUDIO_MASTER && is->audio_st) ||
1086          is->av_sync_type == AV_SYNC_EXTERNAL_CLOCK)) {
1087         /* if video is slave, we try to correct big delays by
1088            duplicating or deleting a frame */
1089         diff = get_video_clock(is) - get_master_clock(is);
1090
1091         /* skip or repeat frame. We take into account the
1092            delay to compute the threshold. I still don't know
1093            if it is the best guess */
1094         sync_threshold = FFMAX(AV_SYNC_THRESHOLD, delay);
1095         if (fabs(diff) < AV_NOSYNC_THRESHOLD) {
1096             if (diff <= -sync_threshold)
1097                 delay = 0;
1098             else if (diff >= sync_threshold)
1099                 delay = 2 * delay;
1100         }
1101     }
1102     is->frame_timer += delay;
1103
1104     av_dlog(NULL, "video: delay=%0.3f pts=%0.3f A-V=%f\n",
1105             delay, frame_current_pts, -diff);
1106
1107     return is->frame_timer;
1108 }
1109
1110 /* called to display each frame */
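/* Pops the next picture once its target_clock has been reached, drops late
   frames when framedrop is enabled, releases expired subtitles, and prints the
   periodic status line. Driven by FF_REFRESH_EVENT from refresh_thread(). */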
1111 static void video_refresh(void *opaque)
1112 {
1113     VideoState *is = opaque;
1114     VideoPicture *vp;
1115
1116     SubPicture *sp, *sp2;
1117
1118     if (is->video_st) {
1119 retry:
1120         if (is->pictq_size == 0) {
1121             // nothing to do, no picture to display in the queue
1122         } else {
1123             double time= av_gettime()/1000000.0;
1124             double next_target;
1125             /* dequeue the picture */
1126             vp = &is->pictq[is->pictq_rindex];
1127
1128             if(time < vp->target_clock)
1129                 return;
1130             /* update current video pts */
1131             is->video_current_pts = vp->pts;
1132             is->video_current_pts_drift = is->video_current_pts - time;
1133             is->video_current_pos = vp->pos;
1134             if(is->pictq_size > 1){
1135                 VideoPicture *nextvp= &is->pictq[(is->pictq_rindex+1)%VIDEO_PICTURE_QUEUE_SIZE];
1136                 assert(nextvp->target_clock >= vp->target_clock);
1137                 next_target= nextvp->target_clock;
1138             }else{
1139                 next_target= vp->target_clock + is->video_clock - vp->pts; //FIXME pass durations cleanly
1140             }
1141             if((framedrop>0 || (framedrop && is->audio_st)) && time > next_target){
1142                 is->skip_frames *= 1.0 + FRAME_SKIP_FACTOR;
1143                 if(is->pictq_size > 1 || time > next_target + 0.5){
1144                     /* update queue size and signal for next picture */
1145                     if (++is->pictq_rindex == VIDEO_PICTURE_QUEUE_SIZE)
1146                         is->pictq_rindex = 0;
1147
1148                     SDL_LockMutex(is->pictq_mutex);
1149                     is->pictq_size--;
1150                     SDL_CondSignal(is->pictq_cond);
1151                     SDL_UnlockMutex(is->pictq_mutex);
1152                     goto retry;
1153                 }
1154             }
1155
1156             if(is->subtitle_st) {
1157                 if (is->subtitle_stream_changed) {
1158                     SDL_LockMutex(is->subpq_mutex);
1159
1160                     while (is->subpq_size) {
1161                         free_subpicture(&is->subpq[is->subpq_rindex]);
1162
1163                         /* update queue size and signal for next picture */
1164                         if (++is->subpq_rindex == SUBPICTURE_QUEUE_SIZE)
1165                             is->subpq_rindex = 0;
1166
1167                         is->subpq_size--;
1168                     }
1169                     is->subtitle_stream_changed = 0;
1170
1171                     SDL_CondSignal(is->subpq_cond);
1172                     SDL_UnlockMutex(is->subpq_mutex);
1173                 } else {
1174                     if (is->subpq_size > 0) {
1175                         sp = &is->subpq[is->subpq_rindex];
1176
1177                         if (is->subpq_size > 1)
1178                             sp2 = &is->subpq[(is->subpq_rindex + 1) % SUBPICTURE_QUEUE_SIZE];
1179                         else
1180                             sp2 = NULL;
1181
1182                         if ((is->video_current_pts > (sp->pts + ((float) sp->sub.end_display_time / 1000)))
1183                                 || (sp2 && is->video_current_pts > (sp2->pts + ((float) sp2->sub.start_display_time / 1000))))
1184                         {
1185                             free_subpicture(sp);
1186
1187                             /* update queue size and signal for next picture */
1188                             if (++is->subpq_rindex == SUBPICTURE_QUEUE_SIZE)
1189                                 is->subpq_rindex = 0;
1190
1191                             SDL_LockMutex(is->subpq_mutex);
1192                             is->subpq_size--;
1193                             SDL_CondSignal(is->subpq_cond);
1194                             SDL_UnlockMutex(is->subpq_mutex);
1195                         }
1196                     }
1197                 }
1198             }
1199
1200             /* display picture */
1201             if (!display_disable)
1202                 video_display(is);
1203
1204             /* update queue size and signal for next picture */
1205             if (++is->pictq_rindex == VIDEO_PICTURE_QUEUE_SIZE)
1206                 is->pictq_rindex = 0;
1207
1208             SDL_LockMutex(is->pictq_mutex);
1209             is->pictq_size--;
1210             SDL_CondSignal(is->pictq_cond);
1211             SDL_UnlockMutex(is->pictq_mutex);
1212         }
1213     } else if (is->audio_st) {
1214         /* draw the next audio frame */
1215
1216         /* if there is only an audio stream, then display the audio bars (better
1217            than nothing, just to test the implementation) */
1218
1219         /* display picture */
1220         if (!display_disable)
1221             video_display(is);
1222     }
1223     if (show_status) {
1224         static int64_t last_time;
1225         int64_t cur_time;
1226         int aqsize, vqsize, sqsize;
1227         double av_diff;
1228
1229         cur_time = av_gettime();
1230         if (!last_time || (cur_time - last_time) >= 30000) {
1231             aqsize = 0;
1232             vqsize = 0;
1233             sqsize = 0;
1234             if (is->audio_st)
1235                 aqsize = is->audioq.size;
1236             if (is->video_st)
1237                 vqsize = is->videoq.size;
1238             if (is->subtitle_st)
1239                 sqsize = is->subtitleq.size;
1240             av_diff = 0;
1241             if (is->audio_st && is->video_st)
1242                 av_diff = get_audio_clock(is) - get_video_clock(is);
1243             printf("%7.2f A-V:%7.3f s:%3.1f aq=%5dKB vq=%5dKB sq=%5dB f=%"PRId64"/%"PRId64"   \r",
1244                    get_master_clock(is),
1245                    av_diff,
1246                    FFMAX(is->skip_frames-1, 0),
1247                    aqsize / 1024,
1248                    vqsize / 1024,
1249                    sqsize,
1250                    is->video_st ? is->video_st->codec->pts_correction_num_faulty_dts : 0,
1251                    is->video_st ? is->video_st->codec->pts_correction_num_faulty_pts : 0);
1252             fflush(stdout);
1253             last_time = cur_time;
1254         }
1255     }
1256 }
1257
1258 /* allocate a picture (needs to be done in the main thread to avoid
1259    potential locking problems) */
1260 static void alloc_picture(void *opaque)
1261 {
1262     VideoState *is = opaque;
1263     VideoPicture *vp;
1264
1265     vp = &is->pictq[is->pictq_windex];
1266
1267     if (vp->bmp)
1268         SDL_FreeYUVOverlay(vp->bmp);
1269
1270 #if CONFIG_AVFILTER
1271     if (vp->picref)
1272         avfilter_unref_buffer(vp->picref);
1273     vp->picref = NULL;
1274
1275     vp->width   = is->out_video_filter->inputs[0]->w;
1276     vp->height  = is->out_video_filter->inputs[0]->h;
1277     vp->pix_fmt = is->out_video_filter->inputs[0]->format;
1278 #else
1279     vp->width   = is->video_st->codec->width;
1280     vp->height  = is->video_st->codec->height;
1281     vp->pix_fmt = is->video_st->codec->pix_fmt;
1282 #endif
1283
1284     vp->bmp = SDL_CreateYUVOverlay(vp->width, vp->height,
1285                                    SDL_YV12_OVERLAY,
1286                                    screen);
1287     if (!vp->bmp || vp->bmp->pitches[0] < vp->width) {
1288         /* SDL allocates a buffer smaller than requested if the video
1289          * overlay hardware is unable to support the requested size. */
1290         fprintf(stderr, "Error: the video system does not support an image\n"
1291                         "size of %dx%d pixels. Try using -lowres or -vf \"scale=w:h\"\n"
1292                         "to reduce the image size.\n", vp->width, vp->height );
1293         do_exit();
1294     }
1295
1296     SDL_LockMutex(is->pictq_mutex);
1297     vp->allocated = 1;
1298     SDL_CondSignal(is->pictq_cond);
1299     SDL_UnlockMutex(is->pictq_mutex);
1300 }
1301
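/* Add a decoded frame to the picture queue: wait for a free slot, ask the
   main thread (via FF_ALLOC_EVENT) to (re)allocate the SDL overlay if needed,
   then copy/convert the frame into the overlay and timestamp it. */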
1302 static int queue_picture(VideoState *is, AVFrame *src_frame, double pts1, int64_t pos)
1303 {
1304     VideoPicture *vp;
1305     double frame_delay, pts = pts1;
1306
1307     /* compute the exact PTS for the picture if it is omitted in the stream
1308      * pts1 is the dts of the pkt / pts of the frame */
1309     if (pts != 0) {
1310         /* update video clock with pts, if present */
1311         is->video_clock = pts;
1312     } else {
1313         pts = is->video_clock;
1314     }
1315     /* update video clock for next frame */
1316     frame_delay = av_q2d(is->video_st->codec->time_base);
1317     /* for MPEG2, the frame can be repeated, so we update the
1318        clock accordingly */
1319     frame_delay += src_frame->repeat_pict * (frame_delay * 0.5);
1320     is->video_clock += frame_delay;
1321
1322 #if defined(DEBUG_SYNC) && 0
1323     printf("frame_type=%c clock=%0.3f pts=%0.3f\n",
1324            av_get_picture_type_char(src_frame->pict_type), pts, pts1);
1325 #endif
1326
1327     /* wait until we have space to put a new picture */
1328     SDL_LockMutex(is->pictq_mutex);
1329
1330     if(is->pictq_size>=VIDEO_PICTURE_QUEUE_SIZE && !is->refresh)
1331         is->skip_frames= FFMAX(1.0 - FRAME_SKIP_FACTOR, is->skip_frames * (1.0-FRAME_SKIP_FACTOR));
1332
1333     while (is->pictq_size >= VIDEO_PICTURE_QUEUE_SIZE &&
1334            !is->videoq.abort_request) {
1335         SDL_CondWait(is->pictq_cond, is->pictq_mutex);
1336     }
1337     SDL_UnlockMutex(is->pictq_mutex);
1338
1339     if (is->videoq.abort_request)
1340         return -1;
1341
1342     vp = &is->pictq[is->pictq_windex];
1343
1344     /* alloc or resize hardware picture buffer */
1345     if (!vp->bmp ||
1346 #if CONFIG_AVFILTER
1347         vp->width  != is->out_video_filter->inputs[0]->w ||
1348         vp->height != is->out_video_filter->inputs[0]->h) {
1349 #else
1350         vp->width != is->video_st->codec->width ||
1351         vp->height != is->video_st->codec->height) {
1352 #endif
1353         SDL_Event event;
1354
1355         vp->allocated = 0;
1356
1357         /* the allocation must be done in the main thread to avoid
1358            locking problems */
1359         event.type = FF_ALLOC_EVENT;
1360         event.user.data1 = is;
1361         SDL_PushEvent(&event);
1362
1363         /* wait until the picture is allocated */
1364         SDL_LockMutex(is->pictq_mutex);
1365         while (!vp->allocated && !is->videoq.abort_request) {
1366             SDL_CondWait(is->pictq_cond, is->pictq_mutex);
1367         }
1368         SDL_UnlockMutex(is->pictq_mutex);
1369
1370         if (is->videoq.abort_request)
1371             return -1;
1372     }
1373
1374     /* if the frame is not skipped, then display it */
1375     if (vp->bmp) {
1376         AVPicture pict;
1377 #if CONFIG_AVFILTER
1378         if(vp->picref)
1379             avfilter_unref_buffer(vp->picref);
1380         vp->picref = src_frame->opaque;
1381 #endif
1382
1383         /* get a pointer to the bitmap */
1384         SDL_LockYUVOverlay (vp->bmp);
1385
1386         memset(&pict,0,sizeof(AVPicture));
1387         pict.data[0] = vp->bmp->pixels[0];
1388         pict.data[1] = vp->bmp->pixels[2];
1389         pict.data[2] = vp->bmp->pixels[1];
1390
1391         pict.linesize[0] = vp->bmp->pitches[0];
1392         pict.linesize[1] = vp->bmp->pitches[2];
1393         pict.linesize[2] = vp->bmp->pitches[1];
1394
1395 #if CONFIG_AVFILTER
1396         //FIXME use direct rendering
1397         av_picture_copy(&pict, (AVPicture *)src_frame,
1398                         vp->pix_fmt, vp->width, vp->height);
1399 #else
1400         sws_flags = av_get_int(sws_opts, "sws_flags", NULL);
1401         is->img_convert_ctx = sws_getCachedContext(is->img_convert_ctx,
1402             vp->width, vp->height, vp->pix_fmt, vp->width, vp->height,
1403             PIX_FMT_YUV420P, sws_flags, NULL, NULL, NULL);
1404         if (is->img_convert_ctx == NULL) {
1405             fprintf(stderr, "Cannot initialize the conversion context\n");
1406             exit(1);
1407         }
1408         sws_scale(is->img_convert_ctx, src_frame->data, src_frame->linesize,
1409                   0, vp->height, pict.data, pict.linesize);
1410 #endif
1411         /* update the bitmap content */
1412         SDL_UnlockYUVOverlay(vp->bmp);
1413
1414         vp->pts = pts;
1415         vp->pos = pos;
1416
1417         /* now we can update the picture count */
1418         if (++is->pictq_windex == VIDEO_PICTURE_QUEUE_SIZE)
1419             is->pictq_windex = 0;
1420         SDL_LockMutex(is->pictq_mutex);
1421         vp->target_clock= compute_target_time(vp->pts, is);
1422
1423         is->pictq_size++;
1424         SDL_UnlockMutex(is->pictq_mutex);
1425     }
1426     return 0;
1427 }
1428
1429 static int get_video_frame(VideoState *is, AVFrame *frame, int64_t *pts, AVPacket *pkt)
1430 {
1431     int got_picture, i;
1432
1433     if (packet_queue_get(&is->videoq, pkt, 1) < 0)
1434         return -1;
1435
1436     if (pkt->data == flush_pkt.data) {
1437         avcodec_flush_buffers(is->video_st->codec);
1438
1439         SDL_LockMutex(is->pictq_mutex);
1440         // Make sure there are no long delay timers (ideally we should just flush the queue, but that's harder)
1441         for (i = 0; i < VIDEO_PICTURE_QUEUE_SIZE; i++) {
1442             is->pictq[i].target_clock= 0;
1443         }
1444         while (is->pictq_size && !is->videoq.abort_request) {
1445             SDL_CondWait(is->pictq_cond, is->pictq_mutex);
1446         }
1447         is->video_current_pos = -1;
1448         SDL_UnlockMutex(is->pictq_mutex);
1449
1450         is->frame_last_pts = AV_NOPTS_VALUE;
1451         is->frame_last_delay = 0;
1452         is->frame_timer = (double)av_gettime() / 1000000.0;
1453         is->skip_frames = 1;
1454         is->skip_frames_index = 0;
1455         return 0;
1456     }
1457
1458     avcodec_decode_video2(is->video_st->codec, frame, &got_picture, pkt);
1459
1460     if (got_picture) {
1461         if (decoder_reorder_pts == -1) {
1462             *pts = frame->best_effort_timestamp;
1463         } else if (decoder_reorder_pts) {
1464             *pts = frame->pkt_pts;
1465         } else {
1466             *pts = frame->pkt_dts;
1467         }
1468
1469         if (*pts == AV_NOPTS_VALUE) {
1470             *pts = 0;
1471         }
1472
1473         is->skip_frames_index += 1;
1474         if(is->skip_frames_index >= is->skip_frames){
1475             is->skip_frames_index -= FFMAX(is->skip_frames, 1.0);
1476             return 1;
1477         }
1478
1479     }
1480     return 0;
1481 }
1482
1483 #if CONFIG_AVFILTER
1484 typedef struct {
1485     VideoState *is;
1486     AVFrame *frame;
1487     int use_dr1;
1488 } FilterPriv;
1489
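/* Custom get_buffer() used when the decoder supports direct rendering (DR1):
   the decoder writes straight into an AVFilterBufferRef obtained from the
   filter graph, so the frame does not need to be copied before filtering. */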
1490 static int input_get_buffer(AVCodecContext *codec, AVFrame *pic)
1491 {
1492     AVFilterContext *ctx = codec->opaque;
1493     AVFilterBufferRef  *ref;
1494     int perms = AV_PERM_WRITE;
1495     int i, w, h, stride[4];
1496     unsigned edge;
1497     int pixel_size;
1498
1499     av_assert0(codec->flags & CODEC_FLAG_EMU_EDGE);
1500
1501     if (codec->codec->capabilities & CODEC_CAP_NEG_LINESIZES)
1502         perms |= AV_PERM_NEG_LINESIZES;
1503
1504     if(pic->buffer_hints & FF_BUFFER_HINTS_VALID) {
1505         if(pic->buffer_hints & FF_BUFFER_HINTS_READABLE) perms |= AV_PERM_READ;
1506         if(pic->buffer_hints & FF_BUFFER_HINTS_PRESERVE) perms |= AV_PERM_PRESERVE;
1507         if(pic->buffer_hints & FF_BUFFER_HINTS_REUSABLE) perms |= AV_PERM_REUSE2;
1508     }
1509     if(pic->reference) perms |= AV_PERM_READ | AV_PERM_PRESERVE;
1510
1511     w = codec->width;
1512     h = codec->height;
1513
1514     if(av_image_check_size(w, h, 0, codec))
1515         return -1;
1516
1517     avcodec_align_dimensions2(codec, &w, &h, stride);
1518     edge = codec->flags & CODEC_FLAG_EMU_EDGE ? 0 : avcodec_get_edge_width();
1519     w += edge << 1;
1520     h += edge << 1;
1521
1522     if(!(ref = avfilter_get_video_buffer(ctx->outputs[0], perms, w, h)))
1523         return -1;
1524
1525     pixel_size = av_pix_fmt_descriptors[ref->format].comp[0].step_minus1+1;
1526     ref->video->w = codec->width;
1527     ref->video->h = codec->height;
1528     for(i = 0; i < 4; i ++) {
1529         unsigned hshift = (i == 1 || i == 2) ? av_pix_fmt_descriptors[ref->format].log2_chroma_w : 0;
1530         unsigned vshift = (i == 1 || i == 2) ? av_pix_fmt_descriptors[ref->format].log2_chroma_h : 0;
1531
1532         if (ref->data[i]) {
1533             ref->data[i]    += ((edge * pixel_size) >> hshift) + ((edge * ref->linesize[i]) >> vshift);
1534         }
1535         pic->data[i]     = ref->data[i];
1536         pic->linesize[i] = ref->linesize[i];
1537     }
1538     pic->opaque = ref;
1539     pic->age    = INT_MAX;
1540     pic->type   = FF_BUFFER_TYPE_USER;
1541     pic->reordered_opaque = codec->reordered_opaque;
1542     if(codec->pkt) pic->pkt_pts = codec->pkt->pts;
1543     else           pic->pkt_pts = AV_NOPTS_VALUE;
1544     return 0;
1545 }
1546
1547 static void input_release_buffer(AVCodecContext *codec, AVFrame *pic)
1548 {
1549     memset(pic->data, 0, sizeof(pic->data));
1550     avfilter_unref_buffer(pic->opaque);
1551 }
1552
1553 static int input_reget_buffer(AVCodecContext *codec, AVFrame *pic)
1554 {
1555     AVFilterBufferRef *ref = pic->opaque;
1556
1557     if (pic->data[0] == NULL) {
1558         pic->buffer_hints |= FF_BUFFER_HINTS_READABLE;
1559         return codec->get_buffer(codec, pic);
1560     }
1561
1562     if ((codec->width != ref->video->w) || (codec->height != ref->video->h) ||
1563         (codec->pix_fmt != ref->format)) {
1564         av_log(codec, AV_LOG_ERROR, "Picture properties changed.\n");
1565         return -1;
1566     }
1567
1568     pic->reordered_opaque = codec->reordered_opaque;
1569     if(codec->pkt) pic->pkt_pts = codec->pkt->pts;
1570     else           pic->pkt_pts = AV_NOPTS_VALUE;
1571     return 0;
1572 }
1573
1574 static int input_init(AVFilterContext *ctx, const char *args, void *opaque)
1575 {
1576     FilterPriv *priv = ctx->priv;
1577     AVCodecContext *codec;
1578     if(!opaque) return -1;
1579
1580     priv->is = opaque;
1581     codec    = priv->is->video_st->codec;
1582     codec->opaque = ctx;
1583     if((codec->codec->capabilities & CODEC_CAP_DR1)
1584     ) {
1585         av_assert0(codec->flags & CODEC_FLAG_EMU_EDGE);
1586         priv->use_dr1 = 1;
1587         codec->get_buffer     = input_get_buffer;
1588         codec->release_buffer = input_release_buffer;
1589         codec->reget_buffer   = input_reget_buffer;
1590         codec->thread_safe_callbacks = 1;
1591     }
1592
1593     priv->frame = avcodec_alloc_frame();
1594
1595     return 0;
1596 }
1597
1598 static void input_uninit(AVFilterContext *ctx)
1599 {
1600     FilterPriv *priv = ctx->priv;
1601     av_free(priv->frame);
1602 }
1603
1604 static int input_request_frame(AVFilterLink *link)
1605 {
1606     FilterPriv *priv = link->src->priv;
1607     AVFilterBufferRef *picref;
1608     int64_t pts = 0;
1609     AVPacket pkt;
1610     int ret;
1611
1612     while (!(ret = get_video_frame(priv->is, priv->frame, &pts, &pkt)))
1613         av_free_packet(&pkt);
1614     if (ret < 0)
1615         return -1;
1616
1617     if(priv->use_dr1 && priv->frame->opaque) {
1618         picref = avfilter_ref_buffer(priv->frame->opaque, ~0);
1619     } else {
1620         picref = avfilter_get_video_buffer(link, AV_PERM_WRITE, link->w, link->h);
1621         av_image_copy(picref->data, picref->linesize,
1622                       priv->frame->data, priv->frame->linesize,
1623                       picref->format, link->w, link->h);
1624     }
1625     av_free_packet(&pkt);
1626
1627     avfilter_copy_frame_props(picref, priv->frame);
1628     picref->pts = pts;
1629
1630     avfilter_start_frame(link, picref);
1631     avfilter_draw_slice(link, 0, link->h, 1);
1632     avfilter_end_frame(link);
1633
1634     return 0;
1635 }
1636
1637 static int input_query_formats(AVFilterContext *ctx)
1638 {
1639     FilterPriv *priv = ctx->priv;
1640     enum PixelFormat pix_fmts[] = {
1641         priv->is->video_st->codec->pix_fmt, PIX_FMT_NONE
1642     };
1643
1644     avfilter_set_common_pixel_formats(ctx, avfilter_make_format_list(pix_fmts));
1645     return 0;
1646 }
1647
1648 static int input_config_props(AVFilterLink *link)
1649 {
1650     FilterPriv *priv  = link->src->priv;
1651     AVCodecContext *c = priv->is->video_st->codec;
1652
1653     link->w = c->width;
1654     link->h = c->height;
1655     link->time_base = priv->is->video_st->time_base;
1656
1657     return 0;
1658 }
1659
1660 static AVFilter input_filter =
1661 {
1662     .name      = "ffplay_input",
1663
1664     .priv_size = sizeof(FilterPriv),
1665
1666     .init      = input_init,
1667     .uninit    = input_uninit,
1668
1669     .query_formats = input_query_formats,
1670
1671     .inputs    = (AVFilterPad[]) {{ .name = NULL }},
1672     .outputs   = (AVFilterPad[]) {{ .name = "default",
1673                                     .type = AVMEDIA_TYPE_VIDEO,
1674                                     .request_frame = input_request_frame,
1675                                     .config_props  = input_config_props, },
1676                                   { .name = NULL }},
1677 };
1678
1679 static int configure_video_filters(AVFilterGraph *graph, VideoState *is, const char *vfilters)
1680 {
1681     char sws_flags_str[128];
1682     int ret;
1683     enum PixelFormat pix_fmts[] = { PIX_FMT_YUV420P, PIX_FMT_NONE };
1684     AVFilterContext *filt_src = NULL, *filt_out = NULL;
1685     snprintf(sws_flags_str, sizeof(sws_flags_str), "flags=%d", sws_flags);
1686     graph->scale_sws_opts = av_strdup(sws_flags_str);
1687
1688     if ((ret = avfilter_graph_create_filter(&filt_src, &input_filter, "src",
1689                                             NULL, is, graph)) < 0)
1690         return ret;
1691     if ((ret = avfilter_graph_create_filter(&filt_out, avfilter_get_by_name("buffersink"), "out",
1692                                             NULL, pix_fmts, graph)) < 0)
1693         return ret;
1694
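         /* splice the user-supplied filter chain between the ffplay source and
            the buffersink: the source output is labelled "in", the sink input
            "out", matching the usual -vf filter graph syntax */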
1695     if(vfilters) {
1696         AVFilterInOut *outputs = avfilter_inout_alloc();
1697         AVFilterInOut *inputs  = avfilter_inout_alloc();
1698
1699         outputs->name    = av_strdup("in");
1700         outputs->filter_ctx = filt_src;
1701         outputs->pad_idx = 0;
1702         outputs->next    = NULL;
1703
1704         inputs->name    = av_strdup("out");
1705         inputs->filter_ctx = filt_out;
1706         inputs->pad_idx = 0;
1707         inputs->next    = NULL;
1708
1709         if ((ret = avfilter_graph_parse(graph, vfilters, &inputs, &outputs, NULL)) < 0)
1710             return ret;
1711         av_freep(&vfilters);
1712     } else {
1713         if ((ret = avfilter_link(filt_src, 0, filt_out, 0)) < 0)
1714             return ret;
1715     }
1716
1717     if ((ret = avfilter_graph_config(graph, NULL)) < 0)
1718         return ret;
1719
1720     is->out_video_filter = filt_out;
1721
1722     return ret;
1723 }
1724
1725 #endif  /* CONFIG_AVFILTER */
1726
1727 static int video_thread(void *arg)
1728 {
1729     VideoState *is = arg;
1730     AVFrame *frame= avcodec_alloc_frame();
1731     int64_t pts_int = AV_NOPTS_VALUE, pos = -1;
1732     double pts;
1733     int ret;
1734
1735 #if CONFIG_AVFILTER
1736     AVFilterGraph *graph = avfilter_graph_alloc();
1737     AVFilterContext *filt_out = NULL;
1738
1739     if ((ret = configure_video_filters(graph, is, vfilters)) < 0)
1740         goto the_end;
1741     filt_out = is->out_video_filter;
1742 #endif
1743
1744     for(;;) {
1745 #if !CONFIG_AVFILTER
1746         AVPacket pkt;
1747 #else
1748         AVFilterBufferRef *picref;
1749         AVRational tb = filt_out->inputs[0]->time_base;
1750 #endif
1751         while (is->paused && !is->videoq.abort_request)
1752             SDL_Delay(10);
1753 #if CONFIG_AVFILTER
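             /* pull the next filtered frame from the buffersink; the returned
                picref owns the frame data and is stored in frame->opaque so it
                can be released once the picture has been used */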
1754         ret = av_vsink_buffer_get_video_buffer_ref(filt_out, &picref, 0);
1755         if (picref) {
1756             avfilter_fill_frame_from_video_buffer_ref(frame, picref);
1757             pts_int = picref->pts;
1758             pos     = picref->pos;
1759             frame->opaque = picref;
1760         }
1761
1762         if (av_cmp_q(tb, is->video_st->time_base)) {
1763             av_unused int64_t pts1 = pts_int;
1764             pts_int = av_rescale_q(pts_int, tb, is->video_st->time_base);
1765             av_dlog(NULL, "video_thread(): "
1766                     "tb:%d/%d pts:%"PRId64" -> tb:%d/%d pts:%"PRId64"\n",
1767                     tb.num, tb.den, pts1,
1768                     is->video_st->time_base.num, is->video_st->time_base.den, pts_int);
1769         }
1770 #else
1771         ret = get_video_frame(is, frame, &pts_int, &pkt);
1772         pos = pkt.pos;
1773         av_free_packet(&pkt);
1774 #endif
1775
1776         if (ret < 0) goto the_end;
1777
#if CONFIG_AVFILTER
1778         if (!picref)
1779             continue;
#endif
1780
1781         pts = pts_int*av_q2d(is->video_st->time_base);
1782
1783         ret = queue_picture(is, frame, pts, pos);
1784
1785         if (ret < 0)
1786             goto the_end;
1787
1788         if (step)
1789             if (cur_stream)
1790                 stream_toggle_pause(cur_stream);
1791     }
1792  the_end:
1793 #if CONFIG_AVFILTER
1794     avfilter_graph_free(&graph);
1795 #endif
1796     av_free(frame);
1797     return 0;
1798 }
1799
1800 static int subtitle_thread(void *arg)
1801 {
1802     VideoState *is = arg;
1803     SubPicture *sp;
1804     AVPacket pkt1, *pkt = &pkt1;
1805     int got_subtitle;
1806     double pts;
1807     int i, j;
1808     int r, g, b, y, u, v, a;
1809
1810     for(;;) {
1811         while (is->paused && !is->subtitleq.abort_request) {
1812             SDL_Delay(10);
1813         }
1814         if (packet_queue_get(&is->subtitleq, pkt, 1) < 0)
1815             break;
1816
1817         if(pkt->data == flush_pkt.data){
1818             avcodec_flush_buffers(is->subtitle_st->codec);
1819             continue;
1820         }
1821         SDL_LockMutex(is->subpq_mutex);
1822         while (is->subpq_size >= SUBPICTURE_QUEUE_SIZE &&
1823                !is->subtitleq.abort_request) {
1824             SDL_CondWait(is->subpq_cond, is->subpq_mutex);
1825         }
1826         SDL_UnlockMutex(is->subpq_mutex);
1827
1828         if (is->subtitleq.abort_request)
1829             return 0;
1830
1831         sp = &is->subpq[is->subpq_windex];
1832
1833         /* NOTE: pts is the PTS of the _first_ subtitle beginning in
1834            this packet, if any */
1835         pts = 0;
1836         if (pkt->pts != AV_NOPTS_VALUE)
1837             pts = av_q2d(is->subtitle_st->time_base)*pkt->pts;
1838
1839         avcodec_decode_subtitle2(is->subtitle_st->codec, &sp->sub,
1840                                  &got_subtitle, pkt);
1841
1842         if (got_subtitle && sp->sub.format == 0) {
1843             sp->pts = pts;
1844
1845             for (i = 0; i < sp->sub.num_rects; i++)
1846             {
1847                 for (j = 0; j < sp->sub.rects[i]->nb_colors; j++)
1848                 {
1849                     RGBA_IN(r, g, b, a, (uint32_t*)sp->sub.rects[i]->pict.data[1] + j);
1850                     y = RGB_TO_Y_CCIR(r, g, b);
1851                     u = RGB_TO_U_CCIR(r, g, b, 0);
1852                     v = RGB_TO_V_CCIR(r, g, b, 0);
1853                     YUVA_OUT((uint32_t*)sp->sub.rects[i]->pict.data[1] + j, y, u, v, a);
1854                 }
1855             }
1856
1857             /* now we can update the picture count */
1858             if (++is->subpq_windex == SUBPICTURE_QUEUE_SIZE)
1859                 is->subpq_windex = 0;
1860             SDL_LockMutex(is->subpq_mutex);
1861             is->subpq_size++;
1862             SDL_UnlockMutex(is->subpq_mutex);
1863         }
1864         av_free_packet(pkt);
1865     }
1866     return 0;
1867 }
1868
1869 /* copy samples into the circular buffer used by the waveform/RDFT display */
1870 static void update_sample_display(VideoState *is, short *samples, int samples_size)
1871 {
1872     int size, len;
1873
1874     size = samples_size / sizeof(short);
1875     while (size > 0) {
1876         len = SAMPLE_ARRAY_SIZE - is->sample_array_index;
1877         if (len > size)
1878             len = size;
1879         memcpy(is->sample_array + is->sample_array_index, samples, len * sizeof(short));
1880         samples += len;
1881         is->sample_array_index += len;
1882         if (is->sample_array_index >= SAMPLE_ARRAY_SIZE)
1883             is->sample_array_index = 0;
1884         size -= len;
1885     }
1886 }
1887
1888 /* return the new audio buffer size (samples can be added or deleted
1889    to get better sync if video or the external clock is the master) */
1890 static int synchronize_audio(VideoState *is, short *samples,
1891                              int samples_size1, double pts)
1892 {
1893     int n, samples_size;
1894     double ref_clock;
1895
1896     n = 2 * is->audio_st->codec->channels;
1897     samples_size = samples_size1;
1898
1899     /* if not master, then we try to remove or add samples to correct the clock */
1900     if (((is->av_sync_type == AV_SYNC_VIDEO_MASTER && is->video_st) ||
1901          is->av_sync_type == AV_SYNC_EXTERNAL_CLOCK)) {
1902         double diff, avg_diff;
1903         int wanted_size, min_size, max_size, nb_samples;
1904
1905         ref_clock = get_master_clock(is);
1906         diff = get_audio_clock(is) - ref_clock;
1907
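             /* low-pass filter the A-V difference: audio_diff_cum is a
                geometric running sum, so cum * (1 - coef) estimates the recent
                average difference */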
1908         if (fabs(diff) < AV_NOSYNC_THRESHOLD) {
1909             is->audio_diff_cum = diff + is->audio_diff_avg_coef * is->audio_diff_cum;
1910             if (is->audio_diff_avg_count < AUDIO_DIFF_AVG_NB) {
1911                 /* not enough measurements yet for a reliable estimate */
1912                 is->audio_diff_avg_count++;
1913             } else {
1914                 /* estimate the A-V difference */
1915                 avg_diff = is->audio_diff_cum * (1.0 - is->audio_diff_avg_coef);
1916
1917                 if (fabs(avg_diff) >= is->audio_diff_threshold) {
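                         /* 'diff' seconds of drift correspond to
                            diff * sample_rate sample frames, i.e.
                            diff * sample_rate * n bytes */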
1918                     wanted_size = samples_size + ((int)(diff * is->audio_st->codec->sample_rate) * n);
1919                     nb_samples = samples_size / n;
1920
1921                     min_size = ((nb_samples * (100 - SAMPLE_CORRECTION_PERCENT_MAX)) / 100) * n;
1922                     max_size = ((nb_samples * (100 + SAMPLE_CORRECTION_PERCENT_MAX)) / 100) * n;
1923                     if (wanted_size < min_size)
1924                         wanted_size = min_size;
1925                     else if (wanted_size > max_size)
1926                         wanted_size = max_size;
1927
1928                     /* add or remove samples to correct the sync */
1929                     if (wanted_size < samples_size) {
1930                         /* remove samples */
1931                         samples_size = wanted_size;
1932                     } else if (wanted_size > samples_size) {
1933                         uint8_t *samples_end, *q;
1934                         int nb;
1935
1936                         /* add samples */
1937                         nb = (wanted_size - samples_size);
1938                         samples_end = (uint8_t *)samples + samples_size - n;
1939                         q = samples_end + n;
1940                         while (nb > 0) {
1941                             memcpy(q, samples_end, n);
1942                             q += n;
1943                             nb -= n;
1944                         }
1945                         samples_size = wanted_size;
1946                     }
1947                 }
1948 #if 0
1949                 printf("diff=%f adiff=%f sample_diff=%d apts=%0.3f vpts=%0.3f %f\n",
1950                        diff, avg_diff, samples_size - samples_size1,
1951                        is->audio_clock, is->video_clock, is->audio_diff_threshold);
1952 #endif
1953             }
1954         } else {
1955             /* too big difference: may be initial PTS errors, so
1956                reset A-V filter */
1957             is->audio_diff_avg_count = 0;
1958             is->audio_diff_cum = 0;
1959         }
1960     }
1961
1962     return samples_size;
1963 }
1964
1965 /* decode one audio frame and return its uncompressed size */
1966 static int audio_decode_frame(VideoState *is, double *pts_ptr)
1967 {
1968     AVPacket *pkt_temp = &is->audio_pkt_temp;
1969     AVPacket *pkt = &is->audio_pkt;
1970     AVCodecContext *dec= is->audio_st->codec;
1971     int n, len1, data_size;
1972     double pts;
1973
1974     for(;;) {
1975         /* NOTE: the audio packet can contain several frames */
1976         while (pkt_temp->size > 0) {
1977             data_size = sizeof(is->audio_buf1);
1978             len1 = avcodec_decode_audio3(dec,
1979                                         (int16_t *)is->audio_buf1, &data_size,
1980                                         pkt_temp);
1981             if (len1 < 0) {
1982                 /* if error, we skip the frame */
1983                 pkt_temp->size = 0;
1984                 break;
1985             }
1986
1987             pkt_temp->data += len1;
1988             pkt_temp->size -= len1;
1989             if (data_size <= 0)
1990                 continue;
1991
1992             if (dec->sample_fmt != is->audio_src_fmt) {
1993                 if (is->reformat_ctx)
1994                     av_audio_convert_free(is->reformat_ctx);
1995                 is->reformat_ctx= av_audio_convert_alloc(AV_SAMPLE_FMT_S16, 1,
1996                                                          dec->sample_fmt, 1, NULL, 0);
1997                 if (!is->reformat_ctx) {
1998                     fprintf(stderr, "Cannot convert %s sample format to %s sample format\n",
1999                         av_get_sample_fmt_name(dec->sample_fmt),
2000                         av_get_sample_fmt_name(AV_SAMPLE_FMT_S16));
2001                         break;
2002                 }
2003                 is->audio_src_fmt= dec->sample_fmt;
2004             }
2005
2006             if (is->reformat_ctx) {
2007                 const void *ibuf[6]= {is->audio_buf1};
2008                 void *obuf[6]= {is->audio_buf2};
2009                 int istride[6]= {av_get_bytes_per_sample(dec->sample_fmt)};
2010                 int ostride[6]= {2};
2011                 int len= data_size/istride[0];
2012                 if (av_audio_convert(is->reformat_ctx, obuf, ostride, ibuf, istride, len)<0) {
2013                     printf("av_audio_convert() failed\n");
2014                     break;
2015                 }
2016                 is->audio_buf= is->audio_buf2;
2017                 /* FIXME: existing code assumes that data_size equals framesize*channels*2;
2018                           remove this legacy cruft */
2019                 data_size= len*2;
2020             }else{
2021                 is->audio_buf= is->audio_buf1;
2022             }
2023
2024             /* no per-frame pts from the decoder: derive it from the running audio clock */
2025             pts = is->audio_clock;
2026             *pts_ptr = pts;
2027             n = 2 * dec->channels;
2028             is->audio_clock += (double)data_size /
2029                 (double)(n * dec->sample_rate);
2030 #ifdef DEBUG
2031             {
2032                 static double last_clock;
2033                 printf("audio: delay=%0.3f clock=%0.3f pts=%0.3f\n",
2034                        is->audio_clock - last_clock,
2035                        is->audio_clock, pts);
2036                 last_clock = is->audio_clock;
2037             }
2038 #endif
2039             return data_size;
2040         }
2041
2042         /* free the current packet */
2043         if (pkt->data)
2044             av_free_packet(pkt);
2045
2046         if (is->paused || is->audioq.abort_request) {
2047             return -1;
2048         }
2049
2050         /* read next packet */
2051         if (packet_queue_get(&is->audioq, pkt, 1) < 0)
2052             return -1;
2053         if(pkt->data == flush_pkt.data){
2054             avcodec_flush_buffers(dec);
2055             continue;
2056         }
2057
2058         pkt_temp->data = pkt->data;
2059         pkt_temp->size = pkt->size;
2060
2061         /* update the audio clock with the packet pts, if available */
2062         if (pkt->pts != AV_NOPTS_VALUE) {
2063             is->audio_clock = av_q2d(is->audio_st->time_base)*pkt->pts;
2064         }
2065     }
2066 }
2067
2068 /* prepare a new audio buffer */
2069 static void sdl_audio_callback(void *opaque, Uint8 *stream, int len)
2070 {
2071     VideoState *is = opaque;
2072     int audio_size, len1;
2073     double pts;
2074
2075     audio_callback_time = av_gettime();
2076
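         /* fill the SDL buffer of 'len' bytes; new audio is decoded only when
            the previously decoded chunk has been fully consumed, and leftovers
            are carried over between callbacks via audio_buf_index */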
2077     while (len > 0) {
2078         if (is->audio_buf_index >= is->audio_buf_size) {
2079            audio_size = audio_decode_frame(is, &pts);
2080            if (audio_size < 0) {
2081                 /* if error, just output silence */
2082                is->audio_buf = is->audio_buf1;
2083                is->audio_buf_size = 1024;
2084                memset(is->audio_buf, 0, is->audio_buf_size);
2085            } else {
2086                if (is->show_mode != SHOW_MODE_VIDEO)
2087                    update_sample_display(is, (int16_t *)is->audio_buf, audio_size);
2088                audio_size = synchronize_audio(is, (int16_t *)is->audio_buf, audio_size,
2089                                               pts);
2090                is->audio_buf_size = audio_size;
2091            }
2092            is->audio_buf_index = 0;
2093         }
2094         len1 = is->audio_buf_size - is->audio_buf_index;
2095         if (len1 > len)
2096             len1 = len;
2097         memcpy(stream, (uint8_t *)is->audio_buf + is->audio_buf_index, len1);
2098         len -= len1;
2099         stream += len1;
2100         is->audio_buf_index += len1;
2101     }
2102 }
2103
2104 /* open a given stream. Return 0 if OK */
2105 static int stream_component_open(VideoState *is, int stream_index)
2106 {
2107     AVFormatContext *ic = is->ic;
2108     AVCodecContext *avctx;
2109     AVCodec *codec;
2110     SDL_AudioSpec wanted_spec, spec;
2111     AVDictionary *opts;
2112     AVDictionaryEntry *t = NULL;
2113
2114     if (stream_index < 0 || stream_index >= ic->nb_streams)
2115         return -1;
2116     avctx = ic->streams[stream_index]->codec;
2117
2118     opts = filter_codec_opts(codec_opts, avctx->codec_id, 0);
2119
2120     /* request a downmix to at most 2 channels before opening the decoder */
2121     if (avctx->codec_type == AVMEDIA_TYPE_AUDIO) {
2122         if (avctx->channels > 0) {
2123             avctx->request_channels = FFMIN(2, avctx->channels);
2124         } else {
2125             avctx->request_channels = 2;
2126         }
2127     }
2128
2129     codec = avcodec_find_decoder(avctx->codec_id);
2130     if (!codec)
2131         return -1;
2132
2133     avctx->workaround_bugs = workaround_bugs;
2134     avctx->lowres = lowres;
2135     if(lowres) avctx->flags |= CODEC_FLAG_EMU_EDGE;
2136     avctx->idct_algo= idct;
2137     if(fast) avctx->flags2 |= CODEC_FLAG2_FAST;
2138     avctx->skip_frame= skip_frame;
2139     avctx->skip_idct= skip_idct;
2140     avctx->skip_loop_filter= skip_loop_filter;
2141     avctx->error_recognition= error_recognition;
2142     avctx->error_concealment= error_concealment;
2143     avctx->thread_count= thread_count;
2144
2145     if(codec->capabilities & CODEC_CAP_DR1)
2146         avctx->flags |= CODEC_FLAG_EMU_EDGE;
2147
2148     if (!codec ||
2149         avcodec_open2(avctx, codec, &opts) < 0)
2150         return -1;
2151     if ((t = av_dict_get(opts, "", NULL, AV_DICT_IGNORE_SUFFIX))) {
2152         av_log(NULL, AV_LOG_ERROR, "Option %s not found.\n", t->key);
2153         return AVERROR_OPTION_NOT_FOUND;
2154     }
2155
2156     /* prepare audio output */
2157     if (avctx->codec_type == AVMEDIA_TYPE_AUDIO) {
2158         if(avctx->sample_rate <= 0 || avctx->channels <= 0){
2159             fprintf(stderr, "Invalid sample rate or channel count\n");
2160             return -1;
2161         }
2162         wanted_spec.freq = avctx->sample_rate;
2163         wanted_spec.format = AUDIO_S16SYS;
2164         wanted_spec.channels = avctx->channels;
2165         wanted_spec.silence = 0;
2166         wanted_spec.samples = SDL_AUDIO_BUFFER_SIZE;
2167         wanted_spec.callback = sdl_audio_callback;
2168         wanted_spec.userdata = is;
2169         if (SDL_OpenAudio(&wanted_spec, &spec) < 0) {
2170             fprintf(stderr, "SDL_OpenAudio: %s\n", SDL_GetError());
2171             return -1;
2172         }
2173         is->audio_hw_buf_size = spec.size;
2174         is->audio_src_fmt= AV_SAMPLE_FMT_S16;
2175     }
2176
2177     ic->streams[stream_index]->discard = AVDISCARD_DEFAULT;
2178     switch(avctx->codec_type) {
2179     case AVMEDIA_TYPE_AUDIO:
2180         is->audio_stream = stream_index;
2181         is->audio_st = ic->streams[stream_index];
2182         is->audio_buf_size = 0;
2183         is->audio_buf_index = 0;
2184
2185         /* init averaging filter */
2186         is->audio_diff_avg_coef = exp(log(0.01) / AUDIO_DIFF_AVG_NB);
2187         is->audio_diff_avg_count = 0;
2188         /* since we do not have a precise enough audio FIFO fullness measure,
2189            we correct audio sync only if the error is larger than this threshold */
2190         is->audio_diff_threshold = 2.0 * SDL_AUDIO_BUFFER_SIZE / avctx->sample_rate;
2191
2192         memset(&is->audio_pkt, 0, sizeof(is->audio_pkt));
2193         packet_queue_init(&is->audioq);
2194         SDL_PauseAudio(0);
2195         break;
2196     case AVMEDIA_TYPE_VIDEO:
2197         is->video_stream = stream_index;
2198         is->video_st = ic->streams[stream_index];
2199
2200         packet_queue_init(&is->videoq);
2201         is->video_tid = SDL_CreateThread(video_thread, is);
2202         break;
2203     case AVMEDIA_TYPE_SUBTITLE:
2204         is->subtitle_stream = stream_index;
2205         is->subtitle_st = ic->streams[stream_index];
2206         packet_queue_init(&is->subtitleq);
2207
2208         is->subtitle_tid = SDL_CreateThread(subtitle_thread, is);
2209         break;
2210     default:
2211         break;
2212     }
2213     return 0;
2214 }
2215
2216 static void stream_component_close(VideoState *is, int stream_index)
2217 {
2218     AVFormatContext *ic = is->ic;
2219     AVCodecContext *avctx;
2220
2221     if (stream_index < 0 || stream_index >= ic->nb_streams)
2222         return;
2223     avctx = ic->streams[stream_index]->codec;
2224
2225     switch(avctx->codec_type) {
2226     case AVMEDIA_TYPE_AUDIO:
2227         packet_queue_abort(&is->audioq);
2228
2229         SDL_CloseAudio();
2230
2231         packet_queue_end(&is->audioq);
2232         if (is->reformat_ctx)
2233             av_audio_convert_free(is->reformat_ctx);
2234         is->reformat_ctx = NULL;
2235         break;
2236     case AVMEDIA_TYPE_VIDEO:
2237         packet_queue_abort(&is->videoq);
2238
2239         /* note: we also signal this condition to make sure we unblock the
2240            video thread in all cases */
2241         SDL_LockMutex(is->pictq_mutex);
2242         SDL_CondSignal(is->pictq_cond);
2243         SDL_UnlockMutex(is->pictq_mutex);
2244
2245         SDL_WaitThread(is->video_tid, NULL);
2246
2247         packet_queue_end(&is->videoq);
2248         break;
2249     case AVMEDIA_TYPE_SUBTITLE:
2250         packet_queue_abort(&is->subtitleq);
2251
2252         /* note: we also signal this condition to make sure we unblock the
2253            subtitle thread in all cases */
2254         SDL_LockMutex(is->subpq_mutex);
2255         is->subtitle_stream_changed = 1;
2256
2257         SDL_CondSignal(is->subpq_cond);
2258         SDL_UnlockMutex(is->subpq_mutex);
2259
2260         SDL_WaitThread(is->subtitle_tid, NULL);
2261
2262         packet_queue_end(&is->subtitleq);
2263         break;
2264     default:
2265         break;
2266     }
2267
2268     ic->streams[stream_index]->discard = AVDISCARD_ALL;
2269     avcodec_close(avctx);
2270     switch(avctx->codec_type) {
2271     case AVMEDIA_TYPE_AUDIO:
2272         is->audio_st = NULL;
2273         is->audio_stream = -1;
2274         break;
2275     case AVMEDIA_TYPE_VIDEO:
2276         is->video_st = NULL;
2277         is->video_stream = -1;
2278         break;
2279     case AVMEDIA_TYPE_SUBTITLE:
2280         is->subtitle_st = NULL;
2281         is->subtitle_stream = -1;
2282         break;
2283     default:
2284         break;
2285     }
2286 }
2287
2288 /* since we have only one decoding thread, we can use a global
2289    variable instead of a thread local variable */
2290 static VideoState *global_video_state;
2291
2292 static int decode_interrupt_cb(void)
2293 {
2294     return (global_video_state && global_video_state->abort_request);
2295 }
2296
2297 /* this thread gets the stream from the disk or the network */
2298 static int read_thread(void *arg)
2299 {
2300     VideoState *is = arg;
2301     AVFormatContext *ic = NULL;
2302     int err, i, ret;
2303     int st_index[AVMEDIA_TYPE_NB];
2304     AVPacket pkt1, *pkt = &pkt1;
2305     int eof=0;
2306     int pkt_in_play_range = 0;
2307     AVDictionaryEntry *t;
2308     AVDictionary **opts;
2309     int orig_nb_streams;
2310
2311     memset(st_index, -1, sizeof(st_index));
2312     is->video_stream = -1;
2313     is->audio_stream = -1;
2314     is->subtitle_stream = -1;
2315
2316     global_video_state = is;
2317     avio_set_interrupt_cb(decode_interrupt_cb);
2318
2319     err = avformat_open_input(&ic, is->filename, is->iformat, &format_opts);
2320     if (err < 0) {
2321         print_error(is->filename, err);
2322         ret = -1;
2323         goto fail;
2324     }
2325     if ((t = av_dict_get(format_opts, "", NULL, AV_DICT_IGNORE_SUFFIX))) {
2326         av_log(NULL, AV_LOG_ERROR, "Option %s not found.\n", t->key);
2327         ret = AVERROR_OPTION_NOT_FOUND;
2328         goto fail;
2329     }
2330     is->ic = ic;
2331
2332     if(genpts)
2333         ic->flags |= AVFMT_FLAG_GENPTS;
2334
2335     opts = setup_find_stream_info_opts(ic, codec_opts);
2336     orig_nb_streams = ic->nb_streams;
2337
2338     err = avformat_find_stream_info(ic, opts);
2339     if (err < 0) {
2340         fprintf(stderr, "%s: could not find codec parameters\n", is->filename);
2341         ret = -1;
2342         goto fail;
2343     }
2344     for (i = 0; i < orig_nb_streams; i++)
2345         av_dict_free(&opts[i]);
2346     av_freep(&opts);
2347
2348     if(ic->pb)
2349         ic->pb->eof_reached= 0; //FIXME hack, ffplay maybe should not use url_feof() to test for the end
2350
2351     if(seek_by_bytes<0)
2352         seek_by_bytes= !!(ic->iformat->flags & AVFMT_TS_DISCONT);
2353
2354     /* if a start time was requested, seek to it */
2355     if (start_time != AV_NOPTS_VALUE) {
2356         int64_t timestamp;
2357
2358         timestamp = start_time;
2359         /* add the stream start time */
2360         if (ic->start_time != AV_NOPTS_VALUE)
2361             timestamp += ic->start_time;
2362         ret = avformat_seek_file(ic, -1, INT64_MIN, timestamp, INT64_MAX, 0);
2363         if (ret < 0) {
2364             fprintf(stderr, "%s: could not seek to position %0.3f\n",
2365                     is->filename, (double)timestamp / AV_TIME_BASE);
2366         }
2367     }
2368
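         /* discard all streams by default, then pick the best stream of each
            type (honouring any -ast/-vst/-sst user override) and re-enable it
            via stream_component_open() below */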
2369     for (i = 0; i < ic->nb_streams; i++)
2370         ic->streams[i]->discard = AVDISCARD_ALL;
2371     if (!video_disable)
2372         st_index[AVMEDIA_TYPE_VIDEO] =
2373             av_find_best_stream(ic, AVMEDIA_TYPE_VIDEO,
2374                                 wanted_stream[AVMEDIA_TYPE_VIDEO], -1, NULL, 0);
2375     if (!audio_disable)
2376         st_index[AVMEDIA_TYPE_AUDIO] =
2377             av_find_best_stream(ic, AVMEDIA_TYPE_AUDIO,
2378                                 wanted_stream[AVMEDIA_TYPE_AUDIO],
2379                                 st_index[AVMEDIA_TYPE_VIDEO],
2380                                 NULL, 0);
2381     if (!video_disable)
2382         st_index[AVMEDIA_TYPE_SUBTITLE] =
2383             av_find_best_stream(ic, AVMEDIA_TYPE_SUBTITLE,
2384                                 wanted_stream[AVMEDIA_TYPE_SUBTITLE],
2385                                 (st_index[AVMEDIA_TYPE_AUDIO] >= 0 ?
2386                                  st_index[AVMEDIA_TYPE_AUDIO] :
2387                                  st_index[AVMEDIA_TYPE_VIDEO]),
2388                                 NULL, 0);
2389     if (show_status) {
2390         av_dump_format(ic, 0, is->filename, 0);
2391     }
2392
2393     is->show_mode = show_mode;
2394
2395     /* open the streams */
2396     if (st_index[AVMEDIA_TYPE_AUDIO] >= 0) {
2397         stream_component_open(is, st_index[AVMEDIA_TYPE_AUDIO]);
2398     }
2399
2400     ret=-1;
2401     if (st_index[AVMEDIA_TYPE_VIDEO] >= 0) {
2402         ret= stream_component_open(is, st_index[AVMEDIA_TYPE_VIDEO]);
2403     }
2404     is->refresh_tid = SDL_CreateThread(refresh_thread, is);
2405     if (is->show_mode == SHOW_MODE_NONE)
2406         is->show_mode = ret >= 0 ? SHOW_MODE_VIDEO : SHOW_MODE_RDFT;
2407
2408     if (st_index[AVMEDIA_TYPE_SUBTITLE] >= 0) {
2409         stream_component_open(is, st_index[AVMEDIA_TYPE_SUBTITLE]);
2410     }
2411
2412     if (is->video_stream < 0 && is->audio_stream < 0) {
2413         fprintf(stderr, "%s: could not open codecs\n", is->filename);
2414         ret = -1;
2415         goto fail;
2416     }
2417
2418     for(;;) {
2419         if (is->abort_request)
2420             break;
2421         if (is->paused != is->last_paused) {
2422             is->last_paused = is->paused;
2423             if (is->paused)
2424                 is->read_pause_return= av_read_pause(ic);
2425             else
2426                 av_read_play(ic);
2427         }
2428 #if CONFIG_RTSP_DEMUXER
2429         if (is->paused && !strcmp(ic->iformat->name, "rtsp")) {
2430             /* wait 10 ms to avoid trying to get another packet */
2431             /* XXX: horrible */
2432             SDL_Delay(10);
2433             continue;
2434         }
2435 #endif
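             /* seeks requested from the event loop (stream_seek) are executed
                here in the read thread; after a successful seek every packet
                queue is flushed and primed with the flush sentinel packet */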
2436         if (is->seek_req) {
2437             int64_t seek_target= is->seek_pos;
2438             int64_t seek_min= is->seek_rel > 0 ? seek_target - is->seek_rel + 2: INT64_MIN;
2439             int64_t seek_max= is->seek_rel < 0 ? seek_target - is->seek_rel - 2: INT64_MAX;
2440 //FIXME the +-2 is due to rounding being not done in the correct direction in generation
2441 //      of the seek_pos/seek_rel variables
2442
2443             ret = avformat_seek_file(is->ic, -1, seek_min, seek_target, seek_max, is->seek_flags);
2444             if (ret < 0) {
2445                 fprintf(stderr, "%s: error while seeking\n", is->ic->filename);
2446             }else{
2447                 if (is->audio_stream >= 0) {
2448                     packet_queue_flush(&is->audioq);
2449                     packet_queue_put(&is->audioq, &flush_pkt);
2450                 }
2451                 if (is->subtitle_stream >= 0) {
2452                     packet_queue_flush(&is->subtitleq);
2453                     packet_queue_put(&is->subtitleq, &flush_pkt);
2454                 }
2455                 if (is->video_stream >= 0) {
2456                     packet_queue_flush(&is->videoq);
2457                     packet_queue_put(&is->videoq, &flush_pkt);
2458                 }
2459             }
2460             is->seek_req = 0;
2461             eof= 0;
2462         }
2463
2464         /* if the queues are full, no need to read more */
2465         if (   is->audioq.size + is->videoq.size + is->subtitleq.size > MAX_QUEUE_SIZE
2466             || (   (is->audioq   .size  > MIN_AUDIOQ_SIZE || is->audio_stream<0)
2467                 && (is->videoq   .nb_packets > MIN_FRAMES || is->video_stream<0)
2468                 && (is->subtitleq.nb_packets > MIN_FRAMES || is->subtitle_stream<0))) {
2469             /* wait 10 ms */
2470             SDL_Delay(10);
2471             continue;
2472         }
2473         if(eof) {
2474             if(is->video_stream >= 0){
2475                 av_init_packet(pkt);
2476                 pkt->data=NULL;
2477                 pkt->size=0;
2478                 pkt->stream_index= is->video_stream;
2479                 packet_queue_put(&is->videoq, pkt);
2480             }
2481             SDL_Delay(10);
2482             if(is->audioq.size + is->videoq.size + is->subtitleq.size ==0){
2483                 if(loop!=1 && (!loop || --loop)){
2484                     stream_seek(cur_stream, start_time != AV_NOPTS_VALUE ? start_time : 0, 0, 0);
2485                 }else if(autoexit){
2486                     ret=AVERROR_EOF;
2487                     goto fail;
2488                 }
2489             }
2490             eof=0;
2491             continue;
2492         }
2493         ret = av_read_frame(ic, pkt);
2494         if (ret < 0) {
2495             if (ret == AVERROR_EOF || url_feof(ic->pb))
2496                 eof=1;
2497             if (ic->pb && ic->pb->error)
2498                 break;
2499             SDL_Delay(100); /* wait for user event */
2500             continue;
2501         }
2502         /* check if packet is in play range specified by user, then queue, otherwise discard */
2503         pkt_in_play_range = duration == AV_NOPTS_VALUE ||
2504                 (pkt->pts - ic->streams[pkt->stream_index]->start_time) *
2505                 av_q2d(ic->streams[pkt->stream_index]->time_base) -
2506                 (double)(start_time != AV_NOPTS_VALUE ? start_time : 0)/1000000
2507                 <= ((double)duration/1000000);
2508         if (pkt->stream_index == is->audio_stream && pkt_in_play_range) {
2509             packet_queue_put(&is->audioq, pkt);
2510         } else if (pkt->stream_index == is->video_stream && pkt_in_play_range) {
2511             packet_queue_put(&is->videoq, pkt);
2512         } else if (pkt->stream_index == is->subtitle_stream && pkt_in_play_range) {
2513             packet_queue_put(&is->subtitleq, pkt);
2514         } else {
2515             av_free_packet(pkt);
2516         }
2517     }
2518     /* wait until the end */
2519     while (!is->abort_request) {
2520         SDL_Delay(100);
2521     }
2522
2523     ret = 0;
2524  fail:
2525     /* disable interrupting */
2526     global_video_state = NULL;
2527
2528     /* close each stream */
2529     if (is->audio_stream >= 0)
2530         stream_component_close(is, is->audio_stream);
2531     if (is->video_stream >= 0)
2532         stream_component_close(is, is->video_stream);
2533     if (is->subtitle_stream >= 0)
2534         stream_component_close(is, is->subtitle_stream);
2535     if (is->ic) {
2536         av_close_input_file(is->ic);
2537         is->ic = NULL; /* safety */
2538     }
2539     avio_set_interrupt_cb(NULL);
2540
2541     if (ret != 0) {
2542         SDL_Event event;
2543
2544         event.type = FF_QUIT_EVENT;
2545         event.user.data1 = is;
2546         SDL_PushEvent(&event);
2547     }
2548     return 0;
2549 }
2550
2551 static VideoState *stream_open(const char *filename, AVInputFormat *iformat)
2552 {
2553     VideoState *is;
2554
2555     is = av_mallocz(sizeof(VideoState));
2556     if (!is)
2557         return NULL;
2558     av_strlcpy(is->filename, filename, sizeof(is->filename));
2559     is->iformat = iformat;
2560     is->ytop = 0;
2561     is->xleft = 0;
2562
2563     /* start video display */
2564     is->pictq_mutex = SDL_CreateMutex();
2565     is->pictq_cond = SDL_CreateCond();
2566
2567     is->subpq_mutex = SDL_CreateMutex();
2568     is->subpq_cond = SDL_CreateCond();
2569
2570     is->av_sync_type = av_sync_type;
2571     is->read_tid = SDL_CreateThread(read_thread, is);
2572     if (!is->read_tid) {
2573         av_free(is);
2574         return NULL;
2575     }
2576     return is;
2577 }
2578
2579 static void stream_cycle_channel(VideoState *is, int codec_type)
2580 {
2581     AVFormatContext *ic = is->ic;
2582     int start_index, stream_index;
2583     AVStream *st;
2584
2585     if (codec_type == AVMEDIA_TYPE_VIDEO)
2586         start_index = is->video_stream;
2587     else if (codec_type == AVMEDIA_TYPE_AUDIO)
2588         start_index = is->audio_stream;
2589     else
2590         start_index = is->subtitle_stream;
2591     if (start_index < (codec_type == AVMEDIA_TYPE_SUBTITLE ? -1 : 0))
2592         return;
2593     stream_index = start_index;
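         /* scan forward, wrapping around, for the next usable stream of the
            same type; for subtitles, wrapping past the end selects -1, which
            simply closes the current subtitle stream */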
2594     for(;;) {
2595         if (++stream_index >= is->ic->nb_streams)
2596         {
2597             if (codec_type == AVMEDIA_TYPE_SUBTITLE)
2598             {
2599                 stream_index = -1;
2600                 goto the_end;
2601             } else
2602                 stream_index = 0;
2603         }
2604         if (stream_index == start_index)
2605             return;
2606         st = ic->streams[stream_index];
2607         if (st->codec->codec_type == codec_type) {
2608             /* check that parameters are OK */
2609             switch(codec_type) {
2610             case AVMEDIA_TYPE_AUDIO:
2611                 if (st->codec->sample_rate != 0 &&
2612                     st->codec->channels != 0)
2613                     goto the_end;
2614                 break;
2615             case AVMEDIA_TYPE_VIDEO:
2616             case AVMEDIA_TYPE_SUBTITLE:
2617                 goto the_end;
2618             default:
2619                 break;
2620             }
2621         }
2622     }
2623  the_end:
2624     stream_component_close(is, start_index);
2625     stream_component_open(is, stream_index);
2626 }
2627
2628
2629 static void toggle_full_screen(void)
2630 {
2631     is_full_screen = !is_full_screen;
2632     video_open(cur_stream);
2633 }
2634
2635 static void toggle_pause(void)
2636 {
2637     if (cur_stream)
2638         stream_toggle_pause(cur_stream);
2639     step = 0;
2640 }
2641
2642 static void step_to_next_frame(void)
2643 {
2644     if (cur_stream) {
2645         /* if the stream is paused, unpause it, then step */
2646         if (cur_stream->paused)
2647             stream_toggle_pause(cur_stream);
2648     }
2649     step = 1;
2650 }
2651
2652 static void toggle_audio_display(void)
2653 {
2654     if (cur_stream) {
2655         int bgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0x00);
2656         cur_stream->show_mode = (cur_stream->show_mode + 1) % SHOW_MODE_NB;
2657         fill_rectangle(screen,
2658                     cur_stream->xleft, cur_stream->ytop, cur_stream->width, cur_stream->height,
2659                     bgcolor);
2660         SDL_UpdateRect(screen, cur_stream->xleft, cur_stream->ytop, cur_stream->width, cur_stream->height);
2661     }
2662 }
2663
2664 /* handle an event sent by the GUI */
2665 static void event_loop(void)
2666 {
2667     SDL_Event event;
2668     double incr, pos, frac;
2669
2670     for(;;) {
2671         double x;
2672         SDL_WaitEvent(&event);
2673         switch(event.type) {
2674         case SDL_KEYDOWN:
2675             if (exit_on_keydown) {
2676                 do_exit();
2677                 break;
2678             }
2679             switch(event.key.keysym.sym) {
2680             case SDLK_ESCAPE:
2681             case SDLK_q:
2682                 do_exit();
2683                 break;
2684             case SDLK_f:
2685                 toggle_full_screen();
2686                 break;
2687             case SDLK_p:
2688             case SDLK_SPACE:
2689                 toggle_pause();
2690                 break;
2691             case SDLK_s: //S: Step to next frame
2692                 step_to_next_frame();
2693                 break;
2694             case SDLK_a:
2695                 if (cur_stream)
2696                     stream_cycle_channel(cur_stream, AVMEDIA_TYPE_AUDIO);
2697                 break;
2698             case SDLK_v:
2699                 if (cur_stream)
2700                     stream_cycle_channel(cur_stream, AVMEDIA_TYPE_VIDEO);
2701                 break;
2702             case SDLK_t:
2703                 if (cur_stream)
2704                     stream_cycle_channel(cur_stream, AVMEDIA_TYPE_SUBTITLE);
2705                 break;
2706             case SDLK_w:
2707                 toggle_audio_display();
2708                 break;
2709             case SDLK_LEFT:
2710                 incr = -10.0;
2711                 goto do_seek;
2712             case SDLK_RIGHT:
2713                 incr = 10.0;
2714                 goto do_seek;
2715             case SDLK_UP:
2716                 incr = 60.0;
2717                 goto do_seek;
2718             case SDLK_DOWN:
2719                 incr = -60.0;
2720             do_seek:
2721                 if (cur_stream) {
2722                     if (seek_by_bytes) {
2723                         if (cur_stream->video_stream >= 0 && cur_stream->video_current_pos>=0){
2724                             pos= cur_stream->video_current_pos;
2725                         }else if(cur_stream->audio_stream >= 0 && cur_stream->audio_pkt.pos>=0){
2726                             pos= cur_stream->audio_pkt.pos;
2727                         }else
2728                             pos = avio_tell(cur_stream->ic->pb);
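                             /* convert the seek increment from seconds to bytes
                                using the container bit rate (bits/s -> bytes/s),
                                falling back to a fixed rate if it is unknown */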
2729                         if (cur_stream->ic->bit_rate)
2730                             incr *= cur_stream->ic->bit_rate / 8.0;
2731                         else
2732                             incr *= 180000.0;
2733                         pos += incr;
2734                         stream_seek(cur_stream, pos, incr, 1);
2735                     } else {
2736                         pos = get_master_clock(cur_stream);
2737                         pos += incr;
2738                         stream_seek(cur_stream, (int64_t)(pos * AV_TIME_BASE), (int64_t)(incr * AV_TIME_BASE), 0);
2739                     }
2740                 }
2741                 break;
2742             default:
2743                 break;
2744             }
2745             break;
2746         case SDL_MOUSEBUTTONDOWN:
2747             if (exit_on_mousedown) {
2748                 do_exit();
2749                 break;
2750             }
2751         case SDL_MOUSEMOTION:
2752             if(event.type ==SDL_MOUSEBUTTONDOWN){
2753                 x= event.button.x;
2754             }else{
2755                 if(event.motion.state != SDL_PRESSED)
2756                     break;
2757                 x= event.motion.x;
2758             }
2759             if (cur_stream) {
2760                 if(seek_by_bytes || cur_stream->ic->duration<=0){
2761                     uint64_t size=  avio_size(cur_stream->ic->pb);
2762                     stream_seek(cur_stream, size*x/cur_stream->width, 0, 1);
2763                 }else{
2764                     int64_t ts;
2765                     int ns, hh, mm, ss;
2766                     int tns, thh, tmm, tss;
2767                     tns = cur_stream->ic->duration/1000000LL;
2768                     thh = tns/3600;
2769                     tmm = (tns%3600)/60;
2770                     tss = (tns%60);
2771                     frac = x/cur_stream->width;
2772                     ns = frac*tns;
2773                     hh = ns/3600;
2774                     mm = (ns%3600)/60;
2775                     ss = (ns%60);
2776                     fprintf(stderr, "Seek to %2.0f%% (%2d:%02d:%02d) of total duration (%2d:%02d:%02d)       \n", frac*100,
2777                             hh, mm, ss, thh, tmm, tss);
2778                     ts = frac*cur_stream->ic->duration;
2779                     if (cur_stream->ic->start_time != AV_NOPTS_VALUE)
2780                         ts += cur_stream->ic->start_time;
2781                     stream_seek(cur_stream, ts, 0, 0);
2782                 }
2783             }
2784             break;
2785         case SDL_VIDEORESIZE:
2786             if (cur_stream) {
2787                 screen = SDL_SetVideoMode(event.resize.w, event.resize.h, 0,
2788                                           SDL_HWSURFACE|SDL_RESIZABLE|SDL_ASYNCBLIT|SDL_HWACCEL);
2789                 screen_width = cur_stream->width = event.resize.w;
2790                 screen_height= cur_stream->height= event.resize.h;
2791             }
2792             break;
2793         case SDL_QUIT:
2794         case FF_QUIT_EVENT:
2795             do_exit();
2796             break;
2797         case FF_ALLOC_EVENT:
2798             video_open(event.user.data1);
2799             alloc_picture(event.user.data1);
2800             break;
2801         case FF_REFRESH_EVENT:
2802             video_refresh(event.user.data1);
2803             cur_stream->refresh=0;
2804             break;
2805         default:
2806             break;
2807         }
2808     }
2809 }
2810
2811 static int opt_frame_size(const char *opt, const char *arg)
2812 {
2813     av_log(NULL, AV_LOG_ERROR,
2814            "Option '%s' has been removed, use private format options instead\n", opt);
2815     return AVERROR(EINVAL);
2816 }
2817
2818 static int opt_width(const char *opt, const char *arg)
2819 {
2820     screen_width = parse_number_or_die(opt, arg, OPT_INT64, 1, INT_MAX);
2821     return 0;
2822 }
2823
2824 static int opt_height(const char *opt, const char *arg)
2825 {
2826     screen_height = parse_number_or_die(opt, arg, OPT_INT64, 1, INT_MAX);
2827     return 0;
2828 }
2829
2830 static int opt_format(const char *opt, const char *arg)
2831 {
2832     file_iformat = av_find_input_format(arg);
2833     if (!file_iformat) {
2834         fprintf(stderr, "Unknown input format: %s\n", arg);
2835         return AVERROR(EINVAL);
2836     }
2837     return 0;
2838 }
2839
2840 static int opt_frame_pix_fmt(const char *opt, const char *arg)
2841 {
2842     av_log(NULL, AV_LOG_ERROR,
2843            "Option '%s' has been removed, use private format options instead\n", opt);
2844     return AVERROR(EINVAL);
2845 }
2846
2847 static int opt_sync(const char *opt, const char *arg)
2848 {
2849     if (!strcmp(arg, "audio"))
2850         av_sync_type = AV_SYNC_AUDIO_MASTER;
2851     else if (!strcmp(arg, "video"))
2852         av_sync_type = AV_SYNC_VIDEO_MASTER;
2853     else if (!strcmp(arg, "ext"))
2854         av_sync_type = AV_SYNC_EXTERNAL_CLOCK;
2855     else {
2856         fprintf(stderr, "Unknown value for %s: %s\n", opt, arg);
2857         exit(1);
2858     }
2859     return 0;
2860 }
2861
2862 static int opt_seek(const char *opt, const char *arg)
2863 {
2864     start_time = parse_time_or_die(opt, arg, 1);
2865     return 0;
2866 }
2867
2868 static int opt_duration(const char *opt, const char *arg)
2869 {
2870     duration = parse_time_or_die(opt, arg, 1);
2871     return 0;
2872 }
2873
2874 static int opt_thread_count(const char *opt, const char *arg)
2875 {
2876     thread_count= parse_number_or_die(opt, arg, OPT_INT64, 0, INT_MAX);
2877 #if !HAVE_THREADS
2878     fprintf(stderr, "Warning: not compiled with thread support, using thread emulation\n");
2879 #endif
2880     return 0;
2881 }
2882
2883 static int opt_show_mode(const char *opt, const char *arg)
2884 {
2885     show_mode = !strcmp(arg, "video") ? SHOW_MODE_VIDEO :
2886                 !strcmp(arg, "waves") ? SHOW_MODE_WAVES :
2887                 !strcmp(arg, "rdft" ) ? SHOW_MODE_RDFT  :
2888                 parse_number_or_die(opt, arg, OPT_INT, 0, SHOW_MODE_NB-1);
2889     return 0;
2890 }
2891
2892 static int opt_input_file(const char *opt, const char *filename)
2893 {
2894     if (input_filename) {
2895         fprintf(stderr, "Argument '%s' provided as input filename, but '%s' was already specified.\n",
2896                 filename, input_filename);
2897         exit(1);
2898     }
2899     if (!strcmp(filename, "-"))
2900         filename = "pipe:";
2901     input_filename = filename;
2902     return 0;
2903 }
2904
2905 static const OptionDef options[] = {
2906 #include "cmdutils_common_opts.h"
2907     { "x", HAS_ARG, {(void*)opt_width}, "force displayed width", "width" },
2908     { "y", HAS_ARG, {(void*)opt_height}, "force displayed height", "height" },
2909     { "s", HAS_ARG | OPT_VIDEO, {(void*)opt_frame_size}, "set frame size (WxH or abbreviation)", "size" },
2910     { "fs", OPT_BOOL, {(void*)&is_full_screen}, "force full screen" },
2911     { "an", OPT_BOOL, {(void*)&audio_disable}, "disable audio" },
2912     { "vn", OPT_BOOL, {(void*)&video_disable}, "disable video" },
2913     { "ast", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&wanted_stream[AVMEDIA_TYPE_AUDIO]}, "select desired audio stream", "stream_number" },
2914     { "vst", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&wanted_stream[AVMEDIA_TYPE_VIDEO]}, "select desired video stream", "stream_number" },
2915     { "sst", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&wanted_stream[AVMEDIA_TYPE_SUBTITLE]}, "select desired subtitle stream", "stream_number" },
2916     { "ss", HAS_ARG, {(void*)&opt_seek}, "seek to a given position in seconds", "pos" },
2917     { "t", HAS_ARG, {(void*)&opt_duration}, "play \"duration\" seconds of audio/video", "duration" },
2918     { "bytes", OPT_INT | HAS_ARG, {(void*)&seek_by_bytes}, "seek by bytes 0=off 1=on -1=auto", "val" },
2919     { "nodisp", OPT_BOOL, {(void*)&display_disable}, "disable graphical display" },
2920     { "f", HAS_ARG, {(void*)opt_format}, "force format", "fmt" },
2921     { "pix_fmt", HAS_ARG | OPT_EXPERT | OPT_VIDEO, {(void*)opt_frame_pix_fmt}, "set pixel format", "format" },
2922     { "stats", OPT_BOOL | OPT_EXPERT, {(void*)&show_status}, "show status", "" },
2923     { "bug", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&workaround_bugs}, "workaround bugs", "" },
2924     { "fast", OPT_BOOL | OPT_EXPERT, {(void*)&fast}, "non spec compliant optimizations", "" },
2925     { "genpts", OPT_BOOL | OPT_EXPERT, {(void*)&genpts}, "generate pts", "" },
2926     { "drp", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&decoder_reorder_pts}, "let decoder reorder pts 0=off 1=on -1=auto", ""},
2927     { "lowres", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&lowres}, "", "" },
2928     { "skiploop", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&skip_loop_filter}, "", "" },
2929     { "skipframe", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&skip_frame}, "", "" },
2930     { "skipidct", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&skip_idct}, "", "" },
2931     { "idct", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&idct}, "set idct algo",  "algo" },
2932     { "er", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&error_recognition}, "set error detection threshold (0-4)",  "threshold" },
2933     { "ec", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&error_concealment}, "set error concealment options",  "bit_mask" },
2934     { "sync", HAS_ARG | OPT_EXPERT, {(void*)opt_sync}, "set audio-video sync. type (type=audio/video/ext)", "type" },
2935     { "threads", HAS_ARG | OPT_EXPERT, {(void*)opt_thread_count}, "thread count", "count" },
2936     { "autoexit", OPT_BOOL | OPT_EXPERT, {(void*)&autoexit}, "exit at the end", "" },
2937     { "exitonkeydown", OPT_BOOL | OPT_EXPERT, {(void*)&exit_on_keydown}, "exit on key down", "" },
2938     { "exitonmousedown", OPT_BOOL | OPT_EXPERT, {(void*)&exit_on_mousedown}, "exit on mouse down", "" },
2939     { "loop", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&loop}, "set number of times the playback shall be looped", "loop count" },
2940     { "framedrop", OPT_BOOL | OPT_EXPERT, {(void*)&framedrop}, "drop frames when cpu is too slow", "" },
2941     { "window_title", OPT_STRING | HAS_ARG, {(void*)&window_title}, "set window title", "window title" },
2942 #if CONFIG_AVFILTER
2943     { "vf", OPT_STRING | HAS_ARG, {(void*)&vfilters}, "video filters", "filter list" },
2944 #endif
2945     { "rdftspeed", OPT_INT | HAS_ARG| OPT_AUDIO | OPT_EXPERT, {(void*)&rdftspeed}, "rdft speed", "msecs" },
2946     { "showmode", HAS_ARG, {(void*)opt_show_mode}, "select show mode (0 = video, 1 = waves, 2 = RDFT)", "mode" },
2947     { "default", HAS_ARG | OPT_AUDIO | OPT_VIDEO | OPT_EXPERT, {(void*)opt_default}, "generic catch all option", "" },
2948     { "i", HAS_ARG, {(void *)opt_input_file}, "read specified file", "input_file"},
2949     { NULL, },
2950 };
2951
2952 static void show_usage(void)
2953 {
2954     printf("Simple media player\n");
2955     printf("usage: ffplay [options] input_file\n");
2956     printf("\n");
2957 }
2958
2959 static int opt_help(const char *opt, const char *arg)
2960 {
2961     av_log_set_callback(log_callback_help);
2962     show_usage();
2963     show_help_options(options, "Main options:\n",
2964                       OPT_EXPERT, 0);
2965     show_help_options(options, "\nAdvanced options:\n",
2966                       OPT_EXPERT, OPT_EXPERT);
2967     printf("\n");
2968     av_opt_show2(avcodec_opts[0], NULL,
2969                  AV_OPT_FLAG_DECODING_PARAM, 0);
2970     printf("\n");
2971     av_opt_show2(avformat_opts, NULL,
2972                  AV_OPT_FLAG_DECODING_PARAM, 0);
2973 #if !CONFIG_AVFILTER
2974     printf("\n");
2975     av_opt_show2(sws_opts, NULL,
2976                  AV_OPT_FLAG_ENCODING_PARAM, 0);
2977 #endif
2978     printf("\nWhile playing:\n"
2979            "q, ESC              quit\n"
2980            "f                   toggle full screen\n"
2981            "p, SPC              pause\n"
2982            "a                   cycle audio channel\n"
2983            "v                   cycle video channel\n"
2984            "t                   cycle subtitle channel\n"
2985            "w                   show audio waves\n"
2986            "s                   activate frame-step mode\n"
2987            "left/right          seek backward/forward 10 seconds\n"
2988            "down/up             seek backward/forward 1 minute\n"
2989            "mouse click         seek to percentage in file corresponding to fraction of width\n"
2990            );
2991     return 0;
2992 }
2993
2994 /* program entry point */
2995 int main(int argc, char **argv)
2996 {
2997     int flags;
2998
2999     av_log_set_flags(AV_LOG_SKIP_REPEATED);
3000
3001     /* register all codecs, demuxers and protocols */
3002     avcodec_register_all();
3003 #if CONFIG_AVDEVICE
3004     avdevice_register_all();
3005 #endif
3006 #if CONFIG_AVFILTER
3007     avfilter_register_all();
3008 #endif
3009     av_register_all();
3010
3011     init_opts();
3012
3013     show_banner();
3014
3015     parse_options(argc, argv, options, opt_input_file);
3016
3017     if (!input_filename) {
3018         show_usage();
3019         fprintf(stderr, "An input file must be specified\n");
3020         fprintf(stderr, "Use -h to get full help or, even better, run 'man ffplay'\n");
3021         exit(1);
3022     }
3023
3024     if (display_disable) {
3025         video_disable = 1;
3026     }
3027     flags = SDL_INIT_VIDEO | SDL_INIT_AUDIO | SDL_INIT_TIMER;
3028     if (audio_disable)
3029         flags &= ~SDL_INIT_AUDIO;
3030 #if !defined(__MINGW32__) && !defined(__APPLE__)
3031     flags |= SDL_INIT_EVENTTHREAD; /* Not supported on Windows or Mac OS X */
3032 #endif
3033     if (SDL_Init (flags)) {
3034         fprintf(stderr, "Could not initialize SDL - %s\n", SDL_GetError());
3035         fprintf(stderr, "(Did you set the DISPLAY variable?)\n");
3036         exit(1);
3037     }
3038
3039     if (!display_disable) {
3040 #if HAVE_SDL_VIDEO_SIZE
3041         const SDL_VideoInfo *vi = SDL_GetVideoInfo();
3042         fs_screen_width = vi->current_w;
3043         fs_screen_height = vi->current_h;
3044 #endif
3045     }
3046
3047     SDL_EventState(SDL_ACTIVEEVENT, SDL_IGNORE);
3048     SDL_EventState(SDL_SYSWMEVENT, SDL_IGNORE);
3049     SDL_EventState(SDL_USEREVENT, SDL_IGNORE);
3050
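         /* flush_pkt is a sentinel: when a decoding thread finds its data
            pointer in a packet queue it calls avcodec_flush_buffers(), which is
            how seeks reset the decoders */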
3051     av_init_packet(&flush_pkt);
3052     flush_pkt.data= "FLUSH";
3053
3054     cur_stream = stream_open(input_filename, file_iformat);
3055
3056     event_loop();
3057
3058     /* never returns */
3059
3060     return 0;
3061 }