1 /*
2  * ffplay : Simple Media Player based on the Libav libraries
3  * Copyright (c) 2003 Fabrice Bellard
4  *
5  * This file is part of Libav.
6  *
7  * Libav is free software; you can redistribute it and/or
8  * modify it under the terms of the GNU Lesser General Public
9  * License as published by the Free Software Foundation; either
10  * version 2.1 of the License, or (at your option) any later version.
11  *
12  * Libav is distributed in the hope that it will be useful,
13  * but WITHOUT ANY WARRANTY; without even the implied warranty of
14  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
15  * Lesser General Public License for more details.
16  *
17  * You should have received a copy of the GNU Lesser General Public
18  * License along with Libav; if not, write to the Free Software
19  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
20  */
21
22 #include "config.h"
23 #include <inttypes.h>
24 #include <math.h>
25 #include <limits.h>
26 #include "libavutil/avstring.h"
27 #include "libavutil/colorspace.h"
28 #include "libavutil/pixdesc.h"
29 #include "libavutil/imgutils.h"
30 #include "libavutil/parseutils.h"
31 #include "libavutil/samplefmt.h"
32 #include "libavformat/avformat.h"
33 #include "libavdevice/avdevice.h"
34 #include "libswscale/swscale.h"
35 #include "libavcodec/audioconvert.h"
36 #include "libavutil/opt.h"
37 #include "libavcodec/avfft.h"
38
39 #if CONFIG_AVFILTER
40 # include "libavfilter/avfilter.h"
41 # include "libavfilter/avfiltergraph.h"
42 #endif
43
44 #include "cmdutils.h"
45
46 #include <SDL.h>
47 #include <SDL_thread.h>
48
49 #ifdef __MINGW32__
50 #undef main /* We don't want SDL to override our main() */
51 #endif
52
53 #include <unistd.h>
54 #include <assert.h>
55
56 const char program_name[] = "ffplay";
57 const int program_birth_year = 2003;
58
59 #define MAX_QUEUE_SIZE (15 * 1024 * 1024)
60 #define MIN_AUDIOQ_SIZE (20 * 16 * 1024)
61 #define MIN_FRAMES 5
62
63 /* SDL audio buffer size, in samples. Should be small to have precise
64    A/V sync as SDL does not have hardware buffer fullness info. */
65 #define SDL_AUDIO_BUFFER_SIZE 1024
66
67 /* no AV sync correction is done if below the AV sync threshold */
68 #define AV_SYNC_THRESHOLD 0.01
69 /* no AV correction is done if too big error */
70 #define AV_NOSYNC_THRESHOLD 10.0
71
72 #define FRAME_SKIP_FACTOR 0.05
73
74 /* maximum audio speed change to get correct sync */
75 #define SAMPLE_CORRECTION_PERCENT_MAX 10
76
77 /* we use about AUDIO_DIFF_AVG_NB A-V differences to make the average */
78 #define AUDIO_DIFF_AVG_NB   20
79
80 /* NOTE: the size must be big enough to compensate for the hardware audio buffer size */
81 #define SAMPLE_ARRAY_SIZE (2*65536)
82
83 static int sws_flags = SWS_BICUBIC;
84
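/* Thread-safe FIFO of demuxed packets, shared between the thread that reads the
   file and the audio/video/subtitle decoders that drain it. 'nb_packets' counts
   queued packets and 'size' the buffered bytes (payload plus list-node overhead). */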
85 typedef struct PacketQueue {
86     AVPacketList *first_pkt, *last_pkt;
87     int nb_packets;
88     int size;
89     int abort_request;
90     SDL_mutex *mutex;
91     SDL_cond *cond;
92 } PacketQueue;
93
94 #define VIDEO_PICTURE_QUEUE_SIZE 2
95 #define SUBPICTURE_QUEUE_SIZE 4
96
97 typedef struct VideoPicture {
98     double pts;                                  ///<presentation time stamp for this picture
99     double target_clock;                         ///<av_gettime() time at which this should be displayed ideally
100     int64_t pos;                                 ///<byte position in file
101     SDL_Overlay *bmp;
102     int width, height; /* source height & width */
103     int allocated;
104     enum PixelFormat pix_fmt;
105
106 #if CONFIG_AVFILTER
107     AVFilterBufferRef *picref;
108 #endif
109 } VideoPicture;
110
111 typedef struct SubPicture {
112     double pts; /* presentation time stamp for this picture */
113     AVSubtitle sub;
114 } SubPicture;
115
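/* Master clock selection: which clock audio and video are synchronized to.
   AV_SYNC_AUDIO_MASTER is the default. */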
116 enum {
117     AV_SYNC_AUDIO_MASTER, /* default choice */
118     AV_SYNC_VIDEO_MASTER,
119     AV_SYNC_EXTERNAL_CLOCK, /* synchronize to an external clock */
120 };
121
122 typedef struct VideoState {
123     SDL_Thread *parse_tid;
124     SDL_Thread *video_tid;
125     SDL_Thread *refresh_tid;
126     AVInputFormat *iformat;
127     int no_background;
128     int abort_request;
129     int paused;
130     int last_paused;
131     int seek_req;
132     int seek_flags;
133     int64_t seek_pos;
134     int64_t seek_rel;
135     int read_pause_return;
136     AVFormatContext *ic;
137     int dtg_active_format;
138
139     int audio_stream;
140
141     int av_sync_type;
142     double external_clock; /* external clock base */
143     int64_t external_clock_time;
144
145     double audio_clock;
146     double audio_diff_cum; /* used for AV difference average computation */
147     double audio_diff_avg_coef;
148     double audio_diff_threshold;
149     int audio_diff_avg_count;
150     AVStream *audio_st;
151     PacketQueue audioq;
152     int audio_hw_buf_size;
153     /* samples output by the codec. We reserve more space for A/V sync
154        compensation */
155     DECLARE_ALIGNED(16,uint8_t,audio_buf1)[(AVCODEC_MAX_AUDIO_FRAME_SIZE * 3) / 2];
156     DECLARE_ALIGNED(16,uint8_t,audio_buf2)[(AVCODEC_MAX_AUDIO_FRAME_SIZE * 3) / 2];
157     uint8_t *audio_buf;
158     unsigned int audio_buf_size; /* in bytes */
159     int audio_buf_index; /* in bytes */
160     AVPacket audio_pkt_temp;
161     AVPacket audio_pkt;
162     enum AVSampleFormat audio_src_fmt;
163     AVAudioConvert *reformat_ctx;
164
165     int show_audio; /* if true, display audio samples */
166     int16_t sample_array[SAMPLE_ARRAY_SIZE];
167     int sample_array_index;
168     int last_i_start;
169     RDFTContext *rdft;
170     int rdft_bits;
171     FFTSample *rdft_data;
172     int xpos;
173
174     SDL_Thread *subtitle_tid;
175     int subtitle_stream;
176     int subtitle_stream_changed;
177     AVStream *subtitle_st;
178     PacketQueue subtitleq;
179     SubPicture subpq[SUBPICTURE_QUEUE_SIZE];
180     int subpq_size, subpq_rindex, subpq_windex;
181     SDL_mutex *subpq_mutex;
182     SDL_cond *subpq_cond;
183
184     double frame_timer;
185     double frame_last_pts;
186     double frame_last_delay;
187     double video_clock;                          ///<pts of last decoded frame / predicted pts of next decoded frame
188     int video_stream;
189     AVStream *video_st;
190     PacketQueue videoq;
191     double video_current_pts;                    ///<current displayed pts (different from video_clock if frame fifos are used)
192     double video_current_pts_drift;              ///<video_current_pts - time (av_gettime) at which we updated video_current_pts - used to have running video pts
193     int64_t video_current_pos;                   ///<current displayed file pos
194     VideoPicture pictq[VIDEO_PICTURE_QUEUE_SIZE];
195     int pictq_size, pictq_rindex, pictq_windex;
196     SDL_mutex *pictq_mutex;
197     SDL_cond *pictq_cond;
198 #if !CONFIG_AVFILTER
199     struct SwsContext *img_convert_ctx;
200 #endif
201
202     //    QETimer *video_timer;
203     char filename[1024];
204     int width, height, xleft, ytop;
205
206     PtsCorrectionContext pts_ctx;
207
208 #if CONFIG_AVFILTER
209     AVFilterContext *out_video_filter;          ///<the last filter in the video chain
210 #endif
211
212     float skip_frames;
213     float skip_frames_index;
214     int refresh;
215 } VideoState;
216
217 static void show_help(void);
218
219 /* options specified by the user */
220 static AVInputFormat *file_iformat;
221 static const char *input_filename;
222 static const char *window_title;
223 static int fs_screen_width;
224 static int fs_screen_height;
225 static int screen_width = 0;
226 static int screen_height = 0;
227 static int frame_width = 0;
228 static int frame_height = 0;
229 static enum PixelFormat frame_pix_fmt = PIX_FMT_NONE;
230 static int audio_disable;
231 static int video_disable;
232 static int wanted_stream[AVMEDIA_TYPE_NB]={
233     [AVMEDIA_TYPE_AUDIO]=-1,
234     [AVMEDIA_TYPE_VIDEO]=-1,
235     [AVMEDIA_TYPE_SUBTITLE]=-1,
236 };
237 static int seek_by_bytes=-1;
238 static int display_disable;
239 static int show_status = 1;
240 static int av_sync_type = AV_SYNC_AUDIO_MASTER;
241 static int64_t start_time = AV_NOPTS_VALUE;
242 static int64_t duration = AV_NOPTS_VALUE;
243 static int debug = 0;
244 static int debug_mv = 0;
245 static int step = 0;
246 static int thread_count = 1;
247 static int workaround_bugs = 1;
248 static int fast = 0;
249 static int genpts = 0;
250 static int lowres = 0;
251 static int idct = FF_IDCT_AUTO;
252 static enum AVDiscard skip_frame= AVDISCARD_DEFAULT;
253 static enum AVDiscard skip_idct= AVDISCARD_DEFAULT;
254 static enum AVDiscard skip_loop_filter= AVDISCARD_DEFAULT;
255 static int error_recognition = FF_ER_CAREFUL;
256 static int error_concealment = 3;
257 static int decoder_reorder_pts= -1;
258 static int autoexit;
259 static int exit_on_keydown;
260 static int exit_on_mousedown;
261 static int loop=1;
262 static int framedrop=1;
263
264 static int rdftspeed=20;
265 #if CONFIG_AVFILTER
266 static char *vfilters = NULL;
267 #endif
268
269 /* current context */
270 static int is_full_screen;
271 static VideoState *cur_stream;
272 static int64_t audio_callback_time;
273
274 static AVPacket flush_pkt;
275
276 #define FF_ALLOC_EVENT   (SDL_USEREVENT)
277 #define FF_REFRESH_EVENT (SDL_USEREVENT + 1)
278 #define FF_QUIT_EVENT    (SDL_USEREVENT + 2)
279
280 static SDL_Surface *screen;
281
282 static int packet_queue_put(PacketQueue *q, AVPacket *pkt);
283
284 /* packet queue handling */
285 static void packet_queue_init(PacketQueue *q)
286 {
287     memset(q, 0, sizeof(PacketQueue));
288     q->mutex = SDL_CreateMutex();
289     q->cond = SDL_CreateCond();
290     packet_queue_put(q, &flush_pkt);
291 }
292
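/* Free every queued packet and reset the counters; the mutex and condition are
   kept, so the queue stays usable (packet_queue_end() destroys them). */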
293 static void packet_queue_flush(PacketQueue *q)
294 {
295     AVPacketList *pkt, *pkt1;
296
297     SDL_LockMutex(q->mutex);
298     for(pkt = q->first_pkt; pkt != NULL; pkt = pkt1) {
299         pkt1 = pkt->next;
300         av_free_packet(&pkt->pkt);
301         av_freep(&pkt);
302     }
303     q->last_pkt = NULL;
304     q->first_pkt = NULL;
305     q->nb_packets = 0;
306     q->size = 0;
307     SDL_UnlockMutex(q->mutex);
308 }
309
310 static void packet_queue_end(PacketQueue *q)
311 {
312     packet_queue_flush(q);
313     SDL_DestroyMutex(q->mutex);
314     SDL_DestroyCond(q->cond);
315 }
316
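/* Append a packet to the queue. Except for the special flush packet, the packet
   is duplicated with av_dup_packet() so its data outlives the caller, and
   ownership passes to the queue. Returns 0 on success, -1 if duplication or
   allocation fails.

   Sketch of how the demux thread is expected to feed the queue (ic/pkt stand for
   the demuxer's AVFormatContext and a freshly read packet; the actual read loop
   is outside this excerpt):

       if (av_read_frame(ic, pkt) >= 0 && pkt->stream_index == is->video_stream)
           packet_queue_put(&is->videoq, pkt);
*/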
317 static int packet_queue_put(PacketQueue *q, AVPacket *pkt)
318 {
319     AVPacketList *pkt1;
320
321     /* duplicate the packet */
322     if (pkt!=&flush_pkt && av_dup_packet(pkt) < 0)
323         return -1;
324
325     pkt1 = av_malloc(sizeof(AVPacketList));
326     if (!pkt1)
327         return -1;
328     pkt1->pkt = *pkt;
329     pkt1->next = NULL;
330
331
332     SDL_LockMutex(q->mutex);
333
334     if (!q->last_pkt)
335
336         q->first_pkt = pkt1;
337     else
338         q->last_pkt->next = pkt1;
339     q->last_pkt = pkt1;
340     q->nb_packets++;
341     q->size += pkt1->pkt.size + sizeof(*pkt1);
342     /* XXX: should duplicate packet data in DV case */
343     SDL_CondSignal(q->cond);
344
345     SDL_UnlockMutex(q->mutex);
346     return 0;
347 }
348
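/* Mark the queue as aborted and wake any thread blocked in packet_queue_get()
   so it can return -1. */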
349 static void packet_queue_abort(PacketQueue *q)
350 {
351     SDL_LockMutex(q->mutex);
352
353     q->abort_request = 1;
354
355     SDL_CondSignal(q->cond);
356
357     SDL_UnlockMutex(q->mutex);
358 }
359
360 /* return < 0 if aborted, 0 if no packet and > 0 if packet.  */
361 static int packet_queue_get(PacketQueue *q, AVPacket *pkt, int block)
362 {
363     AVPacketList *pkt1;
364     int ret;
365
366     SDL_LockMutex(q->mutex);
367
368     for(;;) {
369         if (q->abort_request) {
370             ret = -1;
371             break;
372         }
373
374         pkt1 = q->first_pkt;
375         if (pkt1) {
376             q->first_pkt = pkt1->next;
377             if (!q->first_pkt)
378                 q->last_pkt = NULL;
379             q->nb_packets--;
380             q->size -= pkt1->pkt.size + sizeof(*pkt1);
381             *pkt = pkt1->pkt;
382             av_free(pkt1);
383             ret = 1;
384             break;
385         } else if (!block) {
386             ret = 0;
387             break;
388         } else {
389             SDL_CondWait(q->cond, q->mutex);
390         }
391     }
392     SDL_UnlockMutex(q->mutex);
393     return ret;
394 }
395
396 static inline void fill_rectangle(SDL_Surface *screen,
397                                   int x, int y, int w, int h, int color)
398 {
399     SDL_Rect rect;
400     rect.x = x;
401     rect.y = y;
402     rect.w = w;
403     rect.h = h;
404     SDL_FillRect(screen, &rect, color);
405 }
406
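/* Helpers for blending palettized subtitles: ALPHA_BLEND(a, oldp, newp, s)
   alpha-blends newp into oldp with an 8-bit alpha, where newp is a sum of 2^s
   samples and s scales oldp up to the same range. For example, with s = 0,
   ALPHA_BLEND(255, oldp, newp, 0) yields newp and ALPHA_BLEND(0, oldp, newp, 0)
   yields oldp. RGBA_IN()/YUVA_IN() unpack a 32-bit pixel or palette entry into
   its components, YUVA_OUT() packs them back. */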
407 #define ALPHA_BLEND(a, oldp, newp, s)\
408 ((((oldp << s) * (255 - (a))) + (newp * (a))) / (255 << s))
409
410 #define RGBA_IN(r, g, b, a, s)\
411 {\
412     unsigned int v = ((const uint32_t *)(s))[0];\
413     a = (v >> 24) & 0xff;\
414     r = (v >> 16) & 0xff;\
415     g = (v >> 8) & 0xff;\
416     b = v & 0xff;\
417 }
418
419 #define YUVA_IN(y, u, v, a, s, pal)\
420 {\
421     unsigned int val = ((const uint32_t *)(pal))[*(const uint8_t*)(s)];\
422     a = (val >> 24) & 0xff;\
423     y = (val >> 16) & 0xff;\
424     u = (val >> 8) & 0xff;\
425     v = val & 0xff;\
426 }
427
428 #define YUVA_OUT(d, y, u, v, a)\
429 {\
430     ((uint32_t *)(d))[0] = (a << 24) | (y << 16) | (u << 8) | v;\
431 }
432
433
434 #define BPP 1
435
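/* Blend one palettized subtitle rectangle (its palette already converted to YUV)
   onto a YUV420P destination picture, clipping the rectangle to the image and
   handling odd start positions and odd width/height for the subsampled chroma
   planes. */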
436 static void blend_subrect(AVPicture *dst, const AVSubtitleRect *rect, int imgw, int imgh)
437 {
438     int wrap, wrap3, width2, skip2;
439     int y, u, v, a, u1, v1, a1, w, h;
440     uint8_t *lum, *cb, *cr;
441     const uint8_t *p;
442     const uint32_t *pal;
443     int dstx, dsty, dstw, dsth;
444
445     dstw = av_clip(rect->w, 0, imgw);
446     dsth = av_clip(rect->h, 0, imgh);
447     dstx = av_clip(rect->x, 0, imgw - dstw);
448     dsty = av_clip(rect->y, 0, imgh - dsth);
449     lum = dst->data[0] + dsty * dst->linesize[0];
450     cb = dst->data[1] + (dsty >> 1) * dst->linesize[1];
451     cr = dst->data[2] + (dsty >> 1) * dst->linesize[2];
452
453     width2 = ((dstw + 1) >> 1) + (dstx & ~dstw & 1);
454     skip2 = dstx >> 1;
455     wrap = dst->linesize[0];
456     wrap3 = rect->pict.linesize[0];
457     p = rect->pict.data[0];
458     pal = (const uint32_t *)rect->pict.data[1];  /* Now in YCrCb! */
459
460     if (dsty & 1) {
461         lum += dstx;
462         cb += skip2;
463         cr += skip2;
464
465         if (dstx & 1) {
466             YUVA_IN(y, u, v, a, p, pal);
467             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
468             cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
469             cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
470             cb++;
471             cr++;
472             lum++;
473             p += BPP;
474         }
475         for(w = dstw - (dstx & 1); w >= 2; w -= 2) {
476             YUVA_IN(y, u, v, a, p, pal);
477             u1 = u;
478             v1 = v;
479             a1 = a;
480             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
481
482             YUVA_IN(y, u, v, a, p + BPP, pal);
483             u1 += u;
484             v1 += v;
485             a1 += a;
486             lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
487             cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
488             cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
489             cb++;
490             cr++;
491             p += 2 * BPP;
492             lum += 2;
493         }
494         if (w) {
495             YUVA_IN(y, u, v, a, p, pal);
496             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
497             cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
498             cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
499             p++;
500             lum++;
501         }
502         p += wrap3 - dstw * BPP;
503         lum += wrap - dstw - dstx;
504         cb += dst->linesize[1] - width2 - skip2;
505         cr += dst->linesize[2] - width2 - skip2;
506     }
507     for(h = dsth - (dsty & 1); h >= 2; h -= 2) {
508         lum += dstx;
509         cb += skip2;
510         cr += skip2;
511
512         if (dstx & 1) {
513             YUVA_IN(y, u, v, a, p, pal);
514             u1 = u;
515             v1 = v;
516             a1 = a;
517             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
518             p += wrap3;
519             lum += wrap;
520             YUVA_IN(y, u, v, a, p, pal);
521             u1 += u;
522             v1 += v;
523             a1 += a;
524             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
525             cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
526             cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
527             cb++;
528             cr++;
529             p += -wrap3 + BPP;
530             lum += -wrap + 1;
531         }
532         for(w = dstw - (dstx & 1); w >= 2; w -= 2) {
533             YUVA_IN(y, u, v, a, p, pal);
534             u1 = u;
535             v1 = v;
536             a1 = a;
537             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
538
539             YUVA_IN(y, u, v, a, p + BPP, pal);
540             u1 += u;
541             v1 += v;
542             a1 += a;
543             lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
544             p += wrap3;
545             lum += wrap;
546
547             YUVA_IN(y, u, v, a, p, pal);
548             u1 += u;
549             v1 += v;
550             a1 += a;
551             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
552
553             YUVA_IN(y, u, v, a, p + BPP, pal);
554             u1 += u;
555             v1 += v;
556             a1 += a;
557             lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
558
559             cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 2);
560             cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 2);
561
562             cb++;
563             cr++;
564             p += -wrap3 + 2 * BPP;
565             lum += -wrap + 2;
566         }
567         if (w) {
568             YUVA_IN(y, u, v, a, p, pal);
569             u1 = u;
570             v1 = v;
571             a1 = a;
572             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
573             p += wrap3;
574             lum += wrap;
575             YUVA_IN(y, u, v, a, p, pal);
576             u1 += u;
577             v1 += v;
578             a1 += a;
579             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
580             cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
581             cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
582             cb++;
583             cr++;
584             p += -wrap3 + BPP;
585             lum += -wrap + 1;
586         }
587         p += wrap3 + (wrap3 - dstw * BPP);
588         lum += wrap + (wrap - dstw - dstx);
589         cb += dst->linesize[1] - width2 - skip2;
590         cr += dst->linesize[2] - width2 - skip2;
591     }
592     /* handle odd height */
593     if (h) {
594         lum += dstx;
595         cb += skip2;
596         cr += skip2;
597
598         if (dstx & 1) {
599             YUVA_IN(y, u, v, a, p, pal);
600             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
601             cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
602             cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
603             cb++;
604             cr++;
605             lum++;
606             p += BPP;
607         }
608         for(w = dstw - (dstx & 1); w >= 2; w -= 2) {
609             YUVA_IN(y, u, v, a, p, pal);
610             u1 = u;
611             v1 = v;
612             a1 = a;
613             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
614
615             YUVA_IN(y, u, v, a, p + BPP, pal);
616             u1 += u;
617             v1 += v;
618             a1 += a;
619             lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
620             cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
621             cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
622             cb++;
623             cr++;
624             p += 2 * BPP;
625             lum += 2;
626         }
627         if (w) {
628             YUVA_IN(y, u, v, a, p, pal);
629             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
630             cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
631             cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
632         }
633     }
634 }
635
636 static void free_subpicture(SubPicture *sp)
637 {
638     avsubtitle_free(&sp->sub);
639 }
640
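/* Show the picture at the read index of pictq: blend any subtitle whose display
   time has been reached into the YUV overlay, then letterbox the overlay into
   the window according to the sample aspect ratio. */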
641 static void video_image_display(VideoState *is)
642 {
643     VideoPicture *vp;
644     SubPicture *sp;
645     AVPicture pict;
646     float aspect_ratio;
647     int width, height, x, y;
648     SDL_Rect rect;
649     int i;
650
651     vp = &is->pictq[is->pictq_rindex];
652     if (vp->bmp) {
653 #if CONFIG_AVFILTER
654          if (vp->picref->video->pixel_aspect.num == 0)
655              aspect_ratio = 0;
656          else
657              aspect_ratio = av_q2d(vp->picref->video->pixel_aspect);
658 #else
659
660         /* XXX: use variable in the frame */
661         if (is->video_st->sample_aspect_ratio.num)
662             aspect_ratio = av_q2d(is->video_st->sample_aspect_ratio);
663         else if (is->video_st->codec->sample_aspect_ratio.num)
664             aspect_ratio = av_q2d(is->video_st->codec->sample_aspect_ratio);
665         else
666             aspect_ratio = 0;
667 #endif
668         if (aspect_ratio <= 0.0)
669             aspect_ratio = 1.0;
670         aspect_ratio *= (float)vp->width / (float)vp->height;
671
672         if (is->subtitle_st)
673         {
674             if (is->subpq_size > 0)
675             {
676                 sp = &is->subpq[is->subpq_rindex];
677
678                 if (vp->pts >= sp->pts + ((float) sp->sub.start_display_time / 1000))
679                 {
680                     SDL_LockYUVOverlay (vp->bmp);
681
682                     pict.data[0] = vp->bmp->pixels[0];
683                     pict.data[1] = vp->bmp->pixels[2];
684                     pict.data[2] = vp->bmp->pixels[1];
685
686                     pict.linesize[0] = vp->bmp->pitches[0];
687                     pict.linesize[1] = vp->bmp->pitches[2];
688                     pict.linesize[2] = vp->bmp->pitches[1];
689
690                     for (i = 0; i < sp->sub.num_rects; i++)
691                         blend_subrect(&pict, sp->sub.rects[i],
692                                       vp->bmp->w, vp->bmp->h);
693
694                     SDL_UnlockYUVOverlay (vp->bmp);
695                 }
696             }
697         }
698
699
700         /* XXX: we assume the screen has a 1:1 pixel aspect ratio */
701         height = is->height;
702         width = ((int)rint(height * aspect_ratio)) & ~1;
703         if (width > is->width) {
704             width = is->width;
705             height = ((int)rint(width / aspect_ratio)) & ~1;
706         }
707         x = (is->width - width) / 2;
708         y = (is->height - height) / 2;
709         is->no_background = 0;
710         rect.x = is->xleft + x;
711         rect.y = is->ytop  + y;
712         rect.w = width;
713         rect.h = height;
714         SDL_DisplayYUVOverlay(vp->bmp, &rect);
715     }
716 }
717
718 /* get the current audio output buffer size, in bytes. With SDL, we
719    cannot get precise information */
720 static int audio_write_get_buf_size(VideoState *is)
721 {
722     return is->audio_buf_size - is->audio_buf_index;
723 }
724
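/* Mathematical modulo: like a % b, but the result is always non-negative for
   positive b, e.g. compute_mod(-3, SAMPLE_ARRAY_SIZE) == SAMPLE_ARRAY_SIZE - 3. */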
725 static inline int compute_mod(int a, int b)
726 {
727     a = a % b;
728     if (a >= 0)
729         return a;
730     else
731         return a + b;
732 }
733
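/* Draw the audio visualization: one waveform per channel when show_audio == 1,
   otherwise a scrolling RDFT spectrogram column limited to at most two channels. */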
734 static void video_audio_display(VideoState *s)
735 {
736     int i, i_start, x, y1, y, ys, delay, n, nb_display_channels;
737     int ch, channels, h, h2, bgcolor, fgcolor;
738     int64_t time_diff; /* microseconds; int64_t so the av_gettime() difference cannot overflow */
739     int rdft_bits, nb_freq;
740
741     for(rdft_bits=1; (1<<rdft_bits)<2*s->height; rdft_bits++)
742         ;
743     nb_freq= 1<<(rdft_bits-1);
744
745     /* compute display index: center on the currently output samples */
746     channels = s->audio_st->codec->channels;
747     nb_display_channels = channels;
748     if (!s->paused) {
749         int data_used= s->show_audio==1 ? s->width : (2*nb_freq);
750         n = 2 * channels;
751         delay = audio_write_get_buf_size(s);
752         delay /= n;
753
754         /* to be more precise, we take into account the time spent since
755            the last buffer computation */
756         if (audio_callback_time) {
757             time_diff = av_gettime() - audio_callback_time;
758             delay -= (time_diff * s->audio_st->codec->sample_rate) / 1000000;
759         }
760
761         delay += 2*data_used;
762         if (delay < data_used)
763             delay = data_used;
764
765         i_start= x = compute_mod(s->sample_array_index - delay * channels, SAMPLE_ARRAY_SIZE);
766         if(s->show_audio==1){
767             h= INT_MIN;
768             for(i=0; i<1000; i+=channels){
769                 int idx= (SAMPLE_ARRAY_SIZE + x - i) % SAMPLE_ARRAY_SIZE;
770                 int a= s->sample_array[idx];
771                 int b= s->sample_array[(idx + 4*channels)%SAMPLE_ARRAY_SIZE];
772                 int c= s->sample_array[(idx + 5*channels)%SAMPLE_ARRAY_SIZE];
773                 int d= s->sample_array[(idx + 9*channels)%SAMPLE_ARRAY_SIZE];
774                 int score= a-d;
775                 if(h<score && (b^c)<0){
776                     h= score;
777                     i_start= idx;
778                 }
779             }
780         }
781
782         s->last_i_start = i_start;
783     } else {
784         i_start = s->last_i_start;
785     }
786
787     bgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0x00);
788     if(s->show_audio==1){
789         fill_rectangle(screen,
790                        s->xleft, s->ytop, s->width, s->height,
791                        bgcolor);
792
793         fgcolor = SDL_MapRGB(screen->format, 0xff, 0xff, 0xff);
794
795         /* total height for one channel */
796         h = s->height / nb_display_channels;
797         /* graph height / 2 */
798         h2 = (h * 9) / 20;
799         for(ch = 0;ch < nb_display_channels; ch++) {
800             i = i_start + ch;
801             y1 = s->ytop + ch * h + (h / 2); /* position of center line */
802             for(x = 0; x < s->width; x++) {
803                 y = (s->sample_array[i] * h2) >> 15;
804                 if (y < 0) {
805                     y = -y;
806                     ys = y1 - y;
807                 } else {
808                     ys = y1;
809                 }
810                 fill_rectangle(screen,
811                                s->xleft + x, ys, 1, y,
812                                fgcolor);
813                 i += channels;
814                 if (i >= SAMPLE_ARRAY_SIZE)
815                     i -= SAMPLE_ARRAY_SIZE;
816             }
817         }
818
819         fgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0xff);
820
821         for(ch = 1;ch < nb_display_channels; ch++) {
822             y = s->ytop + ch * h;
823             fill_rectangle(screen,
824                            s->xleft, y, s->width, 1,
825                            fgcolor);
826         }
827         SDL_UpdateRect(screen, s->xleft, s->ytop, s->width, s->height);
828     }else{
829         nb_display_channels= FFMIN(nb_display_channels, 2);
830         if(rdft_bits != s->rdft_bits){
831             av_rdft_end(s->rdft);
832             av_free(s->rdft_data);
833             s->rdft = av_rdft_init(rdft_bits, DFT_R2C);
834             s->rdft_bits= rdft_bits;
835             s->rdft_data= av_malloc(4*nb_freq*sizeof(*s->rdft_data));
836         }
837         {
838             FFTSample *data[2];
839             for(ch = 0;ch < nb_display_channels; ch++) {
840                 data[ch] = s->rdft_data + 2*nb_freq*ch;
841                 i = i_start + ch;
842                 for(x = 0; x < 2*nb_freq; x++) {
843                     double w= (x-nb_freq)*(1.0/nb_freq);
844                     data[ch][x]= s->sample_array[i]*(1.0-w*w);
845                     i += channels;
846                     if (i >= SAMPLE_ARRAY_SIZE)
847                         i -= SAMPLE_ARRAY_SIZE;
848                 }
849                 av_rdft_calc(s->rdft, data[ch]);
850             }
851             // Least efficient way to do this; we should of course access the data directly, but it's more than fast enough
852             for(y=0; y<s->height; y++){
853                 double w= 1/sqrt(nb_freq);
854                 int a= sqrt(w*sqrt(data[0][2*y+0]*data[0][2*y+0] + data[0][2*y+1]*data[0][2*y+1]));
855                 int b= (nb_display_channels == 2 ) ? sqrt(w*sqrt(data[1][2*y+0]*data[1][2*y+0]
856                        + data[1][2*y+1]*data[1][2*y+1])) : a;
857                 a= FFMIN(a,255);
858                 b= FFMIN(b,255);
859                 fgcolor = SDL_MapRGB(screen->format, a, b, (a+b)/2);
860
861                 fill_rectangle(screen,
862                             s->xpos, s->height-y, 1, 1,
863                             fgcolor);
864             }
865         }
866         SDL_UpdateRect(screen, s->xpos, s->ytop, 1, s->height);
867         s->xpos++;
868         if(s->xpos >= s->width)
869             s->xpos= s->xleft;
870     }
871 }
872
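/* (Re)create the SDL output surface. The size is taken from the detected
   fullscreen resolution, the user-requested window size, the filter-graph or
   codec frame size, or a 640x480 fallback, in that order; nothing is done if
   the surface already has the requested size. */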
873 static int video_open(VideoState *is){
874     int flags = SDL_HWSURFACE|SDL_ASYNCBLIT|SDL_HWACCEL;
875     int w,h;
876
877     if(is_full_screen) flags |= SDL_FULLSCREEN;
878     else               flags |= SDL_RESIZABLE;
879
880     if (is_full_screen && fs_screen_width) {
881         w = fs_screen_width;
882         h = fs_screen_height;
883     } else if(!is_full_screen && screen_width){
884         w = screen_width;
885         h = screen_height;
886 #if CONFIG_AVFILTER
887     }else if (is->out_video_filter && is->out_video_filter->inputs[0]){
888         w = is->out_video_filter->inputs[0]->w;
889         h = is->out_video_filter->inputs[0]->h;
890 #else
891     }else if (is->video_st && is->video_st->codec->width){
892         w = is->video_st->codec->width;
893         h = is->video_st->codec->height;
894 #endif
895     } else {
896         w = 640;
897         h = 480;
898     }
899     if(screen && is->width == screen->w && screen->w == w
900        && is->height== screen->h && screen->h == h)
901         return 0;
902
903 #ifndef __APPLE__
904     screen = SDL_SetVideoMode(w, h, 0, flags);
905 #else
906     /* setting bits_per_pixel = 0 or 32 causes blank video on OS X */
907     screen = SDL_SetVideoMode(w, h, 24, flags);
908 #endif
909     if (!screen) {
910         fprintf(stderr, "SDL: could not set video mode - exiting\n");
911         return -1;
912     }
913     if (!window_title)
914         window_title = input_filename;
915     SDL_WM_SetCaption(window_title, window_title);
916
917     is->width = screen->w;
918     is->height = screen->h;
919
920     return 0;
921 }
922
923 /* display the current picture, if any */
924 static void video_display(VideoState *is)
925 {
926     if(!screen)
927         video_open(cur_stream);
928     if (is->audio_st && is->show_audio)
929         video_audio_display(is);
930     else if (is->video_st)
931         video_image_display(is);
932 }
933
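/* Background thread that drives redraws: it pushes an FF_REFRESH_EVENT onto the
   SDL event queue unless one is still pending, then sleeps rdftspeed ms in
   spectrum mode or 5 ms otherwise. */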
934 static int refresh_thread(void *opaque)
935 {
936     VideoState *is= opaque;
937     while(!is->abort_request){
938         SDL_Event event;
939         event.type = FF_REFRESH_EVENT;
940         event.user.data1 = opaque;
941         if(!is->refresh){
942             is->refresh=1;
943             SDL_PushEvent(&event);
944         }
945         usleep(is->audio_st && is->show_audio ? rdftspeed*1000 : 5000); //FIXME ideally we should wait for the correct time, but SDL's event passing is so slow it would be silly
946     }
947     return 0;
948 }
949
950 /* get the current audio clock value */
951 static double get_audio_clock(VideoState *is)
952 {
953     double pts;
954     int hw_buf_size, bytes_per_sec;
955     pts = is->audio_clock;
956     hw_buf_size = audio_write_get_buf_size(is);
957     bytes_per_sec = 0;
958     if (is->audio_st) {
959         bytes_per_sec = is->audio_st->codec->sample_rate *
960             2 * is->audio_st->codec->channels;
961     }
962     if (bytes_per_sec)
963         pts -= (double)hw_buf_size / bytes_per_sec;
964     return pts;
965 }
966
967 /* get the current video clock value */
968 static double get_video_clock(VideoState *is)
969 {
970     if (is->paused) {
971         return is->video_current_pts;
972     } else {
973         return is->video_current_pts_drift + av_gettime() / 1000000.0;
974     }
975 }
976
977 /* get the current external clock value */
978 static double get_external_clock(VideoState *is)
979 {
980     int64_t ti;
981     ti = av_gettime();
982     return is->external_clock + ((ti - is->external_clock_time) * 1e-6);
983 }
984
985 /* get the current master clock value */
986 static double get_master_clock(VideoState *is)
987 {
988     double val;
989
990     if (is->av_sync_type == AV_SYNC_VIDEO_MASTER) {
991         if (is->video_st)
992             val = get_video_clock(is);
993         else
994             val = get_audio_clock(is);
995     } else if (is->av_sync_type == AV_SYNC_AUDIO_MASTER) {
996         if (is->audio_st)
997             val = get_audio_clock(is);
998         else
999             val = get_video_clock(is);
1000     } else {
1001         val = get_external_clock(is);
1002     }
1003     return val;
1004 }
1005
1006 /* seek in the stream */
1007 static void stream_seek(VideoState *is, int64_t pos, int64_t rel, int seek_by_bytes)
1008 {
1009     if (!is->seek_req) {
1010         is->seek_pos = pos;
1011         is->seek_rel = rel;
1012         is->seek_flags &= ~AVSEEK_FLAG_BYTE;
1013         if (seek_by_bytes)
1014             is->seek_flags |= AVSEEK_FLAG_BYTE;
1015         is->seek_req = 1;
1016     }
1017 }
1018
1019 /* pause or resume the video */
1020 static void stream_pause(VideoState *is)
1021 {
1022     if (is->paused) {
1023         is->frame_timer += av_gettime() / 1000000.0 + is->video_current_pts_drift - is->video_current_pts;
1024         if(is->read_pause_return != AVERROR(ENOSYS)){
1025             is->video_current_pts = is->video_current_pts_drift + av_gettime() / 1000000.0;
1026         }
1027         is->video_current_pts_drift = is->video_current_pts - av_gettime() / 1000000.0;
1028     }
1029     is->paused = !is->paused;
1030 }
1031
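/* Compute the wall-clock time at which the frame with the given pts should be
   shown. The nominal delay is the pts difference to the previous frame; when
   video is not the master clock, the delay is doubled or dropped to zero to pull
   the video clock toward the master clock, and the result is accumulated into
   frame_timer. */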
1032 static double compute_target_time(double frame_current_pts, VideoState *is)
1033 {
1034     double delay, sync_threshold, diff = 0;
1035
1036     /* compute nominal delay */
1037     delay = frame_current_pts - is->frame_last_pts;
1038     if (delay <= 0 || delay >= 10.0) {
1039         /* if incorrect delay, use previous one */
1040         delay = is->frame_last_delay;
1041     } else {
1042         is->frame_last_delay = delay;
1043     }
1044     is->frame_last_pts = frame_current_pts;
1045
1046     /* update delay to follow master synchronisation source */
1047     if (((is->av_sync_type == AV_SYNC_AUDIO_MASTER && is->audio_st) ||
1048          is->av_sync_type == AV_SYNC_EXTERNAL_CLOCK)) {
1049         /* if video is slave, we try to correct big delays by
1050            duplicating or deleting a frame */
1051         diff = get_video_clock(is) - get_master_clock(is);
1052
1053         /* skip or repeat frame. We take into account the
1054            delay to compute the threshold. I still don't know
1055            if it is the best guess */
1056         sync_threshold = FFMAX(AV_SYNC_THRESHOLD, delay);
1057         if (fabs(diff) < AV_NOSYNC_THRESHOLD) {
1058             if (diff <= -sync_threshold)
1059                 delay = 0;
1060             else if (diff >= sync_threshold)
1061                 delay = 2 * delay;
1062         }
1063     }
1064     is->frame_timer += delay;
1065
1066     av_dlog(NULL, "video: delay=%0.3f pts=%0.3f A-V=%f\n",
1067             delay, frame_current_pts, -diff);
1068
1069     return is->frame_timer;
1070 }
1071
1072 /* called to display each frame */
1073 static void video_refresh_timer(void *opaque)
1074 {
1075     VideoState *is = opaque;
1076     VideoPicture *vp;
1077
1078     SubPicture *sp, *sp2;
1079
1080     if (is->video_st) {
1081 retry:
1082         if (is->pictq_size == 0) {
1083             // nothing to do, no picture to display in the queue
1084         } else {
1085             double time= av_gettime()/1000000.0;
1086             double next_target;
1087             /* dequeue the picture */
1088             vp = &is->pictq[is->pictq_rindex];
1089
1090             if(time < vp->target_clock)
1091                 return;
1092             /* update current video pts */
1093             is->video_current_pts = vp->pts;
1094             is->video_current_pts_drift = is->video_current_pts - time;
1095             is->video_current_pos = vp->pos;
1096             if(is->pictq_size > 1){
1097                 VideoPicture *nextvp= &is->pictq[(is->pictq_rindex+1)%VIDEO_PICTURE_QUEUE_SIZE];
1098                 assert(nextvp->target_clock >= vp->target_clock);
1099                 next_target= nextvp->target_clock;
1100             }else{
1101                 next_target= vp->target_clock + is->video_clock - vp->pts; //FIXME pass durations cleanly
1102             }
1103             if(framedrop && time > next_target){
1104                 is->skip_frames *= 1.0 + FRAME_SKIP_FACTOR;
1105                 if(is->pictq_size > 1 || time > next_target + 0.5){
1106                     /* update queue size and signal for next picture */
1107                     if (++is->pictq_rindex == VIDEO_PICTURE_QUEUE_SIZE)
1108                         is->pictq_rindex = 0;
1109
1110                     SDL_LockMutex(is->pictq_mutex);
1111                     is->pictq_size--;
1112                     SDL_CondSignal(is->pictq_cond);
1113                     SDL_UnlockMutex(is->pictq_mutex);
1114                     goto retry;
1115                 }
1116             }
1117
1118             if(is->subtitle_st) {
1119                 if (is->subtitle_stream_changed) {
1120                     SDL_LockMutex(is->subpq_mutex);
1121
1122                     while (is->subpq_size) {
1123                         free_subpicture(&is->subpq[is->subpq_rindex]);
1124
1125                         /* update queue size and signal for next picture */
1126                         if (++is->subpq_rindex == SUBPICTURE_QUEUE_SIZE)
1127                             is->subpq_rindex = 0;
1128
1129                         is->subpq_size--;
1130                     }
1131                     is->subtitle_stream_changed = 0;
1132
1133                     SDL_CondSignal(is->subpq_cond);
1134                     SDL_UnlockMutex(is->subpq_mutex);
1135                 } else {
1136                     if (is->subpq_size > 0) {
1137                         sp = &is->subpq[is->subpq_rindex];
1138
1139                         if (is->subpq_size > 1)
1140                             sp2 = &is->subpq[(is->subpq_rindex + 1) % SUBPICTURE_QUEUE_SIZE];
1141                         else
1142                             sp2 = NULL;
1143
1144                         if ((is->video_current_pts > (sp->pts + ((float) sp->sub.end_display_time / 1000)))
1145                                 || (sp2 && is->video_current_pts > (sp2->pts + ((float) sp2->sub.start_display_time / 1000))))
1146                         {
1147                             free_subpicture(sp);
1148
1149                             /* update queue size and signal for next picture */
1150                             if (++is->subpq_rindex == SUBPICTURE_QUEUE_SIZE)
1151                                 is->subpq_rindex = 0;
1152
1153                             SDL_LockMutex(is->subpq_mutex);
1154                             is->subpq_size--;
1155                             SDL_CondSignal(is->subpq_cond);
1156                             SDL_UnlockMutex(is->subpq_mutex);
1157                         }
1158                     }
1159                 }
1160             }
1161
1162             /* display picture */
1163             if (!display_disable)
1164                 video_display(is);
1165
1166             /* update queue size and signal for next picture */
1167             if (++is->pictq_rindex == VIDEO_PICTURE_QUEUE_SIZE)
1168                 is->pictq_rindex = 0;
1169
1170             SDL_LockMutex(is->pictq_mutex);
1171             is->pictq_size--;
1172             SDL_CondSignal(is->pictq_cond);
1173             SDL_UnlockMutex(is->pictq_mutex);
1174         }
1175     } else if (is->audio_st) {
1176         /* draw the next audio frame */
1177
1178         /* if there is only an audio stream, then display the audio bars (better
1179            than nothing, just to test the implementation) */
1180
1181         /* display picture */
1182         if (!display_disable)
1183             video_display(is);
1184     }
1185     if (show_status) {
1186         static int64_t last_time;
1187         int64_t cur_time;
1188         int aqsize, vqsize, sqsize;
1189         double av_diff;
1190
1191         cur_time = av_gettime();
1192         if (!last_time || (cur_time - last_time) >= 30000) {
1193             aqsize = 0;
1194             vqsize = 0;
1195             sqsize = 0;
1196             if (is->audio_st)
1197                 aqsize = is->audioq.size;
1198             if (is->video_st)
1199                 vqsize = is->videoq.size;
1200             if (is->subtitle_st)
1201                 sqsize = is->subtitleq.size;
1202             av_diff = 0;
1203             if (is->audio_st && is->video_st)
1204                 av_diff = get_audio_clock(is) - get_video_clock(is);
1205             printf("%7.2f A-V:%7.3f s:%3.1f aq=%5dKB vq=%5dKB sq=%5dB f=%"PRId64"/%"PRId64"   \r",
1206                    get_master_clock(is), av_diff, FFMAX(is->skip_frames-1, 0), aqsize / 1024, vqsize / 1024, sqsize, is->pts_ctx.num_faulty_dts, is->pts_ctx.num_faulty_pts);
1207             fflush(stdout);
1208             last_time = cur_time;
1209         }
1210     }
1211 }
1212
1213 static void stream_close(VideoState *is)
1214 {
1215     VideoPicture *vp;
1216     int i;
1217     /* XXX: use a special url_shutdown call to abort parse cleanly */
1218     is->abort_request = 1;
1219     SDL_WaitThread(is->parse_tid, NULL);
1220     SDL_WaitThread(is->refresh_tid, NULL);
1221
1222     /* free all pictures */
1223     for(i=0;i<VIDEO_PICTURE_QUEUE_SIZE; i++) {
1224         vp = &is->pictq[i];
1225 #if CONFIG_AVFILTER
1226         if (vp->picref) {
1227             avfilter_unref_buffer(vp->picref);
1228             vp->picref = NULL;
1229         }
1230 #endif
1231         if (vp->bmp) {
1232             SDL_FreeYUVOverlay(vp->bmp);
1233             vp->bmp = NULL;
1234         }
1235     }
1236     SDL_DestroyMutex(is->pictq_mutex);
1237     SDL_DestroyCond(is->pictq_cond);
1238     SDL_DestroyMutex(is->subpq_mutex);
1239     SDL_DestroyCond(is->subpq_cond);
1240 #if !CONFIG_AVFILTER
1241     if (is->img_convert_ctx)
1242         sws_freeContext(is->img_convert_ctx);
1243 #endif
1244     av_free(is);
1245 }
1246
1247 static void do_exit(void)
1248 {
1249     if (cur_stream) {
1250         stream_close(cur_stream);
1251         cur_stream = NULL;
1252     }
1253     uninit_opts();
1254 #if CONFIG_AVFILTER
1255     avfilter_uninit();
1256 #endif
1257     if (show_status)
1258         printf("\n");
1259     SDL_Quit();
1260     av_log(NULL, AV_LOG_QUIET, "");
1261     exit(0);
1262 }
1263
1264 /* allocate a picture (this needs to be done in the main thread to avoid
1265    potential locking problems) */
1266 static void alloc_picture(void *opaque)
1267 {
1268     VideoState *is = opaque;
1269     VideoPicture *vp;
1270
1271     vp = &is->pictq[is->pictq_windex];
1272
1273     if (vp->bmp)
1274         SDL_FreeYUVOverlay(vp->bmp);
1275
1276 #if CONFIG_AVFILTER
1277     if (vp->picref)
1278         avfilter_unref_buffer(vp->picref);
1279     vp->picref = NULL;
1280
1281     vp->width   = is->out_video_filter->inputs[0]->w;
1282     vp->height  = is->out_video_filter->inputs[0]->h;
1283     vp->pix_fmt = is->out_video_filter->inputs[0]->format;
1284 #else
1285     vp->width   = is->video_st->codec->width;
1286     vp->height  = is->video_st->codec->height;
1287     vp->pix_fmt = is->video_st->codec->pix_fmt;
1288 #endif
1289
1290     vp->bmp = SDL_CreateYUVOverlay(vp->width, vp->height,
1291                                    SDL_YV12_OVERLAY,
1292                                    screen);
1293     if (!vp->bmp || vp->bmp->pitches[0] < vp->width) {
1294         /* SDL allocates a buffer smaller than requested if the video
1295          * overlay hardware is unable to support the requested size. */
1296         fprintf(stderr, "Error: the video system does not support an image\n"
1297                         "size of %dx%d pixels. Try using -lowres or -vf \"scale=w:h\"\n"
1298                         "to reduce the image size.\n", vp->width, vp->height );
1299         do_exit();
1300     }
1301
1302     SDL_LockMutex(is->pictq_mutex);
1303     vp->allocated = 1;
1304     SDL_CondSignal(is->pictq_cond);
1305     SDL_UnlockMutex(is->pictq_mutex);
1306 }
1307
1308 /**
1309  * Queue a decoded picture for later display by the refresh timer.
1310  * @param pts the dts of the pkt / pts of the frame, guessed if not known
1311  */
1312 static int queue_picture(VideoState *is, AVFrame *src_frame, double pts, int64_t pos)
1313 {
1314     VideoPicture *vp;
1315     int dst_pix_fmt;
1316 #if CONFIG_AVFILTER
1317     AVPicture pict_src;
1318 #endif
1319     /* wait until we have space to put a new picture */
1320     SDL_LockMutex(is->pictq_mutex);
1321
1322     if(is->pictq_size>=VIDEO_PICTURE_QUEUE_SIZE && !is->refresh)
1323         is->skip_frames= FFMAX(1.0 - FRAME_SKIP_FACTOR, is->skip_frames * (1.0-FRAME_SKIP_FACTOR));
1324
1325     while (is->pictq_size >= VIDEO_PICTURE_QUEUE_SIZE &&
1326            !is->videoq.abort_request) {
1327         SDL_CondWait(is->pictq_cond, is->pictq_mutex);
1328     }
1329     SDL_UnlockMutex(is->pictq_mutex);
1330
1331     if (is->videoq.abort_request)
1332         return -1;
1333
1334     vp = &is->pictq[is->pictq_windex];
1335
1336     /* alloc or resize hardware picture buffer */
1337     if (!vp->bmp ||
1338 #if CONFIG_AVFILTER
1339         vp->width  != is->out_video_filter->inputs[0]->w ||
1340         vp->height != is->out_video_filter->inputs[0]->h) {
1341 #else
1342         vp->width != is->video_st->codec->width ||
1343         vp->height != is->video_st->codec->height) {
1344 #endif
1345         SDL_Event event;
1346
1347         vp->allocated = 0;
1348
1349         /* the allocation must be done in the main thread to avoid
1350            locking problems */
1351         event.type = FF_ALLOC_EVENT;
1352         event.user.data1 = is;
1353         SDL_PushEvent(&event);
1354
1355         /* wait until the picture is allocated */
1356         SDL_LockMutex(is->pictq_mutex);
1357         while (!vp->allocated && !is->videoq.abort_request) {
1358             SDL_CondWait(is->pictq_cond, is->pictq_mutex);
1359         }
1360         SDL_UnlockMutex(is->pictq_mutex);
1361
1362         if (is->videoq.abort_request)
1363             return -1;
1364     }
1365
1366     /* if the frame is not skipped, then display it */
1367     if (vp->bmp) {
1368         AVPicture pict;
1369 #if CONFIG_AVFILTER
1370         if(vp->picref)
1371             avfilter_unref_buffer(vp->picref);
1372         vp->picref = src_frame->opaque;
1373 #endif
1374
1375         /* get a pointer on the bitmap */
1376         SDL_LockYUVOverlay (vp->bmp);
1377
1378         dst_pix_fmt = PIX_FMT_YUV420P;
1379         memset(&pict,0,sizeof(AVPicture));
1380         pict.data[0] = vp->bmp->pixels[0];
1381         pict.data[1] = vp->bmp->pixels[2];
1382         pict.data[2] = vp->bmp->pixels[1];
1383
1384         pict.linesize[0] = vp->bmp->pitches[0];
1385         pict.linesize[1] = vp->bmp->pitches[2];
1386         pict.linesize[2] = vp->bmp->pitches[1];
1387
1388 #if CONFIG_AVFILTER
1389         pict_src.data[0] = src_frame->data[0];
1390         pict_src.data[1] = src_frame->data[1];
1391         pict_src.data[2] = src_frame->data[2];
1392
1393         pict_src.linesize[0] = src_frame->linesize[0];
1394         pict_src.linesize[1] = src_frame->linesize[1];
1395         pict_src.linesize[2] = src_frame->linesize[2];
1396
1397         //FIXME use direct rendering
1398         av_picture_copy(&pict, &pict_src,
1399                         vp->pix_fmt, vp->width, vp->height);
1400 #else
1401         sws_flags = av_get_int(sws_opts, "sws_flags", NULL);
1402         is->img_convert_ctx = sws_getCachedContext(is->img_convert_ctx,
1403             vp->width, vp->height, vp->pix_fmt, vp->width, vp->height,
1404             dst_pix_fmt, sws_flags, NULL, NULL, NULL);
1405         if (is->img_convert_ctx == NULL) {
1406             fprintf(stderr, "Cannot initialize the conversion context\n");
1407             exit(1);
1408         }
1409         sws_scale(is->img_convert_ctx, src_frame->data, src_frame->linesize,
1410                   0, vp->height, pict.data, pict.linesize);
1411 #endif
1412         /* update the bitmap content */
1413         SDL_UnlockYUVOverlay(vp->bmp);
1414
1415         vp->pts = pts;
1416         vp->pos = pos;
1417
1418         /* now we can update the picture count */
1419         if (++is->pictq_windex == VIDEO_PICTURE_QUEUE_SIZE)
1420             is->pictq_windex = 0;
1421         SDL_LockMutex(is->pictq_mutex);
1422         vp->target_clock= compute_target_time(vp->pts, is);
1423
1424         is->pictq_size++;
1425         SDL_UnlockMutex(is->pictq_mutex);
1426     }
1427     return 0;
1428 }
1429
1430 /**
1431  * compute the exact PTS for the picture if it is omitted in the stream
1432  * @param pts1 the dts of the pkt / pts of the frame
1433  */
1434 static int output_picture2(VideoState *is, AVFrame *src_frame, double pts1, int64_t pos)
1435 {
1436     double frame_delay, pts;
1437
1438     pts = pts1;
1439
1440     if (pts != 0) {
1441         /* update video clock with pts, if present */
1442         is->video_clock = pts;
1443     } else {
1444         pts = is->video_clock;
1445     }
1446     /* update video clock for next frame */
1447     frame_delay = av_q2d(is->video_st->codec->time_base);
1448     /* for MPEG2, the frame can be repeated, so we update the
1449        clock accordingly */
1450     frame_delay += src_frame->repeat_pict * (frame_delay * 0.5);
1451     is->video_clock += frame_delay;
1452
1453     return queue_picture(is, src_frame, pts, pos);
1454 }
1455
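/* Pull the next packet from the video queue and decode it. A flush packet resets
   the decoder, the picture queue timing and the pts correction state. Returns 1
   when a frame that should be displayed was decoded, 0 when the frame is skipped
   or no frame was produced, and -1 on abort. */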
1456 static int get_video_frame(VideoState *is, AVFrame *frame, int64_t *pts, AVPacket *pkt)
1457 {
1458     int len1, got_picture, i;
1459
1460     if (packet_queue_get(&is->videoq, pkt, 1) < 0)
1461         return -1;
1462
1463     if (pkt->data == flush_pkt.data) {
1464         avcodec_flush_buffers(is->video_st->codec);
1465
1466         SDL_LockMutex(is->pictq_mutex);
1467         // Make sure there are no long-delay timers (ideally we should just flush the queue, but that's harder)
1468         for (i = 0; i < VIDEO_PICTURE_QUEUE_SIZE; i++) {
1469             is->pictq[i].target_clock= 0;
1470         }
1471         while (is->pictq_size && !is->videoq.abort_request) {
1472             SDL_CondWait(is->pictq_cond, is->pictq_mutex);
1473         }
1474         is->video_current_pos = -1;
1475         SDL_UnlockMutex(is->pictq_mutex);
1476
1477         init_pts_correction(&is->pts_ctx);
1478         is->frame_last_pts = AV_NOPTS_VALUE;
1479         is->frame_last_delay = 0;
1480         is->frame_timer = (double)av_gettime() / 1000000.0;
1481         is->skip_frames = 1;
1482         is->skip_frames_index = 0;
1483         return 0;
1484     }
1485
1486     len1 = avcodec_decode_video2(is->video_st->codec,
1487                                  frame, &got_picture,
1488                                  pkt);
1489
1490     if (got_picture) {
1491         if (decoder_reorder_pts == -1) {
1492             *pts = guess_correct_pts(&is->pts_ctx, frame->pkt_pts, frame->pkt_dts);
1493         } else if (decoder_reorder_pts) {
1494             *pts = frame->pkt_pts;
1495         } else {
1496             *pts = frame->pkt_dts;
1497         }
1498
1499         if (*pts == AV_NOPTS_VALUE) {
1500             *pts = 0;
1501         }
1502
1503         is->skip_frames_index += 1;
1504         if(is->skip_frames_index >= is->skip_frames){
1505             is->skip_frames_index -= FFMAX(is->skip_frames, 1.0);
1506             return 1;
1507         }
1508
1509     }
1510     return 0;
1511 }
1512
1513 #if CONFIG_AVFILTER
1514 typedef struct {
1515     VideoState *is;
1516     AVFrame *frame;
1517     int use_dr1;
1518 } FilterPriv;
1519
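/* AVCodecContext.get_buffer() override used when the decoder supports direct
   rendering (CODEC_CAP_DR1): decoded frames are written straight into buffers
   obtained from the filter graph, so input_request_frame() can avoid a copy. */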
1520 static int input_get_buffer(AVCodecContext *codec, AVFrame *pic)
1521 {
1522     AVFilterContext *ctx = codec->opaque;
1523     AVFilterBufferRef  *ref;
1524     int perms = AV_PERM_WRITE;
1525     int i, w, h, stride[4];
1526     unsigned edge;
1527     int pixel_size;
1528
1529     if (codec->codec->capabilities & CODEC_CAP_NEG_LINESIZES)
1530         perms |= AV_PERM_NEG_LINESIZES;
1531
1532     if(pic->buffer_hints & FF_BUFFER_HINTS_VALID) {
1533         if(pic->buffer_hints & FF_BUFFER_HINTS_READABLE) perms |= AV_PERM_READ;
1534         if(pic->buffer_hints & FF_BUFFER_HINTS_PRESERVE) perms |= AV_PERM_PRESERVE;
1535         if(pic->buffer_hints & FF_BUFFER_HINTS_REUSABLE) perms |= AV_PERM_REUSE2;
1536     }
1537     if(pic->reference) perms |= AV_PERM_READ | AV_PERM_PRESERVE;
1538
1539     w = codec->width;
1540     h = codec->height;
1541     avcodec_align_dimensions2(codec, &w, &h, stride);
1542     edge = codec->flags & CODEC_FLAG_EMU_EDGE ? 0 : avcodec_get_edge_width();
1543     w += edge << 1;
1544     h += edge << 1;
1545
1546     if(!(ref = avfilter_get_video_buffer(ctx->outputs[0], perms, w, h)))
1547         return -1;
1548
1549     pixel_size = av_pix_fmt_descriptors[ref->format].comp[0].step_minus1+1;
1550     ref->video->w = codec->width;
1551     ref->video->h = codec->height;
1552     for(i = 0; i < 4; i ++) {
1553         unsigned hshift = (i == 1 || i == 2) ? av_pix_fmt_descriptors[ref->format].log2_chroma_w : 0;
1554         unsigned vshift = (i == 1 || i == 2) ? av_pix_fmt_descriptors[ref->format].log2_chroma_h : 0;
1555
1556         if (ref->data[i]) {
1557             ref->data[i]    += ((edge * pixel_size) >> hshift) + ((edge * ref->linesize[i]) >> vshift);
1558         }
1559         pic->data[i]     = ref->data[i];
1560         pic->linesize[i] = ref->linesize[i];
1561     }
1562     pic->opaque = ref;
1563     pic->age    = INT_MAX;
1564     pic->type   = FF_BUFFER_TYPE_USER;
1565     pic->reordered_opaque = codec->reordered_opaque;
1566     if(codec->pkt) pic->pkt_pts = codec->pkt->pts;
1567     else           pic->pkt_pts = AV_NOPTS_VALUE;
1568     return 0;
1569 }
1570
1571 static void input_release_buffer(AVCodecContext *codec, AVFrame *pic)
1572 {
1573     memset(pic->data, 0, sizeof(pic->data));
1574     avfilter_unref_buffer(pic->opaque);
1575 }
1576
1577 static int input_reget_buffer(AVCodecContext *codec, AVFrame *pic)
1578 {
1579     AVFilterBufferRef *ref = pic->opaque;
1580
1581     if (pic->data[0] == NULL) {
1582         pic->buffer_hints |= FF_BUFFER_HINTS_READABLE;
1583         return codec->get_buffer(codec, pic);
1584     }
1585
1586     if ((codec->width != ref->video->w) || (codec->height != ref->video->h) ||
1587         (codec->pix_fmt != ref->format)) {
1588         av_log(codec, AV_LOG_ERROR, "Picture properties changed.\n");
1589         return -1;
1590     }
1591
1592     pic->reordered_opaque = codec->reordered_opaque;
1593     if(codec->pkt) pic->pkt_pts = codec->pkt->pts;
1594     else           pic->pkt_pts = AV_NOPTS_VALUE;
1595     return 0;
1596 }
1597
1598 static int input_init(AVFilterContext *ctx, const char *args, void *opaque)
1599 {
1600     FilterPriv *priv = ctx->priv;
1601     AVCodecContext *codec;
1602     if(!opaque) return -1;
1603
1604     priv->is = opaque;
1605     codec    = priv->is->video_st->codec;
1606     codec->opaque = ctx;
1607     if(codec->codec->capabilities & CODEC_CAP_DR1) {
1608         priv->use_dr1 = 1;
1609         codec->get_buffer     = input_get_buffer;
1610         codec->release_buffer = input_release_buffer;
1611         codec->reget_buffer   = input_reget_buffer;
1612         codec->thread_safe_callbacks = 1;
1613     }
1614
1615     priv->frame = avcodec_alloc_frame();
1616
1617     return 0;
1618 }
1619
1620 static void input_uninit(AVFilterContext *ctx)
1621 {
1622     FilterPriv *priv = ctx->priv;
1623     av_free(priv->frame);
1624 }
1625
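/* request_frame() callback of the ffplay_input source filter: decode video
   frames until one is due for display, wrap it (DR1) or copy it into an
   AVFilterBufferRef, and push it down the link as one full-height slice. */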
1626 static int input_request_frame(AVFilterLink *link)
1627 {
1628     FilterPriv *priv = link->src->priv;
1629     AVFilterBufferRef *picref;
1630     int64_t pts = 0;
1631     AVPacket pkt;
1632     int ret;
1633
1634     while (!(ret = get_video_frame(priv->is, priv->frame, &pts, &pkt)))
1635         av_free_packet(&pkt);
1636     if (ret < 0)
1637         return -1;
1638
1639     if(priv->use_dr1) {
1640         picref = avfilter_ref_buffer(priv->frame->opaque, ~0);
1641     } else {
1642         picref = avfilter_get_video_buffer(link, AV_PERM_WRITE, link->w, link->h);
1643         av_image_copy(picref->data, picref->linesize,
1644                       priv->frame->data, priv->frame->linesize,
1645                       picref->format, link->w, link->h);
1646     }
1647     av_free_packet(&pkt);
1648
1649     picref->pts = pts;
1650     picref->pos = pkt.pos;
1651     picref->video->pixel_aspect = priv->is->video_st->codec->sample_aspect_ratio;
1652     avfilter_start_frame(link, picref);
1653     avfilter_draw_slice(link, 0, link->h, 1);
1654     avfilter_end_frame(link);
1655
1656     return 0;
1657 }
1658
1659 static int input_query_formats(AVFilterContext *ctx)
1660 {
1661     FilterPriv *priv = ctx->priv;
1662     enum PixelFormat pix_fmts[] = {
1663         priv->is->video_st->codec->pix_fmt, PIX_FMT_NONE
1664     };
1665
1666     avfilter_set_common_formats(ctx, avfilter_make_format_list(pix_fmts));
1667     return 0;
1668 }
1669
1670 static int input_config_props(AVFilterLink *link)
1671 {
1672     FilterPriv *priv  = link->src->priv;
1673     AVCodecContext *c = priv->is->video_st->codec;
1674
1675     link->w = c->width;
1676     link->h = c->height;
1677     link->time_base = priv->is->video_st->time_base;
1678
1679     return 0;
1680 }
1681
1682 static AVFilter input_filter =
1683 {
1684     .name      = "ffplay_input",
1685
1686     .priv_size = sizeof(FilterPriv),
1687
1688     .init      = input_init,
1689     .uninit    = input_uninit,
1690
1691     .query_formats = input_query_formats,
1692
1693     .inputs    = (AVFilterPad[]) {{ .name = NULL }},
1694     .outputs   = (AVFilterPad[]) {{ .name = "default",
1695                                     .type = AVMEDIA_TYPE_VIDEO,
1696                                     .request_frame = input_request_frame,
1697                                     .config_props  = input_config_props, },
1698                                   { .name = NULL }},
1699 };
1700
1701 static int configure_video_filters(AVFilterGraph *graph, VideoState *is, const char *vfilters)
1702 {
1703     char sws_flags_str[128];
1704     int ret;
1705     FFSinkContext ffsink_ctx = { .pix_fmt = PIX_FMT_YUV420P };
1706     AVFilterContext *filt_src = NULL, *filt_out = NULL;
1707     snprintf(sws_flags_str, sizeof(sws_flags_str), "flags=%d", sws_flags);
1708     graph->scale_sws_opts = av_strdup(sws_flags_str);
1709
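         /* build the graph: the "ffplay_input" source feeding decoded frames,
            the optional user filter chain from -vf in between, and an ffsink output */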
1710     if ((ret = avfilter_graph_create_filter(&filt_src, &input_filter, "src",
1711                                             NULL, is, graph)) < 0)
1712         goto the_end;
1713     if ((ret = avfilter_graph_create_filter(&filt_out, &ffsink, "out",
1714                                             NULL, &ffsink_ctx, graph)) < 0)
1715         goto the_end;
1716
1717     if(vfilters) {
1718         AVFilterInOut *outputs = av_malloc(sizeof(AVFilterInOut));
1719         AVFilterInOut *inputs  = av_malloc(sizeof(AVFilterInOut));
1720
1721         outputs->name    = av_strdup("in");
1722         outputs->filter_ctx = filt_src;
1723         outputs->pad_idx = 0;
1724         outputs->next    = NULL;
1725
1726         inputs->name    = av_strdup("out");
1727         inputs->filter_ctx = filt_out;
1728         inputs->pad_idx = 0;
1729         inputs->next    = NULL;
1730
1731         if ((ret = avfilter_graph_parse(graph, vfilters, inputs, outputs, NULL)) < 0)
1732             goto the_end;
1733         av_freep(&vfilters);
1734     } else {
1735         if ((ret = avfilter_link(filt_src, 0, filt_out, 0)) < 0)
1736             goto the_end;
1737     }
1738
1739     if ((ret = avfilter_graph_config(graph, NULL)) < 0)
1740         goto the_end;
1741
1742     is->out_video_filter = filt_out;
1743 the_end:
1744     return ret;
1745 }
1746
1747 #endif  /* CONFIG_AVFILTER */
1748
1749 static int video_thread(void *arg)
1750 {
1751     VideoState *is = arg;
1752     AVFrame *frame= avcodec_alloc_frame();
1753     int64_t pts_int;
1754     double pts;
1755     int ret;
1756
1757 #if CONFIG_AVFILTER
1758     AVFilterGraph *graph = avfilter_graph_alloc();
1759     AVFilterContext *filt_out = NULL;
1760     int64_t pos;
1761
1762     if ((ret = configure_video_filters(graph, is, vfilters)) < 0)
1763         goto the_end;
1764     filt_out = is->out_video_filter;
1765 #endif
1766
1767     for(;;) {
1768 #if !CONFIG_AVFILTER
1769         AVPacket pkt;
1770 #else
1771         AVFilterBufferRef *picref;
1772         AVRational tb;
1773 #endif
1774         while (is->paused && !is->videoq.abort_request)
1775             SDL_Delay(10);
1776 #if CONFIG_AVFILTER
1777         ret = get_filtered_video_frame(filt_out, frame, &picref, &tb);
1778         if (picref) {
1779             pts_int = picref->pts;
1780             pos     = picref->pos;
1781             frame->opaque = picref;
1782         }
1783
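             /* the filter graph may run in a different time base; rescale the pts
                back to the stream time base before computing the display time */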
1784         if (av_cmp_q(tb, is->video_st->time_base)) {
1785             av_unused int64_t pts1 = pts_int;
1786             pts_int = av_rescale_q(pts_int, tb, is->video_st->time_base);
1787             av_dlog(NULL, "video_thread(): "
1788                     "tb:%d/%d pts:%"PRId64" -> tb:%d/%d pts:%"PRId64"\n",
1789                     tb.num, tb.den, pts1,
1790                     is->video_st->time_base.num, is->video_st->time_base.den, pts_int);
1791         }
1792 #else
1793         ret = get_video_frame(is, frame, &pts_int, &pkt);
1794 #endif
1795
1796         if (ret < 0) goto the_end;
1797
1798         if (!ret)
1799             continue;
1800
1801         pts = pts_int*av_q2d(is->video_st->time_base);
1802
1803 #if CONFIG_AVFILTER
1804         ret = output_picture2(is, frame, pts, pos);
1805 #else
1806         ret = output_picture2(is, frame, pts,  pkt.pos);
1807         av_free_packet(&pkt);
1808 #endif
1809         if (ret < 0)
1810             goto the_end;
1811
1812         if (step)
1813             if (cur_stream)
1814                 stream_pause(cur_stream);
1815     }
1816  the_end:
1817 #if CONFIG_AVFILTER
1818     avfilter_graph_free(&graph);
1819 #endif
1820     av_free(frame);
1821     return 0;
1822 }
1823
1824 static int subtitle_thread(void *arg)
1825 {
1826     VideoState *is = arg;
1827     SubPicture *sp;
1828     AVPacket pkt1, *pkt = &pkt1;
1829     int len1, got_subtitle;
1830     double pts;
1831     int i, j;
1832     int r, g, b, y, u, v, a;
1833
1834     for(;;) {
1835         while (is->paused && !is->subtitleq.abort_request) {
1836             SDL_Delay(10);
1837         }
1838         if (packet_queue_get(&is->subtitleq, pkt, 1) < 0)
1839             break;
1840
1841         if(pkt->data == flush_pkt.data){
1842             avcodec_flush_buffers(is->subtitle_st->codec);
1843             continue;
1844         }
1845         SDL_LockMutex(is->subpq_mutex);
1846         while (is->subpq_size >= SUBPICTURE_QUEUE_SIZE &&
1847                !is->subtitleq.abort_request) {
1848             SDL_CondWait(is->subpq_cond, is->subpq_mutex);
1849         }
1850         SDL_UnlockMutex(is->subpq_mutex);
1851
1852         if (is->subtitleq.abort_request)
1853             goto the_end;
1854
1855         sp = &is->subpq[is->subpq_windex];
1856
1857         /* NOTE: pts is the PTS of the _first_ subtitle beginning in
1858            this packet, if any */
1859         pts = 0;
1860         if (pkt->pts != AV_NOPTS_VALUE)
1861             pts = av_q2d(is->subtitle_st->time_base)*pkt->pts;
1862
1863         len1 = avcodec_decode_subtitle2(is->subtitle_st->codec,
1864                                     &sp->sub, &got_subtitle,
1865                                     pkt);
1866         if (got_subtitle && sp->sub.format == 0) {
1867             sp->pts = pts;
1868
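                 /* convert each palette entry from RGBA to the YUVA values used
                    when the subtitle is blended onto the video overlay */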
1869             for (i = 0; i < sp->sub.num_rects; i++)
1870             {
1871                 for (j = 0; j < sp->sub.rects[i]->nb_colors; j++)
1872                 {
1873                     RGBA_IN(r, g, b, a, (uint32_t*)sp->sub.rects[i]->pict.data[1] + j);
1874                     y = RGB_TO_Y_CCIR(r, g, b);
1875                     u = RGB_TO_U_CCIR(r, g, b, 0);
1876                     v = RGB_TO_V_CCIR(r, g, b, 0);
1877                     YUVA_OUT((uint32_t*)sp->sub.rects[i]->pict.data[1] + j, y, u, v, a);
1878                 }
1879             }
1880
1881             /* now we can update the picture count */
1882             if (++is->subpq_windex == SUBPICTURE_QUEUE_SIZE)
1883                 is->subpq_windex = 0;
1884             SDL_LockMutex(is->subpq_mutex);
1885             is->subpq_size++;
1886             SDL_UnlockMutex(is->subpq_mutex);
1887         }
1888         av_free_packet(pkt);
1889     }
1890  the_end:
1891     return 0;
1892 }
1893
1894 /* copy samples for viewing in editor window */
1895 static void update_sample_display(VideoState *is, short *samples, int samples_size)
1896 {
1897     int size, len, channels;
1898
1899     channels = is->audio_st->codec->channels;
1900
1901     size = samples_size / sizeof(short);
1902     while (size > 0) {
1903         len = SAMPLE_ARRAY_SIZE - is->sample_array_index;
1904         if (len > size)
1905             len = size;
1906         memcpy(is->sample_array + is->sample_array_index, samples, len * sizeof(short));
1907         samples += len;
1908         is->sample_array_index += len;
1909         if (is->sample_array_index >= SAMPLE_ARRAY_SIZE)
1910             is->sample_array_index = 0;
1911         size -= len;
1912     }
1913 }
1914
1915 /* return the new audio buffer size (samples can be added or deleted
1916    to improve sync when the video or external clock is the master) */
1917 static int synchronize_audio(VideoState *is, short *samples,
1918                              int samples_size1, double pts)
1919 {
1920     int n, samples_size;
1921     double ref_clock;
1922
1923     n = 2 * is->audio_st->codec->channels;
1924     samples_size = samples_size1;
1925
1926     /* if not master, then we try to remove or add samples to correct the clock */
1927     if (((is->av_sync_type == AV_SYNC_VIDEO_MASTER && is->video_st) ||
1928          is->av_sync_type == AV_SYNC_EXTERNAL_CLOCK)) {
1929         double diff, avg_diff;
1930         int wanted_size, min_size, max_size, nb_samples;
1931
1932         ref_clock = get_master_clock(is);
1933         diff = get_audio_clock(is) - ref_clock;
1934
1935         if (diff < AV_NOSYNC_THRESHOLD) {
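                 /* keep an exponentially weighted running sum of the A-V differences;
                    with coef = 0.01^(1/AUDIO_DIFF_AVG_NB), a difference that is
                    AUDIO_DIFF_AVG_NB calls old only weighs about 1% of the newest one */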
1936             is->audio_diff_cum = diff + is->audio_diff_avg_coef * is->audio_diff_cum;
1937             if (is->audio_diff_avg_count < AUDIO_DIFF_AVG_NB) {
1938                 /* not enough measurements yet for a reliable estimate */
1939                 is->audio_diff_avg_count++;
1940             } else {
1941                 /* estimate the A-V difference */
1942                 avg_diff = is->audio_diff_cum * (1.0 - is->audio_diff_avg_coef);
1943
1944                 if (fabs(avg_diff) >= is->audio_diff_threshold) {
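                         /* purely illustrative numbers (not from the source): with 44100 Hz
                            stereo 16-bit audio, n is 4 bytes per frame, so diff = +0.01 s asks
                            for 441 extra frames, i.e. wanted_size grows by about 1764 bytes */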
1945                     wanted_size = samples_size + ((int)(diff * is->audio_st->codec->sample_rate) * n);
1946                     nb_samples = samples_size / n;
1947
1948                     min_size = ((nb_samples * (100 - SAMPLE_CORRECTION_PERCENT_MAX)) / 100) * n;
1949                     max_size = ((nb_samples * (100 + SAMPLE_CORRECTION_PERCENT_MAX)) / 100) * n;
1950                     if (wanted_size < min_size)
1951                         wanted_size = min_size;
1952                     else if (wanted_size > max_size)
1953                         wanted_size = max_size;
1954
1955                     /* add or remove samples to correct the sync */
1956                     if (wanted_size < samples_size) {
1957                         /* remove samples */
1958                         samples_size = wanted_size;
1959                     } else if (wanted_size > samples_size) {
1960                         uint8_t *samples_end, *q;
1961                         int nb;
1962
1963                         /* add samples */
1964                         nb = (wanted_size - samples_size); /* bytes to append */
1965                         samples_end = (uint8_t *)samples + samples_size - n;
1966                         q = samples_end + n;
1967                         while (nb > 0) {
1968                             memcpy(q, samples_end, n);
1969                             q += n;
1970                             nb -= n;
1971                         }
1972                         samples_size = wanted_size;
1973                     }
1974                 }
1975                 av_dlog(NULL, "diff=%f adiff=%f sample_diff=%d apts=%0.3f vpts=%0.3f %f\n",
1976                         diff, avg_diff, samples_size - samples_size1,
1977                         is->audio_clock, is->video_clock, is->audio_diff_threshold);
1978             }
1979         } else {
1980             /* the difference is too large: probably initial PTS errors, so
1981                reset the A-V filter */
1982             is->audio_diff_avg_count = 0;
1983             is->audio_diff_cum = 0;
1984         }
1985     }
1986
1987     return samples_size;
1988 }
1989
1990 /* decode one audio frame and return its uncompressed size in bytes */
1991 static int audio_decode_frame(VideoState *is, double *pts_ptr)
1992 {
1993     AVPacket *pkt_temp = &is->audio_pkt_temp;
1994     AVPacket *pkt = &is->audio_pkt;
1995     AVCodecContext *dec= is->audio_st->codec;
1996     int n, len1, data_size;
1997     double pts;
1998
1999     for(;;) {
2000         /* NOTE: the audio packet can contain several frames */
2001         while (pkt_temp->size > 0) {
2002             data_size = sizeof(is->audio_buf1);
2003             len1 = avcodec_decode_audio3(dec,
2004                                         (int16_t *)is->audio_buf1, &data_size,
2005                                         pkt_temp);
2006             if (len1 < 0) {
2007                 /* if error, we skip the frame */
2008                 pkt_temp->size = 0;
2009                 break;
2010             }
2011
2012             pkt_temp->data += len1;
2013             pkt_temp->size -= len1;
2014             if (data_size <= 0)
2015                 continue;
2016
2017             if (dec->sample_fmt != is->audio_src_fmt) {
2018                 if (is->reformat_ctx)
2019                     av_audio_convert_free(is->reformat_ctx);
2020                 is->reformat_ctx= av_audio_convert_alloc(AV_SAMPLE_FMT_S16, 1,
2021                                                          dec->sample_fmt, 1, NULL, 0);
2022                 if (!is->reformat_ctx) {
2023                     fprintf(stderr, "Cannot convert %s sample format to %s sample format\n",
2024                         av_get_sample_fmt_name(dec->sample_fmt),
2025                         av_get_sample_fmt_name(AV_SAMPLE_FMT_S16));
2026                         break;
2027                 }
2028                 is->audio_src_fmt= dec->sample_fmt;
2029             }
2030
2031             if (is->reformat_ctx) {
2032                 const void *ibuf[6]= {is->audio_buf1};
2033                 void *obuf[6]= {is->audio_buf2};
2034                 int istride[6]= {av_get_bits_per_sample_fmt(dec->sample_fmt)/8};
2035                 int ostride[6]= {2};
2036                 int len= data_size/istride[0];
2037                 if (av_audio_convert(is->reformat_ctx, obuf, ostride, ibuf, istride, len)<0) {
2038                     printf("av_audio_convert() failed\n");
2039                     break;
2040                 }
2041                 is->audio_buf= is->audio_buf2;
2042                 /* FIXME: existing code assumes that data_size equals framesize*channels*2;
2043                           remove this legacy cruft */
2044                 data_size= len*2;
2045             }else{
2046                 is->audio_buf= is->audio_buf1;
2047             }
2048
2049             /* the audio clock serves as the pts (extrapolated when the packet had none) */
2050             pts = is->audio_clock;
2051             *pts_ptr = pts;
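                 /* advance the audio clock by the duration of the decoded data:
                    data_size bytes / (2 bytes per sample * channels * sample_rate) */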
2052             n = 2 * dec->channels;
2053             is->audio_clock += (double)data_size /
2054                 (double)(n * dec->sample_rate);
2055 #ifdef DEBUG
2056             {
2057                 static double last_clock;
2058                 printf("audio: delay=%0.3f clock=%0.3f pts=%0.3f\n",
2059                        is->audio_clock - last_clock,
2060                        is->audio_clock, pts);
2061                 last_clock = is->audio_clock;
2062             }
2063 #endif
2064             return data_size;
2065         }
2066
2067         /* free the current packet */
2068         if (pkt->data)
2069             av_free_packet(pkt);
2070
2071         if (is->paused || is->audioq.abort_request) {
2072             return -1;
2073         }
2074
2075         /* read next packet */
2076         if (packet_queue_get(&is->audioq, pkt, 1) < 0)
2077             return -1;
2078         if(pkt->data == flush_pkt.data){
2079             avcodec_flush_buffers(dec);
2080             continue;
2081         }
2082
2083         pkt_temp->data = pkt->data;
2084         pkt_temp->size = pkt->size;
2085
2086         /* update the audio clock with the packet pts, if available */
2087         if (pkt->pts != AV_NOPTS_VALUE) {
2088             is->audio_clock = av_q2d(is->audio_st->time_base)*pkt->pts;
2089         }
2090     }
2091 }
2092
2093 /* prepare a new audio buffer */
2094 static void sdl_audio_callback(void *opaque, Uint8 *stream, int len)
2095 {
2096     VideoState *is = opaque;
2097     int audio_size, len1;
2098     double pts;
2099
2100     audio_callback_time = av_gettime();
2101
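         /* keep decoding frames until SDL's buffer of len bytes is filled;
            on a decode error a short block of silence is output instead */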
2102     while (len > 0) {
2103         if (is->audio_buf_index >= is->audio_buf_size) {
2104            audio_size = audio_decode_frame(is, &pts);
2105            if (audio_size < 0) {
2106                 /* if error, just output silence */
2107                is->audio_buf = is->audio_buf1;
2108                is->audio_buf_size = 1024;
2109                memset(is->audio_buf, 0, is->audio_buf_size);
2110            } else {
2111                if (is->show_audio)
2112                    update_sample_display(is, (int16_t *)is->audio_buf, audio_size);
2113                audio_size = synchronize_audio(is, (int16_t *)is->audio_buf, audio_size,
2114                                               pts);
2115                is->audio_buf_size = audio_size;
2116            }
2117            is->audio_buf_index = 0;
2118         }
2119         len1 = is->audio_buf_size - is->audio_buf_index;
2120         if (len1 > len)
2121             len1 = len;
2122         memcpy(stream, (uint8_t *)is->audio_buf + is->audio_buf_index, len1);
2123         len -= len1;
2124         stream += len1;
2125         is->audio_buf_index += len1;
2126     }
2127 }
2128
2129 /* open a given stream. Return 0 if OK */
2130 static int stream_component_open(VideoState *is, int stream_index)
2131 {
2132     AVFormatContext *ic = is->ic;
2133     AVCodecContext *avctx;
2134     AVCodec *codec;
2135     SDL_AudioSpec wanted_spec, spec;
2136
2137     if (stream_index < 0 || stream_index >= ic->nb_streams)
2138         return -1;
2139     avctx = ic->streams[stream_index]->codec;
2140
2141     /* request a downmix to at most two channels before opening the audio decoder */
2142     if (avctx->codec_type == AVMEDIA_TYPE_AUDIO) {
2143         if (avctx->channels > 0) {
2144             avctx->request_channels = FFMIN(2, avctx->channels);
2145         } else {
2146             avctx->request_channels = 2;
2147         }
2148     }
2149
2150     codec = avcodec_find_decoder(avctx->codec_id);
2151     avctx->debug_mv = debug_mv;
2152     avctx->debug = debug;
2153     avctx->workaround_bugs = workaround_bugs;
2154     avctx->lowres = lowres;
2155     if(lowres) avctx->flags |= CODEC_FLAG_EMU_EDGE;
2156     avctx->idct_algo= idct;
2157     if(fast) avctx->flags2 |= CODEC_FLAG2_FAST;
2158     avctx->skip_frame= skip_frame;
2159     avctx->skip_idct= skip_idct;
2160     avctx->skip_loop_filter= skip_loop_filter;
2161     avctx->error_recognition= error_recognition;
2162     avctx->error_concealment= error_concealment;
2163     avctx->thread_count= thread_count;
2164
2165     set_context_opts(avctx, avcodec_opts[avctx->codec_type], 0, codec);
2166
2167     if (!codec ||
2168         avcodec_open(avctx, codec) < 0)
2169         return -1;
2170
2171     /* prepare audio output */
2172     if (avctx->codec_type == AVMEDIA_TYPE_AUDIO) {
2173         wanted_spec.freq = avctx->sample_rate;
2174         wanted_spec.format = AUDIO_S16SYS;
2175         wanted_spec.channels = avctx->channels;
2176         wanted_spec.silence = 0;
2177         wanted_spec.samples = SDL_AUDIO_BUFFER_SIZE;
2178         wanted_spec.callback = sdl_audio_callback;
2179         wanted_spec.userdata = is;
2180         if (SDL_OpenAudio(&wanted_spec, &spec) < 0) {
2181             fprintf(stderr, "SDL_OpenAudio: %s\n", SDL_GetError());
2182             return -1;
2183         }
2184         is->audio_hw_buf_size = spec.size;
2185         is->audio_src_fmt= AV_SAMPLE_FMT_S16;
2186     }
2187
2188     ic->streams[stream_index]->discard = AVDISCARD_DEFAULT;
2189     switch(avctx->codec_type) {
2190     case AVMEDIA_TYPE_AUDIO:
2191         is->audio_stream = stream_index;
2192         is->audio_st = ic->streams[stream_index];
2193         is->audio_buf_size = 0;
2194         is->audio_buf_index = 0;
2195
2196         /* init averaging filter */
2197         is->audio_diff_avg_coef = exp(log(0.01) / AUDIO_DIFF_AVG_NB);
2198         is->audio_diff_avg_count = 0;
2199         /* since we do not have a precise enough audio FIFO fullness,
2200            we correct audio sync only if the error is larger than this threshold */
2201         is->audio_diff_threshold = 2.0 * SDL_AUDIO_BUFFER_SIZE / avctx->sample_rate;
2202
2203         memset(&is->audio_pkt, 0, sizeof(is->audio_pkt));
2204         packet_queue_init(&is->audioq);
2205         SDL_PauseAudio(0);
2206         break;
2207     case AVMEDIA_TYPE_VIDEO:
2208         is->video_stream = stream_index;
2209         is->video_st = ic->streams[stream_index];
2210
2211         packet_queue_init(&is->videoq);
2212         is->video_tid = SDL_CreateThread(video_thread, is);
2213         break;
2214     case AVMEDIA_TYPE_SUBTITLE:
2215         is->subtitle_stream = stream_index;
2216         is->subtitle_st = ic->streams[stream_index];
2217         packet_queue_init(&is->subtitleq);
2218
2219         is->subtitle_tid = SDL_CreateThread(subtitle_thread, is);
2220         break;
2221     default:
2222         break;
2223     }
2224     return 0;
2225 }
2226
2227 static void stream_component_close(VideoState *is, int stream_index)
2228 {
2229     AVFormatContext *ic = is->ic;
2230     AVCodecContext *avctx;
2231
2232     if (stream_index < 0 || stream_index >= ic->nb_streams)
2233         return;
2234     avctx = ic->streams[stream_index]->codec;
2235
2236     switch(avctx->codec_type) {
2237     case AVMEDIA_TYPE_AUDIO:
2238         packet_queue_abort(&is->audioq);
2239
2240         SDL_CloseAudio();
2241
2242         packet_queue_end(&is->audioq);
2243         if (is->reformat_ctx)
2244             av_audio_convert_free(is->reformat_ctx);
2245         is->reformat_ctx = NULL;
2246         break;
2247     case AVMEDIA_TYPE_VIDEO:
2248         packet_queue_abort(&is->videoq);
2249
2250         /* note: we also signal this mutex to make sure we unblock the
2251            video thread in all cases */
2252         SDL_LockMutex(is->pictq_mutex);
2253         SDL_CondSignal(is->pictq_cond);
2254         SDL_UnlockMutex(is->pictq_mutex);
2255
2256         SDL_WaitThread(is->video_tid, NULL);
2257
2258         packet_queue_end(&is->videoq);
2259         break;
2260     case AVMEDIA_TYPE_SUBTITLE:
2261         packet_queue_abort(&is->subtitleq);
2262
2263         /* note: we also signal this mutex to make sure we unblock the
2264            subtitle thread in all cases */
2265         SDL_LockMutex(is->subpq_mutex);
2266         is->subtitle_stream_changed = 1;
2267
2268         SDL_CondSignal(is->subpq_cond);
2269         SDL_UnlockMutex(is->subpq_mutex);
2270
2271         SDL_WaitThread(is->subtitle_tid, NULL);
2272
2273         packet_queue_end(&is->subtitleq);
2274         break;
2275     default:
2276         break;
2277     }
2278
2279     ic->streams[stream_index]->discard = AVDISCARD_ALL;
2280     avcodec_close(avctx);
2281     switch(avctx->codec_type) {
2282     case AVMEDIA_TYPE_AUDIO:
2283         is->audio_st = NULL;
2284         is->audio_stream = -1;
2285         break;
2286     case AVMEDIA_TYPE_VIDEO:
2287         is->video_st = NULL;
2288         is->video_stream = -1;
2289         break;
2290     case AVMEDIA_TYPE_SUBTITLE:
2291         is->subtitle_st = NULL;
2292         is->subtitle_stream = -1;
2293         break;
2294     default:
2295         break;
2296     }
2297 }
2298
2299 /* since we have only one decoding thread, we can use a global
2300    variable instead of a thread local variable */
2301 static VideoState *global_video_state;
2302
2303 static int decode_interrupt_cb(void)
2304 {
2305     return (global_video_state && global_video_state->abort_request);
2306 }
2307
2308 /* this thread gets the stream from the disk or the network */
2309 static int decode_thread(void *arg)
2310 {
2311     VideoState *is = arg;
2312     AVFormatContext *ic;
2313     int err, i, ret;
2314     int st_index[AVMEDIA_TYPE_NB];
2315     AVPacket pkt1, *pkt = &pkt1;
2316     AVFormatParameters params, *ap = &params;
2317     int eof=0;
2318     int pkt_in_play_range = 0;
2319
2320     ic = avformat_alloc_context();
2321
2322     memset(st_index, -1, sizeof(st_index));
2323     is->video_stream = -1;
2324     is->audio_stream = -1;
2325     is->subtitle_stream = -1;
2326
2327     global_video_state = is;
2328     avio_set_interrupt_cb(decode_interrupt_cb);
2329
2330     memset(ap, 0, sizeof(*ap));
2331
2332     ap->prealloced_context = 1;
2333     ap->width = frame_width;
2334     ap->height= frame_height;
2335     ap->time_base= (AVRational){1, 25};
2336     ap->pix_fmt = frame_pix_fmt;
2337
2338     set_context_opts(ic, avformat_opts, AV_OPT_FLAG_DECODING_PARAM, NULL);
2339
2340     err = av_open_input_file(&ic, is->filename, is->iformat, 0, ap);
2341     if (err < 0) {
2342         print_error(is->filename, err);
2343         ret = -1;
2344         goto fail;
2345     }
2346     is->ic = ic;
2347
2348     if(genpts)
2349         ic->flags |= AVFMT_FLAG_GENPTS;
2350
2351     /* Set AVCodecContext options so they will be seen by av_find_stream_info() */
2352     for (i = 0; i < ic->nb_streams; i++) {
2353         AVCodecContext *dec = ic->streams[i]->codec;
2354         switch (dec->codec_type) {
2355         case AVMEDIA_TYPE_AUDIO:
2356             set_context_opts(dec, avcodec_opts[AVMEDIA_TYPE_AUDIO],
2357                              AV_OPT_FLAG_AUDIO_PARAM | AV_OPT_FLAG_DECODING_PARAM,
2358                              NULL);
2359             break;
2360         case AVMEDIA_TYPE_VIDEO:
2361             set_context_opts(dec, avcodec_opts[AVMEDIA_TYPE_VIDEO],
2362                              AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_DECODING_PARAM,
2363                              NULL);
2364             break;
2365         }
2366     }
2367
2368     err = av_find_stream_info(ic);
2369     if (err < 0) {
2370         fprintf(stderr, "%s: could not find codec parameters\n", is->filename);
2371         ret = -1;
2372         goto fail;
2373     }
2374     if(ic->pb)
2375         ic->pb->eof_reached= 0; //FIXME hack, ffplay maybe should not use url_feof() to test for the end
2376
2377     if(seek_by_bytes<0)
2378         seek_by_bytes= !!(ic->iformat->flags & AVFMT_TS_DISCONT);
2379
2380     /* if seeking was requested, execute it now */
2381     if (start_time != AV_NOPTS_VALUE) {
2382         int64_t timestamp;
2383
2384         timestamp = start_time;
2385         /* add the stream start time */
2386         if (ic->start_time != AV_NOPTS_VALUE)
2387             timestamp += ic->start_time;
2388         ret = avformat_seek_file(ic, -1, INT64_MIN, timestamp, INT64_MAX, 0);
2389         if (ret < 0) {
2390             fprintf(stderr, "%s: could not seek to position %0.3f\n",
2391                     is->filename, (double)timestamp / AV_TIME_BASE);
2392         }
2393     }
2394
2395     for (i = 0; i < ic->nb_streams; i++)
2396         ic->streams[i]->discard = AVDISCARD_ALL;
2397     if (!video_disable)
2398         st_index[AVMEDIA_TYPE_VIDEO] =
2399             av_find_best_stream(ic, AVMEDIA_TYPE_VIDEO,
2400                                 wanted_stream[AVMEDIA_TYPE_VIDEO], -1, NULL, 0);
2401     if (!audio_disable)
2402         st_index[AVMEDIA_TYPE_AUDIO] =
2403             av_find_best_stream(ic, AVMEDIA_TYPE_AUDIO,
2404                                 wanted_stream[AVMEDIA_TYPE_AUDIO],
2405                                 st_index[AVMEDIA_TYPE_VIDEO],
2406                                 NULL, 0);
2407     if (!video_disable)
2408         st_index[AVMEDIA_TYPE_SUBTITLE] =
2409             av_find_best_stream(ic, AVMEDIA_TYPE_SUBTITLE,
2410                                 wanted_stream[AVMEDIA_TYPE_SUBTITLE],
2411                                 (st_index[AVMEDIA_TYPE_AUDIO] >= 0 ?
2412                                  st_index[AVMEDIA_TYPE_AUDIO] :
2413                                  st_index[AVMEDIA_TYPE_VIDEO]),
2414                                 NULL, 0);
2415     if (show_status) {
2416         av_dump_format(ic, 0, is->filename, 0);
2417     }
2418
2419     /* open the streams */
2420     if (st_index[AVMEDIA_TYPE_AUDIO] >= 0) {
2421         stream_component_open(is, st_index[AVMEDIA_TYPE_AUDIO]);
2422     }
2423
2424     ret=-1;
2425     if (st_index[AVMEDIA_TYPE_VIDEO] >= 0) {
2426         ret= stream_component_open(is, st_index[AVMEDIA_TYPE_VIDEO]);
2427     }
2428     is->refresh_tid = SDL_CreateThread(refresh_thread, is);
2429     if(ret<0) {
2430         if (!display_disable)
2431             is->show_audio = 2;
2432     }
2433
2434     if (st_index[AVMEDIA_TYPE_SUBTITLE] >= 0) {
2435         stream_component_open(is, st_index[AVMEDIA_TYPE_SUBTITLE]);
2436     }
2437
2438     if (is->video_stream < 0 && is->audio_stream < 0) {
2439         fprintf(stderr, "%s: could not open codecs\n", is->filename);
2440         ret = -1;
2441         goto fail;
2442     }
2443
2444     for(;;) {
2445         if (is->abort_request)
2446             break;
2447         if (is->paused != is->last_paused) {
2448             is->last_paused = is->paused;
2449             if (is->paused)
2450                 is->read_pause_return= av_read_pause(ic);
2451             else
2452                 av_read_play(ic);
2453         }
2454 #if CONFIG_RTSP_DEMUXER
2455         if (is->paused && !strcmp(ic->iformat->name, "rtsp")) {
2456             /* wait 10 ms to avoid trying to get another packet */
2457             /* XXX: horrible */
2458             SDL_Delay(10);
2459             continue;
2460         }
2461 #endif
2462         if (is->seek_req) {
2463             int64_t seek_target= is->seek_pos;
2464             int64_t seek_min= is->seek_rel > 0 ? seek_target - is->seek_rel + 2: INT64_MIN;
2465             int64_t seek_max= is->seek_rel < 0 ? seek_target - is->seek_rel - 2: INT64_MAX;
2466 //FIXME the +-2 is due to rounding being not done in the correct direction in generation
2467 //      of the seek_pos/seek_rel variables
2468
2469             ret = avformat_seek_file(is->ic, -1, seek_min, seek_target, seek_max, is->seek_flags);
2470             if (ret < 0) {
2471                 fprintf(stderr, "%s: error while seeking\n", is->ic->filename);
2472             }else{
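                     /* after a successful seek, flush each packet queue and push the
                        flush packet so the decoders drop any buffered state */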
2473                 if (is->audio_stream >= 0) {
2474                     packet_queue_flush(&is->audioq);
2475                     packet_queue_put(&is->audioq, &flush_pkt);
2476                 }
2477                 if (is->subtitle_stream >= 0) {
2478                     packet_queue_flush(&is->subtitleq);
2479                     packet_queue_put(&is->subtitleq, &flush_pkt);
2480                 }
2481                 if (is->video_stream >= 0) {
2482                     packet_queue_flush(&is->videoq);
2483                     packet_queue_put(&is->videoq, &flush_pkt);
2484                 }
2485             }
2486             is->seek_req = 0;
2487             eof= 0;
2488         }
2489
2490         /* if the queues are full, no need to read more */
2491         if (   is->audioq.size + is->videoq.size + is->subtitleq.size > MAX_QUEUE_SIZE
2492             || (   (is->audioq   .size  > MIN_AUDIOQ_SIZE || is->audio_stream<0)
2493                 && (is->videoq   .nb_packets > MIN_FRAMES || is->video_stream<0)
2494                 && (is->subtitleq.nb_packets > MIN_FRAMES || is->subtitle_stream<0))) {
2495             /* wait 10 ms */
2496             SDL_Delay(10);
2497             continue;
2498         }
2499         if(eof) {
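                 /* at EOF, queue an empty packet so the video decoder is drained of
                    its delayed frames before looping or exiting */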
2500             if(is->video_stream >= 0){
2501                 av_init_packet(pkt);
2502                 pkt->data=NULL;
2503                 pkt->size=0;
2504                 pkt->stream_index= is->video_stream;
2505                 packet_queue_put(&is->videoq, pkt);
2506             }
2507             SDL_Delay(10);
2508             if(is->audioq.size + is->videoq.size + is->subtitleq.size ==0){
2509                 if(loop!=1 && (!loop || --loop)){
2510                     stream_seek(cur_stream, start_time != AV_NOPTS_VALUE ? start_time : 0, 0, 0);
2511                 }else if(autoexit){
2512                     ret=AVERROR_EOF;
2513                     goto fail;
2514                 }
2515             }
2516             continue;
2517         }
2518         ret = av_read_frame(ic, pkt);
2519         if (ret < 0) {
2520             if (ret == AVERROR_EOF || (ic->pb && ic->pb->eof_reached))
2521                 eof=1;
2522             if (ic->pb && ic->pb->error)
2523                 break;
2524             SDL_Delay(100); /* wait for user event */
2525             continue;
2526         }
2527         /* check if packet is in play range specified by user, then queue, otherwise discard */
2528         pkt_in_play_range = duration == AV_NOPTS_VALUE ||
2529                 (pkt->pts - ic->streams[pkt->stream_index]->start_time) *
2530                 av_q2d(ic->streams[pkt->stream_index]->time_base) -
2531                 (double)(start_time != AV_NOPTS_VALUE ? start_time : 0)/1000000
2532                 <= ((double)duration/1000000);
2533         if (pkt->stream_index == is->audio_stream && pkt_in_play_range) {
2534             packet_queue_put(&is->audioq, pkt);
2535         } else if (pkt->stream_index == is->video_stream && pkt_in_play_range) {
2536             packet_queue_put(&is->videoq, pkt);
2537         } else if (pkt->stream_index == is->subtitle_stream && pkt_in_play_range) {
2538             packet_queue_put(&is->subtitleq, pkt);
2539         } else {
2540             av_free_packet(pkt);
2541         }
2542     }
2543     /* wait until the end */
2544     while (!is->abort_request) {
2545         SDL_Delay(100);
2546     }
2547
2548     ret = 0;
2549  fail:
2550     /* disable interrupting */
2551     global_video_state = NULL;
2552
2553     /* close each stream */
2554     if (is->audio_stream >= 0)
2555         stream_component_close(is, is->audio_stream);
2556     if (is->video_stream >= 0)
2557         stream_component_close(is, is->video_stream);
2558     if (is->subtitle_stream >= 0)
2559         stream_component_close(is, is->subtitle_stream);
2560     if (is->ic) {
2561         av_close_input_file(is->ic);
2562         is->ic = NULL; /* safety */
2563     }
2564     avio_set_interrupt_cb(NULL);
2565
2566     if (ret != 0) {
2567         SDL_Event event;
2568
2569         event.type = FF_QUIT_EVENT;
2570         event.user.data1 = is;
2571         SDL_PushEvent(&event);
2572     }
2573     return 0;
2574 }
2575
2576 static VideoState *stream_open(const char *filename, AVInputFormat *iformat)
2577 {
2578     VideoState *is;
2579
2580     is = av_mallocz(sizeof(VideoState));
2581     if (!is)
2582         return NULL;
2583     av_strlcpy(is->filename, filename, sizeof(is->filename));
2584     is->iformat = iformat;
2585     is->ytop = 0;
2586     is->xleft = 0;
2587
2588     /* start video display */
2589     is->pictq_mutex = SDL_CreateMutex();
2590     is->pictq_cond = SDL_CreateCond();
2591
2592     is->subpq_mutex = SDL_CreateMutex();
2593     is->subpq_cond = SDL_CreateCond();
2594
2595     is->av_sync_type = av_sync_type;
2596     is->parse_tid = SDL_CreateThread(decode_thread, is);
2597     if (!is->parse_tid) {
2598         av_free(is);
2599         return NULL;
2600     }
2601     return is;
2602 }
2603
2604 static void stream_cycle_channel(VideoState *is, int codec_type)
2605 {
2606     AVFormatContext *ic = is->ic;
2607     int start_index, stream_index;
2608     AVStream *st;
2609
2610     if (codec_type == AVMEDIA_TYPE_VIDEO)
2611         start_index = is->video_stream;
2612     else if (codec_type == AVMEDIA_TYPE_AUDIO)
2613         start_index = is->audio_stream;
2614     else
2615         start_index = is->subtitle_stream;
2616     if (start_index < (codec_type == AVMEDIA_TYPE_SUBTITLE ? -1 : 0))
2617         return;
2618     stream_index = start_index;
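         /* scan forward from the current stream, wrapping around, until another
            usable stream of the requested type is found */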
2619     for(;;) {
2620         if (++stream_index >= is->ic->nb_streams)
2621         {
2622             if (codec_type == AVMEDIA_TYPE_SUBTITLE)
2623             {
2624                 stream_index = -1;
2625                 goto the_end;
2626             } else
2627                 stream_index = 0;
2628         }
2629         if (stream_index == start_index)
2630             return;
2631         st = ic->streams[stream_index];
2632         if (st->codec->codec_type == codec_type) {
2633             /* check that parameters are OK */
2634             switch(codec_type) {
2635             case AVMEDIA_TYPE_AUDIO:
2636                 if (st->codec->sample_rate != 0 &&
2637                     st->codec->channels != 0)
2638                     goto the_end;
2639                 break;
2640             case AVMEDIA_TYPE_VIDEO:
2641             case AVMEDIA_TYPE_SUBTITLE:
2642                 goto the_end;
2643             default:
2644                 break;
2645             }
2646         }
2647     }
2648  the_end:
2649     stream_component_close(is, start_index);
2650     stream_component_open(is, stream_index);
2651 }
2652
2653
2654 static void toggle_full_screen(void)
2655 {
2656     is_full_screen = !is_full_screen;
2657     video_open(cur_stream);
2658 }
2659
2660 static void toggle_pause(void)
2661 {
2662     if (cur_stream)
2663         stream_pause(cur_stream);
2664     step = 0;
2665 }
2666
2667 static void step_to_next_frame(void)
2668 {
2669     if (cur_stream) {
2670         /* if the stream is paused, unpause it, then step */
2671         if (cur_stream->paused)
2672             stream_pause(cur_stream);
2673     }
2674     step = 1;
2675 }
2676
2677 static void toggle_audio_display(void)
2678 {
2679     if (cur_stream) {
2680         int bgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0x00);
2681         cur_stream->show_audio = (cur_stream->show_audio + 1) % 3;
2682         fill_rectangle(screen,
2683                     cur_stream->xleft, cur_stream->ytop, cur_stream->width, cur_stream->height,
2684                     bgcolor);
2685         SDL_UpdateRect(screen, cur_stream->xleft, cur_stream->ytop, cur_stream->width, cur_stream->height);
2686     }
2687 }
2688
2689 /* handle an event sent by the GUI */
2690 static void event_loop(void)
2691 {
2692     SDL_Event event;
2693     double incr, pos, frac;
2694
2695     for(;;) {
2696         double x;
2697         SDL_WaitEvent(&event);
2698         switch(event.type) {
2699         case SDL_KEYDOWN:
2700             if (exit_on_keydown) {
2701                 do_exit();
2702                 break;
2703             }
2704             switch(event.key.keysym.sym) {
2705             case SDLK_ESCAPE:
2706             case SDLK_q:
2707                 do_exit();
2708                 break;
2709             case SDLK_f:
2710                 toggle_full_screen();
2711                 break;
2712             case SDLK_p:
2713             case SDLK_SPACE:
2714                 toggle_pause();
2715                 break;
2716             case SDLK_s: //S: Step to next frame
2717                 step_to_next_frame();
2718                 break;
2719             case SDLK_a:
2720                 if (cur_stream)
2721                     stream_cycle_channel(cur_stream, AVMEDIA_TYPE_AUDIO);
2722                 break;
2723             case SDLK_v:
2724                 if (cur_stream)
2725                     stream_cycle_channel(cur_stream, AVMEDIA_TYPE_VIDEO);
2726                 break;
2727             case SDLK_t:
2728                 if (cur_stream)
2729                     stream_cycle_channel(cur_stream, AVMEDIA_TYPE_SUBTITLE);
2730                 break;
2731             case SDLK_w:
2732                 toggle_audio_display();
2733                 break;
2734             case SDLK_LEFT:
2735                 incr = -10.0;
2736                 goto do_seek;
2737             case SDLK_RIGHT:
2738                 incr = 10.0;
2739                 goto do_seek;
2740             case SDLK_UP:
2741                 incr = 60.0;
2742                 goto do_seek;
2743             case SDLK_DOWN:
2744                 incr = -60.0;
2745             do_seek:
2746                 if (cur_stream) {
2747                     if (seek_by_bytes) {
2748                         if (cur_stream->video_stream >= 0 && cur_stream->video_current_pos>=0){
2749                             pos= cur_stream->video_current_pos;
2750                         }else if(cur_stream->audio_stream >= 0 && cur_stream->audio_pkt.pos>=0){
2751                             pos= cur_stream->audio_pkt.pos;
2752                         }else
2753                             pos = avio_tell(cur_stream->ic->pb);
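                             /* convert the seek increment from seconds to bytes using the
                                stream bit rate, or a rough default of 180000 bytes/s */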
2754                         if (cur_stream->ic->bit_rate)
2755                             incr *= cur_stream->ic->bit_rate / 8.0;
2756                         else
2757                             incr *= 180000.0;
2758                         pos += incr;
2759                         stream_seek(cur_stream, pos, incr, 1);
2760                     } else {
2761                         pos = get_master_clock(cur_stream);
2762                         pos += incr;
2763                         stream_seek(cur_stream, (int64_t)(pos * AV_TIME_BASE), (int64_t)(incr * AV_TIME_BASE), 0);
2764                     }
2765                 }
2766                 break;
2767             default:
2768                 break;
2769             }
2770             break;
2771         case SDL_MOUSEBUTTONDOWN:
2772             if (exit_on_mousedown) {
2773                 do_exit();
2774                 break;
2775             }
2776         case SDL_MOUSEMOTION:
2777             if(event.type ==SDL_MOUSEBUTTONDOWN){
2778                 x= event.button.x;
2779             }else{
2780                 if(event.motion.state != SDL_PRESSED)
2781                     break;
2782                 x= event.motion.x;
2783             }
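                 /* map the click x position to a fraction of the window width and seek
                    to that fraction of the file, by bytes or by time */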
2784             if (cur_stream) {
2785                 if(seek_by_bytes || cur_stream->ic->duration<=0){
2786                     uint64_t size=  avio_size(cur_stream->ic->pb);
2787                     stream_seek(cur_stream, size*x/cur_stream->width, 0, 1);
2788                 }else{
2789                     int64_t ts;
2790                     int ns, hh, mm, ss;
2791                     int tns, thh, tmm, tss;
2792                     tns = cur_stream->ic->duration/1000000LL;
2793                     thh = tns/3600;
2794                     tmm = (tns%3600)/60;
2795                     tss = (tns%60);
2796                     frac = x/cur_stream->width;
2797                     ns = frac*tns;
2798                     hh = ns/3600;
2799                     mm = (ns%3600)/60;
2800                     ss = (ns%60);
2801                     fprintf(stderr, "Seek to %2.0f%% (%2d:%02d:%02d) of total duration (%2d:%02d:%02d)       \n", frac*100,
2802                             hh, mm, ss, thh, tmm, tss);
2803                     ts = frac*cur_stream->ic->duration;
2804                     if (cur_stream->ic->start_time != AV_NOPTS_VALUE)
2805                         ts += cur_stream->ic->start_time;
2806                     stream_seek(cur_stream, ts, 0, 0);
2807                 }
2808             }
2809             break;
2810         case SDL_VIDEORESIZE:
2811             if (cur_stream) {
2812                 screen = SDL_SetVideoMode(event.resize.w, event.resize.h, 0,
2813                                           SDL_HWSURFACE|SDL_RESIZABLE|SDL_ASYNCBLIT|SDL_HWACCEL);
2814                 screen_width = cur_stream->width = event.resize.w;
2815                 screen_height= cur_stream->height= event.resize.h;
2816             }
2817             break;
2818         case SDL_QUIT:
2819         case FF_QUIT_EVENT:
2820             do_exit();
2821             break;
2822         case FF_ALLOC_EVENT:
2823             video_open(event.user.data1);
2824             alloc_picture(event.user.data1);
2825             break;
2826         case FF_REFRESH_EVENT:
2827             video_refresh_timer(event.user.data1);
2828             cur_stream->refresh=0;
2829             break;
2830         default:
2831             break;
2832         }
2833     }
2834 }
2835
2836 static int opt_frame_size(const char *opt, const char *arg)
2837 {
2838     if (av_parse_video_size(&frame_width, &frame_height, arg) < 0) {
2839         fprintf(stderr, "Incorrect frame size\n");
2840         return AVERROR(EINVAL);
2841     }
2842     if ((frame_width % 2) != 0 || (frame_height % 2) != 0) {
2843         fprintf(stderr, "Frame size must be a multiple of 2\n");
2844         return AVERROR(EINVAL);
2845     }
2846     return 0;
2847 }
2848
2849 static int opt_width(const char *opt, const char *arg)
2850 {
2851     screen_width = parse_number_or_die(opt, arg, OPT_INT64, 1, INT_MAX);
2852     return 0;
2853 }
2854
2855 static int opt_height(const char *opt, const char *arg)
2856 {
2857     screen_height = parse_number_or_die(opt, arg, OPT_INT64, 1, INT_MAX);
2858     return 0;
2859 }
2860
2861 static int opt_format(const char *opt, const char *arg)
2862 {
2863     file_iformat = av_find_input_format(arg);
2864     if (!file_iformat) {
2865         fprintf(stderr, "Unknown input format: %s\n", arg);
2866         return AVERROR(EINVAL);
2867     }
2868     return 0;
2869 }
2870
2871 static int opt_frame_pix_fmt(const char *opt, const char *arg)
2872 {
2873     frame_pix_fmt = av_get_pix_fmt(arg);
2874     return 0;
2875 }
2876
2877 static int opt_sync(const char *opt, const char *arg)
2878 {
2879     if (!strcmp(arg, "audio"))
2880         av_sync_type = AV_SYNC_AUDIO_MASTER;
2881     else if (!strcmp(arg, "video"))
2882         av_sync_type = AV_SYNC_VIDEO_MASTER;
2883     else if (!strcmp(arg, "ext"))
2884         av_sync_type = AV_SYNC_EXTERNAL_CLOCK;
2885     else {
2886         fprintf(stderr, "Unknown value for %s: %s\n", opt, arg);
2887         exit(1);
2888     }
2889     return 0;
2890 }
2891
2892 static int opt_seek(const char *opt, const char *arg)
2893 {
2894     start_time = parse_time_or_die(opt, arg, 1);
2895     return 0;
2896 }
2897
2898 static int opt_duration(const char *opt, const char *arg)
2899 {
2900     duration = parse_time_or_die(opt, arg, 1);
2901     return 0;
2902 }
2903
2904 static int opt_debug(const char *opt, const char *arg)
2905 {
2906     av_log_set_level(99);
2907     debug = parse_number_or_die(opt, arg, OPT_INT64, 0, INT_MAX);
2908     return 0;
2909 }
2910
2911 static int opt_vismv(const char *opt, const char *arg)
2912 {
2913     debug_mv = parse_number_or_die(opt, arg, OPT_INT64, INT_MIN, INT_MAX);
2914     return 0;
2915 }
2916
2917 static int opt_thread_count(const char *opt, const char *arg)
2918 {
2919     thread_count= parse_number_or_die(opt, arg, OPT_INT64, 0, INT_MAX);
2920 #if !HAVE_THREADS
2921     fprintf(stderr, "Warning: not compiled with thread support, using thread emulation\n");
2922 #endif
2923     return 0;
2924 }
2925
2926 static const OptionDef options[] = {
2927 #include "cmdutils_common_opts.h"
2928     { "x", HAS_ARG, {(void*)opt_width}, "force displayed width", "width" },
2929     { "y", HAS_ARG, {(void*)opt_height}, "force displayed height", "height" },
2930     { "s", HAS_ARG | OPT_VIDEO, {(void*)opt_frame_size}, "set frame size (WxH or abbreviation)", "size" },
2931     { "fs", OPT_BOOL, {(void*)&is_full_screen}, "force full screen" },
2932     { "an", OPT_BOOL, {(void*)&audio_disable}, "disable audio" },
2933     { "vn", OPT_BOOL, {(void*)&video_disable}, "disable video" },
2934     { "ast", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&wanted_stream[AVMEDIA_TYPE_AUDIO]}, "select desired audio stream", "stream_number" },
2935     { "vst", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&wanted_stream[AVMEDIA_TYPE_VIDEO]}, "select desired video stream", "stream_number" },
2936     { "sst", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&wanted_stream[AVMEDIA_TYPE_SUBTITLE]}, "select desired subtitle stream", "stream_number" },
2937     { "ss", HAS_ARG, {(void*)&opt_seek}, "seek to a given position in seconds", "pos" },
2938     { "t", HAS_ARG, {(void*)&opt_duration}, "play \"duration\" seconds of audio/video", "duration" },
2939     { "bytes", OPT_INT | HAS_ARG, {(void*)&seek_by_bytes}, "seek by bytes 0=off 1=on -1=auto", "val" },
2940     { "nodisp", OPT_BOOL, {(void*)&display_disable}, "disable graphical display" },
2941     { "f", HAS_ARG, {(void*)opt_format}, "force format", "fmt" },
2942     { "pix_fmt", HAS_ARG | OPT_EXPERT | OPT_VIDEO, {(void*)opt_frame_pix_fmt}, "set pixel format", "format" },
2943     { "stats", OPT_BOOL | OPT_EXPERT, {(void*)&show_status}, "show status", "" },
2944     { "debug", HAS_ARG | OPT_EXPERT, {(void*)opt_debug}, "print specific debug info", "" },
2945     { "bug", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&workaround_bugs}, "workaround bugs", "" },
2946     { "vismv", HAS_ARG | OPT_EXPERT, {(void*)opt_vismv}, "visualize motion vectors", "" },
2947     { "fast", OPT_BOOL | OPT_EXPERT, {(void*)&fast}, "non-spec-compliant optimizations", "" },
2948     { "genpts", OPT_BOOL | OPT_EXPERT, {(void*)&genpts}, "generate pts", "" },
2949     { "drp", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&decoder_reorder_pts}, "let decoder reorder pts 0=off 1=on -1=auto", ""},
2950     { "lowres", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&lowres}, "", "" },
2951     { "skiploop", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&skip_loop_filter}, "", "" },
2952     { "skipframe", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&skip_frame}, "", "" },
2953     { "skipidct", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&skip_idct}, "", "" },
2954     { "idct", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&idct}, "set idct algo",  "algo" },
2955     { "er", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&error_recognition}, "set error detection threshold (0-4)",  "threshold" },
2956     { "ec", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&error_concealment}, "set error concealment options",  "bit_mask" },
2957     { "sync", HAS_ARG | OPT_EXPERT, {(void*)opt_sync}, "set audio-video sync. type (type=audio/video/ext)", "type" },
2958     { "threads", HAS_ARG | OPT_EXPERT, {(void*)opt_thread_count}, "thread count", "count" },
2959     { "autoexit", OPT_BOOL | OPT_EXPERT, {(void*)&autoexit}, "exit at the end", "" },
2960     { "exitonkeydown", OPT_BOOL | OPT_EXPERT, {(void*)&exit_on_keydown}, "exit on key down", "" },
2961     { "exitonmousedown", OPT_BOOL | OPT_EXPERT, {(void*)&exit_on_mousedown}, "exit on mouse down", "" },
2962     { "loop", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&loop}, "set number of times the playback shall be looped", "loop count" },
2963     { "framedrop", OPT_BOOL | OPT_EXPERT, {(void*)&framedrop}, "drop frames when cpu is too slow", "" },
2964     { "window_title", OPT_STRING | HAS_ARG, {(void*)&window_title}, "set window title", "window title" },
2965 #if CONFIG_AVFILTER
2966     { "vf", OPT_STRING | HAS_ARG, {(void*)&vfilters}, "video filters", "filter list" },
2967 #endif
2968     { "rdftspeed", OPT_INT | HAS_ARG| OPT_AUDIO | OPT_EXPERT, {(void*)&rdftspeed}, "rdft speed", "msecs" },
2969     { "default", HAS_ARG | OPT_AUDIO | OPT_VIDEO | OPT_EXPERT, {(void*)opt_default}, "generic catch all option", "" },
2970     { "i", 0, {NULL}, "ffmpeg compatibility dummy option", ""},
2971     { NULL, },
2972 };
2973
2974 static void show_usage(void)
2975 {
2976     printf("Simple media player\n");
2977     printf("usage: ffplay [options] input_file\n");
2978     printf("\n");
2979 }
2980
2981 static void show_help(void)
2982 {
2983     av_log_set_callback(log_callback_help);
2984     show_usage();
2985     show_help_options(options, "Main options:\n",
2986                       OPT_EXPERT, 0);
2987     show_help_options(options, "\nAdvanced options:\n",
2988                       OPT_EXPERT, OPT_EXPERT);
2989     printf("\n");
2990     av_opt_show2(avcodec_opts[0], NULL,
2991                  AV_OPT_FLAG_DECODING_PARAM, 0);
2992     printf("\n");
2993     av_opt_show2(avformat_opts, NULL,
2994                  AV_OPT_FLAG_DECODING_PARAM, 0);
2995 #if !CONFIG_AVFILTER
2996     printf("\n");
2997     av_opt_show2(sws_opts, NULL,
2998                  AV_OPT_FLAG_ENCODING_PARAM, 0);
2999 #endif
3000     printf("\nWhile playing:\n"
3001            "q, ESC              quit\n"
3002            "f                   toggle full screen\n"
3003            "p, SPC              pause\n"
3004            "a                   cycle audio channel\n"
3005            "v                   cycle video channel\n"
3006            "t                   cycle subtitle channel\n"
3007            "w                   show audio waves\n"
3008            "s                   activate frame-step mode\n"
3009            "left/right          seek backward/forward 10 seconds\n"
3010            "down/up             seek backward/forward 1 minute\n"
3011            "mouse click         seek to percentage in file corresponding to fraction of width\n"
3012            );
3013 }
3014
3015 static void opt_input_file(const char *filename)
3016 {
3017     if (input_filename) {
3018         fprintf(stderr, "Argument '%s' provided as input filename, but '%s' was already specified.\n",
3019                 filename, input_filename);
3020         exit(1);
3021     }
3022     if (!strcmp(filename, "-"))
3023         filename = "pipe:";
3024     input_filename = filename;
3025 }
3026
3027 /* Called from the main */
3028 int main(int argc, char **argv)
3029 {
3030     int flags;
3031
3032     av_log_set_flags(AV_LOG_SKIP_REPEATED);
3033
3034     /* register all codecs, demuxers and protocols */
3035     avcodec_register_all();
3036 #if CONFIG_AVDEVICE
3037     avdevice_register_all();
3038 #endif
3039 #if CONFIG_AVFILTER
3040     avfilter_register_all();
3041 #endif
3042     av_register_all();
3043
3044     init_opts();
3045
3046     show_banner();
3047
3048     parse_options(argc, argv, options, opt_input_file);
3049
3050     if (!input_filename) {
3051         show_usage();
3052         fprintf(stderr, "An input file must be specified\n");
3053         fprintf(stderr, "Use -h to get full help or, even better, run 'man ffplay'\n");
3054         exit(1);
3055     }
3056
3057     if (display_disable) {
3058         video_disable = 1;
3059     }
3060     flags = SDL_INIT_VIDEO | SDL_INIT_AUDIO | SDL_INIT_TIMER;
3061 #if !defined(__MINGW32__) && !defined(__APPLE__)
3062     flags |= SDL_INIT_EVENTTHREAD; /* Not supported on Windows or Mac OS X */
3063 #endif
3064     if (SDL_Init (flags)) {
3065         fprintf(stderr, "Could not initialize SDL - %s\n", SDL_GetError());
3066         exit(1);
3067     }
3068
3069     if (!display_disable) {
3070 #if HAVE_SDL_VIDEO_SIZE
3071         const SDL_VideoInfo *vi = SDL_GetVideoInfo();
3072         fs_screen_width = vi->current_w;
3073         fs_screen_height = vi->current_h;
3074 #endif
3075     }
3076
3077     SDL_EventState(SDL_ACTIVEEVENT, SDL_IGNORE);
3078     SDL_EventState(SDL_SYSWMEVENT, SDL_IGNORE);
3079     SDL_EventState(SDL_USEREVENT, SDL_IGNORE);
3080
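         /* the flush packet is a special marker put on the packet queues after a
            seek; when a decoder thread sees it, it flushes its codec buffers */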
3081     av_init_packet(&flush_pkt);
3082     flush_pkt.data = (uint8_t *)"FLUSH";
3083
3084     cur_stream = stream_open(input_filename, file_iformat);
3085
3086     event_loop();
3087
3088     /* never returns */
3089
3090     return 0;
3091 }