1 /*
2  * ffplay : Simple Media Player based on the FFmpeg libraries
3  * Copyright (c) 2003 Fabrice Bellard
4  *
5  * This file is part of FFmpeg.
6  *
7  * FFmpeg is free software; you can redistribute it and/or
8  * modify it under the terms of the GNU Lesser General Public
9  * License as published by the Free Software Foundation; either
10  * version 2.1 of the License, or (at your option) any later version.
11  *
12  * FFmpeg is distributed in the hope that it will be useful,
13  * but WITHOUT ANY WARRANTY; without even the implied warranty of
14  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
15  * Lesser General Public License for more details.
16  *
17  * You should have received a copy of the GNU Lesser General Public
18  * License along with FFmpeg; if not, write to the Free Software
19  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
20  */
21
22 #include "config.h"
23 #include <inttypes.h>
24 #include <math.h>
25 #include <limits.h>
26 #include "libavutil/avstring.h"
27 #include "libavutil/colorspace.h"
28 #include "libavutil/mathematics.h"
29 #include "libavutil/pixdesc.h"
30 #include "libavutil/imgutils.h"
31 #include "libavutil/dict.h"
32 #include "libavutil/parseutils.h"
33 #include "libavutil/samplefmt.h"
34 #include "libavutil/avassert.h"
35 #include "libavformat/avformat.h"
36 #include "libavdevice/avdevice.h"
37 #include "libswscale/swscale.h"
38 #include "libavcodec/audioconvert.h"
39 #include "libavutil/opt.h"
40 #include "libavcodec/avfft.h"
41
42 #if CONFIG_AVFILTER
43 # include "libavfilter/avcodec.h"
44 # include "libavfilter/avfilter.h"
45 # include "libavfilter/avfiltergraph.h"
46 # include "libavfilter/buffersink.h"
47 #endif
48
49 #include <SDL.h>
50 #include <SDL_thread.h>
51
52 #include "cmdutils.h"
53
54 #include <unistd.h>
55 #include <assert.h>
56
57 const char program_name[] = "ffplay";
58 const int program_birth_year = 2003;
59
60 #define MAX_QUEUE_SIZE (15 * 1024 * 1024)
61 #define MIN_AUDIOQ_SIZE (20 * 16 * 1024)
62 #define MIN_FRAMES 5
63
64 /* SDL audio buffer size, in samples. Should be small to have precise
65    A/V sync as SDL does not have hardware buffer fullness info. */
66 #define SDL_AUDIO_BUFFER_SIZE 1024
67
68 /* no AV sync correction is done if below the AV sync threshold */
69 #define AV_SYNC_THRESHOLD 0.01
70 /* no AV correction is done if too big error */
71 #define AV_NOSYNC_THRESHOLD 10.0
72
73 #define FRAME_SKIP_FACTOR 0.05
74
75 /* maximum audio speed change to get correct sync */
76 #define SAMPLE_CORRECTION_PERCENT_MAX 10
77
78 /* we use about AUDIO_DIFF_AVG_NB A-V differences to make the average */
79 #define AUDIO_DIFF_AVG_NB   20
80
81 /* NOTE: the size must be big enough to compensate for the hardware audio buffer size */
82 #define SAMPLE_ARRAY_SIZE (2*65536)
83
84 static int sws_flags = SWS_BICUBIC;
85
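/* FIFO of demuxed packets shared between the read thread and the decoder threads */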
86 typedef struct PacketQueue {
87     AVPacketList *first_pkt, *last_pkt;
88     int nb_packets;
89     int size;
90     int abort_request;
91     SDL_mutex *mutex;
92     SDL_cond *cond;
93 } PacketQueue;
94
95 #define VIDEO_PICTURE_QUEUE_SIZE 2
96 #define SUBPICTURE_QUEUE_SIZE 4
97
98 typedef struct VideoPicture {
99     double pts;                                  ///<presentation time stamp for this picture
100     double target_clock;                         ///<av_gettime() time at which this should be displayed ideally
101     double duration;                             ///<expected duration of the frame
102     int64_t pos;                                 ///<byte position in file
103     SDL_Overlay *bmp;
104     int width, height; /* source height & width */
105     int allocated;
106     enum PixelFormat pix_fmt;
107
108 #if CONFIG_AVFILTER
109     AVFilterBufferRef *picref;
110 #endif
111 } VideoPicture;
112
113 typedef struct SubPicture {
114     double pts; /* presentation time stamp for this picture */
115     AVSubtitle sub;
116 } SubPicture;
117
118 enum {
119     AV_SYNC_AUDIO_MASTER, /* default choice */
120     AV_SYNC_VIDEO_MASTER,
121     AV_SYNC_EXTERNAL_CLOCK, /* synchronize to an external clock */
122 };
123
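/* all playback state: demuxer context, per-stream decoders, packet and picture queues, clocks and display settings */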
124 typedef struct VideoState {
125     SDL_Thread *read_tid;
126     SDL_Thread *video_tid;
127     SDL_Thread *refresh_tid;
128     AVInputFormat *iformat;
129     int no_background;
130     int abort_request;
131     int paused;
132     int last_paused;
133     int seek_req;
134     int seek_flags;
135     int64_t seek_pos;
136     int64_t seek_rel;
137     int read_pause_return;
138     AVFormatContext *ic;
139
140     int audio_stream;
141
142     int av_sync_type;
143     double external_clock; /* external clock base */
144     int64_t external_clock_time;
145
146     double audio_clock;
147     double audio_diff_cum; /* used for AV difference average computation */
148     double audio_diff_avg_coef;
149     double audio_diff_threshold;
150     int audio_diff_avg_count;
151     AVStream *audio_st;
152     PacketQueue audioq;
153     int audio_hw_buf_size;
154     /* samples output by the codec. we reserve more space for avsync
155        compensation */
156     DECLARE_ALIGNED(16,uint8_t,audio_buf1)[(AVCODEC_MAX_AUDIO_FRAME_SIZE * 3) / 2];
157     DECLARE_ALIGNED(16,uint8_t,audio_buf2)[(AVCODEC_MAX_AUDIO_FRAME_SIZE * 3) / 2];
158     uint8_t *audio_buf;
159     unsigned int audio_buf_size; /* in bytes */
160     int audio_buf_index; /* in bytes */
161     int audio_write_buf_size;
162     AVPacket audio_pkt_temp;
163     AVPacket audio_pkt;
164     enum AVSampleFormat audio_src_fmt;
165     AVAudioConvert *reformat_ctx;
166     double audio_current_pts;
167     double audio_current_pts_drift;
168
169     enum ShowMode {
170         SHOW_MODE_NONE = -1, SHOW_MODE_VIDEO = 0, SHOW_MODE_WAVES, SHOW_MODE_RDFT, SHOW_MODE_NB
171     } show_mode;
172     int16_t sample_array[SAMPLE_ARRAY_SIZE];
173     int sample_array_index;
174     int last_i_start;
175     RDFTContext *rdft;
176     int rdft_bits;
177     FFTSample *rdft_data;
178     int xpos;
179
180     SDL_Thread *subtitle_tid;
181     int subtitle_stream;
182     int subtitle_stream_changed;
183     AVStream *subtitle_st;
184     PacketQueue subtitleq;
185     SubPicture subpq[SUBPICTURE_QUEUE_SIZE];
186     int subpq_size, subpq_rindex, subpq_windex;
187     SDL_mutex *subpq_mutex;
188     SDL_cond *subpq_cond;
189
190     double frame_timer;
191     double frame_last_pts;
192     double frame_last_delay;
193     double video_clock;                          ///<pts of last decoded frame / predicted pts of next decoded frame
194     int video_stream;
195     AVStream *video_st;
196     PacketQueue videoq;
197     double video_current_pts;                    ///<current displayed pts (different from video_clock if frame fifos are used)
198     double video_current_pts_drift;              ///<video_current_pts - time (av_gettime) at which we updated video_current_pts - used to have running video pts
199     int64_t video_current_pos;                   ///<current displayed file pos
200     VideoPicture pictq[VIDEO_PICTURE_QUEUE_SIZE];
201     int pictq_size, pictq_rindex, pictq_windex;
202     SDL_mutex *pictq_mutex;
203     SDL_cond *pictq_cond;
204 #if !CONFIG_AVFILTER
205     struct SwsContext *img_convert_ctx;
206 #endif
207
208     char filename[1024];
209     int width, height, xleft, ytop;
210     int step;
211
212 #if CONFIG_AVFILTER
213     AVFilterContext *out_video_filter;          ///<the last filter in the video chain
214 #endif
215
216     float skip_frames;
217     float skip_frames_index;
218     int refresh;
219 } VideoState;
220
221 static int opt_help(const char *opt, const char *arg);
222
223 /* options specified by the user */
224 static AVInputFormat *file_iformat;
225 static const char *input_filename;
226 static const char *window_title;
227 static int fs_screen_width;
228 static int fs_screen_height;
229 static int screen_width = 0;
230 static int screen_height = 0;
231 static int audio_disable;
232 static int video_disable;
233 static int wanted_stream[AVMEDIA_TYPE_NB]={
234     [AVMEDIA_TYPE_AUDIO]=-1,
235     [AVMEDIA_TYPE_VIDEO]=-1,
236     [AVMEDIA_TYPE_SUBTITLE]=-1,
237 };
238 static int seek_by_bytes=-1;
239 static int display_disable;
240 static int show_status = 1;
241 static int av_sync_type = AV_SYNC_AUDIO_MASTER;
242 static int64_t start_time = AV_NOPTS_VALUE;
243 static int64_t duration = AV_NOPTS_VALUE;
244 static int workaround_bugs = 1;
245 static int fast = 0;
246 static int genpts = 0;
247 static int lowres = 0;
248 static int idct = FF_IDCT_AUTO;
249 static enum AVDiscard skip_frame= AVDISCARD_DEFAULT;
250 static enum AVDiscard skip_idct= AVDISCARD_DEFAULT;
251 static enum AVDiscard skip_loop_filter= AVDISCARD_DEFAULT;
252 static int error_recognition = FF_ER_CAREFUL;
253 static int error_concealment = 3;
254 static int decoder_reorder_pts= -1;
255 static int autoexit;
256 static int exit_on_keydown;
257 static int exit_on_mousedown;
258 static int loop=1;
259 static int framedrop=-1;
260 static enum ShowMode show_mode = SHOW_MODE_NONE;
261
262 static int rdftspeed=20;
263 #if CONFIG_AVFILTER
264 static char *vfilters = NULL;
265 #endif
266
267 /* current context */
268 static int is_full_screen;
269 static int64_t audio_callback_time;
270
271 static AVPacket flush_pkt;
272
273 #define FF_ALLOC_EVENT   (SDL_USEREVENT)
274 #define FF_REFRESH_EVENT (SDL_USEREVENT + 1)
275 #define FF_QUIT_EVENT    (SDL_USEREVENT + 2)
276
277 static SDL_Surface *screen;
278
279 void exit_program(int ret)
280 {
281     exit(ret);
282 }
283
284 static int packet_queue_put(PacketQueue *q, AVPacket *pkt)
285 {
286     AVPacketList *pkt1;
287
288     /* duplicate the packet */
289     if (pkt!=&flush_pkt && av_dup_packet(pkt) < 0)
290         return -1;
291
292     pkt1 = av_malloc(sizeof(AVPacketList));
293     if (!pkt1)
294         return -1;
295     pkt1->pkt = *pkt;
296     pkt1->next = NULL;
297
298
299     SDL_LockMutex(q->mutex);
300
301     if (!q->last_pkt)
302
303         q->first_pkt = pkt1;
304     else
305         q->last_pkt->next = pkt1;
306     q->last_pkt = pkt1;
307     q->nb_packets++;
308     q->size += pkt1->pkt.size + sizeof(*pkt1);
309     /* XXX: should duplicate packet data in DV case */
310     SDL_CondSignal(q->cond);
311
312     SDL_UnlockMutex(q->mutex);
313     return 0;
314 }
315
316 /* packet queue handling */
317 static void packet_queue_init(PacketQueue *q)
318 {
319     memset(q, 0, sizeof(PacketQueue));
320     q->mutex = SDL_CreateMutex();
321     q->cond = SDL_CreateCond();
322     packet_queue_put(q, &flush_pkt);
323 }
324
325 static void packet_queue_flush(PacketQueue *q)
326 {
327     AVPacketList *pkt, *pkt1;
328
329     SDL_LockMutex(q->mutex);
330     for(pkt = q->first_pkt; pkt != NULL; pkt = pkt1) {
331         pkt1 = pkt->next;
332         av_free_packet(&pkt->pkt);
333         av_freep(&pkt);
334     }
335     q->last_pkt = NULL;
336     q->first_pkt = NULL;
337     q->nb_packets = 0;
338     q->size = 0;
339     SDL_UnlockMutex(q->mutex);
340 }
341
342 static void packet_queue_end(PacketQueue *q)
343 {
344     packet_queue_flush(q);
345     SDL_DestroyMutex(q->mutex);
346     SDL_DestroyCond(q->cond);
347 }
348
349 static void packet_queue_abort(PacketQueue *q)
350 {
351     SDL_LockMutex(q->mutex);
352
353     q->abort_request = 1;
354
355     SDL_CondSignal(q->cond);
356
357     SDL_UnlockMutex(q->mutex);
358 }
359
360 /* return < 0 if aborted, 0 if no packet and > 0 if packet.  */
361 static int packet_queue_get(PacketQueue *q, AVPacket *pkt, int block)
362 {
363     AVPacketList *pkt1;
364     int ret;
365
366     SDL_LockMutex(q->mutex);
367
368     for(;;) {
369         if (q->abort_request) {
370             ret = -1;
371             break;
372         }
373
374         pkt1 = q->first_pkt;
375         if (pkt1) {
376             q->first_pkt = pkt1->next;
377             if (!q->first_pkt)
378                 q->last_pkt = NULL;
379             q->nb_packets--;
380             q->size -= pkt1->pkt.size + sizeof(*pkt1);
381             *pkt = pkt1->pkt;
382             av_free(pkt1);
383             ret = 1;
384             break;
385         } else if (!block) {
386             ret = 0;
387             break;
388         } else {
389             SDL_CondWait(q->cond, q->mutex);
390         }
391     }
392     SDL_UnlockMutex(q->mutex);
393     return ret;
394 }
395
396 static inline void fill_rectangle(SDL_Surface *screen,
397                                   int x, int y, int w, int h, int color)
398 {
399     SDL_Rect rect;
400     rect.x = x;
401     rect.y = y;
402     rect.w = w;
403     rect.h = h;
404     SDL_FillRect(screen, &rect, color);
405 }
406
407 #define ALPHA_BLEND(a, oldp, newp, s)\
408 ((((oldp << s) * (255 - (a))) + (newp * (a))) / (255 << s))
409
410 #define RGBA_IN(r, g, b, a, s)\
411 {\
412     unsigned int v = ((const uint32_t *)(s))[0];\
413     a = (v >> 24) & 0xff;\
414     r = (v >> 16) & 0xff;\
415     g = (v >> 8) & 0xff;\
416     b = v & 0xff;\
417 }
418
419 #define YUVA_IN(y, u, v, a, s, pal)\
420 {\
421     unsigned int val = ((const uint32_t *)(pal))[*(const uint8_t*)(s)];\
422     a = (val >> 24) & 0xff;\
423     y = (val >> 16) & 0xff;\
424     u = (val >> 8) & 0xff;\
425     v = val & 0xff;\
426 }
427
428 #define YUVA_OUT(d, y, u, v, a)\
429 {\
430     ((uint32_t *)(d))[0] = (a << 24) | (y << 16) | (u << 8) | v;\
431 }
432
433
434 #define BPP 1
435
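/* alpha-blend a palettized subtitle rectangle onto a YUV420P picture (the palette carries alpha + YCbCr) */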
436 static void blend_subrect(AVPicture *dst, const AVSubtitleRect *rect, int imgw, int imgh)
437 {
438     int wrap, wrap3, width2, skip2;
439     int y, u, v, a, u1, v1, a1, w, h;
440     uint8_t *lum, *cb, *cr;
441     const uint8_t *p;
442     const uint32_t *pal;
443     int dstx, dsty, dstw, dsth;
444
445     dstw = av_clip(rect->w, 0, imgw);
446     dsth = av_clip(rect->h, 0, imgh);
447     dstx = av_clip(rect->x, 0, imgw - dstw);
448     dsty = av_clip(rect->y, 0, imgh - dsth);
449     lum = dst->data[0] + dsty * dst->linesize[0];
450     cb = dst->data[1] + (dsty >> 1) * dst->linesize[1];
451     cr = dst->data[2] + (dsty >> 1) * dst->linesize[2];
452
453     width2 = ((dstw + 1) >> 1) + (dstx & ~dstw & 1);
454     skip2 = dstx >> 1;
455     wrap = dst->linesize[0];
456     wrap3 = rect->pict.linesize[0];
457     p = rect->pict.data[0];
458     pal = (const uint32_t *)rect->pict.data[1];  /* Now in YCrCb! */
459
460     if (dsty & 1) {
461         lum += dstx;
462         cb += skip2;
463         cr += skip2;
464
465         if (dstx & 1) {
466             YUVA_IN(y, u, v, a, p, pal);
467             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
468             cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
469             cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
470             cb++;
471             cr++;
472             lum++;
473             p += BPP;
474         }
475         for(w = dstw - (dstx & 1); w >= 2; w -= 2) {
476             YUVA_IN(y, u, v, a, p, pal);
477             u1 = u;
478             v1 = v;
479             a1 = a;
480             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
481
482             YUVA_IN(y, u, v, a, p + BPP, pal);
483             u1 += u;
484             v1 += v;
485             a1 += a;
486             lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
487             cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
488             cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
489             cb++;
490             cr++;
491             p += 2 * BPP;
492             lum += 2;
493         }
494         if (w) {
495             YUVA_IN(y, u, v, a, p, pal);
496             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
497             cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
498             cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
499             p++;
500             lum++;
501         }
502         p += wrap3 - dstw * BPP;
503         lum += wrap - dstw - dstx;
504         cb += dst->linesize[1] - width2 - skip2;
505         cr += dst->linesize[2] - width2 - skip2;
506     }
507     for(h = dsth - (dsty & 1); h >= 2; h -= 2) {
508         lum += dstx;
509         cb += skip2;
510         cr += skip2;
511
512         if (dstx & 1) {
513             YUVA_IN(y, u, v, a, p, pal);
514             u1 = u;
515             v1 = v;
516             a1 = a;
517             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
518             p += wrap3;
519             lum += wrap;
520             YUVA_IN(y, u, v, a, p, pal);
521             u1 += u;
522             v1 += v;
523             a1 += a;
524             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
525             cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
526             cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
527             cb++;
528             cr++;
529             p += -wrap3 + BPP;
530             lum += -wrap + 1;
531         }
532         for(w = dstw - (dstx & 1); w >= 2; w -= 2) {
533             YUVA_IN(y, u, v, a, p, pal);
534             u1 = u;
535             v1 = v;
536             a1 = a;
537             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
538
539             YUVA_IN(y, u, v, a, p + BPP, pal);
540             u1 += u;
541             v1 += v;
542             a1 += a;
543             lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
544             p += wrap3;
545             lum += wrap;
546
547             YUVA_IN(y, u, v, a, p, pal);
548             u1 += u;
549             v1 += v;
550             a1 += a;
551             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
552
553             YUVA_IN(y, u, v, a, p + BPP, pal);
554             u1 += u;
555             v1 += v;
556             a1 += a;
557             lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
558
559             cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 2);
560             cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 2);
561
562             cb++;
563             cr++;
564             p += -wrap3 + 2 * BPP;
565             lum += -wrap + 2;
566         }
567         if (w) {
568             YUVA_IN(y, u, v, a, p, pal);
569             u1 = u;
570             v1 = v;
571             a1 = a;
572             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
573             p += wrap3;
574             lum += wrap;
575             YUVA_IN(y, u, v, a, p, pal);
576             u1 += u;
577             v1 += v;
578             a1 += a;
579             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
580             cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
581             cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
582             cb++;
583             cr++;
584             p += -wrap3 + BPP;
585             lum += -wrap + 1;
586         }
587         p += wrap3 + (wrap3 - dstw * BPP);
588         lum += wrap + (wrap - dstw - dstx);
589         cb += dst->linesize[1] - width2 - skip2;
590         cr += dst->linesize[2] - width2 - skip2;
591     }
592     /* handle odd height */
593     if (h) {
594         lum += dstx;
595         cb += skip2;
596         cr += skip2;
597
598         if (dstx & 1) {
599             YUVA_IN(y, u, v, a, p, pal);
600             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
601             cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
602             cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
603             cb++;
604             cr++;
605             lum++;
606             p += BPP;
607         }
608         for(w = dstw - (dstx & 1); w >= 2; w -= 2) {
609             YUVA_IN(y, u, v, a, p, pal);
610             u1 = u;
611             v1 = v;
612             a1 = a;
613             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
614
615             YUVA_IN(y, u, v, a, p + BPP, pal);
616             u1 += u;
617             v1 += v;
618             a1 += a;
619             lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
620             cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
621             cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
622             cb++;
623             cr++;
624             p += 2 * BPP;
625             lum += 2;
626         }
627         if (w) {
628             YUVA_IN(y, u, v, a, p, pal);
629             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
630             cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
631             cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
632         }
633     }
634 }
635
636 static void free_subpicture(SubPicture *sp)
637 {
638     avsubtitle_free(&sp->sub);
639 }
640
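/* show the current picture on the SDL overlay, blending any pending subtitle into it first */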
641 static void video_image_display(VideoState *is)
642 {
643     VideoPicture *vp;
644     SubPicture *sp;
645     AVPicture pict;
646     float aspect_ratio;
647     int width, height, x, y;
648     SDL_Rect rect;
649     int i;
650
651     vp = &is->pictq[is->pictq_rindex];
652     if (vp->bmp) {
653 #if CONFIG_AVFILTER
654          if (vp->picref->video->sample_aspect_ratio.num == 0)
655              aspect_ratio = 0;
656          else
657              aspect_ratio = av_q2d(vp->picref->video->sample_aspect_ratio);
658 #else
659
660         /* XXX: use variable in the frame */
661         if (is->video_st->sample_aspect_ratio.num)
662             aspect_ratio = av_q2d(is->video_st->sample_aspect_ratio);
663         else if (is->video_st->codec->sample_aspect_ratio.num)
664             aspect_ratio = av_q2d(is->video_st->codec->sample_aspect_ratio);
665         else
666             aspect_ratio = 0;
667 #endif
668         if (aspect_ratio <= 0.0)
669             aspect_ratio = 1.0;
670         aspect_ratio *= (float)vp->width / (float)vp->height;
671
672         if (is->subtitle_st) {
673             if (is->subpq_size > 0) {
674                 sp = &is->subpq[is->subpq_rindex];
675
676                 if (vp->pts >= sp->pts + ((float) sp->sub.start_display_time / 1000)) {
677                     SDL_LockYUVOverlay (vp->bmp);
678
679                     pict.data[0] = vp->bmp->pixels[0];
680                     pict.data[1] = vp->bmp->pixels[2];
681                     pict.data[2] = vp->bmp->pixels[1];
682
683                     pict.linesize[0] = vp->bmp->pitches[0];
684                     pict.linesize[1] = vp->bmp->pitches[2];
685                     pict.linesize[2] = vp->bmp->pitches[1];
686
687                     for (i = 0; i < sp->sub.num_rects; i++)
688                         blend_subrect(&pict, sp->sub.rects[i],
689                                       vp->bmp->w, vp->bmp->h);
690
691                     SDL_UnlockYUVOverlay (vp->bmp);
692                 }
693             }
694         }
695
696
697         /* XXX: we assume the screen has a 1:1 pixel aspect ratio */
698         height = is->height;
699         width = ((int)rint(height * aspect_ratio)) & ~1;
700         if (width > is->width) {
701             width = is->width;
702             height = ((int)rint(width / aspect_ratio)) & ~1;
703         }
704         x = (is->width - width) / 2;
705         y = (is->height - height) / 2;
706         is->no_background = 0;
707         rect.x = is->xleft + x;
708         rect.y = is->ytop  + y;
709         rect.w = FFMAX(width,  1);
710         rect.h = FFMAX(height, 1);
711         SDL_DisplayYUVOverlay(vp->bmp, &rect);
712     }
713 }
714
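/* modulo that always returns a non-negative result; used to wrap indices into the circular sample array */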
715 static inline int compute_mod(int a, int b)
716 {
717     return a < 0 ? a%b + b : a%b;
718 }
719
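/* audio-only visualization: draw the waveform (SHOW_MODE_WAVES) or an RDFT spectrogram of the recently played samples */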
720 static void video_audio_display(VideoState *s)
721 {
722     int i, i_start, x, y1, y, ys, delay, n, nb_display_channels;
723     int ch, channels, h, h2, bgcolor, fgcolor;
724     int64_t time_diff;
725     int rdft_bits, nb_freq;
726
727     for(rdft_bits=1; (1<<rdft_bits)<2*s->height; rdft_bits++)
728         ;
729     nb_freq= 1<<(rdft_bits-1);
730
731     /* compute display index : center on currently output samples */
732     channels = s->audio_st->codec->channels;
733     nb_display_channels = channels;
734     if (!s->paused) {
735         int data_used= s->show_mode == SHOW_MODE_WAVES ? s->width : (2*nb_freq);
736         n = 2 * channels;
737         delay = s->audio_write_buf_size;
738         delay /= n;
739
740         /* to be more precise, we take into account the time spent since
741            the last buffer computation */
742         if (audio_callback_time) {
743             time_diff = av_gettime() - audio_callback_time;
744             delay -= (time_diff * s->audio_st->codec->sample_rate) / 1000000;
745         }
746
747         delay += 2*data_used;
748         if (delay < data_used)
749             delay = data_used;
750
751         i_start= x = compute_mod(s->sample_array_index - delay * channels, SAMPLE_ARRAY_SIZE);
752         if (s->show_mode == SHOW_MODE_WAVES) {
753             h= INT_MIN;
754             for(i=0; i<1000; i+=channels){
755                 int idx= (SAMPLE_ARRAY_SIZE + x - i) % SAMPLE_ARRAY_SIZE;
756                 int a= s->sample_array[idx];
757                 int b= s->sample_array[(idx + 4*channels)%SAMPLE_ARRAY_SIZE];
758                 int c= s->sample_array[(idx + 5*channels)%SAMPLE_ARRAY_SIZE];
759                 int d= s->sample_array[(idx + 9*channels)%SAMPLE_ARRAY_SIZE];
760                 int score= a-d;
761                 if(h<score && (b^c)<0){
762                     h= score;
763                     i_start= idx;
764                 }
765             }
766         }
767
768         s->last_i_start = i_start;
769     } else {
770         i_start = s->last_i_start;
771     }
772
773     bgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0x00);
774     if (s->show_mode == SHOW_MODE_WAVES) {
775         fill_rectangle(screen,
776                        s->xleft, s->ytop, s->width, s->height,
777                        bgcolor);
778
779         fgcolor = SDL_MapRGB(screen->format, 0xff, 0xff, 0xff);
780
781         /* total height for one channel */
782         h = s->height / nb_display_channels;
783         /* graph height / 2 */
784         h2 = (h * 9) / 20;
785         for(ch = 0;ch < nb_display_channels; ch++) {
786             i = i_start + ch;
787             y1 = s->ytop + ch * h + (h / 2); /* position of center line */
788             for(x = 0; x < s->width; x++) {
789                 y = (s->sample_array[i] * h2) >> 15;
790                 if (y < 0) {
791                     y = -y;
792                     ys = y1 - y;
793                 } else {
794                     ys = y1;
795                 }
796                 fill_rectangle(screen,
797                                s->xleft + x, ys, 1, y,
798                                fgcolor);
799                 i += channels;
800                 if (i >= SAMPLE_ARRAY_SIZE)
801                     i -= SAMPLE_ARRAY_SIZE;
802             }
803         }
804
805         fgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0xff);
806
807         for(ch = 1;ch < nb_display_channels; ch++) {
808             y = s->ytop + ch * h;
809             fill_rectangle(screen,
810                            s->xleft, y, s->width, 1,
811                            fgcolor);
812         }
813         SDL_UpdateRect(screen, s->xleft, s->ytop, s->width, s->height);
814     }else{
815         nb_display_channels= FFMIN(nb_display_channels, 2);
816         if(rdft_bits != s->rdft_bits){
817             av_rdft_end(s->rdft);
818             av_free(s->rdft_data);
819             s->rdft = av_rdft_init(rdft_bits, DFT_R2C);
820             s->rdft_bits= rdft_bits;
821             s->rdft_data= av_malloc(4*nb_freq*sizeof(*s->rdft_data));
822         }
823         {
824             FFTSample *data[2];
825             for(ch = 0;ch < nb_display_channels; ch++) {
826                 data[ch] = s->rdft_data + 2*nb_freq*ch;
827                 i = i_start + ch;
828                 for(x = 0; x < 2*nb_freq; x++) {
829                     double w= (x-nb_freq)*(1.0/nb_freq);
830                     data[ch][x]= s->sample_array[i]*(1.0-w*w);
831                     i += channels;
832                     if (i >= SAMPLE_ARRAY_SIZE)
833                         i -= SAMPLE_ARRAY_SIZE;
834                 }
835                 av_rdft_calc(s->rdft, data[ch]);
836             }
837             // least efficient way to do this, we should of course directly access it but it's more than fast enough
838             for(y=0; y<s->height; y++){
839                 double w= 1/sqrt(nb_freq);
840                 int a= sqrt(w*sqrt(data[0][2*y+0]*data[0][2*y+0] + data[0][2*y+1]*data[0][2*y+1]));
841                 int b= (nb_display_channels == 2 ) ? sqrt(w*sqrt(data[1][2*y+0]*data[1][2*y+0]
842                        + data[1][2*y+1]*data[1][2*y+1])) : a;
843                 a= FFMIN(a,255);
844                 b= FFMIN(b,255);
845                 fgcolor = SDL_MapRGB(screen->format, a, b, (a+b)/2);
846
847                 fill_rectangle(screen,
848                             s->xpos, s->height-y, 1, 1,
849                             fgcolor);
850             }
851         }
852         SDL_UpdateRect(screen, s->xpos, s->ytop, 1, s->height);
853         s->xpos++;
854         if(s->xpos >= s->width)
855             s->xpos= s->xleft;
856     }
857 }
858
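/* tear down a VideoState: stop the read and refresh threads, then free the queued pictures and SDL objects */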
859 static void stream_close(VideoState *is)
860 {
861     VideoPicture *vp;
862     int i;
863     /* XXX: use a special url_shutdown call to abort parse cleanly */
864     is->abort_request = 1;
865     SDL_WaitThread(is->read_tid, NULL);
866     SDL_WaitThread(is->refresh_tid, NULL);
867
868     /* free all pictures */
869     for(i=0;i<VIDEO_PICTURE_QUEUE_SIZE; i++) {
870         vp = &is->pictq[i];
871 #if CONFIG_AVFILTER
872         if (vp->picref) {
873             avfilter_unref_buffer(vp->picref);
874             vp->picref = NULL;
875         }
876 #endif
877         if (vp->bmp) {
878             SDL_FreeYUVOverlay(vp->bmp);
879             vp->bmp = NULL;
880         }
881     }
882     SDL_DestroyMutex(is->pictq_mutex);
883     SDL_DestroyCond(is->pictq_cond);
884     SDL_DestroyMutex(is->subpq_mutex);
885     SDL_DestroyCond(is->subpq_cond);
886 #if !CONFIG_AVFILTER
887     if (is->img_convert_ctx)
888         sws_freeContext(is->img_convert_ctx);
889 #endif
890     av_free(is);
891 }
892
893 static void do_exit(VideoState *is)
894 {
895     if (is) {
896         stream_close(is);
897     }
898     av_lockmgr_register(NULL);
899     uninit_opts();
900 #if CONFIG_AVFILTER
901     avfilter_uninit();
902 #endif
903     if (show_status)
904         printf("\n");
905     SDL_Quit();
906     av_log(NULL, AV_LOG_QUIET, "%s", "");
907     exit(0);
908 }
909
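/* create or resize the SDL video surface, honoring fullscreen mode and user-forced dimensions */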
910 static int video_open(VideoState *is){
911     int flags = SDL_HWSURFACE|SDL_ASYNCBLIT|SDL_HWACCEL;
912     int w,h;
913
914     if(is_full_screen) flags |= SDL_FULLSCREEN;
915     else               flags |= SDL_RESIZABLE;
916
917     if (is_full_screen && fs_screen_width) {
918         w = fs_screen_width;
919         h = fs_screen_height;
920     } else if(!is_full_screen && screen_width){
921         w = screen_width;
922         h = screen_height;
923 #if CONFIG_AVFILTER
924     }else if (is->out_video_filter && is->out_video_filter->inputs[0]){
925         w = is->out_video_filter->inputs[0]->w;
926         h = is->out_video_filter->inputs[0]->h;
927 #else
928     }else if (is->video_st && is->video_st->codec->width){
929         w = is->video_st->codec->width;
930         h = is->video_st->codec->height;
931 #endif
932     } else {
933         w = 640;
934         h = 480;
935     }
936     if(screen && is->width == screen->w && screen->w == w
937        && is->height== screen->h && screen->h == h)
938         return 0;
939
940 #ifndef __APPLE__
941     screen = SDL_SetVideoMode(w, h, 0, flags);
942 #else
943     /* setting bits_per_pixel = 0 or 32 causes blank video on OS X */
944     screen = SDL_SetVideoMode(w, h, 24, flags);
945 #endif
946     if (!screen) {
947         fprintf(stderr, "SDL: could not set video mode - exiting\n");
948         do_exit(is);
949     }
950     if (!window_title)
951         window_title = input_filename;
952     SDL_WM_SetCaption(window_title, window_title);
953
954     is->width = screen->w;
955     is->height = screen->h;
956
957     return 0;
958 }
959
960 /* display the current picture, if any */
961 static void video_display(VideoState *is)
962 {
963     if(!screen)
964         video_open(is);
965     if (is->audio_st && is->show_mode != SHOW_MODE_VIDEO)
966         video_audio_display(is);
967     else if (is->video_st)
968         video_image_display(is);
969 }
970
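/* periodically push FF_REFRESH_EVENT into the SDL event queue; the actual drawing is done by video_refresh() from the event loop */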
971 static int refresh_thread(void *opaque)
972 {
973     VideoState *is= opaque;
974     while(!is->abort_request){
975         SDL_Event event;
976         event.type = FF_REFRESH_EVENT;
977         event.user.data1 = opaque;
978         if(!is->refresh){
979             is->refresh=1;
980             SDL_PushEvent(&event);
981         }
982         //FIXME ideally we should wait for the correct time, but SDL's event passing is so slow it would be silly
983         usleep(is->audio_st && is->show_mode != SHOW_MODE_VIDEO ? rdftspeed*1000 : 5000);
984     }
985     return 0;
986 }
987
988 /* get the current audio clock value */
989 static double get_audio_clock(VideoState *is)
990 {
991     if (is->paused) {
992         return is->audio_current_pts;
993     } else {
994         return is->audio_current_pts_drift + av_gettime() / 1000000.0;
995     }
996 }
997
998 /* get the current video clock value */
999 static double get_video_clock(VideoState *is)
1000 {
1001     if (is->paused) {
1002         return is->video_current_pts;
1003     } else {
1004         return is->video_current_pts_drift + av_gettime() / 1000000.0;
1005     }
1006 }
1007
1008 /* get the current external clock value */
1009 static double get_external_clock(VideoState *is)
1010 {
1011     int64_t ti;
1012     ti = av_gettime();
1013     return is->external_clock + ((ti - is->external_clock_time) * 1e-6);
1014 }
1015
1016 /* get the current master clock value */
1017 static double get_master_clock(VideoState *is)
1018 {
1019     double val;
1020
1021     if (is->av_sync_type == AV_SYNC_VIDEO_MASTER) {
1022         if (is->video_st)
1023             val = get_video_clock(is);
1024         else
1025             val = get_audio_clock(is);
1026     } else if (is->av_sync_type == AV_SYNC_AUDIO_MASTER) {
1027         if (is->audio_st)
1028             val = get_audio_clock(is);
1029         else
1030             val = get_video_clock(is);
1031     } else {
1032         val = get_external_clock(is);
1033     }
1034     return val;
1035 }
1036
1037 /* seek in the stream */
1038 static void stream_seek(VideoState *is, int64_t pos, int64_t rel, int seek_by_bytes)
1039 {
1040     if (!is->seek_req) {
1041         is->seek_pos = pos;
1042         is->seek_rel = rel;
1043         is->seek_flags &= ~AVSEEK_FLAG_BYTE;
1044         if (seek_by_bytes)
1045             is->seek_flags |= AVSEEK_FLAG_BYTE;
1046         is->seek_req = 1;
1047     }
1048 }
1049
1050 /* pause or resume the video */
1051 static void stream_toggle_pause(VideoState *is)
1052 {
1053     if (is->paused) {
1054         is->frame_timer += av_gettime() / 1000000.0 + is->video_current_pts_drift - is->video_current_pts;
1055         if(is->read_pause_return != AVERROR(ENOSYS)){
1056             is->video_current_pts = is->video_current_pts_drift + av_gettime() / 1000000.0;
1057         }
1058         is->video_current_pts_drift = is->video_current_pts - av_gettime() / 1000000.0;
1059     }
1060     is->paused = !is->paused;
1061 }
1062
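/* compute the wall-clock time at which the given frame should be displayed, stretching or shrinking the nominal delay to follow the master clock */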
1063 static double compute_target_time(double frame_current_pts, VideoState *is)
1064 {
1065     double delay, sync_threshold, diff;
1066
1067     /* compute nominal delay */
1068     delay = frame_current_pts - is->frame_last_pts;
1069     if (delay <= 0 || delay >= 10.0) {
1070         /* if incorrect delay, use previous one */
1071         delay = is->frame_last_delay;
1072     } else {
1073         is->frame_last_delay = delay;
1074     }
1075     is->frame_last_pts = frame_current_pts;
1076
1077     /* update delay to follow master synchronisation source */
1078     if (((is->av_sync_type == AV_SYNC_AUDIO_MASTER && is->audio_st) ||
1079          is->av_sync_type == AV_SYNC_EXTERNAL_CLOCK)) {
1080         /* if video is slave, we try to correct big delays by
1081            duplicating or deleting a frame */
1082         diff = get_video_clock(is) - get_master_clock(is);
1083
1084         /* skip or repeat frame. We take into account the
1085            delay to compute the threshold. I still don't know
1086            if it is the best guess */
1087         sync_threshold = FFMAX(AV_SYNC_THRESHOLD, delay);
1088         if (fabs(diff) < AV_NOSYNC_THRESHOLD) {
1089             if (diff <= -sync_threshold)
1090                 delay = 0;
1091             else if (diff >= sync_threshold)
1092                 delay = 2 * delay;
1093         }
1094     }
1095     is->frame_timer += delay;
1096
1097     av_dlog(NULL, "video: delay=%0.3f pts=%0.3f A-V=%f\n",
1098             delay, frame_current_pts, -diff);
1099
1100     return is->frame_timer;
1101 }
1102
1103 /* called to display each frame */
1104 static void video_refresh(void *opaque)
1105 {
1106     VideoState *is = opaque;
1107     VideoPicture *vp;
1108
1109     SubPicture *sp, *sp2;
1110
1111     if (is->video_st) {
1112 retry:
1113         if (is->pictq_size == 0) {
1114             // nothing to do, no picture to display in the queue
1115         } else {
1116             double time= av_gettime()/1000000.0;
1117             double next_target;
1118             /* dequeue the picture */
1119             vp = &is->pictq[is->pictq_rindex];
1120
1121             if(time < vp->target_clock)
1122                 return;
1123             /* update current video pts */
1124             is->video_current_pts = vp->pts;
1125             is->video_current_pts_drift = is->video_current_pts - time;
1126             is->video_current_pos = vp->pos;
1127             if(is->pictq_size > 1){
1128                 VideoPicture *nextvp= &is->pictq[(is->pictq_rindex+1)%VIDEO_PICTURE_QUEUE_SIZE];
1129                 assert(nextvp->target_clock >= vp->target_clock);
1130                 next_target= nextvp->target_clock;
1131             }else{
1132                 next_target= vp->target_clock + vp->duration;
1133             }
1134             if((framedrop>0 || (framedrop && is->audio_st)) && time > next_target){
1135                 is->skip_frames *= 1.0 + FRAME_SKIP_FACTOR;
1136                 if(is->pictq_size > 1){
1137                     /* update queue size and signal for next picture */
1138                     if (++is->pictq_rindex == VIDEO_PICTURE_QUEUE_SIZE)
1139                         is->pictq_rindex = 0;
1140
1141                     SDL_LockMutex(is->pictq_mutex);
1142                     is->pictq_size--;
1143                     SDL_CondSignal(is->pictq_cond);
1144                     SDL_UnlockMutex(is->pictq_mutex);
1145                     goto retry;
1146                 }
1147             }
1148
1149             if(is->subtitle_st) {
1150                 if (is->subtitle_stream_changed) {
1151                     SDL_LockMutex(is->subpq_mutex);
1152
1153                     while (is->subpq_size) {
1154                         free_subpicture(&is->subpq[is->subpq_rindex]);
1155
1156                         /* update queue size and signal for next picture */
1157                         if (++is->subpq_rindex == SUBPICTURE_QUEUE_SIZE)
1158                             is->subpq_rindex = 0;
1159
1160                         is->subpq_size--;
1161                     }
1162                     is->subtitle_stream_changed = 0;
1163
1164                     SDL_CondSignal(is->subpq_cond);
1165                     SDL_UnlockMutex(is->subpq_mutex);
1166                 } else {
1167                     if (is->subpq_size > 0) {
1168                         sp = &is->subpq[is->subpq_rindex];
1169
1170                         if (is->subpq_size > 1)
1171                             sp2 = &is->subpq[(is->subpq_rindex + 1) % SUBPICTURE_QUEUE_SIZE];
1172                         else
1173                             sp2 = NULL;
1174
1175                         if ((is->video_current_pts > (sp->pts + ((float) sp->sub.end_display_time / 1000)))
1176                                 || (sp2 && is->video_current_pts > (sp2->pts + ((float) sp2->sub.start_display_time / 1000))))
1177                         {
1178                             free_subpicture(sp);
1179
1180                             /* update queue size and signal for next picture */
1181                             if (++is->subpq_rindex == SUBPICTURE_QUEUE_SIZE)
1182                                 is->subpq_rindex = 0;
1183
1184                             SDL_LockMutex(is->subpq_mutex);
1185                             is->subpq_size--;
1186                             SDL_CondSignal(is->subpq_cond);
1187                             SDL_UnlockMutex(is->subpq_mutex);
1188                         }
1189                     }
1190                 }
1191             }
1192
1193             /* display picture */
1194             if (!display_disable)
1195                 video_display(is);
1196
1197             /* update queue size and signal for next picture */
1198             if (++is->pictq_rindex == VIDEO_PICTURE_QUEUE_SIZE)
1199                 is->pictq_rindex = 0;
1200
1201             SDL_LockMutex(is->pictq_mutex);
1202             is->pictq_size--;
1203             SDL_CondSignal(is->pictq_cond);
1204             SDL_UnlockMutex(is->pictq_mutex);
1205         }
1206     } else if (is->audio_st) {
1207         /* draw the next audio frame */
1208
1209         /* if only audio stream, then display the audio bars (better
1210            than nothing, just to test the implementation) */
1211
1212         /* display picture */
1213         if (!display_disable)
1214             video_display(is);
1215     }
1216     if (show_status) {
1217         static int64_t last_time;
1218         int64_t cur_time;
1219         int aqsize, vqsize, sqsize;
1220         double av_diff;
1221
1222         cur_time = av_gettime();
1223         if (!last_time || (cur_time - last_time) >= 30000) {
1224             aqsize = 0;
1225             vqsize = 0;
1226             sqsize = 0;
1227             if (is->audio_st)
1228                 aqsize = is->audioq.size;
1229             if (is->video_st)
1230                 vqsize = is->videoq.size;
1231             if (is->subtitle_st)
1232                 sqsize = is->subtitleq.size;
1233             av_diff = 0;
1234             if (is->audio_st && is->video_st)
1235                 av_diff = get_audio_clock(is) - get_video_clock(is);
1236             printf("%7.2f A-V:%7.3f s:%3.1f aq=%5dKB vq=%5dKB sq=%5dB f=%"PRId64"/%"PRId64"   \r",
1237                    get_master_clock(is),
1238                    av_diff,
1239                    FFMAX(is->skip_frames-1, 0),
1240                    aqsize / 1024,
1241                    vqsize / 1024,
1242                    sqsize,
1243                    is->video_st ? is->video_st->codec->pts_correction_num_faulty_dts : 0,
1244                    is->video_st ? is->video_st->codec->pts_correction_num_faulty_pts : 0);
1245             fflush(stdout);
1246             last_time = cur_time;
1247         }
1248     }
1249 }
1250
1251 /* allocate a picture (this needs to be done in the main thread to avoid
1252    potential locking problems) */
1253 static void alloc_picture(void *opaque)
1254 {
1255     VideoState *is = opaque;
1256     VideoPicture *vp;
1257
1258     vp = &is->pictq[is->pictq_windex];
1259
1260     if (vp->bmp)
1261         SDL_FreeYUVOverlay(vp->bmp);
1262
1263 #if CONFIG_AVFILTER
1264     if (vp->picref)
1265         avfilter_unref_buffer(vp->picref);
1266     vp->picref = NULL;
1267
1268     vp->width   = is->out_video_filter->inputs[0]->w;
1269     vp->height  = is->out_video_filter->inputs[0]->h;
1270     vp->pix_fmt = is->out_video_filter->inputs[0]->format;
1271 #else
1272     vp->width   = is->video_st->codec->width;
1273     vp->height  = is->video_st->codec->height;
1274     vp->pix_fmt = is->video_st->codec->pix_fmt;
1275 #endif
1276
1277     vp->bmp = SDL_CreateYUVOverlay(vp->width, vp->height,
1278                                    SDL_YV12_OVERLAY,
1279                                    screen);
1280     if (!vp->bmp || vp->bmp->pitches[0] < vp->width) {
1281         /* SDL allocates a buffer smaller than requested if the video
1282          * overlay hardware is unable to support the requested size. */
1283         fprintf(stderr, "Error: the video system does not support an image\n"
1284                         "size of %dx%d pixels. Try using -lowres or -vf \"scale=w:h\"\n"
1285                         "to reduce the image size.\n", vp->width, vp->height );
1286         do_exit(is);
1287     }
1288
1289     SDL_LockMutex(is->pictq_mutex);
1290     vp->allocated = 1;
1291     SDL_CondSignal(is->pictq_cond);
1292     SDL_UnlockMutex(is->pictq_mutex);
1293 }
1294
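/* convert a decoded frame to the overlay format and append it to the picture queue, waiting while the queue is full */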
1295 static int queue_picture(VideoState *is, AVFrame *src_frame, double pts1, int64_t pos)
1296 {
1297     VideoPicture *vp;
1298     double frame_delay, pts = pts1;
1299
1300     /* compute the exact PTS for the picture if it is omitted in the stream
1301      * pts1 is the dts of the pkt / pts of the frame */
1302     if (pts != 0) {
1303         /* update video clock with pts, if present */
1304         is->video_clock = pts;
1305     } else {
1306         pts = is->video_clock;
1307     }
1308     /* update video clock for next frame */
1309     frame_delay = av_q2d(is->video_st->codec->time_base);
1310     /* for MPEG2, the frame can be repeated, so we update the
1311        clock accordingly */
1312     frame_delay += src_frame->repeat_pict * (frame_delay * 0.5);
1313     is->video_clock += frame_delay;
1314
1315 #if defined(DEBUG_SYNC) && 0
1316     printf("frame_type=%c clock=%0.3f pts=%0.3f\n",
1317            av_get_picture_type_char(src_frame->pict_type), pts, pts1);
1318 #endif
1319
1320     /* wait until we have space to put a new picture */
1321     SDL_LockMutex(is->pictq_mutex);
1322
1323     if(is->pictq_size>=VIDEO_PICTURE_QUEUE_SIZE && !is->refresh)
1324         is->skip_frames= FFMAX(1.0 - FRAME_SKIP_FACTOR, is->skip_frames * (1.0-FRAME_SKIP_FACTOR));
1325
1326     while (is->pictq_size >= VIDEO_PICTURE_QUEUE_SIZE &&
1327            !is->videoq.abort_request) {
1328         SDL_CondWait(is->pictq_cond, is->pictq_mutex);
1329     }
1330     SDL_UnlockMutex(is->pictq_mutex);
1331
1332     if (is->videoq.abort_request)
1333         return -1;
1334
1335     vp = &is->pictq[is->pictq_windex];
1336
1337     vp->duration = frame_delay;
1338
1339     /* alloc or resize hardware picture buffer */
1340     if (!vp->bmp ||
1341 #if CONFIG_AVFILTER
1342         vp->width  != is->out_video_filter->inputs[0]->w ||
1343         vp->height != is->out_video_filter->inputs[0]->h) {
1344 #else
1345         vp->width != is->video_st->codec->width ||
1346         vp->height != is->video_st->codec->height) {
1347 #endif
1348         SDL_Event event;
1349
1350         vp->allocated = 0;
1351
1352         /* the allocation must be done in the main thread to avoid
1353            locking problems */
1354         event.type = FF_ALLOC_EVENT;
1355         event.user.data1 = is;
1356         SDL_PushEvent(&event);
1357
1358         /* wait until the picture is allocated */
1359         SDL_LockMutex(is->pictq_mutex);
1360         while (!vp->allocated && !is->videoq.abort_request) {
1361             SDL_CondWait(is->pictq_cond, is->pictq_mutex);
1362         }
1363         /* if the queue is aborted, we have to pop the pending ALLOC event or wait for the allocation to complete */
1364         if (is->videoq.abort_request && SDL_PeepEvents(&event, 1, SDL_GETEVENT, SDL_EVENTMASK(FF_ALLOC_EVENT)) != 1) {
1365             while (!vp->allocated) {
1366                 SDL_CondWait(is->pictq_cond, is->pictq_mutex);
1367             }
1368         }
1369         SDL_UnlockMutex(is->pictq_mutex);
1370
1371         if (is->videoq.abort_request)
1372             return -1;
1373     }
1374
1375     /* if the frame is not skipped, then display it */
1376     if (vp->bmp) {
1377         AVPicture pict;
1378 #if CONFIG_AVFILTER
1379         if(vp->picref)
1380             avfilter_unref_buffer(vp->picref);
1381         vp->picref = src_frame->opaque;
1382 #endif
1383
1384         /* get a pointer to the bitmap */
1385         SDL_LockYUVOverlay (vp->bmp);
1386
1387         memset(&pict,0,sizeof(AVPicture));
1388         pict.data[0] = vp->bmp->pixels[0];
1389         pict.data[1] = vp->bmp->pixels[2];
1390         pict.data[2] = vp->bmp->pixels[1];
1391
1392         pict.linesize[0] = vp->bmp->pitches[0];
1393         pict.linesize[1] = vp->bmp->pitches[2];
1394         pict.linesize[2] = vp->bmp->pitches[1];
1395
1396 #if CONFIG_AVFILTER
1397         //FIXME use direct rendering
1398         av_picture_copy(&pict, (AVPicture *)src_frame,
1399                         vp->pix_fmt, vp->width, vp->height);
1400 #else
1401         sws_flags = av_get_int(sws_opts, "sws_flags", NULL);
1402         is->img_convert_ctx = sws_getCachedContext(is->img_convert_ctx,
1403             vp->width, vp->height, vp->pix_fmt, vp->width, vp->height,
1404             PIX_FMT_YUV420P, sws_flags, NULL, NULL, NULL);
1405         if (is->img_convert_ctx == NULL) {
1406             fprintf(stderr, "Cannot initialize the conversion context\n");
1407             exit(1);
1408         }
1409         sws_scale(is->img_convert_ctx, src_frame->data, src_frame->linesize,
1410                   0, vp->height, pict.data, pict.linesize);
1411 #endif
1412         /* update the bitmap content */
1413         SDL_UnlockYUVOverlay(vp->bmp);
1414
1415         vp->pts = pts;
1416         vp->pos = pos;
1417
1418         /* now we can update the picture count */
1419         if (++is->pictq_windex == VIDEO_PICTURE_QUEUE_SIZE)
1420             is->pictq_windex = 0;
1421         SDL_LockMutex(is->pictq_mutex);
1422         vp->target_clock= compute_target_time(vp->pts, is);
1423
1424         is->pictq_size++;
1425         SDL_UnlockMutex(is->pictq_mutex);
1426     }
1427     return 0;
1428 }
1429
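/* fetch one packet from the video queue and decode it; returns 1 when a displayable frame was produced, 0 otherwise, < 0 on abort */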
1430 static int get_video_frame(VideoState *is, AVFrame *frame, int64_t *pts, AVPacket *pkt)
1431 {
1432     int got_picture, i;
1433
1434     if (packet_queue_get(&is->videoq, pkt, 1) < 0)
1435         return -1;
1436
1437     if (pkt->data == flush_pkt.data) {
1438         avcodec_flush_buffers(is->video_st->codec);
1439
1440         SDL_LockMutex(is->pictq_mutex);
1441         // Make sure there are no long-delay timers (ideally we should just flush the queue, but that's harder)
1442         for (i = 0; i < VIDEO_PICTURE_QUEUE_SIZE; i++) {
1443             is->pictq[i].target_clock= 0;
1444         }
1445         while (is->pictq_size && !is->videoq.abort_request) {
1446             SDL_CondWait(is->pictq_cond, is->pictq_mutex);
1447         }
1448         is->video_current_pos = -1;
1449         SDL_UnlockMutex(is->pictq_mutex);
1450
1451         is->frame_last_pts = AV_NOPTS_VALUE;
1452         is->frame_last_delay = 0;
1453         is->frame_timer = (double)av_gettime() / 1000000.0;
1454         is->skip_frames = 1;
1455         is->skip_frames_index = 0;
1456         return 0;
1457     }
1458
1459     avcodec_decode_video2(is->video_st->codec, frame, &got_picture, pkt);
1460
1461     if (got_picture) {
1462         if (decoder_reorder_pts == -1) {
1463             *pts = frame->best_effort_timestamp;
1464         } else if (decoder_reorder_pts) {
1465             *pts = frame->pkt_pts;
1466         } else {
1467             *pts = frame->pkt_dts;
1468         }
1469
1470         if (*pts == AV_NOPTS_VALUE) {
1471             *pts = 0;
1472         }
1473
1474         is->skip_frames_index += 1;
1475         if(is->skip_frames_index >= is->skip_frames){
1476             is->skip_frames_index -= FFMAX(is->skip_frames, 1.0);
1477             return 1;
1478         }
1479
1480     }
1481     return 0;
1482 }
1483
1484 #if CONFIG_AVFILTER
1485 typedef struct {
1486     VideoState *is;
1487     AVFrame *frame;
1488     int use_dr1;
1489 } FilterPriv;
1490
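/* get_buffer() callback for the source filter: let the decoder render directly into an avfilter buffer (DR1) */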
1491 static int input_get_buffer(AVCodecContext *codec, AVFrame *pic)
1492 {
1493     AVFilterContext *ctx = codec->opaque;
1494     AVFilterBufferRef  *ref;
1495     int perms = AV_PERM_WRITE;
1496     int i, w, h, stride[4];
1497     unsigned edge;
1498     int pixel_size;
1499
1500     av_assert0(codec->flags & CODEC_FLAG_EMU_EDGE);
1501
1502     if (codec->codec->capabilities & CODEC_CAP_NEG_LINESIZES)
1503         perms |= AV_PERM_NEG_LINESIZES;
1504
1505     if(pic->buffer_hints & FF_BUFFER_HINTS_VALID) {
1506         if(pic->buffer_hints & FF_BUFFER_HINTS_READABLE) perms |= AV_PERM_READ;
1507         if(pic->buffer_hints & FF_BUFFER_HINTS_PRESERVE) perms |= AV_PERM_PRESERVE;
1508         if(pic->buffer_hints & FF_BUFFER_HINTS_REUSABLE) perms |= AV_PERM_REUSE2;
1509     }
1510     if(pic->reference) perms |= AV_PERM_READ | AV_PERM_PRESERVE;
1511
1512     w = codec->width;
1513     h = codec->height;
1514
1515     if(av_image_check_size(w, h, 0, codec))
1516         return -1;
1517
1518     avcodec_align_dimensions2(codec, &w, &h, stride);
1519     edge = codec->flags & CODEC_FLAG_EMU_EDGE ? 0 : avcodec_get_edge_width();
1520     w += edge << 1;
1521     h += edge << 1;
1522
1523     if(!(ref = avfilter_get_video_buffer(ctx->outputs[0], perms, w, h)))
1524         return -1;
1525
1526     pixel_size = av_pix_fmt_descriptors[ref->format].comp[0].step_minus1+1;
1527     ref->video->w = codec->width;
1528     ref->video->h = codec->height;
1529     for(i = 0; i < 4; i ++) {
1530         unsigned hshift = (i == 1 || i == 2) ? av_pix_fmt_descriptors[ref->format].log2_chroma_w : 0;
1531         unsigned vshift = (i == 1 || i == 2) ? av_pix_fmt_descriptors[ref->format].log2_chroma_h : 0;
1532
1533         if (ref->data[i]) {
1534             ref->data[i]    += ((edge * pixel_size) >> hshift) + ((edge * ref->linesize[i]) >> vshift);
1535         }
1536         pic->data[i]     = ref->data[i];
1537         pic->linesize[i] = ref->linesize[i];
1538     }
1539     pic->opaque = ref;
1540     pic->age    = INT_MAX;
1541     pic->type   = FF_BUFFER_TYPE_USER;
1542     pic->reordered_opaque = codec->reordered_opaque;
1543     if(codec->pkt) pic->pkt_pts = codec->pkt->pts;
1544     else           pic->pkt_pts = AV_NOPTS_VALUE;
1545     return 0;
1546 }
1547
1548 static void input_release_buffer(AVCodecContext *codec, AVFrame *pic)
1549 {
1550     memset(pic->data, 0, sizeof(pic->data));
1551     avfilter_unref_buffer(pic->opaque);
1552 }
1553
1554 static int input_reget_buffer(AVCodecContext *codec, AVFrame *pic)
1555 {
1556     AVFilterBufferRef *ref = pic->opaque;
1557
1558     if (pic->data[0] == NULL) {
1559         pic->buffer_hints |= FF_BUFFER_HINTS_READABLE;
1560         return codec->get_buffer(codec, pic);
1561     }
1562
1563     if ((codec->width != ref->video->w) || (codec->height != ref->video->h) ||
1564         (codec->pix_fmt != ref->format)) {
1565         av_log(codec, AV_LOG_ERROR, "Picture properties changed.\n");
1566         return -1;
1567     }
1568
1569     pic->reordered_opaque = codec->reordered_opaque;
1570     if(codec->pkt) pic->pkt_pts = codec->pkt->pts;
1571     else           pic->pkt_pts = AV_NOPTS_VALUE;
1572     return 0;
1573 }
1574
1575 static int input_init(AVFilterContext *ctx, const char *args, void *opaque)
1576 {
1577     FilterPriv *priv = ctx->priv;
1578     AVCodecContext *codec;
1579     if(!opaque) return -1;
1580
1581     priv->is = opaque;
1582     codec    = priv->is->video_st->codec;
1583     codec->opaque = ctx;
1584     if((codec->codec->capabilities & CODEC_CAP_DR1)
1585     ) {
1586         av_assert0(codec->flags & CODEC_FLAG_EMU_EDGE);
1587         priv->use_dr1 = 1;
1588         codec->get_buffer     = input_get_buffer;
1589         codec->release_buffer = input_release_buffer;
1590         codec->reget_buffer   = input_reget_buffer;
1591         codec->thread_safe_callbacks = 1;
1592     }
1593
1594     priv->frame = avcodec_alloc_frame();
1595
1596     return 0;
1597 }
1598
1599 static void input_uninit(AVFilterContext *ctx)
1600 {
1601     FilterPriv *priv = ctx->priv;
1602     av_free(priv->frame);
1603 }
1604
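/* request_frame(): decode the next video frame and push it into the filter graph as a buffer reference */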
1605 static int input_request_frame(AVFilterLink *link)
1606 {
1607     FilterPriv *priv = link->src->priv;
1608     AVFilterBufferRef *picref;
1609     int64_t pts = 0;
1610     AVPacket pkt;
1611     int ret;
1612
1613     while (!(ret = get_video_frame(priv->is, priv->frame, &pts, &pkt)))
1614         av_free_packet(&pkt);
1615     if (ret < 0)
1616         return -1;
1617
1618     if(priv->use_dr1 && priv->frame->opaque) {
1619         picref = avfilter_ref_buffer(priv->frame->opaque, ~0);
1620     } else {
1621         picref = avfilter_get_video_buffer(link, AV_PERM_WRITE, link->w, link->h);
1622         av_image_copy(picref->data, picref->linesize,
1623                       priv->frame->data, priv->frame->linesize,
1624                       picref->format, link->w, link->h);
1625     }
1626     av_free_packet(&pkt);
1627
1628     avfilter_copy_frame_props(picref, priv->frame);
1629     picref->pts = pts;
1630
1631     avfilter_start_frame(link, picref);
1632     avfilter_draw_slice(link, 0, link->h, 1);
1633     avfilter_end_frame(link);
1634
1635     return 0;
1636 }
1637
1638 static int input_query_formats(AVFilterContext *ctx)
1639 {
1640     FilterPriv *priv = ctx->priv;
1641     enum PixelFormat pix_fmts[] = {
1642         priv->is->video_st->codec->pix_fmt, PIX_FMT_NONE
1643     };
1644
1645     avfilter_set_common_pixel_formats(ctx, avfilter_make_format_list(pix_fmts));
1646     return 0;
1647 }
1648
1649 static int input_config_props(AVFilterLink *link)
1650 {
1651     FilterPriv *priv  = link->src->priv;
1652     AVStream *s = priv->is->video_st;
1653
1654     link->w = s->codec->width;
1655     link->h = s->codec->height;
1656     link->sample_aspect_ratio = s->sample_aspect_ratio.num ?
1657         s->sample_aspect_ratio : s->codec->sample_aspect_ratio;
1658     link->time_base = s->time_base;
1659
1660     return 0;
1661 }
1662
1663 static AVFilter input_filter =
1664 {
1665     .name      = "ffplay_input",
1666
1667     .priv_size = sizeof(FilterPriv),
1668
1669     .init      = input_init,
1670     .uninit    = input_uninit,
1671
1672     .query_formats = input_query_formats,
1673
1674     .inputs    = (AVFilterPad[]) {{ .name = NULL }},
1675     .outputs   = (AVFilterPad[]) {{ .name = "default",
1676                                     .type = AVMEDIA_TYPE_VIDEO,
1677                                     .request_frame = input_request_frame,
1678                                     .config_props  = input_config_props, },
1679                                   { .name = NULL }},
1680 };
1681
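     /* The graph built below is: ffplay_input ("src") -> [optional user
        filters from -vf] -> buffersink ("out"); without -vf the source is
        linked directly to the sink. */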
1682 static int configure_video_filters(AVFilterGraph *graph, VideoState *is, const char *vfilters)
1683 {
1684     char sws_flags_str[128];
1685     int ret;
1686     enum PixelFormat pix_fmts[] = { PIX_FMT_YUV420P, PIX_FMT_NONE };
1687     AVBufferSinkParams *buffersink_params = av_buffersink_params_alloc();
1688     AVFilterContext *filt_src = NULL, *filt_out = NULL;
1689     snprintf(sws_flags_str, sizeof(sws_flags_str), "flags=%d", sws_flags);
1690     graph->scale_sws_opts = av_strdup(sws_flags_str);
1691
1692     if ((ret = avfilter_graph_create_filter(&filt_src, &input_filter, "src",
1693                                             NULL, is, graph)) < 0)
1694         return ret;
1695 #if FF_API_OLD_VSINK_API
1696     ret = avfilter_graph_create_filter(&filt_out, avfilter_get_by_name("buffersink"), "out",
1697                                        NULL, pix_fmts, graph);
1698 #else
1699     buffersink_params->pixel_fmts = pix_fmts;
1700     ret = avfilter_graph_create_filter(&filt_out, avfilter_get_by_name("buffersink"), "out",
1701                                        NULL, buffersink_params, graph);
1702 #endif
1703     av_freep(&buffersink_params);
1704     if (ret < 0)
1705         return ret;
1706
1707     if(vfilters) {
1708         AVFilterInOut *outputs = avfilter_inout_alloc();
1709         AVFilterInOut *inputs  = avfilter_inout_alloc();
1710
1711         outputs->name    = av_strdup("in");
1712         outputs->filter_ctx = filt_src;
1713         outputs->pad_idx = 0;
1714         outputs->next    = NULL;
1715
1716         inputs->name    = av_strdup("out");
1717         inputs->filter_ctx = filt_out;
1718         inputs->pad_idx = 0;
1719         inputs->next    = NULL;
1720
1721         if ((ret = avfilter_graph_parse(graph, vfilters, &inputs, &outputs, NULL)) < 0)
1722             return ret;
1723     } else {
1724         if ((ret = avfilter_link(filt_src, 0, filt_out, 0)) < 0)
1725             return ret;
1726     }
1727
1728     if ((ret = avfilter_graph_config(graph, NULL)) < 0)
1729         return ret;
1730
1731     is->out_video_filter = filt_out;
1732
1733     return ret;
1734 }
1735
1736 #endif  /* CONFIG_AVFILTER */
1737
1738 static int video_thread(void *arg)
1739 {
1740     VideoState *is = arg;
1741     AVFrame *frame= avcodec_alloc_frame();
1742     int64_t pts_int = AV_NOPTS_VALUE, pos = -1;
1743     double pts;
1744     int ret;
1745
1746 #if CONFIG_AVFILTER
1747     AVFilterGraph *graph = avfilter_graph_alloc();
1748     AVFilterContext *filt_out = NULL;
1749     int last_w = is->video_st->codec->width;
1750     int last_h = is->video_st->codec->height;
1751
1752     if ((ret = configure_video_filters(graph, is, vfilters)) < 0)
1753         goto the_end;
1754     filt_out = is->out_video_filter;
1755 #endif
1756
1757     for(;;) {
1758 #if !CONFIG_AVFILTER
1759         AVPacket pkt;
1760 #else
1761         AVFilterBufferRef *picref;
1762         AVRational tb = filt_out->inputs[0]->time_base;
1763 #endif
1764         while (is->paused && !is->videoq.abort_request)
1765             SDL_Delay(10);
1766 #if CONFIG_AVFILTER
1767         if (   last_w != is->video_st->codec->width
1768             || last_h != is->video_st->codec->height) {
1769             av_log(NULL, AV_LOG_INFO, "Frame changed from size:%dx%d to size:%dx%d\n",
1770                    last_w, last_h, is->video_st->codec->width, is->video_st->codec->height);
1771             avfilter_graph_free(&graph);
1772             graph = avfilter_graph_alloc();
1773             if ((ret = configure_video_filters(graph, is, vfilters)) < 0)
1774                 goto the_end;
1775             filt_out = is->out_video_filter;
1776             last_w = is->video_st->codec->width;
1777             last_h = is->video_st->codec->height;
1778         }
1779         ret = av_buffersink_get_buffer_ref(filt_out, &picref, 0);
1780         if (picref) {
1781             avfilter_fill_frame_from_video_buffer_ref(frame, picref);
1782             pts_int = picref->pts;
1783             pos     = picref->pos;
1784             frame->opaque = picref;
1785         }
1786
1787         if (av_cmp_q(tb, is->video_st->time_base)) {
1788             av_unused int64_t pts1 = pts_int;
1789             pts_int = av_rescale_q(pts_int, tb, is->video_st->time_base);
1790             av_dlog(NULL, "video_thread(): "
1791                     "tb:%d/%d pts:%"PRId64" -> tb:%d/%d pts:%"PRId64"\n",
1792                     tb.num, tb.den, pts1,
1793                     is->video_st->time_base.num, is->video_st->time_base.den, pts_int);
1794         }
1795 #else
1796         ret = get_video_frame(is, frame, &pts_int, &pkt);
1797         pos = pkt.pos;
1798         av_free_packet(&pkt);
1799 #endif
1800
1801         if (ret < 0) goto the_end;
1802
1803 #if CONFIG_AVFILTER
1804         if (!picref)
1805             continue;
1806 #endif
1807
1808         pts = pts_int*av_q2d(is->video_st->time_base);
1809
1810         ret = queue_picture(is, frame, pts, pos);
1811
1812         if (ret < 0)
1813             goto the_end;
1814
1815         if (is->step)
1816             stream_toggle_pause(is);
1817     }
1818  the_end:
1819 #if CONFIG_AVFILTER
1820     avfilter_graph_free(&graph);
1821 #endif
1822     av_free(frame);
1823     return 0;
1824 }
1825
1826 static int subtitle_thread(void *arg)
1827 {
1828     VideoState *is = arg;
1829     SubPicture *sp;
1830     AVPacket pkt1, *pkt = &pkt1;
1831     int got_subtitle;
1832     double pts;
1833     int i, j;
1834     int r, g, b, y, u, v, a;
1835
1836     for(;;) {
1837         while (is->paused && !is->subtitleq.abort_request) {
1838             SDL_Delay(10);
1839         }
1840         if (packet_queue_get(&is->subtitleq, pkt, 1) < 0)
1841             break;
1842
1843         if(pkt->data == flush_pkt.data){
1844             avcodec_flush_buffers(is->subtitle_st->codec);
1845             continue;
1846         }
1847         SDL_LockMutex(is->subpq_mutex);
1848         while (is->subpq_size >= SUBPICTURE_QUEUE_SIZE &&
1849                !is->subtitleq.abort_request) {
1850             SDL_CondWait(is->subpq_cond, is->subpq_mutex);
1851         }
1852         SDL_UnlockMutex(is->subpq_mutex);
1853
1854         if (is->subtitleq.abort_request)
1855             return 0;
1856
1857         sp = &is->subpq[is->subpq_windex];
1858
1859         /* NOTE: pts is the PTS of the _first_ subtitle beginning in
1860            this packet, if any */
1861         pts = 0;
1862         if (pkt->pts != AV_NOPTS_VALUE)
1863             pts = av_q2d(is->subtitle_st->time_base)*pkt->pts;
1864
1865         avcodec_decode_subtitle2(is->subtitle_st->codec, &sp->sub,
1866                                  &got_subtitle, pkt);
1867
1868         if (got_subtitle && sp->sub.format == 0) {
1869             sp->pts = pts;
1870
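                 /* convert each rect's palette in place from RGBA to YUVA
                    using the CCIR RGB->YUV macros, so it can be blended onto
                    the YUV video frame later */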
1871             for (i = 0; i < sp->sub.num_rects; i++)
1872             {
1873                 for (j = 0; j < sp->sub.rects[i]->nb_colors; j++)
1874                 {
1875                     RGBA_IN(r, g, b, a, (uint32_t*)sp->sub.rects[i]->pict.data[1] + j);
1876                     y = RGB_TO_Y_CCIR(r, g, b);
1877                     u = RGB_TO_U_CCIR(r, g, b, 0);
1878                     v = RGB_TO_V_CCIR(r, g, b, 0);
1879                     YUVA_OUT((uint32_t*)sp->sub.rects[i]->pict.data[1] + j, y, u, v, a);
1880                 }
1881             }
1882
1883             /* now we can update the picture count */
1884             if (++is->subpq_windex == SUBPICTURE_QUEUE_SIZE)
1885                 is->subpq_windex = 0;
1886             SDL_LockMutex(is->subpq_mutex);
1887             is->subpq_size++;
1888             SDL_UnlockMutex(is->subpq_mutex);
1889         }
1890         av_free_packet(pkt);
1891     }
1892     return 0;
1893 }
1894
1895 /* copy samples for viewing in the wave/spectrum display window */
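     /* sample_array is used as a ring buffer: the write index wraps at
        SAMPLE_ARRAY_SIZE, so the display always sees the most recent samples */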
1896 static void update_sample_display(VideoState *is, short *samples, int samples_size)
1897 {
1898     int size, len;
1899
1900     size = samples_size / sizeof(short);
1901     while (size > 0) {
1902         len = SAMPLE_ARRAY_SIZE - is->sample_array_index;
1903         if (len > size)
1904             len = size;
1905         memcpy(is->sample_array + is->sample_array_index, samples, len * sizeof(short));
1906         samples += len;
1907         is->sample_array_index += len;
1908         if (is->sample_array_index >= SAMPLE_ARRAY_SIZE)
1909             is->sample_array_index = 0;
1910         size -= len;
1911     }
1912 }
1913
1914 /* return the new audio buffer size (samples can be added or removed
1915    to improve sync when video or an external clock is the master) */
1916 static int synchronize_audio(VideoState *is, short *samples,
1917                              int samples_size1, double pts)
1918 {
1919     int n, samples_size;
1920     double ref_clock;
1921
1922     n = 2 * is->audio_st->codec->channels;
1923     samples_size = samples_size1;
1924
1925     /* if not master, then we try to remove or add samples to correct the clock */
1926     if (((is->av_sync_type == AV_SYNC_VIDEO_MASTER && is->video_st) ||
1927          is->av_sync_type == AV_SYNC_EXTERNAL_CLOCK)) {
1928         double diff, avg_diff;
1929         int wanted_size, min_size, max_size, nb_samples;
1930
1931         ref_clock = get_master_clock(is);
1932         diff = get_audio_clock(is) - ref_clock;
1933
1934         if (fabs(diff) < AV_NOSYNC_THRESHOLD) {
1935             is->audio_diff_cum = diff + is->audio_diff_avg_coef * is->audio_diff_cum;
1936             if (is->audio_diff_avg_count < AUDIO_DIFF_AVG_NB) {
1937                 /* not enough measurements to have a correct estimate */
1938                 is->audio_diff_avg_count++;
1939             } else {
1940                 /* estimate the A-V difference */
1941                 avg_diff = is->audio_diff_cum * (1.0 - is->audio_diff_avg_coef);
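                     /* audio_diff_cum is the geometric sum
                        diff_n + c*diff_{n-1} + c^2*diff_{n-2} + ... with
                        c = audio_diff_avg_coef, so scaling by (1 - c) yields an
                        exponentially weighted average of recent differences */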
1942
1943                 if (fabs(avg_diff) >= is->audio_diff_threshold) {
1944                     wanted_size = samples_size + ((int)(diff * is->audio_st->codec->sample_rate) * n);
1945                     nb_samples = samples_size / n;
1946
1947                     min_size = ((nb_samples * (100 - SAMPLE_CORRECTION_PERCENT_MAX)) / 100) * n;
1948                     max_size = ((nb_samples * (100 + SAMPLE_CORRECTION_PERCENT_MAX)) / 100) * n;
1949                     if (wanted_size < min_size)
1950                         wanted_size = min_size;
1951                     else if (wanted_size > max_size)
1952                         wanted_size = max_size;
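                         /* e.g. with diff = +0.02 s at 44.1 kHz stereo S16
                            (n = 4) and an 8192-byte chunk, wanted_size is
                            8192 + 0.02*44100*4 = 11720 bytes, which the 10%
                            clamp reduces to max_size = 9008 bytes */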
1953
1954                     /* add or remove samples to correct the sync */
1955                     if (wanted_size < samples_size) {
1956                         /* remove samples */
1957                         samples_size = wanted_size;
1958                     } else if (wanted_size > samples_size) {
1959                         uint8_t *samples_end, *q;
1960                         int nb;
1961
1962                         /* add samples by duplicating the last one */
1963                         nb = (wanted_size - samples_size);
1964                         samples_end = (uint8_t *)samples + samples_size - n;
1965                         q = samples_end + n;
1966                         while (nb > 0) {
1967                             memcpy(q, samples_end, n);
1968                             q += n;
1969                             nb -= n;
1970                         }
1971                         samples_size = wanted_size;
1972                     }
1973                 }
1974                 av_dlog(NULL, "diff=%f adiff=%f sample_diff=%d apts=%0.3f vpts=%0.3f %f\n",
1975                         diff, avg_diff, samples_size - samples_size1,
1976                         is->audio_clock, is->video_clock, is->audio_diff_threshold);
1977             }
1978         } else {
1979             /* too big difference : may be initial PTS errors, so
1980                reset A-V filter */
1981             is->audio_diff_avg_count = 0;
1982             is->audio_diff_cum = 0;
1983         }
1984     }
1985
1986     return samples_size;
1987 }
1988
1989 /* decode one audio frame and return its uncompressed size in bytes */
1990 static int audio_decode_frame(VideoState *is, double *pts_ptr)
1991 {
1992     AVPacket *pkt_temp = &is->audio_pkt_temp;
1993     AVPacket *pkt = &is->audio_pkt;
1994     AVCodecContext *dec= is->audio_st->codec;
1995     int n, len1, data_size;
1996     double pts;
1997     int new_packet = 0;
1998     int flush_complete = 0;
1999
2000     for(;;) {
2001         /* NOTE: the audio packet can contain several frames */
2002         while (pkt_temp->size > 0 || (!pkt_temp->data && new_packet)) {
2003             if (flush_complete)
2004                 break;
2005             new_packet = 0;
2006             data_size = sizeof(is->audio_buf1);
2007             len1 = avcodec_decode_audio3(dec,
2008                                         (int16_t *)is->audio_buf1, &data_size,
2009                                         pkt_temp);
2010             if (len1 < 0) {
2011                 /* if error, we skip the frame */
2012                 pkt_temp->size = 0;
2013                 break;
2014             }
2015
2016             pkt_temp->data += len1;
2017             pkt_temp->size -= len1;
2018
2019             if (data_size <= 0) {
2020                 /* stop sending empty packets if the decoder is finished */
2021                 if (!pkt_temp->data && dec->codec->capabilities & CODEC_CAP_DELAY)
2022                     flush_complete = 1;
2023                 continue;
2024             }
2025
2026             if (dec->sample_fmt != is->audio_src_fmt) {
2027                 if (is->reformat_ctx)
2028                     av_audio_convert_free(is->reformat_ctx);
2029                 is->reformat_ctx= av_audio_convert_alloc(AV_SAMPLE_FMT_S16, 1,
2030                                                          dec->sample_fmt, 1, NULL, 0);
2031                 if (!is->reformat_ctx) {
2032                     fprintf(stderr, "Cannot convert %s sample format to %s sample format\n",
2033                         av_get_sample_fmt_name(dec->sample_fmt),
2034                         av_get_sample_fmt_name(AV_SAMPLE_FMT_S16));
2035                     break;
2036                 }
2037                 is->audio_src_fmt= dec->sample_fmt;
2038             }
2039
2040             if (is->reformat_ctx) {
2041                 const void *ibuf[6]= {is->audio_buf1};
2042                 void *obuf[6]= {is->audio_buf2};
2043                 int istride[6]= {av_get_bytes_per_sample(dec->sample_fmt)};
2044                 int ostride[6]= {2};
2045                 int len= data_size/istride[0];
2046                 if (av_audio_convert(is->reformat_ctx, obuf, ostride, ibuf, istride, len)<0) {
2047                     printf("av_audio_convert() failed\n");
2048                     break;
2049                 }
2050                 is->audio_buf= is->audio_buf2;
2051                 /* FIXME: existing code assumes that data_size equals framesize*channels*2;
2052                           remove this legacy cruft */
2053                 data_size= len*2;
2054             }else{
2055                 is->audio_buf= is->audio_buf1;
2056             }
2057
2058             /* if no pts, then compute it */
2059             pts = is->audio_clock;
2060             *pts_ptr = pts;
2061             n = 2 * dec->channels;
2062             is->audio_clock += (double)data_size /
2063                 (double)(n * dec->sample_rate);
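                 /* e.g. for stereo S16 at 48 kHz, n = 4 and 4096 decoded bytes
                    advance the clock by 4096 / (4 * 48000) ~= 21.3 ms */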
2064 #ifdef DEBUG
2065             {
2066                 static double last_clock;
2067                 printf("audio: delay=%0.3f clock=%0.3f pts=%0.3f\n",
2068                        is->audio_clock - last_clock,
2069                        is->audio_clock, pts);
2070                 last_clock = is->audio_clock;
2071             }
2072 #endif
2073             return data_size;
2074         }
2075
2076         /* free the current packet */
2077         if (pkt->data)
2078             av_free_packet(pkt);
2079
2080         if (is->paused || is->audioq.abort_request) {
2081             return -1;
2082         }
2083
2084         /* read next packet */
2085         if ((new_packet = packet_queue_get(&is->audioq, pkt, 1)) < 0)
2086             return -1;
2087
2088         if (pkt->data == flush_pkt.data)
2089             avcodec_flush_buffers(dec);
2090
2091         pkt_temp->data = pkt->data;
2092         pkt_temp->size = pkt->size;
2093
2094         /* update the audio clock with the packet pts, if available */
2095         if (pkt->pts != AV_NOPTS_VALUE) {
2096             is->audio_clock = av_q2d(is->audio_st->time_base)*pkt->pts;
2097         }
2098     }
2099 }
2100
2101 /* prepare a new audio buffer */
2102 static void sdl_audio_callback(void *opaque, Uint8 *stream, int len)
2103 {
2104     VideoState *is = opaque;
2105     int audio_size, len1;
2106     int bytes_per_sec;
2107     double pts;
2108
2109     audio_callback_time = av_gettime();
2110
2111     while (len > 0) {
2112         if (is->audio_buf_index >= is->audio_buf_size) {
2113            audio_size = audio_decode_frame(is, &pts);
2114            if (audio_size < 0) {
2115                 /* if error, just output silence */
2116                is->audio_buf = is->audio_buf1;
2117                is->audio_buf_size = 1024;
2118                memset(is->audio_buf, 0, is->audio_buf_size);
2119            } else {
2120                if (is->show_mode != SHOW_MODE_VIDEO)
2121                    update_sample_display(is, (int16_t *)is->audio_buf, audio_size);
2122                audio_size = synchronize_audio(is, (int16_t *)is->audio_buf, audio_size,
2123                                               pts);
2124                is->audio_buf_size = audio_size;
2125            }
2126            is->audio_buf_index = 0;
2127         }
2128         len1 = is->audio_buf_size - is->audio_buf_index;
2129         if (len1 > len)
2130             len1 = len;
2131         memcpy(stream, (uint8_t *)is->audio_buf + is->audio_buf_index, len1);
2132         len -= len1;
2133         stream += len1;
2134         is->audio_buf_index += len1;
2135     }
2136     bytes_per_sec = is->audio_st->codec->sample_rate *
2137             2 * is->audio_st->codec->channels;
2138     is->audio_write_buf_size = is->audio_buf_size - is->audio_buf_index;
2139     /* Let's assume the audio driver that is used by SDL has two periods. */
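         /* audio_clock is the pts at the end of the last decoded chunk;
            subtracting the data still queued (two hardware periods plus the
            unread part of audio_buf) gives the pts of the sample being heard
            now.  E.g. at 44.1 kHz stereo S16 (176400 bytes/s) a 4096-byte
            hardware buffer alone accounts for 2 * 4096 / 176400 ~= 46 ms. */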
2140     is->audio_current_pts = is->audio_clock - (double)(2 * is->audio_hw_buf_size + is->audio_write_buf_size) / bytes_per_sec;
2141     is->audio_current_pts_drift = is->audio_current_pts - audio_callback_time / 1000000.0;
2142 }
2143
2144 /* open a given stream. Return 0 if OK */
2145 static int stream_component_open(VideoState *is, int stream_index)
2146 {
2147     AVFormatContext *ic = is->ic;
2148     AVCodecContext *avctx;
2149     AVCodec *codec;
2150     SDL_AudioSpec wanted_spec, spec;
2151     AVDictionary *opts;
2152     AVDictionaryEntry *t = NULL;
2153
2154     if (stream_index < 0 || stream_index >= ic->nb_streams)
2155         return -1;
2156     avctx = ic->streams[stream_index]->codec;
2157
2158     opts = filter_codec_opts(codec_opts, avctx->codec_id, ic, ic->streams[stream_index]);
2159
2160     /* prepare audio output */
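         /* ask the decoder for at most two channels; the SDL output opened
            below uses that channel count with S16 samples */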
2161     if (avctx->codec_type == AVMEDIA_TYPE_AUDIO) {
2162         if (avctx->channels > 0) {
2163             avctx->request_channels = FFMIN(2, avctx->channels);
2164         } else {
2165             avctx->request_channels = 2;
2166         }
2167     }
2168
2169     codec = avcodec_find_decoder(avctx->codec_id);
2170     if (!codec)
2171         return -1;
2172
2173     avctx->workaround_bugs = workaround_bugs;
2174     avctx->lowres = lowres;
2175     if(lowres) avctx->flags |= CODEC_FLAG_EMU_EDGE;
2176     avctx->idct_algo= idct;
2177     if(fast) avctx->flags2 |= CODEC_FLAG2_FAST;
2178     avctx->skip_frame= skip_frame;
2179     avctx->skip_idct= skip_idct;
2180     avctx->skip_loop_filter= skip_loop_filter;
2181     avctx->error_recognition= error_recognition;
2182     avctx->error_concealment= error_concealment;
2183
2184     if(codec->capabilities & CODEC_CAP_DR1)
2185         avctx->flags |= CODEC_FLAG_EMU_EDGE;
2186
2187     wanted_spec.freq = avctx->sample_rate;
2188     wanted_spec.channels = avctx->channels;
2189     if (!codec ||
2190         avcodec_open2(avctx, codec, &opts) < 0)
2191         return -1;
2192     if ((t = av_dict_get(opts, "", NULL, AV_DICT_IGNORE_SUFFIX))) {
2193         av_log(NULL, AV_LOG_ERROR, "Option %s not found.\n", t->key);
2194         return AVERROR_OPTION_NOT_FOUND;
2195     }
2196
2197     /* prepare audio output */
2198     if (avctx->codec_type == AVMEDIA_TYPE_AUDIO) {
2199         if(avctx->sample_rate <= 0 || avctx->channels <= 0){
2200             fprintf(stderr, "Invalid sample rate or channel count\n");
2201             return -1;
2202         }
2203         wanted_spec.format = AUDIO_S16SYS;
2204         wanted_spec.silence = 0;
2205         wanted_spec.samples = SDL_AUDIO_BUFFER_SIZE;
2206         wanted_spec.callback = sdl_audio_callback;
2207         wanted_spec.userdata = is;
2208         if (SDL_OpenAudio(&wanted_spec, &spec) < 0) {
2209             fprintf(stderr, "SDL_OpenAudio: %s\n", SDL_GetError());
2210             return -1;
2211         }
2212         is->audio_hw_buf_size = spec.size;
2213         is->audio_src_fmt= AV_SAMPLE_FMT_S16;
2214     }
2215
2216     ic->streams[stream_index]->discard = AVDISCARD_DEFAULT;
2217     switch(avctx->codec_type) {
2218     case AVMEDIA_TYPE_AUDIO:
2219         is->audio_stream = stream_index;
2220         is->audio_st = ic->streams[stream_index];
2221         is->audio_buf_size = 0;
2222         is->audio_buf_index = 0;
2223
2224         /* init averaging filter */
2225         is->audio_diff_avg_coef = exp(log(0.01) / AUDIO_DIFF_AVG_NB);
2226         is->audio_diff_avg_count = 0;
2227         /* since we do not have a precise enough measure of audio FIFO fullness,
2228            we correct audio sync only when the error exceeds this threshold */
2229         is->audio_diff_threshold = 2.0 * SDL_AUDIO_BUFFER_SIZE / avctx->sample_rate;
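             /* e.g. with a 1024-sample SDL callback buffer at 44.1 kHz this is
                roughly 2 * 1024 / 44100 ~= 46 ms */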
2230
2231         memset(&is->audio_pkt, 0, sizeof(is->audio_pkt));
2232         packet_queue_init(&is->audioq);
2233         SDL_PauseAudio(0);
2234         break;
2235     case AVMEDIA_TYPE_VIDEO:
2236         is->video_stream = stream_index;
2237         is->video_st = ic->streams[stream_index];
2238
2239         packet_queue_init(&is->videoq);
2240         is->video_tid = SDL_CreateThread(video_thread, is);
2241         break;
2242     case AVMEDIA_TYPE_SUBTITLE:
2243         is->subtitle_stream = stream_index;
2244         is->subtitle_st = ic->streams[stream_index];
2245         packet_queue_init(&is->subtitleq);
2246
2247         is->subtitle_tid = SDL_CreateThread(subtitle_thread, is);
2248         break;
2249     default:
2250         break;
2251     }
2252     return 0;
2253 }
2254
2255 static void stream_component_close(VideoState *is, int stream_index)
2256 {
2257     AVFormatContext *ic = is->ic;
2258     AVCodecContext *avctx;
2259
2260     if (stream_index < 0 || stream_index >= ic->nb_streams)
2261         return;
2262     avctx = ic->streams[stream_index]->codec;
2263
2264     switch(avctx->codec_type) {
2265     case AVMEDIA_TYPE_AUDIO:
2266         packet_queue_abort(&is->audioq);
2267
2268         SDL_CloseAudio();
2269
2270         packet_queue_end(&is->audioq);
2271         if (is->reformat_ctx)
2272             av_audio_convert_free(is->reformat_ctx);
2273         is->reformat_ctx = NULL;
2274         break;
2275     case AVMEDIA_TYPE_VIDEO:
2276         packet_queue_abort(&is->videoq);
2277
2278         /* note: we also signal this mutex to make sure we unblock the
2279            video thread in all cases */
2280         SDL_LockMutex(is->pictq_mutex);
2281         SDL_CondSignal(is->pictq_cond);
2282         SDL_UnlockMutex(is->pictq_mutex);
2283
2284         SDL_WaitThread(is->video_tid, NULL);
2285
2286         packet_queue_end(&is->videoq);
2287         break;
2288     case AVMEDIA_TYPE_SUBTITLE:
2289         packet_queue_abort(&is->subtitleq);
2290
2291         /* note: we also signal this mutex to make sure we unblock the
2292            subtitle thread in all cases */
2293         SDL_LockMutex(is->subpq_mutex);
2294         is->subtitle_stream_changed = 1;
2295
2296         SDL_CondSignal(is->subpq_cond);
2297         SDL_UnlockMutex(is->subpq_mutex);
2298
2299         SDL_WaitThread(is->subtitle_tid, NULL);
2300
2301         packet_queue_end(&is->subtitleq);
2302         break;
2303     default:
2304         break;
2305     }
2306
2307     ic->streams[stream_index]->discard = AVDISCARD_ALL;
2308     avcodec_close(avctx);
2309     switch(avctx->codec_type) {
2310     case AVMEDIA_TYPE_AUDIO:
2311         is->audio_st = NULL;
2312         is->audio_stream = -1;
2313         break;
2314     case AVMEDIA_TYPE_VIDEO:
2315         is->video_st = NULL;
2316         is->video_stream = -1;
2317         break;
2318     case AVMEDIA_TYPE_SUBTITLE:
2319         is->subtitle_st = NULL;
2320         is->subtitle_stream = -1;
2321         break;
2322     default:
2323         break;
2324     }
2325 }
2326
2327 /* since we have only one decoding thread, we can use a global
2328    variable instead of a thread local variable */
2329 static VideoState *global_video_state;
2330
2331 static int decode_interrupt_cb(void)
2332 {
2333     return (global_video_state && global_video_state->abort_request);
2334 }
2335
2336 /* this thread gets the stream from the disk or the network */
2337 static int read_thread(void *arg)
2338 {
2339     VideoState *is = arg;
2340     AVFormatContext *ic = NULL;
2341     int err, i, ret;
2342     int st_index[AVMEDIA_TYPE_NB];
2343     AVPacket pkt1, *pkt = &pkt1;
2344     int eof=0;
2345     int pkt_in_play_range = 0;
2346     AVDictionaryEntry *t;
2347     AVDictionary **opts;
2348     int orig_nb_streams;
2349
2350     memset(st_index, -1, sizeof(st_index));
2351     is->video_stream = -1;
2352     is->audio_stream = -1;
2353     is->subtitle_stream = -1;
2354
2355     global_video_state = is;
2356     avio_set_interrupt_cb(decode_interrupt_cb);
2357
2358     err = avformat_open_input(&ic, is->filename, is->iformat, &format_opts);
2359     if (err < 0) {
2360         print_error(is->filename, err);
2361         ret = -1;
2362         goto fail;
2363     }
2364     if ((t = av_dict_get(format_opts, "", NULL, AV_DICT_IGNORE_SUFFIX))) {
2365         av_log(NULL, AV_LOG_ERROR, "Option %s not found.\n", t->key);
2366         ret = AVERROR_OPTION_NOT_FOUND;
2367         goto fail;
2368     }
2369     is->ic = ic;
2370
2371     if(genpts)
2372         ic->flags |= AVFMT_FLAG_GENPTS;
2373
2374     opts = setup_find_stream_info_opts(ic, codec_opts);
2375     orig_nb_streams = ic->nb_streams;
2376
2377     err = avformat_find_stream_info(ic, opts);
2378     if (err < 0) {
2379         fprintf(stderr, "%s: could not find codec parameters\n", is->filename);
2380         ret = -1;
2381         goto fail;
2382     }
2383     for (i = 0; i < orig_nb_streams; i++)
2384         av_dict_free(&opts[i]);
2385     av_freep(&opts);
2386
2387     if(ic->pb)
2388         ic->pb->eof_reached= 0; // FIXME: hack, ffplay should probably not use url_feof() to test for the end
2389
2390     if(seek_by_bytes<0)
2391         seek_by_bytes= !!(ic->iformat->flags & AVFMT_TS_DISCONT);
2392
2393     /* if seeking requested, we execute it */
2394     if (start_time != AV_NOPTS_VALUE) {
2395         int64_t timestamp;
2396
2397         timestamp = start_time;
2398         /* add the stream start time */
2399         if (ic->start_time != AV_NOPTS_VALUE)
2400             timestamp += ic->start_time;
2401         ret = avformat_seek_file(ic, -1, INT64_MIN, timestamp, INT64_MAX, 0);
2402         if (ret < 0) {
2403             fprintf(stderr, "%s: could not seek to position %0.3f\n",
2404                     is->filename, (double)timestamp / AV_TIME_BASE);
2405         }
2406     }
2407
2408     for (i = 0; i < ic->nb_streams; i++)
2409         ic->streams[i]->discard = AVDISCARD_ALL;
2410     if (!video_disable)
2411         st_index[AVMEDIA_TYPE_VIDEO] =
2412             av_find_best_stream(ic, AVMEDIA_TYPE_VIDEO,
2413                                 wanted_stream[AVMEDIA_TYPE_VIDEO], -1, NULL, 0);
2414     if (!audio_disable)
2415         st_index[AVMEDIA_TYPE_AUDIO] =
2416             av_find_best_stream(ic, AVMEDIA_TYPE_AUDIO,
2417                                 wanted_stream[AVMEDIA_TYPE_AUDIO],
2418                                 st_index[AVMEDIA_TYPE_VIDEO],
2419                                 NULL, 0);
2420     if (!video_disable)
2421         st_index[AVMEDIA_TYPE_SUBTITLE] =
2422             av_find_best_stream(ic, AVMEDIA_TYPE_SUBTITLE,
2423                                 wanted_stream[AVMEDIA_TYPE_SUBTITLE],
2424                                 (st_index[AVMEDIA_TYPE_AUDIO] >= 0 ?
2425                                  st_index[AVMEDIA_TYPE_AUDIO] :
2426                                  st_index[AVMEDIA_TYPE_VIDEO]),
2427                                 NULL, 0);
2428     if (show_status) {
2429         av_dump_format(ic, 0, is->filename, 0);
2430     }
2431
2432     is->show_mode = show_mode;
2433
2434     /* open the streams */
2435     if (st_index[AVMEDIA_TYPE_AUDIO] >= 0) {
2436         stream_component_open(is, st_index[AVMEDIA_TYPE_AUDIO]);
2437     }
2438
2439     ret=-1;
2440     if (st_index[AVMEDIA_TYPE_VIDEO] >= 0) {
2441         ret= stream_component_open(is, st_index[AVMEDIA_TYPE_VIDEO]);
2442     }
2443     is->refresh_tid = SDL_CreateThread(refresh_thread, is);
2444     if (is->show_mode == SHOW_MODE_NONE)
2445         is->show_mode = ret >= 0 ? SHOW_MODE_VIDEO : SHOW_MODE_RDFT;
2446
2447     if (st_index[AVMEDIA_TYPE_SUBTITLE] >= 0) {
2448         stream_component_open(is, st_index[AVMEDIA_TYPE_SUBTITLE]);
2449     }
2450
2451     if (is->video_stream < 0 && is->audio_stream < 0) {
2452         fprintf(stderr, "%s: could not open codecs\n", is->filename);
2453         ret = -1;
2454         goto fail;
2455     }
2456
2457     for(;;) {
2458         if (is->abort_request)
2459             break;
2460         if (is->paused != is->last_paused) {
2461             is->last_paused = is->paused;
2462             if (is->paused)
2463                 is->read_pause_return= av_read_pause(ic);
2464             else
2465                 av_read_play(ic);
2466         }
2467 #if CONFIG_RTSP_DEMUXER
2468         if (is->paused && !strcmp(ic->iformat->name, "rtsp")) {
2469             /* wait 10 ms to avoid trying to get another packet */
2470             /* XXX: horrible */
2471             SDL_Delay(10);
2472             continue;
2473         }
2474 #endif
2475         if (is->seek_req) {
2476             int64_t seek_target= is->seek_pos;
2477             int64_t seek_min= is->seek_rel > 0 ? seek_target - is->seek_rel + 2: INT64_MIN;
2478             int64_t seek_max= is->seek_rel < 0 ? seek_target - is->seek_rel - 2: INT64_MAX;
2479 //FIXME the +-2 is due to rounding not being done in the correct direction
2480 //      when the seek_pos/seek_rel variables are generated
2481
2482             ret = avformat_seek_file(is->ic, -1, seek_min, seek_target, seek_max, is->seek_flags);
2483             if (ret < 0) {
2484                 fprintf(stderr, "%s: error while seeking\n", is->ic->filename);
2485             }else{
2486                 if (is->audio_stream >= 0) {
2487                     packet_queue_flush(&is->audioq);
2488                     packet_queue_put(&is->audioq, &flush_pkt);
2489                 }
2490                 if (is->subtitle_stream >= 0) {
2491                     packet_queue_flush(&is->subtitleq);
2492                     packet_queue_put(&is->subtitleq, &flush_pkt);
2493                 }
2494                 if (is->video_stream >= 0) {
2495                     packet_queue_flush(&is->videoq);
2496                     packet_queue_put(&is->videoq, &flush_pkt);
2497                 }
2498             }
2499             is->seek_req = 0;
2500             eof= 0;
2501         }
2502
2503         /* if the queues are full, no need to read more */
2504         if (   is->audioq.size + is->videoq.size + is->subtitleq.size > MAX_QUEUE_SIZE
2505             || (   (is->audioq   .size  > MIN_AUDIOQ_SIZE || is->audio_stream<0)
2506                 && (is->videoq   .nb_packets > MIN_FRAMES || is->video_stream<0)
2507                 && (is->subtitleq.nb_packets > MIN_FRAMES || is->subtitle_stream<0))) {
2508             /* wait 10 ms */
2509             SDL_Delay(10);
2510             continue;
2511         }
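             /* at EOF, empty packets are queued so that decoders with
                CODEC_CAP_DELAY can return their remaining buffered frames;
                once every queue drains, playback loops or exits depending on
                -loop / -autoexit */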
2512         if(eof) {
2513             if(is->video_stream >= 0){
2514                 av_init_packet(pkt);
2515                 pkt->data=NULL;
2516                 pkt->size=0;
2517                 pkt->stream_index= is->video_stream;
2518                 packet_queue_put(&is->videoq, pkt);
2519             }
2520             if (is->audio_stream >= 0 &&
2521                 is->audio_st->codec->codec->capabilities & CODEC_CAP_DELAY) {
2522                 av_init_packet(pkt);
2523                 pkt->data = NULL;
2524                 pkt->size = 0;
2525                 pkt->stream_index = is->audio_stream;
2526                 packet_queue_put(&is->audioq, pkt);
2527             }
2528             SDL_Delay(10);
2529             if(is->audioq.size + is->videoq.size + is->subtitleq.size ==0){
2530                 if(loop!=1 && (!loop || --loop)){
2531                     stream_seek(is, start_time != AV_NOPTS_VALUE ? start_time : 0, 0, 0);
2532                 }else if(autoexit){
2533                     ret=AVERROR_EOF;
2534                     goto fail;
2535                 }
2536             }
2537             eof=0;
2538             continue;
2539         }
2540         ret = av_read_frame(ic, pkt);
2541         if (ret < 0) {
2542             if (ret == AVERROR_EOF || url_feof(ic->pb))
2543                 eof=1;
2544             if (ic->pb && ic->pb->error)
2545                 break;
2546             SDL_Delay(100); /* wait for user event */
2547             continue;
2548         }
2549         /* check whether the packet is within the user-specified play range; queue it if so, otherwise discard it */
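             /* a packet is in range when its pts, converted to seconds
                relative to the stream start and offset by -ss, does not
                exceed the -t duration */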
2550         pkt_in_play_range = duration == AV_NOPTS_VALUE ||
2551                 (pkt->pts - ic->streams[pkt->stream_index]->start_time) *
2552                 av_q2d(ic->streams[pkt->stream_index]->time_base) -
2553                 (double)(start_time != AV_NOPTS_VALUE ? start_time : 0)/1000000
2554                 <= ((double)duration/1000000);
2555         if (pkt->stream_index == is->audio_stream && pkt_in_play_range) {
2556             packet_queue_put(&is->audioq, pkt);
2557         } else if (pkt->stream_index == is->video_stream && pkt_in_play_range) {
2558             packet_queue_put(&is->videoq, pkt);
2559         } else if (pkt->stream_index == is->subtitle_stream && pkt_in_play_range) {
2560             packet_queue_put(&is->subtitleq, pkt);
2561         } else {
2562             av_free_packet(pkt);
2563         }
2564     }
2565     /* wait until the end */
2566     while (!is->abort_request) {
2567         SDL_Delay(100);
2568     }
2569
2570     ret = 0;
2571  fail:
2572     /* disable interrupting */
2573     global_video_state = NULL;
2574
2575     /* close each stream */
2576     if (is->audio_stream >= 0)
2577         stream_component_close(is, is->audio_stream);
2578     if (is->video_stream >= 0)
2579         stream_component_close(is, is->video_stream);
2580     if (is->subtitle_stream >= 0)
2581         stream_component_close(is, is->subtitle_stream);
2582     if (is->ic) {
2583         av_close_input_file(is->ic);
2584         is->ic = NULL; /* safety */
2585     }
2586     avio_set_interrupt_cb(NULL);
2587
2588     if (ret != 0) {
2589         SDL_Event event;
2590
2591         event.type = FF_QUIT_EVENT;
2592         event.user.data1 = is;
2593         SDL_PushEvent(&event);
2594     }
2595     return 0;
2596 }
2597
2598 static VideoState *stream_open(const char *filename, AVInputFormat *iformat)
2599 {
2600     VideoState *is;
2601
2602     is = av_mallocz(sizeof(VideoState));
2603     if (!is)
2604         return NULL;
2605     av_strlcpy(is->filename, filename, sizeof(is->filename));
2606     is->iformat = iformat;
2607     is->ytop = 0;
2608     is->xleft = 0;
2609
2610     /* start video display */
2611     is->pictq_mutex = SDL_CreateMutex();
2612     is->pictq_cond = SDL_CreateCond();
2613
2614     is->subpq_mutex = SDL_CreateMutex();
2615     is->subpq_cond = SDL_CreateCond();
2616
2617     is->av_sync_type = av_sync_type;
2618     is->read_tid = SDL_CreateThread(read_thread, is);
2619     if (!is->read_tid) {
2620         av_free(is);
2621         return NULL;
2622     }
2623     return is;
2624 }
2625
2626 static void stream_cycle_channel(VideoState *is, int codec_type)
2627 {
2628     AVFormatContext *ic = is->ic;
2629     int start_index, stream_index;
2630     AVStream *st;
2631
2632     if (codec_type == AVMEDIA_TYPE_VIDEO)
2633         start_index = is->video_stream;
2634     else if (codec_type == AVMEDIA_TYPE_AUDIO)
2635         start_index = is->audio_stream;
2636     else
2637         start_index = is->subtitle_stream;
2638     if (start_index < (codec_type == AVMEDIA_TYPE_SUBTITLE ? -1 : 0))
2639         return;
2640     stream_index = start_index;
2641     for(;;) {
2642         if (++stream_index >= is->ic->nb_streams)
2643         {
2644             if (codec_type == AVMEDIA_TYPE_SUBTITLE)
2645             {
2646                 stream_index = -1;
2647                 goto the_end;
2648             } else
2649                 stream_index = 0;
2650         }
2651         if (stream_index == start_index)
2652             return;
2653         st = ic->streams[stream_index];
2654         if (st->codec->codec_type == codec_type) {
2655             /* check that parameters are OK */
2656             switch(codec_type) {
2657             case AVMEDIA_TYPE_AUDIO:
2658                 if (st->codec->sample_rate != 0 &&
2659                     st->codec->channels != 0)
2660                     goto the_end;
2661                 break;
2662             case AVMEDIA_TYPE_VIDEO:
2663             case AVMEDIA_TYPE_SUBTITLE:
2664                 goto the_end;
2665             default:
2666                 break;
2667             }
2668         }
2669     }
2670  the_end:
2671     stream_component_close(is, start_index);
2672     stream_component_open(is, stream_index);
2673 }
2674
2675
2676 static void toggle_full_screen(VideoState *is)
2677 {
2678     is_full_screen = !is_full_screen;
2679     video_open(is);
2680 }
2681
2682 static void toggle_pause(VideoState *is)
2683 {
2684     stream_toggle_pause(is);
2685     is->step = 0;
2686 }
2687
2688 static void step_to_next_frame(VideoState *is)
2689 {
2690     /* if the stream is paused, unpause it, then step */
2691     if (is->paused)
2692         stream_toggle_pause(is);
2693     is->step = 1;
2694 }
2695
2696 static void toggle_audio_display(VideoState *is)
2697 {
2698     int bgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0x00);
2699     is->show_mode = (is->show_mode + 1) % SHOW_MODE_NB;
2700     fill_rectangle(screen,
2701                 is->xleft, is->ytop, is->width, is->height,
2702                 bgcolor);
2703     SDL_UpdateRect(screen, is->xleft, is->ytop, is->width, is->height);
2704 }
2705
2706 /* handle an event sent by the GUI */
2707 static void event_loop(VideoState *cur_stream)
2708 {
2709     SDL_Event event;
2710     double incr, pos, frac;
2711
2712     for(;;) {
2713         double x;
2714         SDL_WaitEvent(&event);
2715         switch(event.type) {
2716         case SDL_KEYDOWN:
2717             if (exit_on_keydown) {
2718                 do_exit(cur_stream);
2719                 break;
2720             }
2721             switch(event.key.keysym.sym) {
2722             case SDLK_ESCAPE:
2723             case SDLK_q:
2724                 do_exit(cur_stream);
2725                 break;
2726             case SDLK_f:
2727                 toggle_full_screen(cur_stream);
2728                 break;
2729             case SDLK_p:
2730             case SDLK_SPACE:
2731                 toggle_pause(cur_stream);
2732                 break;
2733             case SDLK_s: //S: Step to next frame
2734                 step_to_next_frame(cur_stream);
2735                 break;
2736             case SDLK_a:
2737                 stream_cycle_channel(cur_stream, AVMEDIA_TYPE_AUDIO);
2738                 break;
2739             case SDLK_v:
2740                 stream_cycle_channel(cur_stream, AVMEDIA_TYPE_VIDEO);
2741                 break;
2742             case SDLK_t:
2743                 stream_cycle_channel(cur_stream, AVMEDIA_TYPE_SUBTITLE);
2744                 break;
2745             case SDLK_w:
2746                 toggle_audio_display(cur_stream);
2747                 break;
2748             case SDLK_LEFT:
2749                 incr = -10.0;
2750                 goto do_seek;
2751             case SDLK_RIGHT:
2752                 incr = 10.0;
2753                 goto do_seek;
2754             case SDLK_UP:
2755                 incr = 60.0;
2756                 goto do_seek;
2757             case SDLK_DOWN:
2758                 incr = -60.0;
2759             do_seek:
2760                 if (seek_by_bytes) {
2761                     if (cur_stream->video_stream >= 0 && cur_stream->video_current_pos>=0){
2762                         pos= cur_stream->video_current_pos;
2763                     }else if(cur_stream->audio_stream >= 0 && cur_stream->audio_pkt.pos>=0){
2764                         pos= cur_stream->audio_pkt.pos;
2765                     }else
2766                         pos = avio_tell(cur_stream->ic->pb);
2767                     if (cur_stream->ic->bit_rate)
2768                         incr *= cur_stream->ic->bit_rate / 8.0;
2769                     else
2770                         incr *= 180000.0;
2771                     pos += incr;
2772                     stream_seek(cur_stream, pos, incr, 1);
2773                 } else {
2774                     pos = get_master_clock(cur_stream);
2775                     pos += incr;
2776                     stream_seek(cur_stream, (int64_t)(pos * AV_TIME_BASE), (int64_t)(incr * AV_TIME_BASE), 0);
2777                 }
2778                 break;
2779             default:
2780                 break;
2781             }
2782             break;
2783         case SDL_MOUSEBUTTONDOWN:
2784             if (exit_on_mousedown) {
2785                 do_exit(cur_stream);
2786                 break;
2787             }
2788         case SDL_MOUSEMOTION:
2789             if(event.type ==SDL_MOUSEBUTTONDOWN){
2790                 x= event.button.x;
2791             }else{
2792                 if(event.motion.state != SDL_PRESSED)
2793                     break;
2794                 x= event.motion.x;
2795             }
2796             if(seek_by_bytes || cur_stream->ic->duration<=0){
2797                 uint64_t size=  avio_size(cur_stream->ic->pb);
2798                 stream_seek(cur_stream, size*x/cur_stream->width, 0, 1);
2799             }else{
2800                 int64_t ts;
2801                 int ns, hh, mm, ss;
2802                 int tns, thh, tmm, tss;
2803                 tns = cur_stream->ic->duration/1000000LL;
2804                 thh = tns/3600;
2805                 tmm = (tns%3600)/60;
2806                 tss = (tns%60);
2807                 frac = x/cur_stream->width;
2808                 ns = frac*tns;
2809                 hh = ns/3600;
2810                 mm = (ns%3600)/60;
2811                 ss = (ns%60);
2812                 fprintf(stderr, "Seek to %2.0f%% (%2d:%02d:%02d) of total duration (%2d:%02d:%02d)       \n", frac*100,
2813                         hh, mm, ss, thh, tmm, tss);
2814                 ts = frac*cur_stream->ic->duration;
2815                 if (cur_stream->ic->start_time != AV_NOPTS_VALUE)
2816                     ts += cur_stream->ic->start_time;
2817                 stream_seek(cur_stream, ts, 0, 0);
2818             }
2819             break;
2820         case SDL_VIDEORESIZE:
2821             screen = SDL_SetVideoMode(event.resize.w, event.resize.h, 0,
2822                                       SDL_HWSURFACE|SDL_RESIZABLE|SDL_ASYNCBLIT|SDL_HWACCEL);
2823             screen_width = cur_stream->width = event.resize.w;
2824             screen_height= cur_stream->height= event.resize.h;
2825             break;
2826         case SDL_QUIT:
2827         case FF_QUIT_EVENT:
2828             do_exit(cur_stream);
2829             break;
2830         case FF_ALLOC_EVENT:
2831             video_open(event.user.data1);
2832             alloc_picture(event.user.data1);
2833             break;
2834         case FF_REFRESH_EVENT:
2835             video_refresh(event.user.data1);
2836             cur_stream->refresh=0;
2837             break;
2838         default:
2839             break;
2840         }
2841     }
2842 }
2843
2844 static int opt_frame_size(const char *opt, const char *arg)
2845 {
2846     av_log(NULL, AV_LOG_WARNING, "Option -s is deprecated, use -video_size.\n");
2847     return opt_default("video_size", arg);
2848 }
2849
2850 static int opt_width(const char *opt, const char *arg)
2851 {
2852     screen_width = parse_number_or_die(opt, arg, OPT_INT64, 1, INT_MAX);
2853     return 0;
2854 }
2855
2856 static int opt_height(const char *opt, const char *arg)
2857 {
2858     screen_height = parse_number_or_die(opt, arg, OPT_INT64, 1, INT_MAX);
2859     return 0;
2860 }
2861
2862 static int opt_format(const char *opt, const char *arg)
2863 {
2864     file_iformat = av_find_input_format(arg);
2865     if (!file_iformat) {
2866         fprintf(stderr, "Unknown input format: %s\n", arg);
2867         return AVERROR(EINVAL);
2868     }
2869     return 0;
2870 }
2871
2872 static int opt_frame_pix_fmt(const char *opt, const char *arg)
2873 {
2874     av_log(NULL, AV_LOG_WARNING, "Option -pix_fmt is deprecated, use -pixel_format.\n");
2875     return opt_default("pixel_format", arg);
2876 }
2877
2878 static int opt_sync(const char *opt, const char *arg)
2879 {
2880     if (!strcmp(arg, "audio"))
2881         av_sync_type = AV_SYNC_AUDIO_MASTER;
2882     else if (!strcmp(arg, "video"))
2883         av_sync_type = AV_SYNC_VIDEO_MASTER;
2884     else if (!strcmp(arg, "ext"))
2885         av_sync_type = AV_SYNC_EXTERNAL_CLOCK;
2886     else {
2887         fprintf(stderr, "Unknown value for %s: %s\n", opt, arg);
2888         exit(1);
2889     }
2890     return 0;
2891 }
2892
2893 static int opt_seek(const char *opt, const char *arg)
2894 {
2895     start_time = parse_time_or_die(opt, arg, 1);
2896     return 0;
2897 }
2898
2899 static int opt_duration(const char *opt, const char *arg)
2900 {
2901     duration = parse_time_or_die(opt, arg, 1);
2902     return 0;
2903 }
2904
2905 static int opt_show_mode(const char *opt, const char *arg)
2906 {
2907     show_mode = !strcmp(arg, "video") ? SHOW_MODE_VIDEO :
2908                 !strcmp(arg, "waves") ? SHOW_MODE_WAVES :
2909                 !strcmp(arg, "rdft" ) ? SHOW_MODE_RDFT  :
2910                 parse_number_or_die(opt, arg, OPT_INT, 0, SHOW_MODE_NB-1);
2911     return 0;
2912 }
2913
2914 static void opt_input_file(void *optctx, const char *filename)
2915 {
2916     if (input_filename) {
2917         fprintf(stderr, "Argument '%s' provided as input filename, but '%s' was already specified.\n",
2918                 filename, input_filename);
2919         exit_program(1);
2920     }
2921     if (!strcmp(filename, "-"))
2922         filename = "pipe:";
2923     input_filename = filename;
2924 }
2925
2926 static int dummy;
2927
2928 static const OptionDef options[] = {
2929 #include "cmdutils_common_opts.h"
2930     { "x", HAS_ARG, {(void*)opt_width}, "force displayed width", "width" },
2931     { "y", HAS_ARG, {(void*)opt_height}, "force displayed height", "height" },
2932     { "s", HAS_ARG | OPT_VIDEO, {(void*)opt_frame_size}, "set frame size (WxH or abbreviation)", "size" },
2933     { "fs", OPT_BOOL, {(void*)&is_full_screen}, "force full screen" },
2934     { "an", OPT_BOOL, {(void*)&audio_disable}, "disable audio" },
2935     { "vn", OPT_BOOL, {(void*)&video_disable}, "disable video" },
2936     { "ast", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&wanted_stream[AVMEDIA_TYPE_AUDIO]}, "select desired audio stream", "stream_number" },
2937     { "vst", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&wanted_stream[AVMEDIA_TYPE_VIDEO]}, "select desired video stream", "stream_number" },
2938     { "sst", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&wanted_stream[AVMEDIA_TYPE_SUBTITLE]}, "select desired subtitle stream", "stream_number" },
2939     { "ss", HAS_ARG, {(void*)&opt_seek}, "seek to a given position in seconds", "pos" },
2940     { "t", HAS_ARG, {(void*)&opt_duration}, "play  \"duration\" seconds of audio/video", "duration" },
2941     { "bytes", OPT_INT | HAS_ARG, {(void*)&seek_by_bytes}, "seek by bytes 0=off 1=on -1=auto", "val" },
2942     { "nodisp", OPT_BOOL, {(void*)&display_disable}, "disable graphical display" },
2943     { "f", HAS_ARG, {(void*)opt_format}, "force format", "fmt" },
2944     { "pix_fmt", HAS_ARG | OPT_EXPERT | OPT_VIDEO, {(void*)opt_frame_pix_fmt}, "set pixel format", "format" },
2945     { "stats", OPT_BOOL | OPT_EXPERT, {(void*)&show_status}, "show status", "" },
2946     { "bug", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&workaround_bugs}, "workaround bugs", "" },
2947     { "fast", OPT_BOOL | OPT_EXPERT, {(void*)&fast}, "non spec compliant optimizations", "" },
2948     { "genpts", OPT_BOOL | OPT_EXPERT, {(void*)&genpts}, "generate pts", "" },
2949     { "drp", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&decoder_reorder_pts}, "let decoder reorder pts 0=off 1=on -1=auto", ""},
2950     { "lowres", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&lowres}, "", "" },
2951     { "skiploop", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&skip_loop_filter}, "", "" },
2952     { "skipframe", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&skip_frame}, "", "" },
2953     { "skipidct", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&skip_idct}, "", "" },
2954     { "idct", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&idct}, "set idct algo",  "algo" },
2955     { "er", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&error_recognition}, "set error detection threshold (0-4)",  "threshold" },
2956     { "ec", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&error_concealment}, "set error concealment options",  "bit_mask" },
2957     { "sync", HAS_ARG | OPT_EXPERT, {(void*)opt_sync}, "set audio-video sync. type (type=audio/video/ext)", "type" },
2958     { "autoexit", OPT_BOOL | OPT_EXPERT, {(void*)&autoexit}, "exit at the end", "" },
2959     { "exitonkeydown", OPT_BOOL | OPT_EXPERT, {(void*)&exit_on_keydown}, "exit on key down", "" },
2960     { "exitonmousedown", OPT_BOOL | OPT_EXPERT, {(void*)&exit_on_mousedown}, "exit on mouse down", "" },
2961     { "loop", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&loop}, "set number of times the playback shall be looped", "loop count" },
2962     { "framedrop", OPT_BOOL | OPT_EXPERT, {(void*)&framedrop}, "drop frames when cpu is too slow", "" },
2963     { "window_title", OPT_STRING | HAS_ARG, {(void*)&window_title}, "set window title", "window title" },
2964 #if CONFIG_AVFILTER
2965     { "vf", OPT_STRING | HAS_ARG, {(void*)&vfilters}, "video filters", "filter list" },
2966 #endif
2967     { "rdftspeed", OPT_INT | HAS_ARG| OPT_AUDIO | OPT_EXPERT, {(void*)&rdftspeed}, "rdft speed", "msecs" },
2968     { "showmode", HAS_ARG, {(void*)opt_show_mode}, "select show mode (0 = video, 1 = waves, 2 = RDFT)", "mode" },
2969     { "default", HAS_ARG | OPT_AUDIO | OPT_VIDEO | OPT_EXPERT, {(void*)opt_default}, "generic catch all option", "" },
2970     { "i", OPT_BOOL, {(void *)&dummy}, "read specified file", "input_file"},
2971     { NULL, },
2972 };
2973
2974 static void show_usage(void)
2975 {
2976     printf("Simple media player\n");
2977     printf("usage: %s [options] input_file\n", program_name);
2978     printf("\n");
2979 }
2980
2981 static int opt_help(const char *opt, const char *arg)
2982 {
2983     const AVClass *class;
2984     av_log_set_callback(log_callback_help);
2985     show_usage();
2986     show_help_options(options, "Main options:\n",
2987                       OPT_EXPERT, 0);
2988     show_help_options(options, "\nAdvanced options:\n",
2989                       OPT_EXPERT, OPT_EXPERT);
2990     printf("\n");
2991     class = avcodec_get_class();
2992     av_opt_show2(&class, NULL,
2993                  AV_OPT_FLAG_DECODING_PARAM, 0);
2994     printf("\n");
2995     class = avformat_get_class();
2996     av_opt_show2(&class, NULL,
2997                  AV_OPT_FLAG_DECODING_PARAM, 0);
2998 #if !CONFIG_AVFILTER
2999     printf("\n");
3000     class = sws_get_class();
3001     av_opt_show2(&class, NULL,
3002                  AV_OPT_FLAG_ENCODING_PARAM, 0);
3003 #endif
3004     printf("\nWhile playing:\n"
3005            "q, ESC              quit\n"
3006            "f                   toggle full screen\n"
3007            "p, SPC              pause\n"
3008            "a                   cycle audio channel\n"
3009            "v                   cycle video channel\n"
3010            "t                   cycle subtitle channel\n"
3011            "w                   show audio waves\n"
3012            "s                   activate frame-step mode\n"
3013            "left/right          seek backward/forward 10 seconds\n"
3014            "down/up             seek backward/forward 1 minute\n"
3015            "mouse click         seek to percentage in file corresponding to fraction of width\n"
3016            );
3017     return 0;
3018 }
3019
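     /* lock manager callback registered with av_lockmgr_register(): it backs
        libavcodec's global locks with SDL mutexes and must return 0 on
        success */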
3020 static int lockmgr(void **mtx, enum AVLockOp op)
3021 {
3022    switch(op) {
3023       case AV_LOCK_CREATE:
3024           *mtx = SDL_CreateMutex();
3025           if(!*mtx)
3026               return 1;
3027           return 0;
3028       case AV_LOCK_OBTAIN:
3029           return !!SDL_LockMutex(*mtx);
3030       case AV_LOCK_RELEASE:
3031           return !!SDL_UnlockMutex(*mtx);
3032       case AV_LOCK_DESTROY:
3033           SDL_DestroyMutex(*mtx);
3034           return 0;
3035    }
3036    return 1;
3037 }
3038
3039 /* program entry point */
3040 int main(int argc, char **argv)
3041 {
3042     int flags;
3043     VideoState *is;
3044
3045     av_log_set_flags(AV_LOG_SKIP_REPEATED);
3046
3047     /* register all codecs, demuxers and protocols */
3048     avcodec_register_all();
3049 #if CONFIG_AVDEVICE
3050     avdevice_register_all();
3051 #endif
3052 #if CONFIG_AVFILTER
3053     avfilter_register_all();
3054 #endif
3055     av_register_all();
3056
3057     init_opts();
3058
3059     show_banner();
3060
3061     parse_options(NULL, argc, argv, options, opt_input_file);
3062
3063     if (!input_filename) {
3064         show_usage();
3065         fprintf(stderr, "An input file must be specified\n");
3066         fprintf(stderr, "Use -h to get full help or, even better, run 'man %s'\n", program_name);
3067         exit(1);
3068     }
3069
3070     if (display_disable) {
3071         video_disable = 1;
3072     }
3073     flags = SDL_INIT_VIDEO | SDL_INIT_AUDIO | SDL_INIT_TIMER;
3074     if (audio_disable)
3075         flags &= ~SDL_INIT_AUDIO;
3076 #if !defined(__MINGW32__) && !defined(__APPLE__)
3077     flags |= SDL_INIT_EVENTTHREAD; /* Not supported on Windows or Mac OS X */
3078 #endif
3079     if (SDL_Init (flags)) {
3080         fprintf(stderr, "Could not initialize SDL - %s\n", SDL_GetError());
3081         fprintf(stderr, "(Did you set the DISPLAY variable?)\n");
3082         exit(1);
3083     }
3084
3085     if (!display_disable) {
3086 #if HAVE_SDL_VIDEO_SIZE
3087         const SDL_VideoInfo *vi = SDL_GetVideoInfo();
3088         fs_screen_width = vi->current_w;
3089         fs_screen_height = vi->current_h;
3090 #endif
3091     }
3092
3093     SDL_EventState(SDL_ACTIVEEVENT, SDL_IGNORE);
3094     SDL_EventState(SDL_SYSWMEVENT, SDL_IGNORE);
3095     SDL_EventState(SDL_USEREVENT, SDL_IGNORE);
3096
3097     if (av_lockmgr_register(lockmgr)) {
3098         fprintf(stderr, "Could not initialize lock manager!\n");
3099         do_exit(NULL);
3100     }
3101
3102     av_init_packet(&flush_pkt);
3103     flush_pkt.data= "FLUSH";
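         /* flush_pkt is a sentinel: the decoder threads compare pkt->data
            against flush_pkt.data and call avcodec_flush_buffers() when they
            match after a seek */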
3104
3105     is = stream_open(input_filename, file_iformat);
3106     if (!is) {
3107         fprintf(stderr, "Failed to initialize VideoState!\n");
3108         do_exit(NULL);
3109     }
3110
3111     event_loop(is);
3112
3113     /* never returns */
3114
3115     return 0;
3116 }