1 /*
2  * ffplay : Simple Media Player based on the FFmpeg libraries
3  * Copyright (c) 2003 Fabrice Bellard
4  *
5  * This file is part of FFmpeg.
6  *
7  * FFmpeg is free software; you can redistribute it and/or
8  * modify it under the terms of the GNU Lesser General Public
9  * License as published by the Free Software Foundation; either
10  * version 2.1 of the License, or (at your option) any later version.
11  *
12  * FFmpeg is distributed in the hope that it will be useful,
13  * but WITHOUT ANY WARRANTY; without even the implied warranty of
14  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
15  * Lesser General Public License for more details.
16  *
17  * You should have received a copy of the GNU Lesser General Public
18  * License along with FFmpeg; if not, write to the Free Software
19  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
20  */
21
22 #include "config.h"
23 #include <inttypes.h>
24 #include <math.h>
25 #include <limits.h>
26 #include "libavutil/avstring.h"
27 #include "libavutil/colorspace.h"
28 #include "libavutil/mathematics.h"
29 #include "libavutil/pixdesc.h"
30 #include "libavutil/imgutils.h"
31 #include "libavutil/dict.h"
32 #include "libavutil/parseutils.h"
33 #include "libavutil/samplefmt.h"
34 #include "libavutil/avassert.h"
35 #include "libavformat/avformat.h"
36 #include "libavdevice/avdevice.h"
37 #include "libswscale/swscale.h"
38 #include "libavcodec/audioconvert.h"
39 #include "libavutil/opt.h"
40 #include "libavcodec/avfft.h"
41
42 #if CONFIG_AVFILTER
43 # include "libavfilter/avcodec.h"
44 # include "libavfilter/avfilter.h"
45 # include "libavfilter/avfiltergraph.h"
46 # include "libavfilter/buffersink.h"
47 #endif
48
49 #include <SDL.h>
50 #include <SDL_thread.h>
51
52 #include "cmdutils.h"
53
54 #include <unistd.h>
55 #include <assert.h>
56
57 const char program_name[] = "ffplay";
58 const int program_birth_year = 2003;
59
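/* Thresholds used to throttle the demuxer: the read thread stops reading once
   the queues hold more than MAX_QUEUE_SIZE bytes in total, or enough data per
   stream. */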
60 #define MAX_QUEUE_SIZE (15 * 1024 * 1024)
61 #define MIN_AUDIOQ_SIZE (20 * 16 * 1024)
62 #define MIN_FRAMES 5
63
64 /* SDL audio buffer size, in samples. Should be small to have precise
65    A/V sync as SDL does not have hardware buffer fullness info. */
66 #define SDL_AUDIO_BUFFER_SIZE 1024
67
68 /* no AV sync correction is done if below the AV sync threshold */
69 #define AV_SYNC_THRESHOLD 0.01
70 /* no AV correction is done if too big error */
71 #define AV_NOSYNC_THRESHOLD 10.0
72
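/* Factor by which skip_frames grows each time a frame misses its target time,
   and by which it decays again while the picture queue is full. */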
73 #define FRAME_SKIP_FACTOR 0.05
74
75 /* maximum audio speed change to get correct sync */
76 #define SAMPLE_CORRECTION_PERCENT_MAX 10
77
78 /* we use about AUDIO_DIFF_AVG_NB A-V differences to make the average */
79 #define AUDIO_DIFF_AVG_NB   20
80
81 /* NOTE: the size must be big enough to compensate for the hardware audio buffer size */
82 #define SAMPLE_ARRAY_SIZE (2*65536)
83
84 static int sws_flags = SWS_BICUBIC;
85
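/* FIFO of demuxed packets shared between the read thread and a decoder thread;
   access is protected by an SDL mutex, and a condition variable signals waiting
   consumers (see packet_queue_put()/packet_queue_get() below). */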
86 typedef struct PacketQueue {
87     AVPacketList *first_pkt, *last_pkt;
88     int nb_packets;
89     int size;
90     int abort_request;
91     SDL_mutex *mutex;
92     SDL_cond *cond;
93 } PacketQueue;
94
95 #define VIDEO_PICTURE_QUEUE_SIZE 2
96 #define SUBPICTURE_QUEUE_SIZE 4
97
98 typedef struct VideoPicture {
99     double pts;                                  ///<presentation time stamp for this picture
100     double target_clock;                         ///<av_gettime() time at which this should be displayed ideally
101     double duration;                             ///<expected duration of the frame
102     int64_t pos;                                 ///<byte position in file
103     SDL_Overlay *bmp;
104     int width, height; /* source height & width */
105     int allocated;
106     enum PixelFormat pix_fmt;
107
108 #if CONFIG_AVFILTER
109     AVFilterBufferRef *picref;
110 #endif
111 } VideoPicture;
112
113 typedef struct SubPicture {
114     double pts; /* presentation time stamp for this picture */
115     AVSubtitle sub;
116 } SubPicture;
117
118 enum {
119     AV_SYNC_AUDIO_MASTER, /* default choice */
120     AV_SYNC_VIDEO_MASTER,
121     AV_SYNC_EXTERNAL_CLOCK, /* synchronize to an external clock */
122 };
123
124 typedef struct VideoState {
125     SDL_Thread *read_tid;
126     SDL_Thread *video_tid;
127     SDL_Thread *refresh_tid;
128     AVInputFormat *iformat;
129     int no_background;
130     int abort_request;
131     int paused;
132     int last_paused;
133     int seek_req;
134     int seek_flags;
135     int64_t seek_pos;
136     int64_t seek_rel;
137     int read_pause_return;
138     AVFormatContext *ic;
139
140     int audio_stream;
141
142     int av_sync_type;
143     double external_clock; /* external clock base */
144     int64_t external_clock_time;
145
146     double audio_clock;
147     double audio_diff_cum; /* used for AV difference average computation */
148     double audio_diff_avg_coef;
149     double audio_diff_threshold;
150     int audio_diff_avg_count;
151     AVStream *audio_st;
152     PacketQueue audioq;
153     int audio_hw_buf_size;
154     /* samples output by the codec. We reserve more space for A/V sync
155        compensation */
156     DECLARE_ALIGNED(16,uint8_t,audio_buf1)[(AVCODEC_MAX_AUDIO_FRAME_SIZE * 3) / 2];
157     DECLARE_ALIGNED(16,uint8_t,audio_buf2)[(AVCODEC_MAX_AUDIO_FRAME_SIZE * 3) / 2];
158     uint8_t *audio_buf;
159     unsigned int audio_buf_size; /* in bytes */
160     int audio_buf_index; /* in bytes */
161     int audio_write_buf_size;
162     AVPacket audio_pkt_temp;
163     AVPacket audio_pkt;
164     enum AVSampleFormat audio_src_fmt;
165     AVAudioConvert *reformat_ctx;
166     double audio_current_pts;
167     double audio_current_pts_drift;
168
169     enum ShowMode {
170         SHOW_MODE_NONE = -1, SHOW_MODE_VIDEO = 0, SHOW_MODE_WAVES, SHOW_MODE_RDFT, SHOW_MODE_NB
171     } show_mode;
172     int16_t sample_array[SAMPLE_ARRAY_SIZE];
173     int sample_array_index;
174     int last_i_start;
175     RDFTContext *rdft;
176     int rdft_bits;
177     FFTSample *rdft_data;
178     int xpos;
179
180     SDL_Thread *subtitle_tid;
181     int subtitle_stream;
182     int subtitle_stream_changed;
183     AVStream *subtitle_st;
184     PacketQueue subtitleq;
185     SubPicture subpq[SUBPICTURE_QUEUE_SIZE];
186     int subpq_size, subpq_rindex, subpq_windex;
187     SDL_mutex *subpq_mutex;
188     SDL_cond *subpq_cond;
189
190     double frame_timer;
191     double frame_last_pts;
192     double frame_last_delay;
193     double video_clock;                          ///<pts of last decoded frame / predicted pts of next decoded frame
194     int video_stream;
195     AVStream *video_st;
196     PacketQueue videoq;
197     double video_current_pts;                    ///<current displayed pts (different from video_clock if frame fifos are used)
198     double video_current_pts_drift;              ///<video_current_pts - time (av_gettime) at which we updated video_current_pts - used to have running video pts
199     int64_t video_current_pos;                   ///<current displayed file pos
200     VideoPicture pictq[VIDEO_PICTURE_QUEUE_SIZE];
201     int pictq_size, pictq_rindex, pictq_windex;
202     SDL_mutex *pictq_mutex;
203     SDL_cond *pictq_cond;
204 #if !CONFIG_AVFILTER
205     struct SwsContext *img_convert_ctx;
206 #endif
207
208     char filename[1024];
209     int width, height, xleft, ytop;
210     int step;
211
212 #if CONFIG_AVFILTER
213     AVFilterContext *out_video_filter;          ///<the last filter in the video chain
214 #endif
215
216     float skip_frames;
217     float skip_frames_index;
218     int refresh;
219 } VideoState;
220
221 static int opt_help(const char *opt, const char *arg);
222
223 /* options specified by the user */
224 static AVInputFormat *file_iformat;
225 static const char *input_filename;
226 static const char *window_title;
227 static int fs_screen_width;
228 static int fs_screen_height;
229 static int screen_width = 0;
230 static int screen_height = 0;
231 static int audio_disable;
232 static int video_disable;
233 static int wanted_stream[AVMEDIA_TYPE_NB]={
234     [AVMEDIA_TYPE_AUDIO]=-1,
235     [AVMEDIA_TYPE_VIDEO]=-1,
236     [AVMEDIA_TYPE_SUBTITLE]=-1,
237 };
238 static int seek_by_bytes=-1;
239 static int display_disable;
240 static int show_status = 1;
241 static int av_sync_type = AV_SYNC_AUDIO_MASTER;
242 static int64_t start_time = AV_NOPTS_VALUE;
243 static int64_t duration = AV_NOPTS_VALUE;
244 static int workaround_bugs = 1;
245 static int fast = 0;
246 static int genpts = 0;
247 static int lowres = 0;
248 static int idct = FF_IDCT_AUTO;
249 static enum AVDiscard skip_frame= AVDISCARD_DEFAULT;
250 static enum AVDiscard skip_idct= AVDISCARD_DEFAULT;
251 static enum AVDiscard skip_loop_filter= AVDISCARD_DEFAULT;
252 static int error_recognition = FF_ER_CAREFUL;
253 static int error_concealment = 3;
254 static int decoder_reorder_pts= -1;
255 static int autoexit;
256 static int exit_on_keydown;
257 static int exit_on_mousedown;
258 static int loop=1;
259 static int framedrop=-1;
260 static enum ShowMode show_mode = SHOW_MODE_NONE;
261
262 static int rdftspeed=20;
263 #if CONFIG_AVFILTER
264 static char *vfilters = NULL;
265 #endif
266
267 /* current context */
268 static int is_full_screen;
269 static int64_t audio_callback_time;
270
271 static AVPacket flush_pkt;
272
273 #define FF_ALLOC_EVENT   (SDL_USEREVENT)
274 #define FF_REFRESH_EVENT (SDL_USEREVENT + 1)
275 #define FF_QUIT_EVENT    (SDL_USEREVENT + 2)
276
277 static SDL_Surface *screen;
278
279 void exit_program(int ret)
280 {
281     exit(ret);
282 }
283
284 static int packet_queue_put(PacketQueue *q, AVPacket *pkt)
285 {
286     AVPacketList *pkt1;
287
288     /* duplicate the packet */
289     if (pkt!=&flush_pkt && av_dup_packet(pkt) < 0)
290         return -1;
291
292     pkt1 = av_malloc(sizeof(AVPacketList));
293     if (!pkt1)
294         return -1;
295     pkt1->pkt = *pkt;
296     pkt1->next = NULL;
297
298
299     SDL_LockMutex(q->mutex);
300
301     if (!q->last_pkt)
302
303         q->first_pkt = pkt1;
304     else
305         q->last_pkt->next = pkt1;
306     q->last_pkt = pkt1;
307     q->nb_packets++;
308     q->size += pkt1->pkt.size + sizeof(*pkt1);
309     /* XXX: should duplicate packet data in DV case */
310     SDL_CondSignal(q->cond);
311
312     SDL_UnlockMutex(q->mutex);
313     return 0;
314 }
315
316 /* packet queue handling */
317 static void packet_queue_init(PacketQueue *q)
318 {
319     memset(q, 0, sizeof(PacketQueue));
320     q->mutex = SDL_CreateMutex();
321     q->cond = SDL_CreateCond();
322     packet_queue_put(q, &flush_pkt);
323 }
324
325 static void packet_queue_flush(PacketQueue *q)
326 {
327     AVPacketList *pkt, *pkt1;
328
329     SDL_LockMutex(q->mutex);
330     for(pkt = q->first_pkt; pkt != NULL; pkt = pkt1) {
331         pkt1 = pkt->next;
332         av_free_packet(&pkt->pkt);
333         av_freep(&pkt);
334     }
335     q->last_pkt = NULL;
336     q->first_pkt = NULL;
337     q->nb_packets = 0;
338     q->size = 0;
339     SDL_UnlockMutex(q->mutex);
340 }
341
342 static void packet_queue_end(PacketQueue *q)
343 {
344     packet_queue_flush(q);
345     SDL_DestroyMutex(q->mutex);
346     SDL_DestroyCond(q->cond);
347 }
348
349 static void packet_queue_abort(PacketQueue *q)
350 {
351     SDL_LockMutex(q->mutex);
352
353     q->abort_request = 1;
354
355     SDL_CondSignal(q->cond);
356
357     SDL_UnlockMutex(q->mutex);
358 }
359
360 /* return < 0 if aborted, 0 if no packet and > 0 if packet.  */
361 static int packet_queue_get(PacketQueue *q, AVPacket *pkt, int block)
362 {
363     AVPacketList *pkt1;
364     int ret;
365
366     SDL_LockMutex(q->mutex);
367
368     for(;;) {
369         if (q->abort_request) {
370             ret = -1;
371             break;
372         }
373
374         pkt1 = q->first_pkt;
375         if (pkt1) {
376             q->first_pkt = pkt1->next;
377             if (!q->first_pkt)
378                 q->last_pkt = NULL;
379             q->nb_packets--;
380             q->size -= pkt1->pkt.size + sizeof(*pkt1);
381             *pkt = pkt1->pkt;
382             av_free(pkt1);
383             ret = 1;
384             break;
385         } else if (!block) {
386             ret = 0;
387             break;
388         } else {
389             SDL_CondWait(q->cond, q->mutex);
390         }
391     }
392     SDL_UnlockMutex(q->mutex);
393     return ret;
394 }
395
396 static inline void fill_rectangle(SDL_Surface *screen,
397                                   int x, int y, int w, int h, int color)
398 {
399     SDL_Rect rect;
400     rect.x = x;
401     rect.y = y;
402     rect.w = w;
403     rect.h = h;
404     SDL_FillRect(screen, &rect, color);
405 }
406
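/* Blend newp into oldp with alpha a in [0..255]. The shift s pre-scales oldp
   and the denominator by 2^s, so newp may be the sum of 2^s samples and the
   result is still a correctly weighted average (used for chroma averaging). */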
407 #define ALPHA_BLEND(a, oldp, newp, s)\
408 ((((oldp << s) * (255 - (a))) + (newp * (a))) / (255 << s))
409
410 #define RGBA_IN(r, g, b, a, s)\
411 {\
412     unsigned int v = ((const uint32_t *)(s))[0];\
413     a = (v >> 24) & 0xff;\
414     r = (v >> 16) & 0xff;\
415     g = (v >> 8) & 0xff;\
416     b = v & 0xff;\
417 }
418
419 #define YUVA_IN(y, u, v, a, s, pal)\
420 {\
421     unsigned int val = ((const uint32_t *)(pal))[*(const uint8_t*)(s)];\
422     a = (val >> 24) & 0xff;\
423     y = (val >> 16) & 0xff;\
424     u = (val >> 8) & 0xff;\
425     v = val & 0xff;\
426 }
427
428 #define YUVA_OUT(d, y, u, v, a)\
429 {\
430     ((uint32_t *)(d))[0] = (a << 24) | (y << 16) | (u << 8) | v;\
431 }
432
433
434 #define BPP 1
435
436 static void blend_subrect(AVPicture *dst, const AVSubtitleRect *rect, int imgw, int imgh)
437 {
438     int wrap, wrap3, width2, skip2;
439     int y, u, v, a, u1, v1, a1, w, h;
440     uint8_t *lum, *cb, *cr;
441     const uint8_t *p;
442     const uint32_t *pal;
443     int dstx, dsty, dstw, dsth;
444
445     dstw = av_clip(rect->w, 0, imgw);
446     dsth = av_clip(rect->h, 0, imgh);
447     dstx = av_clip(rect->x, 0, imgw - dstw);
448     dsty = av_clip(rect->y, 0, imgh - dsth);
449     lum = dst->data[0] + dsty * dst->linesize[0];
450     cb = dst->data[1] + (dsty >> 1) * dst->linesize[1];
451     cr = dst->data[2] + (dsty >> 1) * dst->linesize[2];
452
453     width2 = ((dstw + 1) >> 1) + (dstx & ~dstw & 1);
454     skip2 = dstx >> 1;
455     wrap = dst->linesize[0];
456     wrap3 = rect->pict.linesize[0];
457     p = rect->pict.data[0];
458     pal = (const uint32_t *)rect->pict.data[1];  /* Now in YCrCb! */
459
460     if (dsty & 1) {
461         lum += dstx;
462         cb += skip2;
463         cr += skip2;
464
465         if (dstx & 1) {
466             YUVA_IN(y, u, v, a, p, pal);
467             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
468             cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
469             cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
470             cb++;
471             cr++;
472             lum++;
473             p += BPP;
474         }
475         for(w = dstw - (dstx & 1); w >= 2; w -= 2) {
476             YUVA_IN(y, u, v, a, p, pal);
477             u1 = u;
478             v1 = v;
479             a1 = a;
480             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
481
482             YUVA_IN(y, u, v, a, p + BPP, pal);
483             u1 += u;
484             v1 += v;
485             a1 += a;
486             lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
487             cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
488             cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
489             cb++;
490             cr++;
491             p += 2 * BPP;
492             lum += 2;
493         }
494         if (w) {
495             YUVA_IN(y, u, v, a, p, pal);
496             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
497             cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
498             cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
499             p++;
500             lum++;
501         }
502         p += wrap3 - dstw * BPP;
503         lum += wrap - dstw - dstx;
504         cb += dst->linesize[1] - width2 - skip2;
505         cr += dst->linesize[2] - width2 - skip2;
506     }
507     for(h = dsth - (dsty & 1); h >= 2; h -= 2) {
508         lum += dstx;
509         cb += skip2;
510         cr += skip2;
511
512         if (dstx & 1) {
513             YUVA_IN(y, u, v, a, p, pal);
514             u1 = u;
515             v1 = v;
516             a1 = a;
517             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
518             p += wrap3;
519             lum += wrap;
520             YUVA_IN(y, u, v, a, p, pal);
521             u1 += u;
522             v1 += v;
523             a1 += a;
524             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
525             cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
526             cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
527             cb++;
528             cr++;
529             p += -wrap3 + BPP;
530             lum += -wrap + 1;
531         }
532         for(w = dstw - (dstx & 1); w >= 2; w -= 2) {
533             YUVA_IN(y, u, v, a, p, pal);
534             u1 = u;
535             v1 = v;
536             a1 = a;
537             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
538
539             YUVA_IN(y, u, v, a, p + BPP, pal);
540             u1 += u;
541             v1 += v;
542             a1 += a;
543             lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
544             p += wrap3;
545             lum += wrap;
546
547             YUVA_IN(y, u, v, a, p, pal);
548             u1 += u;
549             v1 += v;
550             a1 += a;
551             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
552
553             YUVA_IN(y, u, v, a, p + BPP, pal);
554             u1 += u;
555             v1 += v;
556             a1 += a;
557             lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
558
559             cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 2);
560             cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 2);
561
562             cb++;
563             cr++;
564             p += -wrap3 + 2 * BPP;
565             lum += -wrap + 2;
566         }
567         if (w) {
568             YUVA_IN(y, u, v, a, p, pal);
569             u1 = u;
570             v1 = v;
571             a1 = a;
572             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
573             p += wrap3;
574             lum += wrap;
575             YUVA_IN(y, u, v, a, p, pal);
576             u1 += u;
577             v1 += v;
578             a1 += a;
579             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
580             cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
581             cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
582             cb++;
583             cr++;
584             p += -wrap3 + BPP;
585             lum += -wrap + 1;
586         }
587         p += wrap3 + (wrap3 - dstw * BPP);
588         lum += wrap + (wrap - dstw - dstx);
589         cb += dst->linesize[1] - width2 - skip2;
590         cr += dst->linesize[2] - width2 - skip2;
591     }
592     /* handle odd height */
593     if (h) {
594         lum += dstx;
595         cb += skip2;
596         cr += skip2;
597
598         if (dstx & 1) {
599             YUVA_IN(y, u, v, a, p, pal);
600             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
601             cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
602             cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
603             cb++;
604             cr++;
605             lum++;
606             p += BPP;
607         }
608         for(w = dstw - (dstx & 1); w >= 2; w -= 2) {
609             YUVA_IN(y, u, v, a, p, pal);
610             u1 = u;
611             v1 = v;
612             a1 = a;
613             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
614
615             YUVA_IN(y, u, v, a, p + BPP, pal);
616             u1 += u;
617             v1 += v;
618             a1 += a;
619             lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
620             cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
621             cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
622             cb++;
623             cr++;
624             p += 2 * BPP;
625             lum += 2;
626         }
627         if (w) {
628             YUVA_IN(y, u, v, a, p, pal);
629             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
630             cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
631             cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
632         }
633     }
634 }
635
636 static void free_subpicture(SubPicture *sp)
637 {
638     avsubtitle_free(&sp->sub);
639 }
640
641 static void video_image_display(VideoState *is)
642 {
643     VideoPicture *vp;
644     SubPicture *sp;
645     AVPicture pict;
646     float aspect_ratio;
647     int width, height, x, y;
648     SDL_Rect rect;
649     int i;
650
651     vp = &is->pictq[is->pictq_rindex];
652     if (vp->bmp) {
653 #if CONFIG_AVFILTER
654          if (vp->picref->video->sample_aspect_ratio.num == 0)
655              aspect_ratio = 0;
656          else
657              aspect_ratio = av_q2d(vp->picref->video->sample_aspect_ratio);
658 #else
659
660         /* XXX: use variable in the frame */
661         if (is->video_st->sample_aspect_ratio.num)
662             aspect_ratio = av_q2d(is->video_st->sample_aspect_ratio);
663         else if (is->video_st->codec->sample_aspect_ratio.num)
664             aspect_ratio = av_q2d(is->video_st->codec->sample_aspect_ratio);
665         else
666             aspect_ratio = 0;
667 #endif
668         if (aspect_ratio <= 0.0)
669             aspect_ratio = 1.0;
670         aspect_ratio *= (float)vp->width / (float)vp->height;
671
672         if (is->subtitle_st) {
673             if (is->subpq_size > 0) {
674                 sp = &is->subpq[is->subpq_rindex];
675
676                 if (vp->pts >= sp->pts + ((float) sp->sub.start_display_time / 1000)) {
677                     SDL_LockYUVOverlay (vp->bmp);
678
679                     pict.data[0] = vp->bmp->pixels[0];
680                     pict.data[1] = vp->bmp->pixels[2];
681                     pict.data[2] = vp->bmp->pixels[1];
682
683                     pict.linesize[0] = vp->bmp->pitches[0];
684                     pict.linesize[1] = vp->bmp->pitches[2];
685                     pict.linesize[2] = vp->bmp->pitches[1];
686
687                     for (i = 0; i < sp->sub.num_rects; i++)
688                         blend_subrect(&pict, sp->sub.rects[i],
689                                       vp->bmp->w, vp->bmp->h);
690
691                     SDL_UnlockYUVOverlay (vp->bmp);
692                 }
693             }
694         }
695
696
697         /* XXX: we suppose the screen has a 1.0 pixel ratio */
698         height = is->height;
699         width = ((int)rint(height * aspect_ratio)) & ~1;
700         if (width > is->width) {
701             width = is->width;
702             height = ((int)rint(width / aspect_ratio)) & ~1;
703         }
704         x = (is->width - width) / 2;
705         y = (is->height - height) / 2;
706         is->no_background = 0;
707         rect.x = is->xleft + x;
708         rect.y = is->ytop  + y;
709         rect.w = FFMAX(width,  1);
710         rect.h = FFMAX(height, 1);
711         SDL_DisplayYUVOverlay(vp->bmp, &rect);
712     }
713 }
714
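/* positive modulo: in C, a % b is negative for negative a, which would yield
   invalid indices into the circular sample_array */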
715 static inline int compute_mod(int a, int b)
716 {
717     return a < 0 ? a%b + b : a%b;
718 }
719
720 static void video_audio_display(VideoState *s)
721 {
722     int i, i_start, x, y1, y, ys, delay, n, nb_display_channels;
723     int ch, channels, h, h2, bgcolor, fgcolor;
724     int64_t time_diff;
725     int rdft_bits, nb_freq;
726
727     for(rdft_bits=1; (1<<rdft_bits)<2*s->height; rdft_bits++)
728         ;
729     nb_freq= 1<<(rdft_bits-1);
730
731     /* compute display index: center on currently output samples */
732     channels = s->audio_st->codec->channels;
733     nb_display_channels = channels;
734     if (!s->paused) {
735         int data_used= s->show_mode == SHOW_MODE_WAVES ? s->width : (2*nb_freq);
736         n = 2 * channels;
737         delay = s->audio_write_buf_size;
738         delay /= n;
739
740         /* to be more precise, we take into account the time spent since
741            the last buffer computation */
742         if (audio_callback_time) {
743             time_diff = av_gettime() - audio_callback_time;
744             delay -= (time_diff * s->audio_st->codec->sample_rate) / 1000000;
745         }
746
747         delay += 2*data_used;
748         if (delay < data_used)
749             delay = data_used;
750
751         i_start= x = compute_mod(s->sample_array_index - delay * channels, SAMPLE_ARRAY_SIZE);
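        /* oscilloscope-style trigger: look back through the recent samples for
           a zero crossing with the steepest drop, so that the displayed
           waveform stays roughly stable between refreshes */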
752         if (s->show_mode == SHOW_MODE_WAVES) {
753             h= INT_MIN;
754             for(i=0; i<1000; i+=channels){
755                 int idx= (SAMPLE_ARRAY_SIZE + x - i) % SAMPLE_ARRAY_SIZE;
756                 int a= s->sample_array[idx];
757                 int b= s->sample_array[(idx + 4*channels)%SAMPLE_ARRAY_SIZE];
758                 int c= s->sample_array[(idx + 5*channels)%SAMPLE_ARRAY_SIZE];
759                 int d= s->sample_array[(idx + 9*channels)%SAMPLE_ARRAY_SIZE];
760                 int score= a-d;
761                 if(h<score && (b^c)<0){
762                     h= score;
763                     i_start= idx;
764                 }
765             }
766         }
767
768         s->last_i_start = i_start;
769     } else {
770         i_start = s->last_i_start;
771     }
772
773     bgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0x00);
774     if (s->show_mode == SHOW_MODE_WAVES) {
775         fill_rectangle(screen,
776                        s->xleft, s->ytop, s->width, s->height,
777                        bgcolor);
778
779         fgcolor = SDL_MapRGB(screen->format, 0xff, 0xff, 0xff);
780
781         /* total height for one channel */
782         h = s->height / nb_display_channels;
783         /* graph height / 2 */
784         h2 = (h * 9) / 20;
785         for(ch = 0;ch < nb_display_channels; ch++) {
786             i = i_start + ch;
787             y1 = s->ytop + ch * h + (h / 2); /* position of center line */
788             for(x = 0; x < s->width; x++) {
789                 y = (s->sample_array[i] * h2) >> 15;
790                 if (y < 0) {
791                     y = -y;
792                     ys = y1 - y;
793                 } else {
794                     ys = y1;
795                 }
796                 fill_rectangle(screen,
797                                s->xleft + x, ys, 1, y,
798                                fgcolor);
799                 i += channels;
800                 if (i >= SAMPLE_ARRAY_SIZE)
801                     i -= SAMPLE_ARRAY_SIZE;
802             }
803         }
804
805         fgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0xff);
806
807         for(ch = 1;ch < nb_display_channels; ch++) {
808             y = s->ytop + ch * h;
809             fill_rectangle(screen,
810                            s->xleft, y, s->width, 1,
811                            fgcolor);
812         }
813         SDL_UpdateRect(screen, s->xleft, s->ytop, s->width, s->height);
814     }else{
815         nb_display_channels= FFMIN(nb_display_channels, 2);
816         if(rdft_bits != s->rdft_bits){
817             av_rdft_end(s->rdft);
818             av_free(s->rdft_data);
819             s->rdft = av_rdft_init(rdft_bits, DFT_R2C);
820             s->rdft_bits= rdft_bits;
821             s->rdft_data= av_malloc(4*nb_freq*sizeof(*s->rdft_data));
822         }
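        /* window each channel's samples with a Welch (1 - w*w) window, run the
           real FFT, and map the two channels' magnitudes to the color of one
           pixel column per refresh */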
823         {
824             FFTSample *data[2];
825             for(ch = 0;ch < nb_display_channels; ch++) {
826                 data[ch] = s->rdft_data + 2*nb_freq*ch;
827                 i = i_start + ch;
828                 for(x = 0; x < 2*nb_freq; x++) {
829                     double w= (x-nb_freq)*(1.0/nb_freq);
830                     data[ch][x]= s->sample_array[i]*(1.0-w*w);
831                     i += channels;
832                     if (i >= SAMPLE_ARRAY_SIZE)
833                         i -= SAMPLE_ARRAY_SIZE;
834                 }
835                 av_rdft_calc(s->rdft, data[ch]);
836             }
837             // Least efficient way to do this; we should of course access the data directly, but it is more than fast enough
838             for(y=0; y<s->height; y++){
839                 double w= 1/sqrt(nb_freq);
840                 int a= sqrt(w*sqrt(data[0][2*y+0]*data[0][2*y+0] + data[0][2*y+1]*data[0][2*y+1]));
841                 int b= (nb_display_channels == 2 ) ? sqrt(w*sqrt(data[1][2*y+0]*data[1][2*y+0]
842                        + data[1][2*y+1]*data[1][2*y+1])) : a;
843                 a= FFMIN(a,255);
844                 b= FFMIN(b,255);
845                 fgcolor = SDL_MapRGB(screen->format, a, b, (a+b)/2);
846
847                 fill_rectangle(screen,
848                             s->xpos, s->height-y, 1, 1,
849                             fgcolor);
850             }
851         }
852         SDL_UpdateRect(screen, s->xpos, s->ytop, 1, s->height);
853         s->xpos++;
854         if(s->xpos >= s->width)
855             s->xpos= s->xleft;
856     }
857 }
858
859 static void stream_close(VideoState *is)
860 {
861     VideoPicture *vp;
862     int i;
863     /* XXX: use a special url_shutdown call to abort parse cleanly */
864     is->abort_request = 1;
865     SDL_WaitThread(is->read_tid, NULL);
866     SDL_WaitThread(is->refresh_tid, NULL);
867
868     /* free all pictures */
869     for(i=0;i<VIDEO_PICTURE_QUEUE_SIZE; i++) {
870         vp = &is->pictq[i];
871 #if CONFIG_AVFILTER
872         if (vp->picref) {
873             avfilter_unref_buffer(vp->picref);
874             vp->picref = NULL;
875         }
876 #endif
877         if (vp->bmp) {
878             SDL_FreeYUVOverlay(vp->bmp);
879             vp->bmp = NULL;
880         }
881     }
882     SDL_DestroyMutex(is->pictq_mutex);
883     SDL_DestroyCond(is->pictq_cond);
884     SDL_DestroyMutex(is->subpq_mutex);
885     SDL_DestroyCond(is->subpq_cond);
886 #if !CONFIG_AVFILTER
887     if (is->img_convert_ctx)
888         sws_freeContext(is->img_convert_ctx);
889 #endif
890     av_free(is);
891 }
892
893 static void do_exit(VideoState *is)
894 {
895     if (is) {
896         stream_close(is);
897     }
898     av_lockmgr_register(NULL);
899     uninit_opts();
900 #if CONFIG_AVFILTER
901     avfilter_uninit();
902 #endif
903     if (show_status)
904         printf("\n");
905     SDL_Quit();
906     av_log(NULL, AV_LOG_QUIET, "%s", "");
907     exit(0);
908 }
909
910 static int video_open(VideoState *is){
911     int flags = SDL_HWSURFACE|SDL_ASYNCBLIT|SDL_HWACCEL;
912     int w,h;
913
914     if(is_full_screen) flags |= SDL_FULLSCREEN;
915     else               flags |= SDL_RESIZABLE;
916
917     if (is_full_screen && fs_screen_width) {
918         w = fs_screen_width;
919         h = fs_screen_height;
920     } else if(!is_full_screen && screen_width){
921         w = screen_width;
922         h = screen_height;
923 #if CONFIG_AVFILTER
924     }else if (is->out_video_filter && is->out_video_filter->inputs[0]){
925         w = is->out_video_filter->inputs[0]->w;
926         h = is->out_video_filter->inputs[0]->h;
927 #else
928     }else if (is->video_st && is->video_st->codec->width){
929         w = is->video_st->codec->width;
930         h = is->video_st->codec->height;
931 #endif
932     } else {
933         w = 640;
934         h = 480;
935     }
936     if(screen && is->width == screen->w && screen->w == w
937        && is->height== screen->h && screen->h == h)
938         return 0;
939
940 #ifndef __APPLE__
941     screen = SDL_SetVideoMode(w, h, 0, flags);
942 #else
943     /* setting bits_per_pixel = 0 or 32 causes blank video on OS X */
944     screen = SDL_SetVideoMode(w, h, 24, flags);
945 #endif
946     if (!screen) {
947         fprintf(stderr, "SDL: could not set video mode - exiting\n");
948         do_exit(is);
949     }
950     if (!window_title)
951         window_title = input_filename;
952     SDL_WM_SetCaption(window_title, window_title);
953
954     is->width = screen->w;
955     is->height = screen->h;
956
957     return 0;
958 }
959
960 /* display the current picture, if any */
961 static void video_display(VideoState *is)
962 {
963     if(!screen)
964         video_open(is);
965     if (is->audio_st && is->show_mode != SHOW_MODE_VIDEO)
966         video_audio_display(is);
967     else if (is->video_st)
968         video_image_display(is);
969 }
970
971 static int refresh_thread(void *opaque)
972 {
973     VideoState *is= opaque;
974     while(!is->abort_request){
975         SDL_Event event;
976         event.type = FF_REFRESH_EVENT;
977         event.user.data1 = opaque;
978         if(!is->refresh){
979             is->refresh=1;
980             SDL_PushEvent(&event);
981         }
982         //FIXME: ideally we should wait for the correct time, but SDL's event passing is so slow it would be silly
983         usleep(is->audio_st && is->show_mode != SHOW_MODE_VIDEO ? rdftspeed*1000 : 5000);
984     }
985     return 0;
986 }
987
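/* The audio and video clocks are kept as a drift against the system clock:
   *_pts_drift = pts - av_gettime()/1e6 at the moment the pts was last updated,
   so adding the current system time back extrapolates the clock between
   updates without needing a timer. */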
988 /* get the current audio clock value */
989 static double get_audio_clock(VideoState *is)
990 {
991     if (is->paused) {
992         return is->audio_current_pts;
993     } else {
994         return is->audio_current_pts_drift + av_gettime() / 1000000.0;
995     }
996 }
997
998 /* get the current video clock value */
999 static double get_video_clock(VideoState *is)
1000 {
1001     if (is->paused) {
1002         return is->video_current_pts;
1003     } else {
1004         return is->video_current_pts_drift + av_gettime() / 1000000.0;
1005     }
1006 }
1007
1008 /* get the current external clock value */
1009 static double get_external_clock(VideoState *is)
1010 {
1011     int64_t ti;
1012     ti = av_gettime();
1013     return is->external_clock + ((ti - is->external_clock_time) * 1e-6);
1014 }
1015
1016 /* get the current master clock value */
1017 static double get_master_clock(VideoState *is)
1018 {
1019     double val;
1020
1021     if (is->av_sync_type == AV_SYNC_VIDEO_MASTER) {
1022         if (is->video_st)
1023             val = get_video_clock(is);
1024         else
1025             val = get_audio_clock(is);
1026     } else if (is->av_sync_type == AV_SYNC_AUDIO_MASTER) {
1027         if (is->audio_st)
1028             val = get_audio_clock(is);
1029         else
1030             val = get_video_clock(is);
1031     } else {
1032         val = get_external_clock(is);
1033     }
1034     return val;
1035 }
1036
1037 /* seek in the stream */
1038 static void stream_seek(VideoState *is, int64_t pos, int64_t rel, int seek_by_bytes)
1039 {
1040     if (!is->seek_req) {
1041         is->seek_pos = pos;
1042         is->seek_rel = rel;
1043         is->seek_flags &= ~AVSEEK_FLAG_BYTE;
1044         if (seek_by_bytes)
1045             is->seek_flags |= AVSEEK_FLAG_BYTE;
1046         is->seek_req = 1;
1047     }
1048 }
1049
1050 /* pause or resume the video */
1051 static void stream_toggle_pause(VideoState *is)
1052 {
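    /* when resuming, account for the time spent paused so that queued frames
       are not considered late */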
1053     if (is->paused) {
1054         is->frame_timer += av_gettime() / 1000000.0 + is->video_current_pts_drift - is->video_current_pts;
1055         if(is->read_pause_return != AVERROR(ENOSYS)){
1056             is->video_current_pts = is->video_current_pts_drift + av_gettime() / 1000000.0;
1057         }
1058         is->video_current_pts_drift = is->video_current_pts - av_gettime() / 1000000.0;
1059     }
1060     is->paused = !is->paused;
1061 }
1062
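/* Compute the wall-clock time at which the frame with frame_current_pts should
   be shown: start from the pts delta to the previous frame and, when audio or
   the external clock is the master, shrink the delay to 0 or double it to pull
   the video clock back towards the master. The result accumulates into
   frame_timer, the target display time. */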
1063 static double compute_target_time(double frame_current_pts, VideoState *is)
1064 {
1065     double delay, sync_threshold, diff = 0;
1066
1067     /* compute nominal delay */
1068     delay = frame_current_pts - is->frame_last_pts;
1069     if (delay <= 0 || delay >= 10.0) {
1070         /* if incorrect delay, use previous one */
1071         delay = is->frame_last_delay;
1072     } else {
1073         is->frame_last_delay = delay;
1074     }
1075     is->frame_last_pts = frame_current_pts;
1076
1077     /* update delay to follow master synchronisation source */
1078     if (((is->av_sync_type == AV_SYNC_AUDIO_MASTER && is->audio_st) ||
1079          is->av_sync_type == AV_SYNC_EXTERNAL_CLOCK)) {
1080         /* if video is slave, we try to correct big delays by
1081            duplicating or deleting a frame */
1082         diff = get_video_clock(is) - get_master_clock(is);
1083
1084         /* skip or repeat frame. We take into account the
1085            delay to compute the threshold. I still don't know
1086            if it is the best guess */
1087         sync_threshold = FFMAX(AV_SYNC_THRESHOLD, delay);
1088         if (fabs(diff) < AV_NOSYNC_THRESHOLD) {
1089             if (diff <= -sync_threshold)
1090                 delay = 0;
1091             else if (diff >= sync_threshold)
1092                 delay = 2 * delay;
1093         }
1094     }
1095     is->frame_timer += delay;
1096
1097     av_dlog(NULL, "video: delay=%0.3f pts=%0.3f A-V=%f\n",
1098             delay, frame_current_pts, -diff);
1099
1100     return is->frame_timer;
1101 }
1102
1103 /* called to display each frame */
1104 static void video_refresh(void *opaque)
1105 {
1106     VideoState *is = opaque;
1107     VideoPicture *vp;
1108
1109     SubPicture *sp, *sp2;
1110
1111     if (is->video_st) {
1112 retry:
1113         if (is->pictq_size == 0) {
1114             // nothing to do, no picture to display in the queue
1115         } else {
1116             double time= av_gettime()/1000000.0;
1117             double next_target;
1118             /* dequeue the picture */
1119             vp = &is->pictq[is->pictq_rindex];
1120
1121             if(time < vp->target_clock)
1122                 return;
1123             /* update current video pts */
1124             is->video_current_pts = vp->pts;
1125             is->video_current_pts_drift = is->video_current_pts - time;
1126             is->video_current_pos = vp->pos;
1127             if(is->pictq_size > 1){
1128                 VideoPicture *nextvp= &is->pictq[(is->pictq_rindex+1)%VIDEO_PICTURE_QUEUE_SIZE];
1129                 assert(nextvp->target_clock >= vp->target_clock);
1130                 next_target= nextvp->target_clock;
1131             }else{
1132                 next_target= vp->target_clock + vp->duration;
1133             }
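            /* frame dropping: if we are already past the next frame's target
               time, increase skip_frames and, if another picture is queued,
               drop this one instead of displaying it */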
1134             if((framedrop>0 || (framedrop && is->audio_st)) && time > next_target){
1135                 is->skip_frames *= 1.0 + FRAME_SKIP_FACTOR;
1136                 if(is->pictq_size > 1){
1137                     /* update queue size and signal for next picture */
1138                     if (++is->pictq_rindex == VIDEO_PICTURE_QUEUE_SIZE)
1139                         is->pictq_rindex = 0;
1140
1141                     SDL_LockMutex(is->pictq_mutex);
1142                     is->pictq_size--;
1143                     SDL_CondSignal(is->pictq_cond);
1144                     SDL_UnlockMutex(is->pictq_mutex);
1145                     goto retry;
1146                 }
1147             }
1148
1149             if(is->subtitle_st) {
1150                 if (is->subtitle_stream_changed) {
1151                     SDL_LockMutex(is->subpq_mutex);
1152
1153                     while (is->subpq_size) {
1154                         free_subpicture(&is->subpq[is->subpq_rindex]);
1155
1156                         /* update queue size and signal for next picture */
1157                         if (++is->subpq_rindex == SUBPICTURE_QUEUE_SIZE)
1158                             is->subpq_rindex = 0;
1159
1160                         is->subpq_size--;
1161                     }
1162                     is->subtitle_stream_changed = 0;
1163
1164                     SDL_CondSignal(is->subpq_cond);
1165                     SDL_UnlockMutex(is->subpq_mutex);
1166                 } else {
1167                     if (is->subpq_size > 0) {
1168                         sp = &is->subpq[is->subpq_rindex];
1169
1170                         if (is->subpq_size > 1)
1171                             sp2 = &is->subpq[(is->subpq_rindex + 1) % SUBPICTURE_QUEUE_SIZE];
1172                         else
1173                             sp2 = NULL;
1174
1175                         if ((is->video_current_pts > (sp->pts + ((float) sp->sub.end_display_time / 1000)))
1176                                 || (sp2 && is->video_current_pts > (sp2->pts + ((float) sp2->sub.start_display_time / 1000))))
1177                         {
1178                             free_subpicture(sp);
1179
1180                             /* update queue size and signal for next picture */
1181                             if (++is->subpq_rindex == SUBPICTURE_QUEUE_SIZE)
1182                                 is->subpq_rindex = 0;
1183
1184                             SDL_LockMutex(is->subpq_mutex);
1185                             is->subpq_size--;
1186                             SDL_CondSignal(is->subpq_cond);
1187                             SDL_UnlockMutex(is->subpq_mutex);
1188                         }
1189                     }
1190                 }
1191             }
1192
1193             /* display picture */
1194             if (!display_disable)
1195                 video_display(is);
1196
1197             /* update queue size and signal for next picture */
1198             if (++is->pictq_rindex == VIDEO_PICTURE_QUEUE_SIZE)
1199                 is->pictq_rindex = 0;
1200
1201             SDL_LockMutex(is->pictq_mutex);
1202             is->pictq_size--;
1203             SDL_CondSignal(is->pictq_cond);
1204             SDL_UnlockMutex(is->pictq_mutex);
1205         }
1206     } else if (is->audio_st) {
1207         /* draw the next audio frame */
1208
1209         /* if there is only an audio stream, then display the audio bars (better
1210            than nothing, just to test the implementation) */
1211
1212         /* display picture */
1213         if (!display_disable)
1214             video_display(is);
1215     }
1216     if (show_status) {
1217         static int64_t last_time;
1218         int64_t cur_time;
1219         int aqsize, vqsize, sqsize;
1220         double av_diff;
1221
1222         cur_time = av_gettime();
1223         if (!last_time || (cur_time - last_time) >= 30000) {
1224             aqsize = 0;
1225             vqsize = 0;
1226             sqsize = 0;
1227             if (is->audio_st)
1228                 aqsize = is->audioq.size;
1229             if (is->video_st)
1230                 vqsize = is->videoq.size;
1231             if (is->subtitle_st)
1232                 sqsize = is->subtitleq.size;
1233             av_diff = 0;
1234             if (is->audio_st && is->video_st)
1235                 av_diff = get_audio_clock(is) - get_video_clock(is);
1236             printf("%7.2f A-V:%7.3f s:%3.1f aq=%5dKB vq=%5dKB sq=%5dB f=%"PRId64"/%"PRId64"   \r",
1237                    get_master_clock(is),
1238                    av_diff,
1239                    FFMAX(is->skip_frames-1, 0),
1240                    aqsize / 1024,
1241                    vqsize / 1024,
1242                    sqsize,
1243                    is->video_st ? is->video_st->codec->pts_correction_num_faulty_dts : 0,
1244                    is->video_st ? is->video_st->codec->pts_correction_num_faulty_pts : 0);
1245             fflush(stdout);
1246             last_time = cur_time;
1247         }
1248     }
1249 }
1250
1251 /* allocate a picture (this needs to be done in the main thread to avoid
1252    potential locking problems) */
1253 static void alloc_picture(void *opaque)
1254 {
1255     VideoState *is = opaque;
1256     VideoPicture *vp;
1257
1258     vp = &is->pictq[is->pictq_windex];
1259
1260     if (vp->bmp)
1261         SDL_FreeYUVOverlay(vp->bmp);
1262
1263 #if CONFIG_AVFILTER
1264     if (vp->picref)
1265         avfilter_unref_buffer(vp->picref);
1266     vp->picref = NULL;
1267
1268     vp->width   = is->out_video_filter->inputs[0]->w;
1269     vp->height  = is->out_video_filter->inputs[0]->h;
1270     vp->pix_fmt = is->out_video_filter->inputs[0]->format;
1271 #else
1272     vp->width   = is->video_st->codec->width;
1273     vp->height  = is->video_st->codec->height;
1274     vp->pix_fmt = is->video_st->codec->pix_fmt;
1275 #endif
1276
1277     vp->bmp = SDL_CreateYUVOverlay(vp->width, vp->height,
1278                                    SDL_YV12_OVERLAY,
1279                                    screen);
1280     if (!vp->bmp || vp->bmp->pitches[0] < vp->width) {
1281         /* SDL allocates a buffer smaller than requested if the video
1282          * overlay hardware is unable to support the requested size. */
1283         fprintf(stderr, "Error: the video system does not support an image\n"
1284                         "size of %dx%d pixels. Try using -lowres or -vf \"scale=w:h\"\n"
1285                         "to reduce the image size.\n", vp->width, vp->height );
1286         do_exit(is);
1287     }
1288
1289     SDL_LockMutex(is->pictq_mutex);
1290     vp->allocated = 1;
1291     SDL_CondSignal(is->pictq_cond);
1292     SDL_UnlockMutex(is->pictq_mutex);
1293 }
1294
1295 static int queue_picture(VideoState *is, AVFrame *src_frame, double pts1, int64_t pos)
1296 {
1297     VideoPicture *vp;
1298     double frame_delay, pts = pts1;
1299
1300     /* compute the exact PTS for the picture if it is omitted in the stream
1301      * pts1 is the dts of the pkt / pts of the frame */
1302     if (pts != 0) {
1303         /* update video clock with pts, if present */
1304         is->video_clock = pts;
1305     } else {
1306         pts = is->video_clock;
1307     }
1308     /* update video clock for next frame */
1309     frame_delay = av_q2d(is->video_st->codec->time_base);
1310     /* for MPEG2, the frame can be repeated, so we update the
1311        clock accordingly */
1312     frame_delay += src_frame->repeat_pict * (frame_delay * 0.5);
1313     is->video_clock += frame_delay;
1314
1315 #if defined(DEBUG_SYNC) && 0
1316     printf("frame_type=%c clock=%0.3f pts=%0.3f\n",
1317            av_get_picture_type_char(src_frame->pict_type), pts, pts1);
1318 #endif
1319
1320     /* wait until we have space to put a new picture */
1321     SDL_LockMutex(is->pictq_mutex);
1322
1323     if(is->pictq_size>=VIDEO_PICTURE_QUEUE_SIZE && !is->refresh)
1324         is->skip_frames= FFMAX(1.0 - FRAME_SKIP_FACTOR, is->skip_frames * (1.0-FRAME_SKIP_FACTOR));
1325
1326     while (is->pictq_size >= VIDEO_PICTURE_QUEUE_SIZE &&
1327            !is->videoq.abort_request) {
1328         SDL_CondWait(is->pictq_cond, is->pictq_mutex);
1329     }
1330     SDL_UnlockMutex(is->pictq_mutex);
1331
1332     if (is->videoq.abort_request)
1333         return -1;
1334
1335     vp = &is->pictq[is->pictq_windex];
1336
1337     vp->duration = frame_delay;
1338
1339     /* alloc or resize hardware picture buffer */
1340     if (!vp->bmp ||
1341 #if CONFIG_AVFILTER
1342         vp->width  != is->out_video_filter->inputs[0]->w ||
1343         vp->height != is->out_video_filter->inputs[0]->h) {
1344 #else
1345         vp->width != is->video_st->codec->width ||
1346         vp->height != is->video_st->codec->height) {
1347 #endif
1348         SDL_Event event;
1349
1350         vp->allocated = 0;
1351
1352         /* the allocation must be done in the main thread to avoid
1353            locking problems */
1354         event.type = FF_ALLOC_EVENT;
1355         event.user.data1 = is;
1356         SDL_PushEvent(&event);
1357
1358         /* wait until the picture is allocated */
1359         SDL_LockMutex(is->pictq_mutex);
1360         while (!vp->allocated && !is->videoq.abort_request) {
1361             SDL_CondWait(is->pictq_cond, is->pictq_mutex);
1362         }
1363         /* if the queue is aborted, we have to pop the pending ALLOC event or wait for the allocation to complete */
1364         if (is->videoq.abort_request && SDL_PeepEvents(&event, 1, SDL_GETEVENT, SDL_EVENTMASK(FF_ALLOC_EVENT)) != 1) {
1365             while (!vp->allocated) {
1366                 SDL_CondWait(is->pictq_cond, is->pictq_mutex);
1367             }
1368         }
1369         SDL_UnlockMutex(is->pictq_mutex);
1370
1371         if (is->videoq.abort_request)
1372             return -1;
1373     }
1374
1375     /* if the frame is not skipped, then display it */
1376     if (vp->bmp) {
1377         AVPicture pict;
1378 #if CONFIG_AVFILTER
1379         if(vp->picref)
1380             avfilter_unref_buffer(vp->picref);
1381         vp->picref = src_frame->opaque;
1382 #endif
1383
1384         /* get a pointer on the bitmap */
1385         SDL_LockYUVOverlay (vp->bmp);
1386
1387         memset(&pict,0,sizeof(AVPicture));
1388         pict.data[0] = vp->bmp->pixels[0];
1389         pict.data[1] = vp->bmp->pixels[2];
1390         pict.data[2] = vp->bmp->pixels[1];
1391
1392         pict.linesize[0] = vp->bmp->pitches[0];
1393         pict.linesize[1] = vp->bmp->pitches[2];
1394         pict.linesize[2] = vp->bmp->pitches[1];
1395
1396 #if CONFIG_AVFILTER
1397         //FIXME use direct rendering
1398         av_picture_copy(&pict, (AVPicture *)src_frame,
1399                         vp->pix_fmt, vp->width, vp->height);
1400 #else
1401         sws_flags = av_get_int(sws_opts, "sws_flags", NULL);
1402         is->img_convert_ctx = sws_getCachedContext(is->img_convert_ctx,
1403             vp->width, vp->height, vp->pix_fmt, vp->width, vp->height,
1404             PIX_FMT_YUV420P, sws_flags, NULL, NULL, NULL);
1405         if (is->img_convert_ctx == NULL) {
1406             fprintf(stderr, "Cannot initialize the conversion context\n");
1407             exit(1);
1408         }
1409         sws_scale(is->img_convert_ctx, src_frame->data, src_frame->linesize,
1410                   0, vp->height, pict.data, pict.linesize);
1411 #endif
1412         /* update the bitmap content */
1413         SDL_UnlockYUVOverlay(vp->bmp);
1414
1415         vp->pts = pts;
1416         vp->pos = pos;
1417
1418         /* now we can update the picture count */
1419         if (++is->pictq_windex == VIDEO_PICTURE_QUEUE_SIZE)
1420             is->pictq_windex = 0;
1421         SDL_LockMutex(is->pictq_mutex);
1422         vp->target_clock= compute_target_time(vp->pts, is);
1423
1424         is->pictq_size++;
1425         SDL_UnlockMutex(is->pictq_mutex);
1426     }
1427     return 0;
1428 }
1429
1430 static int get_video_frame(VideoState *is, AVFrame *frame, int64_t *pts, AVPacket *pkt)
1431 {
1432     int got_picture, i;
1433
1434     if (packet_queue_get(&is->videoq, pkt, 1) < 0)
1435         return -1;
1436
1437     if (pkt->data == flush_pkt.data) {
1438         avcodec_flush_buffers(is->video_st->codec);
1439
1440         SDL_LockMutex(is->pictq_mutex);
1441         // Make sure there are no long-delay timers (ideally we should just flush the queue, but that's harder)
1442         for (i = 0; i < VIDEO_PICTURE_QUEUE_SIZE; i++) {
1443             is->pictq[i].target_clock= 0;
1444         }
1445         while (is->pictq_size && !is->videoq.abort_request) {
1446             SDL_CondWait(is->pictq_cond, is->pictq_mutex);
1447         }
1448         is->video_current_pos = -1;
1449         SDL_UnlockMutex(is->pictq_mutex);
1450
1451         is->frame_last_pts = AV_NOPTS_VALUE;
1452         is->frame_last_delay = 0;
1453         is->frame_timer = (double)av_gettime() / 1000000.0;
1454         is->skip_frames = 1;
1455         is->skip_frames_index = 0;
1456         return 0;
1457     }
1458
1459     avcodec_decode_video2(is->video_st->codec, frame, &got_picture, pkt);
1460
1461     if (got_picture) {
1462         if (decoder_reorder_pts == -1) {
1463             *pts = frame->best_effort_timestamp;
1464         } else if (decoder_reorder_pts) {
1465             *pts = frame->pkt_pts;
1466         } else {
1467             *pts = frame->pkt_dts;
1468         }
1469
1470         if (*pts == AV_NOPTS_VALUE) {
1471             *pts = 0;
1472         }
1473
1474         is->skip_frames_index += 1;
1475         if(is->skip_frames_index >= is->skip_frames){
1476             is->skip_frames_index -= FFMAX(is->skip_frames, 1.0);
1477             return 1;
1478         }
1479
1480     }
1481     return 0;
1482 }
1483
1484 #if CONFIG_AVFILTER
1485 typedef struct {
1486     VideoState *is;
1487     AVFrame *frame;
1488     int use_dr1;
1489 } FilterPriv;
1490
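/* get_buffer callback installed on the video decoder when direct rendering is
   possible: decoded frames are written straight into libavfilter video buffers
   so they can enter the filter graph without an extra copy */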
1491 static int input_get_buffer(AVCodecContext *codec, AVFrame *pic)
1492 {
1493     AVFilterContext *ctx = codec->opaque;
1494     AVFilterBufferRef  *ref;
1495     int perms = AV_PERM_WRITE;
1496     int i, w, h, stride[4];
1497     unsigned edge;
1498     int pixel_size;
1499
1500     av_assert0(codec->flags & CODEC_FLAG_EMU_EDGE);
1501
1502     if (codec->codec->capabilities & CODEC_CAP_NEG_LINESIZES)
1503         perms |= AV_PERM_NEG_LINESIZES;
1504
1505     if(pic->buffer_hints & FF_BUFFER_HINTS_VALID) {
1506         if(pic->buffer_hints & FF_BUFFER_HINTS_READABLE) perms |= AV_PERM_READ;
1507         if(pic->buffer_hints & FF_BUFFER_HINTS_PRESERVE) perms |= AV_PERM_PRESERVE;
1508         if(pic->buffer_hints & FF_BUFFER_HINTS_REUSABLE) perms |= AV_PERM_REUSE2;
1509     }
1510     if(pic->reference) perms |= AV_PERM_READ | AV_PERM_PRESERVE;
1511
1512     w = codec->width;
1513     h = codec->height;
1514
1515     if(av_image_check_size(w, h, 0, codec))
1516         return -1;
1517
1518     avcodec_align_dimensions2(codec, &w, &h, stride);
1519     edge = codec->flags & CODEC_FLAG_EMU_EDGE ? 0 : avcodec_get_edge_width();
1520     w += edge << 1;
1521     h += edge << 1;
1522
1523     if(!(ref = avfilter_get_video_buffer(ctx->outputs[0], perms, w, h)))
1524         return -1;
1525
1526     pixel_size = av_pix_fmt_descriptors[ref->format].comp[0].step_minus1+1;
1527     ref->video->w = codec->width;
1528     ref->video->h = codec->height;
1529     for(i = 0; i < 4; i ++) {
1530         unsigned hshift = (i == 1 || i == 2) ? av_pix_fmt_descriptors[ref->format].log2_chroma_w : 0;
1531         unsigned vshift = (i == 1 || i == 2) ? av_pix_fmt_descriptors[ref->format].log2_chroma_h : 0;
1532
1533         if (ref->data[i]) {
1534             ref->data[i]    += ((edge * pixel_size) >> hshift) + ((edge * ref->linesize[i]) >> vshift);
1535         }
1536         pic->data[i]     = ref->data[i];
1537         pic->linesize[i] = ref->linesize[i];
1538     }
1539     pic->opaque = ref;
1540     pic->age    = INT_MAX;
1541     pic->type   = FF_BUFFER_TYPE_USER;
1542     pic->reordered_opaque = codec->reordered_opaque;
1543     if(codec->pkt) pic->pkt_pts = codec->pkt->pts;
1544     else           pic->pkt_pts = AV_NOPTS_VALUE;
1545     return 0;
1546 }
1547
1548 static void input_release_buffer(AVCodecContext *codec, AVFrame *pic)
1549 {
1550     memset(pic->data, 0, sizeof(pic->data));
1551     avfilter_unref_buffer(pic->opaque);
1552 }
1553
1554 static int input_reget_buffer(AVCodecContext *codec, AVFrame *pic)
1555 {
1556     AVFilterBufferRef *ref = pic->opaque;
1557
1558     if (pic->data[0] == NULL) {
1559         pic->buffer_hints |= FF_BUFFER_HINTS_READABLE;
1560         return codec->get_buffer(codec, pic);
1561     }
1562
1563     if ((codec->width != ref->video->w) || (codec->height != ref->video->h) ||
1564         (codec->pix_fmt != ref->format)) {
1565         av_log(codec, AV_LOG_ERROR, "Picture properties changed.\n");
1566         return -1;
1567     }
1568
1569     pic->reordered_opaque = codec->reordered_opaque;
1570     if(codec->pkt) pic->pkt_pts = codec->pkt->pts;
1571     else           pic->pkt_pts = AV_NOPTS_VALUE;
1572     return 0;
1573 }
1574
1575 static int input_init(AVFilterContext *ctx, const char *args, void *opaque)
1576 {
1577     FilterPriv *priv = ctx->priv;
1578     AVCodecContext *codec;
1579     if(!opaque) return -1;
1580
1581     priv->is = opaque;
1582     codec    = priv->is->video_st->codec;
1583     codec->opaque = ctx;
1584     if((codec->codec->capabilities & CODEC_CAP_DR1)
1585     ) {
1586         av_assert0(codec->flags & CODEC_FLAG_EMU_EDGE);
1587         priv->use_dr1 = 1;
1588         codec->get_buffer     = input_get_buffer;
1589         codec->release_buffer = input_release_buffer;
1590         codec->reget_buffer   = input_reget_buffer;
1591         codec->thread_safe_callbacks = 1;
1592     }
1593
1594     priv->frame = avcodec_alloc_frame();
1595
1596     return 0;
1597 }
1598
1599 static void input_uninit(AVFilterContext *ctx)
1600 {
1601     FilterPriv *priv = ctx->priv;
1602     av_free(priv->frame);
1603 }
1604
1605 static int input_request_frame(AVFilterLink *link)
1606 {
1607     FilterPriv *priv = link->src->priv;
1608     AVFilterBufferRef *picref;
1609     int64_t pts = 0;
1610     AVPacket pkt;
1611     int ret;
1612
1613     while (!(ret = get_video_frame(priv->is, priv->frame, &pts, &pkt)))
1614         av_free_packet(&pkt);
1615     if (ret < 0)
1616         return -1;
1617
1618     if(priv->use_dr1 && priv->frame->opaque) {
1619         picref = avfilter_ref_buffer(priv->frame->opaque, ~0);
1620     } else {
1621         picref = avfilter_get_video_buffer(link, AV_PERM_WRITE, link->w, link->h);
1622         av_image_copy(picref->data, picref->linesize,
1623                       priv->frame->data, priv->frame->linesize,
1624                       picref->format, link->w, link->h);
1625     }
1626     av_free_packet(&pkt);
1627
1628     avfilter_copy_frame_props(picref, priv->frame);
1629     picref->pts = pts;
1630
1631     avfilter_start_frame(link, picref);
1632     avfilter_draw_slice(link, 0, link->h, 1);
1633     avfilter_end_frame(link);
1634
1635     return 0;
1636 }
1637
1638 static int input_query_formats(AVFilterContext *ctx)
1639 {
1640     FilterPriv *priv = ctx->priv;
1641     enum PixelFormat pix_fmts[] = {
1642         priv->is->video_st->codec->pix_fmt, PIX_FMT_NONE
1643     };
1644
1645     avfilter_set_common_pixel_formats(ctx, avfilter_make_format_list(pix_fmts));
1646     return 0;
1647 }
1648
1649 static int input_config_props(AVFilterLink *link)
1650 {
1651     FilterPriv *priv  = link->src->priv;
1652     AVStream *s = priv->is->video_st;
1653
1654     link->w = s->codec->width;
1655     link->h = s->codec->height;
1656     link->sample_aspect_ratio = s->sample_aspect_ratio.num ?
1657         s->sample_aspect_ratio : s->codec->sample_aspect_ratio;
1658     link->time_base = s->time_base;
1659
1660     return 0;
1661 }
1662
1663 static AVFilter input_filter =
1664 {
1665     .name      = "ffplay_input",
1666
1667     .priv_size = sizeof(FilterPriv),
1668
1669     .init      = input_init,
1670     .uninit    = input_uninit,
1671
1672     .query_formats = input_query_formats,
1673
1674     .inputs    = (AVFilterPad[]) {{ .name = NULL }},
1675     .outputs   = (AVFilterPad[]) {{ .name = "default",
1676                                     .type = AVMEDIA_TYPE_VIDEO,
1677                                     .request_frame = input_request_frame,
1678                                     .config_props  = input_config_props, },
1679                                   { .name = NULL }},
1680 };
1681
1682 static int configure_video_filters(AVFilterGraph *graph, VideoState *is, const char *vfilters)
1683 {
1684     char sws_flags_str[128];
1685     int ret;
1686     enum PixelFormat pix_fmts[] = { PIX_FMT_YUV420P, PIX_FMT_NONE };
1687     AVBufferSinkParams *buffersink_params = av_buffersink_params_alloc();
1688     AVFilterContext *filt_src = NULL, *filt_out = NULL;
1689     snprintf(sws_flags_str, sizeof(sws_flags_str), "flags=%d", sws_flags);
1690     graph->scale_sws_opts = av_strdup(sws_flags_str);
1691
1692     if ((ret = avfilter_graph_create_filter(&filt_src, &input_filter, "src",
1693                                             NULL, is, graph)) < 0)
1694         return ret;
1695 #if FF_API_OLD_VSINK_API
1696     ret = avfilter_graph_create_filter(&filt_out, avfilter_get_by_name("buffersink"), "out",
1697                                        NULL, pix_fmts, graph);
1698 #else
1699     buffersink_params->pixel_fmts = pix_fmts;
1700     ret = avfilter_graph_create_filter(&filt_out, avfilter_get_by_name("buffersink"), "out",
1701                                        NULL, buffersink_params, graph);
1702 #endif
1703     av_freep(&buffersink_params);
1704     if (ret < 0)
1705         return ret;
1706
1707     if(vfilters) {
1708         AVFilterInOut *outputs = avfilter_inout_alloc();
1709         AVFilterInOut *inputs  = avfilter_inout_alloc();
1710
1711         outputs->name    = av_strdup("in");
1712         outputs->filter_ctx = filt_src;
1713         outputs->pad_idx = 0;
1714         outputs->next    = NULL;
1715
1716         inputs->name    = av_strdup("out");
1717         inputs->filter_ctx = filt_out;
1718         inputs->pad_idx = 0;
1719         inputs->next    = NULL;
1720
1721         if ((ret = avfilter_graph_parse(graph, vfilters, &inputs, &outputs, NULL)) < 0)
1722             return ret;
1723     } else {
1724         if ((ret = avfilter_link(filt_src, 0, filt_out, 0)) < 0)
1725             return ret;
1726     }
1727
1728     if ((ret = avfilter_graph_config(graph, NULL)) < 0)
1729         return ret;
1730
1731     is->out_video_filter = filt_out;
1732
1733     return ret;
1734 }
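/* With -vf, avfilter_graph_parse() above wires the user chain between the "in" label
   (our ffplay_input source) and the "out" label (the buffersink).  The sketch below,
   not compiled into ffplay, shows the equivalent manual wiring for a single filter;
   the "yadif" filter name is only an example and is assumed to take no required args. */
#if 0
static int link_single_filter_example(AVFilterGraph *graph,
                                      AVFilterContext *filt_src,
                                      AVFilterContext *filt_out)
{
    AVFilterContext *filt_yadif = NULL;
    int ret = avfilter_graph_create_filter(&filt_yadif,
                                           avfilter_get_by_name("yadif"),
                                           "deint", NULL, NULL, graph);
    if (ret < 0)
        return ret;
    /* src -> yadif -> buffersink, the same topology "-vf yadif" would produce */
    if ((ret = avfilter_link(filt_src, 0, filt_yadif, 0)) < 0)
        return ret;
    return avfilter_link(filt_yadif, 0, filt_out, 0);
}
#endif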
1735
1736 #endif  /* CONFIG_AVFILTER */
1737
1738 static int video_thread(void *arg)
1739 {
1740     VideoState *is = arg;
1741     AVFrame *frame= avcodec_alloc_frame();
1742     int64_t pts_int = AV_NOPTS_VALUE, pos = -1;
1743     double pts;
1744     int ret;
1745
1746 #if CONFIG_AVFILTER
1747     AVFilterGraph *graph = avfilter_graph_alloc();
1748     AVFilterContext *filt_out = NULL;
1749     int last_w = is->video_st->codec->width;
1750     int last_h = is->video_st->codec->height;
1751
1752     if ((ret = configure_video_filters(graph, is, vfilters)) < 0)
1753         goto the_end;
1754     filt_out = is->out_video_filter;
1755 #endif
1756
1757     for(;;) {
1758 #if !CONFIG_AVFILTER
1759         AVPacket pkt;
1760 #else
1761         AVFilterBufferRef *picref;
1762         AVRational tb = filt_out->inputs[0]->time_base;
1763 #endif
1764         while (is->paused && !is->videoq.abort_request)
1765             SDL_Delay(10);
1766 #if CONFIG_AVFILTER
1767         if (   last_w != is->video_st->codec->width
1768             || last_h != is->video_st->codec->height) {
1769             av_log(NULL, AV_LOG_INFO, "Frame changed from size:%dx%d to size:%dx%d\n",
1770                    last_w, last_h, is->video_st->codec->width, is->video_st->codec->height);
1771             avfilter_graph_free(&graph);
1772             graph = avfilter_graph_alloc();
1773             if ((ret = configure_video_filters(graph, is, vfilters)) < 0)
1774                 goto the_end;
1775             filt_out = is->out_video_filter;
1776             last_w = is->video_st->codec->width;
1777             last_h = is->video_st->codec->height;
1778         }
1779         ret = av_buffersink_get_buffer_ref(filt_out, &picref, 0);
1780         if (picref) {
1781             avfilter_fill_frame_from_video_buffer_ref(frame, picref);
1782             pts_int = picref->pts;
1783             pos     = picref->pos;
1784             frame->opaque = picref;
1785         }
1786
1787         if (av_cmp_q(tb, is->video_st->time_base)) {
1788             av_unused int64_t pts1 = pts_int;
1789             pts_int = av_rescale_q(pts_int, tb, is->video_st->time_base);
1790             av_dlog(NULL, "video_thread(): "
1791                     "tb:%d/%d pts:%"PRId64" -> tb:%d/%d pts:%"PRId64"\n",
1792                     tb.num, tb.den, pts1,
1793                     is->video_st->time_base.num, is->video_st->time_base.den, pts_int);
1794         }
1795 #else
1796         ret = get_video_frame(is, frame, &pts_int, &pkt);
1797         pos = pkt.pos;
1798         av_free_packet(&pkt);
1799 #endif
1800
1801         if (ret < 0) goto the_end;
1802
1803 #if CONFIG_AVFILTER
1804         if (!picref)
1805             continue;
1806 #endif
1807
1808         pts = pts_int*av_q2d(is->video_st->time_base);
1809
1810         ret = queue_picture(is, frame, pts, pos);
1811
1812         if (ret < 0)
1813             goto the_end;
1814
1815         if (is->step)
1816             stream_toggle_pause(is);
1817     }
1818  the_end:
1819 #if CONFIG_AVFILTER
1820     avfilter_graph_free(&graph);
1821 #endif
1822     av_free(frame);
1823     return 0;
1824 }
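/* Worked example of the av_rescale_q() call in the loop above, with assumed numbers:
   a filter output timebase of 1/25 and an MPEG-TS style stream timebase of 1/90000.
   A frame at pts 100 (t = 4 s) becomes 100 * 90000 / 25 = 360000 in stream units.
   Illustrative only, not compiled. */
#if 0
static int64_t rescale_example(void)
{
    AVRational tb     = { 1, 25 };
    AVRational stream = { 1, 90000 };
    return av_rescale_q(100, tb, stream);   /* -> 360000 */
}
#endif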
1825
1826 static int subtitle_thread(void *arg)
1827 {
1828     VideoState *is = arg;
1829     SubPicture *sp;
1830     AVPacket pkt1, *pkt = &pkt1;
1831     int got_subtitle;
1832     double pts;
1833     int i, j;
1834     int r, g, b, y, u, v, a;
1835
1836     for(;;) {
1837         while (is->paused && !is->subtitleq.abort_request) {
1838             SDL_Delay(10);
1839         }
1840         if (packet_queue_get(&is->subtitleq, pkt, 1) < 0)
1841             break;
1842
1843         if(pkt->data == flush_pkt.data){
1844             avcodec_flush_buffers(is->subtitle_st->codec);
1845             continue;
1846         }
1847         SDL_LockMutex(is->subpq_mutex);
1848         while (is->subpq_size >= SUBPICTURE_QUEUE_SIZE &&
1849                !is->subtitleq.abort_request) {
1850             SDL_CondWait(is->subpq_cond, is->subpq_mutex);
1851         }
1852         SDL_UnlockMutex(is->subpq_mutex);
1853
1854         if (is->subtitleq.abort_request)
1855             return 0;
1856
1857         sp = &is->subpq[is->subpq_windex];
1858
1859         /* NOTE: pts is the PTS of the _first_ subtitle picture beginning in
1860            this packet, if any */
1861         pts = 0;
1862         if (pkt->pts != AV_NOPTS_VALUE)
1863             pts = av_q2d(is->subtitle_st->time_base)*pkt->pts;
1864
1865         avcodec_decode_subtitle2(is->subtitle_st->codec, &sp->sub,
1866                                  &got_subtitle, pkt);
1867
1868         if (got_subtitle && sp->sub.format == 0) {
1869             sp->pts = pts;
1870
1871             for (i = 0; i < sp->sub.num_rects; i++)
1872             {
1873                 for (j = 0; j < sp->sub.rects[i]->nb_colors; j++)
1874                 {
1875                     RGBA_IN(r, g, b, a, (uint32_t*)sp->sub.rects[i]->pict.data[1] + j);
1876                     y = RGB_TO_Y_CCIR(r, g, b);
1877                     u = RGB_TO_U_CCIR(r, g, b, 0);
1878                     v = RGB_TO_V_CCIR(r, g, b, 0);
1879                     YUVA_OUT((uint32_t*)sp->sub.rects[i]->pict.data[1] + j, y, u, v, a);
1880                 }
1881             }
1882
1883             /* now we can update the picture count */
1884             if (++is->subpq_windex == SUBPICTURE_QUEUE_SIZE)
1885                 is->subpq_windex = 0;
1886             SDL_LockMutex(is->subpq_mutex);
1887             is->subpq_size++;
1888             SDL_UnlockMutex(is->subpq_mutex);
1889         }
1890         av_free_packet(pkt);
1891     }
1892     return 0;
1893 }
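/* Worked example of the palette conversion in the loop above: with the CCIR
   (limited-range) macros from libavutil/colorspace.h, pure white RGBA (r = g = b = 255)
   maps to Y = 235 and neutral chroma U = V = 128, while alpha is carried through
   unchanged.  Illustrative only, not compiled. */
#if 0
static void palette_convert_example(void)
{
    int r = 255, g = 255, b = 255;
    int y = RGB_TO_Y_CCIR(r, g, b);     /* 235 */
    int u = RGB_TO_U_CCIR(r, g, b, 0);  /* 128 */
    int v = RGB_TO_V_CCIR(r, g, b, 0);  /* 128 */
    (void)y; (void)u; (void)v;
}
#endif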
1894
1895 /* copy samples for viewing in the audio display (wave/RDFT) window */
1896 static void update_sample_display(VideoState *is, short *samples, int samples_size)
1897 {
1898     int size, len;
1899
1900     size = samples_size / sizeof(short);
1901     while (size > 0) {
1902         len = SAMPLE_ARRAY_SIZE - is->sample_array_index;
1903         if (len > size)
1904             len = size;
1905         memcpy(is->sample_array + is->sample_array_index, samples, len * sizeof(short));
1906         samples += len;
1907         is->sample_array_index += len;
1908         if (is->sample_array_index >= SAMPLE_ARRAY_SIZE)
1909             is->sample_array_index = 0;
1910         size -= len;
1911     }
1912 }
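/* Illustrative sketch of the wrap-around above, with assumed numbers: the sample array
   holds SAMPLE_ARRAY_SIZE (2 * 65536) shorts, so writing 1000 samples while the write
   index is at 130800 copies 272 samples up to the end of the array, resets the index
   to 0 and writes the remaining 728 samples at the start.  Not compiled. */
#if 0
static void sample_ring_example(void)
{
    int size = 1000, index = 130800;
    int len  = SAMPLE_ARRAY_SIZE - index;   /* 272 samples fit before the end */
    size    -= len;                         /* 728 samples wrap around to index 0 */
    (void)size;
}
#endif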
1913
1914 /* return the new audio buffer size (samples can be added or deleted
1915    to get better sync if video or the external clock is the master) */
1916 static int synchronize_audio(VideoState *is, short *samples,
1917                              int samples_size1, double pts)
1918 {
1919     int n, samples_size;
1920     double ref_clock;
1921
1922     n = 2 * is->audio_st->codec->channels;
1923     samples_size = samples_size1;
1924
1925     /* if not master, then we try to remove or add samples to correct the clock */
1926     if (((is->av_sync_type == AV_SYNC_VIDEO_MASTER && is->video_st) ||
1927          is->av_sync_type == AV_SYNC_EXTERNAL_CLOCK)) {
1928         double diff, avg_diff;
1929         int wanted_size, min_size, max_size, nb_samples;
1930
1931         ref_clock = get_master_clock(is);
1932         diff = get_audio_clock(is) - ref_clock;
1933
1934         if (fabs(diff) < AV_NOSYNC_THRESHOLD) {
1935             is->audio_diff_cum = diff + is->audio_diff_avg_coef * is->audio_diff_cum;
1936             if (is->audio_diff_avg_count < AUDIO_DIFF_AVG_NB) {
1937                 /* not enough measurements to have a correct estimate */
1938                 is->audio_diff_avg_count++;
1939             } else {
1940                 /* estimate the A-V difference */
1941                 avg_diff = is->audio_diff_cum * (1.0 - is->audio_diff_avg_coef);
1942
1943                 if (fabs(avg_diff) >= is->audio_diff_threshold) {
1944                     wanted_size = samples_size + ((int)(diff * is->audio_st->codec->sample_rate) * n);
1945                     nb_samples = samples_size / n;
1946
1947                     min_size = ((nb_samples * (100 - SAMPLE_CORRECTION_PERCENT_MAX)) / 100) * n;
1948                     max_size = ((nb_samples * (100 + SAMPLE_CORRECTION_PERCENT_MAX)) / 100) * n;
1949                     if (wanted_size < min_size)
1950                         wanted_size = min_size;
1951                     else if (wanted_size > max_size)
1952                         wanted_size = max_size;
1953
1954                     /* add or remove samples to correct the sync */
1955                     if (wanted_size < samples_size) {
1956                         /* remove samples */
1957                         samples_size = wanted_size;
1958                     } else if (wanted_size > samples_size) {
1959                         uint8_t *samples_end, *q;
1960                         int nb;
1961
1962                         /* add samples */
1963                         nb = (wanted_size - samples_size);
1964                         samples_end = (uint8_t *)samples + samples_size - n;
1965                         q = samples_end + n;
1966                         while (nb > 0) {
1967                             memcpy(q, samples_end, n);
1968                             q += n;
1969                             nb -= n;
1970                         }
1971                         samples_size = wanted_size;
1972                     }
1973                 }
1974                 av_dlog(NULL, "diff=%f adiff=%f sample_diff=%d apts=%0.3f vpts=%0.3f %f\n",
1975                         diff, avg_diff, samples_size - samples_size1,
1976                         is->audio_clock, is->video_clock, is->audio_diff_threshold);
1977             }
1978         } else {
1979             /* too big a difference: probably initial PTS errors, so
1980                reset the A-V filter */
1981             is->audio_diff_avg_count = 0;
1982             is->audio_diff_cum = 0;
1983         }
1984     }
1985
1986     return samples_size;
1987 }
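/* Worked example of the clamping above, with assumed numbers: stereo S16 audio
   (n = 4 bytes per sample pair), a 4096 byte buffer (1024 sample pairs) at 44100 Hz,
   and an audio clock running 40 ms ahead of the master clock.  The raw request would
   be 4096 + (int)(0.040 * 44100) * 4 = 11152 bytes, but SAMPLE_CORRECTION_PERCENT_MAX
   limits it to 1024 * 110 / 100 * 4 = 4504 bytes, i.e. at most 10% of extra samples
   per callback.  Illustrative only, not compiled. */
#if 0
static int correction_clamp_example(void)
{
    int n = 4, samples_size = 4096, sample_rate = 44100;
    double diff = 0.040;
    int wanted   = samples_size + (int)(diff * sample_rate) * n;
    int max_size = ((samples_size / n) * (100 + SAMPLE_CORRECTION_PERCENT_MAX)) / 100 * n;
    return wanted > max_size ? max_size : wanted;   /* -> 4504 */
}
#endif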
1988
1989 /* decode one audio frame and return its uncompressed size */
1990 static int audio_decode_frame(VideoState *is, double *pts_ptr)
1991 {
1992     AVPacket *pkt_temp = &is->audio_pkt_temp;
1993     AVPacket *pkt = &is->audio_pkt;
1994     AVCodecContext *dec= is->audio_st->codec;
1995     int n, len1, data_size;
1996     double pts;
1997
1998     for(;;) {
1999         /* NOTE: the audio packet can contain several frames */
2000         while (pkt_temp->size > 0) {
2001             data_size = sizeof(is->audio_buf1);
2002             len1 = avcodec_decode_audio3(dec,
2003                                         (int16_t *)is->audio_buf1, &data_size,
2004                                         pkt_temp);
2005             if (len1 < 0) {
2006                 /* if error, we skip the frame */
2007                 pkt_temp->size = 0;
2008                 break;
2009             }
2010
2011             pkt_temp->data += len1;
2012             pkt_temp->size -= len1;
2013             if (data_size <= 0)
2014                 continue;
2015
2016             if (dec->sample_fmt != is->audio_src_fmt) {
2017                 if (is->reformat_ctx)
2018                     av_audio_convert_free(is->reformat_ctx);
2019                 is->reformat_ctx= av_audio_convert_alloc(AV_SAMPLE_FMT_S16, 1,
2020                                                          dec->sample_fmt, 1, NULL, 0);
2021                 if (!is->reformat_ctx) {
2022                     fprintf(stderr, "Cannot convert %s sample format to %s sample format\n",
2023                         av_get_sample_fmt_name(dec->sample_fmt),
2024                         av_get_sample_fmt_name(AV_SAMPLE_FMT_S16));
2025                         break;
2026                 }
2027                 is->audio_src_fmt= dec->sample_fmt;
2028             }
2029
2030             if (is->reformat_ctx) {
2031                 const void *ibuf[6]= {is->audio_buf1};
2032                 void *obuf[6]= {is->audio_buf2};
2033                 int istride[6]= {av_get_bytes_per_sample(dec->sample_fmt)};
2034                 int ostride[6]= {2};
2035                 int len= data_size/istride[0];
2036                 if (av_audio_convert(is->reformat_ctx, obuf, ostride, ibuf, istride, len)<0) {
2037                     printf("av_audio_convert() failed\n");
2038                     break;
2039                 }
2040                 is->audio_buf= is->audio_buf2;
2041                 /* FIXME: the existing code assumes that data_size equals framesize*channels*2;
2042                           remove this legacy cruft */
2043                 data_size= len*2;
2044             }else{
2045                 is->audio_buf= is->audio_buf1;
2046             }
2047
2048             /* if no pts, then compute it */
2049             pts = is->audio_clock;
2050             *pts_ptr = pts;
2051             n = 2 * dec->channels;
2052             is->audio_clock += (double)data_size /
2053                 (double)(n * dec->sample_rate);
2054 #ifdef DEBUG
2055             {
2056                 static double last_clock;
2057                 printf("audio: delay=%0.3f clock=%0.3f pts=%0.3f\n",
2058                        is->audio_clock - last_clock,
2059                        is->audio_clock, pts);
2060                 last_clock = is->audio_clock;
2061             }
2062 #endif
2063             return data_size;
2064         }
2065
2066         /* free the current packet */
2067         if (pkt->data)
2068             av_free_packet(pkt);
2069
2070         if (is->paused || is->audioq.abort_request) {
2071             return -1;
2072         }
2073
2074         /* read next packet */
2075         if (packet_queue_get(&is->audioq, pkt, 1) < 0)
2076             return -1;
2077         if(pkt->data == flush_pkt.data){
2078             avcodec_flush_buffers(dec);
2079             continue;
2080         }
2081
2082         pkt_temp->data = pkt->data;
2083         pkt_temp->size = pkt->size;
2084
2085         /* update the audio clock with the pts if available */
2086         if (pkt->pts != AV_NOPTS_VALUE) {
2087             is->audio_clock = av_q2d(is->audio_st->time_base)*pkt->pts;
2088         }
2089     }
2090 }
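/* Worked example of the audio clock update above, with assumed numbers: 4096 bytes of
   decoded stereo S16 (n = 2 * channels = 4 bytes per sample pair) at 48000 Hz advance
   the clock by 4096 / (4 * 48000) = 0.0213 s, i.e. about 21.3 ms of audio.
   Illustrative only, not compiled. */
#if 0
static double audio_clock_step_example(void)
{
    int data_size = 4096, channels = 2, sample_rate = 48000;
    int n = 2 * channels;
    return (double)data_size / (double)(n * sample_rate);  /* ~0.0213 s */
}
#endif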
2091
2092 /* prepare a new audio buffer */
2093 static void sdl_audio_callback(void *opaque, Uint8 *stream, int len)
2094 {
2095     VideoState *is = opaque;
2096     int audio_size, len1;
2097     int bytes_per_sec;
2098     double pts;
2099
2100     audio_callback_time = av_gettime();
2101
2102     while (len > 0) {
2103         if (is->audio_buf_index >= is->audio_buf_size) {
2104            audio_size = audio_decode_frame(is, &pts);
2105            if (audio_size < 0) {
2106                 /* if error, just output silence */
2107                is->audio_buf = is->audio_buf1;
2108                is->audio_buf_size = 1024;
2109                memset(is->audio_buf, 0, is->audio_buf_size);
2110            } else {
2111                if (is->show_mode != SHOW_MODE_VIDEO)
2112                    update_sample_display(is, (int16_t *)is->audio_buf, audio_size);
2113                audio_size = synchronize_audio(is, (int16_t *)is->audio_buf, audio_size,
2114                                               pts);
2115                is->audio_buf_size = audio_size;
2116            }
2117            is->audio_buf_index = 0;
2118         }
2119         len1 = is->audio_buf_size - is->audio_buf_index;
2120         if (len1 > len)
2121             len1 = len;
2122         memcpy(stream, (uint8_t *)is->audio_buf + is->audio_buf_index, len1);
2123         len -= len1;
2124         stream += len1;
2125         is->audio_buf_index += len1;
2126     }
2127     bytes_per_sec = is->audio_st->codec->sample_rate *
2128             2 * is->audio_st->codec->channels;
2129     is->audio_write_buf_size = is->audio_buf_size - is->audio_buf_index;
2130     /* Let's assume the audio driver that is used by SDL has two periods. */
2131     is->audio_current_pts = is->audio_clock - (double)(2 * is->audio_hw_buf_size + is->audio_write_buf_size) / bytes_per_sec;
2132     is->audio_current_pts_drift = is->audio_current_pts - audio_callback_time / 1000000.0;
2133 }
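/* Worked example of the latency estimate above, with assumed numbers: stereo S16 at
   44100 Hz gives bytes_per_sec = 44100 * 2 * 2 = 176400.  With a 4096 byte SDL hardware
   buffer (1024 samples) and nothing left in audio_buf, the "two periods" assumption
   places the sample currently hitting the speaker about 2 * 4096 / 176400 = 46.4 ms
   behind is->audio_clock.  Illustrative only, not compiled. */
#if 0
static double audio_latency_example(void)
{
    int hw_buf_size = 4096, write_buf_size = 0;
    int bytes_per_sec = 44100 * 2 * 2;
    return (double)(2 * hw_buf_size + write_buf_size) / bytes_per_sec;  /* ~0.0464 s */
}
#endif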
2134
2135 /* open a given stream. Return 0 if OK */
2136 static int stream_component_open(VideoState *is, int stream_index)
2137 {
2138     AVFormatContext *ic = is->ic;
2139     AVCodecContext *avctx;
2140     AVCodec *codec;
2141     SDL_AudioSpec wanted_spec, spec;
2142     AVDictionary *opts;
2143     AVDictionaryEntry *t = NULL;
2144
2145     if (stream_index < 0 || stream_index >= ic->nb_streams)
2146         return -1;
2147     avctx = ic->streams[stream_index]->codec;
2148
2149     opts = filter_codec_opts(codec_opts, avctx->codec_id, ic, ic->streams[stream_index]);
2150
2151     /* prepare audio output */
2152     if (avctx->codec_type == AVMEDIA_TYPE_AUDIO) {
2153         if (avctx->channels > 0) {
2154             avctx->request_channels = FFMIN(2, avctx->channels);
2155         } else {
2156             avctx->request_channels = 2;
2157         }
2158     }
2159
2160     codec = avcodec_find_decoder(avctx->codec_id);
2161     if (!codec)
2162         return -1;
2163
2164     avctx->workaround_bugs = workaround_bugs;
2165     avctx->lowres = lowres;
2166     if(lowres) avctx->flags |= CODEC_FLAG_EMU_EDGE;
2167     avctx->idct_algo= idct;
2168     if(fast) avctx->flags2 |= CODEC_FLAG2_FAST;
2169     avctx->skip_frame= skip_frame;
2170     avctx->skip_idct= skip_idct;
2171     avctx->skip_loop_filter= skip_loop_filter;
2172     avctx->error_recognition= error_recognition;
2173     avctx->error_concealment= error_concealment;
2174
2175     if(codec->capabilities & CODEC_CAP_DR1)
2176         avctx->flags |= CODEC_FLAG_EMU_EDGE;
2177
2178     if (!codec ||
2179         avcodec_open2(avctx, codec, &opts) < 0)
2180         return -1;
2181     if ((t = av_dict_get(opts, "", NULL, AV_DICT_IGNORE_SUFFIX))) {
2182         av_log(NULL, AV_LOG_ERROR, "Option %s not found.\n", t->key);
2183         return AVERROR_OPTION_NOT_FOUND;
2184     }
2185
2186     /* prepare audio output */
2187     if (avctx->codec_type == AVMEDIA_TYPE_AUDIO) {
2188         if(avctx->sample_rate <= 0 || avctx->channels <= 0){
2189             fprintf(stderr, "Invalid sample rate or channel count\n");
2190             return -1;
2191         }
2192         wanted_spec.freq = avctx->sample_rate;
2193         wanted_spec.format = AUDIO_S16SYS;
2194         wanted_spec.channels = avctx->channels;
2195         wanted_spec.silence = 0;
2196         wanted_spec.samples = SDL_AUDIO_BUFFER_SIZE;
2197         wanted_spec.callback = sdl_audio_callback;
2198         wanted_spec.userdata = is;
2199         if (SDL_OpenAudio(&wanted_spec, &spec) < 0) {
2200             fprintf(stderr, "SDL_OpenAudio: %s\n", SDL_GetError());
2201             return -1;
2202         }
2203         is->audio_hw_buf_size = spec.size;
2204         is->audio_src_fmt= AV_SAMPLE_FMT_S16;
2205     }
2206
2207     ic->streams[stream_index]->discard = AVDISCARD_DEFAULT;
2208     switch(avctx->codec_type) {
2209     case AVMEDIA_TYPE_AUDIO:
2210         is->audio_stream = stream_index;
2211         is->audio_st = ic->streams[stream_index];
2212         is->audio_buf_size = 0;
2213         is->audio_buf_index = 0;
2214
2215         /* init averaging filter */
2216         is->audio_diff_avg_coef = exp(log(0.01) / AUDIO_DIFF_AVG_NB);
2217         is->audio_diff_avg_count = 0;
2218         /* since we do not have precise enough audio FIFO fullness info,
2219            we correct audio sync only if the error is larger than this threshold */
2220         is->audio_diff_threshold = 2.0 * SDL_AUDIO_BUFFER_SIZE / avctx->sample_rate;
2221
2222         memset(&is->audio_pkt, 0, sizeof(is->audio_pkt));
2223         packet_queue_init(&is->audioq);
2224         SDL_PauseAudio(0);
2225         break;
2226     case AVMEDIA_TYPE_VIDEO:
2227         is->video_stream = stream_index;
2228         is->video_st = ic->streams[stream_index];
2229
2230         packet_queue_init(&is->videoq);
2231         is->video_tid = SDL_CreateThread(video_thread, is);
2232         break;
2233     case AVMEDIA_TYPE_SUBTITLE:
2234         is->subtitle_stream = stream_index;
2235         is->subtitle_st = ic->streams[stream_index];
2236         packet_queue_init(&is->subtitleq);
2237
2238         is->subtitle_tid = SDL_CreateThread(subtitle_thread, is);
2239         break;
2240     default:
2241         break;
2242     }
2243     return 0;
2244 }
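/* Worked example for the two audio sync constants set above, assuming a 44100 Hz
   stream: the averaging coefficient exp(log(0.01) / 20) is ~0.794, so a measurement's
   weight decays to 1% after AUDIO_DIFF_AVG_NB (20) callbacks, and the sync threshold
   2.0 * SDL_AUDIO_BUFFER_SIZE / 44100 is ~46 ms, roughly two SDL audio buffers of data.
   Illustrative only, not compiled. */
#if 0
static void audio_sync_constants_example(void)
{
    double coef      = exp(log(0.01) / AUDIO_DIFF_AVG_NB);     /* ~0.794 */
    double threshold = 2.0 * SDL_AUDIO_BUFFER_SIZE / 44100.0;  /* ~0.046 s */
    (void)coef; (void)threshold;
}
#endif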
2245
2246 static void stream_component_close(VideoState *is, int stream_index)
2247 {
2248     AVFormatContext *ic = is->ic;
2249     AVCodecContext *avctx;
2250
2251     if (stream_index < 0 || stream_index >= ic->nb_streams)
2252         return;
2253     avctx = ic->streams[stream_index]->codec;
2254
2255     switch(avctx->codec_type) {
2256     case AVMEDIA_TYPE_AUDIO:
2257         packet_queue_abort(&is->audioq);
2258
2259         SDL_CloseAudio();
2260
2261         packet_queue_end(&is->audioq);
2262         if (is->reformat_ctx)
2263             av_audio_convert_free(is->reformat_ctx);
2264         is->reformat_ctx = NULL;
2265         break;
2266     case AVMEDIA_TYPE_VIDEO:
2267         packet_queue_abort(&is->videoq);
2268
2269         /* note: we also signal this mutex to make sure we deblock the
2270            video thread in all cases */
2271         SDL_LockMutex(is->pictq_mutex);
2272         SDL_CondSignal(is->pictq_cond);
2273         SDL_UnlockMutex(is->pictq_mutex);
2274
2275         SDL_WaitThread(is->video_tid, NULL);
2276
2277         packet_queue_end(&is->videoq);
2278         break;
2279     case AVMEDIA_TYPE_SUBTITLE:
2280         packet_queue_abort(&is->subtitleq);
2281
2282         /* note: we also signal this mutex to make sure we deblock the
2283            subtitle thread in all cases */
2284         SDL_LockMutex(is->subpq_mutex);
2285         is->subtitle_stream_changed = 1;
2286
2287         SDL_CondSignal(is->subpq_cond);
2288         SDL_UnlockMutex(is->subpq_mutex);
2289
2290         SDL_WaitThread(is->subtitle_tid, NULL);
2291
2292         packet_queue_end(&is->subtitleq);
2293         break;
2294     default:
2295         break;
2296     }
2297
2298     ic->streams[stream_index]->discard = AVDISCARD_ALL;
2299     avcodec_close(avctx);
2300     switch(avctx->codec_type) {
2301     case AVMEDIA_TYPE_AUDIO:
2302         is->audio_st = NULL;
2303         is->audio_stream = -1;
2304         break;
2305     case AVMEDIA_TYPE_VIDEO:
2306         is->video_st = NULL;
2307         is->video_stream = -1;
2308         break;
2309     case AVMEDIA_TYPE_SUBTITLE:
2310         is->subtitle_st = NULL;
2311         is->subtitle_stream = -1;
2312         break;
2313     default:
2314         break;
2315     }
2316 }
2317
2318 /* since we have only one decoding thread, we can use a global
2319    variable instead of a thread local variable */
2320 static VideoState *global_video_state;
2321
2322 static int decode_interrupt_cb(void)
2323 {
2324     return (global_video_state && global_video_state->abort_request);
2325 }
2326
2327 /* this thread gets the stream from the disk or the network */
2328 static int read_thread(void *arg)
2329 {
2330     VideoState *is = arg;
2331     AVFormatContext *ic = NULL;
2332     int err, i, ret;
2333     int st_index[AVMEDIA_TYPE_NB];
2334     AVPacket pkt1, *pkt = &pkt1;
2335     int eof=0;
2336     int pkt_in_play_range = 0;
2337     AVDictionaryEntry *t;
2338     AVDictionary **opts;
2339     int orig_nb_streams;
2340
2341     memset(st_index, -1, sizeof(st_index));
2342     is->video_stream = -1;
2343     is->audio_stream = -1;
2344     is->subtitle_stream = -1;
2345
2346     global_video_state = is;
2347     avio_set_interrupt_cb(decode_interrupt_cb);
2348
2349     err = avformat_open_input(&ic, is->filename, is->iformat, &format_opts);
2350     if (err < 0) {
2351         print_error(is->filename, err);
2352         ret = -1;
2353         goto fail;
2354     }
2355     if ((t = av_dict_get(format_opts, "", NULL, AV_DICT_IGNORE_SUFFIX))) {
2356         av_log(NULL, AV_LOG_ERROR, "Option %s not found.\n", t->key);
2357         ret = AVERROR_OPTION_NOT_FOUND;
2358         goto fail;
2359     }
2360     is->ic = ic;
2361
2362     if(genpts)
2363         ic->flags |= AVFMT_FLAG_GENPTS;
2364
2365     opts = setup_find_stream_info_opts(ic, codec_opts);
2366     orig_nb_streams = ic->nb_streams;
2367
2368     err = avformat_find_stream_info(ic, opts);
2369     if (err < 0) {
2370         fprintf(stderr, "%s: could not find codec parameters\n", is->filename);
2371         ret = -1;
2372         goto fail;
2373     }
2374     for (i = 0; i < orig_nb_streams; i++)
2375         av_dict_free(&opts[i]);
2376     av_freep(&opts);
2377
2378     if(ic->pb)
2379         ic->pb->eof_reached= 0; //FIXME hack, ffplay maybe should not use url_feof() to test for the end
2380
2381     if(seek_by_bytes<0)
2382         seek_by_bytes= !!(ic->iformat->flags & AVFMT_TS_DISCONT);
2383
2384     /* if seeking requested, we execute it */
2385     if (start_time != AV_NOPTS_VALUE) {
2386         int64_t timestamp;
2387
2388         timestamp = start_time;
2389         /* add the stream start time */
2390         if (ic->start_time != AV_NOPTS_VALUE)
2391             timestamp += ic->start_time;
2392         ret = avformat_seek_file(ic, -1, INT64_MIN, timestamp, INT64_MAX, 0);
2393         if (ret < 0) {
2394             fprintf(stderr, "%s: could not seek to position %0.3f\n",
2395                     is->filename, (double)timestamp / AV_TIME_BASE);
2396         }
2397     }
2398
2399     for (i = 0; i < ic->nb_streams; i++)
2400         ic->streams[i]->discard = AVDISCARD_ALL;
2401     if (!video_disable)
2402         st_index[AVMEDIA_TYPE_VIDEO] =
2403             av_find_best_stream(ic, AVMEDIA_TYPE_VIDEO,
2404                                 wanted_stream[AVMEDIA_TYPE_VIDEO], -1, NULL, 0);
2405     if (!audio_disable)
2406         st_index[AVMEDIA_TYPE_AUDIO] =
2407             av_find_best_stream(ic, AVMEDIA_TYPE_AUDIO,
2408                                 wanted_stream[AVMEDIA_TYPE_AUDIO],
2409                                 st_index[AVMEDIA_TYPE_VIDEO],
2410                                 NULL, 0);
2411     if (!video_disable)
2412         st_index[AVMEDIA_TYPE_SUBTITLE] =
2413             av_find_best_stream(ic, AVMEDIA_TYPE_SUBTITLE,
2414                                 wanted_stream[AVMEDIA_TYPE_SUBTITLE],
2415                                 (st_index[AVMEDIA_TYPE_AUDIO] >= 0 ?
2416                                  st_index[AVMEDIA_TYPE_AUDIO] :
2417                                  st_index[AVMEDIA_TYPE_VIDEO]),
2418                                 NULL, 0);
2419     if (show_status) {
2420         av_dump_format(ic, 0, is->filename, 0);
2421     }
2422
2423     is->show_mode = show_mode;
2424
2425     /* open the streams */
2426     if (st_index[AVMEDIA_TYPE_AUDIO] >= 0) {
2427         stream_component_open(is, st_index[AVMEDIA_TYPE_AUDIO]);
2428     }
2429
2430     ret=-1;
2431     if (st_index[AVMEDIA_TYPE_VIDEO] >= 0) {
2432         ret= stream_component_open(is, st_index[AVMEDIA_TYPE_VIDEO]);
2433     }
2434     is->refresh_tid = SDL_CreateThread(refresh_thread, is);
2435     if (is->show_mode == SHOW_MODE_NONE)
2436         is->show_mode = ret >= 0 ? SHOW_MODE_VIDEO : SHOW_MODE_RDFT;
2437
2438     if (st_index[AVMEDIA_TYPE_SUBTITLE] >= 0) {
2439         stream_component_open(is, st_index[AVMEDIA_TYPE_SUBTITLE]);
2440     }
2441
2442     if (is->video_stream < 0 && is->audio_stream < 0) {
2443         fprintf(stderr, "%s: could not open codecs\n", is->filename);
2444         ret = -1;
2445         goto fail;
2446     }
2447
2448     for(;;) {
2449         if (is->abort_request)
2450             break;
2451         if (is->paused != is->last_paused) {
2452             is->last_paused = is->paused;
2453             if (is->paused)
2454                 is->read_pause_return= av_read_pause(ic);
2455             else
2456                 av_read_play(ic);
2457         }
2458 #if CONFIG_RTSP_DEMUXER
2459         if (is->paused && !strcmp(ic->iformat->name, "rtsp")) {
2460             /* wait 10 ms to avoid trying to get another packet */
2461             /* XXX: horrible */
2462             SDL_Delay(10);
2463             continue;
2464         }
2465 #endif
2466         if (is->seek_req) {
2467             int64_t seek_target= is->seek_pos;
2468             int64_t seek_min= is->seek_rel > 0 ? seek_target - is->seek_rel + 2: INT64_MIN;
2469             int64_t seek_max= is->seek_rel < 0 ? seek_target - is->seek_rel - 2: INT64_MAX;
2470 //FIXME the +-2 is due to rounding being not done in the correct direction in generation
2471 //      of the seek_pos/seek_rel variables
2472
2473             ret = avformat_seek_file(is->ic, -1, seek_min, seek_target, seek_max, is->seek_flags);
2474             if (ret < 0) {
2475                 fprintf(stderr, "%s: error while seeking\n", is->ic->filename);
2476             }else{
2477                 if (is->audio_stream >= 0) {
2478                     packet_queue_flush(&is->audioq);
2479                     packet_queue_put(&is->audioq, &flush_pkt);
2480                 }
2481                 if (is->subtitle_stream >= 0) {
2482                     packet_queue_flush(&is->subtitleq);
2483                     packet_queue_put(&is->subtitleq, &flush_pkt);
2484                 }
2485                 if (is->video_stream >= 0) {
2486                     packet_queue_flush(&is->videoq);
2487                     packet_queue_put(&is->videoq, &flush_pkt);
2488                 }
2489             }
2490             is->seek_req = 0;
2491             eof= 0;
2492         }
2493
2494         /* if the queues are full, no need to read more */
2495         if (   is->audioq.size + is->videoq.size + is->subtitleq.size > MAX_QUEUE_SIZE
2496             || (   (is->audioq   .size  > MIN_AUDIOQ_SIZE || is->audio_stream<0)
2497                 && (is->videoq   .nb_packets > MIN_FRAMES || is->video_stream<0)
2498                 && (is->subtitleq.nb_packets > MIN_FRAMES || is->subtitle_stream<0))) {
2499             /* wait 10 ms */
2500             SDL_Delay(10);
2501             continue;
2502         }
2503         if(eof) {
2504             if(is->video_stream >= 0){
2505                 av_init_packet(pkt);
2506                 pkt->data=NULL;
2507                 pkt->size=0;
2508                 pkt->stream_index= is->video_stream;
2509                 packet_queue_put(&is->videoq, pkt);
2510             }
2511             SDL_Delay(10);
2512             if(is->audioq.size + is->videoq.size + is->subtitleq.size ==0){
2513                 if(loop!=1 && (!loop || --loop)){
2514                     stream_seek(is, start_time != AV_NOPTS_VALUE ? start_time : 0, 0, 0);
2515                 }else if(autoexit){
2516                     ret=AVERROR_EOF;
2517                     goto fail;
2518                 }
2519             }
2520             eof=0;
2521             continue;
2522         }
2523         ret = av_read_frame(ic, pkt);
2524         if (ret < 0) {
2525             if (ret == AVERROR_EOF || url_feof(ic->pb))
2526                 eof=1;
2527             if (ic->pb && ic->pb->error)
2528                 break;
2529             SDL_Delay(100); /* wait for user event */
2530             continue;
2531         }
2532         /* check if packet is in play range specified by user, then queue, otherwise discard */
2533         pkt_in_play_range = duration == AV_NOPTS_VALUE ||
2534                 (pkt->pts - ic->streams[pkt->stream_index]->start_time) *
2535                 av_q2d(ic->streams[pkt->stream_index]->time_base) -
2536                 (double)(start_time != AV_NOPTS_VALUE ? start_time : 0)/1000000
2537                 <= ((double)duration/1000000);
2538         if (pkt->stream_index == is->audio_stream && pkt_in_play_range) {
2539             packet_queue_put(&is->audioq, pkt);
2540         } else if (pkt->stream_index == is->video_stream && pkt_in_play_range) {
2541             packet_queue_put(&is->videoq, pkt);
2542         } else if (pkt->stream_index == is->subtitle_stream && pkt_in_play_range) {
2543             packet_queue_put(&is->subtitleq, pkt);
2544         } else {
2545             av_free_packet(pkt);
2546         }
2547     }
2548     /* wait until the end */
2549     while (!is->abort_request) {
2550         SDL_Delay(100);
2551     }
2552
2553     ret = 0;
2554  fail:
2555     /* disable interrupting */
2556     global_video_state = NULL;
2557
2558     /* close each stream */
2559     if (is->audio_stream >= 0)
2560         stream_component_close(is, is->audio_stream);
2561     if (is->video_stream >= 0)
2562         stream_component_close(is, is->video_stream);
2563     if (is->subtitle_stream >= 0)
2564         stream_component_close(is, is->subtitle_stream);
2565     if (is->ic) {
2566         av_close_input_file(is->ic);
2567         is->ic = NULL; /* safety */
2568     }
2569     avio_set_interrupt_cb(NULL);
2570
2571     if (ret != 0) {
2572         SDL_Event event;
2573
2574         event.type = FF_QUIT_EVENT;
2575         event.user.data1 = is;
2576         SDL_PushEvent(&event);
2577     }
2578     return 0;
2579 }
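/* Worked example of the play-range test above, with assumed numbers: "-ss 10 -t 5"
   gives start_time = 10000000 and duration = 5000000 (both in AV_TIME_BASE
   microseconds).  For a 1/90000 stream timebase whose start_time is 0, a packet with
   pts = 1260000 sits 14 s into the stream; 14 - 10 = 4 <= 5, so the packet is queued.
   Illustrative only, not compiled. */
#if 0
static int play_range_example(void)
{
    int64_t    pkt_pts      = 1260000, stream_start = 0;
    AVRational tb           = { 1, 90000 };
    int64_t    start_time   = 10000000, duration = 5000000;
    return (pkt_pts - stream_start) * av_q2d(tb)
           - (double)start_time / 1000000 <= (double)duration / 1000000;  /* -> 1 */
}
#endif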
2580
2581 static VideoState *stream_open(const char *filename, AVInputFormat *iformat)
2582 {
2583     VideoState *is;
2584
2585     is = av_mallocz(sizeof(VideoState));
2586     if (!is)
2587         return NULL;
2588     av_strlcpy(is->filename, filename, sizeof(is->filename));
2589     is->iformat = iformat;
2590     is->ytop = 0;
2591     is->xleft = 0;
2592
2593     /* start video display */
2594     is->pictq_mutex = SDL_CreateMutex();
2595     is->pictq_cond = SDL_CreateCond();
2596
2597     is->subpq_mutex = SDL_CreateMutex();
2598     is->subpq_cond = SDL_CreateCond();
2599
2600     is->av_sync_type = av_sync_type;
2601     is->read_tid = SDL_CreateThread(read_thread, is);
2602     if (!is->read_tid) {
2603         av_free(is);
2604         return NULL;
2605     }
2606     return is;
2607 }
2608
2609 static void stream_cycle_channel(VideoState *is, int codec_type)
2610 {
2611     AVFormatContext *ic = is->ic;
2612     int start_index, stream_index;
2613     AVStream *st;
2614
2615     if (codec_type == AVMEDIA_TYPE_VIDEO)
2616         start_index = is->video_stream;
2617     else if (codec_type == AVMEDIA_TYPE_AUDIO)
2618         start_index = is->audio_stream;
2619     else
2620         start_index = is->subtitle_stream;
2621     if (start_index < (codec_type == AVMEDIA_TYPE_SUBTITLE ? -1 : 0))
2622         return;
2623     stream_index = start_index;
2624     for(;;) {
2625         if (++stream_index >= is->ic->nb_streams)
2626         {
2627             if (codec_type == AVMEDIA_TYPE_SUBTITLE)
2628             {
2629                 stream_index = -1;
2630                 goto the_end;
2631             } else
2632                 stream_index = 0;
2633         }
2634         if (stream_index == start_index)
2635             return;
2636         st = ic->streams[stream_index];
2637         if (st->codec->codec_type == codec_type) {
2638             /* check that parameters are OK */
2639             switch(codec_type) {
2640             case AVMEDIA_TYPE_AUDIO:
2641                 if (st->codec->sample_rate != 0 &&
2642                     st->codec->channels != 0)
2643                     goto the_end;
2644                 break;
2645             case AVMEDIA_TYPE_VIDEO:
2646             case AVMEDIA_TYPE_SUBTITLE:
2647                 goto the_end;
2648             default:
2649                 break;
2650             }
2651         }
2652     }
2653  the_end:
2654     stream_component_close(is, start_index);
2655     stream_component_open(is, stream_index);
2656 }
2657
2658
2659 static void toggle_full_screen(VideoState *is)
2660 {
2661     is_full_screen = !is_full_screen;
2662     video_open(is);
2663 }
2664
2665 static void toggle_pause(VideoState *is)
2666 {
2667     stream_toggle_pause(is);
2668     is->step = 0;
2669 }
2670
2671 static void step_to_next_frame(VideoState *is)
2672 {
2673     /* if the stream is paused unpause it, then step */
2674     if (is->paused)
2675         stream_toggle_pause(is);
2676     is->step = 1;
2677 }
2678
2679 static void toggle_audio_display(VideoState *is)
2680 {
2681     int bgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0x00);
2682     is->show_mode = (is->show_mode + 1) % SHOW_MODE_NB;
2683     fill_rectangle(screen,
2684                 is->xleft, is->ytop, is->width, is->height,
2685                 bgcolor);
2686     SDL_UpdateRect(screen, is->xleft, is->ytop, is->width, is->height);
2687 }
2688
2689 /* handle an event sent by the GUI */
2690 static void event_loop(VideoState *cur_stream)
2691 {
2692     SDL_Event event;
2693     double incr, pos, frac;
2694
2695     for(;;) {
2696         double x;
2697         SDL_WaitEvent(&event);
2698         switch(event.type) {
2699         case SDL_KEYDOWN:
2700             if (exit_on_keydown) {
2701                 do_exit(cur_stream);
2702                 break;
2703             }
2704             switch(event.key.keysym.sym) {
2705             case SDLK_ESCAPE:
2706             case SDLK_q:
2707                 do_exit(cur_stream);
2708                 break;
2709             case SDLK_f:
2710                 toggle_full_screen(cur_stream);
2711                 break;
2712             case SDLK_p:
2713             case SDLK_SPACE:
2714                 toggle_pause(cur_stream);
2715                 break;
2716             case SDLK_s: //S: Step to next frame
2717                 step_to_next_frame(cur_stream);
2718                 break;
2719             case SDLK_a:
2720                 stream_cycle_channel(cur_stream, AVMEDIA_TYPE_AUDIO);
2721                 break;
2722             case SDLK_v:
2723                 stream_cycle_channel(cur_stream, AVMEDIA_TYPE_VIDEO);
2724                 break;
2725             case SDLK_t:
2726                 stream_cycle_channel(cur_stream, AVMEDIA_TYPE_SUBTITLE);
2727                 break;
2728             case SDLK_w:
2729                 toggle_audio_display(cur_stream);
2730                 break;
2731             case SDLK_LEFT:
2732                 incr = -10.0;
2733                 goto do_seek;
2734             case SDLK_RIGHT:
2735                 incr = 10.0;
2736                 goto do_seek;
2737             case SDLK_UP:
2738                 incr = 60.0;
2739                 goto do_seek;
2740             case SDLK_DOWN:
2741                 incr = -60.0;
2742             do_seek:
2743                 if (seek_by_bytes) {
2744                     if (cur_stream->video_stream >= 0 && cur_stream->video_current_pos>=0){
2745                         pos= cur_stream->video_current_pos;
2746                     }else if(cur_stream->audio_stream >= 0 && cur_stream->audio_pkt.pos>=0){
2747                         pos= cur_stream->audio_pkt.pos;
2748                     }else
2749                         pos = avio_tell(cur_stream->ic->pb);
2750                     if (cur_stream->ic->bit_rate)
2751                         incr *= cur_stream->ic->bit_rate / 8.0;
2752                     else
2753                         incr *= 180000.0;
2754                     pos += incr;
2755                     stream_seek(cur_stream, pos, incr, 1);
2756                 } else {
2757                     pos = get_master_clock(cur_stream);
2758                     pos += incr;
2759                     stream_seek(cur_stream, (int64_t)(pos * AV_TIME_BASE), (int64_t)(incr * AV_TIME_BASE), 0);
2760                 }
2761                 break;
2762             default:
2763                 break;
2764             }
2765             break;
2766         case SDL_MOUSEBUTTONDOWN:
2767             if (exit_on_mousedown) {
2768                 do_exit(cur_stream);
2769                 break;
2770             }
2771         case SDL_MOUSEMOTION:
2772             if(event.type ==SDL_MOUSEBUTTONDOWN){
2773                 x= event.button.x;
2774             }else{
2775                 if(event.motion.state != SDL_PRESSED)
2776                     break;
2777                 x= event.motion.x;
2778             }
2779             if(seek_by_bytes || cur_stream->ic->duration<=0){
2780                 uint64_t size=  avio_size(cur_stream->ic->pb);
2781                 stream_seek(cur_stream, size*x/cur_stream->width, 0, 1);
2782             }else{
2783                 int64_t ts;
2784                 int ns, hh, mm, ss;
2785                 int tns, thh, tmm, tss;
2786                 tns = cur_stream->ic->duration/1000000LL;
2787                 thh = tns/3600;
2788                 tmm = (tns%3600)/60;
2789                 tss = (tns%60);
2790                 frac = x/cur_stream->width;
2791                 ns = frac*tns;
2792                 hh = ns/3600;
2793                 mm = (ns%3600)/60;
2794                 ss = (ns%60);
2795                 fprintf(stderr, "Seek to %2.0f%% (%2d:%02d:%02d) of total duration (%2d:%02d:%02d)       \n", frac*100,
2796                         hh, mm, ss, thh, tmm, tss);
2797                 ts = frac*cur_stream->ic->duration;
2798                 if (cur_stream->ic->start_time != AV_NOPTS_VALUE)
2799                     ts += cur_stream->ic->start_time;
2800                 stream_seek(cur_stream, ts, 0, 0);
2801             }
2802             break;
2803         case SDL_VIDEORESIZE:
2804             screen = SDL_SetVideoMode(event.resize.w, event.resize.h, 0,
2805                                       SDL_HWSURFACE|SDL_RESIZABLE|SDL_ASYNCBLIT|SDL_HWACCEL);
2806             screen_width = cur_stream->width = event.resize.w;
2807             screen_height= cur_stream->height= event.resize.h;
2808             break;
2809         case SDL_QUIT:
2810         case FF_QUIT_EVENT:
2811             do_exit(cur_stream);
2812             break;
2813         case FF_ALLOC_EVENT:
2814             video_open(event.user.data1);
2815             alloc_picture(event.user.data1);
2816             break;
2817         case FF_REFRESH_EVENT:
2818             video_refresh(event.user.data1);
2819             cur_stream->refresh=0;
2820             break;
2821         default:
2822             break;
2823         }
2824     }
2825 }
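/* Worked example of the mouse seek above, with assumed numbers: clicking at x = 320 in
   a 640 pixel wide window of a 2 hour file gives frac = 0.5 and a seek target of
   3600 s (plus the container start_time, if set).  Illustrative only, not compiled. */
#if 0
static int64_t mouse_seek_example(void)
{
    double  x = 320, width = 640;
    int64_t file_duration = 7200LL * AV_TIME_BASE;   /* 2 h in microseconds */
    double  frac = x / width;                        /* 0.5 */
    return (int64_t)(frac * file_duration);          /* 3600 s in AV_TIME_BASE units */
}
#endif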
2826
2827 static int opt_frame_size(const char *opt, const char *arg)
2828 {
2829     av_log(NULL, AV_LOG_WARNING, "Option -s is deprecated, use -video_size.\n");
2830     return opt_default("video_size", arg);
2831 }
2832
2833 static int opt_width(const char *opt, const char *arg)
2834 {
2835     screen_width = parse_number_or_die(opt, arg, OPT_INT64, 1, INT_MAX);
2836     return 0;
2837 }
2838
2839 static int opt_height(const char *opt, const char *arg)
2840 {
2841     screen_height = parse_number_or_die(opt, arg, OPT_INT64, 1, INT_MAX);
2842     return 0;
2843 }
2844
2845 static int opt_format(const char *opt, const char *arg)
2846 {
2847     file_iformat = av_find_input_format(arg);
2848     if (!file_iformat) {
2849         fprintf(stderr, "Unknown input format: %s\n", arg);
2850         return AVERROR(EINVAL);
2851     }
2852     return 0;
2853 }
2854
2855 static int opt_frame_pix_fmt(const char *opt, const char *arg)
2856 {
2857     av_log(NULL, AV_LOG_WARNING, "Option -pix_fmt is deprecated, use -pixel_format.\n");
2858     return opt_default("pixel_format", arg);
2859 }
2860
2861 static int opt_sync(const char *opt, const char *arg)
2862 {
2863     if (!strcmp(arg, "audio"))
2864         av_sync_type = AV_SYNC_AUDIO_MASTER;
2865     else if (!strcmp(arg, "video"))
2866         av_sync_type = AV_SYNC_VIDEO_MASTER;
2867     else if (!strcmp(arg, "ext"))
2868         av_sync_type = AV_SYNC_EXTERNAL_CLOCK;
2869     else {
2870         fprintf(stderr, "Unknown value for %s: %s\n", opt, arg);
2871         exit(1);
2872     }
2873     return 0;
2874 }
2875
2876 static int opt_seek(const char *opt, const char *arg)
2877 {
2878     start_time = parse_time_or_die(opt, arg, 1);
2879     return 0;
2880 }
2881
2882 static int opt_duration(const char *opt, const char *arg)
2883 {
2884     duration = parse_time_or_die(opt, arg, 1);
2885     return 0;
2886 }
2887
2888 static int opt_show_mode(const char *opt, const char *arg)
2889 {
2890     show_mode = !strcmp(arg, "video") ? SHOW_MODE_VIDEO :
2891                 !strcmp(arg, "waves") ? SHOW_MODE_WAVES :
2892                 !strcmp(arg, "rdft" ) ? SHOW_MODE_RDFT  :
2893                 parse_number_or_die(opt, arg, OPT_INT, 0, SHOW_MODE_NB-1);
2894     return 0;
2895 }
2896
2897 static void opt_input_file(void *optctx, const char *filename)
2898 {
2899     if (input_filename) {
2900         fprintf(stderr, "Argument '%s' provided as input filename, but '%s' was already specified.\n",
2901                 filename, input_filename);
2902         exit_program(1);
2903     }
2904     if (!strcmp(filename, "-"))
2905         filename = "pipe:";
2906     input_filename = filename;
2907 }
2908
2909 static int dummy;
2910
2911 static const OptionDef options[] = {
2912 #include "cmdutils_common_opts.h"
2913     { "x", HAS_ARG, {(void*)opt_width}, "force displayed width", "width" },
2914     { "y", HAS_ARG, {(void*)opt_height}, "force displayed height", "height" },
2915     { "s", HAS_ARG | OPT_VIDEO, {(void*)opt_frame_size}, "set frame size (WxH or abbreviation)", "size" },
2916     { "fs", OPT_BOOL, {(void*)&is_full_screen}, "force full screen" },
2917     { "an", OPT_BOOL, {(void*)&audio_disable}, "disable audio" },
2918     { "vn", OPT_BOOL, {(void*)&video_disable}, "disable video" },
2919     { "ast", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&wanted_stream[AVMEDIA_TYPE_AUDIO]}, "select desired audio stream", "stream_number" },
2920     { "vst", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&wanted_stream[AVMEDIA_TYPE_VIDEO]}, "select desired video stream", "stream_number" },
2921     { "sst", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&wanted_stream[AVMEDIA_TYPE_SUBTITLE]}, "select desired subtitle stream", "stream_number" },
2922     { "ss", HAS_ARG, {(void*)&opt_seek}, "seek to a given position in seconds", "pos" },
2923     { "t", HAS_ARG, {(void*)&opt_duration}, "play  \"duration\" seconds of audio/video", "duration" },
2924     { "bytes", OPT_INT | HAS_ARG, {(void*)&seek_by_bytes}, "seek by bytes 0=off 1=on -1=auto", "val" },
2925     { "nodisp", OPT_BOOL, {(void*)&display_disable}, "disable graphical display" },
2926     { "f", HAS_ARG, {(void*)opt_format}, "force format", "fmt" },
2927     { "pix_fmt", HAS_ARG | OPT_EXPERT | OPT_VIDEO, {(void*)opt_frame_pix_fmt}, "set pixel format", "format" },
2928     { "stats", OPT_BOOL | OPT_EXPERT, {(void*)&show_status}, "show status", "" },
2929     { "bug", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&workaround_bugs}, "workaround bugs", "" },
2930     { "fast", OPT_BOOL | OPT_EXPERT, {(void*)&fast}, "non spec compliant optimizations", "" },
2931     { "genpts", OPT_BOOL | OPT_EXPERT, {(void*)&genpts}, "generate pts", "" },
2932     { "drp", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&decoder_reorder_pts}, "let decoder reorder pts 0=off 1=on -1=auto", ""},
2933     { "lowres", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&lowres}, "", "" },
2934     { "skiploop", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&skip_loop_filter}, "", "" },
2935     { "skipframe", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&skip_frame}, "", "" },
2936     { "skipidct", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&skip_idct}, "", "" },
2937     { "idct", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&idct}, "set idct algo",  "algo" },
2938     { "er", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&error_recognition}, "set error detection threshold (0-4)",  "threshold" },
2939     { "ec", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&error_concealment}, "set error concealment options",  "bit_mask" },
2940     { "sync", HAS_ARG | OPT_EXPERT, {(void*)opt_sync}, "set audio-video sync. type (type=audio/video/ext)", "type" },
2941     { "autoexit", OPT_BOOL | OPT_EXPERT, {(void*)&autoexit}, "exit at the end", "" },
2942     { "exitonkeydown", OPT_BOOL | OPT_EXPERT, {(void*)&exit_on_keydown}, "exit on key down", "" },
2943     { "exitonmousedown", OPT_BOOL | OPT_EXPERT, {(void*)&exit_on_mousedown}, "exit on mouse down", "" },
2944     { "loop", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&loop}, "set number of times the playback shall be looped", "loop count" },
2945     { "framedrop", OPT_BOOL | OPT_EXPERT, {(void*)&framedrop}, "drop frames when cpu is too slow", "" },
2946     { "window_title", OPT_STRING | HAS_ARG, {(void*)&window_title}, "set window title", "window title" },
2947 #if CONFIG_AVFILTER
2948     { "vf", OPT_STRING | HAS_ARG, {(void*)&vfilters}, "video filters", "filter list" },
2949 #endif
2950     { "rdftspeed", OPT_INT | HAS_ARG| OPT_AUDIO | OPT_EXPERT, {(void*)&rdftspeed}, "rdft speed", "msecs" },
2951     { "showmode", HAS_ARG, {(void*)opt_show_mode}, "select show mode (0 = video, 1 = waves, 2 = RDFT)", "mode" },
2952     { "default", HAS_ARG | OPT_AUDIO | OPT_VIDEO | OPT_EXPERT, {(void*)opt_default}, "generic catch all option", "" },
2953     { "i", OPT_BOOL, {(void *)&dummy}, "read specified file", "input_file"},
2954     { NULL, },
2955 };
2956
2957 static void show_usage(void)
2958 {
2959     printf("Simple media player\n");
2960     printf("usage: %s [options] input_file\n", program_name);
2961     printf("\n");
2962 }
2963
2964 static int opt_help(const char *opt, const char *arg)
2965 {
2966     const AVClass *class;
2967     av_log_set_callback(log_callback_help);
2968     show_usage();
2969     show_help_options(options, "Main options:\n",
2970                       OPT_EXPERT, 0);
2971     show_help_options(options, "\nAdvanced options:\n",
2972                       OPT_EXPERT, OPT_EXPERT);
2973     printf("\n");
2974     class = avcodec_get_class();
2975     av_opt_show2(&class, NULL,
2976                  AV_OPT_FLAG_DECODING_PARAM, 0);
2977     printf("\n");
2978     class = avformat_get_class();
2979     av_opt_show2(&class, NULL,
2980                  AV_OPT_FLAG_DECODING_PARAM, 0);
2981 #if !CONFIG_AVFILTER
2982     printf("\n");
2983     class = sws_get_class();
2984     av_opt_show2(&class, NULL,
2985                  AV_OPT_FLAG_ENCODING_PARAM, 0);
2986 #endif
2987     printf("\nWhile playing:\n"
2988            "q, ESC              quit\n"
2989            "f                   toggle full screen\n"
2990            "p, SPC              pause\n"
2991            "a                   cycle audio channel\n"
2992            "v                   cycle video channel\n"
2993            "t                   cycle subtitle channel\n"
2994            "w                   show audio waves\n"
2995            "s                   activate frame-step mode\n"
2996            "left/right          seek backward/forward 10 seconds\n"
2997            "down/up             seek backward/forward 1 minute\n"
2998            "mouse click         seek to percentage in file corresponding to fraction of width\n"
2999            );
3000     return 0;
3001 }
3002
3003 static int lockmgr(void **mtx, enum AVLockOp op)
3004 {
3005    switch(op) {
3006       case AV_LOCK_CREATE:
3007           *mtx = SDL_CreateMutex();
3008           if(!*mtx)
3009               return 1;
3010           return 0;
3011       case AV_LOCK_OBTAIN:
3012           return !!SDL_LockMutex(*mtx);
3013       case AV_LOCK_RELEASE:
3014           return !!SDL_UnlockMutex(*mtx);
3015       case AV_LOCK_DESTROY:
3016           SDL_DestroyMutex(*mtx);
3017           return 0;
3018    }
3019    return 1;
3020 }
3021
3022 /* program entry point */
3023 int main(int argc, char **argv)
3024 {
3025     int flags;
3026     VideoState *is;
3027
3028     av_log_set_flags(AV_LOG_SKIP_REPEATED);
3029
3030     /* register all codecs, demuxers and protocols */
3031     avcodec_register_all();
3032 #if CONFIG_AVDEVICE
3033     avdevice_register_all();
3034 #endif
3035 #if CONFIG_AVFILTER
3036     avfilter_register_all();
3037 #endif
3038     av_register_all();
3039
3040     init_opts();
3041
3042     show_banner();
3043
3044     parse_options(NULL, argc, argv, options, opt_input_file);
3045
3046     if (!input_filename) {
3047         show_usage();
3048         fprintf(stderr, "An input file must be specified\n");
3049         fprintf(stderr, "Use -h to get full help or, even better, run 'man %s'\n", program_name);
3050         exit(1);
3051     }
3052
3053     if (display_disable) {
3054         video_disable = 1;
3055     }
3056     flags = SDL_INIT_VIDEO | SDL_INIT_AUDIO | SDL_INIT_TIMER;
3057     if (audio_disable)
3058         flags &= ~SDL_INIT_AUDIO;
3059 #if !defined(__MINGW32__) && !defined(__APPLE__)
3060     flags |= SDL_INIT_EVENTTHREAD; /* Not supported on Windows or Mac OS X */
3061 #endif
3062     if (SDL_Init (flags)) {
3063         fprintf(stderr, "Could not initialize SDL - %s\n", SDL_GetError());
3064         fprintf(stderr, "(Did you set the DISPLAY variable?)\n");
3065         exit(1);
3066     }
3067
3068     if (!display_disable) {
3069 #if HAVE_SDL_VIDEO_SIZE
3070         const SDL_VideoInfo *vi = SDL_GetVideoInfo();
3071         fs_screen_width = vi->current_w;
3072         fs_screen_height = vi->current_h;
3073 #endif
3074     }
3075
3076     SDL_EventState(SDL_ACTIVEEVENT, SDL_IGNORE);
3077     SDL_EventState(SDL_SYSWMEVENT, SDL_IGNORE);
3078     SDL_EventState(SDL_USEREVENT, SDL_IGNORE);
3079
3080     if (av_lockmgr_register(lockmgr)) {
3081         fprintf(stderr, "Could not initialize lock manager!\n");
3082         do_exit(NULL);
3083     }
3084
3085     av_init_packet(&flush_pkt);
3086     flush_pkt.data= "FLUSH";
3087
3088     is = stream_open(input_filename, file_iformat);
3089     if (!is) {
3090         fprintf(stderr, "Failed to initialize VideoState!\n");
3091         do_exit(NULL);
3092     }
3093
3094     event_loop(is);
3095
3096     /* never returns */
3097
3098     return 0;
3099 }