1 /*
2  * FFplay: Simple Media Player based on the FFmpeg libraries
3  * Copyright (c) 2003 Fabrice Bellard
4  *
5  * This file is part of FFmpeg.
6  *
7  * FFmpeg is free software; you can redistribute it and/or
8  * modify it under the terms of the GNU Lesser General Public
9  * License as published by the Free Software Foundation; either
10  * version 2.1 of the License, or (at your option) any later version.
11  *
12  * FFmpeg is distributed in the hope that it will be useful,
13  * but WITHOUT ANY WARRANTY; without even the implied warranty of
14  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
15  * Lesser General Public License for more details.
16  *
17  * You should have received a copy of the GNU Lesser General Public
18  * License along with FFmpeg; if not, write to the Free Software
19  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
20  */
21
22 #define _XOPEN_SOURCE 600
23
24 #include "config.h"
25 #include <inttypes.h>
26 #include <math.h>
27 #include <limits.h>
28 #include "libavutil/avstring.h"
29 #include "libavutil/colorspace.h"
30 #include "libavutil/pixdesc.h"
31 #include "libavutil/imgutils.h"
32 #include "libavutil/parseutils.h"
33 #include "libavutil/samplefmt.h"
34 #include "libavutil/avassert.h"
35 #include "libavformat/avformat.h"
36 #include "libavdevice/avdevice.h"
37 #include "libswscale/swscale.h"
38 #include "libavcodec/audioconvert.h"
39 #include "libavcodec/opt.h"
40 #include "libavcodec/avfft.h"
41
42 #if CONFIG_AVFILTER
43 # include "libavfilter/avfilter.h"
44 # include "libavfilter/avfiltergraph.h"
45 #endif
46
47 #include "cmdutils.h"
48
49 #include <SDL.h>
50 #include <SDL_thread.h>
51
52 #ifdef __MINGW32__
53 #undef main /* We don't want SDL to override our main() */
54 #endif
55
56 #include <unistd.h>
57 #include <assert.h>
58
59 const char program_name[] = "FFplay";
60 const int program_birth_year = 2003;
61
62 //#define DEBUG
63 //#define DEBUG_SYNC
64
65 #define MAX_QUEUE_SIZE (15 * 1024 * 1024)
66 #define MIN_AUDIOQ_SIZE (20 * 16 * 1024)
67 #define MIN_FRAMES 5
68
69 /* SDL audio buffer size, in samples. Should be small to have precise
70    A/V sync as SDL does not have hardware buffer fullness info. */
71 #define SDL_AUDIO_BUFFER_SIZE 1024
72
73 /* no AV sync correction is done if below the AV sync threshold */
74 #define AV_SYNC_THRESHOLD 0.01
75 /* no AV correction is done if too big error */
76 #define AV_NOSYNC_THRESHOLD 10.0
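/* In practice this means: A/V errors below AV_SYNC_THRESHOLD (10 ms) are
   considered imperceptible and left alone, errors above AV_NOSYNC_THRESHOLD
   (10 s) are assumed to come from bogus timestamps and are not corrected
   either, and only errors in between are actively compensated by shortening
   or stretching the delay before the next video frame. */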
77
78 #define FRAME_SKIP_FACTOR 0.05
79
80 /* maximum audio speed change to get correct sync */
81 #define SAMPLE_CORRECTION_PERCENT_MAX 10
82
83 /* we use about AUDIO_DIFF_AVG_NB A-V differences to make the average */
84 #define AUDIO_DIFF_AVG_NB   20
85
86 /* NOTE: the size must be big enough to compensate for the hardware audio buffer size */
87 #define SAMPLE_ARRAY_SIZE (2*65536)
88
89 static int sws_flags = SWS_BICUBIC;
90
91 typedef struct PacketQueue {
92     AVPacketList *first_pkt, *last_pkt;
93     int nb_packets;
94     int size;
95     int abort_request;
96     SDL_mutex *mutex;
97     SDL_cond *cond;
98 } PacketQueue;
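/* A PacketQueue is a simple blocking FIFO shared between the demuxing
   thread (producer, packet_queue_put) and a decoding thread (consumer,
   packet_queue_get): the consumer sleeps on 'cond' while the list is empty,
   'size' tracks the buffered payload in bytes, and 'abort_request' lets
   both sides bail out promptly when the stream is being closed or seeked. */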
99
100 #define VIDEO_PICTURE_QUEUE_SIZE 2
101 #define SUBPICTURE_QUEUE_SIZE 4
102
103 typedef struct VideoPicture {
104     double pts;                                  ///<presentation time stamp for this picture
105     double target_clock;                         ///<av_gettime() time at which this should be displayed ideally
106     int64_t pos;                                 ///<byte position in file
107     SDL_Overlay *bmp;
108     int width, height; /* source height & width */
109     int allocated;
110     enum PixelFormat pix_fmt;
111
112 #if CONFIG_AVFILTER
113     AVFilterBufferRef *picref;
114 #endif
115 } VideoPicture;
116
117 typedef struct SubPicture {
118     double pts; /* presentation time stamp for this picture */
119     AVSubtitle sub;
120 } SubPicture;
121
122 enum {
123     AV_SYNC_AUDIO_MASTER, /* default choice */
124     AV_SYNC_VIDEO_MASTER,
125     AV_SYNC_EXTERNAL_CLOCK, /* synchronize to an external clock */
126 };
127
128 typedef struct VideoState {
129     SDL_Thread *parse_tid;
130     SDL_Thread *video_tid;
131     SDL_Thread *refresh_tid;
132     AVInputFormat *iformat;
133     int no_background;
134     int abort_request;
135     int paused;
136     int last_paused;
137     int seek_req;
138     int seek_flags;
139     int64_t seek_pos;
140     int64_t seek_rel;
141     int read_pause_return;
142     AVFormatContext *ic;
143     int dtg_active_format;
144
145     int audio_stream;
146
147     int av_sync_type;
148     double external_clock; /* external clock base */
149     int64_t external_clock_time;
150
151     double audio_clock;
152     double audio_diff_cum; /* used for AV difference average computation */
153     double audio_diff_avg_coef;
154     double audio_diff_threshold;
155     int audio_diff_avg_count;
156     AVStream *audio_st;
157     PacketQueue audioq;
158     int audio_hw_buf_size;
159     /* samples output by the codec. We reserve more space for A/V sync
160        compensation. */
161     DECLARE_ALIGNED(16,uint8_t,audio_buf1)[(AVCODEC_MAX_AUDIO_FRAME_SIZE * 3) / 2];
162     DECLARE_ALIGNED(16,uint8_t,audio_buf2)[(AVCODEC_MAX_AUDIO_FRAME_SIZE * 3) / 2];
163     uint8_t *audio_buf;
164     unsigned int audio_buf_size; /* in bytes */
165     int audio_buf_index; /* in bytes */
166     AVPacket audio_pkt_temp;
167     AVPacket audio_pkt;
168     enum AVSampleFormat audio_src_fmt;
169     AVAudioConvert *reformat_ctx;
170
171     int show_audio; /* if true, display audio samples */
172     int16_t sample_array[SAMPLE_ARRAY_SIZE];
173     int sample_array_index;
174     int last_i_start;
175     RDFTContext *rdft;
176     int rdft_bits;
177     FFTSample *rdft_data;
178     int xpos;
179
180     SDL_Thread *subtitle_tid;
181     int subtitle_stream;
182     int subtitle_stream_changed;
183     AVStream *subtitle_st;
184     PacketQueue subtitleq;
185     SubPicture subpq[SUBPICTURE_QUEUE_SIZE];
186     int subpq_size, subpq_rindex, subpq_windex;
187     SDL_mutex *subpq_mutex;
188     SDL_cond *subpq_cond;
189
190     double frame_timer;
191     double frame_last_pts;
192     double frame_last_delay;
193     double video_clock;                          ///<pts of last decoded frame / predicted pts of next decoded frame
194     int video_stream;
195     AVStream *video_st;
196     PacketQueue videoq;
197     double video_current_pts;                    ///<current displayed pts (different from video_clock if frame fifos are used)
198     double video_current_pts_drift;              ///<video_current_pts - time (av_gettime) at which we updated video_current_pts - used to have running video pts
199     int64_t video_current_pos;                   ///<current displayed file pos
200     VideoPicture pictq[VIDEO_PICTURE_QUEUE_SIZE];
201     int pictq_size, pictq_rindex, pictq_windex;
202     SDL_mutex *pictq_mutex;
203     SDL_cond *pictq_cond;
204 #if !CONFIG_AVFILTER
205     struct SwsContext *img_convert_ctx;
206 #endif
207
208     //    QETimer *video_timer;
209     char filename[1024];
210     int width, height, xleft, ytop;
211
212 #if CONFIG_AVFILTER
213     AVFilterContext *out_video_filter;          ///<the last filter in the video chain
214 #endif
215
216     float skip_frames;
217     float skip_frames_index;
218     int refresh;
219 } VideoState;
220
221 static void show_help(void);
222 static int audio_write_get_buf_size(VideoState *is);
223
224 /* options specified by the user */
225 static AVInputFormat *file_iformat;
226 static const char *input_filename;
227 static const char *window_title;
228 static int fs_screen_width;
229 static int fs_screen_height;
230 static int screen_width = 0;
231 static int screen_height = 0;
232 static int frame_width = 0;
233 static int frame_height = 0;
234 static enum PixelFormat frame_pix_fmt = PIX_FMT_NONE;
235 static int audio_disable;
236 static int video_disable;
237 static int wanted_stream[AVMEDIA_TYPE_NB]={
238     [AVMEDIA_TYPE_AUDIO]=-1,
239     [AVMEDIA_TYPE_VIDEO]=-1,
240     [AVMEDIA_TYPE_SUBTITLE]=-1,
241 };
242 static int seek_by_bytes=-1;
243 static int display_disable;
244 static int show_status = 1;
245 static int av_sync_type = AV_SYNC_AUDIO_MASTER;
246 static int64_t start_time = AV_NOPTS_VALUE;
247 static int64_t duration = AV_NOPTS_VALUE;
248 static int debug = 0;
249 static int debug_mv = 0;
250 static int step = 0;
251 static int thread_count = 1;
252 static int workaround_bugs = 1;
253 static int fast = 0;
254 static int genpts = 0;
255 static int lowres = 0;
256 static int idct = FF_IDCT_AUTO;
257 static enum AVDiscard skip_frame= AVDISCARD_DEFAULT;
258 static enum AVDiscard skip_idct= AVDISCARD_DEFAULT;
259 static enum AVDiscard skip_loop_filter= AVDISCARD_DEFAULT;
260 static int error_recognition = FF_ER_CAREFUL;
261 static int error_concealment = 3;
262 static int decoder_reorder_pts= -1;
263 static int autoexit;
264 static int exit_on_keydown;
265 static int exit_on_mousedown;
266 static int loop=1;
267 static int framedrop=1;
268
269 static int rdftspeed=20;
270 #if CONFIG_AVFILTER
271 static char *vfilters = NULL;
272 #endif
273
274 /* current context */
275 static int is_full_screen;
276 static VideoState *cur_stream;
277 static int64_t audio_callback_time;
278
279 static AVPacket flush_pkt;
280
281 #define FF_ALLOC_EVENT   (SDL_USEREVENT)
282 #define FF_REFRESH_EVENT (SDL_USEREVENT + 1)
283 #define FF_QUIT_EVENT    (SDL_USEREVENT + 2)
284
285 static SDL_Surface *screen;
286
287 static int packet_queue_put(PacketQueue *q, AVPacket *pkt)
288 {
289     AVPacketList *pkt1;
290
291     /* duplicate the packet */
292     if (pkt!=&flush_pkt && av_dup_packet(pkt) < 0)
293         return -1;
294
295     pkt1 = av_malloc(sizeof(AVPacketList));
296     if (!pkt1)
297         return -1;
298     pkt1->pkt = *pkt;
299     pkt1->next = NULL;
300
301
302     SDL_LockMutex(q->mutex);
303
304     if (!q->last_pkt)
305
306         q->first_pkt = pkt1;
307     else
308         q->last_pkt->next = pkt1;
309     q->last_pkt = pkt1;
310     q->nb_packets++;
311     q->size += pkt1->pkt.size + sizeof(*pkt1);
312     /* XXX: should duplicate packet data in DV case */
313     SDL_CondSignal(q->cond);
314
315     SDL_UnlockMutex(q->mutex);
316     return 0;
317 }
318
319 /* packet queue handling */
320 static void packet_queue_init(PacketQueue *q)
321 {
322     memset(q, 0, sizeof(PacketQueue));
323     q->mutex = SDL_CreateMutex();
324     q->cond = SDL_CreateCond();
325     packet_queue_put(q, &flush_pkt);
326 }
327
328 static void packet_queue_flush(PacketQueue *q)
329 {
330     AVPacketList *pkt, *pkt1;
331
332     SDL_LockMutex(q->mutex);
333     for(pkt = q->first_pkt; pkt != NULL; pkt = pkt1) {
334         pkt1 = pkt->next;
335         av_free_packet(&pkt->pkt);
336         av_freep(&pkt);
337     }
338     q->last_pkt = NULL;
339     q->first_pkt = NULL;
340     q->nb_packets = 0;
341     q->size = 0;
342     SDL_UnlockMutex(q->mutex);
343 }
344
345 static void packet_queue_end(PacketQueue *q)
346 {
347     packet_queue_flush(q);
348     SDL_DestroyMutex(q->mutex);
349     SDL_DestroyCond(q->cond);
350 }
351
352 static void packet_queue_abort(PacketQueue *q)
353 {
354     SDL_LockMutex(q->mutex);
355
356     q->abort_request = 1;
357
358     SDL_CondSignal(q->cond);
359
360     SDL_UnlockMutex(q->mutex);
361 }
362
363 /* return < 0 if aborted, 0 if no packet and > 0 if packet.  */
364 static int packet_queue_get(PacketQueue *q, AVPacket *pkt, int block)
365 {
366     AVPacketList *pkt1;
367     int ret;
368
369     SDL_LockMutex(q->mutex);
370
371     for(;;) {
372         if (q->abort_request) {
373             ret = -1;
374             break;
375         }
376
377         pkt1 = q->first_pkt;
378         if (pkt1) {
379             q->first_pkt = pkt1->next;
380             if (!q->first_pkt)
381                 q->last_pkt = NULL;
382             q->nb_packets--;
383             q->size -= pkt1->pkt.size + sizeof(*pkt1);
384             *pkt = pkt1->pkt;
385             av_free(pkt1);
386             ret = 1;
387             break;
388         } else if (!block) {
389             ret = 0;
390             break;
391         } else {
392             SDL_CondWait(q->cond, q->mutex);
393         }
394     }
395     SDL_UnlockMutex(q->mutex);
396     return ret;
397 }
398
399 static inline void fill_rectangle(SDL_Surface *screen,
400                                   int x, int y, int w, int h, int color)
401 {
402     SDL_Rect rect;
403     rect.x = x;
404     rect.y = y;
405     rect.w = w;
406     rect.h = h;
407     SDL_FillRect(screen, &rect, color);
408 }
409
410 #if 0
411 /* draw only the border of a rectangle */
412 void fill_border(VideoState *s, int x, int y, int w, int h, int color)
413 {
414     int w1, w2, h1, h2;
415
416     /* fill the background */
417     w1 = x;
418     if (w1 < 0)
419         w1 = 0;
420     w2 = s->width - (x + w);
421     if (w2 < 0)
422         w2 = 0;
423     h1 = y;
424     if (h1 < 0)
425         h1 = 0;
426     h2 = s->height - (y + h);
427     if (h2 < 0)
428         h2 = 0;
429     fill_rectangle(screen,
430                    s->xleft, s->ytop,
431                    w1, s->height,
432                    color);
433     fill_rectangle(screen,
434                    s->xleft + s->width - w2, s->ytop,
435                    w2, s->height,
436                    color);
437     fill_rectangle(screen,
438                    s->xleft + w1, s->ytop,
439                    s->width - w1 - w2, h1,
440                    color);
441     fill_rectangle(screen,
442                    s->xleft + w1, s->ytop + s->height - h2,
443                    s->width - w1 - w2, h2,
444                    color);
445 }
446 #endif
447
448 #define ALPHA_BLEND(a, oldp, newp, s)\
449 ((((oldp << s) * (255 - (a))) + (newp * (a))) / (255 << s))
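/* ALPHA_BLEND mixes an existing sample 'oldp' with a new sample 'newp'
   using an alpha value 'a' in the 0..255 range; the shift 's' lets 'newp'
   be the sum of 2^s source samples (as used below when averaging chroma
   over 2 or 4 pixels). For example, with s=0, a=128, oldp=16, newp=235:
       (16 * (255-128) + 235 * 128) / 255 = 125,
   i.e. roughly halfway between the two values. */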
450
451 #define RGBA_IN(r, g, b, a, s)\
452 {\
453     unsigned int v = ((const uint32_t *)(s))[0];\
454     a = (v >> 24) & 0xff;\
455     r = (v >> 16) & 0xff;\
456     g = (v >> 8) & 0xff;\
457     b = v & 0xff;\
458 }
459
460 #define YUVA_IN(y, u, v, a, s, pal)\
461 {\
462     unsigned int val = ((const uint32_t *)(pal))[*(const uint8_t*)(s)];\
463     a = (val >> 24) & 0xff;\
464     y = (val >> 16) & 0xff;\
465     u = (val >> 8) & 0xff;\
466     v = val & 0xff;\
467 }
468
469 #define YUVA_OUT(d, y, u, v, a)\
470 {\
471     ((uint32_t *)(d))[0] = (a << 24) | (y << 16) | (u << 8) | v;\
472 }
473
474
475 #define BPP 1
476
477 static void blend_subrect(AVPicture *dst, const AVSubtitleRect *rect, int imgw, int imgh)
478 {
479     int wrap, wrap3, width2, skip2;
480     int y, u, v, a, u1, v1, a1, w, h;
481     uint8_t *lum, *cb, *cr;
482     const uint8_t *p;
483     const uint32_t *pal;
484     int dstx, dsty, dstw, dsth;
485
486     dstw = av_clip(rect->w, 0, imgw);
487     dsth = av_clip(rect->h, 0, imgh);
488     dstx = av_clip(rect->x, 0, imgw - dstw);
489     dsty = av_clip(rect->y, 0, imgh - dsth);
490     lum = dst->data[0] + dsty * dst->linesize[0];
491     cb = dst->data[1] + (dsty >> 1) * dst->linesize[1];
492     cr = dst->data[2] + (dsty >> 1) * dst->linesize[2];
493
494     width2 = ((dstw + 1) >> 1) + (dstx & ~dstw & 1);
495     skip2 = dstx >> 1;
496     wrap = dst->linesize[0];
497     wrap3 = rect->pict.linesize[0];
498     p = rect->pict.data[0];
499     pal = (const uint32_t *)rect->pict.data[1];  /* Now in YCrCb! */
500
501     if (dsty & 1) {
502         lum += dstx;
503         cb += skip2;
504         cr += skip2;
505
506         if (dstx & 1) {
507             YUVA_IN(y, u, v, a, p, pal);
508             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
509             cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
510             cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
511             cb++;
512             cr++;
513             lum++;
514             p += BPP;
515         }
516         for(w = dstw - (dstx & 1); w >= 2; w -= 2) {
517             YUVA_IN(y, u, v, a, p, pal);
518             u1 = u;
519             v1 = v;
520             a1 = a;
521             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
522
523             YUVA_IN(y, u, v, a, p + BPP, pal);
524             u1 += u;
525             v1 += v;
526             a1 += a;
527             lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
528             cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
529             cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
530             cb++;
531             cr++;
532             p += 2 * BPP;
533             lum += 2;
534         }
535         if (w) {
536             YUVA_IN(y, u, v, a, p, pal);
537             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
538             cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
539             cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
540             p++;
541             lum++;
542         }
543         p += wrap3 - dstw * BPP;
544         lum += wrap - dstw - dstx;
545         cb += dst->linesize[1] - width2 - skip2;
546         cr += dst->linesize[2] - width2 - skip2;
547     }
548     for(h = dsth - (dsty & 1); h >= 2; h -= 2) {
549         lum += dstx;
550         cb += skip2;
551         cr += skip2;
552
553         if (dstx & 1) {
554             YUVA_IN(y, u, v, a, p, pal);
555             u1 = u;
556             v1 = v;
557             a1 = a;
558             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
559             p += wrap3;
560             lum += wrap;
561             YUVA_IN(y, u, v, a, p, pal);
562             u1 += u;
563             v1 += v;
564             a1 += a;
565             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
566             cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
567             cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
568             cb++;
569             cr++;
570             p += -wrap3 + BPP;
571             lum += -wrap + 1;
572         }
573         for(w = dstw - (dstx & 1); w >= 2; w -= 2) {
574             YUVA_IN(y, u, v, a, p, pal);
575             u1 = u;
576             v1 = v;
577             a1 = a;
578             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
579
580             YUVA_IN(y, u, v, a, p + BPP, pal);
581             u1 += u;
582             v1 += v;
583             a1 += a;
584             lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
585             p += wrap3;
586             lum += wrap;
587
588             YUVA_IN(y, u, v, a, p, pal);
589             u1 += u;
590             v1 += v;
591             a1 += a;
592             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
593
594             YUVA_IN(y, u, v, a, p + BPP, pal);
595             u1 += u;
596             v1 += v;
597             a1 += a;
598             lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
599
600             cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 2);
601             cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 2);
602
603             cb++;
604             cr++;
605             p += -wrap3 + 2 * BPP;
606             lum += -wrap + 2;
607         }
608         if (w) {
609             YUVA_IN(y, u, v, a, p, pal);
610             u1 = u;
611             v1 = v;
612             a1 = a;
613             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
614             p += wrap3;
615             lum += wrap;
616             YUVA_IN(y, u, v, a, p, pal);
617             u1 += u;
618             v1 += v;
619             a1 += a;
620             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
621             cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
622             cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
623             cb++;
624             cr++;
625             p += -wrap3 + BPP;
626             lum += -wrap + 1;
627         }
628         p += wrap3 + (wrap3 - dstw * BPP);
629         lum += wrap + (wrap - dstw - dstx);
630         cb += dst->linesize[1] - width2 - skip2;
631         cr += dst->linesize[2] - width2 - skip2;
632     }
633     /* handle odd height */
634     if (h) {
635         lum += dstx;
636         cb += skip2;
637         cr += skip2;
638
639         if (dstx & 1) {
640             YUVA_IN(y, u, v, a, p, pal);
641             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
642             cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
643             cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
644             cb++;
645             cr++;
646             lum++;
647             p += BPP;
648         }
649         for(w = dstw - (dstx & 1); w >= 2; w -= 2) {
650             YUVA_IN(y, u, v, a, p, pal);
651             u1 = u;
652             v1 = v;
653             a1 = a;
654             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
655
656             YUVA_IN(y, u, v, a, p + BPP, pal);
657             u1 += u;
658             v1 += v;
659             a1 += a;
660             lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
661             cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u, 1);
662             cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v, 1);
663             cb++;
664             cr++;
665             p += 2 * BPP;
666             lum += 2;
667         }
668         if (w) {
669             YUVA_IN(y, u, v, a, p, pal);
670             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
671             cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
672             cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
673         }
674     }
675 }
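/* blend_subrect() blends a palettized subtitle rectangle into a YUV 4:2:0
   picture: luma is alpha-blended per pixel, while chroma is subsampled 2x2,
   so U, V and alpha are accumulated over pairs of pixels (and pairs of
   lines) before blending; the odd leading/trailing rows and columns are
   handled by the separate branches above. */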
676
677 static void free_subpicture(SubPicture *sp)
678 {
679     avsubtitle_free(&sp->sub);
680 }
681
682 static void video_image_display(VideoState *is)
683 {
684     VideoPicture *vp;
685     SubPicture *sp;
686     AVPicture pict;
687     float aspect_ratio;
688     int width, height, x, y;
689     SDL_Rect rect;
690     int i;
691
692     vp = &is->pictq[is->pictq_rindex];
693     if (vp->bmp) {
694 #if CONFIG_AVFILTER
695          if (vp->picref->video->pixel_aspect.num == 0)
696              aspect_ratio = 0;
697          else
698              aspect_ratio = av_q2d(vp->picref->video->pixel_aspect);
699 #else
700
701         /* XXX: use variable in the frame */
702         if (is->video_st->sample_aspect_ratio.num)
703             aspect_ratio = av_q2d(is->video_st->sample_aspect_ratio);
704         else if (is->video_st->codec->sample_aspect_ratio.num)
705             aspect_ratio = av_q2d(is->video_st->codec->sample_aspect_ratio);
706         else
707             aspect_ratio = 0;
708 #endif
709         if (aspect_ratio <= 0.0)
710             aspect_ratio = 1.0;
711         aspect_ratio *= (float)vp->width / (float)vp->height;
712
713         if (is->subtitle_st)
714         {
715             if (is->subpq_size > 0)
716             {
717                 sp = &is->subpq[is->subpq_rindex];
718
719                 if (vp->pts >= sp->pts + ((float) sp->sub.start_display_time / 1000))
720                 {
721                     SDL_LockYUVOverlay (vp->bmp);
722
723                     pict.data[0] = vp->bmp->pixels[0];
724                     pict.data[1] = vp->bmp->pixels[2];
725                     pict.data[2] = vp->bmp->pixels[1];
726
727                     pict.linesize[0] = vp->bmp->pitches[0];
728                     pict.linesize[1] = vp->bmp->pitches[2];
729                     pict.linesize[2] = vp->bmp->pitches[1];
730
731                     for (i = 0; i < sp->sub.num_rects; i++)
732                         blend_subrect(&pict, sp->sub.rects[i],
733                                       vp->bmp->w, vp->bmp->h);
734
735                     SDL_UnlockYUVOverlay (vp->bmp);
736                 }
737             }
738         }
739
740
741         /* XXX: we assume the screen has a 1.0 pixel aspect ratio */
742         height = is->height;
743         width = ((int)rint(height * aspect_ratio)) & ~1;
744         if (width > is->width) {
745             width = is->width;
746             height = ((int)rint(width / aspect_ratio)) & ~1;
747         }
748         x = (is->width - width) / 2;
749         y = (is->height - height) / 2;
750         if (!is->no_background) {
751             /* fill the background */
752             //            fill_border(is, x, y, width, height, QERGB(0x00, 0x00, 0x00));
753         } else {
754             is->no_background = 0;
755         }
756         rect.x = is->xleft + x;
757         rect.y = is->ytop  + y;
758         rect.w = width;
759         rect.h = height;
760         SDL_DisplayYUVOverlay(vp->bmp, &rect);
761     } else {
762 #if 0
763         fill_rectangle(screen,
764                        is->xleft, is->ytop, is->width, is->height,
765                        QERGB(0x00, 0x00, 0x00));
766 #endif
767     }
768 }
769
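/* Mathematical modulo that always yields a result in [0, b), e.g.
   compute_mod(-3, 16) == 13 where the C '%' operator would give -3; used
   below to step backwards through the circular sample_array. */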
770 static inline int compute_mod(int a, int b)
771 {
772     return a < 0 ? a%b + b : a%b;
773 }
774
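/* Draw the audio of the stream into the video window, either as one
   waveform per channel (show_audio == 1) or, after an RDFT, as a scrolling
   spectrogram that advances one pixel column per refresh. The display is
   centred on the samples currently being played, estimated from how much
   data is still queued for the audio device. */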
775 static void video_audio_display(VideoState *s)
776 {
777     int i, i_start, x, y1, y, ys, delay, n, nb_display_channels;
778     int ch, channels, h, h2, bgcolor, fgcolor;
779     int64_t time_diff; /* in microseconds; needs 64 bits to avoid overflow */
780     int rdft_bits, nb_freq;
781
782     for(rdft_bits=1; (1<<rdft_bits)<2*s->height; rdft_bits++)
783         ;
784     nb_freq= 1<<(rdft_bits-1);
785
786     /* compute display index : center on currently output samples */
787     channels = s->audio_st->codec->channels;
788     nb_display_channels = channels;
789     if (!s->paused) {
790         int data_used= s->show_audio==1 ? s->width : (2*nb_freq);
791         n = 2 * channels;
792         delay = audio_write_get_buf_size(s);
793         delay /= n;
794
795         /* to be more precise, we take into account the time spent since
796            the last buffer computation */
797         if (audio_callback_time) {
798             time_diff = av_gettime() - audio_callback_time;
799             delay -= (time_diff * s->audio_st->codec->sample_rate) / 1000000;
800         }
801
802         delay += 2*data_used;
803         if (delay < data_used)
804             delay = data_used;
805
806         i_start= x = compute_mod(s->sample_array_index - delay * channels, SAMPLE_ARRAY_SIZE);
807         if(s->show_audio==1){
808             h= INT_MIN;
809             for(i=0; i<1000; i+=channels){
810                 int idx= (SAMPLE_ARRAY_SIZE + x - i) % SAMPLE_ARRAY_SIZE;
811                 int a= s->sample_array[idx];
812                 int b= s->sample_array[(idx + 4*channels)%SAMPLE_ARRAY_SIZE];
813                 int c= s->sample_array[(idx + 5*channels)%SAMPLE_ARRAY_SIZE];
814                 int d= s->sample_array[(idx + 9*channels)%SAMPLE_ARRAY_SIZE];
815                 int score= a-d;
816                 if(h<score && (b^c)<0){
817                     h= score;
818                     i_start= idx;
819                 }
820             }
821         }
822
823         s->last_i_start = i_start;
824     } else {
825         i_start = s->last_i_start;
826     }
827
828     bgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0x00);
829     if(s->show_audio==1){
830         fill_rectangle(screen,
831                        s->xleft, s->ytop, s->width, s->height,
832                        bgcolor);
833
834         fgcolor = SDL_MapRGB(screen->format, 0xff, 0xff, 0xff);
835
836         /* total height for one channel */
837         h = s->height / nb_display_channels;
838         /* graph height / 2 */
839         h2 = (h * 9) / 20;
840         for(ch = 0;ch < nb_display_channels; ch++) {
841             i = i_start + ch;
842             y1 = s->ytop + ch * h + (h / 2); /* position of center line */
843             for(x = 0; x < s->width; x++) {
844                 y = (s->sample_array[i] * h2) >> 15;
845                 if (y < 0) {
846                     y = -y;
847                     ys = y1 - y;
848                 } else {
849                     ys = y1;
850                 }
851                 fill_rectangle(screen,
852                                s->xleft + x, ys, 1, y,
853                                fgcolor);
854                 i += channels;
855                 if (i >= SAMPLE_ARRAY_SIZE)
856                     i -= SAMPLE_ARRAY_SIZE;
857             }
858         }
859
860         fgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0xff);
861
862         for(ch = 1;ch < nb_display_channels; ch++) {
863             y = s->ytop + ch * h;
864             fill_rectangle(screen,
865                            s->xleft, y, s->width, 1,
866                            fgcolor);
867         }
868         SDL_UpdateRect(screen, s->xleft, s->ytop, s->width, s->height);
869     }else{
870         nb_display_channels= FFMIN(nb_display_channels, 2);
871         if(rdft_bits != s->rdft_bits){
872             av_rdft_end(s->rdft);
873             av_free(s->rdft_data);
874             s->rdft = av_rdft_init(rdft_bits, DFT_R2C);
875             s->rdft_bits= rdft_bits;
876             s->rdft_data= av_malloc(4*nb_freq*sizeof(*s->rdft_data));
877         }
878         {
879             FFTSample *data[2];
880             for(ch = 0;ch < nb_display_channels; ch++) {
881                 data[ch] = s->rdft_data + 2*nb_freq*ch;
882                 i = i_start + ch;
883                 for(x = 0; x < 2*nb_freq; x++) {
884                     double w= (x-nb_freq)*(1.0/nb_freq);
885                     data[ch][x]= s->sample_array[i]*(1.0-w*w);
886                     i += channels;
887                     if (i >= SAMPLE_ARRAY_SIZE)
888                         i -= SAMPLE_ARRAY_SIZE;
889                 }
890                 av_rdft_calc(s->rdft, data[ch]);
891             }
892             // Least efficient way to do this; we should of course access it directly, but it's more than fast enough.
893             for(y=0; y<s->height; y++){
894                 double w= 1/sqrt(nb_freq);
895                 int a= sqrt(w*sqrt(data[0][2*y+0]*data[0][2*y+0] + data[0][2*y+1]*data[0][2*y+1]));
896                 int b= (nb_display_channels == 2 ) ? sqrt(w*sqrt(data[1][2*y+0]*data[1][2*y+0]
897                        + data[1][2*y+1]*data[1][2*y+1])) : a;
898                 a= FFMIN(a,255);
899                 b= FFMIN(b,255);
900                 fgcolor = SDL_MapRGB(screen->format, a, b, (a+b)/2);
901
902                 fill_rectangle(screen,
903                             s->xpos, s->height-y, 1, 1,
904                             fgcolor);
905             }
906         }
907         SDL_UpdateRect(screen, s->xpos, s->ytop, 1, s->height);
908         s->xpos++;
909         if(s->xpos >= s->width)
910             s->xpos= s->xleft;
911     }
912 }
913
914 static int video_open(VideoState *is){
915     int flags = SDL_HWSURFACE|SDL_ASYNCBLIT|SDL_HWACCEL;
916     int w,h;
917
918     if(is_full_screen) flags |= SDL_FULLSCREEN;
919     else               flags |= SDL_RESIZABLE;
920
921     if (is_full_screen && fs_screen_width) {
922         w = fs_screen_width;
923         h = fs_screen_height;
924     } else if(!is_full_screen && screen_width){
925         w = screen_width;
926         h = screen_height;
927 #if CONFIG_AVFILTER
928     }else if (is->out_video_filter && is->out_video_filter->inputs[0]){
929         w = is->out_video_filter->inputs[0]->w;
930         h = is->out_video_filter->inputs[0]->h;
931 #else
932     }else if (is->video_st && is->video_st->codec->width){
933         w = is->video_st->codec->width;
934         h = is->video_st->codec->height;
935 #endif
936     } else {
937         w = 640;
938         h = 480;
939     }
940     if(screen && is->width == screen->w && screen->w == w
941        && is->height== screen->h && screen->h == h)
942         return 0;
943
944 #ifndef __APPLE__
945     screen = SDL_SetVideoMode(w, h, 0, flags);
946 #else
947     /* setting bits_per_pixel = 0 or 32 causes blank video on OS X */
948     screen = SDL_SetVideoMode(w, h, 24, flags);
949 #endif
950     if (!screen) {
951         fprintf(stderr, "SDL: could not set video mode - exiting\n");
952         return -1;
953     }
954     if (!window_title)
955         window_title = input_filename;
956     SDL_WM_SetCaption(window_title, window_title);
957
958     is->width = screen->w;
959     is->height = screen->h;
960
961     return 0;
962 }
963
964 /* display the current picture, if any */
965 static void video_display(VideoState *is)
966 {
967     if(!screen)
968         video_open(cur_stream);
969     if (is->audio_st && is->show_audio)
970         video_audio_display(is);
971     else if (is->video_st)
972         video_image_display(is);
973 }
974
975 static int refresh_thread(void *opaque)
976 {
977     VideoState *is= opaque;
978     while(!is->abort_request){
979         SDL_Event event;
980         event.type = FF_REFRESH_EVENT;
981         event.user.data1 = opaque;
982         if(!is->refresh){
983             is->refresh=1;
984             SDL_PushEvent(&event);
985         }
986         usleep(is->audio_st && is->show_audio ? rdftspeed*1000 : 5000); //FIXME ideally we should wait the correct time, but SDL's event passing is so slow it would be silly
987     }
988     return 0;
989 }
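/* Refreshes are driven by this thread instead of an SDL timer: it posts an
   FF_REFRESH_EVENT roughly every 5 ms (or every rdftspeed ms in spectrum
   mode) and the event loop then calls video_refresh_timer(), which decides
   whether a picture is actually due; the 'refresh' flag keeps at most one
   such event pending at a time. */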
990
991 /* get the current audio clock value */
992 static double get_audio_clock(VideoState *is)
993 {
994     double pts;
995     int hw_buf_size, bytes_per_sec;
996     pts = is->audio_clock;
997     hw_buf_size = audio_write_get_buf_size(is);
998     bytes_per_sec = 0;
999     if (is->audio_st) {
1000         bytes_per_sec = is->audio_st->codec->sample_rate *
1001             2 * is->audio_st->codec->channels;
1002     }
1003     if (bytes_per_sec)
1004         pts -= (double)hw_buf_size / bytes_per_sec;
1005     return pts;
1006 }
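/* Note: is->audio_clock (updated by the audio decoding path) holds the PTS
   of the end of the data most recently handed to SDL, so the current clock
   is that value minus the playback time of whatever is still sitting in the
   audio buffers; the factor 2 assumes 16-bit samples. */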
1007
1008 /* get the current video clock value */
1009 static double get_video_clock(VideoState *is)
1010 {
1011     if (is->paused) {
1012         return is->video_current_pts;
1013     } else {
1014         return is->video_current_pts_drift + av_gettime() / 1000000.0;
1015     }
1016 }
1017
1018 /* get the current external clock value */
1019 static double get_external_clock(VideoState *is)
1020 {
1021     int64_t ti;
1022     ti = av_gettime();
1023     return is->external_clock + ((ti - is->external_clock_time) * 1e-6);
1024 }
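/* Both clocks are stored relative to av_gettime(): the video clock as a
   drift (pts minus the system time at which it was last updated) and the
   external clock as a base value plus the time of that base, so reading
   them is just an addition and they keep advancing between updates. */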
1025
1026 /* get the current master clock value */
1027 static double get_master_clock(VideoState *is)
1028 {
1029     double val;
1030
1031     if (is->av_sync_type == AV_SYNC_VIDEO_MASTER) {
1032         if (is->video_st)
1033             val = get_video_clock(is);
1034         else
1035             val = get_audio_clock(is);
1036     } else if (is->av_sync_type == AV_SYNC_AUDIO_MASTER) {
1037         if (is->audio_st)
1038             val = get_audio_clock(is);
1039         else
1040             val = get_video_clock(is);
1041     } else {
1042         val = get_external_clock(is);
1043     }
1044     return val;
1045 }
1046
1047 /* seek in the stream */
1048 static void stream_seek(VideoState *is, int64_t pos, int64_t rel, int seek_by_bytes)
1049 {
1050     if (!is->seek_req) {
1051         is->seek_pos = pos;
1052         is->seek_rel = rel;
1053         is->seek_flags &= ~AVSEEK_FLAG_BYTE;
1054         if (seek_by_bytes)
1055             is->seek_flags |= AVSEEK_FLAG_BYTE;
1056         is->seek_req = 1;
1057     }
1058 }
1059
1060 /* pause or resume the video */
1061 static void stream_pause(VideoState *is)
1062 {
1063     if (is->paused) {
1064         is->frame_timer += av_gettime() / 1000000.0 + is->video_current_pts_drift - is->video_current_pts;
1065         if(is->read_pause_return != AVERROR(ENOSYS)){
1066             is->video_current_pts = is->video_current_pts_drift + av_gettime() / 1000000.0;
1067         }
1068         is->video_current_pts_drift = is->video_current_pts - av_gettime() / 1000000.0;
1069     }
1070     is->paused = !is->paused;
1071 }
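/* On resume, frame_timer is advanced by the time spent paused (now minus
   the instant at which video_current_pts was last updated), so frames are
   not considered late merely because playback was stopped for a while. */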
1072
1073 static double compute_target_time(double frame_current_pts, VideoState *is)
1074 {
1075     double delay, sync_threshold, diff;
1076
1077     /* compute nominal delay */
1078     delay = frame_current_pts - is->frame_last_pts;
1079     if (delay <= 0 || delay >= 10.0) {
1080         /* if incorrect delay, use previous one */
1081         delay = is->frame_last_delay;
1082     } else {
1083         is->frame_last_delay = delay;
1084     }
1085     is->frame_last_pts = frame_current_pts;
1086
1087     /* update delay to follow master synchronisation source */
1088     if (((is->av_sync_type == AV_SYNC_AUDIO_MASTER && is->audio_st) ||
1089          is->av_sync_type == AV_SYNC_EXTERNAL_CLOCK)) {
1090         /* if video is slave, we try to correct big delays by
1091            duplicating or deleting a frame */
1092         diff = get_video_clock(is) - get_master_clock(is);
1093
1094         /* skip or repeat frame. We take into account the
1095            delay to compute the threshold. I still don't know
1096            if it is the best guess */
1097         sync_threshold = FFMAX(AV_SYNC_THRESHOLD, delay);
1098         if (fabs(diff) < AV_NOSYNC_THRESHOLD) {
1099             if (diff <= -sync_threshold)
1100                 delay = 0;
1101             else if (diff >= sync_threshold)
1102                 delay = 2 * delay;
1103         }
1104     }
1105     is->frame_timer += delay;
1106 #if defined(DEBUG_SYNC)
1107     printf("video: delay=%0.3f actual_delay=%0.3f pts=%0.3f A-V=%f\n",
1108             delay, actual_delay, frame_current_pts, -diff);
1109 #endif
1110
1111     return is->frame_timer;
1112 }
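/* The returned value is an absolute time (same scale as av_gettime()/1e6)
   at which the frame should be shown: normally the previous target plus the
   PTS difference between consecutive frames, but the added delay is dropped
   to zero (show as soon as possible) when the video clock has fallen more
   than sync_threshold behind the master clock, and doubled when it has run
   ahead of it. */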
1113
1114 /* called to display each frame */
1115 static void video_refresh_timer(void *opaque)
1116 {
1117     VideoState *is = opaque;
1118     VideoPicture *vp;
1119
1120     SubPicture *sp, *sp2;
1121
1122     if (is->video_st) {
1123 retry:
1124         if (is->pictq_size == 0) {
1125             // nothing to do, no picture to display in the queue
1126         } else {
1127             double time= av_gettime()/1000000.0;
1128             double next_target;
1129             /* dequeue the picture */
1130             vp = &is->pictq[is->pictq_rindex];
1131
1132             if(time < vp->target_clock)
1133                 return;
1134             /* update current video pts */
1135             is->video_current_pts = vp->pts;
1136             is->video_current_pts_drift = is->video_current_pts - time;
1137             is->video_current_pos = vp->pos;
1138             if(is->pictq_size > 1){
1139                 VideoPicture *nextvp= &is->pictq[(is->pictq_rindex+1)%VIDEO_PICTURE_QUEUE_SIZE];
1140                 assert(nextvp->target_clock >= vp->target_clock);
1141                 next_target= nextvp->target_clock;
1142             }else{
1143                 next_target= vp->target_clock + is->video_clock - vp->pts; //FIXME pass durations cleanly
1144             }
1145             if(framedrop && time > next_target){
1146                 is->skip_frames *= 1.0 + FRAME_SKIP_FACTOR;
1147                 if(is->pictq_size > 1 || time > next_target + 0.5){
1148                     /* update queue size and signal for next picture */
1149                     if (++is->pictq_rindex == VIDEO_PICTURE_QUEUE_SIZE)
1150                         is->pictq_rindex = 0;
1151
1152                     SDL_LockMutex(is->pictq_mutex);
1153                     is->pictq_size--;
1154                     SDL_CondSignal(is->pictq_cond);
1155                     SDL_UnlockMutex(is->pictq_mutex);
1156                     goto retry;
1157                 }
1158             }
1159
1160             if(is->subtitle_st) {
1161                 if (is->subtitle_stream_changed) {
1162                     SDL_LockMutex(is->subpq_mutex);
1163
1164                     while (is->subpq_size) {
1165                         free_subpicture(&is->subpq[is->subpq_rindex]);
1166
1167                         /* update queue size and signal for next picture */
1168                         if (++is->subpq_rindex == SUBPICTURE_QUEUE_SIZE)
1169                             is->subpq_rindex = 0;
1170
1171                         is->subpq_size--;
1172                     }
1173                     is->subtitle_stream_changed = 0;
1174
1175                     SDL_CondSignal(is->subpq_cond);
1176                     SDL_UnlockMutex(is->subpq_mutex);
1177                 } else {
1178                     if (is->subpq_size > 0) {
1179                         sp = &is->subpq[is->subpq_rindex];
1180
1181                         if (is->subpq_size > 1)
1182                             sp2 = &is->subpq[(is->subpq_rindex + 1) % SUBPICTURE_QUEUE_SIZE];
1183                         else
1184                             sp2 = NULL;
1185
1186                         if ((is->video_current_pts > (sp->pts + ((float) sp->sub.end_display_time / 1000)))
1187                                 || (sp2 && is->video_current_pts > (sp2->pts + ((float) sp2->sub.start_display_time / 1000))))
1188                         {
1189                             free_subpicture(sp);
1190
1191                             /* update queue size and signal for next picture */
1192                             if (++is->subpq_rindex == SUBPICTURE_QUEUE_SIZE)
1193                                 is->subpq_rindex = 0;
1194
1195                             SDL_LockMutex(is->subpq_mutex);
1196                             is->subpq_size--;
1197                             SDL_CondSignal(is->subpq_cond);
1198                             SDL_UnlockMutex(is->subpq_mutex);
1199                         }
1200                     }
1201                 }
1202             }
1203
1204             /* display picture */
1205             if (!display_disable)
1206                 video_display(is);
1207
1208             /* update queue size and signal for next picture */
1209             if (++is->pictq_rindex == VIDEO_PICTURE_QUEUE_SIZE)
1210                 is->pictq_rindex = 0;
1211
1212             SDL_LockMutex(is->pictq_mutex);
1213             is->pictq_size--;
1214             SDL_CondSignal(is->pictq_cond);
1215             SDL_UnlockMutex(is->pictq_mutex);
1216         }
1217     } else if (is->audio_st) {
1218         /* draw the next audio frame */
1219
1220         /* if only audio stream, then display the audio bars (better
1221            than nothing, just to test the implementation) */
1222
1223         /* display picture */
1224         if (!display_disable)
1225             video_display(is);
1226     }
1227     if (show_status) {
1228         static int64_t last_time;
1229         int64_t cur_time;
1230         int aqsize, vqsize, sqsize;
1231         double av_diff;
1232
1233         cur_time = av_gettime();
1234         if (!last_time || (cur_time - last_time) >= 30000) {
1235             aqsize = 0;
1236             vqsize = 0;
1237             sqsize = 0;
1238             if (is->audio_st)
1239                 aqsize = is->audioq.size;
1240             if (is->video_st)
1241                 vqsize = is->videoq.size;
1242             if (is->subtitle_st)
1243                 sqsize = is->subtitleq.size;
1244             av_diff = 0;
1245             if (is->audio_st && is->video_st)
1246                 av_diff = get_audio_clock(is) - get_video_clock(is);
1247             printf("%7.2f A-V:%7.3f s:%3.1f aq=%5dKB vq=%5dKB sq=%5dB f=%"PRId64"/%"PRId64"   \r",
1248                    get_master_clock(is),
1249                    av_diff,
1250                    FFMAX(is->skip_frames-1, 0),
1251                    aqsize / 1024,
1252                    vqsize / 1024,
1253                    sqsize,
1254                    is->video_st ? is->video_st->codec->pts_correction_num_faulty_dts : 0,
1255                    is->video_st ? is->video_st->codec->pts_correction_num_faulty_pts : 0);
1256             fflush(stdout);
1257             last_time = cur_time;
1258         }
1259     }
1260 }
1261
1262 static void stream_close(VideoState *is)
1263 {
1264     VideoPicture *vp;
1265     int i;
1266     /* XXX: use a special url_shutdown call to abort parse cleanly */
1267     is->abort_request = 1;
1268     SDL_WaitThread(is->parse_tid, NULL);
1269     SDL_WaitThread(is->refresh_tid, NULL);
1270
1271     /* free all pictures */
1272     for(i=0;i<VIDEO_PICTURE_QUEUE_SIZE; i++) {
1273         vp = &is->pictq[i];
1274 #if CONFIG_AVFILTER
1275         if (vp->picref) {
1276             avfilter_unref_buffer(vp->picref);
1277             vp->picref = NULL;
1278         }
1279 #endif
1280         if (vp->bmp) {
1281             SDL_FreeYUVOverlay(vp->bmp);
1282             vp->bmp = NULL;
1283         }
1284     }
1285     SDL_DestroyMutex(is->pictq_mutex);
1286     SDL_DestroyCond(is->pictq_cond);
1287     SDL_DestroyMutex(is->subpq_mutex);
1288     SDL_DestroyCond(is->subpq_cond);
1289 #if !CONFIG_AVFILTER
1290     if (is->img_convert_ctx)
1291         sws_freeContext(is->img_convert_ctx);
1292 #endif
1293     av_free(is);
1294 }
1295
1296 static void do_exit(void)
1297 {
1298     if (cur_stream) {
1299         stream_close(cur_stream);
1300         cur_stream = NULL;
1301     }
1302     uninit_opts();
1303 #if CONFIG_AVFILTER
1304     avfilter_uninit();
1305 #endif
1306     if (show_status)
1307         printf("\n");
1308     SDL_Quit();
1309     av_log(NULL, AV_LOG_QUIET, "");
1310     exit(0);
1311 }
1312
1313 /* allocate a picture (needs to be done in the main thread to avoid
1314    potential locking problems) */
1315 static void alloc_picture(void *opaque)
1316 {
1317     VideoState *is = opaque;
1318     VideoPicture *vp;
1319
1320     vp = &is->pictq[is->pictq_windex];
1321
1322     if (vp->bmp)
1323         SDL_FreeYUVOverlay(vp->bmp);
1324
1325 #if CONFIG_AVFILTER
1326     if (vp->picref)
1327         avfilter_unref_buffer(vp->picref);
1328     vp->picref = NULL;
1329
1330     vp->width   = is->out_video_filter->inputs[0]->w;
1331     vp->height  = is->out_video_filter->inputs[0]->h;
1332     vp->pix_fmt = is->out_video_filter->inputs[0]->format;
1333 #else
1334     vp->width   = is->video_st->codec->width;
1335     vp->height  = is->video_st->codec->height;
1336     vp->pix_fmt = is->video_st->codec->pix_fmt;
1337 #endif
1338
1339     vp->bmp = SDL_CreateYUVOverlay(vp->width, vp->height,
1340                                    SDL_YV12_OVERLAY,
1341                                    screen);
1342     if (!vp->bmp || vp->bmp->pitches[0] < vp->width) {
1343         /* SDL allocates a buffer smaller than requested if the video
1344          * overlay hardware is unable to support the requested size. */
1345         fprintf(stderr, "Error: the video system does not support an image\n"
1346                         "size of %dx%d pixels. Try using -lowres or -vf \"scale=w:h\"\n"
1347                         "to reduce the image size.\n", vp->width, vp->height );
1348         do_exit();
1349     }
1350
1351     SDL_LockMutex(is->pictq_mutex);
1352     vp->allocated = 1;
1353     SDL_CondSignal(is->pictq_cond);
1354     SDL_UnlockMutex(is->pictq_mutex);
1355 }
1356
1357 /**
1358  *
1359  * @param pts the dts of the packet / the pts of the frame, guessed if not known
1360  */
1361 static int queue_picture(VideoState *is, AVFrame *src_frame, double pts, int64_t pos)
1362 {
1363     VideoPicture *vp;
1364     int dst_pix_fmt;
1365 #if CONFIG_AVFILTER
1366     AVPicture pict_src;
1367 #endif
1368     /* wait until we have space to put a new picture */
1369     SDL_LockMutex(is->pictq_mutex);
1370
1371     if(is->pictq_size>=VIDEO_PICTURE_QUEUE_SIZE && !is->refresh)
1372         is->skip_frames= FFMAX(1.0 - FRAME_SKIP_FACTOR, is->skip_frames * (1.0-FRAME_SKIP_FACTOR));
1373
1374     while (is->pictq_size >= VIDEO_PICTURE_QUEUE_SIZE &&
1375            !is->videoq.abort_request) {
1376         SDL_CondWait(is->pictq_cond, is->pictq_mutex);
1377     }
1378     SDL_UnlockMutex(is->pictq_mutex);
1379
1380     if (is->videoq.abort_request)
1381         return -1;
1382
1383     vp = &is->pictq[is->pictq_windex];
1384
1385     /* alloc or resize hardware picture buffer */
1386     if (!vp->bmp ||
1387 #if CONFIG_AVFILTER
1388         vp->width  != is->out_video_filter->inputs[0]->w ||
1389         vp->height != is->out_video_filter->inputs[0]->h) {
1390 #else
1391         vp->width != is->video_st->codec->width ||
1392         vp->height != is->video_st->codec->height) {
1393 #endif
1394         SDL_Event event;
1395
1396         vp->allocated = 0;
1397
1398         /* the allocation must be done in the main thread to avoid
1399            locking problems */
1400         event.type = FF_ALLOC_EVENT;
1401         event.user.data1 = is;
1402         SDL_PushEvent(&event);
1403
1404         /* wait until the picture is allocated */
1405         SDL_LockMutex(is->pictq_mutex);
1406         while (!vp->allocated && !is->videoq.abort_request) {
1407             SDL_CondWait(is->pictq_cond, is->pictq_mutex);
1408         }
1409         SDL_UnlockMutex(is->pictq_mutex);
1410
1411         if (is->videoq.abort_request)
1412             return -1;
1413     }
1414
1415     /* if the frame is not skipped, then display it */
1416     if (vp->bmp) {
1417         AVPicture pict;
1418 #if CONFIG_AVFILTER
1419         if(vp->picref)
1420             avfilter_unref_buffer(vp->picref);
1421         vp->picref = src_frame->opaque;
1422 #endif
1423
1424         /* get a pointer on the bitmap */
1425         SDL_LockYUVOverlay (vp->bmp);
1426
1427         dst_pix_fmt = PIX_FMT_YUV420P;
1428         memset(&pict,0,sizeof(AVPicture));
1429         pict.data[0] = vp->bmp->pixels[0];
1430         pict.data[1] = vp->bmp->pixels[2];
1431         pict.data[2] = vp->bmp->pixels[1];
1432
1433         pict.linesize[0] = vp->bmp->pitches[0];
1434         pict.linesize[1] = vp->bmp->pitches[2];
1435         pict.linesize[2] = vp->bmp->pitches[1];
1436
1437 #if CONFIG_AVFILTER
1438         pict_src.data[0] = src_frame->data[0];
1439         pict_src.data[1] = src_frame->data[1];
1440         pict_src.data[2] = src_frame->data[2];
1441
1442         pict_src.linesize[0] = src_frame->linesize[0];
1443         pict_src.linesize[1] = src_frame->linesize[1];
1444         pict_src.linesize[2] = src_frame->linesize[2];
1445
1446         //FIXME use direct rendering
1447         av_picture_copy(&pict, &pict_src,
1448                         vp->pix_fmt, vp->width, vp->height);
1449 #else
1450         sws_flags = av_get_int(sws_opts, "sws_flags", NULL);
1451         is->img_convert_ctx = sws_getCachedContext(is->img_convert_ctx,
1452             vp->width, vp->height, vp->pix_fmt, vp->width, vp->height,
1453             dst_pix_fmt, sws_flags, NULL, NULL, NULL);
1454         if (is->img_convert_ctx == NULL) {
1455             fprintf(stderr, "Cannot initialize the conversion context\n");
1456             exit(1);
1457         }
1458         sws_scale(is->img_convert_ctx, src_frame->data, src_frame->linesize,
1459                   0, vp->height, pict.data, pict.linesize);
1460 #endif
1461         /* update the bitmap content */
1462         SDL_UnlockYUVOverlay(vp->bmp);
1463
1464         vp->pts = pts;
1465         vp->pos = pos;
1466
1467         /* now we can update the picture count */
1468         if (++is->pictq_windex == VIDEO_PICTURE_QUEUE_SIZE)
1469             is->pictq_windex = 0;
1470         SDL_LockMutex(is->pictq_mutex);
1471         vp->target_clock= compute_target_time(vp->pts, is);
1472
1473         is->pictq_size++;
1474         SDL_UnlockMutex(is->pictq_mutex);
1475     }
1476     return 0;
1477 }
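/* queue_picture() runs in the video decoding thread, but SDL overlays can
   only be (re)allocated safely from the main thread, so when the overlay is
   missing or has the wrong size an FF_ALLOC_EVENT is posted and the thread
   waits on pictq_cond until alloc_picture() has run; the decoded frame is
   then converted/copied into the YV12 overlay and the write index and
   target_clock are published under pictq_mutex. */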
1478
1479 /**
1480  * compute the exact PTS for the picture if it is omitted in the stream
1481  * @param pts1 the dts of the pkt / pts of the frame
1482  */
1483 static int output_picture2(VideoState *is, AVFrame *src_frame, double pts1, int64_t pos)
1484 {
1485     double frame_delay, pts;
1486
1487     pts = pts1;
1488
1489     if (pts != 0) {
1490         /* update video clock with pts, if present */
1491         is->video_clock = pts;
1492     } else {
1493         pts = is->video_clock;
1494     }
1495     /* update video clock for next frame */
1496     frame_delay = av_q2d(is->video_st->codec->time_base);
1497     /* for MPEG2, the frame can be repeated, so we update the
1498        clock accordingly */
1499     frame_delay += src_frame->repeat_pict * (frame_delay * 0.5);
1500     is->video_clock += frame_delay;
1501
1502 #if defined(DEBUG_SYNC) && 0
1503     printf("frame_type=%c clock=%0.3f pts=%0.3f\n",
1504            av_get_pict_type_char(src_frame->pict_type), pts, pts1);
1505 #endif
1506     return queue_picture(is, src_frame, pts, pos);
1507 }
1508
1509 static int get_video_frame(VideoState *is, AVFrame *frame, int64_t *pts, AVPacket *pkt)
1510 {
1511     int len1, got_picture, i;
1512
1513     if (packet_queue_get(&is->videoq, pkt, 1) < 0)
1514         return -1;
1515
1516     if (pkt->data == flush_pkt.data) {
1517         avcodec_flush_buffers(is->video_st->codec);
1518
1519         SDL_LockMutex(is->pictq_mutex);
1520         // Make sure there are no long-delay timers (ideally we should just flush the queue, but that's harder)
1521         for (i = 0; i < VIDEO_PICTURE_QUEUE_SIZE; i++) {
1522             is->pictq[i].target_clock= 0;
1523         }
1524         while (is->pictq_size && !is->videoq.abort_request) {
1525             SDL_CondWait(is->pictq_cond, is->pictq_mutex);
1526         }
1527         is->video_current_pos = -1;
1528         SDL_UnlockMutex(is->pictq_mutex);
1529
1530         is->frame_last_pts = AV_NOPTS_VALUE;
1531         is->frame_last_delay = 0;
1532         is->frame_timer = (double)av_gettime() / 1000000.0;
1533         is->skip_frames = 1;
1534         is->skip_frames_index = 0;
1535         return 0;
1536     }
1537
1538     len1 = avcodec_decode_video2(is->video_st->codec,
1539                                  frame, &got_picture,
1540                                  pkt);
1541
1542     if (got_picture) {
1543         if (decoder_reorder_pts == -1) {
1544             *pts = frame->best_effort_timestamp;
1545         } else if (decoder_reorder_pts) {
1546             *pts = frame->pkt_pts;
1547         } else {
1548             *pts = frame->pkt_dts;
1549         }
1550
1551         if (*pts == AV_NOPTS_VALUE) {
1552             *pts = 0;
1553         }
1554
1555         is->skip_frames_index += 1;
1556         if(is->skip_frames_index >= is->skip_frames){
1557             is->skip_frames_index -= FFMAX(is->skip_frames, 1.0);
1558             return 1;
1559         }
1560
1561     }
1562     return 0;
1563 }
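/* When the flush packet injected by a seek reaches this point, the decoder
   and the picture queue are drained and the timing state is reset instead
   of decoding anything. For normal packets the presentation timestamp comes
   from best_effort_timestamp by default, or strictly from pkt_pts/pkt_dts
   when decoder_reorder_pts is forced on or off. */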
1564
1565 #if CONFIG_AVFILTER
1566 typedef struct {
1567     VideoState *is;
1568     AVFrame *frame;
1569     int use_dr1;
1570 } FilterPriv;
1571
1572 static int input_get_buffer(AVCodecContext *codec, AVFrame *pic)
1573 {
1574     AVFilterContext *ctx = codec->opaque;
1575     AVFilterBufferRef  *ref;
1576     int perms = AV_PERM_WRITE;
1577     int i, w, h, stride[4];
1578     unsigned edge;
1579     int pixel_size;
1580
1581     av_assert0(codec->flags & CODEC_FLAG_EMU_EDGE);
1582
1583     if (codec->codec->capabilities & CODEC_CAP_NEG_LINESIZES)
1584         perms |= AV_PERM_NEG_LINESIZES;
1585
1586     if(pic->buffer_hints & FF_BUFFER_HINTS_VALID) {
1587         if(pic->buffer_hints & FF_BUFFER_HINTS_READABLE) perms |= AV_PERM_READ;
1588         if(pic->buffer_hints & FF_BUFFER_HINTS_PRESERVE) perms |= AV_PERM_PRESERVE;
1589         if(pic->buffer_hints & FF_BUFFER_HINTS_REUSABLE) perms |= AV_PERM_REUSE2;
1590     }
1591     if(pic->reference) perms |= AV_PERM_READ | AV_PERM_PRESERVE;
1592
1593     w = codec->width;
1594     h = codec->height;
1595
1596     if(av_image_check_size(w, h, 0, codec))
1597         return -1;
1598
1599     avcodec_align_dimensions2(codec, &w, &h, stride);
1600     edge = codec->flags & CODEC_FLAG_EMU_EDGE ? 0 : avcodec_get_edge_width();
1601     w += edge << 1;
1602     h += edge << 1;
1603
1604     if(!(ref = avfilter_get_video_buffer(ctx->outputs[0], perms, w, h)))
1605         return -1;
1606
1607     pixel_size = av_pix_fmt_descriptors[ref->format].comp[0].step_minus1+1;
1608     ref->video->w = codec->width;
1609     ref->video->h = codec->height;
1610     for(i = 0; i < 4; i ++) {
1611         unsigned hshift = (i == 1 || i == 2) ? av_pix_fmt_descriptors[ref->format].log2_chroma_w : 0;
1612         unsigned vshift = (i == 1 || i == 2) ? av_pix_fmt_descriptors[ref->format].log2_chroma_h : 0;
1613
1614         if (ref->data[i]) {
1615             ref->data[i]    += ((edge * pixel_size) >> hshift) + ((edge * ref->linesize[i]) >> vshift);
1616         }
1617         pic->data[i]     = ref->data[i];
1618         pic->linesize[i] = ref->linesize[i];
1619     }
1620     pic->opaque = ref;
1621     pic->age    = INT_MAX;
1622     pic->type   = FF_BUFFER_TYPE_USER;
1623     pic->reordered_opaque = codec->reordered_opaque;
1624     if(codec->pkt) pic->pkt_pts = codec->pkt->pts;
1625     else           pic->pkt_pts = AV_NOPTS_VALUE;
1626     return 0;
1627 }
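/* input_get_buffer() gives the decoder direct rendering into libavfilter
   buffers: the frame is decoded straight into an AVFilterBufferRef taken
   from the source filter's output link, with the dimensions padded and the
   data pointers offset by the codec edge width so that the decoder can
   still read and write the border area it needs. */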
1628
1629 static void input_release_buffer(AVCodecContext *codec, AVFrame *pic)
1630 {
1631     memset(pic->data, 0, sizeof(pic->data));
1632     avfilter_unref_buffer(pic->opaque);
1633 }
1634
1635 static int input_reget_buffer(AVCodecContext *codec, AVFrame *pic)
1636 {
1637     AVFilterBufferRef *ref = pic->opaque;
1638
1639     if (pic->data[0] == NULL) {
1640         pic->buffer_hints |= FF_BUFFER_HINTS_READABLE;
1641         return codec->get_buffer(codec, pic);
1642     }
1643
1644     if ((codec->width != ref->video->w) || (codec->height != ref->video->h) ||
1645         (codec->pix_fmt != ref->format)) {
1646         av_log(codec, AV_LOG_ERROR, "Picture properties changed.\n");
1647         return -1;
1648     }
1649
1650     pic->reordered_opaque = codec->reordered_opaque;
1651     if(codec->pkt) pic->pkt_pts = codec->pkt->pts;
1652     else           pic->pkt_pts = AV_NOPTS_VALUE;
1653     return 0;
1654 }
1655
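/* init callback of the ffplay source filter: when the decoder supports
   direct rendering (DR1), hook its buffer callbacks into the filter graph */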
1656 static int input_init(AVFilterContext *ctx, const char *args, void *opaque)
1657 {
1658     FilterPriv *priv = ctx->priv;
1659     AVCodecContext *codec;
1660     if(!opaque) return -1;
1661
1662     priv->is = opaque;
1663     codec    = priv->is->video_st->codec;
1664     codec->opaque = ctx;
1665     if (codec->codec->capabilities & CODEC_CAP_DR1)
1666     {
1667         codec->flags |= CODEC_FLAG_EMU_EDGE;
1668         priv->use_dr1 = 1;
1669         codec->get_buffer     = input_get_buffer;
1670         codec->release_buffer = input_release_buffer;
1671         codec->reget_buffer   = input_reget_buffer;
1672         codec->thread_safe_callbacks = 1;
1673     }
1674
1675     priv->frame = avcodec_alloc_frame();
1676
1677     return 0;
1678 }
1679
1680 static void input_uninit(AVFilterContext *ctx)
1681 {
1682     FilterPriv *priv = ctx->priv;
1683     av_free(priv->frame);
1684 }
1685
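/* request_frame() callback of the source filter: decode the next video frame
   and push it downstream as a single full-height slice */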
1686 static int input_request_frame(AVFilterLink *link)
1687 {
1688     FilterPriv *priv = link->src->priv;
1689     AVFilterBufferRef *picref;
1690     int64_t pts = 0;
1691     AVPacket pkt;
1692     int ret;
1693
1694     while (!(ret = get_video_frame(priv->is, priv->frame, &pts, &pkt)))
1695         av_free_packet(&pkt);
1696     if (ret < 0)
1697         return -1;
1698
1699     if(priv->use_dr1) {
1700         picref = avfilter_ref_buffer(priv->frame->opaque, ~0);
1701     } else {
1702         picref = avfilter_get_video_buffer(link, AV_PERM_WRITE, link->w, link->h);
1703         av_image_copy(picref->data, picref->linesize,
1704                       priv->frame->data, priv->frame->linesize,
1705                       picref->format, link->w, link->h);
1706     }
1707     av_free_packet(&pkt);
1708
1709     picref->pts = pts;
1710     picref->pos = pkt.pos;
1711     picref->video->pixel_aspect = priv->is->video_st->codec->sample_aspect_ratio;
1712     avfilter_start_frame(link, picref);
1713     avfilter_draw_slice(link, 0, link->h, 1);
1714     avfilter_end_frame(link);
1715
1716     return 0;
1717 }
1718
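/* advertise the decoder's pixel format as the only one supported by the source filter */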
1719 static int input_query_formats(AVFilterContext *ctx)
1720 {
1721     FilterPriv *priv = ctx->priv;
1722     enum PixelFormat pix_fmts[] = {
1723         priv->is->video_st->codec->pix_fmt, PIX_FMT_NONE
1724     };
1725
1726     avfilter_set_common_formats(ctx, avfilter_make_format_list(pix_fmts));
1727     return 0;
1728 }
1729
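/* propagate the decoder's dimensions and the stream time base to the output link */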
1730 static int input_config_props(AVFilterLink *link)
1731 {
1732     FilterPriv *priv  = link->src->priv;
1733     AVCodecContext *c = priv->is->video_st->codec;
1734
1735     link->w = c->width;
1736     link->h = c->height;
1737     link->time_base = priv->is->video_st->time_base;
1738
1739     return 0;
1740 }
1741
1742 static AVFilter input_filter =
1743 {
1744     .name      = "ffplay_input",
1745
1746     .priv_size = sizeof(FilterPriv),
1747
1748     .init      = input_init,
1749     .uninit    = input_uninit,
1750
1751     .query_formats = input_query_formats,
1752
1753     .inputs    = (AVFilterPad[]) {{ .name = NULL }},
1754     .outputs   = (AVFilterPad[]) {{ .name = "default",
1755                                     .type = AVMEDIA_TYPE_VIDEO,
1756                                     .request_frame = input_request_frame,
1757                                     .config_props  = input_config_props, },
1758                                   { .name = NULL }},
1759 };
1760
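/* build the video filter graph: the ffplay source filter, an optional
   user-supplied filter chain (-vf) and the ffsink output filter */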
1761 static int configure_video_filters(AVFilterGraph *graph, VideoState *is, const char *vfilters)
1762 {
1763     char sws_flags_str[128];
1764     int ret;
1765     FFSinkContext ffsink_ctx = { .pix_fmt = PIX_FMT_YUV420P };
1766     AVFilterContext *filt_src = NULL, *filt_out = NULL;
1767     snprintf(sws_flags_str, sizeof(sws_flags_str), "flags=%d", sws_flags);
1768     graph->scale_sws_opts = av_strdup(sws_flags_str);
1769
1770     if ((ret = avfilter_graph_create_filter(&filt_src, &input_filter, "src",
1771                                             NULL, is, graph)) < 0)
1772         goto the_end;
1773     if ((ret = avfilter_graph_create_filter(&filt_out, &ffsink, "out",
1774                                             NULL, &ffsink_ctx, graph)) < 0)
1775         goto the_end;
1776
1777     if(vfilters) {
1778         AVFilterInOut *outputs = av_malloc(sizeof(AVFilterInOut));
1779         AVFilterInOut *inputs  = av_malloc(sizeof(AVFilterInOut));
1780
1781         outputs->name    = av_strdup("in");
1782         outputs->filter_ctx = filt_src;
1783         outputs->pad_idx = 0;
1784         outputs->next    = NULL;
1785
1786         inputs->name    = av_strdup("out");
1787         inputs->filter_ctx = filt_out;
1788         inputs->pad_idx = 0;
1789         inputs->next    = NULL;
1790
1791         if ((ret = avfilter_graph_parse(graph, vfilters, inputs, outputs, NULL)) < 0)
1792             goto the_end;
1793         av_freep(&vfilters);
1794     } else {
1795         if ((ret = avfilter_link(filt_src, 0, filt_out, 0)) < 0)
1796             goto the_end;
1797     }
1798
1799     if ((ret = avfilter_graph_config(graph, NULL)) < 0)
1800         goto the_end;
1801
1802     is->out_video_filter = filt_out;
1803 the_end:
1804     return ret;
1805 }
1806
1807 #endif  /* CONFIG_AVFILTER */
1808
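/* video decoding thread: pulls packets from the video queue (optionally
   through the filter graph), converts timestamps to seconds and queues the
   resulting pictures for display */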
1809 static int video_thread(void *arg)
1810 {
1811     VideoState *is = arg;
1812     AVFrame *frame= avcodec_alloc_frame();
1813     int64_t pts_int;
1814     double pts;
1815     int ret;
1816
1817 #if CONFIG_AVFILTER
1818     AVFilterGraph *graph = avfilter_graph_alloc();
1819     AVFilterContext *filt_out = NULL;
1820     int64_t pos;
1821
1822     if ((ret = configure_video_filters(graph, is, vfilters)) < 0)
1823         goto the_end;
1824     filt_out = is->out_video_filter;
1825 #endif
1826
1827     for(;;) {
1828 #if !CONFIG_AVFILTER
1829         AVPacket pkt;
1830 #else
1831         AVFilterBufferRef *picref;
1832         AVRational tb;
1833 #endif
1834         while (is->paused && !is->videoq.abort_request)
1835             SDL_Delay(10);
1836 #if CONFIG_AVFILTER
1837         ret = get_filtered_video_frame(filt_out, frame, &picref, &tb);
1838         if (picref) {
1839             pts_int = picref->pts;
1840             pos     = picref->pos;
1841             frame->opaque = picref;
1842         }
1843
1844         if (av_cmp_q(tb, is->video_st->time_base)) {
1845             av_unused int64_t pts1 = pts_int;
1846             pts_int = av_rescale_q(pts_int, tb, is->video_st->time_base);
1847             av_dlog(NULL, "video_thread(): "
1848                     "tb:%d/%d pts:%"PRId64" -> tb:%d/%d pts:%"PRId64"\n",
1849                     tb.num, tb.den, pts1,
1850                     is->video_st->time_base.num, is->video_st->time_base.den, pts_int);
1851         }
1852 #else
1853         ret = get_video_frame(is, frame, &pts_int, &pkt);
1854 #endif
1855
1856         if (ret < 0) goto the_end;
1857
1858         if (!ret)
1859             continue;
1860
1861         pts = pts_int*av_q2d(is->video_st->time_base);
1862
1863 #if CONFIG_AVFILTER
1864         ret = output_picture2(is, frame, pts, pos);
1865 #else
1866         ret = output_picture2(is, frame, pts,  pkt.pos);
1867         av_free_packet(&pkt);
1868 #endif
1869         if (ret < 0)
1870             goto the_end;
1871
1872         if (step)
1873             if (cur_stream)
1874                 stream_pause(cur_stream);
1875     }
1876  the_end:
1877 #if CONFIG_AVFILTER
1878     avfilter_graph_free(&graph);
1879 #endif
1880     av_free(frame);
1881     return 0;
1882 }
1883
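/* subtitle decoding thread: decodes subtitle packets and converts their
   palettes from RGBA to YUVA before queueing them for display */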
1884 static int subtitle_thread(void *arg)
1885 {
1886     VideoState *is = arg;
1887     SubPicture *sp;
1888     AVPacket pkt1, *pkt = &pkt1;
1889     int len1, got_subtitle;
1890     double pts;
1891     int i, j;
1892     int r, g, b, y, u, v, a;
1893
1894     for(;;) {
1895         while (is->paused && !is->subtitleq.abort_request) {
1896             SDL_Delay(10);
1897         }
1898         if (packet_queue_get(&is->subtitleq, pkt, 1) < 0)
1899             break;
1900
1901         if(pkt->data == flush_pkt.data){
1902             avcodec_flush_buffers(is->subtitle_st->codec);
1903             continue;
1904         }
1905         SDL_LockMutex(is->subpq_mutex);
1906         while (is->subpq_size >= SUBPICTURE_QUEUE_SIZE &&
1907                !is->subtitleq.abort_request) {
1908             SDL_CondWait(is->subpq_cond, is->subpq_mutex);
1909         }
1910         SDL_UnlockMutex(is->subpq_mutex);
1911
1912         if (is->subtitleq.abort_request)
1913             goto the_end;
1914
1915         sp = &is->subpq[is->subpq_windex];
1916
1917         /* NOTE: pts is the PTS of the _first_ subtitle beginning in
1918            this packet, if any */
1919         pts = 0;
1920         if (pkt->pts != AV_NOPTS_VALUE)
1921             pts = av_q2d(is->subtitle_st->time_base)*pkt->pts;
1922
1923         len1 = avcodec_decode_subtitle2(is->subtitle_st->codec,
1924                                     &sp->sub, &got_subtitle,
1925                                     pkt);
1926 //            if (len1 < 0)
1927 //                break;
1928         if (got_subtitle && sp->sub.format == 0) {
1929             sp->pts = pts;
1930
1931             for (i = 0; i < sp->sub.num_rects; i++)
1932             {
1933                 for (j = 0; j < sp->sub.rects[i]->nb_colors; j++)
1934                 {
1935                     RGBA_IN(r, g, b, a, (uint32_t*)sp->sub.rects[i]->pict.data[1] + j);
1936                     y = RGB_TO_Y_CCIR(r, g, b);
1937                     u = RGB_TO_U_CCIR(r, g, b, 0);
1938                     v = RGB_TO_V_CCIR(r, g, b, 0);
1939                     YUVA_OUT((uint32_t*)sp->sub.rects[i]->pict.data[1] + j, y, u, v, a);
1940                 }
1941             }
1942
1943             /* now we can update the picture count */
1944             if (++is->subpq_windex == SUBPICTURE_QUEUE_SIZE)
1945                 is->subpq_windex = 0;
1946             SDL_LockMutex(is->subpq_mutex);
1947             is->subpq_size++;
1948             SDL_UnlockMutex(is->subpq_mutex);
1949         }
1950         av_free_packet(pkt);
1951 //        if (step)
1952 //            if (cur_stream)
1953 //                stream_pause(cur_stream);
1954     }
1955  the_end:
1956     return 0;
1957 }
1958
1959 /* copy samples into the circular buffer used by the audio wave/spectrum display */
1960 static void update_sample_display(VideoState *is, short *samples, int samples_size)
1961 {
1962     int size, len, channels;
1963
1964     channels = is->audio_st->codec->channels;
1965
1966     size = samples_size / sizeof(short);
1967     while (size > 0) {
1968         len = SAMPLE_ARRAY_SIZE - is->sample_array_index;
1969         if (len > size)
1970             len = size;
1971         memcpy(is->sample_array + is->sample_array_index, samples, len * sizeof(short));
1972         samples += len;
1973         is->sample_array_index += len;
1974         if (is->sample_array_index >= SAMPLE_ARRAY_SIZE)
1975             is->sample_array_index = 0;
1976         size -= len;
1977     }
1978 }
1979
1980 /* return the new audio buffer size (samples can be added or removed
1981    to get better sync when video or an external clock is the master) */
1982 static int synchronize_audio(VideoState *is, short *samples,
1983                              int samples_size1, double pts)
1984 {
1985     int n, samples_size;
1986     double ref_clock;
1987
1988     n = 2 * is->audio_st->codec->channels;
1989     samples_size = samples_size1;
1990
1991     /* if not master, then we try to remove or add samples to correct the clock */
1992     if (((is->av_sync_type == AV_SYNC_VIDEO_MASTER && is->video_st) ||
1993          is->av_sync_type == AV_SYNC_EXTERNAL_CLOCK)) {
1994         double diff, avg_diff;
1995         int wanted_size, min_size, max_size, nb_samples;
1996
1997         ref_clock = get_master_clock(is);
1998         diff = get_audio_clock(is) - ref_clock;
1999
2000         if (fabs(diff) < AV_NOSYNC_THRESHOLD) {
2001             is->audio_diff_cum = diff + is->audio_diff_avg_coef * is->audio_diff_cum;
2002             if (is->audio_diff_avg_count < AUDIO_DIFF_AVG_NB) {
2003                 /* not enough measurements to have a correct estimate */
2004                 is->audio_diff_avg_count++;
2005             } else {
2006                 /* estimate the A-V difference */
2007                 avg_diff = is->audio_diff_cum * (1.0 - is->audio_diff_avg_coef);
2008
2009                 if (fabs(avg_diff) >= is->audio_diff_threshold) {
2010                     wanted_size = samples_size + ((int)(diff * is->audio_st->codec->sample_rate) * n);
2011                     nb_samples = samples_size / n;
2012
2013                     min_size = ((nb_samples * (100 - SAMPLE_CORRECTION_PERCENT_MAX)) / 100) * n;
2014                     max_size = ((nb_samples * (100 + SAMPLE_CORRECTION_PERCENT_MAX)) / 100) * n;
2015                     if (wanted_size < min_size)
2016                         wanted_size = min_size;
2017                     else if (wanted_size > max_size)
2018                         wanted_size = max_size;
2019
2020                     /* add or remove samples to correct the sync */
2021                     if (wanted_size < samples_size) {
2022                         /* remove samples */
2023                         samples_size = wanted_size;
2024                     } else if (wanted_size > samples_size) {
2025                         uint8_t *samples_end, *q;
2026                         int nb;
2027
2028                         /* add samples */
2029                         nb = (wanted_size - samples_size);
2030                         samples_end = (uint8_t *)samples + samples_size - n;
2031                         q = samples_end + n;
2032                         while (nb > 0) {
2033                             memcpy(q, samples_end, n);
2034                             q += n;
2035                             nb -= n;
2036                         }
2037                         samples_size = wanted_size;
2038                     }
2039                 }
2040 #if 0
2041                 printf("diff=%f adiff=%f sample_diff=%d apts=%0.3f vpts=%0.3f %f\n",
2042                        diff, avg_diff, samples_size - samples_size1,
2043                        is->audio_clock, is->video_clock, is->audio_diff_threshold);
2044 #endif
2045             }
2046         } else {
2047             /* difference is too large: probably initial PTS errors, so
2048                reset the A-V filter */
2049             is->audio_diff_avg_count = 0;
2050             is->audio_diff_cum = 0;
2051         }
2052     }
2053
2054     return samples_size;
2055 }
2056
2057 /* decode one audio frame and return its uncompressed size */
2058 static int audio_decode_frame(VideoState *is, double *pts_ptr)
2059 {
2060     AVPacket *pkt_temp = &is->audio_pkt_temp;
2061     AVPacket *pkt = &is->audio_pkt;
2062     AVCodecContext *dec= is->audio_st->codec;
2063     int n, len1, data_size;
2064     double pts;
2065
2066     for(;;) {
2067         /* NOTE: the audio packet can contain several frames */
2068         while (pkt_temp->size > 0) {
2069             data_size = sizeof(is->audio_buf1);
2070             len1 = avcodec_decode_audio3(dec,
2071                                         (int16_t *)is->audio_buf1, &data_size,
2072                                         pkt_temp);
2073             if (len1 < 0) {
2074                 /* if error, we skip the frame */
2075                 pkt_temp->size = 0;
2076                 break;
2077             }
2078
2079             pkt_temp->data += len1;
2080             pkt_temp->size -= len1;
2081             if (data_size <= 0)
2082                 continue;
2083
2084             if (dec->sample_fmt != is->audio_src_fmt) {
2085                 if (is->reformat_ctx)
2086                     av_audio_convert_free(is->reformat_ctx);
2087                 is->reformat_ctx= av_audio_convert_alloc(AV_SAMPLE_FMT_S16, 1,
2088                                                          dec->sample_fmt, 1, NULL, 0);
2089                 if (!is->reformat_ctx) {
2090                     fprintf(stderr, "Cannot convert %s sample format to %s sample format\n",
2091                         av_get_sample_fmt_name(dec->sample_fmt),
2092                         av_get_sample_fmt_name(AV_SAMPLE_FMT_S16));
2093                         break;
2094                 }
2095                 is->audio_src_fmt= dec->sample_fmt;
2096             }
2097
2098             if (is->reformat_ctx) {
2099                 const void *ibuf[6]= {is->audio_buf1};
2100                 void *obuf[6]= {is->audio_buf2};
2101                 int istride[6]= {av_get_bits_per_sample_fmt(dec->sample_fmt)/8};
2102                 int ostride[6]= {2};
2103                 int len= data_size/istride[0];
2104                 if (av_audio_convert(is->reformat_ctx, obuf, ostride, ibuf, istride, len)<0) {
2105                     printf("av_audio_convert() failed\n");
2106                     break;
2107                 }
2108                 is->audio_buf= is->audio_buf2;
2109                 /* FIXME: existing code assumes that data_size equals framesize*channels*2;
2110                           remove this legacy cruft */
2111                 data_size= len*2;
2112             }else{
2113                 is->audio_buf= is->audio_buf1;
2114             }
2115
2116             /* if no pts, then compute it */
2117             pts = is->audio_clock;
2118             *pts_ptr = pts;
2119             n = 2 * dec->channels;
2120             is->audio_clock += (double)data_size /
2121                 (double)(n * dec->sample_rate);
2122 #if defined(DEBUG_SYNC)
2123             {
2124                 static double last_clock;
2125                 printf("audio: delay=%0.3f clock=%0.3f pts=%0.3f\n",
2126                        is->audio_clock - last_clock,
2127                        is->audio_clock, pts);
2128                 last_clock = is->audio_clock;
2129             }
2130 #endif
2131             return data_size;
2132         }
2133
2134         /* free the current packet */
2135         if (pkt->data)
2136             av_free_packet(pkt);
2137
2138         if (is->paused || is->audioq.abort_request) {
2139             return -1;
2140         }
2141
2142         /* read next packet */
2143         if (packet_queue_get(&is->audioq, pkt, 1) < 0)
2144             return -1;
2145         if(pkt->data == flush_pkt.data){
2146             avcodec_flush_buffers(dec);
2147             continue;
2148         }
2149
2150         pkt_temp->data = pkt->data;
2151         pkt_temp->size = pkt->size;
2152
2153         /* update the audio clock with the pts, if available */
2154         if (pkt->pts != AV_NOPTS_VALUE) {
2155             is->audio_clock = av_q2d(is->audio_st->time_base)*pkt->pts;
2156         }
2157     }
2158 }
2159
2160 /* get the amount of buffered audio output data, in bytes. With SDL, we
2161    cannot have precise information */
2162 static int audio_write_get_buf_size(VideoState *is)
2163 {
2164     return is->audio_buf_size - is->audio_buf_index;
2165 }
2166
2167
2168 /* SDL audio callback: fill the output buffer 'stream' with 'len' bytes of decoded audio */
2169 static void sdl_audio_callback(void *opaque, Uint8 *stream, int len)
2170 {
2171     VideoState *is = opaque;
2172     int audio_size, len1;
2173     double pts;
2174
2175     audio_callback_time = av_gettime();
2176
2177     while (len > 0) {
2178         if (is->audio_buf_index >= is->audio_buf_size) {
2179            audio_size = audio_decode_frame(is, &pts);
2180            if (audio_size < 0) {
2181                 /* if error, just output silence */
2182                is->audio_buf = is->audio_buf1;
2183                is->audio_buf_size = 1024;
2184                memset(is->audio_buf, 0, is->audio_buf_size);
2185            } else {
2186                if (is->show_audio)
2187                    update_sample_display(is, (int16_t *)is->audio_buf, audio_size);
2188                audio_size = synchronize_audio(is, (int16_t *)is->audio_buf, audio_size,
2189                                               pts);
2190                is->audio_buf_size = audio_size;
2191            }
2192            is->audio_buf_index = 0;
2193         }
2194         len1 = is->audio_buf_size - is->audio_buf_index;
2195         if (len1 > len)
2196             len1 = len;
2197         memcpy(stream, (uint8_t *)is->audio_buf + is->audio_buf_index, len1);
2198         len -= len1;
2199         stream += len1;
2200         is->audio_buf_index += len1;
2201     }
2202 }
2203
2204 /* open a given stream. Return 0 if OK */
2205 static int stream_component_open(VideoState *is, int stream_index)
2206 {
2207     AVFormatContext *ic = is->ic;
2208     AVCodecContext *avctx;
2209     AVCodec *codec;
2210     SDL_AudioSpec wanted_spec, spec;
2211
2212     if (stream_index < 0 || stream_index >= ic->nb_streams)
2213         return -1;
2214     avctx = ic->streams[stream_index]->codec;
2215
2216     /* request at most 2 decoded audio channels from the decoder */
2217     if (avctx->codec_type == AVMEDIA_TYPE_AUDIO) {
2218         if (avctx->channels > 0) {
2219             avctx->request_channels = FFMIN(2, avctx->channels);
2220         } else {
2221             avctx->request_channels = 2;
2222         }
2223     }
2224
2225     codec = avcodec_find_decoder(avctx->codec_id);
2226     avctx->debug_mv = debug_mv;
2227     avctx->debug = debug;
2228     avctx->workaround_bugs = workaround_bugs;
2229     avctx->lowres = lowres;
2230     if(lowres) avctx->flags |= CODEC_FLAG_EMU_EDGE;
2231     avctx->idct_algo= idct;
2232     if(fast) avctx->flags2 |= CODEC_FLAG2_FAST;
2233     avctx->skip_frame= skip_frame;
2234     avctx->skip_idct= skip_idct;
2235     avctx->skip_loop_filter= skip_loop_filter;
2236     avctx->error_recognition= error_recognition;
2237     avctx->error_concealment= error_concealment;
2238     avctx->thread_count= thread_count;
2239
2240     set_context_opts(avctx, avcodec_opts[avctx->codec_type], 0, codec);
2241
2242     if (!codec ||
2243         avcodec_open(avctx, codec) < 0)
2244         return -1;
2245
2246     /* prepare audio output */
2247     if (avctx->codec_type == AVMEDIA_TYPE_AUDIO) {
2248         wanted_spec.freq = avctx->sample_rate;
2249         wanted_spec.format = AUDIO_S16SYS;
2250         wanted_spec.channels = avctx->channels;
2251         wanted_spec.silence = 0;
2252         wanted_spec.samples = SDL_AUDIO_BUFFER_SIZE;
2253         wanted_spec.callback = sdl_audio_callback;
2254         wanted_spec.userdata = is;
2255         if (SDL_OpenAudio(&wanted_spec, &spec) < 0) {
2256             fprintf(stderr, "SDL_OpenAudio: %s\n", SDL_GetError());
2257             return -1;
2258         }
2259         is->audio_hw_buf_size = spec.size;
2260         is->audio_src_fmt= AV_SAMPLE_FMT_S16;
2261     }
2262
2263     ic->streams[stream_index]->discard = AVDISCARD_DEFAULT;
2264     switch(avctx->codec_type) {
2265     case AVMEDIA_TYPE_AUDIO:
2266         is->audio_stream = stream_index;
2267         is->audio_st = ic->streams[stream_index];
2268         is->audio_buf_size = 0;
2269         is->audio_buf_index = 0;
2270
2271         /* init averaging filter */
2272         is->audio_diff_avg_coef = exp(log(0.01) / AUDIO_DIFF_AVG_NB);
2273         is->audio_diff_avg_count = 0;
2274         /* since we do not have precise enough audio FIFO fullness information,
2275            we correct audio sync only if the error is larger than this threshold */
2276         is->audio_diff_threshold = 2.0 * SDL_AUDIO_BUFFER_SIZE / avctx->sample_rate;
2277
2278         memset(&is->audio_pkt, 0, sizeof(is->audio_pkt));
2279         packet_queue_init(&is->audioq);
2280         SDL_PauseAudio(0);
2281         break;
2282     case AVMEDIA_TYPE_VIDEO:
2283         is->video_stream = stream_index;
2284         is->video_st = ic->streams[stream_index];
2285
2286 //        is->video_current_pts_time = av_gettime();
2287
2288         packet_queue_init(&is->videoq);
2289         is->video_tid = SDL_CreateThread(video_thread, is);
2290         break;
2291     case AVMEDIA_TYPE_SUBTITLE:
2292         is->subtitle_stream = stream_index;
2293         is->subtitle_st = ic->streams[stream_index];
2294         packet_queue_init(&is->subtitleq);
2295
2296         is->subtitle_tid = SDL_CreateThread(subtitle_thread, is);
2297         break;
2298     default:
2299         break;
2300     }
2301     return 0;
2302 }
2303
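/* close a stream component: abort its packet queue, stop the associated
   thread or audio device and release the codec */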
2304 static void stream_component_close(VideoState *is, int stream_index)
2305 {
2306     AVFormatContext *ic = is->ic;
2307     AVCodecContext *avctx;
2308
2309     if (stream_index < 0 || stream_index >= ic->nb_streams)
2310         return;
2311     avctx = ic->streams[stream_index]->codec;
2312
2313     switch(avctx->codec_type) {
2314     case AVMEDIA_TYPE_AUDIO:
2315         packet_queue_abort(&is->audioq);
2316
2317         SDL_CloseAudio();
2318
2319         packet_queue_end(&is->audioq);
2320         if (is->reformat_ctx)
2321             av_audio_convert_free(is->reformat_ctx);
2322         is->reformat_ctx = NULL;
2323         break;
2324     case AVMEDIA_TYPE_VIDEO:
2325         packet_queue_abort(&is->videoq);
2326
2327         /* note: we also signal this condition to make sure we unblock the
2328            video thread in all cases */
2329         SDL_LockMutex(is->pictq_mutex);
2330         SDL_CondSignal(is->pictq_cond);
2331         SDL_UnlockMutex(is->pictq_mutex);
2332
2333         SDL_WaitThread(is->video_tid, NULL);
2334
2335         packet_queue_end(&is->videoq);
2336         break;
2337     case AVMEDIA_TYPE_SUBTITLE:
2338         packet_queue_abort(&is->subtitleq);
2339
2340         /* note: we also signal this condition to make sure we unblock the
2341            subtitle thread in all cases */
2342         SDL_LockMutex(is->subpq_mutex);
2343         is->subtitle_stream_changed = 1;
2344
2345         SDL_CondSignal(is->subpq_cond);
2346         SDL_UnlockMutex(is->subpq_mutex);
2347
2348         SDL_WaitThread(is->subtitle_tid, NULL);
2349
2350         packet_queue_end(&is->subtitleq);
2351         break;
2352     default:
2353         break;
2354     }
2355
2356     ic->streams[stream_index]->discard = AVDISCARD_ALL;
2357     avcodec_close(avctx);
2358     switch(avctx->codec_type) {
2359     case AVMEDIA_TYPE_AUDIO:
2360         is->audio_st = NULL;
2361         is->audio_stream = -1;
2362         break;
2363     case AVMEDIA_TYPE_VIDEO:
2364         is->video_st = NULL;
2365         is->video_stream = -1;
2366         break;
2367     case AVMEDIA_TYPE_SUBTITLE:
2368         is->subtitle_st = NULL;
2369         is->subtitle_stream = -1;
2370         break;
2371     default:
2372         break;
2373     }
2374 }
2375
2376 /* since we have only one decoding thread, we can use a global
2377    variable instead of a thread-local variable */
2378 static VideoState *global_video_state;
2379
2380 static int decode_interrupt_cb(void)
2381 {
2382     return (global_video_state && global_video_state->abort_request);
2383 }
2384
2385 /* this thread reads the stream from disk or the network */
2386 static int decode_thread(void *arg)
2387 {
2388     VideoState *is = arg;
2389     AVFormatContext *ic;
2390     int err, i, ret;
2391     int st_index[AVMEDIA_TYPE_NB];
2392     AVPacket pkt1, *pkt = &pkt1;
2393     AVFormatParameters params, *ap = &params;
2394     int eof=0;
2395     int pkt_in_play_range = 0;
2396
2397     ic = avformat_alloc_context();
2398
2399     memset(st_index, -1, sizeof(st_index));
2400     is->video_stream = -1;
2401     is->audio_stream = -1;
2402     is->subtitle_stream = -1;
2403
2404     global_video_state = is;
2405     avio_set_interrupt_cb(decode_interrupt_cb);
2406
2407     memset(ap, 0, sizeof(*ap));
2408
2409     ap->prealloced_context = 1;
2410     ap->width = frame_width;
2411     ap->height= frame_height;
2412     ap->time_base= (AVRational){1, 25};
2413     ap->pix_fmt = frame_pix_fmt;
2414
2415     set_context_opts(ic, avformat_opts, AV_OPT_FLAG_DECODING_PARAM, NULL);
2416
2417     err = av_open_input_file(&ic, is->filename, is->iformat, 0, ap);
2418     if (err < 0) {
2419         print_error(is->filename, err);
2420         ret = -1;
2421         goto fail;
2422     }
2423     is->ic = ic;
2424
2425     if(genpts)
2426         ic->flags |= AVFMT_FLAG_GENPTS;
2427
2428     err = av_find_stream_info(ic);
2429     if (err < 0) {
2430         fprintf(stderr, "%s: could not find codec parameters\n", is->filename);
2431         ret = -1;
2432         goto fail;
2433     }
2434     if(ic->pb)
2435         ic->pb->eof_reached= 0; //FIXME hack, ffplay maybe should not use url_feof() to test for the end
2436
2437     if(seek_by_bytes<0)
2438         seek_by_bytes= !!(ic->iformat->flags & AVFMT_TS_DISCONT);
2439
2440     /* if seeking requested, we execute it */
2441     if (start_time != AV_NOPTS_VALUE) {
2442         int64_t timestamp;
2443
2444         timestamp = start_time;
2445         /* add the stream start time */
2446         if (ic->start_time != AV_NOPTS_VALUE)
2447             timestamp += ic->start_time;
2448         ret = avformat_seek_file(ic, -1, INT64_MIN, timestamp, INT64_MAX, 0);
2449         if (ret < 0) {
2450             fprintf(stderr, "%s: could not seek to position %0.3f\n",
2451                     is->filename, (double)timestamp / AV_TIME_BASE);
2452         }
2453     }
2454
2455     for (i = 0; i < ic->nb_streams; i++)
2456         ic->streams[i]->discard = AVDISCARD_ALL;
2457     if (!video_disable)
2458         st_index[AVMEDIA_TYPE_VIDEO] =
2459             av_find_best_stream(ic, AVMEDIA_TYPE_VIDEO,
2460                                 wanted_stream[AVMEDIA_TYPE_VIDEO], -1, NULL, 0);
2461     if (!audio_disable)
2462         st_index[AVMEDIA_TYPE_AUDIO] =
2463             av_find_best_stream(ic, AVMEDIA_TYPE_AUDIO,
2464                                 wanted_stream[AVMEDIA_TYPE_AUDIO],
2465                                 st_index[AVMEDIA_TYPE_VIDEO],
2466                                 NULL, 0);
2467     if (!video_disable)
2468         st_index[AVMEDIA_TYPE_SUBTITLE] =
2469             av_find_best_stream(ic, AVMEDIA_TYPE_SUBTITLE,
2470                                 wanted_stream[AVMEDIA_TYPE_SUBTITLE],
2471                                 (st_index[AVMEDIA_TYPE_AUDIO] >= 0 ?
2472                                  st_index[AVMEDIA_TYPE_AUDIO] :
2473                                  st_index[AVMEDIA_TYPE_VIDEO]),
2474                                 NULL, 0);
2475     if (show_status) {
2476         av_dump_format(ic, 0, is->filename, 0);
2477     }
2478
2479     /* open the streams */
2480     if (st_index[AVMEDIA_TYPE_AUDIO] >= 0) {
2481         stream_component_open(is, st_index[AVMEDIA_TYPE_AUDIO]);
2482     }
2483
2484     ret=-1;
2485     if (st_index[AVMEDIA_TYPE_VIDEO] >= 0) {
2486         ret= stream_component_open(is, st_index[AVMEDIA_TYPE_VIDEO]);
2487     }
2488     is->refresh_tid = SDL_CreateThread(refresh_thread, is);
2489     if(ret<0) {
2490         if (!display_disable)
2491             is->show_audio = 2;
2492     }
2493
2494     if (st_index[AVMEDIA_TYPE_SUBTITLE] >= 0) {
2495         stream_component_open(is, st_index[AVMEDIA_TYPE_SUBTITLE]);
2496     }
2497
2498     if (is->video_stream < 0 && is->audio_stream < 0) {
2499         fprintf(stderr, "%s: could not open codecs\n", is->filename);
2500         ret = -1;
2501         goto fail;
2502     }
2503
2504     for(;;) {
2505         if (is->abort_request)
2506             break;
2507         if (is->paused != is->last_paused) {
2508             is->last_paused = is->paused;
2509             if (is->paused)
2510                 is->read_pause_return= av_read_pause(ic);
2511             else
2512                 av_read_play(ic);
2513         }
2514 #if CONFIG_RTSP_DEMUXER
2515         if (is->paused && !strcmp(ic->iformat->name, "rtsp")) {
2516             /* wait 10 ms to avoid trying to get another packet */
2517             /* XXX: horrible */
2518             SDL_Delay(10);
2519             continue;
2520         }
2521 #endif
2522         if (is->seek_req) {
2523             int64_t seek_target= is->seek_pos;
2524             int64_t seek_min= is->seek_rel > 0 ? seek_target - is->seek_rel + 2: INT64_MIN;
2525             int64_t seek_max= is->seek_rel < 0 ? seek_target - is->seek_rel - 2: INT64_MAX;
2526 //FIXME the +-2 is due to rounding not being done in the correct direction when
2527 //      generating the seek_pos/seek_rel variables
2528
2529             ret = avformat_seek_file(is->ic, -1, seek_min, seek_target, seek_max, is->seek_flags);
2530             if (ret < 0) {
2531                 fprintf(stderr, "%s: error while seeking\n", is->ic->filename);
2532             }else{
2533                 if (is->audio_stream >= 0) {
2534                     packet_queue_flush(&is->audioq);
2535                     packet_queue_put(&is->audioq, &flush_pkt);
2536                 }
2537                 if (is->subtitle_stream >= 0) {
2538                     packet_queue_flush(&is->subtitleq);
2539                     packet_queue_put(&is->subtitleq, &flush_pkt);
2540                 }
2541                 if (is->video_stream >= 0) {
2542                     packet_queue_flush(&is->videoq);
2543                     packet_queue_put(&is->videoq, &flush_pkt);
2544                 }
2545             }
2546             is->seek_req = 0;
2547             eof= 0;
2548         }
2549
2550         /* if the queues are full, no need to read more */
2551         if (   is->audioq.size + is->videoq.size + is->subtitleq.size > MAX_QUEUE_SIZE
2552             || (   (is->audioq   .size  > MIN_AUDIOQ_SIZE || is->audio_stream<0)
2553                 && (is->videoq   .nb_packets > MIN_FRAMES || is->video_stream<0)
2554                 && (is->subtitleq.nb_packets > MIN_FRAMES || is->subtitle_stream<0))) {
2555             /* wait 10 ms */
2556             SDL_Delay(10);
2557             continue;
2558         }
2559         if(eof) {
2560             if(is->video_stream >= 0){
2561                 av_init_packet(pkt);
2562                 pkt->data=NULL;
2563                 pkt->size=0;
2564                 pkt->stream_index= is->video_stream;
2565                 packet_queue_put(&is->videoq, pkt);
2566             }
2567             SDL_Delay(10);
2568             if(is->audioq.size + is->videoq.size + is->subtitleq.size ==0){
2569                 if(loop!=1 && (!loop || --loop)){
2570                     stream_seek(cur_stream, start_time != AV_NOPTS_VALUE ? start_time : 0, 0, 0);
2571                 }else if(autoexit){
2572                     ret=AVERROR_EOF;
2573                     goto fail;
2574                 }
2575             }
2576             eof=0;
2577             continue;
2578         }
2579         ret = av_read_frame(ic, pkt);
2580         if (ret < 0) {
2581             if (ret == AVERROR_EOF || url_feof(ic->pb))
2582                 eof=1;
2583             if (ic->pb && ic->pb->error)
2584                 break;
2585             SDL_Delay(100); /* wait for user event */
2586             continue;
2587         }
2588         /* check if the packet is in the play range specified by the user, then queue it, otherwise discard it */
2589         pkt_in_play_range = duration == AV_NOPTS_VALUE ||
2590                 (pkt->pts - ic->streams[pkt->stream_index]->start_time) *
2591                 av_q2d(ic->streams[pkt->stream_index]->time_base) -
2592                 (double)(start_time != AV_NOPTS_VALUE ? start_time : 0)/1000000
2593                 <= ((double)duration/1000000);
2594         if (pkt->stream_index == is->audio_stream && pkt_in_play_range) {
2595             packet_queue_put(&is->audioq, pkt);
2596         } else if (pkt->stream_index == is->video_stream && pkt_in_play_range) {
2597             packet_queue_put(&is->videoq, pkt);
2598         } else if (pkt->stream_index == is->subtitle_stream && pkt_in_play_range) {
2599             packet_queue_put(&is->subtitleq, pkt);
2600         } else {
2601             av_free_packet(pkt);
2602         }
2603     }
2604     /* wait until the end */
2605     while (!is->abort_request) {
2606         SDL_Delay(100);
2607     }
2608
2609     ret = 0;
2610  fail:
2611     /* disable interrupting */
2612     global_video_state = NULL;
2613
2614     /* close each stream */
2615     if (is->audio_stream >= 0)
2616         stream_component_close(is, is->audio_stream);
2617     if (is->video_stream >= 0)
2618         stream_component_close(is, is->video_stream);
2619     if (is->subtitle_stream >= 0)
2620         stream_component_close(is, is->subtitle_stream);
2621     if (is->ic) {
2622         av_close_input_file(is->ic);
2623         is->ic = NULL; /* safety */
2624     }
2625     avio_set_interrupt_cb(NULL);
2626
2627     if (ret != 0) {
2628         SDL_Event event;
2629
2630         event.type = FF_QUIT_EVENT;
2631         event.user.data1 = is;
2632         SDL_PushEvent(&event);
2633     }
2634     return 0;
2635 }
2636
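/* allocate a VideoState for the given input and start the decode_thread that demuxes it */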
2637 static VideoState *stream_open(const char *filename, AVInputFormat *iformat)
2638 {
2639     VideoState *is;
2640
2641     is = av_mallocz(sizeof(VideoState));
2642     if (!is)
2643         return NULL;
2644     av_strlcpy(is->filename, filename, sizeof(is->filename));
2645     is->iformat = iformat;
2646     is->ytop = 0;
2647     is->xleft = 0;
2648
2649     /* start video display */
2650     is->pictq_mutex = SDL_CreateMutex();
2651     is->pictq_cond = SDL_CreateCond();
2652
2653     is->subpq_mutex = SDL_CreateMutex();
2654     is->subpq_cond = SDL_CreateCond();
2655
2656     is->av_sync_type = av_sync_type;
2657     is->parse_tid = SDL_CreateThread(decode_thread, is);
2658     if (!is->parse_tid) {
2659         av_free(is);
2660         return NULL;
2661     }
2662     return is;
2663 }
2664
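/* switch to the next stream of the given type, wrapping around; for
   subtitles the cycle also includes "no subtitle stream" */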
2665 static void stream_cycle_channel(VideoState *is, int codec_type)
2666 {
2667     AVFormatContext *ic = is->ic;
2668     int start_index, stream_index;
2669     AVStream *st;
2670
2671     if (codec_type == AVMEDIA_TYPE_VIDEO)
2672         start_index = is->video_stream;
2673     else if (codec_type == AVMEDIA_TYPE_AUDIO)
2674         start_index = is->audio_stream;
2675     else
2676         start_index = is->subtitle_stream;
2677     if (start_index < (codec_type == AVMEDIA_TYPE_SUBTITLE ? -1 : 0))
2678         return;
2679     stream_index = start_index;
2680     for(;;) {
2681         if (++stream_index >= is->ic->nb_streams)
2682         {
2683             if (codec_type == AVMEDIA_TYPE_SUBTITLE)
2684             {
2685                 stream_index = -1;
2686                 goto the_end;
2687             } else
2688                 stream_index = 0;
2689         }
2690         if (stream_index == start_index)
2691             return;
2692         st = ic->streams[stream_index];
2693         if (st->codec->codec_type == codec_type) {
2694             /* check that parameters are OK */
2695             switch(codec_type) {
2696             case AVMEDIA_TYPE_AUDIO:
2697                 if (st->codec->sample_rate != 0 &&
2698                     st->codec->channels != 0)
2699                     goto the_end;
2700                 break;
2701             case AVMEDIA_TYPE_VIDEO:
2702             case AVMEDIA_TYPE_SUBTITLE:
2703                 goto the_end;
2704             default:
2705                 break;
2706             }
2707         }
2708     }
2709  the_end:
2710     stream_component_close(is, start_index);
2711     stream_component_open(is, stream_index);
2712 }
2713
2714
2715 static void toggle_full_screen(void)
2716 {
2717     is_full_screen = !is_full_screen;
2718     if (!fs_screen_width) {
2719         /* use default SDL method */
2720 //        SDL_WM_ToggleFullScreen(screen);
2721     }
2722     video_open(cur_stream);
2723 }
2724
2725 static void toggle_pause(void)
2726 {
2727     if (cur_stream)
2728         stream_pause(cur_stream);
2729     step = 0;
2730 }
2731
2732 static void step_to_next_frame(void)
2733 {
2734     if (cur_stream) {
2735         /* if the stream is paused, unpause it, then step */
2736         if (cur_stream->paused)
2737             stream_pause(cur_stream);
2738     }
2739     step = 1;
2740 }
2741
2742 static void toggle_audio_display(void)
2743 {
2744     if (cur_stream) {
2745         int bgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0x00);
2746         cur_stream->show_audio = (cur_stream->show_audio + 1) % 3;
2747         fill_rectangle(screen,
2748                     cur_stream->xleft, cur_stream->ytop, cur_stream->width, cur_stream->height,
2749                     bgcolor);
2750         SDL_UpdateRect(screen, cur_stream->xleft, cur_stream->ytop, cur_stream->width, cur_stream->height);
2751     }
2752 }
2753
2754 /* handle an event sent by the GUI */
2755 static void event_loop(void)
2756 {
2757     SDL_Event event;
2758     double incr, pos, frac;
2759
2760     for(;;) {
2761         double x;
2762         SDL_WaitEvent(&event);
2763         switch(event.type) {
2764         case SDL_KEYDOWN:
2765             if (exit_on_keydown) {
2766                 do_exit();
2767                 break;
2768             }
2769             switch(event.key.keysym.sym) {
2770             case SDLK_ESCAPE:
2771             case SDLK_q:
2772                 do_exit();
2773                 break;
2774             case SDLK_f:
2775                 toggle_full_screen();
2776                 break;
2777             case SDLK_p:
2778             case SDLK_SPACE:
2779                 toggle_pause();
2780                 break;
2781             case SDLK_s: //S: Step to next frame
2782                 step_to_next_frame();
2783                 break;
2784             case SDLK_a:
2785                 if (cur_stream)
2786                     stream_cycle_channel(cur_stream, AVMEDIA_TYPE_AUDIO);
2787                 break;
2788             case SDLK_v:
2789                 if (cur_stream)
2790                     stream_cycle_channel(cur_stream, AVMEDIA_TYPE_VIDEO);
2791                 break;
2792             case SDLK_t:
2793                 if (cur_stream)
2794                     stream_cycle_channel(cur_stream, AVMEDIA_TYPE_SUBTITLE);
2795                 break;
2796             case SDLK_w:
2797                 toggle_audio_display();
2798                 break;
2799             case SDLK_LEFT:
2800                 incr = -10.0;
2801                 goto do_seek;
2802             case SDLK_RIGHT:
2803                 incr = 10.0;
2804                 goto do_seek;
2805             case SDLK_UP:
2806                 incr = 60.0;
2807                 goto do_seek;
2808             case SDLK_DOWN:
2809                 incr = -60.0;
2810             do_seek:
2811                 if (cur_stream) {
2812                     if (seek_by_bytes) {
2813                         if (cur_stream->video_stream >= 0 && cur_stream->video_current_pos>=0){
2814                             pos= cur_stream->video_current_pos;
2815                         }else if(cur_stream->audio_stream >= 0 && cur_stream->audio_pkt.pos>=0){
2816                             pos= cur_stream->audio_pkt.pos;
2817                         }else
2818                             pos = avio_tell(cur_stream->ic->pb);
2819                         if (cur_stream->ic->bit_rate)
2820                             incr *= cur_stream->ic->bit_rate / 8.0;
2821                         else
2822                             incr *= 180000.0;
2823                         pos += incr;
2824                         stream_seek(cur_stream, pos, incr, 1);
2825                     } else {
2826                         pos = get_master_clock(cur_stream);
2827                         pos += incr;
2828                         stream_seek(cur_stream, (int64_t)(pos * AV_TIME_BASE), (int64_t)(incr * AV_TIME_BASE), 0);
2829                     }
2830                 }
2831                 break;
2832             default:
2833                 break;
2834             }
2835             break;
2836         case SDL_MOUSEBUTTONDOWN:
2837             if (exit_on_mousedown) {
2838                 do_exit();
2839                 break;
2840             }
2841         case SDL_MOUSEMOTION:
2842             if(event.type ==SDL_MOUSEBUTTONDOWN){
2843                 x= event.button.x;
2844             }else{
2845                 if(event.motion.state != SDL_PRESSED)
2846                     break;
2847                 x= event.motion.x;
2848             }
2849             if (cur_stream) {
2850                 if(seek_by_bytes || cur_stream->ic->duration<=0){
2851                     uint64_t size=  avio_size(cur_stream->ic->pb);
2852                     stream_seek(cur_stream, size*x/cur_stream->width, 0, 1);
2853                 }else{
2854                     int64_t ts;
2855                     int ns, hh, mm, ss;
2856                     int tns, thh, tmm, tss;
2857                     tns = cur_stream->ic->duration/1000000LL;
2858                     thh = tns/3600;
2859                     tmm = (tns%3600)/60;
2860                     tss = (tns%60);
2861                     frac = x/cur_stream->width;
2862                     ns = frac*tns;
2863                     hh = ns/3600;
2864                     mm = (ns%3600)/60;
2865                     ss = (ns%60);
2866                     fprintf(stderr, "Seek to %2.0f%% (%2d:%02d:%02d) of total duration (%2d:%02d:%02d)       \n", frac*100,
2867                             hh, mm, ss, thh, tmm, tss);
2868                     ts = frac*cur_stream->ic->duration;
2869                     if (cur_stream->ic->start_time != AV_NOPTS_VALUE)
2870                         ts += cur_stream->ic->start_time;
2871                     stream_seek(cur_stream, ts, 0, 0);
2872                 }
2873             }
2874             break;
2875         case SDL_VIDEORESIZE:
2876             if (cur_stream) {
2877                 screen = SDL_SetVideoMode(event.resize.w, event.resize.h, 0,
2878                                           SDL_HWSURFACE|SDL_RESIZABLE|SDL_ASYNCBLIT|SDL_HWACCEL);
2879                 screen_width = cur_stream->width = event.resize.w;
2880                 screen_height= cur_stream->height= event.resize.h;
2881             }
2882             break;
2883         case SDL_QUIT:
2884         case FF_QUIT_EVENT:
2885             do_exit();
2886             break;
2887         case FF_ALLOC_EVENT:
2888             video_open(event.user.data1);
2889             alloc_picture(event.user.data1);
2890             break;
2891         case FF_REFRESH_EVENT:
2892             video_refresh_timer(event.user.data1);
2893             cur_stream->refresh=0;
2894             break;
2895         default:
2896             break;
2897         }
2898     }
2899 }
2900
2901 static void opt_frame_size(const char *arg)
2902 {
2903     if (av_parse_video_size(&frame_width, &frame_height, arg) < 0) {
2904         fprintf(stderr, "Incorrect frame size\n");
2905         exit(1);
2906     }
2907     if ((frame_width % 2) != 0 || (frame_height % 2) != 0) {
2908         fprintf(stderr, "Frame size must be a multiple of 2\n");
2909         exit(1);
2910     }
2911 }
2912
2913 static int opt_width(const char *opt, const char *arg)
2914 {
2915     screen_width = parse_number_or_die(opt, arg, OPT_INT64, 1, INT_MAX);
2916     return 0;
2917 }
2918
2919 static int opt_height(const char *opt, const char *arg)
2920 {
2921     screen_height = parse_number_or_die(opt, arg, OPT_INT64, 1, INT_MAX);
2922     return 0;
2923 }
2924
2925 static void opt_format(const char *arg)
2926 {
2927     file_iformat = av_find_input_format(arg);
2928     if (!file_iformat) {
2929         fprintf(stderr, "Unknown input format: %s\n", arg);
2930         exit(1);
2931     }
2932 }
2933
2934 static void opt_frame_pix_fmt(const char *arg)
2935 {
2936     frame_pix_fmt = av_get_pix_fmt(arg);
2937 }
2938
2939 static int opt_sync(const char *opt, const char *arg)
2940 {
2941     if (!strcmp(arg, "audio"))
2942         av_sync_type = AV_SYNC_AUDIO_MASTER;
2943     else if (!strcmp(arg, "video"))
2944         av_sync_type = AV_SYNC_VIDEO_MASTER;
2945     else if (!strcmp(arg, "ext"))
2946         av_sync_type = AV_SYNC_EXTERNAL_CLOCK;
2947     else {
2948         fprintf(stderr, "Unknown value for %s: %s\n", opt, arg);
2949         exit(1);
2950     }
2951     return 0;
2952 }
2953
2954 static int opt_seek(const char *opt, const char *arg)
2955 {
2956     start_time = parse_time_or_die(opt, arg, 1);
2957     return 0;
2958 }
2959
2960 static int opt_duration(const char *opt, const char *arg)
2961 {
2962     duration = parse_time_or_die(opt, arg, 1);
2963     return 0;
2964 }
2965
2966 static int opt_debug(const char *opt, const char *arg)
2967 {
2968     av_log_set_level(99);
2969     debug = parse_number_or_die(opt, arg, OPT_INT64, 0, INT_MAX);
2970     return 0;
2971 }
2972
2973 static int opt_vismv(const char *opt, const char *arg)
2974 {
2975     debug_mv = parse_number_or_die(opt, arg, OPT_INT64, INT_MIN, INT_MAX);
2976     return 0;
2977 }
2978
2979 static int opt_thread_count(const char *opt, const char *arg)
2980 {
2981     thread_count= parse_number_or_die(opt, arg, OPT_INT64, 0, INT_MAX);
2982 #if !HAVE_THREADS
2983     fprintf(stderr, "Warning: not compiled with thread support, using thread emulation\n");
2984 #endif
2985     return 0;
2986 }
2987
2988 static const OptionDef options[] = {
2989 #include "cmdutils_common_opts.h"
2990     { "x", HAS_ARG | OPT_FUNC2, {(void*)opt_width}, "force displayed width", "width" },
2991     { "y", HAS_ARG | OPT_FUNC2, {(void*)opt_height}, "force displayed height", "height" },
2992     { "s", HAS_ARG | OPT_VIDEO, {(void*)opt_frame_size}, "set frame size (WxH or abbreviation)", "size" },
2993     { "fs", OPT_BOOL, {(void*)&is_full_screen}, "force full screen" },
2994     { "an", OPT_BOOL, {(void*)&audio_disable}, "disable audio" },
2995     { "vn", OPT_BOOL, {(void*)&video_disable}, "disable video" },
2996     { "ast", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&wanted_stream[AVMEDIA_TYPE_AUDIO]}, "select desired audio stream", "stream_number" },
2997     { "vst", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&wanted_stream[AVMEDIA_TYPE_VIDEO]}, "select desired video stream", "stream_number" },
2998     { "sst", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&wanted_stream[AVMEDIA_TYPE_SUBTITLE]}, "select desired subtitle stream", "stream_number" },
2999     { "ss", HAS_ARG | OPT_FUNC2, {(void*)&opt_seek}, "seek to a given position in seconds", "pos" },
3000     { "t", HAS_ARG | OPT_FUNC2, {(void*)&opt_duration}, "play \"duration\" seconds of audio/video", "duration" },
3001     { "bytes", OPT_INT | HAS_ARG, {(void*)&seek_by_bytes}, "seek by bytes 0=off 1=on -1=auto", "val" },
3002     { "nodisp", OPT_BOOL, {(void*)&display_disable}, "disable graphical display" },
3003     { "f", HAS_ARG, {(void*)opt_format}, "force format", "fmt" },
3004     { "pix_fmt", HAS_ARG | OPT_EXPERT | OPT_VIDEO, {(void*)opt_frame_pix_fmt}, "set pixel format", "format" },
3005     { "stats", OPT_BOOL | OPT_EXPERT, {(void*)&show_status}, "show status", "" },
3006     { "debug", HAS_ARG | OPT_FUNC2 | OPT_EXPERT, {(void*)opt_debug}, "print specific debug info", "" },
3007     { "bug", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&workaround_bugs}, "workaround bugs", "" },
3008     { "vismv", HAS_ARG | OPT_FUNC2 | OPT_EXPERT, {(void*)opt_vismv}, "visualize motion vectors", "" },
3009     { "fast", OPT_BOOL | OPT_EXPERT, {(void*)&fast}, "non spec compliant optimizations", "" },
3010     { "genpts", OPT_BOOL | OPT_EXPERT, {(void*)&genpts}, "generate pts", "" },
3011     { "drp", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&decoder_reorder_pts}, "let decoder reorder pts 0=off 1=on -1=auto", ""},
3012     { "lowres", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&lowres}, "", "" },
3013     { "skiploop", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&skip_loop_filter}, "", "" },
3014     { "skipframe", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&skip_frame}, "", "" },
3015     { "skipidct", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&skip_idct}, "", "" },
3016     { "idct", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&idct}, "set idct algo",  "algo" },
3017     { "er", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&error_recognition}, "set error detection threshold (0-4)",  "threshold" },
3018     { "ec", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&error_concealment}, "set error concealment options",  "bit_mask" },
3019     { "sync", HAS_ARG | OPT_FUNC2 | OPT_EXPERT, {(void*)opt_sync}, "set audio-video sync. type (type=audio/video/ext)", "type" },
3020     { "threads", HAS_ARG | OPT_FUNC2 | OPT_EXPERT, {(void*)opt_thread_count}, "thread count", "count" },
3021     { "autoexit", OPT_BOOL | OPT_EXPERT, {(void*)&autoexit}, "exit at the end", "" },
3022     { "exitonkeydown", OPT_BOOL | OPT_EXPERT, {(void*)&exit_on_keydown}, "exit on key down", "" },
3023     { "exitonmousedown", OPT_BOOL | OPT_EXPERT, {(void*)&exit_on_mousedown}, "exit on mouse down", "" },
3024     { "loop", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&loop}, "set number of times the playback shall be looped", "loop count" },
3025     { "framedrop", OPT_BOOL | OPT_EXPERT, {(void*)&framedrop}, "drop frames when cpu is too slow", "" },
3026     { "window_title", OPT_STRING | HAS_ARG, {(void*)&window_title}, "set window title", "window title" },
3027 #if CONFIG_AVFILTER
3028     { "vf", OPT_STRING | HAS_ARG, {(void*)&vfilters}, "video filters", "filter list" },
3029 #endif
3030     { "rdftspeed", OPT_INT | HAS_ARG| OPT_AUDIO | OPT_EXPERT, {(void*)&rdftspeed}, "rdft speed", "msecs" },
3031     { "default", OPT_FUNC2 | HAS_ARG | OPT_AUDIO | OPT_VIDEO | OPT_EXPERT, {(void*)opt_default}, "generic catch all option", "" },
3032     { "i", OPT_DUMMY, {NULL}, "ffmpeg compatibility dummy option", ""},
3033     { NULL, },
3034 };
3035
3036 static void show_usage(void)
3037 {
3038     printf("Simple media player\n");
3039     printf("usage: ffplay [options] input_file\n");
3040     printf("\n");
3041 }
3042
3043 static void show_help(void)
3044 {
3045     av_log_set_callback(log_callback_help);
3046     show_usage();
3047     show_help_options(options, "Main options:\n",
3048                       OPT_EXPERT, 0);
3049     show_help_options(options, "\nAdvanced options:\n",
3050                       OPT_EXPERT, OPT_EXPERT);
3051     printf("\n");
3052     av_opt_show2(avcodec_opts[0], NULL,
3053                  AV_OPT_FLAG_DECODING_PARAM, 0);
3054     printf("\n");
3055     av_opt_show2(avformat_opts, NULL,
3056                  AV_OPT_FLAG_DECODING_PARAM, 0);
3057 #if !CONFIG_AVFILTER
3058     printf("\n");
3059     av_opt_show2(sws_opts, NULL,
3060                  AV_OPT_FLAG_ENCODING_PARAM, 0);
3061 #endif
3062     printf("\nWhile playing:\n"
3063            "q, ESC              quit\n"
3064            "f                   toggle full screen\n"
3065            "p, SPC              pause\n"
3066            "a                   cycle audio channel\n"
3067            "v                   cycle video channel\n"
3068            "t                   cycle subtitle channel\n"
3069            "w                   show audio waves\n"
3070            "s                   activate frame-step mode\n"
3071            "left/right          seek backward/forward 10 seconds\n"
3072            "down/up             seek backward/forward 1 minute\n"
3073            "mouse click         seek to percentage in file corresponding to fraction of width\n"
3074            );
3075 }
3076
3077 static void opt_input_file(const char *filename)
3078 {
3079     if (input_filename) {
3080         fprintf(stderr, "Argument '%s' provided as input filename, but '%s' was already specified.\n",
3081                 filename, input_filename);
3082         exit(1);
3083     }
3084     if (!strcmp(filename, "-"))
3085         filename = "pipe:";
3086     input_filename = filename;
3087 }
3088
3089 /* program entry point */
3090 int main(int argc, char **argv)
3091 {
3092     int flags;
3093
3094     av_log_set_flags(AV_LOG_SKIP_REPEATED);
3095
3096     /* register all codecs, demux and protocols */
3097     avcodec_register_all();
3098 #if CONFIG_AVDEVICE
3099     avdevice_register_all();
3100 #endif
3101 #if CONFIG_AVFILTER
3102     avfilter_register_all();
3103 #endif
3104     av_register_all();
3105
3106     init_opts();
3107
3108     show_banner();
3109
3110     parse_options(argc, argv, options, opt_input_file);
3111
3112     if (!input_filename) {
3113         show_usage();
3114         fprintf(stderr, "An input file must be specified\n");
3115         fprintf(stderr, "Use -h to get full help or, even better, run 'man ffplay'\n");
3116         exit(1);
3117     }
3118
3119     if (display_disable) {
3120         video_disable = 1;
3121     }
3122     flags = SDL_INIT_VIDEO | SDL_INIT_AUDIO | SDL_INIT_TIMER;
3123 #if !defined(__MINGW32__) && !defined(__APPLE__)
3124     flags |= SDL_INIT_EVENTTHREAD; /* Not supported on Windows or Mac OS X */
3125 #endif
3126     if (SDL_Init (flags)) {
3127         fprintf(stderr, "Could not initialize SDL - %s\n", SDL_GetError());
3128         exit(1);
3129     }
3130
3131     if (!display_disable) {
3132 #if HAVE_SDL_VIDEO_SIZE
3133         const SDL_VideoInfo *vi = SDL_GetVideoInfo();
3134         fs_screen_width = vi->current_w;
3135         fs_screen_height = vi->current_h;
3136 #endif
3137     }
3138
3139     SDL_EventState(SDL_ACTIVEEVENT, SDL_IGNORE);
3140     SDL_EventState(SDL_SYSWMEVENT, SDL_IGNORE);
3141     SDL_EventState(SDL_USEREVENT, SDL_IGNORE);
3142
3143     av_init_packet(&flush_pkt);
3144     flush_pkt.data= "FLUSH";
3145
3146     cur_stream = stream_open(input_filename, file_iformat);
3147
3148     event_loop();
3149
3150     /* never returns */
3151
3152     return 0;
3153 }