1 /*
2  * ffplay: Simple Media Player based on the FFmpeg libraries
3  * Copyright (c) 2003 Fabrice Bellard
4  *
5  * This file is part of FFmpeg.
6  *
7  * FFmpeg is free software; you can redistribute it and/or
8  * modify it under the terms of the GNU Lesser General Public
9  * License as published by the Free Software Foundation; either
10  * version 2.1 of the License, or (at your option) any later version.
11  *
12  * FFmpeg is distributed in the hope that it will be useful,
13  * but WITHOUT ANY WARRANTY; without even the implied warranty of
14  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
15  * Lesser General Public License for more details.
16  *
17  * You should have received a copy of the GNU Lesser General Public
18  * License along with FFmpeg; if not, write to the Free Software
19  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
20  */
21
22 #define _XOPEN_SOURCE 600
23
24 #include "config.h"
25 #include <inttypes.h>
26 #include <math.h>
27 #include <limits.h>
28 #include "libavutil/avstring.h"
29 #include "libavutil/colorspace.h"
30 #include "libavutil/pixdesc.h"
31 #include "libavutil/imgutils.h"
32 #include "libavutil/parseutils.h"
33 #include "libavutil/samplefmt.h"
34 #include "libavutil/avassert.h"
35 #include "libavformat/avformat.h"
36 #include "libavdevice/avdevice.h"
37 #include "libswscale/swscale.h"
38 #include "libavcodec/audioconvert.h"
39 #include "libavutil/opt.h"
40 #include "libavcodec/avfft.h"
41
42 #if CONFIG_AVFILTER
43 # include "libavfilter/avcodec.h"
44 # include "libavfilter/avfilter.h"
45 # include "libavfilter/avfiltergraph.h"
46 #endif
47
48 #include "cmdutils.h"
49
50 #include <SDL.h>
51 #include <SDL_thread.h>
52
53 #ifdef __MINGW32__
54 #undef main /* We don't want SDL to override our main() */
55 #endif
56
57 #include <unistd.h>
58 #include <assert.h>
59
60 const char program_name[] = "ffplay";
61 const int program_birth_year = 2003;
62
63 //#define DEBUG
64 //#define DEBUG_SYNC
65
66 #define MAX_QUEUE_SIZE (15 * 1024 * 1024)
67 #define MIN_AUDIOQ_SIZE (20 * 16 * 1024)
68 #define MIN_FRAMES 5
69
70 /* SDL audio buffer size, in samples. Should be small to have precise
71    A/V sync as SDL does not have hardware buffer fullness info. */
72 #define SDL_AUDIO_BUFFER_SIZE 1024
73
74 /* no AV sync correction is done if below the AV sync threshold */
75 #define AV_SYNC_THRESHOLD 0.01
76 /* no AV correction is done if the error is too big */
77 #define AV_NOSYNC_THRESHOLD 10.0
78
79 #define FRAME_SKIP_FACTOR 0.05
80
81 /* maximum audio speed change to get correct sync */
82 #define SAMPLE_CORRECTION_PERCENT_MAX 10
83
84 /* we use about AUDIO_DIFF_AVG_NB A-V differences to make the average */
85 #define AUDIO_DIFF_AVG_NB   20
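
/* A worked example of how these two constants interact (illustrative only;
 * the actual correction is applied in the audio synchronization code): with
 * SAMPLE_CORRECTION_PERCENT_MAX = 10 and a 44100 Hz stream, at most about
 * 4410 samples per second of audio may be added or dropped to resync, and the
 * A-V difference driving that decision is smoothed over roughly the last
 * AUDIO_DIFF_AVG_NB measurements rather than acted on instantly. */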
86
87 /* NOTE: the size must be big enough to compensate for the hardware audio buffer size */
88 #define SAMPLE_ARRAY_SIZE (2*65536)
89
90 static int sws_flags = SWS_BICUBIC;
91
92 typedef struct PacketQueue {
93     AVPacketList *first_pkt, *last_pkt;
94     int nb_packets;
95     int size;
96     int abort_request;
97     SDL_mutex *mutex;
98     SDL_cond *cond;
99 } PacketQueue;
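
/* Usage pattern (see the packet_queue_* functions below): the read thread
 * calls packet_queue_put() to append demuxed packets, while the audio, video
 * and subtitle decoder threads call packet_queue_get() with block=1 to pull
 * them.  All accesses are serialized with the mutex, and the condition
 * variable wakes a blocked reader when a packet arrives or an abort is
 * requested. */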
100
101 #define VIDEO_PICTURE_QUEUE_SIZE 2
102 #define SUBPICTURE_QUEUE_SIZE 4
103
104 typedef struct VideoPicture {
105     double pts;                                  ///<presentation time stamp for this picture
106     double target_clock;                         ///<av_gettime() time at which this should be displayed ideally
107     int64_t pos;                                 ///<byte position in file
108     SDL_Overlay *bmp;
109     int width, height; /* source height & width */
110     int allocated;
111     enum PixelFormat pix_fmt;
112
113 #if CONFIG_AVFILTER
114     AVFilterBufferRef *picref;
115 #endif
116 } VideoPicture;
117
118 typedef struct SubPicture {
119     double pts; /* presentation time stamp for this picture */
120     AVSubtitle sub;
121 } SubPicture;
122
123 enum {
124     AV_SYNC_AUDIO_MASTER, /* default choice */
125     AV_SYNC_VIDEO_MASTER,
126     AV_SYNC_EXTERNAL_CLOCK, /* synchronize to an external clock */
127 };
128
129 typedef struct VideoState {
130     SDL_Thread *read_tid;
131     SDL_Thread *video_tid;
132     SDL_Thread *refresh_tid;
133     AVInputFormat *iformat;
134     int no_background;
135     int abort_request;
136     int paused;
137     int last_paused;
138     int seek_req;
139     int seek_flags;
140     int64_t seek_pos;
141     int64_t seek_rel;
142     int read_pause_return;
143     AVFormatContext *ic;
144
145     int audio_stream;
146
147     int av_sync_type;
148     double external_clock; /* external clock base */
149     int64_t external_clock_time;
150
151     double audio_clock;
152     double audio_diff_cum; /* used for AV difference average computation */
153     double audio_diff_avg_coef;
154     double audio_diff_threshold;
155     int audio_diff_avg_count;
156     AVStream *audio_st;
157     PacketQueue audioq;
158     int audio_hw_buf_size;
159     /* samples output by the codec. We reserve more space for A/V sync
160        compensation */
161     DECLARE_ALIGNED(16,uint8_t,audio_buf1)[(AVCODEC_MAX_AUDIO_FRAME_SIZE * 3) / 2];
162     DECLARE_ALIGNED(16,uint8_t,audio_buf2)[(AVCODEC_MAX_AUDIO_FRAME_SIZE * 3) / 2];
163     uint8_t *audio_buf;
164     unsigned int audio_buf_size; /* in bytes */
165     int audio_buf_index; /* in bytes */
166     AVPacket audio_pkt_temp;
167     AVPacket audio_pkt;
168     enum AVSampleFormat audio_src_fmt;
169     AVAudioConvert *reformat_ctx;
170
171     enum ShowMode {
172         SHOW_MODE_NONE = -1, SHOW_MODE_VIDEO = 0, SHOW_MODE_WAVES, SHOW_MODE_RDFT, SHOW_MODE_NB
173     } show_mode;
174     int16_t sample_array[SAMPLE_ARRAY_SIZE];
175     int sample_array_index;
176     int last_i_start;
177     RDFTContext *rdft;
178     int rdft_bits;
179     FFTSample *rdft_data;
180     int xpos;
181
182     SDL_Thread *subtitle_tid;
183     int subtitle_stream;
184     int subtitle_stream_changed;
185     AVStream *subtitle_st;
186     PacketQueue subtitleq;
187     SubPicture subpq[SUBPICTURE_QUEUE_SIZE];
188     int subpq_size, subpq_rindex, subpq_windex;
189     SDL_mutex *subpq_mutex;
190     SDL_cond *subpq_cond;
191
192     double frame_timer;
193     double frame_last_pts;
194     double frame_last_delay;
195     double video_clock;                          ///<pts of last decoded frame / predicted pts of next decoded frame
196     int video_stream;
197     AVStream *video_st;
198     PacketQueue videoq;
199     double video_current_pts;                    ///<current displayed pts (different from video_clock if frame fifos are used)
200     double video_current_pts_drift;              ///<video_current_pts - time (av_gettime) at which we updated video_current_pts - used to have running video pts
201     int64_t video_current_pos;                   ///<current displayed file pos
202     VideoPicture pictq[VIDEO_PICTURE_QUEUE_SIZE];
203     int pictq_size, pictq_rindex, pictq_windex;
204     SDL_mutex *pictq_mutex;
205     SDL_cond *pictq_cond;
206 #if !CONFIG_AVFILTER
207     struct SwsContext *img_convert_ctx;
208 #endif
209
210     char filename[1024];
211     int width, height, xleft, ytop;
212
213 #if CONFIG_AVFILTER
214     AVFilterContext *out_video_filter;          ///<the last filter in the video chain
215 #endif
216
217     float skip_frames;
218     float skip_frames_index;
219     int refresh;
220 } VideoState;
221
222 static void show_help(void);
223
224 /* options specified by the user */
225 static AVInputFormat *file_iformat;
226 static const char *input_filename;
227 static const char *window_title;
228 static int fs_screen_width;
229 static int fs_screen_height;
230 static int screen_width = 0;
231 static int screen_height = 0;
232 static int frame_width = 0;
233 static int frame_height = 0;
234 static enum PixelFormat frame_pix_fmt = PIX_FMT_NONE;
235 static int audio_disable;
236 static int video_disable;
237 static int wanted_stream[AVMEDIA_TYPE_NB]={
238     [AVMEDIA_TYPE_AUDIO]=-1,
239     [AVMEDIA_TYPE_VIDEO]=-1,
240     [AVMEDIA_TYPE_SUBTITLE]=-1,
241 };
242 static int seek_by_bytes=-1;
243 static int display_disable;
244 static int show_status = 1;
245 static int av_sync_type = AV_SYNC_AUDIO_MASTER;
246 static int64_t start_time = AV_NOPTS_VALUE;
247 static int64_t duration = AV_NOPTS_VALUE;
248 static int debug = 0;
249 static int debug_mv = 0;
250 static int step = 0;
251 static int thread_count = 1;
252 static int workaround_bugs = 1;
253 static int fast = 0;
254 static int genpts = 0;
255 static int lowres = 0;
256 static int idct = FF_IDCT_AUTO;
257 static enum AVDiscard skip_frame= AVDISCARD_DEFAULT;
258 static enum AVDiscard skip_idct= AVDISCARD_DEFAULT;
259 static enum AVDiscard skip_loop_filter= AVDISCARD_DEFAULT;
260 static int error_recognition = FF_ER_CAREFUL;
261 static int error_concealment = 3;
262 static int decoder_reorder_pts= -1;
263 static int autoexit;
264 static int exit_on_keydown;
265 static int exit_on_mousedown;
266 static int loop=1;
267 static int framedrop=1;
268 static enum ShowMode show_mode = SHOW_MODE_NONE;
269
270 static int rdftspeed=20;
271 #if CONFIG_AVFILTER
272 static char *vfilters = NULL;
273 #endif
274
275 /* current context */
276 static int is_full_screen;
277 static VideoState *cur_stream;
278 static int64_t audio_callback_time;
279
280 static AVPacket flush_pkt;
281
282 #define FF_ALLOC_EVENT   (SDL_USEREVENT)
283 #define FF_REFRESH_EVENT (SDL_USEREVENT + 1)
284 #define FF_QUIT_EVENT    (SDL_USEREVENT + 2)
285
286 static SDL_Surface *screen;
287
288 static int packet_queue_put(PacketQueue *q, AVPacket *pkt)
289 {
290     AVPacketList *pkt1;
291
292     /* duplicate the packet */
293     if (pkt!=&flush_pkt && av_dup_packet(pkt) < 0)
294         return -1;
295
296     pkt1 = av_malloc(sizeof(AVPacketList));
297     if (!pkt1)
298         return -1;
299     pkt1->pkt = *pkt;
300     pkt1->next = NULL;
301
302
303     SDL_LockMutex(q->mutex);
304
305     if (!q->last_pkt)
306
307         q->first_pkt = pkt1;
308     else
309         q->last_pkt->next = pkt1;
310     q->last_pkt = pkt1;
311     q->nb_packets++;
312     q->size += pkt1->pkt.size + sizeof(*pkt1);
313     /* XXX: should duplicate packet data in DV case */
314     SDL_CondSignal(q->cond);
315
316     SDL_UnlockMutex(q->mutex);
317     return 0;
318 }
319
320 /* packet queue handling */
321 static void packet_queue_init(PacketQueue *q)
322 {
323     memset(q, 0, sizeof(PacketQueue));
324     q->mutex = SDL_CreateMutex();
325     q->cond = SDL_CreateCond();
326     packet_queue_put(q, &flush_pkt);
327 }
328
329 static void packet_queue_flush(PacketQueue *q)
330 {
331     AVPacketList *pkt, *pkt1;
332
333     SDL_LockMutex(q->mutex);
334     for(pkt = q->first_pkt; pkt != NULL; pkt = pkt1) {
335         pkt1 = pkt->next;
336         av_free_packet(&pkt->pkt);
337         av_freep(&pkt);
338     }
339     q->last_pkt = NULL;
340     q->first_pkt = NULL;
341     q->nb_packets = 0;
342     q->size = 0;
343     SDL_UnlockMutex(q->mutex);
344 }
345
346 static void packet_queue_end(PacketQueue *q)
347 {
348     packet_queue_flush(q);
349     SDL_DestroyMutex(q->mutex);
350     SDL_DestroyCond(q->cond);
351 }
352
353 static void packet_queue_abort(PacketQueue *q)
354 {
355     SDL_LockMutex(q->mutex);
356
357     q->abort_request = 1;
358
359     SDL_CondSignal(q->cond);
360
361     SDL_UnlockMutex(q->mutex);
362 }
363
364 /* return < 0 if aborted, 0 if no packet and > 0 if packet.  */
365 static int packet_queue_get(PacketQueue *q, AVPacket *pkt, int block)
366 {
367     AVPacketList *pkt1;
368     int ret;
369
370     SDL_LockMutex(q->mutex);
371
372     for(;;) {
373         if (q->abort_request) {
374             ret = -1;
375             break;
376         }
377
378         pkt1 = q->first_pkt;
379         if (pkt1) {
380             q->first_pkt = pkt1->next;
381             if (!q->first_pkt)
382                 q->last_pkt = NULL;
383             q->nb_packets--;
384             q->size -= pkt1->pkt.size + sizeof(*pkt1);
385             *pkt = pkt1->pkt;
386             av_free(pkt1);
387             ret = 1;
388             break;
389         } else if (!block) {
390             ret = 0;
391             break;
392         } else {
393             SDL_CondWait(q->cond, q->mutex);
394         }
395     }
396     SDL_UnlockMutex(q->mutex);
397     return ret;
398 }
399
400 static inline void fill_rectangle(SDL_Surface *screen,
401                                   int x, int y, int w, int h, int color)
402 {
403     SDL_Rect rect;
404     rect.x = x;
405     rect.y = y;
406     rect.w = w;
407     rect.h = h;
408     SDL_FillRect(screen, &rect, color);
409 }
410
411 #if 0
412 /* draw only the border of a rectangle */
413 void fill_border(VideoState *s, int x, int y, int w, int h, int color)
414 {
415     int w1, w2, h1, h2;
416
417     /* fill the background */
418     w1 = x;
419     if (w1 < 0)
420         w1 = 0;
421     w2 = s->width - (x + w);
422     if (w2 < 0)
423         w2 = 0;
424     h1 = y;
425     if (h1 < 0)
426         h1 = 0;
427     h2 = s->height - (y + h);
428     if (h2 < 0)
429         h2 = 0;
430     fill_rectangle(screen,
431                    s->xleft, s->ytop,
432                    w1, s->height,
433                    color);
434     fill_rectangle(screen,
435                    s->xleft + s->width - w2, s->ytop,
436                    w2, s->height,
437                    color);
438     fill_rectangle(screen,
439                    s->xleft + w1, s->ytop,
440                    s->width - w1 - w2, h1,
441                    color);
442     fill_rectangle(screen,
443                    s->xleft + w1, s->ytop + s->height - h2,
444                    s->width - w1 - w2, h2,
445                    color);
446 }
447 #endif
448
449 #define ALPHA_BLEND(a, oldp, newp, s)\
450 ((((oldp << s) * (255 - (a))) + (newp * (a))) / (255 << s))
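
/* ALPHA_BLEND mixes oldp and newp with an alpha in [0,255]; the extra shift s
 * compensates for newp values that are sums of several chroma samples.  A
 * quick sanity check with s = 0: a = 0 yields oldp, a = 255 yields newp, and
 * e.g. ALPHA_BLEND(128, 0, 255, 0) = (0*127 + 255*128) / 255 = 128. */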
451
452 #define RGBA_IN(r, g, b, a, s)\
453 {\
454     unsigned int v = ((const uint32_t *)(s))[0];\
455     a = (v >> 24) & 0xff;\
456     r = (v >> 16) & 0xff;\
457     g = (v >> 8) & 0xff;\
458     b = v & 0xff;\
459 }
460
461 #define YUVA_IN(y, u, v, a, s, pal)\
462 {\
463     unsigned int val = ((const uint32_t *)(pal))[*(const uint8_t*)(s)];\
464     a = (val >> 24) & 0xff;\
465     y = (val >> 16) & 0xff;\
466     u = (val >> 8) & 0xff;\
467     v = val & 0xff;\
468 }
469
470 #define YUVA_OUT(d, y, u, v, a)\
471 {\
472     ((uint32_t *)(d))[0] = (a << 24) | (y << 16) | (u << 8) | v;\
473 }
474
475
476 #define BPP 1
477
478 static void blend_subrect(AVPicture *dst, const AVSubtitleRect *rect, int imgw, int imgh)
479 {
480     int wrap, wrap3, width2, skip2;
481     int y, u, v, a, u1, v1, a1, w, h;
482     uint8_t *lum, *cb, *cr;
483     const uint8_t *p;
484     const uint32_t *pal;
485     int dstx, dsty, dstw, dsth;
486
487     dstw = av_clip(rect->w, 0, imgw);
488     dsth = av_clip(rect->h, 0, imgh);
489     dstx = av_clip(rect->x, 0, imgw - dstw);
490     dsty = av_clip(rect->y, 0, imgh - dsth);
491     lum = dst->data[0] + dsty * dst->linesize[0];
492     cb = dst->data[1] + (dsty >> 1) * dst->linesize[1];
493     cr = dst->data[2] + (dsty >> 1) * dst->linesize[2];
494
495     width2 = ((dstw + 1) >> 1) + (dstx & ~dstw & 1);
496     skip2 = dstx >> 1;
497     wrap = dst->linesize[0];
498     wrap3 = rect->pict.linesize[0];
499     p = rect->pict.data[0];
500     pal = (const uint32_t *)rect->pict.data[1];  /* Now in YCrCb! */
501
502     if (dsty & 1) {
503         lum += dstx;
504         cb += skip2;
505         cr += skip2;
506
507         if (dstx & 1) {
508             YUVA_IN(y, u, v, a, p, pal);
509             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
510             cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
511             cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
512             cb++;
513             cr++;
514             lum++;
515             p += BPP;
516         }
517         for(w = dstw - (dstx & 1); w >= 2; w -= 2) {
518             YUVA_IN(y, u, v, a, p, pal);
519             u1 = u;
520             v1 = v;
521             a1 = a;
522             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
523
524             YUVA_IN(y, u, v, a, p + BPP, pal);
525             u1 += u;
526             v1 += v;
527             a1 += a;
528             lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
529             cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
530             cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
531             cb++;
532             cr++;
533             p += 2 * BPP;
534             lum += 2;
535         }
536         if (w) {
537             YUVA_IN(y, u, v, a, p, pal);
538             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
539             cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
540             cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
541             p++;
542             lum++;
543         }
544         p += wrap3 - dstw * BPP;
545         lum += wrap - dstw - dstx;
546         cb += dst->linesize[1] - width2 - skip2;
547         cr += dst->linesize[2] - width2 - skip2;
548     }
549     for(h = dsth - (dsty & 1); h >= 2; h -= 2) {
550         lum += dstx;
551         cb += skip2;
552         cr += skip2;
553
554         if (dstx & 1) {
555             YUVA_IN(y, u, v, a, p, pal);
556             u1 = u;
557             v1 = v;
558             a1 = a;
559             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
560             p += wrap3;
561             lum += wrap;
562             YUVA_IN(y, u, v, a, p, pal);
563             u1 += u;
564             v1 += v;
565             a1 += a;
566             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
567             cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
568             cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
569             cb++;
570             cr++;
571             p += -wrap3 + BPP;
572             lum += -wrap + 1;
573         }
574         for(w = dstw - (dstx & 1); w >= 2; w -= 2) {
575             YUVA_IN(y, u, v, a, p, pal);
576             u1 = u;
577             v1 = v;
578             a1 = a;
579             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
580
581             YUVA_IN(y, u, v, a, p + BPP, pal);
582             u1 += u;
583             v1 += v;
584             a1 += a;
585             lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
586             p += wrap3;
587             lum += wrap;
588
589             YUVA_IN(y, u, v, a, p, pal);
590             u1 += u;
591             v1 += v;
592             a1 += a;
593             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
594
595             YUVA_IN(y, u, v, a, p + BPP, pal);
596             u1 += u;
597             v1 += v;
598             a1 += a;
599             lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
600
601             cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 2);
602             cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 2);
603
604             cb++;
605             cr++;
606             p += -wrap3 + 2 * BPP;
607             lum += -wrap + 2;
608         }
609         if (w) {
610             YUVA_IN(y, u, v, a, p, pal);
611             u1 = u;
612             v1 = v;
613             a1 = a;
614             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
615             p += wrap3;
616             lum += wrap;
617             YUVA_IN(y, u, v, a, p, pal);
618             u1 += u;
619             v1 += v;
620             a1 += a;
621             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
622             cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
623             cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
624             cb++;
625             cr++;
626             p += -wrap3 + BPP;
627             lum += -wrap + 1;
628         }
629         p += wrap3 + (wrap3 - dstw * BPP);
630         lum += wrap + (wrap - dstw - dstx);
631         cb += dst->linesize[1] - width2 - skip2;
632         cr += dst->linesize[2] - width2 - skip2;
633     }
634     /* handle odd height */
635     if (h) {
636         lum += dstx;
637         cb += skip2;
638         cr += skip2;
639
640         if (dstx & 1) {
641             YUVA_IN(y, u, v, a, p, pal);
642             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
643             cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
644             cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
645             cb++;
646             cr++;
647             lum++;
648             p += BPP;
649         }
650         for(w = dstw - (dstx & 1); w >= 2; w -= 2) {
651             YUVA_IN(y, u, v, a, p, pal);
652             u1 = u;
653             v1 = v;
654             a1 = a;
655             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
656
657             YUVA_IN(y, u, v, a, p + BPP, pal);
658             u1 += u;
659             v1 += v;
660             a1 += a;
661             lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
662             cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
663             cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
664             cb++;
665             cr++;
666             p += 2 * BPP;
667             lum += 2;
668         }
669         if (w) {
670             YUVA_IN(y, u, v, a, p, pal);
671             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
672             cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
673             cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
674         }
675     }
676 }
677
678 static void free_subpicture(SubPicture *sp)
679 {
680     avsubtitle_free(&sp->sub);
681 }
682
683 static void video_image_display(VideoState *is)
684 {
685     VideoPicture *vp;
686     SubPicture *sp;
687     AVPicture pict;
688     float aspect_ratio;
689     int width, height, x, y;
690     SDL_Rect rect;
691     int i;
692
693     vp = &is->pictq[is->pictq_rindex];
694     if (vp->bmp) {
695 #if CONFIG_AVFILTER
696          if (vp->picref->video->sample_aspect_ratio.num == 0)
697              aspect_ratio = 0;
698          else
699              aspect_ratio = av_q2d(vp->picref->video->sample_aspect_ratio);
700 #else
701
702         /* XXX: use variable in the frame */
703         if (is->video_st->sample_aspect_ratio.num)
704             aspect_ratio = av_q2d(is->video_st->sample_aspect_ratio);
705         else if (is->video_st->codec->sample_aspect_ratio.num)
706             aspect_ratio = av_q2d(is->video_st->codec->sample_aspect_ratio);
707         else
708             aspect_ratio = 0;
709 #endif
710         if (aspect_ratio <= 0.0)
711             aspect_ratio = 1.0;
712         aspect_ratio *= (float)vp->width / (float)vp->height;
713
714         if (is->subtitle_st) {
715             if (is->subpq_size > 0) {
716                 sp = &is->subpq[is->subpq_rindex];
717
718                 if (vp->pts >= sp->pts + ((float) sp->sub.start_display_time / 1000)) {
719                     SDL_LockYUVOverlay (vp->bmp);
720
721                     pict.data[0] = vp->bmp->pixels[0];
722                     pict.data[1] = vp->bmp->pixels[2];
723                     pict.data[2] = vp->bmp->pixels[1];
724
725                     pict.linesize[0] = vp->bmp->pitches[0];
726                     pict.linesize[1] = vp->bmp->pitches[2];
727                     pict.linesize[2] = vp->bmp->pitches[1];
728
729                     for (i = 0; i < sp->sub.num_rects; i++)
730                         blend_subrect(&pict, sp->sub.rects[i],
731                                       vp->bmp->w, vp->bmp->h);
732
733                     SDL_UnlockYUVOverlay (vp->bmp);
734                 }
735             }
736         }
737
738
739         /* XXX: we assume the screen has a 1:1 pixel aspect ratio */
740         height = is->height;
741         width = ((int)rint(height * aspect_ratio)) & ~1;
742         if (width > is->width) {
743             width = is->width;
744             height = ((int)rint(width / aspect_ratio)) & ~1;
745         }
746         x = (is->width - width) / 2;
747         y = (is->height - height) / 2;
748         if (!is->no_background) {
749             /* fill the background */
750             //            fill_border(is, x, y, width, height, QERGB(0x00, 0x00, 0x00));
751         } else {
752             is->no_background = 0;
753         }
754         rect.x = is->xleft + x;
755         rect.y = is->ytop  + y;
756         rect.w = FFMAX(width,  1);
757         rect.h = FFMAX(height, 1);
758         SDL_DisplayYUVOverlay(vp->bmp, &rect);
759     } else {
760 #if 0
761         fill_rectangle(screen,
762                        is->xleft, is->ytop, is->width, is->height,
763                        QERGB(0x00, 0x00, 0x00));
764 #endif
765     }
766 }
767
768 /* get the current audio output buffer size, in bytes. With SDL, we
769    cannot have precise information */
770 static int audio_write_get_buf_size(VideoState *is)
771 {
772     return is->audio_buf_size - is->audio_buf_index;
773 }
774
775 static inline int compute_mod(int a, int b)
776 {
777     return a < 0 ? a%b + b : a%b;
778 }
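
/* compute_mod() is a true mathematical modulo, so negative indices wrap
 * around instead of staying negative as they would with C's % operator:
 * e.g. compute_mod(-3, SAMPLE_ARRAY_SIZE) == SAMPLE_ARRAY_SIZE - 3.  This is
 * what lets the code below step backwards through the circular sample array. */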
779
780 static void video_audio_display(VideoState *s)
781 {
782     int i, i_start, x, y1, y, ys, delay, n, nb_display_channels;
783     int ch, channels, h, h2, bgcolor, fgcolor;
784     int64_t time_diff;
785     int rdft_bits, nb_freq;
786
787     for(rdft_bits=1; (1<<rdft_bits)<2*s->height; rdft_bits++)
788         ;
789     nb_freq= 1<<(rdft_bits-1);
790
791     /* compute display index: center on currently output samples */
792     channels = s->audio_st->codec->channels;
793     nb_display_channels = channels;
794     if (!s->paused) {
795         int data_used= s->show_mode == SHOW_MODE_WAVES ? s->width : (2*nb_freq);
796         n = 2 * channels;
797         delay = audio_write_get_buf_size(s);
798         delay /= n;
799
800         /* to be more precise, we take into account the time spent since
801            the last buffer computation */
802         if (audio_callback_time) {
803             time_diff = av_gettime() - audio_callback_time;
804             delay -= (time_diff * s->audio_st->codec->sample_rate) / 1000000;
805         }
806
807         delay += 2*data_used;
808         if (delay < data_used)
809             delay = data_used;
810
811         i_start= x = compute_mod(s->sample_array_index - delay * channels, SAMPLE_ARRAY_SIZE);
812         if (s->show_mode == SHOW_MODE_WAVES) {
813             h= INT_MIN;
814             for(i=0; i<1000; i+=channels){
815                 int idx= (SAMPLE_ARRAY_SIZE + x - i) % SAMPLE_ARRAY_SIZE;
816                 int a= s->sample_array[idx];
817                 int b= s->sample_array[(idx + 4*channels)%SAMPLE_ARRAY_SIZE];
818                 int c= s->sample_array[(idx + 5*channels)%SAMPLE_ARRAY_SIZE];
819                 int d= s->sample_array[(idx + 9*channels)%SAMPLE_ARRAY_SIZE];
820                 int score= a-d;
821                 if(h<score && (b^c)<0){
822                     h= score;
823                     i_start= idx;
824                 }
825             }
826         }
827
828         s->last_i_start = i_start;
829     } else {
830         i_start = s->last_i_start;
831     }
832
833     bgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0x00);
834     if (s->show_mode == SHOW_MODE_WAVES) {
835         fill_rectangle(screen,
836                        s->xleft, s->ytop, s->width, s->height,
837                        bgcolor);
838
839         fgcolor = SDL_MapRGB(screen->format, 0xff, 0xff, 0xff);
840
841         /* total height for one channel */
842         h = s->height / nb_display_channels;
843         /* graph height / 2 */
844         h2 = (h * 9) / 20;
845         for(ch = 0;ch < nb_display_channels; ch++) {
846             i = i_start + ch;
847             y1 = s->ytop + ch * h + (h / 2); /* position of center line */
848             for(x = 0; x < s->width; x++) {
849                 y = (s->sample_array[i] * h2) >> 15;
850                 if (y < 0) {
851                     y = -y;
852                     ys = y1 - y;
853                 } else {
854                     ys = y1;
855                 }
856                 fill_rectangle(screen,
857                                s->xleft + x, ys, 1, y,
858                                fgcolor);
859                 i += channels;
860                 if (i >= SAMPLE_ARRAY_SIZE)
861                     i -= SAMPLE_ARRAY_SIZE;
862             }
863         }
864
865         fgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0xff);
866
867         for(ch = 1;ch < nb_display_channels; ch++) {
868             y = s->ytop + ch * h;
869             fill_rectangle(screen,
870                            s->xleft, y, s->width, 1,
871                            fgcolor);
872         }
873         SDL_UpdateRect(screen, s->xleft, s->ytop, s->width, s->height);
874     }else{
875         nb_display_channels= FFMIN(nb_display_channels, 2);
876         if(rdft_bits != s->rdft_bits){
877             av_rdft_end(s->rdft);
878             av_free(s->rdft_data);
879             s->rdft = av_rdft_init(rdft_bits, DFT_R2C);
880             s->rdft_bits= rdft_bits;
881             s->rdft_data= av_malloc(4*nb_freq*sizeof(*s->rdft_data));
882         }
883         {
884             FFTSample *data[2];
885             for(ch = 0;ch < nb_display_channels; ch++) {
886                 data[ch] = s->rdft_data + 2*nb_freq*ch;
887                 i = i_start + ch;
888                 for(x = 0; x < 2*nb_freq; x++) {
889                     double w= (x-nb_freq)*(1.0/nb_freq);
890                     data[ch][x]= s->sample_array[i]*(1.0-w*w);
891                     i += channels;
892                     if (i >= SAMPLE_ARRAY_SIZE)
893                         i -= SAMPLE_ARRAY_SIZE;
894                 }
895                 av_rdft_calc(s->rdft, data[ch]);
896             }
897             // Least efficient way to do this; we should of course access it directly, but it's more than fast enough
898             for(y=0; y<s->height; y++){
899                 double w= 1/sqrt(nb_freq);
900                 int a= sqrt(w*sqrt(data[0][2*y+0]*data[0][2*y+0] + data[0][2*y+1]*data[0][2*y+1]));
901                 int b= (nb_display_channels == 2 ) ? sqrt(w*sqrt(data[1][2*y+0]*data[1][2*y+0]
902                        + data[1][2*y+1]*data[1][2*y+1])) : a;
903                 a= FFMIN(a,255);
904                 b= FFMIN(b,255);
905                 fgcolor = SDL_MapRGB(screen->format, a, b, (a+b)/2);
906
907                 fill_rectangle(screen,
908                             s->xpos, s->height-y, 1, 1,
909                             fgcolor);
910             }
911         }
912         SDL_UpdateRect(screen, s->xpos, s->ytop, 1, s->height);
913         s->xpos++;
914         if(s->xpos >= s->width)
915             s->xpos= s->xleft;
916     }
917 }
918
919 static int video_open(VideoState *is){
920     int flags = SDL_HWSURFACE|SDL_ASYNCBLIT|SDL_HWACCEL;
921     int w,h;
922
923     if(is_full_screen) flags |= SDL_FULLSCREEN;
924     else               flags |= SDL_RESIZABLE;
925
926     if (is_full_screen && fs_screen_width) {
927         w = fs_screen_width;
928         h = fs_screen_height;
929     } else if(!is_full_screen && screen_width){
930         w = screen_width;
931         h = screen_height;
932 #if CONFIG_AVFILTER
933     }else if (is->out_video_filter && is->out_video_filter->inputs[0]){
934         w = is->out_video_filter->inputs[0]->w;
935         h = is->out_video_filter->inputs[0]->h;
936 #else
937     }else if (is->video_st && is->video_st->codec->width){
938         w = is->video_st->codec->width;
939         h = is->video_st->codec->height;
940 #endif
941     } else {
942         w = 640;
943         h = 480;
944     }
945     if(screen && is->width == screen->w && screen->w == w
946        && is->height== screen->h && screen->h == h)
947         return 0;
948
949 #ifndef __APPLE__
950     screen = SDL_SetVideoMode(w, h, 0, flags);
951 #else
952     /* setting bits_per_pixel = 0 or 32 causes blank video on OS X */
953     screen = SDL_SetVideoMode(w, h, 24, flags);
954 #endif
955     if (!screen) {
956         fprintf(stderr, "SDL: could not set video mode - exiting\n");
957         return -1;
958     }
959     if (!window_title)
960         window_title = input_filename;
961     SDL_WM_SetCaption(window_title, window_title);
962
963     is->width = screen->w;
964     is->height = screen->h;
965
966     return 0;
967 }
968
969 /* display the current picture, if any */
970 static void video_display(VideoState *is)
971 {
972     if(!screen)
973         video_open(cur_stream);
974     if (is->audio_st && is->show_mode != SHOW_MODE_VIDEO)
975         video_audio_display(is);
976     else if (is->video_st)
977         video_image_display(is);
978 }
979
980 static int refresh_thread(void *opaque)
981 {
982     VideoState *is= opaque;
983     while(!is->abort_request){
984         SDL_Event event;
985         event.type = FF_REFRESH_EVENT;
986         event.user.data1 = opaque;
987         if(!is->refresh){
988             is->refresh=1;
989             SDL_PushEvent(&event);
990         }
991         // FIXME: ideally we should wait for the correct time, but SDL's event passing is so slow it would be silly
992         usleep(is->audio_st && is->show_mode != SHOW_MODE_VIDEO ? rdftspeed*1000 : 5000);
993     }
994     return 0;
995 }
996
997 /* get the current audio clock value */
998 static double get_audio_clock(VideoState *is)
999 {
1000     double pts;
1001     int hw_buf_size, bytes_per_sec;
1002     pts = is->audio_clock;
1003     hw_buf_size = audio_write_get_buf_size(is);
1004     bytes_per_sec = 0;
1005     if (is->audio_st) {
1006         bytes_per_sec = is->audio_st->codec->sample_rate *
1007             2 * is->audio_st->codec->channels;
1008     }
1009     if (bytes_per_sec)
1010         pts -= (double)hw_buf_size / bytes_per_sec;
1011     return pts;
1012 }
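
/* In other words: audio_clock holds the PTS of the last decoded audio data,
 * so the current playback position is that PTS minus the duration of whatever
 * is still sitting in our output buffer.  Note that bytes_per_sec hard-codes
 * 2 bytes per sample, i.e. it assumes 16-bit output samples. */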
1013
1014 /* get the current video clock value */
1015 static double get_video_clock(VideoState *is)
1016 {
1017     if (is->paused) {
1018         return is->video_current_pts;
1019     } else {
1020         return is->video_current_pts_drift + av_gettime() / 1000000.0;
1021     }
1022 }
1023
1024 /* get the current external clock value */
1025 static double get_external_clock(VideoState *is)
1026 {
1027     int64_t ti;
1028     ti = av_gettime();
1029     return is->external_clock + ((ti - is->external_clock_time) * 1e-6);
1030 }
1031
1032 /* get the current master clock value */
1033 static double get_master_clock(VideoState *is)
1034 {
1035     double val;
1036
1037     if (is->av_sync_type == AV_SYNC_VIDEO_MASTER) {
1038         if (is->video_st)
1039             val = get_video_clock(is);
1040         else
1041             val = get_audio_clock(is);
1042     } else if (is->av_sync_type == AV_SYNC_AUDIO_MASTER) {
1043         if (is->audio_st)
1044             val = get_audio_clock(is);
1045         else
1046             val = get_video_clock(is);
1047     } else {
1048         val = get_external_clock(is);
1049     }
1050     return val;
1051 }
1052
1053 /* seek in the stream */
1054 static void stream_seek(VideoState *is, int64_t pos, int64_t rel, int seek_by_bytes)
1055 {
1056     if (!is->seek_req) {
1057         is->seek_pos = pos;
1058         is->seek_rel = rel;
1059         is->seek_flags &= ~AVSEEK_FLAG_BYTE;
1060         if (seek_by_bytes)
1061             is->seek_flags |= AVSEEK_FLAG_BYTE;
1062         is->seek_req = 1;
1063     }
1064 }
1065
1066 /* pause or resume the video */
1067 static void stream_toggle_pause(VideoState *is)
1068 {
1069     if (is->paused) {
1070         is->frame_timer += av_gettime() / 1000000.0 + is->video_current_pts_drift - is->video_current_pts;
1071         if(is->read_pause_return != AVERROR(ENOSYS)){
1072             is->video_current_pts = is->video_current_pts_drift + av_gettime() / 1000000.0;
1073         }
1074         is->video_current_pts_drift = is->video_current_pts - av_gettime() / 1000000.0;
1075     }
1076     is->paused = !is->paused;
1077 }
1078
1079 static double compute_target_time(double frame_current_pts, VideoState *is)
1080 {
1081     double delay, sync_threshold, diff;
1082
1083     /* compute nominal delay */
1084     delay = frame_current_pts - is->frame_last_pts;
1085     if (delay <= 0 || delay >= 10.0) {
1086         /* if incorrect delay, use previous one */
1087         delay = is->frame_last_delay;
1088     } else {
1089         is->frame_last_delay = delay;
1090     }
1091     is->frame_last_pts = frame_current_pts;
1092
1093     /* update delay to follow master synchronisation source */
1094     if (((is->av_sync_type == AV_SYNC_AUDIO_MASTER && is->audio_st) ||
1095          is->av_sync_type == AV_SYNC_EXTERNAL_CLOCK)) {
1096         /* if video is the slave, we try to correct big delays by
1097            duplicating or dropping a frame */
1098         diff = get_video_clock(is) - get_master_clock(is);
1099
1100         /* Skip or repeat the frame. We take the delay into account
1101            to compute the threshold. I still don't know
1102            if it is the best guess */
1103         sync_threshold = FFMAX(AV_SYNC_THRESHOLD, delay);
1104         if (fabs(diff) < AV_NOSYNC_THRESHOLD) {
1105             if (diff <= -sync_threshold)
1106                 delay = 0;
1107             else if (diff >= sync_threshold)
1108                 delay = 2 * delay;
1109         }
1110     }
1111     is->frame_timer += delay;
1112 #if defined(DEBUG_SYNC)
1113     printf("video: delay=%0.3f pts=%0.3f frame_timer=%0.3f\n",
1114             delay, frame_current_pts, is->frame_timer);
1115 #endif
1116
1117     return is->frame_timer;
1118 }
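
/* A numeric sketch of the slave-video correction above: at 25 fps the nominal
 * delay is 0.040 s, so sync_threshold is max(0.01, 0.040) = 0.040 s.  If the
 * video clock is more than 0.040 s behind the master clock, the next frame is
 * scheduled immediately (delay = 0); if it is more than 0.040 s ahead, the
 * delay is doubled to 0.080 s so the master clock can catch up.  Differences
 * beyond AV_NOSYNC_THRESHOLD are considered hopeless and left uncorrected. */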
1119
1120 /* called to display each frame */
1121 static void video_refresh(void *opaque)
1122 {
1123     VideoState *is = opaque;
1124     VideoPicture *vp;
1125
1126     SubPicture *sp, *sp2;
1127
1128     if (is->video_st) {
1129 retry:
1130         if (is->pictq_size == 0) {
1131             // nothing to do, no picture to display in the queue
1132         } else {
1133             double time= av_gettime()/1000000.0;
1134             double next_target;
1135             /* dequeue the picture */
1136             vp = &is->pictq[is->pictq_rindex];
1137
1138             if(time < vp->target_clock)
1139                 return;
1140             /* update current video pts */
1141             is->video_current_pts = vp->pts;
1142             is->video_current_pts_drift = is->video_current_pts - time;
1143             is->video_current_pos = vp->pos;
1144             if(is->pictq_size > 1){
1145                 VideoPicture *nextvp= &is->pictq[(is->pictq_rindex+1)%VIDEO_PICTURE_QUEUE_SIZE];
1146                 assert(nextvp->target_clock >= vp->target_clock);
1147                 next_target= nextvp->target_clock;
1148             }else{
1149                 next_target= vp->target_clock + is->video_clock - vp->pts; //FIXME pass durations cleanly
1150             }
1151             if(framedrop && time > next_target){
1152                 is->skip_frames *= 1.0 + FRAME_SKIP_FACTOR;
1153                 if(is->pictq_size > 1 || time > next_target + 0.5){
1154                     /* update queue size and signal for next picture */
1155                     if (++is->pictq_rindex == VIDEO_PICTURE_QUEUE_SIZE)
1156                         is->pictq_rindex = 0;
1157
1158                     SDL_LockMutex(is->pictq_mutex);
1159                     is->pictq_size--;
1160                     SDL_CondSignal(is->pictq_cond);
1161                     SDL_UnlockMutex(is->pictq_mutex);
1162                     goto retry;
1163                 }
1164             }
1165
1166             if(is->subtitle_st) {
1167                 if (is->subtitle_stream_changed) {
1168                     SDL_LockMutex(is->subpq_mutex);
1169
1170                     while (is->subpq_size) {
1171                         free_subpicture(&is->subpq[is->subpq_rindex]);
1172
1173                         /* update queue size and signal for next picture */
1174                         if (++is->subpq_rindex == SUBPICTURE_QUEUE_SIZE)
1175                             is->subpq_rindex = 0;
1176
1177                         is->subpq_size--;
1178                     }
1179                     is->subtitle_stream_changed = 0;
1180
1181                     SDL_CondSignal(is->subpq_cond);
1182                     SDL_UnlockMutex(is->subpq_mutex);
1183                 } else {
1184                     if (is->subpq_size > 0) {
1185                         sp = &is->subpq[is->subpq_rindex];
1186
1187                         if (is->subpq_size > 1)
1188                             sp2 = &is->subpq[(is->subpq_rindex + 1) % SUBPICTURE_QUEUE_SIZE];
1189                         else
1190                             sp2 = NULL;
1191
1192                         if ((is->video_current_pts > (sp->pts + ((float) sp->sub.end_display_time / 1000)))
1193                                 || (sp2 && is->video_current_pts > (sp2->pts + ((float) sp2->sub.start_display_time / 1000))))
1194                         {
1195                             free_subpicture(sp);
1196
1197                             /* update queue size and signal for next picture */
1198                             if (++is->subpq_rindex == SUBPICTURE_QUEUE_SIZE)
1199                                 is->subpq_rindex = 0;
1200
1201                             SDL_LockMutex(is->subpq_mutex);
1202                             is->subpq_size--;
1203                             SDL_CondSignal(is->subpq_cond);
1204                             SDL_UnlockMutex(is->subpq_mutex);
1205                         }
1206                     }
1207                 }
1208             }
1209
1210             /* display picture */
1211             if (!display_disable)
1212                 video_display(is);
1213
1214             /* update queue size and signal for next picture */
1215             if (++is->pictq_rindex == VIDEO_PICTURE_QUEUE_SIZE)
1216                 is->pictq_rindex = 0;
1217
1218             SDL_LockMutex(is->pictq_mutex);
1219             is->pictq_size--;
1220             SDL_CondSignal(is->pictq_cond);
1221             SDL_UnlockMutex(is->pictq_mutex);
1222         }
1223     } else if (is->audio_st) {
1224         /* draw the next audio frame */
1225
1226         /* if there is only an audio stream, display the audio bars (better
1227            than nothing, just to test the implementation) */
1228
1229         /* display picture */
1230         if (!display_disable)
1231             video_display(is);
1232     }
1233     if (show_status) {
1234         static int64_t last_time;
1235         int64_t cur_time;
1236         int aqsize, vqsize, sqsize;
1237         double av_diff;
1238
1239         cur_time = av_gettime();
1240         if (!last_time || (cur_time - last_time) >= 30000) {
1241             aqsize = 0;
1242             vqsize = 0;
1243             sqsize = 0;
1244             if (is->audio_st)
1245                 aqsize = is->audioq.size;
1246             if (is->video_st)
1247                 vqsize = is->videoq.size;
1248             if (is->subtitle_st)
1249                 sqsize = is->subtitleq.size;
1250             av_diff = 0;
1251             if (is->audio_st && is->video_st)
1252                 av_diff = get_audio_clock(is) - get_video_clock(is);
1253             printf("%7.2f A-V:%7.3f s:%3.1f aq=%5dKB vq=%5dKB sq=%5dB f=%"PRId64"/%"PRId64"   \r",
1254                    get_master_clock(is),
1255                    av_diff,
1256                    FFMAX(is->skip_frames-1, 0),
1257                    aqsize / 1024,
1258                    vqsize / 1024,
1259                    sqsize,
1260                    is->video_st ? is->video_st->codec->pts_correction_num_faulty_dts : 0,
1261                    is->video_st ? is->video_st->codec->pts_correction_num_faulty_pts : 0);
1262             fflush(stdout);
1263             last_time = cur_time;
1264         }
1265     }
1266 }
1267
1268 static void stream_close(VideoState *is)
1269 {
1270     VideoPicture *vp;
1271     int i;
1272     /* XXX: use a special url_shutdown call to abort parse cleanly */
1273     is->abort_request = 1;
1274     SDL_WaitThread(is->read_tid, NULL);
1275     SDL_WaitThread(is->refresh_tid, NULL);
1276
1277     /* free all pictures */
1278     for(i=0;i<VIDEO_PICTURE_QUEUE_SIZE; i++) {
1279         vp = &is->pictq[i];
1280 #if CONFIG_AVFILTER
1281         if (vp->picref) {
1282             avfilter_unref_buffer(vp->picref);
1283             vp->picref = NULL;
1284         }
1285 #endif
1286         if (vp->bmp) {
1287             SDL_FreeYUVOverlay(vp->bmp);
1288             vp->bmp = NULL;
1289         }
1290     }
1291     SDL_DestroyMutex(is->pictq_mutex);
1292     SDL_DestroyCond(is->pictq_cond);
1293     SDL_DestroyMutex(is->subpq_mutex);
1294     SDL_DestroyCond(is->subpq_cond);
1295 #if !CONFIG_AVFILTER
1296     if (is->img_convert_ctx)
1297         sws_freeContext(is->img_convert_ctx);
1298 #endif
1299     av_free(is);
1300 }
1301
1302 static void do_exit(void)
1303 {
1304     if (cur_stream) {
1305         stream_close(cur_stream);
1306         cur_stream = NULL;
1307     }
1308     uninit_opts();
1309 #if CONFIG_AVFILTER
1310     avfilter_uninit();
1311 #endif
1312     if (show_status)
1313         printf("\n");
1314     SDL_Quit();
1315     av_log(NULL, AV_LOG_QUIET, "");
1316     exit(0);
1317 }
1318
1319 /* allocate a picture (this needs to be done in the main thread to avoid
1320    potential locking problems) */
1321 static void alloc_picture(void *opaque)
1322 {
1323     VideoState *is = opaque;
1324     VideoPicture *vp;
1325
1326     vp = &is->pictq[is->pictq_windex];
1327
1328     if (vp->bmp)
1329         SDL_FreeYUVOverlay(vp->bmp);
1330
1331 #if CONFIG_AVFILTER
1332     if (vp->picref)
1333         avfilter_unref_buffer(vp->picref);
1334     vp->picref = NULL;
1335
1336     vp->width   = is->out_video_filter->inputs[0]->w;
1337     vp->height  = is->out_video_filter->inputs[0]->h;
1338     vp->pix_fmt = is->out_video_filter->inputs[0]->format;
1339 #else
1340     vp->width   = is->video_st->codec->width;
1341     vp->height  = is->video_st->codec->height;
1342     vp->pix_fmt = is->video_st->codec->pix_fmt;
1343 #endif
1344
1345     vp->bmp = SDL_CreateYUVOverlay(vp->width, vp->height,
1346                                    SDL_YV12_OVERLAY,
1347                                    screen);
1348     if (!vp->bmp || vp->bmp->pitches[0] < vp->width) {
1349         /* SDL allocates a buffer smaller than requested if the video
1350          * overlay hardware is unable to support the requested size. */
1351         fprintf(stderr, "Error: the video system does not support an image\n"
1352                         "size of %dx%d pixels. Try using -lowres or -vf \"scale=w:h\"\n"
1353                         "to reduce the image size.\n", vp->width, vp->height );
1354         do_exit();
1355     }
1356
1357     SDL_LockMutex(is->pictq_mutex);
1358     vp->allocated = 1;
1359     SDL_CondSignal(is->pictq_cond);
1360     SDL_UnlockMutex(is->pictq_mutex);
1361 }
1362
1363 static int queue_picture(VideoState *is, AVFrame *src_frame, double pts1, int64_t pos)
1364 {
1365     VideoPicture *vp;
1366     double frame_delay, pts = pts1;
1367
1368     /* compute the exact PTS for the picture if it is omitted in the stream
1369      * pts1 is the dts of the pkt / pts of the frame */
1370     if (pts != 0) {
1371         /* update video clock with pts, if present */
1372         is->video_clock = pts;
1373     } else {
1374         pts = is->video_clock;
1375     }
1376     /* update video clock for next frame */
1377     frame_delay = av_q2d(is->video_st->codec->time_base);
1378     /* for MPEG2, the frame can be repeated, so we update the
1379        clock accordingly */
1380     frame_delay += src_frame->repeat_pict * (frame_delay * 0.5);
1381     is->video_clock += frame_delay;
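
    /* Example: with a 25 fps stream and a codec time_base of 1/25, frame_delay
     * is 0.040 s; a frame flagged with repeat_pict = 1 (a repeated field)
     * therefore advances the video clock by 0.040 * 1.5 = 0.060 s. */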
1382
1383 #if defined(DEBUG_SYNC) && 0
1384     printf("frame_type=%c clock=%0.3f pts=%0.3f\n",
1385            av_get_picture_type_char(src_frame->pict_type), pts, pts1);
1386 #endif
1387
1388     /* wait until we have space to put a new picture */
1389     SDL_LockMutex(is->pictq_mutex);
1390
1391     if(is->pictq_size>=VIDEO_PICTURE_QUEUE_SIZE && !is->refresh)
1392         is->skip_frames= FFMAX(1.0 - FRAME_SKIP_FACTOR, is->skip_frames * (1.0-FRAME_SKIP_FACTOR));
1393
1394     while (is->pictq_size >= VIDEO_PICTURE_QUEUE_SIZE &&
1395            !is->videoq.abort_request) {
1396         SDL_CondWait(is->pictq_cond, is->pictq_mutex);
1397     }
1398     SDL_UnlockMutex(is->pictq_mutex);
1399
1400     if (is->videoq.abort_request)
1401         return -1;
1402
1403     vp = &is->pictq[is->pictq_windex];
1404
1405     /* alloc or resize hardware picture buffer */
1406     if (!vp->bmp ||
1407 #if CONFIG_AVFILTER
1408         vp->width  != is->out_video_filter->inputs[0]->w ||
1409         vp->height != is->out_video_filter->inputs[0]->h) {
1410 #else
1411         vp->width != is->video_st->codec->width ||
1412         vp->height != is->video_st->codec->height) {
1413 #endif
1414         SDL_Event event;
1415
1416         vp->allocated = 0;
1417
1418         /* the allocation must be done in the main thread to avoid
1419            locking problems */
1420         event.type = FF_ALLOC_EVENT;
1421         event.user.data1 = is;
1422         SDL_PushEvent(&event);
1423
1424         /* wait until the picture is allocated */
1425         SDL_LockMutex(is->pictq_mutex);
1426         while (!vp->allocated && !is->videoq.abort_request) {
1427             SDL_CondWait(is->pictq_cond, is->pictq_mutex);
1428         }
1429         SDL_UnlockMutex(is->pictq_mutex);
1430
1431         if (is->videoq.abort_request)
1432             return -1;
1433     }
1434
1435     /* if the frame is not skipped, then display it */
1436     if (vp->bmp) {
1437         AVPicture pict;
1438 #if CONFIG_AVFILTER
1439         if(vp->picref)
1440             avfilter_unref_buffer(vp->picref);
1441         vp->picref = src_frame->opaque;
1442 #endif
1443
1444         /* get a pointer to the bitmap */
1445         SDL_LockYUVOverlay (vp->bmp);
1446
1447         memset(&pict,0,sizeof(AVPicture));
1448         pict.data[0] = vp->bmp->pixels[0];
1449         pict.data[1] = vp->bmp->pixels[2];
1450         pict.data[2] = vp->bmp->pixels[1];
1451
1452         pict.linesize[0] = vp->bmp->pitches[0];
1453         pict.linesize[1] = vp->bmp->pitches[2];
1454         pict.linesize[2] = vp->bmp->pitches[1];
1455
1456 #if CONFIG_AVFILTER
1457         //FIXME use direct rendering
1458         av_picture_copy(&pict, (AVPicture *)src_frame,
1459                         vp->pix_fmt, vp->width, vp->height);
1460 #else
1461         sws_flags = av_get_int(sws_opts, "sws_flags", NULL);
1462         is->img_convert_ctx = sws_getCachedContext(is->img_convert_ctx,
1463             vp->width, vp->height, vp->pix_fmt, vp->width, vp->height,
1464             PIX_FMT_YUV420P, sws_flags, NULL, NULL, NULL);
1465         if (is->img_convert_ctx == NULL) {
1466             fprintf(stderr, "Cannot initialize the conversion context\n");
1467             exit(1);
1468         }
1469         sws_scale(is->img_convert_ctx, src_frame->data, src_frame->linesize,
1470                   0, vp->height, pict.data, pict.linesize);
1471 #endif
1472         /* update the bitmap content */
1473         SDL_UnlockYUVOverlay(vp->bmp);
1474
1475         vp->pts = pts;
1476         vp->pos = pos;
1477
1478         /* now we can update the picture count */
1479         if (++is->pictq_windex == VIDEO_PICTURE_QUEUE_SIZE)
1480             is->pictq_windex = 0;
1481         SDL_LockMutex(is->pictq_mutex);
1482         vp->target_clock= compute_target_time(vp->pts, is);
1483
1484         is->pictq_size++;
1485         SDL_UnlockMutex(is->pictq_mutex);
1486     }
1487     return 0;
1488 }
1489
1490 static int get_video_frame(VideoState *is, AVFrame *frame, int64_t *pts, AVPacket *pkt)
1491 {
1492     int len1, got_picture, i;
1493
1494     if (packet_queue_get(&is->videoq, pkt, 1) < 0)
1495         return -1;
1496
1497     if (pkt->data == flush_pkt.data) {
1498         avcodec_flush_buffers(is->video_st->codec);
1499
1500         SDL_LockMutex(is->pictq_mutex);
1501         // Make sure there are no long-delay timers (ideally we should just flush the queue, but that's harder)
1502         for (i = 0; i < VIDEO_PICTURE_QUEUE_SIZE; i++) {
1503             is->pictq[i].target_clock= 0;
1504         }
1505         while (is->pictq_size && !is->videoq.abort_request) {
1506             SDL_CondWait(is->pictq_cond, is->pictq_mutex);
1507         }
1508         is->video_current_pos = -1;
1509         SDL_UnlockMutex(is->pictq_mutex);
1510
1511         is->frame_last_pts = AV_NOPTS_VALUE;
1512         is->frame_last_delay = 0;
1513         is->frame_timer = (double)av_gettime() / 1000000.0;
1514         is->skip_frames = 1;
1515         is->skip_frames_index = 0;
1516         return 0;
1517     }
1518
1519     len1 = avcodec_decode_video2(is->video_st->codec,
1520                                  frame, &got_picture,
1521                                  pkt);
1522
1523     if (got_picture) {
1524         if (decoder_reorder_pts == -1) {
1525             *pts = frame->best_effort_timestamp;
1526         } else if (decoder_reorder_pts) {
1527             *pts = frame->pkt_pts;
1528         } else {
1529             *pts = frame->pkt_dts;
1530         }
1531
1532         if (*pts == AV_NOPTS_VALUE) {
1533             *pts = 0;
1534         }
1535
1536         is->skip_frames_index += 1;
1537         if(is->skip_frames_index >= is->skip_frames){
1538             is->skip_frames_index -= FFMAX(is->skip_frames, 1.0);
1539             return 1;
1540         }
1541
1542     }
1543     return 0;
1544 }
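
/* Note on the frame-skip accounting above: skip_frames is a ratio >= 1.0 that
 * grows when display falls behind (see video_refresh) and decays again in
 * queue_picture when the picture queue fills up.  Returning 1 only when
 * skip_frames_index catches up with it means that, e.g., with
 * skip_frames == 2.0 only every other decoded frame is queued for display;
 * the rest are decoded and dropped. */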
1545
1546 #if CONFIG_AVFILTER
1547 typedef struct {
1548     VideoState *is;
1549     AVFrame *frame;
1550     int use_dr1;
1551 } FilterPriv;
1552
1553 static int input_get_buffer(AVCodecContext *codec, AVFrame *pic)
1554 {
1555     AVFilterContext *ctx = codec->opaque;
1556     AVFilterBufferRef  *ref;
1557     int perms = AV_PERM_WRITE;
1558     int i, w, h, stride[4];
1559     unsigned edge;
1560     int pixel_size;
1561
1562     av_assert0(codec->flags & CODEC_FLAG_EMU_EDGE);
1563
1564     if (codec->codec->capabilities & CODEC_CAP_NEG_LINESIZES)
1565         perms |= AV_PERM_NEG_LINESIZES;
1566
1567     if(pic->buffer_hints & FF_BUFFER_HINTS_VALID) {
1568         if(pic->buffer_hints & FF_BUFFER_HINTS_READABLE) perms |= AV_PERM_READ;
1569         if(pic->buffer_hints & FF_BUFFER_HINTS_PRESERVE) perms |= AV_PERM_PRESERVE;
1570         if(pic->buffer_hints & FF_BUFFER_HINTS_REUSABLE) perms |= AV_PERM_REUSE2;
1571     }
1572     if(pic->reference) perms |= AV_PERM_READ | AV_PERM_PRESERVE;
1573
1574     w = codec->width;
1575     h = codec->height;
1576
1577     if(av_image_check_size(w, h, 0, codec))
1578         return -1;
1579
1580     avcodec_align_dimensions2(codec, &w, &h, stride);
1581     edge = codec->flags & CODEC_FLAG_EMU_EDGE ? 0 : avcodec_get_edge_width();
1582     w += edge << 1;
1583     h += edge << 1;
1584
1585     if(!(ref = avfilter_get_video_buffer(ctx->outputs[0], perms, w, h)))
1586         return -1;
1587
1588     pixel_size = av_pix_fmt_descriptors[ref->format].comp[0].step_minus1+1;
1589     ref->video->w = codec->width;
1590     ref->video->h = codec->height;
1591     for(i = 0; i < 4; i ++) {
1592         unsigned hshift = (i == 1 || i == 2) ? av_pix_fmt_descriptors[ref->format].log2_chroma_w : 0;
1593         unsigned vshift = (i == 1 || i == 2) ? av_pix_fmt_descriptors[ref->format].log2_chroma_h : 0;
1594
1595         if (ref->data[i]) {
1596             ref->data[i]    += ((edge * pixel_size) >> hshift) + ((edge * ref->linesize[i]) >> vshift);
1597         }
1598         pic->data[i]     = ref->data[i];
1599         pic->linesize[i] = ref->linesize[i];
1600     }
1601     pic->opaque = ref;
1602     pic->age    = INT_MAX;
1603     pic->type   = FF_BUFFER_TYPE_USER;
1604     pic->reordered_opaque = codec->reordered_opaque;
1605     if(codec->pkt) pic->pkt_pts = codec->pkt->pts;
1606     else           pic->pkt_pts = AV_NOPTS_VALUE;
1607     return 0;
1608 }
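
/* input_get_buffer() and the two callbacks below implement direct rendering
 * ("DR1") into libavfilter buffers: instead of decoding into a plain AVFrame
 * and copying, the decoder is handed pointers that live inside an
 * AVFilterBufferRef, so input_request_frame() can pass that reference straight
 * down the filter chain without an extra av_image_copy(). */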
1609
1610 static void input_release_buffer(AVCodecContext *codec, AVFrame *pic)
1611 {
1612     memset(pic->data, 0, sizeof(pic->data));
1613     avfilter_unref_buffer(pic->opaque);
1614 }
1615
1616 static int input_reget_buffer(AVCodecContext *codec, AVFrame *pic)
1617 {
1618     AVFilterBufferRef *ref = pic->opaque;
1619
1620     if (pic->data[0] == NULL) {
1621         pic->buffer_hints |= FF_BUFFER_HINTS_READABLE;
1622         return codec->get_buffer(codec, pic);
1623     }
1624
1625     if ((codec->width != ref->video->w) || (codec->height != ref->video->h) ||
1626         (codec->pix_fmt != ref->format)) {
1627         av_log(codec, AV_LOG_ERROR, "Picture properties changed.\n");
1628         return -1;
1629     }
1630
1631     pic->reordered_opaque = codec->reordered_opaque;
1632     if(codec->pkt) pic->pkt_pts = codec->pkt->pts;
1633     else           pic->pkt_pts = AV_NOPTS_VALUE;
1634     return 0;
1635 }
1636
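/* filter init: remember the VideoState and, if the decoder supports DR1,
   install the get/release/reget_buffer callbacks above */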
1637 static int input_init(AVFilterContext *ctx, const char *args, void *opaque)
1638 {
1639     FilterPriv *priv = ctx->priv;
1640     AVCodecContext *codec;
1641     if(!opaque) return -1;
1642
1643     priv->is = opaque;
1644     codec    = priv->is->video_st->codec;
1645     codec->opaque = ctx;
1646     if((codec->codec->capabilities & CODEC_CAP_DR1)
1647     ) {
1648         av_assert0(codec->flags & CODEC_FLAG_EMU_EDGE);
1649         priv->use_dr1 = 1;
1650         codec->get_buffer     = input_get_buffer;
1651         codec->release_buffer = input_release_buffer;
1652         codec->reget_buffer   = input_reget_buffer;
1653         codec->thread_safe_callbacks = 1;
1654     }
1655
1656     priv->frame = avcodec_alloc_frame();
1657
1658     return 0;
1659 }
1660
1661 static void input_uninit(AVFilterContext *ctx)
1662 {
1663     FilterPriv *priv = ctx->priv;
1664     av_free(priv->frame);
1665 }
1666
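/* request_frame(): decode the next video frame, wrap it in a buffer
   reference (reusing the decoder's DR1 buffer when possible) and push it
   through the link with start_frame/draw_slice/end_frame */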
1667 static int input_request_frame(AVFilterLink *link)
1668 {
1669     FilterPriv *priv = link->src->priv;
1670     AVFilterBufferRef *picref;
1671     int64_t pts = 0;
1672     AVPacket pkt;
1673     int ret;
1674
1675     while (!(ret = get_video_frame(priv->is, priv->frame, &pts, &pkt)))
1676         av_free_packet(&pkt);
1677     if (ret < 0)
1678         return -1;
1679
1680     if(priv->use_dr1) {
1681         picref = avfilter_ref_buffer(priv->frame->opaque, ~0);
1682     } else {
1683         picref = avfilter_get_video_buffer(link, AV_PERM_WRITE, link->w, link->h);
1684         av_image_copy(picref->data, picref->linesize,
1685                       priv->frame->data, priv->frame->linesize,
1686                       picref->format, link->w, link->h);
1687     }
1688     av_free_packet(&pkt);
1689
1690     avfilter_copy_frame_props(picref, priv->frame);
1691     picref->pts = pts;
1692
1693     avfilter_start_frame(link, picref);
1694     avfilter_draw_slice(link, 0, link->h, 1);
1695     avfilter_end_frame(link);
1696
1697     return 0;
1698 }
1699
1700 static int input_query_formats(AVFilterContext *ctx)
1701 {
1702     FilterPriv *priv = ctx->priv;
1703     enum PixelFormat pix_fmts[] = {
1704         priv->is->video_st->codec->pix_fmt, PIX_FMT_NONE
1705     };
1706
1707     avfilter_set_common_formats(ctx, avfilter_make_format_list(pix_fmts));
1708     return 0;
1709 }
1710
1711 static int input_config_props(AVFilterLink *link)
1712 {
1713     FilterPriv *priv  = link->src->priv;
1714     AVCodecContext *c = priv->is->video_st->codec;
1715
1716     link->w = c->width;
1717     link->h = c->height;
1718     link->time_base = priv->is->video_st->time_base;
1719
1720     return 0;
1721 }
1722
1723 static AVFilter input_filter =
1724 {
1725     .name      = "ffplay_input",
1726
1727     .priv_size = sizeof(FilterPriv),
1728
1729     .init      = input_init,
1730     .uninit    = input_uninit,
1731
1732     .query_formats = input_query_formats,
1733
1734     .inputs    = (AVFilterPad[]) {{ .name = NULL }},
1735     .outputs   = (AVFilterPad[]) {{ .name = "default",
1736                                     .type = AVMEDIA_TYPE_VIDEO,
1737                                     .request_frame = input_request_frame,
1738                                     .config_props  = input_config_props, },
1739                                   { .name = NULL }},
1740 };
1741
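/* build the filter graph: ffplay_input -> [user -vf chain] -> ffsink;
   without a user filter string the source is linked directly to the sink */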
1742 static int configure_video_filters(AVFilterGraph *graph, VideoState *is, const char *vfilters)
1743 {
1744     char sws_flags_str[128];
1745     int ret;
1746     FFSinkContext ffsink_ctx = { .pix_fmt = PIX_FMT_YUV420P };
1747     AVFilterContext *filt_src = NULL, *filt_out = NULL;
1748     snprintf(sws_flags_str, sizeof(sws_flags_str), "flags=%d", sws_flags);
1749     graph->scale_sws_opts = av_strdup(sws_flags_str);
1750
1751     if ((ret = avfilter_graph_create_filter(&filt_src, &input_filter, "src",
1752                                             NULL, is, graph)) < 0)
1753         goto the_end;
1754     if ((ret = avfilter_graph_create_filter(&filt_out, &ffsink, "out",
1755                                             NULL, &ffsink_ctx, graph)) < 0)
1756         goto the_end;
1757
1758     if(vfilters) {
1759         AVFilterInOut *outputs = av_malloc(sizeof(AVFilterInOut));
1760         AVFilterInOut *inputs  = av_malloc(sizeof(AVFilterInOut));
1761
1762         outputs->name    = av_strdup("in");
1763         outputs->filter_ctx = filt_src;
1764         outputs->pad_idx = 0;
1765         outputs->next    = NULL;
1766
1767         inputs->name    = av_strdup("out");
1768         inputs->filter_ctx = filt_out;
1769         inputs->pad_idx = 0;
1770         inputs->next    = NULL;
1771
1772         if ((ret = avfilter_graph_parse(graph, vfilters, inputs, outputs, NULL)) < 0)
1773             goto the_end;
1774         av_freep(&vfilters);
1775     } else {
1776         if ((ret = avfilter_link(filt_src, 0, filt_out, 0)) < 0)
1777             goto the_end;
1778     }
1779
1780     if ((ret = avfilter_graph_config(graph, NULL)) < 0)
1781         goto the_end;
1782
1783     is->out_video_filter = filt_out;
1784 the_end:
1785     return ret;
1786 }
1787
1788 #endif  /* CONFIG_AVFILTER */
1789
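/* video decoding thread: pulls decoded frames (directly or through the
   filter graph), converts their pts to seconds and queues them for display */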
1790 static int video_thread(void *arg)
1791 {
1792     VideoState *is = arg;
1793     AVFrame *frame= avcodec_alloc_frame();
1794     int64_t pts_int, pos;
1795     double pts;
1796     int ret;
1797
1798 #if CONFIG_AVFILTER
1799     AVFilterGraph *graph = avfilter_graph_alloc();
1800     AVFilterContext *filt_out = NULL;
1801
1802     if ((ret = configure_video_filters(graph, is, vfilters)) < 0)
1803         goto the_end;
1804     filt_out = is->out_video_filter;
1805 #endif
1806
1807     for(;;) {
1808 #if !CONFIG_AVFILTER
1809         AVPacket pkt;
1810 #else
1811         AVFilterBufferRef *picref;
1812         AVRational tb;
1813 #endif
1814         while (is->paused && !is->videoq.abort_request)
1815             SDL_Delay(10);
1816 #if CONFIG_AVFILTER
1817         ret = get_filtered_video_frame(filt_out, frame, &picref, &tb);
1818         if (picref) {
1819             pts_int = picref->pts;
1820             pos     = picref->pos;
1821             frame->opaque = picref;
1822         }
1823
1824         if (av_cmp_q(tb, is->video_st->time_base)) {
1825             av_unused int64_t pts1 = pts_int;
1826             pts_int = av_rescale_q(pts_int, tb, is->video_st->time_base);
1827             av_dlog(NULL, "video_thread(): "
1828                     "tb:%d/%d pts:%"PRId64" -> tb:%d/%d pts:%"PRId64"\n",
1829                     tb.num, tb.den, pts1,
1830                     is->video_st->time_base.num, is->video_st->time_base.den, pts_int);
1831         }
1832 #else
1833         ret = get_video_frame(is, frame, &pts_int, &pkt);
1834         pos = pkt.pos;
1835         av_free_packet(&pkt);
1836 #endif
1837
1838         if (ret < 0) goto the_end;
1839
1840         if (!ret)
1841             continue;
1842
1843         pts = pts_int*av_q2d(is->video_st->time_base);
1844
1845         ret = queue_picture(is, frame, pts, pos);
1846
1847         if (ret < 0)
1848             goto the_end;
1849
1850         if (step)
1851             if (cur_stream)
1852                 stream_toggle_pause(cur_stream);
1853     }
1854  the_end:
1855 #if CONFIG_AVFILTER
1856     avfilter_graph_free(&graph);
1857 #endif
1858     av_free(frame);
1859     return 0;
1860 }
1861
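/* subtitle decoding thread: decodes subtitle packets, converts their
   palette from RGBA to YUVA and stores them in the subpicture queue */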
1862 static int subtitle_thread(void *arg)
1863 {
1864     VideoState *is = arg;
1865     SubPicture *sp;
1866     AVPacket pkt1, *pkt = &pkt1;
1867     int len1, got_subtitle;
1868     double pts;
1869     int i, j;
1870     int r, g, b, y, u, v, a;
1871
1872     for(;;) {
1873         while (is->paused && !is->subtitleq.abort_request) {
1874             SDL_Delay(10);
1875         }
1876         if (packet_queue_get(&is->subtitleq, pkt, 1) < 0)
1877             break;
1878
1879         if(pkt->data == flush_pkt.data){
1880             avcodec_flush_buffers(is->subtitle_st->codec);
1881             continue;
1882         }
1883         SDL_LockMutex(is->subpq_mutex);
1884         while (is->subpq_size >= SUBPICTURE_QUEUE_SIZE &&
1885                !is->subtitleq.abort_request) {
1886             SDL_CondWait(is->subpq_cond, is->subpq_mutex);
1887         }
1888         SDL_UnlockMutex(is->subpq_mutex);
1889
1890         if (is->subtitleq.abort_request)
1891             goto the_end;
1892
1893         sp = &is->subpq[is->subpq_windex];
1894
1895         /* NOTE: pts is the PTS of the _first_ subtitle beginning in
1896            this packet, if any */
1897         pts = 0;
1898         if (pkt->pts != AV_NOPTS_VALUE)
1899             pts = av_q2d(is->subtitle_st->time_base)*pkt->pts;
1900
1901         len1 = avcodec_decode_subtitle2(is->subtitle_st->codec,
1902                                     &sp->sub, &got_subtitle,
1903                                     pkt);
1904 //            if (len1 < 0)
1905 //                break;
1906         if (got_subtitle && sp->sub.format == 0) {
1907             sp->pts = pts;
1908
1909             for (i = 0; i < sp->sub.num_rects; i++)
1910             {
1911                 for (j = 0; j < sp->sub.rects[i]->nb_colors; j++)
1912                 {
1913                     RGBA_IN(r, g, b, a, (uint32_t*)sp->sub.rects[i]->pict.data[1] + j);
1914                     y = RGB_TO_Y_CCIR(r, g, b);
1915                     u = RGB_TO_U_CCIR(r, g, b, 0);
1916                     v = RGB_TO_V_CCIR(r, g, b, 0);
1917                     YUVA_OUT((uint32_t*)sp->sub.rects[i]->pict.data[1] + j, y, u, v, a);
1918                 }
1919             }
1920
1921             /* now we can update the picture count */
1922             if (++is->subpq_windex == SUBPICTURE_QUEUE_SIZE)
1923                 is->subpq_windex = 0;
1924             SDL_LockMutex(is->subpq_mutex);
1925             is->subpq_size++;
1926             SDL_UnlockMutex(is->subpq_mutex);
1927         }
1928         av_free_packet(pkt);
1929 //        if (step)
1930 //            if (cur_stream)
1931 //                stream_toggle_pause(cur_stream);
1932     }
1933  the_end:
1934     return 0;
1935 }
1936
1937 /* copy samples for viewing in the audio display window */
1938 static void update_sample_display(VideoState *is, short *samples, int samples_size)
1939 {
1940     int size, len;
1941
1942     size = samples_size / sizeof(short);
1943     while (size > 0) {
1944         len = SAMPLE_ARRAY_SIZE - is->sample_array_index;
1945         if (len > size)
1946             len = size;
1947         memcpy(is->sample_array + is->sample_array_index, samples, len * sizeof(short));
1948         samples += len;
1949         is->sample_array_index += len;
1950         if (is->sample_array_index >= SAMPLE_ARRAY_SIZE)
1951             is->sample_array_index = 0;
1952         size -= len;
1953     }
1954 }
1955
1956 /* return the new audio buffer size (samples can be added or removed
1957    to get better sync when video or the external clock is the master) */
1958 static int synchronize_audio(VideoState *is, short *samples,
1959                              int samples_size1, double pts)
1960 {
1961     int n, samples_size;
1962     double ref_clock;
1963
1964     n = 2 * is->audio_st->codec->channels;
1965     samples_size = samples_size1;
1966
1967     /* if not master, then we try to remove or add samples to correct the clock */
1968     if (((is->av_sync_type == AV_SYNC_VIDEO_MASTER && is->video_st) ||
1969          is->av_sync_type == AV_SYNC_EXTERNAL_CLOCK)) {
1970         double diff, avg_diff;
1971         int wanted_size, min_size, max_size, nb_samples;
1972
1973         ref_clock = get_master_clock(is);
1974         diff = get_audio_clock(is) - ref_clock;
1975
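        /* feed the measured A-V difference into a geometrically weighted
           accumulator: with coef = exp(log(0.01) / AUDIO_DIFF_AVG_NB) the
           weight of a measurement drops below 1% after AUDIO_DIFF_AVG_NB
           updates, so avg_diff computed below behaves as an exponential
           moving average of the recent differences */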
1976         if (fabs(diff) < AV_NOSYNC_THRESHOLD) {
1977             is->audio_diff_cum = diff + is->audio_diff_avg_coef * is->audio_diff_cum;
1978             if (is->audio_diff_avg_count < AUDIO_DIFF_AVG_NB) {
1979                 /* not enough measurements for a correct estimate yet */
1980                 is->audio_diff_avg_count++;
1981             } else {
1982                 /* estimate the A-V difference */
1983                 avg_diff = is->audio_diff_cum * (1.0 - is->audio_diff_avg_coef);
1984
1985                 if (fabs(avg_diff) >= is->audio_diff_threshold) {
1986                     wanted_size = samples_size + ((int)(diff * is->audio_st->codec->sample_rate) * n);
1987                     nb_samples = samples_size / n;
1988
1989                     min_size = ((nb_samples * (100 - SAMPLE_CORRECTION_PERCENT_MAX)) / 100) * n;
1990                     max_size = ((nb_samples * (100 + SAMPLE_CORRECTION_PERCENT_MAX)) / 100) * n;
1991                     if (wanted_size < min_size)
1992                         wanted_size = min_size;
1993                     else if (wanted_size > max_size)
1994                         wanted_size = max_size;
1995
1996                     /* add or remove samples to correct the synchronization */
1997                     if (wanted_size < samples_size) {
1998                         /* remove samples */
1999                         samples_size = wanted_size;
2000                     } else if (wanted_size > samples_size) {
2001                         uint8_t *samples_end, *q;
2002                         int nb;
2003
2004                         /* add samples by duplicating the last sample frame */
2005                         nb = (wanted_size - samples_size);
2006                         samples_end = (uint8_t *)samples + samples_size - n;
2007                         q = samples_end + n;
2008                         while (nb > 0) {
2009                             memcpy(q, samples_end, n);
2010                             q += n;
2011                             nb -= n;
2012                         }
2013                         samples_size = wanted_size;
2014                     }
2015                 }
2016 #if 0
2017                 printf("diff=%f adiff=%f sample_diff=%d apts=%0.3f vpts=%0.3f %f\n",
2018                        diff, avg_diff, samples_size - samples_size1,
2019                        is->audio_clock, is->video_clock, is->audio_diff_threshold);
2020 #endif
2021             }
2022         } else {
2023             /* difference is too large: probably initial PTS errors, so
2024                reset the A-V filter */
2025             is->audio_diff_avg_count = 0;
2026             is->audio_diff_cum = 0;
2027         }
2028     }
2029
2030     return samples_size;
2031 }
2032
2033 /* decode one audio frame and return its uncompressed size */
2034 static int audio_decode_frame(VideoState *is, double *pts_ptr)
2035 {
2036     AVPacket *pkt_temp = &is->audio_pkt_temp;
2037     AVPacket *pkt = &is->audio_pkt;
2038     AVCodecContext *dec= is->audio_st->codec;
2039     int n, len1, data_size;
2040     double pts;
2041
2042     for(;;) {
2043         /* NOTE: the audio packet can contain several frames */
2044         while (pkt_temp->size > 0) {
2045             data_size = sizeof(is->audio_buf1);
2046             len1 = avcodec_decode_audio3(dec,
2047                                         (int16_t *)is->audio_buf1, &data_size,
2048                                         pkt_temp);
2049             if (len1 < 0) {
2050                 /* if error, we skip the frame */
2051                 pkt_temp->size = 0;
2052                 break;
2053             }
2054
2055             pkt_temp->data += len1;
2056             pkt_temp->size -= len1;
2057             if (data_size <= 0)
2058                 continue;
2059
2060             if (dec->sample_fmt != is->audio_src_fmt) {
2061                 if (is->reformat_ctx)
2062                     av_audio_convert_free(is->reformat_ctx);
2063                 is->reformat_ctx= av_audio_convert_alloc(AV_SAMPLE_FMT_S16, 1,
2064                                                          dec->sample_fmt, 1, NULL, 0);
2065                 if (!is->reformat_ctx) {
2066                     fprintf(stderr, "Cannot convert %s sample format to %s sample format\n",
2067                         av_get_sample_fmt_name(dec->sample_fmt),
2068                         av_get_sample_fmt_name(AV_SAMPLE_FMT_S16));
2069                         break;
2070                 }
2071                 is->audio_src_fmt= dec->sample_fmt;
2072             }
2073
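            /* the SDL audio path expects interleaved signed 16-bit samples
               (AUDIO_S16SYS); any other decoder output format is converted
               here with av_audio_convert() */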
2074             if (is->reformat_ctx) {
2075                 const void *ibuf[6]= {is->audio_buf1};
2076                 void *obuf[6]= {is->audio_buf2};
2077                 int istride[6]= {av_get_bits_per_sample_fmt(dec->sample_fmt)/8};
2078                 int ostride[6]= {2};
2079                 int len= data_size/istride[0];
2080                 if (av_audio_convert(is->reformat_ctx, obuf, ostride, ibuf, istride, len)<0) {
2081                     printf("av_audio_convert() failed\n");
2082                     break;
2083                 }
2084                 is->audio_buf= is->audio_buf2;
2085                 /* FIXME: existing code assumes that data_size equals framesize*channels*2,
2086                           remove this legacy cruft */
2087                 data_size= len*2;
2088             }else{
2089                 is->audio_buf= is->audio_buf1;
2090             }
2091
2092             /* use the running audio clock as the pts of this frame, then advance it */
2093             pts = is->audio_clock;
2094             *pts_ptr = pts;
2095             n = 2 * dec->channels;
2096             is->audio_clock += (double)data_size /
2097                 (double)(n * dec->sample_rate);
2098 #if defined(DEBUG_SYNC)
2099             {
2100                 static double last_clock;
2101                 printf("audio: delay=%0.3f clock=%0.3f pts=%0.3f\n",
2102                        is->audio_clock - last_clock,
2103                        is->audio_clock, pts);
2104                 last_clock = is->audio_clock;
2105             }
2106 #endif
2107             return data_size;
2108         }
2109
2110         /* free the current packet */
2111         if (pkt->data)
2112             av_free_packet(pkt);
2113
2114         if (is->paused || is->audioq.abort_request) {
2115             return -1;
2116         }
2117
2118         /* read next packet */
2119         if (packet_queue_get(&is->audioq, pkt, 1) < 0)
2120             return -1;
2121         if(pkt->data == flush_pkt.data){
2122             avcodec_flush_buffers(dec);
2123             continue;
2124         }
2125
2126         pkt_temp->data = pkt->data;
2127         pkt_temp->size = pkt->size;
2128
2129         /* update the audio clock with the packet pts, if available */
2130         if (pkt->pts != AV_NOPTS_VALUE) {
2131             is->audio_clock = av_q2d(is->audio_st->time_base)*pkt->pts;
2132         }
2133     }
2134 }
2135
2136 /* prepare a new audio buffer */
2137 static void sdl_audio_callback(void *opaque, Uint8 *stream, int len)
2138 {
2139     VideoState *is = opaque;
2140     int audio_size, len1;
2141     double pts;
2142
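    /* timestamp of the start of this callback; it is used to estimate how
       much of the previously queued audio has already been played */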
2143     audio_callback_time = av_gettime();
2144
2145     while (len > 0) {
2146         if (is->audio_buf_index >= is->audio_buf_size) {
2147            audio_size = audio_decode_frame(is, &pts);
2148            if (audio_size < 0) {
2149                 /* if error, just output silence */
2150                is->audio_buf = is->audio_buf1;
2151                is->audio_buf_size = 1024;
2152                memset(is->audio_buf, 0, is->audio_buf_size);
2153            } else {
2154                if (is->show_mode != SHOW_MODE_VIDEO)
2155                    update_sample_display(is, (int16_t *)is->audio_buf, audio_size);
2156                audio_size = synchronize_audio(is, (int16_t *)is->audio_buf, audio_size,
2157                                               pts);
2158                is->audio_buf_size = audio_size;
2159            }
2160            is->audio_buf_index = 0;
2161         }
2162         len1 = is->audio_buf_size - is->audio_buf_index;
2163         if (len1 > len)
2164             len1 = len;
2165         memcpy(stream, (uint8_t *)is->audio_buf + is->audio_buf_index, len1);
2166         len -= len1;
2167         stream += len1;
2168         is->audio_buf_index += len1;
2169     }
2170 }
2171
2172 /* open a given stream. Return 0 if OK */
2173 static int stream_component_open(VideoState *is, int stream_index)
2174 {
2175     AVFormatContext *ic = is->ic;
2176     AVCodecContext *avctx;
2177     AVCodec *codec;
2178     SDL_AudioSpec wanted_spec, spec;
2179
2180     if (stream_index < 0 || stream_index >= ic->nb_streams)
2181         return -1;
2182     avctx = ic->streams[stream_index]->codec;
2183
2184     /* limit decoded audio to at most 2 channels before opening the decoder */
2185     if (avctx->codec_type == AVMEDIA_TYPE_AUDIO) {
2186         if (avctx->channels > 0) {
2187             avctx->request_channels = FFMIN(2, avctx->channels);
2188         } else {
2189             avctx->request_channels = 2;
2190         }
2191     }
2192
2193     codec = avcodec_find_decoder(avctx->codec_id);
2194     avctx->debug_mv = debug_mv;
2195     avctx->debug = debug;
2196     avctx->workaround_bugs = workaround_bugs;
2197     avctx->lowres = lowres;
2198     if(lowres) avctx->flags |= CODEC_FLAG_EMU_EDGE;
2199     avctx->idct_algo= idct;
2200     if(fast) avctx->flags2 |= CODEC_FLAG2_FAST;
2201     avctx->skip_frame= skip_frame;
2202     avctx->skip_idct= skip_idct;
2203     avctx->skip_loop_filter= skip_loop_filter;
2204     avctx->error_recognition= error_recognition;
2205     avctx->error_concealment= error_concealment;
2206     avctx->thread_count= thread_count;
2207
2208     set_context_opts(avctx, avcodec_opts[avctx->codec_type], 0, codec);
2209
2210     if(codec && (codec->capabilities & CODEC_CAP_DR1))
2211         avctx->flags |= CODEC_FLAG_EMU_EDGE;
2212
2213     if (!codec ||
2214         avcodec_open(avctx, codec) < 0)
2215         return -1;
2216
2217     /* prepare audio output */
2218     if (avctx->codec_type == AVMEDIA_TYPE_AUDIO) {
2219         wanted_spec.freq = avctx->sample_rate;
2220         wanted_spec.format = AUDIO_S16SYS;
2221         wanted_spec.channels = avctx->channels;
2222         wanted_spec.silence = 0;
2223         wanted_spec.samples = SDL_AUDIO_BUFFER_SIZE;
2224         wanted_spec.callback = sdl_audio_callback;
2225         wanted_spec.userdata = is;
2226         if (SDL_OpenAudio(&wanted_spec, &spec) < 0) {
2227             fprintf(stderr, "SDL_OpenAudio: %s\n", SDL_GetError());
2228             return -1;
2229         }
2230         is->audio_hw_buf_size = spec.size;
2231         is->audio_src_fmt= AV_SAMPLE_FMT_S16;
2232     }
2233
2234     ic->streams[stream_index]->discard = AVDISCARD_DEFAULT;
2235     switch(avctx->codec_type) {
2236     case AVMEDIA_TYPE_AUDIO:
2237         is->audio_stream = stream_index;
2238         is->audio_st = ic->streams[stream_index];
2239         is->audio_buf_size = 0;
2240         is->audio_buf_index = 0;
2241
2242         /* init averaging filter */
2243         is->audio_diff_avg_coef = exp(log(0.01) / AUDIO_DIFF_AVG_NB);
2244         is->audio_diff_avg_count = 0;
2245         /* since we do not have a precise enough audio FIFO fullness estimate,
2246            we only correct audio sync when the error exceeds this threshold */
2247         is->audio_diff_threshold = 2.0 * SDL_AUDIO_BUFFER_SIZE / avctx->sample_rate;
2248
2249         memset(&is->audio_pkt, 0, sizeof(is->audio_pkt));
2250         packet_queue_init(&is->audioq);
2251         SDL_PauseAudio(0);
2252         break;
2253     case AVMEDIA_TYPE_VIDEO:
2254         is->video_stream = stream_index;
2255         is->video_st = ic->streams[stream_index];
2256
2257 //        is->video_current_pts_time = av_gettime();
2258
2259         packet_queue_init(&is->videoq);
2260         is->video_tid = SDL_CreateThread(video_thread, is);
2261         break;
2262     case AVMEDIA_TYPE_SUBTITLE:
2263         is->subtitle_stream = stream_index;
2264         is->subtitle_st = ic->streams[stream_index];
2265         packet_queue_init(&is->subtitleq);
2266
2267         is->subtitle_tid = SDL_CreateThread(subtitle_thread, is);
2268         break;
2269     default:
2270         break;
2271     }
2272     return 0;
2273 }
2274
2275 static void stream_component_close(VideoState *is, int stream_index)
2276 {
2277     AVFormatContext *ic = is->ic;
2278     AVCodecContext *avctx;
2279
2280     if (stream_index < 0 || stream_index >= ic->nb_streams)
2281         return;
2282     avctx = ic->streams[stream_index]->codec;
2283
2284     switch(avctx->codec_type) {
2285     case AVMEDIA_TYPE_AUDIO:
2286         packet_queue_abort(&is->audioq);
2287
2288         SDL_CloseAudio();
2289
2290         packet_queue_end(&is->audioq);
2291         if (is->reformat_ctx)
2292             av_audio_convert_free(is->reformat_ctx);
2293         is->reformat_ctx = NULL;
2294         break;
2295     case AVMEDIA_TYPE_VIDEO:
2296         packet_queue_abort(&is->videoq);
2297
2298         /* note: we also signal this mutex to make sure we unblock the
2299            video thread in all cases */
2300         SDL_LockMutex(is->pictq_mutex);
2301         SDL_CondSignal(is->pictq_cond);
2302         SDL_UnlockMutex(is->pictq_mutex);
2303
2304         SDL_WaitThread(is->video_tid, NULL);
2305
2306         packet_queue_end(&is->videoq);
2307         break;
2308     case AVMEDIA_TYPE_SUBTITLE:
2309         packet_queue_abort(&is->subtitleq);
2310
2311         /* note: we also signal this mutex to make sure we unblock the
2312            subtitle thread in all cases */
2313         SDL_LockMutex(is->subpq_mutex);
2314         is->subtitle_stream_changed = 1;
2315
2316         SDL_CondSignal(is->subpq_cond);
2317         SDL_UnlockMutex(is->subpq_mutex);
2318
2319         SDL_WaitThread(is->subtitle_tid, NULL);
2320
2321         packet_queue_end(&is->subtitleq);
2322         break;
2323     default:
2324         break;
2325     }
2326
2327     ic->streams[stream_index]->discard = AVDISCARD_ALL;
2328     avcodec_close(avctx);
2329     switch(avctx->codec_type) {
2330     case AVMEDIA_TYPE_AUDIO:
2331         is->audio_st = NULL;
2332         is->audio_stream = -1;
2333         break;
2334     case AVMEDIA_TYPE_VIDEO:
2335         is->video_st = NULL;
2336         is->video_stream = -1;
2337         break;
2338     case AVMEDIA_TYPE_SUBTITLE:
2339         is->subtitle_st = NULL;
2340         is->subtitle_stream = -1;
2341         break;
2342     default:
2343         break;
2344     }
2345 }
2346
2347 /* since we have only one decoding thread, we can use a global
2348    variable instead of a thread local variable */
2349 static VideoState *global_video_state;
2350
2351 static int decode_interrupt_cb(void)
2352 {
2353     return (global_video_state && global_video_state->abort_request);
2354 }
2355
2356 /* this thread gets the stream from the disk or the network */
2357 static int read_thread(void *arg)
2358 {
2359     VideoState *is = arg;
2360     AVFormatContext *ic;
2361     int err, i, ret;
2362     int st_index[AVMEDIA_TYPE_NB];
2363     AVPacket pkt1, *pkt = &pkt1;
2364     AVFormatParameters params, *ap = &params;
2365     int eof=0;
2366     int pkt_in_play_range = 0;
2367
2368     ic = avformat_alloc_context();
2369
2370     memset(st_index, -1, sizeof(st_index));
2371     is->video_stream = -1;
2372     is->audio_stream = -1;
2373     is->subtitle_stream = -1;
2374
2375     global_video_state = is;
2376     avio_set_interrupt_cb(decode_interrupt_cb);
2377
2378     memset(ap, 0, sizeof(*ap));
2379
2380     ap->prealloced_context = 1;
2381     ap->width = frame_width;
2382     ap->height= frame_height;
2383     ap->time_base= (AVRational){1, 25};
2384     ap->pix_fmt = frame_pix_fmt;
2385     ic->flags |= AVFMT_FLAG_PRIV_OPT;
2386
2387
2388     err = av_open_input_file(&ic, is->filename, is->iformat, 0, ap);
2389     if (err >= 0) {
2390         set_context_opts(ic, avformat_opts, AV_OPT_FLAG_DECODING_PARAM, NULL);
2391         err = av_demuxer_open(ic, ap);
2392         if(err < 0){
2393             avformat_free_context(ic);
2394             ic= NULL;
2395         }
2396     }
2397     if (err < 0) {
2398         print_error(is->filename, err);
2399         ret = -1;
2400         goto fail;
2401     }
2402     is->ic = ic;
2403
2404     if(genpts)
2405         ic->flags |= AVFMT_FLAG_GENPTS;
2406
2407     err = av_find_stream_info(ic);
2408     if (err < 0) {
2409         fprintf(stderr, "%s: could not find codec parameters\n", is->filename);
2410         ret = -1;
2411         goto fail;
2412     }
2413     if(ic->pb)
2414         ic->pb->eof_reached= 0; //FIXME hack, ffplay maybe should not use url_feof() to test for the end
2415
2416     if(seek_by_bytes<0)
2417         seek_by_bytes= !!(ic->iformat->flags & AVFMT_TS_DISCONT);
2418
2419     /* if seeking was requested, execute it */
2420     if (start_time != AV_NOPTS_VALUE) {
2421         int64_t timestamp;
2422
2423         timestamp = start_time;
2424         /* add the stream start time */
2425         if (ic->start_time != AV_NOPTS_VALUE)
2426             timestamp += ic->start_time;
2427         ret = avformat_seek_file(ic, -1, INT64_MIN, timestamp, INT64_MAX, 0);
2428         if (ret < 0) {
2429             fprintf(stderr, "%s: could not seek to position %0.3f\n",
2430                     is->filename, (double)timestamp / AV_TIME_BASE);
2431         }
2432     }
2433
2434     for (i = 0; i < ic->nb_streams; i++)
2435         ic->streams[i]->discard = AVDISCARD_ALL;
2436     if (!video_disable)
2437         st_index[AVMEDIA_TYPE_VIDEO] =
2438             av_find_best_stream(ic, AVMEDIA_TYPE_VIDEO,
2439                                 wanted_stream[AVMEDIA_TYPE_VIDEO], -1, NULL, 0);
2440     if (!audio_disable)
2441         st_index[AVMEDIA_TYPE_AUDIO] =
2442             av_find_best_stream(ic, AVMEDIA_TYPE_AUDIO,
2443                                 wanted_stream[AVMEDIA_TYPE_AUDIO],
2444                                 st_index[AVMEDIA_TYPE_VIDEO],
2445                                 NULL, 0);
2446     if (!video_disable)
2447         st_index[AVMEDIA_TYPE_SUBTITLE] =
2448             av_find_best_stream(ic, AVMEDIA_TYPE_SUBTITLE,
2449                                 wanted_stream[AVMEDIA_TYPE_SUBTITLE],
2450                                 (st_index[AVMEDIA_TYPE_AUDIO] >= 0 ?
2451                                  st_index[AVMEDIA_TYPE_AUDIO] :
2452                                  st_index[AVMEDIA_TYPE_VIDEO]),
2453                                 NULL, 0);
2454     if (show_status) {
2455         av_dump_format(ic, 0, is->filename, 0);
2456     }
2457
2458     is->show_mode = show_mode;
2459
2460     /* open the streams */
2461     if (st_index[AVMEDIA_TYPE_AUDIO] >= 0) {
2462         stream_component_open(is, st_index[AVMEDIA_TYPE_AUDIO]);
2463     }
2464
2465     ret=-1;
2466     if (st_index[AVMEDIA_TYPE_VIDEO] >= 0) {
2467         ret= stream_component_open(is, st_index[AVMEDIA_TYPE_VIDEO]);
2468     }
2469     is->refresh_tid = SDL_CreateThread(refresh_thread, is);
2470     if (is->show_mode == SHOW_MODE_NONE)
2471         is->show_mode = ret >= 0 ? SHOW_MODE_VIDEO : SHOW_MODE_RDFT;
2472
2473     if (st_index[AVMEDIA_TYPE_SUBTITLE] >= 0) {
2474         stream_component_open(is, st_index[AVMEDIA_TYPE_SUBTITLE]);
2475     }
2476
2477     if (is->video_stream < 0 && is->audio_stream < 0) {
2478         fprintf(stderr, "%s: could not open codecs\n", is->filename);
2479         ret = -1;
2480         goto fail;
2481     }
2482
2483     for(;;) {
2484         if (is->abort_request)
2485             break;
2486         if (is->paused != is->last_paused) {
2487             is->last_paused = is->paused;
2488             if (is->paused)
2489                 is->read_pause_return= av_read_pause(ic);
2490             else
2491                 av_read_play(ic);
2492         }
2493 #if CONFIG_RTSP_DEMUXER
2494         if (is->paused && !strcmp(ic->iformat->name, "rtsp")) {
2495             /* wait 10 ms to avoid trying to get another packet */
2496             /* XXX: horrible */
2497             SDL_Delay(10);
2498             continue;
2499         }
2500 #endif
2501         if (is->seek_req) {
2502             int64_t seek_target= is->seek_pos;
2503             int64_t seek_min= is->seek_rel > 0 ? seek_target - is->seek_rel + 2: INT64_MIN;
2504             int64_t seek_max= is->seek_rel < 0 ? seek_target - is->seek_rel - 2: INT64_MAX;
2505 //FIXME the +-2 is due to rounding not being done in the correct direction
2506 //      when the seek_pos/seek_rel variables are generated
2507
2508             ret = avformat_seek_file(is->ic, -1, seek_min, seek_target, seek_max, is->seek_flags);
2509             if (ret < 0) {
2510                 fprintf(stderr, "%s: error while seeking\n", is->ic->filename);
2511             }else{
2512                 if (is->audio_stream >= 0) {
2513                     packet_queue_flush(&is->audioq);
2514                     packet_queue_put(&is->audioq, &flush_pkt);
2515                 }
2516                 if (is->subtitle_stream >= 0) {
2517                     packet_queue_flush(&is->subtitleq);
2518                     packet_queue_put(&is->subtitleq, &flush_pkt);
2519                 }
2520                 if (is->video_stream >= 0) {
2521                     packet_queue_flush(&is->videoq);
2522                     packet_queue_put(&is->videoq, &flush_pkt);
2523                 }
2524             }
2525             is->seek_req = 0;
2526             eof= 0;
2527         }
2528
2529         /* if the queues are full, no need to read more */
2530         if (   is->audioq.size + is->videoq.size + is->subtitleq.size > MAX_QUEUE_SIZE
2531             || (   (is->audioq   .size  > MIN_AUDIOQ_SIZE || is->audio_stream<0)
2532                 && (is->videoq   .nb_packets > MIN_FRAMES || is->video_stream<0)
2533                 && (is->subtitleq.nb_packets > MIN_FRAMES || is->subtitle_stream<0))) {
2534             /* wait 10 ms */
2535             SDL_Delay(10);
2536             continue;
2537         }
2538         if(eof) {
2539             if(is->video_stream >= 0){
2540                 av_init_packet(pkt);
2541                 pkt->data=NULL;
2542                 pkt->size=0;
2543                 pkt->stream_index= is->video_stream;
2544                 packet_queue_put(&is->videoq, pkt);
2545             }
2546             SDL_Delay(10);
2547             if(is->audioq.size + is->videoq.size + is->subtitleq.size ==0){
2548                 if(loop!=1 && (!loop || --loop)){
2549                     stream_seek(cur_stream, start_time != AV_NOPTS_VALUE ? start_time : 0, 0, 0);
2550                 }else if(autoexit){
2551                     ret=AVERROR_EOF;
2552                     goto fail;
2553                 }
2554             }
2555             eof=0;
2556             continue;
2557         }
2558         ret = av_read_frame(ic, pkt);
2559         if (ret < 0) {
2560             if (ret == AVERROR_EOF || (ic->pb && url_feof(ic->pb)))
2561                 eof=1;
2562             if (ic->pb && ic->pb->error)
2563                 break;
2564             SDL_Delay(100); /* wait for user event */
2565             continue;
2566         }
2567         /* check if packet is in play range specified by user, then queue, otherwise discard */
2568         pkt_in_play_range = duration == AV_NOPTS_VALUE ||
2569                 (pkt->pts - ic->streams[pkt->stream_index]->start_time) *
2570                 av_q2d(ic->streams[pkt->stream_index]->time_base) -
2571                 (double)(start_time != AV_NOPTS_VALUE ? start_time : 0)/1000000
2572                 <= ((double)duration/1000000);
2573         if (pkt->stream_index == is->audio_stream && pkt_in_play_range) {
2574             packet_queue_put(&is->audioq, pkt);
2575         } else if (pkt->stream_index == is->video_stream && pkt_in_play_range) {
2576             packet_queue_put(&is->videoq, pkt);
2577         } else if (pkt->stream_index == is->subtitle_stream && pkt_in_play_range) {
2578             packet_queue_put(&is->subtitleq, pkt);
2579         } else {
2580             av_free_packet(pkt);
2581         }
2582     }
2583     /* wait until the end */
2584     while (!is->abort_request) {
2585         SDL_Delay(100);
2586     }
2587
2588     ret = 0;
2589  fail:
2590     /* disable interrupting */
2591     global_video_state = NULL;
2592
2593     /* close each stream */
2594     if (is->audio_stream >= 0)
2595         stream_component_close(is, is->audio_stream);
2596     if (is->video_stream >= 0)
2597         stream_component_close(is, is->video_stream);
2598     if (is->subtitle_stream >= 0)
2599         stream_component_close(is, is->subtitle_stream);
2600     if (is->ic) {
2601         av_close_input_file(is->ic);
2602         is->ic = NULL; /* safety */
2603     }
2604     avio_set_interrupt_cb(NULL);
2605
2606     if (ret != 0) {
2607         SDL_Event event;
2608
2609         event.type = FF_QUIT_EVENT;
2610         event.user.data1 = is;
2611         SDL_PushEvent(&event);
2612     }
2613     return 0;
2614 }
2615
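/* allocate a VideoState for the given input and start the read thread;
   demuxing and the actual stream opening happen in read_thread() */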
2616 static VideoState *stream_open(const char *filename, AVInputFormat *iformat)
2617 {
2618     VideoState *is;
2619
2620     is = av_mallocz(sizeof(VideoState));
2621     if (!is)
2622         return NULL;
2623     av_strlcpy(is->filename, filename, sizeof(is->filename));
2624     is->iformat = iformat;
2625     is->ytop = 0;
2626     is->xleft = 0;
2627
2628     /* start video display */
2629     is->pictq_mutex = SDL_CreateMutex();
2630     is->pictq_cond = SDL_CreateCond();
2631
2632     is->subpq_mutex = SDL_CreateMutex();
2633     is->subpq_cond = SDL_CreateCond();
2634
2635     is->av_sync_type = av_sync_type;
2636     is->read_tid = SDL_CreateThread(read_thread, is);
2637     if (!is->read_tid) {
2638         av_free(is);
2639         return NULL;
2640     }
2641     return is;
2642 }
2643
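/* switch to the next stream of the given type, wrapping around; for
   subtitles the cycle also includes "no subtitle stream" (index -1) */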
2644 static void stream_cycle_channel(VideoState *is, int codec_type)
2645 {
2646     AVFormatContext *ic = is->ic;
2647     int start_index, stream_index;
2648     AVStream *st;
2649
2650     if (codec_type == AVMEDIA_TYPE_VIDEO)
2651         start_index = is->video_stream;
2652     else if (codec_type == AVMEDIA_TYPE_AUDIO)
2653         start_index = is->audio_stream;
2654     else
2655         start_index = is->subtitle_stream;
2656     if (start_index < (codec_type == AVMEDIA_TYPE_SUBTITLE ? -1 : 0))
2657         return;
2658     stream_index = start_index;
2659     for(;;) {
2660         if (++stream_index >= is->ic->nb_streams)
2661         {
2662             if (codec_type == AVMEDIA_TYPE_SUBTITLE)
2663             {
2664                 stream_index = -1;
2665                 goto the_end;
2666             } else
2667                 stream_index = 0;
2668         }
2669         if (stream_index == start_index)
2670             return;
2671         st = ic->streams[stream_index];
2672         if (st->codec->codec_type == codec_type) {
2673             /* check that parameters are OK */
2674             switch(codec_type) {
2675             case AVMEDIA_TYPE_AUDIO:
2676                 if (st->codec->sample_rate != 0 &&
2677                     st->codec->channels != 0)
2678                     goto the_end;
2679                 break;
2680             case AVMEDIA_TYPE_VIDEO:
2681             case AVMEDIA_TYPE_SUBTITLE:
2682                 goto the_end;
2683             default:
2684                 break;
2685             }
2686         }
2687     }
2688  the_end:
2689     stream_component_close(is, start_index);
2690     stream_component_open(is, stream_index);
2691 }
2692
2693
2694 static void toggle_full_screen(void)
2695 {
2696     is_full_screen = !is_full_screen;
2697     if (!fs_screen_width) {
2698         /* use default SDL method */
2699 //        SDL_WM_ToggleFullScreen(screen);
2700     }
2701     video_open(cur_stream);
2702 }
2703
2704 static void toggle_pause(void)
2705 {
2706     if (cur_stream)
2707         stream_toggle_pause(cur_stream);
2708     step = 0;
2709 }
2710
2711 static void step_to_next_frame(void)
2712 {
2713     if (cur_stream) {
2714         /* if the stream is paused, unpause it, then step */
2715         if (cur_stream->paused)
2716             stream_toggle_pause(cur_stream);
2717     }
2718     step = 1;
2719 }
2720
2721 static void toggle_audio_display(void)
2722 {
2723     if (cur_stream) {
2724         int bgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0x00);
2725         cur_stream->show_mode = (cur_stream->show_mode + 1) % SHOW_MODE_NB;
2726         fill_rectangle(screen,
2727                     cur_stream->xleft, cur_stream->ytop, cur_stream->width, cur_stream->height,
2728                     bgcolor);
2729         SDL_UpdateRect(screen, cur_stream->xleft, cur_stream->ytop, cur_stream->width, cur_stream->height);
2730     }
2731 }
2732
2733 /* handle an event sent by the GUI */
2734 static void event_loop(void)
2735 {
2736     SDL_Event event;
2737     double incr, pos, frac;
2738
2739     for(;;) {
2740         double x;
2741         SDL_WaitEvent(&event);
2742         switch(event.type) {
2743         case SDL_KEYDOWN:
2744             if (exit_on_keydown) {
2745                 do_exit();
2746                 break;
2747             }
2748             switch(event.key.keysym.sym) {
2749             case SDLK_ESCAPE:
2750             case SDLK_q:
2751                 do_exit();
2752                 break;
2753             case SDLK_f:
2754                 toggle_full_screen();
2755                 break;
2756             case SDLK_p:
2757             case SDLK_SPACE:
2758                 toggle_pause();
2759                 break;
2760             case SDLK_s: //S: Step to next frame
2761                 step_to_next_frame();
2762                 break;
2763             case SDLK_a:
2764                 if (cur_stream)
2765                     stream_cycle_channel(cur_stream, AVMEDIA_TYPE_AUDIO);
2766                 break;
2767             case SDLK_v:
2768                 if (cur_stream)
2769                     stream_cycle_channel(cur_stream, AVMEDIA_TYPE_VIDEO);
2770                 break;
2771             case SDLK_t:
2772                 if (cur_stream)
2773                     stream_cycle_channel(cur_stream, AVMEDIA_TYPE_SUBTITLE);
2774                 break;
2775             case SDLK_w:
2776                 toggle_audio_display();
2777                 break;
2778             case SDLK_LEFT:
2779                 incr = -10.0;
2780                 goto do_seek;
2781             case SDLK_RIGHT:
2782                 incr = 10.0;
2783                 goto do_seek;
2784             case SDLK_UP:
2785                 incr = 60.0;
2786                 goto do_seek;
2787             case SDLK_DOWN:
2788                 incr = -60.0;
2789             do_seek:
2790                 if (cur_stream) {
2791                     if (seek_by_bytes) {
2792                         if (cur_stream->video_stream >= 0 && cur_stream->video_current_pos>=0){
2793                             pos= cur_stream->video_current_pos;
2794                         }else if(cur_stream->audio_stream >= 0 && cur_stream->audio_pkt.pos>=0){
2795                             pos= cur_stream->audio_pkt.pos;
2796                         }else
2797                             pos = avio_tell(cur_stream->ic->pb);
2798                         if (cur_stream->ic->bit_rate)
2799                             incr *= cur_stream->ic->bit_rate / 8.0;
2800                         else
2801                             incr *= 180000.0;
2802                         pos += incr;
2803                         stream_seek(cur_stream, pos, incr, 1);
2804                     } else {
2805                         pos = get_master_clock(cur_stream);
2806                         pos += incr;
2807                         stream_seek(cur_stream, (int64_t)(pos * AV_TIME_BASE), (int64_t)(incr * AV_TIME_BASE), 0);
2808                     }
2809                 }
2810                 break;
2811             default:
2812                 break;
2813             }
2814             break;
2815         case SDL_MOUSEBUTTONDOWN:
2816             if (exit_on_mousedown) {
2817                 do_exit();
2818                 break;
2819             }
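            /* fall through: a mouse click is also handled as a seek request */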
2820         case SDL_MOUSEMOTION:
2821             if(event.type ==SDL_MOUSEBUTTONDOWN){
2822                 x= event.button.x;
2823             }else{
2824                 if(event.motion.state != SDL_PRESSED)
2825                     break;
2826                 x= event.motion.x;
2827             }
2828             if (cur_stream) {
2829                 if(seek_by_bytes || cur_stream->ic->duration<=0){
2830                     uint64_t size=  avio_size(cur_stream->ic->pb);
2831                     stream_seek(cur_stream, size*x/cur_stream->width, 0, 1);
2832                 }else{
2833                     int64_t ts;
2834                     int ns, hh, mm, ss;
2835                     int tns, thh, tmm, tss;
2836                     tns = cur_stream->ic->duration/1000000LL;
2837                     thh = tns/3600;
2838                     tmm = (tns%3600)/60;
2839                     tss = (tns%60);
2840                     frac = x/cur_stream->width;
2841                     ns = frac*tns;
2842                     hh = ns/3600;
2843                     mm = (ns%3600)/60;
2844                     ss = (ns%60);
2845                     fprintf(stderr, "Seek to %2.0f%% (%2d:%02d:%02d) of total duration (%2d:%02d:%02d)       \n", frac*100,
2846                             hh, mm, ss, thh, tmm, tss);
2847                     ts = frac*cur_stream->ic->duration;
2848                     if (cur_stream->ic->start_time != AV_NOPTS_VALUE)
2849                         ts += cur_stream->ic->start_time;
2850                     stream_seek(cur_stream, ts, 0, 0);
2851                 }
2852             }
2853             break;
2854         case SDL_VIDEORESIZE:
2855             if (cur_stream) {
2856                 screen = SDL_SetVideoMode(event.resize.w, event.resize.h, 0,
2857                                           SDL_HWSURFACE|SDL_RESIZABLE|SDL_ASYNCBLIT|SDL_HWACCEL);
2858                 screen_width = cur_stream->width = event.resize.w;
2859                 screen_height= cur_stream->height= event.resize.h;
2860             }
2861             break;
2862         case SDL_QUIT:
2863         case FF_QUIT_EVENT:
2864             do_exit();
2865             break;
2866         case FF_ALLOC_EVENT:
2867             video_open(event.user.data1);
2868             alloc_picture(event.user.data1);
2869             break;
2870         case FF_REFRESH_EVENT:
2871             video_refresh(event.user.data1);
2872             cur_stream->refresh=0;
2873             break;
2874         default:
2875             break;
2876         }
2877     }
2878 }
2879
2880 static void opt_frame_size(const char *arg)
2881 {
2882     if (av_parse_video_size(&frame_width, &frame_height, arg) < 0) {
2883         fprintf(stderr, "Incorrect frame size\n");
2884         exit(1);
2885     }
2886     if ((frame_width % 2) != 0 || (frame_height % 2) != 0) {
2887         fprintf(stderr, "Frame size must be a multiple of 2\n");
2888         exit(1);
2889     }
2890 }
2891
2892 static int opt_width(const char *opt, const char *arg)
2893 {
2894     screen_width = parse_number_or_die(opt, arg, OPT_INT64, 1, INT_MAX);
2895     return 0;
2896 }
2897
2898 static int opt_height(const char *opt, const char *arg)
2899 {
2900     screen_height = parse_number_or_die(opt, arg, OPT_INT64, 1, INT_MAX);
2901     return 0;
2902 }
2903
2904 static void opt_format(const char *arg)
2905 {
2906     file_iformat = av_find_input_format(arg);
2907     if (!file_iformat) {
2908         fprintf(stderr, "Unknown input format: %s\n", arg);
2909         exit(1);
2910     }
2911 }
2912
2913 static void opt_frame_pix_fmt(const char *arg)
2914 {
2915     frame_pix_fmt = av_get_pix_fmt(arg);
2916 }
2917
2918 static int opt_sync(const char *opt, const char *arg)
2919 {
2920     if (!strcmp(arg, "audio"))
2921         av_sync_type = AV_SYNC_AUDIO_MASTER;
2922     else if (!strcmp(arg, "video"))
2923         av_sync_type = AV_SYNC_VIDEO_MASTER;
2924     else if (!strcmp(arg, "ext"))
2925         av_sync_type = AV_SYNC_EXTERNAL_CLOCK;
2926     else {
2927         fprintf(stderr, "Unknown value for %s: %s\n", opt, arg);
2928         exit(1);
2929     }
2930     return 0;
2931 }
2932
2933 static int opt_seek(const char *opt, const char *arg)
2934 {
2935     start_time = parse_time_or_die(opt, arg, 1);
2936     return 0;
2937 }
2938
2939 static int opt_duration(const char *opt, const char *arg)
2940 {
2941     duration = parse_time_or_die(opt, arg, 1);
2942     return 0;
2943 }
2944
2945 static int opt_debug(const char *opt, const char *arg)
2946 {
2947     av_log_set_level(99);
2948     debug = parse_number_or_die(opt, arg, OPT_INT64, 0, INT_MAX);
2949     return 0;
2950 }
2951
2952 static int opt_vismv(const char *opt, const char *arg)
2953 {
2954     debug_mv = parse_number_or_die(opt, arg, OPT_INT64, INT_MIN, INT_MAX);
2955     return 0;
2956 }
2957
2958 static int opt_thread_count(const char *opt, const char *arg)
2959 {
2960     thread_count= parse_number_or_die(opt, arg, OPT_INT64, 0, INT_MAX);
2961 #if !HAVE_THREADS
2962     fprintf(stderr, "Warning: not compiled with thread support, using thread emulation\n");
2963 #endif
2964     return 0;
2965 }
2966
2967 static int opt_show_mode(const char *opt, const char *arg)
2968 {
2969     show_mode = !strcmp(arg, "video") ? SHOW_MODE_VIDEO :
2970                 !strcmp(arg, "waves") ? SHOW_MODE_WAVES :
2971                 !strcmp(arg, "rdft" ) ? SHOW_MODE_RDFT  :
2972                 parse_number_or_die(opt, arg, OPT_INT, 0, SHOW_MODE_NB-1);
2973     return 0;
2974 }
2975
2976 static const OptionDef options[] = {
2977 #include "cmdutils_common_opts.h"
2978     { "x", HAS_ARG | OPT_FUNC2, {(void*)opt_width}, "force displayed width", "width" },
2979     { "y", HAS_ARG | OPT_FUNC2, {(void*)opt_height}, "force displayed height", "height" },
2980     { "s", HAS_ARG | OPT_VIDEO, {(void*)opt_frame_size}, "set frame size (WxH or abbreviation)", "size" },
2981     { "fs", OPT_BOOL, {(void*)&is_full_screen}, "force full screen" },
2982     { "an", OPT_BOOL, {(void*)&audio_disable}, "disable audio" },
2983     { "vn", OPT_BOOL, {(void*)&video_disable}, "disable video" },
2984     { "ast", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&wanted_stream[AVMEDIA_TYPE_AUDIO]}, "select desired audio stream", "stream_number" },
2985     { "vst", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&wanted_stream[AVMEDIA_TYPE_VIDEO]}, "select desired video stream", "stream_number" },
2986     { "sst", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&wanted_stream[AVMEDIA_TYPE_SUBTITLE]}, "select desired subtitle stream", "stream_number" },
2987     { "ss", HAS_ARG | OPT_FUNC2, {(void*)&opt_seek}, "seek to a given position in seconds", "pos" },
2988     { "t", HAS_ARG | OPT_FUNC2, {(void*)&opt_duration}, "play \"duration\" seconds of audio/video", "duration" },
2989     { "bytes", OPT_INT | HAS_ARG, {(void*)&seek_by_bytes}, "seek by bytes 0=off 1=on -1=auto", "val" },
2990     { "nodisp", OPT_BOOL, {(void*)&display_disable}, "disable graphical display" },
2991     { "f", HAS_ARG, {(void*)opt_format}, "force format", "fmt" },
2992     { "pix_fmt", HAS_ARG | OPT_EXPERT | OPT_VIDEO, {(void*)opt_frame_pix_fmt}, "set pixel format", "format" },
2993     { "stats", OPT_BOOL | OPT_EXPERT, {(void*)&show_status}, "show status", "" },
2994     { "debug", HAS_ARG | OPT_FUNC2 | OPT_EXPERT, {(void*)opt_debug}, "print specific debug info", "" },
2995     { "bug", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&workaround_bugs}, "workaround bugs", "" },
2996     { "vismv", HAS_ARG | OPT_FUNC2 | OPT_EXPERT, {(void*)opt_vismv}, "visualize motion vectors", "" },
2997     { "fast", OPT_BOOL | OPT_EXPERT, {(void*)&fast}, "non spec compliant optimizations", "" },
2998     { "genpts", OPT_BOOL | OPT_EXPERT, {(void*)&genpts}, "generate pts", "" },
2999     { "drp", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&decoder_reorder_pts}, "let decoder reorder pts 0=off 1=on -1=auto", ""},
3000     { "lowres", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&lowres}, "", "" },
3001     { "skiploop", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&skip_loop_filter}, "", "" },
3002     { "skipframe", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&skip_frame}, "", "" },
3003     { "skipidct", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&skip_idct}, "", "" },
3004     { "idct", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&idct}, "set idct algo",  "algo" },
3005     { "er", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&error_recognition}, "set error detection threshold (0-4)",  "threshold" },
3006     { "ec", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&error_concealment}, "set error concealment options",  "bit_mask" },
3007     { "sync", HAS_ARG | OPT_FUNC2 | OPT_EXPERT, {(void*)opt_sync}, "set audio-video sync. type (type=audio/video/ext)", "type" },
3008     { "threads", HAS_ARG | OPT_FUNC2 | OPT_EXPERT, {(void*)opt_thread_count}, "thread count", "count" },
3009     { "autoexit", OPT_BOOL | OPT_EXPERT, {(void*)&autoexit}, "exit at the end", "" },
3010     { "exitonkeydown", OPT_BOOL | OPT_EXPERT, {(void*)&exit_on_keydown}, "exit on key down", "" },
3011     { "exitonmousedown", OPT_BOOL | OPT_EXPERT, {(void*)&exit_on_mousedown}, "exit on mouse down", "" },
3012     { "loop", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&loop}, "set number of times the playback shall be looped", "loop count" },
3013     { "framedrop", OPT_BOOL | OPT_EXPERT, {(void*)&framedrop}, "drop frames when cpu is too slow", "" },
3014     { "window_title", OPT_STRING | HAS_ARG, {(void*)&window_title}, "set window title", "window title" },
3015 #if CONFIG_AVFILTER
3016     { "vf", OPT_STRING | HAS_ARG, {(void*)&vfilters}, "video filters", "filter list" },
3017 #endif
3018     { "rdftspeed", OPT_INT | HAS_ARG| OPT_AUDIO | OPT_EXPERT, {(void*)&rdftspeed}, "rdft speed", "msecs" },
3019     { "showmode", HAS_ARG | OPT_FUNC2, {(void*)opt_show_mode}, "select show mode (0 = video, 1 = waves, 2 = RDFT)", "mode" },
3020     { "default", OPT_FUNC2 | HAS_ARG | OPT_AUDIO | OPT_VIDEO | OPT_EXPERT, {(void*)opt_default}, "generic catch all option", "" },
3021     { "i", OPT_DUMMY, {NULL}, "ffmpeg compatibility dummy option", ""},
3022     { NULL, },
3023 };
3024
3025 static void show_usage(void)
3026 {
3027     printf("Simple media player\n");
3028     printf("usage: ffplay [options] input_file\n");
3029     printf("\n");
3030 }
3031
3032 static void show_help(void)
3033 {
3034     av_log_set_callback(log_callback_help);
3035     show_usage();
3036     show_help_options(options, "Main options:\n",
3037                       OPT_EXPERT, 0);
3038     show_help_options(options, "\nAdvanced options:\n",
3039                       OPT_EXPERT, OPT_EXPERT);
3040     printf("\n");
3041     av_opt_show2(avcodec_opts[0], NULL,
3042                  AV_OPT_FLAG_DECODING_PARAM, 0);
3043     printf("\n");
3044     av_opt_show2(avformat_opts, NULL,
3045                  AV_OPT_FLAG_DECODING_PARAM, 0);
3046 #if !CONFIG_AVFILTER
3047     printf("\n");
3048     av_opt_show2(sws_opts, NULL,
3049                  AV_OPT_FLAG_ENCODING_PARAM, 0);
3050 #endif
3051     printf("\nWhile playing:\n"
3052            "q, ESC              quit\n"
3053            "f                   toggle full screen\n"
3054            "p, SPC              pause\n"
3055            "a                   cycle audio channel\n"
3056            "v                   cycle video channel\n"
3057            "t                   cycle subtitle channel\n"
3058            "w                   show audio waves\n"
3059            "s                   activate frame-step mode\n"
3060            "left/right          seek backward/forward 10 seconds\n"
3061            "down/up             seek backward/forward 1 minute\n"
3062            "mouse click         seek to percentage in file corresponding to fraction of width\n"
3063            );
3064 }
3065
3066 static void opt_input_file(const char *filename)
3067 {
3068     if (input_filename) {
3069         fprintf(stderr, "Argument '%s' provided as input filename, but '%s' was already specified.\n",
3070                 filename, input_filename);
3071         exit(1);
3072     }
3073     if (!strcmp(filename, "-"))
3074         filename = "pipe:";
3075     input_filename = filename;
3076 }
3077
3078 /* program entry point */
3079 int main(int argc, char **argv)
3080 {
3081     int flags;
3082
3083     av_log_set_flags(AV_LOG_SKIP_REPEATED);
3084
3085     /* register all codecs, demuxers and protocols */
3086     avcodec_register_all();
3087 #if CONFIG_AVDEVICE
3088     avdevice_register_all();
3089 #endif
3090 #if CONFIG_AVFILTER
3091     avfilter_register_all();
3092 #endif
3093     av_register_all();
3094
3095     init_opts();
3096
3097     show_banner();
3098
3099     parse_options(argc, argv, options, opt_input_file);
3100
3101     if (!input_filename) {
3102         show_usage();
3103         fprintf(stderr, "An input file must be specified\n");
3104         fprintf(stderr, "Use -h to get full help or, even better, run 'man ffplay'\n");
3105         exit(1);
3106     }
3107
3108     if (display_disable) {
3109         video_disable = 1;
3110     }
3111     flags = SDL_INIT_VIDEO | SDL_INIT_AUDIO | SDL_INIT_TIMER;
3112 #if !defined(__MINGW32__) && !defined(__APPLE__)
3113     flags |= SDL_INIT_EVENTTHREAD; /* Not supported on Windows or Mac OS X */
3114 #endif
3115     if (SDL_Init (flags)) {
3116         fprintf(stderr, "Could not initialize SDL - %s\n", SDL_GetError());
3117         exit(1);
3118     }
3119
3120     if (!display_disable) {
3121 #if HAVE_SDL_VIDEO_SIZE
3122         const SDL_VideoInfo *vi = SDL_GetVideoInfo();
3123         fs_screen_width = vi->current_w;
3124         fs_screen_height = vi->current_h;
3125 #endif
3126     }
3127
3128     SDL_EventState(SDL_ACTIVEEVENT, SDL_IGNORE);
3129     SDL_EventState(SDL_SYSWMEVENT, SDL_IGNORE);
3130     SDL_EventState(SDL_USEREVENT, SDL_IGNORE);
3131
3132     av_init_packet(&flush_pkt);
3133     flush_pkt.data= (uint8_t *)"FLUSH";
3134
3135     cur_stream = stream_open(input_filename, file_iformat);
3136
3137     event_loop();
3138
3139     /* never returns */
3140
3141     return 0;
3142 }