/* ffplay.c — FFmpeg (blob 2e50920b9fddf8be67d852912af92dacb4623784) */
1 /*
2  * FFplay : Simple Media Player based on the FFmpeg libraries
3  * Copyright (c) 2003 Fabrice Bellard
4  *
5  * This file is part of FFmpeg.
6  *
7  * FFmpeg is free software; you can redistribute it and/or
8  * modify it under the terms of the GNU Lesser General Public
9  * License as published by the Free Software Foundation; either
10  * version 2.1 of the License, or (at your option) any later version.
11  *
12  * FFmpeg is distributed in the hope that it will be useful,
13  * but WITHOUT ANY WARRANTY; without even the implied warranty of
14  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
15  * Lesser General Public License for more details.
16  *
17  * You should have received a copy of the GNU Lesser General Public
18  * License along with FFmpeg; if not, write to the Free Software
19  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
20  */
21
22 #define _XOPEN_SOURCE 600
23
24 #include "config.h"
25 #include <inttypes.h>
26 #include <math.h>
27 #include <limits.h>
28 #include "libavutil/avstring.h"
29 #include "libavutil/colorspace.h"
30 #include "libavutil/pixdesc.h"
31 #include "libavcore/imgutils.h"
32 #include "libavcore/parseutils.h"
33 #include "libavcore/samplefmt.h"
34 #include "libavformat/avformat.h"
35 #include "libavdevice/avdevice.h"
36 #include "libswscale/swscale.h"
37 #include "libavcodec/audioconvert.h"
38 #include "libavcodec/opt.h"
39 #include "libavcodec/avfft.h"
40
41 #if CONFIG_AVFILTER
42 # include "libavfilter/avfilter.h"
43 # include "libavfilter/avfiltergraph.h"
44 #endif
45
46 #include "cmdutils.h"
47
48 #include <SDL.h>
49 #include <SDL_thread.h>
50
51 #ifdef __MINGW32__
52 #undef main /* We don't want SDL to override our main() */
53 #endif
54
55 #include <unistd.h>
56 #include <assert.h>
57
const char program_name[] = "FFplay";
const int program_birth_year = 2003;

//#define DEBUG
//#define DEBUG_SYNC

/* upper bound on bytes buffered per packet queue (presumably checked by the
   read thread before demuxing more — the reader is outside this chunk) */
#define MAX_QUEUE_SIZE (15 * 1024 * 1024)
/* minimum fill level of the audio queue before playback is considered ready */
#define MIN_AUDIOQ_SIZE (20 * 16 * 1024)
#define MIN_FRAMES 5

/* SDL audio buffer size, in samples. Should be small to have precise
   A/V sync as SDL does not have hardware buffer fullness info. */
#define SDL_AUDIO_BUFFER_SIZE 1024

/* no AV sync correction is done if below the AV sync threshold */
#define AV_SYNC_THRESHOLD 0.01
/* no AV correction is done if too big error */
#define AV_NOSYNC_THRESHOLD 10.0

#define FRAME_SKIP_FACTOR 0.05

/* maximum audio speed change to get correct sync */
#define SAMPLE_CORRECTION_PERCENT_MAX 10

/* we use about AUDIO_DIFF_AVG_NB A-V differences to make the average */
#define AUDIO_DIFF_AVG_NB   20

/* NOTE: the size must be big enough to compensate the hardware audio buffersize size */
#define SAMPLE_ARRAY_SIZE (2*65536)

/* scaling algorithm used when converting frames for display */
static int sws_flags = SWS_BICUBIC;
89
/* Thread-safe FIFO of demuxed packets, shared between the read thread
 * (producer) and a decoder thread (consumer). */
typedef struct PacketQueue {
    AVPacketList *first_pkt, *last_pkt; /* head/tail of singly linked list */
    int nb_packets;                     /* number of queued packets */
    int size;                           /* payload bytes + list-node overhead */
    int abort_request;                  /* set to make blocked readers return -1 */
    SDL_mutex *mutex;                   /* protects every field above */
    SDL_cond *cond;                     /* signaled on put and on abort */
} PacketQueue;
98
#define VIDEO_PICTURE_QUEUE_SIZE 2
#define SUBPICTURE_QUEUE_SIZE 4

/* One slot of the decoded-picture ring buffer handed from the video
 * decoder to the display code. */
typedef struct VideoPicture {
    double pts;                                  ///<presentation time stamp for this picture
    double target_clock;                         ///<av_gettime() time at which this should be displayed ideally
    int64_t pos;                                 ///<byte position in file
    SDL_Overlay *bmp;                            ///<SDL YUV overlay holding the pixel data
    int width, height; /* source height & width */
    int allocated;     /* nonzero once bmp exists — presumably set by the alloc event handler; confirm outside this chunk */
    enum PixelFormat pix_fmt;

#if CONFIG_AVFILTER
    AVFilterBufferRef *picref;                   ///<filtered-frame reference keeping the buffer alive
#endif
} VideoPicture;
115
/* A decoded subtitle together with the pts at which it becomes visible. */
typedef struct SubPicture {
    double pts; /* presentation time stamp for this picture */
    AVSubtitle sub; /* decoded subtitle rectangles; freed via free_subpicture() */
} SubPicture;
120
/* Which clock is the master for A/V synchronization. */
enum {
    AV_SYNC_AUDIO_MASTER, /* default choice */
    AV_SYNC_VIDEO_MASTER,
    AV_SYNC_EXTERNAL_CLOCK, /* synchronize to an external clock */
};
126
/* All state for one open media file: demuxer, per-stream decoders,
 * packet/picture queues, sync clocks and display geometry. */
typedef struct VideoState {
    SDL_Thread *parse_tid;   /* demuxer (read) thread */
    SDL_Thread *video_tid;   /* video decoder thread */
    SDL_Thread *refresh_tid; /* thread that posts FF_REFRESH_EVENTs */
    AVInputFormat *iformat;
    int no_background;       /* skip repainting the border background once */
    int abort_request;       /* global quit flag for all threads */
    int paused;
    int last_paused;
    /* pending seek request, consumed by the read thread */
    int seek_req;
    int seek_flags;
    int64_t seek_pos;
    int64_t seek_rel;
    int read_pause_return;
    AVFormatContext *ic;
    int dtg_active_format;

    int audio_stream;        /* index of the audio stream in ic, or -1 */

    int av_sync_type;        /* one of the AV_SYNC_* constants */
    double external_clock; /* external clock base */
    int64_t external_clock_time;

    double audio_clock;
    double audio_diff_cum; /* used for AV difference average computation */
    double audio_diff_avg_coef;
    double audio_diff_threshold;
    int audio_diff_avg_count;
    AVStream *audio_st;
    PacketQueue audioq;
    int audio_hw_buf_size;
    /* samples output by the codec. we reserve more space for avsync
       compensation */
    DECLARE_ALIGNED(16,uint8_t,audio_buf1)[(AVCODEC_MAX_AUDIO_FRAME_SIZE * 3) / 2];
    DECLARE_ALIGNED(16,uint8_t,audio_buf2)[(AVCODEC_MAX_AUDIO_FRAME_SIZE * 3) / 2];
    uint8_t *audio_buf;          /* points at audio_buf1 or audio_buf2 */
    unsigned int audio_buf_size; /* in bytes */
    int audio_buf_index; /* in bytes */
    AVPacket audio_pkt_temp;
    AVPacket audio_pkt;
    enum AVSampleFormat audio_src_fmt;
    AVAudioConvert *reformat_ctx;

    int show_audio; /* if true, display audio samples */
    int16_t sample_array[SAMPLE_ARRAY_SIZE]; /* ring buffer of recent samples for visualization */
    int sample_array_index;
    int last_i_start;
    RDFTContext *rdft;   /* FFT context for the spectrogram display */
    int rdft_bits;
    FFTSample *rdft_data;
    int xpos;            /* current x column of the scrolling spectrogram */

    SDL_Thread *subtitle_tid;
    int subtitle_stream;
    int subtitle_stream_changed;
    AVStream *subtitle_st;
    PacketQueue subtitleq;
    SubPicture subpq[SUBPICTURE_QUEUE_SIZE];
    int subpq_size, subpq_rindex, subpq_windex;
    SDL_mutex *subpq_mutex;
    SDL_cond *subpq_cond;

    double frame_timer;
    double frame_last_pts;
    double frame_last_delay;
    double video_clock;                          ///<pts of last decoded frame / predicted pts of next decoded frame
    int video_stream;
    AVStream *video_st;
    PacketQueue videoq;
    double video_current_pts;                    ///<current displayed pts (different from video_clock if frame fifos are used)
    double video_current_pts_drift;              ///<video_current_pts - time (av_gettime) at which we updated video_current_pts - used to have running video pts
    int64_t video_current_pos;                   ///<current displayed file pos
    VideoPicture pictq[VIDEO_PICTURE_QUEUE_SIZE];
    int pictq_size, pictq_rindex, pictq_windex;
    SDL_mutex *pictq_mutex;
    SDL_cond *pictq_cond;
#if !CONFIG_AVFILTER
    struct SwsContext *img_convert_ctx;
#endif

    //    QETimer *video_timer;
    char filename[1024];
    int width, height, xleft, ytop; /* display window geometry */

#if CONFIG_AVFILTER
    AVFilterContext *out_video_filter;          ///<the last filter in the video chain
#endif

    float skip_frames;
    float skip_frames_index;
    int refresh;
} VideoState;
219
static void show_help(void);
static int audio_write_get_buf_size(VideoState *is);

/* options specified by the user */
static AVInputFormat *file_iformat;
static const char *input_filename;
static const char *window_title;
static int fs_screen_width;
static int fs_screen_height;
static int screen_width = 0;
static int screen_height = 0;
static int frame_width = 0;
static int frame_height = 0;
static enum PixelFormat frame_pix_fmt = PIX_FMT_NONE;
static int audio_disable;
static int video_disable;
/* user-forced stream indices per media type; -1 = pick automatically */
static int wanted_stream[AVMEDIA_TYPE_NB]={
    [AVMEDIA_TYPE_AUDIO]=-1,
    [AVMEDIA_TYPE_VIDEO]=-1,
    [AVMEDIA_TYPE_SUBTITLE]=-1,
};
static int seek_by_bytes=-1; /* -1 presumably means auto-detect — confirm in the option handler */
static int display_disable;
static int show_status = 1;
static int av_sync_type = AV_SYNC_AUDIO_MASTER;
static int64_t start_time = AV_NOPTS_VALUE;
static int64_t duration = AV_NOPTS_VALUE;
static int debug = 0;
static int debug_mv = 0;
static int step = 0;
static int thread_count = 1;
static int workaround_bugs = 1;
static int fast = 0;
static int genpts = 0;
static int lowres = 0;
static int idct = FF_IDCT_AUTO;
static enum AVDiscard skip_frame= AVDISCARD_DEFAULT;
static enum AVDiscard skip_idct= AVDISCARD_DEFAULT;
static enum AVDiscard skip_loop_filter= AVDISCARD_DEFAULT;
static int error_recognition = FF_ER_CAREFUL;
static int error_concealment = 3;
static int decoder_reorder_pts= -1;
static int autoexit;
static int exit_on_keydown;
static int exit_on_mousedown;
static int loop=1;
static int framedrop=1;

static int rdftspeed=20; /* spectrogram refresh period, milliseconds */
#if CONFIG_AVFILTER
static char *vfilters = NULL;
#endif

/* current context */
static int is_full_screen;
static VideoState *cur_stream;
static int64_t audio_callback_time;

/* sentinel packet queued at init (and presumably on seek) to mark a flush point */
static AVPacket flush_pkt;

/* custom SDL event codes used by the worker threads */
#define FF_ALLOC_EVENT   (SDL_USEREVENT)
#define FF_REFRESH_EVENT (SDL_USEREVENT + 1)
#define FF_QUIT_EVENT    (SDL_USEREVENT + 2)

static SDL_Surface *screen;

static int packet_queue_put(PacketQueue *q, AVPacket *pkt);
287
288 /* packet queue handling */
289 static void packet_queue_init(PacketQueue *q)
290 {
291     memset(q, 0, sizeof(PacketQueue));
292     q->mutex = SDL_CreateMutex();
293     q->cond = SDL_CreateCond();
294     packet_queue_put(q, &flush_pkt);
295 }
296
297 static void packet_queue_flush(PacketQueue *q)
298 {
299     AVPacketList *pkt, *pkt1;
300
301     SDL_LockMutex(q->mutex);
302     for(pkt = q->first_pkt; pkt != NULL; pkt = pkt1) {
303         pkt1 = pkt->next;
304         av_free_packet(&pkt->pkt);
305         av_freep(&pkt);
306     }
307     q->last_pkt = NULL;
308     q->first_pkt = NULL;
309     q->nb_packets = 0;
310     q->size = 0;
311     SDL_UnlockMutex(q->mutex);
312 }
313
314 static void packet_queue_end(PacketQueue *q)
315 {
316     packet_queue_flush(q);
317     SDL_DestroyMutex(q->mutex);
318     SDL_DestroyCond(q->cond);
319 }
320
321 static int packet_queue_put(PacketQueue *q, AVPacket *pkt)
322 {
323     AVPacketList *pkt1;
324
325     /* duplicate the packet */
326     if (pkt!=&flush_pkt && av_dup_packet(pkt) < 0)
327         return -1;
328
329     pkt1 = av_malloc(sizeof(AVPacketList));
330     if (!pkt1)
331         return -1;
332     pkt1->pkt = *pkt;
333     pkt1->next = NULL;
334
335
336     SDL_LockMutex(q->mutex);
337
338     if (!q->last_pkt)
339
340         q->first_pkt = pkt1;
341     else
342         q->last_pkt->next = pkt1;
343     q->last_pkt = pkt1;
344     q->nb_packets++;
345     q->size += pkt1->pkt.size + sizeof(*pkt1);
346     /* XXX: should duplicate packet data in DV case */
347     SDL_CondSignal(q->cond);
348
349     SDL_UnlockMutex(q->mutex);
350     return 0;
351 }
352
353 static void packet_queue_abort(PacketQueue *q)
354 {
355     SDL_LockMutex(q->mutex);
356
357     q->abort_request = 1;
358
359     SDL_CondSignal(q->cond);
360
361     SDL_UnlockMutex(q->mutex);
362 }
363
364 /* return < 0 if aborted, 0 if no packet and > 0 if packet.  */
365 static int packet_queue_get(PacketQueue *q, AVPacket *pkt, int block)
366 {
367     AVPacketList *pkt1;
368     int ret;
369
370     SDL_LockMutex(q->mutex);
371
372     for(;;) {
373         if (q->abort_request) {
374             ret = -1;
375             break;
376         }
377
378         pkt1 = q->first_pkt;
379         if (pkt1) {
380             q->first_pkt = pkt1->next;
381             if (!q->first_pkt)
382                 q->last_pkt = NULL;
383             q->nb_packets--;
384             q->size -= pkt1->pkt.size + sizeof(*pkt1);
385             *pkt = pkt1->pkt;
386             av_free(pkt1);
387             ret = 1;
388             break;
389         } else if (!block) {
390             ret = 0;
391             break;
392         } else {
393             SDL_CondWait(q->cond, q->mutex);
394         }
395     }
396     SDL_UnlockMutex(q->mutex);
397     return ret;
398 }
399
400 static inline void fill_rectangle(SDL_Surface *screen,
401                                   int x, int y, int w, int h, int color)
402 {
403     SDL_Rect rect;
404     rect.x = x;
405     rect.y = y;
406     rect.w = w;
407     rect.h = h;
408     SDL_FillRect(screen, &rect, color);
409 }
410
#if 0
/* NOTE(review): dead code — compiled out; the only caller is itself
 * commented out in video_image_display(). Kept for reference. */
/* draw only the border of a rectangle */
void fill_border(VideoState *s, int x, int y, int w, int h, int color)
{
    int w1, w2, h1, h2;

    /* fill the background */
    w1 = x;
    if (w1 < 0)
        w1 = 0;
    w2 = s->width - (x + w);
    if (w2 < 0)
        w2 = 0;
    h1 = y;
    if (h1 < 0)
        h1 = 0;
    h2 = s->height - (y + h);
    if (h2 < 0)
        h2 = 0;
    fill_rectangle(screen,
                   s->xleft, s->ytop,
                   w1, s->height,
                   color);
    fill_rectangle(screen,
                   s->xleft + s->width - w2, s->ytop,
                   w2, s->height,
                   color);
    fill_rectangle(screen,
                   s->xleft + w1, s->ytop,
                   s->width - w1 - w2, h1,
                   color);
    fill_rectangle(screen,
                   s->xleft + w1, s->ytop + s->height - h2,
                   s->width - w1 - w2, h2,
                   color);
}
#endif
448
/* Blend newp over oldp with alpha a; s is an extra precision shift used
 * when the chroma/alpha values have been accumulated over 2 (s=1) or
 * 4 (s=2) luma samples. */
#define ALPHA_BLEND(a, oldp, newp, s)\
((((oldp << s) * (255 - (a))) + (newp * (a))) / (255 << s))

/* Unpack an ARGB pixel from 32-bit memory. */
#define RGBA_IN(r, g, b, a, s)\
{\
    unsigned int v = ((const uint32_t *)(s))[0];\
    a = (v >> 24) & 0xff;\
    r = (v >> 16) & 0xff;\
    g = (v >> 8) & 0xff;\
    b = v & 0xff;\
}

/* Look up a palette index through pal and unpack it as AYUV. */
#define YUVA_IN(y, u, v, a, s, pal)\
{\
    unsigned int val = ((const uint32_t *)(pal))[*(const uint8_t*)(s)];\
    a = (val >> 24) & 0xff;\
    y = (val >> 16) & 0xff;\
    u = (val >> 8) & 0xff;\
    v = val & 0xff;\
}

/* Pack AYUV components back into 32-bit memory. */
#define YUVA_OUT(d, y, u, v, a)\
{\
    ((uint32_t *)(d))[0] = (a << 24) | (y << 16) | (u << 8) | v;\
}


/* subtitle bitmaps are palettized: one byte per pixel */
#define BPP 1
477
478 static void blend_subrect(AVPicture *dst, const AVSubtitleRect *rect, int imgw, int imgh)
479 {
480     int wrap, wrap3, width2, skip2;
481     int y, u, v, a, u1, v1, a1, w, h;
482     uint8_t *lum, *cb, *cr;
483     const uint8_t *p;
484     const uint32_t *pal;
485     int dstx, dsty, dstw, dsth;
486
487     dstw = av_clip(rect->w, 0, imgw);
488     dsth = av_clip(rect->h, 0, imgh);
489     dstx = av_clip(rect->x, 0, imgw - dstw);
490     dsty = av_clip(rect->y, 0, imgh - dsth);
491     lum = dst->data[0] + dsty * dst->linesize[0];
492     cb = dst->data[1] + (dsty >> 1) * dst->linesize[1];
493     cr = dst->data[2] + (dsty >> 1) * dst->linesize[2];
494
495     width2 = ((dstw + 1) >> 1) + (dstx & ~dstw & 1);
496     skip2 = dstx >> 1;
497     wrap = dst->linesize[0];
498     wrap3 = rect->pict.linesize[0];
499     p = rect->pict.data[0];
500     pal = (const uint32_t *)rect->pict.data[1];  /* Now in YCrCb! */
501
502     if (dsty & 1) {
503         lum += dstx;
504         cb += skip2;
505         cr += skip2;
506
507         if (dstx & 1) {
508             YUVA_IN(y, u, v, a, p, pal);
509             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
510             cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
511             cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
512             cb++;
513             cr++;
514             lum++;
515             p += BPP;
516         }
517         for(w = dstw - (dstx & 1); w >= 2; w -= 2) {
518             YUVA_IN(y, u, v, a, p, pal);
519             u1 = u;
520             v1 = v;
521             a1 = a;
522             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
523
524             YUVA_IN(y, u, v, a, p + BPP, pal);
525             u1 += u;
526             v1 += v;
527             a1 += a;
528             lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
529             cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
530             cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
531             cb++;
532             cr++;
533             p += 2 * BPP;
534             lum += 2;
535         }
536         if (w) {
537             YUVA_IN(y, u, v, a, p, pal);
538             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
539             cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
540             cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
541             p++;
542             lum++;
543         }
544         p += wrap3 - dstw * BPP;
545         lum += wrap - dstw - dstx;
546         cb += dst->linesize[1] - width2 - skip2;
547         cr += dst->linesize[2] - width2 - skip2;
548     }
549     for(h = dsth - (dsty & 1); h >= 2; h -= 2) {
550         lum += dstx;
551         cb += skip2;
552         cr += skip2;
553
554         if (dstx & 1) {
555             YUVA_IN(y, u, v, a, p, pal);
556             u1 = u;
557             v1 = v;
558             a1 = a;
559             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
560             p += wrap3;
561             lum += wrap;
562             YUVA_IN(y, u, v, a, p, pal);
563             u1 += u;
564             v1 += v;
565             a1 += a;
566             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
567             cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
568             cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
569             cb++;
570             cr++;
571             p += -wrap3 + BPP;
572             lum += -wrap + 1;
573         }
574         for(w = dstw - (dstx & 1); w >= 2; w -= 2) {
575             YUVA_IN(y, u, v, a, p, pal);
576             u1 = u;
577             v1 = v;
578             a1 = a;
579             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
580
581             YUVA_IN(y, u, v, a, p + BPP, pal);
582             u1 += u;
583             v1 += v;
584             a1 += a;
585             lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
586             p += wrap3;
587             lum += wrap;
588
589             YUVA_IN(y, u, v, a, p, pal);
590             u1 += u;
591             v1 += v;
592             a1 += a;
593             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
594
595             YUVA_IN(y, u, v, a, p + BPP, pal);
596             u1 += u;
597             v1 += v;
598             a1 += a;
599             lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
600
601             cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 2);
602             cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 2);
603
604             cb++;
605             cr++;
606             p += -wrap3 + 2 * BPP;
607             lum += -wrap + 2;
608         }
609         if (w) {
610             YUVA_IN(y, u, v, a, p, pal);
611             u1 = u;
612             v1 = v;
613             a1 = a;
614             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
615             p += wrap3;
616             lum += wrap;
617             YUVA_IN(y, u, v, a, p, pal);
618             u1 += u;
619             v1 += v;
620             a1 += a;
621             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
622             cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
623             cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
624             cb++;
625             cr++;
626             p += -wrap3 + BPP;
627             lum += -wrap + 1;
628         }
629         p += wrap3 + (wrap3 - dstw * BPP);
630         lum += wrap + (wrap - dstw - dstx);
631         cb += dst->linesize[1] - width2 - skip2;
632         cr += dst->linesize[2] - width2 - skip2;
633     }
634     /* handle odd height */
635     if (h) {
636         lum += dstx;
637         cb += skip2;
638         cr += skip2;
639
640         if (dstx & 1) {
641             YUVA_IN(y, u, v, a, p, pal);
642             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
643             cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
644             cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
645             cb++;
646             cr++;
647             lum++;
648             p += BPP;
649         }
650         for(w = dstw - (dstx & 1); w >= 2; w -= 2) {
651             YUVA_IN(y, u, v, a, p, pal);
652             u1 = u;
653             v1 = v;
654             a1 = a;
655             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
656
657             YUVA_IN(y, u, v, a, p + BPP, pal);
658             u1 += u;
659             v1 += v;
660             a1 += a;
661             lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
662             cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u, 1);
663             cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v, 1);
664             cb++;
665             cr++;
666             p += 2 * BPP;
667             lum += 2;
668         }
669         if (w) {
670             YUVA_IN(y, u, v, a, p, pal);
671             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
672             cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
673             cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
674         }
675     }
676 }
677
/* Release the decoded subtitle data held by a subtitle-queue slot. */
static void free_subpicture(SubPicture *sp)
{
    avsubtitle_free(&sp->sub);
}
682
/* Display the picture at the read index of the picture queue: compute the
 * display aspect ratio, blend any pending subtitle into the overlay, letterbox
 * the image inside the window and blit the YUV overlay. */
static void video_image_display(VideoState *is)
{
    VideoPicture *vp;
    SubPicture *sp;
    AVPicture pict;
    float aspect_ratio;
    int width, height, x, y;
    SDL_Rect rect;
    int i;

    vp = &is->pictq[is->pictq_rindex];
    if (vp->bmp) {
#if CONFIG_AVFILTER
         if (vp->picref->video->pixel_aspect.num == 0)
             aspect_ratio = 0;
         else
             aspect_ratio = av_q2d(vp->picref->video->pixel_aspect);
#else

        /* XXX: use variable in the frame */
        if (is->video_st->sample_aspect_ratio.num)
            aspect_ratio = av_q2d(is->video_st->sample_aspect_ratio);
        else if (is->video_st->codec->sample_aspect_ratio.num)
            aspect_ratio = av_q2d(is->video_st->codec->sample_aspect_ratio);
        else
            aspect_ratio = 0;
#endif
        /* unknown/invalid sample aspect: assume square pixels */
        if (aspect_ratio <= 0.0)
            aspect_ratio = 1.0;
        aspect_ratio *= (float)vp->width / (float)vp->height;
        /* if an active format is indicated, then it overrides the
           mpeg format */
#if 0
        if (is->video_st->codec->dtg_active_format != is->dtg_active_format) {
            is->dtg_active_format = is->video_st->codec->dtg_active_format;
            printf("dtg_active_format=%d\n", is->dtg_active_format);
        }
#endif
#if 0
        switch(is->video_st->codec->dtg_active_format) {
        case FF_DTG_AFD_SAME:
        default:
            /* nothing to do */
            break;
        case FF_DTG_AFD_4_3:
            aspect_ratio = 4.0 / 3.0;
            break;
        case FF_DTG_AFD_16_9:
            aspect_ratio = 16.0 / 9.0;
            break;
        case FF_DTG_AFD_14_9:
            aspect_ratio = 14.0 / 9.0;
            break;
        case FF_DTG_AFD_4_3_SP_14_9:
            aspect_ratio = 14.0 / 9.0;
            break;
        case FF_DTG_AFD_16_9_SP_14_9:
            aspect_ratio = 14.0 / 9.0;
            break;
        case FF_DTG_AFD_SP_4_3:
            aspect_ratio = 4.0 / 3.0;
            break;
        }
#endif

        if (is->subtitle_st)
        {
            if (is->subpq_size > 0)
            {
                sp = &is->subpq[is->subpq_rindex];

                /* blend only once the subtitle's display window has started */
                if (vp->pts >= sp->pts + ((float) sp->sub.start_display_time / 1000))
                {
                    SDL_LockYUVOverlay (vp->bmp);

                    /* planes 1 and 2 are swapped: SDL YV12 overlays store V
                       before U, while AVPicture expects U then V */
                    pict.data[0] = vp->bmp->pixels[0];
                    pict.data[1] = vp->bmp->pixels[2];
                    pict.data[2] = vp->bmp->pixels[1];

                    pict.linesize[0] = vp->bmp->pitches[0];
                    pict.linesize[1] = vp->bmp->pitches[2];
                    pict.linesize[2] = vp->bmp->pitches[1];

                    for (i = 0; i < sp->sub.num_rects; i++)
                        blend_subrect(&pict, sp->sub.rects[i],
                                      vp->bmp->w, vp->bmp->h);

                    SDL_UnlockYUVOverlay (vp->bmp);
                }
            }
        }


        /* XXX: we suppose the screen has a 1.0 pixel ratio */
        /* letterbox: fit the aspect-corrected image into the window,
           keeping even dimensions for the YUV overlay */
        height = is->height;
        width = ((int)rint(height * aspect_ratio)) & ~1;
        if (width > is->width) {
            width = is->width;
            height = ((int)rint(width / aspect_ratio)) & ~1;
        }
        x = (is->width - width) / 2;
        y = (is->height - height) / 2;
        if (!is->no_background) {
            /* fill the background */
            //            fill_border(is, x, y, width, height, QERGB(0x00, 0x00, 0x00));
        } else {
            is->no_background = 0;
        }
        rect.x = is->xleft + x;
        rect.y = is->ytop  + y;
        rect.w = width;
        rect.h = height;
        SDL_DisplayYUVOverlay(vp->bmp, &rect);
    } else {
#if 0
        fill_rectangle(screen,
                       is->xleft, is->ytop, is->width, is->height,
                       QERGB(0x00, 0x00, 0x00));
#endif
    }
}
804
/* Euclidean modulo: result is always non-negative for b > 0
 * (C's % can yield a negative remainder for negative a). */
static inline int compute_mod(int a, int b)
{
    int m = a % b;
    return m < 0 ? m + b : m;
}
813
814 static void video_audio_display(VideoState *s)
815 {
816     int i, i_start, x, y1, y, ys, delay, n, nb_display_channels;
817     int ch, channels, h, h2, bgcolor, fgcolor;
818     int16_t time_diff;
819     int rdft_bits, nb_freq;
820
821     for(rdft_bits=1; (1<<rdft_bits)<2*s->height; rdft_bits++)
822         ;
823     nb_freq= 1<<(rdft_bits-1);
824
825     /* compute display index : center on currently output samples */
826     channels = s->audio_st->codec->channels;
827     nb_display_channels = channels;
828     if (!s->paused) {
829         int data_used= s->show_audio==1 ? s->width : (2*nb_freq);
830         n = 2 * channels;
831         delay = audio_write_get_buf_size(s);
832         delay /= n;
833
834         /* to be more precise, we take into account the time spent since
835            the last buffer computation */
836         if (audio_callback_time) {
837             time_diff = av_gettime() - audio_callback_time;
838             delay -= (time_diff * s->audio_st->codec->sample_rate) / 1000000;
839         }
840
841         delay += 2*data_used;
842         if (delay < data_used)
843             delay = data_used;
844
845         i_start= x = compute_mod(s->sample_array_index - delay * channels, SAMPLE_ARRAY_SIZE);
846         if(s->show_audio==1){
847             h= INT_MIN;
848             for(i=0; i<1000; i+=channels){
849                 int idx= (SAMPLE_ARRAY_SIZE + x - i) % SAMPLE_ARRAY_SIZE;
850                 int a= s->sample_array[idx];
851                 int b= s->sample_array[(idx + 4*channels)%SAMPLE_ARRAY_SIZE];
852                 int c= s->sample_array[(idx + 5*channels)%SAMPLE_ARRAY_SIZE];
853                 int d= s->sample_array[(idx + 9*channels)%SAMPLE_ARRAY_SIZE];
854                 int score= a-d;
855                 if(h<score && (b^c)<0){
856                     h= score;
857                     i_start= idx;
858                 }
859             }
860         }
861
862         s->last_i_start = i_start;
863     } else {
864         i_start = s->last_i_start;
865     }
866
867     bgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0x00);
868     if(s->show_audio==1){
869         fill_rectangle(screen,
870                        s->xleft, s->ytop, s->width, s->height,
871                        bgcolor);
872
873         fgcolor = SDL_MapRGB(screen->format, 0xff, 0xff, 0xff);
874
875         /* total height for one channel */
876         h = s->height / nb_display_channels;
877         /* graph height / 2 */
878         h2 = (h * 9) / 20;
879         for(ch = 0;ch < nb_display_channels; ch++) {
880             i = i_start + ch;
881             y1 = s->ytop + ch * h + (h / 2); /* position of center line */
882             for(x = 0; x < s->width; x++) {
883                 y = (s->sample_array[i] * h2) >> 15;
884                 if (y < 0) {
885                     y = -y;
886                     ys = y1 - y;
887                 } else {
888                     ys = y1;
889                 }
890                 fill_rectangle(screen,
891                                s->xleft + x, ys, 1, y,
892                                fgcolor);
893                 i += channels;
894                 if (i >= SAMPLE_ARRAY_SIZE)
895                     i -= SAMPLE_ARRAY_SIZE;
896             }
897         }
898
899         fgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0xff);
900
901         for(ch = 1;ch < nb_display_channels; ch++) {
902             y = s->ytop + ch * h;
903             fill_rectangle(screen,
904                            s->xleft, y, s->width, 1,
905                            fgcolor);
906         }
907         SDL_UpdateRect(screen, s->xleft, s->ytop, s->width, s->height);
908     }else{
909         nb_display_channels= FFMIN(nb_display_channels, 2);
910         if(rdft_bits != s->rdft_bits){
911             av_rdft_end(s->rdft);
912             av_free(s->rdft_data);
913             s->rdft = av_rdft_init(rdft_bits, DFT_R2C);
914             s->rdft_bits= rdft_bits;
915             s->rdft_data= av_malloc(4*nb_freq*sizeof(*s->rdft_data));
916         }
917         {
918             FFTSample *data[2];
919             for(ch = 0;ch < nb_display_channels; ch++) {
920                 data[ch] = s->rdft_data + 2*nb_freq*ch;
921                 i = i_start + ch;
922                 for(x = 0; x < 2*nb_freq; x++) {
923                     double w= (x-nb_freq)*(1.0/nb_freq);
924                     data[ch][x]= s->sample_array[i]*(1.0-w*w);
925                     i += channels;
926                     if (i >= SAMPLE_ARRAY_SIZE)
927                         i -= SAMPLE_ARRAY_SIZE;
928                 }
929                 av_rdft_calc(s->rdft, data[ch]);
930             }
931             //least efficient way to do this, we should of course directly access it but its more than fast enough
932             for(y=0; y<s->height; y++){
933                 double w= 1/sqrt(nb_freq);
934                 int a= sqrt(w*sqrt(data[0][2*y+0]*data[0][2*y+0] + data[0][2*y+1]*data[0][2*y+1]));
935                 int b= (nb_display_channels == 2 ) ? sqrt(w*sqrt(data[1][2*y+0]*data[1][2*y+0]
936                        + data[1][2*y+1]*data[1][2*y+1])) : a;
937                 a= FFMIN(a,255);
938                 b= FFMIN(b,255);
939                 fgcolor = SDL_MapRGB(screen->format, a, b, (a+b)/2);
940
941                 fill_rectangle(screen,
942                             s->xpos, s->height-y, 1, 1,
943                             fgcolor);
944             }
945         }
946         SDL_UpdateRect(screen, s->xpos, s->ytop, 1, s->height);
947         s->xpos++;
948         if(s->xpos >= s->width)
949             s->xpos= s->xleft;
950     }
951 }
952
/* (Re)create the SDL output surface. Window size priority: forced fullscreen
 * size, then user-requested size, then the filter-chain/codec dimensions,
 * then a 640x480 fallback. Returns 0 on success, -1 on failure. */
static int video_open(VideoState *is){
    int flags = SDL_HWSURFACE|SDL_ASYNCBLIT|SDL_HWACCEL;
    int w,h;

    if(is_full_screen) flags |= SDL_FULLSCREEN;
    else               flags |= SDL_RESIZABLE;

    if (is_full_screen && fs_screen_width) {
        w = fs_screen_width;
        h = fs_screen_height;
    } else if(!is_full_screen && screen_width){
        w = screen_width;
        h = screen_height;
#if CONFIG_AVFILTER
    }else if (is->out_video_filter && is->out_video_filter->inputs[0]){
        w = is->out_video_filter->inputs[0]->w;
        h = is->out_video_filter->inputs[0]->h;
#else
    }else if (is->video_st && is->video_st->codec->width){
        w = is->video_st->codec->width;
        h = is->video_st->codec->height;
#endif
    } else {
        w = 640;
        h = 480;
    }
    /* nothing to do if the current surface already matches the target size */
    if(screen && is->width == screen->w && screen->w == w
       && is->height== screen->h && screen->h == h)
        return 0;

#ifndef __APPLE__
    screen = SDL_SetVideoMode(w, h, 0, flags);
#else
    /* setting bits_per_pixel = 0 or 32 causes blank video on OS X */
    screen = SDL_SetVideoMode(w, h, 24, flags);
#endif
    if (!screen) {
        fprintf(stderr, "SDL: could not set video mode - exiting\n");
        return -1;
    }
    if (!window_title)
        window_title = input_filename;
    SDL_WM_SetCaption(window_title, window_title);

    is->width = screen->w;
    is->height = screen->h;

    return 0;
}
1002
1003 /* display the current picture, if any */
1004 static void video_display(VideoState *is)
1005 {
1006     if(!screen)
1007         video_open(cur_stream);
1008     if (is->audio_st && is->show_audio)
1009         video_audio_display(is);
1010     else if (is->video_st)
1011         video_image_display(is);
1012 }
1013
1014 static int refresh_thread(void *opaque)
1015 {
1016     VideoState *is= opaque;
1017     while(!is->abort_request){
1018         SDL_Event event;
1019         event.type = FF_REFRESH_EVENT;
1020         event.user.data1 = opaque;
1021         if(!is->refresh){
1022             is->refresh=1;
1023             SDL_PushEvent(&event);
1024         }
1025         usleep(is->audio_st && is->show_audio ? rdftspeed*1000 : 5000); //FIXME ideally we should wait the correct time but SDLs event passing is so slow it would be silly
1026     }
1027     return 0;
1028 }
1029
1030 /* get the current audio clock value */
1031 static double get_audio_clock(VideoState *is)
1032 {
1033     double pts;
1034     int hw_buf_size, bytes_per_sec;
1035     pts = is->audio_clock;
1036     hw_buf_size = audio_write_get_buf_size(is);
1037     bytes_per_sec = 0;
1038     if (is->audio_st) {
1039         bytes_per_sec = is->audio_st->codec->sample_rate *
1040             2 * is->audio_st->codec->channels;
1041     }
1042     if (bytes_per_sec)
1043         pts -= (double)hw_buf_size / bytes_per_sec;
1044     return pts;
1045 }
1046
1047 /* get the current video clock value */
1048 static double get_video_clock(VideoState *is)
1049 {
1050     if (is->paused) {
1051         return is->video_current_pts;
1052     } else {
1053         return is->video_current_pts_drift + av_gettime() / 1000000.0;
1054     }
1055 }
1056
1057 /* get the current external clock value */
1058 static double get_external_clock(VideoState *is)
1059 {
1060     int64_t ti;
1061     ti = av_gettime();
1062     return is->external_clock + ((ti - is->external_clock_time) * 1e-6);
1063 }
1064
1065 /* get the current master clock value */
1066 static double get_master_clock(VideoState *is)
1067 {
1068     double val;
1069
1070     if (is->av_sync_type == AV_SYNC_VIDEO_MASTER) {
1071         if (is->video_st)
1072             val = get_video_clock(is);
1073         else
1074             val = get_audio_clock(is);
1075     } else if (is->av_sync_type == AV_SYNC_AUDIO_MASTER) {
1076         if (is->audio_st)
1077             val = get_audio_clock(is);
1078         else
1079             val = get_video_clock(is);
1080     } else {
1081         val = get_external_clock(is);
1082     }
1083     return val;
1084 }
1085
1086 /* seek in the stream */
1087 static void stream_seek(VideoState *is, int64_t pos, int64_t rel, int seek_by_bytes)
1088 {
1089     if (!is->seek_req) {
1090         is->seek_pos = pos;
1091         is->seek_rel = rel;
1092         is->seek_flags &= ~AVSEEK_FLAG_BYTE;
1093         if (seek_by_bytes)
1094             is->seek_flags |= AVSEEK_FLAG_BYTE;
1095         is->seek_req = 1;
1096     }
1097 }
1098
/* pause or resume the video */
static void stream_pause(VideoState *is)
{
    if (is->paused) {
        /* resuming: advance frame_timer by the wall-clock time spent
           paused, so the scheduler does not try to catch up */
        is->frame_timer += av_gettime() / 1000000.0 + is->video_current_pts_drift - is->video_current_pts;
        if(is->read_pause_return != AVERROR(ENOSYS)){
            /* the demuxer honoured av_read_pause(); its clock kept running,
               so re-derive the current pts from the stored drift */
            is->video_current_pts = is->video_current_pts_drift + av_gettime() / 1000000.0;
        }
        /* re-anchor the drift to "now" so get_video_clock() resumes cleanly */
        is->video_current_pts_drift = is->video_current_pts - av_gettime() / 1000000.0;
    }
    is->paused = !is->paused;
}
1111
1112 static double compute_target_time(double frame_current_pts, VideoState *is)
1113 {
1114     double delay, sync_threshold, diff;
1115
1116     /* compute nominal delay */
1117     delay = frame_current_pts - is->frame_last_pts;
1118     if (delay <= 0 || delay >= 10.0) {
1119         /* if incorrect delay, use previous one */
1120         delay = is->frame_last_delay;
1121     } else {
1122         is->frame_last_delay = delay;
1123     }
1124     is->frame_last_pts = frame_current_pts;
1125
1126     /* update delay to follow master synchronisation source */
1127     if (((is->av_sync_type == AV_SYNC_AUDIO_MASTER && is->audio_st) ||
1128          is->av_sync_type == AV_SYNC_EXTERNAL_CLOCK)) {
1129         /* if video is slave, we try to correct big delays by
1130            duplicating or deleting a frame */
1131         diff = get_video_clock(is) - get_master_clock(is);
1132
1133         /* skip or repeat frame. We take into account the
1134            delay to compute the threshold. I still don't know
1135            if it is the best guess */
1136         sync_threshold = FFMAX(AV_SYNC_THRESHOLD, delay);
1137         if (fabs(diff) < AV_NOSYNC_THRESHOLD) {
1138             if (diff <= -sync_threshold)
1139                 delay = 0;
1140             else if (diff >= sync_threshold)
1141                 delay = 2 * delay;
1142         }
1143     }
1144     is->frame_timer += delay;
1145 #if defined(DEBUG_SYNC)
1146     printf("video: delay=%0.3f actual_delay=%0.3f pts=%0.3f A-V=%f\n",
1147             delay, actual_delay, frame_current_pts, -diff);
1148 #endif
1149
1150     return is->frame_timer;
1151 }
1152
1153 /* called to display each frame */
1154 static void video_refresh_timer(void *opaque)
1155 {
1156     VideoState *is = opaque;
1157     VideoPicture *vp;
1158
1159     SubPicture *sp, *sp2;
1160
1161     if (is->video_st) {
1162 retry:
1163         if (is->pictq_size == 0) {
1164             //nothing to do, no picture to display in the que
1165         } else {
1166             double time= av_gettime()/1000000.0;
1167             double next_target;
1168             /* dequeue the picture */
1169             vp = &is->pictq[is->pictq_rindex];
1170
1171             if(time < vp->target_clock)
1172                 return;
1173             /* update current video pts */
1174             is->video_current_pts = vp->pts;
1175             is->video_current_pts_drift = is->video_current_pts - time;
1176             is->video_current_pos = vp->pos;
1177             if(is->pictq_size > 1){
1178                 VideoPicture *nextvp= &is->pictq[(is->pictq_rindex+1)%VIDEO_PICTURE_QUEUE_SIZE];
1179                 assert(nextvp->target_clock >= vp->target_clock);
1180                 next_target= nextvp->target_clock;
1181             }else{
1182                 next_target= vp->target_clock + is->video_clock - vp->pts; //FIXME pass durations cleanly
1183             }
1184             if(framedrop && time > next_target){
1185                 is->skip_frames *= 1.0 + FRAME_SKIP_FACTOR;
1186                 if(is->pictq_size > 1 || time > next_target + 0.5){
1187                     /* update queue size and signal for next picture */
1188                     if (++is->pictq_rindex == VIDEO_PICTURE_QUEUE_SIZE)
1189                         is->pictq_rindex = 0;
1190
1191                     SDL_LockMutex(is->pictq_mutex);
1192                     is->pictq_size--;
1193                     SDL_CondSignal(is->pictq_cond);
1194                     SDL_UnlockMutex(is->pictq_mutex);
1195                     goto retry;
1196                 }
1197             }
1198
1199             if(is->subtitle_st) {
1200                 if (is->subtitle_stream_changed) {
1201                     SDL_LockMutex(is->subpq_mutex);
1202
1203                     while (is->subpq_size) {
1204                         free_subpicture(&is->subpq[is->subpq_rindex]);
1205
1206                         /* update queue size and signal for next picture */
1207                         if (++is->subpq_rindex == SUBPICTURE_QUEUE_SIZE)
1208                             is->subpq_rindex = 0;
1209
1210                         is->subpq_size--;
1211                     }
1212                     is->subtitle_stream_changed = 0;
1213
1214                     SDL_CondSignal(is->subpq_cond);
1215                     SDL_UnlockMutex(is->subpq_mutex);
1216                 } else {
1217                     if (is->subpq_size > 0) {
1218                         sp = &is->subpq[is->subpq_rindex];
1219
1220                         if (is->subpq_size > 1)
1221                             sp2 = &is->subpq[(is->subpq_rindex + 1) % SUBPICTURE_QUEUE_SIZE];
1222                         else
1223                             sp2 = NULL;
1224
1225                         if ((is->video_current_pts > (sp->pts + ((float) sp->sub.end_display_time / 1000)))
1226                                 || (sp2 && is->video_current_pts > (sp2->pts + ((float) sp2->sub.start_display_time / 1000))))
1227                         {
1228                             free_subpicture(sp);
1229
1230                             /* update queue size and signal for next picture */
1231                             if (++is->subpq_rindex == SUBPICTURE_QUEUE_SIZE)
1232                                 is->subpq_rindex = 0;
1233
1234                             SDL_LockMutex(is->subpq_mutex);
1235                             is->subpq_size--;
1236                             SDL_CondSignal(is->subpq_cond);
1237                             SDL_UnlockMutex(is->subpq_mutex);
1238                         }
1239                     }
1240                 }
1241             }
1242
1243             /* display picture */
1244             if (!display_disable)
1245                 video_display(is);
1246
1247             /* update queue size and signal for next picture */
1248             if (++is->pictq_rindex == VIDEO_PICTURE_QUEUE_SIZE)
1249                 is->pictq_rindex = 0;
1250
1251             SDL_LockMutex(is->pictq_mutex);
1252             is->pictq_size--;
1253             SDL_CondSignal(is->pictq_cond);
1254             SDL_UnlockMutex(is->pictq_mutex);
1255         }
1256     } else if (is->audio_st) {
1257         /* draw the next audio frame */
1258
1259         /* if only audio stream, then display the audio bars (better
1260            than nothing, just to test the implementation */
1261
1262         /* display picture */
1263         if (!display_disable)
1264             video_display(is);
1265     }
1266     if (show_status) {
1267         static int64_t last_time;
1268         int64_t cur_time;
1269         int aqsize, vqsize, sqsize;
1270         double av_diff;
1271
1272         cur_time = av_gettime();
1273         if (!last_time || (cur_time - last_time) >= 30000) {
1274             aqsize = 0;
1275             vqsize = 0;
1276             sqsize = 0;
1277             if (is->audio_st)
1278                 aqsize = is->audioq.size;
1279             if (is->video_st)
1280                 vqsize = is->videoq.size;
1281             if (is->subtitle_st)
1282                 sqsize = is->subtitleq.size;
1283             av_diff = 0;
1284             if (is->audio_st && is->video_st)
1285                 av_diff = get_audio_clock(is) - get_video_clock(is);
1286             printf("%7.2f A-V:%7.3f s:%3.1f aq=%5dKB vq=%5dKB sq=%5dB f=%"PRId64"/%"PRId64"   \r",
1287                    get_master_clock(is), av_diff, FFMAX(is->skip_frames-1, 0), aqsize / 1024, vqsize / 1024, sqsize, is->video_st->codec->pts_correction_num_faulty_dts, is->video_st->codec->pts_correction_num_faulty_pts);
1288             fflush(stdout);
1289             last_time = cur_time;
1290         }
1291     }
1292 }
1293
/* Tear down a VideoState: stop the worker threads, release every queued
 * picture / overlay, destroy the synchronisation primitives and free the
 * state itself. Must only be called once; `is` is invalid afterwards. */
static void stream_close(VideoState *is)
{
    VideoPicture *vp;
    int i;
    /* XXX: use a special url_shutdown call to abort parse cleanly */
    is->abort_request = 1;
    /* join the demuxer and refresh threads before freeing shared state */
    SDL_WaitThread(is->parse_tid, NULL);
    SDL_WaitThread(is->refresh_tid, NULL);

    /* free all pictures */
    for(i=0;i<VIDEO_PICTURE_QUEUE_SIZE; i++) {
        vp = &is->pictq[i];
#if CONFIG_AVFILTER
        if (vp->picref) {
            avfilter_unref_buffer(vp->picref);
            vp->picref = NULL;
        }
#endif
        if (vp->bmp) {
            SDL_FreeYUVOverlay(vp->bmp);
            vp->bmp = NULL;
        }
    }
    SDL_DestroyMutex(is->pictq_mutex);
    SDL_DestroyCond(is->pictq_cond);
    SDL_DestroyMutex(is->subpq_mutex);
    SDL_DestroyCond(is->subpq_cond);
#if !CONFIG_AVFILTER
    /* the scaler context only exists in the non-filter build */
    if (is->img_convert_ctx)
        sws_freeContext(is->img_convert_ctx);
#endif
    av_free(is);
}
1327
/* Orderly program shutdown: close the active stream, release option and
 * filter state, shut SDL down and exit(0). Never returns. */
static void do_exit(void)
{
    if (cur_stream) {
        stream_close(cur_stream);
        cur_stream = NULL;
    }
    uninit_opts();
#if CONFIG_AVFILTER
    avfilter_uninit();
#endif
    if (show_status)
        printf("\n");
    SDL_Quit();
    /* silence any further logging during atexit-time teardown */
    av_log(NULL, AV_LOG_QUIET, "");
    exit(0);
}
1344
/* allocate a picture (needs to do that in main thread to avoid
   potential locking problems */
static void alloc_picture(void *opaque)
{
    VideoState *is = opaque;
    VideoPicture *vp;

    /* (re)allocate the overlay for the picture the decoder is about to write */
    vp = &is->pictq[is->pictq_windex];

    if (vp->bmp)
        SDL_FreeYUVOverlay(vp->bmp);

#if CONFIG_AVFILTER
    if (vp->picref)
        avfilter_unref_buffer(vp->picref);
    vp->picref = NULL;

    /* size comes from the filter graph output in the filter build... */
    vp->width   = is->out_video_filter->inputs[0]->w;
    vp->height  = is->out_video_filter->inputs[0]->h;
    vp->pix_fmt = is->out_video_filter->inputs[0]->format;
#else
    /* ...and straight from the decoder otherwise */
    vp->width   = is->video_st->codec->width;
    vp->height  = is->video_st->codec->height;
    vp->pix_fmt = is->video_st->codec->pix_fmt;
#endif

    vp->bmp = SDL_CreateYUVOverlay(vp->width, vp->height,
                                   SDL_YV12_OVERLAY,
                                   screen);
    if (!vp->bmp || vp->bmp->pitches[0] < vp->width) {
        /* SDL allocates a buffer smaller than requested if the video
         * overlay hardware is unable to support the requested size. */
        fprintf(stderr, "Error: the video system does not support an image\n"
                        "size of %dx%d pixels. Try using -lowres or -vf \"scale=w:h\"\n"
                        "to reduce the image size.\n", vp->width, vp->height );
        do_exit();
    }

    /* wake queue_picture(), which is blocked waiting for vp->allocated */
    SDL_LockMutex(is->pictq_mutex);
    vp->allocated = 1;
    SDL_CondSignal(is->pictq_cond);
    SDL_UnlockMutex(is->pictq_mutex);
}
1388
/**
 * Copy a decoded frame into the next free slot of the picture queue,
 * converting it to the overlay's YV12 layout, and publish it for the
 * display thread. Blocks while the queue is full.
 *
 * @param pts the dts of the pkt / pts of the frame and guessed if not known
 * @return 0 on success, -1 if playback is being aborted
 */
static int queue_picture(VideoState *is, AVFrame *src_frame, double pts, int64_t pos)
{
    VideoPicture *vp;
    int dst_pix_fmt;
#if CONFIG_AVFILTER
    AVPicture pict_src;
#endif
    /* wait until we have space to put a new picture */
    SDL_LockMutex(is->pictq_mutex);

    /* queue full while the display thread is idle: raise the drop rate */
    if(is->pictq_size>=VIDEO_PICTURE_QUEUE_SIZE && !is->refresh)
        is->skip_frames= FFMAX(1.0 - FRAME_SKIP_FACTOR, is->skip_frames * (1.0-FRAME_SKIP_FACTOR));

    while (is->pictq_size >= VIDEO_PICTURE_QUEUE_SIZE &&
           !is->videoq.abort_request) {
        SDL_CondWait(is->pictq_cond, is->pictq_mutex);
    }
    SDL_UnlockMutex(is->pictq_mutex);

    if (is->videoq.abort_request)
        return -1;

    vp = &is->pictq[is->pictq_windex];

    /* alloc or resize hardware picture buffer */
    if (!vp->bmp ||
#if CONFIG_AVFILTER
        vp->width  != is->out_video_filter->inputs[0]->w ||
        vp->height != is->out_video_filter->inputs[0]->h) {
#else
        vp->width != is->video_st->codec->width ||
        vp->height != is->video_st->codec->height) {
#endif
        SDL_Event event;

        vp->allocated = 0;

        /* the allocation must be done in the main thread to avoid
           locking problems */
        event.type = FF_ALLOC_EVENT;
        event.user.data1 = is;
        SDL_PushEvent(&event);

        /* wait until the picture is allocated */
        SDL_LockMutex(is->pictq_mutex);
        while (!vp->allocated && !is->videoq.abort_request) {
            SDL_CondWait(is->pictq_cond, is->pictq_mutex);
        }
        SDL_UnlockMutex(is->pictq_mutex);

        if (is->videoq.abort_request)
            return -1;
    }

    /* if the frame is not skipped, then display it */
    if (vp->bmp) {
        AVPicture pict;
#if CONFIG_AVFILTER
        /* take over the filter buffer reference carried in frame->opaque */
        if(vp->picref)
            avfilter_unref_buffer(vp->picref);
        vp->picref = src_frame->opaque;
#endif

        /* get a pointer on the bitmap */
        SDL_LockYUVOverlay (vp->bmp);

        dst_pix_fmt = PIX_FMT_YUV420P;
        memset(&pict,0,sizeof(AVPicture));
        /* SDL's YV12 stores V before U, hence planes 1 and 2 are swapped
           relative to AVPicture's YUV420P order */
        pict.data[0] = vp->bmp->pixels[0];
        pict.data[1] = vp->bmp->pixels[2];
        pict.data[2] = vp->bmp->pixels[1];

        pict.linesize[0] = vp->bmp->pitches[0];
        pict.linesize[1] = vp->bmp->pitches[2];
        pict.linesize[2] = vp->bmp->pitches[1];

#if CONFIG_AVFILTER
        pict_src.data[0] = src_frame->data[0];
        pict_src.data[1] = src_frame->data[1];
        pict_src.data[2] = src_frame->data[2];

        pict_src.linesize[0] = src_frame->linesize[0];
        pict_src.linesize[1] = src_frame->linesize[1];
        pict_src.linesize[2] = src_frame->linesize[2];

        //FIXME use direct rendering
        av_picture_copy(&pict, &pict_src,
                        vp->pix_fmt, vp->width, vp->height);
#else
        /* no filter graph: convert whatever the decoder produced to YUV420P */
        sws_flags = av_get_int(sws_opts, "sws_flags", NULL);
        is->img_convert_ctx = sws_getCachedContext(is->img_convert_ctx,
            vp->width, vp->height, vp->pix_fmt, vp->width, vp->height,
            dst_pix_fmt, sws_flags, NULL, NULL, NULL);
        if (is->img_convert_ctx == NULL) {
            fprintf(stderr, "Cannot initialize the conversion context\n");
            exit(1);
        }
        sws_scale(is->img_convert_ctx, src_frame->data, src_frame->linesize,
                  0, vp->height, pict.data, pict.linesize);
#endif
        /* update the bitmap content */
        SDL_UnlockYUVOverlay(vp->bmp);

        vp->pts = pts;
        vp->pos = pos;

        /* now we can update the picture count */
        if (++is->pictq_windex == VIDEO_PICTURE_QUEUE_SIZE)
            is->pictq_windex = 0;
        SDL_LockMutex(is->pictq_mutex);
        /* schedule when the display thread should show this picture */
        vp->target_clock= compute_target_time(vp->pts, is);

        is->pictq_size++;
        SDL_UnlockMutex(is->pictq_mutex);
    }
    return 0;
}
1510
/**
 * compute the exact PTS for the picture if it is omitted in the stream
 * @param pts1 the dts of the pkt / pts of the frame
 * @return the result of queue_picture() (0 on success, -1 on abort)
 */
static int output_picture2(VideoState *is, AVFrame *src_frame, double pts1, int64_t pos)
{
    double frame_delay, pts;

    pts = pts1;

    if (pts != 0) {
        /* update video clock with pts, if present */
        is->video_clock = pts;
    } else {
        /* no timestamp: fall back to the extrapolated clock */
        pts = is->video_clock;
    }
    /* update video clock for next frame */
    frame_delay = av_q2d(is->video_st->codec->time_base);
    /* for MPEG2, the frame can be repeated, so we update the
       clock accordingly */
    frame_delay += src_frame->repeat_pict * (frame_delay * 0.5);
    is->video_clock += frame_delay;

#if defined(DEBUG_SYNC) && 0
    printf("frame_type=%c clock=%0.3f pts=%0.3f\n",
           av_get_pict_type_char(src_frame->pict_type), pts, pts1);
#endif
    return queue_picture(is, src_frame, pts, pos);
}
1540
/* Pull the next packet from the video queue and decode it.
 * Returns 1 when a displayable frame was produced (with *pts filled in,
 * 0 substituted for AV_NOPTS_VALUE), 0 when no frame should be shown
 * (no output yet, frame dropped for catch-up, or a flush packet was
 * handled), and -1 on abort.
 * NOTE(review): the avcodec_decode_video2() return value (len1) is
 * ignored, so decode errors are treated the same as "no frame". */
static int get_video_frame(VideoState *is, AVFrame *frame, int64_t *pts, AVPacket *pkt)
{
    int len1, got_picture, i;

    if (packet_queue_get(&is->videoq, pkt, 1) < 0)
        return -1;

    /* a flush packet marks a seek: reset decoder and timing state */
    if (pkt->data == flush_pkt.data) {
        avcodec_flush_buffers(is->video_st->codec);

        SDL_LockMutex(is->pictq_mutex);
        //Make sure there are no long delay timers (ideally we should just flush the que but thats harder)
        for (i = 0; i < VIDEO_PICTURE_QUEUE_SIZE; i++) {
            is->pictq[i].target_clock= 0;
        }
        /* wait until the display thread has consumed the stale pictures */
        while (is->pictq_size && !is->videoq.abort_request) {
            SDL_CondWait(is->pictq_cond, is->pictq_mutex);
        }
        is->video_current_pos = -1;
        SDL_UnlockMutex(is->pictq_mutex);

        is->frame_last_pts = AV_NOPTS_VALUE;
        is->frame_last_delay = 0;
        is->frame_timer = (double)av_gettime() / 1000000.0;
        is->skip_frames = 1;
        is->skip_frames_index = 0;
        return 0;
    }

    len1 = avcodec_decode_video2(is->video_st->codec,
                                 frame, &got_picture,
                                 pkt);

    if (got_picture) {
        /* timestamp source selectable via -drp: best-effort guess,
           packet pts, or packet dts */
        if (decoder_reorder_pts == -1) {
            *pts = frame->best_effort_timestamp;
        } else if (decoder_reorder_pts) {
            *pts = frame->pkt_pts;
        } else {
            *pts = frame->pkt_dts;
        }

        if (*pts == AV_NOPTS_VALUE) {
            *pts = 0;
        }

        /* fractional frame-drop accounting: only every skip_frames-th
           decoded frame is reported as displayable */
        is->skip_frames_index += 1;
        if(is->skip_frames_index >= is->skip_frames){
            is->skip_frames_index -= FFMAX(is->skip_frames, 1.0);
            return 1;
        }

    }
    return 0;
}
1596
1597 #if CONFIG_AVFILTER
/* Private state of the ffplay source filter. */
typedef struct {
    VideoState *is;     /* owning player state (source of decoded frames) */
    AVFrame *frame;     /* scratch frame reused for each decode */
    int use_dr1;        /* nonzero when direct rendering into filter buffers is enabled */
} FilterPriv;
1603
/* get_buffer() callback that makes the decoder render directly into a
 * buffer obtained from the filter graph (direct rendering / DR1).
 * Returns 0 on success, -1 if no filter buffer could be obtained. */
static int input_get_buffer(AVCodecContext *codec, AVFrame *pic)
{
    AVFilterContext *ctx = codec->opaque;
    AVFilterBufferRef  *ref;
    int perms = AV_PERM_WRITE;
    int i, w, h, stride[4];
    unsigned edge;

    if (codec->codec->capabilities & CODEC_CAP_NEG_LINESIZES)
        perms |= AV_PERM_NEG_LINESIZES;

    /* translate the decoder's buffer hints into filter buffer permissions */
    if(pic->buffer_hints & FF_BUFFER_HINTS_VALID) {
        if(pic->buffer_hints & FF_BUFFER_HINTS_READABLE) perms |= AV_PERM_READ;
        if(pic->buffer_hints & FF_BUFFER_HINTS_PRESERVE) perms |= AV_PERM_PRESERVE;
        if(pic->buffer_hints & FF_BUFFER_HINTS_REUSABLE) perms |= AV_PERM_REUSE2;
    }
    if(pic->reference) perms |= AV_PERM_READ | AV_PERM_PRESERVE;

    /* pad the requested size with the codec's alignment and edge
       requirements so the decoder may write outside the visible area */
    w = codec->width;
    h = codec->height;
    avcodec_align_dimensions2(codec, &w, &h, stride);
    edge = codec->flags & CODEC_FLAG_EMU_EDGE ? 0 : avcodec_get_edge_width();
    w += edge << 1;
    h += edge << 1;

    if(!(ref = avfilter_get_video_buffer(ctx->outputs[0], perms, w, h)))
        return -1;

    /* the buffer is padded; advertise only the visible dimensions */
    ref->video->w = codec->width;
    ref->video->h = codec->height;
    for(i = 0; i < 4; i ++) {
        /* chroma planes are subsampled, so the edge offset shrinks accordingly */
        unsigned hshift = (i == 1 || i == 2) ? av_pix_fmt_descriptors[ref->format].log2_chroma_w : 0;
        unsigned vshift = (i == 1 || i == 2) ? av_pix_fmt_descriptors[ref->format].log2_chroma_h : 0;

        if (ref->data[i]) {
            /* skip the edge so data[] points at the visible top-left pixel */
            ref->data[i]    += (edge >> hshift) + ((edge * ref->linesize[i]) >> vshift);
        }
        pic->data[i]     = ref->data[i];
        pic->linesize[i] = ref->linesize[i];
    }
    /* stash the buffer ref so release/reget and the filter can find it */
    pic->opaque = ref;
    pic->age    = INT_MAX;
    pic->type   = FF_BUFFER_TYPE_USER;
    pic->reordered_opaque = codec->reordered_opaque;
    if(codec->pkt) pic->pkt_pts = codec->pkt->pts;
    else           pic->pkt_pts = AV_NOPTS_VALUE;
    return 0;
}
1652
/* release_buffer() callback paired with input_get_buffer(): drop the
 * filter buffer reference stored in pic->opaque and clear the plane
 * pointers so the decoder cannot reuse them. */
static void input_release_buffer(AVCodecContext *codec, AVFrame *pic)
{
    memset(pic->data, 0, sizeof(pic->data));
    avfilter_unref_buffer(pic->opaque);
}
1658
/* reget_buffer() callback: reuse the existing filter buffer for the
 * frame if possible, falling back to a fresh get_buffer() when the frame
 * has no data yet. Fails if the picture geometry changed under us. */
static int input_reget_buffer(AVCodecContext *codec, AVFrame *pic)
{
    AVFilterBufferRef *ref = pic->opaque;

    if (pic->data[0] == NULL) {
        /* no buffer yet: request a readable one through the normal path */
        pic->buffer_hints |= FF_BUFFER_HINTS_READABLE;
        return codec->get_buffer(codec, pic);
    }

    if ((codec->width != ref->video->w) || (codec->height != ref->video->h) ||
        (codec->pix_fmt != ref->format)) {
        av_log(codec, AV_LOG_ERROR, "Picture properties changed.\n");
        return -1;
    }

    /* refresh the per-frame metadata the decoder expects to be current */
    pic->reordered_opaque = codec->reordered_opaque;
    if(codec->pkt) pic->pkt_pts = codec->pkt->pts;
    else           pic->pkt_pts = AV_NOPTS_VALUE;
    return 0;
}
1679
1680 static int input_init(AVFilterContext *ctx, const char *args, void *opaque)
1681 {
1682     FilterPriv *priv = ctx->priv;
1683     AVCodecContext *codec;
1684     if(!opaque) return -1;
1685
1686     priv->is = opaque;
1687     codec    = priv->is->video_st->codec;
1688     codec->opaque = ctx;
1689     if(codec->codec->capabilities & CODEC_CAP_DR1) {
1690         priv->use_dr1 = 1;
1691         codec->get_buffer     = input_get_buffer;
1692         codec->release_buffer = input_release_buffer;
1693         codec->reget_buffer   = input_reget_buffer;
1694     }
1695
1696     priv->frame = avcodec_alloc_frame();
1697
1698     return 0;
1699 }
1700
/* uninit callback of the ffplay source filter: free the scratch frame
 * allocated in input_init(). */
static void input_uninit(AVFilterContext *ctx)
{
    FilterPriv *priv = ctx->priv;
    av_free(priv->frame);
}
1706
/* request_frame callback of the ffplay source filter: decode packets
 * until a displayable frame appears, wrap it in a buffer ref and push it
 * down the link. Returns 0 on success, -1 on abort/EOF. */
static int input_request_frame(AVFilterLink *link)
{
    FilterPriv *priv = link->src->priv;
    AVFilterBufferRef *picref;
    int64_t pts = 0;
    AVPacket pkt;
    int ret;

    /* get_video_frame() returns 0 for "no frame yet" (free and retry),
       1 for a frame, negative on abort */
    while (!(ret = get_video_frame(priv->is, priv->frame, &pts, &pkt)))
        av_free_packet(&pkt);
    if (ret < 0)
        return -1;

    if(priv->use_dr1) {
        /* frame already lives in a filter buffer; just add a reference */
        picref = avfilter_ref_buffer(priv->frame->opaque, ~0);
    } else {
        /* copy the decoder's frame into a new filter buffer
           NOTE(review): the avfilter_get_video_buffer() result is not
           NULL-checked before the copy — verify failure is impossible here */
        picref = avfilter_get_video_buffer(link, AV_PERM_WRITE, link->w, link->h);
        av_image_copy(picref->data, picref->linesize,
                      priv->frame->data, priv->frame->linesize,
                      picref->format, link->w, link->h);
    }
    /* frees pkt's payload only; pkt.pos below stays valid */
    av_free_packet(&pkt);

    picref->pts = pts;
    picref->pos = pkt.pos;
    picref->video->pixel_aspect = priv->is->video_st->codec->sample_aspect_ratio;
    avfilter_start_frame(link, picref);
    avfilter_draw_slice(link, 0, link->h, 1);
    avfilter_end_frame(link);

    return 0;
}
1739
1740 static int input_query_formats(AVFilterContext *ctx)
1741 {
1742     FilterPriv *priv = ctx->priv;
1743     enum PixelFormat pix_fmts[] = {
1744         priv->is->video_st->codec->pix_fmt, PIX_FMT_NONE
1745     };
1746
1747     avfilter_set_common_formats(ctx, avfilter_make_format_list(pix_fmts));
1748     return 0;
1749 }
1750
1751 static int input_config_props(AVFilterLink *link)
1752 {
1753     FilterPriv *priv  = link->src->priv;
1754     AVCodecContext *c = priv->is->video_st->codec;
1755
1756     link->w = c->width;
1757     link->h = c->height;
1758     link->time_base = priv->is->video_st->time_base;
1759
1760     return 0;
1761 }
1762
/* Definition of the ffplay source filter: no inputs, one video output
 * fed by input_request_frame() with frames pulled from the decoder. */
static AVFilter input_filter =
{
    .name      = "ffplay_input",

    .priv_size = sizeof(FilterPriv),

    .init      = input_init,
    .uninit    = input_uninit,

    .query_formats = input_query_formats,

    .inputs    = (AVFilterPad[]) {{ .name = NULL }},
    .outputs   = (AVFilterPad[]) {{ .name = "default",
                                    .type = AVMEDIA_TYPE_VIDEO,
                                    .request_frame = input_request_frame,
                                    .config_props  = input_config_props, },
                                  { .name = NULL }},
};
1781
/* Build the video filter graph: ffplay_input source -> optional user
 * filter chain (vfilters string) -> ffsink. On success stores the sink
 * in is->out_video_filter and returns >= 0; returns a negative error
 * code otherwise. */
static int configure_video_filters(AVFilterGraph *graph, VideoState *is, const char *vfilters)
{
    char sws_flags_str[128];
    int ret;
    FFSinkContext ffsink_ctx = { .pix_fmt = PIX_FMT_YUV420P };
    AVFilterContext *filt_src = NULL, *filt_out = NULL;
    /* propagate the global -sws_flags to auto-inserted scale filters */
    snprintf(sws_flags_str, sizeof(sws_flags_str), "flags=%d", sws_flags);
    graph->scale_sws_opts = av_strdup(sws_flags_str);

    if ((ret = avfilter_graph_create_filter(&filt_src, &input_filter, "src",
                                            NULL, is, graph)) < 0)
        goto the_end;
    if ((ret = avfilter_graph_create_filter(&filt_out, &ffsink, "out",
                                            NULL, &ffsink_ctx, graph)) < 0)
        goto the_end;

    if(vfilters) {
        /* NOTE(review): these two mallocs are not NULL-checked before use;
           avfilter_graph_parse() takes ownership of inputs/outputs */
        AVFilterInOut *outputs = av_malloc(sizeof(AVFilterInOut));
        AVFilterInOut *inputs  = av_malloc(sizeof(AVFilterInOut));

        /* the user chain's input pad is fed by our source... */
        outputs->name    = av_strdup("in");
        outputs->filter_ctx = filt_src;
        outputs->pad_idx = 0;
        outputs->next    = NULL;

        /* ...and its output pad drains into the sink */
        inputs->name    = av_strdup("out");
        inputs->filter_ctx = filt_out;
        inputs->pad_idx = 0;
        inputs->next    = NULL;

        if ((ret = avfilter_graph_parse(graph, vfilters, inputs, outputs, NULL)) < 0)
            goto the_end;
        /* NOTE(review): this frees the caller-supplied vfilters string —
           callers must pass heap-allocated (or no longer needed) memory */
        av_freep(&vfilters);
    } else {
        /* no user filters: wire source straight to sink */
        if ((ret = avfilter_link(filt_src, 0, filt_out, 0)) < 0)
            goto the_end;
    }

    if ((ret = avfilter_graph_config(graph, NULL)) < 0)
        goto the_end;

    is->out_video_filter = filt_out;
the_end:
    return ret;
}
1827
1828 #endif  /* CONFIG_AVFILTER */
1829
/* Video decoding thread: repeatedly pull decoded frames (through the
 * filter graph when CONFIG_AVFILTER, directly from the decoder
 * otherwise), convert timestamps to seconds and hand frames to
 * output_picture2() for queueing. Exits on abort or fatal error. */
static int video_thread(void *arg)
{
    VideoState *is = arg;
    AVFrame *frame= avcodec_alloc_frame();
    int64_t pts_int;
    double pts;
    int ret;

#if CONFIG_AVFILTER
    AVFilterGraph *graph = avfilter_graph_alloc();
    AVFilterContext *filt_out = NULL;
    int64_t pos;

    if ((ret = configure_video_filters(graph, is, vfilters)) < 0)
        goto the_end;
    filt_out = is->out_video_filter;
#endif

    for(;;) {
#if !CONFIG_AVFILTER
        AVPacket pkt;
#else
        AVFilterBufferRef *picref;
        AVRational tb;
#endif
        /* busy-wait (10ms granularity) while playback is paused */
        while (is->paused && !is->videoq.abort_request)
            SDL_Delay(10);
#if CONFIG_AVFILTER
        ret = get_filtered_video_frame(filt_out, frame, &picref, &tb);
        if (picref) {
            pts_int = picref->pts;
            pos     = picref->pos;
            /* keep the buffer ref alive; queue_picture() takes it over */
            frame->opaque = picref;
        }

        /* filter output may use a different time base than the stream */
        if (av_cmp_q(tb, is->video_st->time_base)) {
            av_unused int64_t pts1 = pts_int;
            pts_int = av_rescale_q(pts_int, tb, is->video_st->time_base);
            av_dlog(NULL, "video_thread(): "
                    "tb:%d/%d pts:%"PRId64" -> tb:%d/%d pts:%"PRId64"\n",
                    tb.num, tb.den, pts1,
                    is->video_st->time_base.num, is->video_st->time_base.den, pts_int);
        }
#else
        ret = get_video_frame(is, frame, &pts_int, &pkt);
#endif

        if (ret < 0) goto the_end;

        /* ret == 0 means no displayable frame this iteration */
        if (!ret)
            continue;

        pts = pts_int*av_q2d(is->video_st->time_base);

#if CONFIG_AVFILTER
        ret = output_picture2(is, frame, pts, pos);
#else
        ret = output_picture2(is, frame, pts,  pkt.pos);
        av_free_packet(&pkt);
#endif
        if (ret < 0)
            goto the_end;

        /* in single-step mode, pause again after each displayed frame */
        if (step)
            if (cur_stream)
                stream_pause(cur_stream);
    }
 the_end:
#if CONFIG_AVFILTER
    avfilter_graph_free(&graph);
#endif
    av_free(frame);
    return 0;
}
1904
/* Subtitle decoding thread: reads packets from the subtitle queue,
 * decodes them, converts bitmap-subtitle palettes from RGBA to YUVA,
 * and stores the result in the subpicture ring buffer (is->subpq).
 * Runs until the queue is aborted; always returns 0. */
static int subtitle_thread(void *arg)
{
    VideoState *is = arg;
    SubPicture *sp;
    AVPacket pkt1, *pkt = &pkt1;
    int len1, got_subtitle;
    double pts;
    int i, j;
    int r, g, b, y, u, v, a;

    for(;;) {
        /* poll (10 ms steps) while playback is paused */
        while (is->paused && !is->subtitleq.abort_request) {
            SDL_Delay(10);
        }
        if (packet_queue_get(&is->subtitleq, pkt, 1) < 0)
            break;

        /* flush sentinel inserted on seek: drop decoder state */
        if(pkt->data == flush_pkt.data){
            avcodec_flush_buffers(is->subtitle_st->codec);
            continue;
        }
        /* wait for a free slot in the subpicture ring buffer */
        SDL_LockMutex(is->subpq_mutex);
        while (is->subpq_size >= SUBPICTURE_QUEUE_SIZE &&
               !is->subtitleq.abort_request) {
            SDL_CondWait(is->subpq_cond, is->subpq_mutex);
        }
        SDL_UnlockMutex(is->subpq_mutex);

        if (is->subtitleq.abort_request)
            goto the_end;

        sp = &is->subpq[is->subpq_windex];

       /* NOTE: ipts is the PTS of the _first_ picture beginning in
           this packet, if any */
        pts = 0;
        if (pkt->pts != AV_NOPTS_VALUE)
            pts = av_q2d(is->subtitle_st->time_base)*pkt->pts;

        len1 = avcodec_decode_subtitle2(is->subtitle_st->codec,
                                    &sp->sub, &got_subtitle,
                                    pkt);
//            if (len1 < 0)
//                break;
        /* format == 0 denotes bitmap subtitles: convert each rect's
           RGBA palette entries to YUVA in place for later blending */
        if (got_subtitle && sp->sub.format == 0) {
            sp->pts = pts;

            for (i = 0; i < sp->sub.num_rects; i++)
            {
                for (j = 0; j < sp->sub.rects[i]->nb_colors; j++)
                {
                    RGBA_IN(r, g, b, a, (uint32_t*)sp->sub.rects[i]->pict.data[1] + j);
                    y = RGB_TO_Y_CCIR(r, g, b);
                    u = RGB_TO_U_CCIR(r, g, b, 0);
                    v = RGB_TO_V_CCIR(r, g, b, 0);
                    YUVA_OUT((uint32_t*)sp->sub.rects[i]->pict.data[1] + j, y, u, v, a);
                }
            }

            /* now we can update the picture count */
            if (++is->subpq_windex == SUBPICTURE_QUEUE_SIZE)
                is->subpq_windex = 0;
            SDL_LockMutex(is->subpq_mutex);
            is->subpq_size++;
            SDL_UnlockMutex(is->subpq_mutex);
        }
        av_free_packet(pkt);
//        if (step)
//            if (cur_stream)
//                stream_pause(cur_stream);
    }
 the_end:
    return 0;
}
1979
1980 /* copy samples for viewing in editor window */
1981 static void update_sample_display(VideoState *is, short *samples, int samples_size)
1982 {
1983     int size, len, channels;
1984
1985     channels = is->audio_st->codec->channels;
1986
1987     size = samples_size / sizeof(short);
1988     while (size > 0) {
1989         len = SAMPLE_ARRAY_SIZE - is->sample_array_index;
1990         if (len > size)
1991             len = size;
1992         memcpy(is->sample_array + is->sample_array_index, samples, len * sizeof(short));
1993         samples += len;
1994         is->sample_array_index += len;
1995         if (is->sample_array_index >= SAMPLE_ARRAY_SIZE)
1996             is->sample_array_index = 0;
1997         size -= len;
1998     }
1999 }
2000
2001 /* return the new audio buffer size (samples can be added or deleted
2002    to get better sync if video or external master clock) */
2003 static int synchronize_audio(VideoState *is, short *samples,
2004                              int samples_size1, double pts)
2005 {
2006     int n, samples_size;
2007     double ref_clock;
2008
2009     n = 2 * is->audio_st->codec->channels;
2010     samples_size = samples_size1;
2011
2012     /* if not master, then we try to remove or add samples to correct the clock */
2013     if (((is->av_sync_type == AV_SYNC_VIDEO_MASTER && is->video_st) ||
2014          is->av_sync_type == AV_SYNC_EXTERNAL_CLOCK)) {
2015         double diff, avg_diff;
2016         int wanted_size, min_size, max_size, nb_samples;
2017
2018         ref_clock = get_master_clock(is);
2019         diff = get_audio_clock(is) - ref_clock;
2020
2021         if (diff < AV_NOSYNC_THRESHOLD) {
2022             is->audio_diff_cum = diff + is->audio_diff_avg_coef * is->audio_diff_cum;
2023             if (is->audio_diff_avg_count < AUDIO_DIFF_AVG_NB) {
2024                 /* not enough measures to have a correct estimate */
2025                 is->audio_diff_avg_count++;
2026             } else {
2027                 /* estimate the A-V difference */
2028                 avg_diff = is->audio_diff_cum * (1.0 - is->audio_diff_avg_coef);
2029
2030                 if (fabs(avg_diff) >= is->audio_diff_threshold) {
2031                     wanted_size = samples_size + ((int)(diff * is->audio_st->codec->sample_rate) * n);
2032                     nb_samples = samples_size / n;
2033
2034                     min_size = ((nb_samples * (100 - SAMPLE_CORRECTION_PERCENT_MAX)) / 100) * n;
2035                     max_size = ((nb_samples * (100 + SAMPLE_CORRECTION_PERCENT_MAX)) / 100) * n;
2036                     if (wanted_size < min_size)
2037                         wanted_size = min_size;
2038                     else if (wanted_size > max_size)
2039                         wanted_size = max_size;
2040
2041                     /* add or remove samples to correction the synchro */
2042                     if (wanted_size < samples_size) {
2043                         /* remove samples */
2044                         samples_size = wanted_size;
2045                     } else if (wanted_size > samples_size) {
2046                         uint8_t *samples_end, *q;
2047                         int nb;
2048
2049                         /* add samples */
2050                         nb = (samples_size - wanted_size);
2051                         samples_end = (uint8_t *)samples + samples_size - n;
2052                         q = samples_end + n;
2053                         while (nb > 0) {
2054                             memcpy(q, samples_end, n);
2055                             q += n;
2056                             nb -= n;
2057                         }
2058                         samples_size = wanted_size;
2059                     }
2060                 }
2061 #if 0
2062                 printf("diff=%f adiff=%f sample_diff=%d apts=%0.3f vpts=%0.3f %f\n",
2063                        diff, avg_diff, samples_size - samples_size1,
2064                        is->audio_clock, is->video_clock, is->audio_diff_threshold);
2065 #endif
2066             }
2067         } else {
2068             /* too big difference : may be initial PTS errors, so
2069                reset A-V filter */
2070             is->audio_diff_avg_count = 0;
2071             is->audio_diff_cum = 0;
2072         }
2073     }
2074
2075     return samples_size;
2076 }
2077
/* decode one audio frame and returns its uncompressed size */
/* On success is->audio_buf points at the decoded samples (converted to
 * interleaved S16 when the decoder output format differs), *pts_ptr is
 * their presentation time in seconds, and is->audio_clock is advanced
 * past them. Returns the byte count, or -1 when paused/aborted. */
static int audio_decode_frame(VideoState *is, double *pts_ptr)
{
    AVPacket *pkt_temp = &is->audio_pkt_temp; /* consumption cursor into pkt */
    AVPacket *pkt = &is->audio_pkt;           /* packet owning the data */
    AVCodecContext *dec= is->audio_st->codec;
    int n, len1, data_size;
    double pts;

    for(;;) {
        /* NOTE: the audio packet can contain several frames */
        while (pkt_temp->size > 0) {
            data_size = sizeof(is->audio_buf1);
            len1 = avcodec_decode_audio3(dec,
                                        (int16_t *)is->audio_buf1, &data_size,
                                        pkt_temp);
            if (len1 < 0) {
                /* if error, we skip the frame */
                pkt_temp->size = 0;
                break;
            }

            /* advance past the bytes the decoder consumed */
            pkt_temp->data += len1;
            pkt_temp->size -= len1;
            if (data_size <= 0)
                continue;

            /* (re)create the format converter when the decoder output
               format differs from the S16 that SDL is fed */
            if (dec->sample_fmt != is->audio_src_fmt) {
                if (is->reformat_ctx)
                    av_audio_convert_free(is->reformat_ctx);
                is->reformat_ctx= av_audio_convert_alloc(AV_SAMPLE_FMT_S16, 1,
                                                         dec->sample_fmt, 1, NULL, 0);
                if (!is->reformat_ctx) {
                    fprintf(stderr, "Cannot convert %s sample format to %s sample format\n",
                        av_get_sample_fmt_name(dec->sample_fmt),
                        av_get_sample_fmt_name(AV_SAMPLE_FMT_S16));
                        break;
                }
                is->audio_src_fmt= dec->sample_fmt;
            }

            if (is->reformat_ctx) {
                /* convert audio_buf1 -> audio_buf2, 2 bytes per output sample */
                const void *ibuf[6]= {is->audio_buf1};
                void *obuf[6]= {is->audio_buf2};
                int istride[6]= {av_get_bits_per_sample_fmt(dec->sample_fmt)/8};
                int ostride[6]= {2};
                int len= data_size/istride[0];
                if (av_audio_convert(is->reformat_ctx, obuf, ostride, ibuf, istride, len)<0) {
                    printf("av_audio_convert() failed\n");
                    break;
                }
                is->audio_buf= is->audio_buf2;
                /* FIXME: existing code assume that data_size equals framesize*channels*2
                          remove this legacy cruft */
                data_size= len*2;
            }else{
                is->audio_buf= is->audio_buf1;
            }

            /* if no pts, then compute it */
            pts = is->audio_clock;
            *pts_ptr = pts;
            n = 2 * dec->channels; /* bytes per S16 sample frame */
            is->audio_clock += (double)data_size /
                (double)(n * dec->sample_rate);
#if defined(DEBUG_SYNC)
            {
                static double last_clock;
                printf("audio: delay=%0.3f clock=%0.3f pts=%0.3f\n",
                       is->audio_clock - last_clock,
                       is->audio_clock, pts);
                last_clock = is->audio_clock;
            }
#endif
            return data_size;
        }

        /* free the current packet */
        if (pkt->data)
            av_free_packet(pkt);

        if (is->paused || is->audioq.abort_request) {
            return -1;
        }

        /* read next packet */
        if (packet_queue_get(&is->audioq, pkt, 1) < 0)
            return -1;
        /* flush sentinel inserted on seek: drop decoder state */
        if(pkt->data == flush_pkt.data){
            avcodec_flush_buffers(dec);
            continue;
        }

        pkt_temp->data = pkt->data;
        pkt_temp->size = pkt->size;

        /* if update the audio clock with the pts */
        if (pkt->pts != AV_NOPTS_VALUE) {
            is->audio_clock = av_q2d(is->audio_st->time_base)*pkt->pts;
        }
    }
}
2180
2181 /* get the current audio output buffer size, in samples. With SDL, we
2182    cannot have a precise information */
2183 static int audio_write_get_buf_size(VideoState *is)
2184 {
2185     return is->audio_buf_size - is->audio_buf_index;
2186 }
2187
2188
2189 /* prepare a new audio buffer */
2190 static void sdl_audio_callback(void *opaque, Uint8 *stream, int len)
2191 {
2192     VideoState *is = opaque;
2193     int audio_size, len1;
2194     double pts;
2195
2196     audio_callback_time = av_gettime();
2197
2198     while (len > 0) {
2199         if (is->audio_buf_index >= is->audio_buf_size) {
2200            audio_size = audio_decode_frame(is, &pts);
2201            if (audio_size < 0) {
2202                 /* if error, just output silence */
2203                is->audio_buf = is->audio_buf1;
2204                is->audio_buf_size = 1024;
2205                memset(is->audio_buf, 0, is->audio_buf_size);
2206            } else {
2207                if (is->show_audio)
2208                    update_sample_display(is, (int16_t *)is->audio_buf, audio_size);
2209                audio_size = synchronize_audio(is, (int16_t *)is->audio_buf, audio_size,
2210                                               pts);
2211                is->audio_buf_size = audio_size;
2212            }
2213            is->audio_buf_index = 0;
2214         }
2215         len1 = is->audio_buf_size - is->audio_buf_index;
2216         if (len1 > len)
2217             len1 = len;
2218         memcpy(stream, (uint8_t *)is->audio_buf + is->audio_buf_index, len1);
2219         len -= len1;
2220         stream += len1;
2221         is->audio_buf_index += len1;
2222     }
2223 }
2224
/* open a given stream. Return 0 if OK */
/* Finds and opens the decoder for ic->streams[stream_index], applies the
 * global decoding options, and starts the matching consumer: SDL audio
 * output, the video thread, or the subtitle thread. */
static int stream_component_open(VideoState *is, int stream_index)
{
    AVFormatContext *ic = is->ic;
    AVCodecContext *avctx;
    AVCodec *codec;
    SDL_AudioSpec wanted_spec, spec;

    if (stream_index < 0 || stream_index >= ic->nb_streams)
        return -1;
    avctx = ic->streams[stream_index]->codec;

    /* prepare audio output */
    if (avctx->codec_type == AVMEDIA_TYPE_AUDIO) {
        /* ask the decoder to downmix to stereo at most */
        if (avctx->channels > 0) {
            avctx->request_channels = FFMIN(2, avctx->channels);
        } else {
            avctx->request_channels = 2;
        }
    }

    codec = avcodec_find_decoder(avctx->codec_id);
    /* propagate the command-line debug/speed/robustness options */
    avctx->debug_mv = debug_mv;
    avctx->debug = debug;
    avctx->workaround_bugs = workaround_bugs;
    avctx->lowres = lowres;
    if(lowres) avctx->flags |= CODEC_FLAG_EMU_EDGE;
    avctx->idct_algo= idct;
    if(fast) avctx->flags2 |= CODEC_FLAG2_FAST;
    avctx->skip_frame= skip_frame;
    avctx->skip_idct= skip_idct;
    avctx->skip_loop_filter= skip_loop_filter;
    avctx->error_recognition= error_recognition;
    avctx->error_concealment= error_concealment;
    avcodec_thread_init(avctx, thread_count);

    set_context_opts(avctx, avcodec_opts[avctx->codec_type], 0, codec);

    if (!codec ||
        avcodec_open(avctx, codec) < 0)
        return -1;

    /* prepare audio output */
    if (avctx->codec_type == AVMEDIA_TYPE_AUDIO) {
        wanted_spec.freq = avctx->sample_rate;
        wanted_spec.format = AUDIO_S16SYS;
        wanted_spec.channels = avctx->channels;
        wanted_spec.silence = 0;
        wanted_spec.samples = SDL_AUDIO_BUFFER_SIZE;
        wanted_spec.callback = sdl_audio_callback;
        wanted_spec.userdata = is;
        if (SDL_OpenAudio(&wanted_spec, &spec) < 0) {
            fprintf(stderr, "SDL_OpenAudio: %s\n", SDL_GetError());
            return -1;
        }
        /* remember what the hardware actually granted */
        is->audio_hw_buf_size = spec.size;
        is->audio_src_fmt= AV_SAMPLE_FMT_S16;
    }

    ic->streams[stream_index]->discard = AVDISCARD_DEFAULT;
    switch(avctx->codec_type) {
    case AVMEDIA_TYPE_AUDIO:
        is->audio_stream = stream_index;
        is->audio_st = ic->streams[stream_index];
        is->audio_buf_size = 0;
        is->audio_buf_index = 0;

        /* init averaging filter */
        is->audio_diff_avg_coef = exp(log(0.01) / AUDIO_DIFF_AVG_NB);
        is->audio_diff_avg_count = 0;
        /* since we do not have a precise anough audio fifo fullness,
           we correct audio sync only if larger than this threshold */
        is->audio_diff_threshold = 2.0 * SDL_AUDIO_BUFFER_SIZE / avctx->sample_rate;

        memset(&is->audio_pkt, 0, sizeof(is->audio_pkt));
        packet_queue_init(&is->audioq);
        SDL_PauseAudio(0);
        break;
    case AVMEDIA_TYPE_VIDEO:
        is->video_stream = stream_index;
        is->video_st = ic->streams[stream_index];

//        is->video_current_pts_time = av_gettime();

        packet_queue_init(&is->videoq);
        is->video_tid = SDL_CreateThread(video_thread, is);
        break;
    case AVMEDIA_TYPE_SUBTITLE:
        is->subtitle_stream = stream_index;
        is->subtitle_st = ic->streams[stream_index];
        packet_queue_init(&is->subtitleq);

        is->subtitle_tid = SDL_CreateThread(subtitle_thread, is);
        break;
    default:
        break;
    }
    return 0;
}
2324
/* Close one stream component: abort its packet queue, unblock and join
 * the consumer thread (or close SDL audio), free per-stream resources,
 * and mark the stream as inactive in `is`. Inverse of
 * stream_component_open(). */
static void stream_component_close(VideoState *is, int stream_index)
{
    AVFormatContext *ic = is->ic;
    AVCodecContext *avctx;

    if (stream_index < 0 || stream_index >= ic->nb_streams)
        return;
    avctx = ic->streams[stream_index]->codec;

    switch(avctx->codec_type) {
    case AVMEDIA_TYPE_AUDIO:
        packet_queue_abort(&is->audioq);

        /* stops the callback; no join needed, SDL owns the audio thread */
        SDL_CloseAudio();

        packet_queue_end(&is->audioq);
        if (is->reformat_ctx)
            av_audio_convert_free(is->reformat_ctx);
        is->reformat_ctx = NULL;
        break;
    case AVMEDIA_TYPE_VIDEO:
        packet_queue_abort(&is->videoq);

        /* note: we also signal this mutex to make sure we deblock the
           video thread in all cases */
        SDL_LockMutex(is->pictq_mutex);
        SDL_CondSignal(is->pictq_cond);
        SDL_UnlockMutex(is->pictq_mutex);

        SDL_WaitThread(is->video_tid, NULL);

        packet_queue_end(&is->videoq);
        break;
    case AVMEDIA_TYPE_SUBTITLE:
        packet_queue_abort(&is->subtitleq);

        /* note: we also signal this mutex to make sure we deblock the
           video thread in all cases */
        SDL_LockMutex(is->subpq_mutex);
        is->subtitle_stream_changed = 1;

        SDL_CondSignal(is->subpq_cond);
        SDL_UnlockMutex(is->subpq_mutex);

        SDL_WaitThread(is->subtitle_tid, NULL);

        packet_queue_end(&is->subtitleq);
        break;
    default:
        break;
    }

    /* stop demuxing packets for this stream and release the codec */
    ic->streams[stream_index]->discard = AVDISCARD_ALL;
    avcodec_close(avctx);
    switch(avctx->codec_type) {
    case AVMEDIA_TYPE_AUDIO:
        is->audio_st = NULL;
        is->audio_stream = -1;
        break;
    case AVMEDIA_TYPE_VIDEO:
        is->video_st = NULL;
        is->video_stream = -1;
        break;
    case AVMEDIA_TYPE_SUBTITLE:
        is->subtitle_st = NULL;
        is->subtitle_stream = -1;
        break;
    default:
        break;
    }
}
2396
2397 /* since we have only one decoding thread, we can use a global
2398    variable instead of a thread local variable */
2399 static VideoState *global_video_state;
2400
2401 static int decode_interrupt_cb(void)
2402 {
2403     return (global_video_state && global_video_state->abort_request);
2404 }
2405
/* this thread gets the stream from the disk or the network */
/* Demuxer thread: opens the input, selects and opens the best
 * audio/video/subtitle streams, then loops reading packets and
 * dispatching them to the per-stream queues, handling pause, seek
 * requests, queue back-pressure and end-of-file/looping.
 * Always returns 0; errors are reported via an FF_QUIT_EVENT. */
static int decode_thread(void *arg)
{
    VideoState *is = arg;
    AVFormatContext *ic;
    int err, i, ret;
    int st_index[AVMEDIA_TYPE_NB];   /* chosen stream per media type, -1 = none */
    AVPacket pkt1, *pkt = &pkt1;
    AVFormatParameters params, *ap = &params;
    int eof=0;
    int pkt_in_play_range = 0;

    ic = avformat_alloc_context();

    memset(st_index, -1, sizeof(st_index));
    is->video_stream = -1;
    is->audio_stream = -1;
    is->subtitle_stream = -1;

    /* let blocking I/O be interrupted when abort is requested */
    global_video_state = is;
    url_set_interrupt_cb(decode_interrupt_cb);

    memset(ap, 0, sizeof(*ap));

    /* hints for raw formats that cannot probe these themselves */
    ap->prealloced_context = 1;
    ap->width = frame_width;
    ap->height= frame_height;
    ap->time_base= (AVRational){1, 25};
    ap->pix_fmt = frame_pix_fmt;

    set_context_opts(ic, avformat_opts, AV_OPT_FLAG_DECODING_PARAM, NULL);

    err = av_open_input_file(&ic, is->filename, is->iformat, 0, ap);
    if (err < 0) {
        print_error(is->filename, err);
        ret = -1;
        goto fail;
    }
    is->ic = ic;

    if(genpts)
        ic->flags |= AVFMT_FLAG_GENPTS;

    err = av_find_stream_info(ic);
    if (err < 0) {
        fprintf(stderr, "%s: could not find codec parameters\n", is->filename);
        ret = -1;
        goto fail;
    }
    if(ic->pb)
        ic->pb->eof_reached= 0; //FIXME hack, ffplay maybe should not use url_feof() to test for the end

    /* default byte-seeking on for formats with discontinuous timestamps */
    if(seek_by_bytes<0)
        seek_by_bytes= !!(ic->iformat->flags & AVFMT_TS_DISCONT);

    /* if seeking requested, we execute it */
    if (start_time != AV_NOPTS_VALUE) {
        int64_t timestamp;

        timestamp = start_time;
        /* add the stream start time */
        if (ic->start_time != AV_NOPTS_VALUE)
            timestamp += ic->start_time;
        ret = avformat_seek_file(ic, -1, INT64_MIN, timestamp, INT64_MAX, 0);
        if (ret < 0) {
            fprintf(stderr, "%s: could not seek to position %0.3f\n",
                    is->filename, (double)timestamp / AV_TIME_BASE);
        }
    }

    /* discard everything by default; stream_component_open() re-enables
       the streams that are actually selected */
    for (i = 0; i < ic->nb_streams; i++)
        ic->streams[i]->discard = AVDISCARD_ALL;
    if (!video_disable)
        st_index[AVMEDIA_TYPE_VIDEO] =
            av_find_best_stream(ic, AVMEDIA_TYPE_VIDEO,
                                wanted_stream[AVMEDIA_TYPE_VIDEO], -1, NULL, 0);
    if (!audio_disable)
        st_index[AVMEDIA_TYPE_AUDIO] =
            av_find_best_stream(ic, AVMEDIA_TYPE_AUDIO,
                                wanted_stream[AVMEDIA_TYPE_AUDIO],
                                st_index[AVMEDIA_TYPE_VIDEO],
                                NULL, 0);
    /* subtitles are rendered onto video, hence the video_disable guard */
    if (!video_disable)
        st_index[AVMEDIA_TYPE_SUBTITLE] =
            av_find_best_stream(ic, AVMEDIA_TYPE_SUBTITLE,
                                wanted_stream[AVMEDIA_TYPE_SUBTITLE],
                                (st_index[AVMEDIA_TYPE_AUDIO] >= 0 ?
                                 st_index[AVMEDIA_TYPE_AUDIO] :
                                 st_index[AVMEDIA_TYPE_VIDEO]),
                                NULL, 0);
    if (show_status) {
        dump_format(ic, 0, is->filename, 0);
    }

    /* open the streams */
    if (st_index[AVMEDIA_TYPE_AUDIO] >= 0) {
        stream_component_open(is, st_index[AVMEDIA_TYPE_AUDIO]);
    }

    ret=-1;
    if (st_index[AVMEDIA_TYPE_VIDEO] >= 0) {
        ret= stream_component_open(is, st_index[AVMEDIA_TYPE_VIDEO]);
    }
    is->refresh_tid = SDL_CreateThread(refresh_thread, is);
    /* no video: show the audio waveform/spectrum instead */
    if(ret<0) {
        if (!display_disable)
            is->show_audio = 2;
    }

    if (st_index[AVMEDIA_TYPE_SUBTITLE] >= 0) {
        stream_component_open(is, st_index[AVMEDIA_TYPE_SUBTITLE]);
    }

    if (is->video_stream < 0 && is->audio_stream < 0) {
        fprintf(stderr, "%s: could not open codecs\n", is->filename);
        ret = -1;
        goto fail;
    }

    for(;;) {
        if (is->abort_request)
            break;
        /* propagate pause/resume to the demuxer (for network streams) */
        if (is->paused != is->last_paused) {
            is->last_paused = is->paused;
            if (is->paused)
                is->read_pause_return= av_read_pause(ic);
            else
                av_read_play(ic);
        }
#if CONFIG_RTSP_DEMUXER
        if (is->paused && !strcmp(ic->iformat->name, "rtsp")) {
            /* wait 10 ms to avoid trying to get another packet */
            /* XXX: horrible */
            SDL_Delay(10);
            continue;
        }
#endif
        /* execute a pending seek request, then flush all queues */
        if (is->seek_req) {
            int64_t seek_target= is->seek_pos;
            int64_t seek_min= is->seek_rel > 0 ? seek_target - is->seek_rel + 2: INT64_MIN;
            int64_t seek_max= is->seek_rel < 0 ? seek_target - is->seek_rel - 2: INT64_MAX;
//FIXME the +-2 is due to rounding being not done in the correct direction in generation
//      of the seek_pos/seek_rel variables

            ret = avformat_seek_file(is->ic, -1, seek_min, seek_target, seek_max, is->seek_flags);
            if (ret < 0) {
                fprintf(stderr, "%s: error while seeking\n", is->ic->filename);
            }else{
                if (is->audio_stream >= 0) {
                    packet_queue_flush(&is->audioq);
                    packet_queue_put(&is->audioq, &flush_pkt);
                }
                if (is->subtitle_stream >= 0) {
                    packet_queue_flush(&is->subtitleq);
                    packet_queue_put(&is->subtitleq, &flush_pkt);
                }
                if (is->video_stream >= 0) {
                    packet_queue_flush(&is->videoq);
                    packet_queue_put(&is->videoq, &flush_pkt);
                }
            }
            is->seek_req = 0;
            eof= 0;
        }

        /* if the queue are full, no need to read more */
        if (   is->audioq.size + is->videoq.size + is->subtitleq.size > MAX_QUEUE_SIZE
            || (   (is->audioq   .size  > MIN_AUDIOQ_SIZE || is->audio_stream<0)
                && (is->videoq   .nb_packets > MIN_FRAMES || is->video_stream<0)
                && (is->subtitleq.nb_packets > MIN_FRAMES || is->subtitle_stream<0))) {
            /* wait 10 ms */
            SDL_Delay(10);
            continue;
        }
        if(eof) {
            /* push an empty packet so the video decoder can flush its
               delayed frames */
            if(is->video_stream >= 0){
                av_init_packet(pkt);
                pkt->data=NULL;
                pkt->size=0;
                pkt->stream_index= is->video_stream;
                packet_queue_put(&is->videoq, pkt);
            }
            SDL_Delay(10);
            /* once everything is drained: loop again or exit */
            if(is->audioq.size + is->videoq.size + is->subtitleq.size ==0){
                if(loop!=1 && (!loop || --loop)){
                    stream_seek(cur_stream, start_time != AV_NOPTS_VALUE ? start_time : 0, 0, 0);
                }else if(autoexit){
                    ret=AVERROR_EOF;
                    goto fail;
                }
            }
            continue;
        }
        ret = av_read_frame(ic, pkt);
        if (ret < 0) {
            if (ret == AVERROR_EOF || url_feof(ic->pb))
                eof=1;
            if (url_ferror(ic->pb))
                break;
            SDL_Delay(100); /* wait for user event */
            continue;
        }
        /* check if packet is in play range specified by user, then queue, otherwise discard */
        pkt_in_play_range = duration == AV_NOPTS_VALUE ||
                (pkt->pts - ic->streams[pkt->stream_index]->start_time) *
                av_q2d(ic->streams[pkt->stream_index]->time_base) -
                (double)(start_time != AV_NOPTS_VALUE ? start_time : 0)/1000000
                <= ((double)duration/1000000);
        if (pkt->stream_index == is->audio_stream && pkt_in_play_range) {
            packet_queue_put(&is->audioq, pkt);
        } else if (pkt->stream_index == is->video_stream && pkt_in_play_range) {
            packet_queue_put(&is->videoq, pkt);
        } else if (pkt->stream_index == is->subtitle_stream && pkt_in_play_range) {
            packet_queue_put(&is->subtitleq, pkt);
        } else {
            av_free_packet(pkt);
        }
    }
    /* wait until the end */
    while (!is->abort_request) {
        SDL_Delay(100);
    }

    ret = 0;
 fail:
    /* disable interrupting */
    global_video_state = NULL;

    /* close each stream */
    if (is->audio_stream >= 0)
        stream_component_close(is, is->audio_stream);
    if (is->video_stream >= 0)
        stream_component_close(is, is->video_stream);
    if (is->subtitle_stream >= 0)
        stream_component_close(is, is->subtitle_stream);
    if (is->ic) {
        av_close_input_file(is->ic);
        is->ic = NULL; /* safety */
    }
    url_set_interrupt_cb(NULL);

    /* notify the main loop so it can tear down the player */
    if (ret != 0) {
        SDL_Event event;

        event.type = FF_QUIT_EVENT;
        event.user.data1 = is;
        SDL_PushEvent(&event);
    }
    return 0;
}
2656
2657 static VideoState *stream_open(const char *filename, AVInputFormat *iformat)
2658 {
2659     VideoState *is;
2660
2661     is = av_mallocz(sizeof(VideoState));
2662     if (!is)
2663         return NULL;
2664     av_strlcpy(is->filename, filename, sizeof(is->filename));
2665     is->iformat = iformat;
2666     is->ytop = 0;
2667     is->xleft = 0;
2668
2669     /* start video display */
2670     is->pictq_mutex = SDL_CreateMutex();
2671     is->pictq_cond = SDL_CreateCond();
2672
2673     is->subpq_mutex = SDL_CreateMutex();
2674     is->subpq_cond = SDL_CreateCond();
2675
2676     is->av_sync_type = av_sync_type;
2677     is->parse_tid = SDL_CreateThread(decode_thread, is);
2678     if (!is->parse_tid) {
2679         av_free(is);
2680         return NULL;
2681     }
2682     return is;
2683 }
2684
/* Switch to the next stream of `codec_type`, wrapping around; for
 * subtitles, index -1 ("no subtitles") is part of the cycle. Closes
 * the currently open component and opens the newly selected one. */
static void stream_cycle_channel(VideoState *is, int codec_type)
{
    AVFormatContext *ic = is->ic;
    int start_index, stream_index;
    AVStream *st;

    if (codec_type == AVMEDIA_TYPE_VIDEO)
        start_index = is->video_stream;
    else if (codec_type == AVMEDIA_TYPE_AUDIO)
        start_index = is->audio_stream;
    else
        start_index = is->subtitle_stream;
    /* nothing to cycle when no stream of this type is active
       (subtitles may legitimately start from -1) */
    if (start_index < (codec_type == AVMEDIA_TYPE_SUBTITLE ? -1 : 0))
        return;
    stream_index = start_index;
    for(;;) {
        if (++stream_index >= is->ic->nb_streams)
        {
            if (codec_type == AVMEDIA_TYPE_SUBTITLE)
            {
                /* wrap to the "no subtitles" state */
                stream_index = -1;
                goto the_end;
            } else
                stream_index = 0;
        }
        /* full cycle without finding an alternative: keep current */
        if (stream_index == start_index)
            return;
        st = ic->streams[stream_index];
        if (st->codec->codec_type == codec_type) {
            /* check that parameters are OK */
            switch(codec_type) {
            case AVMEDIA_TYPE_AUDIO:
                if (st->codec->sample_rate != 0 &&
                    st->codec->channels != 0)
                    goto the_end;
                break;
            case AVMEDIA_TYPE_VIDEO:
            case AVMEDIA_TYPE_SUBTITLE:
                goto the_end;
            default:
                break;
            }
        }
    }
 the_end:
    stream_component_close(is, start_index);
    stream_component_open(is, stream_index);
}
2733
2734
2735 static void toggle_full_screen(void)
2736 {
2737     is_full_screen = !is_full_screen;
2738     if (!fs_screen_width) {
2739         /* use default SDL method */
2740 //        SDL_WM_ToggleFullScreen(screen);
2741     }
2742     video_open(cur_stream);
2743 }
2744
2745 static void toggle_pause(void)
2746 {
2747     if (cur_stream)
2748         stream_pause(cur_stream);
2749     step = 0;
2750 }
2751
2752 static void step_to_next_frame(void)
2753 {
2754     if (cur_stream) {
2755         /* if the stream is paused unpause it, then step */
2756         if (cur_stream->paused)
2757             stream_pause(cur_stream);
2758     }
2759     step = 1;
2760 }
2761
2762 static void toggle_audio_display(void)
2763 {
2764     if (cur_stream) {
2765         int bgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0x00);
2766         cur_stream->show_audio = (cur_stream->show_audio + 1) % 3;
2767         fill_rectangle(screen,
2768                     cur_stream->xleft, cur_stream->ytop, cur_stream->width, cur_stream->height,
2769                     bgcolor);
2770         SDL_UpdateRect(screen, cur_stream->xleft, cur_stream->ytop, cur_stream->width, cur_stream->height);
2771     }
2772 }
2773
2774 /* handle an event sent by the GUI */
2775 static void event_loop(void)
2776 {
2777     SDL_Event event;
2778     double incr, pos, frac;
2779
2780     for(;;) {
2781         double x;
2782         SDL_WaitEvent(&event);
2783         switch(event.type) {
2784         case SDL_KEYDOWN:
2785             if (exit_on_keydown) {
2786                 do_exit();
2787                 break;
2788             }
2789             switch(event.key.keysym.sym) {
2790             case SDLK_ESCAPE:
2791             case SDLK_q:
2792                 do_exit();
2793                 break;
2794             case SDLK_f:
2795                 toggle_full_screen();
2796                 break;
2797             case SDLK_p:
2798             case SDLK_SPACE:
2799                 toggle_pause();
2800                 break;
2801             case SDLK_s: //S: Step to next frame
2802                 step_to_next_frame();
2803                 break;
2804             case SDLK_a:
2805                 if (cur_stream)
2806                     stream_cycle_channel(cur_stream, AVMEDIA_TYPE_AUDIO);
2807                 break;
2808             case SDLK_v:
2809                 if (cur_stream)
2810                     stream_cycle_channel(cur_stream, AVMEDIA_TYPE_VIDEO);
2811                 break;
2812             case SDLK_t:
2813                 if (cur_stream)
2814                     stream_cycle_channel(cur_stream, AVMEDIA_TYPE_SUBTITLE);
2815                 break;
2816             case SDLK_w:
2817                 toggle_audio_display();
2818                 break;
2819             case SDLK_LEFT:
2820                 incr = -10.0;
2821                 goto do_seek;
2822             case SDLK_RIGHT:
2823                 incr = 10.0;
2824                 goto do_seek;
2825             case SDLK_UP:
2826                 incr = 60.0;
2827                 goto do_seek;
2828             case SDLK_DOWN:
2829                 incr = -60.0;
2830             do_seek:
2831                 if (cur_stream) {
2832                     if (seek_by_bytes) {
2833                         if (cur_stream->video_stream >= 0 && cur_stream->video_current_pos>=0){
2834                             pos= cur_stream->video_current_pos;
2835                         }else if(cur_stream->audio_stream >= 0 && cur_stream->audio_pkt.pos>=0){
2836                             pos= cur_stream->audio_pkt.pos;
2837                         }else
2838                             pos = url_ftell(cur_stream->ic->pb);
2839                         if (cur_stream->ic->bit_rate)
2840                             incr *= cur_stream->ic->bit_rate / 8.0;
2841                         else
2842                             incr *= 180000.0;
2843                         pos += incr;
2844                         stream_seek(cur_stream, pos, incr, 1);
2845                     } else {
2846                         pos = get_master_clock(cur_stream);
2847                         pos += incr;
2848                         stream_seek(cur_stream, (int64_t)(pos * AV_TIME_BASE), (int64_t)(incr * AV_TIME_BASE), 0);
2849                     }
2850                 }
2851                 break;
2852             default:
2853                 break;
2854             }
2855             break;
2856         case SDL_MOUSEBUTTONDOWN:
2857             if (exit_on_mousedown) {
2858                 do_exit();
2859                 break;
2860             }
2861         case SDL_MOUSEMOTION:
2862             if(event.type ==SDL_MOUSEBUTTONDOWN){
2863                 x= event.button.x;
2864             }else{
2865                 if(event.motion.state != SDL_PRESSED)
2866                     break;
2867                 x= event.motion.x;
2868             }
2869             if (cur_stream) {
2870                 if(seek_by_bytes || cur_stream->ic->duration<=0){
2871                     uint64_t size=  url_fsize(cur_stream->ic->pb);
2872                     stream_seek(cur_stream, size*x/cur_stream->width, 0, 1);
2873                 }else{
2874                     int64_t ts;
2875                     int ns, hh, mm, ss;
2876                     int tns, thh, tmm, tss;
2877                     tns = cur_stream->ic->duration/1000000LL;
2878                     thh = tns/3600;
2879                     tmm = (tns%3600)/60;
2880                     tss = (tns%60);
2881                     frac = x/cur_stream->width;
2882                     ns = frac*tns;
2883                     hh = ns/3600;
2884                     mm = (ns%3600)/60;
2885                     ss = (ns%60);
2886                     fprintf(stderr, "Seek to %2.0f%% (%2d:%02d:%02d) of total duration (%2d:%02d:%02d)       \n", frac*100,
2887                             hh, mm, ss, thh, tmm, tss);
2888                     ts = frac*cur_stream->ic->duration;
2889                     if (cur_stream->ic->start_time != AV_NOPTS_VALUE)
2890                         ts += cur_stream->ic->start_time;
2891                     stream_seek(cur_stream, ts, 0, 0);
2892                 }
2893             }
2894             break;
2895         case SDL_VIDEORESIZE:
2896             if (cur_stream) {
2897                 screen = SDL_SetVideoMode(event.resize.w, event.resize.h, 0,
2898                                           SDL_HWSURFACE|SDL_RESIZABLE|SDL_ASYNCBLIT|SDL_HWACCEL);
2899                 screen_width = cur_stream->width = event.resize.w;
2900                 screen_height= cur_stream->height= event.resize.h;
2901             }
2902             break;
2903         case SDL_QUIT:
2904         case FF_QUIT_EVENT:
2905             do_exit();
2906             break;
2907         case FF_ALLOC_EVENT:
2908             video_open(event.user.data1);
2909             alloc_picture(event.user.data1);
2910             break;
2911         case FF_REFRESH_EVENT:
2912             video_refresh_timer(event.user.data1);
2913             cur_stream->refresh=0;
2914             break;
2915         default:
2916             break;
2917         }
2918     }
2919 }
2920
2921 static void opt_frame_size(const char *arg)
2922 {
2923     if (av_parse_video_size(&frame_width, &frame_height, arg) < 0) {
2924         fprintf(stderr, "Incorrect frame size\n");
2925         exit(1);
2926     }
2927     if ((frame_width % 2) != 0 || (frame_height % 2) != 0) {
2928         fprintf(stderr, "Frame size must be a multiple of 2\n");
2929         exit(1);
2930     }
2931 }
2932
2933 static int opt_width(const char *opt, const char *arg)
2934 {
2935     screen_width = parse_number_or_die(opt, arg, OPT_INT64, 1, INT_MAX);
2936     return 0;
2937 }
2938
2939 static int opt_height(const char *opt, const char *arg)
2940 {
2941     screen_height = parse_number_or_die(opt, arg, OPT_INT64, 1, INT_MAX);
2942     return 0;
2943 }
2944
2945 static void opt_format(const char *arg)
2946 {
2947     file_iformat = av_find_input_format(arg);
2948     if (!file_iformat) {
2949         fprintf(stderr, "Unknown input format: %s\n", arg);
2950         exit(1);
2951     }
2952 }
2953
2954 static void opt_frame_pix_fmt(const char *arg)
2955 {
2956     frame_pix_fmt = av_get_pix_fmt(arg);
2957 }
2958
2959 static int opt_sync(const char *opt, const char *arg)
2960 {
2961     if (!strcmp(arg, "audio"))
2962         av_sync_type = AV_SYNC_AUDIO_MASTER;
2963     else if (!strcmp(arg, "video"))
2964         av_sync_type = AV_SYNC_VIDEO_MASTER;
2965     else if (!strcmp(arg, "ext"))
2966         av_sync_type = AV_SYNC_EXTERNAL_CLOCK;
2967     else {
2968         fprintf(stderr, "Unknown value for %s: %s\n", opt, arg);
2969         exit(1);
2970     }
2971     return 0;
2972 }
2973
2974 static int opt_seek(const char *opt, const char *arg)
2975 {
2976     start_time = parse_time_or_die(opt, arg, 1);
2977     return 0;
2978 }
2979
2980 static int opt_duration(const char *opt, const char *arg)
2981 {
2982     duration = parse_time_or_die(opt, arg, 1);
2983     return 0;
2984 }
2985
2986 static int opt_debug(const char *opt, const char *arg)
2987 {
2988     av_log_set_level(99);
2989     debug = parse_number_or_die(opt, arg, OPT_INT64, 0, INT_MAX);
2990     return 0;
2991 }
2992
2993 static int opt_vismv(const char *opt, const char *arg)
2994 {
2995     debug_mv = parse_number_or_die(opt, arg, OPT_INT64, INT_MIN, INT_MAX);
2996     return 0;
2997 }
2998
2999 static int opt_thread_count(const char *opt, const char *arg)
3000 {
3001     thread_count= parse_number_or_die(opt, arg, OPT_INT64, 0, INT_MAX);
3002 #if !HAVE_THREADS
3003     fprintf(stderr, "Warning: not compiled with thread support, using thread emulation\n");
3004 #endif
3005     return 0;
3006 }
3007
3008 static const OptionDef options[] = {
3009 #include "cmdutils_common_opts.h"
3010     { "x", HAS_ARG | OPT_FUNC2, {(void*)opt_width}, "force displayed width", "width" },
3011     { "y", HAS_ARG | OPT_FUNC2, {(void*)opt_height}, "force displayed height", "height" },
3012     { "s", HAS_ARG | OPT_VIDEO, {(void*)opt_frame_size}, "set frame size (WxH or abbreviation)", "size" },
3013     { "fs", OPT_BOOL, {(void*)&is_full_screen}, "force full screen" },
3014     { "an", OPT_BOOL, {(void*)&audio_disable}, "disable audio" },
3015     { "vn", OPT_BOOL, {(void*)&video_disable}, "disable video" },
3016     { "ast", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&wanted_stream[AVMEDIA_TYPE_AUDIO]}, "select desired audio stream", "stream_number" },
3017     { "vst", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&wanted_stream[AVMEDIA_TYPE_VIDEO]}, "select desired video stream", "stream_number" },
3018     { "sst", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&wanted_stream[AVMEDIA_TYPE_SUBTITLE]}, "select desired subtitle stream", "stream_number" },
3019     { "ss", HAS_ARG | OPT_FUNC2, {(void*)&opt_seek}, "seek to a given position in seconds", "pos" },
3020     { "t", HAS_ARG | OPT_FUNC2, {(void*)&opt_duration}, "play  \"duration\" seconds of audio/video", "duration" },
3021     { "bytes", OPT_INT | HAS_ARG, {(void*)&seek_by_bytes}, "seek by bytes 0=off 1=on -1=auto", "val" },
3022     { "nodisp", OPT_BOOL, {(void*)&display_disable}, "disable graphical display" },
3023     { "f", HAS_ARG, {(void*)opt_format}, "force format", "fmt" },
3024     { "pix_fmt", HAS_ARG | OPT_EXPERT | OPT_VIDEO, {(void*)opt_frame_pix_fmt}, "set pixel format", "format" },
3025     { "stats", OPT_BOOL | OPT_EXPERT, {(void*)&show_status}, "show status", "" },
3026     { "debug", HAS_ARG | OPT_FUNC2 | OPT_EXPERT, {(void*)opt_debug}, "print specific debug info", "" },
3027     { "bug", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&workaround_bugs}, "workaround bugs", "" },
3028     { "vismv", HAS_ARG | OPT_FUNC2 | OPT_EXPERT, {(void*)opt_vismv}, "visualize motion vectors", "" },
3029     { "fast", OPT_BOOL | OPT_EXPERT, {(void*)&fast}, "non spec compliant optimizations", "" },
3030     { "genpts", OPT_BOOL | OPT_EXPERT, {(void*)&genpts}, "generate pts", "" },
3031     { "drp", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&decoder_reorder_pts}, "let decoder reorder pts 0=off 1=on -1=auto", ""},
3032     { "lowres", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&lowres}, "", "" },
3033     { "skiploop", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&skip_loop_filter}, "", "" },
3034     { "skipframe", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&skip_frame}, "", "" },
3035     { "skipidct", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&skip_idct}, "", "" },
3036     { "idct", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&idct}, "set idct algo",  "algo" },
3037     { "er", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&error_recognition}, "set error detection threshold (0-4)",  "threshold" },
3038     { "ec", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&error_concealment}, "set error concealment options",  "bit_mask" },
3039     { "sync", HAS_ARG | OPT_FUNC2 | OPT_EXPERT, {(void*)opt_sync}, "set audio-video sync. type (type=audio/video/ext)", "type" },
3040     { "threads", HAS_ARG | OPT_FUNC2 | OPT_EXPERT, {(void*)opt_thread_count}, "thread count", "count" },
3041     { "autoexit", OPT_BOOL | OPT_EXPERT, {(void*)&autoexit}, "exit at the end", "" },
3042     { "exitonkeydown", OPT_BOOL | OPT_EXPERT, {(void*)&exit_on_keydown}, "exit on key down", "" },
3043     { "exitonmousedown", OPT_BOOL | OPT_EXPERT, {(void*)&exit_on_mousedown}, "exit on mouse down", "" },
3044     { "loop", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&loop}, "set number of times the playback shall be looped", "loop count" },
3045     { "framedrop", OPT_BOOL | OPT_EXPERT, {(void*)&framedrop}, "drop frames when cpu is too slow", "" },
3046     { "window_title", OPT_STRING | HAS_ARG, {(void*)&window_title}, "set window title", "window title" },
3047 #if CONFIG_AVFILTER
3048     { "vf", OPT_STRING | HAS_ARG, {(void*)&vfilters}, "video filters", "filter list" },
3049 #endif
3050     { "rdftspeed", OPT_INT | HAS_ARG| OPT_AUDIO | OPT_EXPERT, {(void*)&rdftspeed}, "rdft speed", "msecs" },
3051     { "default", OPT_FUNC2 | HAS_ARG | OPT_AUDIO | OPT_VIDEO | OPT_EXPERT, {(void*)opt_default}, "generic catch all option", "" },
3052     { NULL, },
3053 };
3054
/* Print the one-line program description and invocation synopsis. */
static void show_usage(void)
{
    printf("Simple media player\n"
           "usage: ffplay [options] input_file\n"
           "\n");
}
3061
/* Print the full -h help: option tables, per-library AVOption help and
 * the interactive key bindings.  Output ordering depends on the exact
 * sequence of printf and av_log-callback writes, so the call order here
 * must not be rearranged. */
static void show_help(void)
{
    av_log_set_callback(log_callback_help);
    show_usage();
    show_help_options(options, "Main options:\n",
                      OPT_EXPERT, 0);
    show_help_options(options, "\nAdvanced options:\n",
                      OPT_EXPERT, OPT_EXPERT);
    printf("\n");
    /* AVOptions understood by the codec layer */
    av_opt_show2(avcodec_opts[0], NULL,
                 AV_OPT_FLAG_DECODING_PARAM, 0);
    printf("\n");
    /* AVOptions understood by the format layer */
    av_opt_show2(avformat_opts, NULL,
                 AV_OPT_FLAG_DECODING_PARAM, 0);
#if !CONFIG_AVFILTER
    printf("\n");
    /* scaler options only apply when not using libavfilter */
    av_opt_show2(sws_opts, NULL,
                 AV_OPT_FLAG_ENCODING_PARAM, 0);
#endif
    printf("\nWhile playing:\n"
           "q, ESC              quit\n"
           "f                   toggle full screen\n"
           "p, SPC              pause\n"
           "a                   cycle audio channel\n"
           "v                   cycle video channel\n"
           "t                   cycle subtitle channel\n"
           "w                   show audio waves\n"
           "s                   activate frame-step mode\n"
           "left/right          seek backward/forward 10 seconds\n"
           "down/up             seek backward/forward 1 minute\n"
           "mouse click         seek to percentage in file corresponding to fraction of width\n"
           );
}
3095
3096 static void opt_input_file(const char *filename)
3097 {
3098     if (input_filename) {
3099         fprintf(stderr, "Argument '%s' provided as input filename, but '%s' was already specified.\n",
3100                 filename, input_filename);
3101         exit(1);
3102     }
3103     if (!strcmp(filename, "-"))
3104         filename = "pipe:";
3105     input_filename = filename;
3106 }
3107
3108 /* Called from the main */
3109 int main(int argc, char **argv)
3110 {
3111     int flags;
3112
3113     av_log_set_flags(AV_LOG_SKIP_REPEATED);
3114
3115     /* register all codecs, demux and protocols */
3116     avcodec_register_all();
3117 #if CONFIG_AVDEVICE
3118     avdevice_register_all();
3119 #endif
3120 #if CONFIG_AVFILTER
3121     avfilter_register_all();
3122 #endif
3123     av_register_all();
3124
3125     init_opts();
3126
3127     show_banner();
3128
3129     parse_options(argc, argv, options, opt_input_file);
3130
3131     if (!input_filename) {
3132         show_usage();
3133         fprintf(stderr, "An input file must be specified\n");
3134         fprintf(stderr, "Use -h to get full help or, even better, run 'man ffplay'\n");
3135         exit(1);
3136     }
3137
3138     if (display_disable) {
3139         video_disable = 1;
3140     }
3141     flags = SDL_INIT_VIDEO | SDL_INIT_AUDIO | SDL_INIT_TIMER;
3142 #if !defined(__MINGW32__) && !defined(__APPLE__)
3143     flags |= SDL_INIT_EVENTTHREAD; /* Not supported on Windows or Mac OS X */
3144 #endif
3145     if (SDL_Init (flags)) {
3146         fprintf(stderr, "Could not initialize SDL - %s\n", SDL_GetError());
3147         exit(1);
3148     }
3149
3150     if (!display_disable) {
3151 #if HAVE_SDL_VIDEO_SIZE
3152         const SDL_VideoInfo *vi = SDL_GetVideoInfo();
3153         fs_screen_width = vi->current_w;
3154         fs_screen_height = vi->current_h;
3155 #endif
3156     }
3157
3158     SDL_EventState(SDL_ACTIVEEVENT, SDL_IGNORE);
3159     SDL_EventState(SDL_SYSWMEVENT, SDL_IGNORE);
3160     SDL_EventState(SDL_USEREVENT, SDL_IGNORE);
3161
3162     av_init_packet(&flush_pkt);
3163     flush_pkt.data= "FLUSH";
3164
3165     cur_stream = stream_open(input_filename, file_iformat);
3166
3167     event_loop();
3168
3169     /* never returns */
3170
3171     return 0;
3172 }