]> git.sesse.net Git - ffmpeg/blob - ffplay.c
Add my GPG fingerprint.
[ffmpeg] / ffplay.c
1 /*
2  * FFplay : Simple Media Player based on the FFmpeg libraries
3  * Copyright (c) 2003 Fabrice Bellard
4  *
5  * This file is part of FFmpeg.
6  *
7  * FFmpeg is free software; you can redistribute it and/or
8  * modify it under the terms of the GNU Lesser General Public
9  * License as published by the Free Software Foundation; either
10  * version 2.1 of the License, or (at your option) any later version.
11  *
12  * FFmpeg is distributed in the hope that it will be useful,
13  * but WITHOUT ANY WARRANTY; without even the implied warranty of
14  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
15  * Lesser General Public License for more details.
16  *
17  * You should have received a copy of the GNU Lesser General Public
18  * License along with FFmpeg; if not, write to the Free Software
19  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
20  */
21
22 #define _XOPEN_SOURCE 600
23
24 #include "config.h"
25 #include <inttypes.h>
26 #include <math.h>
27 #include <limits.h>
28 #include "libavutil/avstring.h"
29 #include "libavutil/colorspace.h"
30 #include "libavutil/pixdesc.h"
31 #include "libavcore/parseutils.h"
32 #include "libavformat/avformat.h"
33 #include "libavdevice/avdevice.h"
34 #include "libswscale/swscale.h"
35 #include "libavcodec/audioconvert.h"
36 #include "libavcodec/opt.h"
37 #include "libavcodec/avfft.h"
38
39 #if CONFIG_AVFILTER
40 # include "libavfilter/avfilter.h"
41 # include "libavfilter/avfiltergraph.h"
42 # include "libavfilter/graphparser.h"
43 #endif
44
45 #include "cmdutils.h"
46
47 #include <SDL.h>
48 #include <SDL_thread.h>
49
50 #ifdef __MINGW32__
51 #undef main /* We don't want SDL to override our main() */
52 #endif
53
54 #include <unistd.h>
55 #include <assert.h>
56
const char program_name[] = "FFplay";
const int program_birth_year = 2003;

//#define DEBUG_SYNC

/* stop demuxing when the sum of all packet queues exceeds this many bytes */
#define MAX_QUEUE_SIZE (15 * 1024 * 1024)
/* keep at least this many bytes queued for audio before considering it full enough */
#define MIN_AUDIOQ_SIZE (20 * 16 * 1024)
/* minimum number of queued frames before a stream is considered "fed" */
#define MIN_FRAMES 5

/* SDL audio buffer size, in samples. Should be small to have precise
   A/V sync as SDL does not have hardware buffer fullness info. */
#define SDL_AUDIO_BUFFER_SIZE 1024

/* no AV sync correction is done if below the AV sync threshold */
#define AV_SYNC_THRESHOLD 0.01
/* no AV correction is done if too big error */
#define AV_NOSYNC_THRESHOLD 10.0

/* weight of the measured lateness when updating the frame-skip estimate */
#define FRAME_SKIP_FACTOR 0.05

/* maximum audio speed change to get correct sync */
#define SAMPLE_CORRECTION_PERCENT_MAX 10

/* we use about AUDIO_DIFF_AVG_NB A-V differences to make the average */
#define AUDIO_DIFF_AVG_NB   20

/* NOTE: the size must be big enough to compensate the hardware audio buffersize size */
#define SAMPLE_ARRAY_SIZE (2*65536)

/* scaler flags used for software pixel-format conversion */
static int sws_flags = SWS_BICUBIC;
87
/* Thread-safe FIFO of demuxed AVPackets, shared between the demuxer
   thread (producer) and a decoder thread (consumer). */
typedef struct PacketQueue {
    AVPacketList *first_pkt, *last_pkt; /* head/tail of singly linked list */
    int nb_packets;      /* number of packets currently queued */
    int size;            /* payload bytes plus list-node overhead */
    int abort_request;   /* set by packet_queue_abort() to unblock readers */
    SDL_mutex *mutex;    /* protects all fields above */
    SDL_cond *cond;      /* signalled when a packet arrives or on abort */
} PacketQueue;
96
/* depth of the decoded-picture and decoded-subtitle ring buffers */
#define VIDEO_PICTURE_QUEUE_SIZE 2
#define SUBPICTURE_QUEUE_SIZE 4

/* One slot of the decoded-picture ring buffer (VideoState.pictq). */
typedef struct VideoPicture {
    double pts;                                  ///<presentation time stamp for this picture
    double target_clock;                         ///<av_gettime() time at which this should be displayed ideally
    int64_t pos;                                 ///<byte position in file
    SDL_Overlay *bmp;                            ///<SDL YUV overlay holding the frame pixels
    int width, height; /* source height & width */
    int allocated;                               ///<true once bmp has been (re)allocated for this size
    enum PixelFormat pix_fmt;                    ///<pixel format of the source frame

#if CONFIG_AVFILTER
    AVFilterBufferRef *picref;                   ///<reference to the filtered frame (avfilter builds only)
#endif
} VideoPicture;
113
/* One decoded subtitle together with its display timestamp. */
typedef struct SubPicture {
    double pts; /* presentation time stamp for this picture */
    AVSubtitle sub; /* decoded subtitle payload (rects, timing) */
} SubPicture;
118
/* which clock drives A/V synchronization */
enum {
    AV_SYNC_AUDIO_MASTER, /* default choice */
    AV_SYNC_VIDEO_MASTER,
    AV_SYNC_EXTERNAL_CLOCK, /* synchronize to an external clock */
};
124
/* Complete playback state for one open media file: demuxer context,
   per-stream decoder state, packet/picture/subtitle queues, and the
   clocks used for A/V synchronization. One instance per played file. */
typedef struct VideoState {
    SDL_Thread *parse_tid;     ///<demux/read thread handle (presumably; creator not visible in this chunk)
    SDL_Thread *video_tid;     ///<video decode thread handle
    SDL_Thread *refresh_tid;   ///<thread pushing FF_REFRESH_EVENTs (see refresh_thread)
    AVInputFormat *iformat;    ///<forced input format, if any
    int no_background;         ///<when false, video_image_display() may repaint the border
    int abort_request;         ///<set to make all threads exit
    int paused;                ///<playback currently paused
    int last_paused;           ///<previous value of 'paused', to detect transitions
    int seek_req;              ///<a seek has been requested
    int seek_flags;            ///<AVSEEK_FLAG_* for the pending seek
    int64_t seek_pos;          ///<seek target position
    int64_t seek_rel;          ///<relative seek distance
    int read_pause_return;     ///<return value of av_read_pause(), presumably - not used in this chunk
    AVFormatContext *ic;       ///<demuxer context
    int dtg_active_format;     ///<last seen DTG active-format descriptor (display code is #if 0'd)

    int audio_stream;          ///<index of the selected audio stream

    int av_sync_type;          ///<AV_SYNC_* master-clock selection
    double external_clock; /* external clock base */
    int64_t external_clock_time;

    double audio_clock;        ///<pts of the last consumed audio sample
    double audio_diff_cum; /* used for AV difference average computation */
    double audio_diff_avg_coef;
    double audio_diff_threshold;
    int audio_diff_avg_count;
    AVStream *audio_st;        ///<selected audio stream (NULL if none)
    PacketQueue audioq;        ///<demuxed audio packets
    int audio_hw_buf_size;     ///<size of the SDL hardware audio buffer, in bytes
    /* samples output by the codec. we reserve more space for avsync
       compensation */
    DECLARE_ALIGNED(16,uint8_t,audio_buf1)[(AVCODEC_MAX_AUDIO_FRAME_SIZE * 3) / 2];
    DECLARE_ALIGNED(16,uint8_t,audio_buf2)[(AVCODEC_MAX_AUDIO_FRAME_SIZE * 3) / 2];
    uint8_t *audio_buf;        ///<points at audio_buf1 or audio_buf2, whichever holds current data
    unsigned int audio_buf_size; /* in bytes */
    int audio_buf_index; /* in bytes */
    AVPacket audio_pkt_temp;   ///<shrinking view of audio_pkt while decoding
    AVPacket audio_pkt;        ///<packet currently being decoded
    enum SampleFormat audio_src_fmt; ///<sample format delivered by the decoder
    AVAudioConvert *reformat_ctx;    ///<converter to the output sample format

    int show_audio; /* if true, display audio samples */
    int16_t sample_array[SAMPLE_ARRAY_SIZE]; ///<ring buffer of recent samples for visualization
    int sample_array_index;    ///<write index into sample_array
    int last_i_start;          ///<display start index remembered while paused
    RDFTContext *rdft;         ///<FFT context for the spectrogram display
    int rdft_bits;             ///<log2 of current RDFT size
    FFTSample *rdft_data;      ///<scratch buffer for RDFT input/output
    int xpos;                  ///<current column of the scrolling spectrogram

    SDL_Thread *subtitle_tid;  ///<subtitle decode thread handle
    int subtitle_stream;       ///<index of the selected subtitle stream
    int subtitle_stream_changed;
    AVStream *subtitle_st;     ///<selected subtitle stream (NULL if none)
    PacketQueue subtitleq;     ///<demuxed subtitle packets
    SubPicture subpq[SUBPICTURE_QUEUE_SIZE]; ///<decoded subtitle ring buffer
    int subpq_size, subpq_rindex, subpq_windex; ///<fill count, read and write indices
    SDL_mutex *subpq_mutex;    ///<protects the subtitle ring buffer
    SDL_cond *subpq_cond;

    double frame_timer;        ///<time at which the next frame should be shown
    double frame_last_pts;
    double frame_last_delay;
    double video_clock;                          ///<pts of last decoded frame / predicted pts of next decoded frame
    int video_stream;          ///<index of the selected video stream
    AVStream *video_st;        ///<selected video stream (NULL if none)
    PacketQueue videoq;        ///<demuxed video packets
    double video_current_pts;                    ///<current displayed pts (different from video_clock if frame fifos are used)
    double video_current_pts_drift;              ///<video_current_pts - time (av_gettime) at which we updated video_current_pts - used to have running video pts
    int64_t video_current_pos;                   ///<current displayed file pos
    VideoPicture pictq[VIDEO_PICTURE_QUEUE_SIZE]; ///<decoded picture ring buffer
    int pictq_size, pictq_rindex, pictq_windex;   ///<fill count, read and write indices
    SDL_mutex *pictq_mutex;    ///<protects the picture ring buffer
    SDL_cond *pictq_cond;
#if !CONFIG_AVFILTER
    struct SwsContext *img_convert_ctx; ///<scaler used when avfilter is not compiled in
#endif

    //    QETimer *video_timer;
    char filename[1024];       ///<path of the open file
    int width, height, xleft, ytop; ///<current display rectangle within the SDL surface

    /* counters used to detect streams with broken pts/dts */
    int64_t faulty_pts;
    int64_t faulty_dts;
    int64_t last_dts_for_fault_detection;
    int64_t last_pts_for_fault_detection;

#if CONFIG_AVFILTER
    AVFilterContext *out_video_filter;          ///<the last filter in the video chain
#endif

    float skip_frames;         ///<frame-skip ratio estimate used when late
    float skip_frames_index;
    int refresh;               ///<a refresh event is already pending (see refresh_thread)
} VideoState;
222
static void show_help(void);
static int audio_write_get_buf_size(VideoState *is);

/* options specified by the user */
static AVInputFormat *file_iformat;          /* -f: forced input format */
static const char *input_filename;
static const char *window_title;
static int fs_screen_width;                  /* full-screen size, 0 = autodetect */
static int fs_screen_height;
static int screen_width = 0;                 /* windowed size, 0 = use stream size */
static int screen_height = 0;
static int frame_width = 0;
static int frame_height = 0;
static enum PixelFormat frame_pix_fmt = PIX_FMT_NONE;
static int audio_disable;
static int video_disable;
/* per-media-type stream index requested by the user; -1 = automatic */
static int wanted_stream[AVMEDIA_TYPE_NB]={
    [AVMEDIA_TYPE_AUDIO]=-1,
    [AVMEDIA_TYPE_VIDEO]=-1,
    [AVMEDIA_TYPE_SUBTITLE]=-1,
};
static int seek_by_bytes=-1;                 /* -1 = decide from the container */
static int display_disable;
static int show_status = 1;
static int av_sync_type = AV_SYNC_AUDIO_MASTER;
static int64_t start_time = AV_NOPTS_VALUE;
static int64_t duration = AV_NOPTS_VALUE;
static int debug = 0;
static int debug_mv = 0;
static int step = 0;                         /* single-frame stepping mode */
static int thread_count = 1;
static int workaround_bugs = 1;
static int fast = 0;                         /* enable non-compliant speedups */
static int genpts = 0;
static int lowres = 0;
static int idct = FF_IDCT_AUTO;
static enum AVDiscard skip_frame= AVDISCARD_DEFAULT;
static enum AVDiscard skip_idct= AVDISCARD_DEFAULT;
static enum AVDiscard skip_loop_filter= AVDISCARD_DEFAULT;
static int error_recognition = FF_ER_CAREFUL;
static int error_concealment = 3;
static int decoder_reorder_pts= -1;
static int autoexit;                         /* exit when playback ends */
static int exit_on_keydown;
static int exit_on_mousedown;
static int loop=1;
static int framedrop=1;

static int rdftspeed=20;                     /* spectrogram refresh interval, ms */
#if CONFIG_AVFILTER
static char *vfilters = NULL;                /* -vf: video filter graph description */
#endif

/* current context */
static int is_full_screen;
static VideoState *cur_stream;
static int64_t audio_callback_time;          /* av_gettime() at last SDL audio callback */

/* sentinel packet: flushes decoder state when dequeued (e.g. after seek) */
static AVPacket flush_pkt;

/* custom SDL user events */
#define FF_ALLOC_EVENT   (SDL_USEREVENT)
#define FF_REFRESH_EVENT (SDL_USEREVENT + 1)
#define FF_QUIT_EVENT    (SDL_USEREVENT + 2)

static SDL_Surface *screen;

static int packet_queue_put(PacketQueue *q, AVPacket *pkt);
290
291 /* packet queue handling */
292 static void packet_queue_init(PacketQueue *q)
293 {
294     memset(q, 0, sizeof(PacketQueue));
295     q->mutex = SDL_CreateMutex();
296     q->cond = SDL_CreateCond();
297     packet_queue_put(q, &flush_pkt);
298 }
299
300 static void packet_queue_flush(PacketQueue *q)
301 {
302     AVPacketList *pkt, *pkt1;
303
304     SDL_LockMutex(q->mutex);
305     for(pkt = q->first_pkt; pkt != NULL; pkt = pkt1) {
306         pkt1 = pkt->next;
307         av_free_packet(&pkt->pkt);
308         av_freep(&pkt);
309     }
310     q->last_pkt = NULL;
311     q->first_pkt = NULL;
312     q->nb_packets = 0;
313     q->size = 0;
314     SDL_UnlockMutex(q->mutex);
315 }
316
/* Tear down a packet queue: release all queued packets, then destroy
   its mutex and condition variable. The queue must not be used after. */
static void packet_queue_end(PacketQueue *q)
{
    packet_queue_flush(q);
    SDL_DestroyMutex(q->mutex);
    SDL_DestroyCond(q->cond);
}
323
324 static int packet_queue_put(PacketQueue *q, AVPacket *pkt)
325 {
326     AVPacketList *pkt1;
327
328     /* duplicate the packet */
329     if (pkt!=&flush_pkt && av_dup_packet(pkt) < 0)
330         return -1;
331
332     pkt1 = av_malloc(sizeof(AVPacketList));
333     if (!pkt1)
334         return -1;
335     pkt1->pkt = *pkt;
336     pkt1->next = NULL;
337
338
339     SDL_LockMutex(q->mutex);
340
341     if (!q->last_pkt)
342
343         q->first_pkt = pkt1;
344     else
345         q->last_pkt->next = pkt1;
346     q->last_pkt = pkt1;
347     q->nb_packets++;
348     q->size += pkt1->pkt.size + sizeof(*pkt1);
349     /* XXX: should duplicate packet data in DV case */
350     SDL_CondSignal(q->cond);
351
352     SDL_UnlockMutex(q->mutex);
353     return 0;
354 }
355
/* Request queue shutdown: set the abort flag and wake a blocked reader
   so packet_queue_get() can observe the flag and return -1. */
static void packet_queue_abort(PacketQueue *q)
{
    SDL_LockMutex(q->mutex);

    q->abort_request = 1;

    SDL_CondSignal(q->cond);

    SDL_UnlockMutex(q->mutex);
}
366
367 /* return < 0 if aborted, 0 if no packet and > 0 if packet.  */
368 static int packet_queue_get(PacketQueue *q, AVPacket *pkt, int block)
369 {
370     AVPacketList *pkt1;
371     int ret;
372
373     SDL_LockMutex(q->mutex);
374
375     for(;;) {
376         if (q->abort_request) {
377             ret = -1;
378             break;
379         }
380
381         pkt1 = q->first_pkt;
382         if (pkt1) {
383             q->first_pkt = pkt1->next;
384             if (!q->first_pkt)
385                 q->last_pkt = NULL;
386             q->nb_packets--;
387             q->size -= pkt1->pkt.size + sizeof(*pkt1);
388             *pkt = pkt1->pkt;
389             av_free(pkt1);
390             ret = 1;
391             break;
392         } else if (!block) {
393             ret = 0;
394             break;
395         } else {
396             SDL_CondWait(q->cond, q->mutex);
397         }
398     }
399     SDL_UnlockMutex(q->mutex);
400     return ret;
401 }
402
403 static inline void fill_rectangle(SDL_Surface *screen,
404                                   int x, int y, int w, int h, int color)
405 {
406     SDL_Rect rect;
407     rect.x = x;
408     rect.y = y;
409     rect.w = w;
410     rect.h = h;
411     SDL_FillRect(screen, &rect, color);
412 }
413
#if 0
/* draw only the border of a rectangle */
/* NOTE: currently compiled out; kept for reference. Paints the four
   background strips surrounding the centered (x,y,w,h) video rectangle
   inside the player's display area. */
void fill_border(VideoState *s, int x, int y, int w, int h, int color)
{
    int w1, w2, h1, h2;

    /* fill the background */
    /* widths of the left/right strips, heights of the top/bottom strips,
       clamped to zero when the video touches that edge */
    w1 = x;
    if (w1 < 0)
        w1 = 0;
    w2 = s->width - (x + w);
    if (w2 < 0)
        w2 = 0;
    h1 = y;
    if (h1 < 0)
        h1 = 0;
    h2 = s->height - (y + h);
    if (h2 < 0)
        h2 = 0;
    fill_rectangle(screen,
                   s->xleft, s->ytop,
                   w1, s->height,
                   color);
    fill_rectangle(screen,
                   s->xleft + s->width - w2, s->ytop,
                   w2, s->height,
                   color);
    fill_rectangle(screen,
                   s->xleft + w1, s->ytop,
                   s->width - w1 - w2, h1,
                   color);
    fill_rectangle(screen,
                   s->xleft + w1, s->ytop + s->height - h2,
                   s->width - w1 - w2, h2,
                   color);
}
#endif
451
/* Blend 'newp' over 'oldp' with alpha 'a' (0..255). 's' is an extra
   precision shift used when several samples have been accumulated into
   'newp' (s=1 for 2 samples, s=2 for 4). */
#define ALPHA_BLEND(a, oldp, newp, s)\
((((oldp << s) * (255 - (a))) + (newp * (a))) / (255 << s))

/* Unpack a 32-bit ARGB word at 's' into separate r/g/b/a components. */
#define RGBA_IN(r, g, b, a, s)\
{\
    unsigned int v = ((const uint32_t *)(s))[0];\
    a = (v >> 24) & 0xff;\
    r = (v >> 16) & 0xff;\
    g = (v >> 8) & 0xff;\
    b = v & 0xff;\
}

/* Look up the palette entry for the PAL8 pixel at 's' and unpack it
   into y/u/v/a (the palette has been converted to YCrCb, see caller). */
#define YUVA_IN(y, u, v, a, s, pal)\
{\
    unsigned int val = ((const uint32_t *)(pal))[*(const uint8_t*)(s)];\
    a = (val >> 24) & 0xff;\
    y = (val >> 16) & 0xff;\
    u = (val >> 8) & 0xff;\
    v = val & 0xff;\
}

/* Pack y/u/v/a components back into a 32-bit word at 'd'. */
#define YUVA_OUT(d, y, u, v, a)\
{\
    ((uint32_t *)(d))[0] = (a << 24) | (y << 16) | (u << 8) | v;\
}


/* bytes per palettized subtitle pixel (PAL8: one index byte) */
#define BPP 1
480
481 static void blend_subrect(AVPicture *dst, const AVSubtitleRect *rect, int imgw, int imgh)
482 {
483     int wrap, wrap3, width2, skip2;
484     int y, u, v, a, u1, v1, a1, w, h;
485     uint8_t *lum, *cb, *cr;
486     const uint8_t *p;
487     const uint32_t *pal;
488     int dstx, dsty, dstw, dsth;
489
490     dstw = av_clip(rect->w, 0, imgw);
491     dsth = av_clip(rect->h, 0, imgh);
492     dstx = av_clip(rect->x, 0, imgw - dstw);
493     dsty = av_clip(rect->y, 0, imgh - dsth);
494     lum = dst->data[0] + dsty * dst->linesize[0];
495     cb = dst->data[1] + (dsty >> 1) * dst->linesize[1];
496     cr = dst->data[2] + (dsty >> 1) * dst->linesize[2];
497
498     width2 = ((dstw + 1) >> 1) + (dstx & ~dstw & 1);
499     skip2 = dstx >> 1;
500     wrap = dst->linesize[0];
501     wrap3 = rect->pict.linesize[0];
502     p = rect->pict.data[0];
503     pal = (const uint32_t *)rect->pict.data[1];  /* Now in YCrCb! */
504
505     if (dsty & 1) {
506         lum += dstx;
507         cb += skip2;
508         cr += skip2;
509
510         if (dstx & 1) {
511             YUVA_IN(y, u, v, a, p, pal);
512             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
513             cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
514             cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
515             cb++;
516             cr++;
517             lum++;
518             p += BPP;
519         }
520         for(w = dstw - (dstx & 1); w >= 2; w -= 2) {
521             YUVA_IN(y, u, v, a, p, pal);
522             u1 = u;
523             v1 = v;
524             a1 = a;
525             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
526
527             YUVA_IN(y, u, v, a, p + BPP, pal);
528             u1 += u;
529             v1 += v;
530             a1 += a;
531             lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
532             cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
533             cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
534             cb++;
535             cr++;
536             p += 2 * BPP;
537             lum += 2;
538         }
539         if (w) {
540             YUVA_IN(y, u, v, a, p, pal);
541             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
542             cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
543             cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
544             p++;
545             lum++;
546         }
547         p += wrap3 - dstw * BPP;
548         lum += wrap - dstw - dstx;
549         cb += dst->linesize[1] - width2 - skip2;
550         cr += dst->linesize[2] - width2 - skip2;
551     }
552     for(h = dsth - (dsty & 1); h >= 2; h -= 2) {
553         lum += dstx;
554         cb += skip2;
555         cr += skip2;
556
557         if (dstx & 1) {
558             YUVA_IN(y, u, v, a, p, pal);
559             u1 = u;
560             v1 = v;
561             a1 = a;
562             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
563             p += wrap3;
564             lum += wrap;
565             YUVA_IN(y, u, v, a, p, pal);
566             u1 += u;
567             v1 += v;
568             a1 += a;
569             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
570             cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
571             cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
572             cb++;
573             cr++;
574             p += -wrap3 + BPP;
575             lum += -wrap + 1;
576         }
577         for(w = dstw - (dstx & 1); w >= 2; w -= 2) {
578             YUVA_IN(y, u, v, a, p, pal);
579             u1 = u;
580             v1 = v;
581             a1 = a;
582             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
583
584             YUVA_IN(y, u, v, a, p + BPP, pal);
585             u1 += u;
586             v1 += v;
587             a1 += a;
588             lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
589             p += wrap3;
590             lum += wrap;
591
592             YUVA_IN(y, u, v, a, p, pal);
593             u1 += u;
594             v1 += v;
595             a1 += a;
596             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
597
598             YUVA_IN(y, u, v, a, p + BPP, pal);
599             u1 += u;
600             v1 += v;
601             a1 += a;
602             lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
603
604             cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 2);
605             cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 2);
606
607             cb++;
608             cr++;
609             p += -wrap3 + 2 * BPP;
610             lum += -wrap + 2;
611         }
612         if (w) {
613             YUVA_IN(y, u, v, a, p, pal);
614             u1 = u;
615             v1 = v;
616             a1 = a;
617             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
618             p += wrap3;
619             lum += wrap;
620             YUVA_IN(y, u, v, a, p, pal);
621             u1 += u;
622             v1 += v;
623             a1 += a;
624             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
625             cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
626             cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
627             cb++;
628             cr++;
629             p += -wrap3 + BPP;
630             lum += -wrap + 1;
631         }
632         p += wrap3 + (wrap3 - dstw * BPP);
633         lum += wrap + (wrap - dstw - dstx);
634         cb += dst->linesize[1] - width2 - skip2;
635         cr += dst->linesize[2] - width2 - skip2;
636     }
637     /* handle odd height */
638     if (h) {
639         lum += dstx;
640         cb += skip2;
641         cr += skip2;
642
643         if (dstx & 1) {
644             YUVA_IN(y, u, v, a, p, pal);
645             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
646             cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
647             cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
648             cb++;
649             cr++;
650             lum++;
651             p += BPP;
652         }
653         for(w = dstw - (dstx & 1); w >= 2; w -= 2) {
654             YUVA_IN(y, u, v, a, p, pal);
655             u1 = u;
656             v1 = v;
657             a1 = a;
658             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
659
660             YUVA_IN(y, u, v, a, p + BPP, pal);
661             u1 += u;
662             v1 += v;
663             a1 += a;
664             lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
665             cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u, 1);
666             cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v, 1);
667             cb++;
668             cr++;
669             p += 2 * BPP;
670             lum += 2;
671         }
672         if (w) {
673             YUVA_IN(y, u, v, a, p, pal);
674             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
675             cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
676             cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
677         }
678     }
679 }
680
/* Release the decoded subtitle payload held by a SubPicture slot. */
static void free_subpicture(SubPicture *sp)
{
    avsubtitle_free(&sp->sub);
}
685
/* Draw the picture at the read index of the picture queue: compute its
   display aspect ratio, blend any due subtitle rectangles into the YUV
   overlay, letterbox the result into the window, and show it. */
static void video_image_display(VideoState *is)
{
    VideoPicture *vp;
    SubPicture *sp;
    AVPicture pict;
    float aspect_ratio;
    int width, height, x, y;
    SDL_Rect rect;
    int i;

    vp = &is->pictq[is->pictq_rindex];
    if (vp->bmp) {
#if CONFIG_AVFILTER
         if (vp->picref->video->pixel_aspect.num == 0)
             aspect_ratio = 0;
         else
             aspect_ratio = av_q2d(vp->picref->video->pixel_aspect);
#else

        /* XXX: use variable in the frame */
        /* prefer the container's sample aspect ratio over the codec's */
        if (is->video_st->sample_aspect_ratio.num)
            aspect_ratio = av_q2d(is->video_st->sample_aspect_ratio);
        else if (is->video_st->codec->sample_aspect_ratio.num)
            aspect_ratio = av_q2d(is->video_st->codec->sample_aspect_ratio);
        else
            aspect_ratio = 0;
#endif
        /* unknown/invalid SAR: assume square pixels */
        if (aspect_ratio <= 0.0)
            aspect_ratio = 1.0;
        aspect_ratio *= (float)vp->width / (float)vp->height;
        /* if an active format is indicated, then it overrides the
           mpeg format */
#if 0
        if (is->video_st->codec->dtg_active_format != is->dtg_active_format) {
            is->dtg_active_format = is->video_st->codec->dtg_active_format;
            printf("dtg_active_format=%d\n", is->dtg_active_format);
        }
#endif
#if 0
        switch(is->video_st->codec->dtg_active_format) {
        case FF_DTG_AFD_SAME:
        default:
            /* nothing to do */
            break;
        case FF_DTG_AFD_4_3:
            aspect_ratio = 4.0 / 3.0;
            break;
        case FF_DTG_AFD_16_9:
            aspect_ratio = 16.0 / 9.0;
            break;
        case FF_DTG_AFD_14_9:
            aspect_ratio = 14.0 / 9.0;
            break;
        case FF_DTG_AFD_4_3_SP_14_9:
            aspect_ratio = 14.0 / 9.0;
            break;
        case FF_DTG_AFD_16_9_SP_14_9:
            aspect_ratio = 14.0 / 9.0;
            break;
        case FF_DTG_AFD_SP_4_3:
            aspect_ratio = 4.0 / 3.0;
            break;
        }
#endif

        if (is->subtitle_st)
        {
            if (is->subpq_size > 0)
            {
                sp = &is->subpq[is->subpq_rindex];

                /* blend only once the subtitle's display window has started */
                if (vp->pts >= sp->pts + ((float) sp->sub.start_display_time / 1000))
                {
                    SDL_LockYUVOverlay (vp->bmp);

                    /* SDL YV12 overlays store planes as Y, V, U, hence
                       the 1<->2 swap when mapping to AVPicture's Y,U,V */
                    pict.data[0] = vp->bmp->pixels[0];
                    pict.data[1] = vp->bmp->pixels[2];
                    pict.data[2] = vp->bmp->pixels[1];

                    pict.linesize[0] = vp->bmp->pitches[0];
                    pict.linesize[1] = vp->bmp->pitches[2];
                    pict.linesize[2] = vp->bmp->pitches[1];

                    for (i = 0; i < sp->sub.num_rects; i++)
                        blend_subrect(&pict, sp->sub.rects[i],
                                      vp->bmp->w, vp->bmp->h);

                    SDL_UnlockYUVOverlay (vp->bmp);
                }
            }
        }


        /* XXX: we suppose the screen has a 1.0 pixel ratio */
        /* letterbox: fit the aspect-corrected picture into the window,
           keeping even dimensions for the YUV overlay */
        height = is->height;
        width = ((int)rint(height * aspect_ratio)) & ~1;
        if (width > is->width) {
            width = is->width;
            height = ((int)rint(width / aspect_ratio)) & ~1;
        }
        x = (is->width - width) / 2;
        y = (is->height - height) / 2;
        if (!is->no_background) {
            /* fill the background */
            //            fill_border(is, x, y, width, height, QERGB(0x00, 0x00, 0x00));
        } else {
            is->no_background = 0;
        }
        rect.x = is->xleft + x;
        rect.y = is->ytop  + y;
        rect.w = width;
        rect.h = height;
        SDL_DisplayYUVOverlay(vp->bmp, &rect);
    } else {
#if 0
        fill_rectangle(screen,
                       is->xleft, is->ytop, is->width, is->height,
                       QERGB(0x00, 0x00, 0x00));
#endif
    }
}
807
/* Mathematical modulo: map 'a' into [0, b) for positive 'b'. C's '%'
   yields a negative remainder for negative 'a', so shift those up by b. */
static inline int compute_mod(int a, int b)
{
    int r = a % b;
    return (r >= 0) ? r : r + b;
}
816
817 static void video_audio_display(VideoState *s)
818 {
819     int i, i_start, x, y1, y, ys, delay, n, nb_display_channels;
820     int ch, channels, h, h2, bgcolor, fgcolor;
821     int16_t time_diff;
822     int rdft_bits, nb_freq;
823
824     for(rdft_bits=1; (1<<rdft_bits)<2*s->height; rdft_bits++)
825         ;
826     nb_freq= 1<<(rdft_bits-1);
827
828     /* compute display index : center on currently output samples */
829     channels = s->audio_st->codec->channels;
830     nb_display_channels = channels;
831     if (!s->paused) {
832         int data_used= s->show_audio==1 ? s->width : (2*nb_freq);
833         n = 2 * channels;
834         delay = audio_write_get_buf_size(s);
835         delay /= n;
836
837         /* to be more precise, we take into account the time spent since
838            the last buffer computation */
839         if (audio_callback_time) {
840             time_diff = av_gettime() - audio_callback_time;
841             delay -= (time_diff * s->audio_st->codec->sample_rate) / 1000000;
842         }
843
844         delay += 2*data_used;
845         if (delay < data_used)
846             delay = data_used;
847
848         i_start= x = compute_mod(s->sample_array_index - delay * channels, SAMPLE_ARRAY_SIZE);
849         if(s->show_audio==1){
850             h= INT_MIN;
851             for(i=0; i<1000; i+=channels){
852                 int idx= (SAMPLE_ARRAY_SIZE + x - i) % SAMPLE_ARRAY_SIZE;
853                 int a= s->sample_array[idx];
854                 int b= s->sample_array[(idx + 4*channels)%SAMPLE_ARRAY_SIZE];
855                 int c= s->sample_array[(idx + 5*channels)%SAMPLE_ARRAY_SIZE];
856                 int d= s->sample_array[(idx + 9*channels)%SAMPLE_ARRAY_SIZE];
857                 int score= a-d;
858                 if(h<score && (b^c)<0){
859                     h= score;
860                     i_start= idx;
861                 }
862             }
863         }
864
865         s->last_i_start = i_start;
866     } else {
867         i_start = s->last_i_start;
868     }
869
870     bgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0x00);
871     if(s->show_audio==1){
872         fill_rectangle(screen,
873                        s->xleft, s->ytop, s->width, s->height,
874                        bgcolor);
875
876         fgcolor = SDL_MapRGB(screen->format, 0xff, 0xff, 0xff);
877
878         /* total height for one channel */
879         h = s->height / nb_display_channels;
880         /* graph height / 2 */
881         h2 = (h * 9) / 20;
882         for(ch = 0;ch < nb_display_channels; ch++) {
883             i = i_start + ch;
884             y1 = s->ytop + ch * h + (h / 2); /* position of center line */
885             for(x = 0; x < s->width; x++) {
886                 y = (s->sample_array[i] * h2) >> 15;
887                 if (y < 0) {
888                     y = -y;
889                     ys = y1 - y;
890                 } else {
891                     ys = y1;
892                 }
893                 fill_rectangle(screen,
894                                s->xleft + x, ys, 1, y,
895                                fgcolor);
896                 i += channels;
897                 if (i >= SAMPLE_ARRAY_SIZE)
898                     i -= SAMPLE_ARRAY_SIZE;
899             }
900         }
901
902         fgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0xff);
903
904         for(ch = 1;ch < nb_display_channels; ch++) {
905             y = s->ytop + ch * h;
906             fill_rectangle(screen,
907                            s->xleft, y, s->width, 1,
908                            fgcolor);
909         }
910         SDL_UpdateRect(screen, s->xleft, s->ytop, s->width, s->height);
911     }else{
912         nb_display_channels= FFMIN(nb_display_channels, 2);
913         if(rdft_bits != s->rdft_bits){
914             av_rdft_end(s->rdft);
915             av_free(s->rdft_data);
916             s->rdft = av_rdft_init(rdft_bits, DFT_R2C);
917             s->rdft_bits= rdft_bits;
918             s->rdft_data= av_malloc(4*nb_freq*sizeof(*s->rdft_data));
919         }
920         {
921             FFTSample *data[2];
922             for(ch = 0;ch < nb_display_channels; ch++) {
923                 data[ch] = s->rdft_data + 2*nb_freq*ch;
924                 i = i_start + ch;
925                 for(x = 0; x < 2*nb_freq; x++) {
926                     double w= (x-nb_freq)*(1.0/nb_freq);
927                     data[ch][x]= s->sample_array[i]*(1.0-w*w);
928                     i += channels;
929                     if (i >= SAMPLE_ARRAY_SIZE)
930                         i -= SAMPLE_ARRAY_SIZE;
931                 }
932                 av_rdft_calc(s->rdft, data[ch]);
933             }
934             //least efficient way to do this, we should of course directly access it but its more than fast enough
935             for(y=0; y<s->height; y++){
936                 double w= 1/sqrt(nb_freq);
937                 int a= sqrt(w*sqrt(data[0][2*y+0]*data[0][2*y+0] + data[0][2*y+1]*data[0][2*y+1]));
938                 int b= (nb_display_channels == 2 ) ? sqrt(w*sqrt(data[1][2*y+0]*data[1][2*y+0]
939                        + data[1][2*y+1]*data[1][2*y+1])) : a;
940                 a= FFMIN(a,255);
941                 b= FFMIN(b,255);
942                 fgcolor = SDL_MapRGB(screen->format, a, b, (a+b)/2);
943
944                 fill_rectangle(screen,
945                             s->xpos, s->height-y, 1, 1,
946                             fgcolor);
947             }
948         }
949         SDL_UpdateRect(screen, s->xpos, s->ytop, 1, s->height);
950         s->xpos++;
951         if(s->xpos >= s->width)
952             s->xpos= s->xleft;
953     }
954 }
955
/* Create (or resize) the SDL output surface for this stream.
   Size preference: explicit full-screen/windowed user size, then the
   filter-output or codec dimensions, then a 640x480 fallback.
   Returns 0 on success (or when the surface already matches), -1 if
   SDL cannot set the video mode. */
static int video_open(VideoState *is){
    int flags = SDL_HWSURFACE|SDL_ASYNCBLIT|SDL_HWACCEL;
    int w,h;

    if(is_full_screen) flags |= SDL_FULLSCREEN;
    else               flags |= SDL_RESIZABLE;

    if (is_full_screen && fs_screen_width) {
        w = fs_screen_width;
        h = fs_screen_height;
    } else if(!is_full_screen && screen_width){
        w = screen_width;
        h = screen_height;
#if CONFIG_AVFILTER
    }else if (is->out_video_filter && is->out_video_filter->inputs[0]){
        w = is->out_video_filter->inputs[0]->w;
        h = is->out_video_filter->inputs[0]->h;
#else
    }else if (is->video_st && is->video_st->codec->width){
        w = is->video_st->codec->width;
        h = is->video_st->codec->height;
#endif
    } else {
        w = 640;
        h = 480;
    }
    /* nothing to do when the current surface already has this size */
    if(screen && is->width == screen->w && screen->w == w
       && is->height== screen->h && screen->h == h)
        return 0;

#ifndef __APPLE__
    screen = SDL_SetVideoMode(w, h, 0, flags);
#else
    /* setting bits_per_pixel = 0 or 32 causes blank video on OS X */
    screen = SDL_SetVideoMode(w, h, 24, flags);
#endif
    if (!screen) {
        fprintf(stderr, "SDL: could not set video mode - exiting\n");
        return -1;
    }
    if (!window_title)
        window_title = input_filename;
    SDL_WM_SetCaption(window_title, window_title);

    is->width = screen->w;
    is->height = screen->h;

    return 0;
}
1005
1006 /* display the current picture, if any */
1007 static void video_display(VideoState *is)
1008 {
1009     if(!screen)
1010         video_open(cur_stream);
1011     if (is->audio_st && is->show_audio)
1012         video_audio_display(is);
1013     else if (is->video_st)
1014         video_image_display(is);
1015 }
1016
1017 static int refresh_thread(void *opaque)
1018 {
1019     VideoState *is= opaque;
1020     while(!is->abort_request){
1021     SDL_Event event;
1022     event.type = FF_REFRESH_EVENT;
1023     event.user.data1 = opaque;
1024         if(!is->refresh){
1025             is->refresh=1;
1026     SDL_PushEvent(&event);
1027         }
1028         usleep(is->audio_st && is->show_audio ? rdftspeed*1000 : 5000); //FIXME ideally we should wait the correct time but SDLs event passing is so slow it would be silly
1029     }
1030     return 0;
1031 }
1032
1033 /* get the current audio clock value */
1034 static double get_audio_clock(VideoState *is)
1035 {
1036     double pts;
1037     int hw_buf_size, bytes_per_sec;
1038     pts = is->audio_clock;
1039     hw_buf_size = audio_write_get_buf_size(is);
1040     bytes_per_sec = 0;
1041     if (is->audio_st) {
1042         bytes_per_sec = is->audio_st->codec->sample_rate *
1043             2 * is->audio_st->codec->channels;
1044     }
1045     if (bytes_per_sec)
1046         pts -= (double)hw_buf_size / bytes_per_sec;
1047     return pts;
1048 }
1049
1050 /* get the current video clock value */
1051 static double get_video_clock(VideoState *is)
1052 {
1053     if (is->paused) {
1054         return is->video_current_pts;
1055     } else {
1056         return is->video_current_pts_drift + av_gettime() / 1000000.0;
1057     }
1058 }
1059
1060 /* get the current external clock value */
1061 static double get_external_clock(VideoState *is)
1062 {
1063     int64_t ti;
1064     ti = av_gettime();
1065     return is->external_clock + ((ti - is->external_clock_time) * 1e-6);
1066 }
1067
1068 /* get the current master clock value */
1069 static double get_master_clock(VideoState *is)
1070 {
1071     double val;
1072
1073     if (is->av_sync_type == AV_SYNC_VIDEO_MASTER) {
1074         if (is->video_st)
1075             val = get_video_clock(is);
1076         else
1077             val = get_audio_clock(is);
1078     } else if (is->av_sync_type == AV_SYNC_AUDIO_MASTER) {
1079         if (is->audio_st)
1080             val = get_audio_clock(is);
1081         else
1082             val = get_video_clock(is);
1083     } else {
1084         val = get_external_clock(is);
1085     }
1086     return val;
1087 }
1088
1089 /* seek in the stream */
1090 static void stream_seek(VideoState *is, int64_t pos, int64_t rel, int seek_by_bytes)
1091 {
1092     if (!is->seek_req) {
1093         is->seek_pos = pos;
1094         is->seek_rel = rel;
1095         is->seek_flags &= ~AVSEEK_FLAG_BYTE;
1096         if (seek_by_bytes)
1097             is->seek_flags |= AVSEEK_FLAG_BYTE;
1098         is->seek_req = 1;
1099     }
1100 }
1101
/* Pause or resume the video. On resume, the frame timer and video clock
   are resynchronised with the wall clock so playback does not try to
   "catch up" over the time spent paused. */
static void stream_pause(VideoState *is)
{
    if (is->paused) {
        /* resuming: advance frame_timer by the wall-clock time that
           elapsed while paused (the drift was frozen at pause time) */
        is->frame_timer += av_gettime() / 1000000.0 + is->video_current_pts_drift - is->video_current_pts;
        if(is->read_pause_return != AVERROR(ENOSYS)){
            /* the demuxer honoured av_read_pause(): re-derive the current
               pts from the frozen drift */
            is->video_current_pts = is->video_current_pts_drift + av_gettime() / 1000000.0;
        }
        /* re-anchor the drift to the current wall clock */
        is->video_current_pts_drift = is->video_current_pts - av_gettime() / 1000000.0;
    }
    is->paused = !is->paused;
}
1114
1115 static double compute_target_time(double frame_current_pts, VideoState *is)
1116 {
1117     double delay, sync_threshold, diff;
1118
1119     /* compute nominal delay */
1120     delay = frame_current_pts - is->frame_last_pts;
1121     if (delay <= 0 || delay >= 10.0) {
1122         /* if incorrect delay, use previous one */
1123         delay = is->frame_last_delay;
1124     } else {
1125         is->frame_last_delay = delay;
1126     }
1127     is->frame_last_pts = frame_current_pts;
1128
1129     /* update delay to follow master synchronisation source */
1130     if (((is->av_sync_type == AV_SYNC_AUDIO_MASTER && is->audio_st) ||
1131          is->av_sync_type == AV_SYNC_EXTERNAL_CLOCK)) {
1132         /* if video is slave, we try to correct big delays by
1133            duplicating or deleting a frame */
1134         diff = get_video_clock(is) - get_master_clock(is);
1135
1136         /* skip or repeat frame. We take into account the
1137            delay to compute the threshold. I still don't know
1138            if it is the best guess */
1139         sync_threshold = FFMAX(AV_SYNC_THRESHOLD, delay);
1140         if (fabs(diff) < AV_NOSYNC_THRESHOLD) {
1141             if (diff <= -sync_threshold)
1142                 delay = 0;
1143             else if (diff >= sync_threshold)
1144                 delay = 2 * delay;
1145         }
1146     }
1147     is->frame_timer += delay;
1148 #if defined(DEBUG_SYNC)
1149     printf("video: delay=%0.3f actual_delay=%0.3f pts=%0.3f A-V=%f\n",
1150             delay, actual_delay, frame_current_pts, -diff);
1151 #endif
1152
1153     return is->frame_timer;
1154 }
1155
/* Called (via FF_REFRESH_EVENT) to display each frame: dequeues pictures
   whose target time has passed, applies frame dropping when late, keeps
   the subtitle queue in step with the video pts, and prints the periodic
   status line. */
static void video_refresh_timer(void *opaque)
{
    VideoState *is = opaque;
    VideoPicture *vp;

    SubPicture *sp, *sp2;

    if (is->video_st) {
retry:
        if (is->pictq_size == 0) {
            //nothing to do, no picture to display in the que
        } else {
            double time= av_gettime()/1000000.0;
            double next_target;
            /* dequeue the picture */
            vp = &is->pictq[is->pictq_rindex];

            /* not yet time to show this picture: keep it queued */
            if(time < vp->target_clock)
                return;
            /* update current video pts */
            is->video_current_pts = vp->pts;
            is->video_current_pts_drift = is->video_current_pts - time;
            is->video_current_pos = vp->pos;
            /* estimate when the NEXT picture is due, to decide on dropping */
            if(is->pictq_size > 1){
                VideoPicture *nextvp= &is->pictq[(is->pictq_rindex+1)%VIDEO_PICTURE_QUEUE_SIZE];
                assert(nextvp->target_clock >= vp->target_clock);
                next_target= nextvp->target_clock;
            }else{
                next_target= vp->target_clock + is->video_clock - vp->pts; //FIXME pass durations cleanly
            }
            if(framedrop && time > next_target){
                /* we are late: raise the decode-side skip ratio, and if a
                   newer picture is already available (or we are very late),
                   drop this one and retry with the next */
                is->skip_frames *= 1.0 + FRAME_SKIP_FACTOR;
                if(is->pictq_size > 1 || time > next_target + 0.5){
                    /* update queue size and signal for next picture */
                    if (++is->pictq_rindex == VIDEO_PICTURE_QUEUE_SIZE)
                        is->pictq_rindex = 0;

                    SDL_LockMutex(is->pictq_mutex);
                    is->pictq_size--;
                    SDL_CondSignal(is->pictq_cond);
                    SDL_UnlockMutex(is->pictq_mutex);
                    goto retry;
                }
            }

            if(is->subtitle_st) {
                if (is->subtitle_stream_changed) {
                    /* subtitle stream switched: flush the whole queue */
                    SDL_LockMutex(is->subpq_mutex);

                    while (is->subpq_size) {
                        free_subpicture(&is->subpq[is->subpq_rindex]);

                        /* update queue size and signal for next picture */
                        if (++is->subpq_rindex == SUBPICTURE_QUEUE_SIZE)
                            is->subpq_rindex = 0;

                        is->subpq_size--;
                    }
                    is->subtitle_stream_changed = 0;

                    SDL_CondSignal(is->subpq_cond);
                    SDL_UnlockMutex(is->subpq_mutex);
                } else {
                    if (is->subpq_size > 0) {
                        sp = &is->subpq[is->subpq_rindex];

                        if (is->subpq_size > 1)
                            sp2 = &is->subpq[(is->subpq_rindex + 1) % SUBPICTURE_QUEUE_SIZE];
                        else
                            sp2 = NULL;

                        /* drop the head subtitle once it has expired or the
                           next one's display window has started */
                        if ((is->video_current_pts > (sp->pts + ((float) sp->sub.end_display_time / 1000)))
                                || (sp2 && is->video_current_pts > (sp2->pts + ((float) sp2->sub.start_display_time / 1000))))
                        {
                            free_subpicture(sp);

                            /* update queue size and signal for next picture */
                            if (++is->subpq_rindex == SUBPICTURE_QUEUE_SIZE)
                                is->subpq_rindex = 0;

                            SDL_LockMutex(is->subpq_mutex);
                            is->subpq_size--;
                            SDL_CondSignal(is->subpq_cond);
                            SDL_UnlockMutex(is->subpq_mutex);
                        }
                    }
                }
            }

            /* display picture */
            video_display(is);

            /* update queue size and signal for next picture */
            if (++is->pictq_rindex == VIDEO_PICTURE_QUEUE_SIZE)
                is->pictq_rindex = 0;

            SDL_LockMutex(is->pictq_mutex);
            is->pictq_size--;
            SDL_CondSignal(is->pictq_cond);
            SDL_UnlockMutex(is->pictq_mutex);
        }
    } else if (is->audio_st) {
        /* draw the next audio frame */

        /* if only audio stream, then display the audio bars (better
           than nothing, just to test the implementation */

        /* display picture */
        video_display(is);
    }
    if (show_status) {
        static int64_t last_time;
        int64_t cur_time;
        int aqsize, vqsize, sqsize;
        double av_diff;

        cur_time = av_gettime();
        /* refresh the status line at most every 30ms */
        if (!last_time || (cur_time - last_time) >= 30000) {
            aqsize = 0;
            vqsize = 0;
            sqsize = 0;
            if (is->audio_st)
                aqsize = is->audioq.size;
            if (is->video_st)
                vqsize = is->videoq.size;
            if (is->subtitle_st)
                sqsize = is->subtitleq.size;
            av_diff = 0;
            if (is->audio_st && is->video_st)
                av_diff = get_audio_clock(is) - get_video_clock(is);
            printf("%7.2f A-V:%7.3f s:%3.1f aq=%5dKB vq=%5dKB sq=%5dB f=%"PRId64"/%"PRId64"   \r",
                   get_master_clock(is), av_diff, FFMAX(is->skip_frames-1, 0), aqsize / 1024, vqsize / 1024, sqsize, is->faulty_dts, is->faulty_pts);
            fflush(stdout);
            last_time = cur_time;
        }
    }
}
1294
/* Stop the demuxer and refresh threads, then free everything owned by
   the VideoState. Threads are joined BEFORE the queues/mutexes they use
   are destroyed; must be called from the main thread. */
static void stream_close(VideoState *is)
{
    VideoPicture *vp;
    int i;
    /* XXX: use a special url_shutdown call to abort parse cleanly */
    is->abort_request = 1;
    SDL_WaitThread(is->parse_tid, NULL);
    SDL_WaitThread(is->refresh_tid, NULL);

    /* free all pictures */
    for(i=0;i<VIDEO_PICTURE_QUEUE_SIZE; i++) {
        vp = &is->pictq[i];
#if CONFIG_AVFILTER
        if (vp->picref) {
            avfilter_unref_buffer(vp->picref);
            vp->picref = NULL;
        }
#endif
        if (vp->bmp) {
            SDL_FreeYUVOverlay(vp->bmp);
            vp->bmp = NULL;
        }
    }
    SDL_DestroyMutex(is->pictq_mutex);
    SDL_DestroyCond(is->pictq_cond);
    SDL_DestroyMutex(is->subpq_mutex);
    SDL_DestroyCond(is->subpq_cond);
#if !CONFIG_AVFILTER
    /* the swscale context only exists in the non-avfilter build */
    if (is->img_convert_ctx)
        sws_freeContext(is->img_convert_ctx);
#endif
    av_free(is);
}
1328
1329 static void do_exit(void)
1330 {
1331     int i;
1332     if (cur_stream) {
1333         stream_close(cur_stream);
1334         cur_stream = NULL;
1335     }
1336     for (i = 0; i < AVMEDIA_TYPE_NB; i++)
1337         av_free(avcodec_opts[i]);
1338     av_free(avformat_opts);
1339     av_free(sws_opts);
1340 #if CONFIG_AVFILTER
1341     avfilter_uninit();
1342 #endif
1343     if (show_status)
1344         printf("\n");
1345     SDL_Quit();
1346     exit(0);
1347 }
1348
/* allocate a picture (needs to do that in main thread to avoid
   potential locking problems */
static void alloc_picture(void *opaque)
{
    VideoState *is = opaque;
    VideoPicture *vp;

    vp = &is->pictq[is->pictq_windex];

    /* drop any previous overlay so we can (re)allocate at the new size */
    if (vp->bmp)
        SDL_FreeYUVOverlay(vp->bmp);

#if CONFIG_AVFILTER
    if (vp->picref)
        avfilter_unref_buffer(vp->picref);
    vp->picref = NULL;

    /* dimensions come from the filter graph output in the avfilter build */
    vp->width   = is->out_video_filter->inputs[0]->w;
    vp->height  = is->out_video_filter->inputs[0]->h;
    vp->pix_fmt = is->out_video_filter->inputs[0]->format;
#else
    vp->width   = is->video_st->codec->width;
    vp->height  = is->video_st->codec->height;
    vp->pix_fmt = is->video_st->codec->pix_fmt;
#endif

    vp->bmp = SDL_CreateYUVOverlay(vp->width, vp->height,
                                   SDL_YV12_OVERLAY,
                                   screen);
    if (!vp->bmp || vp->bmp->pitches[0] < vp->width) {
        /* SDL allocates a buffer smaller than requested if the video
         * overlay hardware is unable to support the requested size. */
        fprintf(stderr, "Error: the video system does not support an image\n"
                        "size of %dx%d pixels. Try using -lowres or -vf \"scale=w:h\"\n"
                        "to reduce the image size.\n", vp->width, vp->height );
        do_exit();
    }

    /* wake queue_picture(), which is blocked waiting for vp->allocated */
    SDL_LockMutex(is->pictq_mutex);
    vp->allocated = 1;
    SDL_CondSignal(is->pictq_cond);
    SDL_UnlockMutex(is->pictq_mutex);
}
1392
1393 /**
1394  *
1395  * @param pts the dts of the pkt / pts of the frame and guessed if not known
1396  */
1397 static int queue_picture(VideoState *is, AVFrame *src_frame, double pts, int64_t pos)
1398 {
1399     VideoPicture *vp;
1400     int dst_pix_fmt;
1401 #if CONFIG_AVFILTER
1402     AVPicture pict_src;
1403 #endif
1404     /* wait until we have space to put a new picture */
1405     SDL_LockMutex(is->pictq_mutex);
1406
1407     if(is->pictq_size>=VIDEO_PICTURE_QUEUE_SIZE && !is->refresh)
1408         is->skip_frames= FFMAX(1.0 - FRAME_SKIP_FACTOR, is->skip_frames * (1.0-FRAME_SKIP_FACTOR));
1409
1410     while (is->pictq_size >= VIDEO_PICTURE_QUEUE_SIZE &&
1411            !is->videoq.abort_request) {
1412         SDL_CondWait(is->pictq_cond, is->pictq_mutex);
1413     }
1414     SDL_UnlockMutex(is->pictq_mutex);
1415
1416     if (is->videoq.abort_request)
1417         return -1;
1418
1419     vp = &is->pictq[is->pictq_windex];
1420
1421     /* alloc or resize hardware picture buffer */
1422     if (!vp->bmp ||
1423 #if CONFIG_AVFILTER
1424         vp->width  != is->out_video_filter->inputs[0]->w ||
1425         vp->height != is->out_video_filter->inputs[0]->h) {
1426 #else
1427         vp->width != is->video_st->codec->width ||
1428         vp->height != is->video_st->codec->height) {
1429 #endif
1430         SDL_Event event;
1431
1432         vp->allocated = 0;
1433
1434         /* the allocation must be done in the main thread to avoid
1435            locking problems */
1436         event.type = FF_ALLOC_EVENT;
1437         event.user.data1 = is;
1438         SDL_PushEvent(&event);
1439
1440         /* wait until the picture is allocated */
1441         SDL_LockMutex(is->pictq_mutex);
1442         while (!vp->allocated && !is->videoq.abort_request) {
1443             SDL_CondWait(is->pictq_cond, is->pictq_mutex);
1444         }
1445         SDL_UnlockMutex(is->pictq_mutex);
1446
1447         if (is->videoq.abort_request)
1448             return -1;
1449     }
1450
1451     /* if the frame is not skipped, then display it */
1452     if (vp->bmp) {
1453         AVPicture pict;
1454 #if CONFIG_AVFILTER
1455         if(vp->picref)
1456             avfilter_unref_buffer(vp->picref);
1457         vp->picref = src_frame->opaque;
1458 #endif
1459
1460         /* get a pointer on the bitmap */
1461         SDL_LockYUVOverlay (vp->bmp);
1462
1463         dst_pix_fmt = PIX_FMT_YUV420P;
1464         memset(&pict,0,sizeof(AVPicture));
1465         pict.data[0] = vp->bmp->pixels[0];
1466         pict.data[1] = vp->bmp->pixels[2];
1467         pict.data[2] = vp->bmp->pixels[1];
1468
1469         pict.linesize[0] = vp->bmp->pitches[0];
1470         pict.linesize[1] = vp->bmp->pitches[2];
1471         pict.linesize[2] = vp->bmp->pitches[1];
1472
1473 #if CONFIG_AVFILTER
1474         pict_src.data[0] = src_frame->data[0];
1475         pict_src.data[1] = src_frame->data[1];
1476         pict_src.data[2] = src_frame->data[2];
1477
1478         pict_src.linesize[0] = src_frame->linesize[0];
1479         pict_src.linesize[1] = src_frame->linesize[1];
1480         pict_src.linesize[2] = src_frame->linesize[2];
1481
1482         //FIXME use direct rendering
1483         av_picture_copy(&pict, &pict_src,
1484                         vp->pix_fmt, vp->width, vp->height);
1485 #else
1486         sws_flags = av_get_int(sws_opts, "sws_flags", NULL);
1487         is->img_convert_ctx = sws_getCachedContext(is->img_convert_ctx,
1488             vp->width, vp->height, vp->pix_fmt, vp->width, vp->height,
1489             dst_pix_fmt, sws_flags, NULL, NULL, NULL);
1490         if (is->img_convert_ctx == NULL) {
1491             fprintf(stderr, "Cannot initialize the conversion context\n");
1492             exit(1);
1493         }
1494         sws_scale(is->img_convert_ctx, src_frame->data, src_frame->linesize,
1495                   0, vp->height, pict.data, pict.linesize);
1496 #endif
1497         /* update the bitmap content */
1498         SDL_UnlockYUVOverlay(vp->bmp);
1499
1500         vp->pts = pts;
1501         vp->pos = pos;
1502
1503         /* now we can update the picture count */
1504         if (++is->pictq_windex == VIDEO_PICTURE_QUEUE_SIZE)
1505             is->pictq_windex = 0;
1506         SDL_LockMutex(is->pictq_mutex);
1507         vp->target_clock= compute_target_time(vp->pts, is);
1508
1509         is->pictq_size++;
1510         SDL_UnlockMutex(is->pictq_mutex);
1511     }
1512     return 0;
1513 }
1514
1515 /**
1516  * compute the exact PTS for the picture if it is omitted in the stream
1517  * @param pts1 the dts of the pkt / pts of the frame
1518  */
1519 static int output_picture2(VideoState *is, AVFrame *src_frame, double pts1, int64_t pos)
1520 {
1521     double frame_delay, pts;
1522
1523     pts = pts1;
1524
1525     if (pts != 0) {
1526         /* update video clock with pts, if present */
1527         is->video_clock = pts;
1528     } else {
1529         pts = is->video_clock;
1530     }
1531     /* update video clock for next frame */
1532     frame_delay = av_q2d(is->video_st->codec->time_base);
1533     /* for MPEG2, the frame can be repeated, so we update the
1534        clock accordingly */
1535     frame_delay += src_frame->repeat_pict * (frame_delay * 0.5);
1536     is->video_clock += frame_delay;
1537
1538 #if defined(DEBUG_SYNC) && 0
1539     printf("frame_type=%c clock=%0.3f pts=%0.3f\n",
1540            av_get_pict_type_char(src_frame->pict_type), pts, pts1);
1541 #endif
1542     return queue_picture(is, src_frame, pts, pos);
1543 }
1544
1545 static int get_video_frame(VideoState *is, AVFrame *frame, int64_t *pts, AVPacket *pkt)
1546 {
1547     int len1, got_picture, i;
1548
1549         if (packet_queue_get(&is->videoq, pkt, 1) < 0)
1550             return -1;
1551
1552         if(pkt->data == flush_pkt.data){
1553             avcodec_flush_buffers(is->video_st->codec);
1554
1555             SDL_LockMutex(is->pictq_mutex);
1556             //Make sure there are no long delay timers (ideally we should just flush the que but thats harder)
1557             for(i=0; i<VIDEO_PICTURE_QUEUE_SIZE; i++){
1558                 is->pictq[i].target_clock= 0;
1559             }
1560             while (is->pictq_size && !is->videoq.abort_request) {
1561                 SDL_CondWait(is->pictq_cond, is->pictq_mutex);
1562             }
1563             is->video_current_pos= -1;
1564             SDL_UnlockMutex(is->pictq_mutex);
1565
1566             is->last_dts_for_fault_detection=
1567             is->last_pts_for_fault_detection= INT64_MIN;
1568             is->frame_last_pts= AV_NOPTS_VALUE;
1569             is->frame_last_delay = 0;
1570             is->frame_timer = (double)av_gettime() / 1000000.0;
1571             is->skip_frames= 1;
1572             is->skip_frames_index= 0;
1573             return 0;
1574         }
1575
1576         /* NOTE: ipts is the PTS of the _first_ picture beginning in
1577            this packet, if any */
1578         is->video_st->codec->reordered_opaque= pkt->pts;
1579         len1 = avcodec_decode_video2(is->video_st->codec,
1580                                     frame, &got_picture,
1581                                     pkt);
1582
1583         if (got_picture) {
1584             if(pkt->dts != AV_NOPTS_VALUE){
1585                 is->faulty_dts += pkt->dts <= is->last_dts_for_fault_detection;
1586                 is->last_dts_for_fault_detection= pkt->dts;
1587             }
1588             if(frame->reordered_opaque != AV_NOPTS_VALUE){
1589                 is->faulty_pts += frame->reordered_opaque <= is->last_pts_for_fault_detection;
1590                 is->last_pts_for_fault_detection= frame->reordered_opaque;
1591             }
1592         }
1593
1594         if(   (   decoder_reorder_pts==1
1595                || (decoder_reorder_pts && is->faulty_pts<is->faulty_dts)
1596                || pkt->dts == AV_NOPTS_VALUE)
1597            && frame->reordered_opaque != AV_NOPTS_VALUE)
1598             *pts= frame->reordered_opaque;
1599         else if(pkt->dts != AV_NOPTS_VALUE)
1600             *pts= pkt->dts;
1601         else
1602             *pts= 0;
1603
1604 //            if (len1 < 0)
1605 //                break;
1606     if (got_picture){
1607         is->skip_frames_index += 1;
1608         if(is->skip_frames_index >= is->skip_frames){
1609             is->skip_frames_index -= FFMAX(is->skip_frames, 1.0);
1610             return 1;
1611         }
1612
1613     }
1614     return 0;
1615 }
1616
1617 #if CONFIG_AVFILTER
/* Private state of the ffplay source filter ("ffplay_input"). */
typedef struct {
    VideoState *is;   /* owning player state */
    AVFrame *frame;   /* decode target, allocated in input_init() */
    int use_dr1;      /* nonzero when the decoder renders directly into filter buffers */
} FilterPriv;
1623
/* get_buffer() callback: back the decoder's frame with a buffer obtained
   from the filter graph, so decoded data needs no extra copy (DR1). */
static int input_get_buffer(AVCodecContext *codec, AVFrame *pic)
{
    AVFilterContext *ctx = codec->opaque;
    AVFilterBufferRef  *ref;
    int perms = AV_PERM_WRITE;
    int i, w, h, stride[4];
    unsigned edge;

    /* translate the decoder's buffer hints into filter buffer permissions */
    if(pic->buffer_hints & FF_BUFFER_HINTS_VALID) {
        if(pic->buffer_hints & FF_BUFFER_HINTS_READABLE) perms |= AV_PERM_READ;
        if(pic->buffer_hints & FF_BUFFER_HINTS_PRESERVE) perms |= AV_PERM_PRESERVE;
        if(pic->buffer_hints & FF_BUFFER_HINTS_REUSABLE) perms |= AV_PERM_REUSE2;
    }
    if(pic->reference) perms |= AV_PERM_READ | AV_PERM_PRESERVE;

    /* allocate with alignment padding plus room for the decoder's edge
       (motion compensation may read outside the visible picture) */
    w = codec->width;
    h = codec->height;
    avcodec_align_dimensions2(codec, &w, &h, stride);
    edge = codec->flags & CODEC_FLAG_EMU_EDGE ? 0 : avcodec_get_edge_width();
    w += edge << 1;
    h += edge << 1;

    if(!(ref = avfilter_get_video_buffer(ctx->outputs[0], perms, w, h)))
        return -1;

    ref->video->w = codec->width;
    ref->video->h = codec->height;
    for(i = 0; i < 4; i ++) {
        /* chroma planes (1 and 2) are subsampled, so scale the edge offset */
        unsigned hshift = (i == 1 || i == 2) ? av_pix_fmt_descriptors[ref->format].log2_chroma_w : 0;
        unsigned vshift = (i == 1 || i == 2) ? av_pix_fmt_descriptors[ref->format].log2_chroma_h : 0;

        if (ref->data[i]) {
            /* skip past the top/left edge so data[i] points at the visible area */
            ref->data[i]    += (edge >> hshift) + ((edge * ref->linesize[i]) >> vshift);
        }
        pic->data[i]     = ref->data[i];
        pic->linesize[i] = ref->linesize[i];
    }
    /* stash the buffer reference so release/reget and the request_frame
       path can find it again */
    pic->opaque = ref;
    pic->age    = INT_MAX;
    pic->type   = FF_BUFFER_TYPE_USER;
    pic->reordered_opaque = codec->reordered_opaque;
    return 0;
}
1667
/* release_buffer() callback: clear the frame's plane pointers before
   dropping our filter-buffer reference, so the decoder cannot keep
   using memory that may be freed by the unref. */
static void input_release_buffer(AVCodecContext *codec, AVFrame *pic)
{
    memset(pic->data, 0, sizeof(pic->data));
    avfilter_unref_buffer(pic->opaque);
}
1673
1674 static int input_reget_buffer(AVCodecContext *codec, AVFrame *pic)
1675 {
1676     AVFilterBufferRef *ref = pic->opaque;
1677
1678     if (pic->data[0] == NULL) {
1679         pic->buffer_hints |= FF_BUFFER_HINTS_READABLE;
1680         return codec->get_buffer(codec, pic);
1681     }
1682
1683     if ((codec->width != ref->video->w) || (codec->height != ref->video->h) ||
1684         (codec->pix_fmt != ref->format)) {
1685         av_log(codec, AV_LOG_ERROR, "Picture properties changed.\n");
1686         return -1;
1687     }
1688
1689     pic->reordered_opaque = codec->reordered_opaque;
1690     return 0;
1691 }
1692
1693 static int input_init(AVFilterContext *ctx, const char *args, void *opaque)
1694 {
1695     FilterPriv *priv = ctx->priv;
1696     AVCodecContext *codec;
1697     if(!opaque) return -1;
1698
1699     priv->is = opaque;
1700     codec    = priv->is->video_st->codec;
1701     codec->opaque = ctx;
1702     if(codec->codec->capabilities & CODEC_CAP_DR1) {
1703         priv->use_dr1 = 1;
1704         codec->get_buffer     = input_get_buffer;
1705         codec->release_buffer = input_release_buffer;
1706         codec->reget_buffer   = input_reget_buffer;
1707     }
1708
1709     priv->frame = avcodec_alloc_frame();
1710
1711     return 0;
1712 }
1713
/* Tear down the source filter: free the frame allocated in input_init(). */
static void input_uninit(AVFilterContext *ctx)
{
    FilterPriv *priv = ctx->priv;
    av_free(priv->frame);
}
1719
/* request_frame() callback of the source filter: decode until a
   displayable frame is produced, wrap it in a filter buffer and push it
   down the link. Returns 0 on success, -1 on decode abort/error. */
static int input_request_frame(AVFilterLink *link)
{
    FilterPriv *priv = link->src->priv;
    AVFilterBufferRef *picref;
    int64_t pts = 0;
    AVPacket pkt;
    int ret;

    /* ret == 0 means no frame yet (flush/skip): free the packet and retry */
    while (!(ret = get_video_frame(priv->is, priv->frame, &pts, &pkt)))
        av_free_packet(&pkt);
    if (ret < 0)
        return -1;

    if(priv->use_dr1) {
        /* direct rendering: the frame already lives in a filter buffer
           (installed by input_get_buffer); just take another reference */
        picref = avfilter_ref_buffer(priv->frame->opaque, ~0);
    } else {
        /* copy path: allocate a buffer on the link and copy the planes */
        picref = avfilter_get_video_buffer(link, AV_PERM_WRITE, link->w, link->h);
        av_picture_data_copy(picref->data, picref->linesize,
                             priv->frame->data, priv->frame->linesize,
                             picref->format, link->w, link->h);
    }
    /* NOTE(review): pkt.pos is read below after av_free_packet(); the pos
       field is not touched by av_free_packet, but reading it before the
       free would be more obviously correct — confirm */
    av_free_packet(&pkt);

    picref->pts = pts;
    picref->pos = pkt.pos;
    picref->video->pixel_aspect = priv->is->video_st->codec->sample_aspect_ratio;
    /* emit the frame as a single full-height slice */
    avfilter_start_frame(link, picref);
    avfilter_draw_slice(link, 0, link->h, 1);
    avfilter_end_frame(link);

    return 0;
}
1752
1753 static int input_query_formats(AVFilterContext *ctx)
1754 {
1755     FilterPriv *priv = ctx->priv;
1756     enum PixelFormat pix_fmts[] = {
1757         priv->is->video_st->codec->pix_fmt, PIX_FMT_NONE
1758     };
1759
1760     avfilter_set_common_formats(ctx, avfilter_make_format_list(pix_fmts));
1761     return 0;
1762 }
1763
1764 static int input_config_props(AVFilterLink *link)
1765 {
1766     FilterPriv *priv  = link->src->priv;
1767     AVCodecContext *c = priv->is->video_st->codec;
1768
1769     link->w = c->width;
1770     link->h = c->height;
1771
1772     return 0;
1773 }
1774
/* Source filter feeding decoded frames from the player into the graph:
   no inputs, one video output driven by input_request_frame(). */
static AVFilter input_filter =
{
    .name      = "ffplay_input",

    .priv_size = sizeof(FilterPriv),

    .init      = input_init,
    .uninit    = input_uninit,

    .query_formats = input_query_formats,

    .inputs    = (AVFilterPad[]) {{ .name = NULL }},
    .outputs   = (AVFilterPad[]) {{ .name = "default",
                                    .type = AVMEDIA_TYPE_VIDEO,
                                    .request_frame = input_request_frame,
                                    .config_props  = input_config_props, },
                                  { .name = NULL }},
};
1793
/* end_frame() callback of the sink filter: intentionally empty — frames
   are pulled out of the graph by get_filtered_video_frame() instead. */
static void output_end_frame(AVFilterLink *link)
{
}
1797
1798 static int output_query_formats(AVFilterContext *ctx)
1799 {
1800     enum PixelFormat pix_fmts[] = { PIX_FMT_YUV420P, PIX_FMT_NONE };
1801
1802     avfilter_set_common_formats(ctx, avfilter_make_format_list(pix_fmts));
1803     return 0;
1804 }
1805
1806 static int get_filtered_video_frame(AVFilterContext *ctx, AVFrame *frame,
1807                                     int64_t *pts, int64_t *pos)
1808 {
1809     AVFilterBufferRef *pic;
1810
1811     if(avfilter_request_frame(ctx->inputs[0]))
1812         return -1;
1813     if(!(pic = ctx->inputs[0]->cur_buf))
1814         return -1;
1815     ctx->inputs[0]->cur_buf = NULL;
1816
1817     frame->opaque = pic;
1818     *pts          = pic->pts;
1819     *pos          = pic->pos;
1820
1821     memcpy(frame->data,     pic->data,     sizeof(frame->data));
1822     memcpy(frame->linesize, pic->linesize, sizeof(frame->linesize));
1823
1824     return 1;
1825 }
1826
/* Sink filter: one read-only video input, no outputs; its passive
   end_frame lets get_filtered_video_frame() drain cur_buf directly. */
static AVFilter output_filter =
{
    .name      = "ffplay_output",

    .query_formats = output_query_formats,

    .inputs    = (AVFilterPad[]) {{ .name          = "default",
                                    .type          = AVMEDIA_TYPE_VIDEO,
                                    .end_frame     = output_end_frame,
                                    .min_perms     = AV_PERM_READ, },
                                  { .name = NULL }},
    .outputs   = (AVFilterPad[]) {{ .name = NULL }},
};
1840 #endif  /* CONFIG_AVFILTER */
1841
/* Video decoding thread: obtains decoded pictures (directly, or through
 * the libavfilter graph built from the -vf option when CONFIG_AVFILTER),
 * converts their PTS to seconds and hands them to the display machinery
 * via output_picture2().  Runs until the queue aborts or an error occurs. */
static int video_thread(void *arg)
{
    VideoState *is = arg;
    AVFrame *frame= avcodec_alloc_frame();
    int64_t pts_int;
    double pts;
    int ret;

#if CONFIG_AVFILTER
    int64_t pos;
    char sws_flags_str[128];
    AVFilterContext *filt_src = NULL, *filt_out = NULL;
    AVFilterGraph *graph = av_mallocz(sizeof(AVFilterGraph));
    /* propagate the global sws flags to any scalers the graph inserts */
    snprintf(sws_flags_str, sizeof(sws_flags_str), "flags=%d", sws_flags);
    graph->scale_sws_opts = av_strdup(sws_flags_str);

    /* "src" feeds decoded frames into the graph, "out" collects results */
    if (avfilter_open(&filt_src, &input_filter,  "src") < 0) goto the_end;
    if (avfilter_open(&filt_out, &output_filter, "out") < 0) goto the_end;

    if(avfilter_init_filter(filt_src, NULL, is))             goto the_end;
    if(avfilter_init_filter(filt_out, NULL, frame))          goto the_end;


    if(vfilters) {
        /* parse the user filter description between src and out */
        AVFilterInOut *outputs = av_malloc(sizeof(AVFilterInOut));
        AVFilterInOut *inputs  = av_malloc(sizeof(AVFilterInOut));

        outputs->name    = av_strdup("in");
        outputs->filter  = filt_src;
        outputs->pad_idx = 0;
        outputs->next    = NULL;

        inputs->name    = av_strdup("out");
        inputs->filter  = filt_out;
        inputs->pad_idx = 0;
        inputs->next    = NULL;

        if (avfilter_graph_parse(graph, vfilters, inputs, outputs, NULL) < 0)
            goto the_end;
        av_freep(&vfilters);
    } else {
        /* no user filters: connect source directly to sink */
        if(avfilter_link(filt_src, 0, filt_out, 0) < 0)          goto the_end;
    }
    avfilter_graph_add_filter(graph, filt_src);
    avfilter_graph_add_filter(graph, filt_out);

    if(avfilter_graph_check_validity(graph, NULL))           goto the_end;
    if(avfilter_graph_config_formats(graph, NULL))           goto the_end;
    if(avfilter_graph_config_links(graph, NULL))             goto the_end;

    is->out_video_filter = filt_out;
#endif

    for(;;) {
#if !CONFIG_AVFILTER
        AVPacket pkt;
#endif
        /* poll (10 ms steps) while playback is paused */
        while (is->paused && !is->videoq.abort_request)
            SDL_Delay(10);
#if CONFIG_AVFILTER
        ret = get_filtered_video_frame(filt_out, frame, &pts_int, &pos);
#else
        ret = get_video_frame(is, frame, &pts_int, &pkt);
#endif

        if (ret < 0) goto the_end;

        /* ret == 0 means no frame was produced this iteration */
        if (!ret)
            continue;

        /* stream time base units -> seconds */
        pts = pts_int*av_q2d(is->video_st->time_base);

#if CONFIG_AVFILTER
        ret = output_picture2(is, frame, pts, pos);
#else
        ret = output_picture2(is, frame, pts,  pkt.pos);
        av_free_packet(&pkt);
#endif
        if (ret < 0)
            goto the_end;

        /* frame-step mode: pause again after queuing one frame */
        if (step)
            if (cur_stream)
                stream_pause(cur_stream);
    }
 the_end:
#if CONFIG_AVFILTER
    avfilter_graph_destroy(graph);
    av_freep(&graph);
#endif
    av_free(frame);
    return 0;
}
1935
/* Subtitle decoding thread: pops packets from the subtitle queue,
 * decodes them, and stores bitmap subtitles (sub.format == 0) into the
 * subpq ring buffer after converting each rect's palette from RGBA to
 * YUVA (presumably for the overlay blending path — confirm against the
 * display code). */
static int subtitle_thread(void *arg)
{
    VideoState *is = arg;
    SubPicture *sp;
    AVPacket pkt1, *pkt = &pkt1;
    int len1, got_subtitle;
    double pts;
    int i, j;
    int r, g, b, y, u, v, a;

    for(;;) {
        /* poll while paused */
        while (is->paused && !is->subtitleq.abort_request) {
            SDL_Delay(10);
        }
        if (packet_queue_get(&is->subtitleq, pkt, 1) < 0)
            break;

        /* flush packet (queued after a seek): reset decoder state */
        if(pkt->data == flush_pkt.data){
            avcodec_flush_buffers(is->subtitle_st->codec);
            continue;
        }
        /* wait for a free slot in the subpicture ring buffer */
        SDL_LockMutex(is->subpq_mutex);
        while (is->subpq_size >= SUBPICTURE_QUEUE_SIZE &&
               !is->subtitleq.abort_request) {
            SDL_CondWait(is->subpq_cond, is->subpq_mutex);
        }
        SDL_UnlockMutex(is->subpq_mutex);

        if (is->subtitleq.abort_request)
            goto the_end;

        sp = &is->subpq[is->subpq_windex];

       /* NOTE: ipts is the PTS of the _first_ picture beginning in
           this packet, if any */
        pts = 0;
        if (pkt->pts != AV_NOPTS_VALUE)
            pts = av_q2d(is->subtitle_st->time_base)*pkt->pts;

        len1 = avcodec_decode_subtitle2(is->subtitle_st->codec,
                                    &sp->sub, &got_subtitle,
                                    pkt);
//            if (len1 < 0)
//                break;
        if (got_subtitle && sp->sub.format == 0) {
            sp->pts = pts;

            /* convert each palette entry in place: RGBA -> CCIR YUVA */
            for (i = 0; i < sp->sub.num_rects; i++)
            {
                for (j = 0; j < sp->sub.rects[i]->nb_colors; j++)
                {
                    RGBA_IN(r, g, b, a, (uint32_t*)sp->sub.rects[i]->pict.data[1] + j);
                    y = RGB_TO_Y_CCIR(r, g, b);
                    u = RGB_TO_U_CCIR(r, g, b, 0);
                    v = RGB_TO_V_CCIR(r, g, b, 0);
                    YUVA_OUT((uint32_t*)sp->sub.rects[i]->pict.data[1] + j, y, u, v, a);
                }
            }

            /* now we can update the picture count */
            if (++is->subpq_windex == SUBPICTURE_QUEUE_SIZE)
                is->subpq_windex = 0;
            SDL_LockMutex(is->subpq_mutex);
            is->subpq_size++;
            SDL_UnlockMutex(is->subpq_mutex);
        }
        av_free_packet(pkt);
//        if (step)
//            if (cur_stream)
//                stream_pause(cur_stream);
    }
 the_end:
    return 0;
}
2010
2011 /* copy samples for viewing in editor window */
2012 static void update_sample_display(VideoState *is, short *samples, int samples_size)
2013 {
2014     int size, len, channels;
2015
2016     channels = is->audio_st->codec->channels;
2017
2018     size = samples_size / sizeof(short);
2019     while (size > 0) {
2020         len = SAMPLE_ARRAY_SIZE - is->sample_array_index;
2021         if (len > size)
2022             len = size;
2023         memcpy(is->sample_array + is->sample_array_index, samples, len * sizeof(short));
2024         samples += len;
2025         is->sample_array_index += len;
2026         if (is->sample_array_index >= SAMPLE_ARRAY_SIZE)
2027             is->sample_array_index = 0;
2028         size -= len;
2029     }
2030 }
2031
2032 /* return the new audio buffer size (samples can be added or deleted
2033    to get better sync if video or external master clock) */
2034 static int synchronize_audio(VideoState *is, short *samples,
2035                              int samples_size1, double pts)
2036 {
2037     int n, samples_size;
2038     double ref_clock;
2039
2040     n = 2 * is->audio_st->codec->channels;
2041     samples_size = samples_size1;
2042
2043     /* if not master, then we try to remove or add samples to correct the clock */
2044     if (((is->av_sync_type == AV_SYNC_VIDEO_MASTER && is->video_st) ||
2045          is->av_sync_type == AV_SYNC_EXTERNAL_CLOCK)) {
2046         double diff, avg_diff;
2047         int wanted_size, min_size, max_size, nb_samples;
2048
2049         ref_clock = get_master_clock(is);
2050         diff = get_audio_clock(is) - ref_clock;
2051
2052         if (diff < AV_NOSYNC_THRESHOLD) {
2053             is->audio_diff_cum = diff + is->audio_diff_avg_coef * is->audio_diff_cum;
2054             if (is->audio_diff_avg_count < AUDIO_DIFF_AVG_NB) {
2055                 /* not enough measures to have a correct estimate */
2056                 is->audio_diff_avg_count++;
2057             } else {
2058                 /* estimate the A-V difference */
2059                 avg_diff = is->audio_diff_cum * (1.0 - is->audio_diff_avg_coef);
2060
2061                 if (fabs(avg_diff) >= is->audio_diff_threshold) {
2062                     wanted_size = samples_size + ((int)(diff * is->audio_st->codec->sample_rate) * n);
2063                     nb_samples = samples_size / n;
2064
2065                     min_size = ((nb_samples * (100 - SAMPLE_CORRECTION_PERCENT_MAX)) / 100) * n;
2066                     max_size = ((nb_samples * (100 + SAMPLE_CORRECTION_PERCENT_MAX)) / 100) * n;
2067                     if (wanted_size < min_size)
2068                         wanted_size = min_size;
2069                     else if (wanted_size > max_size)
2070                         wanted_size = max_size;
2071
2072                     /* add or remove samples to correction the synchro */
2073                     if (wanted_size < samples_size) {
2074                         /* remove samples */
2075                         samples_size = wanted_size;
2076                     } else if (wanted_size > samples_size) {
2077                         uint8_t *samples_end, *q;
2078                         int nb;
2079
2080                         /* add samples */
2081                         nb = (samples_size - wanted_size);
2082                         samples_end = (uint8_t *)samples + samples_size - n;
2083                         q = samples_end + n;
2084                         while (nb > 0) {
2085                             memcpy(q, samples_end, n);
2086                             q += n;
2087                             nb -= n;
2088                         }
2089                         samples_size = wanted_size;
2090                     }
2091                 }
2092 #if 0
2093                 printf("diff=%f adiff=%f sample_diff=%d apts=%0.3f vpts=%0.3f %f\n",
2094                        diff, avg_diff, samples_size - samples_size1,
2095                        is->audio_clock, is->video_clock, is->audio_diff_threshold);
2096 #endif
2097             }
2098         } else {
2099             /* too big difference : may be initial PTS errors, so
2100                reset A-V filter */
2101             is->audio_diff_avg_count = 0;
2102             is->audio_diff_cum = 0;
2103         }
2104     }
2105
2106     return samples_size;
2107 }
2108
/* Decode one audio frame and return its uncompressed size in bytes
 * (after optional conversion to S16).  On success is->audio_buf points
 * at the decoded data and *pts_ptr holds its presentation time; returns
 * -1 when paused, aborted, or no more packets are available.  Also
 * advances is->audio_clock past the decoded data. */
static int audio_decode_frame(VideoState *is, double *pts_ptr)
{
    AVPacket *pkt_temp = &is->audio_pkt_temp;  /* walks inside the current packet */
    AVPacket *pkt = &is->audio_pkt;            /* owns the packet's data */
    AVCodecContext *dec= is->audio_st->codec;
    int n, len1, data_size;
    double pts;

    for(;;) {
        /* NOTE: the audio packet can contain several frames */
        while (pkt_temp->size > 0) {
            data_size = sizeof(is->audio_buf1);
            len1 = avcodec_decode_audio3(dec,
                                        (int16_t *)is->audio_buf1, &data_size,
                                        pkt_temp);
            if (len1 < 0) {
                /* if error, we skip the frame */
                pkt_temp->size = 0;
                break;
            }

            /* consume the bytes the decoder used */
            pkt_temp->data += len1;
            pkt_temp->size -= len1;
            if (data_size <= 0)
                continue;

            /* (re)create the sample format converter if the decoder's
               output format changed */
            if (dec->sample_fmt != is->audio_src_fmt) {
                if (is->reformat_ctx)
                    av_audio_convert_free(is->reformat_ctx);
                is->reformat_ctx= av_audio_convert_alloc(SAMPLE_FMT_S16, 1,
                                                         dec->sample_fmt, 1, NULL, 0);
                if (!is->reformat_ctx) {
                    fprintf(stderr, "Cannot convert %s sample format to %s sample format\n",
                        avcodec_get_sample_fmt_name(dec->sample_fmt),
                        avcodec_get_sample_fmt_name(SAMPLE_FMT_S16));
                        break;
                }
                is->audio_src_fmt= dec->sample_fmt;
            }

            if (is->reformat_ctx) {
                /* convert audio_buf1 -> audio_buf2 (S16, 2 bytes/sample) */
                const void *ibuf[6]= {is->audio_buf1};
                void *obuf[6]= {is->audio_buf2};
                int istride[6]= {av_get_bits_per_sample_format(dec->sample_fmt)/8};
                int ostride[6]= {2};
                int len= data_size/istride[0];
                if (av_audio_convert(is->reformat_ctx, obuf, ostride, ibuf, istride, len)<0) {
                    printf("av_audio_convert() failed\n");
                    break;
                }
                is->audio_buf= is->audio_buf2;
                /* FIXME: existing code assume that data_size equals framesize*channels*2
                          remove this legacy cruft */
                data_size= len*2;
            }else{
                is->audio_buf= is->audio_buf1;
            }

            /* if no pts, then compute it */
            pts = is->audio_clock;
            *pts_ptr = pts;
            n = 2 * dec->channels;
            /* advance the audio clock by the duration of the decoded data */
            is->audio_clock += (double)data_size /
                (double)(n * dec->sample_rate);
#if defined(DEBUG_SYNC)
            {
                static double last_clock;
                printf("audio: delay=%0.3f clock=%0.3f pts=%0.3f\n",
                       is->audio_clock - last_clock,
                       is->audio_clock, pts);
                last_clock = is->audio_clock;
            }
#endif
            return data_size;
        }

        /* free the current packet */
        if (pkt->data)
            av_free_packet(pkt);

        if (is->paused || is->audioq.abort_request) {
            return -1;
        }

        /* read next packet */
        if (packet_queue_get(&is->audioq, pkt, 1) < 0)
            return -1;
        /* flush packet (queued after a seek): reset decoder state */
        if(pkt->data == flush_pkt.data){
            avcodec_flush_buffers(dec);
            continue;
        }

        pkt_temp->data = pkt->data;
        pkt_temp->size = pkt->size;

        /* if update the audio clock with the pts */
        if (pkt->pts != AV_NOPTS_VALUE) {
            is->audio_clock = av_q2d(is->audio_st->time_base)*pkt->pts;
        }
    }
}
2211
2212 /* get the current audio output buffer size, in samples. With SDL, we
2213    cannot have a precise information */
2214 static int audio_write_get_buf_size(VideoState *is)
2215 {
2216     return is->audio_buf_size - is->audio_buf_index;
2217 }
2218
2219
/* SDL audio callback: fill `stream` with `len` bytes of audio.  Decodes
 * frames on demand, applies A-V sync correction, feeds the visualization
 * buffer, and outputs silence on decode errors so playback never stalls.
 * Runs on SDL's audio thread. */
static void sdl_audio_callback(void *opaque, Uint8 *stream, int len)
{
    VideoState *is = opaque;
    int audio_size, len1;
    double pts;

    /* remember when the device asked for data; used by the clock code */
    audio_callback_time = av_gettime();

    while (len > 0) {
        /* refill the intermediate buffer when exhausted */
        if (is->audio_buf_index >= is->audio_buf_size) {
           audio_size = audio_decode_frame(is, &pts);
           if (audio_size < 0) {
                /* if error, just output silence */
               is->audio_buf = is->audio_buf1;
               is->audio_buf_size = 1024;
               memset(is->audio_buf, 0, is->audio_buf_size);
           } else {
               if (is->show_audio)
                   update_sample_display(is, (int16_t *)is->audio_buf, audio_size);
               /* shrink or grow the buffer to track the master clock */
               audio_size = synchronize_audio(is, (int16_t *)is->audio_buf, audio_size,
                                              pts);
               is->audio_buf_size = audio_size;
           }
           is->audio_buf_index = 0;
        }
        /* copy as much as the device wants, up to what we have buffered */
        len1 = is->audio_buf_size - is->audio_buf_index;
        if (len1 > len)
            len1 = len;
        memcpy(stream, (uint8_t *)is->audio_buf + is->audio_buf_index, len1);
        len -= len1;
        stream += len1;
        is->audio_buf_index += len1;
    }
}
2255
/* Open the decoder for one stream of is->ic and start the machinery that
 * consumes it (SDL audio device, video thread, or subtitle thread).
 * Return 0 if OK, -1 on any failure. */
static int stream_component_open(VideoState *is, int stream_index)
{
    AVFormatContext *ic = is->ic;
    AVCodecContext *avctx;
    AVCodec *codec;
    SDL_AudioSpec wanted_spec, spec;

    if (stream_index < 0 || stream_index >= ic->nb_streams)
        return -1;
    avctx = ic->streams[stream_index]->codec;

    /* prepare audio output */
    if (avctx->codec_type == AVMEDIA_TYPE_AUDIO) {
        /* ask the decoder for at most 2 channels (SDL stereo output) */
        if (avctx->channels > 0) {
            avctx->request_channels = FFMIN(2, avctx->channels);
        } else {
            avctx->request_channels = 2;
        }
    }

    codec = avcodec_find_decoder(avctx->codec_id);
    /* apply the command-line decoding options to the codec context */
    avctx->debug_mv = debug_mv;
    avctx->debug = debug;
    avctx->workaround_bugs = workaround_bugs;
    avctx->lowres = lowres;
    if(lowres) avctx->flags |= CODEC_FLAG_EMU_EDGE;
    avctx->idct_algo= idct;
    if(fast) avctx->flags2 |= CODEC_FLAG2_FAST;
    avctx->skip_frame= skip_frame;
    avctx->skip_idct= skip_idct;
    avctx->skip_loop_filter= skip_loop_filter;
    avctx->error_recognition= error_recognition;
    avctx->error_concealment= error_concealment;
    avcodec_thread_init(avctx, thread_count);

    set_context_opts(avctx, avcodec_opts[avctx->codec_type], 0);

    if (!codec ||
        avcodec_open(avctx, codec) < 0)
        return -1;

    /* prepare audio output */
    if (avctx->codec_type == AVMEDIA_TYPE_AUDIO) {
        wanted_spec.freq = avctx->sample_rate;
        wanted_spec.format = AUDIO_S16SYS;
        wanted_spec.channels = avctx->channels;
        wanted_spec.silence = 0;
        wanted_spec.samples = SDL_AUDIO_BUFFER_SIZE;
        wanted_spec.callback = sdl_audio_callback;
        wanted_spec.userdata = is;
        if (SDL_OpenAudio(&wanted_spec, &spec) < 0) {
            fprintf(stderr, "SDL_OpenAudio: %s\n", SDL_GetError());
            return -1;
        }
        is->audio_hw_buf_size = spec.size;
        is->audio_src_fmt= SAMPLE_FMT_S16;
    }

    ic->streams[stream_index]->discard = AVDISCARD_DEFAULT;
    switch(avctx->codec_type) {
    case AVMEDIA_TYPE_AUDIO:
        is->audio_stream = stream_index;
        is->audio_st = ic->streams[stream_index];
        is->audio_buf_size = 0;
        is->audio_buf_index = 0;

        /* init averaging filter */
        is->audio_diff_avg_coef = exp(log(0.01) / AUDIO_DIFF_AVG_NB);
        is->audio_diff_avg_count = 0;
        /* since we do not have a precise anough audio fifo fullness,
           we correct audio sync only if larger than this threshold */
        is->audio_diff_threshold = 2.0 * SDL_AUDIO_BUFFER_SIZE / avctx->sample_rate;

        memset(&is->audio_pkt, 0, sizeof(is->audio_pkt));
        packet_queue_init(&is->audioq);
        SDL_PauseAudio(0);  /* start the audio callback */
        break;
    case AVMEDIA_TYPE_VIDEO:
        is->video_stream = stream_index;
        is->video_st = ic->streams[stream_index];

//        is->video_current_pts_time = av_gettime();

        packet_queue_init(&is->videoq);
        is->video_tid = SDL_CreateThread(video_thread, is);
        break;
    case AVMEDIA_TYPE_SUBTITLE:
        is->subtitle_stream = stream_index;
        is->subtitle_st = ic->streams[stream_index];
        packet_queue_init(&is->subtitleq);

        is->subtitle_tid = SDL_CreateThread(subtitle_thread, is);
        break;
    default:
        break;
    }
    return 0;
}
2355
/* Close one stream: abort its packet queue, wake/join the consumer
 * (SDL audio device or decoding thread), release queue resources and
 * finally close the codec.  Teardown order matters: abort first so the
 * consumer's blocking waits return, then signal, then join. */
static void stream_component_close(VideoState *is, int stream_index)
{
    AVFormatContext *ic = is->ic;
    AVCodecContext *avctx;

    if (stream_index < 0 || stream_index >= ic->nb_streams)
        return;
    avctx = ic->streams[stream_index]->codec;

    switch(avctx->codec_type) {
    case AVMEDIA_TYPE_AUDIO:
        packet_queue_abort(&is->audioq);

        /* stops the SDL callback and joins the audio device */
        SDL_CloseAudio();

        packet_queue_end(&is->audioq);
        if (is->reformat_ctx)
            av_audio_convert_free(is->reformat_ctx);
        is->reformat_ctx = NULL;
        break;
    case AVMEDIA_TYPE_VIDEO:
        packet_queue_abort(&is->videoq);

        /* note: we also signal this mutex to make sure we deblock the
           video thread in all cases */
        SDL_LockMutex(is->pictq_mutex);
        SDL_CondSignal(is->pictq_cond);
        SDL_UnlockMutex(is->pictq_mutex);

        SDL_WaitThread(is->video_tid, NULL);

        packet_queue_end(&is->videoq);
        break;
    case AVMEDIA_TYPE_SUBTITLE:
        packet_queue_abort(&is->subtitleq);

        /* note: we also signal this mutex to make sure we deblock the
           video thread in all cases */
        SDL_LockMutex(is->subpq_mutex);
        is->subtitle_stream_changed = 1;

        SDL_CondSignal(is->subpq_cond);
        SDL_UnlockMutex(is->subpq_mutex);

        SDL_WaitThread(is->subtitle_tid, NULL);

        packet_queue_end(&is->subtitleq);
        break;
    default:
        break;
    }

    ic->streams[stream_index]->discard = AVDISCARD_ALL;
    avcodec_close(avctx);
    /* clear the per-type bookkeeping so other code sees the stream as gone */
    switch(avctx->codec_type) {
    case AVMEDIA_TYPE_AUDIO:
        is->audio_st = NULL;
        is->audio_stream = -1;
        break;
    case AVMEDIA_TYPE_VIDEO:
        is->video_st = NULL;
        is->video_stream = -1;
        break;
    case AVMEDIA_TYPE_SUBTITLE:
        is->subtitle_st = NULL;
        is->subtitle_stream = -1;
        break;
    default:
        break;
    }
}
2427
2428 /* since we have only one decoding thread, we can use a global
2429    variable instead of a thread local variable */
2430 static VideoState *global_video_state;
2431
2432 static int decode_interrupt_cb(void)
2433 {
2434     return (global_video_state && global_video_state->abort_request);
2435 }
2436
/* this thread gets the stream from the disk or the network */
/* Demux thread: opens the input, selects the best audio/video/subtitle
 * streams, opens their components, then loops reading packets and
 * dispatching them to the per-stream queues, handling pause, seek
 * requests, queue back-pressure, EOF/looping and errors.  On exit it
 * closes everything and (on error) posts FF_QUIT_EVENT to the UI. */
static int decode_thread(void *arg)
{
    VideoState *is = arg;
    AVFormatContext *ic;
    int err, i, ret;
    int st_index[AVMEDIA_TYPE_NB];             /* chosen stream per media type, -1 if none */
    int st_count[AVMEDIA_TYPE_NB]={0};         /* how many streams of each type seen so far */
    int st_best_packet_count[AVMEDIA_TYPE_NB]; /* best codec_info_nb_frames per type */
    AVPacket pkt1, *pkt = &pkt1;
    AVFormatParameters params, *ap = &params;
    int eof=0;
    int pkt_in_play_range = 0;

    ic = avformat_alloc_context();

    memset(st_index, -1, sizeof(st_index));
    memset(st_best_packet_count, -1, sizeof(st_best_packet_count));
    is->video_stream = -1;
    is->audio_stream = -1;
    is->subtitle_stream = -1;

    /* allow blocking I/O to be interrupted by abort_request */
    global_video_state = is;
    url_set_interrupt_cb(decode_interrupt_cb);

    memset(ap, 0, sizeof(*ap));

    /* hints for raw/headerless demuxers, taken from the command line */
    ap->prealloced_context = 1;
    ap->width = frame_width;
    ap->height= frame_height;
    ap->time_base= (AVRational){1, 25};
    ap->pix_fmt = frame_pix_fmt;

    set_context_opts(ic, avformat_opts, AV_OPT_FLAG_DECODING_PARAM);

    err = av_open_input_file(&ic, is->filename, is->iformat, 0, ap);
    if (err < 0) {
        print_error(is->filename, err);
        ret = -1;
        goto fail;
    }
    is->ic = ic;

    if(genpts)
        ic->flags |= AVFMT_FLAG_GENPTS;

    err = av_find_stream_info(ic);
    if (err < 0) {
        fprintf(stderr, "%s: could not find codec parameters\n", is->filename);
        ret = -1;
        goto fail;
    }
    if(ic->pb)
        ic->pb->eof_reached= 0; //FIXME hack, ffplay maybe should not use url_feof() to test for the end

    /* auto-detect: seek by bytes for formats with timestamp discontinuities */
    if(seek_by_bytes<0)
        seek_by_bytes= !!(ic->iformat->flags & AVFMT_TS_DISCONT);

    /* if seeking requested, we execute it */
    if (start_time != AV_NOPTS_VALUE) {
        int64_t timestamp;

        timestamp = start_time;
        /* add the stream start time */
        if (ic->start_time != AV_NOPTS_VALUE)
            timestamp += ic->start_time;
        ret = avformat_seek_file(ic, -1, INT64_MIN, timestamp, INT64_MAX, 0);
        if (ret < 0) {
            fprintf(stderr, "%s: could not seek to position %0.3f\n",
                    is->filename, (double)timestamp / AV_TIME_BASE);
        }
    }

    /* pick one stream per media type: honor -ast/-vst/-sst style wanted
       indices, otherwise keep the stream with the most frames seen
       during probing; everything starts discarded until opened */
    for(i = 0; i < ic->nb_streams; i++) {
        AVStream *st= ic->streams[i];
        AVCodecContext *avctx = st->codec;
        ic->streams[i]->discard = AVDISCARD_ALL;
        if(avctx->codec_type >= (unsigned)AVMEDIA_TYPE_NB)
            continue;
        if(st_count[avctx->codec_type]++ != wanted_stream[avctx->codec_type] && wanted_stream[avctx->codec_type] >= 0)
            continue;

        if(st_best_packet_count[avctx->codec_type] >= st->codec_info_nb_frames)
            continue;
        st_best_packet_count[avctx->codec_type]= st->codec_info_nb_frames;

        switch(avctx->codec_type) {
        case AVMEDIA_TYPE_AUDIO:
            if (!audio_disable)
                st_index[AVMEDIA_TYPE_AUDIO] = i;
            break;
        case AVMEDIA_TYPE_VIDEO:
        case AVMEDIA_TYPE_SUBTITLE:
            if (!video_disable)
                st_index[avctx->codec_type] = i;
            break;
        default:
            break;
        }
    }
    if (show_status) {
        dump_format(ic, 0, is->filename, 0);
    }

    /* open the streams */
    if (st_index[AVMEDIA_TYPE_AUDIO] >= 0) {
        stream_component_open(is, st_index[AVMEDIA_TYPE_AUDIO]);
    }

    ret=-1;
    if (st_index[AVMEDIA_TYPE_VIDEO] >= 0) {
        ret= stream_component_open(is, st_index[AVMEDIA_TYPE_VIDEO]);
    }
    is->refresh_tid = SDL_CreateThread(refresh_thread, is);
    if(ret<0) {
        /* no video: show the audio visualization instead */
        if (!display_disable)
            is->show_audio = 2;
    }

    if (st_index[AVMEDIA_TYPE_SUBTITLE] >= 0) {
        stream_component_open(is, st_index[AVMEDIA_TYPE_SUBTITLE]);
    }

    if (is->video_stream < 0 && is->audio_stream < 0) {
        fprintf(stderr, "%s: could not open codecs\n", is->filename);
        ret = -1;
        goto fail;
    }

    for(;;) {
        if (is->abort_request)
            break;
        /* propagate pause state changes to the demuxer (for network streams) */
        if (is->paused != is->last_paused) {
            is->last_paused = is->paused;
            if (is->paused)
                is->read_pause_return= av_read_pause(ic);
            else
                av_read_play(ic);
        }
#if CONFIG_RTSP_DEMUXER
        if (is->paused && !strcmp(ic->iformat->name, "rtsp")) {
            /* wait 10 ms to avoid trying to get another packet */
            /* XXX: horrible */
            SDL_Delay(10);
            continue;
        }
#endif
        if (is->seek_req) {
            int64_t seek_target= is->seek_pos;
            int64_t seek_min= is->seek_rel > 0 ? seek_target - is->seek_rel + 2: INT64_MIN;
            int64_t seek_max= is->seek_rel < 0 ? seek_target - is->seek_rel - 2: INT64_MAX;
//FIXME the +-2 is due to rounding being not done in the correct direction in generation
//      of the seek_pos/seek_rel variables

            ret = avformat_seek_file(is->ic, -1, seek_min, seek_target, seek_max, is->seek_flags);
            if (ret < 0) {
                fprintf(stderr, "%s: error while seeking\n", is->ic->filename);
            }else{
                /* flush all queues and inject flush packets so the
                   decoders reset their state */
                if (is->audio_stream >= 0) {
                    packet_queue_flush(&is->audioq);
                    packet_queue_put(&is->audioq, &flush_pkt);
                }
                if (is->subtitle_stream >= 0) {
                    packet_queue_flush(&is->subtitleq);
                    packet_queue_put(&is->subtitleq, &flush_pkt);
                }
                if (is->video_stream >= 0) {
                    packet_queue_flush(&is->videoq);
                    packet_queue_put(&is->videoq, &flush_pkt);
                }
            }
            is->seek_req = 0;
            eof= 0;
        }

        /* if the queue are full, no need to read more */
        if (   is->audioq.size + is->videoq.size + is->subtitleq.size > MAX_QUEUE_SIZE
            || (   (is->audioq   .size  > MIN_AUDIOQ_SIZE || is->audio_stream<0)
                && (is->videoq   .nb_packets > MIN_FRAMES || is->video_stream<0)
                && (is->subtitleq.nb_packets > MIN_FRAMES || is->subtitle_stream<0))) {
            /* wait 10 ms */
            SDL_Delay(10);
            continue;
        }
        if(url_feof(ic->pb) || eof) {
            /* send an empty packet to drain the video decoder */
            if(is->video_stream >= 0){
                av_init_packet(pkt);
                pkt->data=NULL;
                pkt->size=0;
                pkt->stream_index= is->video_stream;
                packet_queue_put(&is->videoq, pkt);
            }
            SDL_Delay(10);
            /* once everything queued has been consumed: loop or exit */
            if(is->audioq.size + is->videoq.size + is->subtitleq.size ==0){
                if(loop!=1 && (!loop || --loop)){
                    stream_seek(cur_stream, start_time != AV_NOPTS_VALUE ? start_time : 0, 0, 0);
                }else if(autoexit){
                    ret=AVERROR_EOF;
                    goto fail;
                }
            }
            continue;
        }
        ret = av_read_frame(ic, pkt);
        if (ret < 0) {
            if (ret == AVERROR_EOF)
                eof=1;
            if (url_ferror(ic->pb))
                break;
            SDL_Delay(100); /* wait for user event */
            continue;
        }
        /* check if packet is in play range specified by user, then queue, otherwise discard */
        pkt_in_play_range = duration == AV_NOPTS_VALUE ||
                (pkt->pts - ic->streams[pkt->stream_index]->start_time) *
                av_q2d(ic->streams[pkt->stream_index]->time_base) -
                (double)(start_time != AV_NOPTS_VALUE ? start_time : 0)/1000000
                <= ((double)duration/1000000);
        if (pkt->stream_index == is->audio_stream && pkt_in_play_range) {
            packet_queue_put(&is->audioq, pkt);
        } else if (pkt->stream_index == is->video_stream && pkt_in_play_range) {
            packet_queue_put(&is->videoq, pkt);
        } else if (pkt->stream_index == is->subtitle_stream && pkt_in_play_range) {
            packet_queue_put(&is->subtitleq, pkt);
        } else {
            av_free_packet(pkt);
        }
    }
    /* wait until the end */
    while (!is->abort_request) {
        SDL_Delay(100);
    }

    ret = 0;
 fail:
    /* disable interrupting */
    global_video_state = NULL;

    /* close each stream */
    if (is->audio_stream >= 0)
        stream_component_close(is, is->audio_stream);
    if (is->video_stream >= 0)
        stream_component_close(is, is->video_stream);
    if (is->subtitle_stream >= 0)
        stream_component_close(is, is->subtitle_stream);
    if (is->ic) {
        av_close_input_file(is->ic);
        is->ic = NULL; /* safety */
    }
    url_set_interrupt_cb(NULL);

    if (ret != 0) {
        /* tell the event loop to quit */
        SDL_Event event;

        event.type = FF_QUIT_EVENT;
        event.user.data1 = is;
        SDL_PushEvent(&event);
    }
    return 0;
}
2697
2698 static VideoState *stream_open(const char *filename, AVInputFormat *iformat)
2699 {
2700     VideoState *is;
2701
2702     is = av_mallocz(sizeof(VideoState));
2703     if (!is)
2704         return NULL;
2705     av_strlcpy(is->filename, filename, sizeof(is->filename));
2706     is->iformat = iformat;
2707     is->ytop = 0;
2708     is->xleft = 0;
2709
2710     /* start video display */
2711     is->pictq_mutex = SDL_CreateMutex();
2712     is->pictq_cond = SDL_CreateCond();
2713
2714     is->subpq_mutex = SDL_CreateMutex();
2715     is->subpq_cond = SDL_CreateCond();
2716
2717     is->av_sync_type = av_sync_type;
2718     is->parse_tid = SDL_CreateThread(decode_thread, is);
2719     if (!is->parse_tid) {
2720         av_free(is);
2721         return NULL;
2722     }
2723     return is;
2724 }
2725
2726 static void stream_cycle_channel(VideoState *is, int codec_type)
2727 {
2728     AVFormatContext *ic = is->ic;
2729     int start_index, stream_index;
2730     AVStream *st;
2731
2732     if (codec_type == AVMEDIA_TYPE_VIDEO)
2733         start_index = is->video_stream;
2734     else if (codec_type == AVMEDIA_TYPE_AUDIO)
2735         start_index = is->audio_stream;
2736     else
2737         start_index = is->subtitle_stream;
2738     if (start_index < (codec_type == AVMEDIA_TYPE_SUBTITLE ? -1 : 0))
2739         return;
2740     stream_index = start_index;
2741     for(;;) {
2742         if (++stream_index >= is->ic->nb_streams)
2743         {
2744             if (codec_type == AVMEDIA_TYPE_SUBTITLE)
2745             {
2746                 stream_index = -1;
2747                 goto the_end;
2748             } else
2749                 stream_index = 0;
2750         }
2751         if (stream_index == start_index)
2752             return;
2753         st = ic->streams[stream_index];
2754         if (st->codec->codec_type == codec_type) {
2755             /* check that parameters are OK */
2756             switch(codec_type) {
2757             case AVMEDIA_TYPE_AUDIO:
2758                 if (st->codec->sample_rate != 0 &&
2759                     st->codec->channels != 0)
2760                     goto the_end;
2761                 break;
2762             case AVMEDIA_TYPE_VIDEO:
2763             case AVMEDIA_TYPE_SUBTITLE:
2764                 goto the_end;
2765             default:
2766                 break;
2767             }
2768         }
2769     }
2770  the_end:
2771     stream_component_close(is, start_index);
2772     stream_component_open(is, stream_index);
2773 }
2774
2775
2776 static void toggle_full_screen(void)
2777 {
2778     is_full_screen = !is_full_screen;
2779     if (!fs_screen_width) {
2780         /* use default SDL method */
2781 //        SDL_WM_ToggleFullScreen(screen);
2782     }
2783     video_open(cur_stream);
2784 }
2785
2786 static void toggle_pause(void)
2787 {
2788     if (cur_stream)
2789         stream_pause(cur_stream);
2790     step = 0;
2791 }
2792
2793 static void step_to_next_frame(void)
2794 {
2795     if (cur_stream) {
2796         /* if the stream is paused unpause it, then step */
2797         if (cur_stream->paused)
2798             stream_pause(cur_stream);
2799     }
2800     step = 1;
2801 }
2802
2803 static void toggle_audio_display(void)
2804 {
2805     if (cur_stream) {
2806         int bgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0x00);
2807         cur_stream->show_audio = (cur_stream->show_audio + 1) % 3;
2808         fill_rectangle(screen,
2809                     cur_stream->xleft, cur_stream->ytop, cur_stream->width, cur_stream->height,
2810                     bgcolor);
2811         SDL_UpdateRect(screen, cur_stream->xleft, cur_stream->ytop, cur_stream->width, cur_stream->height);
2812     }
2813 }
2814
/* handle events sent by the GUI; this loop never returns normally --
   quitting goes through do_exit(), which terminates the process */
static void event_loop(void)
{
    SDL_Event event;
    double incr, pos, frac;

    for(;;) {
        double x;
        SDL_WaitEvent(&event);
        switch(event.type) {
        case SDL_KEYDOWN:
            if (exit_on_keydown) {
                do_exit();
                break;
            }
            switch(event.key.keysym.sym) {
            case SDLK_ESCAPE:
            case SDLK_q:
                do_exit();
                break;
            case SDLK_f:
                toggle_full_screen();
                break;
            case SDLK_p:
            case SDLK_SPACE:
                toggle_pause();
                break;
            case SDLK_s: //S: Step to next frame
                step_to_next_frame();
                break;
            case SDLK_a:
                if (cur_stream)
                    stream_cycle_channel(cur_stream, AVMEDIA_TYPE_AUDIO);
                break;
            case SDLK_v:
                if (cur_stream)
                    stream_cycle_channel(cur_stream, AVMEDIA_TYPE_VIDEO);
                break;
            case SDLK_t:
                if (cur_stream)
                    stream_cycle_channel(cur_stream, AVMEDIA_TYPE_SUBTITLE);
                break;
            case SDLK_w:
                toggle_audio_display();
                break;
            /* arrow keys: relative seek, increment expressed in seconds */
            case SDLK_LEFT:
                incr = -10.0;
                goto do_seek;
            case SDLK_RIGHT:
                incr = 10.0;
                goto do_seek;
            case SDLK_UP:
                incr = 60.0;
                goto do_seek;
            case SDLK_DOWN:
                incr = -60.0;
            do_seek:
                if (cur_stream) {
                    if (seek_by_bytes) {
                        /* byte-based seek: start from the best known byte
                           position, then convert the seconds increment to
                           bytes via the stream bit rate (falling back to a
                           180000 B/s guess when the bit rate is unknown) */
                        if (cur_stream->video_stream >= 0 && cur_stream->video_current_pos>=0){
                            pos= cur_stream->video_current_pos;
                        }else if(cur_stream->audio_stream >= 0 && cur_stream->audio_pkt.pos>=0){
                            pos= cur_stream->audio_pkt.pos;
                        }else
                            pos = url_ftell(cur_stream->ic->pb);
                        if (cur_stream->ic->bit_rate)
                            incr *= cur_stream->ic->bit_rate / 8.0;
                        else
                            incr *= 180000.0;
                        pos += incr;
                        stream_seek(cur_stream, pos, incr, 1);
                    } else {
                        /* time-based seek, in AV_TIME_BASE units relative to
                           the master clock */
                        pos = get_master_clock(cur_stream);
                        pos += incr;
                        stream_seek(cur_stream, (int64_t)(pos * AV_TIME_BASE), (int64_t)(incr * AV_TIME_BASE), 0);
                    }
                }
                break;
            default:
                break;
            }
            break;
        case SDL_MOUSEBUTTONDOWN:
            if (exit_on_mousedown) {
                do_exit();
                break;
            }
            /* fall through: a click seeks just like a drag */
        case SDL_MOUSEMOTION:
            if(event.type ==SDL_MOUSEBUTTONDOWN){
                x= event.button.x;
            }else{
                if(event.motion.state != SDL_PRESSED)
                    break;
                x= event.motion.x;
            }
            if (cur_stream) {
                /* map the horizontal position to a fraction of the file:
                   by bytes when requested or when duration is unknown,
                   otherwise by timestamp */
                if(seek_by_bytes || cur_stream->ic->duration<=0){
                    uint64_t size=  url_fsize(cur_stream->ic->pb);
                    stream_seek(cur_stream, size*x/cur_stream->width, 0, 1);
                }else{
                    int64_t ts;
                    int ns, hh, mm, ss;
                    int tns, thh, tmm, tss;
                    tns = cur_stream->ic->duration/1000000LL;
                    thh = tns/3600;
                    tmm = (tns%3600)/60;
                    tss = (tns%60);
                    frac = x/cur_stream->width;
                    ns = frac*tns;
                    hh = ns/3600;
                    mm = (ns%3600)/60;
                    ss = (ns%60);
                    fprintf(stderr, "Seek to %2.0f%% (%2d:%02d:%02d) of total duration (%2d:%02d:%02d)       \n", frac*100,
                            hh, mm, ss, thh, tmm, tss);
                    ts = frac*cur_stream->ic->duration;
                    if (cur_stream->ic->start_time != AV_NOPTS_VALUE)
                        ts += cur_stream->ic->start_time;
                    stream_seek(cur_stream, ts, 0, 0);
                }
            }
            break;
        case SDL_VIDEORESIZE:
            if (cur_stream) {
                screen = SDL_SetVideoMode(event.resize.w, event.resize.h, 0,
                                          SDL_HWSURFACE|SDL_RESIZABLE|SDL_ASYNCBLIT|SDL_HWACCEL);
                screen_width = cur_stream->width = event.resize.w;
                screen_height= cur_stream->height= event.resize.h;
            }
            break;
        case SDL_QUIT:
        case FF_QUIT_EVENT:
            do_exit();
            break;
        case FF_ALLOC_EVENT:
            /* posted by the video thread so the picture buffer is
               (re)allocated from the main/SDL thread */
            video_open(event.user.data1);
            alloc_picture(event.user.data1);
            break;
        case FF_REFRESH_EVENT:
            /* posted by the refresh timer to redisplay the current frame */
            video_refresh_timer(event.user.data1);
            cur_stream->refresh=0;
            break;
        default:
            break;
        }
    }
}
2961
2962 static void opt_frame_size(const char *arg)
2963 {
2964     if (av_parse_video_size(&frame_width, &frame_height, arg) < 0) {
2965         fprintf(stderr, "Incorrect frame size\n");
2966         exit(1);
2967     }
2968     if ((frame_width % 2) != 0 || (frame_height % 2) != 0) {
2969         fprintf(stderr, "Frame size must be a multiple of 2\n");
2970         exit(1);
2971     }
2972 }
2973
2974 static int opt_width(const char *opt, const char *arg)
2975 {
2976     screen_width = parse_number_or_die(opt, arg, OPT_INT64, 1, INT_MAX);
2977     return 0;
2978 }
2979
2980 static int opt_height(const char *opt, const char *arg)
2981 {
2982     screen_height = parse_number_or_die(opt, arg, OPT_INT64, 1, INT_MAX);
2983     return 0;
2984 }
2985
2986 static void opt_format(const char *arg)
2987 {
2988     file_iformat = av_find_input_format(arg);
2989     if (!file_iformat) {
2990         fprintf(stderr, "Unknown input format: %s\n", arg);
2991         exit(1);
2992     }
2993 }
2994
2995 static void opt_frame_pix_fmt(const char *arg)
2996 {
2997     frame_pix_fmt = av_get_pix_fmt(arg);
2998 }
2999
3000 static int opt_sync(const char *opt, const char *arg)
3001 {
3002     if (!strcmp(arg, "audio"))
3003         av_sync_type = AV_SYNC_AUDIO_MASTER;
3004     else if (!strcmp(arg, "video"))
3005         av_sync_type = AV_SYNC_VIDEO_MASTER;
3006     else if (!strcmp(arg, "ext"))
3007         av_sync_type = AV_SYNC_EXTERNAL_CLOCK;
3008     else {
3009         fprintf(stderr, "Unknown value for %s: %s\n", opt, arg);
3010         exit(1);
3011     }
3012     return 0;
3013 }
3014
3015 static int opt_seek(const char *opt, const char *arg)
3016 {
3017     start_time = parse_time_or_die(opt, arg, 1);
3018     return 0;
3019 }
3020
3021 static int opt_duration(const char *opt, const char *arg)
3022 {
3023     duration = parse_time_or_die(opt, arg, 1);
3024     return 0;
3025 }
3026
3027 static int opt_debug(const char *opt, const char *arg)
3028 {
3029     av_log_set_level(99);
3030     debug = parse_number_or_die(opt, arg, OPT_INT64, 0, INT_MAX);
3031     return 0;
3032 }
3033
3034 static int opt_vismv(const char *opt, const char *arg)
3035 {
3036     debug_mv = parse_number_or_die(opt, arg, OPT_INT64, INT_MIN, INT_MAX);
3037     return 0;
3038 }
3039
3040 static int opt_thread_count(const char *opt, const char *arg)
3041 {
3042     thread_count= parse_number_or_die(opt, arg, OPT_INT64, 0, INT_MAX);
3043 #if !HAVE_THREADS
3044     fprintf(stderr, "Warning: not compiled with thread support, using thread emulation\n");
3045 #endif
3046     return 0;
3047 }
3048
/* Command line option table: { name, flags, handler-or-variable, help,
   argument name }. Generic options (-h, -version, ...) come from
   cmdutils_common_opts.h; the table is NULL-terminated for parse_options(). */
static const OptionDef options[] = {
#include "cmdutils_common_opts.h"
    { "x", HAS_ARG | OPT_FUNC2, {(void*)opt_width}, "force displayed width", "width" },
    { "y", HAS_ARG | OPT_FUNC2, {(void*)opt_height}, "force displayed height", "height" },
    { "s", HAS_ARG | OPT_VIDEO, {(void*)opt_frame_size}, "set frame size (WxH or abbreviation)", "size" },
    { "fs", OPT_BOOL, {(void*)&is_full_screen}, "force full screen" },
    { "an", OPT_BOOL, {(void*)&audio_disable}, "disable audio" },
    { "vn", OPT_BOOL, {(void*)&video_disable}, "disable video" },
    { "ast", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&wanted_stream[AVMEDIA_TYPE_AUDIO]}, "select desired audio stream", "stream_number" },
    { "vst", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&wanted_stream[AVMEDIA_TYPE_VIDEO]}, "select desired video stream", "stream_number" },
    { "sst", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&wanted_stream[AVMEDIA_TYPE_SUBTITLE]}, "select desired subtitle stream", "stream_number" },
    { "ss", HAS_ARG | OPT_FUNC2, {(void*)&opt_seek}, "seek to a given position in seconds", "pos" },
    { "t", HAS_ARG | OPT_FUNC2, {(void*)&opt_duration}, "play  \"duration\" seconds of audio/video", "duration" },
    { "bytes", OPT_INT | HAS_ARG, {(void*)&seek_by_bytes}, "seek by bytes 0=off 1=on -1=auto", "val" },
    { "nodisp", OPT_BOOL, {(void*)&display_disable}, "disable graphical display" },
    { "f", HAS_ARG, {(void*)opt_format}, "force format", "fmt" },
    { "pix_fmt", HAS_ARG | OPT_EXPERT | OPT_VIDEO, {(void*)opt_frame_pix_fmt}, "set pixel format", "format" },
    { "stats", OPT_BOOL | OPT_EXPERT, {(void*)&show_status}, "show status", "" },
    { "debug", HAS_ARG | OPT_FUNC2 | OPT_EXPERT, {(void*)opt_debug}, "print specific debug info", "" },
    { "bug", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&workaround_bugs}, "workaround bugs", "" },
    { "vismv", HAS_ARG | OPT_FUNC2 | OPT_EXPERT, {(void*)opt_vismv}, "visualize motion vectors", "" },
    { "fast", OPT_BOOL | OPT_EXPERT, {(void*)&fast}, "non spec compliant optimizations", "" },
    { "genpts", OPT_BOOL | OPT_EXPERT, {(void*)&genpts}, "generate pts", "" },
    { "drp", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&decoder_reorder_pts}, "let decoder reorder pts 0=off 1=on -1=auto", ""},
    { "lowres", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&lowres}, "", "" },
    { "skiploop", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&skip_loop_filter}, "", "" },
    { "skipframe", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&skip_frame}, "", "" },
    { "skipidct", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&skip_idct}, "", "" },
    { "idct", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&idct}, "set idct algo",  "algo" },
    { "er", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&error_recognition}, "set error detection threshold (0-4)",  "threshold" },
    { "ec", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&error_concealment}, "set error concealment options",  "bit_mask" },
    { "sync", HAS_ARG | OPT_FUNC2 | OPT_EXPERT, {(void*)opt_sync}, "set audio-video sync. type (type=audio/video/ext)", "type" },
    { "threads", HAS_ARG | OPT_FUNC2 | OPT_EXPERT, {(void*)opt_thread_count}, "thread count", "count" },
    { "autoexit", OPT_BOOL | OPT_EXPERT, {(void*)&autoexit}, "exit at the end", "" },
    { "exitonkeydown", OPT_BOOL | OPT_EXPERT, {(void*)&exit_on_keydown}, "exit on key down", "" },
    { "exitonmousedown", OPT_BOOL | OPT_EXPERT, {(void*)&exit_on_mousedown}, "exit on mouse down", "" },
    { "loop", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&loop}, "set number of times the playback shall be looped", "loop count" },
    { "framedrop", OPT_BOOL | OPT_EXPERT, {(void*)&framedrop}, "drop frames when cpu is too slow", "" },
    { "window_title", OPT_STRING | HAS_ARG, {(void*)&window_title}, "set window title", "window title" },
#if CONFIG_AVFILTER
    { "vf", OPT_STRING | HAS_ARG, {(void*)&vfilters}, "video filters", "filter list" },
#endif
    { "rdftspeed", OPT_INT | HAS_ARG| OPT_AUDIO | OPT_EXPERT, {(void*)&rdftspeed}, "rdft speed", "msecs" },
    { "default", OPT_FUNC2 | HAS_ARG | OPT_AUDIO | OPT_VIDEO | OPT_EXPERT, {(void*)opt_default}, "generic catch all option", "" },
    { NULL, }, /* sentinel */
};
3095
/* Print the one-line usage summary (output identical to the original
 * three printf calls). */
static void show_usage(void)
{
    printf("Simple media player\n"
           "usage: ffplay [options] input_file\n"
           "\n");
}
3102
3103 static void show_help(void)
3104 {
3105     show_usage();
3106     show_help_options(options, "Main options:\n",
3107                       OPT_EXPERT, 0);
3108     show_help_options(options, "\nAdvanced options:\n",
3109                       OPT_EXPERT, OPT_EXPERT);
3110     printf("\nWhile playing:\n"
3111            "q, ESC              quit\n"
3112            "f                   toggle full screen\n"
3113            "p, SPC              pause\n"
3114            "a                   cycle audio channel\n"
3115            "v                   cycle video channel\n"
3116            "t                   cycle subtitle channel\n"
3117            "w                   show audio waves\n"
3118            "s                   activate frame-step mode\n"
3119            "left/right          seek backward/forward 10 seconds\n"
3120            "down/up             seek backward/forward 1 minute\n"
3121            "mouse click         seek to percentage in file corresponding to fraction of width\n"
3122            );
3123 }
3124
3125 static void opt_input_file(const char *filename)
3126 {
3127     if (input_filename) {
3128         fprintf(stderr, "Argument '%s' provided as input filename, but '%s' was already specified.\n",
3129                 filename, input_filename);
3130         exit(1);
3131     }
3132     if (!strcmp(filename, "-"))
3133         filename = "pipe:";
3134     input_filename = filename;
3135 }
3136
3137 /* Called from the main */
3138 int main(int argc, char **argv)
3139 {
3140     int flags, i;
3141
3142     /* register all codecs, demux and protocols */
3143     avcodec_register_all();
3144 #if CONFIG_AVDEVICE
3145     avdevice_register_all();
3146 #endif
3147 #if CONFIG_AVFILTER
3148     avfilter_register_all();
3149 #endif
3150     av_register_all();
3151
3152     for(i=0; i<AVMEDIA_TYPE_NB; i++){
3153         avcodec_opts[i]= avcodec_alloc_context2(i);
3154     }
3155     avformat_opts = avformat_alloc_context();
3156 #if !CONFIG_AVFILTER
3157     sws_opts = sws_getContext(16,16,0, 16,16,0, sws_flags, NULL,NULL,NULL);
3158 #endif
3159
3160     show_banner();
3161
3162     parse_options(argc, argv, options, opt_input_file);
3163
3164     if (!input_filename) {
3165         show_usage();
3166         fprintf(stderr, "An input file must be specified\n");
3167         fprintf(stderr, "Use -h to get full help or, even better, run 'man ffplay'\n");
3168         exit(1);
3169     }
3170
3171     if (display_disable) {
3172         video_disable = 1;
3173     }
3174     flags = SDL_INIT_VIDEO | SDL_INIT_AUDIO | SDL_INIT_TIMER;
3175 #if !defined(__MINGW32__) && !defined(__APPLE__)
3176     flags |= SDL_INIT_EVENTTHREAD; /* Not supported on Windows or Mac OS X */
3177 #endif
3178     if (SDL_Init (flags)) {
3179         fprintf(stderr, "Could not initialize SDL - %s\n", SDL_GetError());
3180         exit(1);
3181     }
3182
3183     if (!display_disable) {
3184 #if HAVE_SDL_VIDEO_SIZE
3185         const SDL_VideoInfo *vi = SDL_GetVideoInfo();
3186         fs_screen_width = vi->current_w;
3187         fs_screen_height = vi->current_h;
3188 #endif
3189     }
3190
3191     SDL_EventState(SDL_ACTIVEEVENT, SDL_IGNORE);
3192     SDL_EventState(SDL_SYSWMEVENT, SDL_IGNORE);
3193     SDL_EventState(SDL_USEREVENT, SDL_IGNORE);
3194
3195     av_init_packet(&flush_pkt);
3196     flush_pkt.data= "FLUSH";
3197
3198     cur_stream = stream_open(input_filename, file_iformat);
3199
3200     event_loop();
3201
3202     /* never returns */
3203
3204     return 0;
3205 }