]> git.sesse.net Git - ffmpeg/blob - ffplay.c
add FF_API_INOFFICIAL define to disable the deprecated 'inofficial' flag
[ffmpeg] / ffplay.c
1 /*
2  * FFplay : Simple Media Player based on the FFmpeg libraries
3  * Copyright (c) 2003 Fabrice Bellard
4  *
5  * This file is part of FFmpeg.
6  *
7  * FFmpeg is free software; you can redistribute it and/or
8  * modify it under the terms of the GNU Lesser General Public
9  * License as published by the Free Software Foundation; either
10  * version 2.1 of the License, or (at your option) any later version.
11  *
12  * FFmpeg is distributed in the hope that it will be useful,
13  * but WITHOUT ANY WARRANTY; without even the implied warranty of
14  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
15  * Lesser General Public License for more details.
16  *
17  * You should have received a copy of the GNU Lesser General Public
18  * License along with FFmpeg; if not, write to the Free Software
19  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
20  */
21
22 #define _XOPEN_SOURCE 600
23
24 #include "config.h"
25 #include <inttypes.h>
26 #include <math.h>
27 #include <limits.h>
28 #include "libavutil/avstring.h"
29 #include "libavutil/colorspace.h"
30 #include "libavutil/pixdesc.h"
31 #include "libavcore/imgutils.h"
32 #include "libavcore/parseutils.h"
33 #include "libavformat/avformat.h"
34 #include "libavdevice/avdevice.h"
35 #include "libswscale/swscale.h"
36 #include "libavcodec/audioconvert.h"
37 #include "libavcodec/opt.h"
38 #include "libavcodec/avfft.h"
39
40 #if CONFIG_AVFILTER
41 # include "libavfilter/avfilter.h"
42 # include "libavfilter/avfiltergraph.h"
43 # include "libavfilter/graphparser.h"
44 #endif
45
46 #include "cmdutils.h"
47
48 #include <SDL.h>
49 #include <SDL_thread.h>
50
51 #ifdef __MINGW32__
52 #undef main /* We don't want SDL to override our main() */
53 #endif
54
55 #include <unistd.h>
56 #include <assert.h>
57
58 const char program_name[] = "FFplay";
59 const int program_birth_year = 2003;
60
61 //#define DEBUG_SYNC
62
63 #define MAX_QUEUE_SIZE (15 * 1024 * 1024)
64 #define MIN_AUDIOQ_SIZE (20 * 16 * 1024)
65 #define MIN_FRAMES 5
66
67 /* SDL audio buffer size, in samples. Should be small to have precise
68    A/V sync as SDL does not have hardware buffer fullness info. */
69 #define SDL_AUDIO_BUFFER_SIZE 1024
70
71 /* no AV sync correction is done if below the AV sync threshold */
72 #define AV_SYNC_THRESHOLD 0.01
73 /* no AV correction is done if too big error */
74 #define AV_NOSYNC_THRESHOLD 10.0
75
76 #define FRAME_SKIP_FACTOR 0.05
77
78 /* maximum audio speed change to get correct sync */
79 #define SAMPLE_CORRECTION_PERCENT_MAX 10
80
81 /* we use about AUDIO_DIFF_AVG_NB A-V differences to make the average */
82 #define AUDIO_DIFF_AVG_NB   20
83
84 /* NOTE: the size must be big enough to compensate the hardware audio buffersize size */
85 #define SAMPLE_ARRAY_SIZE (2*65536)
86
87 static int sws_flags = SWS_BICUBIC;
88
/* FIFO of demuxed packets shared between the demuxer and a decoder thread. */
typedef struct PacketQueue {
    AVPacketList *first_pkt, *last_pkt; /* singly linked list head / tail */
    int nb_packets;     /* number of packets currently queued */
    int size;           /* total bytes queued (packet data + list nodes) */
    int abort_request;  /* when set, blocked readers return immediately */
    SDL_mutex *mutex;   /* protects all of the fields above */
    SDL_cond *cond;     /* signalled on packet arrival and on abort */
} PacketQueue;
97
/* depth of the decoded picture and subtitle FIFOs */
#define VIDEO_PICTURE_QUEUE_SIZE 2
#define SUBPICTURE_QUEUE_SIZE 4

/* one decoded video frame queued for display */
typedef struct VideoPicture {
    double pts;                                  ///<presentation time stamp for this picture
    double target_clock;                         ///<av_gettime() time at which this should be displayed ideally
    int64_t pos;                                 ///<byte position in file
    SDL_Overlay *bmp;                            ///<SDL YUV overlay holding the pixel data
    int width, height; /* source height & width */
    int allocated;     /* true once bmp has been allocated (done in the event loop) */
    enum PixelFormat pix_fmt;

#if CONFIG_AVFILTER
    AVFilterBufferRef *picref;                   ///<filtered-frame buffer reference owning the data
#endif
} VideoPicture;
114
/* one decoded subtitle plus the time at which it becomes visible */
typedef struct SubPicture {
    double pts; /* presentation time stamp for this picture */
    AVSubtitle sub;
} SubPicture;
119
/* which clock drives A/V synchronisation */
enum {
    AV_SYNC_AUDIO_MASTER, /* default choice */
    AV_SYNC_VIDEO_MASTER,
    AV_SYNC_EXTERNAL_CLOCK, /* synchronize to an external clock */
};
125
/* complete player state for one opened media file */
typedef struct VideoState {
    SDL_Thread *parse_tid;      /* demuxer thread */
    SDL_Thread *video_tid;      /* video decoder thread */
    SDL_Thread *refresh_tid;    /* display refresh thread */
    AVInputFormat *iformat;
    int no_background;
    int abort_request;          /* set to tear down all threads */
    int paused;
    int last_paused;
    /* seek request state, consumed by the demuxer thread */
    int seek_req;
    int seek_flags;
    int64_t seek_pos;
    int64_t seek_rel;
    int read_pause_return;
    AVFormatContext *ic;
    int dtg_active_format;

    int audio_stream;

    int av_sync_type;           /* one of the AV_SYNC_* values above */
    double external_clock; /* external clock base */
    int64_t external_clock_time;

    /* audio clock and A/V drift estimation */
    double audio_clock;
    double audio_diff_cum; /* used for AV difference average computation */
    double audio_diff_avg_coef;
    double audio_diff_threshold;
    int audio_diff_avg_count;
    AVStream *audio_st;
    PacketQueue audioq;
    int audio_hw_buf_size;
    /* samples output by the codec. we reserve more space for avsync
       compensation */
    DECLARE_ALIGNED(16,uint8_t,audio_buf1)[(AVCODEC_MAX_AUDIO_FRAME_SIZE * 3) / 2];
    DECLARE_ALIGNED(16,uint8_t,audio_buf2)[(AVCODEC_MAX_AUDIO_FRAME_SIZE * 3) / 2];
    uint8_t *audio_buf;          /* points into audio_buf1 or audio_buf2 */
    unsigned int audio_buf_size; /* in bytes */
    int audio_buf_index; /* in bytes */
    AVPacket audio_pkt_temp;
    AVPacket audio_pkt;
    enum SampleFormat audio_src_fmt;
    AVAudioConvert *reformat_ctx;

    /* audio visualisation (waveform / RDFT spectrum) state */
    int show_audio; /* if true, display audio samples */
    int16_t sample_array[SAMPLE_ARRAY_SIZE];
    int sample_array_index;
    int last_i_start;
    RDFTContext *rdft;
    int rdft_bits;
    FFTSample *rdft_data;
    int xpos;

    /* subtitle decoding state */
    SDL_Thread *subtitle_tid;
    int subtitle_stream;
    int subtitle_stream_changed;
    AVStream *subtitle_st;
    PacketQueue subtitleq;
    SubPicture subpq[SUBPICTURE_QUEUE_SIZE];
    int subpq_size, subpq_rindex, subpq_windex;
    SDL_mutex *subpq_mutex;
    SDL_cond *subpq_cond;

    /* video decoding and display state */
    double frame_timer;
    double frame_last_pts;
    double frame_last_delay;
    double video_clock;                          ///<pts of last decoded frame / predicted pts of next decoded frame
    int video_stream;
    AVStream *video_st;
    PacketQueue videoq;
    double video_current_pts;                    ///<current displayed pts (different from video_clock if frame fifos are used)
    double video_current_pts_drift;              ///<video_current_pts - time (av_gettime) at which we updated video_current_pts - used to have running video pts
    int64_t video_current_pos;                   ///<current displayed file pos
    VideoPicture pictq[VIDEO_PICTURE_QUEUE_SIZE];
    int pictq_size, pictq_rindex, pictq_windex;
    SDL_mutex *pictq_mutex;
    SDL_cond *pictq_cond;
#if !CONFIG_AVFILTER
    struct SwsContext *img_convert_ctx;
#endif

    //    QETimer *video_timer;
    char filename[1024];
    int width, height, xleft, ytop;     /* current display window geometry */

    PtsCorrectionContext pts_ctx;

#if CONFIG_AVFILTER
    AVFilterContext *out_video_filter;          ///<the last filter in the video chain
#endif

    float skip_frames;
    float skip_frames_index;
    int refresh;
} VideoState;
220
221 static void show_help(void);
222 static int audio_write_get_buf_size(VideoState *is);
223
/* options specified by the user */
static AVInputFormat *file_iformat;
static const char *input_filename;
static const char *window_title;
static int fs_screen_width;
static int fs_screen_height;
static int screen_width = 0;
static int screen_height = 0;
static int frame_width = 0;
static int frame_height = 0;
static enum PixelFormat frame_pix_fmt = PIX_FMT_NONE;
static int audio_disable;
static int video_disable;
static int wanted_stream[AVMEDIA_TYPE_NB]={ /* -1 means auto-select */
    [AVMEDIA_TYPE_AUDIO]=-1,
    [AVMEDIA_TYPE_VIDEO]=-1,
    [AVMEDIA_TYPE_SUBTITLE]=-1,
};
static int seek_by_bytes=-1; /* -1: decide per container */
static int display_disable;
static int show_status = 1;
static int av_sync_type = AV_SYNC_AUDIO_MASTER;
static int64_t start_time = AV_NOPTS_VALUE;
static int64_t duration = AV_NOPTS_VALUE;
static int debug = 0;
static int debug_mv = 0;
static int step = 0;
static int thread_count = 1;
static int workaround_bugs = 1;
static int fast = 0;
static int genpts = 0;
static int lowres = 0;
static int idct = FF_IDCT_AUTO;
static enum AVDiscard skip_frame= AVDISCARD_DEFAULT;
static enum AVDiscard skip_idct= AVDISCARD_DEFAULT;
static enum AVDiscard skip_loop_filter= AVDISCARD_DEFAULT;
static int error_recognition = FF_ER_CAREFUL;
static int error_concealment = 3;
static int decoder_reorder_pts= -1;
static int autoexit;
static int exit_on_keydown;
static int exit_on_mousedown;
static int loop=1;
static int framedrop=1;

static int rdftspeed=20; /* spectrum-analyser refresh period, in ms */
#if CONFIG_AVFILTER
static char *vfilters = NULL;
#endif

/* current context */
static int is_full_screen;
static VideoState *cur_stream;
static int64_t audio_callback_time;

/* sentinel packet queued to make decoders flush their state after a seek */
static AVPacket flush_pkt;

/* custom SDL user-event types used by the player's event loop */
#define FF_ALLOC_EVENT   (SDL_USEREVENT)
#define FF_REFRESH_EVENT (SDL_USEREVENT + 1)
#define FF_QUIT_EVENT    (SDL_USEREVENT + 2)

static SDL_Surface *screen;
286
287 static int packet_queue_put(PacketQueue *q, AVPacket *pkt);
288
289 /* packet queue handling */
290 static void packet_queue_init(PacketQueue *q)
291 {
292     memset(q, 0, sizeof(PacketQueue));
293     q->mutex = SDL_CreateMutex();
294     q->cond = SDL_CreateCond();
295     packet_queue_put(q, &flush_pkt);
296 }
297
298 static void packet_queue_flush(PacketQueue *q)
299 {
300     AVPacketList *pkt, *pkt1;
301
302     SDL_LockMutex(q->mutex);
303     for(pkt = q->first_pkt; pkt != NULL; pkt = pkt1) {
304         pkt1 = pkt->next;
305         av_free_packet(&pkt->pkt);
306         av_freep(&pkt);
307     }
308     q->last_pkt = NULL;
309     q->first_pkt = NULL;
310     q->nb_packets = 0;
311     q->size = 0;
312     SDL_UnlockMutex(q->mutex);
313 }
314
/* Destroy a packet queue: drop all pending packets, then free its
   synchronisation primitives. The queue must no longer be in use. */
static void packet_queue_end(PacketQueue *q)
{
    packet_queue_flush(q);
    SDL_DestroyMutex(q->mutex);
    SDL_DestroyCond(q->cond);
}
321
322 static int packet_queue_put(PacketQueue *q, AVPacket *pkt)
323 {
324     AVPacketList *pkt1;
325
326     /* duplicate the packet */
327     if (pkt!=&flush_pkt && av_dup_packet(pkt) < 0)
328         return -1;
329
330     pkt1 = av_malloc(sizeof(AVPacketList));
331     if (!pkt1)
332         return -1;
333     pkt1->pkt = *pkt;
334     pkt1->next = NULL;
335
336
337     SDL_LockMutex(q->mutex);
338
339     if (!q->last_pkt)
340
341         q->first_pkt = pkt1;
342     else
343         q->last_pkt->next = pkt1;
344     q->last_pkt = pkt1;
345     q->nb_packets++;
346     q->size += pkt1->pkt.size + sizeof(*pkt1);
347     /* XXX: should duplicate packet data in DV case */
348     SDL_CondSignal(q->cond);
349
350     SDL_UnlockMutex(q->mutex);
351     return 0;
352 }
353
354 static void packet_queue_abort(PacketQueue *q)
355 {
356     SDL_LockMutex(q->mutex);
357
358     q->abort_request = 1;
359
360     SDL_CondSignal(q->cond);
361
362     SDL_UnlockMutex(q->mutex);
363 }
364
365 /* return < 0 if aborted, 0 if no packet and > 0 if packet.  */
366 static int packet_queue_get(PacketQueue *q, AVPacket *pkt, int block)
367 {
368     AVPacketList *pkt1;
369     int ret;
370
371     SDL_LockMutex(q->mutex);
372
373     for(;;) {
374         if (q->abort_request) {
375             ret = -1;
376             break;
377         }
378
379         pkt1 = q->first_pkt;
380         if (pkt1) {
381             q->first_pkt = pkt1->next;
382             if (!q->first_pkt)
383                 q->last_pkt = NULL;
384             q->nb_packets--;
385             q->size -= pkt1->pkt.size + sizeof(*pkt1);
386             *pkt = pkt1->pkt;
387             av_free(pkt1);
388             ret = 1;
389             break;
390         } else if (!block) {
391             ret = 0;
392             break;
393         } else {
394             SDL_CondWait(q->cond, q->mutex);
395         }
396     }
397     SDL_UnlockMutex(q->mutex);
398     return ret;
399 }
400
401 static inline void fill_rectangle(SDL_Surface *screen,
402                                   int x, int y, int w, int h, int color)
403 {
404     SDL_Rect rect;
405     rect.x = x;
406     rect.y = y;
407     rect.w = w;
408     rect.h = h;
409     SDL_FillRect(screen, &rect, color);
410 }
411
#if 0
/* draw only the border of a rectangle */
/* NOTE: dead code — compiled out together with its only (commented-out)
   call site in video_image_display(). Kept for reference. */
void fill_border(VideoState *s, int x, int y, int w, int h, int color)
{
    int w1, w2, h1, h2;

    /* fill the background */
    w1 = x;
    if (w1 < 0)
        w1 = 0;
    w2 = s->width - (x + w);
    if (w2 < 0)
        w2 = 0;
    h1 = y;
    if (h1 < 0)
        h1 = 0;
    h2 = s->height - (y + h);
    if (h2 < 0)
        h2 = 0;
    fill_rectangle(screen,
                   s->xleft, s->ytop,
                   w1, s->height,
                   color);
    fill_rectangle(screen,
                   s->xleft + s->width - w2, s->ytop,
                   w2, s->height,
                   color);
    fill_rectangle(screen,
                   s->xleft + w1, s->ytop,
                   s->width - w1 - w2, h1,
                   color);
    fill_rectangle(screen,
                   s->xleft + w1, s->ytop + s->height - h2,
                   s->width - w1 - w2, h2,
                   color);
}
#endif
449
/* Blend newp over oldp with alpha a. s is an extra precision shift used
   when 2^s samples have been accumulated into newp (chroma averaging). */
#define ALPHA_BLEND(a, oldp, newp, s)\
((((oldp << s) * (255 - (a))) + (newp * (a))) / (255 << s))

/* unpack a 32-bit ARGB pixel stored at s */
#define RGBA_IN(r, g, b, a, s)\
{\
    unsigned int v = ((const uint32_t *)(s))[0];\
    a = (v >> 24) & 0xff;\
    r = (v >> 16) & 0xff;\
    g = (v >> 8) & 0xff;\
    b = v & 0xff;\
}

/* look up the palettized pixel *s in pal and unpack it as A/Y/U/V */
#define YUVA_IN(y, u, v, a, s, pal)\
{\
    unsigned int val = ((const uint32_t *)(pal))[*(const uint8_t*)(s)];\
    a = (val >> 24) & 0xff;\
    y = (val >> 16) & 0xff;\
    u = (val >> 8) & 0xff;\
    v = val & 0xff;\
}

/* pack A/Y/U/V back into a 32-bit word at d */
#define YUVA_OUT(d, y, u, v, a)\
{\
    ((uint32_t *)(d))[0] = (a << 24) | (y << 16) | (u << 8) | v;\
}


/* bytes per source pixel (subtitle bitmaps are PAL8) */
#define BPP 1
478
479 static void blend_subrect(AVPicture *dst, const AVSubtitleRect *rect, int imgw, int imgh)
480 {
481     int wrap, wrap3, width2, skip2;
482     int y, u, v, a, u1, v1, a1, w, h;
483     uint8_t *lum, *cb, *cr;
484     const uint8_t *p;
485     const uint32_t *pal;
486     int dstx, dsty, dstw, dsth;
487
488     dstw = av_clip(rect->w, 0, imgw);
489     dsth = av_clip(rect->h, 0, imgh);
490     dstx = av_clip(rect->x, 0, imgw - dstw);
491     dsty = av_clip(rect->y, 0, imgh - dsth);
492     lum = dst->data[0] + dsty * dst->linesize[0];
493     cb = dst->data[1] + (dsty >> 1) * dst->linesize[1];
494     cr = dst->data[2] + (dsty >> 1) * dst->linesize[2];
495
496     width2 = ((dstw + 1) >> 1) + (dstx & ~dstw & 1);
497     skip2 = dstx >> 1;
498     wrap = dst->linesize[0];
499     wrap3 = rect->pict.linesize[0];
500     p = rect->pict.data[0];
501     pal = (const uint32_t *)rect->pict.data[1];  /* Now in YCrCb! */
502
503     if (dsty & 1) {
504         lum += dstx;
505         cb += skip2;
506         cr += skip2;
507
508         if (dstx & 1) {
509             YUVA_IN(y, u, v, a, p, pal);
510             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
511             cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
512             cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
513             cb++;
514             cr++;
515             lum++;
516             p += BPP;
517         }
518         for(w = dstw - (dstx & 1); w >= 2; w -= 2) {
519             YUVA_IN(y, u, v, a, p, pal);
520             u1 = u;
521             v1 = v;
522             a1 = a;
523             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
524
525             YUVA_IN(y, u, v, a, p + BPP, pal);
526             u1 += u;
527             v1 += v;
528             a1 += a;
529             lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
530             cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
531             cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
532             cb++;
533             cr++;
534             p += 2 * BPP;
535             lum += 2;
536         }
537         if (w) {
538             YUVA_IN(y, u, v, a, p, pal);
539             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
540             cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
541             cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
542             p++;
543             lum++;
544         }
545         p += wrap3 - dstw * BPP;
546         lum += wrap - dstw - dstx;
547         cb += dst->linesize[1] - width2 - skip2;
548         cr += dst->linesize[2] - width2 - skip2;
549     }
550     for(h = dsth - (dsty & 1); h >= 2; h -= 2) {
551         lum += dstx;
552         cb += skip2;
553         cr += skip2;
554
555         if (dstx & 1) {
556             YUVA_IN(y, u, v, a, p, pal);
557             u1 = u;
558             v1 = v;
559             a1 = a;
560             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
561             p += wrap3;
562             lum += wrap;
563             YUVA_IN(y, u, v, a, p, pal);
564             u1 += u;
565             v1 += v;
566             a1 += a;
567             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
568             cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
569             cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
570             cb++;
571             cr++;
572             p += -wrap3 + BPP;
573             lum += -wrap + 1;
574         }
575         for(w = dstw - (dstx & 1); w >= 2; w -= 2) {
576             YUVA_IN(y, u, v, a, p, pal);
577             u1 = u;
578             v1 = v;
579             a1 = a;
580             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
581
582             YUVA_IN(y, u, v, a, p + BPP, pal);
583             u1 += u;
584             v1 += v;
585             a1 += a;
586             lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
587             p += wrap3;
588             lum += wrap;
589
590             YUVA_IN(y, u, v, a, p, pal);
591             u1 += u;
592             v1 += v;
593             a1 += a;
594             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
595
596             YUVA_IN(y, u, v, a, p + BPP, pal);
597             u1 += u;
598             v1 += v;
599             a1 += a;
600             lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
601
602             cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 2);
603             cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 2);
604
605             cb++;
606             cr++;
607             p += -wrap3 + 2 * BPP;
608             lum += -wrap + 2;
609         }
610         if (w) {
611             YUVA_IN(y, u, v, a, p, pal);
612             u1 = u;
613             v1 = v;
614             a1 = a;
615             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
616             p += wrap3;
617             lum += wrap;
618             YUVA_IN(y, u, v, a, p, pal);
619             u1 += u;
620             v1 += v;
621             a1 += a;
622             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
623             cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
624             cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
625             cb++;
626             cr++;
627             p += -wrap3 + BPP;
628             lum += -wrap + 1;
629         }
630         p += wrap3 + (wrap3 - dstw * BPP);
631         lum += wrap + (wrap - dstw - dstx);
632         cb += dst->linesize[1] - width2 - skip2;
633         cr += dst->linesize[2] - width2 - skip2;
634     }
635     /* handle odd height */
636     if (h) {
637         lum += dstx;
638         cb += skip2;
639         cr += skip2;
640
641         if (dstx & 1) {
642             YUVA_IN(y, u, v, a, p, pal);
643             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
644             cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
645             cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
646             cb++;
647             cr++;
648             lum++;
649             p += BPP;
650         }
651         for(w = dstw - (dstx & 1); w >= 2; w -= 2) {
652             YUVA_IN(y, u, v, a, p, pal);
653             u1 = u;
654             v1 = v;
655             a1 = a;
656             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
657
658             YUVA_IN(y, u, v, a, p + BPP, pal);
659             u1 += u;
660             v1 += v;
661             a1 += a;
662             lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
663             cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u, 1);
664             cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v, 1);
665             cb++;
666             cr++;
667             p += 2 * BPP;
668             lum += 2;
669         }
670         if (w) {
671             YUVA_IN(y, u, v, a, p, pal);
672             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
673             cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
674             cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
675         }
676     }
677 }
678
/* Release the data owned by a queued subtitle (frees the AVSubtitle contents,
   not the SubPicture slot itself, which lives in the fixed subpq array). */
static void free_subpicture(SubPicture *sp)
{
    avsubtitle_free(&sp->sub);
}
683
/* Display the picture at the read index of the picture queue: compute the
   display aspect ratio, blend any due subtitle onto the overlay, letterbox
   the image into the window and show the YUV overlay. */
static void video_image_display(VideoState *is)
{
    VideoPicture *vp;
    SubPicture *sp;
    AVPicture pict;
    float aspect_ratio;
    int width, height, x, y;
    SDL_Rect rect;
    int i;

    vp = &is->pictq[is->pictq_rindex];
    if (vp->bmp) {
        /* sample (pixel) aspect ratio: from the filter graph if available,
           otherwise from the stream/codec; 0 means unknown */
#if CONFIG_AVFILTER
         if (vp->picref->video->pixel_aspect.num == 0)
             aspect_ratio = 0;
         else
             aspect_ratio = av_q2d(vp->picref->video->pixel_aspect);
#else

        /* XXX: use variable in the frame */
        if (is->video_st->sample_aspect_ratio.num)
            aspect_ratio = av_q2d(is->video_st->sample_aspect_ratio);
        else if (is->video_st->codec->sample_aspect_ratio.num)
            aspect_ratio = av_q2d(is->video_st->codec->sample_aspect_ratio);
        else
            aspect_ratio = 0;
#endif
        if (aspect_ratio <= 0.0)
            aspect_ratio = 1.0;
        /* convert sample aspect ratio to display aspect ratio */
        aspect_ratio *= (float)vp->width / (float)vp->height;
        /* if an active format is indicated, then it overrides the
           mpeg format */
#if 0
        if (is->video_st->codec->dtg_active_format != is->dtg_active_format) {
            is->dtg_active_format = is->video_st->codec->dtg_active_format;
            printf("dtg_active_format=%d\n", is->dtg_active_format);
        }
#endif
#if 0
        switch(is->video_st->codec->dtg_active_format) {
        case FF_DTG_AFD_SAME:
        default:
            /* nothing to do */
            break;
        case FF_DTG_AFD_4_3:
            aspect_ratio = 4.0 / 3.0;
            break;
        case FF_DTG_AFD_16_9:
            aspect_ratio = 16.0 / 9.0;
            break;
        case FF_DTG_AFD_14_9:
            aspect_ratio = 14.0 / 9.0;
            break;
        case FF_DTG_AFD_4_3_SP_14_9:
            aspect_ratio = 14.0 / 9.0;
            break;
        case FF_DTG_AFD_16_9_SP_14_9:
            aspect_ratio = 14.0 / 9.0;
            break;
        case FF_DTG_AFD_SP_4_3:
            aspect_ratio = 4.0 / 3.0;
            break;
        }
#endif

        /* blend the current subtitle (if its display time has come) directly
           into the overlay's pixel planes */
        if (is->subtitle_st)
        {
            if (is->subpq_size > 0)
            {
                sp = &is->subpq[is->subpq_rindex];

                if (vp->pts >= sp->pts + ((float) sp->sub.start_display_time / 1000))
                {
                    SDL_LockYUVOverlay (vp->bmp);

                    /* SDL YV12 stores the V plane before U, so planes 1 and 2
                       are swapped relative to AVPicture's YUV order */
                    pict.data[0] = vp->bmp->pixels[0];
                    pict.data[1] = vp->bmp->pixels[2];
                    pict.data[2] = vp->bmp->pixels[1];

                    pict.linesize[0] = vp->bmp->pitches[0];
                    pict.linesize[1] = vp->bmp->pitches[2];
                    pict.linesize[2] = vp->bmp->pitches[1];

                    for (i = 0; i < sp->sub.num_rects; i++)
                        blend_subrect(&pict, sp->sub.rects[i],
                                      vp->bmp->w, vp->bmp->h);

                    SDL_UnlockYUVOverlay (vp->bmp);
                }
            }
        }


        /* XXX: we suppose the screen has a 1.0 pixel ratio */
        /* letterbox: fit the aspect-corrected image into the window,
           keeping even dimensions (& ~1) for the YUV overlay */
        height = is->height;
        width = ((int)rint(height * aspect_ratio)) & ~1;
        if (width > is->width) {
            width = is->width;
            height = ((int)rint(width / aspect_ratio)) & ~1;
        }
        x = (is->width - width) / 2;
        y = (is->height - height) / 2;
        if (!is->no_background) {
            /* fill the background */
            //            fill_border(is, x, y, width, height, QERGB(0x00, 0x00, 0x00));
        } else {
            is->no_background = 0;
        }
        rect.x = is->xleft + x;
        rect.y = is->ytop  + y;
        rect.w = width;
        rect.h = height;
        SDL_DisplayYUVOverlay(vp->bmp, &rect);
    } else {
#if 0
        fill_rectangle(screen,
                       is->xleft, is->ytop, is->width, is->height,
                       QERGB(0x00, 0x00, 0x00));
#endif
    }
}
805
/* Mathematical (non-negative) modulo: for b > 0 the result is in [0, b). */
static inline int compute_mod(int a, int b)
{
    int r = a % b;
    return r >= 0 ? r : r + b;
}
814
815 static void video_audio_display(VideoState *s)
816 {
817     int i, i_start, x, y1, y, ys, delay, n, nb_display_channels;
818     int ch, channels, h, h2, bgcolor, fgcolor;
819     int16_t time_diff;
820     int rdft_bits, nb_freq;
821
822     for(rdft_bits=1; (1<<rdft_bits)<2*s->height; rdft_bits++)
823         ;
824     nb_freq= 1<<(rdft_bits-1);
825
826     /* compute display index : center on currently output samples */
827     channels = s->audio_st->codec->channels;
828     nb_display_channels = channels;
829     if (!s->paused) {
830         int data_used= s->show_audio==1 ? s->width : (2*nb_freq);
831         n = 2 * channels;
832         delay = audio_write_get_buf_size(s);
833         delay /= n;
834
835         /* to be more precise, we take into account the time spent since
836            the last buffer computation */
837         if (audio_callback_time) {
838             time_diff = av_gettime() - audio_callback_time;
839             delay -= (time_diff * s->audio_st->codec->sample_rate) / 1000000;
840         }
841
842         delay += 2*data_used;
843         if (delay < data_used)
844             delay = data_used;
845
846         i_start= x = compute_mod(s->sample_array_index - delay * channels, SAMPLE_ARRAY_SIZE);
847         if(s->show_audio==1){
848             h= INT_MIN;
849             for(i=0; i<1000; i+=channels){
850                 int idx= (SAMPLE_ARRAY_SIZE + x - i) % SAMPLE_ARRAY_SIZE;
851                 int a= s->sample_array[idx];
852                 int b= s->sample_array[(idx + 4*channels)%SAMPLE_ARRAY_SIZE];
853                 int c= s->sample_array[(idx + 5*channels)%SAMPLE_ARRAY_SIZE];
854                 int d= s->sample_array[(idx + 9*channels)%SAMPLE_ARRAY_SIZE];
855                 int score= a-d;
856                 if(h<score && (b^c)<0){
857                     h= score;
858                     i_start= idx;
859                 }
860             }
861         }
862
863         s->last_i_start = i_start;
864     } else {
865         i_start = s->last_i_start;
866     }
867
868     bgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0x00);
869     if(s->show_audio==1){
870         fill_rectangle(screen,
871                        s->xleft, s->ytop, s->width, s->height,
872                        bgcolor);
873
874         fgcolor = SDL_MapRGB(screen->format, 0xff, 0xff, 0xff);
875
876         /* total height for one channel */
877         h = s->height / nb_display_channels;
878         /* graph height / 2 */
879         h2 = (h * 9) / 20;
880         for(ch = 0;ch < nb_display_channels; ch++) {
881             i = i_start + ch;
882             y1 = s->ytop + ch * h + (h / 2); /* position of center line */
883             for(x = 0; x < s->width; x++) {
884                 y = (s->sample_array[i] * h2) >> 15;
885                 if (y < 0) {
886                     y = -y;
887                     ys = y1 - y;
888                 } else {
889                     ys = y1;
890                 }
891                 fill_rectangle(screen,
892                                s->xleft + x, ys, 1, y,
893                                fgcolor);
894                 i += channels;
895                 if (i >= SAMPLE_ARRAY_SIZE)
896                     i -= SAMPLE_ARRAY_SIZE;
897             }
898         }
899
900         fgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0xff);
901
902         for(ch = 1;ch < nb_display_channels; ch++) {
903             y = s->ytop + ch * h;
904             fill_rectangle(screen,
905                            s->xleft, y, s->width, 1,
906                            fgcolor);
907         }
908         SDL_UpdateRect(screen, s->xleft, s->ytop, s->width, s->height);
909     }else{
910         nb_display_channels= FFMIN(nb_display_channels, 2);
911         if(rdft_bits != s->rdft_bits){
912             av_rdft_end(s->rdft);
913             av_free(s->rdft_data);
914             s->rdft = av_rdft_init(rdft_bits, DFT_R2C);
915             s->rdft_bits= rdft_bits;
916             s->rdft_data= av_malloc(4*nb_freq*sizeof(*s->rdft_data));
917         }
918         {
919             FFTSample *data[2];
920             for(ch = 0;ch < nb_display_channels; ch++) {
921                 data[ch] = s->rdft_data + 2*nb_freq*ch;
922                 i = i_start + ch;
923                 for(x = 0; x < 2*nb_freq; x++) {
924                     double w= (x-nb_freq)*(1.0/nb_freq);
925                     data[ch][x]= s->sample_array[i]*(1.0-w*w);
926                     i += channels;
927                     if (i >= SAMPLE_ARRAY_SIZE)
928                         i -= SAMPLE_ARRAY_SIZE;
929                 }
930                 av_rdft_calc(s->rdft, data[ch]);
931             }
932             //least efficient way to do this, we should of course directly access it but its more than fast enough
933             for(y=0; y<s->height; y++){
934                 double w= 1/sqrt(nb_freq);
935                 int a= sqrt(w*sqrt(data[0][2*y+0]*data[0][2*y+0] + data[0][2*y+1]*data[0][2*y+1]));
936                 int b= (nb_display_channels == 2 ) ? sqrt(w*sqrt(data[1][2*y+0]*data[1][2*y+0]
937                        + data[1][2*y+1]*data[1][2*y+1])) : a;
938                 a= FFMIN(a,255);
939                 b= FFMIN(b,255);
940                 fgcolor = SDL_MapRGB(screen->format, a, b, (a+b)/2);
941
942                 fill_rectangle(screen,
943                             s->xpos, s->height-y, 1, 1,
944                             fgcolor);
945             }
946         }
947         SDL_UpdateRect(screen, s->xpos, s->ytop, 1, s->height);
948         s->xpos++;
949         if(s->xpos >= s->width)
950             s->xpos= s->xleft;
951     }
952 }
953
/* (Re)create the SDL output surface sized to fit the current video.
   Returns 0 on success (including "already the right size"),
   -1 if SDL could not set the video mode. */
static int video_open(VideoState *is){
    int flags = SDL_HWSURFACE|SDL_ASYNCBLIT|SDL_HWACCEL;
    int w,h;

    if(is_full_screen) flags |= SDL_FULLSCREEN;
    else               flags |= SDL_RESIZABLE;

    /* choose the window size: explicit user/fullscreen setting first,
       then filter-graph or codec dimensions, finally a 640x480 fallback */
    if (is_full_screen && fs_screen_width) {
        w = fs_screen_width;
        h = fs_screen_height;
    } else if(!is_full_screen && screen_width){
        w = screen_width;
        h = screen_height;
#if CONFIG_AVFILTER
    }else if (is->out_video_filter && is->out_video_filter->inputs[0]){
        w = is->out_video_filter->inputs[0]->w;
        h = is->out_video_filter->inputs[0]->h;
#else
    }else if (is->video_st && is->video_st->codec->width){
        w = is->video_st->codec->width;
        h = is->video_st->codec->height;
#endif
    } else {
        w = 640;
        h = 480;
    }
    /* nothing to do if the current surface already matches */
    if(screen && is->width == screen->w && screen->w == w
       && is->height== screen->h && screen->h == h)
        return 0;

#ifndef __APPLE__
    screen = SDL_SetVideoMode(w, h, 0, flags);
#else
    /* setting bits_per_pixel = 0 or 32 causes blank video on OS X */
    screen = SDL_SetVideoMode(w, h, 24, flags);
#endif
    if (!screen) {
        fprintf(stderr, "SDL: could not set video mode - exiting\n");
        return -1;
    }
    if (!window_title)
        window_title = input_filename;
    SDL_WM_SetCaption(window_title, window_title);

    is->width = screen->w;
    is->height = screen->h;

    return 0;
}
1003
1004 /* display the current picture, if any */
1005 static void video_display(VideoState *is)
1006 {
1007     if(!screen)
1008         video_open(cur_stream);
1009     if (is->audio_st && is->show_audio)
1010         video_audio_display(is);
1011     else if (is->video_st)
1012         video_image_display(is);
1013 }
1014
1015 static int refresh_thread(void *opaque)
1016 {
1017     VideoState *is= opaque;
1018     while(!is->abort_request){
1019     SDL_Event event;
1020     event.type = FF_REFRESH_EVENT;
1021     event.user.data1 = opaque;
1022         if(!is->refresh){
1023             is->refresh=1;
1024     SDL_PushEvent(&event);
1025         }
1026         usleep(is->audio_st && is->show_audio ? rdftspeed*1000 : 5000); //FIXME ideally we should wait the correct time but SDLs event passing is so slow it would be silly
1027     }
1028     return 0;
1029 }
1030
1031 /* get the current audio clock value */
1032 static double get_audio_clock(VideoState *is)
1033 {
1034     double pts;
1035     int hw_buf_size, bytes_per_sec;
1036     pts = is->audio_clock;
1037     hw_buf_size = audio_write_get_buf_size(is);
1038     bytes_per_sec = 0;
1039     if (is->audio_st) {
1040         bytes_per_sec = is->audio_st->codec->sample_rate *
1041             2 * is->audio_st->codec->channels;
1042     }
1043     if (bytes_per_sec)
1044         pts -= (double)hw_buf_size / bytes_per_sec;
1045     return pts;
1046 }
1047
1048 /* get the current video clock value */
1049 static double get_video_clock(VideoState *is)
1050 {
1051     if (is->paused) {
1052         return is->video_current_pts;
1053     } else {
1054         return is->video_current_pts_drift + av_gettime() / 1000000.0;
1055     }
1056 }
1057
1058 /* get the current external clock value */
1059 static double get_external_clock(VideoState *is)
1060 {
1061     int64_t ti;
1062     ti = av_gettime();
1063     return is->external_clock + ((ti - is->external_clock_time) * 1e-6);
1064 }
1065
1066 /* get the current master clock value */
1067 static double get_master_clock(VideoState *is)
1068 {
1069     double val;
1070
1071     if (is->av_sync_type == AV_SYNC_VIDEO_MASTER) {
1072         if (is->video_st)
1073             val = get_video_clock(is);
1074         else
1075             val = get_audio_clock(is);
1076     } else if (is->av_sync_type == AV_SYNC_AUDIO_MASTER) {
1077         if (is->audio_st)
1078             val = get_audio_clock(is);
1079         else
1080             val = get_video_clock(is);
1081     } else {
1082         val = get_external_clock(is);
1083     }
1084     return val;
1085 }
1086
1087 /* seek in the stream */
1088 static void stream_seek(VideoState *is, int64_t pos, int64_t rel, int seek_by_bytes)
1089 {
1090     if (!is->seek_req) {
1091         is->seek_pos = pos;
1092         is->seek_rel = rel;
1093         is->seek_flags &= ~AVSEEK_FLAG_BYTE;
1094         if (seek_by_bytes)
1095             is->seek_flags |= AVSEEK_FLAG_BYTE;
1096         is->seek_req = 1;
1097     }
1098 }
1099
/* pause or resume the video */
static void stream_pause(VideoState *is)
{
    if (is->paused) {
        /* resuming: advance frame_timer by the wall-clock time spent paused
           so the next frame is not considered hopelessly late */
        is->frame_timer += av_gettime() / 1000000.0 + is->video_current_pts_drift - is->video_current_pts;
        if(is->read_pause_return != AVERROR(ENOSYS)){
            /* the demuxer supported av_read_pause: re-derive the current pts
               from the drift recorded when playback stopped */
            is->video_current_pts = is->video_current_pts_drift + av_gettime() / 1000000.0;
        }
        /* re-anchor the drift to the current wall clock */
        is->video_current_pts_drift = is->video_current_pts - av_gettime() / 1000000.0;
    }
    is->paused = !is->paused;
}
1112
1113 static double compute_target_time(double frame_current_pts, VideoState *is)
1114 {
1115     double delay, sync_threshold, diff;
1116
1117     /* compute nominal delay */
1118     delay = frame_current_pts - is->frame_last_pts;
1119     if (delay <= 0 || delay >= 10.0) {
1120         /* if incorrect delay, use previous one */
1121         delay = is->frame_last_delay;
1122     } else {
1123         is->frame_last_delay = delay;
1124     }
1125     is->frame_last_pts = frame_current_pts;
1126
1127     /* update delay to follow master synchronisation source */
1128     if (((is->av_sync_type == AV_SYNC_AUDIO_MASTER && is->audio_st) ||
1129          is->av_sync_type == AV_SYNC_EXTERNAL_CLOCK)) {
1130         /* if video is slave, we try to correct big delays by
1131            duplicating or deleting a frame */
1132         diff = get_video_clock(is) - get_master_clock(is);
1133
1134         /* skip or repeat frame. We take into account the
1135            delay to compute the threshold. I still don't know
1136            if it is the best guess */
1137         sync_threshold = FFMAX(AV_SYNC_THRESHOLD, delay);
1138         if (fabs(diff) < AV_NOSYNC_THRESHOLD) {
1139             if (diff <= -sync_threshold)
1140                 delay = 0;
1141             else if (diff >= sync_threshold)
1142                 delay = 2 * delay;
1143         }
1144     }
1145     is->frame_timer += delay;
1146 #if defined(DEBUG_SYNC)
1147     printf("video: delay=%0.3f actual_delay=%0.3f pts=%0.3f A-V=%f\n",
1148             delay, actual_delay, frame_current_pts, -diff);
1149 #endif
1150
1151     return is->frame_timer;
1152 }
1153
/* called to display each frame: pops due pictures off the picture queue,
   drops frames when behind, expires subtitles, and prints the status line */
static void video_refresh_timer(void *opaque)
{
    VideoState *is = opaque;
    VideoPicture *vp;

    SubPicture *sp, *sp2;

    if (is->video_st) {
retry:
        if (is->pictq_size == 0) {
            //nothing to do, no picture to display in the que
        } else {
            double time= av_gettime()/1000000.0;
            double next_target;
            /* dequeue the picture */
            vp = &is->pictq[is->pictq_rindex];

            /* not yet due: keep showing the previous frame */
            if(time < vp->target_clock)
                return;
            /* update current video pts */
            is->video_current_pts = vp->pts;
            is->video_current_pts_drift = is->video_current_pts - time;
            is->video_current_pos = vp->pos;
            /* estimate when the frame AFTER this one is due */
            if(is->pictq_size > 1){
                VideoPicture *nextvp= &is->pictq[(is->pictq_rindex+1)%VIDEO_PICTURE_QUEUE_SIZE];
                assert(nextvp->target_clock >= vp->target_clock);
                next_target= nextvp->target_clock;
            }else{
                next_target= vp->target_clock + is->video_clock - vp->pts; //FIXME pass durations cleanly
            }
            /* running late: raise the skip ratio and possibly drop this frame */
            if(framedrop && time > next_target){
                is->skip_frames *= 1.0 + FRAME_SKIP_FACTOR;
                if(is->pictq_size > 1 || time > next_target + 0.5){
                    /* update queue size and signal for next picture */
                    if (++is->pictq_rindex == VIDEO_PICTURE_QUEUE_SIZE)
                        is->pictq_rindex = 0;

                    SDL_LockMutex(is->pictq_mutex);
                    is->pictq_size--;
                    SDL_CondSignal(is->pictq_cond);
                    SDL_UnlockMutex(is->pictq_mutex);
                    goto retry;
                }
            }

            if(is->subtitle_st) {
                if (is->subtitle_stream_changed) {
                    /* subtitle stream switched: flush every queued subpicture */
                    SDL_LockMutex(is->subpq_mutex);

                    while (is->subpq_size) {
                        free_subpicture(&is->subpq[is->subpq_rindex]);

                        /* update queue size and signal for next picture */
                        if (++is->subpq_rindex == SUBPICTURE_QUEUE_SIZE)
                            is->subpq_rindex = 0;

                        is->subpq_size--;
                    }
                    is->subtitle_stream_changed = 0;

                    SDL_CondSignal(is->subpq_cond);
                    SDL_UnlockMutex(is->subpq_mutex);
                } else {
                    if (is->subpq_size > 0) {
                        sp = &is->subpq[is->subpq_rindex];

                        if (is->subpq_size > 1)
                            sp2 = &is->subpq[(is->subpq_rindex + 1) % SUBPICTURE_QUEUE_SIZE];
                        else
                            sp2 = NULL;

                        /* drop the head subpicture once its display window has
                           ended or the next one has started displaying */
                        if ((is->video_current_pts > (sp->pts + ((float) sp->sub.end_display_time / 1000)))
                                || (sp2 && is->video_current_pts > (sp2->pts + ((float) sp2->sub.start_display_time / 1000))))
                        {
                            free_subpicture(sp);

                            /* update queue size and signal for next picture */
                            if (++is->subpq_rindex == SUBPICTURE_QUEUE_SIZE)
                                is->subpq_rindex = 0;

                            SDL_LockMutex(is->subpq_mutex);
                            is->subpq_size--;
                            SDL_CondSignal(is->subpq_cond);
                            SDL_UnlockMutex(is->subpq_mutex);
                        }
                    }
                }
            }

            /* display picture */
            video_display(is);

            /* update queue size and signal for next picture */
            if (++is->pictq_rindex == VIDEO_PICTURE_QUEUE_SIZE)
                is->pictq_rindex = 0;

            SDL_LockMutex(is->pictq_mutex);
            is->pictq_size--;
            SDL_CondSignal(is->pictq_cond);
            SDL_UnlockMutex(is->pictq_mutex);
        }
    } else if (is->audio_st) {
        /* draw the next audio frame */

        /* if only audio stream, then display the audio bars (better
           than nothing, just to test the implementation */

        /* display picture */
        video_display(is);
    }
    if (show_status) {
        static int64_t last_time;
        int64_t cur_time;
        int aqsize, vqsize, sqsize;
        double av_diff;

        cur_time = av_gettime();
        /* print the status line at most every 30 ms */
        if (!last_time || (cur_time - last_time) >= 30000) {
            aqsize = 0;
            vqsize = 0;
            sqsize = 0;
            if (is->audio_st)
                aqsize = is->audioq.size;
            if (is->video_st)
                vqsize = is->videoq.size;
            if (is->subtitle_st)
                sqsize = is->subtitleq.size;
            av_diff = 0;
            if (is->audio_st && is->video_st)
                av_diff = get_audio_clock(is) - get_video_clock(is);
            printf("%7.2f A-V:%7.3f s:%3.1f aq=%5dKB vq=%5dKB sq=%5dB f=%"PRId64"/%"PRId64"   \r",
                   get_master_clock(is), av_diff, FFMAX(is->skip_frames-1, 0), aqsize / 1024, vqsize / 1024, sqsize, is->pts_ctx.num_faulty_dts, is->pts_ctx.num_faulty_pts);
            fflush(stdout);
            last_time = cur_time;
        }
    }
}
1292
1293 static void stream_close(VideoState *is)
1294 {
1295     VideoPicture *vp;
1296     int i;
1297     /* XXX: use a special url_shutdown call to abort parse cleanly */
1298     is->abort_request = 1;
1299     SDL_WaitThread(is->parse_tid, NULL);
1300     SDL_WaitThread(is->refresh_tid, NULL);
1301
1302     /* free all pictures */
1303     for(i=0;i<VIDEO_PICTURE_QUEUE_SIZE; i++) {
1304         vp = &is->pictq[i];
1305 #if CONFIG_AVFILTER
1306         if (vp->picref) {
1307             avfilter_unref_buffer(vp->picref);
1308             vp->picref = NULL;
1309         }
1310 #endif
1311         if (vp->bmp) {
1312             SDL_FreeYUVOverlay(vp->bmp);
1313             vp->bmp = NULL;
1314         }
1315     }
1316     SDL_DestroyMutex(is->pictq_mutex);
1317     SDL_DestroyCond(is->pictq_cond);
1318     SDL_DestroyMutex(is->subpq_mutex);
1319     SDL_DestroyCond(is->subpq_cond);
1320 #if !CONFIG_AVFILTER
1321     if (is->img_convert_ctx)
1322         sws_freeContext(is->img_convert_ctx);
1323 #endif
1324     av_free(is);
1325 }
1326
1327 static void do_exit(void)
1328 {
1329     int i;
1330     if (cur_stream) {
1331         stream_close(cur_stream);
1332         cur_stream = NULL;
1333     }
1334     for (i = 0; i < AVMEDIA_TYPE_NB; i++)
1335         av_free(avcodec_opts[i]);
1336     av_free(avformat_opts);
1337     av_free(sws_opts);
1338 #if CONFIG_AVFILTER
1339     avfilter_uninit();
1340 #endif
1341     if (show_status)
1342         printf("\n");
1343     SDL_Quit();
1344     av_log(NULL, AV_LOG_QUIET, "");
1345     exit(0);
1346 }
1347
1348 /* allocate a picture (needs to do that in main thread to avoid
1349    potential locking problems */
1350 static void alloc_picture(void *opaque)
1351 {
1352     VideoState *is = opaque;
1353     VideoPicture *vp;
1354
1355     vp = &is->pictq[is->pictq_windex];
1356
1357     if (vp->bmp)
1358         SDL_FreeYUVOverlay(vp->bmp);
1359
1360 #if CONFIG_AVFILTER
1361     if (vp->picref)
1362         avfilter_unref_buffer(vp->picref);
1363     vp->picref = NULL;
1364
1365     vp->width   = is->out_video_filter->inputs[0]->w;
1366     vp->height  = is->out_video_filter->inputs[0]->h;
1367     vp->pix_fmt = is->out_video_filter->inputs[0]->format;
1368 #else
1369     vp->width   = is->video_st->codec->width;
1370     vp->height  = is->video_st->codec->height;
1371     vp->pix_fmt = is->video_st->codec->pix_fmt;
1372 #endif
1373
1374     vp->bmp = SDL_CreateYUVOverlay(vp->width, vp->height,
1375                                    SDL_YV12_OVERLAY,
1376                                    screen);
1377     if (!vp->bmp || vp->bmp->pitches[0] < vp->width) {
1378         /* SDL allocates a buffer smaller than requested if the video
1379          * overlay hardware is unable to support the requested size. */
1380         fprintf(stderr, "Error: the video system does not support an image\n"
1381                         "size of %dx%d pixels. Try using -lowres or -vf \"scale=w:h\"\n"
1382                         "to reduce the image size.\n", vp->width, vp->height );
1383         do_exit();
1384     }
1385
1386     SDL_LockMutex(is->pictq_mutex);
1387     vp->allocated = 1;
1388     SDL_CondSignal(is->pictq_cond);
1389     SDL_UnlockMutex(is->pictq_mutex);
1390 }
1391
/**
 * Queue src_frame for display by converting it into the SDL YUV overlay of
 * the next free VideoPicture slot. Blocks until a slot is free; may trigger
 * (re)allocation of the overlay in the main thread via FF_ALLOC_EVENT.
 *
 * @param pts the dts of the pkt / pts of the frame and guessed if not known
 * @return 0 on success, -1 if the video queue was aborted
 */
static int queue_picture(VideoState *is, AVFrame *src_frame, double pts, int64_t pos)
{
    VideoPicture *vp;
    int dst_pix_fmt;
#if CONFIG_AVFILTER
    AVPicture pict_src;
#endif
    /* wait until we have space to put a new picture */
    SDL_LockMutex(is->pictq_mutex);

    /* queue full while the display loop is idle: decay the skip ratio */
    if(is->pictq_size>=VIDEO_PICTURE_QUEUE_SIZE && !is->refresh)
        is->skip_frames= FFMAX(1.0 - FRAME_SKIP_FACTOR, is->skip_frames * (1.0-FRAME_SKIP_FACTOR));

    while (is->pictq_size >= VIDEO_PICTURE_QUEUE_SIZE &&
           !is->videoq.abort_request) {
        SDL_CondWait(is->pictq_cond, is->pictq_mutex);
    }
    SDL_UnlockMutex(is->pictq_mutex);

    if (is->videoq.abort_request)
        return -1;

    vp = &is->pictq[is->pictq_windex];

    /* alloc or resize hardware picture buffer */
    if (!vp->bmp ||
#if CONFIG_AVFILTER
        vp->width  != is->out_video_filter->inputs[0]->w ||
        vp->height != is->out_video_filter->inputs[0]->h) {
#else
        vp->width != is->video_st->codec->width ||
        vp->height != is->video_st->codec->height) {
#endif
        SDL_Event event;

        vp->allocated = 0;

        /* the allocation must be done in the main thread to avoid
           locking problems */
        event.type = FF_ALLOC_EVENT;
        event.user.data1 = is;
        SDL_PushEvent(&event);

        /* wait until the picture is allocated */
        SDL_LockMutex(is->pictq_mutex);
        while (!vp->allocated && !is->videoq.abort_request) {
            SDL_CondWait(is->pictq_cond, is->pictq_mutex);
        }
        SDL_UnlockMutex(is->pictq_mutex);

        if (is->videoq.abort_request)
            return -1;
    }

    /* if the frame is not skipped, then display it */
    if (vp->bmp) {
        AVPicture pict;
#if CONFIG_AVFILTER
        if(vp->picref)
            avfilter_unref_buffer(vp->picref);
        vp->picref = src_frame->opaque;
#endif

        /* get a pointer on the bitmap */
        SDL_LockYUVOverlay (vp->bmp);

        dst_pix_fmt = PIX_FMT_YUV420P;
        memset(&pict,0,sizeof(AVPicture));
        /* planes 1 and 2 are swapped: the SDL YV12 overlay stores V before U */
        pict.data[0] = vp->bmp->pixels[0];
        pict.data[1] = vp->bmp->pixels[2];
        pict.data[2] = vp->bmp->pixels[1];

        pict.linesize[0] = vp->bmp->pitches[0];
        pict.linesize[1] = vp->bmp->pitches[2];
        pict.linesize[2] = vp->bmp->pitches[1];

#if CONFIG_AVFILTER
        pict_src.data[0] = src_frame->data[0];
        pict_src.data[1] = src_frame->data[1];
        pict_src.data[2] = src_frame->data[2];

        pict_src.linesize[0] = src_frame->linesize[0];
        pict_src.linesize[1] = src_frame->linesize[1];
        pict_src.linesize[2] = src_frame->linesize[2];

        //FIXME use direct rendering
        av_picture_copy(&pict, &pict_src,
                        vp->pix_fmt, vp->width, vp->height);
#else
        /* no filter chain: convert with swscale into the overlay */
        sws_flags = av_get_int(sws_opts, "sws_flags", NULL);
        is->img_convert_ctx = sws_getCachedContext(is->img_convert_ctx,
            vp->width, vp->height, vp->pix_fmt, vp->width, vp->height,
            dst_pix_fmt, sws_flags, NULL, NULL, NULL);
        if (is->img_convert_ctx == NULL) {
            fprintf(stderr, "Cannot initialize the conversion context\n");
            exit(1);
        }
        sws_scale(is->img_convert_ctx, src_frame->data, src_frame->linesize,
                  0, vp->height, pict.data, pict.linesize);
#endif
        /* update the bitmap content */
        SDL_UnlockYUVOverlay(vp->bmp);

        vp->pts = pts;
        vp->pos = pos;

        /* now we can update the picture count */
        if (++is->pictq_windex == VIDEO_PICTURE_QUEUE_SIZE)
            is->pictq_windex = 0;
        SDL_LockMutex(is->pictq_mutex);
        vp->target_clock= compute_target_time(vp->pts, is);

        is->pictq_size++;
        SDL_UnlockMutex(is->pictq_mutex);
    }
    return 0;
}
1513
1514 /**
1515  * compute the exact PTS for the picture if it is omitted in the stream
1516  * @param pts1 the dts of the pkt / pts of the frame
1517  */
1518 static int output_picture2(VideoState *is, AVFrame *src_frame, double pts1, int64_t pos)
1519 {
1520     double frame_delay, pts;
1521
1522     pts = pts1;
1523
1524     if (pts != 0) {
1525         /* update video clock with pts, if present */
1526         is->video_clock = pts;
1527     } else {
1528         pts = is->video_clock;
1529     }
1530     /* update video clock for next frame */
1531     frame_delay = av_q2d(is->video_st->codec->time_base);
1532     /* for MPEG2, the frame can be repeated, so we update the
1533        clock accordingly */
1534     frame_delay += src_frame->repeat_pict * (frame_delay * 0.5);
1535     is->video_clock += frame_delay;
1536
1537 #if defined(DEBUG_SYNC) && 0
1538     printf("frame_type=%c clock=%0.3f pts=%0.3f\n",
1539            av_get_pict_type_char(src_frame->pict_type), pts, pts1);
1540 #endif
1541     return queue_picture(is, src_frame, pts, pos);
1542 }
1543
1544 static int get_video_frame(VideoState *is, AVFrame *frame, int64_t *pts, AVPacket *pkt)
1545 {
1546     int len1, got_picture, i;
1547
1548         if (packet_queue_get(&is->videoq, pkt, 1) < 0)
1549             return -1;
1550
1551         if(pkt->data == flush_pkt.data){
1552             avcodec_flush_buffers(is->video_st->codec);
1553
1554             SDL_LockMutex(is->pictq_mutex);
1555             //Make sure there are no long delay timers (ideally we should just flush the que but thats harder)
1556             for(i=0; i<VIDEO_PICTURE_QUEUE_SIZE; i++){
1557                 is->pictq[i].target_clock= 0;
1558             }
1559             while (is->pictq_size && !is->videoq.abort_request) {
1560                 SDL_CondWait(is->pictq_cond, is->pictq_mutex);
1561             }
1562             is->video_current_pos= -1;
1563             SDL_UnlockMutex(is->pictq_mutex);
1564
1565             init_pts_correction(&is->pts_ctx);
1566             is->frame_last_pts= AV_NOPTS_VALUE;
1567             is->frame_last_delay = 0;
1568             is->frame_timer = (double)av_gettime() / 1000000.0;
1569             is->skip_frames= 1;
1570             is->skip_frames_index= 0;
1571             return 0;
1572         }
1573
1574         /* NOTE: ipts is the PTS of the _first_ picture beginning in
1575            this packet, if any */
1576         is->video_st->codec->reordered_opaque= pkt->pts;
1577         len1 = avcodec_decode_video2(is->video_st->codec,
1578                                     frame, &got_picture,
1579                                     pkt);
1580
1581         if (got_picture) {
1582             if (decoder_reorder_pts == -1) {
1583                 *pts = guess_correct_pts(&is->pts_ctx, frame->reordered_opaque, pkt->dts);
1584             } else if (decoder_reorder_pts) {
1585                 *pts = frame->reordered_opaque;
1586             } else {
1587                 *pts = pkt->dts;
1588             }
1589
1590             if (*pts == AV_NOPTS_VALUE) {
1591                 *pts = 0;
1592             }
1593         }
1594
1595 //            if (len1 < 0)
1596 //                break;
1597     if (got_picture){
1598         is->skip_frames_index += 1;
1599         if(is->skip_frames_index >= is->skip_frames){
1600             is->skip_frames_index -= FFMAX(is->skip_frames, 1.0);
1601             return 1;
1602         }
1603
1604     }
1605     return 0;
1606 }
1607
1608 #if CONFIG_AVFILTER
/* private context of the ffplay_input source filter */
typedef struct {
    VideoState *is;   /* owning player state; source of decoded frames */
    AVFrame *frame;   /* scratch frame reused across decode calls */
    int use_dr1;      /* nonzero if the decoder renders directly into filter buffers */
} FilterPriv;
1614
/* get_buffer callback installed on the decoder when DR1 is available:
   hands the decoder a buffer obtained from the filter graph so decoded
   data lands directly in a filter buffer (no extra copy). Returns 0 on
   success, -1 if no buffer could be obtained. */
static int input_get_buffer(AVCodecContext *codec, AVFrame *pic)
{
    AVFilterContext *ctx = codec->opaque;
    AVFilterBufferRef  *ref;
    int perms = AV_PERM_WRITE;
    int i, w, h, stride[4];
    unsigned edge;

    /* translate the decoder's buffer hints into filter buffer permissions */
    if(pic->buffer_hints & FF_BUFFER_HINTS_VALID) {
        if(pic->buffer_hints & FF_BUFFER_HINTS_READABLE) perms |= AV_PERM_READ;
        if(pic->buffer_hints & FF_BUFFER_HINTS_PRESERVE) perms |= AV_PERM_PRESERVE;
        if(pic->buffer_hints & FF_BUFFER_HINTS_REUSABLE) perms |= AV_PERM_REUSE2;
    }
    if(pic->reference) perms |= AV_PERM_READ | AV_PERM_PRESERVE;

    /* request a buffer padded for alignment plus the edge border the
       decoder may write outside the visible picture */
    w = codec->width;
    h = codec->height;
    avcodec_align_dimensions2(codec, &w, &h, stride);
    edge = codec->flags & CODEC_FLAG_EMU_EDGE ? 0 : avcodec_get_edge_width();
    w += edge << 1;
    h += edge << 1;

    if(!(ref = avfilter_get_video_buffer(ctx->outputs[0], perms, w, h)))
        return -1;

    ref->video->w = codec->width;
    ref->video->h = codec->height;
    for(i = 0; i < 4; i ++) {
        /* chroma planes (1 and 2) are subsampled; shift the edge offsets */
        unsigned hshift = (i == 1 || i == 2) ? av_pix_fmt_descriptors[ref->format].log2_chroma_w : 0;
        unsigned vshift = (i == 1 || i == 2) ? av_pix_fmt_descriptors[ref->format].log2_chroma_h : 0;

        /* advance past the edge border so (0,0) is the visible top-left */
        if (ref->data[i]) {
            ref->data[i]    += (edge >> hshift) + ((edge * ref->linesize[i]) >> vshift);
        }
        pic->data[i]     = ref->data[i];
        pic->linesize[i] = ref->linesize[i];
    }
    pic->opaque = ref;
    pic->age    = INT_MAX;
    pic->type   = FF_BUFFER_TYPE_USER;
    pic->reordered_opaque = codec->reordered_opaque;
    return 0;
}
1658
1659 static void input_release_buffer(AVCodecContext *codec, AVFrame *pic)
1660 {
1661     memset(pic->data, 0, sizeof(pic->data));
1662     avfilter_unref_buffer(pic->opaque);
1663 }
1664
1665 static int input_reget_buffer(AVCodecContext *codec, AVFrame *pic)
1666 {
1667     AVFilterBufferRef *ref = pic->opaque;
1668
1669     if (pic->data[0] == NULL) {
1670         pic->buffer_hints |= FF_BUFFER_HINTS_READABLE;
1671         return codec->get_buffer(codec, pic);
1672     }
1673
1674     if ((codec->width != ref->video->w) || (codec->height != ref->video->h) ||
1675         (codec->pix_fmt != ref->format)) {
1676         av_log(codec, AV_LOG_ERROR, "Picture properties changed.\n");
1677         return -1;
1678     }
1679
1680     pic->reordered_opaque = codec->reordered_opaque;
1681     return 0;
1682 }
1683
1684 static int input_init(AVFilterContext *ctx, const char *args, void *opaque)
1685 {
1686     FilterPriv *priv = ctx->priv;
1687     AVCodecContext *codec;
1688     if(!opaque) return -1;
1689
1690     priv->is = opaque;
1691     codec    = priv->is->video_st->codec;
1692     codec->opaque = ctx;
1693     if(codec->codec->capabilities & CODEC_CAP_DR1) {
1694         priv->use_dr1 = 1;
1695         codec->get_buffer     = input_get_buffer;
1696         codec->release_buffer = input_release_buffer;
1697         codec->reget_buffer   = input_reget_buffer;
1698     }
1699
1700     priv->frame = avcodec_alloc_frame();
1701
1702     return 0;
1703 }
1704
/* uninit callback: free the scratch frame allocated in input_init() */
static void input_uninit(AVFilterContext *ctx)
{
    FilterPriv *priv = ctx->priv;
    av_free(priv->frame);
}
1710
/* request_frame callback of the source filter: decode packets until a
   displayable frame comes out, wrap it in a buffer ref, and push it down
   the output link. Returns 0 on success, -1 on decode abort/failure. */
static int input_request_frame(AVFilterLink *link)
{
    FilterPriv *priv = link->src->priv;
    AVFilterBufferRef *picref;
    int64_t pts = 0;
    AVPacket pkt;
    int ret;

    /* get_video_frame() returns 0 for "no frame yet" (flush/skip/no picture) */
    while (!(ret = get_video_frame(priv->is, priv->frame, &pts, &pkt)))
        av_free_packet(&pkt);
    if (ret < 0)
        return -1;

    if(priv->use_dr1) {
        /* decoder wrote straight into a filter buffer: just add a reference */
        picref = avfilter_ref_buffer(priv->frame->opaque, ~0);
    } else {
        /* NOTE(review): avfilter_get_video_buffer() result is not
           NULL-checked before the copy — confirm it cannot fail here */
        picref = avfilter_get_video_buffer(link, AV_PERM_WRITE, link->w, link->h);
        av_image_copy(picref->data, picref->linesize,
                      priv->frame->data, priv->frame->linesize,
                      picref->format, link->w, link->h);
    }
    /* frees the packet payload only; the pkt.pos field read below remains valid */
    av_free_packet(&pkt);

    picref->pts = pts;
    picref->pos = pkt.pos;
    picref->video->pixel_aspect = priv->is->video_st->codec->sample_aspect_ratio;
    avfilter_start_frame(link, picref);
    avfilter_draw_slice(link, 0, link->h, 1);
    avfilter_end_frame(link);

    return 0;
}
1743
1744 static int input_query_formats(AVFilterContext *ctx)
1745 {
1746     FilterPriv *priv = ctx->priv;
1747     enum PixelFormat pix_fmts[] = {
1748         priv->is->video_st->codec->pix_fmt, PIX_FMT_NONE
1749     };
1750
1751     avfilter_set_common_formats(ctx, avfilter_make_format_list(pix_fmts));
1752     return 0;
1753 }
1754
1755 static int input_config_props(AVFilterLink *link)
1756 {
1757     FilterPriv *priv  = link->src->priv;
1758     AVCodecContext *c = priv->is->video_st->codec;
1759
1760     link->w = c->width;
1761     link->h = c->height;
1762
1763     return 0;
1764 }
1765
/* Source filter: feeds frames decoded from the video stream into the
 * (optionally user-configured) filter graph. Has no inputs. */
static AVFilter input_filter =
{
    .name      = "ffplay_input",

    .priv_size = sizeof(FilterPriv),

    .init      = input_init,
    .uninit    = input_uninit,

    .query_formats = input_query_formats,

    /* no input pads: this is a source */
    .inputs    = (AVFilterPad[]) {{ .name = NULL }},
    .outputs   = (AVFilterPad[]) {{ .name = "default",
                                    .type = AVMEDIA_TYPE_VIDEO,
                                    .request_frame = input_request_frame,
                                    .config_props  = input_config_props, },
                                  { .name = NULL }},
};
1784
/* end_frame callback of the sink: intentionally a no-op — frames are
   pulled out of the graph explicitly by get_filtered_video_frame() */
static void output_end_frame(AVFilterLink *link)
{
}
1788
1789 static int output_query_formats(AVFilterContext *ctx)
1790 {
1791     enum PixelFormat pix_fmts[] = { PIX_FMT_YUV420P, PIX_FMT_NONE };
1792
1793     avfilter_set_common_formats(ctx, avfilter_make_format_list(pix_fmts));
1794     return 0;
1795 }
1796
1797 static int get_filtered_video_frame(AVFilterContext *ctx, AVFrame *frame,
1798                                     int64_t *pts, int64_t *pos)
1799 {
1800     AVFilterBufferRef *pic;
1801
1802     if(avfilter_request_frame(ctx->inputs[0]))
1803         return -1;
1804     if(!(pic = ctx->inputs[0]->cur_buf))
1805         return -1;
1806     ctx->inputs[0]->cur_buf = NULL;
1807
1808     frame->opaque = pic;
1809     *pts          = pic->pts;
1810     *pos          = pic->pos;
1811
1812     memcpy(frame->data,     pic->data,     sizeof(frame->data));
1813     memcpy(frame->linesize, pic->linesize, sizeof(frame->linesize));
1814
1815     return 1;
1816 }
1817
/* Sink filter: terminates the graph. Frames are pulled from its input
 * link by get_filtered_video_frame() rather than pushed onward. */
static AVFilter output_filter =
{
    .name      = "ffplay_output",

    .query_formats = output_query_formats,

    .inputs    = (AVFilterPad[]) {{ .name          = "default",
                                    .type          = AVMEDIA_TYPE_VIDEO,
                                    .end_frame     = output_end_frame,
                                    .min_perms     = AV_PERM_READ, },
                                  { .name = NULL }},
    /* no output pads: this is a sink */
    .outputs   = (AVFilterPad[]) {{ .name = NULL }},
};
1831 #endif  /* CONFIG_AVFILTER */
1832
/* Video decoding thread.
 *
 * With CONFIG_AVFILTER it first builds a filter graph
 * src -> [user chain from -vf, if any] -> out, then repeatedly pulls
 * filtered frames from the graph; without avfilter it decodes packets
 * from is->videoq directly.  Each frame is handed to output_picture2()
 * for queuing into the display picture queue.  Returns 0 on stream
 * end, error, or abort. */
static int video_thread(void *arg)
{
    VideoState *is = arg;
    AVFrame *frame= avcodec_alloc_frame();
    int64_t pts_int;
    double pts;
    int ret;

#if CONFIG_AVFILTER
    int64_t pos;
    char sws_flags_str[128];
    AVFilterContext *filt_src = NULL, *filt_out = NULL;
    AVFilterGraph *graph = av_mallocz(sizeof(AVFilterGraph));
    /* propagate the global swscale flags to auto-inserted scalers */
    snprintf(sws_flags_str, sizeof(sws_flags_str), "flags=%d", sws_flags);
    graph->scale_sws_opts = av_strdup(sws_flags_str);

    if (avfilter_open(&filt_src, &input_filter,  "src") < 0) goto the_end;
    if (avfilter_open(&filt_out, &output_filter, "out") < 0) goto the_end;

    /* source reads decoded frames via 'is'; sink writes into 'frame' */
    if(avfilter_init_filter(filt_src, NULL, is))             goto the_end;
    if(avfilter_init_filter(filt_out, NULL, frame))          goto the_end;


    if(vfilters) {
        AVFilterInOut *outputs = av_malloc(sizeof(AVFilterInOut));
        AVFilterInOut *inputs  = av_malloc(sizeof(AVFilterInOut));

        /* label "in" = our source's output pad, "out" = sink's input pad,
           matching the endpoints the user's -vf string connects to */
        outputs->name    = av_strdup("in");
        outputs->filter  = filt_src;
        outputs->pad_idx = 0;
        outputs->next    = NULL;

        inputs->name    = av_strdup("out");
        inputs->filter  = filt_out;
        inputs->pad_idx = 0;
        inputs->next    = NULL;

        if (avfilter_graph_parse(graph, vfilters, inputs, outputs, NULL) < 0)
            goto the_end;
        av_freep(&vfilters);
    } else {
        /* no user filter chain: connect source straight to sink */
        if(avfilter_link(filt_src, 0, filt_out, 0) < 0)          goto the_end;
    }
    avfilter_graph_add_filter(graph, filt_src);
    avfilter_graph_add_filter(graph, filt_out);

    if(avfilter_graph_check_validity(graph, NULL))           goto the_end;
    if(avfilter_graph_config_formats(graph, NULL))           goto the_end;
    if(avfilter_graph_config_links(graph, NULL))             goto the_end;

    is->out_video_filter = filt_out;
#endif

    for(;;) {
#if !CONFIG_AVFILTER
        AVPacket pkt;
#endif
        /* poll (10 ms granularity) while playback is paused */
        while (is->paused && !is->videoq.abort_request)
            SDL_Delay(10);
#if CONFIG_AVFILTER
        ret = get_filtered_video_frame(filt_out, frame, &pts_int, &pos);
#else
        ret = get_video_frame(is, frame, &pts_int, &pkt);
#endif

        if (ret < 0) goto the_end;

        /* ret == 0: no frame produced this iteration, try again */
        if (!ret)
            continue;

        /* convert stream-timebase PTS to seconds */
        pts = pts_int*av_q2d(is->video_st->time_base);

#if CONFIG_AVFILTER
        ret = output_picture2(is, frame, pts, pos);
#else
        ret = output_picture2(is, frame, pts,  pkt.pos);
        av_free_packet(&pkt);
#endif
        if (ret < 0)
            goto the_end;

        /* frame-step mode: pause again after each displayed frame */
        if (step)
            if (cur_stream)
                stream_pause(cur_stream);
    }
 the_end:
#if CONFIG_AVFILTER
    avfilter_graph_destroy(graph);
    av_freep(&graph);
#endif
    av_free(frame);
    return 0;
}
1926
/* Subtitle decoding thread: drains is->subtitleq, decodes bitmap
 * subtitles and stores them in the subpicture queue (is->subpq),
 * converting each rect's palette from RGBA to CCIR YUVA in place so
 * the display path can blend it onto YUV video. */
static int subtitle_thread(void *arg)
{
    VideoState *is = arg;
    SubPicture *sp;
    AVPacket pkt1, *pkt = &pkt1;
    int len1, got_subtitle;
    double pts;
    int i, j;
    int r, g, b, y, u, v, a;

    for(;;) {
        /* poll while paused, unless we are being torn down */
        while (is->paused && !is->subtitleq.abort_request) {
            SDL_Delay(10);
        }
        if (packet_queue_get(&is->subtitleq, pkt, 1) < 0)
            break;

        /* a flush packet (queued on seek) resets decoder state */
        if(pkt->data == flush_pkt.data){
            avcodec_flush_buffers(is->subtitle_st->codec);
            continue;
        }
        /* wait for a free slot in the subpicture ring buffer */
        SDL_LockMutex(is->subpq_mutex);
        while (is->subpq_size >= SUBPICTURE_QUEUE_SIZE &&
               !is->subtitleq.abort_request) {
            SDL_CondWait(is->subpq_cond, is->subpq_mutex);
        }
        SDL_UnlockMutex(is->subpq_mutex);

        if (is->subtitleq.abort_request)
            goto the_end;

        sp = &is->subpq[is->subpq_windex];

       /* NOTE: ipts is the PTS of the _first_ picture beginning in
           this packet, if any */
        pts = 0;
        if (pkt->pts != AV_NOPTS_VALUE)
            pts = av_q2d(is->subtitle_st->time_base)*pkt->pts;

        len1 = avcodec_decode_subtitle2(is->subtitle_st->codec,
                                    &sp->sub, &got_subtitle,
                                    pkt);
//            if (len1 < 0)
//                break;
        /* sub.format == 0 means bitmap (graphic) subtitles */
        if (got_subtitle && sp->sub.format == 0) {
            sp->pts = pts;

            /* rewrite each rect's RGBA palette entries as YUVA */
            for (i = 0; i < sp->sub.num_rects; i++)
            {
                for (j = 0; j < sp->sub.rects[i]->nb_colors; j++)
                {
                    RGBA_IN(r, g, b, a, (uint32_t*)sp->sub.rects[i]->pict.data[1] + j);
                    y = RGB_TO_Y_CCIR(r, g, b);
                    u = RGB_TO_U_CCIR(r, g, b, 0);
                    v = RGB_TO_V_CCIR(r, g, b, 0);
                    YUVA_OUT((uint32_t*)sp->sub.rects[i]->pict.data[1] + j, y, u, v, a);
                }
            }

            /* now we can update the picture count */
            if (++is->subpq_windex == SUBPICTURE_QUEUE_SIZE)
                is->subpq_windex = 0;
            SDL_LockMutex(is->subpq_mutex);
            is->subpq_size++;
            SDL_UnlockMutex(is->subpq_mutex);
        }
        av_free_packet(pkt);
//        if (step)
//            if (cur_stream)
//                stream_pause(cur_stream);
    }
 the_end:
    return 0;
}
2001
2002 /* copy samples for viewing in editor window */
2003 static void update_sample_display(VideoState *is, short *samples, int samples_size)
2004 {
2005     int size, len, channels;
2006
2007     channels = is->audio_st->codec->channels;
2008
2009     size = samples_size / sizeof(short);
2010     while (size > 0) {
2011         len = SAMPLE_ARRAY_SIZE - is->sample_array_index;
2012         if (len > size)
2013             len = size;
2014         memcpy(is->sample_array + is->sample_array_index, samples, len * sizeof(short));
2015         samples += len;
2016         is->sample_array_index += len;
2017         if (is->sample_array_index >= SAMPLE_ARRAY_SIZE)
2018             is->sample_array_index = 0;
2019         size -= len;
2020     }
2021 }
2022
2023 /* return the new audio buffer size (samples can be added or deleted
2024    to get better sync if video or external master clock) */
2025 static int synchronize_audio(VideoState *is, short *samples,
2026                              int samples_size1, double pts)
2027 {
2028     int n, samples_size;
2029     double ref_clock;
2030
2031     n = 2 * is->audio_st->codec->channels;
2032     samples_size = samples_size1;
2033
2034     /* if not master, then we try to remove or add samples to correct the clock */
2035     if (((is->av_sync_type == AV_SYNC_VIDEO_MASTER && is->video_st) ||
2036          is->av_sync_type == AV_SYNC_EXTERNAL_CLOCK)) {
2037         double diff, avg_diff;
2038         int wanted_size, min_size, max_size, nb_samples;
2039
2040         ref_clock = get_master_clock(is);
2041         diff = get_audio_clock(is) - ref_clock;
2042
2043         if (diff < AV_NOSYNC_THRESHOLD) {
2044             is->audio_diff_cum = diff + is->audio_diff_avg_coef * is->audio_diff_cum;
2045             if (is->audio_diff_avg_count < AUDIO_DIFF_AVG_NB) {
2046                 /* not enough measures to have a correct estimate */
2047                 is->audio_diff_avg_count++;
2048             } else {
2049                 /* estimate the A-V difference */
2050                 avg_diff = is->audio_diff_cum * (1.0 - is->audio_diff_avg_coef);
2051
2052                 if (fabs(avg_diff) >= is->audio_diff_threshold) {
2053                     wanted_size = samples_size + ((int)(diff * is->audio_st->codec->sample_rate) * n);
2054                     nb_samples = samples_size / n;
2055
2056                     min_size = ((nb_samples * (100 - SAMPLE_CORRECTION_PERCENT_MAX)) / 100) * n;
2057                     max_size = ((nb_samples * (100 + SAMPLE_CORRECTION_PERCENT_MAX)) / 100) * n;
2058                     if (wanted_size < min_size)
2059                         wanted_size = min_size;
2060                     else if (wanted_size > max_size)
2061                         wanted_size = max_size;
2062
2063                     /* add or remove samples to correction the synchro */
2064                     if (wanted_size < samples_size) {
2065                         /* remove samples */
2066                         samples_size = wanted_size;
2067                     } else if (wanted_size > samples_size) {
2068                         uint8_t *samples_end, *q;
2069                         int nb;
2070
2071                         /* add samples */
2072                         nb = (samples_size - wanted_size);
2073                         samples_end = (uint8_t *)samples + samples_size - n;
2074                         q = samples_end + n;
2075                         while (nb > 0) {
2076                             memcpy(q, samples_end, n);
2077                             q += n;
2078                             nb -= n;
2079                         }
2080                         samples_size = wanted_size;
2081                     }
2082                 }
2083 #if 0
2084                 printf("diff=%f adiff=%f sample_diff=%d apts=%0.3f vpts=%0.3f %f\n",
2085                        diff, avg_diff, samples_size - samples_size1,
2086                        is->audio_clock, is->video_clock, is->audio_diff_threshold);
2087 #endif
2088             }
2089         } else {
2090             /* too big difference : may be initial PTS errors, so
2091                reset A-V filter */
2092             is->audio_diff_avg_count = 0;
2093             is->audio_diff_cum = 0;
2094         }
2095     }
2096
2097     return samples_size;
2098 }
2099
2100 /* decode one audio frame and returns its uncompressed size */
/* Decode one audio frame and return its uncompressed size in bytes.
 *
 * Drains the current packet (one packet may contain several frames),
 * converting the decoder's output to S16 when necessary, then fetches
 * the next packet from is->audioq.  The decoded/converted data is left
 * in is->audio_buf and *pts_ptr receives its presentation time.
 * Returns -1 when paused or when the queue is aborted. */
static int audio_decode_frame(VideoState *is, double *pts_ptr)
{
    AVPacket *pkt_temp = &is->audio_pkt_temp;   /* walking view into 'pkt' */
    AVPacket *pkt = &is->audio_pkt;
    AVCodecContext *dec= is->audio_st->codec;
    int n, len1, data_size;
    double pts;

    for(;;) {
        /* NOTE: the audio packet can contain several frames */
        while (pkt_temp->size > 0) {
            data_size = sizeof(is->audio_buf1);
            len1 = avcodec_decode_audio3(dec,
                                        (int16_t *)is->audio_buf1, &data_size,
                                        pkt_temp);
            if (len1 < 0) {
                /* if error, we skip the frame */
                pkt_temp->size = 0;
                break;
            }

            /* advance past the consumed part of the packet */
            pkt_temp->data += len1;
            pkt_temp->size -= len1;
            if (data_size <= 0)
                continue;

            /* (re)create the converter if the decoder's sample format
               is not the S16 that SDL was opened with */
            if (dec->sample_fmt != is->audio_src_fmt) {
                if (is->reformat_ctx)
                    av_audio_convert_free(is->reformat_ctx);
                is->reformat_ctx= av_audio_convert_alloc(SAMPLE_FMT_S16, 1,
                                                         dec->sample_fmt, 1, NULL, 0);
                if (!is->reformat_ctx) {
                    fprintf(stderr, "Cannot convert %s sample format to %s sample format\n",
                        avcodec_get_sample_fmt_name(dec->sample_fmt),
                        avcodec_get_sample_fmt_name(SAMPLE_FMT_S16));
                        break;
                }
                is->audio_src_fmt= dec->sample_fmt;
            }

            if (is->reformat_ctx) {
                /* convert audio_buf1 -> audio_buf2 (packed S16) */
                const void *ibuf[6]= {is->audio_buf1};
                void *obuf[6]= {is->audio_buf2};
                int istride[6]= {av_get_bits_per_sample_format(dec->sample_fmt)/8};
                int ostride[6]= {2};
                int len= data_size/istride[0];
                if (av_audio_convert(is->reformat_ctx, obuf, ostride, ibuf, istride, len)<0) {
                    printf("av_audio_convert() failed\n");
                    break;
                }
                is->audio_buf= is->audio_buf2;
                /* FIXME: existing code assume that data_size equals framesize*channels*2
                          remove this legacy cruft */
                data_size= len*2;
            }else{
                is->audio_buf= is->audio_buf1;
            }

            /* if no pts, then compute it */
            pts = is->audio_clock;
            *pts_ptr = pts;
            n = 2 * dec->channels;
            /* advance the audio clock by the duration of the decoded data */
            is->audio_clock += (double)data_size /
                (double)(n * dec->sample_rate);
#if defined(DEBUG_SYNC)
            {
                static double last_clock;
                printf("audio: delay=%0.3f clock=%0.3f pts=%0.3f\n",
                       is->audio_clock - last_clock,
                       is->audio_clock, pts);
                last_clock = is->audio_clock;
            }
#endif
            return data_size;
        }

        /* free the current packet */
        if (pkt->data)
            av_free_packet(pkt);

        if (is->paused || is->audioq.abort_request) {
            return -1;
        }

        /* read next packet */
        if (packet_queue_get(&is->audioq, pkt, 1) < 0)
            return -1;
        /* a flush packet (queued on seek) resets decoder state */
        if(pkt->data == flush_pkt.data){
            avcodec_flush_buffers(dec);
            continue;
        }

        pkt_temp->data = pkt->data;
        pkt_temp->size = pkt->size;

        /* if update the audio clock with the pts */
        if (pkt->pts != AV_NOPTS_VALUE) {
            is->audio_clock = av_q2d(is->audio_st->time_base)*pkt->pts;
        }
    }
}
2202
2203 /* get the current audio output buffer size, in samples. With SDL, we
2204    cannot have a precise information */
2205 static int audio_write_get_buf_size(VideoState *is)
2206 {
2207     return is->audio_buf_size - is->audio_buf_index;
2208 }
2209
2210
2211 /* prepare a new audio buffer */
2212 static void sdl_audio_callback(void *opaque, Uint8 *stream, int len)
2213 {
2214     VideoState *is = opaque;
2215     int audio_size, len1;
2216     double pts;
2217
2218     audio_callback_time = av_gettime();
2219
2220     while (len > 0) {
2221         if (is->audio_buf_index >= is->audio_buf_size) {
2222            audio_size = audio_decode_frame(is, &pts);
2223            if (audio_size < 0) {
2224                 /* if error, just output silence */
2225                is->audio_buf = is->audio_buf1;
2226                is->audio_buf_size = 1024;
2227                memset(is->audio_buf, 0, is->audio_buf_size);
2228            } else {
2229                if (is->show_audio)
2230                    update_sample_display(is, (int16_t *)is->audio_buf, audio_size);
2231                audio_size = synchronize_audio(is, (int16_t *)is->audio_buf, audio_size,
2232                                               pts);
2233                is->audio_buf_size = audio_size;
2234            }
2235            is->audio_buf_index = 0;
2236         }
2237         len1 = is->audio_buf_size - is->audio_buf_index;
2238         if (len1 > len)
2239             len1 = len;
2240         memcpy(stream, (uint8_t *)is->audio_buf + is->audio_buf_index, len1);
2241         len -= len1;
2242         stream += len1;
2243         is->audio_buf_index += len1;
2244     }
2245 }
2246
2247 /* open a given stream. Return 0 if OK */
/* Open the stream with index 'stream_index' of is->ic and start its
 * consumer: the SDL audio device + callback for audio, video_thread
 * for video, subtitle_thread for subtitles.  Decoder options come
 * from the global command-line settings.  Return 0 if OK, -1 on error. */
static int stream_component_open(VideoState *is, int stream_index)
{
    AVFormatContext *ic = is->ic;
    AVCodecContext *avctx;
    AVCodec *codec;
    SDL_AudioSpec wanted_spec, spec;

    if (stream_index < 0 || stream_index >= ic->nb_streams)
        return -1;
    avctx = ic->streams[stream_index]->codec;

    /* prepare audio output */
    if (avctx->codec_type == AVMEDIA_TYPE_AUDIO) {
        /* ask the decoder to downmix to at most 2 channels */
        if (avctx->channels > 0) {
            avctx->request_channels = FFMIN(2, avctx->channels);
        } else {
            avctx->request_channels = 2;
        }
    }

    codec = avcodec_find_decoder(avctx->codec_id);
    /* apply the global decoding options from the command line */
    avctx->debug_mv = debug_mv;
    avctx->debug = debug;
    avctx->workaround_bugs = workaround_bugs;
    avctx->lowres = lowres;
    if(lowres) avctx->flags |= CODEC_FLAG_EMU_EDGE;
    avctx->idct_algo= idct;
    if(fast) avctx->flags2 |= CODEC_FLAG2_FAST;
    avctx->skip_frame= skip_frame;
    avctx->skip_idct= skip_idct;
    avctx->skip_loop_filter= skip_loop_filter;
    avctx->error_recognition= error_recognition;
    avctx->error_concealment= error_concealment;
    avcodec_thread_init(avctx, thread_count);

    set_context_opts(avctx, avcodec_opts[avctx->codec_type], 0, codec);

    if (!codec ||
        avcodec_open(avctx, codec) < 0)
        return -1;

    /* prepare audio output */
    if (avctx->codec_type == AVMEDIA_TYPE_AUDIO) {
        wanted_spec.freq = avctx->sample_rate;
        wanted_spec.format = AUDIO_S16SYS;
        wanted_spec.channels = avctx->channels;
        wanted_spec.silence = 0;
        wanted_spec.samples = SDL_AUDIO_BUFFER_SIZE;
        wanted_spec.callback = sdl_audio_callback;
        wanted_spec.userdata = is;
        if (SDL_OpenAudio(&wanted_spec, &spec) < 0) {
            fprintf(stderr, "SDL_OpenAudio: %s\n", SDL_GetError());
            return -1;
        }
        /* the hardware buffer size is needed for the audio clock */
        is->audio_hw_buf_size = spec.size;
        is->audio_src_fmt= SAMPLE_FMT_S16;
    }

    ic->streams[stream_index]->discard = AVDISCARD_DEFAULT;
    switch(avctx->codec_type) {
    case AVMEDIA_TYPE_AUDIO:
        is->audio_stream = stream_index;
        is->audio_st = ic->streams[stream_index];
        is->audio_buf_size = 0;
        is->audio_buf_index = 0;

        /* init averaging filter */
        is->audio_diff_avg_coef = exp(log(0.01) / AUDIO_DIFF_AVG_NB);
        is->audio_diff_avg_count = 0;
        /* since we do not have a precise anough audio fifo fullness,
           we correct audio sync only if larger than this threshold */
        is->audio_diff_threshold = 2.0 * SDL_AUDIO_BUFFER_SIZE / avctx->sample_rate;

        memset(&is->audio_pkt, 0, sizeof(is->audio_pkt));
        packet_queue_init(&is->audioq);
        SDL_PauseAudio(0);
        break;
    case AVMEDIA_TYPE_VIDEO:
        is->video_stream = stream_index;
        is->video_st = ic->streams[stream_index];

//        is->video_current_pts_time = av_gettime();

        packet_queue_init(&is->videoq);
        is->video_tid = SDL_CreateThread(video_thread, is);
        break;
    case AVMEDIA_TYPE_SUBTITLE:
        is->subtitle_stream = stream_index;
        is->subtitle_st = ic->streams[stream_index];
        packet_queue_init(&is->subtitleq);

        is->subtitle_tid = SDL_CreateThread(subtitle_thread, is);
        break;
    default:
        break;
    }
    return 0;
}
2346
/* Close the stream with index 'stream_index': abort its packet queue,
 * wake and join the consumer thread, then close the codec.
 * The tear-down order matters: the queue must be aborted and any
 * condition-variable waiters signalled BEFORE joining the thread,
 * otherwise the join could block forever. */
static void stream_component_close(VideoState *is, int stream_index)
{
    AVFormatContext *ic = is->ic;
    AVCodecContext *avctx;

    if (stream_index < 0 || stream_index >= ic->nb_streams)
        return;
    avctx = ic->streams[stream_index]->codec;

    switch(avctx->codec_type) {
    case AVMEDIA_TYPE_AUDIO:
        packet_queue_abort(&is->audioq);

        /* stops the SDL audio thread and its callback */
        SDL_CloseAudio();

        packet_queue_end(&is->audioq);
        if (is->reformat_ctx)
            av_audio_convert_free(is->reformat_ctx);
        is->reformat_ctx = NULL;
        break;
    case AVMEDIA_TYPE_VIDEO:
        packet_queue_abort(&is->videoq);

        /* note: we also signal this mutex to make sure we deblock the
           video thread in all cases */
        SDL_LockMutex(is->pictq_mutex);
        SDL_CondSignal(is->pictq_cond);
        SDL_UnlockMutex(is->pictq_mutex);

        SDL_WaitThread(is->video_tid, NULL);

        packet_queue_end(&is->videoq);
        break;
    case AVMEDIA_TYPE_SUBTITLE:
        packet_queue_abort(&is->subtitleq);

        /* note: we also signal this mutex to make sure we deblock the
           video thread in all cases */
        SDL_LockMutex(is->subpq_mutex);
        is->subtitle_stream_changed = 1;

        SDL_CondSignal(is->subpq_cond);
        SDL_UnlockMutex(is->subpq_mutex);

        SDL_WaitThread(is->subtitle_tid, NULL);

        packet_queue_end(&is->subtitleq);
        break;
    default:
        break;
    }

    /* stop demuxing this stream and release the decoder */
    ic->streams[stream_index]->discard = AVDISCARD_ALL;
    avcodec_close(avctx);
    switch(avctx->codec_type) {
    case AVMEDIA_TYPE_AUDIO:
        is->audio_st = NULL;
        is->audio_stream = -1;
        break;
    case AVMEDIA_TYPE_VIDEO:
        is->video_st = NULL;
        is->video_stream = -1;
        break;
    case AVMEDIA_TYPE_SUBTITLE:
        is->subtitle_st = NULL;
        is->subtitle_stream = -1;
        break;
    default:
        break;
    }
}
2418
2419 /* since we have only one decoding thread, we can use a global
2420    variable instead of a thread local variable */
2421 static VideoState *global_video_state;
2422
2423 static int decode_interrupt_cb(void)
2424 {
2425     return (global_video_state && global_video_state->abort_request);
2426 }
2427
2428 /* this thread gets the stream from the disk or the network */
/* this thread gets the stream from the disk or the network.
 *
 * It opens the input, selects and opens the audio/video/subtitle
 * streams, then runs the demux loop: read packets, dispatch them to
 * the per-stream packet queues, and service pause/seek/EOF requests.
 * On exit (error, abort or autoexit) it closes everything it opened
 * and, on failure, posts FF_QUIT_EVENT to the main loop. */
static int decode_thread(void *arg)
{
    VideoState *is = arg;
    AVFormatContext *ic;
    int err, i, ret;
    int st_index[AVMEDIA_TYPE_NB];              /* chosen stream per media type, -1 = none */
    int st_count[AVMEDIA_TYPE_NB]={0};          /* streams seen so far per type */
    int st_best_packet_count[AVMEDIA_TYPE_NB];  /* best codec_info_nb_frames per type */
    AVPacket pkt1, *pkt = &pkt1;
    AVFormatParameters params, *ap = &params;
    int eof=0;
    int pkt_in_play_range = 0;

    ic = avformat_alloc_context();

    memset(st_index, -1, sizeof(st_index));
    memset(st_best_packet_count, -1, sizeof(st_best_packet_count));
    is->video_stream = -1;
    is->audio_stream = -1;
    is->subtitle_stream = -1;

    /* let blocking I/O be interrupted by an abort request */
    global_video_state = is;
    url_set_interrupt_cb(decode_interrupt_cb);

    memset(ap, 0, sizeof(*ap));

    /* demuxer hints for raw formats that need them (size, rate, pixfmt) */
    ap->prealloced_context = 1;
    ap->width = frame_width;
    ap->height= frame_height;
    ap->time_base= (AVRational){1, 25};
    ap->pix_fmt = frame_pix_fmt;

    set_context_opts(ic, avformat_opts, AV_OPT_FLAG_DECODING_PARAM, NULL);

    err = av_open_input_file(&ic, is->filename, is->iformat, 0, ap);
    if (err < 0) {
        print_error(is->filename, err);
        ret = -1;
        goto fail;
    }
    is->ic = ic;

    if(genpts)
        ic->flags |= AVFMT_FLAG_GENPTS;

    err = av_find_stream_info(ic);
    if (err < 0) {
        fprintf(stderr, "%s: could not find codec parameters\n", is->filename);
        ret = -1;
        goto fail;
    }
    if(ic->pb)
        ic->pb->eof_reached= 0; //FIXME hack, ffplay maybe should not use url_feof() to test for the end

    if(seek_by_bytes<0)
        seek_by_bytes= !!(ic->iformat->flags & AVFMT_TS_DISCONT);

    /* if seeking requested, we execute it */
    if (start_time != AV_NOPTS_VALUE) {
        int64_t timestamp;

        timestamp = start_time;
        /* add the stream start time */
        if (ic->start_time != AV_NOPTS_VALUE)
            timestamp += ic->start_time;
        ret = avformat_seek_file(ic, -1, INT64_MIN, timestamp, INT64_MAX, 0);
        if (ret < 0) {
            fprintf(stderr, "%s: could not seek to position %0.3f\n",
                    is->filename, (double)timestamp / AV_TIME_BASE);
        }
    }

    /* select the streams to play: honour an explicitly wanted stream
       index per type, otherwise prefer the stream of each type with
       the most frames seen during probing */
    for(i = 0; i < ic->nb_streams; i++) {
        AVStream *st= ic->streams[i];
        AVCodecContext *avctx = st->codec;
        ic->streams[i]->discard = AVDISCARD_ALL;
        if(avctx->codec_type >= (unsigned)AVMEDIA_TYPE_NB)
            continue;
        if(st_count[avctx->codec_type]++ != wanted_stream[avctx->codec_type] && wanted_stream[avctx->codec_type] >= 0)
            continue;

        if(st_best_packet_count[avctx->codec_type] >= st->codec_info_nb_frames)
            continue;
        st_best_packet_count[avctx->codec_type]= st->codec_info_nb_frames;

        switch(avctx->codec_type) {
        case AVMEDIA_TYPE_AUDIO:
            if (!audio_disable)
                st_index[AVMEDIA_TYPE_AUDIO] = i;
            break;
        case AVMEDIA_TYPE_VIDEO:
        case AVMEDIA_TYPE_SUBTITLE:
            if (!video_disable)
                st_index[avctx->codec_type] = i;
            break;
        default:
            break;
        }
    }
    if (show_status) {
        dump_format(ic, 0, is->filename, 0);
    }

    /* open the streams */
    if (st_index[AVMEDIA_TYPE_AUDIO] >= 0) {
        stream_component_open(is, st_index[AVMEDIA_TYPE_AUDIO]);
    }

    ret=-1;
    if (st_index[AVMEDIA_TYPE_VIDEO] >= 0) {
        ret= stream_component_open(is, st_index[AVMEDIA_TYPE_VIDEO]);
    }
    is->refresh_tid = SDL_CreateThread(refresh_thread, is);
    /* no video stream: fall back to the audio visualisation display */
    if(ret<0) {
        if (!display_disable)
            is->show_audio = 2;
    }

    if (st_index[AVMEDIA_TYPE_SUBTITLE] >= 0) {
        stream_component_open(is, st_index[AVMEDIA_TYPE_SUBTITLE]);
    }

    if (is->video_stream < 0 && is->audio_stream < 0) {
        fprintf(stderr, "%s: could not open codecs\n", is->filename);
        ret = -1;
        goto fail;
    }

    /* main demux loop */
    for(;;) {
        if (is->abort_request)
            break;
        /* propagate pause/resume to the demuxer (network streams) */
        if (is->paused != is->last_paused) {
            is->last_paused = is->paused;
            if (is->paused)
                is->read_pause_return= av_read_pause(ic);
            else
                av_read_play(ic);
        }
#if CONFIG_RTSP_DEMUXER
        if (is->paused && !strcmp(ic->iformat->name, "rtsp")) {
            /* wait 10 ms to avoid trying to get another packet */
            /* XXX: horrible */
            SDL_Delay(10);
            continue;
        }
#endif
        if (is->seek_req) {
            int64_t seek_target= is->seek_pos;
            int64_t seek_min= is->seek_rel > 0 ? seek_target - is->seek_rel + 2: INT64_MIN;
            int64_t seek_max= is->seek_rel < 0 ? seek_target - is->seek_rel - 2: INT64_MAX;
//FIXME the +-2 is due to rounding being not done in the correct direction in generation
//      of the seek_pos/seek_rel variables

            ret = avformat_seek_file(is->ic, -1, seek_min, seek_target, seek_max, is->seek_flags);
            if (ret < 0) {
                fprintf(stderr, "%s: error while seeking\n", is->ic->filename);
            }else{
                /* flush all queues and push a flush packet so the
                   decoder threads reset their codec state */
                if (is->audio_stream >= 0) {
                    packet_queue_flush(&is->audioq);
                    packet_queue_put(&is->audioq, &flush_pkt);
                }
                if (is->subtitle_stream >= 0) {
                    packet_queue_flush(&is->subtitleq);
                    packet_queue_put(&is->subtitleq, &flush_pkt);
                }
                if (is->video_stream >= 0) {
                    packet_queue_flush(&is->videoq);
                    packet_queue_put(&is->videoq, &flush_pkt);
                }
            }
            is->seek_req = 0;
            eof= 0;
        }

        /* if the queue are full, no need to read more */
        if (   is->audioq.size + is->videoq.size + is->subtitleq.size > MAX_QUEUE_SIZE
            || (   (is->audioq   .size  > MIN_AUDIOQ_SIZE || is->audio_stream<0)
                && (is->videoq   .nb_packets > MIN_FRAMES || is->video_stream<0)
                && (is->subtitleq.nb_packets > MIN_FRAMES || is->subtitle_stream<0))) {
            /* wait 10 ms */
            SDL_Delay(10);
            continue;
        }
        if(url_feof(ic->pb) || eof) {
            /* at EOF, feed the video decoder an empty packet so it can
               flush out any internally buffered frames */
            if(is->video_stream >= 0){
                av_init_packet(pkt);
                pkt->data=NULL;
                pkt->size=0;
                pkt->stream_index= is->video_stream;
                packet_queue_put(&is->videoq, pkt);
            }
            SDL_Delay(10);
            /* everything consumed: loop the file, exit, or idle */
            if(is->audioq.size + is->videoq.size + is->subtitleq.size ==0){
                if(loop!=1 && (!loop || --loop)){
                    stream_seek(cur_stream, start_time != AV_NOPTS_VALUE ? start_time : 0, 0, 0);
                }else if(autoexit){
                    ret=AVERROR_EOF;
                    goto fail;
                }
            }
            continue;
        }
        ret = av_read_frame(ic, pkt);
        if (ret < 0) {
            if (ret == AVERROR_EOF)
                eof=1;
            if (url_ferror(ic->pb))
                break;
            SDL_Delay(100); /* wait for user event */
            continue;
        }
        /* check if packet is in play range specified by user, then queue, otherwise discard */
        pkt_in_play_range = duration == AV_NOPTS_VALUE ||
                (pkt->pts - ic->streams[pkt->stream_index]->start_time) *
                av_q2d(ic->streams[pkt->stream_index]->time_base) -
                (double)(start_time != AV_NOPTS_VALUE ? start_time : 0)/1000000
                <= ((double)duration/1000000);
        if (pkt->stream_index == is->audio_stream && pkt_in_play_range) {
            packet_queue_put(&is->audioq, pkt);
        } else if (pkt->stream_index == is->video_stream && pkt_in_play_range) {
            packet_queue_put(&is->videoq, pkt);
        } else if (pkt->stream_index == is->subtitle_stream && pkt_in_play_range) {
            packet_queue_put(&is->subtitleq, pkt);
        } else {
            av_free_packet(pkt);
        }
    }
    /* wait until the end */
    while (!is->abort_request) {
        SDL_Delay(100);
    }

    ret = 0;
 fail:
    /* disable interrupting */
    global_video_state = NULL;

    /* close each stream */
    if (is->audio_stream >= 0)
        stream_component_close(is, is->audio_stream);
    if (is->video_stream >= 0)
        stream_component_close(is, is->video_stream);
    if (is->subtitle_stream >= 0)
        stream_component_close(is, is->subtitle_stream);
    if (is->ic) {
        av_close_input_file(is->ic);
        is->ic = NULL; /* safety */
    }
    url_set_interrupt_cb(NULL);

    if (ret != 0) {
        SDL_Event event;

        event.type = FF_QUIT_EVENT;
        event.user.data1 = is;
        SDL_PushEvent(&event);
    }
    return 0;
}
2688
2689 static VideoState *stream_open(const char *filename, AVInputFormat *iformat)
2690 {
2691     VideoState *is;
2692
2693     is = av_mallocz(sizeof(VideoState));
2694     if (!is)
2695         return NULL;
2696     av_strlcpy(is->filename, filename, sizeof(is->filename));
2697     is->iformat = iformat;
2698     is->ytop = 0;
2699     is->xleft = 0;
2700
2701     /* start video display */
2702     is->pictq_mutex = SDL_CreateMutex();
2703     is->pictq_cond = SDL_CreateCond();
2704
2705     is->subpq_mutex = SDL_CreateMutex();
2706     is->subpq_cond = SDL_CreateCond();
2707
2708     is->av_sync_type = av_sync_type;
2709     is->parse_tid = SDL_CreateThread(decode_thread, is);
2710     if (!is->parse_tid) {
2711         av_free(is);
2712         return NULL;
2713     }
2714     return is;
2715 }
2716
/* Switch to the next usable stream of the given media type, wrapping
 * around the stream list.  For subtitles, cycling past the last stream
 * selects "no subtitle stream" (index -1); for audio/video the search
 * wraps back to stream 0.  The currently open component is closed and
 * the newly found one opened. */
static void stream_cycle_channel(VideoState *is, int codec_type)
{
    AVFormatContext *ic = is->ic;
    int start_index, stream_index;
    AVStream *st;

    /* start the search from the stream that is currently selected */
    if (codec_type == AVMEDIA_TYPE_VIDEO)
        start_index = is->video_stream;
    else if (codec_type == AVMEDIA_TYPE_AUDIO)
        start_index = is->audio_stream;
    else
        start_index = is->subtitle_stream;
    /* nothing to cycle: no stream of this type is open (subtitles may
     * legitimately start from -1, meaning "none selected") */
    if (start_index < (codec_type == AVMEDIA_TYPE_SUBTITLE ? -1 : 0))
        return;
    stream_index = start_index;
    for(;;) {
        if (++stream_index >= is->ic->nb_streams)
        {
            if (codec_type == AVMEDIA_TYPE_SUBTITLE)
            {
                /* past the last subtitle stream: disable subtitles */
                stream_index = -1;
                goto the_end;
            } else
                stream_index = 0;
        }
        /* full wrap-around with no other candidate found: keep current */
        if (stream_index == start_index)
            return;
        st = ic->streams[stream_index];
        if (st->codec->codec_type == codec_type) {
            /* check that parameters are OK */
            switch(codec_type) {
            case AVMEDIA_TYPE_AUDIO:
                /* only accept audio streams with known rate/channels */
                if (st->codec->sample_rate != 0 &&
                    st->codec->channels != 0)
                    goto the_end;
                break;
            case AVMEDIA_TYPE_VIDEO:
            case AVMEDIA_TYPE_SUBTITLE:
                goto the_end;
            default:
                break;
            }
        }
    }
 the_end:
    stream_component_close(is, start_index);
    stream_component_open(is, stream_index);
}
2765
2766
2767 static void toggle_full_screen(void)
2768 {
2769     is_full_screen = !is_full_screen;
2770     if (!fs_screen_width) {
2771         /* use default SDL method */
2772 //        SDL_WM_ToggleFullScreen(screen);
2773     }
2774     video_open(cur_stream);
2775 }
2776
2777 static void toggle_pause(void)
2778 {
2779     if (cur_stream)
2780         stream_pause(cur_stream);
2781     step = 0;
2782 }
2783
2784 static void step_to_next_frame(void)
2785 {
2786     if (cur_stream) {
2787         /* if the stream is paused unpause it, then step */
2788         if (cur_stream->paused)
2789             stream_pause(cur_stream);
2790     }
2791     step = 1;
2792 }
2793
2794 static void toggle_audio_display(void)
2795 {
2796     if (cur_stream) {
2797         int bgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0x00);
2798         cur_stream->show_audio = (cur_stream->show_audio + 1) % 3;
2799         fill_rectangle(screen,
2800                     cur_stream->xleft, cur_stream->ytop, cur_stream->width, cur_stream->height,
2801                     bgcolor);
2802         SDL_UpdateRect(screen, cur_stream->xleft, cur_stream->ytop, cur_stream->width, cur_stream->height);
2803     }
2804 }
2805
2806 /* handle an event sent by the GUI */
/* Main GUI loop: blocks on SDL events and dispatches keyboard, mouse,
 * resize and the application-defined FF_* events.  Never returns except
 * through do_exit(). */
static void event_loop(void)
{
    SDL_Event event;
    double incr, pos, frac;

    for(;;) {
        double x;
        SDL_WaitEvent(&event);
        switch(event.type) {
        case SDL_KEYDOWN:
            if (exit_on_keydown) {
                do_exit();
                break;
            }
            switch(event.key.keysym.sym) {
            case SDLK_ESCAPE:
            case SDLK_q:
                do_exit();
                break;
            case SDLK_f:
                toggle_full_screen();
                break;
            case SDLK_p:
            case SDLK_SPACE:
                toggle_pause();
                break;
            case SDLK_s: //S: Step to next frame
                step_to_next_frame();
                break;
            case SDLK_a:
                if (cur_stream)
                    stream_cycle_channel(cur_stream, AVMEDIA_TYPE_AUDIO);
                break;
            case SDLK_v:
                if (cur_stream)
                    stream_cycle_channel(cur_stream, AVMEDIA_TYPE_VIDEO);
                break;
            case SDLK_t:
                if (cur_stream)
                    stream_cycle_channel(cur_stream, AVMEDIA_TYPE_SUBTITLE);
                break;
            case SDLK_w:
                toggle_audio_display();
                break;
            /* arrow keys share one seek handler; they only differ in the
             * signed seek increment (seconds) */
            case SDLK_LEFT:
                incr = -10.0;
                goto do_seek;
            case SDLK_RIGHT:
                incr = 10.0;
                goto do_seek;
            case SDLK_UP:
                incr = 60.0;
                goto do_seek;
            case SDLK_DOWN:
                incr = -60.0;
            do_seek:
                if (cur_stream) {
                    if (seek_by_bytes) {
                        /* byte-based seek: find the current byte position,
                         * preferring the decoded video/audio position over
                         * the raw I/O offset */
                        if (cur_stream->video_stream >= 0 && cur_stream->video_current_pos>=0){
                            pos= cur_stream->video_current_pos;
                        }else if(cur_stream->audio_stream >= 0 && cur_stream->audio_pkt.pos>=0){
                            pos= cur_stream->audio_pkt.pos;
                        }else
                            pos = url_ftell(cur_stream->ic->pb);
                        /* convert the increment from seconds to bytes using
                         * the container bit rate (fallback constant if the
                         * bit rate is unknown) */
                        if (cur_stream->ic->bit_rate)
                            incr *= cur_stream->ic->bit_rate / 8.0;
                        else
                            incr *= 180000.0;
                        pos += incr;
                        stream_seek(cur_stream, pos, incr, 1);
                    } else {
                        /* time-based seek relative to the master clock */
                        pos = get_master_clock(cur_stream);
                        pos += incr;
                        stream_seek(cur_stream, (int64_t)(pos * AV_TIME_BASE), (int64_t)(incr * AV_TIME_BASE), 0);
                    }
                }
                break;
            default:
                break;
            }
            break;
        case SDL_MOUSEBUTTONDOWN:
            if (exit_on_mousedown) {
                do_exit();
                break;
            }
            /* fall through: a click seeks just like a dragged motion */
        case SDL_MOUSEMOTION:
            if(event.type ==SDL_MOUSEBUTTONDOWN){
                x= event.button.x;
            }else{
                /* only seek while a button is held during the motion */
                if(event.motion.state != SDL_PRESSED)
                    break;
                x= event.motion.x;
            }
            if (cur_stream) {
                if(seek_by_bytes || cur_stream->ic->duration<=0){
                    /* no usable duration: map the x position to a byte
                     * offset within the file instead */
                    uint64_t size=  url_fsize(cur_stream->ic->pb);
                    stream_seek(cur_stream, size*x/cur_stream->width, 0, 1);
                }else{
                    int64_t ts;
                    int ns, hh, mm, ss;
                    int tns, thh, tmm, tss;
                    /* map the x position to a fraction of total duration
                     * and report target/total as h:m:s on stderr */
                    tns = cur_stream->ic->duration/1000000LL;
                    thh = tns/3600;
                    tmm = (tns%3600)/60;
                    tss = (tns%60);
                    frac = x/cur_stream->width;
                    ns = frac*tns;
                    hh = ns/3600;
                    mm = (ns%3600)/60;
                    ss = (ns%60);
                    fprintf(stderr, "Seek to %2.0f%% (%2d:%02d:%02d) of total duration (%2d:%02d:%02d)       \n", frac*100,
                            hh, mm, ss, thh, tmm, tss);
                    ts = frac*cur_stream->ic->duration;
                    /* durations are relative to start_time when present */
                    if (cur_stream->ic->start_time != AV_NOPTS_VALUE)
                        ts += cur_stream->ic->start_time;
                    stream_seek(cur_stream, ts, 0, 0);
                }
            }
            break;
        case SDL_VIDEORESIZE:
            if (cur_stream) {
                screen = SDL_SetVideoMode(event.resize.w, event.resize.h, 0,
                                          SDL_HWSURFACE|SDL_RESIZABLE|SDL_ASYNCBLIT|SDL_HWACCEL);
                screen_width = cur_stream->width = event.resize.w;
                screen_height= cur_stream->height= event.resize.h;
            }
            break;
        case SDL_QUIT:
        case FF_QUIT_EVENT:
            do_exit();
            break;
        case FF_ALLOC_EVENT:
            /* picture buffer allocation requested by the video thread */
            video_open(event.user.data1);
            alloc_picture(event.user.data1);
            break;
        case FF_REFRESH_EVENT:
            video_refresh_timer(event.user.data1);
            cur_stream->refresh=0;
            break;
        default:
            break;
        }
    }
}
2952
2953 static void opt_frame_size(const char *arg)
2954 {
2955     if (av_parse_video_size(&frame_width, &frame_height, arg) < 0) {
2956         fprintf(stderr, "Incorrect frame size\n");
2957         exit(1);
2958     }
2959     if ((frame_width % 2) != 0 || (frame_height % 2) != 0) {
2960         fprintf(stderr, "Frame size must be a multiple of 2\n");
2961         exit(1);
2962     }
2963 }
2964
2965 static int opt_width(const char *opt, const char *arg)
2966 {
2967     screen_width = parse_number_or_die(opt, arg, OPT_INT64, 1, INT_MAX);
2968     return 0;
2969 }
2970
2971 static int opt_height(const char *opt, const char *arg)
2972 {
2973     screen_height = parse_number_or_die(opt, arg, OPT_INT64, 1, INT_MAX);
2974     return 0;
2975 }
2976
2977 static void opt_format(const char *arg)
2978 {
2979     file_iformat = av_find_input_format(arg);
2980     if (!file_iformat) {
2981         fprintf(stderr, "Unknown input format: %s\n", arg);
2982         exit(1);
2983     }
2984 }
2985
2986 static void opt_frame_pix_fmt(const char *arg)
2987 {
2988     frame_pix_fmt = av_get_pix_fmt(arg);
2989 }
2990
2991 static int opt_sync(const char *opt, const char *arg)
2992 {
2993     if (!strcmp(arg, "audio"))
2994         av_sync_type = AV_SYNC_AUDIO_MASTER;
2995     else if (!strcmp(arg, "video"))
2996         av_sync_type = AV_SYNC_VIDEO_MASTER;
2997     else if (!strcmp(arg, "ext"))
2998         av_sync_type = AV_SYNC_EXTERNAL_CLOCK;
2999     else {
3000         fprintf(stderr, "Unknown value for %s: %s\n", opt, arg);
3001         exit(1);
3002     }
3003     return 0;
3004 }
3005
3006 static int opt_seek(const char *opt, const char *arg)
3007 {
3008     start_time = parse_time_or_die(opt, arg, 1);
3009     return 0;
3010 }
3011
3012 static int opt_duration(const char *opt, const char *arg)
3013 {
3014     duration = parse_time_or_die(opt, arg, 1);
3015     return 0;
3016 }
3017
3018 static int opt_debug(const char *opt, const char *arg)
3019 {
3020     av_log_set_level(99);
3021     debug = parse_number_or_die(opt, arg, OPT_INT64, 0, INT_MAX);
3022     return 0;
3023 }
3024
3025 static int opt_vismv(const char *opt, const char *arg)
3026 {
3027     debug_mv = parse_number_or_die(opt, arg, OPT_INT64, INT_MIN, INT_MAX);
3028     return 0;
3029 }
3030
3031 static int opt_thread_count(const char *opt, const char *arg)
3032 {
3033     thread_count= parse_number_or_die(opt, arg, OPT_INT64, 0, INT_MAX);
3034 #if !HAVE_THREADS
3035     fprintf(stderr, "Warning: not compiled with thread support, using thread emulation\n");
3036 #endif
3037     return 0;
3038 }
3039
3040 static const OptionDef options[] = {
3041 #include "cmdutils_common_opts.h"
3042     { "x", HAS_ARG | OPT_FUNC2, {(void*)opt_width}, "force displayed width", "width" },
3043     { "y", HAS_ARG | OPT_FUNC2, {(void*)opt_height}, "force displayed height", "height" },
3044     { "s", HAS_ARG | OPT_VIDEO, {(void*)opt_frame_size}, "set frame size (WxH or abbreviation)", "size" },
3045     { "fs", OPT_BOOL, {(void*)&is_full_screen}, "force full screen" },
3046     { "an", OPT_BOOL, {(void*)&audio_disable}, "disable audio" },
3047     { "vn", OPT_BOOL, {(void*)&video_disable}, "disable video" },
3048     { "ast", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&wanted_stream[AVMEDIA_TYPE_AUDIO]}, "select desired audio stream", "stream_number" },
3049     { "vst", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&wanted_stream[AVMEDIA_TYPE_VIDEO]}, "select desired video stream", "stream_number" },
3050     { "sst", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&wanted_stream[AVMEDIA_TYPE_SUBTITLE]}, "select desired subtitle stream", "stream_number" },
3051     { "ss", HAS_ARG | OPT_FUNC2, {(void*)&opt_seek}, "seek to a given position in seconds", "pos" },
3052     { "t", HAS_ARG | OPT_FUNC2, {(void*)&opt_duration}, "play  \"duration\" seconds of audio/video", "duration" },
3053     { "bytes", OPT_INT | HAS_ARG, {(void*)&seek_by_bytes}, "seek by bytes 0=off 1=on -1=auto", "val" },
3054     { "nodisp", OPT_BOOL, {(void*)&display_disable}, "disable graphical display" },
3055     { "f", HAS_ARG, {(void*)opt_format}, "force format", "fmt" },
3056     { "pix_fmt", HAS_ARG | OPT_EXPERT | OPT_VIDEO, {(void*)opt_frame_pix_fmt}, "set pixel format", "format" },
3057     { "stats", OPT_BOOL | OPT_EXPERT, {(void*)&show_status}, "show status", "" },
3058     { "debug", HAS_ARG | OPT_FUNC2 | OPT_EXPERT, {(void*)opt_debug}, "print specific debug info", "" },
3059     { "bug", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&workaround_bugs}, "workaround bugs", "" },
3060     { "vismv", HAS_ARG | OPT_FUNC2 | OPT_EXPERT, {(void*)opt_vismv}, "visualize motion vectors", "" },
3061     { "fast", OPT_BOOL | OPT_EXPERT, {(void*)&fast}, "non spec compliant optimizations", "" },
3062     { "genpts", OPT_BOOL | OPT_EXPERT, {(void*)&genpts}, "generate pts", "" },
3063     { "drp", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&decoder_reorder_pts}, "let decoder reorder pts 0=off 1=on -1=auto", ""},
3064     { "lowres", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&lowres}, "", "" },
3065     { "skiploop", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&skip_loop_filter}, "", "" },
3066     { "skipframe", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&skip_frame}, "", "" },
3067     { "skipidct", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&skip_idct}, "", "" },
3068     { "idct", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&idct}, "set idct algo",  "algo" },
3069     { "er", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&error_recognition}, "set error detection threshold (0-4)",  "threshold" },
3070     { "ec", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&error_concealment}, "set error concealment options",  "bit_mask" },
3071     { "sync", HAS_ARG | OPT_FUNC2 | OPT_EXPERT, {(void*)opt_sync}, "set audio-video sync. type (type=audio/video/ext)", "type" },
3072     { "threads", HAS_ARG | OPT_FUNC2 | OPT_EXPERT, {(void*)opt_thread_count}, "thread count", "count" },
3073     { "autoexit", OPT_BOOL | OPT_EXPERT, {(void*)&autoexit}, "exit at the end", "" },
3074     { "exitonkeydown", OPT_BOOL | OPT_EXPERT, {(void*)&exit_on_keydown}, "exit on key down", "" },
3075     { "exitonmousedown", OPT_BOOL | OPT_EXPERT, {(void*)&exit_on_mousedown}, "exit on mouse down", "" },
3076     { "loop", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&loop}, "set number of times the playback shall be looped", "loop count" },
3077     { "framedrop", OPT_BOOL | OPT_EXPERT, {(void*)&framedrop}, "drop frames when cpu is too slow", "" },
3078     { "window_title", OPT_STRING | HAS_ARG, {(void*)&window_title}, "set window title", "window title" },
3079 #if CONFIG_AVFILTER
3080     { "vf", OPT_STRING | HAS_ARG, {(void*)&vfilters}, "video filters", "filter list" },
3081 #endif
3082     { "rdftspeed", OPT_INT | HAS_ARG| OPT_AUDIO | OPT_EXPERT, {(void*)&rdftspeed}, "rdft speed", "msecs" },
3083     { "default", OPT_FUNC2 | HAS_ARG | OPT_AUDIO | OPT_VIDEO | OPT_EXPERT, {(void*)opt_default}, "generic catch all option", "" },
3084     { NULL, },
3085 };
3086
/* Print the short program synopsis (also shown when no input is given). */
static void show_usage(void)
{
    printf("Simple media player\n"
           "usage: ffplay [options] input_file\n"
           "\n");
}
3093
/* Print the full "-h" help text: usage line, option tables (main then
 * advanced), per-library AVOption listings, and the interactive key map. */
static void show_help(void)
{
    av_log_set_callback(log_callback_help);
    show_usage();
    /* non-expert options first, then the OPT_EXPERT ones */
    show_help_options(options, "Main options:\n",
                      OPT_EXPERT, 0);
    show_help_options(options, "\nAdvanced options:\n",
                      OPT_EXPERT, OPT_EXPERT);
    printf("\n");
    /* decoding-related AVOptions exposed by the codec and format layers */
    av_opt_show2(avcodec_opts[0], NULL,
                 AV_OPT_FLAG_DECODING_PARAM, 0);
    printf("\n");
    av_opt_show2(avformat_opts, NULL,
                 AV_OPT_FLAG_DECODING_PARAM, 0);
#if !CONFIG_AVFILTER
    /* swscale options are only used directly when avfilter is disabled */
    printf("\n");
    av_opt_show2(sws_opts, NULL,
                 AV_OPT_FLAG_ENCODING_PARAM, 0);
#endif
    printf("\nWhile playing:\n"
           "q, ESC              quit\n"
           "f                   toggle full screen\n"
           "p, SPC              pause\n"
           "a                   cycle audio channel\n"
           "v                   cycle video channel\n"
           "t                   cycle subtitle channel\n"
           "w                   show audio waves\n"
           "s                   activate frame-step mode\n"
           "left/right          seek backward/forward 10 seconds\n"
           "down/up             seek backward/forward 1 minute\n"
           "mouse click         seek to percentage in file corresponding to fraction of width\n"
           );
}
3127
3128 static void opt_input_file(const char *filename)
3129 {
3130     if (input_filename) {
3131         fprintf(stderr, "Argument '%s' provided as input filename, but '%s' was already specified.\n",
3132                 filename, input_filename);
3133         exit(1);
3134     }
3135     if (!strcmp(filename, "-"))
3136         filename = "pipe:";
3137     input_filename = filename;
3138 }
3139
3140 /* Called from the main */
3141 int main(int argc, char **argv)
3142 {
3143     int flags, i;
3144
3145     av_log_set_flags(AV_LOG_SKIP_REPEATED);
3146
3147     /* register all codecs, demux and protocols */
3148     avcodec_register_all();
3149 #if CONFIG_AVDEVICE
3150     avdevice_register_all();
3151 #endif
3152 #if CONFIG_AVFILTER
3153     avfilter_register_all();
3154 #endif
3155     av_register_all();
3156
3157     for(i=0; i<AVMEDIA_TYPE_NB; i++){
3158         avcodec_opts[i]= avcodec_alloc_context2(i);
3159     }
3160     avformat_opts = avformat_alloc_context();
3161 #if !CONFIG_AVFILTER
3162     sws_opts = sws_getContext(16,16,0, 16,16,0, sws_flags, NULL,NULL,NULL);
3163 #endif
3164
3165     show_banner();
3166
3167     parse_options(argc, argv, options, opt_input_file);
3168
3169     if (!input_filename) {
3170         show_usage();
3171         fprintf(stderr, "An input file must be specified\n");
3172         fprintf(stderr, "Use -h to get full help or, even better, run 'man ffplay'\n");
3173         exit(1);
3174     }
3175
3176     if (display_disable) {
3177         video_disable = 1;
3178     }
3179     flags = SDL_INIT_VIDEO | SDL_INIT_AUDIO | SDL_INIT_TIMER;
3180 #if !defined(__MINGW32__) && !defined(__APPLE__)
3181     flags |= SDL_INIT_EVENTTHREAD; /* Not supported on Windows or Mac OS X */
3182 #endif
3183     if (SDL_Init (flags)) {
3184         fprintf(stderr, "Could not initialize SDL - %s\n", SDL_GetError());
3185         exit(1);
3186     }
3187
3188     if (!display_disable) {
3189 #if HAVE_SDL_VIDEO_SIZE
3190         const SDL_VideoInfo *vi = SDL_GetVideoInfo();
3191         fs_screen_width = vi->current_w;
3192         fs_screen_height = vi->current_h;
3193 #endif
3194     }
3195
3196     SDL_EventState(SDL_ACTIVEEVENT, SDL_IGNORE);
3197     SDL_EventState(SDL_SYSWMEVENT, SDL_IGNORE);
3198     SDL_EventState(SDL_USEREVENT, SDL_IGNORE);
3199
3200     av_init_packet(&flush_pkt);
3201     flush_pkt.data= "FLUSH";
3202
3203     cur_stream = stream_open(input_filename, file_iformat);
3204
3205     event_loop();
3206
3207     /* never returns */
3208
3209     return 0;
3210 }