]> git.sesse.net Git - ffmpeg/blob - ffplay.c
Support fourcc XVIX.
[ffmpeg] / ffplay.c
1 /*
2  * ffplay : Simple Media Player based on the FFmpeg libraries
3  * Copyright (c) 2003 Fabrice Bellard
4  *
5  * This file is part of FFmpeg.
6  *
7  * FFmpeg is free software; you can redistribute it and/or
8  * modify it under the terms of the GNU Lesser General Public
9  * License as published by the Free Software Foundation; either
10  * version 2.1 of the License, or (at your option) any later version.
11  *
12  * FFmpeg is distributed in the hope that it will be useful,
13  * but WITHOUT ANY WARRANTY; without even the implied warranty of
14  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
15  * Lesser General Public License for more details.
16  *
17  * You should have received a copy of the GNU Lesser General Public
18  * License along with FFmpeg; if not, write to the Free Software
19  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
20  */
21
22 #define _XOPEN_SOURCE 600
23
24 #include "config.h"
25 #include <inttypes.h>
26 #include <math.h>
27 #include <limits.h>
28 #include "libavutil/avstring.h"
29 #include "libavutil/colorspace.h"
30 #include "libavutil/pixdesc.h"
31 #include "libavutil/imgutils.h"
32 #include "libavutil/parseutils.h"
33 #include "libavutil/samplefmt.h"
34 #include "libavutil/avassert.h"
35 #include "libavformat/avformat.h"
36 #include "libavdevice/avdevice.h"
37 #include "libswscale/swscale.h"
38 #include "libavcodec/audioconvert.h"
39 #include "libavutil/opt.h"
40 #include "libavcodec/avfft.h"
41
42 #if CONFIG_AVFILTER
43 # include "libavfilter/avfilter.h"
44 # include "libavfilter/avfiltergraph.h"
45 #endif
46
47 #include "cmdutils.h"
48
49 #include <SDL.h>
50 #include <SDL_thread.h>
51
52 #ifdef __MINGW32__
53 #undef main /* We don't want SDL to override our main() */
54 #endif
55
56 #include <unistd.h>
57 #include <assert.h>
58
59 const char program_name[] = "ffplay";
60 const int program_birth_year = 2003;
61
62 //#define DEBUG
63 //#define DEBUG_SYNC
64
65 #define MAX_QUEUE_SIZE (15 * 1024 * 1024)
66 #define MIN_AUDIOQ_SIZE (20 * 16 * 1024)
67 #define MIN_FRAMES 5
68
69 /* SDL audio buffer size, in samples. Should be small to have precise
70    A/V sync as SDL does not have hardware buffer fullness info. */
71 #define SDL_AUDIO_BUFFER_SIZE 1024
72
73 /* no AV sync correction is done if below the AV sync threshold */
74 #define AV_SYNC_THRESHOLD 0.01
75 /* no AV correction is done if too big error */
76 #define AV_NOSYNC_THRESHOLD 10.0
77
78 #define FRAME_SKIP_FACTOR 0.05
79
80 /* maximum audio speed change to get correct sync */
81 #define SAMPLE_CORRECTION_PERCENT_MAX 10
82
83 /* we use about AUDIO_DIFF_AVG_NB A-V differences to make the average */
84 #define AUDIO_DIFF_AVG_NB   20
85
86 /* NOTE: the size must be big enough to compensate the hardware audio buffersize size */
87 #define SAMPLE_ARRAY_SIZE (2*65536)
88
89 static int sws_flags = SWS_BICUBIC;
90
91 typedef struct PacketQueue {
92     AVPacketList *first_pkt, *last_pkt;
93     int nb_packets;
94     int size;
95     int abort_request;
96     SDL_mutex *mutex;
97     SDL_cond *cond;
98 } PacketQueue;
99
100 #define VIDEO_PICTURE_QUEUE_SIZE 2
101 #define SUBPICTURE_QUEUE_SIZE 4
102
103 typedef struct VideoPicture {
104     double pts;                                  ///<presentation time stamp for this picture
105     double target_clock;                         ///<av_gettime() time at which this should be displayed ideally
106     int64_t pos;                                 ///<byte position in file
107     SDL_Overlay *bmp;
108     int width, height; /* source height & width */
109     int allocated;
110     enum PixelFormat pix_fmt;
111
112 #if CONFIG_AVFILTER
113     AVFilterBufferRef *picref;
114 #endif
115 } VideoPicture;
116
117 typedef struct SubPicture {
118     double pts; /* presentation time stamp for this picture */
119     AVSubtitle sub;
120 } SubPicture;
121
122 enum {
123     AV_SYNC_AUDIO_MASTER, /* default choice */
124     AV_SYNC_VIDEO_MASTER,
125     AV_SYNC_EXTERNAL_CLOCK, /* synchronize to an external clock */
126 };
127
128 typedef struct VideoState {
129     SDL_Thread *parse_tid;
130     SDL_Thread *video_tid;
131     SDL_Thread *refresh_tid;
132     AVInputFormat *iformat;
133     int no_background;
134     int abort_request;
135     int paused;
136     int last_paused;
137     int seek_req;
138     int seek_flags;
139     int64_t seek_pos;
140     int64_t seek_rel;
141     int read_pause_return;
142     AVFormatContext *ic;
143
144     int audio_stream;
145
146     int av_sync_type;
147     double external_clock; /* external clock base */
148     int64_t external_clock_time;
149
150     double audio_clock;
151     double audio_diff_cum; /* used for AV difference average computation */
152     double audio_diff_avg_coef;
153     double audio_diff_threshold;
154     int audio_diff_avg_count;
155     AVStream *audio_st;
156     PacketQueue audioq;
157     int audio_hw_buf_size;
158     /* samples output by the codec. we reserve more space for avsync
159        compensation */
160     DECLARE_ALIGNED(16,uint8_t,audio_buf1)[(AVCODEC_MAX_AUDIO_FRAME_SIZE * 3) / 2];
161     DECLARE_ALIGNED(16,uint8_t,audio_buf2)[(AVCODEC_MAX_AUDIO_FRAME_SIZE * 3) / 2];
162     uint8_t *audio_buf;
163     unsigned int audio_buf_size; /* in bytes */
164     int audio_buf_index; /* in bytes */
165     AVPacket audio_pkt_temp;
166     AVPacket audio_pkt;
167     enum AVSampleFormat audio_src_fmt;
168     AVAudioConvert *reformat_ctx;
169
170     enum {
171         SHOW_MODE_VIDEO = 0, SHOW_MODE_WAVES, SHOW_MODE_RDFT, SHOW_MODE_NB
172     } show_mode;
173     int16_t sample_array[SAMPLE_ARRAY_SIZE];
174     int sample_array_index;
175     int last_i_start;
176     RDFTContext *rdft;
177     int rdft_bits;
178     FFTSample *rdft_data;
179     int xpos;
180
181     SDL_Thread *subtitle_tid;
182     int subtitle_stream;
183     int subtitle_stream_changed;
184     AVStream *subtitle_st;
185     PacketQueue subtitleq;
186     SubPicture subpq[SUBPICTURE_QUEUE_SIZE];
187     int subpq_size, subpq_rindex, subpq_windex;
188     SDL_mutex *subpq_mutex;
189     SDL_cond *subpq_cond;
190
191     double frame_timer;
192     double frame_last_pts;
193     double frame_last_delay;
194     double video_clock;                          ///<pts of last decoded frame / predicted pts of next decoded frame
195     int video_stream;
196     AVStream *video_st;
197     PacketQueue videoq;
198     double video_current_pts;                    ///<current displayed pts (different from video_clock if frame fifos are used)
199     double video_current_pts_drift;              ///<video_current_pts - time (av_gettime) at which we updated video_current_pts - used to have running video pts
200     int64_t video_current_pos;                   ///<current displayed file pos
201     VideoPicture pictq[VIDEO_PICTURE_QUEUE_SIZE];
202     int pictq_size, pictq_rindex, pictq_windex;
203     SDL_mutex *pictq_mutex;
204     SDL_cond *pictq_cond;
205 #if !CONFIG_AVFILTER
206     struct SwsContext *img_convert_ctx;
207 #endif
208
209     char filename[1024];
210     int width, height, xleft, ytop;
211
212 #if CONFIG_AVFILTER
213     AVFilterContext *out_video_filter;          ///<the last filter in the video chain
214 #endif
215
216     float skip_frames;
217     float skip_frames_index;
218     int refresh;
219 } VideoState;
220
221 static void show_help(void);
222
223 /* options specified by the user */
224 static AVInputFormat *file_iformat;
225 static const char *input_filename;
226 static const char *window_title;
227 static int fs_screen_width;
228 static int fs_screen_height;
229 static int screen_width = 0;
230 static int screen_height = 0;
231 static int frame_width = 0;
232 static int frame_height = 0;
233 static enum PixelFormat frame_pix_fmt = PIX_FMT_NONE;
234 static int audio_disable;
235 static int video_disable;
236 static int wanted_stream[AVMEDIA_TYPE_NB]={
237     [AVMEDIA_TYPE_AUDIO]=-1,
238     [AVMEDIA_TYPE_VIDEO]=-1,
239     [AVMEDIA_TYPE_SUBTITLE]=-1,
240 };
241 static int seek_by_bytes=-1;
242 static int display_disable;
243 static int show_status = 1;
244 static int av_sync_type = AV_SYNC_AUDIO_MASTER;
245 static int64_t start_time = AV_NOPTS_VALUE;
246 static int64_t duration = AV_NOPTS_VALUE;
247 static int debug = 0;
248 static int debug_mv = 0;
249 static int step = 0;
250 static int thread_count = 1;
251 static int workaround_bugs = 1;
252 static int fast = 0;
253 static int genpts = 0;
254 static int lowres = 0;
255 static int idct = FF_IDCT_AUTO;
256 static enum AVDiscard skip_frame= AVDISCARD_DEFAULT;
257 static enum AVDiscard skip_idct= AVDISCARD_DEFAULT;
258 static enum AVDiscard skip_loop_filter= AVDISCARD_DEFAULT;
259 static int error_recognition = FF_ER_CAREFUL;
260 static int error_concealment = 3;
261 static int decoder_reorder_pts= -1;
262 static int autoexit;
263 static int exit_on_keydown;
264 static int exit_on_mousedown;
265 static int loop=1;
266 static int framedrop=1;
267 static int show_mode = SHOW_MODE_VIDEO;
268
269 static int rdftspeed=20;
270 #if CONFIG_AVFILTER
271 static char *vfilters = NULL;
272 #endif
273
274 /* current context */
275 static int is_full_screen;
276 static VideoState *cur_stream;
277 static int64_t audio_callback_time;
278
279 static AVPacket flush_pkt;
280
281 #define FF_ALLOC_EVENT   (SDL_USEREVENT)
282 #define FF_REFRESH_EVENT (SDL_USEREVENT + 1)
283 #define FF_QUIT_EVENT    (SDL_USEREVENT + 2)
284
285 static SDL_Surface *screen;
286
287 static int packet_queue_put(PacketQueue *q, AVPacket *pkt)
288 {
289     AVPacketList *pkt1;
290
291     /* duplicate the packet */
292     if (pkt!=&flush_pkt && av_dup_packet(pkt) < 0)
293         return -1;
294
295     pkt1 = av_malloc(sizeof(AVPacketList));
296     if (!pkt1)
297         return -1;
298     pkt1->pkt = *pkt;
299     pkt1->next = NULL;
300
301
302     SDL_LockMutex(q->mutex);
303
304     if (!q->last_pkt)
305
306         q->first_pkt = pkt1;
307     else
308         q->last_pkt->next = pkt1;
309     q->last_pkt = pkt1;
310     q->nb_packets++;
311     q->size += pkt1->pkt.size + sizeof(*pkt1);
312     /* XXX: should duplicate packet data in DV case */
313     SDL_CondSignal(q->cond);
314
315     SDL_UnlockMutex(q->mutex);
316     return 0;
317 }
318
319 /* packet queue handling */
320 static void packet_queue_init(PacketQueue *q)
321 {
322     memset(q, 0, sizeof(PacketQueue));
323     q->mutex = SDL_CreateMutex();
324     q->cond = SDL_CreateCond();
325     packet_queue_put(q, &flush_pkt);
326 }
327
328 static void packet_queue_flush(PacketQueue *q)
329 {
330     AVPacketList *pkt, *pkt1;
331
332     SDL_LockMutex(q->mutex);
333     for(pkt = q->first_pkt; pkt != NULL; pkt = pkt1) {
334         pkt1 = pkt->next;
335         av_free_packet(&pkt->pkt);
336         av_freep(&pkt);
337     }
338     q->last_pkt = NULL;
339     q->first_pkt = NULL;
340     q->nb_packets = 0;
341     q->size = 0;
342     SDL_UnlockMutex(q->mutex);
343 }
344
345 static void packet_queue_end(PacketQueue *q)
346 {
347     packet_queue_flush(q);
348     SDL_DestroyMutex(q->mutex);
349     SDL_DestroyCond(q->cond);
350 }
351
352 static void packet_queue_abort(PacketQueue *q)
353 {
354     SDL_LockMutex(q->mutex);
355
356     q->abort_request = 1;
357
358     SDL_CondSignal(q->cond);
359
360     SDL_UnlockMutex(q->mutex);
361 }
362
363 /* return < 0 if aborted, 0 if no packet and > 0 if packet.  */
364 static int packet_queue_get(PacketQueue *q, AVPacket *pkt, int block)
365 {
366     AVPacketList *pkt1;
367     int ret;
368
369     SDL_LockMutex(q->mutex);
370
371     for(;;) {
372         if (q->abort_request) {
373             ret = -1;
374             break;
375         }
376
377         pkt1 = q->first_pkt;
378         if (pkt1) {
379             q->first_pkt = pkt1->next;
380             if (!q->first_pkt)
381                 q->last_pkt = NULL;
382             q->nb_packets--;
383             q->size -= pkt1->pkt.size + sizeof(*pkt1);
384             *pkt = pkt1->pkt;
385             av_free(pkt1);
386             ret = 1;
387             break;
388         } else if (!block) {
389             ret = 0;
390             break;
391         } else {
392             SDL_CondWait(q->cond, q->mutex);
393         }
394     }
395     SDL_UnlockMutex(q->mutex);
396     return ret;
397 }
398
399 static inline void fill_rectangle(SDL_Surface *screen,
400                                   int x, int y, int w, int h, int color)
401 {
402     SDL_Rect rect;
403     rect.x = x;
404     rect.y = y;
405     rect.w = w;
406     rect.h = h;
407     SDL_FillRect(screen, &rect, color);
408 }
409
410 #if 0
411 /* draw only the border of a rectangle */
412 void fill_border(VideoState *s, int x, int y, int w, int h, int color)
413 {
414     int w1, w2, h1, h2;
415
416     /* fill the background */
417     w1 = x;
418     if (w1 < 0)
419         w1 = 0;
420     w2 = s->width - (x + w);
421     if (w2 < 0)
422         w2 = 0;
423     h1 = y;
424     if (h1 < 0)
425         h1 = 0;
426     h2 = s->height - (y + h);
427     if (h2 < 0)
428         h2 = 0;
429     fill_rectangle(screen,
430                    s->xleft, s->ytop,
431                    w1, s->height,
432                    color);
433     fill_rectangle(screen,
434                    s->xleft + s->width - w2, s->ytop,
435                    w2, s->height,
436                    color);
437     fill_rectangle(screen,
438                    s->xleft + w1, s->ytop,
439                    s->width - w1 - w2, h1,
440                    color);
441     fill_rectangle(screen,
442                    s->xleft + w1, s->ytop + s->height - h2,
443                    s->width - w1 - w2, h2,
444                    color);
445 }
446 #endif
447
448 #define ALPHA_BLEND(a, oldp, newp, s)\
449 ((((oldp << s) * (255 - (a))) + (newp * (a))) / (255 << s))
450
451 #define RGBA_IN(r, g, b, a, s)\
452 {\
453     unsigned int v = ((const uint32_t *)(s))[0];\
454     a = (v >> 24) & 0xff;\
455     r = (v >> 16) & 0xff;\
456     g = (v >> 8) & 0xff;\
457     b = v & 0xff;\
458 }
459
460 #define YUVA_IN(y, u, v, a, s, pal)\
461 {\
462     unsigned int val = ((const uint32_t *)(pal))[*(const uint8_t*)(s)];\
463     a = (val >> 24) & 0xff;\
464     y = (val >> 16) & 0xff;\
465     u = (val >> 8) & 0xff;\
466     v = val & 0xff;\
467 }
468
469 #define YUVA_OUT(d, y, u, v, a)\
470 {\
471     ((uint32_t *)(d))[0] = (a << 24) | (y << 16) | (u << 8) | v;\
472 }
473
474
475 #define BPP 1
476
477 static void blend_subrect(AVPicture *dst, const AVSubtitleRect *rect, int imgw, int imgh)
478 {
479     int wrap, wrap3, width2, skip2;
480     int y, u, v, a, u1, v1, a1, w, h;
481     uint8_t *lum, *cb, *cr;
482     const uint8_t *p;
483     const uint32_t *pal;
484     int dstx, dsty, dstw, dsth;
485
486     dstw = av_clip(rect->w, 0, imgw);
487     dsth = av_clip(rect->h, 0, imgh);
488     dstx = av_clip(rect->x, 0, imgw - dstw);
489     dsty = av_clip(rect->y, 0, imgh - dsth);
490     lum = dst->data[0] + dsty * dst->linesize[0];
491     cb = dst->data[1] + (dsty >> 1) * dst->linesize[1];
492     cr = dst->data[2] + (dsty >> 1) * dst->linesize[2];
493
494     width2 = ((dstw + 1) >> 1) + (dstx & ~dstw & 1);
495     skip2 = dstx >> 1;
496     wrap = dst->linesize[0];
497     wrap3 = rect->pict.linesize[0];
498     p = rect->pict.data[0];
499     pal = (const uint32_t *)rect->pict.data[1];  /* Now in YCrCb! */
500
501     if (dsty & 1) {
502         lum += dstx;
503         cb += skip2;
504         cr += skip2;
505
506         if (dstx & 1) {
507             YUVA_IN(y, u, v, a, p, pal);
508             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
509             cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
510             cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
511             cb++;
512             cr++;
513             lum++;
514             p += BPP;
515         }
516         for(w = dstw - (dstx & 1); w >= 2; w -= 2) {
517             YUVA_IN(y, u, v, a, p, pal);
518             u1 = u;
519             v1 = v;
520             a1 = a;
521             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
522
523             YUVA_IN(y, u, v, a, p + BPP, pal);
524             u1 += u;
525             v1 += v;
526             a1 += a;
527             lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
528             cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
529             cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
530             cb++;
531             cr++;
532             p += 2 * BPP;
533             lum += 2;
534         }
535         if (w) {
536             YUVA_IN(y, u, v, a, p, pal);
537             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
538             cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
539             cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
540             p++;
541             lum++;
542         }
543         p += wrap3 - dstw * BPP;
544         lum += wrap - dstw - dstx;
545         cb += dst->linesize[1] - width2 - skip2;
546         cr += dst->linesize[2] - width2 - skip2;
547     }
548     for(h = dsth - (dsty & 1); h >= 2; h -= 2) {
549         lum += dstx;
550         cb += skip2;
551         cr += skip2;
552
553         if (dstx & 1) {
554             YUVA_IN(y, u, v, a, p, pal);
555             u1 = u;
556             v1 = v;
557             a1 = a;
558             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
559             p += wrap3;
560             lum += wrap;
561             YUVA_IN(y, u, v, a, p, pal);
562             u1 += u;
563             v1 += v;
564             a1 += a;
565             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
566             cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
567             cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
568             cb++;
569             cr++;
570             p += -wrap3 + BPP;
571             lum += -wrap + 1;
572         }
573         for(w = dstw - (dstx & 1); w >= 2; w -= 2) {
574             YUVA_IN(y, u, v, a, p, pal);
575             u1 = u;
576             v1 = v;
577             a1 = a;
578             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
579
580             YUVA_IN(y, u, v, a, p + BPP, pal);
581             u1 += u;
582             v1 += v;
583             a1 += a;
584             lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
585             p += wrap3;
586             lum += wrap;
587
588             YUVA_IN(y, u, v, a, p, pal);
589             u1 += u;
590             v1 += v;
591             a1 += a;
592             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
593
594             YUVA_IN(y, u, v, a, p + BPP, pal);
595             u1 += u;
596             v1 += v;
597             a1 += a;
598             lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
599
600             cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 2);
601             cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 2);
602
603             cb++;
604             cr++;
605             p += -wrap3 + 2 * BPP;
606             lum += -wrap + 2;
607         }
608         if (w) {
609             YUVA_IN(y, u, v, a, p, pal);
610             u1 = u;
611             v1 = v;
612             a1 = a;
613             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
614             p += wrap3;
615             lum += wrap;
616             YUVA_IN(y, u, v, a, p, pal);
617             u1 += u;
618             v1 += v;
619             a1 += a;
620             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
621             cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
622             cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
623             cb++;
624             cr++;
625             p += -wrap3 + BPP;
626             lum += -wrap + 1;
627         }
628         p += wrap3 + (wrap3 - dstw * BPP);
629         lum += wrap + (wrap - dstw - dstx);
630         cb += dst->linesize[1] - width2 - skip2;
631         cr += dst->linesize[2] - width2 - skip2;
632     }
633     /* handle odd height */
634     if (h) {
635         lum += dstx;
636         cb += skip2;
637         cr += skip2;
638
639         if (dstx & 1) {
640             YUVA_IN(y, u, v, a, p, pal);
641             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
642             cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
643             cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
644             cb++;
645             cr++;
646             lum++;
647             p += BPP;
648         }
649         for(w = dstw - (dstx & 1); w >= 2; w -= 2) {
650             YUVA_IN(y, u, v, a, p, pal);
651             u1 = u;
652             v1 = v;
653             a1 = a;
654             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
655
656             YUVA_IN(y, u, v, a, p + BPP, pal);
657             u1 += u;
658             v1 += v;
659             a1 += a;
660             lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
661             cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u, 1);
662             cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v, 1);
663             cb++;
664             cr++;
665             p += 2 * BPP;
666             lum += 2;
667         }
668         if (w) {
669             YUVA_IN(y, u, v, a, p, pal);
670             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
671             cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
672             cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
673         }
674     }
675 }
676
677 static void free_subpicture(SubPicture *sp)
678 {
679     avsubtitle_free(&sp->sub);
680 }
681
682 static void video_image_display(VideoState *is)
683 {
684     VideoPicture *vp;
685     SubPicture *sp;
686     AVPicture pict;
687     float aspect_ratio;
688     int width, height, x, y;
689     SDL_Rect rect;
690     int i;
691
692     vp = &is->pictq[is->pictq_rindex];
693     if (vp->bmp) {
694 #if CONFIG_AVFILTER
695          if (vp->picref->video->pixel_aspect.num == 0)
696              aspect_ratio = 0;
697          else
698              aspect_ratio = av_q2d(vp->picref->video->pixel_aspect);
699 #else
700
701         /* XXX: use variable in the frame */
702         if (is->video_st->sample_aspect_ratio.num)
703             aspect_ratio = av_q2d(is->video_st->sample_aspect_ratio);
704         else if (is->video_st->codec->sample_aspect_ratio.num)
705             aspect_ratio = av_q2d(is->video_st->codec->sample_aspect_ratio);
706         else
707             aspect_ratio = 0;
708 #endif
709         if (aspect_ratio <= 0.0)
710             aspect_ratio = 1.0;
711         aspect_ratio *= (float)vp->width / (float)vp->height;
712
713         if (is->subtitle_st) {
714             if (is->subpq_size > 0) {
715                 sp = &is->subpq[is->subpq_rindex];
716
717                 if (vp->pts >= sp->pts + ((float) sp->sub.start_display_time / 1000)) {
718                     SDL_LockYUVOverlay (vp->bmp);
719
720                     pict.data[0] = vp->bmp->pixels[0];
721                     pict.data[1] = vp->bmp->pixels[2];
722                     pict.data[2] = vp->bmp->pixels[1];
723
724                     pict.linesize[0] = vp->bmp->pitches[0];
725                     pict.linesize[1] = vp->bmp->pitches[2];
726                     pict.linesize[2] = vp->bmp->pitches[1];
727
728                     for (i = 0; i < sp->sub.num_rects; i++)
729                         blend_subrect(&pict, sp->sub.rects[i],
730                                       vp->bmp->w, vp->bmp->h);
731
732                     SDL_UnlockYUVOverlay (vp->bmp);
733                 }
734             }
735         }
736
737
738         /* XXX: we suppose the screen has a 1.0 pixel ratio */
739         height = is->height;
740         width = ((int)rint(height * aspect_ratio)) & ~1;
741         if (width > is->width) {
742             width = is->width;
743             height = ((int)rint(width / aspect_ratio)) & ~1;
744         }
745         x = (is->width - width) / 2;
746         y = (is->height - height) / 2;
747         if (!is->no_background) {
748             /* fill the background */
749             //            fill_border(is, x, y, width, height, QERGB(0x00, 0x00, 0x00));
750         } else {
751             is->no_background = 0;
752         }
753         rect.x = is->xleft + x;
754         rect.y = is->ytop  + y;
755         rect.w = FFMAX(width,  1);
756         rect.h = FFMAX(height, 1);
757         SDL_DisplayYUVOverlay(vp->bmp, &rect);
758     } else {
759 #if 0
760         fill_rectangle(screen,
761                        is->xleft, is->ytop, is->width, is->height,
762                        QERGB(0x00, 0x00, 0x00));
763 #endif
764     }
765 }
766
767 /* get the current audio output buffer size, in samples. With SDL, we
768    cannot have a precise information */
769 static int audio_write_get_buf_size(VideoState *is)
770 {
771     return is->audio_buf_size - is->audio_buf_index;
772 }
773
774 static inline int compute_mod(int a, int b)
775 {
776     return a < 0 ? a%b + b : a%b;
777 }
778
779 static void video_audio_display(VideoState *s)
780 {
781     int i, i_start, x, y1, y, ys, delay, n, nb_display_channels;
782     int ch, channels, h, h2, bgcolor, fgcolor;
783     int16_t time_diff;
784     int rdft_bits, nb_freq;
785
786     for(rdft_bits=1; (1<<rdft_bits)<2*s->height; rdft_bits++)
787         ;
788     nb_freq= 1<<(rdft_bits-1);
789
790     /* compute display index : center on currently output samples */
791     channels = s->audio_st->codec->channels;
792     nb_display_channels = channels;
793     if (!s->paused) {
794         int data_used= s->show_mode == SHOW_MODE_WAVES ? s->width : (2*nb_freq);
795         n = 2 * channels;
796         delay = audio_write_get_buf_size(s);
797         delay /= n;
798
799         /* to be more precise, we take into account the time spent since
800            the last buffer computation */
801         if (audio_callback_time) {
802             time_diff = av_gettime() - audio_callback_time;
803             delay -= (time_diff * s->audio_st->codec->sample_rate) / 1000000;
804         }
805
806         delay += 2*data_used;
807         if (delay < data_used)
808             delay = data_used;
809
810         i_start= x = compute_mod(s->sample_array_index - delay * channels, SAMPLE_ARRAY_SIZE);
811         if (s->show_mode == SHOW_MODE_WAVES) {
812             h= INT_MIN;
813             for(i=0; i<1000; i+=channels){
814                 int idx= (SAMPLE_ARRAY_SIZE + x - i) % SAMPLE_ARRAY_SIZE;
815                 int a= s->sample_array[idx];
816                 int b= s->sample_array[(idx + 4*channels)%SAMPLE_ARRAY_SIZE];
817                 int c= s->sample_array[(idx + 5*channels)%SAMPLE_ARRAY_SIZE];
818                 int d= s->sample_array[(idx + 9*channels)%SAMPLE_ARRAY_SIZE];
819                 int score= a-d;
820                 if(h<score && (b^c)<0){
821                     h= score;
822                     i_start= idx;
823                 }
824             }
825         }
826
827         s->last_i_start = i_start;
828     } else {
829         i_start = s->last_i_start;
830     }
831
832     bgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0x00);
833     if (s->show_mode == SHOW_MODE_WAVES) {
834         fill_rectangle(screen,
835                        s->xleft, s->ytop, s->width, s->height,
836                        bgcolor);
837
838         fgcolor = SDL_MapRGB(screen->format, 0xff, 0xff, 0xff);
839
840         /* total height for one channel */
841         h = s->height / nb_display_channels;
842         /* graph height / 2 */
843         h2 = (h * 9) / 20;
844         for(ch = 0;ch < nb_display_channels; ch++) {
845             i = i_start + ch;
846             y1 = s->ytop + ch * h + (h / 2); /* position of center line */
847             for(x = 0; x < s->width; x++) {
848                 y = (s->sample_array[i] * h2) >> 15;
849                 if (y < 0) {
850                     y = -y;
851                     ys = y1 - y;
852                 } else {
853                     ys = y1;
854                 }
855                 fill_rectangle(screen,
856                                s->xleft + x, ys, 1, y,
857                                fgcolor);
858                 i += channels;
859                 if (i >= SAMPLE_ARRAY_SIZE)
860                     i -= SAMPLE_ARRAY_SIZE;
861             }
862         }
863
864         fgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0xff);
865
866         for(ch = 1;ch < nb_display_channels; ch++) {
867             y = s->ytop + ch * h;
868             fill_rectangle(screen,
869                            s->xleft, y, s->width, 1,
870                            fgcolor);
871         }
872         SDL_UpdateRect(screen, s->xleft, s->ytop, s->width, s->height);
873     }else{
874         nb_display_channels= FFMIN(nb_display_channels, 2);
875         if(rdft_bits != s->rdft_bits){
876             av_rdft_end(s->rdft);
877             av_free(s->rdft_data);
878             s->rdft = av_rdft_init(rdft_bits, DFT_R2C);
879             s->rdft_bits= rdft_bits;
880             s->rdft_data= av_malloc(4*nb_freq*sizeof(*s->rdft_data));
881         }
882         {
883             FFTSample *data[2];
884             for(ch = 0;ch < nb_display_channels; ch++) {
885                 data[ch] = s->rdft_data + 2*nb_freq*ch;
886                 i = i_start + ch;
887                 for(x = 0; x < 2*nb_freq; x++) {
888                     double w= (x-nb_freq)*(1.0/nb_freq);
889                     data[ch][x]= s->sample_array[i]*(1.0-w*w);
890                     i += channels;
891                     if (i >= SAMPLE_ARRAY_SIZE)
892                         i -= SAMPLE_ARRAY_SIZE;
893                 }
894                 av_rdft_calc(s->rdft, data[ch]);
895             }
896             //least efficient way to do this, we should of course directly access it but its more than fast enough
897             for(y=0; y<s->height; y++){
898                 double w= 1/sqrt(nb_freq);
899                 int a= sqrt(w*sqrt(data[0][2*y+0]*data[0][2*y+0] + data[0][2*y+1]*data[0][2*y+1]));
900                 int b= (nb_display_channels == 2 ) ? sqrt(w*sqrt(data[1][2*y+0]*data[1][2*y+0]
901                        + data[1][2*y+1]*data[1][2*y+1])) : a;
902                 a= FFMIN(a,255);
903                 b= FFMIN(b,255);
904                 fgcolor = SDL_MapRGB(screen->format, a, b, (a+b)/2);
905
906                 fill_rectangle(screen,
907                             s->xpos, s->height-y, 1, 1,
908                             fgcolor);
909             }
910         }
911         SDL_UpdateRect(screen, s->xpos, s->ytop, 1, s->height);
912         s->xpos++;
913         if(s->xpos >= s->width)
914             s->xpos= s->xleft;
915     }
916 }
917
918 static int video_open(VideoState *is){
919     int flags = SDL_HWSURFACE|SDL_ASYNCBLIT|SDL_HWACCEL;
920     int w,h;
921
922     if(is_full_screen) flags |= SDL_FULLSCREEN;
923     else               flags |= SDL_RESIZABLE;
924
925     if (is_full_screen && fs_screen_width) {
926         w = fs_screen_width;
927         h = fs_screen_height;
928     } else if(!is_full_screen && screen_width){
929         w = screen_width;
930         h = screen_height;
931 #if CONFIG_AVFILTER
932     }else if (is->out_video_filter && is->out_video_filter->inputs[0]){
933         w = is->out_video_filter->inputs[0]->w;
934         h = is->out_video_filter->inputs[0]->h;
935 #else
936     }else if (is->video_st && is->video_st->codec->width){
937         w = is->video_st->codec->width;
938         h = is->video_st->codec->height;
939 #endif
940     } else {
941         w = 640;
942         h = 480;
943     }
944     if(screen && is->width == screen->w && screen->w == w
945        && is->height== screen->h && screen->h == h)
946         return 0;
947
948 #ifndef __APPLE__
949     screen = SDL_SetVideoMode(w, h, 0, flags);
950 #else
951     /* setting bits_per_pixel = 0 or 32 causes blank video on OS X */
952     screen = SDL_SetVideoMode(w, h, 24, flags);
953 #endif
954     if (!screen) {
955         fprintf(stderr, "SDL: could not set video mode - exiting\n");
956         return -1;
957     }
958     if (!window_title)
959         window_title = input_filename;
960     SDL_WM_SetCaption(window_title, window_title);
961
962     is->width = screen->w;
963     is->height = screen->h;
964
965     return 0;
966 }
967
968 /* display the current picture, if any */
969 static void video_display(VideoState *is)
970 {
971     if(!screen)
972         video_open(cur_stream);
973     if (is->audio_st && is->show_mode != SHOW_MODE_VIDEO)
974         video_audio_display(is);
975     else if (is->video_st)
976         video_image_display(is);
977 }
978
979 static int refresh_thread(void *opaque)
980 {
981     VideoState *is= opaque;
982     while(!is->abort_request){
983         SDL_Event event;
984         event.type = FF_REFRESH_EVENT;
985         event.user.data1 = opaque;
986         if(!is->refresh){
987             is->refresh=1;
988             SDL_PushEvent(&event);
989         }
990         //FIXME ideally we should wait the correct time but SDLs event passing is so slow it would be silly
991         usleep(is->audio_st && is->show_mode != SHOW_MODE_VIDEO ? rdftspeed*1000 : 5000);
992     }
993     return 0;
994 }
995
996 /* get the current audio clock value */
997 static double get_audio_clock(VideoState *is)
998 {
999     double pts;
1000     int hw_buf_size, bytes_per_sec;
1001     pts = is->audio_clock;
1002     hw_buf_size = audio_write_get_buf_size(is);
1003     bytes_per_sec = 0;
1004     if (is->audio_st) {
1005         bytes_per_sec = is->audio_st->codec->sample_rate *
1006             2 * is->audio_st->codec->channels;
1007     }
1008     if (bytes_per_sec)
1009         pts -= (double)hw_buf_size / bytes_per_sec;
1010     return pts;
1011 }
1012
1013 /* get the current video clock value */
1014 static double get_video_clock(VideoState *is)
1015 {
1016     if (is->paused) {
1017         return is->video_current_pts;
1018     } else {
1019         return is->video_current_pts_drift + av_gettime() / 1000000.0;
1020     }
1021 }
1022
1023 /* get the current external clock value */
1024 static double get_external_clock(VideoState *is)
1025 {
1026     int64_t ti;
1027     ti = av_gettime();
1028     return is->external_clock + ((ti - is->external_clock_time) * 1e-6);
1029 }
1030
1031 /* get the current master clock value */
1032 static double get_master_clock(VideoState *is)
1033 {
1034     double val;
1035
1036     if (is->av_sync_type == AV_SYNC_VIDEO_MASTER) {
1037         if (is->video_st)
1038             val = get_video_clock(is);
1039         else
1040             val = get_audio_clock(is);
1041     } else if (is->av_sync_type == AV_SYNC_AUDIO_MASTER) {
1042         if (is->audio_st)
1043             val = get_audio_clock(is);
1044         else
1045             val = get_video_clock(is);
1046     } else {
1047         val = get_external_clock(is);
1048     }
1049     return val;
1050 }
1051
1052 /* seek in the stream */
1053 static void stream_seek(VideoState *is, int64_t pos, int64_t rel, int seek_by_bytes)
1054 {
1055     if (!is->seek_req) {
1056         is->seek_pos = pos;
1057         is->seek_rel = rel;
1058         is->seek_flags &= ~AVSEEK_FLAG_BYTE;
1059         if (seek_by_bytes)
1060             is->seek_flags |= AVSEEK_FLAG_BYTE;
1061         is->seek_req = 1;
1062     }
1063 }
1064
1065 /* pause or resume the video */
1066 static void stream_toggle_pause(VideoState *is)
1067 {
1068     if (is->paused) {
1069         is->frame_timer += av_gettime() / 1000000.0 + is->video_current_pts_drift - is->video_current_pts;
1070         if(is->read_pause_return != AVERROR(ENOSYS)){
1071             is->video_current_pts = is->video_current_pts_drift + av_gettime() / 1000000.0;
1072         }
1073         is->video_current_pts_drift = is->video_current_pts - av_gettime() / 1000000.0;
1074     }
1075     is->paused = !is->paused;
1076 }
1077
1078 static double compute_target_time(double frame_current_pts, VideoState *is)
1079 {
1080     double delay, sync_threshold, diff;
1081
1082     /* compute nominal delay */
1083     delay = frame_current_pts - is->frame_last_pts;
1084     if (delay <= 0 || delay >= 10.0) {
1085         /* if incorrect delay, use previous one */
1086         delay = is->frame_last_delay;
1087     } else {
1088         is->frame_last_delay = delay;
1089     }
1090     is->frame_last_pts = frame_current_pts;
1091
1092     /* update delay to follow master synchronisation source */
1093     if (((is->av_sync_type == AV_SYNC_AUDIO_MASTER && is->audio_st) ||
1094          is->av_sync_type == AV_SYNC_EXTERNAL_CLOCK)) {
1095         /* if video is slave, we try to correct big delays by
1096            duplicating or deleting a frame */
1097         diff = get_video_clock(is) - get_master_clock(is);
1098
1099         /* skip or repeat frame. We take into account the
1100            delay to compute the threshold. I still don't know
1101            if it is the best guess */
1102         sync_threshold = FFMAX(AV_SYNC_THRESHOLD, delay);
1103         if (fabs(diff) < AV_NOSYNC_THRESHOLD) {
1104             if (diff <= -sync_threshold)
1105                 delay = 0;
1106             else if (diff >= sync_threshold)
1107                 delay = 2 * delay;
1108         }
1109     }
1110     is->frame_timer += delay;
1111 #if defined(DEBUG_SYNC)
1112     printf("video: delay=%0.3f actual_delay=%0.3f pts=%0.3f A-V=%f\n",
1113             delay, actual_delay, frame_current_pts, -diff);
1114 #endif
1115
1116     return is->frame_timer;
1117 }
1118
1119 /* called to display each frame */
1120 static void video_refresh_timer(void *opaque)
1121 {
1122     VideoState *is = opaque;
1123     VideoPicture *vp;
1124
1125     SubPicture *sp, *sp2;
1126
1127     if (is->video_st) {
1128 retry:
1129         if (is->pictq_size == 0) {
1130             //nothing to do, no picture to display in the que
1131         } else {
1132             double time= av_gettime()/1000000.0;
1133             double next_target;
1134             /* dequeue the picture */
1135             vp = &is->pictq[is->pictq_rindex];
1136
1137             if(time < vp->target_clock)
1138                 return;
1139             /* update current video pts */
1140             is->video_current_pts = vp->pts;
1141             is->video_current_pts_drift = is->video_current_pts - time;
1142             is->video_current_pos = vp->pos;
1143             if(is->pictq_size > 1){
1144                 VideoPicture *nextvp= &is->pictq[(is->pictq_rindex+1)%VIDEO_PICTURE_QUEUE_SIZE];
1145                 assert(nextvp->target_clock >= vp->target_clock);
1146                 next_target= nextvp->target_clock;
1147             }else{
1148                 next_target= vp->target_clock + is->video_clock - vp->pts; //FIXME pass durations cleanly
1149             }
1150             if(framedrop && time > next_target){
1151                 is->skip_frames *= 1.0 + FRAME_SKIP_FACTOR;
1152                 if(is->pictq_size > 1 || time > next_target + 0.5){
1153                     /* update queue size and signal for next picture */
1154                     if (++is->pictq_rindex == VIDEO_PICTURE_QUEUE_SIZE)
1155                         is->pictq_rindex = 0;
1156
1157                     SDL_LockMutex(is->pictq_mutex);
1158                     is->pictq_size--;
1159                     SDL_CondSignal(is->pictq_cond);
1160                     SDL_UnlockMutex(is->pictq_mutex);
1161                     goto retry;
1162                 }
1163             }
1164
1165             if(is->subtitle_st) {
1166                 if (is->subtitle_stream_changed) {
1167                     SDL_LockMutex(is->subpq_mutex);
1168
1169                     while (is->subpq_size) {
1170                         free_subpicture(&is->subpq[is->subpq_rindex]);
1171
1172                         /* update queue size and signal for next picture */
1173                         if (++is->subpq_rindex == SUBPICTURE_QUEUE_SIZE)
1174                             is->subpq_rindex = 0;
1175
1176                         is->subpq_size--;
1177                     }
1178                     is->subtitle_stream_changed = 0;
1179
1180                     SDL_CondSignal(is->subpq_cond);
1181                     SDL_UnlockMutex(is->subpq_mutex);
1182                 } else {
1183                     if (is->subpq_size > 0) {
1184                         sp = &is->subpq[is->subpq_rindex];
1185
1186                         if (is->subpq_size > 1)
1187                             sp2 = &is->subpq[(is->subpq_rindex + 1) % SUBPICTURE_QUEUE_SIZE];
1188                         else
1189                             sp2 = NULL;
1190
1191                         if ((is->video_current_pts > (sp->pts + ((float) sp->sub.end_display_time / 1000)))
1192                                 || (sp2 && is->video_current_pts > (sp2->pts + ((float) sp2->sub.start_display_time / 1000))))
1193                         {
1194                             free_subpicture(sp);
1195
1196                             /* update queue size and signal for next picture */
1197                             if (++is->subpq_rindex == SUBPICTURE_QUEUE_SIZE)
1198                                 is->subpq_rindex = 0;
1199
1200                             SDL_LockMutex(is->subpq_mutex);
1201                             is->subpq_size--;
1202                             SDL_CondSignal(is->subpq_cond);
1203                             SDL_UnlockMutex(is->subpq_mutex);
1204                         }
1205                     }
1206                 }
1207             }
1208
1209             /* display picture */
1210             if (!display_disable)
1211                 video_display(is);
1212
1213             /* update queue size and signal for next picture */
1214             if (++is->pictq_rindex == VIDEO_PICTURE_QUEUE_SIZE)
1215                 is->pictq_rindex = 0;
1216
1217             SDL_LockMutex(is->pictq_mutex);
1218             is->pictq_size--;
1219             SDL_CondSignal(is->pictq_cond);
1220             SDL_UnlockMutex(is->pictq_mutex);
1221         }
1222     } else if (is->audio_st) {
1223         /* draw the next audio frame */
1224
1225         /* if only audio stream, then display the audio bars (better
1226            than nothing, just to test the implementation */
1227
1228         /* display picture */
1229         if (!display_disable)
1230             video_display(is);
1231     }
1232     if (show_status) {
1233         static int64_t last_time;
1234         int64_t cur_time;
1235         int aqsize, vqsize, sqsize;
1236         double av_diff;
1237
1238         cur_time = av_gettime();
1239         if (!last_time || (cur_time - last_time) >= 30000) {
1240             aqsize = 0;
1241             vqsize = 0;
1242             sqsize = 0;
1243             if (is->audio_st)
1244                 aqsize = is->audioq.size;
1245             if (is->video_st)
1246                 vqsize = is->videoq.size;
1247             if (is->subtitle_st)
1248                 sqsize = is->subtitleq.size;
1249             av_diff = 0;
1250             if (is->audio_st && is->video_st)
1251                 av_diff = get_audio_clock(is) - get_video_clock(is);
1252             printf("%7.2f A-V:%7.3f s:%3.1f aq=%5dKB vq=%5dKB sq=%5dB f=%"PRId64"/%"PRId64"   \r",
1253                    get_master_clock(is),
1254                    av_diff,
1255                    FFMAX(is->skip_frames-1, 0),
1256                    aqsize / 1024,
1257                    vqsize / 1024,
1258                    sqsize,
1259                    is->video_st ? is->video_st->codec->pts_correction_num_faulty_dts : 0,
1260                    is->video_st ? is->video_st->codec->pts_correction_num_faulty_pts : 0);
1261             fflush(stdout);
1262             last_time = cur_time;
1263         }
1264     }
1265 }
1266
1267 static void stream_close(VideoState *is)
1268 {
1269     VideoPicture *vp;
1270     int i;
1271     /* XXX: use a special url_shutdown call to abort parse cleanly */
1272     is->abort_request = 1;
1273     SDL_WaitThread(is->parse_tid, NULL);
1274     SDL_WaitThread(is->refresh_tid, NULL);
1275
1276     /* free all pictures */
1277     for(i=0;i<VIDEO_PICTURE_QUEUE_SIZE; i++) {
1278         vp = &is->pictq[i];
1279 #if CONFIG_AVFILTER
1280         if (vp->picref) {
1281             avfilter_unref_buffer(vp->picref);
1282             vp->picref = NULL;
1283         }
1284 #endif
1285         if (vp->bmp) {
1286             SDL_FreeYUVOverlay(vp->bmp);
1287             vp->bmp = NULL;
1288         }
1289     }
1290     SDL_DestroyMutex(is->pictq_mutex);
1291     SDL_DestroyCond(is->pictq_cond);
1292     SDL_DestroyMutex(is->subpq_mutex);
1293     SDL_DestroyCond(is->subpq_cond);
1294 #if !CONFIG_AVFILTER
1295     if (is->img_convert_ctx)
1296         sws_freeContext(is->img_convert_ctx);
1297 #endif
1298     av_free(is);
1299 }
1300
1301 static void do_exit(void)
1302 {
1303     if (cur_stream) {
1304         stream_close(cur_stream);
1305         cur_stream = NULL;
1306     }
1307     uninit_opts();
1308 #if CONFIG_AVFILTER
1309     avfilter_uninit();
1310 #endif
1311     if (show_status)
1312         printf("\n");
1313     SDL_Quit();
1314     av_log(NULL, AV_LOG_QUIET, "");
1315     exit(0);
1316 }
1317
1318 /* allocate a picture (needs to do that in main thread to avoid
1319    potential locking problems */
1320 static void alloc_picture(void *opaque)
1321 {
1322     VideoState *is = opaque;
1323     VideoPicture *vp;
1324
1325     vp = &is->pictq[is->pictq_windex];
1326
1327     if (vp->bmp)
1328         SDL_FreeYUVOverlay(vp->bmp);
1329
1330 #if CONFIG_AVFILTER
1331     if (vp->picref)
1332         avfilter_unref_buffer(vp->picref);
1333     vp->picref = NULL;
1334
1335     vp->width   = is->out_video_filter->inputs[0]->w;
1336     vp->height  = is->out_video_filter->inputs[0]->h;
1337     vp->pix_fmt = is->out_video_filter->inputs[0]->format;
1338 #else
1339     vp->width   = is->video_st->codec->width;
1340     vp->height  = is->video_st->codec->height;
1341     vp->pix_fmt = is->video_st->codec->pix_fmt;
1342 #endif
1343
1344     vp->bmp = SDL_CreateYUVOverlay(vp->width, vp->height,
1345                                    SDL_YV12_OVERLAY,
1346                                    screen);
1347     if (!vp->bmp || vp->bmp->pitches[0] < vp->width) {
1348         /* SDL allocates a buffer smaller than requested if the video
1349          * overlay hardware is unable to support the requested size. */
1350         fprintf(stderr, "Error: the video system does not support an image\n"
1351                         "size of %dx%d pixels. Try using -lowres or -vf \"scale=w:h\"\n"
1352                         "to reduce the image size.\n", vp->width, vp->height );
1353         do_exit();
1354     }
1355
1356     SDL_LockMutex(is->pictq_mutex);
1357     vp->allocated = 1;
1358     SDL_CondSignal(is->pictq_cond);
1359     SDL_UnlockMutex(is->pictq_mutex);
1360 }
1361
1362 /**
1363  *
1364  * @param pts the dts of the pkt / pts of the frame and guessed if not known
1365  */
1366 static int queue_picture(VideoState *is, AVFrame *src_frame, double pts, int64_t pos)
1367 {
1368     VideoPicture *vp;
1369
1370     /* wait until we have space to put a new picture */
1371     SDL_LockMutex(is->pictq_mutex);
1372
1373     if(is->pictq_size>=VIDEO_PICTURE_QUEUE_SIZE && !is->refresh)
1374         is->skip_frames= FFMAX(1.0 - FRAME_SKIP_FACTOR, is->skip_frames * (1.0-FRAME_SKIP_FACTOR));
1375
1376     while (is->pictq_size >= VIDEO_PICTURE_QUEUE_SIZE &&
1377            !is->videoq.abort_request) {
1378         SDL_CondWait(is->pictq_cond, is->pictq_mutex);
1379     }
1380     SDL_UnlockMutex(is->pictq_mutex);
1381
1382     if (is->videoq.abort_request)
1383         return -1;
1384
1385     vp = &is->pictq[is->pictq_windex];
1386
1387     /* alloc or resize hardware picture buffer */
1388     if (!vp->bmp ||
1389 #if CONFIG_AVFILTER
1390         vp->width  != is->out_video_filter->inputs[0]->w ||
1391         vp->height != is->out_video_filter->inputs[0]->h) {
1392 #else
1393         vp->width != is->video_st->codec->width ||
1394         vp->height != is->video_st->codec->height) {
1395 #endif
1396         SDL_Event event;
1397
1398         vp->allocated = 0;
1399
1400         /* the allocation must be done in the main thread to avoid
1401            locking problems */
1402         event.type = FF_ALLOC_EVENT;
1403         event.user.data1 = is;
1404         SDL_PushEvent(&event);
1405
1406         /* wait until the picture is allocated */
1407         SDL_LockMutex(is->pictq_mutex);
1408         while (!vp->allocated && !is->videoq.abort_request) {
1409             SDL_CondWait(is->pictq_cond, is->pictq_mutex);
1410         }
1411         SDL_UnlockMutex(is->pictq_mutex);
1412
1413         if (is->videoq.abort_request)
1414             return -1;
1415     }
1416
1417     /* if the frame is not skipped, then display it */
1418     if (vp->bmp) {
1419         AVPicture pict;
1420 #if CONFIG_AVFILTER
1421         if(vp->picref)
1422             avfilter_unref_buffer(vp->picref);
1423         vp->picref = src_frame->opaque;
1424 #endif
1425
1426         /* get a pointer on the bitmap */
1427         SDL_LockYUVOverlay (vp->bmp);
1428
1429         memset(&pict,0,sizeof(AVPicture));
1430         pict.data[0] = vp->bmp->pixels[0];
1431         pict.data[1] = vp->bmp->pixels[2];
1432         pict.data[2] = vp->bmp->pixels[1];
1433
1434         pict.linesize[0] = vp->bmp->pitches[0];
1435         pict.linesize[1] = vp->bmp->pitches[2];
1436         pict.linesize[2] = vp->bmp->pitches[1];
1437
1438 #if CONFIG_AVFILTER
1439         //FIXME use direct rendering
1440         av_picture_copy(&pict, (AVPicture *)src_frame,
1441                         vp->pix_fmt, vp->width, vp->height);
1442 #else
1443         sws_flags = av_get_int(sws_opts, "sws_flags", NULL);
1444         is->img_convert_ctx = sws_getCachedContext(is->img_convert_ctx,
1445             vp->width, vp->height, vp->pix_fmt, vp->width, vp->height,
1446             PIX_FMT_YUV420P, sws_flags, NULL, NULL, NULL);
1447         if (is->img_convert_ctx == NULL) {
1448             fprintf(stderr, "Cannot initialize the conversion context\n");
1449             exit(1);
1450         }
1451         sws_scale(is->img_convert_ctx, src_frame->data, src_frame->linesize,
1452                   0, vp->height, pict.data, pict.linesize);
1453 #endif
1454         /* update the bitmap content */
1455         SDL_UnlockYUVOverlay(vp->bmp);
1456
1457         vp->pts = pts;
1458         vp->pos = pos;
1459
1460         /* now we can update the picture count */
1461         if (++is->pictq_windex == VIDEO_PICTURE_QUEUE_SIZE)
1462             is->pictq_windex = 0;
1463         SDL_LockMutex(is->pictq_mutex);
1464         vp->target_clock= compute_target_time(vp->pts, is);
1465
1466         is->pictq_size++;
1467         SDL_UnlockMutex(is->pictq_mutex);
1468     }
1469     return 0;
1470 }
1471
1472 /**
1473  * compute the exact PTS for the picture if it is omitted in the stream
1474  * @param pts1 the dts of the pkt / pts of the frame
1475  */
1476 static int output_picture(VideoState *is, AVFrame *src_frame, double pts1, int64_t pos)
1477 {
1478     double frame_delay, pts;
1479
1480     pts = pts1;
1481
1482     if (pts != 0) {
1483         /* update video clock with pts, if present */
1484         is->video_clock = pts;
1485     } else {
1486         pts = is->video_clock;
1487     }
1488     /* update video clock for next frame */
1489     frame_delay = av_q2d(is->video_st->codec->time_base);
1490     /* for MPEG2, the frame can be repeated, so we update the
1491        clock accordingly */
1492     frame_delay += src_frame->repeat_pict * (frame_delay * 0.5);
1493     is->video_clock += frame_delay;
1494
1495 #if defined(DEBUG_SYNC) && 0
1496     printf("frame_type=%c clock=%0.3f pts=%0.3f\n",
1497            av_get_pict_type_char(src_frame->pict_type), pts, pts1);
1498 #endif
1499     return queue_picture(is, src_frame, pts, pos);
1500 }
1501
1502 static int get_video_frame(VideoState *is, AVFrame *frame, int64_t *pts, AVPacket *pkt)
1503 {
1504     int len1, got_picture, i;
1505
1506     if (packet_queue_get(&is->videoq, pkt, 1) < 0)
1507         return -1;
1508
1509     if (pkt->data == flush_pkt.data) {
1510         avcodec_flush_buffers(is->video_st->codec);
1511
1512         SDL_LockMutex(is->pictq_mutex);
1513         //Make sure there are no long delay timers (ideally we should just flush the que but thats harder)
1514         for (i = 0; i < VIDEO_PICTURE_QUEUE_SIZE; i++) {
1515             is->pictq[i].target_clock= 0;
1516         }
1517         while (is->pictq_size && !is->videoq.abort_request) {
1518             SDL_CondWait(is->pictq_cond, is->pictq_mutex);
1519         }
1520         is->video_current_pos = -1;
1521         SDL_UnlockMutex(is->pictq_mutex);
1522
1523         is->frame_last_pts = AV_NOPTS_VALUE;
1524         is->frame_last_delay = 0;
1525         is->frame_timer = (double)av_gettime() / 1000000.0;
1526         is->skip_frames = 1;
1527         is->skip_frames_index = 0;
1528         return 0;
1529     }
1530
1531     len1 = avcodec_decode_video2(is->video_st->codec,
1532                                  frame, &got_picture,
1533                                  pkt);
1534
1535     if (got_picture) {
1536         if (decoder_reorder_pts == -1) {
1537             *pts = frame->best_effort_timestamp;
1538         } else if (decoder_reorder_pts) {
1539             *pts = frame->pkt_pts;
1540         } else {
1541             *pts = frame->pkt_dts;
1542         }
1543
1544         if (*pts == AV_NOPTS_VALUE) {
1545             *pts = 0;
1546         }
1547
1548         is->skip_frames_index += 1;
1549         if(is->skip_frames_index >= is->skip_frames){
1550             is->skip_frames_index -= FFMAX(is->skip_frames, 1.0);
1551             return 1;
1552         }
1553
1554     }
1555     return 0;
1556 }
1557
1558 #if CONFIG_AVFILTER
1559 typedef struct {
1560     VideoState *is;
1561     AVFrame *frame;
1562     int use_dr1;
1563 } FilterPriv;
1564
1565 static int input_get_buffer(AVCodecContext *codec, AVFrame *pic)
1566 {
1567     AVFilterContext *ctx = codec->opaque;
1568     AVFilterBufferRef  *ref;
1569     int perms = AV_PERM_WRITE;
1570     int i, w, h, stride[4];
1571     unsigned edge;
1572     int pixel_size;
1573
1574     av_assert0(codec->flags & CODEC_FLAG_EMU_EDGE);
1575
1576     if (codec->codec->capabilities & CODEC_CAP_NEG_LINESIZES)
1577         perms |= AV_PERM_NEG_LINESIZES;
1578
1579     if(pic->buffer_hints & FF_BUFFER_HINTS_VALID) {
1580         if(pic->buffer_hints & FF_BUFFER_HINTS_READABLE) perms |= AV_PERM_READ;
1581         if(pic->buffer_hints & FF_BUFFER_HINTS_PRESERVE) perms |= AV_PERM_PRESERVE;
1582         if(pic->buffer_hints & FF_BUFFER_HINTS_REUSABLE) perms |= AV_PERM_REUSE2;
1583     }
1584     if(pic->reference) perms |= AV_PERM_READ | AV_PERM_PRESERVE;
1585
1586     w = codec->width;
1587     h = codec->height;
1588
1589     if(av_image_check_size(w, h, 0, codec))
1590         return -1;
1591
1592     avcodec_align_dimensions2(codec, &w, &h, stride);
1593     edge = codec->flags & CODEC_FLAG_EMU_EDGE ? 0 : avcodec_get_edge_width();
1594     w += edge << 1;
1595     h += edge << 1;
1596
1597     if(!(ref = avfilter_get_video_buffer(ctx->outputs[0], perms, w, h)))
1598         return -1;
1599
1600     pixel_size = av_pix_fmt_descriptors[ref->format].comp[0].step_minus1+1;
1601     ref->video->w = codec->width;
1602     ref->video->h = codec->height;
1603     for(i = 0; i < 4; i ++) {
1604         unsigned hshift = (i == 1 || i == 2) ? av_pix_fmt_descriptors[ref->format].log2_chroma_w : 0;
1605         unsigned vshift = (i == 1 || i == 2) ? av_pix_fmt_descriptors[ref->format].log2_chroma_h : 0;
1606
1607         if (ref->data[i]) {
1608             ref->data[i]    += ((edge * pixel_size) >> hshift) + ((edge * ref->linesize[i]) >> vshift);
1609         }
1610         pic->data[i]     = ref->data[i];
1611         pic->linesize[i] = ref->linesize[i];
1612     }
1613     pic->opaque = ref;
1614     pic->age    = INT_MAX;
1615     pic->type   = FF_BUFFER_TYPE_USER;
1616     pic->reordered_opaque = codec->reordered_opaque;
1617     if(codec->pkt) pic->pkt_pts = codec->pkt->pts;
1618     else           pic->pkt_pts = AV_NOPTS_VALUE;
1619     return 0;
1620 }
1621
1622 static void input_release_buffer(AVCodecContext *codec, AVFrame *pic)
1623 {
1624     memset(pic->data, 0, sizeof(pic->data));
1625     avfilter_unref_buffer(pic->opaque);
1626 }
1627
1628 static int input_reget_buffer(AVCodecContext *codec, AVFrame *pic)
1629 {
1630     AVFilterBufferRef *ref = pic->opaque;
1631
1632     if (pic->data[0] == NULL) {
1633         pic->buffer_hints |= FF_BUFFER_HINTS_READABLE;
1634         return codec->get_buffer(codec, pic);
1635     }
1636
1637     if ((codec->width != ref->video->w) || (codec->height != ref->video->h) ||
1638         (codec->pix_fmt != ref->format)) {
1639         av_log(codec, AV_LOG_ERROR, "Picture properties changed.\n");
1640         return -1;
1641     }
1642
1643     pic->reordered_opaque = codec->reordered_opaque;
1644     if(codec->pkt) pic->pkt_pts = codec->pkt->pts;
1645     else           pic->pkt_pts = AV_NOPTS_VALUE;
1646     return 0;
1647 }
1648
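     /* input_init() hooks the three callbacks above into the video decoder when it
        supports direct rendering (CODEC_CAP_DR1).  The decoder then writes its frames
        straight into avfilter buffers, so input_request_frame() can simply add a
        reference instead of copying each picture into the filter graph. */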
1649 static int input_init(AVFilterContext *ctx, const char *args, void *opaque)
1650 {
1651     FilterPriv *priv = ctx->priv;
1652     AVCodecContext *codec;
1653     if(!opaque) return -1;
1654
1655     priv->is = opaque;
1656     codec    = priv->is->video_st->codec;
1657     codec->opaque = ctx;
1658     if((codec->codec->capabilities & CODEC_CAP_DR1)
1659     ) {
1660         codec->flags |= CODEC_FLAG_EMU_EDGE;
1661         priv->use_dr1 = 1;
1662         codec->get_buffer     = input_get_buffer;
1663         codec->release_buffer = input_release_buffer;
1664         codec->reget_buffer   = input_reget_buffer;
1665         codec->thread_safe_callbacks = 1;
1666     }
1667
1668     priv->frame = avcodec_alloc_frame();
1669
1670     return 0;
1671 }
1672
1673 static void input_uninit(AVFilterContext *ctx)
1674 {
1675     FilterPriv *priv = ctx->priv;
1676     av_free(priv->frame);
1677 }
1678
1679 static int input_request_frame(AVFilterLink *link)
1680 {
1681     FilterPriv *priv = link->src->priv;
1682     AVFilterBufferRef *picref;
1683     int64_t pts = 0;
1684     AVPacket pkt;
1685     int ret;
1686
1687     while (!(ret = get_video_frame(priv->is, priv->frame, &pts, &pkt)))
1688         av_free_packet(&pkt);
1689     if (ret < 0)
1690         return -1;
1691
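         /* With DR1 the decoded frame already lives in an avfilter buffer
            (frame->opaque was set in input_get_buffer), so only a new reference is
            taken; otherwise a buffer is allocated here and the picture is copied. */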
1692     if(priv->use_dr1) {
1693         picref = avfilter_ref_buffer(priv->frame->opaque, ~0);
1694     } else {
1695         picref = avfilter_get_video_buffer(link, AV_PERM_WRITE, link->w, link->h);
1696         av_image_copy(picref->data, picref->linesize,
1697                       priv->frame->data, priv->frame->linesize,
1698                       picref->format, link->w, link->h);
1699     }
1700     av_free_packet(&pkt);
1701
1702     picref->pts = pts;
1703     picref->pos = pkt.pos;
1704     picref->video->pixel_aspect = priv->is->video_st->codec->sample_aspect_ratio;
1705     avfilter_start_frame(link, picref);
1706     avfilter_draw_slice(link, 0, link->h, 1);
1707     avfilter_end_frame(link);
1708
1709     return 0;
1710 }
1711
1712 static int input_query_formats(AVFilterContext *ctx)
1713 {
1714     FilterPriv *priv = ctx->priv;
1715     enum PixelFormat pix_fmts[] = {
1716         priv->is->video_st->codec->pix_fmt, PIX_FMT_NONE
1717     };
1718
1719     avfilter_set_common_formats(ctx, avfilter_make_format_list(pix_fmts));
1720     return 0;
1721 }
1722
1723 static int input_config_props(AVFilterLink *link)
1724 {
1725     FilterPriv *priv  = link->src->priv;
1726     AVCodecContext *c = priv->is->video_st->codec;
1727
1728     link->w = c->width;
1729     link->h = c->height;
1730     link->time_base = priv->is->video_st->time_base;
1731
1732     return 0;
1733 }
1734
1735 static AVFilter input_filter =
1736 {
1737     .name      = "ffplay_input",
1738
1739     .priv_size = sizeof(FilterPriv),
1740
1741     .init      = input_init,
1742     .uninit    = input_uninit,
1743
1744     .query_formats = input_query_formats,
1745
1746     .inputs    = (AVFilterPad[]) {{ .name = NULL }},
1747     .outputs   = (AVFilterPad[]) {{ .name = "default",
1748                                     .type = AVMEDIA_TYPE_VIDEO,
1749                                     .request_frame = input_request_frame,
1750                                     .config_props  = input_config_props, },
1751                                   { .name = NULL }},
1752 };
1753
1754 static int configure_video_filters(AVFilterGraph *graph, VideoState *is, const char *vfilters)
1755 {
1756     char sws_flags_str[128];
1757     int ret;
1758     FFSinkContext ffsink_ctx = { .pix_fmt = PIX_FMT_YUV420P };
1759     AVFilterContext *filt_src = NULL, *filt_out = NULL;
1760     snprintf(sws_flags_str, sizeof(sws_flags_str), "flags=%d", sws_flags);
1761     graph->scale_sws_opts = av_strdup(sws_flags_str);
1762
1763     if ((ret = avfilter_graph_create_filter(&filt_src, &input_filter, "src",
1764                                             NULL, is, graph)) < 0)
1765         goto the_end;
1766     if ((ret = avfilter_graph_create_filter(&filt_out, &ffsink, "out",
1767                                             NULL, &ffsink_ctx, graph)) < 0)
1768         goto the_end;
1769
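         /* Graph layout: ffplay_input ("src") -> [optional -vf filter chain] -> ffsink ("out").
            The labels "in" and "out" below are the endpoints that an unlabeled -vf
            chain gets linked to by avfilter_graph_parse(). */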
1770     if(vfilters) {
1771         AVFilterInOut *outputs = av_malloc(sizeof(AVFilterInOut));
1772         AVFilterInOut *inputs  = av_malloc(sizeof(AVFilterInOut));
1773
1774         outputs->name    = av_strdup("in");
1775         outputs->filter_ctx = filt_src;
1776         outputs->pad_idx = 0;
1777         outputs->next    = NULL;
1778
1779         inputs->name    = av_strdup("out");
1780         inputs->filter_ctx = filt_out;
1781         inputs->pad_idx = 0;
1782         inputs->next    = NULL;
1783
1784         if ((ret = avfilter_graph_parse(graph, vfilters, inputs, outputs, NULL)) < 0)
1785             goto the_end;
1786         av_freep(&vfilters);
1787     } else {
1788         if ((ret = avfilter_link(filt_src, 0, filt_out, 0)) < 0)
1789             goto the_end;
1790     }
1791
1792     if ((ret = avfilter_graph_config(graph, NULL)) < 0)
1793         goto the_end;
1794
1795     is->out_video_filter = filt_out;
1796 the_end:
1797     return ret;
1798 }
1799
1800 #endif  /* CONFIG_AVFILTER */
1801
1802 static int video_thread(void *arg)
1803 {
1804     VideoState *is = arg;
1805     AVFrame *frame= avcodec_alloc_frame();
1806     int64_t pts_int, pos;
1807     double pts;
1808     int ret;
1809
1810 #if CONFIG_AVFILTER
1811     AVFilterGraph *graph = avfilter_graph_alloc();
1812     AVFilterContext *filt_out = NULL;
1813
1814     if ((ret = configure_video_filters(graph, is, vfilters)) < 0)
1815         goto the_end;
1816     filt_out = is->out_video_filter;
1817 #endif
1818
1819     for(;;) {
1820 #if !CONFIG_AVFILTER
1821         AVPacket pkt;
1822 #else
1823         AVFilterBufferRef *picref;
1824         AVRational tb;
1825 #endif
1826         while (is->paused && !is->videoq.abort_request)
1827             SDL_Delay(10);
1828 #if CONFIG_AVFILTER
1829         ret = get_filtered_video_frame(filt_out, frame, &picref, &tb);
1830         if (picref) {
1831             pts_int = picref->pts;
1832             pos     = picref->pos;
1833             frame->opaque = picref;
1834         }
1835
1836         if (av_cmp_q(tb, is->video_st->time_base)) {
1837             av_unused int64_t pts1 = pts_int;
1838             pts_int = av_rescale_q(pts_int, tb, is->video_st->time_base);
1839             av_dlog(NULL, "video_thread(): "
1840                     "tb:%d/%d pts:%"PRId64" -> tb:%d/%d pts:%"PRId64"\n",
1841                     tb.num, tb.den, pts1,
1842                     is->video_st->time_base.num, is->video_st->time_base.den, pts_int);
1843         }
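             /* picref->pts is in the filter link time base (tb); rescale it to the
                stream time base so the pts -> seconds conversion below stays valid. */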
1844 #else
1845         ret = get_video_frame(is, frame, &pts_int, &pkt);
1846         pos = pkt.pos;
1847 #endif
1848
1849         if (ret < 0) goto the_end;
1850
1851         if (!ret)
1852             continue;
1853
1854         pts = pts_int*av_q2d(is->video_st->time_base);
1855
1856         ret = output_picture(is, frame, pts, pos);
1857 #if !CONFIG_AVFILTER
1858         av_free_packet(&pkt);
1859 #endif
1860         if (ret < 0)
1861             goto the_end;
1862
1863         if (step)
1864             if (cur_stream)
1865                 stream_toggle_pause(cur_stream);
1866     }
1867  the_end:
1868 #if CONFIG_AVFILTER
1869     avfilter_graph_free(&graph);
1870 #endif
1871     av_free(frame);
1872     return 0;
1873 }
1874
1875 static int subtitle_thread(void *arg)
1876 {
1877     VideoState *is = arg;
1878     SubPicture *sp;
1879     AVPacket pkt1, *pkt = &pkt1;
1880     int len1, got_subtitle;
1881     double pts;
1882     int i, j;
1883     int r, g, b, y, u, v, a;
1884
1885     for(;;) {
1886         while (is->paused && !is->subtitleq.abort_request) {
1887             SDL_Delay(10);
1888         }
1889         if (packet_queue_get(&is->subtitleq, pkt, 1) < 0)
1890             break;
1891
1892         if(pkt->data == flush_pkt.data){
1893             avcodec_flush_buffers(is->subtitle_st->codec);
1894             continue;
1895         }
1896         SDL_LockMutex(is->subpq_mutex);
1897         while (is->subpq_size >= SUBPICTURE_QUEUE_SIZE &&
1898                !is->subtitleq.abort_request) {
1899             SDL_CondWait(is->subpq_cond, is->subpq_mutex);
1900         }
1901         SDL_UnlockMutex(is->subpq_mutex);
1902
1903         if (is->subtitleq.abort_request)
1904             goto the_end;
1905
1906         sp = &is->subpq[is->subpq_windex];
1907
1908         /* NOTE: pts here is the PTS of the _first_ subtitle that begins in
1909            this packet, if any */
1910         pts = 0;
1911         if (pkt->pts != AV_NOPTS_VALUE)
1912             pts = av_q2d(is->subtitle_st->time_base)*pkt->pts;
1913
1914         len1 = avcodec_decode_subtitle2(is->subtitle_st->codec,
1915                                     &sp->sub, &got_subtitle,
1916                                     pkt);
1917 //            if (len1 < 0)
1918 //                break;
1919         if (got_subtitle && sp->sub.format == 0) {
1920             sp->pts = pts;
1921
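                 /* Bitmap subtitles (format 0) carry an RGBA palette in
                    rects[i]->pict.data[1]; convert each palette entry to CCIR YUVA so
                    the blending done later in the display path can work in YUV space. */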
1922             for (i = 0; i < sp->sub.num_rects; i++)
1923             {
1924                 for (j = 0; j < sp->sub.rects[i]->nb_colors; j++)
1925                 {
1926                     RGBA_IN(r, g, b, a, (uint32_t*)sp->sub.rects[i]->pict.data[1] + j);
1927                     y = RGB_TO_Y_CCIR(r, g, b);
1928                     u = RGB_TO_U_CCIR(r, g, b, 0);
1929                     v = RGB_TO_V_CCIR(r, g, b, 0);
1930                     YUVA_OUT((uint32_t*)sp->sub.rects[i]->pict.data[1] + j, y, u, v, a);
1931                 }
1932             }
1933
1934             /* now we can update the picture count */
1935             if (++is->subpq_windex == SUBPICTURE_QUEUE_SIZE)
1936                 is->subpq_windex = 0;
1937             SDL_LockMutex(is->subpq_mutex);
1938             is->subpq_size++;
1939             SDL_UnlockMutex(is->subpq_mutex);
1940         }
1941         av_free_packet(pkt);
1942 //        if (step)
1943 //            if (cur_stream)
1944 //                stream_toggle_pause(cur_stream);
1945     }
1946  the_end:
1947     return 0;
1948 }
1949
1950 /* copy decoded samples into the circular buffer used for the waveform/spectrum display */
1951 static void update_sample_display(VideoState *is, short *samples, int samples_size)
1952 {
1953     int size, len, channels;
1954
1955     channels = is->audio_st->codec->channels;
1956
1957     size = samples_size / sizeof(short);
1958     while (size > 0) {
1959         len = SAMPLE_ARRAY_SIZE - is->sample_array_index;
1960         if (len > size)
1961             len = size;
1962         memcpy(is->sample_array + is->sample_array_index, samples, len * sizeof(short));
1963         samples += len;
1964         is->sample_array_index += len;
1965         if (is->sample_array_index >= SAMPLE_ARRAY_SIZE)
1966             is->sample_array_index = 0;
1967         size -= len;
1968     }
1969 }
1970
1971 /* return the new audio buffer size (samples may be added or removed
1972    to improve sync when video or the external clock is the master) */
1973 static int synchronize_audio(VideoState *is, short *samples,
1974                              int samples_size1, double pts)
1975 {
1976     int n, samples_size;
1977     double ref_clock;
1978
1979     n = 2 * is->audio_st->codec->channels;
1980     samples_size = samples_size1;
1981
1982     /* if not master, then we try to remove or add samples to correct the clock */
1983     if (((is->av_sync_type == AV_SYNC_VIDEO_MASTER && is->video_st) ||
1984          is->av_sync_type == AV_SYNC_EXTERNAL_CLOCK)) {
1985         double diff, avg_diff;
1986         int wanted_size, min_size, max_size, nb_samples;
1987
1988         ref_clock = get_master_clock(is);
1989         diff = get_audio_clock(is) - ref_clock;
1990
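             /* audio_diff_cum is an exponentially weighted sum: cum = diff + coef*cum,
                and avg_diff = cum * (1 - coef) recovers the weighted average.  With
                coef = exp(log(0.01) / AUDIO_DIFF_AVG_NB), a difference measured
                AUDIO_DIFF_AVG_NB calls ago is weighted at about 1% of the newest one,
                so the estimate tracks the recent A-V error while smoothing jitter. */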
1991         if (fabs(diff) < AV_NOSYNC_THRESHOLD) {
1992             is->audio_diff_cum = diff + is->audio_diff_avg_coef * is->audio_diff_cum;
1993             if (is->audio_diff_avg_count < AUDIO_DIFF_AVG_NB) {
1994                 /* not enough measurements yet for a reliable estimate */
1995                 is->audio_diff_avg_count++;
1996             } else {
1997                 /* estimate the A-V difference */
1998                 avg_diff = is->audio_diff_cum * (1.0 - is->audio_diff_avg_coef);
1999
2000                 if (fabs(avg_diff) >= is->audio_diff_threshold) {
2001                     wanted_size = samples_size + ((int)(diff * is->audio_st->codec->sample_rate) * n);
2002                     nb_samples = samples_size / n;
2003
2004                     min_size = ((nb_samples * (100 - SAMPLE_CORRECTION_PERCENT_MAX)) / 100) * n;
2005                     max_size = ((nb_samples * (100 + SAMPLE_CORRECTION_PERCENT_MAX)) / 100) * n;
2006                     if (wanted_size < min_size)
2007                         wanted_size = min_size;
2008                     else if (wanted_size > max_size)
2009                         wanted_size = max_size;
2010
2011                     /* add or remove samples to correct the sync */
2012                     if (wanted_size < samples_size) {
2013                         /* remove samples */
2014                         samples_size = wanted_size;
2015                     } else if (wanted_size > samples_size) {
2016                         uint8_t *samples_end, *q;
2017                         int nb;
2018
2019                         /* add samples by duplicating the last one */
2020                         nb = (wanted_size - samples_size);
2021                         samples_end = (uint8_t *)samples + samples_size - n;
2022                         q = samples_end + n;
2023                         while (nb > 0) {
2024                             memcpy(q, samples_end, n);
2025                             q += n;
2026                             nb -= n;
2027                         }
2028                         samples_size = wanted_size;
2029                     }
2030                 }
2031 #if 0
2032                 printf("diff=%f adiff=%f sample_diff=%d apts=%0.3f vpts=%0.3f %f\n",
2033                        diff, avg_diff, samples_size - samples_size1,
2034                        is->audio_clock, is->video_clock, is->audio_diff_threshold);
2035 #endif
2036             }
2037         } else {
2038             /* the difference is too large: probably initial PTS errors, so
2039                reset the A-V filter */
2040             is->audio_diff_avg_count = 0;
2041             is->audio_diff_cum = 0;
2042         }
2043     }
2044
2045     return samples_size;
2046 }
2047
2048 /* decode one audio frame and return its uncompressed size */
2049 static int audio_decode_frame(VideoState *is, double *pts_ptr)
2050 {
2051     AVPacket *pkt_temp = &is->audio_pkt_temp;
2052     AVPacket *pkt = &is->audio_pkt;
2053     AVCodecContext *dec= is->audio_st->codec;
2054     int n, len1, data_size;
2055     double pts;
2056
2057     for(;;) {
2058         /* NOTE: the audio packet can contain several frames */
2059         while (pkt_temp->size > 0) {
2060             data_size = sizeof(is->audio_buf1);
2061             len1 = avcodec_decode_audio3(dec,
2062                                         (int16_t *)is->audio_buf1, &data_size,
2063                                         pkt_temp);
2064             if (len1 < 0) {
2065                 /* if error, we skip the frame */
2066                 pkt_temp->size = 0;
2067                 break;
2068             }
2069
2070             pkt_temp->data += len1;
2071             pkt_temp->size -= len1;
2072             if (data_size <= 0)
2073                 continue;
2074
2075             if (dec->sample_fmt != is->audio_src_fmt) {
2076                 if (is->reformat_ctx)
2077                     av_audio_convert_free(is->reformat_ctx);
2078                 is->reformat_ctx= av_audio_convert_alloc(AV_SAMPLE_FMT_S16, 1,
2079                                                          dec->sample_fmt, 1, NULL, 0);
2080                 if (!is->reformat_ctx) {
2081                     fprintf(stderr, "Cannot convert %s sample format to %s sample format\n",
2082                         av_get_sample_fmt_name(dec->sample_fmt),
2083                         av_get_sample_fmt_name(AV_SAMPLE_FMT_S16));
2084                         break;
2085                 }
2086                 is->audio_src_fmt= dec->sample_fmt;
2087             }
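                 /* If the decoder output is not signed 16-bit, the converter below
                    copies audio_buf1 into audio_buf2: one packed plane in and out, the
                    input stride being the source sample size in bytes and the output
                    stride 2 bytes (S16). */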
2088
2089             if (is->reformat_ctx) {
2090                 const void *ibuf[6]= {is->audio_buf1};
2091                 void *obuf[6]= {is->audio_buf2};
2092                 int istride[6]= {av_get_bits_per_sample_fmt(dec->sample_fmt)/8};
2093                 int ostride[6]= {2};
2094                 int len= data_size/istride[0];
2095                 if (av_audio_convert(is->reformat_ctx, obuf, ostride, ibuf, istride, len)<0) {
2096                     printf("av_audio_convert() failed\n");
2097                     break;
2098                 }
2099                 is->audio_buf= is->audio_buf2;
2100                 /* FIXME: existing code assumes that data_size equals framesize*channels*2;
2101                           remove this legacy cruft */
2102                 data_size= len*2;
2103             }else{
2104                 is->audio_buf= is->audio_buf1;
2105             }
2106
2107             /* if no pts, then compute it */
2108             pts = is->audio_clock;
2109             *pts_ptr = pts;
2110             n = 2 * dec->channels;
2111             is->audio_clock += (double)data_size /
2112                 (double)(n * dec->sample_rate);
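                 /* n = 2 * channels is the size of one sample frame in bytes (S16).
                    For 16-bit stereo at 44100 Hz, n = 4, so e.g. a 4096-byte chunk
                    advances the clock by 4096 / (4 * 44100) ~= 23 ms. */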
2113 #if defined(DEBUG_SYNC)
2114             {
2115                 static double last_clock;
2116                 printf("audio: delay=%0.3f clock=%0.3f pts=%0.3f\n",
2117                        is->audio_clock - last_clock,
2118                        is->audio_clock, pts);
2119                 last_clock = is->audio_clock;
2120             }
2121 #endif
2122             return data_size;
2123         }
2124
2125         /* free the current packet */
2126         if (pkt->data)
2127             av_free_packet(pkt);
2128
2129         if (is->paused || is->audioq.abort_request) {
2130             return -1;
2131         }
2132
2133         /* read next packet */
2134         if (packet_queue_get(&is->audioq, pkt, 1) < 0)
2135             return -1;
2136         if(pkt->data == flush_pkt.data){
2137             avcodec_flush_buffers(dec);
2138             continue;
2139         }
2140
2141         pkt_temp->data = pkt->data;
2142         pkt_temp->size = pkt->size;
2143
2144         /* if the packet has a pts, update the audio clock with it */
2145         if (pkt->pts != AV_NOPTS_VALUE) {
2146             is->audio_clock = av_q2d(is->audio_st->time_base)*pkt->pts;
2147         }
2148     }
2149 }
2150
2151 /* prepare a new audio buffer */
2152 static void sdl_audio_callback(void *opaque, Uint8 *stream, int len)
2153 {
2154     VideoState *is = opaque;
2155     int audio_size, len1;
2156     double pts;
2157
2158     audio_callback_time = av_gettime();
2159
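         /* SDL pulls audio: it asks for exactly len bytes here.  audio_buf is refilled
            by decoding as needed, 1024 bytes of silence are substituted on a decode
            error, and the decoded samples also feed the waveform/spectrum display when
            video is not being shown. */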
2160     while (len > 0) {
2161         if (is->audio_buf_index >= is->audio_buf_size) {
2162            audio_size = audio_decode_frame(is, &pts);
2163            if (audio_size < 0) {
2164                 /* if error, just output silence */
2165                is->audio_buf = is->audio_buf1;
2166                is->audio_buf_size = 1024;
2167                memset(is->audio_buf, 0, is->audio_buf_size);
2168            } else {
2169                if (is->show_mode != SHOW_MODE_VIDEO)
2170                    update_sample_display(is, (int16_t *)is->audio_buf, audio_size);
2171                audio_size = synchronize_audio(is, (int16_t *)is->audio_buf, audio_size,
2172                                               pts);
2173                is->audio_buf_size = audio_size;
2174            }
2175            is->audio_buf_index = 0;
2176         }
2177         len1 = is->audio_buf_size - is->audio_buf_index;
2178         if (len1 > len)
2179             len1 = len;
2180         memcpy(stream, (uint8_t *)is->audio_buf + is->audio_buf_index, len1);
2181         len -= len1;
2182         stream += len1;
2183         is->audio_buf_index += len1;
2184     }
2185 }
2186
2187 /* open a given stream. Return 0 if OK */
2188 static int stream_component_open(VideoState *is, int stream_index)
2189 {
2190     AVFormatContext *ic = is->ic;
2191     AVCodecContext *avctx;
2192     AVCodec *codec;
2193     SDL_AudioSpec wanted_spec, spec;
2194
2195     if (stream_index < 0 || stream_index >= ic->nb_streams)
2196         return -1;
2197     avctx = ic->streams[stream_index]->codec;
2198
2199     /* prepare audio output */
2200     if (avctx->codec_type == AVMEDIA_TYPE_AUDIO) {
2201         if (avctx->channels > 0) {
2202             avctx->request_channels = FFMIN(2, avctx->channels);
2203         } else {
2204             avctx->request_channels = 2;
2205         }
2206     }
2207
2208     codec = avcodec_find_decoder(avctx->codec_id);
2209     avctx->debug_mv = debug_mv;
2210     avctx->debug = debug;
2211     avctx->workaround_bugs = workaround_bugs;
2212     avctx->lowres = lowres;
2213     if(lowres) avctx->flags |= CODEC_FLAG_EMU_EDGE;
2214     avctx->idct_algo= idct;
2215     if(fast) avctx->flags2 |= CODEC_FLAG2_FAST;
2216     avctx->skip_frame= skip_frame;
2217     avctx->skip_idct= skip_idct;
2218     avctx->skip_loop_filter= skip_loop_filter;
2219     avctx->error_recognition= error_recognition;
2220     avctx->error_concealment= error_concealment;
2221     avctx->thread_count= thread_count;
2222
2223     set_context_opts(avctx, avcodec_opts[avctx->codec_type], 0, codec);
2224
2225     if (!codec ||
2226         avcodec_open(avctx, codec) < 0)
2227         return -1;
2228
2229     /* prepare audio output */
2230     if (avctx->codec_type == AVMEDIA_TYPE_AUDIO) {
2231         wanted_spec.freq = avctx->sample_rate;
2232         wanted_spec.format = AUDIO_S16SYS;
2233         wanted_spec.channels = avctx->channels;
2234         wanted_spec.silence = 0;
2235         wanted_spec.samples = SDL_AUDIO_BUFFER_SIZE;
2236         wanted_spec.callback = sdl_audio_callback;
2237         wanted_spec.userdata = is;
2238         if (SDL_OpenAudio(&wanted_spec, &spec) < 0) {
2239             fprintf(stderr, "SDL_OpenAudio: %s\n", SDL_GetError());
2240             return -1;
2241         }
2242         is->audio_hw_buf_size = spec.size;
2243         is->audio_src_fmt= AV_SAMPLE_FMT_S16;
2244     }
2245
2246     ic->streams[stream_index]->discard = AVDISCARD_DEFAULT;
2247     switch(avctx->codec_type) {
2248     case AVMEDIA_TYPE_AUDIO:
2249         is->audio_stream = stream_index;
2250         is->audio_st = ic->streams[stream_index];
2251         is->audio_buf_size = 0;
2252         is->audio_buf_index = 0;
2253
2254         /* init averaging filter */
2255         is->audio_diff_avg_coef = exp(log(0.01) / AUDIO_DIFF_AVG_NB);
2256         is->audio_diff_avg_count = 0;
2257         /* since we do not have a precise enough measure of the audio FIFO fullness,
2258            we only correct audio sync if the error is larger than this threshold */
2259         is->audio_diff_threshold = 2.0 * SDL_AUDIO_BUFFER_SIZE / avctx->sample_rate;
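             /* e.g. with the default 1024-sample SDL buffer at 44100 Hz this is
                2 * 1024 / 44100 ~= 46 ms */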
2260
2261         memset(&is->audio_pkt, 0, sizeof(is->audio_pkt));
2262         packet_queue_init(&is->audioq);
2263         SDL_PauseAudio(0);
2264         break;
2265     case AVMEDIA_TYPE_VIDEO:
2266         is->video_stream = stream_index;
2267         is->video_st = ic->streams[stream_index];
2268
2269 //        is->video_current_pts_time = av_gettime();
2270
2271         packet_queue_init(&is->videoq);
2272         is->video_tid = SDL_CreateThread(video_thread, is);
2273         break;
2274     case AVMEDIA_TYPE_SUBTITLE:
2275         is->subtitle_stream = stream_index;
2276         is->subtitle_st = ic->streams[stream_index];
2277         packet_queue_init(&is->subtitleq);
2278
2279         is->subtitle_tid = SDL_CreateThread(subtitle_thread, is);
2280         break;
2281     default:
2282         break;
2283     }
2284     return 0;
2285 }
2286
2287 static void stream_component_close(VideoState *is, int stream_index)
2288 {
2289     AVFormatContext *ic = is->ic;
2290     AVCodecContext *avctx;
2291
2292     if (stream_index < 0 || stream_index >= ic->nb_streams)
2293         return;
2294     avctx = ic->streams[stream_index]->codec;
2295
2296     switch(avctx->codec_type) {
2297     case AVMEDIA_TYPE_AUDIO:
2298         packet_queue_abort(&is->audioq);
2299
2300         SDL_CloseAudio();
2301
2302         packet_queue_end(&is->audioq);
2303         if (is->reformat_ctx)
2304             av_audio_convert_free(is->reformat_ctx);
2305         is->reformat_ctx = NULL;
2306         break;
2307     case AVMEDIA_TYPE_VIDEO:
2308         packet_queue_abort(&is->videoq);
2309
2310         /* note: we also signal this condition to make sure we unblock the
2311            video thread in all cases */
2312         SDL_LockMutex(is->pictq_mutex);
2313         SDL_CondSignal(is->pictq_cond);
2314         SDL_UnlockMutex(is->pictq_mutex);
2315
2316         SDL_WaitThread(is->video_tid, NULL);
2317
2318         packet_queue_end(&is->videoq);
2319         break;
2320     case AVMEDIA_TYPE_SUBTITLE:
2321         packet_queue_abort(&is->subtitleq);
2322
2323         /* note: we also signal this condition to make sure we unblock the
2324            subtitle thread in all cases */
2325         SDL_LockMutex(is->subpq_mutex);
2326         is->subtitle_stream_changed = 1;
2327
2328         SDL_CondSignal(is->subpq_cond);
2329         SDL_UnlockMutex(is->subpq_mutex);
2330
2331         SDL_WaitThread(is->subtitle_tid, NULL);
2332
2333         packet_queue_end(&is->subtitleq);
2334         break;
2335     default:
2336         break;
2337     }
2338
2339     ic->streams[stream_index]->discard = AVDISCARD_ALL;
2340     avcodec_close(avctx);
2341     switch(avctx->codec_type) {
2342     case AVMEDIA_TYPE_AUDIO:
2343         is->audio_st = NULL;
2344         is->audio_stream = -1;
2345         break;
2346     case AVMEDIA_TYPE_VIDEO:
2347         is->video_st = NULL;
2348         is->video_stream = -1;
2349         break;
2350     case AVMEDIA_TYPE_SUBTITLE:
2351         is->subtitle_st = NULL;
2352         is->subtitle_stream = -1;
2353         break;
2354     default:
2355         break;
2356     }
2357 }
2358
2359 /* since we have only one decoding thread, we can use a global
2360    variable instead of a thread local variable */
2361 static VideoState *global_video_state;
2362
2363 static int decode_interrupt_cb(void)
2364 {
2365     return (global_video_state && global_video_state->abort_request);
2366 }
2367
2368 /* this thread gets the stream from the disk or the network */
2369 static int decode_thread(void *arg)
2370 {
2371     VideoState *is = arg;
2372     AVFormatContext *ic;
2373     int err, i, ret;
2374     int st_index[AVMEDIA_TYPE_NB];
2375     AVPacket pkt1, *pkt = &pkt1;
2376     AVFormatParameters params, *ap = &params;
2377     int eof=0;
2378     int pkt_in_play_range = 0;
2379
2380     ic = avformat_alloc_context();
2381
2382     memset(st_index, -1, sizeof(st_index));
2383     is->video_stream = -1;
2384     is->audio_stream = -1;
2385     is->subtitle_stream = -1;
2386
2387     global_video_state = is;
2388     avio_set_interrupt_cb(decode_interrupt_cb);
2389
2390     memset(ap, 0, sizeof(*ap));
2391
2392     ap->prealloced_context = 1;
2393     ap->width = frame_width;
2394     ap->height= frame_height;
2395     ap->time_base= (AVRational){1, 25};
2396     ap->pix_fmt = frame_pix_fmt;
2397
2398     set_context_opts(ic, avformat_opts, AV_OPT_FLAG_DECODING_PARAM, NULL);
2399
2400     err = av_open_input_file(&ic, is->filename, is->iformat, 0, ap);
2401     if (err < 0) {
2402         print_error(is->filename, err);
2403         ret = -1;
2404         goto fail;
2405     }
2406     is->ic = ic;
2407
2408     if(genpts)
2409         ic->flags |= AVFMT_FLAG_GENPTS;
2410
2411     err = av_find_stream_info(ic);
2412     if (err < 0) {
2413         fprintf(stderr, "%s: could not find codec parameters\n", is->filename);
2414         ret = -1;
2415         goto fail;
2416     }
2417     if(ic->pb)
2418         ic->pb->eof_reached= 0; //FIXME hack, ffplay maybe should not use url_feof() to test for the end
2419
2420     if(seek_by_bytes<0)
2421         seek_by_bytes= !!(ic->iformat->flags & AVFMT_TS_DISCONT);
2422
2423     /* if a seek was requested, execute it */
2424     if (start_time != AV_NOPTS_VALUE) {
2425         int64_t timestamp;
2426
2427         timestamp = start_time;
2428         /* add the stream start time */
2429         if (ic->start_time != AV_NOPTS_VALUE)
2430             timestamp += ic->start_time;
2431         ret = avformat_seek_file(ic, -1, INT64_MIN, timestamp, INT64_MAX, 0);
2432         if (ret < 0) {
2433             fprintf(stderr, "%s: could not seek to position %0.3f\n",
2434                     is->filename, (double)timestamp / AV_TIME_BASE);
2435         }
2436     }
2437
2438     for (i = 0; i < ic->nb_streams; i++)
2439         ic->streams[i]->discard = AVDISCARD_ALL;
2440     if (!video_disable)
2441         st_index[AVMEDIA_TYPE_VIDEO] =
2442             av_find_best_stream(ic, AVMEDIA_TYPE_VIDEO,
2443                                 wanted_stream[AVMEDIA_TYPE_VIDEO], -1, NULL, 0);
2444     if (!audio_disable)
2445         st_index[AVMEDIA_TYPE_AUDIO] =
2446             av_find_best_stream(ic, AVMEDIA_TYPE_AUDIO,
2447                                 wanted_stream[AVMEDIA_TYPE_AUDIO],
2448                                 st_index[AVMEDIA_TYPE_VIDEO],
2449                                 NULL, 0);
2450     if (!video_disable)
2451         st_index[AVMEDIA_TYPE_SUBTITLE] =
2452             av_find_best_stream(ic, AVMEDIA_TYPE_SUBTITLE,
2453                                 wanted_stream[AVMEDIA_TYPE_SUBTITLE],
2454                                 (st_index[AVMEDIA_TYPE_AUDIO] >= 0 ?
2455                                  st_index[AVMEDIA_TYPE_AUDIO] :
2456                                  st_index[AVMEDIA_TYPE_VIDEO]),
2457                                 NULL, 0);
2458     if (show_status) {
2459         av_dump_format(ic, 0, is->filename, 0);
2460     }
2461
2462     is->show_mode = show_mode;
2463
2464     /* open the streams */
2465     if (st_index[AVMEDIA_TYPE_AUDIO] >= 0) {
2466         stream_component_open(is, st_index[AVMEDIA_TYPE_AUDIO]);
2467     }
2468
2469     ret=-1;
2470     if (st_index[AVMEDIA_TYPE_VIDEO] >= 0) {
2471         ret= stream_component_open(is, st_index[AVMEDIA_TYPE_VIDEO]);
2472     }
2473     is->refresh_tid = SDL_CreateThread(refresh_thread, is);
2474     if(ret<0) {
2475         if (!display_disable)
2476             is->show_mode = SHOW_MODE_RDFT;
2477     }
2478
2479     if (st_index[AVMEDIA_TYPE_SUBTITLE] >= 0) {
2480         stream_component_open(is, st_index[AVMEDIA_TYPE_SUBTITLE]);
2481     }
2482
2483     if (is->video_stream < 0 && is->audio_stream < 0) {
2484         fprintf(stderr, "%s: could not open codecs\n", is->filename);
2485         ret = -1;
2486         goto fail;
2487     }
2488
2489     for(;;) {
2490         if (is->abort_request)
2491             break;
2492         if (is->paused != is->last_paused) {
2493             is->last_paused = is->paused;
2494             if (is->paused)
2495                 is->read_pause_return= av_read_pause(ic);
2496             else
2497                 av_read_play(ic);
2498         }
2499 #if CONFIG_RTSP_DEMUXER
2500         if (is->paused && !strcmp(ic->iformat->name, "rtsp")) {
2501             /* wait 10 ms to avoid trying to get another packet */
2502             /* XXX: horrible */
2503             SDL_Delay(10);
2504             continue;
2505         }
2506 #endif
2507         if (is->seek_req) {
2508             int64_t seek_target= is->seek_pos;
2509             int64_t seek_min= is->seek_rel > 0 ? seek_target - is->seek_rel + 2: INT64_MIN;
2510             int64_t seek_max= is->seek_rel < 0 ? seek_target - is->seek_rel - 2: INT64_MAX;
2511 //FIXME the +-2 is due to rounding being not done in the correct direction in generation
2512 //      of the seek_pos/seek_rel variables
2513
2514             ret = avformat_seek_file(is->ic, -1, seek_min, seek_target, seek_max, is->seek_flags);
2515             if (ret < 0) {
2516                 fprintf(stderr, "%s: error while seeking\n", is->ic->filename);
2517             }else{
2518                 if (is->audio_stream >= 0) {
2519                     packet_queue_flush(&is->audioq);
2520                     packet_queue_put(&is->audioq, &flush_pkt);
2521                 }
2522                 if (is->subtitle_stream >= 0) {
2523                     packet_queue_flush(&is->subtitleq);
2524                     packet_queue_put(&is->subtitleq, &flush_pkt);
2525                 }
2526                 if (is->video_stream >= 0) {
2527                     packet_queue_flush(&is->videoq);
2528                     packet_queue_put(&is->videoq, &flush_pkt);
2529                 }
2530             }
2531             is->seek_req = 0;
2532             eof= 0;
2533         }
2534
2535         /* if the queues are full, no need to read more */
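             /* i.e. stop reading when the queues together exceed MAX_QUEUE_SIZE bytes,
                or when every active stream already has enough data buffered
                (MIN_AUDIOQ_SIZE bytes of audio, MIN_FRAMES packets of video/subtitles). */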
2536         if (   is->audioq.size + is->videoq.size + is->subtitleq.size > MAX_QUEUE_SIZE
2537             || (   (is->audioq   .size  > MIN_AUDIOQ_SIZE || is->audio_stream<0)
2538                 && (is->videoq   .nb_packets > MIN_FRAMES || is->video_stream<0)
2539                 && (is->subtitleq.nb_packets > MIN_FRAMES || is->subtitle_stream<0))) {
2540             /* wait 10 ms */
2541             SDL_Delay(10);
2542             continue;
2543         }
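             /* At EOF, queue an empty packet so a decoder with delayed frames flushes
                its remaining pictures.  Once all queues have drained, either restart
                from start_time when -loop asks for it, or quit when -autoexit is set. */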
2544         if(eof) {
2545             if(is->video_stream >= 0){
2546                 av_init_packet(pkt);
2547                 pkt->data=NULL;
2548                 pkt->size=0;
2549                 pkt->stream_index= is->video_stream;
2550                 packet_queue_put(&is->videoq, pkt);
2551             }
2552             SDL_Delay(10);
2553             if(is->audioq.size + is->videoq.size + is->subtitleq.size ==0){
2554                 if(loop!=1 && (!loop || --loop)){
2555                     stream_seek(cur_stream, start_time != AV_NOPTS_VALUE ? start_time : 0, 0, 0);
2556                 }else if(autoexit){
2557                     ret=AVERROR_EOF;
2558                     goto fail;
2559                 }
2560             }
2561             eof=0;
2562             continue;
2563         }
2564         ret = av_read_frame(ic, pkt);
2565         if (ret < 0) {
2566             if (ret == AVERROR_EOF || url_feof(ic->pb))
2567                 eof=1;
2568             if (ic->pb && ic->pb->error)
2569                 break;
2570             SDL_Delay(100); /* wait for user event */
2571             continue;
2572         }
2573         /* check if the packet is in the play range specified by the user, then queue it, otherwise discard it */
2574         pkt_in_play_range = duration == AV_NOPTS_VALUE ||
2575                 (pkt->pts - ic->streams[pkt->stream_index]->start_time) *
2576                 av_q2d(ic->streams[pkt->stream_index]->time_base) -
2577                 (double)(start_time != AV_NOPTS_VALUE ? start_time : 0)/1000000
2578                 <= ((double)duration/1000000);
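             /* The packet pts is converted to seconds relative to the stream start,
                the -ss start time (in AV_TIME_BASE units, i.e. microseconds) is
                subtracted, and the result is compared against the -t duration. */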
2579         if (pkt->stream_index == is->audio_stream && pkt_in_play_range) {
2580             packet_queue_put(&is->audioq, pkt);
2581         } else if (pkt->stream_index == is->video_stream && pkt_in_play_range) {
2582             packet_queue_put(&is->videoq, pkt);
2583         } else if (pkt->stream_index == is->subtitle_stream && pkt_in_play_range) {
2584             packet_queue_put(&is->subtitleq, pkt);
2585         } else {
2586             av_free_packet(pkt);
2587         }
2588     }
2589     /* wait until the end */
2590     while (!is->abort_request) {
2591         SDL_Delay(100);
2592     }
2593
2594     ret = 0;
2595  fail:
2596     /* disable interrupting */
2597     global_video_state = NULL;
2598
2599     /* close each stream */
2600     if (is->audio_stream >= 0)
2601         stream_component_close(is, is->audio_stream);
2602     if (is->video_stream >= 0)
2603         stream_component_close(is, is->video_stream);
2604     if (is->subtitle_stream >= 0)
2605         stream_component_close(is, is->subtitle_stream);
2606     if (is->ic) {
2607         av_close_input_file(is->ic);
2608         is->ic = NULL; /* safety */
2609     }
2610     avio_set_interrupt_cb(NULL);
2611
2612     if (ret != 0) {
2613         SDL_Event event;
2614
2615         event.type = FF_QUIT_EVENT;
2616         event.user.data1 = is;
2617         SDL_PushEvent(&event);
2618     }
2619     return 0;
2620 }
2621
2622 static VideoState *stream_open(const char *filename, AVInputFormat *iformat)
2623 {
2624     VideoState *is;
2625
2626     is = av_mallocz(sizeof(VideoState));
2627     if (!is)
2628         return NULL;
2629     av_strlcpy(is->filename, filename, sizeof(is->filename));
2630     is->iformat = iformat;
2631     is->ytop = 0;
2632     is->xleft = 0;
2633
2634     /* start video display */
2635     is->pictq_mutex = SDL_CreateMutex();
2636     is->pictq_cond = SDL_CreateCond();
2637
2638     is->subpq_mutex = SDL_CreateMutex();
2639     is->subpq_cond = SDL_CreateCond();
2640
2641     is->av_sync_type = av_sync_type;
2642     is->parse_tid = SDL_CreateThread(decode_thread, is);
2643     if (!is->parse_tid) {
2644         av_free(is);
2645         return NULL;
2646     }
2647     return is;
2648 }
2649
2650 static void stream_cycle_channel(VideoState *is, int codec_type)
2651 {
2652     AVFormatContext *ic = is->ic;
2653     int start_index, stream_index;
2654     AVStream *st;
2655
2656     if (codec_type == AVMEDIA_TYPE_VIDEO)
2657         start_index = is->video_stream;
2658     else if (codec_type == AVMEDIA_TYPE_AUDIO)
2659         start_index = is->audio_stream;
2660     else
2661         start_index = is->subtitle_stream;
2662     if (start_index < (codec_type == AVMEDIA_TYPE_SUBTITLE ? -1 : 0))
2663         return;
2664     stream_index = start_index;
2665     for(;;) {
2666         if (++stream_index >= is->ic->nb_streams)
2667         {
2668             if (codec_type == AVMEDIA_TYPE_SUBTITLE)
2669             {
2670                 stream_index = -1;
2671                 goto the_end;
2672             } else
2673                 stream_index = 0;
2674         }
2675         if (stream_index == start_index)
2676             return;
2677         st = ic->streams[stream_index];
2678         if (st->codec->codec_type == codec_type) {
2679             /* check that parameters are OK */
2680             switch(codec_type) {
2681             case AVMEDIA_TYPE_AUDIO:
2682                 if (st->codec->sample_rate != 0 &&
2683                     st->codec->channels != 0)
2684                     goto the_end;
2685                 break;
2686             case AVMEDIA_TYPE_VIDEO:
2687             case AVMEDIA_TYPE_SUBTITLE:
2688                 goto the_end;
2689             default:
2690                 break;
2691             }
2692         }
2693     }
2694  the_end:
2695     stream_component_close(is, start_index);
2696     stream_component_open(is, stream_index);
2697 }
2698
2699
2700 static void toggle_full_screen(void)
2701 {
2702     is_full_screen = !is_full_screen;
2703     if (!fs_screen_width) {
2704         /* use default SDL method */
2705 //        SDL_WM_ToggleFullScreen(screen);
2706     }
2707     video_open(cur_stream);
2708 }
2709
2710 static void toggle_pause(void)
2711 {
2712     if (cur_stream)
2713         stream_toggle_pause(cur_stream);
2714     step = 0;
2715 }
2716
2717 static void step_to_next_frame(void)
2718 {
2719     if (cur_stream) {
2720         /* if the stream is paused, unpause it, then step */
2721         if (cur_stream->paused)
2722             stream_toggle_pause(cur_stream);
2723     }
2724     step = 1;
2725 }
2726
2727 static void toggle_audio_display(void)
2728 {
2729     if (cur_stream) {
2730         int bgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0x00);
2731         cur_stream->show_mode = (cur_stream->show_mode + 1) % SHOW_MODE_NB;
2732         fill_rectangle(screen,
2733                     cur_stream->xleft, cur_stream->ytop, cur_stream->width, cur_stream->height,
2734                     bgcolor);
2735         SDL_UpdateRect(screen, cur_stream->xleft, cur_stream->ytop, cur_stream->width, cur_stream->height);
2736     }
2737 }
2738
2739 /* handle an event sent by the GUI */
2740 static void event_loop(void)
2741 {
2742     SDL_Event event;
2743     double incr, pos, frac;
2744
2745     for(;;) {
2746         double x;
2747         SDL_WaitEvent(&event);
2748         switch(event.type) {
2749         case SDL_KEYDOWN:
2750             if (exit_on_keydown) {
2751                 do_exit();
2752                 break;
2753             }
2754             switch(event.key.keysym.sym) {
2755             case SDLK_ESCAPE:
2756             case SDLK_q:
2757                 do_exit();
2758                 break;
2759             case SDLK_f:
2760                 toggle_full_screen();
2761                 break;
2762             case SDLK_p:
2763             case SDLK_SPACE:
2764                 toggle_pause();
2765                 break;
2766             case SDLK_s: //S: Step to next frame
2767                 step_to_next_frame();
2768                 break;
2769             case SDLK_a:
2770                 if (cur_stream)
2771                     stream_cycle_channel(cur_stream, AVMEDIA_TYPE_AUDIO);
2772                 break;
2773             case SDLK_v:
2774                 if (cur_stream)
2775                     stream_cycle_channel(cur_stream, AVMEDIA_TYPE_VIDEO);
2776                 break;
2777             case SDLK_t:
2778                 if (cur_stream)
2779                     stream_cycle_channel(cur_stream, AVMEDIA_TYPE_SUBTITLE);
2780                 break;
2781             case SDLK_w:
2782                 toggle_audio_display();
2783                 break;
2784             case SDLK_LEFT:
2785                 incr = -10.0;
2786                 goto do_seek;
2787             case SDLK_RIGHT:
2788                 incr = 10.0;
2789                 goto do_seek;
2790             case SDLK_UP:
2791                 incr = 60.0;
2792                 goto do_seek;
2793             case SDLK_DOWN:
2794                 incr = -60.0;
2795             do_seek:
2796                 if (cur_stream) {
2797                     if (seek_by_bytes) {
2798                         if (cur_stream->video_stream >= 0 && cur_stream->video_current_pos>=0){
2799                             pos= cur_stream->video_current_pos;
2800                         }else if(cur_stream->audio_stream >= 0 && cur_stream->audio_pkt.pos>=0){
2801                             pos= cur_stream->audio_pkt.pos;
2802                         }else
2803                             pos = avio_tell(cur_stream->ic->pb);
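                             /* convert the seek increment from seconds to bytes using
                                the stream bit rate (bit_rate / 8 bytes per second), or
                                fall back to 180000 bytes/s (~1.4 Mbit/s) if unknown */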
2804                         if (cur_stream->ic->bit_rate)
2805                             incr *= cur_stream->ic->bit_rate / 8.0;
2806                         else
2807                             incr *= 180000.0;
2808                         pos += incr;
2809                         stream_seek(cur_stream, pos, incr, 1);
2810                     } else {
2811                         pos = get_master_clock(cur_stream);
2812                         pos += incr;
2813                         stream_seek(cur_stream, (int64_t)(pos * AV_TIME_BASE), (int64_t)(incr * AV_TIME_BASE), 0);
2814                     }
2815                 }
2816                 break;
2817             default:
2818                 break;
2819             }
2820             break;
2821         case SDL_MOUSEBUTTONDOWN:
2822             if (exit_on_mousedown) {
2823                 do_exit();
2824                 break;
2825             }
2826         case SDL_MOUSEMOTION:
2827             if(event.type ==SDL_MOUSEBUTTONDOWN){
2828                 x= event.button.x;
2829             }else{
2830                 if(event.motion.state != SDL_PRESSED)
2831                     break;
2832                 x= event.motion.x;
2833             }
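             /* A click (or drag) at horizontal position x seeks to the same fraction
                of the file: by bytes when seeking by bytes or when the duration is
                unknown, otherwise to frac * duration (shifted by the stream start time). */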
2834             if (cur_stream) {
2835                 if(seek_by_bytes || cur_stream->ic->duration<=0){
2836                     uint64_t size=  avio_size(cur_stream->ic->pb);
2837                     stream_seek(cur_stream, size*x/cur_stream->width, 0, 1);
2838                 }else{
2839                     int64_t ts;
2840                     int ns, hh, mm, ss;
2841                     int tns, thh, tmm, tss;
2842                     tns = cur_stream->ic->duration/1000000LL;
2843                     thh = tns/3600;
2844                     tmm = (tns%3600)/60;
2845                     tss = (tns%60);
2846                     frac = x/cur_stream->width;
2847                     ns = frac*tns;
2848                     hh = ns/3600;
2849                     mm = (ns%3600)/60;
2850                     ss = (ns%60);
2851                     fprintf(stderr, "Seek to %2.0f%% (%2d:%02d:%02d) of total duration (%2d:%02d:%02d)       \n", frac*100,
2852                             hh, mm, ss, thh, tmm, tss);
2853                     ts = frac*cur_stream->ic->duration;
2854                     if (cur_stream->ic->start_time != AV_NOPTS_VALUE)
2855                         ts += cur_stream->ic->start_time;
2856                     stream_seek(cur_stream, ts, 0, 0);
2857                 }
2858             }
2859             break;
2860         case SDL_VIDEORESIZE:
2861             if (cur_stream) {
2862                 screen = SDL_SetVideoMode(event.resize.w, event.resize.h, 0,
2863                                           SDL_HWSURFACE|SDL_RESIZABLE|SDL_ASYNCBLIT|SDL_HWACCEL);
2864                 screen_width = cur_stream->width = event.resize.w;
2865                 screen_height= cur_stream->height= event.resize.h;
2866             }
2867             break;
2868         case SDL_QUIT:
2869         case FF_QUIT_EVENT:
2870             do_exit();
2871             break;
2872         case FF_ALLOC_EVENT:
2873             video_open(event.user.data1);
2874             alloc_picture(event.user.data1);
2875             break;
2876         case FF_REFRESH_EVENT:
2877             video_refresh_timer(event.user.data1);
2878             cur_stream->refresh=0;
2879             break;
2880         default:
2881             break;
2882         }
2883     }
2884 }
2885
2886 static void opt_frame_size(const char *arg)
2887 {
2888     if (av_parse_video_size(&frame_width, &frame_height, arg) < 0) {
2889         fprintf(stderr, "Incorrect frame size\n");
2890         exit(1);
2891     }
2892     if ((frame_width % 2) != 0 || (frame_height % 2) != 0) {
2893         fprintf(stderr, "Frame size must be a multiple of 2\n");
2894         exit(1);
2895     }
2896 }
2897
2898 static int opt_width(const char *opt, const char *arg)
2899 {
2900     screen_width = parse_number_or_die(opt, arg, OPT_INT64, 1, INT_MAX);
2901     return 0;
2902 }
2903
2904 static int opt_height(const char *opt, const char *arg)
2905 {
2906     screen_height = parse_number_or_die(opt, arg, OPT_INT64, 1, INT_MAX);
2907     return 0;
2908 }
2909
2910 static void opt_format(const char *arg)
2911 {
2912     file_iformat = av_find_input_format(arg);
2913     if (!file_iformat) {
2914         fprintf(stderr, "Unknown input format: %s\n", arg);
2915         exit(1);
2916     }
2917 }
2918
2919 static void opt_frame_pix_fmt(const char *arg)
2920 {
2921     frame_pix_fmt = av_get_pix_fmt(arg);
2922 }
2923
2924 static int opt_sync(const char *opt, const char *arg)
2925 {
2926     if (!strcmp(arg, "audio"))
2927         av_sync_type = AV_SYNC_AUDIO_MASTER;
2928     else if (!strcmp(arg, "video"))
2929         av_sync_type = AV_SYNC_VIDEO_MASTER;
2930     else if (!strcmp(arg, "ext"))
2931         av_sync_type = AV_SYNC_EXTERNAL_CLOCK;
2932     else {
2933         fprintf(stderr, "Unknown value for %s: %s\n", opt, arg);
2934         exit(1);
2935     }
2936     return 0;
2937 }
2938
2939 static int opt_seek(const char *opt, const char *arg)
2940 {
2941     start_time = parse_time_or_die(opt, arg, 1);
2942     return 0;
2943 }
2944
2945 static int opt_duration(const char *opt, const char *arg)
2946 {
2947     duration = parse_time_or_die(opt, arg, 1);
2948     return 0;
2949 }
2950
2951 static int opt_debug(const char *opt, const char *arg)
2952 {
2953     av_log_set_level(99);
2954     debug = parse_number_or_die(opt, arg, OPT_INT64, 0, INT_MAX);
2955     return 0;
2956 }
2957
2958 static int opt_vismv(const char *opt, const char *arg)
2959 {
2960     debug_mv = parse_number_or_die(opt, arg, OPT_INT64, INT_MIN, INT_MAX);
2961     return 0;
2962 }
2963
2964 static int opt_thread_count(const char *opt, const char *arg)
2965 {
2966     thread_count= parse_number_or_die(opt, arg, OPT_INT64, 0, INT_MAX);
2967 #if !HAVE_THREADS
2968     fprintf(stderr, "Warning: not compiled with thread support, using thread emulation\n");
2969 #endif
2970     return 0;
2971 }
2972
2973 static int opt_show_mode(const char *opt, const char *arg)
2974 {
2975     show_mode = !strcmp(arg, "video") ? SHOW_MODE_VIDEO :
2976                 !strcmp(arg, "waves") ? SHOW_MODE_WAVES :
2977                 !strcmp(arg, "rdft" ) ? SHOW_MODE_RDFT  :
2978                 parse_number_or_die(opt, arg, OPT_INT, 0, SHOW_MODE_NB-1);
2979     return 0;
2980 }
2981
2982 static const OptionDef options[] = {
2983 #include "cmdutils_common_opts.h"
2984     { "x", HAS_ARG | OPT_FUNC2, {(void*)opt_width}, "force displayed width", "width" },
2985     { "y", HAS_ARG | OPT_FUNC2, {(void*)opt_height}, "force displayed height", "height" },
2986     { "s", HAS_ARG | OPT_VIDEO, {(void*)opt_frame_size}, "set frame size (WxH or abbreviation)", "size" },
2987     { "fs", OPT_BOOL, {(void*)&is_full_screen}, "force full screen" },
2988     { "an", OPT_BOOL, {(void*)&audio_disable}, "disable audio" },
2989     { "vn", OPT_BOOL, {(void*)&video_disable}, "disable video" },
2990     { "ast", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&wanted_stream[AVMEDIA_TYPE_AUDIO]}, "select desired audio stream", "stream_number" },
2991     { "vst", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&wanted_stream[AVMEDIA_TYPE_VIDEO]}, "select desired video stream", "stream_number" },
2992     { "sst", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&wanted_stream[AVMEDIA_TYPE_SUBTITLE]}, "select desired subtitle stream", "stream_number" },
2993     { "ss", HAS_ARG | OPT_FUNC2, {(void*)&opt_seek}, "seek to a given position in seconds", "pos" },
2994     { "t", HAS_ARG | OPT_FUNC2, {(void*)&opt_duration}, "play \"duration\" seconds of audio/video", "duration" },
2995     { "bytes", OPT_INT | HAS_ARG, {(void*)&seek_by_bytes}, "seek by bytes 0=off 1=on -1=auto", "val" },
2996     { "nodisp", OPT_BOOL, {(void*)&display_disable}, "disable graphical display" },
2997     { "f", HAS_ARG, {(void*)opt_format}, "force format", "fmt" },
2998     { "pix_fmt", HAS_ARG | OPT_EXPERT | OPT_VIDEO, {(void*)opt_frame_pix_fmt}, "set pixel format", "format" },
2999     { "stats", OPT_BOOL | OPT_EXPERT, {(void*)&show_status}, "show status", "" },
3000     { "debug", HAS_ARG | OPT_FUNC2 | OPT_EXPERT, {(void*)opt_debug}, "print specific debug info", "" },
3001     { "bug", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&workaround_bugs}, "workaround bugs", "" },
3002     { "vismv", HAS_ARG | OPT_FUNC2 | OPT_EXPERT, {(void*)opt_vismv}, "visualize motion vectors", "" },
3003     { "fast", OPT_BOOL | OPT_EXPERT, {(void*)&fast}, "non spec compliant optimizations", "" },
3004     { "genpts", OPT_BOOL | OPT_EXPERT, {(void*)&genpts}, "generate pts", "" },
3005     { "drp", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&decoder_reorder_pts}, "let decoder reorder pts 0=off 1=on -1=auto", ""},
3006     { "lowres", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&lowres}, "", "" },
3007     { "skiploop", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&skip_loop_filter}, "", "" },
3008     { "skipframe", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&skip_frame}, "", "" },
3009     { "skipidct", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&skip_idct}, "", "" },
3010     { "idct", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&idct}, "set idct algo",  "algo" },
3011     { "er", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&error_recognition}, "set error detection threshold (0-4)",  "threshold" },
3012     { "ec", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&error_concealment}, "set error concealment options",  "bit_mask" },
3013     { "sync", HAS_ARG | OPT_FUNC2 | OPT_EXPERT, {(void*)opt_sync}, "set audio-video sync. type (type=audio/video/ext)", "type" },
3014     { "threads", HAS_ARG | OPT_FUNC2 | OPT_EXPERT, {(void*)opt_thread_count}, "thread count", "count" },
3015     { "autoexit", OPT_BOOL | OPT_EXPERT, {(void*)&autoexit}, "exit at the end", "" },
3016     { "exitonkeydown", OPT_BOOL | OPT_EXPERT, {(void*)&exit_on_keydown}, "exit on key down", "" },
3017     { "exitonmousedown", OPT_BOOL | OPT_EXPERT, {(void*)&exit_on_mousedown}, "exit on mouse down", "" },
3018     { "loop", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&loop}, "set number of times the playback shall be looped", "loop count" },
3019     { "framedrop", OPT_BOOL | OPT_EXPERT, {(void*)&framedrop}, "drop frames when cpu is too slow", "" },
3020     { "window_title", OPT_STRING | HAS_ARG, {(void*)&window_title}, "set window title", "window title" },
3021 #if CONFIG_AVFILTER
3022     { "vf", OPT_STRING | HAS_ARG, {(void*)&vfilters}, "video filters", "filter list" },
3023 #endif
3024     { "rdftspeed", OPT_INT | HAS_ARG| OPT_AUDIO | OPT_EXPERT, {(void*)&rdftspeed}, "rdft speed", "msecs" },
3025     { "showmode", HAS_ARG | OPT_FUNC2, {(void*)opt_show_mode}, "select show mode (0 = video, 1 = waves, 2 = RDFT)", "mode" },
3026     { "default", OPT_FUNC2 | HAS_ARG | OPT_AUDIO | OPT_VIDEO | OPT_EXPERT, {(void*)opt_default}, "generic catch all option", "" },
3027     { "i", OPT_DUMMY, {NULL}, "ffmpeg compatibility dummy option", ""},
3028     { NULL, },
3029 };
3030
3031 static void show_usage(void)
3032 {
3033     printf("Simple media player\n");
3034     printf("usage: ffplay [options] input_file\n");
3035     printf("\n");
3036 }
3037
3038 static void show_help(void)
3039 {
3040     av_log_set_callback(log_callback_help);
3041     show_usage();
3042     show_help_options(options, "Main options:\n",
3043                       OPT_EXPERT, 0);
3044     show_help_options(options, "\nAdvanced options:\n",
3045                       OPT_EXPERT, OPT_EXPERT);
3046     printf("\n");
3047     av_opt_show2(avcodec_opts[0], NULL,
3048                  AV_OPT_FLAG_DECODING_PARAM, 0);
3049     printf("\n");
3050     av_opt_show2(avformat_opts, NULL,
3051                  AV_OPT_FLAG_DECODING_PARAM, 0);
3052 #if !CONFIG_AVFILTER
3053     printf("\n");
3054     av_opt_show2(sws_opts, NULL,
3055                  AV_OPT_FLAG_ENCODING_PARAM, 0);
3056 #endif
3057     printf("\nWhile playing:\n"
3058            "q, ESC              quit\n"
3059            "f                   toggle full screen\n"
3060            "p, SPC              pause\n"
3061            "a                   cycle audio channel\n"
3062            "v                   cycle video channel\n"
3063            "t                   cycle subtitle channel\n"
3064            "w                   show audio waves\n"
3065            "s                   activate frame-step mode\n"
3066            "left/right          seek backward/forward 10 seconds\n"
3067            "down/up             seek backward/forward 1 minute\n"
3068            "mouse click         seek to percentage in file corresponding to fraction of width\n"
3069            );
3070 }
3071
3072 static void opt_input_file(const char *filename)
3073 {
3074     if (input_filename) {
3075         fprintf(stderr, "Argument '%s' provided as input filename, but '%s' was already specified.\n",
3076                 filename, input_filename);
3077         exit(1);
3078     }
3079     if (!strcmp(filename, "-"))
3080         filename = "pipe:";
3081     input_filename = filename;
3082 }
3083
3084 /* program entry point */
3085 int main(int argc, char **argv)
3086 {
3087     int flags;
3088
3089     av_log_set_flags(AV_LOG_SKIP_REPEATED);
3090
3091     /* register all codecs, demuxers and protocols */
3092     avcodec_register_all();
3093 #if CONFIG_AVDEVICE
3094     avdevice_register_all();
3095 #endif
3096 #if CONFIG_AVFILTER
3097     avfilter_register_all();
3098 #endif
3099     av_register_all();
3100
3101     init_opts();
3102
3103     show_banner();
3104
3105     parse_options(argc, argv, options, opt_input_file);
3106
3107     if (!input_filename) {
3108         show_usage();
3109         fprintf(stderr, "An input file must be specified\n");
3110         fprintf(stderr, "Use -h to get full help or, even better, run 'man ffplay'\n");
3111         exit(1);
3112     }
3113
3114     if (display_disable) {
3115         video_disable = 1;
3116     }
3117     flags = SDL_INIT_VIDEO | SDL_INIT_AUDIO | SDL_INIT_TIMER;
3118 #if !defined(__MINGW32__) && !defined(__APPLE__)
3119     flags |= SDL_INIT_EVENTTHREAD; /* Not supported on Windows or Mac OS X */
3120 #endif
3121     if (SDL_Init (flags)) {
3122         fprintf(stderr, "Could not initialize SDL - %s\n", SDL_GetError());
3123         exit(1);
3124     }
3125
3126     if (!display_disable) {
3127 #if HAVE_SDL_VIDEO_SIZE
3128         const SDL_VideoInfo *vi = SDL_GetVideoInfo();
3129         fs_screen_width = vi->current_w;
3130         fs_screen_height = vi->current_h;
3131 #endif
3132     }
3133
3134     SDL_EventState(SDL_ACTIVEEVENT, SDL_IGNORE);
3135     SDL_EventState(SDL_SYSWMEVENT, SDL_IGNORE);
3136     SDL_EventState(SDL_USEREVENT, SDL_IGNORE);
3137
3138     av_init_packet(&flush_pkt);
3139     flush_pkt.data= "FLUSH";
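         /* flush_pkt is a sentinel: its data pointer is compared against queued
            packets, and when a decoder thread pops it after a seek it calls
            avcodec_flush_buffers() instead of decoding. */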
3140
3141     cur_stream = stream_open(input_filename, file_iformat);
3142
3143     event_loop();
3144
3145     /* never returns */
3146
3147     return 0;
3148 }