1 /*
2  * FFplay: Simple Media Player based on the FFmpeg libraries
3  * Copyright (c) 2003 Fabrice Bellard
4  *
5  * This file is part of FFmpeg.
6  *
7  * FFmpeg is free software; you can redistribute it and/or
8  * modify it under the terms of the GNU Lesser General Public
9  * License as published by the Free Software Foundation; either
10  * version 2.1 of the License, or (at your option) any later version.
11  *
12  * FFmpeg is distributed in the hope that it will be useful,
13  * but WITHOUT ANY WARRANTY; without even the implied warranty of
14  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
15  * Lesser General Public License for more details.
16  *
17  * You should have received a copy of the GNU Lesser General Public
18  * License along with FFmpeg; if not, write to the Free Software
19  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
20  */
21
22 #define _XOPEN_SOURCE 600
23
24 #include "config.h"
25 #include <inttypes.h>
26 #include <math.h>
27 #include <limits.h>
28 #include "libavutil/avstring.h"
29 #include "libavutil/colorspace.h"
30 #include "libavutil/pixdesc.h"
31 #include "libavutil/imgutils.h"
32 #include "libavutil/parseutils.h"
33 #include "libavutil/samplefmt.h"
34 #include "libavutil/avassert.h"
35 #include "libavformat/avformat.h"
36 #include "libavdevice/avdevice.h"
37 #include "libswscale/swscale.h"
38 #include "libavcodec/audioconvert.h"
39 #include "libavutil/opt.h"
40 #include "libavcodec/avfft.h"
41
42 #if CONFIG_AVFILTER
43 # include "libavfilter/avfilter.h"
44 # include "libavfilter/avfiltergraph.h"
45 #endif
46
47 #include "cmdutils.h"
48
49 #include <SDL.h>
50 #include <SDL_thread.h>
51
52 #ifdef __MINGW32__
53 #undef main /* We don't want SDL to override our main() */
54 #endif
55
56 #include <unistd.h>
57 #include <assert.h>
58
59 const char program_name[] = "FFplay";
60 const int program_birth_year = 2003;
61
62 //#define DEBUG
63 //#define DEBUG_SYNC
64
65 #define MAX_QUEUE_SIZE (15 * 1024 * 1024)
66 #define MIN_AUDIOQ_SIZE (20 * 16 * 1024)
67 #define MIN_FRAMES 5
68
69 /* SDL audio buffer size, in samples. Should be small to have precise
70    A/V sync as SDL does not have hardware buffer fullness info. */
71 #define SDL_AUDIO_BUFFER_SIZE 1024
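/*
 * Illustrative sketch, not part of the original file: the callback period
 * implied by SDL_AUDIO_BUFFER_SIZE bounds how precisely the audio clock can be
 * known, which is why a small buffer is preferred here.  At 44100 Hz, 1024
 * samples correspond to roughly 23 ms between callbacks.
 */
static inline double example_sdl_audio_callback_period(int sample_rate)
{
    /* seconds covered by one SDL audio callback of SDL_AUDIO_BUFFER_SIZE samples */
    return (double)SDL_AUDIO_BUFFER_SIZE / sample_rate;   /* 1024 / 44100 ~= 0.023 s */
}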
72
73 /* no AV sync correction is done if below the AV sync threshold */
74 #define AV_SYNC_THRESHOLD 0.01
75 /* no AV correction is done if the error is too big */
76 #define AV_NOSYNC_THRESHOLD 10.0
77
78 #define FRAME_SKIP_FACTOR 0.05
79
80 /* maximum audio speed change to get correct sync */
81 #define SAMPLE_CORRECTION_PERCENT_MAX 10
82
83 /* we use about AUDIO_DIFF_AVG_NB A-V differences to make the average */
84 #define AUDIO_DIFF_AVG_NB   20
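/*
 * Illustrative sketch, not part of the original file: the A-V difference is
 * smoothed with an exponential moving average weighted over roughly
 * AUDIO_DIFF_AVG_NB measurements.  Assumption: the coefficient is derived
 * elsewhere in ffplay as exp(log(0.01) / AUDIO_DIFF_AVG_NB), i.e. a sample's
 * weight decays to 1% after AUDIO_DIFF_AVG_NB updates.
 */
static inline double example_update_avg_diff(double *diff_cum, double coef, double diff)
{
    *diff_cum = diff + coef * *diff_cum;      /* accumulate with exponential decay */
    return *diff_cum * (1.0 - coef);          /* normalized average of the recent diffs */
}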
85
86 /* NOTE: the size must be big enough to compensate for the hardware audio buffer size */
87 #define SAMPLE_ARRAY_SIZE (2*65536)
88
89 static int sws_flags = SWS_BICUBIC;
90
91 typedef struct PacketQueue {
92     AVPacketList *first_pkt, *last_pkt;
93     int nb_packets;
94     int size;
95     int abort_request;
96     SDL_mutex *mutex;
97     SDL_cond *cond;
98 } PacketQueue;
99
100 #define VIDEO_PICTURE_QUEUE_SIZE 2
101 #define SUBPICTURE_QUEUE_SIZE 4
102
103 typedef struct VideoPicture {
104     double pts;                                  ///<presentation time stamp for this picture
105     double target_clock;                         ///<av_gettime() time at which this should be displayed ideally
106     int64_t pos;                                 ///<byte position in file
107     SDL_Overlay *bmp;
108     int width, height; /* source height & width */
109     int allocated;
110     enum PixelFormat pix_fmt;
111
112 #if CONFIG_AVFILTER
113     AVFilterBufferRef *picref;
114 #endif
115 } VideoPicture;
116
117 typedef struct SubPicture {
118     double pts; /* presentation time stamp for this picture */
119     AVSubtitle sub;
120 } SubPicture;
121
122 enum {
123     AV_SYNC_AUDIO_MASTER, /* default choice */
124     AV_SYNC_VIDEO_MASTER,
125     AV_SYNC_EXTERNAL_CLOCK, /* synchronize to an external clock */
126 };
127
128 typedef struct VideoState {
129     SDL_Thread *parse_tid;
130     SDL_Thread *video_tid;
131     SDL_Thread *refresh_tid;
132     AVInputFormat *iformat;
133     int no_background;
134     int abort_request;
135     int paused;
136     int last_paused;
137     int seek_req;
138     int seek_flags;
139     int64_t seek_pos;
140     int64_t seek_rel;
141     int read_pause_return;
142     AVFormatContext *ic;
143
144     int audio_stream;
145
146     int av_sync_type;
147     double external_clock; /* external clock base */
148     int64_t external_clock_time;
149
150     double audio_clock;
151     double audio_diff_cum; /* used for AV difference average computation */
152     double audio_diff_avg_coef;
153     double audio_diff_threshold;
154     int audio_diff_avg_count;
155     AVStream *audio_st;
156     PacketQueue audioq;
157     int audio_hw_buf_size;
158     /* samples output by the codec. We reserve extra space for A/V sync
159        compensation. */
160     DECLARE_ALIGNED(16,uint8_t,audio_buf1)[(AVCODEC_MAX_AUDIO_FRAME_SIZE * 3) / 2];
161     DECLARE_ALIGNED(16,uint8_t,audio_buf2)[(AVCODEC_MAX_AUDIO_FRAME_SIZE * 3) / 2];
162     uint8_t *audio_buf;
163     unsigned int audio_buf_size; /* in bytes */
164     int audio_buf_index; /* in bytes */
165     AVPacket audio_pkt_temp;
166     AVPacket audio_pkt;
167     enum AVSampleFormat audio_src_fmt;
168     AVAudioConvert *reformat_ctx;
169
170     enum {
171         SHOW_MODE_VIDEO = 0, SHOW_MODE_WAVES, SHOW_MODE_RDFT, SHOW_MODE_NB
172     } show_mode;
173     int16_t sample_array[SAMPLE_ARRAY_SIZE];
174     int sample_array_index;
175     int last_i_start;
176     RDFTContext *rdft;
177     int rdft_bits;
178     FFTSample *rdft_data;
179     int xpos;
180
181     SDL_Thread *subtitle_tid;
182     int subtitle_stream;
183     int subtitle_stream_changed;
184     AVStream *subtitle_st;
185     PacketQueue subtitleq;
186     SubPicture subpq[SUBPICTURE_QUEUE_SIZE];
187     int subpq_size, subpq_rindex, subpq_windex;
188     SDL_mutex *subpq_mutex;
189     SDL_cond *subpq_cond;
190
191     double frame_timer;
192     double frame_last_pts;
193     double frame_last_delay;
194     double video_clock;                          ///<pts of last decoded frame / predicted pts of next decoded frame
195     int video_stream;
196     AVStream *video_st;
197     PacketQueue videoq;
198     double video_current_pts;                    ///<current displayed pts (different from video_clock if frame fifos are used)
199     double video_current_pts_drift;              ///<video_current_pts - time (av_gettime) at which we updated video_current_pts - used to have running video pts
200     int64_t video_current_pos;                   ///<current displayed file pos
201     VideoPicture pictq[VIDEO_PICTURE_QUEUE_SIZE];
202     int pictq_size, pictq_rindex, pictq_windex;
203     SDL_mutex *pictq_mutex;
204     SDL_cond *pictq_cond;
205 #if !CONFIG_AVFILTER
206     struct SwsContext *img_convert_ctx;
207 #endif
208
209     //    QETimer *video_timer;
210     char filename[1024];
211     int width, height, xleft, ytop;
212
213 #if CONFIG_AVFILTER
214     AVFilterContext *out_video_filter;          ///<the last filter in the video chain
215 #endif
216
217     float skip_frames;
218     float skip_frames_index;
219     int refresh;
220 } VideoState;
221
222 static void show_help(void);
223
224 /* options specified by the user */
225 static AVInputFormat *file_iformat;
226 static const char *input_filename;
227 static const char *window_title;
228 static int fs_screen_width;
229 static int fs_screen_height;
230 static int screen_width = 0;
231 static int screen_height = 0;
232 static int frame_width = 0;
233 static int frame_height = 0;
234 static enum PixelFormat frame_pix_fmt = PIX_FMT_NONE;
235 static int audio_disable;
236 static int video_disable;
237 static int wanted_stream[AVMEDIA_TYPE_NB]={
238     [AVMEDIA_TYPE_AUDIO]=-1,
239     [AVMEDIA_TYPE_VIDEO]=-1,
240     [AVMEDIA_TYPE_SUBTITLE]=-1,
241 };
242 static int seek_by_bytes=-1;
243 static int display_disable;
244 static int show_status = 1;
245 static int av_sync_type = AV_SYNC_AUDIO_MASTER;
246 static int64_t start_time = AV_NOPTS_VALUE;
247 static int64_t duration = AV_NOPTS_VALUE;
248 static int debug = 0;
249 static int debug_mv = 0;
250 static int step = 0;
251 static int thread_count = 1;
252 static int workaround_bugs = 1;
253 static int fast = 0;
254 static int genpts = 0;
255 static int lowres = 0;
256 static int idct = FF_IDCT_AUTO;
257 static enum AVDiscard skip_frame= AVDISCARD_DEFAULT;
258 static enum AVDiscard skip_idct= AVDISCARD_DEFAULT;
259 static enum AVDiscard skip_loop_filter= AVDISCARD_DEFAULT;
260 static int error_recognition = FF_ER_CAREFUL;
261 static int error_concealment = 3;
262 static int decoder_reorder_pts= -1;
263 static int autoexit;
264 static int exit_on_keydown;
265 static int exit_on_mousedown;
266 static int loop=1;
267 static int framedrop=1;
268 static int show_mode = SHOW_MODE_VIDEO;
269
270 static int rdftspeed=20;
271 #if CONFIG_AVFILTER
272 static char *vfilters = NULL;
273 #endif
274
275 /* current context */
276 static int is_full_screen;
277 static VideoState *cur_stream;
278 static int64_t audio_callback_time;
279
280 static AVPacket flush_pkt;
281
282 #define FF_ALLOC_EVENT   (SDL_USEREVENT)
283 #define FF_REFRESH_EVENT (SDL_USEREVENT + 1)
284 #define FF_QUIT_EVENT    (SDL_USEREVENT + 2)
285
286 static SDL_Surface *screen;
287
288 static int packet_queue_put(PacketQueue *q, AVPacket *pkt)
289 {
290     AVPacketList *pkt1;
291
292     /* duplicate the packet */
293     if (pkt!=&flush_pkt && av_dup_packet(pkt) < 0)
294         return -1;
295
296     pkt1 = av_malloc(sizeof(AVPacketList));
297     if (!pkt1)
298         return -1;
299     pkt1->pkt = *pkt;
300     pkt1->next = NULL;
301
302
303     SDL_LockMutex(q->mutex);
304
305     if (!q->last_pkt)
306
307         q->first_pkt = pkt1;
308     else
309         q->last_pkt->next = pkt1;
310     q->last_pkt = pkt1;
311     q->nb_packets++;
312     q->size += pkt1->pkt.size + sizeof(*pkt1);
313     /* XXX: should duplicate packet data in DV case */
314     SDL_CondSignal(q->cond);
315
316     SDL_UnlockMutex(q->mutex);
317     return 0;
318 }
319
320 /* packet queue handling */
321 static void packet_queue_init(PacketQueue *q)
322 {
323     memset(q, 0, sizeof(PacketQueue));
324     q->mutex = SDL_CreateMutex();
325     q->cond = SDL_CreateCond();
326     packet_queue_put(q, &flush_pkt);
327 }
328
329 static void packet_queue_flush(PacketQueue *q)
330 {
331     AVPacketList *pkt, *pkt1;
332
333     SDL_LockMutex(q->mutex);
334     for(pkt = q->first_pkt; pkt != NULL; pkt = pkt1) {
335         pkt1 = pkt->next;
336         av_free_packet(&pkt->pkt);
337         av_freep(&pkt);
338     }
339     q->last_pkt = NULL;
340     q->first_pkt = NULL;
341     q->nb_packets = 0;
342     q->size = 0;
343     SDL_UnlockMutex(q->mutex);
344 }
345
346 static void packet_queue_end(PacketQueue *q)
347 {
348     packet_queue_flush(q);
349     SDL_DestroyMutex(q->mutex);
350     SDL_DestroyCond(q->cond);
351 }
352
353 static void packet_queue_abort(PacketQueue *q)
354 {
355     SDL_LockMutex(q->mutex);
356
357     q->abort_request = 1;
358
359     SDL_CondSignal(q->cond);
360
361     SDL_UnlockMutex(q->mutex);
362 }
363
364 /* return < 0 if aborted, 0 if no packet and > 0 if packet.  */
365 static int packet_queue_get(PacketQueue *q, AVPacket *pkt, int block)
366 {
367     AVPacketList *pkt1;
368     int ret;
369
370     SDL_LockMutex(q->mutex);
371
372     for(;;) {
373         if (q->abort_request) {
374             ret = -1;
375             break;
376         }
377
378         pkt1 = q->first_pkt;
379         if (pkt1) {
380             q->first_pkt = pkt1->next;
381             if (!q->first_pkt)
382                 q->last_pkt = NULL;
383             q->nb_packets--;
384             q->size -= pkt1->pkt.size + sizeof(*pkt1);
385             *pkt = pkt1->pkt;
386             av_free(pkt1);
387             ret = 1;
388             break;
389         } else if (!block) {
390             ret = 0;
391             break;
392         } else {
393             SDL_CondWait(q->cond, q->mutex);
394         }
395     }
396     SDL_UnlockMutex(q->mutex);
397     return ret;
398 }
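/*
 * Illustrative usage sketch, not part of the original file: a decoder thread
 * typically blocks on packet_queue_get() and stops when the queue is aborted.
 * The consume_packet() callback here is hypothetical and stands in for actual
 * decoding work.
 */
static void example_drain_queue(PacketQueue *q, void (*consume_packet)(AVPacket *))
{
    AVPacket pkt;
    for (;;) {
        if (packet_queue_get(q, &pkt, 1) <= 0)
            break;                              /* aborted (< 0); 0 cannot happen when blocking */
        if (pkt.data != flush_pkt.data)         /* flush packets carry no payload */
            consume_packet(&pkt);
        av_free_packet(&pkt);                   /* release the duplicated packet data */
    }
}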
399
400 static inline void fill_rectangle(SDL_Surface *screen,
401                                   int x, int y, int w, int h, int color)
402 {
403     SDL_Rect rect;
404     rect.x = x;
405     rect.y = y;
406     rect.w = w;
407     rect.h = h;
408     SDL_FillRect(screen, &rect, color);
409 }
410
411 #if 0
412 /* draw only the border of a rectangle */
413 void fill_border(VideoState *s, int x, int y, int w, int h, int color)
414 {
415     int w1, w2, h1, h2;
416
417     /* fill the background */
418     w1 = x;
419     if (w1 < 0)
420         w1 = 0;
421     w2 = s->width - (x + w);
422     if (w2 < 0)
423         w2 = 0;
424     h1 = y;
425     if (h1 < 0)
426         h1 = 0;
427     h2 = s->height - (y + h);
428     if (h2 < 0)
429         h2 = 0;
430     fill_rectangle(screen,
431                    s->xleft, s->ytop,
432                    w1, s->height,
433                    color);
434     fill_rectangle(screen,
435                    s->xleft + s->width - w2, s->ytop,
436                    w2, s->height,
437                    color);
438     fill_rectangle(screen,
439                    s->xleft + w1, s->ytop,
440                    s->width - w1 - w2, h1,
441                    color);
442     fill_rectangle(screen,
443                    s->xleft + w1, s->ytop + s->height - h2,
444                    s->width - w1 - w2, h2,
445                    color);
446 }
447 #endif
448
449 #define ALPHA_BLEND(a, oldp, newp, s)\
450 ((((oldp << s) * (255 - (a))) + (newp * (a))) / (255 << s))
451
452 #define RGBA_IN(r, g, b, a, s)\
453 {\
454     unsigned int v = ((const uint32_t *)(s))[0];\
455     a = (v >> 24) & 0xff;\
456     r = (v >> 16) & 0xff;\
457     g = (v >> 8) & 0xff;\
458     b = v & 0xff;\
459 }
460
461 #define YUVA_IN(y, u, v, a, s, pal)\
462 {\
463     unsigned int val = ((const uint32_t *)(pal))[*(const uint8_t*)(s)];\
464     a = (val >> 24) & 0xff;\
465     y = (val >> 16) & 0xff;\
466     u = (val >> 8) & 0xff;\
467     v = val & 0xff;\
468 }
469
470 #define YUVA_OUT(d, y, u, v, a)\
471 {\
472     ((uint32_t *)(d))[0] = (a << 24) | (y << 16) | (u << 8) | v;\
473 }
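/*
 * Illustrative check, not part of the original file: ALPHA_BLEND(a, oldp, newp, s)
 * mixes oldp and newp by alpha a, where s pre-scales newp when it is a sum of
 * several samples.  For s = 0: a = 0 keeps oldp, a = 255 gives newp, and
 * a = 128 with oldp = 16, newp = 235 yields (16*127 + 235*128) / 255 = 125.
 */
static inline int example_alpha_blend_check(void)
{
    /* fully opaque keeps the new pixel, fully transparent keeps the old one */
    return ALPHA_BLEND(255, 16, 235, 0) == 235 && ALPHA_BLEND(0, 16, 235, 0) == 16;
}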
474
475
476 #define BPP 1
477
478 static void blend_subrect(AVPicture *dst, const AVSubtitleRect *rect, int imgw, int imgh)
479 {
480     int wrap, wrap3, width2, skip2;
481     int y, u, v, a, u1, v1, a1, w, h;
482     uint8_t *lum, *cb, *cr;
483     const uint8_t *p;
484     const uint32_t *pal;
485     int dstx, dsty, dstw, dsth;
486
487     dstw = av_clip(rect->w, 0, imgw);
488     dsth = av_clip(rect->h, 0, imgh);
489     dstx = av_clip(rect->x, 0, imgw - dstw);
490     dsty = av_clip(rect->y, 0, imgh - dsth);
491     lum = dst->data[0] + dsty * dst->linesize[0];
492     cb = dst->data[1] + (dsty >> 1) * dst->linesize[1];
493     cr = dst->data[2] + (dsty >> 1) * dst->linesize[2];
494
495     width2 = ((dstw + 1) >> 1) + (dstx & ~dstw & 1);
496     skip2 = dstx >> 1;
497     wrap = dst->linesize[0];
498     wrap3 = rect->pict.linesize[0];
499     p = rect->pict.data[0];
500     pal = (const uint32_t *)rect->pict.data[1];  /* Now in YCrCb! */
501
502     if (dsty & 1) {
503         lum += dstx;
504         cb += skip2;
505         cr += skip2;
506
507         if (dstx & 1) {
508             YUVA_IN(y, u, v, a, p, pal);
509             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
510             cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
511             cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
512             cb++;
513             cr++;
514             lum++;
515             p += BPP;
516         }
517         for(w = dstw - (dstx & 1); w >= 2; w -= 2) {
518             YUVA_IN(y, u, v, a, p, pal);
519             u1 = u;
520             v1 = v;
521             a1 = a;
522             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
523
524             YUVA_IN(y, u, v, a, p + BPP, pal);
525             u1 += u;
526             v1 += v;
527             a1 += a;
528             lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
529             cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
530             cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
531             cb++;
532             cr++;
533             p += 2 * BPP;
534             lum += 2;
535         }
536         if (w) {
537             YUVA_IN(y, u, v, a, p, pal);
538             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
539             cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
540             cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
541             p++;
542             lum++;
543         }
544         p += wrap3 - dstw * BPP;
545         lum += wrap - dstw - dstx;
546         cb += dst->linesize[1] - width2 - skip2;
547         cr += dst->linesize[2] - width2 - skip2;
548     }
549     for(h = dsth - (dsty & 1); h >= 2; h -= 2) {
550         lum += dstx;
551         cb += skip2;
552         cr += skip2;
553
554         if (dstx & 1) {
555             YUVA_IN(y, u, v, a, p, pal);
556             u1 = u;
557             v1 = v;
558             a1 = a;
559             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
560             p += wrap3;
561             lum += wrap;
562             YUVA_IN(y, u, v, a, p, pal);
563             u1 += u;
564             v1 += v;
565             a1 += a;
566             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
567             cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
568             cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
569             cb++;
570             cr++;
571             p += -wrap3 + BPP;
572             lum += -wrap + 1;
573         }
574         for(w = dstw - (dstx & 1); w >= 2; w -= 2) {
575             YUVA_IN(y, u, v, a, p, pal);
576             u1 = u;
577             v1 = v;
578             a1 = a;
579             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
580
581             YUVA_IN(y, u, v, a, p + BPP, pal);
582             u1 += u;
583             v1 += v;
584             a1 += a;
585             lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
586             p += wrap3;
587             lum += wrap;
588
589             YUVA_IN(y, u, v, a, p, pal);
590             u1 += u;
591             v1 += v;
592             a1 += a;
593             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
594
595             YUVA_IN(y, u, v, a, p + BPP, pal);
596             u1 += u;
597             v1 += v;
598             a1 += a;
599             lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
600
601             cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 2);
602             cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 2);
603
604             cb++;
605             cr++;
606             p += -wrap3 + 2 * BPP;
607             lum += -wrap + 2;
608         }
609         if (w) {
610             YUVA_IN(y, u, v, a, p, pal);
611             u1 = u;
612             v1 = v;
613             a1 = a;
614             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
615             p += wrap3;
616             lum += wrap;
617             YUVA_IN(y, u, v, a, p, pal);
618             u1 += u;
619             v1 += v;
620             a1 += a;
621             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
622             cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
623             cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
624             cb++;
625             cr++;
626             p += -wrap3 + BPP;
627             lum += -wrap + 1;
628         }
629         p += wrap3 + (wrap3 - dstw * BPP);
630         lum += wrap + (wrap - dstw - dstx);
631         cb += dst->linesize[1] - width2 - skip2;
632         cr += dst->linesize[2] - width2 - skip2;
633     }
634     /* handle odd height */
635     if (h) {
636         lum += dstx;
637         cb += skip2;
638         cr += skip2;
639
640         if (dstx & 1) {
641             YUVA_IN(y, u, v, a, p, pal);
642             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
643             cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
644             cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
645             cb++;
646             cr++;
647             lum++;
648             p += BPP;
649         }
650         for(w = dstw - (dstx & 1); w >= 2; w -= 2) {
651             YUVA_IN(y, u, v, a, p, pal);
652             u1 = u;
653             v1 = v;
654             a1 = a;
655             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
656
657             YUVA_IN(y, u, v, a, p + BPP, pal);
658             u1 += u;
659             v1 += v;
660             a1 += a;
661             lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
662             cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
663             cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
664             cb++;
665             cr++;
666             p += 2 * BPP;
667             lum += 2;
668         }
669         if (w) {
670             YUVA_IN(y, u, v, a, p, pal);
671             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
672             cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
673             cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
674         }
675     }
676 }
677
678 static void free_subpicture(SubPicture *sp)
679 {
680     avsubtitle_free(&sp->sub);
681 }
682
683 static void video_image_display(VideoState *is)
684 {
685     VideoPicture *vp;
686     SubPicture *sp;
687     AVPicture pict;
688     float aspect_ratio;
689     int width, height, x, y;
690     SDL_Rect rect;
691     int i;
692
693     vp = &is->pictq[is->pictq_rindex];
694     if (vp->bmp) {
695 #if CONFIG_AVFILTER
696          if (vp->picref->video->pixel_aspect.num == 0)
697              aspect_ratio = 0;
698          else
699              aspect_ratio = av_q2d(vp->picref->video->pixel_aspect);
700 #else
701
702         /* XXX: use variable in the frame */
703         if (is->video_st->sample_aspect_ratio.num)
704             aspect_ratio = av_q2d(is->video_st->sample_aspect_ratio);
705         else if (is->video_st->codec->sample_aspect_ratio.num)
706             aspect_ratio = av_q2d(is->video_st->codec->sample_aspect_ratio);
707         else
708             aspect_ratio = 0;
709 #endif
710         if (aspect_ratio <= 0.0)
711             aspect_ratio = 1.0;
712         aspect_ratio *= (float)vp->width / (float)vp->height;
713
714         if (is->subtitle_st) {
715             if (is->subpq_size > 0) {
716                 sp = &is->subpq[is->subpq_rindex];
717
718                 if (vp->pts >= sp->pts + ((float) sp->sub.start_display_time / 1000)) {
719                     SDL_LockYUVOverlay (vp->bmp);
720
721                     pict.data[0] = vp->bmp->pixels[0];
722                     pict.data[1] = vp->bmp->pixels[2];
723                     pict.data[2] = vp->bmp->pixels[1];
724
725                     pict.linesize[0] = vp->bmp->pitches[0];
726                     pict.linesize[1] = vp->bmp->pitches[2];
727                     pict.linesize[2] = vp->bmp->pitches[1];
728
729                     for (i = 0; i < sp->sub.num_rects; i++)
730                         blend_subrect(&pict, sp->sub.rects[i],
731                                       vp->bmp->w, vp->bmp->h);
732
733                     SDL_UnlockYUVOverlay (vp->bmp);
734                 }
735             }
736         }
737
738
739         /* XXX: we assume the screen has a 1:1 pixel aspect ratio */
740         height = is->height;
741         width = ((int)rint(height * aspect_ratio)) & ~1;
742         if (width > is->width) {
743             width = is->width;
744             height = ((int)rint(width / aspect_ratio)) & ~1;
745         }
746         x = (is->width - width) / 2;
747         y = (is->height - height) / 2;
748         if (!is->no_background) {
749             /* fill the background */
750             //            fill_border(is, x, y, width, height, QERGB(0x00, 0x00, 0x00));
751         } else {
752             is->no_background = 0;
753         }
754         rect.x = is->xleft + x;
755         rect.y = is->ytop  + y;
756         rect.w = width;
757         rect.h = height;
758         SDL_DisplayYUVOverlay(vp->bmp, &rect);
759     } else {
760 #if 0
761         fill_rectangle(screen,
762                        is->xleft, is->ytop, is->width, is->height,
763                        QERGB(0x00, 0x00, 0x00));
764 #endif
765     }
766 }
767
768 /* get the amount of data remaining in the current audio output buffer, in
769    bytes. With SDL we cannot get precise hardware buffer fullness information */
770 static int audio_write_get_buf_size(VideoState *is)
771 {
772     return is->audio_buf_size - is->audio_buf_index;
773 }
774
775 static inline int compute_mod(int a, int b)
776 {
777     return a < 0 ? a%b + b : a%b;
778 }
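/*
 * Illustrative check, not part of the original file: compute_mod() always
 * returns a non-negative result, unlike the C '%' operator for negative
 * operands, which makes it suitable for wrapping indices into sample_array.
 */
static inline int example_compute_mod_check(void)
{
    return compute_mod(-3, 10) == 7 && compute_mod(7, 5) == 2;
}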
779
780 static void video_audio_display(VideoState *s)
781 {
782     int i, i_start, x, y1, y, ys, delay, n, nb_display_channels;
783     int ch, channels, h, h2, bgcolor, fgcolor;
784     int64_t time_diff;
785     int rdft_bits, nb_freq;
786
787     for(rdft_bits=1; (1<<rdft_bits)<2*s->height; rdft_bits++)
788         ;
789     nb_freq= 1<<(rdft_bits-1);
790
791     /* compute display index: center on currently output samples */
792     channels = s->audio_st->codec->channels;
793     nb_display_channels = channels;
794     if (!s->paused) {
795         int data_used= s->show_mode == SHOW_MODE_WAVES ? s->width : (2*nb_freq);
796         n = 2 * channels;
797         delay = audio_write_get_buf_size(s);
798         delay /= n;
799
800         /* to be more precise, we take into account the time spent since
801            the last buffer computation */
802         if (audio_callback_time) {
803             time_diff = av_gettime() - audio_callback_time;
804             delay -= (time_diff * s->audio_st->codec->sample_rate) / 1000000;
805         }
806
807         delay += 2*data_used;
808         if (delay < data_used)
809             delay = data_used;
810
811         i_start= x = compute_mod(s->sample_array_index - delay * channels, SAMPLE_ARRAY_SIZE);
812         if (s->show_mode == SHOW_MODE_WAVES) {
813             h= INT_MIN;
814             for(i=0; i<1000; i+=channels){
815                 int idx= (SAMPLE_ARRAY_SIZE + x - i) % SAMPLE_ARRAY_SIZE;
816                 int a= s->sample_array[idx];
817                 int b= s->sample_array[(idx + 4*channels)%SAMPLE_ARRAY_SIZE];
818                 int c= s->sample_array[(idx + 5*channels)%SAMPLE_ARRAY_SIZE];
819                 int d= s->sample_array[(idx + 9*channels)%SAMPLE_ARRAY_SIZE];
820                 int score= a-d;
821                 if(h<score && (b^c)<0){
822                     h= score;
823                     i_start= idx;
824                 }
825             }
826         }
827
828         s->last_i_start = i_start;
829     } else {
830         i_start = s->last_i_start;
831     }
832
833     bgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0x00);
834     if (s->show_mode == SHOW_MODE_WAVES) {
835         fill_rectangle(screen,
836                        s->xleft, s->ytop, s->width, s->height,
837                        bgcolor);
838
839         fgcolor = SDL_MapRGB(screen->format, 0xff, 0xff, 0xff);
840
841         /* total height for one channel */
842         h = s->height / nb_display_channels;
843         /* graph height / 2 */
844         h2 = (h * 9) / 20;
845         for(ch = 0;ch < nb_display_channels; ch++) {
846             i = i_start + ch;
847             y1 = s->ytop + ch * h + (h / 2); /* position of center line */
848             for(x = 0; x < s->width; x++) {
849                 y = (s->sample_array[i] * h2) >> 15;
850                 if (y < 0) {
851                     y = -y;
852                     ys = y1 - y;
853                 } else {
854                     ys = y1;
855                 }
856                 fill_rectangle(screen,
857                                s->xleft + x, ys, 1, y,
858                                fgcolor);
859                 i += channels;
860                 if (i >= SAMPLE_ARRAY_SIZE)
861                     i -= SAMPLE_ARRAY_SIZE;
862             }
863         }
864
865         fgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0xff);
866
867         for(ch = 1;ch < nb_display_channels; ch++) {
868             y = s->ytop + ch * h;
869             fill_rectangle(screen,
870                            s->xleft, y, s->width, 1,
871                            fgcolor);
872         }
873         SDL_UpdateRect(screen, s->xleft, s->ytop, s->width, s->height);
874     }else{
875         nb_display_channels= FFMIN(nb_display_channels, 2);
876         if(rdft_bits != s->rdft_bits){
877             av_rdft_end(s->rdft);
878             av_free(s->rdft_data);
879             s->rdft = av_rdft_init(rdft_bits, DFT_R2C);
880             s->rdft_bits= rdft_bits;
881             s->rdft_data= av_malloc(4*nb_freq*sizeof(*s->rdft_data));
882         }
883         {
884             FFTSample *data[2];
885             for(ch = 0;ch < nb_display_channels; ch++) {
886                 data[ch] = s->rdft_data + 2*nb_freq*ch;
887                 i = i_start + ch;
888                 for(x = 0; x < 2*nb_freq; x++) {
889                     double w= (x-nb_freq)*(1.0/nb_freq);
890                     data[ch][x]= s->sample_array[i]*(1.0-w*w);
891                     i += channels;
892                     if (i >= SAMPLE_ARRAY_SIZE)
893                         i -= SAMPLE_ARRAY_SIZE;
894                 }
895                 av_rdft_calc(s->rdft, data[ch]);
896             }
897             // least efficient way to do this; we could access the data directly, but it's more than fast enough
898             for(y=0; y<s->height; y++){
899                 double w= 1/sqrt(nb_freq);
900                 int a= sqrt(w*sqrt(data[0][2*y+0]*data[0][2*y+0] + data[0][2*y+1]*data[0][2*y+1]));
901                 int b= (nb_display_channels == 2 ) ? sqrt(w*sqrt(data[1][2*y+0]*data[1][2*y+0]
902                        + data[1][2*y+1]*data[1][2*y+1])) : a;
903                 a= FFMIN(a,255);
904                 b= FFMIN(b,255);
905                 fgcolor = SDL_MapRGB(screen->format, a, b, (a+b)/2);
906
907                 fill_rectangle(screen,
908                             s->xpos, s->height-y, 1, 1,
909                             fgcolor);
910             }
911         }
912         SDL_UpdateRect(screen, s->xpos, s->ytop, 1, s->height);
913         s->xpos++;
914         if(s->xpos >= s->width)
915             s->xpos= s->xleft;
916     }
917 }
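/*
 * Illustrative sketch, not part of the original file: video_audio_display()
 * picks the smallest rdft_bits with (1 << rdft_bits) >= 2 * height, so for a
 * 480-pixel-high window rdft_bits = 10 and nb_freq = 1 << 9 = 512 frequency bins.
 */
static inline int example_rdft_bits_for_height(int height)
{
    int rdft_bits;
    for (rdft_bits = 1; (1 << rdft_bits) < 2 * height; rdft_bits++)
        ;
    return rdft_bits;    /* example_rdft_bits_for_height(480) == 10 */
}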
918
919 static int video_open(VideoState *is){
920     int flags = SDL_HWSURFACE|SDL_ASYNCBLIT|SDL_HWACCEL;
921     int w,h;
922
923     if(is_full_screen) flags |= SDL_FULLSCREEN;
924     else               flags |= SDL_RESIZABLE;
925
926     if (is_full_screen && fs_screen_width) {
927         w = fs_screen_width;
928         h = fs_screen_height;
929     } else if(!is_full_screen && screen_width){
930         w = screen_width;
931         h = screen_height;
932 #if CONFIG_AVFILTER
933     }else if (is->out_video_filter && is->out_video_filter->inputs[0]){
934         w = is->out_video_filter->inputs[0]->w;
935         h = is->out_video_filter->inputs[0]->h;
936 #else
937     }else if (is->video_st && is->video_st->codec->width){
938         w = is->video_st->codec->width;
939         h = is->video_st->codec->height;
940 #endif
941     } else {
942         w = 640;
943         h = 480;
944     }
945     if(screen && is->width == screen->w && screen->w == w
946        && is->height== screen->h && screen->h == h)
947         return 0;
948
949 #ifndef __APPLE__
950     screen = SDL_SetVideoMode(w, h, 0, flags);
951 #else
952     /* setting bits_per_pixel = 0 or 32 causes blank video on OS X */
953     screen = SDL_SetVideoMode(w, h, 24, flags);
954 #endif
955     if (!screen) {
956         fprintf(stderr, "SDL: could not set video mode - exiting\n");
957         return -1;
958     }
959     if (!window_title)
960         window_title = input_filename;
961     SDL_WM_SetCaption(window_title, window_title);
962
963     is->width = screen->w;
964     is->height = screen->h;
965
966     return 0;
967 }
968
969 /* display the current picture, if any */
970 static void video_display(VideoState *is)
971 {
972     if(!screen)
973         video_open(cur_stream);
974     if (is->audio_st && is->show_mode != SHOW_MODE_VIDEO)
975         video_audio_display(is);
976     else if (is->video_st)
977         video_image_display(is);
978 }
979
980 static int refresh_thread(void *opaque)
981 {
982     VideoState *is= opaque;
983     while(!is->abort_request){
984         SDL_Event event;
985         event.type = FF_REFRESH_EVENT;
986         event.user.data1 = opaque;
987         if(!is->refresh){
988             is->refresh=1;
989             SDL_PushEvent(&event);
990         }
991         //FIXME ideally we should wait for the correct time, but SDL's event passing is so slow it would be silly
992         usleep(is->audio_st && is->show_mode != SHOW_MODE_VIDEO ? rdftspeed*1000 : 5000);
993     }
994     return 0;
995 }
996
997 /* get the current audio clock value */
998 static double get_audio_clock(VideoState *is)
999 {
1000     double pts;
1001     int hw_buf_size, bytes_per_sec;
1002     pts = is->audio_clock;
1003     hw_buf_size = audio_write_get_buf_size(is);
1004     bytes_per_sec = 0;
1005     if (is->audio_st) {
1006         bytes_per_sec = is->audio_st->codec->sample_rate *
1007             2 * is->audio_st->codec->channels;
1008     }
1009     if (bytes_per_sec)
1010         pts -= (double)hw_buf_size / bytes_per_sec;
1011     return pts;
1012 }
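/*
 * Illustrative sketch, not part of the original file: the audio clock is the
 * pts of the last decoded data minus the time represented by the data still
 * waiting in the output buffer.  E.g. for 44100 Hz stereo 16-bit audio,
 * bytes_per_sec = 44100 * 2 * 2 = 176400, so 8192 buffered bytes push the
 * clock back by about 0.046 s.
 */
static inline double example_audio_clock_correction(int buffered_bytes,
                                                    int sample_rate, int channels)
{
    int bytes_per_sec = sample_rate * 2 * channels;   /* 2 bytes per 16-bit sample */
    return (double)buffered_bytes / bytes_per_sec;    /* seconds not yet played out */
}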
1013
1014 /* get the current video clock value */
1015 static double get_video_clock(VideoState *is)
1016 {
1017     if (is->paused) {
1018         return is->video_current_pts;
1019     } else {
1020         return is->video_current_pts_drift + av_gettime() / 1000000.0;
1021     }
1022 }
1023
1024 /* get the current external clock value */
1025 static double get_external_clock(VideoState *is)
1026 {
1027     int64_t ti;
1028     ti = av_gettime();
1029     return is->external_clock + ((ti - is->external_clock_time) * 1e-6);
1030 }
1031
1032 /* get the current master clock value */
1033 static double get_master_clock(VideoState *is)
1034 {
1035     double val;
1036
1037     if (is->av_sync_type == AV_SYNC_VIDEO_MASTER) {
1038         if (is->video_st)
1039             val = get_video_clock(is);
1040         else
1041             val = get_audio_clock(is);
1042     } else if (is->av_sync_type == AV_SYNC_AUDIO_MASTER) {
1043         if (is->audio_st)
1044             val = get_audio_clock(is);
1045         else
1046             val = get_video_clock(is);
1047     } else {
1048         val = get_external_clock(is);
1049     }
1050     return val;
1051 }
1052
1053 /* seek in the stream */
1054 static void stream_seek(VideoState *is, int64_t pos, int64_t rel, int seek_by_bytes)
1055 {
1056     if (!is->seek_req) {
1057         is->seek_pos = pos;
1058         is->seek_rel = rel;
1059         is->seek_flags &= ~AVSEEK_FLAG_BYTE;
1060         if (seek_by_bytes)
1061             is->seek_flags |= AVSEEK_FLAG_BYTE;
1062         is->seek_req = 1;
1063     }
1064 }
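/*
 * Illustrative usage sketch, not part of the original file: a relative seek of
 * +10 seconds expressed in time units, as a keyboard handler might request it.
 * The AV_TIME_BASE scaling is an assumption matching how timestamps are passed
 * when not seeking by bytes.
 */
static void example_seek_forward_10s(VideoState *is)
{
    double target = get_master_clock(is) + 10.0;
    stream_seek(is, (int64_t)(target * AV_TIME_BASE), (int64_t)(10.0 * AV_TIME_BASE), 0);
}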
1065
1066 /* pause or resume the video */
1067 static void stream_toggle_pause(VideoState *is)
1068 {
1069     if (is->paused) {
1070         is->frame_timer += av_gettime() / 1000000.0 + is->video_current_pts_drift - is->video_current_pts;
1071         if(is->read_pause_return != AVERROR(ENOSYS)){
1072             is->video_current_pts = is->video_current_pts_drift + av_gettime() / 1000000.0;
1073         }
1074         is->video_current_pts_drift = is->video_current_pts - av_gettime() / 1000000.0;
1075     }
1076     is->paused = !is->paused;
1077 }
1078
1079 static double compute_target_time(double frame_current_pts, VideoState *is)
1080 {
1081     double delay, sync_threshold, diff;
1082
1083     /* compute nominal delay */
1084     delay = frame_current_pts - is->frame_last_pts;
1085     if (delay <= 0 || delay >= 10.0) {
1086         /* if incorrect delay, use previous one */
1087         delay = is->frame_last_delay;
1088     } else {
1089         is->frame_last_delay = delay;
1090     }
1091     is->frame_last_pts = frame_current_pts;
1092
1093     /* update delay to follow master synchronisation source */
1094     if (((is->av_sync_type == AV_SYNC_AUDIO_MASTER && is->audio_st) ||
1095          is->av_sync_type == AV_SYNC_EXTERNAL_CLOCK)) {
1096         /* if video is slave, we try to correct big delays by
1097            duplicating or deleting a frame */
1098         diff = get_video_clock(is) - get_master_clock(is);
1099
1100         /* skip or repeat frame. We take into account the
1101            delay to compute the threshold. I still don't know
1102            if it is the best guess */
1103         sync_threshold = FFMAX(AV_SYNC_THRESHOLD, delay);
1104         if (fabs(diff) < AV_NOSYNC_THRESHOLD) {
1105             if (diff <= -sync_threshold)
1106                 delay = 0;
1107             else if (diff >= sync_threshold)
1108                 delay = 2 * delay;
1109         }
1110     }
1111     is->frame_timer += delay;
1112 #if defined(DEBUG_SYNC)
1113     printf("video: delay=%0.3f actual_delay=%0.3f pts=%0.3f A-V=%f\n",
1114             delay, actual_delay, frame_current_pts, -diff);
1115 #endif
1116
1117     return is->frame_timer;
1118 }
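/*
 * Illustrative sketch, not part of the original file: a minimal restatement of
 * the sync decision above.  With a nominal frame delay of 0.040 s (25 fps) the
 * threshold is max(AV_SYNC_THRESHOLD, 0.040) = 0.040 s, so a video clock that
 * lags the master by more than 40 ms gets delay 0 (catch up), one that leads
 * by more than 40 ms gets delay 0.080 s (slow down), and smaller drifts are
 * left uncorrected.
 */
static inline double example_sync_adjusted_delay(double delay, double diff)
{
    double sync_threshold = FFMAX(AV_SYNC_THRESHOLD, delay);
    if (fabs(diff) < AV_NOSYNC_THRESHOLD) {
        if (diff <= -sync_threshold)
            return 0;             /* video is late: show the next frame immediately */
        else if (diff >= sync_threshold)
            return 2 * delay;     /* video is early: hold the frame twice as long */
    }
    return delay;                 /* drift too small (or too large) to correct */
}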
1119
1120 /* called to display each frame */
1121 static void video_refresh_timer(void *opaque)
1122 {
1123     VideoState *is = opaque;
1124     VideoPicture *vp;
1125
1126     SubPicture *sp, *sp2;
1127
1128     if (is->video_st) {
1129 retry:
1130         if (is->pictq_size == 0) {
1131             //nothing to do, no picture to display in the queue
1132         } else {
1133             double time= av_gettime()/1000000.0;
1134             double next_target;
1135             /* dequeue the picture */
1136             vp = &is->pictq[is->pictq_rindex];
1137
1138             if(time < vp->target_clock)
1139                 return;
1140             /* update current video pts */
1141             is->video_current_pts = vp->pts;
1142             is->video_current_pts_drift = is->video_current_pts - time;
1143             is->video_current_pos = vp->pos;
1144             if(is->pictq_size > 1){
1145                 VideoPicture *nextvp= &is->pictq[(is->pictq_rindex+1)%VIDEO_PICTURE_QUEUE_SIZE];
1146                 assert(nextvp->target_clock >= vp->target_clock);
1147                 next_target= nextvp->target_clock;
1148             }else{
1149                 next_target= vp->target_clock + is->video_clock - vp->pts; //FIXME pass durations cleanly
1150             }
1151             if(framedrop && time > next_target){
1152                 is->skip_frames *= 1.0 + FRAME_SKIP_FACTOR;
1153                 if(is->pictq_size > 1 || time > next_target + 0.5){
1154                     /* update queue size and signal for next picture */
1155                     if (++is->pictq_rindex == VIDEO_PICTURE_QUEUE_SIZE)
1156                         is->pictq_rindex = 0;
1157
1158                     SDL_LockMutex(is->pictq_mutex);
1159                     is->pictq_size--;
1160                     SDL_CondSignal(is->pictq_cond);
1161                     SDL_UnlockMutex(is->pictq_mutex);
1162                     goto retry;
1163                 }
1164             }
1165
1166             if(is->subtitle_st) {
1167                 if (is->subtitle_stream_changed) {
1168                     SDL_LockMutex(is->subpq_mutex);
1169
1170                     while (is->subpq_size) {
1171                         free_subpicture(&is->subpq[is->subpq_rindex]);
1172
1173                         /* update queue size and signal for next picture */
1174                         if (++is->subpq_rindex == SUBPICTURE_QUEUE_SIZE)
1175                             is->subpq_rindex = 0;
1176
1177                         is->subpq_size--;
1178                     }
1179                     is->subtitle_stream_changed = 0;
1180
1181                     SDL_CondSignal(is->subpq_cond);
1182                     SDL_UnlockMutex(is->subpq_mutex);
1183                 } else {
1184                     if (is->subpq_size > 0) {
1185                         sp = &is->subpq[is->subpq_rindex];
1186
1187                         if (is->subpq_size > 1)
1188                             sp2 = &is->subpq[(is->subpq_rindex + 1) % SUBPICTURE_QUEUE_SIZE];
1189                         else
1190                             sp2 = NULL;
1191
1192                         if ((is->video_current_pts > (sp->pts + ((float) sp->sub.end_display_time / 1000)))
1193                                 || (sp2 && is->video_current_pts > (sp2->pts + ((float) sp2->sub.start_display_time / 1000))))
1194                         {
1195                             free_subpicture(sp);
1196
1197                             /* update queue size and signal for next picture */
1198                             if (++is->subpq_rindex == SUBPICTURE_QUEUE_SIZE)
1199                                 is->subpq_rindex = 0;
1200
1201                             SDL_LockMutex(is->subpq_mutex);
1202                             is->subpq_size--;
1203                             SDL_CondSignal(is->subpq_cond);
1204                             SDL_UnlockMutex(is->subpq_mutex);
1205                         }
1206                     }
1207                 }
1208             }
1209
1210             /* display picture */
1211             if (!display_disable)
1212                 video_display(is);
1213
1214             /* update queue size and signal for next picture */
1215             if (++is->pictq_rindex == VIDEO_PICTURE_QUEUE_SIZE)
1216                 is->pictq_rindex = 0;
1217
1218             SDL_LockMutex(is->pictq_mutex);
1219             is->pictq_size--;
1220             SDL_CondSignal(is->pictq_cond);
1221             SDL_UnlockMutex(is->pictq_mutex);
1222         }
1223     } else if (is->audio_st) {
1224         /* draw the next audio frame */
1225
1226         /* if there is only an audio stream, display the audio bars (better
1227            than nothing, just to test the implementation) */
1228
1229         /* display picture */
1230         if (!display_disable)
1231             video_display(is);
1232     }
1233     if (show_status) {
1234         static int64_t last_time;
1235         int64_t cur_time;
1236         int aqsize, vqsize, sqsize;
1237         double av_diff;
1238
1239         cur_time = av_gettime();
1240         if (!last_time || (cur_time - last_time) >= 30000) {
1241             aqsize = 0;
1242             vqsize = 0;
1243             sqsize = 0;
1244             if (is->audio_st)
1245                 aqsize = is->audioq.size;
1246             if (is->video_st)
1247                 vqsize = is->videoq.size;
1248             if (is->subtitle_st)
1249                 sqsize = is->subtitleq.size;
1250             av_diff = 0;
1251             if (is->audio_st && is->video_st)
1252                 av_diff = get_audio_clock(is) - get_video_clock(is);
1253             printf("%7.2f A-V:%7.3f s:%3.1f aq=%5dKB vq=%5dKB sq=%5dB f=%"PRId64"/%"PRId64"   \r",
1254                    get_master_clock(is),
1255                    av_diff,
1256                    FFMAX(is->skip_frames-1, 0),
1257                    aqsize / 1024,
1258                    vqsize / 1024,
1259                    sqsize,
1260                    is->video_st ? is->video_st->codec->pts_correction_num_faulty_dts : 0,
1261                    is->video_st ? is->video_st->codec->pts_correction_num_faulty_pts : 0);
1262             fflush(stdout);
1263             last_time = cur_time;
1264         }
1265     }
1266 }
1267
1268 static void stream_close(VideoState *is)
1269 {
1270     VideoPicture *vp;
1271     int i;
1272     /* XXX: use a special url_shutdown call to abort parse cleanly */
1273     is->abort_request = 1;
1274     SDL_WaitThread(is->parse_tid, NULL);
1275     SDL_WaitThread(is->refresh_tid, NULL);
1276
1277     /* free all pictures */
1278     for(i=0;i<VIDEO_PICTURE_QUEUE_SIZE; i++) {
1279         vp = &is->pictq[i];
1280 #if CONFIG_AVFILTER
1281         if (vp->picref) {
1282             avfilter_unref_buffer(vp->picref);
1283             vp->picref = NULL;
1284         }
1285 #endif
1286         if (vp->bmp) {
1287             SDL_FreeYUVOverlay(vp->bmp);
1288             vp->bmp = NULL;
1289         }
1290     }
1291     SDL_DestroyMutex(is->pictq_mutex);
1292     SDL_DestroyCond(is->pictq_cond);
1293     SDL_DestroyMutex(is->subpq_mutex);
1294     SDL_DestroyCond(is->subpq_cond);
1295 #if !CONFIG_AVFILTER
1296     if (is->img_convert_ctx)
1297         sws_freeContext(is->img_convert_ctx);
1298 #endif
1299     av_free(is);
1300 }
1301
1302 static void do_exit(void)
1303 {
1304     if (cur_stream) {
1305         stream_close(cur_stream);
1306         cur_stream = NULL;
1307     }
1308     uninit_opts();
1309 #if CONFIG_AVFILTER
1310     avfilter_uninit();
1311 #endif
1312     if (show_status)
1313         printf("\n");
1314     SDL_Quit();
1315     av_log(NULL, AV_LOG_QUIET, "");
1316     exit(0);
1317 }
1318
1319 /* allocate a picture (this needs to be done in the main thread to avoid
1320    potential locking problems) */
1321 static void alloc_picture(void *opaque)
1322 {
1323     VideoState *is = opaque;
1324     VideoPicture *vp;
1325
1326     vp = &is->pictq[is->pictq_windex];
1327
1328     if (vp->bmp)
1329         SDL_FreeYUVOverlay(vp->bmp);
1330
1331 #if CONFIG_AVFILTER
1332     if (vp->picref)
1333         avfilter_unref_buffer(vp->picref);
1334     vp->picref = NULL;
1335
1336     vp->width   = is->out_video_filter->inputs[0]->w;
1337     vp->height  = is->out_video_filter->inputs[0]->h;
1338     vp->pix_fmt = is->out_video_filter->inputs[0]->format;
1339 #else
1340     vp->width   = is->video_st->codec->width;
1341     vp->height  = is->video_st->codec->height;
1342     vp->pix_fmt = is->video_st->codec->pix_fmt;
1343 #endif
1344
1345     vp->bmp = SDL_CreateYUVOverlay(vp->width, vp->height,
1346                                    SDL_YV12_OVERLAY,
1347                                    screen);
1348     if (!vp->bmp || vp->bmp->pitches[0] < vp->width) {
1349         /* SDL allocates a buffer smaller than requested if the video
1350          * overlay hardware is unable to support the requested size. */
1351         fprintf(stderr, "Error: the video system does not support an image\n"
1352                         "size of %dx%d pixels. Try using -lowres or -vf \"scale=w:h\"\n"
1353                         "to reduce the image size.\n", vp->width, vp->height );
1354         do_exit();
1355     }
1356
1357     SDL_LockMutex(is->pictq_mutex);
1358     vp->allocated = 1;
1359     SDL_CondSignal(is->pictq_cond);
1360     SDL_UnlockMutex(is->pictq_mutex);
1361 }
1362
1363 /**
1364  * Queue a decoded picture for display.
1365  * @param pts the pts of the frame, or the dts of the packet, guessed if not known
1366  */
1367 static int queue_picture(VideoState *is, AVFrame *src_frame, double pts, int64_t pos)
1368 {
1369     VideoPicture *vp;
1370
1371     /* wait until we have space to put a new picture */
1372     SDL_LockMutex(is->pictq_mutex);
1373
1374     if(is->pictq_size>=VIDEO_PICTURE_QUEUE_SIZE && !is->refresh)
1375         is->skip_frames= FFMAX(1.0 - FRAME_SKIP_FACTOR, is->skip_frames * (1.0-FRAME_SKIP_FACTOR));
1376
1377     while (is->pictq_size >= VIDEO_PICTURE_QUEUE_SIZE &&
1378            !is->videoq.abort_request) {
1379         SDL_CondWait(is->pictq_cond, is->pictq_mutex);
1380     }
1381     SDL_UnlockMutex(is->pictq_mutex);
1382
1383     if (is->videoq.abort_request)
1384         return -1;
1385
1386     vp = &is->pictq[is->pictq_windex];
1387
1388     /* alloc or resize hardware picture buffer */
1389     if (!vp->bmp ||
1390 #if CONFIG_AVFILTER
1391         vp->width  != is->out_video_filter->inputs[0]->w ||
1392         vp->height != is->out_video_filter->inputs[0]->h) {
1393 #else
1394         vp->width != is->video_st->codec->width ||
1395         vp->height != is->video_st->codec->height) {
1396 #endif
1397         SDL_Event event;
1398
1399         vp->allocated = 0;
1400
1401         /* the allocation must be done in the main thread to avoid
1402            locking problems */
1403         event.type = FF_ALLOC_EVENT;
1404         event.user.data1 = is;
1405         SDL_PushEvent(&event);
1406
1407         /* wait until the picture is allocated */
1408         SDL_LockMutex(is->pictq_mutex);
1409         while (!vp->allocated && !is->videoq.abort_request) {
1410             SDL_CondWait(is->pictq_cond, is->pictq_mutex);
1411         }
1412         SDL_UnlockMutex(is->pictq_mutex);
1413
1414         if (is->videoq.abort_request)
1415             return -1;
1416     }
1417
1418     /* if the frame is not skipped, then display it */
1419     if (vp->bmp) {
1420         AVPicture pict;
1421 #if CONFIG_AVFILTER
1422         if(vp->picref)
1423             avfilter_unref_buffer(vp->picref);
1424         vp->picref = src_frame->opaque;
1425 #endif
1426
1427         /* get a pointer on the bitmap */
1428         SDL_LockYUVOverlay (vp->bmp);
1429
1430         memset(&pict,0,sizeof(AVPicture));
1431         pict.data[0] = vp->bmp->pixels[0];
1432         pict.data[1] = vp->bmp->pixels[2];
1433         pict.data[2] = vp->bmp->pixels[1];
1434
1435         pict.linesize[0] = vp->bmp->pitches[0];
1436         pict.linesize[1] = vp->bmp->pitches[2];
1437         pict.linesize[2] = vp->bmp->pitches[1];
1438
1439 #if CONFIG_AVFILTER
1440         //FIXME use direct rendering
1441         av_picture_copy(&pict, (AVPicture *)src_frame,
1442                         vp->pix_fmt, vp->width, vp->height);
1443 #else
1444         sws_flags = av_get_int(sws_opts, "sws_flags", NULL);
1445         is->img_convert_ctx = sws_getCachedContext(is->img_convert_ctx,
1446             vp->width, vp->height, vp->pix_fmt, vp->width, vp->height,
1447             PIX_FMT_YUV420P, sws_flags, NULL, NULL, NULL);
1448         if (is->img_convert_ctx == NULL) {
1449             fprintf(stderr, "Cannot initialize the conversion context\n");
1450             exit(1);
1451         }
1452         sws_scale(is->img_convert_ctx, src_frame->data, src_frame->linesize,
1453                   0, vp->height, pict.data, pict.linesize);
1454 #endif
1455         /* update the bitmap content */
1456         SDL_UnlockYUVOverlay(vp->bmp);
1457
1458         vp->pts = pts;
1459         vp->pos = pos;
1460
1461         /* now we can update the picture count */
1462         if (++is->pictq_windex == VIDEO_PICTURE_QUEUE_SIZE)
1463             is->pictq_windex = 0;
1464         SDL_LockMutex(is->pictq_mutex);
1465         vp->target_clock= compute_target_time(vp->pts, is);
1466
1467         is->pictq_size++;
1468         SDL_UnlockMutex(is->pictq_mutex);
1469     }
1470     return 0;
1471 }
1472
1473 /**
1474  * compute the exact PTS for the picture if it is omitted in the stream
1475  * @param pts1 the dts of the pkt / pts of the frame
1476  */
1477 static int output_picture(VideoState *is, AVFrame *src_frame, double pts1, int64_t pos)
1478 {
1479     double frame_delay, pts;
1480
1481     pts = pts1;
1482
1483     if (pts != 0) {
1484         /* update video clock with pts, if present */
1485         is->video_clock = pts;
1486     } else {
1487         pts = is->video_clock;
1488     }
1489     /* update video clock for next frame */
1490     frame_delay = av_q2d(is->video_st->codec->time_base);
1491     /* for MPEG2, the frame can be repeated, so we update the
1492        clock accordingly */
1493     frame_delay += src_frame->repeat_pict * (frame_delay * 0.5);
1494     is->video_clock += frame_delay;
1495
1496 #if defined(DEBUG_SYNC) && 0
1497     printf("frame_type=%c clock=%0.3f pts=%0.3f\n",
1498            av_get_pict_type_char(src_frame->pict_type), pts, pts1);
1499 #endif
1500     return queue_picture(is, src_frame, pts, pos);
1501 }
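/*
 * Illustrative sketch, not part of the original file: for 25 fps content the
 * base frame period is 1/25 = 0.040 s.  A frame with repeat_pict = 1 (e.g.
 * soft telecine) adds half a frame period, so the video clock advances by
 * 0.040 + 1 * 0.020 = 0.060 s for that frame.
 */
static inline double example_frame_duration(double time_base, int repeat_pict)
{
    double frame_delay = time_base;                     /* nominal frame period */
    return frame_delay + repeat_pict * (frame_delay * 0.5);
}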
1502
1503 static int get_video_frame(VideoState *is, AVFrame *frame, int64_t *pts, AVPacket *pkt)
1504 {
1505     int len1, got_picture, i;
1506
1507     if (packet_queue_get(&is->videoq, pkt, 1) < 0)
1508         return -1;
1509
1510     if (pkt->data == flush_pkt.data) {
1511         avcodec_flush_buffers(is->video_st->codec);
1512
1513         SDL_LockMutex(is->pictq_mutex);
1514         //Make sure there are no long-delay timers (ideally we should just flush the queue, but that's harder)
1515         for (i = 0; i < VIDEO_PICTURE_QUEUE_SIZE; i++) {
1516             is->pictq[i].target_clock= 0;
1517         }
1518         while (is->pictq_size && !is->videoq.abort_request) {
1519             SDL_CondWait(is->pictq_cond, is->pictq_mutex);
1520         }
1521         is->video_current_pos = -1;
1522         SDL_UnlockMutex(is->pictq_mutex);
1523
1524         is->frame_last_pts = AV_NOPTS_VALUE;
1525         is->frame_last_delay = 0;
1526         is->frame_timer = (double)av_gettime() / 1000000.0;
1527         is->skip_frames = 1;
1528         is->skip_frames_index = 0;
1529         return 0;
1530     }
1531
1532     len1 = avcodec_decode_video2(is->video_st->codec,
1533                                  frame, &got_picture,
1534                                  pkt);
1535
1536     if (got_picture) {
1537         if (decoder_reorder_pts == -1) {
1538             *pts = frame->best_effort_timestamp;
1539         } else if (decoder_reorder_pts) {
1540             *pts = frame->pkt_pts;
1541         } else {
1542             *pts = frame->pkt_dts;
1543         }
1544
1545         if (*pts == AV_NOPTS_VALUE) {
1546             *pts = 0;
1547         }
1548
1549         is->skip_frames_index += 1;
1550         if(is->skip_frames_index >= is->skip_frames){
1551             is->skip_frames_index -= FFMAX(is->skip_frames, 1.0);
1552             return 1;
1553         }
1554
1555     }
1556     return 0;
1557 }
1558
1559 #if CONFIG_AVFILTER
1560 typedef struct {
1561     VideoState *is;
1562     AVFrame *frame;
1563     int use_dr1;
1564 } FilterPriv;
1565
1566 static int input_get_buffer(AVCodecContext *codec, AVFrame *pic)
1567 {
1568     AVFilterContext *ctx = codec->opaque;
1569     AVFilterBufferRef  *ref;
1570     int perms = AV_PERM_WRITE;
1571     int i, w, h, stride[4];
1572     unsigned edge;
1573     int pixel_size;
1574
1575     av_assert0(codec->flags & CODEC_FLAG_EMU_EDGE);
1576
1577     if (codec->codec->capabilities & CODEC_CAP_NEG_LINESIZES)
1578         perms |= AV_PERM_NEG_LINESIZES;
1579
1580     if(pic->buffer_hints & FF_BUFFER_HINTS_VALID) {
1581         if(pic->buffer_hints & FF_BUFFER_HINTS_READABLE) perms |= AV_PERM_READ;
1582         if(pic->buffer_hints & FF_BUFFER_HINTS_PRESERVE) perms |= AV_PERM_PRESERVE;
1583         if(pic->buffer_hints & FF_BUFFER_HINTS_REUSABLE) perms |= AV_PERM_REUSE2;
1584     }
1585     if(pic->reference) perms |= AV_PERM_READ | AV_PERM_PRESERVE;
1586
1587     w = codec->width;
1588     h = codec->height;
1589
1590     if(av_image_check_size(w, h, 0, codec))
1591         return -1;
1592
1593     avcodec_align_dimensions2(codec, &w, &h, stride);
1594     edge = codec->flags & CODEC_FLAG_EMU_EDGE ? 0 : avcodec_get_edge_width();
1595     w += edge << 1;
1596     h += edge << 1;
1597
1598     if(!(ref = avfilter_get_video_buffer(ctx->outputs[0], perms, w, h)))
1599         return -1;
1600
1601     pixel_size = av_pix_fmt_descriptors[ref->format].comp[0].step_minus1+1;
1602     ref->video->w = codec->width;
1603     ref->video->h = codec->height;
1604     for(i = 0; i < 4; i ++) {
1605         unsigned hshift = (i == 1 || i == 2) ? av_pix_fmt_descriptors[ref->format].log2_chroma_w : 0;
1606         unsigned vshift = (i == 1 || i == 2) ? av_pix_fmt_descriptors[ref->format].log2_chroma_h : 0;
1607
1608         if (ref->data[i]) {
1609             ref->data[i]    += ((edge * pixel_size) >> hshift) + ((edge * ref->linesize[i]) >> vshift);
1610         }
1611         pic->data[i]     = ref->data[i];
1612         pic->linesize[i] = ref->linesize[i];
1613     }
1614     pic->opaque = ref;
1615     pic->age    = INT_MAX;
1616     pic->type   = FF_BUFFER_TYPE_USER;
1617     pic->reordered_opaque = codec->reordered_opaque;
1618     if(codec->pkt) pic->pkt_pts = codec->pkt->pts;
1619     else           pic->pkt_pts = AV_NOPTS_VALUE;
1620     return 0;
1621 }
1622
1623 static void input_release_buffer(AVCodecContext *codec, AVFrame *pic)
1624 {
1625     memset(pic->data, 0, sizeof(pic->data));
1626     avfilter_unref_buffer(pic->opaque);
1627 }
1628
1629 static int input_reget_buffer(AVCodecContext *codec, AVFrame *pic)
1630 {
1631     AVFilterBufferRef *ref = pic->opaque;
1632
1633     if (pic->data[0] == NULL) {
1634         pic->buffer_hints |= FF_BUFFER_HINTS_READABLE;
1635         return codec->get_buffer(codec, pic);
1636     }
1637
1638     if ((codec->width != ref->video->w) || (codec->height != ref->video->h) ||
1639         (codec->pix_fmt != ref->format)) {
1640         av_log(codec, AV_LOG_ERROR, "Picture properties changed.\n");
1641         return -1;
1642     }
1643
1644     pic->reordered_opaque = codec->reordered_opaque;
1645     if(codec->pkt) pic->pkt_pts = codec->pkt->pts;
1646     else           pic->pkt_pts = AV_NOPTS_VALUE;
1647     return 0;
1648 }
1649
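     /* init callback of the "ffplay_input" source filter: store the VideoState
        and, if the decoder supports direct rendering (DR1), install the buffer
        callbacks above */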
1650 static int input_init(AVFilterContext *ctx, const char *args, void *opaque)
1651 {
1652     FilterPriv *priv = ctx->priv;
1653     AVCodecContext *codec;
1654     if(!opaque) return -1;
1655
1656     priv->is = opaque;
1657     codec    = priv->is->video_st->codec;
1658     codec->opaque = ctx;
1659     if (codec->codec->capabilities & CODEC_CAP_DR1) {
1661         codec->flags |= CODEC_FLAG_EMU_EDGE;
1662         priv->use_dr1 = 1;
1663         codec->get_buffer     = input_get_buffer;
1664         codec->release_buffer = input_release_buffer;
1665         codec->reget_buffer   = input_reget_buffer;
1666         codec->thread_safe_callbacks = 1;
1667     }
1668
1669     priv->frame = avcodec_alloc_frame();
1670
1671     return 0;
1672 }
1673
1674 static void input_uninit(AVFilterContext *ctx)
1675 {
1676     FilterPriv *priv = ctx->priv;
1677     av_free(priv->frame);
1678 }
1679
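     /* request_frame() callback: decode one video frame and push it into the
        filtergraph */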
1680 static int input_request_frame(AVFilterLink *link)
1681 {
1682     FilterPriv *priv = link->src->priv;
1683     AVFilterBufferRef *picref;
1684     int64_t pts = 0;
1685     AVPacket pkt;
1686     int ret;
1687
1688     while (!(ret = get_video_frame(priv->is, priv->frame, &pts, &pkt)))
1689         av_free_packet(&pkt);
1690     if (ret < 0)
1691         return -1;
1692
1693     if(priv->use_dr1) {
1694         picref = avfilter_ref_buffer(priv->frame->opaque, ~0);
1695     } else {
1696         picref = avfilter_get_video_buffer(link, AV_PERM_WRITE, link->w, link->h);
1697         av_image_copy(picref->data, picref->linesize,
1698                       priv->frame->data, priv->frame->linesize,
1699                       picref->format, link->w, link->h);
1700     }
1701     av_free_packet(&pkt);
1702
1703     picref->pts = pts;
1704     picref->pos = pkt.pos;
1705     picref->video->pixel_aspect = priv->is->video_st->codec->sample_aspect_ratio;
1706     avfilter_start_frame(link, picref);
1707     avfilter_draw_slice(link, 0, link->h, 1);
1708     avfilter_end_frame(link);
1709
1710     return 0;
1711 }
1712
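     /* offer only the decoder's pixel format to the rest of the graph */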
1713 static int input_query_formats(AVFilterContext *ctx)
1714 {
1715     FilterPriv *priv = ctx->priv;
1716     enum PixelFormat pix_fmts[] = {
1717         priv->is->video_st->codec->pix_fmt, PIX_FMT_NONE
1718     };
1719
1720     avfilter_set_common_formats(ctx, avfilter_make_format_list(pix_fmts));
1721     return 0;
1722 }
1723
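     /* propagate the decoder's frame size and the stream time base to the
        output link */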
1724 static int input_config_props(AVFilterLink *link)
1725 {
1726     FilterPriv *priv  = link->src->priv;
1727     AVCodecContext *c = priv->is->video_st->codec;
1728
1729     link->w = c->width;
1730     link->h = c->height;
1731     link->time_base = priv->is->video_st->time_base;
1732
1733     return 0;
1734 }
1735
1736 static AVFilter input_filter =
1737 {
1738     .name      = "ffplay_input",
1739
1740     .priv_size = sizeof(FilterPriv),
1741
1742     .init      = input_init,
1743     .uninit    = input_uninit,
1744
1745     .query_formats = input_query_formats,
1746
1747     .inputs    = (AVFilterPad[]) {{ .name = NULL }},
1748     .outputs   = (AVFilterPad[]) {{ .name = "default",
1749                                     .type = AVMEDIA_TYPE_VIDEO,
1750                                     .request_frame = input_request_frame,
1751                                     .config_props  = input_config_props, },
1752                                   { .name = NULL }},
1753 };
1754
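     /* build the video filtergraph: "ffplay_input" source -> optional user
        filters (-vf) -> ffsink */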
1755 static int configure_video_filters(AVFilterGraph *graph, VideoState *is, const char *vfilters)
1756 {
1757     char sws_flags_str[128];
1758     int ret;
1759     FFSinkContext ffsink_ctx = { .pix_fmt = PIX_FMT_YUV420P };
1760     AVFilterContext *filt_src = NULL, *filt_out = NULL;
1761     snprintf(sws_flags_str, sizeof(sws_flags_str), "flags=%d", sws_flags);
1762     graph->scale_sws_opts = av_strdup(sws_flags_str);
1763
1764     if ((ret = avfilter_graph_create_filter(&filt_src, &input_filter, "src",
1765                                             NULL, is, graph)) < 0)
1766         goto the_end;
1767     if ((ret = avfilter_graph_create_filter(&filt_out, &ffsink, "out",
1768                                             NULL, &ffsink_ctx, graph)) < 0)
1769         goto the_end;
1770
1771     if(vfilters) {
1772         AVFilterInOut *outputs = av_malloc(sizeof(AVFilterInOut));
1773         AVFilterInOut *inputs  = av_malloc(sizeof(AVFilterInOut));
1774
1775         outputs->name    = av_strdup("in");
1776         outputs->filter_ctx = filt_src;
1777         outputs->pad_idx = 0;
1778         outputs->next    = NULL;
1779
1780         inputs->name    = av_strdup("out");
1781         inputs->filter_ctx = filt_out;
1782         inputs->pad_idx = 0;
1783         inputs->next    = NULL;
1784
1785         if ((ret = avfilter_graph_parse(graph, vfilters, inputs, outputs, NULL)) < 0)
1786             goto the_end;
1787         av_freep(&vfilters);
1788     } else {
1789         if ((ret = avfilter_link(filt_src, 0, filt_out, 0)) < 0)
1790             goto the_end;
1791     }
1792
1793     if ((ret = avfilter_graph_config(graph, NULL)) < 0)
1794         goto the_end;
1795
1796     is->out_video_filter = filt_out;
1797 the_end:
1798     return ret;
1799 }
1800
1801 #endif  /* CONFIG_AVFILTER */
1802
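     /* video decoding thread: fetch decoded (and, with libavfilter, filtered)
        frames and queue them for display */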
1803 static int video_thread(void *arg)
1804 {
1805     VideoState *is = arg;
1806     AVFrame *frame= avcodec_alloc_frame();
1807     int64_t pts_int;
1808     double pts;
1809     int ret;
1810
1811 #if CONFIG_AVFILTER
1812     AVFilterGraph *graph = avfilter_graph_alloc();
1813     AVFilterContext *filt_out = NULL;
1814     int64_t pos;
1815
1816     if ((ret = configure_video_filters(graph, is, vfilters)) < 0)
1817         goto the_end;
1818     filt_out = is->out_video_filter;
1819 #endif
1820
1821     for(;;) {
1822 #if !CONFIG_AVFILTER
1823         AVPacket pkt;
1824 #else
1825         AVFilterBufferRef *picref;
1826         AVRational tb;
1827 #endif
1828         while (is->paused && !is->videoq.abort_request)
1829             SDL_Delay(10);
1830 #if CONFIG_AVFILTER
1831         ret = get_filtered_video_frame(filt_out, frame, &picref, &tb);
1832         if (picref) {
1833             pts_int = picref->pts;
1834             pos     = picref->pos;
1835             frame->opaque = picref;
1836         }
1837
1838         if (av_cmp_q(tb, is->video_st->time_base)) {
1839             av_unused int64_t pts1 = pts_int;
1840             pts_int = av_rescale_q(pts_int, tb, is->video_st->time_base);
1841             av_dlog(NULL, "video_thread(): "
1842                     "tb:%d/%d pts:%"PRId64" -> tb:%d/%d pts:%"PRId64"\n",
1843                     tb.num, tb.den, pts1,
1844                     is->video_st->time_base.num, is->video_st->time_base.den, pts_int);
1845         }
1846 #else
1847         ret = get_video_frame(is, frame, &pts_int, &pkt);
1848 #endif
1849
1850         if (ret < 0) goto the_end;
1851
1852         if (!ret)
1853             continue;
1854
1855         pts = pts_int*av_q2d(is->video_st->time_base);
1856
1857 #if CONFIG_AVFILTER
1858         ret = output_picture(is, frame, pts, pos);
1859 #else
1860         ret = output_picture(is, frame, pts,  pkt.pos);
1861         av_free_packet(&pkt);
1862 #endif
1863         if (ret < 0)
1864             goto the_end;
1865
1866         if (step)
1867             if (cur_stream)
1868                 stream_toggle_pause(cur_stream);
1869     }
1870  the_end:
1871 #if CONFIG_AVFILTER
1872     avfilter_graph_free(&graph);
1873 #endif
1874     av_free(frame);
1875     return 0;
1876 }
1877
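     /* subtitle decoding thread: decode subtitle packets, convert their
        palettes from RGBA to YUVA and queue the resulting subpictures */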
1878 static int subtitle_thread(void *arg)
1879 {
1880     VideoState *is = arg;
1881     SubPicture *sp;
1882     AVPacket pkt1, *pkt = &pkt1;
1883     int len1, got_subtitle;
1884     double pts;
1885     int i, j;
1886     int r, g, b, y, u, v, a;
1887
1888     for(;;) {
1889         while (is->paused && !is->subtitleq.abort_request) {
1890             SDL_Delay(10);
1891         }
1892         if (packet_queue_get(&is->subtitleq, pkt, 1) < 0)
1893             break;
1894
1895         if(pkt->data == flush_pkt.data){
1896             avcodec_flush_buffers(is->subtitle_st->codec);
1897             continue;
1898         }
1899         SDL_LockMutex(is->subpq_mutex);
1900         while (is->subpq_size >= SUBPICTURE_QUEUE_SIZE &&
1901                !is->subtitleq.abort_request) {
1902             SDL_CondWait(is->subpq_cond, is->subpq_mutex);
1903         }
1904         SDL_UnlockMutex(is->subpq_mutex);
1905
1906         if (is->subtitleq.abort_request)
1907             goto the_end;
1908
1909         sp = &is->subpq[is->subpq_windex];
1910
1911         /* NOTE: pts is the PTS of the _first_ subtitle beginning in
1912            this packet, if any */
1913         pts = 0;
1914         if (pkt->pts != AV_NOPTS_VALUE)
1915             pts = av_q2d(is->subtitle_st->time_base)*pkt->pts;
1916
1917         len1 = avcodec_decode_subtitle2(is->subtitle_st->codec,
1918                                     &sp->sub, &got_subtitle,
1919                                     pkt);
1920 //            if (len1 < 0)
1921 //                break;
1922         if (got_subtitle && sp->sub.format == 0) {
1923             sp->pts = pts;
1924
1925             for (i = 0; i < sp->sub.num_rects; i++)
1926             {
1927                 for (j = 0; j < sp->sub.rects[i]->nb_colors; j++)
1928                 {
1929                     RGBA_IN(r, g, b, a, (uint32_t*)sp->sub.rects[i]->pict.data[1] + j);
1930                     y = RGB_TO_Y_CCIR(r, g, b);
1931                     u = RGB_TO_U_CCIR(r, g, b, 0);
1932                     v = RGB_TO_V_CCIR(r, g, b, 0);
1933                     YUVA_OUT((uint32_t*)sp->sub.rects[i]->pict.data[1] + j, y, u, v, a);
1934                 }
1935             }
1936
1937             /* now we can update the picture count */
1938             if (++is->subpq_windex == SUBPICTURE_QUEUE_SIZE)
1939                 is->subpq_windex = 0;
1940             SDL_LockMutex(is->subpq_mutex);
1941             is->subpq_size++;
1942             SDL_UnlockMutex(is->subpq_mutex);
1943         }
1944         av_free_packet(pkt);
1945 //        if (step)
1946 //            if (cur_stream)
1947 //                stream_toggle_pause(cur_stream);
1948     }
1949  the_end:
1950     return 0;
1951 }
1952
1953 /* copy samples into the sample array for the waveform / spectrum display */
1954 static void update_sample_display(VideoState *is, short *samples, int samples_size)
1955 {
1956     int size, len, channels;
1957
1958     channels = is->audio_st->codec->channels;
1959
1960     size = samples_size / sizeof(short);
1961     while (size > 0) {
1962         len = SAMPLE_ARRAY_SIZE - is->sample_array_index;
1963         if (len > size)
1964             len = size;
1965         memcpy(is->sample_array + is->sample_array_index, samples, len * sizeof(short));
1966         samples += len;
1967         is->sample_array_index += len;
1968         if (is->sample_array_index >= SAMPLE_ARRAY_SIZE)
1969             is->sample_array_index = 0;
1970         size -= len;
1971     }
1972 }
1973
1974 /* return the new audio buffer size (samples can be added or removed
1975    to get better sync when video or the external clock is the master) */
1976 static int synchronize_audio(VideoState *is, short *samples,
1977                              int samples_size1, double pts)
1978 {
1979     int n, samples_size;
1980     double ref_clock;
1981
1982     n = 2 * is->audio_st->codec->channels;
1983     samples_size = samples_size1;
1984
1985     /* if not master, then we try to remove or add samples to correct the clock */
1986     if (((is->av_sync_type == AV_SYNC_VIDEO_MASTER && is->video_st) ||
1987          is->av_sync_type == AV_SYNC_EXTERNAL_CLOCK)) {
1988         double diff, avg_diff;
1989         int wanted_size, min_size, max_size, nb_samples;
1990
1991         ref_clock = get_master_clock(is);
1992         diff = get_audio_clock(is) - ref_clock;
1993
1994         if (fabs(diff) < AV_NOSYNC_THRESHOLD) {
1995             is->audio_diff_cum = diff + is->audio_diff_avg_coef * is->audio_diff_cum;
1996             if (is->audio_diff_avg_count < AUDIO_DIFF_AVG_NB) {
1997                 /* not enough measures to have a correct estimate */
1998                 is->audio_diff_avg_count++;
1999             } else {
2000                 /* estimate the A-V difference */
2001                 avg_diff = is->audio_diff_cum * (1.0 - is->audio_diff_avg_coef);
2002
2003                 if (fabs(avg_diff) >= is->audio_diff_threshold) {
2004                     wanted_size = samples_size + ((int)(diff * is->audio_st->codec->sample_rate) * n);
2005                     nb_samples = samples_size / n;
2006
2007                     min_size = ((nb_samples * (100 - SAMPLE_CORRECTION_PERCENT_MAX)) / 100) * n;
2008                     max_size = ((nb_samples * (100 + SAMPLE_CORRECTION_PERCENT_MAX)) / 100) * n;
2009                     if (wanted_size < min_size)
2010                         wanted_size = min_size;
2011                     else if (wanted_size > max_size)
2012                         wanted_size = max_size;
2013
2014                     /* add or remove samples to correct the sync */
2015                     if (wanted_size < samples_size) {
2016                         /* remove samples */
2017                         samples_size = wanted_size;
2018                     } else if (wanted_size > samples_size) {
2019                         uint8_t *samples_end, *q;
2020                         int nb;
2021
2022                         /* add samples */
2023                         nb = (wanted_size - samples_size);
2024                         samples_end = (uint8_t *)samples + samples_size - n;
2025                         q = samples_end + n;
2026                         while (nb > 0) {
2027                             memcpy(q, samples_end, n);
2028                             q += n;
2029                             nb -= n;
2030                         }
2031                         samples_size = wanted_size;
2032                     }
2033                 }
2034 #if 0
2035                 printf("diff=%f adiff=%f sample_diff=%d apts=%0.3f vpts=%0.3f %f\n",
2036                        diff, avg_diff, samples_size - samples_size1,
2037                        is->audio_clock, is->video_clock, is->audio_diff_threshold);
2038 #endif
2039             }
2040         } else {
2041             /* the difference is too large: it is probably due to initial
2042                PTS errors, so reset the A-V filter */
2043             is->audio_diff_avg_count = 0;
2044             is->audio_diff_cum = 0;
2045         }
2046     }
2047
2048     return samples_size;
2049 }
2050
2051 /* decode one audio frame and return its uncompressed size */
2052 static int audio_decode_frame(VideoState *is, double *pts_ptr)
2053 {
2054     AVPacket *pkt_temp = &is->audio_pkt_temp;
2055     AVPacket *pkt = &is->audio_pkt;
2056     AVCodecContext *dec= is->audio_st->codec;
2057     int n, len1, data_size;
2058     double pts;
2059
2060     for(;;) {
2061         /* NOTE: the audio packet can contain several frames */
2062         while (pkt_temp->size > 0) {
2063             data_size = sizeof(is->audio_buf1);
2064             len1 = avcodec_decode_audio3(dec,
2065                                         (int16_t *)is->audio_buf1, &data_size,
2066                                         pkt_temp);
2067             if (len1 < 0) {
2068                 /* if error, we skip the frame */
2069                 pkt_temp->size = 0;
2070                 break;
2071             }
2072
2073             pkt_temp->data += len1;
2074             pkt_temp->size -= len1;
2075             if (data_size <= 0)
2076                 continue;
2077
2078             if (dec->sample_fmt != is->audio_src_fmt) {
2079                 if (is->reformat_ctx)
2080                     av_audio_convert_free(is->reformat_ctx);
2081                 is->reformat_ctx= av_audio_convert_alloc(AV_SAMPLE_FMT_S16, 1,
2082                                                          dec->sample_fmt, 1, NULL, 0);
2083                 if (!is->reformat_ctx) {
2084                     fprintf(stderr, "Cannot convert %s sample format to %s sample format\n",
2085                         av_get_sample_fmt_name(dec->sample_fmt),
2086                         av_get_sample_fmt_name(AV_SAMPLE_FMT_S16));
2087                         break;
2088                 }
2089                 is->audio_src_fmt= dec->sample_fmt;
2090             }
2091
2092             if (is->reformat_ctx) {
2093                 const void *ibuf[6]= {is->audio_buf1};
2094                 void *obuf[6]= {is->audio_buf2};
2095                 int istride[6]= {av_get_bits_per_sample_fmt(dec->sample_fmt)/8};
2096                 int ostride[6]= {2};
2097                 int len= data_size/istride[0];
2098                 if (av_audio_convert(is->reformat_ctx, obuf, ostride, ibuf, istride, len)<0) {
2099                     printf("av_audio_convert() failed\n");
2100                     break;
2101                 }
2102                 is->audio_buf= is->audio_buf2;
2103                 /* FIXME: existing code assumes that data_size equals framesize*channels*2;
2104                           remove this legacy cruft */
2105                 data_size= len*2;
2106             }else{
2107                 is->audio_buf= is->audio_buf1;
2108             }
2109
2110             /* if no pts, then compute it */
2111             pts = is->audio_clock;
2112             *pts_ptr = pts;
2113             n = 2 * dec->channels;
2114             is->audio_clock += (double)data_size /
2115                 (double)(n * dec->sample_rate);
2116 #if defined(DEBUG_SYNC)
2117             {
2118                 static double last_clock;
2119                 printf("audio: delay=%0.3f clock=%0.3f pts=%0.3f\n",
2120                        is->audio_clock - last_clock,
2121                        is->audio_clock, pts);
2122                 last_clock = is->audio_clock;
2123             }
2124 #endif
2125             return data_size;
2126         }
2127
2128         /* free the current packet */
2129         if (pkt->data)
2130             av_free_packet(pkt);
2131
2132         if (is->paused || is->audioq.abort_request) {
2133             return -1;
2134         }
2135
2136         /* read next packet */
2137         if (packet_queue_get(&is->audioq, pkt, 1) < 0)
2138             return -1;
2139         if(pkt->data == flush_pkt.data){
2140             avcodec_flush_buffers(dec);
2141             continue;
2142         }
2143
2144         pkt_temp->data = pkt->data;
2145         pkt_temp->size = pkt->size;
2146
2147         /* update the audio clock with the packet pts, if available */
2148         if (pkt->pts != AV_NOPTS_VALUE) {
2149             is->audio_clock = av_q2d(is->audio_st->time_base)*pkt->pts;
2150         }
2151     }
2152 }
2153
2154 /* prepare a new audio buffer */
2155 static void sdl_audio_callback(void *opaque, Uint8 *stream, int len)
2156 {
2157     VideoState *is = opaque;
2158     int audio_size, len1;
2159     double pts;
2160
2161     audio_callback_time = av_gettime();
2162
2163     while (len > 0) {
2164         if (is->audio_buf_index >= is->audio_buf_size) {
2165            audio_size = audio_decode_frame(is, &pts);
2166            if (audio_size < 0) {
2167                 /* if error, just output silence */
2168                is->audio_buf = is->audio_buf1;
2169                is->audio_buf_size = 1024;
2170                memset(is->audio_buf, 0, is->audio_buf_size);
2171            } else {
2172                if (is->show_mode != SHOW_MODE_VIDEO)
2173                    update_sample_display(is, (int16_t *)is->audio_buf, audio_size);
2174                audio_size = synchronize_audio(is, (int16_t *)is->audio_buf, audio_size,
2175                                               pts);
2176                is->audio_buf_size = audio_size;
2177            }
2178            is->audio_buf_index = 0;
2179         }
2180         len1 = is->audio_buf_size - is->audio_buf_index;
2181         if (len1 > len)
2182             len1 = len;
2183         memcpy(stream, (uint8_t *)is->audio_buf + is->audio_buf_index, len1);
2184         len -= len1;
2185         stream += len1;
2186         is->audio_buf_index += len1;
2187     }
2188 }
2189
2190 /* open a given stream. Return 0 if OK */
2191 static int stream_component_open(VideoState *is, int stream_index)
2192 {
2193     AVFormatContext *ic = is->ic;
2194     AVCodecContext *avctx;
2195     AVCodec *codec;
2196     SDL_AudioSpec wanted_spec, spec;
2197
2198     if (stream_index < 0 || stream_index >= ic->nb_streams)
2199         return -1;
2200     avctx = ic->streams[stream_index]->codec;
2201
2202     /* for audio, request at most 2 decoded channels (downmix if needed) */
2203     if (avctx->codec_type == AVMEDIA_TYPE_AUDIO) {
2204         if (avctx->channels > 0) {
2205             avctx->request_channels = FFMIN(2, avctx->channels);
2206         } else {
2207             avctx->request_channels = 2;
2208         }
2209     }
2210
2211     codec = avcodec_find_decoder(avctx->codec_id);
2212     avctx->debug_mv = debug_mv;
2213     avctx->debug = debug;
2214     avctx->workaround_bugs = workaround_bugs;
2215     avctx->lowres = lowres;
2216     if(lowres) avctx->flags |= CODEC_FLAG_EMU_EDGE;
2217     avctx->idct_algo= idct;
2218     if(fast) avctx->flags2 |= CODEC_FLAG2_FAST;
2219     avctx->skip_frame= skip_frame;
2220     avctx->skip_idct= skip_idct;
2221     avctx->skip_loop_filter= skip_loop_filter;
2222     avctx->error_recognition= error_recognition;
2223     avctx->error_concealment= error_concealment;
2224     avctx->thread_count= thread_count;
2225
2226     set_context_opts(avctx, avcodec_opts[avctx->codec_type], 0, codec);
2227
2228     if (!codec ||
2229         avcodec_open(avctx, codec) < 0)
2230         return -1;
2231
2232     /* prepare audio output */
2233     if (avctx->codec_type == AVMEDIA_TYPE_AUDIO) {
2234         wanted_spec.freq = avctx->sample_rate;
2235         wanted_spec.format = AUDIO_S16SYS;
2236         wanted_spec.channels = avctx->channels;
2237         wanted_spec.silence = 0;
2238         wanted_spec.samples = SDL_AUDIO_BUFFER_SIZE;
2239         wanted_spec.callback = sdl_audio_callback;
2240         wanted_spec.userdata = is;
2241         if (SDL_OpenAudio(&wanted_spec, &spec) < 0) {
2242             fprintf(stderr, "SDL_OpenAudio: %s\n", SDL_GetError());
2243             return -1;
2244         }
2245         is->audio_hw_buf_size = spec.size;
2246         is->audio_src_fmt= AV_SAMPLE_FMT_S16;
2247     }
2248
2249     ic->streams[stream_index]->discard = AVDISCARD_DEFAULT;
2250     switch(avctx->codec_type) {
2251     case AVMEDIA_TYPE_AUDIO:
2252         is->audio_stream = stream_index;
2253         is->audio_st = ic->streams[stream_index];
2254         is->audio_buf_size = 0;
2255         is->audio_buf_index = 0;
2256
2257         /* init averaging filter */
2258         is->audio_diff_avg_coef = exp(log(0.01) / AUDIO_DIFF_AVG_NB);
2259         is->audio_diff_avg_count = 0;
2260         /* since we do not have a precise enough audio FIFO fullness measure,
2261            we only correct audio sync when the error is larger than this threshold */
2262         is->audio_diff_threshold = 2.0 * SDL_AUDIO_BUFFER_SIZE / avctx->sample_rate;
2263
2264         memset(&is->audio_pkt, 0, sizeof(is->audio_pkt));
2265         packet_queue_init(&is->audioq);
2266         SDL_PauseAudio(0);
2267         break;
2268     case AVMEDIA_TYPE_VIDEO:
2269         is->video_stream = stream_index;
2270         is->video_st = ic->streams[stream_index];
2271
2272 //        is->video_current_pts_time = av_gettime();
2273
2274         packet_queue_init(&is->videoq);
2275         is->video_tid = SDL_CreateThread(video_thread, is);
2276         break;
2277     case AVMEDIA_TYPE_SUBTITLE:
2278         is->subtitle_stream = stream_index;
2279         is->subtitle_st = ic->streams[stream_index];
2280         packet_queue_init(&is->subtitleq);
2281
2282         is->subtitle_tid = SDL_CreateThread(subtitle_thread, is);
2283         break;
2284     default:
2285         break;
2286     }
2287     return 0;
2288 }
2289
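     /* close a stream component: abort its packet queue, wait for the
        associated thread (or close the audio device), then close the codec */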
2290 static void stream_component_close(VideoState *is, int stream_index)
2291 {
2292     AVFormatContext *ic = is->ic;
2293     AVCodecContext *avctx;
2294
2295     if (stream_index < 0 || stream_index >= ic->nb_streams)
2296         return;
2297     avctx = ic->streams[stream_index]->codec;
2298
2299     switch(avctx->codec_type) {
2300     case AVMEDIA_TYPE_AUDIO:
2301         packet_queue_abort(&is->audioq);
2302
2303         SDL_CloseAudio();
2304
2305         packet_queue_end(&is->audioq);
2306         if (is->reformat_ctx)
2307             av_audio_convert_free(is->reformat_ctx);
2308         is->reformat_ctx = NULL;
2309         break;
2310     case AVMEDIA_TYPE_VIDEO:
2311         packet_queue_abort(&is->videoq);
2312
2313         /* note: we also signal this mutex to make sure we deblock the
2314            video thread in all cases */
2315         SDL_LockMutex(is->pictq_mutex);
2316         SDL_CondSignal(is->pictq_cond);
2317         SDL_UnlockMutex(is->pictq_mutex);
2318
2319         SDL_WaitThread(is->video_tid, NULL);
2320
2321         packet_queue_end(&is->videoq);
2322         break;
2323     case AVMEDIA_TYPE_SUBTITLE:
2324         packet_queue_abort(&is->subtitleq);
2325
2326         /* note: we also signal this mutex to make sure we deblock the
2327            subtitle thread in all cases */
2328         SDL_LockMutex(is->subpq_mutex);
2329         is->subtitle_stream_changed = 1;
2330
2331         SDL_CondSignal(is->subpq_cond);
2332         SDL_UnlockMutex(is->subpq_mutex);
2333
2334         SDL_WaitThread(is->subtitle_tid, NULL);
2335
2336         packet_queue_end(&is->subtitleq);
2337         break;
2338     default:
2339         break;
2340     }
2341
2342     ic->streams[stream_index]->discard = AVDISCARD_ALL;
2343     avcodec_close(avctx);
2344     switch(avctx->codec_type) {
2345     case AVMEDIA_TYPE_AUDIO:
2346         is->audio_st = NULL;
2347         is->audio_stream = -1;
2348         break;
2349     case AVMEDIA_TYPE_VIDEO:
2350         is->video_st = NULL;
2351         is->video_stream = -1;
2352         break;
2353     case AVMEDIA_TYPE_SUBTITLE:
2354         is->subtitle_st = NULL;
2355         is->subtitle_stream = -1;
2356         break;
2357     default:
2358         break;
2359     }
2360 }
2361
2362 /* since we have only one decoding thread, we can use a global
2363    variable instead of a thread local variable */
2364 static VideoState *global_video_state;
2365
2366 static int decode_interrupt_cb(void)
2367 {
2368     return (global_video_state && global_video_state->abort_request);
2369 }
2370
2371 /* this thread gets the stream from the disk or the network */
2372 static int decode_thread(void *arg)
2373 {
2374     VideoState *is = arg;
2375     AVFormatContext *ic;
2376     int err, i, ret;
2377     int st_index[AVMEDIA_TYPE_NB];
2378     AVPacket pkt1, *pkt = &pkt1;
2379     AVFormatParameters params, *ap = &params;
2380     int eof=0;
2381     int pkt_in_play_range = 0;
2382
2383     ic = avformat_alloc_context();
2384
2385     memset(st_index, -1, sizeof(st_index));
2386     is->video_stream = -1;
2387     is->audio_stream = -1;
2388     is->subtitle_stream = -1;
2389
2390     global_video_state = is;
2391     avio_set_interrupt_cb(decode_interrupt_cb);
2392
2393     memset(ap, 0, sizeof(*ap));
2394
2395     ap->prealloced_context = 1;
2396     ap->width = frame_width;
2397     ap->height= frame_height;
2398     ap->time_base= (AVRational){1, 25};
2399     ap->pix_fmt = frame_pix_fmt;
2400
2401     set_context_opts(ic, avformat_opts, AV_OPT_FLAG_DECODING_PARAM, NULL);
2402
2403     err = av_open_input_file(&ic, is->filename, is->iformat, 0, ap);
2404     if (err < 0) {
2405         print_error(is->filename, err);
2406         ret = -1;
2407         goto fail;
2408     }
2409     is->ic = ic;
2410
2411     if(genpts)
2412         ic->flags |= AVFMT_FLAG_GENPTS;
2413
2414     err = av_find_stream_info(ic);
2415     if (err < 0) {
2416         fprintf(stderr, "%s: could not find codec parameters\n", is->filename);
2417         ret = -1;
2418         goto fail;
2419     }
2420     if(ic->pb)
2421         ic->pb->eof_reached= 0; //FIXME hack, ffplay maybe should not use url_feof() to test for the end
2422
2423     if(seek_by_bytes<0)
2424         seek_by_bytes= !!(ic->iformat->flags & AVFMT_TS_DISCONT);
2425
2426     /* if seeking requested, we execute it */
2427     if (start_time != AV_NOPTS_VALUE) {
2428         int64_t timestamp;
2429
2430         timestamp = start_time;
2431         /* add the stream start time */
2432         if (ic->start_time != AV_NOPTS_VALUE)
2433             timestamp += ic->start_time;
2434         ret = avformat_seek_file(ic, -1, INT64_MIN, timestamp, INT64_MAX, 0);
2435         if (ret < 0) {
2436             fprintf(stderr, "%s: could not seek to position %0.3f\n",
2437                     is->filename, (double)timestamp / AV_TIME_BASE);
2438         }
2439     }
2440
2441     for (i = 0; i < ic->nb_streams; i++)
2442         ic->streams[i]->discard = AVDISCARD_ALL;
2443     if (!video_disable)
2444         st_index[AVMEDIA_TYPE_VIDEO] =
2445             av_find_best_stream(ic, AVMEDIA_TYPE_VIDEO,
2446                                 wanted_stream[AVMEDIA_TYPE_VIDEO], -1, NULL, 0);
2447     if (!audio_disable)
2448         st_index[AVMEDIA_TYPE_AUDIO] =
2449             av_find_best_stream(ic, AVMEDIA_TYPE_AUDIO,
2450                                 wanted_stream[AVMEDIA_TYPE_AUDIO],
2451                                 st_index[AVMEDIA_TYPE_VIDEO],
2452                                 NULL, 0);
2453     if (!video_disable)
2454         st_index[AVMEDIA_TYPE_SUBTITLE] =
2455             av_find_best_stream(ic, AVMEDIA_TYPE_SUBTITLE,
2456                                 wanted_stream[AVMEDIA_TYPE_SUBTITLE],
2457                                 (st_index[AVMEDIA_TYPE_AUDIO] >= 0 ?
2458                                  st_index[AVMEDIA_TYPE_AUDIO] :
2459                                  st_index[AVMEDIA_TYPE_VIDEO]),
2460                                 NULL, 0);
2461     if (show_status) {
2462         av_dump_format(ic, 0, is->filename, 0);
2463     }
2464
2465     is->show_mode = show_mode;
2466
2467     /* open the streams */
2468     if (st_index[AVMEDIA_TYPE_AUDIO] >= 0) {
2469         stream_component_open(is, st_index[AVMEDIA_TYPE_AUDIO]);
2470     }
2471
2472     ret=-1;
2473     if (st_index[AVMEDIA_TYPE_VIDEO] >= 0) {
2474         ret= stream_component_open(is, st_index[AVMEDIA_TYPE_VIDEO]);
2475     }
2476     is->refresh_tid = SDL_CreateThread(refresh_thread, is);
2477     if(ret<0) {
2478         if (!display_disable)
2479             is->show_mode = SHOW_MODE_RDFT;
2480     }
2481
2482     if (st_index[AVMEDIA_TYPE_SUBTITLE] >= 0) {
2483         stream_component_open(is, st_index[AVMEDIA_TYPE_SUBTITLE]);
2484     }
2485
2486     if (is->video_stream < 0 && is->audio_stream < 0) {
2487         fprintf(stderr, "%s: could not open codecs\n", is->filename);
2488         ret = -1;
2489         goto fail;
2490     }
2491
2492     for(;;) {
2493         if (is->abort_request)
2494             break;
2495         if (is->paused != is->last_paused) {
2496             is->last_paused = is->paused;
2497             if (is->paused)
2498                 is->read_pause_return= av_read_pause(ic);
2499             else
2500                 av_read_play(ic);
2501         }
2502 #if CONFIG_RTSP_DEMUXER
2503         if (is->paused && !strcmp(ic->iformat->name, "rtsp")) {
2504             /* wait 10 ms to avoid trying to get another packet */
2505             /* XXX: horrible */
2506             SDL_Delay(10);
2507             continue;
2508         }
2509 #endif
2510         if (is->seek_req) {
2511             int64_t seek_target= is->seek_pos;
2512             int64_t seek_min= is->seek_rel > 0 ? seek_target - is->seek_rel + 2: INT64_MIN;
2513             int64_t seek_max= is->seek_rel < 0 ? seek_target - is->seek_rel - 2: INT64_MAX;
2514 //FIXME the +-2 is due to rounding not being done in the correct direction in the generation
2515 //      of the seek_pos/seek_rel variables
2516
2517             ret = avformat_seek_file(is->ic, -1, seek_min, seek_target, seek_max, is->seek_flags);
2518             if (ret < 0) {
2519                 fprintf(stderr, "%s: error while seeking\n", is->ic->filename);
2520             }else{
2521                 if (is->audio_stream >= 0) {
2522                     packet_queue_flush(&is->audioq);
2523                     packet_queue_put(&is->audioq, &flush_pkt);
2524                 }
2525                 if (is->subtitle_stream >= 0) {
2526                     packet_queue_flush(&is->subtitleq);
2527                     packet_queue_put(&is->subtitleq, &flush_pkt);
2528                 }
2529                 if (is->video_stream >= 0) {
2530                     packet_queue_flush(&is->videoq);
2531                     packet_queue_put(&is->videoq, &flush_pkt);
2532                 }
2533             }
2534             is->seek_req = 0;
2535             eof= 0;
2536         }
2537
2538         /* if the queues are full, no need to read more */
2539         if (   is->audioq.size + is->videoq.size + is->subtitleq.size > MAX_QUEUE_SIZE
2540             || (   (is->audioq   .size  > MIN_AUDIOQ_SIZE || is->audio_stream<0)
2541                 && (is->videoq   .nb_packets > MIN_FRAMES || is->video_stream<0)
2542                 && (is->subtitleq.nb_packets > MIN_FRAMES || is->subtitle_stream<0))) {
2543             /* wait 10 ms */
2544             SDL_Delay(10);
2545             continue;
2546         }
2547         if(eof) {
2548             if(is->video_stream >= 0){
2549                 av_init_packet(pkt);
2550                 pkt->data=NULL;
2551                 pkt->size=0;
2552                 pkt->stream_index= is->video_stream;
2553                 packet_queue_put(&is->videoq, pkt);
2554             }
2555             SDL_Delay(10);
2556             if(is->audioq.size + is->videoq.size + is->subtitleq.size ==0){
2557                 if(loop!=1 && (!loop || --loop)){
2558                     stream_seek(cur_stream, start_time != AV_NOPTS_VALUE ? start_time : 0, 0, 0);
2559                 }else if(autoexit){
2560                     ret=AVERROR_EOF;
2561                     goto fail;
2562                 }
2563             }
2564             eof=0;
2565             continue;
2566         }
2567         ret = av_read_frame(ic, pkt);
2568         if (ret < 0) {
2569             if (ret == AVERROR_EOF || url_feof(ic->pb))
2570                 eof=1;
2571             if (ic->pb && ic->pb->error)
2572                 break;
2573             SDL_Delay(100); /* wait for user event */
2574             continue;
2575         }
2576         /* check if packet is in play range specified by user, then queue, otherwise discard */
2577         pkt_in_play_range = duration == AV_NOPTS_VALUE ||
2578                 (pkt->pts - ic->streams[pkt->stream_index]->start_time) *
2579                 av_q2d(ic->streams[pkt->stream_index]->time_base) -
2580                 (double)(start_time != AV_NOPTS_VALUE ? start_time : 0)/1000000
2581                 <= ((double)duration/1000000);
2582         if (pkt->stream_index == is->audio_stream && pkt_in_play_range) {
2583             packet_queue_put(&is->audioq, pkt);
2584         } else if (pkt->stream_index == is->video_stream && pkt_in_play_range) {
2585             packet_queue_put(&is->videoq, pkt);
2586         } else if (pkt->stream_index == is->subtitle_stream && pkt_in_play_range) {
2587             packet_queue_put(&is->subtitleq, pkt);
2588         } else {
2589             av_free_packet(pkt);
2590         }
2591     }
2592     /* wait until the end */
2593     while (!is->abort_request) {
2594         SDL_Delay(100);
2595     }
2596
2597     ret = 0;
2598  fail:
2599     /* disable interrupting */
2600     global_video_state = NULL;
2601
2602     /* close each stream */
2603     if (is->audio_stream >= 0)
2604         stream_component_close(is, is->audio_stream);
2605     if (is->video_stream >= 0)
2606         stream_component_close(is, is->video_stream);
2607     if (is->subtitle_stream >= 0)
2608         stream_component_close(is, is->subtitle_stream);
2609     if (is->ic) {
2610         av_close_input_file(is->ic);
2611         is->ic = NULL; /* safety */
2612     }
2613     avio_set_interrupt_cb(NULL);
2614
2615     if (ret != 0) {
2616         SDL_Event event;
2617
2618         event.type = FF_QUIT_EVENT;
2619         event.user.data1 = is;
2620         SDL_PushEvent(&event);
2621     }
2622     return 0;
2623 }
2624
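     /* allocate a VideoState for the given input and start the demuxing
        (decode) thread */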
2625 static VideoState *stream_open(const char *filename, AVInputFormat *iformat)
2626 {
2627     VideoState *is;
2628
2629     is = av_mallocz(sizeof(VideoState));
2630     if (!is)
2631         return NULL;
2632     av_strlcpy(is->filename, filename, sizeof(is->filename));
2633     is->iformat = iformat;
2634     is->ytop = 0;
2635     is->xleft = 0;
2636
2637     /* start video display */
2638     is->pictq_mutex = SDL_CreateMutex();
2639     is->pictq_cond = SDL_CreateCond();
2640
2641     is->subpq_mutex = SDL_CreateMutex();
2642     is->subpq_cond = SDL_CreateCond();
2643
2644     is->av_sync_type = av_sync_type;
2645     is->parse_tid = SDL_CreateThread(decode_thread, is);
2646     if (!is->parse_tid) {
2647         av_free(is);
2648         return NULL;
2649     }
2650     return is;
2651 }
2652
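     /* switch to the next suitable stream of the given type (audio, video or
        subtitle) */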
2653 static void stream_cycle_channel(VideoState *is, int codec_type)
2654 {
2655     AVFormatContext *ic = is->ic;
2656     int start_index, stream_index;
2657     AVStream *st;
2658
2659     if (codec_type == AVMEDIA_TYPE_VIDEO)
2660         start_index = is->video_stream;
2661     else if (codec_type == AVMEDIA_TYPE_AUDIO)
2662         start_index = is->audio_stream;
2663     else
2664         start_index = is->subtitle_stream;
2665     if (start_index < (codec_type == AVMEDIA_TYPE_SUBTITLE ? -1 : 0))
2666         return;
2667     stream_index = start_index;
2668     for(;;) {
2669         if (++stream_index >= is->ic->nb_streams)
2670         {
2671             if (codec_type == AVMEDIA_TYPE_SUBTITLE)
2672             {
2673                 stream_index = -1;
2674                 goto the_end;
2675             } else
2676                 stream_index = 0;
2677         }
2678         if (stream_index == start_index)
2679             return;
2680         st = ic->streams[stream_index];
2681         if (st->codec->codec_type == codec_type) {
2682             /* check that parameters are OK */
2683             switch(codec_type) {
2684             case AVMEDIA_TYPE_AUDIO:
2685                 if (st->codec->sample_rate != 0 &&
2686                     st->codec->channels != 0)
2687                     goto the_end;
2688                 break;
2689             case AVMEDIA_TYPE_VIDEO:
2690             case AVMEDIA_TYPE_SUBTITLE:
2691                 goto the_end;
2692             default:
2693                 break;
2694             }
2695         }
2696     }
2697  the_end:
2698     stream_component_close(is, start_index);
2699     stream_component_open(is, stream_index);
2700 }
2701
2702
2703 static void toggle_full_screen(void)
2704 {
2705     is_full_screen = !is_full_screen;
2706     if (!fs_screen_width) {
2707         /* use default SDL method */
2708 //        SDL_WM_ToggleFullScreen(screen);
2709     }
2710     video_open(cur_stream);
2711 }
2712
2713 static void toggle_pause(void)
2714 {
2715     if (cur_stream)
2716         stream_toggle_pause(cur_stream);
2717     step = 0;
2718 }
2719
2720 static void step_to_next_frame(void)
2721 {
2722     if (cur_stream) {
2723         /* if the stream is paused unpause it, then step */
2724         if (cur_stream->paused)
2725             stream_toggle_pause(cur_stream);
2726     }
2727     step = 1;
2728 }
2729
2730 static void toggle_audio_display(void)
2731 {
2732     if (cur_stream) {
2733         int bgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0x00);
2734         cur_stream->show_mode = (cur_stream->show_mode + 1) % SHOW_MODE_NB;
2735         fill_rectangle(screen,
2736                     cur_stream->xleft, cur_stream->ytop, cur_stream->width, cur_stream->height,
2737                     bgcolor);
2738         SDL_UpdateRect(screen, cur_stream->xleft, cur_stream->ytop, cur_stream->width, cur_stream->height);
2739     }
2740 }
2741
2742 /* handle an event sent by the GUI */
2743 static void event_loop(void)
2744 {
2745     SDL_Event event;
2746     double incr, pos, frac;
2747
2748     for(;;) {
2749         double x;
2750         SDL_WaitEvent(&event);
2751         switch(event.type) {
2752         case SDL_KEYDOWN:
2753             if (exit_on_keydown) {
2754                 do_exit();
2755                 break;
2756             }
2757             switch(event.key.keysym.sym) {
2758             case SDLK_ESCAPE:
2759             case SDLK_q:
2760                 do_exit();
2761                 break;
2762             case SDLK_f:
2763                 toggle_full_screen();
2764                 break;
2765             case SDLK_p:
2766             case SDLK_SPACE:
2767                 toggle_pause();
2768                 break;
2769             case SDLK_s: //S: Step to next frame
2770                 step_to_next_frame();
2771                 break;
2772             case SDLK_a:
2773                 if (cur_stream)
2774                     stream_cycle_channel(cur_stream, AVMEDIA_TYPE_AUDIO);
2775                 break;
2776             case SDLK_v:
2777                 if (cur_stream)
2778                     stream_cycle_channel(cur_stream, AVMEDIA_TYPE_VIDEO);
2779                 break;
2780             case SDLK_t:
2781                 if (cur_stream)
2782                     stream_cycle_channel(cur_stream, AVMEDIA_TYPE_SUBTITLE);
2783                 break;
2784             case SDLK_w:
2785                 toggle_audio_display();
2786                 break;
2787             case SDLK_LEFT:
2788                 incr = -10.0;
2789                 goto do_seek;
2790             case SDLK_RIGHT:
2791                 incr = 10.0;
2792                 goto do_seek;
2793             case SDLK_UP:
2794                 incr = 60.0;
2795                 goto do_seek;
2796             case SDLK_DOWN:
2797                 incr = -60.0;
2798             do_seek:
2799                 if (cur_stream) {
2800                     if (seek_by_bytes) {
2801                         if (cur_stream->video_stream >= 0 && cur_stream->video_current_pos>=0){
2802                             pos= cur_stream->video_current_pos;
2803                         }else if(cur_stream->audio_stream >= 0 && cur_stream->audio_pkt.pos>=0){
2804                             pos= cur_stream->audio_pkt.pos;
2805                         }else
2806                             pos = avio_tell(cur_stream->ic->pb);
2807                         if (cur_stream->ic->bit_rate)
2808                             incr *= cur_stream->ic->bit_rate / 8.0;
2809                         else
2810                             incr *= 180000.0;
2811                         pos += incr;
2812                         stream_seek(cur_stream, pos, incr, 1);
2813                     } else {
2814                         pos = get_master_clock(cur_stream);
2815                         pos += incr;
2816                         stream_seek(cur_stream, (int64_t)(pos * AV_TIME_BASE), (int64_t)(incr * AV_TIME_BASE), 0);
2817                     }
2818                 }
2819                 break;
2820             default:
2821                 break;
2822             }
2823             break;
2824         case SDL_MOUSEBUTTONDOWN:
2825             if (exit_on_mousedown) {
2826                 do_exit();
2827                 break;
2828             }
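                 /* fall through: a mouse click is handled like mouse motion below */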
2829         case SDL_MOUSEMOTION:
2830             if(event.type ==SDL_MOUSEBUTTONDOWN){
2831                 x= event.button.x;
2832             }else{
2833                 if(event.motion.state != SDL_PRESSED)
2834                     break;
2835                 x= event.motion.x;
2836             }
2837             if (cur_stream) {
2838                 if(seek_by_bytes || cur_stream->ic->duration<=0){
2839                     uint64_t size=  avio_size(cur_stream->ic->pb);
2840                     stream_seek(cur_stream, size*x/cur_stream->width, 0, 1);
2841                 }else{
2842                     int64_t ts;
2843                     int ns, hh, mm, ss;
2844                     int tns, thh, tmm, tss;
2845                     tns = cur_stream->ic->duration/1000000LL;
2846                     thh = tns/3600;
2847                     tmm = (tns%3600)/60;
2848                     tss = (tns%60);
2849                     frac = x/cur_stream->width;
2850                     ns = frac*tns;
2851                     hh = ns/3600;
2852                     mm = (ns%3600)/60;
2853                     ss = (ns%60);
2854                     fprintf(stderr, "Seek to %2.0f%% (%2d:%02d:%02d) of total duration (%2d:%02d:%02d)       \n", frac*100,
2855                             hh, mm, ss, thh, tmm, tss);
2856                     ts = frac*cur_stream->ic->duration;
2857                     if (cur_stream->ic->start_time != AV_NOPTS_VALUE)
2858                         ts += cur_stream->ic->start_time;
2859                     stream_seek(cur_stream, ts, 0, 0);
2860                 }
2861             }
2862             break;
2863         case SDL_VIDEORESIZE:
2864             if (cur_stream) {
2865                 screen = SDL_SetVideoMode(event.resize.w, event.resize.h, 0,
2866                                           SDL_HWSURFACE|SDL_RESIZABLE|SDL_ASYNCBLIT|SDL_HWACCEL);
2867                 screen_width = cur_stream->width = event.resize.w;
2868                 screen_height= cur_stream->height= event.resize.h;
2869             }
2870             break;
2871         case SDL_QUIT:
2872         case FF_QUIT_EVENT:
2873             do_exit();
2874             break;
2875         case FF_ALLOC_EVENT:
2876             video_open(event.user.data1);
2877             alloc_picture(event.user.data1);
2878             break;
2879         case FF_REFRESH_EVENT:
2880             video_refresh_timer(event.user.data1);
2881             cur_stream->refresh=0;
2882             break;
2883         default:
2884             break;
2885         }
2886     }
2887 }
2888
2889 static void opt_frame_size(const char *arg)
2890 {
2891     if (av_parse_video_size(&frame_width, &frame_height, arg) < 0) {
2892         fprintf(stderr, "Incorrect frame size\n");
2893         exit(1);
2894     }
2895     if ((frame_width % 2) != 0 || (frame_height % 2) != 0) {
2896         fprintf(stderr, "Frame size must be a multiple of 2\n");
2897         exit(1);
2898     }
2899 }
2900
2901 static int opt_width(const char *opt, const char *arg)
2902 {
2903     screen_width = parse_number_or_die(opt, arg, OPT_INT64, 1, INT_MAX);
2904     return 0;
2905 }
2906
2907 static int opt_height(const char *opt, const char *arg)
2908 {
2909     screen_height = parse_number_or_die(opt, arg, OPT_INT64, 1, INT_MAX);
2910     return 0;
2911 }
2912
2913 static void opt_format(const char *arg)
2914 {
2915     file_iformat = av_find_input_format(arg);
2916     if (!file_iformat) {
2917         fprintf(stderr, "Unknown input format: %s\n", arg);
2918         exit(1);
2919     }
2920 }
2921
2922 static void opt_frame_pix_fmt(const char *arg)
2923 {
2924     frame_pix_fmt = av_get_pix_fmt(arg);
2925 }
2926
2927 static int opt_sync(const char *opt, const char *arg)
2928 {
2929     if (!strcmp(arg, "audio"))
2930         av_sync_type = AV_SYNC_AUDIO_MASTER;
2931     else if (!strcmp(arg, "video"))
2932         av_sync_type = AV_SYNC_VIDEO_MASTER;
2933     else if (!strcmp(arg, "ext"))
2934         av_sync_type = AV_SYNC_EXTERNAL_CLOCK;
2935     else {
2936         fprintf(stderr, "Unknown value for %s: %s\n", opt, arg);
2937         exit(1);
2938     }
2939     return 0;
2940 }
2941
2942 static int opt_seek(const char *opt, const char *arg)
2943 {
2944     start_time = parse_time_or_die(opt, arg, 1);
2945     return 0;
2946 }
2947
2948 static int opt_duration(const char *opt, const char *arg)
2949 {
2950     duration = parse_time_or_die(opt, arg, 1);
2951     return 0;
2952 }
2953
2954 static int opt_debug(const char *opt, const char *arg)
2955 {
2956     av_log_set_level(99);
2957     debug = parse_number_or_die(opt, arg, OPT_INT64, 0, INT_MAX);
2958     return 0;
2959 }
2960
2961 static int opt_vismv(const char *opt, const char *arg)
2962 {
2963     debug_mv = parse_number_or_die(opt, arg, OPT_INT64, INT_MIN, INT_MAX);
2964     return 0;
2965 }
2966
2967 static int opt_thread_count(const char *opt, const char *arg)
2968 {
2969     thread_count= parse_number_or_die(opt, arg, OPT_INT64, 0, INT_MAX);
2970 #if !HAVE_THREADS
2971     fprintf(stderr, "Warning: not compiled with thread support, using thread emulation\n");
2972 #endif
2973     return 0;
2974 }
2975
2976 static int opt_show_mode(const char *opt, const char *arg)
2977 {
2978     show_mode = !strcmp(arg, "video") ? SHOW_MODE_VIDEO :
2979                 !strcmp(arg, "waves") ? SHOW_MODE_WAVES :
2980                 !strcmp(arg, "rdft" ) ? SHOW_MODE_RDFT  :
2981                 parse_number_or_die(opt, arg, OPT_INT, 0, SHOW_MODE_NB-1);
2982     return 0;
2983 }
2984
2985 static const OptionDef options[] = {
2986 #include "cmdutils_common_opts.h"
2987     { "x", HAS_ARG | OPT_FUNC2, {(void*)opt_width}, "force displayed width", "width" },
2988     { "y", HAS_ARG | OPT_FUNC2, {(void*)opt_height}, "force displayed height", "height" },
2989     { "s", HAS_ARG | OPT_VIDEO, {(void*)opt_frame_size}, "set frame size (WxH or abbreviation)", "size" },
2990     { "fs", OPT_BOOL, {(void*)&is_full_screen}, "force full screen" },
2991     { "an", OPT_BOOL, {(void*)&audio_disable}, "disable audio" },
2992     { "vn", OPT_BOOL, {(void*)&video_disable}, "disable video" },
2993     { "ast", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&wanted_stream[AVMEDIA_TYPE_AUDIO]}, "select desired audio stream", "stream_number" },
2994     { "vst", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&wanted_stream[AVMEDIA_TYPE_VIDEO]}, "select desired video stream", "stream_number" },
2995     { "sst", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&wanted_stream[AVMEDIA_TYPE_SUBTITLE]}, "select desired subtitle stream", "stream_number" },
2996     { "ss", HAS_ARG | OPT_FUNC2, {(void*)&opt_seek}, "seek to a given position in seconds", "pos" },
2997     { "t", HAS_ARG | OPT_FUNC2, {(void*)&opt_duration}, "play \"duration\" seconds of audio/video", "duration" },
2998     { "bytes", OPT_INT | HAS_ARG, {(void*)&seek_by_bytes}, "seek by bytes 0=off 1=on -1=auto", "val" },
2999     { "nodisp", OPT_BOOL, {(void*)&display_disable}, "disable graphical display" },
3000     { "f", HAS_ARG, {(void*)opt_format}, "force format", "fmt" },
3001     { "pix_fmt", HAS_ARG | OPT_EXPERT | OPT_VIDEO, {(void*)opt_frame_pix_fmt}, "set pixel format", "format" },
3002     { "stats", OPT_BOOL | OPT_EXPERT, {(void*)&show_status}, "show status", "" },
3003     { "debug", HAS_ARG | OPT_FUNC2 | OPT_EXPERT, {(void*)opt_debug}, "print specific debug info", "" },
3004     { "bug", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&workaround_bugs}, "workaround bugs", "" },
3005     { "vismv", HAS_ARG | OPT_FUNC2 | OPT_EXPERT, {(void*)opt_vismv}, "visualize motion vectors", "" },
3006     { "fast", OPT_BOOL | OPT_EXPERT, {(void*)&fast}, "non spec compliant optimizations", "" },
3007     { "genpts", OPT_BOOL | OPT_EXPERT, {(void*)&genpts}, "generate pts", "" },
3008     { "drp", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&decoder_reorder_pts}, "let decoder reorder pts 0=off 1=on -1=auto", ""},
3009     { "lowres", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&lowres}, "", "" },
3010     { "skiploop", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&skip_loop_filter}, "", "" },
3011     { "skipframe", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&skip_frame}, "", "" },
3012     { "skipidct", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&skip_idct}, "", "" },
3013     { "idct", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&idct}, "set idct algo",  "algo" },
3014     { "er", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&error_recognition}, "set error detection threshold (0-4)",  "threshold" },
3015     { "ec", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&error_concealment}, "set error concealment options",  "bit_mask" },
3016     { "sync", HAS_ARG | OPT_FUNC2 | OPT_EXPERT, {(void*)opt_sync}, "set audio-video sync. type (type=audio/video/ext)", "type" },
3017     { "threads", HAS_ARG | OPT_FUNC2 | OPT_EXPERT, {(void*)opt_thread_count}, "thread count", "count" },
3018     { "autoexit", OPT_BOOL | OPT_EXPERT, {(void*)&autoexit}, "exit at the end", "" },
3019     { "exitonkeydown", OPT_BOOL | OPT_EXPERT, {(void*)&exit_on_keydown}, "exit on key down", "" },
3020     { "exitonmousedown", OPT_BOOL | OPT_EXPERT, {(void*)&exit_on_mousedown}, "exit on mouse down", "" },
3021     { "loop", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&loop}, "set number of times the playback shall be looped", "loop count" },
3022     { "framedrop", OPT_BOOL | OPT_EXPERT, {(void*)&framedrop}, "drop frames when cpu is too slow", "" },
3023     { "window_title", OPT_STRING | HAS_ARG, {(void*)&window_title}, "set window title", "window title" },
3024 #if CONFIG_AVFILTER
3025     { "vf", OPT_STRING | HAS_ARG, {(void*)&vfilters}, "video filters", "filter list" },
3026 #endif
3027     { "rdftspeed", OPT_INT | HAS_ARG| OPT_AUDIO | OPT_EXPERT, {(void*)&rdftspeed}, "rdft speed", "msecs" },
3028     { "showmode", HAS_ARG | OPT_FUNC2, {(void*)opt_show_mode}, "select show mode (0 = video, 1 = waves, 2 = RDFT)", "mode" },
3029     { "default", OPT_FUNC2 | HAS_ARG | OPT_AUDIO | OPT_VIDEO | OPT_EXPERT, {(void*)opt_default}, "generic catch all option", "" },
3030     { "i", OPT_DUMMY, {NULL}, "ffmpeg compatibility dummy option", ""},
3031     { NULL, },
3032 };
3033
3034 static void show_usage(void)
3035 {
3036     printf("Simple media player\n");
3037     printf("usage: ffplay [options] input_file\n");
3038     printf("\n");
3039 }
3040
3041 static void show_help(void)
3042 {
3043     av_log_set_callback(log_callback_help);
3044     show_usage();
3045     show_help_options(options, "Main options:\n",
3046                       OPT_EXPERT, 0);
3047     show_help_options(options, "\nAdvanced options:\n",
3048                       OPT_EXPERT, OPT_EXPERT);
3049     printf("\n");
3050     av_opt_show2(avcodec_opts[0], NULL,
3051                  AV_OPT_FLAG_DECODING_PARAM, 0);
3052     printf("\n");
3053     av_opt_show2(avformat_opts, NULL,
3054                  AV_OPT_FLAG_DECODING_PARAM, 0);
3055 #if !CONFIG_AVFILTER
3056     printf("\n");
3057     av_opt_show2(sws_opts, NULL,
3058                  AV_OPT_FLAG_ENCODING_PARAM, 0);
3059 #endif
3060     printf("\nWhile playing:\n"
3061            "q, ESC              quit\n"
3062            "f                   toggle full screen\n"
3063            "p, SPC              pause\n"
3064            "a                   cycle audio channel\n"
3065            "v                   cycle video channel\n"
3066            "t                   cycle subtitle channel\n"
3067            "w                   show audio waves\n"
3068            "s                   activate frame-step mode\n"
3069            "left/right          seek backward/forward 10 seconds\n"
3070            "down/up             seek backward/forward 1 minute\n"
3071            "mouse click         seek to percentage in file corresponding to fraction of width\n"
3072            );
3073 }
3074
3075 static void opt_input_file(const char *filename)
3076 {
3077     if (input_filename) {
3078         fprintf(stderr, "Argument '%s' provided as input filename, but '%s' was already specified.\n",
3079                 filename, input_filename);
3080         exit(1);
3081     }
3082     if (!strcmp(filename, "-"))
3083         filename = "pipe:";
3084     input_filename = filename;
3085 }
3086
3087 /* program entry point */
3088 int main(int argc, char **argv)
3089 {
3090     int flags;
3091
3092     av_log_set_flags(AV_LOG_SKIP_REPEATED);
3093
3094     /* register all codecs, demux and protocols */
3095     avcodec_register_all();
3096 #if CONFIG_AVDEVICE
3097     avdevice_register_all();
3098 #endif
3099 #if CONFIG_AVFILTER
3100     avfilter_register_all();
3101 #endif
3102     av_register_all();
3103
3104     init_opts();
3105
3106     show_banner();
3107
3108     parse_options(argc, argv, options, opt_input_file);
3109
3110     if (!input_filename) {
3111         show_usage();
3112         fprintf(stderr, "An input file must be specified\n");
3113         fprintf(stderr, "Use -h to get full help or, even better, run 'man ffplay'\n");
3114         exit(1);
3115     }
3116
3117     if (display_disable) {
3118         video_disable = 1;
3119     }
3120     flags = SDL_INIT_VIDEO | SDL_INIT_AUDIO | SDL_INIT_TIMER;
3121 #if !defined(__MINGW32__) && !defined(__APPLE__)
3122     flags |= SDL_INIT_EVENTTHREAD; /* Not supported on Windows or Mac OS X */
3123 #endif
3124     if (SDL_Init (flags)) {
3125         fprintf(stderr, "Could not initialize SDL - %s\n", SDL_GetError());
3126         exit(1);
3127     }
3128
3129     if (!display_disable) {
3130 #if HAVE_SDL_VIDEO_SIZE
3131         const SDL_VideoInfo *vi = SDL_GetVideoInfo();
3132         fs_screen_width = vi->current_w;
3133         fs_screen_height = vi->current_h;
3134 #endif
3135     }
3136
3137     SDL_EventState(SDL_ACTIVEEVENT, SDL_IGNORE);
3138     SDL_EventState(SDL_SYSWMEVENT, SDL_IGNORE);
3139     SDL_EventState(SDL_USEREVENT, SDL_IGNORE);
3140
3141     av_init_packet(&flush_pkt);
3142     flush_pkt.data= "FLUSH";
3143
3144     cur_stream = stream_open(input_filename, file_iformat);
3145
3146     event_loop();
3147
3148     /* never reached: event_loop() does not return */
3149
3150     return 0;
3151 }