1 /*
2  * ffplay : Simple Media Player based on the Libav libraries
3  * Copyright (c) 2003 Fabrice Bellard
4  *
5  * This file is part of Libav.
6  *
7  * Libav is free software; you can redistribute it and/or
8  * modify it under the terms of the GNU Lesser General Public
9  * License as published by the Free Software Foundation; either
10  * version 2.1 of the License, or (at your option) any later version.
11  *
12  * Libav is distributed in the hope that it will be useful,
13  * but WITHOUT ANY WARRANTY; without even the implied warranty of
14  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
15  * Lesser General Public License for more details.
16  *
17  * You should have received a copy of the GNU Lesser General Public
18  * License along with Libav; if not, write to the Free Software
19  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
20  */
21
22 #include "config.h"
23 #include <inttypes.h>
24 #include <math.h>
25 #include <limits.h>
26 #include "libavutil/avstring.h"
27 #include "libavutil/colorspace.h"
28 #include "libavutil/pixdesc.h"
29 #include "libavutil/imgutils.h"
30 #include "libavutil/parseutils.h"
31 #include "libavutil/samplefmt.h"
32 #include "libavformat/avformat.h"
33 #include "libavdevice/avdevice.h"
34 #include "libswscale/swscale.h"
35 #include "libavcodec/audioconvert.h"
36 #include "libavutil/opt.h"
37 #include "libavcodec/avfft.h"
38
39 #if CONFIG_AVFILTER
40 # include "libavfilter/avfilter.h"
41 # include "libavfilter/avfiltergraph.h"
42 #endif
43
44 #include "cmdutils.h"
45
46 #include <SDL.h>
47 #include <SDL_thread.h>
48
49 #ifdef __MINGW32__
50 #undef main /* We don't want SDL to override our main() */
51 #endif
52
53 #include <unistd.h>
54 #include <assert.h>
55
56 const char program_name[] = "ffplay";
57 const int program_birth_year = 2003;
58
59 //#define DEBUG
60 //#define DEBUG_SYNC
61
62 #define MAX_QUEUE_SIZE (15 * 1024 * 1024)
63 #define MIN_AUDIOQ_SIZE (20 * 16 * 1024)
64 #define MIN_FRAMES 5
65
66 /* SDL audio buffer size, in samples. Should be small to have precise
67    A/V sync as SDL does not have hardware buffer fullness info. */
68 #define SDL_AUDIO_BUFFER_SIZE 1024
69
70 /* no AV sync correction is done if below the AV sync threshold */
71 #define AV_SYNC_THRESHOLD 0.01
72 /* no AV correction is done if too big error */
73 #define AV_NOSYNC_THRESHOLD 10.0
74
75 #define FRAME_SKIP_FACTOR 0.05
76
77 /* maximum audio speed change to get correct sync */
78 #define SAMPLE_CORRECTION_PERCENT_MAX 10
79
80 /* we use about AUDIO_DIFF_AVG_NB A-V differences to make the average */
81 #define AUDIO_DIFF_AVG_NB   20
82
83 /* NOTE: the size must be big enough to compensate for the hardware audio buffer size */
84 #define SAMPLE_ARRAY_SIZE (2*65536)
85
86 static int sws_flags = SWS_BICUBIC;
87
88 typedef struct PacketQueue {
89     AVPacketList *first_pkt, *last_pkt;
90     int nb_packets;
91     int size;
92     int abort_request;
93     SDL_mutex *mutex;
94     SDL_cond *cond;
95 } PacketQueue;
96
97 #define VIDEO_PICTURE_QUEUE_SIZE 2
98 #define SUBPICTURE_QUEUE_SIZE 4
99
100 typedef struct VideoPicture {
101     double pts;                                  ///<presentation time stamp for this picture
102     double target_clock;                         ///<av_gettime() time at which this should be displayed ideally
103     int64_t pos;                                 ///<byte position in file
104     SDL_Overlay *bmp;
105     int width, height; /* source height & width */
106     int allocated;
107     enum PixelFormat pix_fmt;
108
109 #if CONFIG_AVFILTER
110     AVFilterBufferRef *picref;
111 #endif
112 } VideoPicture;
113
114 typedef struct SubPicture {
115     double pts; /* presentation time stamp for this picture */
116     AVSubtitle sub;
117 } SubPicture;
118
119 enum {
120     AV_SYNC_AUDIO_MASTER, /* default choice */
121     AV_SYNC_VIDEO_MASTER,
122     AV_SYNC_EXTERNAL_CLOCK, /* synchronize to an external clock */
123 };
124
125 typedef struct VideoState {
126     SDL_Thread *parse_tid;
127     SDL_Thread *video_tid;
128     SDL_Thread *refresh_tid;
129     AVInputFormat *iformat;
130     int no_background;
131     int abort_request;
132     int paused;
133     int last_paused;
134     int seek_req;
135     int seek_flags;
136     int64_t seek_pos;
137     int64_t seek_rel;
138     int read_pause_return;
139     AVFormatContext *ic;
140     int dtg_active_format;
141
142     int audio_stream;
143
144     int av_sync_type;
145     double external_clock; /* external clock base */
146     int64_t external_clock_time;
147
148     double audio_clock;
149     double audio_diff_cum; /* used for AV difference average computation */
150     double audio_diff_avg_coef;
151     double audio_diff_threshold;
152     int audio_diff_avg_count;
153     AVStream *audio_st;
154     PacketQueue audioq;
155     int audio_hw_buf_size;
156     /* samples output by the codec. we reserve more space for avsync
157        compensation */
158     DECLARE_ALIGNED(16,uint8_t,audio_buf1)[(AVCODEC_MAX_AUDIO_FRAME_SIZE * 3) / 2];
159     DECLARE_ALIGNED(16,uint8_t,audio_buf2)[(AVCODEC_MAX_AUDIO_FRAME_SIZE * 3) / 2];
160     uint8_t *audio_buf;
161     unsigned int audio_buf_size; /* in bytes */
162     int audio_buf_index; /* in bytes */
163     AVPacket audio_pkt_temp;
164     AVPacket audio_pkt;
165     enum AVSampleFormat audio_src_fmt;
166     AVAudioConvert *reformat_ctx;
167
168     int show_audio; /* if true, display audio samples */
169     int16_t sample_array[SAMPLE_ARRAY_SIZE];
170     int sample_array_index;
171     int last_i_start;
172     RDFTContext *rdft;
173     int rdft_bits;
174     FFTSample *rdft_data;
175     int xpos;
176
177     SDL_Thread *subtitle_tid;
178     int subtitle_stream;
179     int subtitle_stream_changed;
180     AVStream *subtitle_st;
181     PacketQueue subtitleq;
182     SubPicture subpq[SUBPICTURE_QUEUE_SIZE];
183     int subpq_size, subpq_rindex, subpq_windex;
184     SDL_mutex *subpq_mutex;
185     SDL_cond *subpq_cond;
186
187     double frame_timer;
188     double frame_last_pts;
189     double frame_last_delay;
190     double video_clock;                          ///<pts of last decoded frame / predicted pts of next decoded frame
191     int video_stream;
192     AVStream *video_st;
193     PacketQueue videoq;
194     double video_current_pts;                    ///<current displayed pts (different from video_clock if frame fifos are used)
195     double video_current_pts_drift;              ///<video_current_pts - time (av_gettime) at which we updated video_current_pts - used to have running video pts
196     int64_t video_current_pos;                   ///<current displayed file pos
197     VideoPicture pictq[VIDEO_PICTURE_QUEUE_SIZE];
198     int pictq_size, pictq_rindex, pictq_windex;
199     SDL_mutex *pictq_mutex;
200     SDL_cond *pictq_cond;
201 #if !CONFIG_AVFILTER
202     struct SwsContext *img_convert_ctx;
203 #endif
204
205     //    QETimer *video_timer;
206     char filename[1024];
207     int width, height, xleft, ytop;
208
209     PtsCorrectionContext pts_ctx;
210
211 #if CONFIG_AVFILTER
212     AVFilterContext *out_video_filter;          ///<the last filter in the video chain
213 #endif
214
215     float skip_frames;
216     float skip_frames_index;
217     int refresh;
218 } VideoState;
219
220 static void show_help(void);
221
222 /* options specified by the user */
223 static AVInputFormat *file_iformat;
224 static const char *input_filename;
225 static const char *window_title;
226 static int fs_screen_width;
227 static int fs_screen_height;
228 static int screen_width = 0;
229 static int screen_height = 0;
230 static int frame_width = 0;
231 static int frame_height = 0;
232 static enum PixelFormat frame_pix_fmt = PIX_FMT_NONE;
233 static int audio_disable;
234 static int video_disable;
235 static int wanted_stream[AVMEDIA_TYPE_NB]={
236     [AVMEDIA_TYPE_AUDIO]=-1,
237     [AVMEDIA_TYPE_VIDEO]=-1,
238     [AVMEDIA_TYPE_SUBTITLE]=-1,
239 };
240 static int seek_by_bytes=-1;
241 static int display_disable;
242 static int show_status = 1;
243 static int av_sync_type = AV_SYNC_AUDIO_MASTER;
244 static int64_t start_time = AV_NOPTS_VALUE;
245 static int64_t duration = AV_NOPTS_VALUE;
246 static int debug = 0;
247 static int debug_mv = 0;
248 static int step = 0;
249 static int thread_count = 1;
250 static int workaround_bugs = 1;
251 static int fast = 0;
252 static int genpts = 0;
253 static int lowres = 0;
254 static int idct = FF_IDCT_AUTO;
255 static enum AVDiscard skip_frame= AVDISCARD_DEFAULT;
256 static enum AVDiscard skip_idct= AVDISCARD_DEFAULT;
257 static enum AVDiscard skip_loop_filter= AVDISCARD_DEFAULT;
258 static int error_recognition = FF_ER_CAREFUL;
259 static int error_concealment = 3;
260 static int decoder_reorder_pts= -1;
261 static int autoexit;
262 static int exit_on_keydown;
263 static int exit_on_mousedown;
264 static int loop=1;
265 static int framedrop=1;
266
267 static int rdftspeed=20;
268 #if CONFIG_AVFILTER
269 static char *vfilters = NULL;
270 #endif
271
272 /* current context */
273 static int is_full_screen;
274 static VideoState *cur_stream;
275 static int64_t audio_callback_time;
276
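/* Sentinel packet: packet_queue_put() lets it through without duplicating its
   data, and get_video_frame() uses it as a signal to flush the decoder's
   buffers. */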
277 static AVPacket flush_pkt;
278
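/* Custom SDL user events: FF_ALLOC_EVENT asks the main (event) thread to
   allocate a video overlay, FF_REFRESH_EVENT triggers a display refresh, and
   FF_QUIT_EVENT requests shutdown. */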
279 #define FF_ALLOC_EVENT   (SDL_USEREVENT)
280 #define FF_REFRESH_EVENT (SDL_USEREVENT + 1)
281 #define FF_QUIT_EVENT    (SDL_USEREVENT + 2)
282
283 static SDL_Surface *screen;
284
285 static int packet_queue_put(PacketQueue *q, AVPacket *pkt);
286
287 /* packet queue handling */
288 static void packet_queue_init(PacketQueue *q)
289 {
290     memset(q, 0, sizeof(PacketQueue));
291     q->mutex = SDL_CreateMutex();
292     q->cond = SDL_CreateCond();
293     packet_queue_put(q, &flush_pkt);
294 }
295
296 static void packet_queue_flush(PacketQueue *q)
297 {
298     AVPacketList *pkt, *pkt1;
299
300     SDL_LockMutex(q->mutex);
301     for(pkt = q->first_pkt; pkt != NULL; pkt = pkt1) {
302         pkt1 = pkt->next;
303         av_free_packet(&pkt->pkt);
304         av_freep(&pkt);
305     }
306     q->last_pkt = NULL;
307     q->first_pkt = NULL;
308     q->nb_packets = 0;
309     q->size = 0;
310     SDL_UnlockMutex(q->mutex);
311 }
312
313 static void packet_queue_end(PacketQueue *q)
314 {
315     packet_queue_flush(q);
316     SDL_DestroyMutex(q->mutex);
317     SDL_DestroyCond(q->cond);
318 }
319
320 static int packet_queue_put(PacketQueue *q, AVPacket *pkt)
321 {
322     AVPacketList *pkt1;
323
324     /* duplicate the packet */
325     if (pkt!=&flush_pkt && av_dup_packet(pkt) < 0)
326         return -1;
327
328     pkt1 = av_malloc(sizeof(AVPacketList));
329     if (!pkt1)
330         return -1;
331     pkt1->pkt = *pkt;
332     pkt1->next = NULL;
333
334
335     SDL_LockMutex(q->mutex);
336
337     if (!q->last_pkt)
338
339         q->first_pkt = pkt1;
340     else
341         q->last_pkt->next = pkt1;
342     q->last_pkt = pkt1;
343     q->nb_packets++;
344     q->size += pkt1->pkt.size + sizeof(*pkt1);
345     /* XXX: should duplicate packet data in DV case */
346     SDL_CondSignal(q->cond);
347
348     SDL_UnlockMutex(q->mutex);
349     return 0;
350 }
351
352 static void packet_queue_abort(PacketQueue *q)
353 {
354     SDL_LockMutex(q->mutex);
355
356     q->abort_request = 1;
357
358     SDL_CondSignal(q->cond);
359
360     SDL_UnlockMutex(q->mutex);
361 }
362
363 /* return < 0 if aborted, 0 if no packet and > 0 if packet.  */
364 static int packet_queue_get(PacketQueue *q, AVPacket *pkt, int block)
365 {
366     AVPacketList *pkt1;
367     int ret;
368
369     SDL_LockMutex(q->mutex);
370
371     for(;;) {
372         if (q->abort_request) {
373             ret = -1;
374             break;
375         }
376
377         pkt1 = q->first_pkt;
378         if (pkt1) {
379             q->first_pkt = pkt1->next;
380             if (!q->first_pkt)
381                 q->last_pkt = NULL;
382             q->nb_packets--;
383             q->size -= pkt1->pkt.size + sizeof(*pkt1);
384             *pkt = pkt1->pkt;
385             av_free(pkt1);
386             ret = 1;
387             break;
388         } else if (!block) {
389             ret = 0;
390             break;
391         } else {
392             SDL_CondWait(q->cond, q->mutex);
393         }
394     }
395     SDL_UnlockMutex(q->mutex);
396     return ret;
397 }
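/*
 * The PacketQueue is a mutex/condvar protected FIFO shared between the demux
 * thread (producer) and the decoder threads (consumers).  A rough usage
 * sketch of the API above, with a hypothetical blocking consumer loop:
 *
 *     PacketQueue q;
 *     AVPacket pkt;
 *     packet_queue_init(&q);                      // also queues flush_pkt
 *     // producer side: packet_queue_put(&q, &pkt);
 *     while (packet_queue_get(&q, &pkt, 1) > 0) { // blocks; < 0 means aborted
 *         // ... decode pkt ...
 *         av_free_packet(&pkt);
 *     }
 *     packet_queue_abort(&q);                     // wakes any blocked consumer
 *     packet_queue_end(&q);                       // flush and destroy
 */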
398
399 static inline void fill_rectangle(SDL_Surface *screen,
400                                   int x, int y, int w, int h, int color)
401 {
402     SDL_Rect rect;
403     rect.x = x;
404     rect.y = y;
405     rect.w = w;
406     rect.h = h;
407     SDL_FillRect(screen, &rect, color);
408 }
409
410 #if 0
411 /* draw only the border of a rectangle */
412 void fill_border(VideoState *s, int x, int y, int w, int h, int color)
413 {
414     int w1, w2, h1, h2;
415
416     /* fill the background */
417     w1 = x;
418     if (w1 < 0)
419         w1 = 0;
420     w2 = s->width - (x + w);
421     if (w2 < 0)
422         w2 = 0;
423     h1 = y;
424     if (h1 < 0)
425         h1 = 0;
426     h2 = s->height - (y + h);
427     if (h2 < 0)
428         h2 = 0;
429     fill_rectangle(screen,
430                    s->xleft, s->ytop,
431                    w1, s->height,
432                    color);
433     fill_rectangle(screen,
434                    s->xleft + s->width - w2, s->ytop,
435                    w2, s->height,
436                    color);
437     fill_rectangle(screen,
438                    s->xleft + w1, s->ytop,
439                    s->width - w1 - w2, h1,
440                    color);
441     fill_rectangle(screen,
442                    s->xleft + w1, s->ytop + s->height - h2,
443                    s->width - w1 - w2, h2,
444                    color);
445 }
446 #endif
447
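/* Fixed-point alpha blend of oldp and newp with alpha a (0..255).  The shift
   s is used when newp is actually the sum of 2^s samples, so that the
   averaging is folded into the blend:
       ALPHA_BLEND(a, old, new,  0) ~ (old*(255-a) + new*a)      / 255
       ALPHA_BLEND(a, old, sum2, 1) ~ (old*(255-a) + (sum2/2)*a) / 255      */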
448 #define ALPHA_BLEND(a, oldp, newp, s)\
449 ((((oldp << s) * (255 - (a))) + (newp * (a))) / (255 << s))
450
451 #define RGBA_IN(r, g, b, a, s)\
452 {\
453     unsigned int v = ((const uint32_t *)(s))[0];\
454     a = (v >> 24) & 0xff;\
455     r = (v >> 16) & 0xff;\
456     g = (v >> 8) & 0xff;\
457     b = v & 0xff;\
458 }
459
460 #define YUVA_IN(y, u, v, a, s, pal)\
461 {\
462     unsigned int val = ((const uint32_t *)(pal))[*(const uint8_t*)(s)];\
463     a = (val >> 24) & 0xff;\
464     y = (val >> 16) & 0xff;\
465     u = (val >> 8) & 0xff;\
466     v = val & 0xff;\
467 }
468
469 #define YUVA_OUT(d, y, u, v, a)\
470 {\
471     ((uint32_t *)(d))[0] = (a << 24) | (y << 16) | (u << 8) | v;\
472 }
473
474
475 #define BPP 1
476
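/* Blend one subtitle rectangle onto a YUV 4:2:0 destination picture.  The
   subtitle bitmap is an 8-bit paletted image (hence BPP == 1) whose palette
   entries have already been converted to AYUV.  Since the destination chroma
   is subsampled 2x2, rows are processed in pairs (with special cases for an
   odd first/last row and an odd left edge), accumulating u/v/alpha over each
   2x2 block before blending. */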
477 static void blend_subrect(AVPicture *dst, const AVSubtitleRect *rect, int imgw, int imgh)
478 {
479     int wrap, wrap3, width2, skip2;
480     int y, u, v, a, u1, v1, a1, w, h;
481     uint8_t *lum, *cb, *cr;
482     const uint8_t *p;
483     const uint32_t *pal;
484     int dstx, dsty, dstw, dsth;
485
486     dstw = av_clip(rect->w, 0, imgw);
487     dsth = av_clip(rect->h, 0, imgh);
488     dstx = av_clip(rect->x, 0, imgw - dstw);
489     dsty = av_clip(rect->y, 0, imgh - dsth);
490     lum = dst->data[0] + dsty * dst->linesize[0];
491     cb = dst->data[1] + (dsty >> 1) * dst->linesize[1];
492     cr = dst->data[2] + (dsty >> 1) * dst->linesize[2];
493
494     width2 = ((dstw + 1) >> 1) + (dstx & ~dstw & 1);
495     skip2 = dstx >> 1;
496     wrap = dst->linesize[0];
497     wrap3 = rect->pict.linesize[0];
498     p = rect->pict.data[0];
499     pal = (const uint32_t *)rect->pict.data[1];  /* Now in YCrCb! */
500
501     if (dsty & 1) {
502         lum += dstx;
503         cb += skip2;
504         cr += skip2;
505
506         if (dstx & 1) {
507             YUVA_IN(y, u, v, a, p, pal);
508             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
509             cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
510             cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
511             cb++;
512             cr++;
513             lum++;
514             p += BPP;
515         }
516         for(w = dstw - (dstx & 1); w >= 2; w -= 2) {
517             YUVA_IN(y, u, v, a, p, pal);
518             u1 = u;
519             v1 = v;
520             a1 = a;
521             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
522
523             YUVA_IN(y, u, v, a, p + BPP, pal);
524             u1 += u;
525             v1 += v;
526             a1 += a;
527             lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
528             cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
529             cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
530             cb++;
531             cr++;
532             p += 2 * BPP;
533             lum += 2;
534         }
535         if (w) {
536             YUVA_IN(y, u, v, a, p, pal);
537             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
538             cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
539             cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
540             p++;
541             lum++;
542         }
543         p += wrap3 - dstw * BPP;
544         lum += wrap - dstw - dstx;
545         cb += dst->linesize[1] - width2 - skip2;
546         cr += dst->linesize[2] - width2 - skip2;
547     }
548     for(h = dsth - (dsty & 1); h >= 2; h -= 2) {
549         lum += dstx;
550         cb += skip2;
551         cr += skip2;
552
553         if (dstx & 1) {
554             YUVA_IN(y, u, v, a, p, pal);
555             u1 = u;
556             v1 = v;
557             a1 = a;
558             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
559             p += wrap3;
560             lum += wrap;
561             YUVA_IN(y, u, v, a, p, pal);
562             u1 += u;
563             v1 += v;
564             a1 += a;
565             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
566             cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
567             cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
568             cb++;
569             cr++;
570             p += -wrap3 + BPP;
571             lum += -wrap + 1;
572         }
573         for(w = dstw - (dstx & 1); w >= 2; w -= 2) {
574             YUVA_IN(y, u, v, a, p, pal);
575             u1 = u;
576             v1 = v;
577             a1 = a;
578             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
579
580             YUVA_IN(y, u, v, a, p + BPP, pal);
581             u1 += u;
582             v1 += v;
583             a1 += a;
584             lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
585             p += wrap3;
586             lum += wrap;
587
588             YUVA_IN(y, u, v, a, p, pal);
589             u1 += u;
590             v1 += v;
591             a1 += a;
592             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
593
594             YUVA_IN(y, u, v, a, p + BPP, pal);
595             u1 += u;
596             v1 += v;
597             a1 += a;
598             lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
599
600             cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 2);
601             cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 2);
602
603             cb++;
604             cr++;
605             p += -wrap3 + 2 * BPP;
606             lum += -wrap + 2;
607         }
608         if (w) {
609             YUVA_IN(y, u, v, a, p, pal);
610             u1 = u;
611             v1 = v;
612             a1 = a;
613             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
614             p += wrap3;
615             lum += wrap;
616             YUVA_IN(y, u, v, a, p, pal);
617             u1 += u;
618             v1 += v;
619             a1 += a;
620             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
621             cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
622             cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
623             cb++;
624             cr++;
625             p += -wrap3 + BPP;
626             lum += -wrap + 1;
627         }
628         p += wrap3 + (wrap3 - dstw * BPP);
629         lum += wrap + (wrap - dstw - dstx);
630         cb += dst->linesize[1] - width2 - skip2;
631         cr += dst->linesize[2] - width2 - skip2;
632     }
633     /* handle odd height */
634     if (h) {
635         lum += dstx;
636         cb += skip2;
637         cr += skip2;
638
639         if (dstx & 1) {
640             YUVA_IN(y, u, v, a, p, pal);
641             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
642             cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
643             cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
644             cb++;
645             cr++;
646             lum++;
647             p += BPP;
648         }
649         for(w = dstw - (dstx & 1); w >= 2; w -= 2) {
650             YUVA_IN(y, u, v, a, p, pal);
651             u1 = u;
652             v1 = v;
653             a1 = a;
654             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
655
656             YUVA_IN(y, u, v, a, p + BPP, pal);
657             u1 += u;
658             v1 += v;
659             a1 += a;
660             lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
661             cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
662             cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
663             cb++;
664             cr++;
665             p += 2 * BPP;
666             lum += 2;
667         }
668         if (w) {
669             YUVA_IN(y, u, v, a, p, pal);
670             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
671             cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
672             cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
673         }
674     }
675 }
676
677 static void free_subpicture(SubPicture *sp)
678 {
679     avsubtitle_free(&sp->sub);
680 }
681
682 static void video_image_display(VideoState *is)
683 {
684     VideoPicture *vp;
685     SubPicture *sp;
686     AVPicture pict;
687     float aspect_ratio;
688     int width, height, x, y;
689     SDL_Rect rect;
690     int i;
691
692     vp = &is->pictq[is->pictq_rindex];
693     if (vp->bmp) {
694 #if CONFIG_AVFILTER
695          if (vp->picref->video->pixel_aspect.num == 0)
696              aspect_ratio = 0;
697          else
698              aspect_ratio = av_q2d(vp->picref->video->pixel_aspect);
699 #else
700
701         /* XXX: use variable in the frame */
702         if (is->video_st->sample_aspect_ratio.num)
703             aspect_ratio = av_q2d(is->video_st->sample_aspect_ratio);
704         else if (is->video_st->codec->sample_aspect_ratio.num)
705             aspect_ratio = av_q2d(is->video_st->codec->sample_aspect_ratio);
706         else
707             aspect_ratio = 0;
708 #endif
709         if (aspect_ratio <= 0.0)
710             aspect_ratio = 1.0;
711         aspect_ratio *= (float)vp->width / (float)vp->height;
712
713         if (is->subtitle_st)
714         {
715             if (is->subpq_size > 0)
716             {
717                 sp = &is->subpq[is->subpq_rindex];
718
719                 if (vp->pts >= sp->pts + ((float) sp->sub.start_display_time / 1000))
720                 {
721                     SDL_LockYUVOverlay (vp->bmp);
722
723                     pict.data[0] = vp->bmp->pixels[0];
724                     pict.data[1] = vp->bmp->pixels[2];
725                     pict.data[2] = vp->bmp->pixels[1];
726
727                     pict.linesize[0] = vp->bmp->pitches[0];
728                     pict.linesize[1] = vp->bmp->pitches[2];
729                     pict.linesize[2] = vp->bmp->pitches[1];
730
731                     for (i = 0; i < sp->sub.num_rects; i++)
732                         blend_subrect(&pict, sp->sub.rects[i],
733                                       vp->bmp->w, vp->bmp->h);
734
735                     SDL_UnlockYUVOverlay (vp->bmp);
736                 }
737             }
738         }
739
740
741         /* XXX: we assume the screen has a 1:1 pixel aspect ratio */
742         height = is->height;
743         width = ((int)rint(height * aspect_ratio)) & ~1;
744         if (width > is->width) {
745             width = is->width;
746             height = ((int)rint(width / aspect_ratio)) & ~1;
747         }
748         x = (is->width - width) / 2;
749         y = (is->height - height) / 2;
750         if (!is->no_background) {
751             /* fill the background */
752             //            fill_border(is, x, y, width, height, QERGB(0x00, 0x00, 0x00));
753         } else {
754             is->no_background = 0;
755         }
756         rect.x = is->xleft + x;
757         rect.y = is->ytop  + y;
758         rect.w = width;
759         rect.h = height;
760         SDL_DisplayYUVOverlay(vp->bmp, &rect);
761     } else {
762 #if 0
763         fill_rectangle(screen,
764                        is->xleft, is->ytop, is->width, is->height,
765                        QERGB(0x00, 0x00, 0x00));
766 #endif
767     }
768 }
769
770 /* get the current audio output buffer size, in bytes. With SDL, we
771    cannot get precise information about hardware buffer fullness */
772 static int audio_write_get_buf_size(VideoState *is)
773 {
774     return is->audio_buf_size - is->audio_buf_index;
775 }
776
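/* Non-negative modulo, e.g. compute_mod(-3, SAMPLE_ARRAY_SIZE) returns
   SAMPLE_ARRAY_SIZE - 3; unlike C's '%' operator the result is never
   negative, which is what we need for wrapping indices into the circular
   sample buffer. */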
777 static inline int compute_mod(int a, int b)
778 {
779     a = a % b;
780     if (a >= 0)
781         return a;
782     else
783         return a + b;
784 }
785
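/* Audio visualization: show_audio == 1 draws one waveform per channel, any
   other (non-zero) mode draws an RDFT-based spectrogram one column at a time.
   The start index into sample_array is chosen so the display is centered on
   the samples currently being played, by stepping back by the amount of data
   still pending in the audio output buffer. */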
786 static void video_audio_display(VideoState *s)
787 {
788     int i, i_start, x, y1, y, ys, delay, n, nb_display_channels;
789     int ch, channels, h, h2, bgcolor, fgcolor;
790     int64_t time_diff; /* in microseconds */
791     int rdft_bits, nb_freq;
792
793     for(rdft_bits=1; (1<<rdft_bits)<2*s->height; rdft_bits++)
794         ;
795     nb_freq= 1<<(rdft_bits-1);
796
797     /* compute display index : center on currently output samples */
798     channels = s->audio_st->codec->channels;
799     nb_display_channels = channels;
800     if (!s->paused) {
801         int data_used= s->show_audio==1 ? s->width : (2*nb_freq);
802         n = 2 * channels;
803         delay = audio_write_get_buf_size(s);
804         delay /= n;
805
806         /* to be more precise, we take into account the time spent since
807            the last buffer computation */
808         if (audio_callback_time) {
809             time_diff = av_gettime() - audio_callback_time;
810             delay -= (time_diff * s->audio_st->codec->sample_rate) / 1000000;
811         }
812
813         delay += 2*data_used;
814         if (delay < data_used)
815             delay = data_used;
816
817         i_start= x = compute_mod(s->sample_array_index - delay * channels, SAMPLE_ARRAY_SIZE);
818         if(s->show_audio==1){
819             h= INT_MIN;
820             for(i=0; i<1000; i+=channels){
821                 int idx= (SAMPLE_ARRAY_SIZE + x - i) % SAMPLE_ARRAY_SIZE;
822                 int a= s->sample_array[idx];
823                 int b= s->sample_array[(idx + 4*channels)%SAMPLE_ARRAY_SIZE];
824                 int c= s->sample_array[(idx + 5*channels)%SAMPLE_ARRAY_SIZE];
825                 int d= s->sample_array[(idx + 9*channels)%SAMPLE_ARRAY_SIZE];
826                 int score= a-d;
827                 if(h<score && (b^c)<0){
828                     h= score;
829                     i_start= idx;
830                 }
831             }
832         }
833
834         s->last_i_start = i_start;
835     } else {
836         i_start = s->last_i_start;
837     }
838
839     bgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0x00);
840     if(s->show_audio==1){
841         fill_rectangle(screen,
842                        s->xleft, s->ytop, s->width, s->height,
843                        bgcolor);
844
845         fgcolor = SDL_MapRGB(screen->format, 0xff, 0xff, 0xff);
846
847         /* total height for one channel */
848         h = s->height / nb_display_channels;
849         /* graph height / 2 */
850         h2 = (h * 9) / 20;
851         for(ch = 0;ch < nb_display_channels; ch++) {
852             i = i_start + ch;
853             y1 = s->ytop + ch * h + (h / 2); /* position of center line */
854             for(x = 0; x < s->width; x++) {
855                 y = (s->sample_array[i] * h2) >> 15;
856                 if (y < 0) {
857                     y = -y;
858                     ys = y1 - y;
859                 } else {
860                     ys = y1;
861                 }
862                 fill_rectangle(screen,
863                                s->xleft + x, ys, 1, y,
864                                fgcolor);
865                 i += channels;
866                 if (i >= SAMPLE_ARRAY_SIZE)
867                     i -= SAMPLE_ARRAY_SIZE;
868             }
869         }
870
871         fgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0xff);
872
873         for(ch = 1;ch < nb_display_channels; ch++) {
874             y = s->ytop + ch * h;
875             fill_rectangle(screen,
876                            s->xleft, y, s->width, 1,
877                            fgcolor);
878         }
879         SDL_UpdateRect(screen, s->xleft, s->ytop, s->width, s->height);
880     }else{
881         nb_display_channels= FFMIN(nb_display_channels, 2);
882         if(rdft_bits != s->rdft_bits){
883             av_rdft_end(s->rdft);
884             av_free(s->rdft_data);
885             s->rdft = av_rdft_init(rdft_bits, DFT_R2C);
886             s->rdft_bits= rdft_bits;
887             s->rdft_data= av_malloc(4*nb_freq*sizeof(*s->rdft_data));
888         }
889         {
890             FFTSample *data[2];
891             for(ch = 0;ch < nb_display_channels; ch++) {
892                 data[ch] = s->rdft_data + 2*nb_freq*ch;
893                 i = i_start + ch;
894                 for(x = 0; x < 2*nb_freq; x++) {
895                     double w= (x-nb_freq)*(1.0/nb_freq);
896                     data[ch][x]= s->sample_array[i]*(1.0-w*w);
897                     i += channels;
898                     if (i >= SAMPLE_ARRAY_SIZE)
899                         i -= SAMPLE_ARRAY_SIZE;
900                 }
901                 av_rdft_calc(s->rdft, data[ch]);
902             }
903             // Least efficient way to do this; we could of course access the data directly, but it's more than fast enough.
904             for(y=0; y<s->height; y++){
905                 double w= 1/sqrt(nb_freq);
906                 int a= sqrt(w*sqrt(data[0][2*y+0]*data[0][2*y+0] + data[0][2*y+1]*data[0][2*y+1]));
907                 int b= (nb_display_channels == 2 ) ? sqrt(w*sqrt(data[1][2*y+0]*data[1][2*y+0]
908                        + data[1][2*y+1]*data[1][2*y+1])) : a;
909                 a= FFMIN(a,255);
910                 b= FFMIN(b,255);
911                 fgcolor = SDL_MapRGB(screen->format, a, b, (a+b)/2);
912
913                 fill_rectangle(screen,
914                             s->xpos, s->height-y, 1, 1,
915                             fgcolor);
916             }
917         }
918         SDL_UpdateRect(screen, s->xpos, s->ytop, 1, s->height);
919         s->xpos++;
920         if(s->xpos >= s->width)
921             s->xpos= s->xleft;
922     }
923 }
924
925 static int video_open(VideoState *is){
926     int flags = SDL_HWSURFACE|SDL_ASYNCBLIT|SDL_HWACCEL;
927     int w,h;
928
929     if(is_full_screen) flags |= SDL_FULLSCREEN;
930     else               flags |= SDL_RESIZABLE;
931
932     if (is_full_screen && fs_screen_width) {
933         w = fs_screen_width;
934         h = fs_screen_height;
935     } else if(!is_full_screen && screen_width){
936         w = screen_width;
937         h = screen_height;
938 #if CONFIG_AVFILTER
939     }else if (is->out_video_filter && is->out_video_filter->inputs[0]){
940         w = is->out_video_filter->inputs[0]->w;
941         h = is->out_video_filter->inputs[0]->h;
942 #else
943     }else if (is->video_st && is->video_st->codec->width){
944         w = is->video_st->codec->width;
945         h = is->video_st->codec->height;
946 #endif
947     } else {
948         w = 640;
949         h = 480;
950     }
951     if(screen && is->width == screen->w && screen->w == w
952        && is->height== screen->h && screen->h == h)
953         return 0;
954
955 #ifndef __APPLE__
956     screen = SDL_SetVideoMode(w, h, 0, flags);
957 #else
958     /* setting bits_per_pixel = 0 or 32 causes blank video on OS X */
959     screen = SDL_SetVideoMode(w, h, 24, flags);
960 #endif
961     if (!screen) {
962         fprintf(stderr, "SDL: could not set video mode - exiting\n");
963         return -1;
964     }
965     if (!window_title)
966         window_title = input_filename;
967     SDL_WM_SetCaption(window_title, window_title);
968
969     is->width = screen->w;
970     is->height = screen->h;
971
972     return 0;
973 }
974
975 /* display the current picture, if any */
976 static void video_display(VideoState *is)
977 {
978     if(!screen)
979         video_open(cur_stream);
980     if (is->audio_st && is->show_audio)
981         video_audio_display(is);
982     else if (is->video_st)
983         video_image_display(is);
984 }
985
986 static int refresh_thread(void *opaque)
987 {
988     VideoState *is= opaque;
989     while(!is->abort_request){
990         SDL_Event event;
991         event.type = FF_REFRESH_EVENT;
992         event.user.data1 = opaque;
993         if(!is->refresh){
994             is->refresh=1;
995             SDL_PushEvent(&event);
996         }
997         usleep(is->audio_st && is->show_audio ? rdftspeed*1000 : 5000); //FIXME ideally we should wait for the correct time, but SDL's event passing is so slow it would be silly
998     }
999     return 0;
1000 }
1001
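/* Note: is->audio_clock corresponds to the end of the decoded data handed to
   the output path; subtracting what is still sitting in the buffer (assuming
   2 bytes per sample, hence the factor of 2 below) gives the PTS of the
   sample actually being heard right now. */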
1002 /* get the current audio clock value */
1003 static double get_audio_clock(VideoState *is)
1004 {
1005     double pts;
1006     int hw_buf_size, bytes_per_sec;
1007     pts = is->audio_clock;
1008     hw_buf_size = audio_write_get_buf_size(is);
1009     bytes_per_sec = 0;
1010     if (is->audio_st) {
1011         bytes_per_sec = is->audio_st->codec->sample_rate *
1012             2 * is->audio_st->codec->channels;
1013     }
1014     if (bytes_per_sec)
1015         pts -= (double)hw_buf_size / bytes_per_sec;
1016     return pts;
1017 }
1018
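/* While playing, the video clock is extrapolated from the wall clock:
   video_current_pts_drift holds (pts - wallclock) from the moment the pts was
   last updated, so drift + now yields the current position.  While paused the
   last displayed pts is returned unchanged. */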
1019 /* get the current video clock value */
1020 static double get_video_clock(VideoState *is)
1021 {
1022     if (is->paused) {
1023         return is->video_current_pts;
1024     } else {
1025         return is->video_current_pts_drift + av_gettime() / 1000000.0;
1026     }
1027 }
1028
1029 /* get the current external clock value */
1030 static double get_external_clock(VideoState *is)
1031 {
1032     int64_t ti;
1033     ti = av_gettime();
1034     return is->external_clock + ((ti - is->external_clock_time) * 1e-6);
1035 }
1036
1037 /* get the current master clock value */
1038 static double get_master_clock(VideoState *is)
1039 {
1040     double val;
1041
1042     if (is->av_sync_type == AV_SYNC_VIDEO_MASTER) {
1043         if (is->video_st)
1044             val = get_video_clock(is);
1045         else
1046             val = get_audio_clock(is);
1047     } else if (is->av_sync_type == AV_SYNC_AUDIO_MASTER) {
1048         if (is->audio_st)
1049             val = get_audio_clock(is);
1050         else
1051             val = get_video_clock(is);
1052     } else {
1053         val = get_external_clock(is);
1054     }
1055     return val;
1056 }
1057
1058 /* seek in the stream */
1059 static void stream_seek(VideoState *is, int64_t pos, int64_t rel, int seek_by_bytes)
1060 {
1061     if (!is->seek_req) {
1062         is->seek_pos = pos;
1063         is->seek_rel = rel;
1064         is->seek_flags &= ~AVSEEK_FLAG_BYTE;
1065         if (seek_by_bytes)
1066             is->seek_flags |= AVSEEK_FLAG_BYTE;
1067         is->seek_req = 1;
1068     }
1069 }
1070
1071 /* pause or resume the video */
1072 static void stream_pause(VideoState *is)
1073 {
1074     if (is->paused) {
1075         is->frame_timer += av_gettime() / 1000000.0 + is->video_current_pts_drift - is->video_current_pts;
1076         if(is->read_pause_return != AVERROR(ENOSYS)){
1077             is->video_current_pts = is->video_current_pts_drift + av_gettime() / 1000000.0;
1078         }
1079         is->video_current_pts_drift = is->video_current_pts - av_gettime() / 1000000.0;
1080     }
1081     is->paused = !is->paused;
1082 }
1083
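/* Compute the absolute time (stored in is->frame_timer) at which the given
   frame should be displayed.  The nominal delay is the PTS difference to the
   previous frame; when video is not the master clock, the delay is forced to
   0 if video lags the master by more than the sync threshold, or doubled if
   it is ahead, so that playback drifts back into sync. */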
1084 static double compute_target_time(double frame_current_pts, VideoState *is)
1085 {
1086     double delay, sync_threshold, diff = 0;
1087
1088     /* compute nominal delay */
1089     delay = frame_current_pts - is->frame_last_pts;
1090     if (delay <= 0 || delay >= 10.0) {
1091         /* if incorrect delay, use previous one */
1092         delay = is->frame_last_delay;
1093     } else {
1094         is->frame_last_delay = delay;
1095     }
1096     is->frame_last_pts = frame_current_pts;
1097
1098     /* update delay to follow master synchronisation source */
1099     if (((is->av_sync_type == AV_SYNC_AUDIO_MASTER && is->audio_st) ||
1100          is->av_sync_type == AV_SYNC_EXTERNAL_CLOCK)) {
1101         /* if video is slave, we try to correct big delays by
1102            duplicating or deleting a frame */
1103         diff = get_video_clock(is) - get_master_clock(is);
1104
1105         /* skip or repeat frame. We take into account the
1106            delay to compute the threshold. I still don't know
1107            if it is the best guess */
1108         sync_threshold = FFMAX(AV_SYNC_THRESHOLD, delay);
1109         if (fabs(diff) < AV_NOSYNC_THRESHOLD) {
1110             if (diff <= -sync_threshold)
1111                 delay = 0;
1112             else if (diff >= sync_threshold)
1113                 delay = 2 * delay;
1114         }
1115     }
1116     is->frame_timer += delay;
1117 #if defined(DEBUG_SYNC)
1118     printf("video: delay=%0.3f target=%0.3f pts=%0.3f A-V=%f\n",
1119             delay, is->frame_timer, frame_current_pts, -diff);
1120 #endif
1121
1122     return is->frame_timer;
1123 }
1124
1125 /* called to display each frame */
1126 static void video_refresh_timer(void *opaque)
1127 {
1128     VideoState *is = opaque;
1129     VideoPicture *vp;
1130
1131     SubPicture *sp, *sp2;
1132
1133     if (is->video_st) {
1134 retry:
1135         if (is->pictq_size == 0) {
1136             // nothing to do, no picture to display in the queue
1137         } else {
1138             double time= av_gettime()/1000000.0;
1139             double next_target;
1140             /* dequeue the picture */
1141             vp = &is->pictq[is->pictq_rindex];
1142
1143             if(time < vp->target_clock)
1144                 return;
1145             /* update current video pts */
1146             is->video_current_pts = vp->pts;
1147             is->video_current_pts_drift = is->video_current_pts - time;
1148             is->video_current_pos = vp->pos;
1149             if(is->pictq_size > 1){
1150                 VideoPicture *nextvp= &is->pictq[(is->pictq_rindex+1)%VIDEO_PICTURE_QUEUE_SIZE];
1151                 assert(nextvp->target_clock >= vp->target_clock);
1152                 next_target= nextvp->target_clock;
1153             }else{
1154                 next_target= vp->target_clock + is->video_clock - vp->pts; //FIXME pass durations cleanly
1155             }
1156             if(framedrop && time > next_target){
1157                 is->skip_frames *= 1.0 + FRAME_SKIP_FACTOR;
1158                 if(is->pictq_size > 1 || time > next_target + 0.5){
1159                     /* update queue size and signal for next picture */
1160                     if (++is->pictq_rindex == VIDEO_PICTURE_QUEUE_SIZE)
1161                         is->pictq_rindex = 0;
1162
1163                     SDL_LockMutex(is->pictq_mutex);
1164                     is->pictq_size--;
1165                     SDL_CondSignal(is->pictq_cond);
1166                     SDL_UnlockMutex(is->pictq_mutex);
1167                     goto retry;
1168                 }
1169             }
1170
1171             if(is->subtitle_st) {
1172                 if (is->subtitle_stream_changed) {
1173                     SDL_LockMutex(is->subpq_mutex);
1174
1175                     while (is->subpq_size) {
1176                         free_subpicture(&is->subpq[is->subpq_rindex]);
1177
1178                         /* update queue size and signal for next picture */
1179                         if (++is->subpq_rindex == SUBPICTURE_QUEUE_SIZE)
1180                             is->subpq_rindex = 0;
1181
1182                         is->subpq_size--;
1183                     }
1184                     is->subtitle_stream_changed = 0;
1185
1186                     SDL_CondSignal(is->subpq_cond);
1187                     SDL_UnlockMutex(is->subpq_mutex);
1188                 } else {
1189                     if (is->subpq_size > 0) {
1190                         sp = &is->subpq[is->subpq_rindex];
1191
1192                         if (is->subpq_size > 1)
1193                             sp2 = &is->subpq[(is->subpq_rindex + 1) % SUBPICTURE_QUEUE_SIZE];
1194                         else
1195                             sp2 = NULL;
1196
1197                         if ((is->video_current_pts > (sp->pts + ((float) sp->sub.end_display_time / 1000)))
1198                                 || (sp2 && is->video_current_pts > (sp2->pts + ((float) sp2->sub.start_display_time / 1000))))
1199                         {
1200                             free_subpicture(sp);
1201
1202                             /* update queue size and signal for next picture */
1203                             if (++is->subpq_rindex == SUBPICTURE_QUEUE_SIZE)
1204                                 is->subpq_rindex = 0;
1205
1206                             SDL_LockMutex(is->subpq_mutex);
1207                             is->subpq_size--;
1208                             SDL_CondSignal(is->subpq_cond);
1209                             SDL_UnlockMutex(is->subpq_mutex);
1210                         }
1211                     }
1212                 }
1213             }
1214
1215             /* display picture */
1216             if (!display_disable)
1217                 video_display(is);
1218
1219             /* update queue size and signal for next picture */
1220             if (++is->pictq_rindex == VIDEO_PICTURE_QUEUE_SIZE)
1221                 is->pictq_rindex = 0;
1222
1223             SDL_LockMutex(is->pictq_mutex);
1224             is->pictq_size--;
1225             SDL_CondSignal(is->pictq_cond);
1226             SDL_UnlockMutex(is->pictq_mutex);
1227         }
1228     } else if (is->audio_st) {
1229         /* draw the next audio frame */
1230
1231         /* if there is only an audio stream, then display the audio bars
1232            (better than nothing, just to test the implementation) */
1233
1234         /* display picture */
1235         if (!display_disable)
1236             video_display(is);
1237     }
1238     if (show_status) {
1239         static int64_t last_time;
1240         int64_t cur_time;
1241         int aqsize, vqsize, sqsize;
1242         double av_diff;
1243
1244         cur_time = av_gettime();
1245         if (!last_time || (cur_time - last_time) >= 30000) {
1246             aqsize = 0;
1247             vqsize = 0;
1248             sqsize = 0;
1249             if (is->audio_st)
1250                 aqsize = is->audioq.size;
1251             if (is->video_st)
1252                 vqsize = is->videoq.size;
1253             if (is->subtitle_st)
1254                 sqsize = is->subtitleq.size;
1255             av_diff = 0;
1256             if (is->audio_st && is->video_st)
1257                 av_diff = get_audio_clock(is) - get_video_clock(is);
1258             printf("%7.2f A-V:%7.3f s:%3.1f aq=%5dKB vq=%5dKB sq=%5dB f=%"PRId64"/%"PRId64"   \r",
1259                    get_master_clock(is), av_diff, FFMAX(is->skip_frames-1, 0), aqsize / 1024, vqsize / 1024, sqsize, is->pts_ctx.num_faulty_dts, is->pts_ctx.num_faulty_pts);
1260             fflush(stdout);
1261             last_time = cur_time;
1262         }
1263     }
1264 }
1265
1266 static void stream_close(VideoState *is)
1267 {
1268     VideoPicture *vp;
1269     int i;
1270     /* XXX: use a special url_shutdown call to abort parse cleanly */
1271     is->abort_request = 1;
1272     SDL_WaitThread(is->parse_tid, NULL);
1273     SDL_WaitThread(is->refresh_tid, NULL);
1274
1275     /* free all pictures */
1276     for(i=0;i<VIDEO_PICTURE_QUEUE_SIZE; i++) {
1277         vp = &is->pictq[i];
1278 #if CONFIG_AVFILTER
1279         if (vp->picref) {
1280             avfilter_unref_buffer(vp->picref);
1281             vp->picref = NULL;
1282         }
1283 #endif
1284         if (vp->bmp) {
1285             SDL_FreeYUVOverlay(vp->bmp);
1286             vp->bmp = NULL;
1287         }
1288     }
1289     SDL_DestroyMutex(is->pictq_mutex);
1290     SDL_DestroyCond(is->pictq_cond);
1291     SDL_DestroyMutex(is->subpq_mutex);
1292     SDL_DestroyCond(is->subpq_cond);
1293 #if !CONFIG_AVFILTER
1294     if (is->img_convert_ctx)
1295         sws_freeContext(is->img_convert_ctx);
1296 #endif
1297     av_free(is);
1298 }
1299
1300 static void do_exit(void)
1301 {
1302     if (cur_stream) {
1303         stream_close(cur_stream);
1304         cur_stream = NULL;
1305     }
1306     uninit_opts();
1307 #if CONFIG_AVFILTER
1308     avfilter_uninit();
1309 #endif
1310     if (show_status)
1311         printf("\n");
1312     SDL_Quit();
1313     av_log(NULL, AV_LOG_QUIET, "");
1314     exit(0);
1315 }
1316
1317 /* allocate a picture (this needs to be done in the main thread to avoid
1318    potential locking problems) */
1319 static void alloc_picture(void *opaque)
1320 {
1321     VideoState *is = opaque;
1322     VideoPicture *vp;
1323
1324     vp = &is->pictq[is->pictq_windex];
1325
1326     if (vp->bmp)
1327         SDL_FreeYUVOverlay(vp->bmp);
1328
1329 #if CONFIG_AVFILTER
1330     if (vp->picref)
1331         avfilter_unref_buffer(vp->picref);
1332     vp->picref = NULL;
1333
1334     vp->width   = is->out_video_filter->inputs[0]->w;
1335     vp->height  = is->out_video_filter->inputs[0]->h;
1336     vp->pix_fmt = is->out_video_filter->inputs[0]->format;
1337 #else
1338     vp->width   = is->video_st->codec->width;
1339     vp->height  = is->video_st->codec->height;
1340     vp->pix_fmt = is->video_st->codec->pix_fmt;
1341 #endif
1342
1343     vp->bmp = SDL_CreateYUVOverlay(vp->width, vp->height,
1344                                    SDL_YV12_OVERLAY,
1345                                    screen);
1346     if (!vp->bmp || vp->bmp->pitches[0] < vp->width) {
1347         /* SDL allocates a buffer smaller than requested if the video
1348          * overlay hardware is unable to support the requested size. */
1349         fprintf(stderr, "Error: the video system does not support an image\n"
1350                         "size of %dx%d pixels. Try using -lowres or -vf \"scale=w:h\"\n"
1351                         "to reduce the image size.\n", vp->width, vp->height );
1352         do_exit();
1353     }
1354
1355     SDL_LockMutex(is->pictq_mutex);
1356     vp->allocated = 1;
1357     SDL_CondSignal(is->pictq_cond);
1358     SDL_UnlockMutex(is->pictq_mutex);
1359 }
1360
1361 /**
1362  * Queue a decoded picture for display.
1363  * @param pts the dts of the pkt / pts of the frame, guessed if not known
1364  */
1365 static int queue_picture(VideoState *is, AVFrame *src_frame, double pts, int64_t pos)
1366 {
1367     VideoPicture *vp;
1368     int dst_pix_fmt;
1369 #if CONFIG_AVFILTER
1370     AVPicture pict_src;
1371 #endif
1372     /* wait until we have space to put a new picture */
1373     SDL_LockMutex(is->pictq_mutex);
1374
1375     if(is->pictq_size>=VIDEO_PICTURE_QUEUE_SIZE && !is->refresh)
1376         is->skip_frames= FFMAX(1.0 - FRAME_SKIP_FACTOR, is->skip_frames * (1.0-FRAME_SKIP_FACTOR));
1377
1378     while (is->pictq_size >= VIDEO_PICTURE_QUEUE_SIZE &&
1379            !is->videoq.abort_request) {
1380         SDL_CondWait(is->pictq_cond, is->pictq_mutex);
1381     }
1382     SDL_UnlockMutex(is->pictq_mutex);
1383
1384     if (is->videoq.abort_request)
1385         return -1;
1386
1387     vp = &is->pictq[is->pictq_windex];
1388
1389     /* alloc or resize hardware picture buffer */
1390     if (!vp->bmp ||
1391 #if CONFIG_AVFILTER
1392         vp->width  != is->out_video_filter->inputs[0]->w ||
1393         vp->height != is->out_video_filter->inputs[0]->h) {
1394 #else
1395         vp->width != is->video_st->codec->width ||
1396         vp->height != is->video_st->codec->height) {
1397 #endif
1398         SDL_Event event;
1399
1400         vp->allocated = 0;
1401
1402         /* the allocation must be done in the main thread to avoid
1403            locking problems */
1404         event.type = FF_ALLOC_EVENT;
1405         event.user.data1 = is;
1406         SDL_PushEvent(&event);
1407
1408         /* wait until the picture is allocated */
1409         SDL_LockMutex(is->pictq_mutex);
1410         while (!vp->allocated && !is->videoq.abort_request) {
1411             SDL_CondWait(is->pictq_cond, is->pictq_mutex);
1412         }
1413         SDL_UnlockMutex(is->pictq_mutex);
1414
1415         if (is->videoq.abort_request)
1416             return -1;
1417     }
1418
1419     /* if the frame is not skipped, then display it */
1420     if (vp->bmp) {
1421         AVPicture pict;
1422 #if CONFIG_AVFILTER
1423         if(vp->picref)
1424             avfilter_unref_buffer(vp->picref);
1425         vp->picref = src_frame->opaque;
1426 #endif
1427
1428         /* get a pointer on the bitmap */
1429         SDL_LockYUVOverlay (vp->bmp);
1430
1431         dst_pix_fmt = PIX_FMT_YUV420P;
1432         memset(&pict,0,sizeof(AVPicture));
1433         pict.data[0] = vp->bmp->pixels[0];
1434         pict.data[1] = vp->bmp->pixels[2];
1435         pict.data[2] = vp->bmp->pixels[1];
1436
1437         pict.linesize[0] = vp->bmp->pitches[0];
1438         pict.linesize[1] = vp->bmp->pitches[2];
1439         pict.linesize[2] = vp->bmp->pitches[1];
1440
1441 #if CONFIG_AVFILTER
1442         pict_src.data[0] = src_frame->data[0];
1443         pict_src.data[1] = src_frame->data[1];
1444         pict_src.data[2] = src_frame->data[2];
1445
1446         pict_src.linesize[0] = src_frame->linesize[0];
1447         pict_src.linesize[1] = src_frame->linesize[1];
1448         pict_src.linesize[2] = src_frame->linesize[2];
1449
1450         //FIXME use direct rendering
1451         av_picture_copy(&pict, &pict_src,
1452                         vp->pix_fmt, vp->width, vp->height);
1453 #else
1454         sws_flags = av_get_int(sws_opts, "sws_flags", NULL);
1455         is->img_convert_ctx = sws_getCachedContext(is->img_convert_ctx,
1456             vp->width, vp->height, vp->pix_fmt, vp->width, vp->height,
1457             dst_pix_fmt, sws_flags, NULL, NULL, NULL);
1458         if (is->img_convert_ctx == NULL) {
1459             fprintf(stderr, "Cannot initialize the conversion context\n");
1460             exit(1);
1461         }
1462         sws_scale(is->img_convert_ctx, src_frame->data, src_frame->linesize,
1463                   0, vp->height, pict.data, pict.linesize);
1464 #endif
1465         /* update the bitmap content */
1466         SDL_UnlockYUVOverlay(vp->bmp);
1467
1468         vp->pts = pts;
1469         vp->pos = pos;
1470
1471         /* now we can update the picture count */
1472         if (++is->pictq_windex == VIDEO_PICTURE_QUEUE_SIZE)
1473             is->pictq_windex = 0;
1474         SDL_LockMutex(is->pictq_mutex);
1475         vp->target_clock= compute_target_time(vp->pts, is);
1476
1477         is->pictq_size++;
1478         SDL_UnlockMutex(is->pictq_mutex);
1479     }
1480     return 0;
1481 }
1482
1483 /**
1484  * compute the exact PTS for the picture if it is omitted in the stream
1485  * @param pts1 the dts of the pkt / pts of the frame
1486  */
1487 static int output_picture2(VideoState *is, AVFrame *src_frame, double pts1, int64_t pos)
1488 {
1489     double frame_delay, pts;
1490
1491     pts = pts1;
1492
1493     if (pts != 0) {
1494         /* update video clock with pts, if present */
1495         is->video_clock = pts;
1496     } else {
1497         pts = is->video_clock;
1498     }
1499     /* update video clock for next frame */
1500     frame_delay = av_q2d(is->video_st->codec->time_base);
1501     /* for MPEG2, the frame can be repeated, so we update the
1502        clock accordingly */
1503     frame_delay += src_frame->repeat_pict * (frame_delay * 0.5);
1504     is->video_clock += frame_delay;
1505
1506 #if defined(DEBUG_SYNC) && 0
1507     printf("frame_type=%c clock=%0.3f pts=%0.3f\n",
1508            av_get_picture_type_char(src_frame->pict_type), pts, pts1);
1509 #endif
1510     return queue_picture(is, src_frame, pts, pos);
1511 }
1512
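/* Pull the next packet from the video queue and decode it.  Returns -1 if
   the queue was aborted, 1 if a frame is ready in *frame, and 0 otherwise
   (flush packet, no picture decoded yet, or the frame was dropped by the
   fractional skip_frames mechanism used to catch up when we are late). */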
1513 static int get_video_frame(VideoState *is, AVFrame *frame, int64_t *pts, AVPacket *pkt)
1514 {
1515     int len1, got_picture, i;
1516
1517     if (packet_queue_get(&is->videoq, pkt, 1) < 0)
1518         return -1;
1519
1520     if (pkt->data == flush_pkt.data) {
1521         avcodec_flush_buffers(is->video_st->codec);
1522
1523         SDL_LockMutex(is->pictq_mutex);
1524         // Make sure there are no long-delay timers (ideally we should just flush the queue, but that's harder)
1525         for (i = 0; i < VIDEO_PICTURE_QUEUE_SIZE; i++) {
1526             is->pictq[i].target_clock= 0;
1527         }
1528         while (is->pictq_size && !is->videoq.abort_request) {
1529             SDL_CondWait(is->pictq_cond, is->pictq_mutex);
1530         }
1531         is->video_current_pos = -1;
1532         SDL_UnlockMutex(is->pictq_mutex);
1533
1534         init_pts_correction(&is->pts_ctx);
1535         is->frame_last_pts = AV_NOPTS_VALUE;
1536         is->frame_last_delay = 0;
1537         is->frame_timer = (double)av_gettime() / 1000000.0;
1538         is->skip_frames = 1;
1539         is->skip_frames_index = 0;
1540         return 0;
1541     }
1542
1543     len1 = avcodec_decode_video2(is->video_st->codec,
1544                                  frame, &got_picture,
1545                                  pkt);
1546
1547     if (got_picture) {
1548         if (decoder_reorder_pts == -1) {
1549             *pts = guess_correct_pts(&is->pts_ctx, frame->pkt_pts, frame->pkt_dts);
1550         } else if (decoder_reorder_pts) {
1551             *pts = frame->pkt_pts;
1552         } else {
1553             *pts = frame->pkt_dts;
1554         }
1555
1556         if (*pts == AV_NOPTS_VALUE) {
1557             *pts = 0;
1558         }
1559
1560         is->skip_frames_index += 1;
1561         if(is->skip_frames_index >= is->skip_frames){
1562             is->skip_frames_index -= FFMAX(is->skip_frames, 1.0);
1563             return 1;
1564         }
1565
1566     }
1567     return 0;
1568 }
1569
1570 #if CONFIG_AVFILTER
1571 typedef struct {
1572     VideoState *is;
1573     AVFrame *frame;
1574     int use_dr1;
1575 } FilterPriv;
1576
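/* The callbacks below implement direct rendering (DR1) into libavfilter
   buffers: instead of letting the decoder allocate its own frames, we hand it
   an AVFilterBufferRef obtained from the filter's output link, offset by the
   codec's edge width, so decoded pictures can enter the filter graph without
   an extra copy. */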
1577 static int input_get_buffer(AVCodecContext *codec, AVFrame *pic)
1578 {
1579     AVFilterContext *ctx = codec->opaque;
1580     AVFilterBufferRef  *ref;
1581     int perms = AV_PERM_WRITE;
1582     int i, w, h, stride[4];
1583     unsigned edge;
1584     int pixel_size;
1585
1586     if (codec->codec->capabilities & CODEC_CAP_NEG_LINESIZES)
1587         perms |= AV_PERM_NEG_LINESIZES;
1588
1589     if(pic->buffer_hints & FF_BUFFER_HINTS_VALID) {
1590         if(pic->buffer_hints & FF_BUFFER_HINTS_READABLE) perms |= AV_PERM_READ;
1591         if(pic->buffer_hints & FF_BUFFER_HINTS_PRESERVE) perms |= AV_PERM_PRESERVE;
1592         if(pic->buffer_hints & FF_BUFFER_HINTS_REUSABLE) perms |= AV_PERM_REUSE2;
1593     }
1594     if(pic->reference) perms |= AV_PERM_READ | AV_PERM_PRESERVE;
1595
1596     w = codec->width;
1597     h = codec->height;
1598     avcodec_align_dimensions2(codec, &w, &h, stride);
1599     edge = codec->flags & CODEC_FLAG_EMU_EDGE ? 0 : avcodec_get_edge_width();
1600     w += edge << 1;
1601     h += edge << 1;
1602
1603     if(!(ref = avfilter_get_video_buffer(ctx->outputs[0], perms, w, h)))
1604         return -1;
1605
1606     pixel_size = av_pix_fmt_descriptors[ref->format].comp[0].step_minus1+1;
1607     ref->video->w = codec->width;
1608     ref->video->h = codec->height;
1609     for(i = 0; i < 4; i ++) {
1610         unsigned hshift = (i == 1 || i == 2) ? av_pix_fmt_descriptors[ref->format].log2_chroma_w : 0;
1611         unsigned vshift = (i == 1 || i == 2) ? av_pix_fmt_descriptors[ref->format].log2_chroma_h : 0;
1612
1613         if (ref->data[i]) {
1614             ref->data[i]    += ((edge * pixel_size) >> hshift) + ((edge * ref->linesize[i]) >> vshift);
1615         }
1616         pic->data[i]     = ref->data[i];
1617         pic->linesize[i] = ref->linesize[i];
1618     }
1619     pic->opaque = ref;
1620     pic->age    = INT_MAX;
1621     pic->type   = FF_BUFFER_TYPE_USER;
1622     pic->reordered_opaque = codec->reordered_opaque;
1623     if(codec->pkt) pic->pkt_pts = codec->pkt->pts;
1624     else           pic->pkt_pts = AV_NOPTS_VALUE;
1625     return 0;
1626 }
1627
1628 static void input_release_buffer(AVCodecContext *codec, AVFrame *pic)
1629 {
1630     memset(pic->data, 0, sizeof(pic->data));
1631     avfilter_unref_buffer(pic->opaque);
1632 }
1633
1634 static int input_reget_buffer(AVCodecContext *codec, AVFrame *pic)
1635 {
1636     AVFilterBufferRef *ref = pic->opaque;
1637
1638     if (pic->data[0] == NULL) {
1639         pic->buffer_hints |= FF_BUFFER_HINTS_READABLE;
1640         return codec->get_buffer(codec, pic);
1641     }
1642
1643     if ((codec->width != ref->video->w) || (codec->height != ref->video->h) ||
1644         (codec->pix_fmt != ref->format)) {
1645         av_log(codec, AV_LOG_ERROR, "Picture properties changed.\n");
1646         return -1;
1647     }
1648
1649     pic->reordered_opaque = codec->reordered_opaque;
1650     if(codec->pkt) pic->pkt_pts = codec->pkt->pts;
1651     else           pic->pkt_pts = AV_NOPTS_VALUE;
1652     return 0;
1653 }
1654
1655 static int input_init(AVFilterContext *ctx, const char *args, void *opaque)
1656 {
1657     FilterPriv *priv = ctx->priv;
1658     AVCodecContext *codec;
1659     if(!opaque) return -1;
1660
1661     priv->is = opaque;
1662     codec    = priv->is->video_st->codec;
1663     codec->opaque = ctx;
1664     if(codec->codec->capabilities & CODEC_CAP_DR1) {
1665         priv->use_dr1 = 1;
1666         codec->get_buffer     = input_get_buffer;
1667         codec->release_buffer = input_release_buffer;
1668         codec->reget_buffer   = input_reget_buffer;
1669         codec->thread_safe_callbacks = 1;
1670     }
1671
1672     priv->frame = avcodec_alloc_frame();
1673
1674     return 0;
1675 }
1676
1677 static void input_uninit(AVFilterContext *ctx)
1678 {
1679     FilterPriv *priv = ctx->priv;
1680     av_free(priv->frame);
1681 }
1682
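     /* output pad callback: decode packets until a frame is available and push
        it into the filter chain; with DR1 the frame already lives in a filter
        buffer (frame->opaque), otherwise it is copied into a freshly
        allocated one */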
1683 static int input_request_frame(AVFilterLink *link)
1684 {
1685     FilterPriv *priv = link->src->priv;
1686     AVFilterBufferRef *picref;
1687     int64_t pts = 0;
1688     AVPacket pkt;
1689     int ret;
1690
1691     while (!(ret = get_video_frame(priv->is, priv->frame, &pts, &pkt)))
1692         av_free_packet(&pkt);
1693     if (ret < 0)
1694         return -1;
1695
1696     if(priv->use_dr1) {
1697         picref = avfilter_ref_buffer(priv->frame->opaque, ~0);
1698     } else {
1699         picref = avfilter_get_video_buffer(link, AV_PERM_WRITE, link->w, link->h);
1700         av_image_copy(picref->data, picref->linesize,
1701                       priv->frame->data, priv->frame->linesize,
1702                       picref->format, link->w, link->h);
1703     }
1704     av_free_packet(&pkt);
1705
1706     picref->pts = pts;
1707     picref->pos = pkt.pos;
1708     picref->video->pixel_aspect = priv->is->video_st->codec->sample_aspect_ratio;
1709     avfilter_start_frame(link, picref);
1710     avfilter_draw_slice(link, 0, link->h, 1);
1711     avfilter_end_frame(link);
1712
1713     return 0;
1714 }
1715
1716 static int input_query_formats(AVFilterContext *ctx)
1717 {
1718     FilterPriv *priv = ctx->priv;
1719     enum PixelFormat pix_fmts[] = {
1720         priv->is->video_st->codec->pix_fmt, PIX_FMT_NONE
1721     };
1722
1723     avfilter_set_common_formats(ctx, avfilter_make_format_list(pix_fmts));
1724     return 0;
1725 }
1726
1727 static int input_config_props(AVFilterLink *link)
1728 {
1729     FilterPriv *priv  = link->src->priv;
1730     AVCodecContext *c = priv->is->video_st->codec;
1731
1732     link->w = c->width;
1733     link->h = c->height;
1734     link->time_base = priv->is->video_st->time_base;
1735
1736     return 0;
1737 }
1738
1739 static AVFilter input_filter =
1740 {
1741     .name      = "ffplay_input",
1742
1743     .priv_size = sizeof(FilterPriv),
1744
1745     .init      = input_init,
1746     .uninit    = input_uninit,
1747
1748     .query_formats = input_query_formats,
1749
1750     .inputs    = (AVFilterPad[]) {{ .name = NULL }},
1751     .outputs   = (AVFilterPad[]) {{ .name = "default",
1752                                     .type = AVMEDIA_TYPE_VIDEO,
1753                                     .request_frame = input_request_frame,
1754                                     .config_props  = input_config_props, },
1755                                   { .name = NULL }},
1756 };
1757
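     /* build the filter graph: ffplay_input -> [user -vf filters] -> ffsink.
        Without a -vf string the source is linked directly to the sink; the
        sink pixel format is fixed to PIX_FMT_YUV420P for the SDL overlay
        display path. */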
1758 static int configure_video_filters(AVFilterGraph *graph, VideoState *is, const char *vfilters)
1759 {
1760     char sws_flags_str[128];
1761     int ret;
1762     FFSinkContext ffsink_ctx = { .pix_fmt = PIX_FMT_YUV420P };
1763     AVFilterContext *filt_src = NULL, *filt_out = NULL;
1764     snprintf(sws_flags_str, sizeof(sws_flags_str), "flags=%d", sws_flags);
1765     graph->scale_sws_opts = av_strdup(sws_flags_str);
1766
1767     if ((ret = avfilter_graph_create_filter(&filt_src, &input_filter, "src",
1768                                             NULL, is, graph)) < 0)
1769         goto the_end;
1770     if ((ret = avfilter_graph_create_filter(&filt_out, &ffsink, "out",
1771                                             NULL, &ffsink_ctx, graph)) < 0)
1772         goto the_end;
1773
1774     if(vfilters) {
1775         AVFilterInOut *outputs = av_malloc(sizeof(AVFilterInOut));
1776         AVFilterInOut *inputs  = av_malloc(sizeof(AVFilterInOut));
1777
1778         outputs->name    = av_strdup("in");
1779         outputs->filter_ctx = filt_src;
1780         outputs->pad_idx = 0;
1781         outputs->next    = NULL;
1782
1783         inputs->name    = av_strdup("out");
1784         inputs->filter_ctx = filt_out;
1785         inputs->pad_idx = 0;
1786         inputs->next    = NULL;
1787
1788         if ((ret = avfilter_graph_parse(graph, vfilters, inputs, outputs, NULL)) < 0)
1789             goto the_end;
1790         av_freep(&vfilters);
1791     } else {
1792         if ((ret = avfilter_link(filt_src, 0, filt_out, 0)) < 0)
1793             goto the_end;
1794     }
1795
1796     if ((ret = avfilter_graph_config(graph, NULL)) < 0)
1797         goto the_end;
1798
1799     is->out_video_filter = filt_out;
1800 the_end:
1801     return ret;
1802 }
1803
1804 #endif  /* CONFIG_AVFILTER */
1805
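     /* video decoding thread: pulls frames (directly, or through the filter
        graph when CONFIG_AVFILTER is set), rescales their timestamps to the
        stream time base, and hands them to output_picture2 for queuing */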
1806 static int video_thread(void *arg)
1807 {
1808     VideoState *is = arg;
1809     AVFrame *frame= avcodec_alloc_frame();
1810     int64_t pts_int;
1811     double pts;
1812     int ret;
1813
1814 #if CONFIG_AVFILTER
1815     AVFilterGraph *graph = avfilter_graph_alloc();
1816     AVFilterContext *filt_out = NULL;
1817     int64_t pos;
1818
1819     if ((ret = configure_video_filters(graph, is, vfilters)) < 0)
1820         goto the_end;
1821     filt_out = is->out_video_filter;
1822 #endif
1823
1824     for(;;) {
1825 #if !CONFIG_AVFILTER
1826         AVPacket pkt;
1827 #else
1828         AVFilterBufferRef *picref;
1829         AVRational tb;
1830 #endif
1831         while (is->paused && !is->videoq.abort_request)
1832             SDL_Delay(10);
1833 #if CONFIG_AVFILTER
1834         ret = get_filtered_video_frame(filt_out, frame, &picref, &tb);
1835         if (picref) {
1836             pts_int = picref->pts;
1837             pos     = picref->pos;
1838             frame->opaque = picref;
1839         }
1840
1841         if (av_cmp_q(tb, is->video_st->time_base)) {
1842             av_unused int64_t pts1 = pts_int;
1843             pts_int = av_rescale_q(pts_int, tb, is->video_st->time_base);
1844             av_dlog(NULL, "video_thread(): "
1845                     "tb:%d/%d pts:%"PRId64" -> tb:%d/%d pts:%"PRId64"\n",
1846                     tb.num, tb.den, pts1,
1847                     is->video_st->time_base.num, is->video_st->time_base.den, pts_int);
1848         }
1849 #else
1850         ret = get_video_frame(is, frame, &pts_int, &pkt);
1851 #endif
1852
1853         if (ret < 0) goto the_end;
1854
1855         if (!ret)
1856             continue;
1857
1858         pts = pts_int*av_q2d(is->video_st->time_base);
1859
1860 #if CONFIG_AVFILTER
1861         ret = output_picture2(is, frame, pts, pos);
1862 #else
1863         ret = output_picture2(is, frame, pts,  pkt.pos);
1864         av_free_packet(&pkt);
1865 #endif
1866         if (ret < 0)
1867             goto the_end;
1868
1869         if (step)
1870             if (cur_stream)
1871                 stream_pause(cur_stream);
1872     }
1873  the_end:
1874 #if CONFIG_AVFILTER
1875     avfilter_graph_free(&graph);
1876 #endif
1877     av_free(frame);
1878     return 0;
1879 }
1880
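     /* subtitle decoding thread: decodes subtitle packets into the subpicture
        queue; only bitmap subtitles (sub.format == 0) are processed further */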
1881 static int subtitle_thread(void *arg)
1882 {
1883     VideoState *is = arg;
1884     SubPicture *sp;
1885     AVPacket pkt1, *pkt = &pkt1;
1886     int len1, got_subtitle;
1887     double pts;
1888     int i, j;
1889     int r, g, b, y, u, v, a;
1890
1891     for(;;) {
1892         while (is->paused && !is->subtitleq.abort_request) {
1893             SDL_Delay(10);
1894         }
1895         if (packet_queue_get(&is->subtitleq, pkt, 1) < 0)
1896             break;
1897
1898         if(pkt->data == flush_pkt.data){
1899             avcodec_flush_buffers(is->subtitle_st->codec);
1900             continue;
1901         }
1902         SDL_LockMutex(is->subpq_mutex);
1903         while (is->subpq_size >= SUBPICTURE_QUEUE_SIZE &&
1904                !is->subtitleq.abort_request) {
1905             SDL_CondWait(is->subpq_cond, is->subpq_mutex);
1906         }
1907         SDL_UnlockMutex(is->subpq_mutex);
1908
1909         if (is->subtitleq.abort_request)
1910             goto the_end;
1911
1912         sp = &is->subpq[is->subpq_windex];
1913
1914         /* NOTE: pts is the PTS of the _first_ subtitle beginning in
1915            this packet, if any */
1916         pts = 0;
1917         if (pkt->pts != AV_NOPTS_VALUE)
1918             pts = av_q2d(is->subtitle_st->time_base)*pkt->pts;
1919
1920         len1 = avcodec_decode_subtitle2(is->subtitle_st->codec,
1921                                     &sp->sub, &got_subtitle,
1922                                     pkt);
1923 //            if (len1 < 0)
1924 //                break;
1925         if (got_subtitle && sp->sub.format == 0) {
1926             sp->pts = pts;
1927
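                 /* convert each rectangle's palette from RGBA to YUVA in place
                    (CCIR coefficients), so it can later be blended onto the
                    YUV picture */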
1928             for (i = 0; i < sp->sub.num_rects; i++)
1929             {
1930                 for (j = 0; j < sp->sub.rects[i]->nb_colors; j++)
1931                 {
1932                     RGBA_IN(r, g, b, a, (uint32_t*)sp->sub.rects[i]->pict.data[1] + j);
1933                     y = RGB_TO_Y_CCIR(r, g, b);
1934                     u = RGB_TO_U_CCIR(r, g, b, 0);
1935                     v = RGB_TO_V_CCIR(r, g, b, 0);
1936                     YUVA_OUT((uint32_t*)sp->sub.rects[i]->pict.data[1] + j, y, u, v, a);
1937                 }
1938             }
1939
1940             /* now we can update the picture count */
1941             if (++is->subpq_windex == SUBPICTURE_QUEUE_SIZE)
1942                 is->subpq_windex = 0;
1943             SDL_LockMutex(is->subpq_mutex);
1944             is->subpq_size++;
1945             SDL_UnlockMutex(is->subpq_mutex);
1946         }
1947         av_free_packet(pkt);
1948 //        if (step)
1949 //            if (cur_stream)
1950 //                stream_pause(cur_stream);
1951     }
1952  the_end:
1953     return 0;
1954 }
1955
1956 /* copy samples into the circular buffer used by the audio display */
1957 static void update_sample_display(VideoState *is, short *samples, int samples_size)
1958 {
1959     int size, len, channels;
1960
1961     channels = is->audio_st->codec->channels;
1962
1963     size = samples_size / sizeof(short);
1964     while (size > 0) {
1965         len = SAMPLE_ARRAY_SIZE - is->sample_array_index;
1966         if (len > size)
1967             len = size;
1968         memcpy(is->sample_array + is->sample_array_index, samples, len * sizeof(short));
1969         samples += len;
1970         is->sample_array_index += len;
1971         if (is->sample_array_index >= SAMPLE_ARRAY_SIZE)
1972             is->sample_array_index = 0;
1973         size -= len;
1974     }
1975 }
1976
1977 /* return the new audio buffer size (samples can be added or deleted
1978    to get better sync when video or an external clock is the master) */
1979 static int synchronize_audio(VideoState *is, short *samples,
1980                              int samples_size1, double pts)
1981 {
1982     int n, samples_size;
1983     double ref_clock;
1984
1985     n = 2 * is->audio_st->codec->channels;
1986     samples_size = samples_size1;
1987
1988     /* if not master, then we try to remove or add samples to correct the clock */
1989     if (((is->av_sync_type == AV_SYNC_VIDEO_MASTER && is->video_st) ||
1990          is->av_sync_type == AV_SYNC_EXTERNAL_CLOCK)) {
1991         double diff, avg_diff;
1992         int wanted_size, min_size, max_size, nb_samples;
1993
1994         ref_clock = get_master_clock(is);
1995         diff = get_audio_clock(is) - ref_clock;
1996
1997         if (diff < AV_NOSYNC_THRESHOLD) {
1998             is->audio_diff_cum = diff + is->audio_diff_avg_coef * is->audio_diff_cum;
1999             if (is->audio_diff_avg_count < AUDIO_DIFF_AVG_NB) {
2000                 /* not enough measurements for a reliable estimate */
2001                 is->audio_diff_avg_count++;
2002             } else {
2003                 /* estimate the A-V difference */
2004                 avg_diff = is->audio_diff_cum * (1.0 - is->audio_diff_avg_coef);
2005
2006                 if (fabs(avg_diff) >= is->audio_diff_threshold) {
2007                     wanted_size = samples_size + ((int)(diff * is->audio_st->codec->sample_rate) * n);
2008                     nb_samples = samples_size / n;
2009
2010                     min_size = ((nb_samples * (100 - SAMPLE_CORRECTION_PERCENT_MAX)) / 100) * n;
2011                     max_size = ((nb_samples * (100 + SAMPLE_CORRECTION_PERCENT_MAX)) / 100) * n;
2012                     if (wanted_size < min_size)
2013                         wanted_size = min_size;
2014                     else if (wanted_size > max_size)
2015                         wanted_size = max_size;
2016
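                          /* illustrative numbers (not from a real run): for
                             stereo S16 at 44100 Hz, n = 4 bytes per frame; an
                             audio clock 20 ms ahead gives wanted_size roughly
                             samples_size + 0.020 * 44100 * 4 = samples_size +
                             3528 bytes, then clamped to +/-10% by min/max_size */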
2017                     /* add or remove samples to correct the synchronization */
2018                     if (wanted_size < samples_size) {
2019                         /* remove samples */
2020                         samples_size = wanted_size;
2021                     } else if (wanted_size > samples_size) {
2022                         uint8_t *samples_end, *q;
2023                         int nb;
2024
2025                         /* add samples */
2026                         nb = (wanted_size - samples_size); /* bytes to append by repeating the last sample */
2027                         samples_end = (uint8_t *)samples + samples_size - n;
2028                         q = samples_end + n;
2029                         while (nb > 0) {
2030                             memcpy(q, samples_end, n);
2031                             q += n;
2032                             nb -= n;
2033                         }
2034                         samples_size = wanted_size;
2035                     }
2036                 }
2037                 av_dlog(NULL, "diff=%f adiff=%f sample_diff=%d apts=%0.3f vpts=%0.3f %f\n",
2038                         diff, avg_diff, samples_size - samples_size1,
2039                         is->audio_clock, is->video_clock, is->audio_diff_threshold);
2040             }
2041         } else {
2042             /* difference is too large: probably caused by initial PTS
2043                errors, so reset the A-V filter */
2044             is->audio_diff_avg_count = 0;
2045             is->audio_diff_cum = 0;
2046         }
2047     }
2048
2049     return samples_size;
2050 }
2051
2052 /* decode one audio frame and return its uncompressed size */
2053 static int audio_decode_frame(VideoState *is, double *pts_ptr)
2054 {
2055     AVPacket *pkt_temp = &is->audio_pkt_temp;
2056     AVPacket *pkt = &is->audio_pkt;
2057     AVCodecContext *dec= is->audio_st->codec;
2058     int n, len1, data_size;
2059     double pts;
2060
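         /* outer loop: fetch packets from the audio queue; inner loop: a single
            packet may contain several frames, so decode until pkt_temp is
            drained.  The audio clock advances by the duration of each decoded
            chunk: data_size / (channels * 2 bytes * sample_rate). */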
2061     for(;;) {
2062         /* NOTE: the audio packet can contain several frames */
2063         while (pkt_temp->size > 0) {
2064             data_size = sizeof(is->audio_buf1);
2065             len1 = avcodec_decode_audio3(dec,
2066                                         (int16_t *)is->audio_buf1, &data_size,
2067                                         pkt_temp);
2068             if (len1 < 0) {
2069                 /* if error, we skip the frame */
2070                 pkt_temp->size = 0;
2071                 break;
2072             }
2073
2074             pkt_temp->data += len1;
2075             pkt_temp->size -= len1;
2076             if (data_size <= 0)
2077                 continue;
2078
2079             if (dec->sample_fmt != is->audio_src_fmt) {
2080                 if (is->reformat_ctx)
2081                     av_audio_convert_free(is->reformat_ctx);
2082                 is->reformat_ctx= av_audio_convert_alloc(AV_SAMPLE_FMT_S16, 1,
2083                                                          dec->sample_fmt, 1, NULL, 0);
2084                 if (!is->reformat_ctx) {
2085                     fprintf(stderr, "Cannot convert %s sample format to %s sample format\n",
2086                         av_get_sample_fmt_name(dec->sample_fmt),
2087                         av_get_sample_fmt_name(AV_SAMPLE_FMT_S16));
2088                         break;
2089                 }
2090                 is->audio_src_fmt= dec->sample_fmt;
2091             }
2092
2093             if (is->reformat_ctx) {
2094                 const void *ibuf[6]= {is->audio_buf1};
2095                 void *obuf[6]= {is->audio_buf2};
2096                 int istride[6]= {av_get_bits_per_sample_fmt(dec->sample_fmt)/8};
2097                 int ostride[6]= {2};
2098                 int len= data_size/istride[0];
2099                 if (av_audio_convert(is->reformat_ctx, obuf, ostride, ibuf, istride, len)<0) {
2100                     printf("av_audio_convert() failed\n");
2101                     break;
2102                 }
2103                 is->audio_buf= is->audio_buf2;
2104                 /* FIXME: existing code assumes that data_size equals framesize*channels*2;
2105                           remove this legacy cruft */
2106                 data_size= len*2;
2107             }else{
2108                 is->audio_buf= is->audio_buf1;
2109             }
2110
2111             /* the pts is taken from the running audio clock (updated from the packet pts when available) */
2112             pts = is->audio_clock;
2113             *pts_ptr = pts;
2114             n = 2 * dec->channels;
2115             is->audio_clock += (double)data_size /
2116                 (double)(n * dec->sample_rate);
2117 #if defined(DEBUG_SYNC)
2118             {
2119                 static double last_clock;
2120                 printf("audio: delay=%0.3f clock=%0.3f pts=%0.3f\n",
2121                        is->audio_clock - last_clock,
2122                        is->audio_clock, pts);
2123                 last_clock = is->audio_clock;
2124             }
2125 #endif
2126             return data_size;
2127         }
2128
2129         /* free the current packet */
2130         if (pkt->data)
2131             av_free_packet(pkt);
2132
2133         if (is->paused || is->audioq.abort_request) {
2134             return -1;
2135         }
2136
2137         /* read next packet */
2138         if (packet_queue_get(&is->audioq, pkt, 1) < 0)
2139             return -1;
2140         if(pkt->data == flush_pkt.data){
2141             avcodec_flush_buffers(dec);
2142             continue;
2143         }
2144
2145         pkt_temp->data = pkt->data;
2146         pkt_temp->size = pkt->size;
2147
2148         /* if the packet has a pts, update the audio clock with it */
2149         if (pkt->pts != AV_NOPTS_VALUE) {
2150             is->audio_clock = av_q2d(is->audio_st->time_base)*pkt->pts;
2151         }
2152     }
2153 }
2154
2155 /* prepare a new audio buffer */
2156 static void sdl_audio_callback(void *opaque, Uint8 *stream, int len)
2157 {
2158     VideoState *is = opaque;
2159     int audio_size, len1;
2160     double pts;
2161
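         /* SDL pulls 'len' bytes here from its own audio thread; refill from
            audio_decode_frame() as needed and fall back to silence on error so
            the device keeps running */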
2162     audio_callback_time = av_gettime();
2163
2164     while (len > 0) {
2165         if (is->audio_buf_index >= is->audio_buf_size) {
2166            audio_size = audio_decode_frame(is, &pts);
2167            if (audio_size < 0) {
2168                 /* if error, just output silence */
2169                is->audio_buf = is->audio_buf1;
2170                is->audio_buf_size = 1024;
2171                memset(is->audio_buf, 0, is->audio_buf_size);
2172            } else {
2173                if (is->show_audio)
2174                    update_sample_display(is, (int16_t *)is->audio_buf, audio_size);
2175                audio_size = synchronize_audio(is, (int16_t *)is->audio_buf, audio_size,
2176                                               pts);
2177                is->audio_buf_size = audio_size;
2178            }
2179            is->audio_buf_index = 0;
2180         }
2181         len1 = is->audio_buf_size - is->audio_buf_index;
2182         if (len1 > len)
2183             len1 = len;
2184         memcpy(stream, (uint8_t *)is->audio_buf + is->audio_buf_index, len1);
2185         len -= len1;
2186         stream += len1;
2187         is->audio_buf_index += len1;
2188     }
2189 }
2190
2191 /* open a given stream. Return 0 if OK */
2192 static int stream_component_open(VideoState *is, int stream_index)
2193 {
2194     AVFormatContext *ic = is->ic;
2195     AVCodecContext *avctx;
2196     AVCodec *codec;
2197     SDL_AudioSpec wanted_spec, spec;
2198
2199     if (stream_index < 0 || stream_index >= ic->nb_streams)
2200         return -1;
2201     avctx = ic->streams[stream_index]->codec;
2202
2203     /* for audio, request at most two decoded channels */
2204     if (avctx->codec_type == AVMEDIA_TYPE_AUDIO) {
2205         if (avctx->channels > 0) {
2206             avctx->request_channels = FFMIN(2, avctx->channels);
2207         } else {
2208             avctx->request_channels = 2;
2209         }
2210     }
2211
2212     codec = avcodec_find_decoder(avctx->codec_id);
2213     avctx->debug_mv = debug_mv;
2214     avctx->debug = debug;
2215     avctx->workaround_bugs = workaround_bugs;
2216     avctx->lowres = lowres;
2217     if(lowres) avctx->flags |= CODEC_FLAG_EMU_EDGE;
2218     avctx->idct_algo= idct;
2219     if(fast) avctx->flags2 |= CODEC_FLAG2_FAST;
2220     avctx->skip_frame= skip_frame;
2221     avctx->skip_idct= skip_idct;
2222     avctx->skip_loop_filter= skip_loop_filter;
2223     avctx->error_recognition= error_recognition;
2224     avctx->error_concealment= error_concealment;
2225     avctx->thread_count= thread_count;
2226
2227     set_context_opts(avctx, avcodec_opts[avctx->codec_type], 0, codec);
2228
2229     if (!codec ||
2230         avcodec_open(avctx, codec) < 0)
2231         return -1;
2232
2233     /* prepare audio output */
2234     if (avctx->codec_type == AVMEDIA_TYPE_AUDIO) {
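             /* ask SDL for S16 native-endian output at the stream's sample rate
                and channel count; spec.size (the actual hardware buffer size in
                bytes) is what the A/V sync code stores as audio_hw_buf_size */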
2235         wanted_spec.freq = avctx->sample_rate;
2236         wanted_spec.format = AUDIO_S16SYS;
2237         wanted_spec.channels = avctx->channels;
2238         wanted_spec.silence = 0;
2239         wanted_spec.samples = SDL_AUDIO_BUFFER_SIZE;
2240         wanted_spec.callback = sdl_audio_callback;
2241         wanted_spec.userdata = is;
2242         if (SDL_OpenAudio(&wanted_spec, &spec) < 0) {
2243             fprintf(stderr, "SDL_OpenAudio: %s\n", SDL_GetError());
2244             return -1;
2245         }
2246         is->audio_hw_buf_size = spec.size;
2247         is->audio_src_fmt= AV_SAMPLE_FMT_S16;
2248     }
2249
2250     ic->streams[stream_index]->discard = AVDISCARD_DEFAULT;
2251     switch(avctx->codec_type) {
2252     case AVMEDIA_TYPE_AUDIO:
2253         is->audio_stream = stream_index;
2254         is->audio_st = ic->streams[stream_index];
2255         is->audio_buf_size = 0;
2256         is->audio_buf_index = 0;
2257
2258         /* init averaging filter */
2259         is->audio_diff_avg_coef = exp(log(0.01) / AUDIO_DIFF_AVG_NB);
2260         is->audio_diff_avg_count = 0;
2261         /* since we do not have a precise enough measure of the audio FIFO
2262            fullness, we only correct audio sync above this threshold */
2263         is->audio_diff_threshold = 2.0 * SDL_AUDIO_BUFFER_SIZE / avctx->sample_rate;
2264
2265         memset(&is->audio_pkt, 0, sizeof(is->audio_pkt));
2266         packet_queue_init(&is->audioq);
2267         SDL_PauseAudio(0);
2268         break;
2269     case AVMEDIA_TYPE_VIDEO:
2270         is->video_stream = stream_index;
2271         is->video_st = ic->streams[stream_index];
2272
2273 //        is->video_current_pts_time = av_gettime();
2274
2275         packet_queue_init(&is->videoq);
2276         is->video_tid = SDL_CreateThread(video_thread, is);
2277         break;
2278     case AVMEDIA_TYPE_SUBTITLE:
2279         is->subtitle_stream = stream_index;
2280         is->subtitle_st = ic->streams[stream_index];
2281         packet_queue_init(&is->subtitleq);
2282
2283         is->subtitle_tid = SDL_CreateThread(subtitle_thread, is);
2284         break;
2285     default:
2286         break;
2287     }
2288     return 0;
2289 }
2290
2291 static void stream_component_close(VideoState *is, int stream_index)
2292 {
2293     AVFormatContext *ic = is->ic;
2294     AVCodecContext *avctx;
2295
2296     if (stream_index < 0 || stream_index >= ic->nb_streams)
2297         return;
2298     avctx = ic->streams[stream_index]->codec;
2299
2300     switch(avctx->codec_type) {
2301     case AVMEDIA_TYPE_AUDIO:
2302         packet_queue_abort(&is->audioq);
2303
2304         SDL_CloseAudio();
2305
2306         packet_queue_end(&is->audioq);
2307         if (is->reformat_ctx)
2308             av_audio_convert_free(is->reformat_ctx);
2309         is->reformat_ctx = NULL;
2310         break;
2311     case AVMEDIA_TYPE_VIDEO:
2312         packet_queue_abort(&is->videoq);
2313
2314         /* note: we also signal this mutex to make sure we deblock the
2315            video thread in all cases */
2316         SDL_LockMutex(is->pictq_mutex);
2317         SDL_CondSignal(is->pictq_cond);
2318         SDL_UnlockMutex(is->pictq_mutex);
2319
2320         SDL_WaitThread(is->video_tid, NULL);
2321
2322         packet_queue_end(&is->videoq);
2323         break;
2324     case AVMEDIA_TYPE_SUBTITLE:
2325         packet_queue_abort(&is->subtitleq);
2326
2327         /* note: we also signal this mutex to make sure we deblock the
2328            subtitle thread in all cases */
2329         SDL_LockMutex(is->subpq_mutex);
2330         is->subtitle_stream_changed = 1;
2331
2332         SDL_CondSignal(is->subpq_cond);
2333         SDL_UnlockMutex(is->subpq_mutex);
2334
2335         SDL_WaitThread(is->subtitle_tid, NULL);
2336
2337         packet_queue_end(&is->subtitleq);
2338         break;
2339     default:
2340         break;
2341     }
2342
2343     ic->streams[stream_index]->discard = AVDISCARD_ALL;
2344     avcodec_close(avctx);
2345     switch(avctx->codec_type) {
2346     case AVMEDIA_TYPE_AUDIO:
2347         is->audio_st = NULL;
2348         is->audio_stream = -1;
2349         break;
2350     case AVMEDIA_TYPE_VIDEO:
2351         is->video_st = NULL;
2352         is->video_stream = -1;
2353         break;
2354     case AVMEDIA_TYPE_SUBTITLE:
2355         is->subtitle_st = NULL;
2356         is->subtitle_stream = -1;
2357         break;
2358     default:
2359         break;
2360     }
2361 }
2362
2363 /* since we have only one decoding thread, we can use a global
2364    variable instead of a thread local variable */
2365 static VideoState *global_video_state;
2366
2367 static int decode_interrupt_cb(void)
2368 {
2369     return (global_video_state && global_video_state->abort_request);
2370 }
2371
2372 /* this thread gets the stream from the disk or the network */
2373 static int decode_thread(void *arg)
2374 {
2375     VideoState *is = arg;
2376     AVFormatContext *ic;
2377     int err, i, ret;
2378     int st_index[AVMEDIA_TYPE_NB];
2379     AVPacket pkt1, *pkt = &pkt1;
2380     AVFormatParameters params, *ap = &params;
2381     int eof=0;
2382     int pkt_in_play_range = 0;
2383
2384     ic = avformat_alloc_context();
2385
2386     memset(st_index, -1, sizeof(st_index));
2387     is->video_stream = -1;
2388     is->audio_stream = -1;
2389     is->subtitle_stream = -1;
2390
2391     global_video_state = is;
2392     avio_set_interrupt_cb(decode_interrupt_cb);
2393
2394     memset(ap, 0, sizeof(*ap));
2395
2396     ap->prealloced_context = 1;
2397     ap->width = frame_width;
2398     ap->height= frame_height;
2399     ap->time_base= (AVRational){1, 25};
2400     ap->pix_fmt = frame_pix_fmt;
2401
2402     set_context_opts(ic, avformat_opts, AV_OPT_FLAG_DECODING_PARAM, NULL);
2403
2404     err = av_open_input_file(&ic, is->filename, is->iformat, 0, ap);
2405     if (err < 0) {
2406         print_error(is->filename, err);
2407         ret = -1;
2408         goto fail;
2409     }
2410     is->ic = ic;
2411
2412     if(genpts)
2413         ic->flags |= AVFMT_FLAG_GENPTS;
2414
2415     err = av_find_stream_info(ic);
2416     if (err < 0) {
2417         fprintf(stderr, "%s: could not find codec parameters\n", is->filename);
2418         ret = -1;
2419         goto fail;
2420     }
2421     if(ic->pb)
2422         ic->pb->eof_reached= 0; //FIXME hack, ffplay maybe should not use url_feof() to test for the end
2423
2424     if(seek_by_bytes<0)
2425         seek_by_bytes= !!(ic->iformat->flags & AVFMT_TS_DISCONT);
2426
2427     /* if a start-time seek was requested, execute it */
2428     if (start_time != AV_NOPTS_VALUE) {
2429         int64_t timestamp;
2430
2431         timestamp = start_time;
2432         /* add the stream start time */
2433         if (ic->start_time != AV_NOPTS_VALUE)
2434             timestamp += ic->start_time;
2435         ret = avformat_seek_file(ic, -1, INT64_MIN, timestamp, INT64_MAX, 0);
2436         if (ret < 0) {
2437             fprintf(stderr, "%s: could not seek to position %0.3f\n",
2438                     is->filename, (double)timestamp / AV_TIME_BASE);
2439         }
2440     }
2441
2442     for (i = 0; i < ic->nb_streams; i++)
2443         ic->streams[i]->discard = AVDISCARD_ALL;
2444     if (!video_disable)
2445         st_index[AVMEDIA_TYPE_VIDEO] =
2446             av_find_best_stream(ic, AVMEDIA_TYPE_VIDEO,
2447                                 wanted_stream[AVMEDIA_TYPE_VIDEO], -1, NULL, 0);
2448     if (!audio_disable)
2449         st_index[AVMEDIA_TYPE_AUDIO] =
2450             av_find_best_stream(ic, AVMEDIA_TYPE_AUDIO,
2451                                 wanted_stream[AVMEDIA_TYPE_AUDIO],
2452                                 st_index[AVMEDIA_TYPE_VIDEO],
2453                                 NULL, 0);
2454     if (!video_disable)
2455         st_index[AVMEDIA_TYPE_SUBTITLE] =
2456             av_find_best_stream(ic, AVMEDIA_TYPE_SUBTITLE,
2457                                 wanted_stream[AVMEDIA_TYPE_SUBTITLE],
2458                                 (st_index[AVMEDIA_TYPE_AUDIO] >= 0 ?
2459                                  st_index[AVMEDIA_TYPE_AUDIO] :
2460                                  st_index[AVMEDIA_TYPE_VIDEO]),
2461                                 NULL, 0);
2462     if (show_status) {
2463         av_dump_format(ic, 0, is->filename, 0);
2464     }
2465
2466     /* open the streams */
2467     if (st_index[AVMEDIA_TYPE_AUDIO] >= 0) {
2468         stream_component_open(is, st_index[AVMEDIA_TYPE_AUDIO]);
2469     }
2470
2471     ret=-1;
2472     if (st_index[AVMEDIA_TYPE_VIDEO] >= 0) {
2473         ret= stream_component_open(is, st_index[AVMEDIA_TYPE_VIDEO]);
2474     }
2475     is->refresh_tid = SDL_CreateThread(refresh_thread, is);
2476     if(ret<0) {
2477         if (!display_disable)
2478             is->show_audio = 2;
2479     }
2480
2481     if (st_index[AVMEDIA_TYPE_SUBTITLE] >= 0) {
2482         stream_component_open(is, st_index[AVMEDIA_TYPE_SUBTITLE]);
2483     }
2484
2485     if (is->video_stream < 0 && is->audio_stream < 0) {
2486         fprintf(stderr, "%s: could not open codecs\n", is->filename);
2487         ret = -1;
2488         goto fail;
2489     }
2490
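         /* main demux loop: honour pause and seek requests, keep the packet
            queues topped up (bounded by MAX_QUEUE_SIZE / MIN_FRAMES), and feed
            a null packet to the video queue at EOF to flush delayed frames out
            of the decoder */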
2491     for(;;) {
2492         if (is->abort_request)
2493             break;
2494         if (is->paused != is->last_paused) {
2495             is->last_paused = is->paused;
2496             if (is->paused)
2497                 is->read_pause_return= av_read_pause(ic);
2498             else
2499                 av_read_play(ic);
2500         }
2501 #if CONFIG_RTSP_DEMUXER
2502         if (is->paused && !strcmp(ic->iformat->name, "rtsp")) {
2503             /* wait 10 ms to avoid trying to get another packet */
2504             /* XXX: horrible */
2505             SDL_Delay(10);
2506             continue;
2507         }
2508 #endif
2509         if (is->seek_req) {
2510             int64_t seek_target= is->seek_pos;
2511             int64_t seek_min= is->seek_rel > 0 ? seek_target - is->seek_rel + 2: INT64_MIN;
2512             int64_t seek_max= is->seek_rel < 0 ? seek_target - is->seek_rel - 2: INT64_MAX;
2513 //FIXME the +-2 is due to rounding being not done in the correct direction in generation
2514 //      of the seek_pos/seek_rel variables
2515
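                 /* after a successful seek each packet queue is flushed and a
                    flush_pkt is queued; the decoder threads recognise it by its
                    data pointer and call avcodec_flush_buffers() before
                    resuming */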
2516             ret = avformat_seek_file(is->ic, -1, seek_min, seek_target, seek_max, is->seek_flags);
2517             if (ret < 0) {
2518                 fprintf(stderr, "%s: error while seeking\n", is->ic->filename);
2519             }else{
2520                 if (is->audio_stream >= 0) {
2521                     packet_queue_flush(&is->audioq);
2522                     packet_queue_put(&is->audioq, &flush_pkt);
2523                 }
2524                 if (is->subtitle_stream >= 0) {
2525                     packet_queue_flush(&is->subtitleq);
2526                     packet_queue_put(&is->subtitleq, &flush_pkt);
2527                 }
2528                 if (is->video_stream >= 0) {
2529                     packet_queue_flush(&is->videoq);
2530                     packet_queue_put(&is->videoq, &flush_pkt);
2531                 }
2532             }
2533             is->seek_req = 0;
2534             eof= 0;
2535         }
2536
2537         /* if the queues are full, no need to read more */
2538         if (   is->audioq.size + is->videoq.size + is->subtitleq.size > MAX_QUEUE_SIZE
2539             || (   (is->audioq   .size  > MIN_AUDIOQ_SIZE || is->audio_stream<0)
2540                 && (is->videoq   .nb_packets > MIN_FRAMES || is->video_stream<0)
2541                 && (is->subtitleq.nb_packets > MIN_FRAMES || is->subtitle_stream<0))) {
2542             /* wait 10 ms */
2543             SDL_Delay(10);
2544             continue;
2545         }
2546         if(eof) {
2547             if(is->video_stream >= 0){
2548                 av_init_packet(pkt);
2549                 pkt->data=NULL;
2550                 pkt->size=0;
2551                 pkt->stream_index= is->video_stream;
2552                 packet_queue_put(&is->videoq, pkt);
2553             }
2554             SDL_Delay(10);
2555             if(is->audioq.size + is->videoq.size + is->subtitleq.size ==0){
2556                 if(loop!=1 && (!loop || --loop)){
2557                     stream_seek(cur_stream, start_time != AV_NOPTS_VALUE ? start_time : 0, 0, 0);
2558                 }else if(autoexit){
2559                     ret=AVERROR_EOF;
2560                     goto fail;
2561                 }
2562             }
2563             continue;
2564         }
2565         ret = av_read_frame(ic, pkt);
2566         if (ret < 0) {
2567             if (ret == AVERROR_EOF || (ic->pb && ic->pb->eof_reached))
2568                 eof=1;
2569             if (ic->pb && ic->pb->error)
2570                 break;
2571             SDL_Delay(100); /* wait for user event */
2572             continue;
2573         }
2574         /* check if packet is in play range specified by user, then queue, otherwise discard */
2575         pkt_in_play_range = duration == AV_NOPTS_VALUE ||
2576                 (pkt->pts - ic->streams[pkt->stream_index]->start_time) *
2577                 av_q2d(ic->streams[pkt->stream_index]->time_base) -
2578                 (double)(start_time != AV_NOPTS_VALUE ? start_time : 0)/1000000
2579                 <= ((double)duration/1000000);
2580         if (pkt->stream_index == is->audio_stream && pkt_in_play_range) {
2581             packet_queue_put(&is->audioq, pkt);
2582         } else if (pkt->stream_index == is->video_stream && pkt_in_play_range) {
2583             packet_queue_put(&is->videoq, pkt);
2584         } else if (pkt->stream_index == is->subtitle_stream && pkt_in_play_range) {
2585             packet_queue_put(&is->subtitleq, pkt);
2586         } else {
2587             av_free_packet(pkt);
2588         }
2589     }
2590     /* wait until the end */
2591     while (!is->abort_request) {
2592         SDL_Delay(100);
2593     }
2594
2595     ret = 0;
2596  fail:
2597     /* disable interrupting */
2598     global_video_state = NULL;
2599
2600     /* close each stream */
2601     if (is->audio_stream >= 0)
2602         stream_component_close(is, is->audio_stream);
2603     if (is->video_stream >= 0)
2604         stream_component_close(is, is->video_stream);
2605     if (is->subtitle_stream >= 0)
2606         stream_component_close(is, is->subtitle_stream);
2607     if (is->ic) {
2608         av_close_input_file(is->ic);
2609         is->ic = NULL; /* safety */
2610     }
2611     avio_set_interrupt_cb(NULL);
2612
2613     if (ret != 0) {
2614         SDL_Event event;
2615
2616         event.type = FF_QUIT_EVENT;
2617         event.user.data1 = is;
2618         SDL_PushEvent(&event);
2619     }
2620     return 0;
2621 }
2622
2623 static VideoState *stream_open(const char *filename, AVInputFormat *iformat)
2624 {
2625     VideoState *is;
2626
2627     is = av_mallocz(sizeof(VideoState));
2628     if (!is)
2629         return NULL;
2630     av_strlcpy(is->filename, filename, sizeof(is->filename));
2631     is->iformat = iformat;
2632     is->ytop = 0;
2633     is->xleft = 0;
2634
2635     /* start video display */
2636     is->pictq_mutex = SDL_CreateMutex();
2637     is->pictq_cond = SDL_CreateCond();
2638
2639     is->subpq_mutex = SDL_CreateMutex();
2640     is->subpq_cond = SDL_CreateCond();
2641
2642     is->av_sync_type = av_sync_type;
2643     is->parse_tid = SDL_CreateThread(decode_thread, is);
2644     if (!is->parse_tid) {
2645         av_free(is);
2646         return NULL;
2647     }
2648     return is;
2649 }
2650
2651 static void stream_cycle_channel(VideoState *is, int codec_type)
2652 {
2653     AVFormatContext *ic = is->ic;
2654     int start_index, stream_index;
2655     AVStream *st;
2656
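         /* walk the stream list starting after the currently open stream and
            wrap around; the first stream of the requested type with sane
            parameters replaces the current one (subtitles may cycle to none) */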
2657     if (codec_type == AVMEDIA_TYPE_VIDEO)
2658         start_index = is->video_stream;
2659     else if (codec_type == AVMEDIA_TYPE_AUDIO)
2660         start_index = is->audio_stream;
2661     else
2662         start_index = is->subtitle_stream;
2663     if (start_index < (codec_type == AVMEDIA_TYPE_SUBTITLE ? -1 : 0))
2664         return;
2665     stream_index = start_index;
2666     for(;;) {
2667         if (++stream_index >= is->ic->nb_streams)
2668         {
2669             if (codec_type == AVMEDIA_TYPE_SUBTITLE)
2670             {
2671                 stream_index = -1;
2672                 goto the_end;
2673             } else
2674                 stream_index = 0;
2675         }
2676         if (stream_index == start_index)
2677             return;
2678         st = ic->streams[stream_index];
2679         if (st->codec->codec_type == codec_type) {
2680             /* check that parameters are OK */
2681             switch(codec_type) {
2682             case AVMEDIA_TYPE_AUDIO:
2683                 if (st->codec->sample_rate != 0 &&
2684                     st->codec->channels != 0)
2685                     goto the_end;
2686                 break;
2687             case AVMEDIA_TYPE_VIDEO:
2688             case AVMEDIA_TYPE_SUBTITLE:
2689                 goto the_end;
2690             default:
2691                 break;
2692             }
2693         }
2694     }
2695  the_end:
2696     stream_component_close(is, start_index);
2697     stream_component_open(is, stream_index);
2698 }
2699
2700
2701 static void toggle_full_screen(void)
2702 {
2703     is_full_screen = !is_full_screen;
2704     if (!fs_screen_width) {
2705         /* use default SDL method */
2706 //        SDL_WM_ToggleFullScreen(screen);
2707     }
2708     video_open(cur_stream);
2709 }
2710
2711 static void toggle_pause(void)
2712 {
2713     if (cur_stream)
2714         stream_pause(cur_stream);
2715     step = 0;
2716 }
2717
2718 static void step_to_next_frame(void)
2719 {
2720     if (cur_stream) {
2721         /* if the stream is paused unpause it, then step */
2722         if (cur_stream->paused)
2723             stream_pause(cur_stream);
2724     }
2725     step = 1;
2726 }
2727
2728 static void toggle_audio_display(void)
2729 {
2730     if (cur_stream) {
2731         int bgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0x00);
2732         cur_stream->show_audio = (cur_stream->show_audio + 1) % 3;
2733         fill_rectangle(screen,
2734                     cur_stream->xleft, cur_stream->ytop, cur_stream->width, cur_stream->height,
2735                     bgcolor);
2736         SDL_UpdateRect(screen, cur_stream->xleft, cur_stream->ytop, cur_stream->width, cur_stream->height);
2737     }
2738 }
2739
2740 /* handle an event sent by the GUI */
2741 static void event_loop(void)
2742 {
2743     SDL_Event event;
2744     double incr, pos, frac;
2745
2746     for(;;) {
2747         double x;
2748         SDL_WaitEvent(&event);
2749         switch(event.type) {
2750         case SDL_KEYDOWN:
2751             if (exit_on_keydown) {
2752                 do_exit();
2753                 break;
2754             }
2755             switch(event.key.keysym.sym) {
2756             case SDLK_ESCAPE:
2757             case SDLK_q:
2758                 do_exit();
2759                 break;
2760             case SDLK_f:
2761                 toggle_full_screen();
2762                 break;
2763             case SDLK_p:
2764             case SDLK_SPACE:
2765                 toggle_pause();
2766                 break;
2767             case SDLK_s: //S: Step to next frame
2768                 step_to_next_frame();
2769                 break;
2770             case SDLK_a:
2771                 if (cur_stream)
2772                     stream_cycle_channel(cur_stream, AVMEDIA_TYPE_AUDIO);
2773                 break;
2774             case SDLK_v:
2775                 if (cur_stream)
2776                     stream_cycle_channel(cur_stream, AVMEDIA_TYPE_VIDEO);
2777                 break;
2778             case SDLK_t:
2779                 if (cur_stream)
2780                     stream_cycle_channel(cur_stream, AVMEDIA_TYPE_SUBTITLE);
2781                 break;
2782             case SDLK_w:
2783                 toggle_audio_display();
2784                 break;
2785             case SDLK_LEFT:
2786                 incr = -10.0;
2787                 goto do_seek;
2788             case SDLK_RIGHT:
2789                 incr = 10.0;
2790                 goto do_seek;
2791             case SDLK_UP:
2792                 incr = 60.0;
2793                 goto do_seek;
2794             case SDLK_DOWN:
2795                 incr = -60.0;
2796             do_seek:
2797                 if (cur_stream) {
2798                     if (seek_by_bytes) {
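                                 /* in byte-seek mode the +/-10s and +/-60s
                                    increments are converted to bytes using the
                                    container bit rate (bit_rate / 8 bytes per
                                    second), with 180000 bytes/s as a fallback
                                    when the rate is unknown */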
2799                         if (cur_stream->video_stream >= 0 && cur_stream->video_current_pos>=0){
2800                             pos= cur_stream->video_current_pos;
2801                         }else if(cur_stream->audio_stream >= 0 && cur_stream->audio_pkt.pos>=0){
2802                             pos= cur_stream->audio_pkt.pos;
2803                         }else
2804                             pos = avio_tell(cur_stream->ic->pb);
2805                         if (cur_stream->ic->bit_rate)
2806                             incr *= cur_stream->ic->bit_rate / 8.0;
2807                         else
2808                             incr *= 180000.0;
2809                         pos += incr;
2810                         stream_seek(cur_stream, pos, incr, 1);
2811                     } else {
2812                         pos = get_master_clock(cur_stream);
2813                         pos += incr;
2814                         stream_seek(cur_stream, (int64_t)(pos * AV_TIME_BASE), (int64_t)(incr * AV_TIME_BASE), 0);
2815                     }
2816                 }
2817                 break;
2818             default:
2819                 break;
2820             }
2821             break;
2822         case SDL_MOUSEBUTTONDOWN:
2823             if (exit_on_mousedown) {
2824                 do_exit();
2825                 break;
2826             }
2827         case SDL_MOUSEMOTION:
2828             if(event.type ==SDL_MOUSEBUTTONDOWN){
2829                 x= event.button.x;
2830             }else{
2831                 if(event.motion.state != SDL_PRESSED)
2832                     break;
2833                 x= event.motion.x;
2834             }
2835             if (cur_stream) {
2836                 if(seek_by_bytes || cur_stream->ic->duration<=0){
2837                     uint64_t size=  avio_size(cur_stream->ic->pb);
2838                     stream_seek(cur_stream, size*x/cur_stream->width, 0, 1);
2839                 }else{
2840                     int64_t ts;
2841                     int ns, hh, mm, ss;
2842                     int tns, thh, tmm, tss;
2843                     tns = cur_stream->ic->duration/1000000LL;
2844                     thh = tns/3600;
2845                     tmm = (tns%3600)/60;
2846                     tss = (tns%60);
2847                     frac = x/cur_stream->width;
2848                     ns = frac*tns;
2849                     hh = ns/3600;
2850                     mm = (ns%3600)/60;
2851                     ss = (ns%60);
2852                     fprintf(stderr, "Seek to %2.0f%% (%2d:%02d:%02d) of total duration (%2d:%02d:%02d)       \n", frac*100,
2853                             hh, mm, ss, thh, tmm, tss);
2854                     ts = frac*cur_stream->ic->duration;
2855                     if (cur_stream->ic->start_time != AV_NOPTS_VALUE)
2856                         ts += cur_stream->ic->start_time;
2857                     stream_seek(cur_stream, ts, 0, 0);
2858                 }
2859             }
2860             break;
2861         case SDL_VIDEORESIZE:
2862             if (cur_stream) {
2863                 screen = SDL_SetVideoMode(event.resize.w, event.resize.h, 0,
2864                                           SDL_HWSURFACE|SDL_RESIZABLE|SDL_ASYNCBLIT|SDL_HWACCEL);
2865                 screen_width = cur_stream->width = event.resize.w;
2866                 screen_height= cur_stream->height= event.resize.h;
2867             }
2868             break;
2869         case SDL_QUIT:
2870         case FF_QUIT_EVENT:
2871             do_exit();
2872             break;
2873         case FF_ALLOC_EVENT:
2874             video_open(event.user.data1);
2875             alloc_picture(event.user.data1);
2876             break;
2877         case FF_REFRESH_EVENT:
2878             video_refresh_timer(event.user.data1);
2879             cur_stream->refresh=0;
2880             break;
2881         default:
2882             break;
2883         }
2884     }
2885 }
2886
2887 static void opt_frame_size(const char *arg)
2888 {
2889     if (av_parse_video_size(&frame_width, &frame_height, arg) < 0) {
2890         fprintf(stderr, "Incorrect frame size\n");
2891         exit(1);
2892     }
2893     if ((frame_width % 2) != 0 || (frame_height % 2) != 0) {
2894         fprintf(stderr, "Frame size must be a multiple of 2\n");
2895         exit(1);
2896     }
2897 }
2898
2899 static int opt_width(const char *opt, const char *arg)
2900 {
2901     screen_width = parse_number_or_die(opt, arg, OPT_INT64, 1, INT_MAX);
2902     return 0;
2903 }
2904
2905 static int opt_height(const char *opt, const char *arg)
2906 {
2907     screen_height = parse_number_or_die(opt, arg, OPT_INT64, 1, INT_MAX);
2908     return 0;
2909 }
2910
2911 static void opt_format(const char *arg)
2912 {
2913     file_iformat = av_find_input_format(arg);
2914     if (!file_iformat) {
2915         fprintf(stderr, "Unknown input format: %s\n", arg);
2916         exit(1);
2917     }
2918 }
2919
2920 static void opt_frame_pix_fmt(const char *arg)
2921 {
2922     frame_pix_fmt = av_get_pix_fmt(arg);
2923 }
2924
2925 static int opt_sync(const char *opt, const char *arg)
2926 {
2927     if (!strcmp(arg, "audio"))
2928         av_sync_type = AV_SYNC_AUDIO_MASTER;
2929     else if (!strcmp(arg, "video"))
2930         av_sync_type = AV_SYNC_VIDEO_MASTER;
2931     else if (!strcmp(arg, "ext"))
2932         av_sync_type = AV_SYNC_EXTERNAL_CLOCK;
2933     else {
2934         fprintf(stderr, "Unknown value for %s: %s\n", opt, arg);
2935         exit(1);
2936     }
2937     return 0;
2938 }
2939
2940 static int opt_seek(const char *opt, const char *arg)
2941 {
2942     start_time = parse_time_or_die(opt, arg, 1);
2943     return 0;
2944 }
2945
2946 static int opt_duration(const char *opt, const char *arg)
2947 {
2948     duration = parse_time_or_die(opt, arg, 1);
2949     return 0;
2950 }
2951
2952 static int opt_debug(const char *opt, const char *arg)
2953 {
2954     av_log_set_level(99);
2955     debug = parse_number_or_die(opt, arg, OPT_INT64, 0, INT_MAX);
2956     return 0;
2957 }
2958
2959 static int opt_vismv(const char *opt, const char *arg)
2960 {
2961     debug_mv = parse_number_or_die(opt, arg, OPT_INT64, INT_MIN, INT_MAX);
2962     return 0;
2963 }
2964
2965 static int opt_thread_count(const char *opt, const char *arg)
2966 {
2967     thread_count= parse_number_or_die(opt, arg, OPT_INT64, 0, INT_MAX);
2968 #if !HAVE_THREADS
2969     fprintf(stderr, "Warning: not compiled with thread support, using thread emulation\n");
2970 #endif
2971     return 0;
2972 }
2973
2974 static const OptionDef options[] = {
2975 #include "cmdutils_common_opts.h"
2976     { "x", HAS_ARG | OPT_FUNC2, {(void*)opt_width}, "force displayed width", "width" },
2977     { "y", HAS_ARG | OPT_FUNC2, {(void*)opt_height}, "force displayed height", "height" },
2978     { "s", HAS_ARG | OPT_VIDEO, {(void*)opt_frame_size}, "set frame size (WxH or abbreviation)", "size" },
2979     { "fs", OPT_BOOL, {(void*)&is_full_screen}, "force full screen" },
2980     { "an", OPT_BOOL, {(void*)&audio_disable}, "disable audio" },
2981     { "vn", OPT_BOOL, {(void*)&video_disable}, "disable video" },
2982     { "ast", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&wanted_stream[AVMEDIA_TYPE_AUDIO]}, "select desired audio stream", "stream_number" },
2983     { "vst", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&wanted_stream[AVMEDIA_TYPE_VIDEO]}, "select desired video stream", "stream_number" },
2984     { "sst", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&wanted_stream[AVMEDIA_TYPE_SUBTITLE]}, "select desired subtitle stream", "stream_number" },
2985     { "ss", HAS_ARG | OPT_FUNC2, {(void*)&opt_seek}, "seek to a given position in seconds", "pos" },
2986     { "t", HAS_ARG | OPT_FUNC2, {(void*)&opt_duration}, "play \"duration\" seconds of audio/video", "duration" },
2987     { "bytes", OPT_INT | HAS_ARG, {(void*)&seek_by_bytes}, "seek by bytes 0=off 1=on -1=auto", "val" },
2988     { "nodisp", OPT_BOOL, {(void*)&display_disable}, "disable graphical display" },
2989     { "f", HAS_ARG, {(void*)opt_format}, "force format", "fmt" },
2990     { "pix_fmt", HAS_ARG | OPT_EXPERT | OPT_VIDEO, {(void*)opt_frame_pix_fmt}, "set pixel format", "format" },
2991     { "stats", OPT_BOOL | OPT_EXPERT, {(void*)&show_status}, "show status", "" },
2992     { "debug", HAS_ARG | OPT_FUNC2 | OPT_EXPERT, {(void*)opt_debug}, "print specific debug info", "" },
2993     { "bug", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&workaround_bugs}, "workaround bugs", "" },
2994     { "vismv", HAS_ARG | OPT_FUNC2 | OPT_EXPERT, {(void*)opt_vismv}, "visualize motion vectors", "" },
2995     { "fast", OPT_BOOL | OPT_EXPERT, {(void*)&fast}, "non spec compliant optimizations", "" },
2996     { "genpts", OPT_BOOL | OPT_EXPERT, {(void*)&genpts}, "generate pts", "" },
2997     { "drp", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&decoder_reorder_pts}, "let decoder reorder pts 0=off 1=on -1=auto", ""},
2998     { "lowres", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&lowres}, "", "" },
2999     { "skiploop", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&skip_loop_filter}, "", "" },
3000     { "skipframe", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&skip_frame}, "", "" },
3001     { "skipidct", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&skip_idct}, "", "" },
3002     { "idct", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&idct}, "set idct algo",  "algo" },
3003     { "er", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&error_recognition}, "set error detection threshold (0-4)",  "threshold" },
3004     { "ec", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&error_concealment}, "set error concealment options",  "bit_mask" },
3005     { "sync", HAS_ARG | OPT_FUNC2 | OPT_EXPERT, {(void*)opt_sync}, "set audio-video sync. type (type=audio/video/ext)", "type" },
3006     { "threads", HAS_ARG | OPT_FUNC2 | OPT_EXPERT, {(void*)opt_thread_count}, "thread count", "count" },
3007     { "autoexit", OPT_BOOL | OPT_EXPERT, {(void*)&autoexit}, "exit at the end", "" },
3008     { "exitonkeydown", OPT_BOOL | OPT_EXPERT, {(void*)&exit_on_keydown}, "exit on key down", "" },
3009     { "exitonmousedown", OPT_BOOL | OPT_EXPERT, {(void*)&exit_on_mousedown}, "exit on mouse down", "" },
3010     { "loop", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&loop}, "set number of times the playback shall be looped", "loop count" },
3011     { "framedrop", OPT_BOOL | OPT_EXPERT, {(void*)&framedrop}, "drop frames when cpu is too slow", "" },
3012     { "window_title", OPT_STRING | HAS_ARG, {(void*)&window_title}, "set window title", "window title" },
3013 #if CONFIG_AVFILTER
3014     { "vf", OPT_STRING | HAS_ARG, {(void*)&vfilters}, "video filters", "filter list" },
3015 #endif
3016     { "rdftspeed", OPT_INT | HAS_ARG| OPT_AUDIO | OPT_EXPERT, {(void*)&rdftspeed}, "rdft speed", "msecs" },
3017     { "default", OPT_FUNC2 | HAS_ARG | OPT_AUDIO | OPT_VIDEO | OPT_EXPERT, {(void*)opt_default}, "generic catch all option", "" },
3018     { "i", 0, {NULL}, "ffmpeg compatibility dummy option", ""},
3019     { NULL, },
3020 };
3021
3022 static void show_usage(void)
3023 {
3024     printf("Simple media player\n");
3025     printf("usage: ffplay [options] input_file\n");
3026     printf("\n");
3027 }
3028
3029 static void show_help(void)
3030 {
3031     av_log_set_callback(log_callback_help);
3032     show_usage();
3033     show_help_options(options, "Main options:\n",
3034                       OPT_EXPERT, 0);
3035     show_help_options(options, "\nAdvanced options:\n",
3036                       OPT_EXPERT, OPT_EXPERT);
3037     printf("\n");
3038     av_opt_show2(avcodec_opts[0], NULL,
3039                  AV_OPT_FLAG_DECODING_PARAM, 0);
3040     printf("\n");
3041     av_opt_show2(avformat_opts, NULL,
3042                  AV_OPT_FLAG_DECODING_PARAM, 0);
3043 #if !CONFIG_AVFILTER
3044     printf("\n");
3045     av_opt_show2(sws_opts, NULL,
3046                  AV_OPT_FLAG_ENCODING_PARAM, 0);
3047 #endif
3048     printf("\nWhile playing:\n"
3049            "q, ESC              quit\n"
3050            "f                   toggle full screen\n"
3051            "p, SPC              pause\n"
3052            "a                   cycle audio channel\n"
3053            "v                   cycle video channel\n"
3054            "t                   cycle subtitle channel\n"
3055            "w                   show audio waves\n"
3056            "s                   activate frame-step mode\n"
3057            "left/right          seek backward/forward 10 seconds\n"
3058            "down/up             seek backward/forward 1 minute\n"
3059            "mouse click         seek to percentage in file corresponding to fraction of width\n"
3060            );
3061 }
3062
3063 static void opt_input_file(const char *filename)
3064 {
3065     if (input_filename) {
3066         fprintf(stderr, "Argument '%s' provided as input filename, but '%s' was already specified.\n",
3067                 filename, input_filename);
3068         exit(1);
3069     }
3070     if (!strcmp(filename, "-"))
3071         filename = "pipe:";
3072     input_filename = filename;
3073 }
3074
3075 /* Called from the main */
3076 int main(int argc, char **argv)
3077 {
3078     int flags;
3079
3080     av_log_set_flags(AV_LOG_SKIP_REPEATED);
3081
3082     /* register all codecs, demux and protocols */
3083     avcodec_register_all();
3084 #if CONFIG_AVDEVICE
3085     avdevice_register_all();
3086 #endif
3087 #if CONFIG_AVFILTER
3088     avfilter_register_all();
3089 #endif
3090     av_register_all();
3091
3092     init_opts();
3093
3094     show_banner();
3095
3096     parse_options(argc, argv, options, opt_input_file);
3097
3098     if (!input_filename) {
3099         show_usage();
3100         fprintf(stderr, "An input file must be specified\n");
3101         fprintf(stderr, "Use -h to get full help or, even better, run 'man ffplay'\n");
3102         exit(1);
3103     }
3104
3105     if (display_disable) {
3106         video_disable = 1;
3107     }
3108     flags = SDL_INIT_VIDEO | SDL_INIT_AUDIO | SDL_INIT_TIMER;
3109 #if !defined(__MINGW32__) && !defined(__APPLE__)
3110     flags |= SDL_INIT_EVENTTHREAD; /* Not supported on Windows or Mac OS X */
3111 #endif
3112     if (SDL_Init (flags)) {
3113         fprintf(stderr, "Could not initialize SDL - %s\n", SDL_GetError());
3114         exit(1);
3115     }
3116
3117     if (!display_disable) {
3118 #if HAVE_SDL_VIDEO_SIZE
3119         const SDL_VideoInfo *vi = SDL_GetVideoInfo();
3120         fs_screen_width = vi->current_w;
3121         fs_screen_height = vi->current_h;
3122 #endif
3123     }
3124
3125     SDL_EventState(SDL_ACTIVEEVENT, SDL_IGNORE);
3126     SDL_EventState(SDL_SYSWMEVENT, SDL_IGNORE);
3127     SDL_EventState(SDL_USEREVENT, SDL_IGNORE);
3128
3129     av_init_packet(&flush_pkt);
3130     flush_pkt.data= "FLUSH";
3131
3132     cur_stream = stream_open(input_filename, file_iformat);
3133
3134     event_loop();
3135
3136     /* never returns */
3137
3138     return 0;
3139 }