/* avplay.c -- Simple Media Player based on the Libav libraries */
1 /*
2  * avplay : Simple Media Player based on the Libav libraries
3  * Copyright (c) 2003 Fabrice Bellard
4  *
5  * This file is part of Libav.
6  *
7  * Libav is free software; you can redistribute it and/or
8  * modify it under the terms of the GNU Lesser General Public
9  * License as published by the Free Software Foundation; either
10  * version 2.1 of the License, or (at your option) any later version.
11  *
12  * Libav is distributed in the hope that it will be useful,
13  * but WITHOUT ANY WARRANTY; without even the implied warranty of
14  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
15  * Lesser General Public License for more details.
16  *
17  * You should have received a copy of the GNU Lesser General Public
18  * License along with Libav; if not, write to the Free Software
19  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
20  */
21
22 #include "config.h"
23 #include <inttypes.h>
24 #include <math.h>
25 #include <limits.h>
26 #include "libavutil/avstring.h"
27 #include "libavutil/colorspace.h"
28 #include "libavutil/mathematics.h"
29 #include "libavutil/pixdesc.h"
30 #include "libavutil/imgutils.h"
31 #include "libavutil/dict.h"
32 #include "libavutil/parseutils.h"
33 #include "libavutil/samplefmt.h"
34 #include "libavformat/avformat.h"
35 #include "libavdevice/avdevice.h"
36 #include "libswscale/swscale.h"
37 #include "libavcodec/audioconvert.h"
38 #include "libavutil/opt.h"
39 #include "libavcodec/avfft.h"
40
41 #if CONFIG_AVFILTER
42 # include "libavfilter/avfilter.h"
43 # include "libavfilter/avfiltergraph.h"
44 #endif
45
46 #include "cmdutils.h"
47
48 #include <SDL.h>
49 #include <SDL_thread.h>
50
51 #ifdef __MINGW32__
52 #undef main /* We don't want SDL to override our main() */
53 #endif
54
55 #include <unistd.h>
56 #include <assert.h>
57
const char program_name[] = "avplay";
const int program_birth_year = 2003;

/* cap on total demuxed bytes buffered across the packet queues
   (presumably enforced by the read thread; its code is outside this chunk) */
#define MAX_QUEUE_SIZE (15 * 1024 * 1024)
#define MIN_AUDIOQ_SIZE (20 * 16 * 1024)
#define MIN_FRAMES 5

/* SDL audio buffer size, in samples. Should be small to have precise
   A/V sync as SDL does not have hardware buffer fullness info. */
#define SDL_AUDIO_BUFFER_SIZE 1024

/* no AV sync correction is done if below the AV sync threshold */
#define AV_SYNC_THRESHOLD 0.01
/* no AV correction is done if too big error */
#define AV_NOSYNC_THRESHOLD 10.0

#define FRAME_SKIP_FACTOR 0.05

/* maximum audio speed change to get correct sync */
#define SAMPLE_CORRECTION_PERCENT_MAX 10

/* we use about AUDIO_DIFF_AVG_NB A-V differences to make the average */
#define AUDIO_DIFF_AVG_NB   20

/* NOTE: the size must be big enough to compensate the hardware audio buffersize size */
#define SAMPLE_ARRAY_SIZE (2*65536)

/* scaler flags used when converting frames for display (non-avfilter path) */
static int sws_flags = SWS_BICUBIC;

/* Thread-safe FIFO of demuxed packets: producer/consumer hand-off guarded by
   mutex; cond is signalled on every put and on abort. */
typedef struct PacketQueue {
    AVPacketList *first_pkt, *last_pkt; /* singly linked list head/tail */
    int nb_packets;
    int size;                /* sum of payload sizes + list-node overhead */
    int abort_request;       /* set by packet_queue_abort() to wake waiters */
    SDL_mutex *mutex;
    SDL_cond *cond;
} PacketQueue;

#define VIDEO_PICTURE_QUEUE_SIZE 2
#define SUBPICTURE_QUEUE_SIZE 4
98
/* One decoded video frame queued for display. */
typedef struct VideoPicture {
    double pts;                                  ///<presentation time stamp for this picture
    double target_clock;                         ///<av_gettime() time at which this should be displayed ideally
    int64_t pos;                                 ///<byte position in file
    SDL_Overlay *bmp;                            ///<SDL YUV overlay holding the pixel data
    int width, height; /* source height & width */
    int allocated;     /* bmp exists (allocation is handled outside this chunk) */
    int reallocate;    /* request to recreate bmp (e.g. on size change) -- handler not in this chunk */
    enum PixelFormat pix_fmt;

#if CONFIG_AVFILTER
    AVFilterBufferRef *picref;                   ///<buffer reference owning the filtered frame
#endif
} VideoPicture;
113
/* One decoded subtitle queued for display. */
typedef struct SubPicture {
    double pts; /* presentation time stamp for this picture */
    AVSubtitle sub;
} SubPicture;

/* which clock drives A/V synchronization (see get_master_clock()) */
enum {
    AV_SYNC_AUDIO_MASTER, /* default choice */
    AV_SYNC_VIDEO_MASTER,
    AV_SYNC_EXTERNAL_CLOCK, /* synchronize to an external clock */
};
124
/* All state for one open media file: worker threads, the per-stream packet
 * queues, decoded picture/subtitle FIFOs, and the audio/video/external
 * clocks used for synchronization. */
typedef struct VideoState {
    SDL_Thread *parse_tid;    /* demux/read thread */
    SDL_Thread *video_tid;    /* video decoding thread */
    SDL_Thread *refresh_tid;  /* thread generating FF_REFRESH_EVENTs */
    AVInputFormat *iformat;
    int no_background;
    int abort_request;        /* tells all threads to exit */
    int paused;
    int last_paused;
    /* seek request parameters, consumed by the read thread */
    int seek_req;
    int seek_flags;
    int64_t seek_pos;
    int64_t seek_rel;
    int read_pause_return;
    AVFormatContext *ic;
    int dtg_active_format;

    int audio_stream;

    int av_sync_type;         /* one of the AV_SYNC_* values above */
    double external_clock; /* external clock base */
    int64_t external_clock_time;

    /* audio clock / drift-compensation state */
    double audio_clock;
    double audio_diff_cum; /* used for AV difference average computation */
    double audio_diff_avg_coef;
    double audio_diff_threshold;
    int audio_diff_avg_count;
    AVStream *audio_st;
    PacketQueue audioq;
    int audio_hw_buf_size;
    uint8_t silence_buf[SDL_AUDIO_BUFFER_SIZE];
    uint8_t *audio_buf;
    uint8_t *audio_buf1;
    unsigned int audio_buf_size; /* in bytes */
    int audio_buf_index; /* in bytes */
    AVPacket audio_pkt_temp;
    AVPacket audio_pkt;
    enum AVSampleFormat audio_src_fmt;
    AVAudioConvert *reformat_ctx;
    AVFrame *frame;

    /* audio visualization (waveform / RDFT spectrogram) state */
    int show_audio; /* if true, display audio samples */
    int16_t sample_array[SAMPLE_ARRAY_SIZE];
    int sample_array_index;
    int last_i_start;
    RDFTContext *rdft;
    int rdft_bits;
    FFTSample *rdft_data;
    int xpos;

    /* subtitle stream and its display queue */
    SDL_Thread *subtitle_tid;
    int subtitle_stream;
    int subtitle_stream_changed;
    AVStream *subtitle_st;
    PacketQueue subtitleq;
    SubPicture subpq[SUBPICTURE_QUEUE_SIZE];
    int subpq_size, subpq_rindex, subpq_windex;
    SDL_mutex *subpq_mutex;
    SDL_cond *subpq_cond;

    double frame_timer;
    double frame_last_pts;
    double frame_last_delay;
    double video_clock;                          ///<pts of last decoded frame / predicted pts of next decoded frame
    int video_stream;
    AVStream *video_st;
    PacketQueue videoq;
    double video_current_pts;                    ///<current displayed pts (different from video_clock if frame fifos are used)
    double video_current_pts_drift;              ///<video_current_pts - time (av_gettime) at which we updated video_current_pts - used to have running video pts
    int64_t video_current_pos;                   ///<current displayed file pos
    VideoPicture pictq[VIDEO_PICTURE_QUEUE_SIZE];
    int pictq_size, pictq_rindex, pictq_windex;
    SDL_mutex *pictq_mutex;
    SDL_cond *pictq_cond;
#if !CONFIG_AVFILTER
    struct SwsContext *img_convert_ctx;
#endif

    //    QETimer *video_timer;
    char filename[1024];
    int width, height, xleft, ytop;  /* display area within the SDL surface */

    PtsCorrectionContext pts_ctx;

#if CONFIG_AVFILTER
    AVFilterContext *out_video_filter;          ///<the last filter in the video chain
#endif

    float skip_frames;
    float skip_frames_index;
    int refresh;   /* a refresh event is pending (set here, cleared by the event loop) */
} VideoState;
218
static void show_help(void);

/* options specified by the user */
static AVInputFormat *file_iformat;
static const char *input_filename;
static const char *window_title;
static int fs_screen_width;
static int fs_screen_height;
static int screen_width = 0;
static int screen_height = 0;
static int audio_disable;
static int video_disable;
/* per-media-type requested stream index; -1 = automatic selection */
static int wanted_stream[AVMEDIA_TYPE_NB]={
    [AVMEDIA_TYPE_AUDIO]=-1,
    [AVMEDIA_TYPE_VIDEO]=-1,
    [AVMEDIA_TYPE_SUBTITLE]=-1,
};
static int seek_by_bytes=-1;  /* -1 = decide per container */
static int display_disable;
static int show_status = 1;
static int av_sync_type = AV_SYNC_AUDIO_MASTER;
static int64_t start_time = AV_NOPTS_VALUE;
static int64_t duration = AV_NOPTS_VALUE;
static int debug = 0;
static int debug_mv = 0;
static int step = 0;
static int thread_count = 1;
static int workaround_bugs = 1;
static int fast = 0;
static int genpts = 0;
static int lowres = 0;
static int idct = FF_IDCT_AUTO;
static enum AVDiscard skip_frame= AVDISCARD_DEFAULT;
static enum AVDiscard skip_idct= AVDISCARD_DEFAULT;
static enum AVDiscard skip_loop_filter= AVDISCARD_DEFAULT;
static int error_recognition = FF_ER_CAREFUL;
static int error_concealment = 3;
static int decoder_reorder_pts= -1;
static int autoexit;
static int exit_on_keydown;
static int exit_on_mousedown;
static int loop=1;
static int framedrop=1;

/* refresh period for the spectrogram display, in milliseconds */
static int rdftspeed=20;
#if CONFIG_AVFILTER
static char *vfilters = NULL;
#endif

/* current context */
static int is_full_screen;
static VideoState *cur_stream;
static int64_t audio_callback_time;  /* av_gettime() taken in the audio callback (set outside this chunk) */

/* sentinel packet queued to make decoders flush; see packet_queue_init/put */
static AVPacket flush_pkt;

/* custom SDL user events used by the worker threads */
#define FF_ALLOC_EVENT   (SDL_USEREVENT)
#define FF_REFRESH_EVENT (SDL_USEREVENT + 1)
#define FF_QUIT_EVENT    (SDL_USEREVENT + 2)

static SDL_Surface *screen;
280
/* Terminate the process immediately with status `ret`.
   NOTE(review): non-static on purpose -- presumably satisfies a declaration
   in cmdutils.h so the shared option code can exit; confirm against that
   header. */
void exit_program(int ret)
{
    exit(ret);
}
285
286 static int packet_queue_put(PacketQueue *q, AVPacket *pkt);
287
288 /* packet queue handling */
289 static void packet_queue_init(PacketQueue *q)
290 {
291     memset(q, 0, sizeof(PacketQueue));
292     q->mutex = SDL_CreateMutex();
293     q->cond = SDL_CreateCond();
294     packet_queue_put(q, &flush_pkt);
295 }
296
297 static void packet_queue_flush(PacketQueue *q)
298 {
299     AVPacketList *pkt, *pkt1;
300
301     SDL_LockMutex(q->mutex);
302     for(pkt = q->first_pkt; pkt != NULL; pkt = pkt1) {
303         pkt1 = pkt->next;
304         av_free_packet(&pkt->pkt);
305         av_freep(&pkt);
306     }
307     q->last_pkt = NULL;
308     q->first_pkt = NULL;
309     q->nb_packets = 0;
310     q->size = 0;
311     SDL_UnlockMutex(q->mutex);
312 }
313
314 static void packet_queue_end(PacketQueue *q)
315 {
316     packet_queue_flush(q);
317     SDL_DestroyMutex(q->mutex);
318     SDL_DestroyCond(q->cond);
319 }
320
321 static int packet_queue_put(PacketQueue *q, AVPacket *pkt)
322 {
323     AVPacketList *pkt1;
324
325     /* duplicate the packet */
326     if (pkt!=&flush_pkt && av_dup_packet(pkt) < 0)
327         return -1;
328
329     pkt1 = av_malloc(sizeof(AVPacketList));
330     if (!pkt1)
331         return -1;
332     pkt1->pkt = *pkt;
333     pkt1->next = NULL;
334
335
336     SDL_LockMutex(q->mutex);
337
338     if (!q->last_pkt)
339
340         q->first_pkt = pkt1;
341     else
342         q->last_pkt->next = pkt1;
343     q->last_pkt = pkt1;
344     q->nb_packets++;
345     q->size += pkt1->pkt.size + sizeof(*pkt1);
346     /* XXX: should duplicate packet data in DV case */
347     SDL_CondSignal(q->cond);
348
349     SDL_UnlockMutex(q->mutex);
350     return 0;
351 }
352
353 static void packet_queue_abort(PacketQueue *q)
354 {
355     SDL_LockMutex(q->mutex);
356
357     q->abort_request = 1;
358
359     SDL_CondSignal(q->cond);
360
361     SDL_UnlockMutex(q->mutex);
362 }
363
364 /* return < 0 if aborted, 0 if no packet and > 0 if packet.  */
365 static int packet_queue_get(PacketQueue *q, AVPacket *pkt, int block)
366 {
367     AVPacketList *pkt1;
368     int ret;
369
370     SDL_LockMutex(q->mutex);
371
372     for(;;) {
373         if (q->abort_request) {
374             ret = -1;
375             break;
376         }
377
378         pkt1 = q->first_pkt;
379         if (pkt1) {
380             q->first_pkt = pkt1->next;
381             if (!q->first_pkt)
382                 q->last_pkt = NULL;
383             q->nb_packets--;
384             q->size -= pkt1->pkt.size + sizeof(*pkt1);
385             *pkt = pkt1->pkt;
386             av_free(pkt1);
387             ret = 1;
388             break;
389         } else if (!block) {
390             ret = 0;
391             break;
392         } else {
393             SDL_CondWait(q->cond, q->mutex);
394         }
395     }
396     SDL_UnlockMutex(q->mutex);
397     return ret;
398 }
399
400 static inline void fill_rectangle(SDL_Surface *screen,
401                                   int x, int y, int w, int h, int color)
402 {
403     SDL_Rect rect;
404     rect.x = x;
405     rect.y = y;
406     rect.w = w;
407     rect.h = h;
408     SDL_FillRect(screen, &rect, color);
409 }
410
/* Blend newp over oldp with alpha a.  s is the number of extra precision
   bits carried by newp (callers pass chroma SUMS of 2^s pixels), matched
   by scaling oldp and the divisor. */
#define ALPHA_BLEND(a, oldp, newp, s)\
((((oldp << s) * (255 - (a))) + (newp * (a))) / (255 << s))

/* Unpack a packed 32-bit ARGB word pointed to by s. */
#define RGBA_IN(r, g, b, a, s)\
{\
    unsigned int v = ((const uint32_t *)(s))[0];\
    a = (v >> 24) & 0xff;\
    r = (v >> 16) & 0xff;\
    g = (v >> 8) & 0xff;\
    b = v & 0xff;\
}

/* Look up the palette entry for the index byte at s and unpack it as AYUV. */
#define YUVA_IN(y, u, v, a, s, pal)\
{\
    unsigned int val = ((const uint32_t *)(pal))[*(const uint8_t*)(s)];\
    a = (val >> 24) & 0xff;\
    y = (val >> 16) & 0xff;\
    u = (val >> 8) & 0xff;\
    v = val & 0xff;\
}

/* Pack AYUV components into the 32-bit word at d. */
#define YUVA_OUT(d, y, u, v, a)\
{\
    ((uint32_t *)(d))[0] = (a << 24) | (y << 16) | (u << 8) | v;\
}


/* bytes per source subtitle pixel: bitmaps are 8-bit palette indices */
#define BPP 1
439
440 static void blend_subrect(AVPicture *dst, const AVSubtitleRect *rect, int imgw, int imgh)
441 {
442     int wrap, wrap3, width2, skip2;
443     int y, u, v, a, u1, v1, a1, w, h;
444     uint8_t *lum, *cb, *cr;
445     const uint8_t *p;
446     const uint32_t *pal;
447     int dstx, dsty, dstw, dsth;
448
449     dstw = av_clip(rect->w, 0, imgw);
450     dsth = av_clip(rect->h, 0, imgh);
451     dstx = av_clip(rect->x, 0, imgw - dstw);
452     dsty = av_clip(rect->y, 0, imgh - dsth);
453     lum = dst->data[0] + dsty * dst->linesize[0];
454     cb = dst->data[1] + (dsty >> 1) * dst->linesize[1];
455     cr = dst->data[2] + (dsty >> 1) * dst->linesize[2];
456
457     width2 = ((dstw + 1) >> 1) + (dstx & ~dstw & 1);
458     skip2 = dstx >> 1;
459     wrap = dst->linesize[0];
460     wrap3 = rect->pict.linesize[0];
461     p = rect->pict.data[0];
462     pal = (const uint32_t *)rect->pict.data[1];  /* Now in YCrCb! */
463
464     if (dsty & 1) {
465         lum += dstx;
466         cb += skip2;
467         cr += skip2;
468
469         if (dstx & 1) {
470             YUVA_IN(y, u, v, a, p, pal);
471             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
472             cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
473             cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
474             cb++;
475             cr++;
476             lum++;
477             p += BPP;
478         }
479         for(w = dstw - (dstx & 1); w >= 2; w -= 2) {
480             YUVA_IN(y, u, v, a, p, pal);
481             u1 = u;
482             v1 = v;
483             a1 = a;
484             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
485
486             YUVA_IN(y, u, v, a, p + BPP, pal);
487             u1 += u;
488             v1 += v;
489             a1 += a;
490             lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
491             cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
492             cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
493             cb++;
494             cr++;
495             p += 2 * BPP;
496             lum += 2;
497         }
498         if (w) {
499             YUVA_IN(y, u, v, a, p, pal);
500             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
501             cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
502             cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
503             p++;
504             lum++;
505         }
506         p += wrap3 - dstw * BPP;
507         lum += wrap - dstw - dstx;
508         cb += dst->linesize[1] - width2 - skip2;
509         cr += dst->linesize[2] - width2 - skip2;
510     }
511     for(h = dsth - (dsty & 1); h >= 2; h -= 2) {
512         lum += dstx;
513         cb += skip2;
514         cr += skip2;
515
516         if (dstx & 1) {
517             YUVA_IN(y, u, v, a, p, pal);
518             u1 = u;
519             v1 = v;
520             a1 = a;
521             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
522             p += wrap3;
523             lum += wrap;
524             YUVA_IN(y, u, v, a, p, pal);
525             u1 += u;
526             v1 += v;
527             a1 += a;
528             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
529             cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
530             cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
531             cb++;
532             cr++;
533             p += -wrap3 + BPP;
534             lum += -wrap + 1;
535         }
536         for(w = dstw - (dstx & 1); w >= 2; w -= 2) {
537             YUVA_IN(y, u, v, a, p, pal);
538             u1 = u;
539             v1 = v;
540             a1 = a;
541             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
542
543             YUVA_IN(y, u, v, a, p + BPP, pal);
544             u1 += u;
545             v1 += v;
546             a1 += a;
547             lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
548             p += wrap3;
549             lum += wrap;
550
551             YUVA_IN(y, u, v, a, p, pal);
552             u1 += u;
553             v1 += v;
554             a1 += a;
555             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
556
557             YUVA_IN(y, u, v, a, p + BPP, pal);
558             u1 += u;
559             v1 += v;
560             a1 += a;
561             lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
562
563             cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 2);
564             cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 2);
565
566             cb++;
567             cr++;
568             p += -wrap3 + 2 * BPP;
569             lum += -wrap + 2;
570         }
571         if (w) {
572             YUVA_IN(y, u, v, a, p, pal);
573             u1 = u;
574             v1 = v;
575             a1 = a;
576             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
577             p += wrap3;
578             lum += wrap;
579             YUVA_IN(y, u, v, a, p, pal);
580             u1 += u;
581             v1 += v;
582             a1 += a;
583             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
584             cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
585             cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
586             cb++;
587             cr++;
588             p += -wrap3 + BPP;
589             lum += -wrap + 1;
590         }
591         p += wrap3 + (wrap3 - dstw * BPP);
592         lum += wrap + (wrap - dstw - dstx);
593         cb += dst->linesize[1] - width2 - skip2;
594         cr += dst->linesize[2] - width2 - skip2;
595     }
596     /* handle odd height */
597     if (h) {
598         lum += dstx;
599         cb += skip2;
600         cr += skip2;
601
602         if (dstx & 1) {
603             YUVA_IN(y, u, v, a, p, pal);
604             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
605             cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
606             cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
607             cb++;
608             cr++;
609             lum++;
610             p += BPP;
611         }
612         for(w = dstw - (dstx & 1); w >= 2; w -= 2) {
613             YUVA_IN(y, u, v, a, p, pal);
614             u1 = u;
615             v1 = v;
616             a1 = a;
617             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
618
619             YUVA_IN(y, u, v, a, p + BPP, pal);
620             u1 += u;
621             v1 += v;
622             a1 += a;
623             lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
624             cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u, 1);
625             cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v, 1);
626             cb++;
627             cr++;
628             p += 2 * BPP;
629             lum += 2;
630         }
631         if (w) {
632             YUVA_IN(y, u, v, a, p, pal);
633             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
634             cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
635             cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
636         }
637     }
638 }
639
/* Release the decoded subtitle data held by sp (the SubPicture slot itself
   is static queue storage and is not freed). */
static void free_subpicture(SubPicture *sp)
{
    avsubtitle_free(&sp->sub);
}
644
/* Blit the picture at the read index of the picture queue to the screen:
   blend any due subtitle into the overlay, then display it letterboxed to
   preserve the frame's aspect ratio within the window. */
static void video_image_display(VideoState *is)
{
    VideoPicture *vp;
    SubPicture *sp;
    AVPicture pict;
    float aspect_ratio;
    int width, height, x, y;
    SDL_Rect rect;
    int i;

    vp = &is->pictq[is->pictq_rindex];
    if (vp->bmp) {
        /* sample (pixel) aspect ratio: from the filter output when built
           with avfilter, otherwise from the stream/codec; 0 = unknown */
#if CONFIG_AVFILTER
         if (vp->picref->video->pixel_aspect.num == 0)
             aspect_ratio = 0;
         else
             aspect_ratio = av_q2d(vp->picref->video->pixel_aspect);
#else

        /* XXX: use variable in the frame */
        if (is->video_st->sample_aspect_ratio.num)
            aspect_ratio = av_q2d(is->video_st->sample_aspect_ratio);
        else if (is->video_st->codec->sample_aspect_ratio.num)
            aspect_ratio = av_q2d(is->video_st->codec->sample_aspect_ratio);
        else
            aspect_ratio = 0;
#endif
        if (aspect_ratio <= 0.0)
            aspect_ratio = 1.0;
        /* convert sample aspect ratio to display aspect ratio */
        aspect_ratio *= (float)vp->width / (float)vp->height;

        if (is->subtitle_st)
        {
            if (is->subpq_size > 0)
            {
                sp = &is->subpq[is->subpq_rindex];

                /* blend only once the subtitle's display window has begun */
                if (vp->pts >= sp->pts + ((float) sp->sub.start_display_time / 1000))
                {
                    SDL_LockYUVOverlay (vp->bmp);

                    /* map the overlay planes into an AVPicture; planes 1/2
                       are swapped (overlay stores V before U -- presumably
                       a YV12 overlay; plane order per SDL) */
                    pict.data[0] = vp->bmp->pixels[0];
                    pict.data[1] = vp->bmp->pixels[2];
                    pict.data[2] = vp->bmp->pixels[1];

                    pict.linesize[0] = vp->bmp->pitches[0];
                    pict.linesize[1] = vp->bmp->pitches[2];
                    pict.linesize[2] = vp->bmp->pitches[1];

                    for (i = 0; i < sp->sub.num_rects; i++)
                        blend_subrect(&pict, sp->sub.rects[i],
                                      vp->bmp->w, vp->bmp->h);

                    SDL_UnlockYUVOverlay (vp->bmp);
                }
            }
        }


        /* XXX: we suppose the screen has a 1.0 pixel ratio */
        /* fit the frame into the window, keeping aspect; & ~1 keeps the
           dimensions even */
        height = is->height;
        width = ((int)rint(height * aspect_ratio)) & ~1;
        if (width > is->width) {
            width = is->width;
            height = ((int)rint(width / aspect_ratio)) & ~1;
        }
        x = (is->width - width) / 2;
        y = (is->height - height) / 2;
        is->no_background = 0;
        rect.x = is->xleft + x;
        rect.y = is->ytop  + y;
        rect.w = width;
        rect.h = height;
        SDL_DisplayYUVOverlay(vp->bmp, &rect);
    }
}
721
/* Amount of decoded audio not yet handed to the SDL callback, in BYTES
   (the historic comment said "samples"; callers divide by the frame size).
   With SDL we cannot query the real hardware buffer fullness. */
static int audio_write_get_buf_size(VideoState *is)
{
    return is->audio_buf_size - is->audio_buf_index;
}
728
/* Mathematical modulo: like a % b but mapped into [0, b) for positive b. */
static inline int compute_mod(int a, int b)
{
    int r = a % b;

    return r >= 0 ? r : r + b;
}
737
738 static void video_audio_display(VideoState *s)
739 {
740     int i, i_start, x, y1, y, ys, delay, n, nb_display_channels;
741     int ch, channels, h, h2, bgcolor, fgcolor;
742     int16_t time_diff;
743     int rdft_bits, nb_freq;
744
745     for(rdft_bits=1; (1<<rdft_bits)<2*s->height; rdft_bits++)
746         ;
747     nb_freq= 1<<(rdft_bits-1);
748
749     /* compute display index : center on currently output samples */
750     channels = s->audio_st->codec->channels;
751     nb_display_channels = channels;
752     if (!s->paused) {
753         int data_used= s->show_audio==1 ? s->width : (2*nb_freq);
754         n = 2 * channels;
755         delay = audio_write_get_buf_size(s);
756         delay /= n;
757
758         /* to be more precise, we take into account the time spent since
759            the last buffer computation */
760         if (audio_callback_time) {
761             time_diff = av_gettime() - audio_callback_time;
762             delay -= (time_diff * s->audio_st->codec->sample_rate) / 1000000;
763         }
764
765         delay += 2*data_used;
766         if (delay < data_used)
767             delay = data_used;
768
769         i_start= x = compute_mod(s->sample_array_index - delay * channels, SAMPLE_ARRAY_SIZE);
770         if(s->show_audio==1){
771             h= INT_MIN;
772             for(i=0; i<1000; i+=channels){
773                 int idx= (SAMPLE_ARRAY_SIZE + x - i) % SAMPLE_ARRAY_SIZE;
774                 int a= s->sample_array[idx];
775                 int b= s->sample_array[(idx + 4*channels)%SAMPLE_ARRAY_SIZE];
776                 int c= s->sample_array[(idx + 5*channels)%SAMPLE_ARRAY_SIZE];
777                 int d= s->sample_array[(idx + 9*channels)%SAMPLE_ARRAY_SIZE];
778                 int score= a-d;
779                 if(h<score && (b^c)<0){
780                     h= score;
781                     i_start= idx;
782                 }
783             }
784         }
785
786         s->last_i_start = i_start;
787     } else {
788         i_start = s->last_i_start;
789     }
790
791     bgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0x00);
792     if(s->show_audio==1){
793         fill_rectangle(screen,
794                        s->xleft, s->ytop, s->width, s->height,
795                        bgcolor);
796
797         fgcolor = SDL_MapRGB(screen->format, 0xff, 0xff, 0xff);
798
799         /* total height for one channel */
800         h = s->height / nb_display_channels;
801         /* graph height / 2 */
802         h2 = (h * 9) / 20;
803         for(ch = 0;ch < nb_display_channels; ch++) {
804             i = i_start + ch;
805             y1 = s->ytop + ch * h + (h / 2); /* position of center line */
806             for(x = 0; x < s->width; x++) {
807                 y = (s->sample_array[i] * h2) >> 15;
808                 if (y < 0) {
809                     y = -y;
810                     ys = y1 - y;
811                 } else {
812                     ys = y1;
813                 }
814                 fill_rectangle(screen,
815                                s->xleft + x, ys, 1, y,
816                                fgcolor);
817                 i += channels;
818                 if (i >= SAMPLE_ARRAY_SIZE)
819                     i -= SAMPLE_ARRAY_SIZE;
820             }
821         }
822
823         fgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0xff);
824
825         for(ch = 1;ch < nb_display_channels; ch++) {
826             y = s->ytop + ch * h;
827             fill_rectangle(screen,
828                            s->xleft, y, s->width, 1,
829                            fgcolor);
830         }
831         SDL_UpdateRect(screen, s->xleft, s->ytop, s->width, s->height);
832     }else{
833         nb_display_channels= FFMIN(nb_display_channels, 2);
834         if(rdft_bits != s->rdft_bits){
835             av_rdft_end(s->rdft);
836             av_free(s->rdft_data);
837             s->rdft = av_rdft_init(rdft_bits, DFT_R2C);
838             s->rdft_bits= rdft_bits;
839             s->rdft_data= av_malloc(4*nb_freq*sizeof(*s->rdft_data));
840         }
841         {
842             FFTSample *data[2];
843             for(ch = 0;ch < nb_display_channels; ch++) {
844                 data[ch] = s->rdft_data + 2*nb_freq*ch;
845                 i = i_start + ch;
846                 for(x = 0; x < 2*nb_freq; x++) {
847                     double w= (x-nb_freq)*(1.0/nb_freq);
848                     data[ch][x]= s->sample_array[i]*(1.0-w*w);
849                     i += channels;
850                     if (i >= SAMPLE_ARRAY_SIZE)
851                         i -= SAMPLE_ARRAY_SIZE;
852                 }
853                 av_rdft_calc(s->rdft, data[ch]);
854             }
855             //least efficient way to do this, we should of course directly access it but its more than fast enough
856             for(y=0; y<s->height; y++){
857                 double w= 1/sqrt(nb_freq);
858                 int a= sqrt(w*sqrt(data[0][2*y+0]*data[0][2*y+0] + data[0][2*y+1]*data[0][2*y+1]));
859                 int b= (nb_display_channels == 2 ) ? sqrt(w*sqrt(data[1][2*y+0]*data[1][2*y+0]
860                        + data[1][2*y+1]*data[1][2*y+1])) : a;
861                 a= FFMIN(a,255);
862                 b= FFMIN(b,255);
863                 fgcolor = SDL_MapRGB(screen->format, a, b, (a+b)/2);
864
865                 fill_rectangle(screen,
866                             s->xpos, s->height-y, 1, 1,
867                             fgcolor);
868             }
869         }
870         SDL_UpdateRect(screen, s->xpos, s->ytop, 1, s->height);
871         s->xpos++;
872         if(s->xpos >= s->width)
873             s->xpos= s->xleft;
874     }
875 }
876
/* (Re)create the SDL output surface for this stream.  Size priority:
   fullscreen dimensions > user-forced dimensions > filter/stream dimensions
   > 640x480 fallback.  Returns 0 on success, -1 if SDL fails. */
static int video_open(VideoState *is){
    int flags = SDL_HWSURFACE|SDL_ASYNCBLIT|SDL_HWACCEL;
    int w,h;

    if(is_full_screen) flags |= SDL_FULLSCREEN;
    else               flags |= SDL_RESIZABLE;

    if (is_full_screen && fs_screen_width) {
        w = fs_screen_width;
        h = fs_screen_height;
    } else if(!is_full_screen && screen_width){
        w = screen_width;
        h = screen_height;
#if CONFIG_AVFILTER
    }else if (is->out_video_filter && is->out_video_filter->inputs[0]){
        /* avfilter build: take the size from the filter chain's output */
        w = is->out_video_filter->inputs[0]->w;
        h = is->out_video_filter->inputs[0]->h;
#else
    }else if (is->video_st && is->video_st->codec->width){
        w = is->video_st->codec->width;
        h = is->video_st->codec->height;
#endif
    } else {
        w = 640;
        h = 480;
    }
    /* nothing to do if the current surface already has the wanted size */
    if(screen && is->width == screen->w && screen->w == w
       && is->height== screen->h && screen->h == h)
        return 0;

#if defined(__APPLE__) && !SDL_VERSION_ATLEAST(1, 2, 14)
    /* setting bits_per_pixel = 0 or 32 causes blank video on OS X and older SDL */
    screen = SDL_SetVideoMode(w, h, 24, flags);
#else
    /* depth 0 = use the current display depth */
    screen = SDL_SetVideoMode(w, h, 0, flags);
#endif
    if (!screen) {
        fprintf(stderr, "SDL: could not set video mode - exiting\n");
        return -1;
    }
    if (!window_title)
        window_title = input_filename;
    SDL_WM_SetCaption(window_title, window_title);

    /* record the actual surface size SDL gave us */
    is->width = screen->w;
    is->height = screen->h;

    return 0;
}
926
927 /* display the current picture, if any */
928 static void video_display(VideoState *is)
929 {
930     if(!screen)
931         video_open(cur_stream);
932     if (is->audio_st && is->show_audio)
933         video_audio_display(is);
934     else if (is->video_st)
935         video_image_display(is);
936 }
937
938 static int refresh_thread(void *opaque)
939 {
940     VideoState *is= opaque;
941     while(!is->abort_request){
942         SDL_Event event;
943         event.type = FF_REFRESH_EVENT;
944         event.user.data1 = opaque;
945         if(!is->refresh){
946             is->refresh=1;
947             SDL_PushEvent(&event);
948         }
949         usleep(is->audio_st && is->show_audio ? rdftspeed*1000 : 5000); //FIXME ideally we should wait the correct time but SDLs event passing is so slow it would be silly
950     }
951     return 0;
952 }
953
954 /* get the current audio clock value */
955 static double get_audio_clock(VideoState *is)
956 {
957     double pts;
958     int hw_buf_size, bytes_per_sec;
959     pts = is->audio_clock;
960     hw_buf_size = audio_write_get_buf_size(is);
961     bytes_per_sec = 0;
962     if (is->audio_st) {
963         bytes_per_sec = is->audio_st->codec->sample_rate *
964             2 * is->audio_st->codec->channels;
965     }
966     if (bytes_per_sec)
967         pts -= (double)hw_buf_size / bytes_per_sec;
968     return pts;
969 }
970
971 /* get the current video clock value */
972 static double get_video_clock(VideoState *is)
973 {
974     if (is->paused) {
975         return is->video_current_pts;
976     } else {
977         return is->video_current_pts_drift + av_gettime() / 1000000.0;
978     }
979 }
980
981 /* get the current external clock value */
982 static double get_external_clock(VideoState *is)
983 {
984     int64_t ti;
985     ti = av_gettime();
986     return is->external_clock + ((ti - is->external_clock_time) * 1e-6);
987 }
988
989 /* get the current master clock value */
990 static double get_master_clock(VideoState *is)
991 {
992     double val;
993
994     if (is->av_sync_type == AV_SYNC_VIDEO_MASTER) {
995         if (is->video_st)
996             val = get_video_clock(is);
997         else
998             val = get_audio_clock(is);
999     } else if (is->av_sync_type == AV_SYNC_AUDIO_MASTER) {
1000         if (is->audio_st)
1001             val = get_audio_clock(is);
1002         else
1003             val = get_video_clock(is);
1004     } else {
1005         val = get_external_clock(is);
1006     }
1007     return val;
1008 }
1009
1010 /* seek in the stream */
1011 static void stream_seek(VideoState *is, int64_t pos, int64_t rel, int seek_by_bytes)
1012 {
1013     if (!is->seek_req) {
1014         is->seek_pos = pos;
1015         is->seek_rel = rel;
1016         is->seek_flags &= ~AVSEEK_FLAG_BYTE;
1017         if (seek_by_bytes)
1018             is->seek_flags |= AVSEEK_FLAG_BYTE;
1019         is->seek_req = 1;
1020     }
1021 }
1022
1023 /* pause or resume the video */
1024 static void stream_pause(VideoState *is)
1025 {
1026     if (is->paused) {
1027         is->frame_timer += av_gettime() / 1000000.0 + is->video_current_pts_drift - is->video_current_pts;
1028         if(is->read_pause_return != AVERROR(ENOSYS)){
1029             is->video_current_pts = is->video_current_pts_drift + av_gettime() / 1000000.0;
1030         }
1031         is->video_current_pts_drift = is->video_current_pts - av_gettime() / 1000000.0;
1032     }
1033     is->paused = !is->paused;
1034 }
1035
1036 static double compute_target_time(double frame_current_pts, VideoState *is)
1037 {
1038     double delay, sync_threshold, diff;
1039
1040     /* compute nominal delay */
1041     delay = frame_current_pts - is->frame_last_pts;
1042     if (delay <= 0 || delay >= 10.0) {
1043         /* if incorrect delay, use previous one */
1044         delay = is->frame_last_delay;
1045     } else {
1046         is->frame_last_delay = delay;
1047     }
1048     is->frame_last_pts = frame_current_pts;
1049
1050     /* update delay to follow master synchronisation source */
1051     if (((is->av_sync_type == AV_SYNC_AUDIO_MASTER && is->audio_st) ||
1052          is->av_sync_type == AV_SYNC_EXTERNAL_CLOCK)) {
1053         /* if video is slave, we try to correct big delays by
1054            duplicating or deleting a frame */
1055         diff = get_video_clock(is) - get_master_clock(is);
1056
1057         /* skip or repeat frame. We take into account the
1058            delay to compute the threshold. I still don't know
1059            if it is the best guess */
1060         sync_threshold = FFMAX(AV_SYNC_THRESHOLD, delay);
1061         if (fabs(diff) < AV_NOSYNC_THRESHOLD) {
1062             if (diff <= -sync_threshold)
1063                 delay = 0;
1064             else if (diff >= sync_threshold)
1065                 delay = 2 * delay;
1066         }
1067     }
1068     is->frame_timer += delay;
1069
1070     av_dlog(NULL, "video: delay=%0.3f pts=%0.3f A-V=%f\n",
1071             delay, frame_current_pts, -diff);
1072
1073     return is->frame_timer;
1074 }
1075
/* called to display each frame; dequeues due pictures, drops late frames,
   expires subtitle overlays and refreshes the status line */
static void video_refresh_timer(void *opaque)
{
    VideoState *is = opaque;
    VideoPicture *vp;

    SubPicture *sp, *sp2;

    if (is->video_st) {
retry:
        if (is->pictq_size == 0) {
            //nothing to do, no picture to display in the que
        } else {
            double time= av_gettime()/1000000.0;
            double next_target;
            /* dequeue the picture */
            vp = &is->pictq[is->pictq_rindex];

            /* not yet time to show this frame; leave it queued */
            if(time < vp->target_clock)
                return;
            /* update current video pts */
            is->video_current_pts = vp->pts;
            is->video_current_pts_drift = is->video_current_pts - time;
            is->video_current_pos = vp->pos;
            /* estimate when the following frame is due */
            if(is->pictq_size > 1){
                VideoPicture *nextvp= &is->pictq[(is->pictq_rindex+1)%VIDEO_PICTURE_QUEUE_SIZE];
                assert(nextvp->target_clock >= vp->target_clock);
                next_target= nextvp->target_clock;
            }else{
                next_target= vp->target_clock + is->video_clock - vp->pts; //FIXME pass durations cleanly
            }
            /* we are already late: raise the skip ratio and possibly
               drop this frame entirely */
            if(framedrop && time > next_target){
                is->skip_frames *= 1.0 + FRAME_SKIP_FACTOR;
                if(is->pictq_size > 1 || time > next_target + 0.5){
                    /* update queue size and signal for next picture */
                    if (++is->pictq_rindex == VIDEO_PICTURE_QUEUE_SIZE)
                        is->pictq_rindex = 0;

                    SDL_LockMutex(is->pictq_mutex);
                    is->pictq_size--;
                    SDL_CondSignal(is->pictq_cond);
                    SDL_UnlockMutex(is->pictq_mutex);
                    goto retry;
                }
            }

            if(is->subtitle_st) {
                if (is->subtitle_stream_changed) {
                    /* subtitle stream switched: flush every queued subpicture */
                    SDL_LockMutex(is->subpq_mutex);

                    while (is->subpq_size) {
                        free_subpicture(&is->subpq[is->subpq_rindex]);

                        /* update queue size and signal for next picture */
                        if (++is->subpq_rindex == SUBPICTURE_QUEUE_SIZE)
                            is->subpq_rindex = 0;

                        is->subpq_size--;
                    }
                    is->subtitle_stream_changed = 0;

                    SDL_CondSignal(is->subpq_cond);
                    SDL_UnlockMutex(is->subpq_mutex);
                } else {
                    if (is->subpq_size > 0) {
                        sp = &is->subpq[is->subpq_rindex];

                        if (is->subpq_size > 1)
                            sp2 = &is->subpq[(is->subpq_rindex + 1) % SUBPICTURE_QUEUE_SIZE];
                        else
                            sp2 = NULL;

                        /* free the head subpicture once its display window has
                           passed, or once the next one is due to start */
                        if ((is->video_current_pts > (sp->pts + ((float) sp->sub.end_display_time / 1000)))
                                || (sp2 && is->video_current_pts > (sp2->pts + ((float) sp2->sub.start_display_time / 1000))))
                        {
                            free_subpicture(sp);

                            /* update queue size and signal for next picture */
                            if (++is->subpq_rindex == SUBPICTURE_QUEUE_SIZE)
                                is->subpq_rindex = 0;

                            SDL_LockMutex(is->subpq_mutex);
                            is->subpq_size--;
                            SDL_CondSignal(is->subpq_cond);
                            SDL_UnlockMutex(is->subpq_mutex);
                        }
                    }
                }
            }

            /* display picture */
            if (!display_disable)
                video_display(is);

            /* update queue size and signal for next picture */
            if (++is->pictq_rindex == VIDEO_PICTURE_QUEUE_SIZE)
                is->pictq_rindex = 0;

            SDL_LockMutex(is->pictq_mutex);
            is->pictq_size--;
            SDL_CondSignal(is->pictq_cond);
            SDL_UnlockMutex(is->pictq_mutex);
        }
    } else if (is->audio_st) {
        /* draw the next audio frame */

        /* if only audio stream, then display the audio bars (better
           than nothing, just to test the implementation */

        /* display picture */
        if (!display_disable)
            video_display(is);
    }
    if (show_status) {
        static int64_t last_time;
        int64_t cur_time;
        int aqsize, vqsize, sqsize;
        double av_diff;

        cur_time = av_gettime();
        /* refresh the status line at most every 30 ms */
        if (!last_time || (cur_time - last_time) >= 30000) {
            aqsize = 0;
            vqsize = 0;
            sqsize = 0;
            if (is->audio_st)
                aqsize = is->audioq.size;
            if (is->video_st)
                vqsize = is->videoq.size;
            if (is->subtitle_st)
                sqsize = is->subtitleq.size;
            av_diff = 0;
            if (is->audio_st && is->video_st)
                av_diff = get_audio_clock(is) - get_video_clock(is);
            printf("%7.2f A-V:%7.3f s:%3.1f aq=%5dKB vq=%5dKB sq=%5dB f=%"PRId64"/%"PRId64"   \r",
                   get_master_clock(is), av_diff, FFMAX(is->skip_frames-1, 0), aqsize / 1024, vqsize / 1024, sqsize, is->pts_ctx.num_faulty_dts, is->pts_ctx.num_faulty_pts);
            fflush(stdout);
            last_time = cur_time;
        }
    }
}
1216
1217 static void stream_close(VideoState *is)
1218 {
1219     VideoPicture *vp;
1220     int i;
1221     /* XXX: use a special url_shutdown call to abort parse cleanly */
1222     is->abort_request = 1;
1223     SDL_WaitThread(is->parse_tid, NULL);
1224     SDL_WaitThread(is->refresh_tid, NULL);
1225
1226     /* free all pictures */
1227     for(i=0;i<VIDEO_PICTURE_QUEUE_SIZE; i++) {
1228         vp = &is->pictq[i];
1229 #if CONFIG_AVFILTER
1230         if (vp->picref) {
1231             avfilter_unref_buffer(vp->picref);
1232             vp->picref = NULL;
1233         }
1234 #endif
1235         if (vp->bmp) {
1236             SDL_FreeYUVOverlay(vp->bmp);
1237             vp->bmp = NULL;
1238         }
1239     }
1240     SDL_DestroyMutex(is->pictq_mutex);
1241     SDL_DestroyCond(is->pictq_cond);
1242     SDL_DestroyMutex(is->subpq_mutex);
1243     SDL_DestroyCond(is->subpq_cond);
1244 #if !CONFIG_AVFILTER
1245     if (is->img_convert_ctx)
1246         sws_freeContext(is->img_convert_ctx);
1247 #endif
1248     av_free(is);
1249 }
1250
1251 static void do_exit(void)
1252 {
1253     if (cur_stream) {
1254         stream_close(cur_stream);
1255         cur_stream = NULL;
1256     }
1257     uninit_opts();
1258 #if CONFIG_AVFILTER
1259     avfilter_uninit();
1260 #endif
1261     avformat_network_deinit();
1262     if (show_status)
1263         printf("\n");
1264     SDL_Quit();
1265     av_log(NULL, AV_LOG_QUIET, "");
1266     exit(0);
1267 }
1268
1269 /* allocate a picture (needs to do that in main thread to avoid
1270    potential locking problems */
1271 static void alloc_picture(void *opaque)
1272 {
1273     VideoState *is = opaque;
1274     VideoPicture *vp;
1275
1276     vp = &is->pictq[is->pictq_windex];
1277
1278     if (vp->bmp)
1279         SDL_FreeYUVOverlay(vp->bmp);
1280
1281 #if CONFIG_AVFILTER
1282     if (vp->picref)
1283         avfilter_unref_buffer(vp->picref);
1284     vp->picref = NULL;
1285
1286     vp->width   = is->out_video_filter->inputs[0]->w;
1287     vp->height  = is->out_video_filter->inputs[0]->h;
1288     vp->pix_fmt = is->out_video_filter->inputs[0]->format;
1289 #else
1290     vp->width   = is->video_st->codec->width;
1291     vp->height  = is->video_st->codec->height;
1292     vp->pix_fmt = is->video_st->codec->pix_fmt;
1293 #endif
1294
1295     vp->bmp = SDL_CreateYUVOverlay(vp->width, vp->height,
1296                                    SDL_YV12_OVERLAY,
1297                                    screen);
1298     if (!vp->bmp || vp->bmp->pitches[0] < vp->width) {
1299         /* SDL allocates a buffer smaller than requested if the video
1300          * overlay hardware is unable to support the requested size. */
1301         fprintf(stderr, "Error: the video system does not support an image\n"
1302                         "size of %dx%d pixels. Try using -lowres or -vf \"scale=w:h\"\n"
1303                         "to reduce the image size.\n", vp->width, vp->height );
1304         do_exit();
1305     }
1306
1307     SDL_LockMutex(is->pictq_mutex);
1308     vp->allocated = 1;
1309     SDL_CondSignal(is->pictq_cond);
1310     SDL_UnlockMutex(is->pictq_mutex);
1311 }
1312
/**
 * Queue a decoded frame into the picture queue for later display.
 *
 * Blocks until a queue slot is free (or the queue is aborted), converts the
 * frame into the slot's SDL YUV overlay and stamps it with its target
 * display time.
 *
 * @param pts the dts of the pkt / pts of the frame and guessed if not known
 * @return 0 on success, -1 if the video queue was aborted
 */
static int queue_picture(VideoState *is, AVFrame *src_frame, double pts, int64_t pos)
{
    VideoPicture *vp;
#if CONFIG_AVFILTER
    AVPicture pict_src;
#else
    int dst_pix_fmt = PIX_FMT_YUV420P;
#endif
    /* wait until we have space to put a new picture */
    SDL_LockMutex(is->pictq_mutex);

    /* queue full and no refresh pending: raise the skip ratio so the
       decoder drops more frames and the queue can drain */
    if(is->pictq_size>=VIDEO_PICTURE_QUEUE_SIZE && !is->refresh)
        is->skip_frames= FFMAX(1.0 - FRAME_SKIP_FACTOR, is->skip_frames * (1.0-FRAME_SKIP_FACTOR));

    while (is->pictq_size >= VIDEO_PICTURE_QUEUE_SIZE &&
           !is->videoq.abort_request) {
        SDL_CondWait(is->pictq_cond, is->pictq_mutex);
    }
    SDL_UnlockMutex(is->pictq_mutex);

    if (is->videoq.abort_request)
        return -1;

    vp = &is->pictq[is->pictq_windex];

    /* alloc or resize hardware picture buffer */
    if (!vp->bmp || vp->reallocate ||
#if CONFIG_AVFILTER
        vp->width  != is->out_video_filter->inputs[0]->w ||
        vp->height != is->out_video_filter->inputs[0]->h) {
#else
        vp->width != is->video_st->codec->width ||
        vp->height != is->video_st->codec->height) {
#endif
        SDL_Event event;

        vp->allocated  = 0;
        vp->reallocate = 0;

        /* the allocation must be done in the main thread to avoid
           locking problems */
        event.type = FF_ALLOC_EVENT;
        event.user.data1 = is;
        SDL_PushEvent(&event);

        /* wait until the picture is allocated */
        SDL_LockMutex(is->pictq_mutex);
        while (!vp->allocated && !is->videoq.abort_request) {
            SDL_CondWait(is->pictq_cond, is->pictq_mutex);
        }
        SDL_UnlockMutex(is->pictq_mutex);

        if (is->videoq.abort_request)
            return -1;
    }

    /* if the frame is not skipped, then display it */
    if (vp->bmp) {
        AVPicture pict;
#if CONFIG_AVFILTER
        if(vp->picref)
            avfilter_unref_buffer(vp->picref);
        vp->picref = src_frame->opaque;
#endif

        /* get a pointer on the bitmap */
        SDL_LockYUVOverlay (vp->bmp);

        /* the SDL YV12 overlay stores the V plane before U, hence the
           swapped 1/2 indices relative to AVPicture */
        memset(&pict,0,sizeof(AVPicture));
        pict.data[0] = vp->bmp->pixels[0];
        pict.data[1] = vp->bmp->pixels[2];
        pict.data[2] = vp->bmp->pixels[1];

        pict.linesize[0] = vp->bmp->pitches[0];
        pict.linesize[1] = vp->bmp->pitches[2];
        pict.linesize[2] = vp->bmp->pitches[1];

#if CONFIG_AVFILTER
        pict_src.data[0] = src_frame->data[0];
        pict_src.data[1] = src_frame->data[1];
        pict_src.data[2] = src_frame->data[2];

        pict_src.linesize[0] = src_frame->linesize[0];
        pict_src.linesize[1] = src_frame->linesize[1];
        pict_src.linesize[2] = src_frame->linesize[2];

        //FIXME use direct rendering
        av_picture_copy(&pict, &pict_src,
                        vp->pix_fmt, vp->width, vp->height);
#else
        sws_flags = av_get_int(sws_opts, "sws_flags", NULL);
        is->img_convert_ctx = sws_getCachedContext(is->img_convert_ctx,
            vp->width, vp->height, vp->pix_fmt, vp->width, vp->height,
            dst_pix_fmt, sws_flags, NULL, NULL, NULL);
        if (is->img_convert_ctx == NULL) {
            fprintf(stderr, "Cannot initialize the conversion context\n");
            exit(1);
        }
        sws_scale(is->img_convert_ctx, src_frame->data, src_frame->linesize,
                  0, vp->height, pict.data, pict.linesize);
#endif
        /* update the bitmap content */
        SDL_UnlockYUVOverlay(vp->bmp);

        vp->pts = pts;
        vp->pos = pos;

        /* now we can update the picture count */
        if (++is->pictq_windex == VIDEO_PICTURE_QUEUE_SIZE)
            is->pictq_windex = 0;
        SDL_LockMutex(is->pictq_mutex);
        vp->target_clock= compute_target_time(vp->pts, is);

        is->pictq_size++;
        SDL_UnlockMutex(is->pictq_mutex);
    }
    return 0;
}
1435
1436 /**
1437  * compute the exact PTS for the picture if it is omitted in the stream
1438  * @param pts1 the dts of the pkt / pts of the frame
1439  */
1440 static int output_picture2(VideoState *is, AVFrame *src_frame, double pts1, int64_t pos)
1441 {
1442     double frame_delay, pts;
1443
1444     pts = pts1;
1445
1446     if (pts != 0) {
1447         /* update video clock with pts, if present */
1448         is->video_clock = pts;
1449     } else {
1450         pts = is->video_clock;
1451     }
1452     /* update video clock for next frame */
1453     frame_delay = av_q2d(is->video_st->codec->time_base);
1454     /* for MPEG2, the frame can be repeated, so we update the
1455        clock accordingly */
1456     frame_delay += src_frame->repeat_pict * (frame_delay * 0.5);
1457     is->video_clock += frame_delay;
1458
1459     return queue_picture(is, src_frame, pts, pos);
1460 }
1461
/* Fetch the next packet from the video queue and decode it.
 *
 * Returns -1 if the queue was aborted, 1 if a frame was decoded and should
 * be displayed, and 0 otherwise (flush packet, no complete frame yet, or
 * frame dropped by the skip-frames mechanism).  On return *pts holds the
 * frame timestamp in stream time_base units (0 if unknown). */
static int get_video_frame(VideoState *is, AVFrame *frame, int64_t *pts, AVPacket *pkt)
{
    int got_picture, i;

    if (packet_queue_get(&is->videoq, pkt, 1) < 0)
        return -1;

    if (pkt->data == flush_pkt.data) {
        /* seek flush: reset decoder state and the timing bookkeeping */
        avcodec_flush_buffers(is->video_st->codec);

        SDL_LockMutex(is->pictq_mutex);
        //Make sure there are no long delay timers (ideally we should just flush the que but thats harder)
        for (i = 0; i < VIDEO_PICTURE_QUEUE_SIZE; i++) {
            is->pictq[i].target_clock= 0;
        }
        /* wait for the display thread to drain the picture queue */
        while (is->pictq_size && !is->videoq.abort_request) {
            SDL_CondWait(is->pictq_cond, is->pictq_mutex);
        }
        is->video_current_pos = -1;
        SDL_UnlockMutex(is->pictq_mutex);

        init_pts_correction(&is->pts_ctx);
        is->frame_last_pts = AV_NOPTS_VALUE;
        is->frame_last_delay = 0;
        is->frame_timer = (double)av_gettime() / 1000000.0;
        is->skip_frames = 1;
        is->skip_frames_index = 0;
        return 0;
    }

    avcodec_decode_video2(is->video_st->codec, frame, &got_picture, pkt);

    if (got_picture) {
        /* pick the timestamp source: heuristic correction, decoder pts,
           or packet dts, depending on the -drp option */
        if (decoder_reorder_pts == -1) {
            *pts = guess_correct_pts(&is->pts_ctx, frame->pkt_pts, frame->pkt_dts);
        } else if (decoder_reorder_pts) {
            *pts = frame->pkt_pts;
        } else {
            *pts = frame->pkt_dts;
        }

        if (*pts == AV_NOPTS_VALUE) {
            *pts = 0;
        }

        /* frame skipping: only one frame per is->skip_frames decoded
           frames survives (fractional ratio accumulated in the index) */
        is->skip_frames_index += 1;
        if(is->skip_frames_index >= is->skip_frames){
            is->skip_frames_index -= FFMAX(is->skip_frames, 1.0);
            return 1;
        }

    }
    return 0;
}
1516
1517 #if CONFIG_AVFILTER
/* Private state of the avplay input source filter. */
typedef struct {
    VideoState *is;   /* owning player state */
    AVFrame *frame;   /* scratch frame, allocated in input_init() */
    int use_dr1;      /* nonzero when the decoder supports direct rendering */
} FilterPriv;
1523
/* get_buffer callback: hand the decoder a buffer allocated by the filter
   graph so decoded frames can be passed downstream without a copy (DR1). */
static int input_get_buffer(AVCodecContext *codec, AVFrame *pic)
{
    AVFilterContext *ctx = codec->opaque;
    AVFilterBufferRef  *ref;
    int perms = AV_PERM_WRITE;
    int i, w, h, stride[4];
    unsigned edge;
    int pixel_size;

    if (codec->codec->capabilities & CODEC_CAP_NEG_LINESIZES)
        perms |= AV_PERM_NEG_LINESIZES;

    /* translate the decoder's buffer hints into filter buffer permissions */
    if(pic->buffer_hints & FF_BUFFER_HINTS_VALID) {
        if(pic->buffer_hints & FF_BUFFER_HINTS_READABLE) perms |= AV_PERM_READ;
        if(pic->buffer_hints & FF_BUFFER_HINTS_PRESERVE) perms |= AV_PERM_PRESERVE;
        if(pic->buffer_hints & FF_BUFFER_HINTS_REUSABLE) perms |= AV_PERM_REUSE2;
    }
    if(pic->reference) perms |= AV_PERM_READ | AV_PERM_PRESERVE;

    /* allocate with alignment padding plus an edge border on every side */
    w = codec->width;
    h = codec->height;
    avcodec_align_dimensions2(codec, &w, &h, stride);
    edge = codec->flags & CODEC_FLAG_EMU_EDGE ? 0 : avcodec_get_edge_width();
    w += edge << 1;
    h += edge << 1;

    if(!(ref = avfilter_get_video_buffer(ctx->outputs[0], perms, w, h)))
        return -1;

    pixel_size = av_pix_fmt_descriptors[ref->format].comp[0].step_minus1+1;
    ref->video->w = codec->width;
    ref->video->h = codec->height;
    for(i = 0; i < 4; i ++) {
        unsigned hshift = (i == 1 || i == 2) ? av_pix_fmt_descriptors[ref->format].log2_chroma_w : 0;
        unsigned vshift = (i == 1 || i == 2) ? av_pix_fmt_descriptors[ref->format].log2_chroma_h : 0;

        if (ref->data[i]) {
            /* point the decoder inside the edge border; offsets are scaled
               down for subsampled chroma planes */
            ref->data[i]    += ((edge * pixel_size) >> hshift) + ((edge * ref->linesize[i]) >> vshift);
        }
        pic->data[i]     = ref->data[i];
        pic->linesize[i] = ref->linesize[i];
    }
    pic->opaque = ref;
    pic->age    = INT_MAX;
    pic->type   = FF_BUFFER_TYPE_USER;
    pic->reordered_opaque = codec->reordered_opaque;
    if(codec->pkt) pic->pkt_pts = codec->pkt->pts;
    else           pic->pkt_pts = AV_NOPTS_VALUE;
    return 0;
}
1574
1575 static void input_release_buffer(AVCodecContext *codec, AVFrame *pic)
1576 {
1577     memset(pic->data, 0, sizeof(pic->data));
1578     avfilter_unref_buffer(pic->opaque);
1579 }
1580
1581 static int input_reget_buffer(AVCodecContext *codec, AVFrame *pic)
1582 {
1583     AVFilterBufferRef *ref = pic->opaque;
1584
1585     if (pic->data[0] == NULL) {
1586         pic->buffer_hints |= FF_BUFFER_HINTS_READABLE;
1587         return codec->get_buffer(codec, pic);
1588     }
1589
1590     if ((codec->width != ref->video->w) || (codec->height != ref->video->h) ||
1591         (codec->pix_fmt != ref->format)) {
1592         av_log(codec, AV_LOG_ERROR, "Picture properties changed.\n");
1593         return -1;
1594     }
1595
1596     pic->reordered_opaque = codec->reordered_opaque;
1597     if(codec->pkt) pic->pkt_pts = codec->pkt->pts;
1598     else           pic->pkt_pts = AV_NOPTS_VALUE;
1599     return 0;
1600 }
1601
1602 static int input_init(AVFilterContext *ctx, const char *args, void *opaque)
1603 {
1604     FilterPriv *priv = ctx->priv;
1605     AVCodecContext *codec;
1606     if(!opaque) return -1;
1607
1608     priv->is = opaque;
1609     codec    = priv->is->video_st->codec;
1610     codec->opaque = ctx;
1611     if(codec->codec->capabilities & CODEC_CAP_DR1) {
1612         priv->use_dr1 = 1;
1613         codec->get_buffer     = input_get_buffer;
1614         codec->release_buffer = input_release_buffer;
1615         codec->reget_buffer   = input_reget_buffer;
1616         codec->thread_safe_callbacks = 1;
1617     }
1618
1619     priv->frame = avcodec_alloc_frame();
1620
1621     return 0;
1622 }
1623
1624 static void input_uninit(AVFilterContext *ctx)
1625 {
1626     FilterPriv *priv = ctx->priv;
1627     av_free(priv->frame);
1628 }
1629
1630 static int input_request_frame(AVFilterLink *link)
1631 {
1632     FilterPriv *priv = link->src->priv;
1633     AVFilterBufferRef *picref;
1634     int64_t pts = 0;
1635     AVPacket pkt;
1636     int ret;
1637
1638     while (!(ret = get_video_frame(priv->is, priv->frame, &pts, &pkt)))
1639         av_free_packet(&pkt);
1640     if (ret < 0)
1641         return -1;
1642
1643     if(priv->use_dr1) {
1644         picref = avfilter_ref_buffer(priv->frame->opaque, ~0);
1645     } else {
1646         picref = avfilter_get_video_buffer(link, AV_PERM_WRITE, link->w, link->h);
1647         av_image_copy(picref->data, picref->linesize,
1648                       priv->frame->data, priv->frame->linesize,
1649                       picref->format, link->w, link->h);
1650     }
1651     av_free_packet(&pkt);
1652
1653     picref->pts = pts;
1654     picref->pos = pkt.pos;
1655     picref->video->pixel_aspect = priv->is->video_st->codec->sample_aspect_ratio;
1656     avfilter_start_frame(link, picref);
1657     avfilter_draw_slice(link, 0, link->h, 1);
1658     avfilter_end_frame(link);
1659
1660     return 0;
1661 }
1662
1663 static int input_query_formats(AVFilterContext *ctx)
1664 {
1665     FilterPriv *priv = ctx->priv;
1666     enum PixelFormat pix_fmts[] = {
1667         priv->is->video_st->codec->pix_fmt, PIX_FMT_NONE
1668     };
1669
1670     avfilter_set_common_formats(ctx, avfilter_make_format_list(pix_fmts));
1671     return 0;
1672 }
1673
1674 static int input_config_props(AVFilterLink *link)
1675 {
1676     FilterPriv *priv  = link->src->priv;
1677     AVCodecContext *c = priv->is->video_st->codec;
1678
1679     link->w = c->width;
1680     link->h = c->height;
1681     link->time_base = priv->is->video_st->time_base;
1682
1683     return 0;
1684 }
1685
/* Source filter that injects frames decoded by the player into the filter
   graph: no inputs, one video output driven by input_request_frame(). */
static AVFilter input_filter =
{
    .name      = "avplay_input",

    .priv_size = sizeof(FilterPriv),

    .init      = input_init,
    .uninit    = input_uninit,

    .query_formats = input_query_formats,

    .inputs    = (AVFilterPad[]) {{ .name = NULL }},
    .outputs   = (AVFilterPad[]) {{ .name = "default",
                                    .type = AVMEDIA_TYPE_VIDEO,
                                    .request_frame = input_request_frame,
                                    .config_props  = input_config_props, },
                                  { .name = NULL }},
};
1704
1705 static int configure_video_filters(AVFilterGraph *graph, VideoState *is, const char *vfilters)
1706 {
1707     char sws_flags_str[128];
1708     int ret;
1709     FFSinkContext ffsink_ctx = { .pix_fmt = PIX_FMT_YUV420P };
1710     AVFilterContext *filt_src = NULL, *filt_out = NULL;
1711     snprintf(sws_flags_str, sizeof(sws_flags_str), "flags=%d", sws_flags);
1712     graph->scale_sws_opts = av_strdup(sws_flags_str);
1713
1714     if ((ret = avfilter_graph_create_filter(&filt_src, &input_filter, "src",
1715                                             NULL, is, graph)) < 0)
1716         return ret;
1717     if ((ret = avfilter_graph_create_filter(&filt_out, &ffsink, "out",
1718                                             NULL, &ffsink_ctx, graph)) < 0)
1719         return ret;
1720
1721     if(vfilters) {
1722         AVFilterInOut *outputs = av_malloc(sizeof(AVFilterInOut));
1723         AVFilterInOut *inputs  = av_malloc(sizeof(AVFilterInOut));
1724
1725         outputs->name    = av_strdup("in");
1726         outputs->filter_ctx = filt_src;
1727         outputs->pad_idx = 0;
1728         outputs->next    = NULL;
1729
1730         inputs->name    = av_strdup("out");
1731         inputs->filter_ctx = filt_out;
1732         inputs->pad_idx = 0;
1733         inputs->next    = NULL;
1734
1735         if ((ret = avfilter_graph_parse(graph, vfilters, inputs, outputs, NULL)) < 0)
1736             return ret;
1737         av_freep(&vfilters);
1738     } else {
1739         if ((ret = avfilter_link(filt_src, 0, filt_out, 0)) < 0)
1740             return ret;
1741     }
1742
1743     if ((ret = avfilter_graph_config(graph, NULL)) < 0)
1744         return ret;
1745
1746     is->out_video_filter = filt_out;
1747
1748     return ret;
1749 }
1750
1751 #endif  /* CONFIG_AVFILTER */
1752
/* Video decoding thread: pulls frames (optionally through the filter
   graph), converts their timestamps to seconds and queues them for
   display. */
static int video_thread(void *arg)
{
    VideoState *is = arg;
    AVFrame *frame= avcodec_alloc_frame();
    int64_t pts_int;
    double pts;
    int ret;

#if CONFIG_AVFILTER
    AVFilterGraph *graph = avfilter_graph_alloc();
    AVFilterContext *filt_out = NULL;
    int64_t pos;
    /* remembered dimensions, to detect mid-stream size changes */
    int last_w = is->video_st->codec->width;
    int last_h = is->video_st->codec->height;

    if ((ret = configure_video_filters(graph, is, vfilters)) < 0)
        goto the_end;
    filt_out = is->out_video_filter;
#endif

    for(;;) {
#if !CONFIG_AVFILTER
        AVPacket pkt;
#else
        AVFilterBufferRef *picref;
        AVRational tb;
#endif
        while (is->paused && !is->videoq.abort_request)
            SDL_Delay(10);
#if CONFIG_AVFILTER
        /* rebuild the filter graph when the coded frame size changes */
        if (   last_w != is->video_st->codec->width
            || last_h != is->video_st->codec->height) {
            av_dlog(NULL, "Changing size %dx%d -> %dx%d\n", last_w, last_h,
                    is->video_st->codec->width, is->video_st->codec->height);
            avfilter_graph_free(&graph);
            graph = avfilter_graph_alloc();
            if ((ret = configure_video_filters(graph, is, vfilters)) < 0)
                goto the_end;
            filt_out = is->out_video_filter;
            last_w = is->video_st->codec->width;
            last_h = is->video_st->codec->height;
        }
        ret = get_filtered_video_frame(filt_out, frame, &picref, &tb);
        if (picref) {
            pts_int = picref->pts;
            pos     = picref->pos;
            frame->opaque = picref;
        }

        /* rescale the pts from the filter link time base to the stream's */
        /* NOTE(review): tb is read here even when the call above failed;
           verify get_filtered_video_frame always sets it before returning */
        if (av_cmp_q(tb, is->video_st->time_base)) {
            av_unused int64_t pts1 = pts_int;
            pts_int = av_rescale_q(pts_int, tb, is->video_st->time_base);
            av_dlog(NULL, "video_thread(): "
                    "tb:%d/%d pts:%"PRId64" -> tb:%d/%d pts:%"PRId64"\n",
                    tb.num, tb.den, pts1,
                    is->video_st->time_base.num, is->video_st->time_base.den, pts_int);
        }
#else
        ret = get_video_frame(is, frame, &pts_int, &pkt);
#endif

        if (ret < 0) goto the_end;

        /* ret == 0: no displayable frame this round (flushed or skipped) */
        if (!ret)
            continue;

        /* convert stream time_base units to seconds */
        pts = pts_int*av_q2d(is->video_st->time_base);

#if CONFIG_AVFILTER
        ret = output_picture2(is, frame, pts, pos);
#else
        ret = output_picture2(is, frame, pts,  pkt.pos);
        av_free_packet(&pkt);
#endif
        if (ret < 0)
            goto the_end;

        /* in single-step mode, pause again after each displayed frame */
        if (step)
            if (cur_stream)
                stream_pause(cur_stream);
    }
 the_end:
#if CONFIG_AVFILTER
    avfilter_graph_free(&graph);
#endif
    av_free(frame);
    return 0;
}
1841
/* Subtitle decoder thread: pulls packets from the subtitle queue, decodes
 * them, converts palettized RGBA entries to YUVA, and stores the result in
 * the bounded subpicture queue consumed by the video refresh code.
 * Returns 0 when the queue signals abort. */
static int subtitle_thread(void *arg)
{
    VideoState *is = arg;
    SubPicture *sp;
    AVPacket pkt1, *pkt = &pkt1;
    int got_subtitle;
    double pts;
    int i, j;
    int r, g, b, y, u, v, a;

    for(;;) {
        /* busy-wait (10 ms steps) while playback is paused */
        while (is->paused && !is->subtitleq.abort_request) {
            SDL_Delay(10);
        }
        /* blocking get; negative return means the queue was aborted */
        if (packet_queue_get(&is->subtitleq, pkt, 1) < 0)
            break;

        /* sentinel packet inserted on seek: reset decoder state */
        if(pkt->data == flush_pkt.data){
            avcodec_flush_buffers(is->subtitle_st->codec);
            continue;
        }
        /* wait for a free slot in the subpicture queue */
        SDL_LockMutex(is->subpq_mutex);
        while (is->subpq_size >= SUBPICTURE_QUEUE_SIZE &&
               !is->subtitleq.abort_request) {
            SDL_CondWait(is->subpq_cond, is->subpq_mutex);
        }
        SDL_UnlockMutex(is->subpq_mutex);

        if (is->subtitleq.abort_request)
            return 0;

        /* write slot; only this thread advances subpq_windex */
        sp = &is->subpq[is->subpq_windex];

       /* NOTE: ipts is the PTS of the _first_ picture beginning in
           this packet, if any */
        pts = 0;
        if (pkt->pts != AV_NOPTS_VALUE)
            pts = av_q2d(is->subtitle_st->time_base)*pkt->pts;

        /* return value (bytes consumed / error) is ignored; got_subtitle
           alone gates use of sp->sub below */
        avcodec_decode_subtitle2(is->subtitle_st->codec, &sp->sub,
                                 &got_subtitle, pkt);

        /* format == 0 means bitmap subtitles with a palette in pict.data[1] */
        if (got_subtitle && sp->sub.format == 0) {
            sp->pts = pts;

            /* convert each palette entry from RGBA to YUVA in place */
            for (i = 0; i < sp->sub.num_rects; i++)
            {
                for (j = 0; j < sp->sub.rects[i]->nb_colors; j++)
                {
                    RGBA_IN(r, g, b, a, (uint32_t*)sp->sub.rects[i]->pict.data[1] + j);
                    y = RGB_TO_Y_CCIR(r, g, b);
                    u = RGB_TO_U_CCIR(r, g, b, 0);
                    v = RGB_TO_V_CCIR(r, g, b, 0);
                    YUVA_OUT((uint32_t*)sp->sub.rects[i]->pict.data[1] + j, y, u, v, a);
                }
            }

            /* now we can update the picture count */
            if (++is->subpq_windex == SUBPICTURE_QUEUE_SIZE)
                is->subpq_windex = 0;
            SDL_LockMutex(is->subpq_mutex);
            is->subpq_size++;
            SDL_UnlockMutex(is->subpq_mutex);
        }
        av_free_packet(pkt);
    }
    return 0;
}
1910
1911 /* copy samples for viewing in editor window */
1912 static void update_sample_display(VideoState *is, short *samples, int samples_size)
1913 {
1914     int size, len;
1915
1916     size = samples_size / sizeof(short);
1917     while (size > 0) {
1918         len = SAMPLE_ARRAY_SIZE - is->sample_array_index;
1919         if (len > size)
1920             len = size;
1921         memcpy(is->sample_array + is->sample_array_index, samples, len * sizeof(short));
1922         samples += len;
1923         is->sample_array_index += len;
1924         if (is->sample_array_index >= SAMPLE_ARRAY_SIZE)
1925             is->sample_array_index = 0;
1926         size -= len;
1927     }
1928 }
1929
1930 /* return the new audio buffer size (samples can be added or deleted
1931    to get better sync if video or external master clock) */
1932 static int synchronize_audio(VideoState *is, short *samples,
1933                              int samples_size1, double pts)
1934 {
1935     int n, samples_size;
1936     double ref_clock;
1937
1938     n = 2 * is->audio_st->codec->channels;
1939     samples_size = samples_size1;
1940
1941     /* if not master, then we try to remove or add samples to correct the clock */
1942     if (((is->av_sync_type == AV_SYNC_VIDEO_MASTER && is->video_st) ||
1943          is->av_sync_type == AV_SYNC_EXTERNAL_CLOCK)) {
1944         double diff, avg_diff;
1945         int wanted_size, min_size, max_size, nb_samples;
1946
1947         ref_clock = get_master_clock(is);
1948         diff = get_audio_clock(is) - ref_clock;
1949
1950         if (diff < AV_NOSYNC_THRESHOLD) {
1951             is->audio_diff_cum = diff + is->audio_diff_avg_coef * is->audio_diff_cum;
1952             if (is->audio_diff_avg_count < AUDIO_DIFF_AVG_NB) {
1953                 /* not enough measures to have a correct estimate */
1954                 is->audio_diff_avg_count++;
1955             } else {
1956                 /* estimate the A-V difference */
1957                 avg_diff = is->audio_diff_cum * (1.0 - is->audio_diff_avg_coef);
1958
1959                 if (fabs(avg_diff) >= is->audio_diff_threshold) {
1960                     wanted_size = samples_size + ((int)(diff * is->audio_st->codec->sample_rate) * n);
1961                     nb_samples = samples_size / n;
1962
1963                     min_size = ((nb_samples * (100 - SAMPLE_CORRECTION_PERCENT_MAX)) / 100) * n;
1964                     max_size = ((nb_samples * (100 + SAMPLE_CORRECTION_PERCENT_MAX)) / 100) * n;
1965                     if (wanted_size < min_size)
1966                         wanted_size = min_size;
1967                     else if (wanted_size > max_size)
1968                         wanted_size = max_size;
1969
1970                     /* add or remove samples to correction the synchro */
1971                     if (wanted_size < samples_size) {
1972                         /* remove samples */
1973                         samples_size = wanted_size;
1974                     } else if (wanted_size > samples_size) {
1975                         uint8_t *samples_end, *q;
1976                         int nb;
1977
1978                         /* add samples */
1979                         nb = (samples_size - wanted_size);
1980                         samples_end = (uint8_t *)samples + samples_size - n;
1981                         q = samples_end + n;
1982                         while (nb > 0) {
1983                             memcpy(q, samples_end, n);
1984                             q += n;
1985                             nb -= n;
1986                         }
1987                         samples_size = wanted_size;
1988                     }
1989                 }
1990                 av_dlog(NULL, "diff=%f adiff=%f sample_diff=%d apts=%0.3f vpts=%0.3f %f\n",
1991                         diff, avg_diff, samples_size - samples_size1,
1992                         is->audio_clock, is->video_clock, is->audio_diff_threshold);
1993             }
1994         } else {
1995             /* too big difference : may be initial PTS errors, so
1996                reset A-V filter */
1997             is->audio_diff_avg_count = 0;
1998             is->audio_diff_cum = 0;
1999         }
2000     }
2001
2002     return samples_size;
2003 }
2004
/* decode one audio frame and returns its uncompressed size */
/* Walks through is->audio_pkt via the pkt_temp cursor (a packet may hold
 * several frames), converts decoded audio to S16 when the decoder's output
 * format differs, and leaves the result in is->audio_buf.  *pts_ptr
 * receives the PTS of the returned data.  Returns the byte size of the
 * decoded data, -1 on pause/abort, or AVERROR(ENOMEM). */
static int audio_decode_frame(VideoState *is, double *pts_ptr)
{
    AVPacket *pkt_temp = &is->audio_pkt_temp;
    AVPacket *pkt = &is->audio_pkt;
    AVCodecContext *dec= is->audio_st->codec;
    int n, len1, data_size, got_frame;
    double pts;
    int new_packet = 0;
    int flush_complete = 0;

    for(;;) {
        /* NOTE: the audio packet can contain several frames */
        while (pkt_temp->size > 0 || (!pkt_temp->data && new_packet)) {
            /* lazily allocate the reusable decode frame */
            if (!is->frame) {
                if (!(is->frame = avcodec_alloc_frame()))
                    return AVERROR(ENOMEM);
            } else
                avcodec_get_frame_defaults(is->frame);

            /* decoder fully drained: stop feeding it flush packets */
            if (flush_complete)
                break;
            new_packet = 0;
            len1 = avcodec_decode_audio4(dec, is->frame, &got_frame, pkt_temp);
            if (len1 < 0) {
                /* if error, we skip the frame */
                pkt_temp->size = 0;
                break;
            }

            /* advance the cursor past the consumed bytes */
            pkt_temp->data += len1;
            pkt_temp->size -= len1;

            if (!got_frame) {
                /* stop sending empty packets if the decoder is finished */
                if (!pkt_temp->data && dec->codec->capabilities & CODEC_CAP_DELAY)
                    flush_complete = 1;
                continue;
            }
            data_size = av_samples_get_buffer_size(NULL, dec->channels,
                                                   is->frame->nb_samples,
                                                   dec->sample_fmt, 1);

            /* (re)create the to-S16 converter whenever the decoder's
               output sample format changes */
            if (dec->sample_fmt != is->audio_src_fmt) {
                if (is->reformat_ctx)
                    av_audio_convert_free(is->reformat_ctx);
                is->reformat_ctx= av_audio_convert_alloc(AV_SAMPLE_FMT_S16, 1,
                                                         dec->sample_fmt, 1, NULL, 0);
                if (!is->reformat_ctx) {
                    fprintf(stderr, "Cannot convert %s sample format to %s sample format\n",
                        av_get_sample_fmt_name(dec->sample_fmt),
                        av_get_sample_fmt_name(AV_SAMPLE_FMT_S16));
                        break;
                }
                is->audio_src_fmt= dec->sample_fmt;
            }

            if (is->reformat_ctx) {
                const void *ibuf[6]= { is->frame->data[0] };
                void *obuf[6];
                int istride[6]= {av_get_bytes_per_sample(dec->sample_fmt)};
                int ostride[6]= {2};
                int len= data_size/istride[0];
                /* grow the conversion buffer to hold 'len' S16 samples */
                obuf[0] = av_realloc(is->audio_buf1, FFALIGN(len * ostride[0], 32));
                if (!obuf[0]) {
                    return AVERROR(ENOMEM);
                }
                is->audio_buf1 = obuf[0];
                if (av_audio_convert(is->reformat_ctx, obuf, ostride, ibuf, istride, len)<0) {
                    printf("av_audio_convert() failed\n");
                    break;
                }
                is->audio_buf = is->audio_buf1;
                /* FIXME: existing code assume that data_size equals framesize*channels*2
                          remove this legacy cruft */
                data_size= len*2;
            }else{
                is->audio_buf = is->frame->data[0];
            }

            /* if no pts, then compute it */
            pts = is->audio_clock;
            *pts_ptr = pts;
            n = 2 * dec->channels;
            /* advance the audio clock by the duration of the returned data */
            is->audio_clock += (double)data_size /
                (double)(n * dec->sample_rate);
#ifdef DEBUG
            {
                static double last_clock;
                printf("audio: delay=%0.3f clock=%0.3f pts=%0.3f\n",
                       is->audio_clock - last_clock,
                       is->audio_clock, pts);
                last_clock = is->audio_clock;
            }
#endif
            return data_size;
        }

        /* free the current packet */
        if (pkt->data)
            av_free_packet(pkt);

        if (is->paused || is->audioq.abort_request) {
            return -1;
        }

        /* read next packet */
        if ((new_packet = packet_queue_get(&is->audioq, pkt, 1)) < 0)
            return -1;

        /* seek sentinel: reset decoder state */
        if (pkt->data == flush_pkt.data)
            avcodec_flush_buffers(dec);

        *pkt_temp = *pkt;

        /* if update the audio clock with the pts */
        if (pkt->pts != AV_NOPTS_VALUE) {
            is->audio_clock = av_q2d(is->audio_st->time_base)*pkt->pts;
        }
    }
}
2126
2127 /* prepare a new audio buffer */
2128 static void sdl_audio_callback(void *opaque, Uint8 *stream, int len)
2129 {
2130     VideoState *is = opaque;
2131     int audio_size, len1;
2132     double pts;
2133
2134     audio_callback_time = av_gettime();
2135
2136     while (len > 0) {
2137         if (is->audio_buf_index >= is->audio_buf_size) {
2138            audio_size = audio_decode_frame(is, &pts);
2139            if (audio_size < 0) {
2140                 /* if error, just output silence */
2141                is->audio_buf      = is->silence_buf;
2142                is->audio_buf_size = sizeof(is->silence_buf);
2143            } else {
2144                if (is->show_audio)
2145                    update_sample_display(is, (int16_t *)is->audio_buf, audio_size);
2146                audio_size = synchronize_audio(is, (int16_t *)is->audio_buf, audio_size,
2147                                               pts);
2148                is->audio_buf_size = audio_size;
2149            }
2150            is->audio_buf_index = 0;
2151         }
2152         len1 = is->audio_buf_size - is->audio_buf_index;
2153         if (len1 > len)
2154             len1 = len;
2155         memcpy(stream, (uint8_t *)is->audio_buf + is->audio_buf_index, len1);
2156         len -= len1;
2157         stream += len1;
2158         is->audio_buf_index += len1;
2159     }
2160 }
2161
2162 /* open a given stream. Return 0 if OK */
2163 static int stream_component_open(VideoState *is, int stream_index)
2164 {
2165     AVFormatContext *ic = is->ic;
2166     AVCodecContext *avctx;
2167     AVCodec *codec;
2168     SDL_AudioSpec wanted_spec, spec;
2169     AVDictionary *opts;
2170     AVDictionaryEntry *t = NULL;
2171
2172     if (stream_index < 0 || stream_index >= ic->nb_streams)
2173         return -1;
2174     avctx = ic->streams[stream_index]->codec;
2175
2176     opts = filter_codec_opts(codec_opts, avctx->codec_id, ic, ic->streams[stream_index]);
2177
2178     /* prepare audio output */
2179     if (avctx->codec_type == AVMEDIA_TYPE_AUDIO) {
2180         if (avctx->channels > 0) {
2181             avctx->request_channels = FFMIN(2, avctx->channels);
2182         } else {
2183             avctx->request_channels = 2;
2184         }
2185     }
2186
2187     codec = avcodec_find_decoder(avctx->codec_id);
2188     avctx->debug_mv = debug_mv;
2189     avctx->debug = debug;
2190     avctx->workaround_bugs = workaround_bugs;
2191     avctx->lowres = lowres;
2192     if(lowres) avctx->flags |= CODEC_FLAG_EMU_EDGE;
2193     avctx->idct_algo= idct;
2194     if(fast) avctx->flags2 |= CODEC_FLAG2_FAST;
2195     avctx->skip_frame= skip_frame;
2196     avctx->skip_idct= skip_idct;
2197     avctx->skip_loop_filter= skip_loop_filter;
2198     avctx->error_recognition= error_recognition;
2199     avctx->error_concealment= error_concealment;
2200     avctx->thread_count= thread_count;
2201
2202     if (!codec ||
2203         avcodec_open2(avctx, codec, &opts) < 0)
2204         return -1;
2205     if ((t = av_dict_get(opts, "", NULL, AV_DICT_IGNORE_SUFFIX))) {
2206         av_log(NULL, AV_LOG_ERROR, "Option %s not found.\n", t->key);
2207         return AVERROR_OPTION_NOT_FOUND;
2208     }
2209
2210     /* prepare audio output */
2211     if (avctx->codec_type == AVMEDIA_TYPE_AUDIO) {
2212         wanted_spec.freq = avctx->sample_rate;
2213         wanted_spec.format = AUDIO_S16SYS;
2214         wanted_spec.channels = avctx->channels;
2215         wanted_spec.silence = 0;
2216         wanted_spec.samples = SDL_AUDIO_BUFFER_SIZE;
2217         wanted_spec.callback = sdl_audio_callback;
2218         wanted_spec.userdata = is;
2219         if (SDL_OpenAudio(&wanted_spec, &spec) < 0) {
2220             fprintf(stderr, "SDL_OpenAudio: %s\n", SDL_GetError());
2221             return -1;
2222         }
2223         is->audio_hw_buf_size = spec.size;
2224         is->audio_src_fmt= AV_SAMPLE_FMT_S16;
2225     }
2226
2227     ic->streams[stream_index]->discard = AVDISCARD_DEFAULT;
2228     switch(avctx->codec_type) {
2229     case AVMEDIA_TYPE_AUDIO:
2230         is->audio_stream = stream_index;
2231         is->audio_st = ic->streams[stream_index];
2232         is->audio_buf_size = 0;
2233         is->audio_buf_index = 0;
2234
2235         /* init averaging filter */
2236         is->audio_diff_avg_coef = exp(log(0.01) / AUDIO_DIFF_AVG_NB);
2237         is->audio_diff_avg_count = 0;
2238         /* since we do not have a precise anough audio fifo fullness,
2239            we correct audio sync only if larger than this threshold */
2240         is->audio_diff_threshold = 2.0 * SDL_AUDIO_BUFFER_SIZE / avctx->sample_rate;
2241
2242         memset(&is->audio_pkt, 0, sizeof(is->audio_pkt));
2243         packet_queue_init(&is->audioq);
2244         SDL_PauseAudio(0);
2245         break;
2246     case AVMEDIA_TYPE_VIDEO:
2247         is->video_stream = stream_index;
2248         is->video_st = ic->streams[stream_index];
2249
2250         packet_queue_init(&is->videoq);
2251         is->video_tid = SDL_CreateThread(video_thread, is);
2252         break;
2253     case AVMEDIA_TYPE_SUBTITLE:
2254         is->subtitle_stream = stream_index;
2255         is->subtitle_st = ic->streams[stream_index];
2256         packet_queue_init(&is->subtitleq);
2257
2258         is->subtitle_tid = SDL_CreateThread(subtitle_thread, is);
2259         break;
2260     default:
2261         break;
2262     }
2263     return 0;
2264 }
2265
/* Close one previously opened stream component: abort and drain its packet
 * queue, join the decoder thread (video/subtitle) or close the SDL audio
 * device (audio), release per-component buffers, then close the codec and
 * clear the matching is->*_st / is->*_stream fields. */
static void stream_component_close(VideoState *is, int stream_index)
{
    AVFormatContext *ic = is->ic;
    AVCodecContext *avctx;

    if (stream_index < 0 || stream_index >= ic->nb_streams)
        return;
    avctx = ic->streams[stream_index]->codec;

    switch(avctx->codec_type) {
    case AVMEDIA_TYPE_AUDIO:
        packet_queue_abort(&is->audioq);

        /* stops the SDL callback, so audio_buf is no longer read */
        SDL_CloseAudio();

        packet_queue_end(&is->audioq);
        av_free_packet(&is->audio_pkt);
        if (is->reformat_ctx)
            av_audio_convert_free(is->reformat_ctx);
        is->reformat_ctx = NULL;
        av_freep(&is->audio_buf1);
        /* audio_buf may alias audio_buf1 or frame data; just drop the alias */
        is->audio_buf = NULL;
        av_freep(&is->frame);

        if (is->rdft) {
            av_rdft_end(is->rdft);
            av_freep(&is->rdft_data);
            is->rdft = NULL;
            is->rdft_bits = 0;
        }
        break;
    case AVMEDIA_TYPE_VIDEO:
        packet_queue_abort(&is->videoq);

        /* note: we also signal this mutex to make sure we deblock the
           video thread in all cases */
        SDL_LockMutex(is->pictq_mutex);
        SDL_CondSignal(is->pictq_cond);
        SDL_UnlockMutex(is->pictq_mutex);

        SDL_WaitThread(is->video_tid, NULL);

        packet_queue_end(&is->videoq);
        break;
    case AVMEDIA_TYPE_SUBTITLE:
        packet_queue_abort(&is->subtitleq);

        /* note: we also signal this mutex to make sure we deblock the
           video thread in all cases */
        SDL_LockMutex(is->subpq_mutex);
        is->subtitle_stream_changed = 1;

        SDL_CondSignal(is->subpq_cond);
        SDL_UnlockMutex(is->subpq_mutex);

        SDL_WaitThread(is->subtitle_tid, NULL);

        packet_queue_end(&is->subtitleq);
        break;
    default:
        break;
    }

    ic->streams[stream_index]->discard = AVDISCARD_ALL;
    avcodec_close(avctx);
    switch(avctx->codec_type) {
    case AVMEDIA_TYPE_AUDIO:
        is->audio_st = NULL;
        is->audio_stream = -1;
        break;
    case AVMEDIA_TYPE_VIDEO:
        is->video_st = NULL;
        is->video_stream = -1;
        break;
    case AVMEDIA_TYPE_SUBTITLE:
        is->subtitle_st = NULL;
        is->subtitle_stream = -1;
        break;
    default:
        break;
    }
}
2348
/* since we have only one decoding thread, we can use a global
   variable instead of a thread local variable */
static VideoState *global_video_state;

/* AVIO interrupt callback: returns non-zero when the current stream has
 * been asked to abort, so blocking I/O inside libavformat bails out.
 * The ctx parameter is unused; state comes from global_video_state. */
static int decode_interrupt_cb(void *ctx)
{
    return (global_video_state && global_video_state->abort_request);
}
2357
/* this thread gets the stream from the disk or the network */
/* Demuxer thread: opens the input, selects and opens the best audio /
 * video / subtitle streams, then loops reading packets and dispatching
 * them to the per-stream queues, handling pause, seek requests, queue
 * back-pressure, EOF flushing and looping.  On exit it closes all stream
 * components and the input, and posts FF_QUIT_EVENT on error. */
static int decode_thread(void *arg)
{
    VideoState *is = arg;
    AVFormatContext *ic = NULL;
    int err, i, ret;
    int st_index[AVMEDIA_TYPE_NB];
    AVPacket pkt1, *pkt = &pkt1;
    int eof=0;
    int pkt_in_play_range = 0;
    AVDictionaryEntry *t;
    AVDictionary **opts;
    int orig_nb_streams;

    /* -1 in st_index means "no stream of that type selected" */
    memset(st_index, -1, sizeof(st_index));
    is->video_stream = -1;
    is->audio_stream = -1;
    is->subtitle_stream = -1;

    global_video_state = is;

    ic = avformat_alloc_context();
    ic->interrupt_callback.callback = decode_interrupt_cb;
    err = avformat_open_input(&ic, is->filename, is->iformat, &format_opts);
    if (err < 0) {
        print_error(is->filename, err);
        ret = -1;
        goto fail;
    }
    /* any option left in format_opts was not recognized by the demuxer */
    if ((t = av_dict_get(format_opts, "", NULL, AV_DICT_IGNORE_SUFFIX))) {
        av_log(NULL, AV_LOG_ERROR, "Option %s not found.\n", t->key);
        ret = AVERROR_OPTION_NOT_FOUND;
        goto fail;
    }
    is->ic = ic;

    if(genpts)
        ic->flags |= AVFMT_FLAG_GENPTS;

    /* one per-stream option dict per stream present before probing */
    opts = setup_find_stream_info_opts(ic, codec_opts);
    orig_nb_streams = ic->nb_streams;

    err = avformat_find_stream_info(ic, opts);
    if (err < 0) {
        fprintf(stderr, "%s: could not find codec parameters\n", is->filename);
        ret = -1;
        goto fail;
    }
    for (i = 0; i < orig_nb_streams; i++)
        av_dict_free(&opts[i]);
    av_freep(&opts);

    if(ic->pb)
        ic->pb->eof_reached= 0; //FIXME hack, avplay maybe should not use url_feof() to test for the end

    if(seek_by_bytes<0)
        seek_by_bytes= !!(ic->iformat->flags & AVFMT_TS_DISCONT);

    /* if seeking requested, we execute it */
    if (start_time != AV_NOPTS_VALUE) {
        int64_t timestamp;

        timestamp = start_time;
        /* add the stream start time */
        if (ic->start_time != AV_NOPTS_VALUE)
            timestamp += ic->start_time;
        ret = avformat_seek_file(ic, -1, INT64_MIN, timestamp, INT64_MAX, 0);
        if (ret < 0) {
            fprintf(stderr, "%s: could not seek to position %0.3f\n",
                    is->filename, (double)timestamp / AV_TIME_BASE);
        }
    }

    /* discard everything, then re-enable only the streams we open */
    for (i = 0; i < ic->nb_streams; i++)
        ic->streams[i]->discard = AVDISCARD_ALL;
    if (!video_disable)
        st_index[AVMEDIA_TYPE_VIDEO] =
            av_find_best_stream(ic, AVMEDIA_TYPE_VIDEO,
                                wanted_stream[AVMEDIA_TYPE_VIDEO], -1, NULL, 0);
    if (!audio_disable)
        st_index[AVMEDIA_TYPE_AUDIO] =
            av_find_best_stream(ic, AVMEDIA_TYPE_AUDIO,
                                wanted_stream[AVMEDIA_TYPE_AUDIO],
                                st_index[AVMEDIA_TYPE_VIDEO],
                                NULL, 0);
    if (!video_disable)
        st_index[AVMEDIA_TYPE_SUBTITLE] =
            av_find_best_stream(ic, AVMEDIA_TYPE_SUBTITLE,
                                wanted_stream[AVMEDIA_TYPE_SUBTITLE],
                                (st_index[AVMEDIA_TYPE_AUDIO] >= 0 ?
                                 st_index[AVMEDIA_TYPE_AUDIO] :
                                 st_index[AVMEDIA_TYPE_VIDEO]),
                                NULL, 0);
    if (show_status) {
        av_dump_format(ic, 0, is->filename, 0);
    }

    /* open the streams */
    if (st_index[AVMEDIA_TYPE_AUDIO] >= 0) {
        stream_component_open(is, st_index[AVMEDIA_TYPE_AUDIO]);
    }

    ret=-1;
    if (st_index[AVMEDIA_TYPE_VIDEO] >= 0) {
        ret= stream_component_open(is, st_index[AVMEDIA_TYPE_VIDEO]);
    }
    is->refresh_tid = SDL_CreateThread(refresh_thread, is);
    /* no video: fall back to the audio visualization display */
    if(ret<0) {
        if (!display_disable)
            is->show_audio = 2;
    }

    if (st_index[AVMEDIA_TYPE_SUBTITLE] >= 0) {
        stream_component_open(is, st_index[AVMEDIA_TYPE_SUBTITLE]);
    }

    if (is->video_stream < 0 && is->audio_stream < 0) {
        fprintf(stderr, "%s: could not open codecs\n", is->filename);
        ret = -1;
        goto fail;
    }

    /* main demux loop */
    for(;;) {
        if (is->abort_request)
            break;
        if (is->paused != is->last_paused) {
            is->last_paused = is->paused;
            if (is->paused)
                is->read_pause_return= av_read_pause(ic);
            else
                av_read_play(ic);
        }
#if CONFIG_RTSP_DEMUXER
        if (is->paused && !strcmp(ic->iformat->name, "rtsp")) {
            /* wait 10 ms to avoid trying to get another packet */
            /* XXX: horrible */
            SDL_Delay(10);
            continue;
        }
#endif
        if (is->seek_req) {
            int64_t seek_target= is->seek_pos;
            int64_t seek_min= is->seek_rel > 0 ? seek_target - is->seek_rel + 2: INT64_MIN;
            int64_t seek_max= is->seek_rel < 0 ? seek_target - is->seek_rel - 2: INT64_MAX;
//FIXME the +-2 is due to rounding being not done in the correct direction in generation
//      of the seek_pos/seek_rel variables

            ret = avformat_seek_file(is->ic, -1, seek_min, seek_target, seek_max, is->seek_flags);
            if (ret < 0) {
                fprintf(stderr, "%s: error while seeking\n", is->ic->filename);
            }else{
                /* drop queued packets and push a flush sentinel so the
                   decoder threads reset their codec state */
                if (is->audio_stream >= 0) {
                    packet_queue_flush(&is->audioq);
                    packet_queue_put(&is->audioq, &flush_pkt);
                }
                if (is->subtitle_stream >= 0) {
                    packet_queue_flush(&is->subtitleq);
                    packet_queue_put(&is->subtitleq, &flush_pkt);
                }
                if (is->video_stream >= 0) {
                    packet_queue_flush(&is->videoq);
                    packet_queue_put(&is->videoq, &flush_pkt);
                }
            }
            is->seek_req = 0;
            eof= 0;
        }

        /* if the queue are full, no need to read more */
        if (   is->audioq.size + is->videoq.size + is->subtitleq.size > MAX_QUEUE_SIZE
            || (   (is->audioq   .size  > MIN_AUDIOQ_SIZE || is->audio_stream<0)
                && (is->videoq   .nb_packets > MIN_FRAMES || is->video_stream<0)
                && (is->subtitleq.nb_packets > MIN_FRAMES || is->subtitle_stream<0))) {
            /* wait 10 ms */
            SDL_Delay(10);
            continue;
        }
        if(eof) {
            /* feed empty packets so delayed frames are drained */
            if(is->video_stream >= 0){
                av_init_packet(pkt);
                pkt->data=NULL;
                pkt->size=0;
                pkt->stream_index= is->video_stream;
                packet_queue_put(&is->videoq, pkt);
            }
            if (is->audio_stream >= 0 &&
                is->audio_st->codec->codec->capabilities & CODEC_CAP_DELAY) {
                av_init_packet(pkt);
                pkt->data = NULL;
                pkt->size = 0;
                pkt->stream_index = is->audio_stream;
                packet_queue_put(&is->audioq, pkt);
            }
            SDL_Delay(10);
            /* all queues drained: loop around or exit as requested */
            if(is->audioq.size + is->videoq.size + is->subtitleq.size ==0){
                if(loop!=1 && (!loop || --loop)){
                    stream_seek(cur_stream, start_time != AV_NOPTS_VALUE ? start_time : 0, 0, 0);
                }else if(autoexit){
                    ret=AVERROR_EOF;
                    goto fail;
                }
            }
            continue;
        }
        ret = av_read_frame(ic, pkt);
        if (ret < 0) {
            if (ret == AVERROR_EOF || (ic->pb && ic->pb->eof_reached))
                eof=1;
            if (ic->pb && ic->pb->error)
                break;
            SDL_Delay(100); /* wait for user event */
            continue;
        }
        /* check if packet is in play range specified by user, then queue, otherwise discard */
        pkt_in_play_range = duration == AV_NOPTS_VALUE ||
                (pkt->pts - ic->streams[pkt->stream_index]->start_time) *
                av_q2d(ic->streams[pkt->stream_index]->time_base) -
                (double)(start_time != AV_NOPTS_VALUE ? start_time : 0)/1000000
                <= ((double)duration/1000000);
        if (pkt->stream_index == is->audio_stream && pkt_in_play_range) {
            packet_queue_put(&is->audioq, pkt);
        } else if (pkt->stream_index == is->video_stream && pkt_in_play_range) {
            packet_queue_put(&is->videoq, pkt);
        } else if (pkt->stream_index == is->subtitle_stream && pkt_in_play_range) {
            packet_queue_put(&is->subtitleq, pkt);
        } else {
            av_free_packet(pkt);
        }
    }
    /* wait until the end */
    while (!is->abort_request) {
        SDL_Delay(100);
    }

    ret = 0;
 fail:
    /* disable interrupting */
    global_video_state = NULL;

    /* close each stream */
    if (is->audio_stream >= 0)
        stream_component_close(is, is->audio_stream);
    if (is->video_stream >= 0)
        stream_component_close(is, is->video_stream);
    if (is->subtitle_stream >= 0)
        stream_component_close(is, is->subtitle_stream);
    if (is->ic) {
        av_close_input_file(is->ic);
        is->ic = NULL; /* safety */
    }
    avio_set_interrupt_cb(NULL);

    if (ret != 0) {
        SDL_Event event;

        event.type = FF_QUIT_EVENT;
        event.user.data1 = is;
        SDL_PushEvent(&event);
    }
    return 0;
}
2619
2620 static VideoState *stream_open(const char *filename, AVInputFormat *iformat)
2621 {
2622     VideoState *is;
2623
2624     is = av_mallocz(sizeof(VideoState));
2625     if (!is)
2626         return NULL;
2627     av_strlcpy(is->filename, filename, sizeof(is->filename));
2628     is->iformat = iformat;
2629     is->ytop = 0;
2630     is->xleft = 0;
2631
2632     /* start video display */
2633     is->pictq_mutex = SDL_CreateMutex();
2634     is->pictq_cond = SDL_CreateCond();
2635
2636     is->subpq_mutex = SDL_CreateMutex();
2637     is->subpq_cond = SDL_CreateCond();
2638
2639     is->av_sync_type = av_sync_type;
2640     is->parse_tid = SDL_CreateThread(decode_thread, is);
2641     if (!is->parse_tid) {
2642         av_free(is);
2643         return NULL;
2644     }
2645     return is;
2646 }
2647
/* Switch to the next usable stream of the given media type, scanning the
 * stream list circularly from the current one.  For subtitles, wrapping
 * past the end first selects "no stream" (index -1); audio candidates
 * must have a non-zero sample rate and channel count.  Closes the old
 * component and opens the new one. */
static void stream_cycle_channel(VideoState *is, int codec_type)
{
    AVFormatContext *ic = is->ic;
    int start_index, stream_index;
    AVStream *st;

    if (codec_type == AVMEDIA_TYPE_VIDEO)
        start_index = is->video_stream;
    else if (codec_type == AVMEDIA_TYPE_AUDIO)
        start_index = is->audio_stream;
    else
        start_index = is->subtitle_stream;
    /* subtitles may start from "off" (-1); audio/video need an open stream */
    if (start_index < (codec_type == AVMEDIA_TYPE_SUBTITLE ? -1 : 0))
        return;
    stream_index = start_index;
    for(;;) {
        if (++stream_index >= is->ic->nb_streams)
        {
            if (codec_type == AVMEDIA_TYPE_SUBTITLE)
            {
                /* wrap to "no subtitle stream" before cycling again */
                stream_index = -1;
                goto the_end;
            } else
                stream_index = 0;
        }
        /* came full circle without finding another candidate */
        if (stream_index == start_index)
            return;
        st = ic->streams[stream_index];
        if (st->codec->codec_type == codec_type) {
            /* check that parameters are OK */
            switch(codec_type) {
            case AVMEDIA_TYPE_AUDIO:
                if (st->codec->sample_rate != 0 &&
                    st->codec->channels != 0)
                    goto the_end;
                break;
            case AVMEDIA_TYPE_VIDEO:
            case AVMEDIA_TYPE_SUBTITLE:
                goto the_end;
            default:
                break;
            }
        }
    }
 the_end:
    stream_component_close(is, start_index);
    stream_component_open(is, stream_index);
}
2696
2697
2698 static void toggle_full_screen(void)
2699 {
2700     is_full_screen = !is_full_screen;
2701 #if defined(__APPLE__) && SDL_VERSION_ATLEAST(1, 2, 14)
2702     /* OSX needs to empty the picture_queue */
2703     for (int i = 0; i < VIDEO_PICTURE_QUEUE_SIZE; i++) {
2704         cur_stream->pictq[i].reallocate = 1;
2705     }
2706 #endif
2707     video_open(cur_stream);
2708 }
2709
2710 static void toggle_pause(void)
2711 {
2712     if (cur_stream)
2713         stream_pause(cur_stream);
2714     step = 0;
2715 }
2716
2717 static void step_to_next_frame(void)
2718 {
2719     if (cur_stream) {
2720         /* if the stream is paused unpause it, then step */
2721         if (cur_stream->paused)
2722             stream_pause(cur_stream);
2723     }
2724     step = 1;
2725 }
2726
2727 static void toggle_audio_display(void)
2728 {
2729     if (cur_stream) {
2730         int bgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0x00);
2731         cur_stream->show_audio = (cur_stream->show_audio + 1) % 3;
2732         fill_rectangle(screen,
2733                     cur_stream->xleft, cur_stream->ytop, cur_stream->width, cur_stream->height,
2734                     bgcolor);
2735         SDL_UpdateRect(screen, cur_stream->xleft, cur_stream->ytop, cur_stream->width, cur_stream->height);
2736     }
2737 }
2738
/* handle an event sent by the GUI */
/* Main SDL event loop: dispatches keyboard shortcuts, mouse seeking,
 * window resizes and the custom FF_* events posted by the decoder
 * threads.  Never returns; termination goes through do_exit(). */
static void event_loop(void)
{
    SDL_Event event;
    double incr, pos, frac;

    for(;;) {
        double x;
        SDL_WaitEvent(&event);
        switch(event.type) {
        case SDL_KEYDOWN:
            if (exit_on_keydown) {
                do_exit();
                break;
            }
            switch(event.key.keysym.sym) {
            case SDLK_ESCAPE:
            case SDLK_q:
                do_exit();
                break;
            case SDLK_f:
                toggle_full_screen();
                break;
            case SDLK_p:
            case SDLK_SPACE:
                toggle_pause();
                break;
            case SDLK_s: //S: Step to next frame
                step_to_next_frame();
                break;
            case SDLK_a:
                if (cur_stream)
                    stream_cycle_channel(cur_stream, AVMEDIA_TYPE_AUDIO);
                break;
            case SDLK_v:
                if (cur_stream)
                    stream_cycle_channel(cur_stream, AVMEDIA_TYPE_VIDEO);
                break;
            case SDLK_t:
                if (cur_stream)
                    stream_cycle_channel(cur_stream, AVMEDIA_TYPE_SUBTITLE);
                break;
            case SDLK_w:
                toggle_audio_display();
                break;
            /* arrow keys: relative seeks of +/-10 seconds (left/right)
             * and +/-1 minute (up/down) */
            case SDLK_LEFT:
                incr = -10.0;
                goto do_seek;
            case SDLK_RIGHT:
                incr = 10.0;
                goto do_seek;
            case SDLK_UP:
                incr = 60.0;
                goto do_seek;
            case SDLK_DOWN:
                incr = -60.0;
            do_seek:
                if (cur_stream) {
                    if (seek_by_bytes) {
                        /* byte-based seeking: start from the most
                         * reliable known file position */
                        if (cur_stream->video_stream >= 0 && cur_stream->video_current_pos>=0){
                            pos= cur_stream->video_current_pos;
                        }else if(cur_stream->audio_stream >= 0 && cur_stream->audio_pkt.pos>=0){
                            pos= cur_stream->audio_pkt.pos;
                        }else
                            pos = avio_tell(cur_stream->ic->pb);
                        /* convert the seconds increment to bytes; fall
                         * back on a guessed byte rate when the container
                         * reports no bitrate */
                        if (cur_stream->ic->bit_rate)
                            incr *= cur_stream->ic->bit_rate / 8.0;
                        else
                            incr *= 180000.0;
                        pos += incr;
                        stream_seek(cur_stream, pos, incr, 1);
                    } else {
                        /* time-based seeking relative to the master clock */
                        pos = get_master_clock(cur_stream);
                        pos += incr;
                        stream_seek(cur_stream, (int64_t)(pos * AV_TIME_BASE), (int64_t)(incr * AV_TIME_BASE), 0);
                    }
                }
                break;
            default:
                break;
            }
            break;
        case SDL_MOUSEBUTTONDOWN:
            if (exit_on_mousedown) {
                do_exit();
                break;
            }
            /* fall through: a click seeks just like a drag does */
        case SDL_MOUSEMOTION:
            if(event.type ==SDL_MOUSEBUTTONDOWN){
                x= event.button.x;
            }else{
                /* only seek while a mouse button is held down */
                if(event.motion.state != SDL_PRESSED)
                    break;
                x= event.motion.x;
            }
            if (cur_stream) {
                if(seek_by_bytes || cur_stream->ic->duration<=0){
                    /* no usable duration: map the x position to a byte
                     * offset within the file instead */
                    uint64_t size=  avio_size(cur_stream->ic->pb);
                    stream_seek(cur_stream, size*x/cur_stream->width, 0, 1);
                }else{
                    int64_t ts;
                    int ns, hh, mm, ss;
                    int tns, thh, tmm, tss;
                    /* total duration broken down to h:m:s for display */
                    tns = cur_stream->ic->duration/1000000LL;
                    thh = tns/3600;
                    tmm = (tns%3600)/60;
                    tss = (tns%60);
                    /* fraction of window width == fraction of duration */
                    frac = x/cur_stream->width;
                    ns = frac*tns;
                    hh = ns/3600;
                    mm = (ns%3600)/60;
                    ss = (ns%60);
                    fprintf(stderr, "Seek to %2.0f%% (%2d:%02d:%02d) of total duration (%2d:%02d:%02d)       \n", frac*100,
                            hh, mm, ss, thh, tmm, tss);
                    ts = frac*cur_stream->ic->duration;
                    if (cur_stream->ic->start_time != AV_NOPTS_VALUE)
                        ts += cur_stream->ic->start_time;
                    stream_seek(cur_stream, ts, 0, 0);
                }
            }
            break;
        case SDL_VIDEORESIZE:
            if (cur_stream) {
                screen = SDL_SetVideoMode(event.resize.w, event.resize.h, 0,
                                          SDL_HWSURFACE|SDL_RESIZABLE|SDL_ASYNCBLIT|SDL_HWACCEL);
                /* remember the new size as the user's preference */
                screen_width = cur_stream->width = event.resize.w;
                screen_height= cur_stream->height= event.resize.h;
            }
            break;
        case SDL_QUIT:
        case FF_QUIT_EVENT:
            do_exit();
            break;
        case FF_ALLOC_EVENT:
            /* the video thread requested a picture (re)allocation, which
             * must be performed in the main SDL thread */
            video_open(event.user.data1);
            alloc_picture(event.user.data1);
            break;
        case FF_REFRESH_EVENT:
            video_refresh_timer(event.user.data1);
            cur_stream->refresh=0;
            break;
        default:
            break;
        }
    }
}
2885
2886 static int opt_frame_size(const char *opt, const char *arg)
2887 {
2888     av_log(NULL, AV_LOG_ERROR,
2889            "Option '%s' has been removed, use private format options instead\n", opt);
2890     return AVERROR(EINVAL);
2891 }
2892
2893 static int opt_width(const char *opt, const char *arg)
2894 {
2895     screen_width = parse_number_or_die(opt, arg, OPT_INT64, 1, INT_MAX);
2896     return 0;
2897 }
2898
2899 static int opt_height(const char *opt, const char *arg)
2900 {
2901     screen_height = parse_number_or_die(opt, arg, OPT_INT64, 1, INT_MAX);
2902     return 0;
2903 }
2904
2905 static int opt_format(const char *opt, const char *arg)
2906 {
2907     file_iformat = av_find_input_format(arg);
2908     if (!file_iformat) {
2909         fprintf(stderr, "Unknown input format: %s\n", arg);
2910         return AVERROR(EINVAL);
2911     }
2912     return 0;
2913 }
2914
2915 static int opt_frame_pix_fmt(const char *opt, const char *arg)
2916 {
2917     av_log(NULL, AV_LOG_ERROR,
2918            "Option '%s' has been removed, use private format options instead\n", opt);
2919     return AVERROR(EINVAL);
2920 }
2921
2922 static int opt_sync(const char *opt, const char *arg)
2923 {
2924     if (!strcmp(arg, "audio"))
2925         av_sync_type = AV_SYNC_AUDIO_MASTER;
2926     else if (!strcmp(arg, "video"))
2927         av_sync_type = AV_SYNC_VIDEO_MASTER;
2928     else if (!strcmp(arg, "ext"))
2929         av_sync_type = AV_SYNC_EXTERNAL_CLOCK;
2930     else {
2931         fprintf(stderr, "Unknown value for %s: %s\n", opt, arg);
2932         exit(1);
2933     }
2934     return 0;
2935 }
2936
2937 static int opt_seek(const char *opt, const char *arg)
2938 {
2939     start_time = parse_time_or_die(opt, arg, 1);
2940     return 0;
2941 }
2942
2943 static int opt_duration(const char *opt, const char *arg)
2944 {
2945     duration = parse_time_or_die(opt, arg, 1);
2946     return 0;
2947 }
2948
2949 static int opt_debug(const char *opt, const char *arg)
2950 {
2951     av_log_set_level(99);
2952     debug = parse_number_or_die(opt, arg, OPT_INT64, 0, INT_MAX);
2953     return 0;
2954 }
2955
2956 static int opt_vismv(const char *opt, const char *arg)
2957 {
2958     debug_mv = parse_number_or_die(opt, arg, OPT_INT64, INT_MIN, INT_MAX);
2959     return 0;
2960 }
2961
2962 static int opt_thread_count(const char *opt, const char *arg)
2963 {
2964     thread_count= parse_number_or_die(opt, arg, OPT_INT64, 0, INT_MAX);
2965 #if !HAVE_THREADS
2966     fprintf(stderr, "Warning: not compiled with thread support, using thread emulation\n");
2967 #endif
2968     return 0;
2969 }
2970
/* Command line option table; terminated by an all-NULL entry.
 * OPT_BOOL entries point at the flag variable directly, HAS_ARG entries
 * without OPT_INT/OPT_STRING dispatch to an opt_* handler. */
static const OptionDef options[] = {
#include "cmdutils_common_opts.h"
    /* display geometry */
    { "x", HAS_ARG, {(void*)opt_width}, "force displayed width", "width" },
    { "y", HAS_ARG, {(void*)opt_height}, "force displayed height", "height" },
    { "s", HAS_ARG | OPT_VIDEO, {(void*)opt_frame_size}, "set frame size (WxH or abbreviation)", "size" },
    { "fs", OPT_BOOL, {(void*)&is_full_screen}, "force full screen" },
    /* stream enable/disable and selection */
    { "an", OPT_BOOL, {(void*)&audio_disable}, "disable audio" },
    { "vn", OPT_BOOL, {(void*)&video_disable}, "disable video" },
    { "ast", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&wanted_stream[AVMEDIA_TYPE_AUDIO]}, "select desired audio stream", "stream_number" },
    { "vst", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&wanted_stream[AVMEDIA_TYPE_VIDEO]}, "select desired video stream", "stream_number" },
    { "sst", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&wanted_stream[AVMEDIA_TYPE_SUBTITLE]}, "select desired subtitle stream", "stream_number" },
    /* playback range and seeking */
    { "ss", HAS_ARG, {(void*)&opt_seek}, "seek to a given position in seconds", "pos" },
    { "t", HAS_ARG, {(void*)&opt_duration}, "play  \"duration\" seconds of audio/video", "duration" },
    { "bytes", OPT_INT | HAS_ARG, {(void*)&seek_by_bytes}, "seek by bytes 0=off 1=on -1=auto", "val" },
    { "nodisp", OPT_BOOL, {(void*)&display_disable}, "disable graphical display" },
    { "f", HAS_ARG, {(void*)opt_format}, "force format", "fmt" },
    { "pix_fmt", HAS_ARG | OPT_EXPERT | OPT_VIDEO, {(void*)opt_frame_pix_fmt}, "set pixel format", "format" },
    { "stats", OPT_BOOL | OPT_EXPERT, {(void*)&show_status}, "show status", "" },
    /* decoder tuning / debugging */
    { "debug", HAS_ARG | OPT_EXPERT, {(void*)opt_debug}, "print specific debug info", "" },
    { "bug", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&workaround_bugs}, "workaround bugs", "" },
    { "vismv", HAS_ARG | OPT_EXPERT, {(void*)opt_vismv}, "visualize motion vectors", "" },
    { "fast", OPT_BOOL | OPT_EXPERT, {(void*)&fast}, "non spec compliant optimizations", "" },
    { "genpts", OPT_BOOL | OPT_EXPERT, {(void*)&genpts}, "generate pts", "" },
    { "drp", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&decoder_reorder_pts}, "let decoder reorder pts 0=off 1=on -1=auto", ""},
    { "lowres", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&lowres}, "", "" },
    { "skiploop", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&skip_loop_filter}, "", "" },
    { "skipframe", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&skip_frame}, "", "" },
    { "skipidct", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&skip_idct}, "", "" },
    { "idct", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&idct}, "set idct algo",  "algo" },
    { "er", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&error_recognition}, "set error detection threshold (0-4)",  "threshold" },
    { "ec", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&error_concealment}, "set error concealment options",  "bit_mask" },
    { "sync", HAS_ARG | OPT_EXPERT, {(void*)opt_sync}, "set audio-video sync. type (type=audio/video/ext)", "type" },
    { "threads", HAS_ARG | OPT_EXPERT, {(void*)opt_thread_count}, "thread count", "count" },
    /* application behavior */
    { "autoexit", OPT_BOOL | OPT_EXPERT, {(void*)&autoexit}, "exit at the end", "" },
    { "exitonkeydown", OPT_BOOL | OPT_EXPERT, {(void*)&exit_on_keydown}, "exit on key down", "" },
    { "exitonmousedown", OPT_BOOL | OPT_EXPERT, {(void*)&exit_on_mousedown}, "exit on mouse down", "" },
    { "loop", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&loop}, "set number of times the playback shall be looped", "loop count" },
    { "framedrop", OPT_BOOL | OPT_EXPERT, {(void*)&framedrop}, "drop frames when cpu is too slow", "" },
    { "window_title", OPT_STRING | HAS_ARG, {(void*)&window_title}, "set window title", "window title" },
#if CONFIG_AVFILTER
    { "vf", OPT_STRING | HAS_ARG, {(void*)&vfilters}, "video filters", "filter list" },
#endif
    { "rdftspeed", OPT_INT | HAS_ARG| OPT_AUDIO | OPT_EXPERT, {(void*)&rdftspeed}, "rdft speed", "msecs" },
    { "default", HAS_ARG | OPT_AUDIO | OPT_VIDEO | OPT_EXPERT, {(void*)opt_default}, "generic catch all option", "" },
    { "i", 0, {NULL}, "avconv compatibility dummy option", ""},
    { NULL, },
};
3018
3019 static void show_usage(void)
3020 {
3021     printf("Simple media player\n");
3022     printf("usage: %s [options] input_file\n", program_name);
3023     printf("\n");
3024 }
3025
/* Print the full help text: avplay's own options, the generic
 * libavcodec/libavformat (and possibly libswscale) options, and the
 * interactive key bindings. */
static void show_help(void)
{
    av_log_set_callback(log_callback_help);
    show_usage();
    /* first the non-expert options, then the expert ones */
    show_help_options(options, "Main options:\n",
                      OPT_EXPERT, 0);
    show_help_options(options, "\nAdvanced options:\n",
                      OPT_EXPERT, OPT_EXPERT);
    printf("\n");
    show_help_children(avcodec_get_class(), AV_OPT_FLAG_DECODING_PARAM);
    show_help_children(avformat_get_class(), AV_OPT_FLAG_DECODING_PARAM);
#if !CONFIG_AVFILTER
    /* with libavfilter enabled, scaling is configured via the filter graph */
    show_help_children(sws_get_class(), AV_OPT_FLAG_ENCODING_PARAM);
#endif
    printf("\nWhile playing:\n"
           "q, ESC              quit\n"
           "f                   toggle full screen\n"
           "p, SPC              pause\n"
           "a                   cycle audio channel\n"
           "v                   cycle video channel\n"
           "t                   cycle subtitle channel\n"
           "w                   show audio waves\n"
           "s                   activate frame-step mode\n"
           "left/right          seek backward/forward 10 seconds\n"
           "down/up             seek backward/forward 1 minute\n"
           "mouse click         seek to percentage in file corresponding to fraction of width\n"
           );
}
3054
3055 static void opt_input_file(void *optctx, const char *filename)
3056 {
3057     if (input_filename) {
3058         fprintf(stderr, "Argument '%s' provided as input filename, but '%s' was already specified.\n",
3059                 filename, input_filename);
3060         exit(1);
3061     }
3062     if (!strcmp(filename, "-"))
3063         filename = "pipe:";
3064     input_filename = filename;
3065 }
3066
3067 /* Called from the main */
3068 int main(int argc, char **argv)
3069 {
3070     int flags;
3071
3072     av_log_set_flags(AV_LOG_SKIP_REPEATED);
3073     parse_loglevel(argc, argv, options);
3074
3075     /* register all codecs, demux and protocols */
3076     avcodec_register_all();
3077 #if CONFIG_AVDEVICE
3078     avdevice_register_all();
3079 #endif
3080 #if CONFIG_AVFILTER
3081     avfilter_register_all();
3082 #endif
3083     av_register_all();
3084     avformat_network_init();
3085
3086     init_opts();
3087
3088     show_banner();
3089
3090     parse_options(NULL, argc, argv, options, opt_input_file);
3091
3092     if (!input_filename) {
3093         show_usage();
3094         fprintf(stderr, "An input file must be specified\n");
3095         fprintf(stderr, "Use -h to get full help or, even better, run 'man %s'\n", program_name);
3096         exit(1);
3097     }
3098
3099     if (display_disable) {
3100         video_disable = 1;
3101     }
3102     flags = SDL_INIT_VIDEO | SDL_INIT_AUDIO | SDL_INIT_TIMER;
3103 #if !defined(__MINGW32__) && !defined(__APPLE__)
3104     flags |= SDL_INIT_EVENTTHREAD; /* Not supported on Windows or Mac OS X */
3105 #endif
3106     if (SDL_Init (flags)) {
3107         fprintf(stderr, "Could not initialize SDL - %s\n", SDL_GetError());
3108         exit(1);
3109     }
3110
3111     if (!display_disable) {
3112 #if HAVE_SDL_VIDEO_SIZE
3113         const SDL_VideoInfo *vi = SDL_GetVideoInfo();
3114         fs_screen_width = vi->current_w;
3115         fs_screen_height = vi->current_h;
3116 #endif
3117     }
3118
3119     SDL_EventState(SDL_ACTIVEEVENT, SDL_IGNORE);
3120     SDL_EventState(SDL_SYSWMEVENT, SDL_IGNORE);
3121     SDL_EventState(SDL_USEREVENT, SDL_IGNORE);
3122
3123     av_init_packet(&flush_pkt);
3124     flush_pkt.data= "FLUSH";
3125
3126     cur_stream = stream_open(input_filename, file_iformat);
3127
3128     event_loop();
3129
3130     /* never returns */
3131
3132     return 0;
3133 }