]> git.sesse.net Git - ffmpeg/blob - avplay.c
avcodec: remove pointless AVOption, internal_buffer_count
[ffmpeg] / avplay.c
1 /*
2  * avplay : Simple Media Player based on the Libav libraries
3  * Copyright (c) 2003 Fabrice Bellard
4  *
5  * This file is part of Libav.
6  *
7  * Libav is free software; you can redistribute it and/or
8  * modify it under the terms of the GNU Lesser General Public
9  * License as published by the Free Software Foundation; either
10  * version 2.1 of the License, or (at your option) any later version.
11  *
12  * Libav is distributed in the hope that it will be useful,
13  * but WITHOUT ANY WARRANTY; without even the implied warranty of
14  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
15  * Lesser General Public License for more details.
16  *
17  * You should have received a copy of the GNU Lesser General Public
18  * License along with Libav; if not, write to the Free Software
19  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
20  */
21
22 #include "config.h"
23 #include <inttypes.h>
24 #include <math.h>
25 #include <limits.h>
26 #include "libavutil/avstring.h"
27 #include "libavutil/colorspace.h"
28 #include "libavutil/mathematics.h"
29 #include "libavutil/pixdesc.h"
30 #include "libavutil/imgutils.h"
31 #include "libavutil/dict.h"
32 #include "libavutil/parseutils.h"
33 #include "libavutil/samplefmt.h"
34 #include "libavformat/avformat.h"
35 #include "libavdevice/avdevice.h"
36 #include "libswscale/swscale.h"
37 #include "libavcodec/audioconvert.h"
38 #include "libavutil/opt.h"
39 #include "libavcodec/avfft.h"
40
41 #if CONFIG_AVFILTER
42 # include "libavfilter/avfilter.h"
43 # include "libavfilter/avfiltergraph.h"
44 #endif
45
46 #include "cmdutils.h"
47
48 #include <SDL.h>
49 #include <SDL_thread.h>
50
51 #ifdef __MINGW32__
52 #undef main /* We don't want SDL to override our main() */
53 #endif
54
55 #include <unistd.h>
56 #include <assert.h>
57
const char program_name[] = "avplay";
const int program_birth_year = 2003;

/* NOTE(review): queue limits — presumably caps on demuxed data buffering;
   confirm against the read thread further down in this file. */
#define MAX_QUEUE_SIZE (15 * 1024 * 1024)
#define MIN_AUDIOQ_SIZE (20 * 16 * 1024)
#define MIN_FRAMES 5

/* SDL audio buffer size, in samples. Should be small to have precise
   A/V sync as SDL does not have hardware buffer fullness info. */
#define SDL_AUDIO_BUFFER_SIZE 1024

/* no AV sync correction is done if below the AV sync threshold */
#define AV_SYNC_THRESHOLD 0.01
/* no AV correction is done if too big error */
#define AV_NOSYNC_THRESHOLD 10.0

#define FRAME_SKIP_FACTOR 0.05

/* maximum audio speed change to get correct sync */
#define SAMPLE_CORRECTION_PERCENT_MAX 10

/* we use about AUDIO_DIFF_AVG_NB A-V differences to make the average */
#define AUDIO_DIFF_AVG_NB   20

/* NOTE: the size must be big enough to compensate the hardware audio buffersize size */
#define SAMPLE_ARRAY_SIZE (2*65536)

/* scaling quality used when converting frames in software */
static int sws_flags = SWS_BICUBIC;
86
/* Thread-safe FIFO of demuxed packets shared between the producer
   (demuxer thread) and consumers (decoder threads). */
typedef struct PacketQueue {
    AVPacketList *first_pkt, *last_pkt; /* head/tail of a singly linked list */
    int nb_packets;    /* number of queued packets */
    int size;          /* sum of payload bytes plus node overhead */
    int abort_request; /* when set, blocked readers wake up and return -1 */
    SDL_mutex *mutex;  /* protects every field of this struct */
    SDL_cond *cond;    /* signalled on put and on abort */
} PacketQueue;
95
/* capacities of the picture and subtitle ring buffers in VideoState */
#define VIDEO_PICTURE_QUEUE_SIZE 2
#define SUBPICTURE_QUEUE_SIZE 4
98
/* One slot of the decoded-picture ring buffer consumed by the display code. */
typedef struct VideoPicture {
    double pts;                                  ///<presentation time stamp for this picture
    double target_clock;                         ///<av_gettime() time at which this should be displayed ideally
    int64_t pos;                                 ///<byte position in file
    SDL_Overlay *bmp;                            ///<SDL YUV overlay holding the pixel data
    int width, height; /* source height & width */
    int allocated;     /* NOTE(review): presumably set once bmp exists — confirm against alloc_picture */
    int reallocate;    /* NOTE(review): presumably requests recreating bmp — confirm against refresh path */
    enum PixelFormat pix_fmt;                    ///<pixel format of the source frame

#if CONFIG_AVFILTER
    AVFilterBufferRef *picref;                   ///<filter-graph buffer backing this picture
#endif
} VideoPicture;
113
/* A decoded subtitle paired with its presentation timestamp. */
typedef struct SubPicture {
    double pts; /* presentation time stamp for this picture */
    AVSubtitle sub; /* decoded subtitle; freed with avsubtitle_free() */
} SubPicture;
118
/* Which clock drives A/V synchronization (see get_master_clock()). */
enum {
    AV_SYNC_AUDIO_MASTER, /* default choice */
    AV_SYNC_VIDEO_MASTER,
    AV_SYNC_EXTERNAL_CLOCK, /* synchronize to an external clock */
};
124
/* Complete playback state for one open media file: demuxer context,
   per-stream packet queues, decoded picture/subtitle ring buffers,
   clocks, and display geometry. Shared between the parse, video,
   subtitle and refresh threads. */
typedef struct VideoState {
    SDL_Thread *parse_tid;    /* demuxer ("parse") thread */
    SDL_Thread *video_tid;    /* video decoder thread */
    SDL_Thread *refresh_tid;  /* thread posting FF_REFRESH_EVENT */
    AVInputFormat *iformat;
    int no_background;        /* cleared when a picture is displayed */
    int abort_request;        /* tells all threads to exit */
    int paused;
    int last_paused;
    int seek_req;             /* a seek is pending (see stream_seek()) */
    int seek_flags;
    int64_t seek_pos;
    int64_t seek_rel;
    int read_pause_return;
    AVFormatContext *ic;
    int dtg_active_format;

    int audio_stream;         /* index of the selected audio stream */

    int av_sync_type;         /* one of the AV_SYNC_* enum values */
    double external_clock; /* external clock base */
    int64_t external_clock_time;

    double audio_clock;       /* pts of the last decoded audio data */
    double audio_diff_cum; /* used for AV difference average computation */
    double audio_diff_avg_coef;
    double audio_diff_threshold;
    int audio_diff_avg_count;
    AVStream *audio_st;
    PacketQueue audioq;
    int audio_hw_buf_size;
    /* samples output by the codec. we reserve more space for avsync
       compensation */
    DECLARE_ALIGNED(16,uint8_t,audio_buf1)[(AVCODEC_MAX_AUDIO_FRAME_SIZE * 3) / 2];
    DECLARE_ALIGNED(16,uint8_t,audio_buf2)[(AVCODEC_MAX_AUDIO_FRAME_SIZE * 3) / 2];
    uint8_t *audio_buf;           /* points at audio_buf1 or audio_buf2 */
    unsigned int audio_buf_size; /* in bytes */
    int audio_buf_index; /* in bytes */
    AVPacket audio_pkt_temp;
    AVPacket audio_pkt;
    enum AVSampleFormat audio_src_fmt;
    AVAudioConvert *reformat_ctx;

    /* --- audio visualization (waveform / spectrum) state --- */
    int show_audio; /* if true, display audio samples */
    int16_t sample_array[SAMPLE_ARRAY_SIZE];  /* circular buffer of recent samples */
    int sample_array_index;
    int last_i_start;
    RDFTContext *rdft;        /* FFT context for the spectrum display */
    int rdft_bits;
    FFTSample *rdft_data;
    int xpos;                 /* current spectrogram column */

    /* --- subtitle stream state --- */
    SDL_Thread *subtitle_tid;
    int subtitle_stream;
    int subtitle_stream_changed;
    AVStream *subtitle_st;
    PacketQueue subtitleq;
    SubPicture subpq[SUBPICTURE_QUEUE_SIZE];
    int subpq_size, subpq_rindex, subpq_windex;
    SDL_mutex *subpq_mutex;
    SDL_cond *subpq_cond;

    /* --- video stream state --- */
    double frame_timer;
    double frame_last_pts;
    double frame_last_delay;
    double video_clock;                          ///<pts of last decoded frame / predicted pts of next decoded frame
    int video_stream;
    AVStream *video_st;
    PacketQueue videoq;
    double video_current_pts;                    ///<current displayed pts (different from video_clock if frame fifos are used)
    double video_current_pts_drift;              ///<video_current_pts - time (av_gettime) at which we updated video_current_pts - used to have running video pts
    int64_t video_current_pos;                   ///<current displayed file pos
    VideoPicture pictq[VIDEO_PICTURE_QUEUE_SIZE];
    int pictq_size, pictq_rindex, pictq_windex;
    SDL_mutex *pictq_mutex;
    SDL_cond *pictq_cond;
#if !CONFIG_AVFILTER
    struct SwsContext *img_convert_ctx;
#endif

    //    QETimer *video_timer;
    char filename[1024];
    int width, height, xleft, ytop;   /* display rectangle inside the SDL surface */

    PtsCorrectionContext pts_ctx;

#if CONFIG_AVFILTER
    AVFilterContext *out_video_filter;          ///<the last filter in the video chain
#endif

    float skip_frames;
    float skip_frames_index;
    int refresh;              /* a refresh event is already queued */
} VideoState;
219
static void show_help(void);

/* options specified by the user */
static AVInputFormat *file_iformat;
static const char *input_filename;
static const char *window_title;
static int fs_screen_width;          /* fullscreen size; 0 = unknown (see video_open()) */
static int fs_screen_height;
static int screen_width = 0;         /* user-forced window size; 0 = derive from stream */
static int screen_height = 0;
static int audio_disable;
static int video_disable;
static int wanted_stream[AVMEDIA_TYPE_NB]={  /* per-type stream index; -1 = automatic */
    [AVMEDIA_TYPE_AUDIO]=-1,
    [AVMEDIA_TYPE_VIDEO]=-1,
    [AVMEDIA_TYPE_SUBTITLE]=-1,
};
static int seek_by_bytes=-1;         /* NOTE(review): -1 presumably means auto-detect — confirm in read thread */
static int display_disable;
static int show_status = 1;
static int av_sync_type = AV_SYNC_AUDIO_MASTER;
static int64_t start_time = AV_NOPTS_VALUE;
static int64_t duration = AV_NOPTS_VALUE;
static int debug = 0;
static int debug_mv = 0;
static int step = 0;
static int thread_count = 1;
static int workaround_bugs = 1;
static int fast = 0;
static int genpts = 0;
static int lowres = 0;
static int idct = FF_IDCT_AUTO;
static enum AVDiscard skip_frame= AVDISCARD_DEFAULT;
static enum AVDiscard skip_idct= AVDISCARD_DEFAULT;
static enum AVDiscard skip_loop_filter= AVDISCARD_DEFAULT;
static int error_recognition = FF_ER_CAREFUL;
static int error_concealment = 3;
static int decoder_reorder_pts= -1;
static int autoexit;
static int exit_on_keydown;
static int exit_on_mousedown;
static int loop=1;
static int framedrop=1;

static int rdftspeed=20;             /* spectrum refresh period in ms (see refresh_thread()) */
#if CONFIG_AVFILTER
static char *vfilters = NULL;
#endif

/* current context */
static int is_full_screen;
static VideoState *cur_stream;
static int64_t audio_callback_time;  /* av_gettime() of the last audio callback — set elsewhere */

/* sentinel packet: queued by packet_queue_init() and passed through
   packet_queue_put() without duplication */
static AVPacket flush_pkt;

/* custom SDL user events used by the player's event loop */
#define FF_ALLOC_EVENT   (SDL_USEREVENT)
#define FF_REFRESH_EVENT (SDL_USEREVENT + 1)
#define FF_QUIT_EVENT    (SDL_USEREVENT + 2)

/* the SDL output surface, created lazily in video_open() */
static SDL_Surface *screen;
281
/* Process-wide exit hook used by cmdutils; no extra cleanup is performed. */
void exit_program(int ret)
{
    exit(ret);
}
286
287 static int packet_queue_put(PacketQueue *q, AVPacket *pkt);
288
289 /* packet queue handling */
290 static void packet_queue_init(PacketQueue *q)
291 {
292     memset(q, 0, sizeof(PacketQueue));
293     q->mutex = SDL_CreateMutex();
294     q->cond = SDL_CreateCond();
295     packet_queue_put(q, &flush_pkt);
296 }
297
298 static void packet_queue_flush(PacketQueue *q)
299 {
300     AVPacketList *pkt, *pkt1;
301
302     SDL_LockMutex(q->mutex);
303     for(pkt = q->first_pkt; pkt != NULL; pkt = pkt1) {
304         pkt1 = pkt->next;
305         av_free_packet(&pkt->pkt);
306         av_freep(&pkt);
307     }
308     q->last_pkt = NULL;
309     q->first_pkt = NULL;
310     q->nb_packets = 0;
311     q->size = 0;
312     SDL_UnlockMutex(q->mutex);
313 }
314
315 static void packet_queue_end(PacketQueue *q)
316 {
317     packet_queue_flush(q);
318     SDL_DestroyMutex(q->mutex);
319     SDL_DestroyCond(q->cond);
320 }
321
322 static int packet_queue_put(PacketQueue *q, AVPacket *pkt)
323 {
324     AVPacketList *pkt1;
325
326     /* duplicate the packet */
327     if (pkt!=&flush_pkt && av_dup_packet(pkt) < 0)
328         return -1;
329
330     pkt1 = av_malloc(sizeof(AVPacketList));
331     if (!pkt1)
332         return -1;
333     pkt1->pkt = *pkt;
334     pkt1->next = NULL;
335
336
337     SDL_LockMutex(q->mutex);
338
339     if (!q->last_pkt)
340
341         q->first_pkt = pkt1;
342     else
343         q->last_pkt->next = pkt1;
344     q->last_pkt = pkt1;
345     q->nb_packets++;
346     q->size += pkt1->pkt.size + sizeof(*pkt1);
347     /* XXX: should duplicate packet data in DV case */
348     SDL_CondSignal(q->cond);
349
350     SDL_UnlockMutex(q->mutex);
351     return 0;
352 }
353
354 static void packet_queue_abort(PacketQueue *q)
355 {
356     SDL_LockMutex(q->mutex);
357
358     q->abort_request = 1;
359
360     SDL_CondSignal(q->cond);
361
362     SDL_UnlockMutex(q->mutex);
363 }
364
365 /* return < 0 if aborted, 0 if no packet and > 0 if packet.  */
366 static int packet_queue_get(PacketQueue *q, AVPacket *pkt, int block)
367 {
368     AVPacketList *pkt1;
369     int ret;
370
371     SDL_LockMutex(q->mutex);
372
373     for(;;) {
374         if (q->abort_request) {
375             ret = -1;
376             break;
377         }
378
379         pkt1 = q->first_pkt;
380         if (pkt1) {
381             q->first_pkt = pkt1->next;
382             if (!q->first_pkt)
383                 q->last_pkt = NULL;
384             q->nb_packets--;
385             q->size -= pkt1->pkt.size + sizeof(*pkt1);
386             *pkt = pkt1->pkt;
387             av_free(pkt1);
388             ret = 1;
389             break;
390         } else if (!block) {
391             ret = 0;
392             break;
393         } else {
394             SDL_CondWait(q->cond, q->mutex);
395         }
396     }
397     SDL_UnlockMutex(q->mutex);
398     return ret;
399 }
400
401 static inline void fill_rectangle(SDL_Surface *screen,
402                                   int x, int y, int w, int h, int color)
403 {
404     SDL_Rect rect;
405     rect.x = x;
406     rect.y = y;
407     rect.w = w;
408     rect.h = h;
409     SDL_FillRect(screen, &rect, color);
410 }
411
/* Blend newp over oldp with alpha a (0..255); s is a scale shift used
   when several source samples have been summed into newp. */
#define ALPHA_BLEND(a, oldp, newp, s)\
((((oldp << s) * (255 - (a))) + (newp * (a))) / (255 << s))

/* Unpack one 32-bit ARGB word at s into r/g/b/a components. */
#define RGBA_IN(r, g, b, a, s)\
{\
    unsigned int v = ((const uint32_t *)(s))[0];\
    a = (v >> 24) & 0xff;\
    r = (v >> 16) & 0xff;\
    g = (v >> 8) & 0xff;\
    b = v & 0xff;\
}

/* Look up the palette index *(s) in pal and unpack it as AYUV. */
#define YUVA_IN(y, u, v, a, s, pal)\
{\
    unsigned int val = ((const uint32_t *)(pal))[*(const uint8_t*)(s)];\
    a = (val >> 24) & 0xff;\
    y = (val >> 16) & 0xff;\
    u = (val >> 8) & 0xff;\
    v = val & 0xff;\
}

/* Pack AYUV components into a 32-bit word at d. */
#define YUVA_OUT(d, y, u, v, a)\
{\
    ((uint32_t *)(d))[0] = (a << 24) | (y << 16) | (u << 8) | v;\
}


/* bytes per pixel of the palettized subtitle bitmap */
#define BPP 1
440
441 static void blend_subrect(AVPicture *dst, const AVSubtitleRect *rect, int imgw, int imgh)
442 {
443     int wrap, wrap3, width2, skip2;
444     int y, u, v, a, u1, v1, a1, w, h;
445     uint8_t *lum, *cb, *cr;
446     const uint8_t *p;
447     const uint32_t *pal;
448     int dstx, dsty, dstw, dsth;
449
450     dstw = av_clip(rect->w, 0, imgw);
451     dsth = av_clip(rect->h, 0, imgh);
452     dstx = av_clip(rect->x, 0, imgw - dstw);
453     dsty = av_clip(rect->y, 0, imgh - dsth);
454     lum = dst->data[0] + dsty * dst->linesize[0];
455     cb = dst->data[1] + (dsty >> 1) * dst->linesize[1];
456     cr = dst->data[2] + (dsty >> 1) * dst->linesize[2];
457
458     width2 = ((dstw + 1) >> 1) + (dstx & ~dstw & 1);
459     skip2 = dstx >> 1;
460     wrap = dst->linesize[0];
461     wrap3 = rect->pict.linesize[0];
462     p = rect->pict.data[0];
463     pal = (const uint32_t *)rect->pict.data[1];  /* Now in YCrCb! */
464
465     if (dsty & 1) {
466         lum += dstx;
467         cb += skip2;
468         cr += skip2;
469
470         if (dstx & 1) {
471             YUVA_IN(y, u, v, a, p, pal);
472             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
473             cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
474             cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
475             cb++;
476             cr++;
477             lum++;
478             p += BPP;
479         }
480         for(w = dstw - (dstx & 1); w >= 2; w -= 2) {
481             YUVA_IN(y, u, v, a, p, pal);
482             u1 = u;
483             v1 = v;
484             a1 = a;
485             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
486
487             YUVA_IN(y, u, v, a, p + BPP, pal);
488             u1 += u;
489             v1 += v;
490             a1 += a;
491             lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
492             cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
493             cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
494             cb++;
495             cr++;
496             p += 2 * BPP;
497             lum += 2;
498         }
499         if (w) {
500             YUVA_IN(y, u, v, a, p, pal);
501             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
502             cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
503             cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
504             p++;
505             lum++;
506         }
507         p += wrap3 - dstw * BPP;
508         lum += wrap - dstw - dstx;
509         cb += dst->linesize[1] - width2 - skip2;
510         cr += dst->linesize[2] - width2 - skip2;
511     }
512     for(h = dsth - (dsty & 1); h >= 2; h -= 2) {
513         lum += dstx;
514         cb += skip2;
515         cr += skip2;
516
517         if (dstx & 1) {
518             YUVA_IN(y, u, v, a, p, pal);
519             u1 = u;
520             v1 = v;
521             a1 = a;
522             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
523             p += wrap3;
524             lum += wrap;
525             YUVA_IN(y, u, v, a, p, pal);
526             u1 += u;
527             v1 += v;
528             a1 += a;
529             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
530             cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
531             cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
532             cb++;
533             cr++;
534             p += -wrap3 + BPP;
535             lum += -wrap + 1;
536         }
537         for(w = dstw - (dstx & 1); w >= 2; w -= 2) {
538             YUVA_IN(y, u, v, a, p, pal);
539             u1 = u;
540             v1 = v;
541             a1 = a;
542             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
543
544             YUVA_IN(y, u, v, a, p + BPP, pal);
545             u1 += u;
546             v1 += v;
547             a1 += a;
548             lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
549             p += wrap3;
550             lum += wrap;
551
552             YUVA_IN(y, u, v, a, p, pal);
553             u1 += u;
554             v1 += v;
555             a1 += a;
556             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
557
558             YUVA_IN(y, u, v, a, p + BPP, pal);
559             u1 += u;
560             v1 += v;
561             a1 += a;
562             lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
563
564             cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 2);
565             cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 2);
566
567             cb++;
568             cr++;
569             p += -wrap3 + 2 * BPP;
570             lum += -wrap + 2;
571         }
572         if (w) {
573             YUVA_IN(y, u, v, a, p, pal);
574             u1 = u;
575             v1 = v;
576             a1 = a;
577             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
578             p += wrap3;
579             lum += wrap;
580             YUVA_IN(y, u, v, a, p, pal);
581             u1 += u;
582             v1 += v;
583             a1 += a;
584             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
585             cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
586             cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
587             cb++;
588             cr++;
589             p += -wrap3 + BPP;
590             lum += -wrap + 1;
591         }
592         p += wrap3 + (wrap3 - dstw * BPP);
593         lum += wrap + (wrap - dstw - dstx);
594         cb += dst->linesize[1] - width2 - skip2;
595         cr += dst->linesize[2] - width2 - skip2;
596     }
597     /* handle odd height */
598     if (h) {
599         lum += dstx;
600         cb += skip2;
601         cr += skip2;
602
603         if (dstx & 1) {
604             YUVA_IN(y, u, v, a, p, pal);
605             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
606             cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
607             cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
608             cb++;
609             cr++;
610             lum++;
611             p += BPP;
612         }
613         for(w = dstw - (dstx & 1); w >= 2; w -= 2) {
614             YUVA_IN(y, u, v, a, p, pal);
615             u1 = u;
616             v1 = v;
617             a1 = a;
618             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
619
620             YUVA_IN(y, u, v, a, p + BPP, pal);
621             u1 += u;
622             v1 += v;
623             a1 += a;
624             lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
625             cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u, 1);
626             cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v, 1);
627             cb++;
628             cr++;
629             p += 2 * BPP;
630             lum += 2;
631         }
632         if (w) {
633             YUVA_IN(y, u, v, a, p, pal);
634             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
635             cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
636             cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
637         }
638     }
639 }
640
641 static void free_subpicture(SubPicture *sp)
642 {
643     avsubtitle_free(&sp->sub);
644 }
645
/* Display the picture at the read index of the picture queue: blend any
   due subtitle into the overlay, then blit it centered and aspect-correct
   inside the window rectangle. */
static void video_image_display(VideoState *is)
{
    VideoPicture *vp;
    SubPicture *sp;
    AVPicture pict;
    float aspect_ratio;
    int width, height, x, y;
    SDL_Rect rect;
    int i;

    vp = &is->pictq[is->pictq_rindex];
    if (vp->bmp) {
#if CONFIG_AVFILTER
         if (vp->picref->video->pixel_aspect.num == 0)
             aspect_ratio = 0;
         else
             aspect_ratio = av_q2d(vp->picref->video->pixel_aspect);
#else

        /* XXX: use variable in the frame */
        /* prefer the container's SAR, fall back to the codec's */
        if (is->video_st->sample_aspect_ratio.num)
            aspect_ratio = av_q2d(is->video_st->sample_aspect_ratio);
        else if (is->video_st->codec->sample_aspect_ratio.num)
            aspect_ratio = av_q2d(is->video_st->codec->sample_aspect_ratio);
        else
            aspect_ratio = 0;
#endif
        if (aspect_ratio <= 0.0)
            aspect_ratio = 1.0;
        /* convert sample aspect ratio to display aspect ratio */
        aspect_ratio *= (float)vp->width / (float)vp->height;

        if (is->subtitle_st)
        {
            if (is->subpq_size > 0)
            {
                sp = &is->subpq[is->subpq_rindex];

                /* only blend the subtitle once its start time is reached */
                if (vp->pts >= sp->pts + ((float) sp->sub.start_display_time / 1000))
                {
                    SDL_LockYUVOverlay (vp->bmp);

                    /* planes 1 and 2 are swapped: the SDL overlay stores
                       V before U, while AVPicture expects U then V */
                    pict.data[0] = vp->bmp->pixels[0];
                    pict.data[1] = vp->bmp->pixels[2];
                    pict.data[2] = vp->bmp->pixels[1];

                    pict.linesize[0] = vp->bmp->pitches[0];
                    pict.linesize[1] = vp->bmp->pitches[2];
                    pict.linesize[2] = vp->bmp->pitches[1];

                    for (i = 0; i < sp->sub.num_rects; i++)
                        blend_subrect(&pict, sp->sub.rects[i],
                                      vp->bmp->w, vp->bmp->h);

                    SDL_UnlockYUVOverlay (vp->bmp);
                }
            }
        }


        /* XXX: we suppose the screen has a 1.0 pixel ratio */
        /* letterbox: fit the widest dimension, keep sizes even */
        height = is->height;
        width = ((int)rint(height * aspect_ratio)) & ~1;
        if (width > is->width) {
            width = is->width;
            height = ((int)rint(width / aspect_ratio)) & ~1;
        }
        x = (is->width - width) / 2;
        y = (is->height - height) / 2;
        is->no_background = 0;
        rect.x = is->xleft + x;
        rect.y = is->ytop  + y;
        rect.w = width;
        rect.h = height;
        SDL_DisplayYUVOverlay(vp->bmp, &rect);
    }
}
722
723 /* get the current audio output buffer size, in samples. With SDL, we
724    cannot have a precise information */
725 static int audio_write_get_buf_size(VideoState *is)
726 {
727     return is->audio_buf_size - is->audio_buf_index;
728 }
729
/* Mathematical modulo: map a into [0, b) even when a is negative
   (C's % operator can yield a negative remainder). */
static inline int compute_mod(int a, int b)
{
    int r = a % b;
    return r < 0 ? r + b : r;
}
738
739 static void video_audio_display(VideoState *s)
740 {
741     int i, i_start, x, y1, y, ys, delay, n, nb_display_channels;
742     int ch, channels, h, h2, bgcolor, fgcolor;
743     int16_t time_diff;
744     int rdft_bits, nb_freq;
745
746     for(rdft_bits=1; (1<<rdft_bits)<2*s->height; rdft_bits++)
747         ;
748     nb_freq= 1<<(rdft_bits-1);
749
750     /* compute display index : center on currently output samples */
751     channels = s->audio_st->codec->channels;
752     nb_display_channels = channels;
753     if (!s->paused) {
754         int data_used= s->show_audio==1 ? s->width : (2*nb_freq);
755         n = 2 * channels;
756         delay = audio_write_get_buf_size(s);
757         delay /= n;
758
759         /* to be more precise, we take into account the time spent since
760            the last buffer computation */
761         if (audio_callback_time) {
762             time_diff = av_gettime() - audio_callback_time;
763             delay -= (time_diff * s->audio_st->codec->sample_rate) / 1000000;
764         }
765
766         delay += 2*data_used;
767         if (delay < data_used)
768             delay = data_used;
769
770         i_start= x = compute_mod(s->sample_array_index - delay * channels, SAMPLE_ARRAY_SIZE);
771         if(s->show_audio==1){
772             h= INT_MIN;
773             for(i=0; i<1000; i+=channels){
774                 int idx= (SAMPLE_ARRAY_SIZE + x - i) % SAMPLE_ARRAY_SIZE;
775                 int a= s->sample_array[idx];
776                 int b= s->sample_array[(idx + 4*channels)%SAMPLE_ARRAY_SIZE];
777                 int c= s->sample_array[(idx + 5*channels)%SAMPLE_ARRAY_SIZE];
778                 int d= s->sample_array[(idx + 9*channels)%SAMPLE_ARRAY_SIZE];
779                 int score= a-d;
780                 if(h<score && (b^c)<0){
781                     h= score;
782                     i_start= idx;
783                 }
784             }
785         }
786
787         s->last_i_start = i_start;
788     } else {
789         i_start = s->last_i_start;
790     }
791
792     bgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0x00);
793     if(s->show_audio==1){
794         fill_rectangle(screen,
795                        s->xleft, s->ytop, s->width, s->height,
796                        bgcolor);
797
798         fgcolor = SDL_MapRGB(screen->format, 0xff, 0xff, 0xff);
799
800         /* total height for one channel */
801         h = s->height / nb_display_channels;
802         /* graph height / 2 */
803         h2 = (h * 9) / 20;
804         for(ch = 0;ch < nb_display_channels; ch++) {
805             i = i_start + ch;
806             y1 = s->ytop + ch * h + (h / 2); /* position of center line */
807             for(x = 0; x < s->width; x++) {
808                 y = (s->sample_array[i] * h2) >> 15;
809                 if (y < 0) {
810                     y = -y;
811                     ys = y1 - y;
812                 } else {
813                     ys = y1;
814                 }
815                 fill_rectangle(screen,
816                                s->xleft + x, ys, 1, y,
817                                fgcolor);
818                 i += channels;
819                 if (i >= SAMPLE_ARRAY_SIZE)
820                     i -= SAMPLE_ARRAY_SIZE;
821             }
822         }
823
824         fgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0xff);
825
826         for(ch = 1;ch < nb_display_channels; ch++) {
827             y = s->ytop + ch * h;
828             fill_rectangle(screen,
829                            s->xleft, y, s->width, 1,
830                            fgcolor);
831         }
832         SDL_UpdateRect(screen, s->xleft, s->ytop, s->width, s->height);
833     }else{
834         nb_display_channels= FFMIN(nb_display_channels, 2);
835         if(rdft_bits != s->rdft_bits){
836             av_rdft_end(s->rdft);
837             av_free(s->rdft_data);
838             s->rdft = av_rdft_init(rdft_bits, DFT_R2C);
839             s->rdft_bits= rdft_bits;
840             s->rdft_data= av_malloc(4*nb_freq*sizeof(*s->rdft_data));
841         }
842         {
843             FFTSample *data[2];
844             for(ch = 0;ch < nb_display_channels; ch++) {
845                 data[ch] = s->rdft_data + 2*nb_freq*ch;
846                 i = i_start + ch;
847                 for(x = 0; x < 2*nb_freq; x++) {
848                     double w= (x-nb_freq)*(1.0/nb_freq);
849                     data[ch][x]= s->sample_array[i]*(1.0-w*w);
850                     i += channels;
851                     if (i >= SAMPLE_ARRAY_SIZE)
852                         i -= SAMPLE_ARRAY_SIZE;
853                 }
854                 av_rdft_calc(s->rdft, data[ch]);
855             }
856             //least efficient way to do this, we should of course directly access it but its more than fast enough
857             for(y=0; y<s->height; y++){
858                 double w= 1/sqrt(nb_freq);
859                 int a= sqrt(w*sqrt(data[0][2*y+0]*data[0][2*y+0] + data[0][2*y+1]*data[0][2*y+1]));
860                 int b= (nb_display_channels == 2 ) ? sqrt(w*sqrt(data[1][2*y+0]*data[1][2*y+0]
861                        + data[1][2*y+1]*data[1][2*y+1])) : a;
862                 a= FFMIN(a,255);
863                 b= FFMIN(b,255);
864                 fgcolor = SDL_MapRGB(screen->format, a, b, (a+b)/2);
865
866                 fill_rectangle(screen,
867                             s->xpos, s->height-y, 1, 1,
868                             fgcolor);
869             }
870         }
871         SDL_UpdateRect(screen, s->xpos, s->ytop, 1, s->height);
872         s->xpos++;
873         if(s->xpos >= s->width)
874             s->xpos= s->xleft;
875     }
876 }
877
/* (Re)create the SDL video surface. Size priority: fullscreen dimensions,
   then a user-forced size, then the filter/stream frame size, else 640x480.
   Returns 0 on success (including "already the right size"), -1 on failure. */
static int video_open(VideoState *is){
    int flags = SDL_HWSURFACE|SDL_ASYNCBLIT|SDL_HWACCEL;
    int w,h;

    if(is_full_screen) flags |= SDL_FULLSCREEN;
    else               flags |= SDL_RESIZABLE;

    if (is_full_screen && fs_screen_width) {
        w = fs_screen_width;
        h = fs_screen_height;
    } else if(!is_full_screen && screen_width){
        w = screen_width;
        h = screen_height;
#if CONFIG_AVFILTER
    }else if (is->out_video_filter && is->out_video_filter->inputs[0]){
        w = is->out_video_filter->inputs[0]->w;
        h = is->out_video_filter->inputs[0]->h;
#else
    }else if (is->video_st && is->video_st->codec->width){
        w = is->video_st->codec->width;
        h = is->video_st->codec->height;
#endif
    } else {
        w = 640;
        h = 480;
    }
    /* nothing to do if the current surface already has this size */
    if(screen && is->width == screen->w && screen->w == w
       && is->height== screen->h && screen->h == h)
        return 0;

#if defined(__APPLE__) && !SDL_VERSION_ATLEAST(1, 2, 14)
    /* setting bits_per_pixel = 0 or 32 causes blank video on OS X and older SDL */
    screen = SDL_SetVideoMode(w, h, 24, flags);
#else
    screen = SDL_SetVideoMode(w, h, 0, flags);
#endif
    if (!screen) {
        fprintf(stderr, "SDL: could not set video mode - exiting\n");
        return -1;
    }
    if (!window_title)
        window_title = input_filename;
    SDL_WM_SetCaption(window_title, window_title);

    is->width = screen->w;
    is->height = screen->h;

    return 0;
}
927
928 /* display the current picture, if any */
929 static void video_display(VideoState *is)
930 {
931     if(!screen)
932         video_open(cur_stream);
933     if (is->audio_st && is->show_audio)
934         video_audio_display(is);
935     else if (is->video_st)
936         video_image_display(is);
937 }
938
939 static int refresh_thread(void *opaque)
940 {
941     VideoState *is= opaque;
942     while(!is->abort_request){
943         SDL_Event event;
944         event.type = FF_REFRESH_EVENT;
945         event.user.data1 = opaque;
946         if(!is->refresh){
947             is->refresh=1;
948             SDL_PushEvent(&event);
949         }
950         usleep(is->audio_st && is->show_audio ? rdftspeed*1000 : 5000); //FIXME ideally we should wait the correct time but SDLs event passing is so slow it would be silly
951     }
952     return 0;
953 }
954
955 /* get the current audio clock value */
956 static double get_audio_clock(VideoState *is)
957 {
958     double pts;
959     int hw_buf_size, bytes_per_sec;
960     pts = is->audio_clock;
961     hw_buf_size = audio_write_get_buf_size(is);
962     bytes_per_sec = 0;
963     if (is->audio_st) {
964         bytes_per_sec = is->audio_st->codec->sample_rate *
965             2 * is->audio_st->codec->channels;
966     }
967     if (bytes_per_sec)
968         pts -= (double)hw_buf_size / bytes_per_sec;
969     return pts;
970 }
971
972 /* get the current video clock value */
973 static double get_video_clock(VideoState *is)
974 {
975     if (is->paused) {
976         return is->video_current_pts;
977     } else {
978         return is->video_current_pts_drift + av_gettime() / 1000000.0;
979     }
980 }
981
982 /* get the current external clock value */
983 static double get_external_clock(VideoState *is)
984 {
985     int64_t ti;
986     ti = av_gettime();
987     return is->external_clock + ((ti - is->external_clock_time) * 1e-6);
988 }
989
990 /* get the current master clock value */
991 static double get_master_clock(VideoState *is)
992 {
993     double val;
994
995     if (is->av_sync_type == AV_SYNC_VIDEO_MASTER) {
996         if (is->video_st)
997             val = get_video_clock(is);
998         else
999             val = get_audio_clock(is);
1000     } else if (is->av_sync_type == AV_SYNC_AUDIO_MASTER) {
1001         if (is->audio_st)
1002             val = get_audio_clock(is);
1003         else
1004             val = get_video_clock(is);
1005     } else {
1006         val = get_external_clock(is);
1007     }
1008     return val;
1009 }
1010
1011 /* seek in the stream */
1012 static void stream_seek(VideoState *is, int64_t pos, int64_t rel, int seek_by_bytes)
1013 {
1014     if (!is->seek_req) {
1015         is->seek_pos = pos;
1016         is->seek_rel = rel;
1017         is->seek_flags &= ~AVSEEK_FLAG_BYTE;
1018         if (seek_by_bytes)
1019             is->seek_flags |= AVSEEK_FLAG_BYTE;
1020         is->seek_req = 1;
1021     }
1022 }
1023
/* pause or resume the video */
static void stream_pause(VideoState *is)
{
    if (is->paused) {
        /* resuming: advance frame_timer by the wall-clock time spent
         * paused so the display schedule does not jump forward */
        is->frame_timer += av_gettime() / 1000000.0 + is->video_current_pts_drift - is->video_current_pts;
        if(is->read_pause_return != AVERROR(ENOSYS)){
            /* the demuxer honoured av_read_pause(): re-anchor the current
             * pts to "now" before recomputing the drift below */
            is->video_current_pts = is->video_current_pts_drift + av_gettime() / 1000000.0;
        }
        /* recompute drift so get_video_clock() is continuous across the pause */
        is->video_current_pts_drift = is->video_current_pts - av_gettime() / 1000000.0;
    }
    is->paused = !is->paused;
}
1036
1037 static double compute_target_time(double frame_current_pts, VideoState *is)
1038 {
1039     double delay, sync_threshold, diff;
1040
1041     /* compute nominal delay */
1042     delay = frame_current_pts - is->frame_last_pts;
1043     if (delay <= 0 || delay >= 10.0) {
1044         /* if incorrect delay, use previous one */
1045         delay = is->frame_last_delay;
1046     } else {
1047         is->frame_last_delay = delay;
1048     }
1049     is->frame_last_pts = frame_current_pts;
1050
1051     /* update delay to follow master synchronisation source */
1052     if (((is->av_sync_type == AV_SYNC_AUDIO_MASTER && is->audio_st) ||
1053          is->av_sync_type == AV_SYNC_EXTERNAL_CLOCK)) {
1054         /* if video is slave, we try to correct big delays by
1055            duplicating or deleting a frame */
1056         diff = get_video_clock(is) - get_master_clock(is);
1057
1058         /* skip or repeat frame. We take into account the
1059            delay to compute the threshold. I still don't know
1060            if it is the best guess */
1061         sync_threshold = FFMAX(AV_SYNC_THRESHOLD, delay);
1062         if (fabs(diff) < AV_NOSYNC_THRESHOLD) {
1063             if (diff <= -sync_threshold)
1064                 delay = 0;
1065             else if (diff >= sync_threshold)
1066                 delay = 2 * delay;
1067         }
1068     }
1069     is->frame_timer += delay;
1070
1071     av_dlog(NULL, "video: delay=%0.3f pts=%0.3f A-V=%f\n",
1072             delay, frame_current_pts, -diff);
1073
1074     return is->frame_timer;
1075 }
1076
/* called to display each frame: pops due pictures from the picture queue,
 * drops subtitles that have expired, renders the display, and (when
 * -stats is enabled) prints the periodic status line */
static void video_refresh_timer(void *opaque)
{
    VideoState *is = opaque;
    VideoPicture *vp;

    SubPicture *sp, *sp2;

    if (is->video_st) {
retry:
        if (is->pictq_size == 0) {
            //nothing to do, no picture to display in the que
        } else {
            double time= av_gettime()/1000000.0;
            double next_target;
            /* dequeue the picture */
            vp = &is->pictq[is->pictq_rindex];

            /* not yet time to show this frame */
            if(time < vp->target_clock)
                return;
            /* update current video pts */
            is->video_current_pts = vp->pts;
            is->video_current_pts_drift = is->video_current_pts - time;
            is->video_current_pos = vp->pos;
            /* determine when the following frame is due, so we can tell
             * whether this one is already late */
            if(is->pictq_size > 1){
                VideoPicture *nextvp= &is->pictq[(is->pictq_rindex+1)%VIDEO_PICTURE_QUEUE_SIZE];
                assert(nextvp->target_clock >= vp->target_clock);
                next_target= nextvp->target_clock;
            }else{
                next_target= vp->target_clock + is->video_clock - vp->pts; //FIXME pass durations cleanly
            }
            /* frame dropping: if we are past the next frame's deadline,
             * raise the skip factor and possibly discard this picture */
            if(framedrop && time > next_target){
                is->skip_frames *= 1.0 + FRAME_SKIP_FACTOR;
                if(is->pictq_size > 1 || time > next_target + 0.5){
                    /* update queue size and signal for next picture */
                    if (++is->pictq_rindex == VIDEO_PICTURE_QUEUE_SIZE)
                        is->pictq_rindex = 0;

                    SDL_LockMutex(is->pictq_mutex);
                    is->pictq_size--;
                    SDL_CondSignal(is->pictq_cond);
                    SDL_UnlockMutex(is->pictq_mutex);
                    goto retry;
                }
            }

            if(is->subtitle_st) {
                if (is->subtitle_stream_changed) {
                    /* subtitle stream switched: drain the whole subtitle queue */
                    SDL_LockMutex(is->subpq_mutex);

                    while (is->subpq_size) {
                        free_subpicture(&is->subpq[is->subpq_rindex]);

                        /* update queue size and signal for next picture */
                        if (++is->subpq_rindex == SUBPICTURE_QUEUE_SIZE)
                            is->subpq_rindex = 0;

                        is->subpq_size--;
                    }
                    is->subtitle_stream_changed = 0;

                    SDL_CondSignal(is->subpq_cond);
                    SDL_UnlockMutex(is->subpq_mutex);
                } else {
                    if (is->subpq_size > 0) {
                        sp = &is->subpq[is->subpq_rindex];

                        if (is->subpq_size > 1)
                            sp2 = &is->subpq[(is->subpq_rindex + 1) % SUBPICTURE_QUEUE_SIZE];
                        else
                            sp2 = NULL;

                        /* drop the head subtitle once its display window has
                         * ended or the next subtitle has become current */
                        if ((is->video_current_pts > (sp->pts + ((float) sp->sub.end_display_time / 1000)))
                                || (sp2 && is->video_current_pts > (sp2->pts + ((float) sp2->sub.start_display_time / 1000))))
                        {
                            free_subpicture(sp);

                            /* update queue size and signal for next picture */
                            if (++is->subpq_rindex == SUBPICTURE_QUEUE_SIZE)
                                is->subpq_rindex = 0;

                            SDL_LockMutex(is->subpq_mutex);
                            is->subpq_size--;
                            SDL_CondSignal(is->subpq_cond);
                            SDL_UnlockMutex(is->subpq_mutex);
                        }
                    }
                }
            }

            /* display picture */
            if (!display_disable)
                video_display(is);

            /* update queue size and signal for next picture */
            if (++is->pictq_rindex == VIDEO_PICTURE_QUEUE_SIZE)
                is->pictq_rindex = 0;

            SDL_LockMutex(is->pictq_mutex);
            is->pictq_size--;
            SDL_CondSignal(is->pictq_cond);
            SDL_UnlockMutex(is->pictq_mutex);
        }
    } else if (is->audio_st) {
        /* draw the next audio frame */

        /* if only audio stream, then display the audio bars (better
           than nothing, just to test the implementation */

        /* display picture */
        if (!display_disable)
            video_display(is);
    }
    if (show_status) {
        /* print the status line at most every 30 ms */
        static int64_t last_time;
        int64_t cur_time;
        int aqsize, vqsize, sqsize;
        double av_diff;

        cur_time = av_gettime();
        if (!last_time || (cur_time - last_time) >= 30000) {
            aqsize = 0;
            vqsize = 0;
            sqsize = 0;
            if (is->audio_st)
                aqsize = is->audioq.size;
            if (is->video_st)
                vqsize = is->videoq.size;
            if (is->subtitle_st)
                sqsize = is->subtitleq.size;
            av_diff = 0;
            if (is->audio_st && is->video_st)
                av_diff = get_audio_clock(is) - get_video_clock(is);
            printf("%7.2f A-V:%7.3f s:%3.1f aq=%5dKB vq=%5dKB sq=%5dB f=%"PRId64"/%"PRId64"   \r",
                   get_master_clock(is), av_diff, FFMAX(is->skip_frames-1, 0), aqsize / 1024, vqsize / 1024, sqsize, is->pts_ctx.num_faulty_dts, is->pts_ctx.num_faulty_pts);
            fflush(stdout);
            last_time = cur_time;
        }
    }
}
1217
1218 static void stream_close(VideoState *is)
1219 {
1220     VideoPicture *vp;
1221     int i;
1222     /* XXX: use a special url_shutdown call to abort parse cleanly */
1223     is->abort_request = 1;
1224     SDL_WaitThread(is->parse_tid, NULL);
1225     SDL_WaitThread(is->refresh_tid, NULL);
1226
1227     /* free all pictures */
1228     for(i=0;i<VIDEO_PICTURE_QUEUE_SIZE; i++) {
1229         vp = &is->pictq[i];
1230 #if CONFIG_AVFILTER
1231         if (vp->picref) {
1232             avfilter_unref_buffer(vp->picref);
1233             vp->picref = NULL;
1234         }
1235 #endif
1236         if (vp->bmp) {
1237             SDL_FreeYUVOverlay(vp->bmp);
1238             vp->bmp = NULL;
1239         }
1240     }
1241     SDL_DestroyMutex(is->pictq_mutex);
1242     SDL_DestroyCond(is->pictq_cond);
1243     SDL_DestroyMutex(is->subpq_mutex);
1244     SDL_DestroyCond(is->subpq_cond);
1245 #if !CONFIG_AVFILTER
1246     if (is->img_convert_ctx)
1247         sws_freeContext(is->img_convert_ctx);
1248 #endif
1249     av_free(is);
1250 }
1251
/* Tear everything down (stream, options, filter system, network, SDL)
 * and terminate the process. Never returns. */
static void do_exit(void)
{
    if (cur_stream) {
        stream_close(cur_stream);
        cur_stream = NULL;
    }
    uninit_opts();
#if CONFIG_AVFILTER
    avfilter_uninit();
#endif
    avformat_network_deinit();
    if (show_status)
        printf("\n");
    SDL_Quit();
    /* flush any pending log output before exiting */
    av_log(NULL, AV_LOG_QUIET, "");
    exit(0);
}
1269
/* allocate a picture (needs to do that in main thread to avoid
   potential locking problems */
static void alloc_picture(void *opaque)
{
    VideoState *is = opaque;
    VideoPicture *vp;

    vp = &is->pictq[is->pictq_windex];

    /* drop any previous overlay before allocating a new one */
    if (vp->bmp)
        SDL_FreeYUVOverlay(vp->bmp);

#if CONFIG_AVFILTER
    if (vp->picref)
        avfilter_unref_buffer(vp->picref);
    vp->picref = NULL;

    /* with filters, the overlay must match the graph output geometry */
    vp->width   = is->out_video_filter->inputs[0]->w;
    vp->height  = is->out_video_filter->inputs[0]->h;
    vp->pix_fmt = is->out_video_filter->inputs[0]->format;
#else
    vp->width   = is->video_st->codec->width;
    vp->height  = is->video_st->codec->height;
    vp->pix_fmt = is->video_st->codec->pix_fmt;
#endif

    vp->bmp = SDL_CreateYUVOverlay(vp->width, vp->height,
                                   SDL_YV12_OVERLAY,
                                   screen);
    if (!vp->bmp || vp->bmp->pitches[0] < vp->width) {
        /* SDL allocates a buffer smaller than requested if the video
         * overlay hardware is unable to support the requested size. */
        fprintf(stderr, "Error: the video system does not support an image\n"
                        "size of %dx%d pixels. Try using -lowres or -vf \"scale=w:h\"\n"
                        "to reduce the image size.\n", vp->width, vp->height );
        do_exit();
    }

    /* wake queue_picture(), which is blocked waiting for this allocation */
    SDL_LockMutex(is->pictq_mutex);
    vp->allocated = 1;
    SDL_CondSignal(is->pictq_cond);
    SDL_UnlockMutex(is->pictq_mutex);
}
1313
1314 /**
1315  *
1316  * @param pts the dts of the pkt / pts of the frame and guessed if not known
1317  */
1318 static int queue_picture(VideoState *is, AVFrame *src_frame, double pts, int64_t pos)
1319 {
1320     VideoPicture *vp;
1321 #if CONFIG_AVFILTER
1322     AVPicture pict_src;
1323 #else
1324     int dst_pix_fmt = PIX_FMT_YUV420P;
1325 #endif
1326     /* wait until we have space to put a new picture */
1327     SDL_LockMutex(is->pictq_mutex);
1328
1329     if(is->pictq_size>=VIDEO_PICTURE_QUEUE_SIZE && !is->refresh)
1330         is->skip_frames= FFMAX(1.0 - FRAME_SKIP_FACTOR, is->skip_frames * (1.0-FRAME_SKIP_FACTOR));
1331
1332     while (is->pictq_size >= VIDEO_PICTURE_QUEUE_SIZE &&
1333            !is->videoq.abort_request) {
1334         SDL_CondWait(is->pictq_cond, is->pictq_mutex);
1335     }
1336     SDL_UnlockMutex(is->pictq_mutex);
1337
1338     if (is->videoq.abort_request)
1339         return -1;
1340
1341     vp = &is->pictq[is->pictq_windex];
1342
1343     /* alloc or resize hardware picture buffer */
1344     if (!vp->bmp || vp->reallocate ||
1345 #if CONFIG_AVFILTER
1346         vp->width  != is->out_video_filter->inputs[0]->w ||
1347         vp->height != is->out_video_filter->inputs[0]->h) {
1348 #else
1349         vp->width != is->video_st->codec->width ||
1350         vp->height != is->video_st->codec->height) {
1351 #endif
1352         SDL_Event event;
1353
1354         vp->allocated  = 0;
1355         vp->reallocate = 0;
1356
1357         /* the allocation must be done in the main thread to avoid
1358            locking problems */
1359         event.type = FF_ALLOC_EVENT;
1360         event.user.data1 = is;
1361         SDL_PushEvent(&event);
1362
1363         /* wait until the picture is allocated */
1364         SDL_LockMutex(is->pictq_mutex);
1365         while (!vp->allocated && !is->videoq.abort_request) {
1366             SDL_CondWait(is->pictq_cond, is->pictq_mutex);
1367         }
1368         SDL_UnlockMutex(is->pictq_mutex);
1369
1370         if (is->videoq.abort_request)
1371             return -1;
1372     }
1373
1374     /* if the frame is not skipped, then display it */
1375     if (vp->bmp) {
1376         AVPicture pict;
1377 #if CONFIG_AVFILTER
1378         if(vp->picref)
1379             avfilter_unref_buffer(vp->picref);
1380         vp->picref = src_frame->opaque;
1381 #endif
1382
1383         /* get a pointer on the bitmap */
1384         SDL_LockYUVOverlay (vp->bmp);
1385
1386         memset(&pict,0,sizeof(AVPicture));
1387         pict.data[0] = vp->bmp->pixels[0];
1388         pict.data[1] = vp->bmp->pixels[2];
1389         pict.data[2] = vp->bmp->pixels[1];
1390
1391         pict.linesize[0] = vp->bmp->pitches[0];
1392         pict.linesize[1] = vp->bmp->pitches[2];
1393         pict.linesize[2] = vp->bmp->pitches[1];
1394
1395 #if CONFIG_AVFILTER
1396         pict_src.data[0] = src_frame->data[0];
1397         pict_src.data[1] = src_frame->data[1];
1398         pict_src.data[2] = src_frame->data[2];
1399
1400         pict_src.linesize[0] = src_frame->linesize[0];
1401         pict_src.linesize[1] = src_frame->linesize[1];
1402         pict_src.linesize[2] = src_frame->linesize[2];
1403
1404         //FIXME use direct rendering
1405         av_picture_copy(&pict, &pict_src,
1406                         vp->pix_fmt, vp->width, vp->height);
1407 #else
1408         sws_flags = av_get_int(sws_opts, "sws_flags", NULL);
1409         is->img_convert_ctx = sws_getCachedContext(is->img_convert_ctx,
1410             vp->width, vp->height, vp->pix_fmt, vp->width, vp->height,
1411             dst_pix_fmt, sws_flags, NULL, NULL, NULL);
1412         if (is->img_convert_ctx == NULL) {
1413             fprintf(stderr, "Cannot initialize the conversion context\n");
1414             exit(1);
1415         }
1416         sws_scale(is->img_convert_ctx, src_frame->data, src_frame->linesize,
1417                   0, vp->height, pict.data, pict.linesize);
1418 #endif
1419         /* update the bitmap content */
1420         SDL_UnlockYUVOverlay(vp->bmp);
1421
1422         vp->pts = pts;
1423         vp->pos = pos;
1424
1425         /* now we can update the picture count */
1426         if (++is->pictq_windex == VIDEO_PICTURE_QUEUE_SIZE)
1427             is->pictq_windex = 0;
1428         SDL_LockMutex(is->pictq_mutex);
1429         vp->target_clock= compute_target_time(vp->pts, is);
1430
1431         is->pictq_size++;
1432         SDL_UnlockMutex(is->pictq_mutex);
1433     }
1434     return 0;
1435 }
1436
1437 /**
1438  * compute the exact PTS for the picture if it is omitted in the stream
1439  * @param pts1 the dts of the pkt / pts of the frame
1440  */
1441 static int output_picture2(VideoState *is, AVFrame *src_frame, double pts1, int64_t pos)
1442 {
1443     double frame_delay, pts;
1444
1445     pts = pts1;
1446
1447     if (pts != 0) {
1448         /* update video clock with pts, if present */
1449         is->video_clock = pts;
1450     } else {
1451         pts = is->video_clock;
1452     }
1453     /* update video clock for next frame */
1454     frame_delay = av_q2d(is->video_st->codec->time_base);
1455     /* for MPEG2, the frame can be repeated, so we update the
1456        clock accordingly */
1457     frame_delay += src_frame->repeat_pict * (frame_delay * 0.5);
1458     is->video_clock += frame_delay;
1459
1460     return queue_picture(is, src_frame, pts, pos);
1461 }
1462
/* Fetch the next packet from the video queue and decode it.
 * Handles the flush packet issued on seek by resetting decoder and
 * timing state.
 * @param pts receives the chosen frame timestamp (0 if unknown)
 * @return 1 when a frame should be displayed, 0 to continue decoding
 *         (no frame / dropped frame / flush), -1 on abort */
static int get_video_frame(VideoState *is, AVFrame *frame, int64_t *pts, AVPacket *pkt)
{
    int got_picture, i;

    if (packet_queue_get(&is->videoq, pkt, 1) < 0)
        return -1;

    if (pkt->data == flush_pkt.data) {
        /* seek flush: reset the decoder and wait for the displayed
         * pictures to drain before reinitializing the timing state */
        avcodec_flush_buffers(is->video_st->codec);

        SDL_LockMutex(is->pictq_mutex);
        //Make sure there are no long delay timers (ideally we should just flush the que but thats harder)
        for (i = 0; i < VIDEO_PICTURE_QUEUE_SIZE; i++) {
            is->pictq[i].target_clock= 0;
        }
        while (is->pictq_size && !is->videoq.abort_request) {
            SDL_CondWait(is->pictq_cond, is->pictq_mutex);
        }
        is->video_current_pos = -1;
        SDL_UnlockMutex(is->pictq_mutex);

        init_pts_correction(&is->pts_ctx);
        is->frame_last_pts = AV_NOPTS_VALUE;
        is->frame_last_delay = 0;
        is->frame_timer = (double)av_gettime() / 1000000.0;
        is->skip_frames = 1;
        is->skip_frames_index = 0;
        return 0;
    }

    avcodec_decode_video2(is->video_st->codec, frame, &got_picture, pkt);

    if (got_picture) {
        /* pick the timestamp source according to decoder_reorder_pts:
         * -1 = heuristic correction, 1 = frame pts, 0 = packet dts */
        if (decoder_reorder_pts == -1) {
            *pts = guess_correct_pts(&is->pts_ctx, frame->pkt_pts, frame->pkt_dts);
        } else if (decoder_reorder_pts) {
            *pts = frame->pkt_pts;
        } else {
            *pts = frame->pkt_dts;
        }

        if (*pts == AV_NOPTS_VALUE) {
            *pts = 0;
        }

        /* frame dropping: only report every skip_frames-th frame */
        is->skip_frames_index += 1;
        if(is->skip_frames_index >= is->skip_frames){
            is->skip_frames_index -= FFMAX(is->skip_frames, 1.0);
            return 1;
        }

    }
    return 0;
}
1517
1518 #if CONFIG_AVFILTER
/* Private state of the avplay source filter (input_filter). */
typedef struct {
    VideoState *is;    /* owning player state */
    AVFrame *frame;    /* scratch frame filled by the decoder */
    int use_dr1;       /* nonzero when direct-rendering callbacks are installed */
} FilterPriv;
1524
/* get_buffer callback (direct rendering): make the decoder render straight
 * into a buffer obtained from the filter graph, avoiding a copy later. */
static int input_get_buffer(AVCodecContext *codec, AVFrame *pic)
{
    AVFilterContext *ctx = codec->opaque;
    AVFilterBufferRef  *ref;
    int perms = AV_PERM_WRITE;
    int i, w, h, stride[4];
    unsigned edge;
    int pixel_size;

    if (codec->codec->capabilities & CODEC_CAP_NEG_LINESIZES)
        perms |= AV_PERM_NEG_LINESIZES;

    /* translate the decoder's buffer hints into filter-buffer permissions */
    if(pic->buffer_hints & FF_BUFFER_HINTS_VALID) {
        if(pic->buffer_hints & FF_BUFFER_HINTS_READABLE) perms |= AV_PERM_READ;
        if(pic->buffer_hints & FF_BUFFER_HINTS_PRESERVE) perms |= AV_PERM_PRESERVE;
        if(pic->buffer_hints & FF_BUFFER_HINTS_REUSABLE) perms |= AV_PERM_REUSE2;
    }
    if(pic->reference) perms |= AV_PERM_READ | AV_PERM_PRESERVE;

    /* pad the requested size for alignment and (unless EMU_EDGE) for the
     * decoder's edge area */
    w = codec->width;
    h = codec->height;
    avcodec_align_dimensions2(codec, &w, &h, stride);
    edge = codec->flags & CODEC_FLAG_EMU_EDGE ? 0 : avcodec_get_edge_width();
    w += edge << 1;
    h += edge << 1;

    if(!(ref = avfilter_get_video_buffer(ctx->outputs[0], perms, w, h)))
        return -1;

    pixel_size = av_pix_fmt_descriptors[ref->format].comp[0].step_minus1+1;
    ref->video->w = codec->width;
    ref->video->h = codec->height;
    for(i = 0; i < 4; i ++) {
        unsigned hshift = (i == 1 || i == 2) ? av_pix_fmt_descriptors[ref->format].log2_chroma_w : 0;
        unsigned vshift = (i == 1 || i == 2) ? av_pix_fmt_descriptors[ref->format].log2_chroma_h : 0;

        if (ref->data[i]) {
            /* advance past the edge so the decoder writes inside the
             * padded area */
            ref->data[i]    += ((edge * pixel_size) >> hshift) + ((edge * ref->linesize[i]) >> vshift);
        }
        pic->data[i]     = ref->data[i];
        pic->linesize[i] = ref->linesize[i];
    }
    /* stash the buffer ref so release/reget callbacks and the filter
     * source (via frame->opaque) can find it */
    pic->opaque = ref;
    pic->age    = INT_MAX;
    pic->type   = FF_BUFFER_TYPE_USER;
    pic->reordered_opaque = codec->reordered_opaque;
    if(codec->pkt) pic->pkt_pts = codec->pkt->pts;
    else           pic->pkt_pts = AV_NOPTS_VALUE;
    return 0;
}
1575
/* release_buffer callback: drop our filter-buffer reference and clear the
 * frame's data pointers so the decoder cannot reuse them */
static void input_release_buffer(AVCodecContext *codec, AVFrame *pic)
{
    memset(pic->data, 0, sizeof(pic->data));
    avfilter_unref_buffer(pic->opaque);
}
1581
/* reget_buffer callback: keep reusing the attached filter buffer, falling
 * back to a fresh allocation when the frame has no data yet, and failing
 * if the picture geometry/format changed under us */
static int input_reget_buffer(AVCodecContext *codec, AVFrame *pic)
{
    AVFilterBufferRef *ref = pic->opaque;

    if (pic->data[0] == NULL) {
        /* no buffer attached: request a readable one */
        pic->buffer_hints |= FF_BUFFER_HINTS_READABLE;
        return codec->get_buffer(codec, pic);
    }

    if ((codec->width != ref->video->w) || (codec->height != ref->video->h) ||
        (codec->pix_fmt != ref->format)) {
        av_log(codec, AV_LOG_ERROR, "Picture properties changed.\n");
        return -1;
    }

    pic->reordered_opaque = codec->reordered_opaque;
    if(codec->pkt) pic->pkt_pts = codec->pkt->pts;
    else           pic->pkt_pts = AV_NOPTS_VALUE;
    return 0;
}
1602
1603 static int input_init(AVFilterContext *ctx, const char *args, void *opaque)
1604 {
1605     FilterPriv *priv = ctx->priv;
1606     AVCodecContext *codec;
1607     if(!opaque) return -1;
1608
1609     priv->is = opaque;
1610     codec    = priv->is->video_st->codec;
1611     codec->opaque = ctx;
1612     if(codec->codec->capabilities & CODEC_CAP_DR1) {
1613         priv->use_dr1 = 1;
1614         codec->get_buffer     = input_get_buffer;
1615         codec->release_buffer = input_release_buffer;
1616         codec->reget_buffer   = input_reget_buffer;
1617         codec->thread_safe_callbacks = 1;
1618     }
1619
1620     priv->frame = avcodec_alloc_frame();
1621
1622     return 0;
1623 }
1624
/* uninit callback: free the scratch frame allocated in input_init() */
static void input_uninit(AVFilterContext *ctx)
{
    FilterPriv *priv = ctx->priv;
    av_free(priv->frame);
}
1630
/* request_frame callback: decode the next displayable video frame and push
 * it into the filter graph as one complete picture */
static int input_request_frame(AVFilterLink *link)
{
    FilterPriv *priv = link->src->priv;
    AVFilterBufferRef *picref;
    int64_t pts = 0;
    AVPacket pkt;
    int ret;

    /* loop until get_video_frame() yields a displayable frame (1) or
     * fails (<0); skipped frames return 0 and their packets are freed */
    while (!(ret = get_video_frame(priv->is, priv->frame, &pts, &pkt)))
        av_free_packet(&pkt);
    if (ret < 0)
        return -1;

    if(priv->use_dr1) {
        /* DR1: the decoder already rendered into a filter buffer, attached
         * to the frame in input_get_buffer() via frame->opaque */
        picref = avfilter_ref_buffer(priv->frame->opaque, ~0);
    } else {
        picref = avfilter_get_video_buffer(link, AV_PERM_WRITE, link->w, link->h);
        av_image_copy(picref->data, picref->linesize,
                      priv->frame->data, priv->frame->linesize,
                      picref->format, link->w, link->h);
    }
    /* frees only the packet payload; pkt.pos read below stays valid */
    av_free_packet(&pkt);

    picref->pts = pts;
    picref->pos = pkt.pos;
    picref->video->pixel_aspect = priv->is->video_st->codec->sample_aspect_ratio;
    avfilter_start_frame(link, picref);
    avfilter_draw_slice(link, 0, link->h, 1);
    avfilter_end_frame(link);

    return 0;
}
1663
/* query_formats callback: restrict the graph to the decoder's pixel format */
static int input_query_formats(AVFilterContext *ctx)
{
    FilterPriv *priv = ctx->priv;
    enum PixelFormat pix_fmts[] = {
        priv->is->video_st->codec->pix_fmt, PIX_FMT_NONE
    };

    avfilter_set_common_formats(ctx, avfilter_make_format_list(pix_fmts));
    return 0;
}
1674
1675 static int input_config_props(AVFilterLink *link)
1676 {
1677     FilterPriv *priv  = link->src->priv;
1678     AVCodecContext *c = priv->is->video_st->codec;
1679
1680     link->w = c->width;
1681     link->h = c->height;
1682     link->time_base = priv->is->video_st->time_base;
1683
1684     return 0;
1685 }
1686
/* Source filter feeding decoded frames from the video stream into the
 * filter graph: no input pads, one video output pad. */
static AVFilter input_filter =
{
    .name      = "avplay_input",

    .priv_size = sizeof(FilterPriv),

    .init      = input_init,
    .uninit    = input_uninit,

    .query_formats = input_query_formats,

    .inputs    = (AVFilterPad[]) {{ .name = NULL }},
    .outputs   = (AVFilterPad[]) {{ .name = "default",
                                    .type = AVMEDIA_TYPE_VIDEO,
                                    .request_frame = input_request_frame,
                                    .config_props  = input_config_props, },
                                  { .name = NULL }},
};
1705
1706 static int configure_video_filters(AVFilterGraph *graph, VideoState *is, const char *vfilters)
1707 {
1708     char sws_flags_str[128];
1709     int ret;
1710     FFSinkContext ffsink_ctx = { .pix_fmt = PIX_FMT_YUV420P };
1711     AVFilterContext *filt_src = NULL, *filt_out = NULL;
1712     snprintf(sws_flags_str, sizeof(sws_flags_str), "flags=%d", sws_flags);
1713     graph->scale_sws_opts = av_strdup(sws_flags_str);
1714
1715     if ((ret = avfilter_graph_create_filter(&filt_src, &input_filter, "src",
1716                                             NULL, is, graph)) < 0)
1717         return ret;
1718     if ((ret = avfilter_graph_create_filter(&filt_out, &ffsink, "out",
1719                                             NULL, &ffsink_ctx, graph)) < 0)
1720         return ret;
1721
1722     if(vfilters) {
1723         AVFilterInOut *outputs = av_malloc(sizeof(AVFilterInOut));
1724         AVFilterInOut *inputs  = av_malloc(sizeof(AVFilterInOut));
1725
1726         outputs->name    = av_strdup("in");
1727         outputs->filter_ctx = filt_src;
1728         outputs->pad_idx = 0;
1729         outputs->next    = NULL;
1730
1731         inputs->name    = av_strdup("out");
1732         inputs->filter_ctx = filt_out;
1733         inputs->pad_idx = 0;
1734         inputs->next    = NULL;
1735
1736         if ((ret = avfilter_graph_parse(graph, vfilters, inputs, outputs, NULL)) < 0)
1737             return ret;
1738         av_freep(&vfilters);
1739     } else {
1740         if ((ret = avfilter_link(filt_src, 0, filt_out, 0)) < 0)
1741             return ret;
1742     }
1743
1744     if ((ret = avfilter_graph_config(graph, NULL)) < 0)
1745         return ret;
1746
1747     is->out_video_filter = filt_out;
1748
1749     return ret;
1750 }
1751
1752 #endif  /* CONFIG_AVFILTER */
1753
/* Video decoding thread: pulls packets, decodes frames (optionally through
 * the filter graph), rescales their timestamps and queues them for display */
static int video_thread(void *arg)
{
    VideoState *is = arg;
    AVFrame *frame= avcodec_alloc_frame();
    int64_t pts_int;
    double pts;
    int ret;

#if CONFIG_AVFILTER
    AVFilterGraph *graph = avfilter_graph_alloc();
    AVFilterContext *filt_out = NULL;
    int64_t pos;
    int last_w = is->video_st->codec->width;
    int last_h = is->video_st->codec->height;

    if ((ret = configure_video_filters(graph, is, vfilters)) < 0)
        goto the_end;
    filt_out = is->out_video_filter;
#endif

    for(;;) {
#if !CONFIG_AVFILTER
        AVPacket pkt;
#else
        AVFilterBufferRef *picref;
        AVRational tb;
#endif
        while (is->paused && !is->videoq.abort_request)
            SDL_Delay(10);
#if CONFIG_AVFILTER
        /* rebuild the filter graph whenever the coded frame size changes */
        if (   last_w != is->video_st->codec->width
            || last_h != is->video_st->codec->height) {
            av_dlog(NULL, "Changing size %dx%d -> %dx%d\n", last_w, last_h,
                    is->video_st->codec->width, is->video_st->codec->height);
            avfilter_graph_free(&graph);
            graph = avfilter_graph_alloc();
            if ((ret = configure_video_filters(graph, is, vfilters)) < 0)
                goto the_end;
            filt_out = is->out_video_filter;
            last_w = is->video_st->codec->width;
            last_h = is->video_st->codec->height;
        }
        ret = get_filtered_video_frame(filt_out, frame, &picref, &tb);
        if (picref) {
            pts_int = picref->pts;
            pos     = picref->pos;
            frame->opaque = picref;
        }

        /* rescale from the filter link's time base to the stream's */
        if (av_cmp_q(tb, is->video_st->time_base)) {
            av_unused int64_t pts1 = pts_int;
            pts_int = av_rescale_q(pts_int, tb, is->video_st->time_base);
            av_dlog(NULL, "video_thread(): "
                    "tb:%d/%d pts:%"PRId64" -> tb:%d/%d pts:%"PRId64"\n",
                    tb.num, tb.den, pts1,
                    is->video_st->time_base.num, is->video_st->time_base.den, pts_int);
        }
#else
        ret = get_video_frame(is, frame, &pts_int, &pkt);
#endif

        if (ret < 0) goto the_end;

        /* ret == 0: no displayable frame this round (skipped/flushed) */
        if (!ret)
            continue;

        pts = pts_int*av_q2d(is->video_st->time_base);

#if CONFIG_AVFILTER
        ret = output_picture2(is, frame, pts, pos);
#else
        ret = output_picture2(is, frame, pts,  pkt.pos);
        av_free_packet(&pkt);
#endif
        if (ret < 0)
            goto the_end;

        /* single-step mode: pause again after each displayed frame */
        if (step)
            if (cur_stream)
                stream_pause(cur_stream);
    }
 the_end:
#if CONFIG_AVFILTER
    avfilter_graph_free(&graph);
#endif
    av_free(frame);
    return 0;
}
1842
/* Subtitle decoding thread: pulls subtitle packets off is->subtitleq,
 * decodes them, and converts bitmap-subtitle palettes from RGBA to YUVA
 * in place so the video refresh code can blend them over the picture.
 * Runs until the queue is aborted; always returns 0. */
static int subtitle_thread(void *arg)
{
    VideoState *is = arg;
    SubPicture *sp;
    AVPacket pkt1, *pkt = &pkt1;
    int got_subtitle;
    double pts;
    int i, j;
    int r, g, b, y, u, v, a;

    for(;;) {
        /* poll (10 ms steps) while playback is paused, unless aborting */
        while (is->paused && !is->subtitleq.abort_request) {
            SDL_Delay(10);
        }
        /* blocking get; negative return means the queue was aborted */
        if (packet_queue_get(&is->subtitleq, pkt, 1) < 0)
            break;

        if(pkt->data == flush_pkt.data){
            /* a seek happened: drop any buffered decoder state */
            avcodec_flush_buffers(is->subtitle_st->codec);
            continue;
        }
        /* wait until a slot is free in the subpicture queue */
        SDL_LockMutex(is->subpq_mutex);
        while (is->subpq_size >= SUBPICTURE_QUEUE_SIZE &&
               !is->subtitleq.abort_request) {
            SDL_CondWait(is->subpq_cond, is->subpq_mutex);
        }
        SDL_UnlockMutex(is->subpq_mutex);

        if (is->subtitleq.abort_request)
            return 0;

        /* write slot; only this thread advances subpq_windex */
        sp = &is->subpq[is->subpq_windex];

        /* NOTE: pts is the PTS of the _first_ picture beginning in
           this packet, if any */
        pts = 0;
        if (pkt->pts != AV_NOPTS_VALUE)
            pts = av_q2d(is->subtitle_st->time_base)*pkt->pts;

        /* NOTE(review): the return value is not checked; on decode error
           got_subtitle stays 0 and the packet is silently dropped below */
        avcodec_decode_subtitle2(is->subtitle_st->codec, &sp->sub,
                                 &got_subtitle, pkt);

        /* format 0 is presumably bitmap/graphics subtitles — the palette
           conversion below only makes sense for those */
        if (got_subtitle && sp->sub.format == 0) {
            sp->pts = pts;

            /* convert each rect's palette (pict.data[1]) from RGBA to YUVA */
            for (i = 0; i < sp->sub.num_rects; i++)
            {
                for (j = 0; j < sp->sub.rects[i]->nb_colors; j++)
                {
                    RGBA_IN(r, g, b, a, (uint32_t*)sp->sub.rects[i]->pict.data[1] + j);
                    y = RGB_TO_Y_CCIR(r, g, b);
                    u = RGB_TO_U_CCIR(r, g, b, 0);
                    v = RGB_TO_V_CCIR(r, g, b, 0);
                    YUVA_OUT((uint32_t*)sp->sub.rects[i]->pict.data[1] + j, y, u, v, a);
                }
            }

            /* now we can update the picture count */
            if (++is->subpq_windex == SUBPICTURE_QUEUE_SIZE)
                is->subpq_windex = 0;
            SDL_LockMutex(is->subpq_mutex);
            is->subpq_size++;
            SDL_UnlockMutex(is->subpq_mutex);
        }
        av_free_packet(pkt);
    }
    return 0;
}
1911
1912 /* copy samples for viewing in editor window */
1913 static void update_sample_display(VideoState *is, short *samples, int samples_size)
1914 {
1915     int size, len;
1916
1917     size = samples_size / sizeof(short);
1918     while (size > 0) {
1919         len = SAMPLE_ARRAY_SIZE - is->sample_array_index;
1920         if (len > size)
1921             len = size;
1922         memcpy(is->sample_array + is->sample_array_index, samples, len * sizeof(short));
1923         samples += len;
1924         is->sample_array_index += len;
1925         if (is->sample_array_index >= SAMPLE_ARRAY_SIZE)
1926             is->sample_array_index = 0;
1927         size -= len;
1928     }
1929 }
1930
1931 /* return the new audio buffer size (samples can be added or deleted
1932    to get better sync if video or external master clock) */
1933 static int synchronize_audio(VideoState *is, short *samples,
1934                              int samples_size1, double pts)
1935 {
1936     int n, samples_size;
1937     double ref_clock;
1938
1939     n = 2 * is->audio_st->codec->channels;
1940     samples_size = samples_size1;
1941
1942     /* if not master, then we try to remove or add samples to correct the clock */
1943     if (((is->av_sync_type == AV_SYNC_VIDEO_MASTER && is->video_st) ||
1944          is->av_sync_type == AV_SYNC_EXTERNAL_CLOCK)) {
1945         double diff, avg_diff;
1946         int wanted_size, min_size, max_size, nb_samples;
1947
1948         ref_clock = get_master_clock(is);
1949         diff = get_audio_clock(is) - ref_clock;
1950
1951         if (diff < AV_NOSYNC_THRESHOLD) {
1952             is->audio_diff_cum = diff + is->audio_diff_avg_coef * is->audio_diff_cum;
1953             if (is->audio_diff_avg_count < AUDIO_DIFF_AVG_NB) {
1954                 /* not enough measures to have a correct estimate */
1955                 is->audio_diff_avg_count++;
1956             } else {
1957                 /* estimate the A-V difference */
1958                 avg_diff = is->audio_diff_cum * (1.0 - is->audio_diff_avg_coef);
1959
1960                 if (fabs(avg_diff) >= is->audio_diff_threshold) {
1961                     wanted_size = samples_size + ((int)(diff * is->audio_st->codec->sample_rate) * n);
1962                     nb_samples = samples_size / n;
1963
1964                     min_size = ((nb_samples * (100 - SAMPLE_CORRECTION_PERCENT_MAX)) / 100) * n;
1965                     max_size = ((nb_samples * (100 + SAMPLE_CORRECTION_PERCENT_MAX)) / 100) * n;
1966                     if (wanted_size < min_size)
1967                         wanted_size = min_size;
1968                     else if (wanted_size > max_size)
1969                         wanted_size = max_size;
1970
1971                     /* add or remove samples to correction the synchro */
1972                     if (wanted_size < samples_size) {
1973                         /* remove samples */
1974                         samples_size = wanted_size;
1975                     } else if (wanted_size > samples_size) {
1976                         uint8_t *samples_end, *q;
1977                         int nb;
1978
1979                         /* add samples */
1980                         nb = (samples_size - wanted_size);
1981                         samples_end = (uint8_t *)samples + samples_size - n;
1982                         q = samples_end + n;
1983                         while (nb > 0) {
1984                             memcpy(q, samples_end, n);
1985                             q += n;
1986                             nb -= n;
1987                         }
1988                         samples_size = wanted_size;
1989                     }
1990                 }
1991                 av_dlog(NULL, "diff=%f adiff=%f sample_diff=%d apts=%0.3f vpts=%0.3f %f\n",
1992                         diff, avg_diff, samples_size - samples_size1,
1993                         is->audio_clock, is->video_clock, is->audio_diff_threshold);
1994             }
1995         } else {
1996             /* too big difference : may be initial PTS errors, so
1997                reset A-V filter */
1998             is->audio_diff_avg_count = 0;
1999             is->audio_diff_cum = 0;
2000         }
2001     }
2002
2003     return samples_size;
2004 }
2005
/* Decode one audio frame and return its uncompressed size in bytes.
 * The decoded data — converted to S16 if the decoder emits another sample
 * format — is left in is->audio_buf; *pts_ptr receives the presentation
 * time of the returned data.  Returns a negative value when playback is
 * paused or the audio queue is aborted.  Loops internally: one input
 * packet can yield several frames, and several packets may be consumed
 * before a frame comes out. */
static int audio_decode_frame(VideoState *is, double *pts_ptr)
{
    AVPacket *pkt_temp = &is->audio_pkt_temp;  /* cursor into the current packet */
    AVPacket *pkt = &is->audio_pkt;            /* packet currently owned by us */
    AVCodecContext *dec= is->audio_st->codec;
    int n, len1, data_size;
    double pts;
    int new_packet = 0;
    int flush_complete = 0;

    for(;;) {
        /* NOTE: the audio packet can contain several frames */
        while (pkt_temp->size > 0 || (!pkt_temp->data && new_packet)) {
            if (flush_complete)
                break;
            new_packet = 0;
            data_size = sizeof(is->audio_buf1);
            len1 = avcodec_decode_audio3(dec,
                                        (int16_t *)is->audio_buf1, &data_size,
                                        pkt_temp);
            if (len1 < 0) {
                /* if error, we skip the frame */
                pkt_temp->size = 0;
                break;
            }

            /* advance past the bytes the decoder consumed */
            pkt_temp->data += len1;
            pkt_temp->size -= len1;

            if (data_size <= 0) {
                /* stop sending empty packets if the decoder is finished */
                if (!pkt_temp->data && dec->codec->capabilities & CODEC_CAP_DELAY)
                    flush_complete = 1;
                continue;
            }

            /* (re)create the converter whenever the decoder's output format
               differs from the S16 format the SDL callback expects */
            if (dec->sample_fmt != is->audio_src_fmt) {
                if (is->reformat_ctx)
                    av_audio_convert_free(is->reformat_ctx);
                is->reformat_ctx= av_audio_convert_alloc(AV_SAMPLE_FMT_S16, 1,
                                                         dec->sample_fmt, 1, NULL, 0);
                if (!is->reformat_ctx) {
                    fprintf(stderr, "Cannot convert %s sample format to %s sample format\n",
                        av_get_sample_fmt_name(dec->sample_fmt),
                        av_get_sample_fmt_name(AV_SAMPLE_FMT_S16));
                        break;
                }
                is->audio_src_fmt= dec->sample_fmt;
            }

            /* convert audio_buf1 -> audio_buf2 and point audio_buf at the
               S16 data; when no conversion is needed audio_buf1 is used
               directly */
            if (is->reformat_ctx) {
                const void *ibuf[6]= {is->audio_buf1};
                void *obuf[6]= {is->audio_buf2};
                int istride[6]= {av_get_bytes_per_sample(dec->sample_fmt)};
                int ostride[6]= {2};
                int len= data_size/istride[0];
                if (av_audio_convert(is->reformat_ctx, obuf, ostride, ibuf, istride, len)<0) {
                    printf("av_audio_convert() failed\n");
                    break;
                }
                is->audio_buf= is->audio_buf2;
                /* FIXME: existing code assume that data_size equals framesize*channels*2
                          remove this legacy cruft */
                data_size= len*2;
            }else{
                is->audio_buf= is->audio_buf1;
            }

            /* if no pts, then compute it from the running audio clock */
            pts = is->audio_clock;
            *pts_ptr = pts;
            n = 2 * dec->channels;   /* bytes per S16 sample frame */
            is->audio_clock += (double)data_size /
                (double)(n * dec->sample_rate);
#ifdef DEBUG
            {
                static double last_clock;
                printf("audio: delay=%0.3f clock=%0.3f pts=%0.3f\n",
                       is->audio_clock - last_clock,
                       is->audio_clock, pts);
                last_clock = is->audio_clock;
            }
#endif
            return data_size;
        }

        /* free the current packet */
        if (pkt->data)
            av_free_packet(pkt);

        if (is->paused || is->audioq.abort_request) {
            return -1;
        }

        /* read next packet */
        if ((new_packet = packet_queue_get(&is->audioq, pkt, 1)) < 0)
            return -1;

        if (pkt->data == flush_pkt.data)
            avcodec_flush_buffers(dec);

        pkt_temp->data = pkt->data;
        pkt_temp->size = pkt->size;

        /* update the audio clock with the packet pts when available */
        if (pkt->pts != AV_NOPTS_VALUE) {
            is->audio_clock = av_q2d(is->audio_st->time_base)*pkt->pts;
        }
    }
}
2117
2118 /* prepare a new audio buffer */
2119 static void sdl_audio_callback(void *opaque, Uint8 *stream, int len)
2120 {
2121     VideoState *is = opaque;
2122     int audio_size, len1;
2123     double pts;
2124
2125     audio_callback_time = av_gettime();
2126
2127     while (len > 0) {
2128         if (is->audio_buf_index >= is->audio_buf_size) {
2129            audio_size = audio_decode_frame(is, &pts);
2130            if (audio_size < 0) {
2131                 /* if error, just output silence */
2132                is->audio_buf = is->audio_buf1;
2133                is->audio_buf_size = 1024;
2134                memset(is->audio_buf, 0, is->audio_buf_size);
2135            } else {
2136                if (is->show_audio)
2137                    update_sample_display(is, (int16_t *)is->audio_buf, audio_size);
2138                audio_size = synchronize_audio(is, (int16_t *)is->audio_buf, audio_size,
2139                                               pts);
2140                is->audio_buf_size = audio_size;
2141            }
2142            is->audio_buf_index = 0;
2143         }
2144         len1 = is->audio_buf_size - is->audio_buf_index;
2145         if (len1 > len)
2146             len1 = len;
2147         memcpy(stream, (uint8_t *)is->audio_buf + is->audio_buf_index, len1);
2148         len -= len1;
2149         stream += len1;
2150         is->audio_buf_index += len1;
2151     }
2152 }
2153
2154 /* open a given stream. Return 0 if OK */
2155 static int stream_component_open(VideoState *is, int stream_index)
2156 {
2157     AVFormatContext *ic = is->ic;
2158     AVCodecContext *avctx;
2159     AVCodec *codec;
2160     SDL_AudioSpec wanted_spec, spec;
2161     AVDictionary *opts;
2162     AVDictionaryEntry *t = NULL;
2163
2164     if (stream_index < 0 || stream_index >= ic->nb_streams)
2165         return -1;
2166     avctx = ic->streams[stream_index]->codec;
2167
2168     opts = filter_codec_opts(codec_opts, avctx->codec_id, ic, ic->streams[stream_index]);
2169
2170     /* prepare audio output */
2171     if (avctx->codec_type == AVMEDIA_TYPE_AUDIO) {
2172         if (avctx->channels > 0) {
2173             avctx->request_channels = FFMIN(2, avctx->channels);
2174         } else {
2175             avctx->request_channels = 2;
2176         }
2177     }
2178
2179     codec = avcodec_find_decoder(avctx->codec_id);
2180     avctx->debug_mv = debug_mv;
2181     avctx->debug = debug;
2182     avctx->workaround_bugs = workaround_bugs;
2183     avctx->lowres = lowres;
2184     if(lowres) avctx->flags |= CODEC_FLAG_EMU_EDGE;
2185     avctx->idct_algo= idct;
2186     if(fast) avctx->flags2 |= CODEC_FLAG2_FAST;
2187     avctx->skip_frame= skip_frame;
2188     avctx->skip_idct= skip_idct;
2189     avctx->skip_loop_filter= skip_loop_filter;
2190     avctx->error_recognition= error_recognition;
2191     avctx->error_concealment= error_concealment;
2192     avctx->thread_count= thread_count;
2193
2194     if (!codec ||
2195         avcodec_open2(avctx, codec, &opts) < 0)
2196         return -1;
2197     if ((t = av_dict_get(opts, "", NULL, AV_DICT_IGNORE_SUFFIX))) {
2198         av_log(NULL, AV_LOG_ERROR, "Option %s not found.\n", t->key);
2199         return AVERROR_OPTION_NOT_FOUND;
2200     }
2201
2202     /* prepare audio output */
2203     if (avctx->codec_type == AVMEDIA_TYPE_AUDIO) {
2204         wanted_spec.freq = avctx->sample_rate;
2205         wanted_spec.format = AUDIO_S16SYS;
2206         wanted_spec.channels = avctx->channels;
2207         wanted_spec.silence = 0;
2208         wanted_spec.samples = SDL_AUDIO_BUFFER_SIZE;
2209         wanted_spec.callback = sdl_audio_callback;
2210         wanted_spec.userdata = is;
2211         if (SDL_OpenAudio(&wanted_spec, &spec) < 0) {
2212             fprintf(stderr, "SDL_OpenAudio: %s\n", SDL_GetError());
2213             return -1;
2214         }
2215         is->audio_hw_buf_size = spec.size;
2216         is->audio_src_fmt= AV_SAMPLE_FMT_S16;
2217     }
2218
2219     ic->streams[stream_index]->discard = AVDISCARD_DEFAULT;
2220     switch(avctx->codec_type) {
2221     case AVMEDIA_TYPE_AUDIO:
2222         is->audio_stream = stream_index;
2223         is->audio_st = ic->streams[stream_index];
2224         is->audio_buf_size = 0;
2225         is->audio_buf_index = 0;
2226
2227         /* init averaging filter */
2228         is->audio_diff_avg_coef = exp(log(0.01) / AUDIO_DIFF_AVG_NB);
2229         is->audio_diff_avg_count = 0;
2230         /* since we do not have a precise anough audio fifo fullness,
2231            we correct audio sync only if larger than this threshold */
2232         is->audio_diff_threshold = 2.0 * SDL_AUDIO_BUFFER_SIZE / avctx->sample_rate;
2233
2234         memset(&is->audio_pkt, 0, sizeof(is->audio_pkt));
2235         packet_queue_init(&is->audioq);
2236         SDL_PauseAudio(0);
2237         break;
2238     case AVMEDIA_TYPE_VIDEO:
2239         is->video_stream = stream_index;
2240         is->video_st = ic->streams[stream_index];
2241
2242         packet_queue_init(&is->videoq);
2243         is->video_tid = SDL_CreateThread(video_thread, is);
2244         break;
2245     case AVMEDIA_TYPE_SUBTITLE:
2246         is->subtitle_stream = stream_index;
2247         is->subtitle_st = ic->streams[stream_index];
2248         packet_queue_init(&is->subtitleq);
2249
2250         is->subtitle_tid = SDL_CreateThread(subtitle_thread, is);
2251         break;
2252     default:
2253         break;
2254     }
2255     return 0;
2256 }
2257
/* Tear down the stream stream_index of is->ic: abort its packet queue,
 * join the worker thread (video/subtitle) or close SDL audio, release
 * per-stream resources, and finally close the codec.  Safe to call with
 * an out-of-range index (no-op). */
static void stream_component_close(VideoState *is, int stream_index)
{
    AVFormatContext *ic = is->ic;
    AVCodecContext *avctx;

    if (stream_index < 0 || stream_index >= ic->nb_streams)
        return;
    avctx = ic->streams[stream_index]->codec;

    /* first pass: stop the consumer of this stream's queue */
    switch(avctx->codec_type) {
    case AVMEDIA_TYPE_AUDIO:
        packet_queue_abort(&is->audioq);

        /* stops the SDL callback, i.e. the audio consumer thread */
        SDL_CloseAudio();

        packet_queue_end(&is->audioq);
        av_free_packet(&is->audio_pkt);
        if (is->reformat_ctx)
            av_audio_convert_free(is->reformat_ctx);
        is->reformat_ctx = NULL;

        /* free spectrum-display FFT state, if any */
        if (is->rdft) {
            av_rdft_end(is->rdft);
            av_freep(&is->rdft_data);
            is->rdft = NULL;
            is->rdft_bits = 0;
        }
        break;
    case AVMEDIA_TYPE_VIDEO:
        packet_queue_abort(&is->videoq);

        /* note: we also signal this mutex to make sure we deblock the
           video thread in all cases */
        SDL_LockMutex(is->pictq_mutex);
        SDL_CondSignal(is->pictq_cond);
        SDL_UnlockMutex(is->pictq_mutex);

        SDL_WaitThread(is->video_tid, NULL);

        packet_queue_end(&is->videoq);
        break;
    case AVMEDIA_TYPE_SUBTITLE:
        packet_queue_abort(&is->subtitleq);

        /* note: we also signal this mutex to make sure we deblock the
           subtitle thread in all cases */
        SDL_LockMutex(is->subpq_mutex);
        is->subtitle_stream_changed = 1;

        SDL_CondSignal(is->subpq_cond);
        SDL_UnlockMutex(is->subpq_mutex);

        SDL_WaitThread(is->subtitle_tid, NULL);

        packet_queue_end(&is->subtitleq);
        break;
    default:
        break;
    }

    /* second pass: drop the stream and clear the player's references */
    ic->streams[stream_index]->discard = AVDISCARD_ALL;
    avcodec_close(avctx);
    switch(avctx->codec_type) {
    case AVMEDIA_TYPE_AUDIO:
        is->audio_st = NULL;
        is->audio_stream = -1;
        break;
    case AVMEDIA_TYPE_VIDEO:
        is->video_st = NULL;
        is->video_stream = -1;
        break;
    case AVMEDIA_TYPE_SUBTITLE:
        is->subtitle_st = NULL;
        is->subtitle_stream = -1;
        break;
    default:
        break;
    }
}
2337
2338 /* since we have only one decoding thread, we can use a global
2339    variable instead of a thread local variable */
2340 static VideoState *global_video_state;
2341
2342 static int decode_interrupt_cb(void *ctx)
2343 {
2344     return (global_video_state && global_video_state->abort_request);
2345 }
2346
/* this thread gets the stream from the disk or the network:
 * opens the input, probes and selects the best audio/video/subtitle
 * streams, opens their components, then loops reading packets and
 * dispatching them to the per-stream queues until abort.  On exit it
 * closes everything and, on error, posts FF_QUIT_EVENT to the GUI. */
static int decode_thread(void *arg)
{
    VideoState *is = arg;
    AVFormatContext *ic = NULL;
    int err, i, ret;
    int st_index[AVMEDIA_TYPE_NB];   /* selected stream per media type, -1 = none */
    AVPacket pkt1, *pkt = &pkt1;
    int eof=0;
    int pkt_in_play_range = 0;
    AVDictionaryEntry *t;
    AVDictionary **opts;             /* per-stream options for find_stream_info */
    int orig_nb_streams;

    memset(st_index, -1, sizeof(st_index));
    is->video_stream = -1;
    is->audio_stream = -1;
    is->subtitle_stream = -1;

    /* publish this instance so decode_interrupt_cb() can abort blocking I/O */
    global_video_state = is;

    ic = avformat_alloc_context();
    ic->interrupt_callback.callback = decode_interrupt_cb;
    err = avformat_open_input(&ic, is->filename, is->iformat, &format_opts);
    if (err < 0) {
        print_error(is->filename, err);
        ret = -1;
        goto fail;
    }
    /* any entry left in format_opts was not recognized by the demuxer */
    if ((t = av_dict_get(format_opts, "", NULL, AV_DICT_IGNORE_SUFFIX))) {
        av_log(NULL, AV_LOG_ERROR, "Option %s not found.\n", t->key);
        ret = AVERROR_OPTION_NOT_FOUND;
        goto fail;
    }
    is->ic = ic;

    if(genpts)
        ic->flags |= AVFMT_FLAG_GENPTS;

    opts = setup_find_stream_info_opts(ic, codec_opts);
    orig_nb_streams = ic->nb_streams;

    err = avformat_find_stream_info(ic, opts);
    if (err < 0) {
        fprintf(stderr, "%s: could not find codec parameters\n", is->filename);
        ret = -1;
        goto fail;
    }
    for (i = 0; i < orig_nb_streams; i++)
        av_dict_free(&opts[i]);
    av_freep(&opts);

    if(ic->pb)
        ic->pb->eof_reached= 0; //FIXME hack, avplay maybe should not use url_feof() to test for the end

    if(seek_by_bytes<0)
        seek_by_bytes= !!(ic->iformat->flags & AVFMT_TS_DISCONT);

    /* if seeking requested, we execute it */
    if (start_time != AV_NOPTS_VALUE) {
        int64_t timestamp;

        timestamp = start_time;
        /* add the stream start time */
        if (ic->start_time != AV_NOPTS_VALUE)
            timestamp += ic->start_time;
        ret = avformat_seek_file(ic, -1, INT64_MIN, timestamp, INT64_MAX, 0);
        if (ret < 0) {
            fprintf(stderr, "%s: could not seek to position %0.3f\n",
                    is->filename, (double)timestamp / AV_TIME_BASE);
        }
    }

    /* discard everything by default; stream_component_open() re-enables
       the streams that get selected below */
    for (i = 0; i < ic->nb_streams; i++)
        ic->streams[i]->discard = AVDISCARD_ALL;
    if (!video_disable)
        st_index[AVMEDIA_TYPE_VIDEO] =
            av_find_best_stream(ic, AVMEDIA_TYPE_VIDEO,
                                wanted_stream[AVMEDIA_TYPE_VIDEO], -1, NULL, 0);
    if (!audio_disable)
        st_index[AVMEDIA_TYPE_AUDIO] =
            av_find_best_stream(ic, AVMEDIA_TYPE_AUDIO,
                                wanted_stream[AVMEDIA_TYPE_AUDIO],
                                st_index[AVMEDIA_TYPE_VIDEO],
                                NULL, 0);
    /* NOTE(review): subtitle selection is gated on !video_disable rather
       than a separate subtitle flag — looks intentional (subtitles are
       useless without video) but worth confirming */
    if (!video_disable)
        st_index[AVMEDIA_TYPE_SUBTITLE] =
            av_find_best_stream(ic, AVMEDIA_TYPE_SUBTITLE,
                                wanted_stream[AVMEDIA_TYPE_SUBTITLE],
                                (st_index[AVMEDIA_TYPE_AUDIO] >= 0 ?
                                 st_index[AVMEDIA_TYPE_AUDIO] :
                                 st_index[AVMEDIA_TYPE_VIDEO]),
                                NULL, 0);
    if (show_status) {
        av_dump_format(ic, 0, is->filename, 0);
    }

    /* open the streams */
    if (st_index[AVMEDIA_TYPE_AUDIO] >= 0) {
        stream_component_open(is, st_index[AVMEDIA_TYPE_AUDIO]);
    }

    ret=-1;
    if (st_index[AVMEDIA_TYPE_VIDEO] >= 0) {
        ret= stream_component_open(is, st_index[AVMEDIA_TYPE_VIDEO]);
    }
    is->refresh_tid = SDL_CreateThread(refresh_thread, is);
    if(ret<0) {
        /* no video: show the audio visualisation instead */
        if (!display_disable)
            is->show_audio = 2;
    }

    if (st_index[AVMEDIA_TYPE_SUBTITLE] >= 0) {
        stream_component_open(is, st_index[AVMEDIA_TYPE_SUBTITLE]);
    }

    if (is->video_stream < 0 && is->audio_stream < 0) {
        fprintf(stderr, "%s: could not open codecs\n", is->filename);
        ret = -1;
        goto fail;
    }

    /* main demux loop */
    for(;;) {
        if (is->abort_request)
            break;
        /* propagate pause/resume to network protocols (e.g. RTSP) */
        if (is->paused != is->last_paused) {
            is->last_paused = is->paused;
            if (is->paused)
                is->read_pause_return= av_read_pause(ic);
            else
                av_read_play(ic);
        }
#if CONFIG_RTSP_DEMUXER
        if (is->paused && !strcmp(ic->iformat->name, "rtsp")) {
            /* wait 10 ms to avoid trying to get another packet */
            /* XXX: horrible */
            SDL_Delay(10);
            continue;
        }
#endif
        /* execute a pending seek request and flush all queues */
        if (is->seek_req) {
            int64_t seek_target= is->seek_pos;
            int64_t seek_min= is->seek_rel > 0 ? seek_target - is->seek_rel + 2: INT64_MIN;
            int64_t seek_max= is->seek_rel < 0 ? seek_target - is->seek_rel - 2: INT64_MAX;
//FIXME the +-2 is due to rounding being not done in the correct direction in generation
//      of the seek_pos/seek_rel variables

            ret = avformat_seek_file(is->ic, -1, seek_min, seek_target, seek_max, is->seek_flags);
            if (ret < 0) {
                fprintf(stderr, "%s: error while seeking\n", is->ic->filename);
            }else{
                if (is->audio_stream >= 0) {
                    packet_queue_flush(&is->audioq);
                    packet_queue_put(&is->audioq, &flush_pkt);
                }
                if (is->subtitle_stream >= 0) {
                    packet_queue_flush(&is->subtitleq);
                    packet_queue_put(&is->subtitleq, &flush_pkt);
                }
                if (is->video_stream >= 0) {
                    packet_queue_flush(&is->videoq);
                    packet_queue_put(&is->videoq, &flush_pkt);
                }
            }
            is->seek_req = 0;
            eof= 0;
        }

        /* if the queue are full, no need to read more */
        if (   is->audioq.size + is->videoq.size + is->subtitleq.size > MAX_QUEUE_SIZE
            || (   (is->audioq   .size  > MIN_AUDIOQ_SIZE || is->audio_stream<0)
                && (is->videoq   .nb_packets > MIN_FRAMES || is->video_stream<0)
                && (is->subtitleq.nb_packets > MIN_FRAMES || is->subtitle_stream<0))) {
            /* wait 10 ms */
            SDL_Delay(10);
            continue;
        }
        /* at EOF: feed empty packets so delayed frames are drained, then
           either loop from the start or exit when the queues run dry */
        if(eof) {
            if(is->video_stream >= 0){
                av_init_packet(pkt);
                pkt->data=NULL;
                pkt->size=0;
                pkt->stream_index= is->video_stream;
                packet_queue_put(&is->videoq, pkt);
            }
            if (is->audio_stream >= 0 &&
                is->audio_st->codec->codec->capabilities & CODEC_CAP_DELAY) {
                av_init_packet(pkt);
                pkt->data = NULL;
                pkt->size = 0;
                pkt->stream_index = is->audio_stream;
                packet_queue_put(&is->audioq, pkt);
            }
            SDL_Delay(10);
            if(is->audioq.size + is->videoq.size + is->subtitleq.size ==0){
                if(loop!=1 && (!loop || --loop)){
                    stream_seek(cur_stream, start_time != AV_NOPTS_VALUE ? start_time : 0, 0, 0);
                }else if(autoexit){
                    ret=AVERROR_EOF;
                    goto fail;
                }
            }
            continue;
        }
        ret = av_read_frame(ic, pkt);
        if (ret < 0) {
            if (ret == AVERROR_EOF || (ic->pb && ic->pb->eof_reached))
                eof=1;
            if (ic->pb && ic->pb->error)
                break;
            SDL_Delay(100); /* wait for user event */
            continue;
        }
        /* check if packet is in play range specified by user, then queue, otherwise discard */
        pkt_in_play_range = duration == AV_NOPTS_VALUE ||
                (pkt->pts - ic->streams[pkt->stream_index]->start_time) *
                av_q2d(ic->streams[pkt->stream_index]->time_base) -
                (double)(start_time != AV_NOPTS_VALUE ? start_time : 0)/1000000
                <= ((double)duration/1000000);
        if (pkt->stream_index == is->audio_stream && pkt_in_play_range) {
            packet_queue_put(&is->audioq, pkt);
        } else if (pkt->stream_index == is->video_stream && pkt_in_play_range) {
            packet_queue_put(&is->videoq, pkt);
        } else if (pkt->stream_index == is->subtitle_stream && pkt_in_play_range) {
            packet_queue_put(&is->subtitleq, pkt);
        } else {
            av_free_packet(pkt);
        }
    }
    /* wait until the end */
    while (!is->abort_request) {
        SDL_Delay(100);
    }

    ret = 0;
 fail:
    /* disable interrupting */
    global_video_state = NULL;

    /* close each stream */
    if (is->audio_stream >= 0)
        stream_component_close(is, is->audio_stream);
    if (is->video_stream >= 0)
        stream_component_close(is, is->video_stream);
    if (is->subtitle_stream >= 0)
        stream_component_close(is, is->subtitle_stream);
    if (is->ic) {
        av_close_input_file(is->ic);
        is->ic = NULL; /* safety */
    }
    /* NOTE(review): mixes the old global interrupt API with the
       per-context interrupt_callback set above — likely transitional
       code from the API migration; confirm before removing */
    avio_set_interrupt_cb(NULL);

    if (ret != 0) {
        /* tell the GUI thread to quit */
        SDL_Event event;

        event.type = FF_QUIT_EVENT;
        event.user.data1 = is;
        SDL_PushEvent(&event);
    }
    return 0;
}
2608
2609 static VideoState *stream_open(const char *filename, AVInputFormat *iformat)
2610 {
2611     VideoState *is;
2612
2613     is = av_mallocz(sizeof(VideoState));
2614     if (!is)
2615         return NULL;
2616     av_strlcpy(is->filename, filename, sizeof(is->filename));
2617     is->iformat = iformat;
2618     is->ytop = 0;
2619     is->xleft = 0;
2620
2621     /* start video display */
2622     is->pictq_mutex = SDL_CreateMutex();
2623     is->pictq_cond = SDL_CreateCond();
2624
2625     is->subpq_mutex = SDL_CreateMutex();
2626     is->subpq_cond = SDL_CreateCond();
2627
2628     is->av_sync_type = av_sync_type;
2629     is->parse_tid = SDL_CreateThread(decode_thread, is);
2630     if (!is->parse_tid) {
2631         av_free(is);
2632         return NULL;
2633     }
2634     return is;
2635 }
2636
/* Switch the active stream of the given media type to the next usable
 * stream of that type, wrapping around at the end of the stream list.
 * For subtitles only, wrapping past the last stream disables the
 * component entirely (stream_index becomes -1). */
static void stream_cycle_channel(VideoState *is, int codec_type)
{
    AVFormatContext *ic = is->ic;
    int start_index, stream_index;
    AVStream *st;

    /* the currently open stream of this type is the search start point */
    if (codec_type == AVMEDIA_TYPE_VIDEO)
        start_index = is->video_stream;
    else if (codec_type == AVMEDIA_TYPE_AUDIO)
        start_index = is->audio_stream;
    else
        start_index = is->subtitle_stream;
    /* nothing to cycle; subtitles may legitimately start from the
     * "disabled" index -1 */
    if (start_index < (codec_type == AVMEDIA_TYPE_SUBTITLE ? -1 : 0))
        return;
    stream_index = start_index;
    for(;;) {
        if (++stream_index >= is->ic->nb_streams)
        {
            if (codec_type == AVMEDIA_TYPE_SUBTITLE)
            {
                /* wrapped past the last stream: turn subtitles off */
                stream_index = -1;
                goto the_end;
            } else
                stream_index = 0;
        }
        /* came full circle without finding another usable stream */
        if (stream_index == start_index)
            return;
        st = ic->streams[stream_index];
        if (st->codec->codec_type == codec_type) {
            /* check that parameters are OK */
            switch(codec_type) {
            case AVMEDIA_TYPE_AUDIO:
                /* audio is only usable with a known rate and channel count */
                if (st->codec->sample_rate != 0 &&
                    st->codec->channels != 0)
                    goto the_end;
                break;
            case AVMEDIA_TYPE_VIDEO:
            case AVMEDIA_TYPE_SUBTITLE:
                goto the_end;
            default:
                break;
            }
        }
    }
 the_end:
    stream_component_close(is, start_index);
    stream_component_open(is, stream_index);
}
2685
2686
2687 static void toggle_full_screen(void)
2688 {
2689     is_full_screen = !is_full_screen;
2690 #if defined(__APPLE__) && SDL_VERSION_ATLEAST(1, 2, 14)
2691     /* OSX needs to empty the picture_queue */
2692     for (int i = 0; i < VIDEO_PICTURE_QUEUE_SIZE; i++) {
2693         cur_stream->pictq[i].reallocate = 1;
2694     }
2695 #endif
2696     video_open(cur_stream);
2697 }
2698
2699 static void toggle_pause(void)
2700 {
2701     if (cur_stream)
2702         stream_pause(cur_stream);
2703     step = 0;
2704 }
2705
2706 static void step_to_next_frame(void)
2707 {
2708     if (cur_stream) {
2709         /* if the stream is paused unpause it, then step */
2710         if (cur_stream->paused)
2711             stream_pause(cur_stream);
2712     }
2713     step = 1;
2714 }
2715
2716 static void toggle_audio_display(void)
2717 {
2718     if (cur_stream) {
2719         int bgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0x00);
2720         cur_stream->show_audio = (cur_stream->show_audio + 1) % 3;
2721         fill_rectangle(screen,
2722                     cur_stream->xleft, cur_stream->ytop, cur_stream->width, cur_stream->height,
2723                     bgcolor);
2724         SDL_UpdateRect(screen, cur_stream->xleft, cur_stream->ytop, cur_stream->width, cur_stream->height);
2725     }
2726 }
2727
/* handle an event sent by the GUI: the main interactive loop.  Blocks on
 * SDL_WaitEvent() forever; quitting goes through do_exit(), which does not
 * return. */
static void event_loop(void)
{
    SDL_Event event;
    double incr, pos, frac;

    for(;;) {
        double x;
        SDL_WaitEvent(&event);
        switch(event.type) {
        case SDL_KEYDOWN:
            if (exit_on_keydown) {
                do_exit();
                break;
            }
            switch(event.key.keysym.sym) {
            case SDLK_ESCAPE:
            case SDLK_q:
                do_exit();
                break;
            case SDLK_f:
                toggle_full_screen();
                break;
            case SDLK_p:
            case SDLK_SPACE:
                toggle_pause();
                break;
            case SDLK_s: //S: Step to next frame
                step_to_next_frame();
                break;
            case SDLK_a:
                if (cur_stream)
                    stream_cycle_channel(cur_stream, AVMEDIA_TYPE_AUDIO);
                break;
            case SDLK_v:
                if (cur_stream)
                    stream_cycle_channel(cur_stream, AVMEDIA_TYPE_VIDEO);
                break;
            case SDLK_t:
                if (cur_stream)
                    stream_cycle_channel(cur_stream, AVMEDIA_TYPE_SUBTITLE);
                break;
            case SDLK_w:
                toggle_audio_display();
                break;
            /* arrow keys: relative seek, increment in seconds */
            case SDLK_LEFT:
                incr = -10.0;
                goto do_seek;
            case SDLK_RIGHT:
                incr = 10.0;
                goto do_seek;
            case SDLK_UP:
                incr = 60.0;
                goto do_seek;
            case SDLK_DOWN:
                incr = -60.0;
            do_seek:
                if (cur_stream) {
                    if (seek_by_bytes) {
                        /* byte seeking: start from the last known packet
                         * position, falling back to the current I/O offset */
                        if (cur_stream->video_stream >= 0 && cur_stream->video_current_pos>=0){
                            pos= cur_stream->video_current_pos;
                        }else if(cur_stream->audio_stream >= 0 && cur_stream->audio_pkt.pos>=0){
                            pos= cur_stream->audio_pkt.pos;
                        }else
                            pos = avio_tell(cur_stream->ic->pb);
                        /* convert the increment from seconds to bytes;
                         * 180000 B/s is the fallback when the bitrate is
                         * unknown */
                        if (cur_stream->ic->bit_rate)
                            incr *= cur_stream->ic->bit_rate / 8.0;
                        else
                            incr *= 180000.0;
                        pos += incr;
                        stream_seek(cur_stream, pos, incr, 1);
                    } else {
                        /* time seeking relative to the master clock */
                        pos = get_master_clock(cur_stream);
                        pos += incr;
                        stream_seek(cur_stream, (int64_t)(pos * AV_TIME_BASE), (int64_t)(incr * AV_TIME_BASE), 0);
                    }
                }
                break;
            default:
                break;
            }
            break;
        case SDL_MOUSEBUTTONDOWN:
            if (exit_on_mousedown) {
                do_exit();
                break;
            }
            /* fall through: a click seeks exactly like a drag */
        case SDL_MOUSEMOTION:
            if(event.type ==SDL_MOUSEBUTTONDOWN){
                x= event.button.x;
            }else{
                /* only seek while a mouse button is held down */
                if(event.motion.state != SDL_PRESSED)
                    break;
                x= event.motion.x;
            }
            if (cur_stream) {
                if(seek_by_bytes || cur_stream->ic->duration<=0){
                    /* no usable duration: map x to a byte offset instead */
                    uint64_t size=  avio_size(cur_stream->ic->pb);
                    stream_seek(cur_stream, size*x/cur_stream->width, 0, 1);
                }else{
                    int64_t ts;
                    int ns, hh, mm, ss;
                    int tns, thh, tmm, tss;
                    tns = cur_stream->ic->duration/1000000LL;
                    thh = tns/3600;
                    tmm = (tns%3600)/60;
                    tss = (tns%60);
                    /* fraction of window width -> fraction of duration */
                    frac = x/cur_stream->width;
                    ns = frac*tns;
                    hh = ns/3600;
                    mm = (ns%3600)/60;
                    ss = (ns%60);
                    fprintf(stderr, "Seek to %2.0f%% (%2d:%02d:%02d) of total duration (%2d:%02d:%02d)       \n", frac*100,
                            hh, mm, ss, thh, tmm, tss);
                    ts = frac*cur_stream->ic->duration;
                    if (cur_stream->ic->start_time != AV_NOPTS_VALUE)
                        ts += cur_stream->ic->start_time;
                    stream_seek(cur_stream, ts, 0, 0);
                }
            }
            break;
        case SDL_VIDEORESIZE:
            if (cur_stream) {
                screen = SDL_SetVideoMode(event.resize.w, event.resize.h, 0,
                                          SDL_HWSURFACE|SDL_RESIZABLE|SDL_ASYNCBLIT|SDL_HWACCEL);
                screen_width = cur_stream->width = event.resize.w;
                screen_height= cur_stream->height= event.resize.h;
            }
            break;
        case SDL_QUIT:
        case FF_QUIT_EVENT:
            do_exit();
            break;
        case FF_ALLOC_EVENT:
            /* the video thread asked us to (re)create its picture; SDL
             * surfaces must be handled from the main thread */
            video_open(event.user.data1);
            alloc_picture(event.user.data1);
            break;
        case FF_REFRESH_EVENT:
            video_refresh_timer(event.user.data1);
            cur_stream->refresh=0;
            break;
        default:
            break;
        }
    }
}
2874
2875 static int opt_frame_size(const char *opt, const char *arg)
2876 {
2877     av_log(NULL, AV_LOG_ERROR,
2878            "Option '%s' has been removed, use private format options instead\n", opt);
2879     return AVERROR(EINVAL);
2880 }
2881
2882 static int opt_width(const char *opt, const char *arg)
2883 {
2884     screen_width = parse_number_or_die(opt, arg, OPT_INT64, 1, INT_MAX);
2885     return 0;
2886 }
2887
2888 static int opt_height(const char *opt, const char *arg)
2889 {
2890     screen_height = parse_number_or_die(opt, arg, OPT_INT64, 1, INT_MAX);
2891     return 0;
2892 }
2893
2894 static int opt_format(const char *opt, const char *arg)
2895 {
2896     file_iformat = av_find_input_format(arg);
2897     if (!file_iformat) {
2898         fprintf(stderr, "Unknown input format: %s\n", arg);
2899         return AVERROR(EINVAL);
2900     }
2901     return 0;
2902 }
2903
2904 static int opt_frame_pix_fmt(const char *opt, const char *arg)
2905 {
2906     av_log(NULL, AV_LOG_ERROR,
2907            "Option '%s' has been removed, use private format options instead\n", opt);
2908     return AVERROR(EINVAL);
2909 }
2910
2911 static int opt_sync(const char *opt, const char *arg)
2912 {
2913     if (!strcmp(arg, "audio"))
2914         av_sync_type = AV_SYNC_AUDIO_MASTER;
2915     else if (!strcmp(arg, "video"))
2916         av_sync_type = AV_SYNC_VIDEO_MASTER;
2917     else if (!strcmp(arg, "ext"))
2918         av_sync_type = AV_SYNC_EXTERNAL_CLOCK;
2919     else {
2920         fprintf(stderr, "Unknown value for %s: %s\n", opt, arg);
2921         exit(1);
2922     }
2923     return 0;
2924 }
2925
2926 static int opt_seek(const char *opt, const char *arg)
2927 {
2928     start_time = parse_time_or_die(opt, arg, 1);
2929     return 0;
2930 }
2931
2932 static int opt_duration(const char *opt, const char *arg)
2933 {
2934     duration = parse_time_or_die(opt, arg, 1);
2935     return 0;
2936 }
2937
2938 static int opt_debug(const char *opt, const char *arg)
2939 {
2940     av_log_set_level(99);
2941     debug = parse_number_or_die(opt, arg, OPT_INT64, 0, INT_MAX);
2942     return 0;
2943 }
2944
2945 static int opt_vismv(const char *opt, const char *arg)
2946 {
2947     debug_mv = parse_number_or_die(opt, arg, OPT_INT64, INT_MIN, INT_MAX);
2948     return 0;
2949 }
2950
2951 static int opt_thread_count(const char *opt, const char *arg)
2952 {
2953     thread_count= parse_number_or_die(opt, arg, OPT_INT64, 0, INT_MAX);
2954 #if !HAVE_THREADS
2955     fprintf(stderr, "Warning: not compiled with thread support, using thread emulation\n");
2956 #endif
2957     return 0;
2958 }
2959
/* Command line option table.  Generic options (loglevel, version, ...) come
 * from cmdutils_common_opts.h; everything below is avplay specific.
 * Entries either point at a handler callback or directly at the global
 * variable they set. */
static const OptionDef options[] = {
#include "cmdutils_common_opts.h"
    /* display geometry and mode */
    { "x", HAS_ARG, {(void*)opt_width}, "force displayed width", "width" },
    { "y", HAS_ARG, {(void*)opt_height}, "force displayed height", "height" },
    { "s", HAS_ARG | OPT_VIDEO, {(void*)opt_frame_size}, "set frame size (WxH or abbreviation)", "size" },
    { "fs", OPT_BOOL, {(void*)&is_full_screen}, "force full screen" },
    /* stream selection */
    { "an", OPT_BOOL, {(void*)&audio_disable}, "disable audio" },
    { "vn", OPT_BOOL, {(void*)&video_disable}, "disable video" },
    { "ast", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&wanted_stream[AVMEDIA_TYPE_AUDIO]}, "select desired audio stream", "stream_number" },
    { "vst", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&wanted_stream[AVMEDIA_TYPE_VIDEO]}, "select desired video stream", "stream_number" },
    { "sst", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&wanted_stream[AVMEDIA_TYPE_SUBTITLE]}, "select desired subtitle stream", "stream_number" },
    /* playback range and seeking */
    { "ss", HAS_ARG, {(void*)&opt_seek}, "seek to a given position in seconds", "pos" },
    { "t", HAS_ARG, {(void*)&opt_duration}, "play  \"duration\" seconds of audio/video", "duration" },
    { "bytes", OPT_INT | HAS_ARG, {(void*)&seek_by_bytes}, "seek by bytes 0=off 1=on -1=auto", "val" },
    { "nodisp", OPT_BOOL, {(void*)&display_disable}, "disable graphical display" },
    { "f", HAS_ARG, {(void*)opt_format}, "force format", "fmt" },
    { "pix_fmt", HAS_ARG | OPT_EXPERT | OPT_VIDEO, {(void*)opt_frame_pix_fmt}, "set pixel format", "format" },
    { "stats", OPT_BOOL | OPT_EXPERT, {(void*)&show_status}, "show status", "" },
    /* decoder tuning (expert) */
    { "debug", HAS_ARG | OPT_EXPERT, {(void*)opt_debug}, "print specific debug info", "" },
    { "bug", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&workaround_bugs}, "workaround bugs", "" },
    { "vismv", HAS_ARG | OPT_EXPERT, {(void*)opt_vismv}, "visualize motion vectors", "" },
    { "fast", OPT_BOOL | OPT_EXPERT, {(void*)&fast}, "non spec compliant optimizations", "" },
    { "genpts", OPT_BOOL | OPT_EXPERT, {(void*)&genpts}, "generate pts", "" },
    { "drp", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&decoder_reorder_pts}, "let decoder reorder pts 0=off 1=on -1=auto", ""},
    { "lowres", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&lowres}, "", "" },
    { "skiploop", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&skip_loop_filter}, "", "" },
    { "skipframe", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&skip_frame}, "", "" },
    { "skipidct", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&skip_idct}, "", "" },
    { "idct", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&idct}, "set idct algo",  "algo" },
    { "er", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&error_recognition}, "set error detection threshold (0-4)",  "threshold" },
    { "ec", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&error_concealment}, "set error concealment options",  "bit_mask" },
    { "sync", HAS_ARG | OPT_EXPERT, {(void*)opt_sync}, "set audio-video sync. type (type=audio/video/ext)", "type" },
    { "threads", HAS_ARG | OPT_EXPERT, {(void*)opt_thread_count}, "thread count", "count" },
    /* UI behaviour */
    { "autoexit", OPT_BOOL | OPT_EXPERT, {(void*)&autoexit}, "exit at the end", "" },
    { "exitonkeydown", OPT_BOOL | OPT_EXPERT, {(void*)&exit_on_keydown}, "exit on key down", "" },
    { "exitonmousedown", OPT_BOOL | OPT_EXPERT, {(void*)&exit_on_mousedown}, "exit on mouse down", "" },
    { "loop", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&loop}, "set number of times the playback shall be looped", "loop count" },
    { "framedrop", OPT_BOOL | OPT_EXPERT, {(void*)&framedrop}, "drop frames when cpu is too slow", "" },
    { "window_title", OPT_STRING | HAS_ARG, {(void*)&window_title}, "set window title", "window title" },
#if CONFIG_AVFILTER
    { "vf", OPT_STRING | HAS_ARG, {(void*)&vfilters}, "video filters", "filter list" },
#endif
    { "rdftspeed", OPT_INT | HAS_ARG| OPT_AUDIO | OPT_EXPERT, {(void*)&rdftspeed}, "rdft speed", "msecs" },
    { "default", HAS_ARG | OPT_AUDIO | OPT_VIDEO | OPT_EXPERT, {(void*)opt_default}, "generic catch all option", "" },
    { "i", 0, {NULL}, "avconv compatibility dummy option", ""},
    { NULL, },
};
3007
3008 static void show_usage(void)
3009 {
3010     printf("Simple media player\n");
3011     printf("usage: %s [options] input_file\n", program_name);
3012     printf("\n");
3013 }
3014
/* Print the full help text: main and advanced option tables, the AVOption
 * help of the relevant libraries, and the interactive key bindings. */
static void show_help(void)
{
    av_log_set_callback(log_callback_help);
    show_usage();
    show_help_options(options, "Main options:\n",
                      OPT_EXPERT, 0);
    show_help_options(options, "\nAdvanced options:\n",
                      OPT_EXPERT, OPT_EXPERT);
    printf("\n");
    show_help_children(avcodec_get_class(), AV_OPT_FLAG_DECODING_PARAM);
    show_help_children(avformat_get_class(), AV_OPT_FLAG_DECODING_PARAM);
#if !CONFIG_AVFILTER
    /* without libavfilter, scaling is done with libswscale directly */
    show_help_children(sws_get_class(), AV_OPT_FLAG_ENCODING_PARAM);
#endif
    printf("\nWhile playing:\n"
           "q, ESC              quit\n"
           "f                   toggle full screen\n"
           "p, SPC              pause\n"
           "a                   cycle audio channel\n"
           "v                   cycle video channel\n"
           "t                   cycle subtitle channel\n"
           "w                   show audio waves\n"
           "s                   activate frame-step mode\n"
           "left/right          seek backward/forward 10 seconds\n"
           "down/up             seek backward/forward 1 minute\n"
           "mouse click         seek to percentage in file corresponding to fraction of width\n"
           );
}
3043
3044 static void opt_input_file(void *optctx, const char *filename)
3045 {
3046     if (input_filename) {
3047         fprintf(stderr, "Argument '%s' provided as input filename, but '%s' was already specified.\n",
3048                 filename, input_filename);
3049         exit(1);
3050     }
3051     if (!strcmp(filename, "-"))
3052         filename = "pipe:";
3053     input_filename = filename;
3054 }
3055
/* Program entry point: register the libraries, parse the command line,
 * initialize SDL and hand control to the event loop.  event_loop() never
 * returns; termination happens through do_exit(). */
int main(int argc, char **argv)
{
    int flags;

    av_log_set_flags(AV_LOG_SKIP_REPEATED);
    parse_loglevel(argc, argv, options);

    /* register all codecs, demux and protocols */
    avcodec_register_all();
#if CONFIG_AVDEVICE
    avdevice_register_all();
#endif
#if CONFIG_AVFILTER
    avfilter_register_all();
#endif
    av_register_all();
    avformat_network_init();

    init_opts();

    show_banner();

    parse_options(NULL, argc, argv, options, opt_input_file);

    if (!input_filename) {
        show_usage();
        fprintf(stderr, "An input file must be specified\n");
        fprintf(stderr, "Use -h to get full help or, even better, run 'man %s'\n", program_name);
        exit(1);
    }

    /* without a display there is nothing to decode video for */
    if (display_disable) {
        video_disable = 1;
    }
    flags = SDL_INIT_VIDEO | SDL_INIT_AUDIO | SDL_INIT_TIMER;
#if !defined(__MINGW32__) && !defined(__APPLE__)
    flags |= SDL_INIT_EVENTTHREAD; /* Not supported on Windows or Mac OS X */
#endif
    if (SDL_Init (flags)) {
        fprintf(stderr, "Could not initialize SDL - %s\n", SDL_GetError());
        exit(1);
    }

    if (!display_disable) {
#if HAVE_SDL_VIDEO_SIZE
        /* remember the desktop resolution for full-screen mode */
        const SDL_VideoInfo *vi = SDL_GetVideoInfo();
        fs_screen_width = vi->current_w;
        fs_screen_height = vi->current_h;
#endif
    }

    /* ignore event classes the player does not use */
    SDL_EventState(SDL_ACTIVEEVENT, SDL_IGNORE);
    SDL_EventState(SDL_SYSWMEVENT, SDL_IGNORE);
    SDL_EventState(SDL_USEREVENT, SDL_IGNORE);

    /* sentinel packet queued after a seek to make the decoders flush;
     * NOTE(review): presumably recognized by comparing the data pointer,
     * not the payload — confirm against the queue code */
    av_init_packet(&flush_pkt);
    flush_pkt.data= "FLUSH";

    cur_stream = stream_open(input_filename, file_iformat);

    event_loop();

    /* never returns */

    return 0;
}