1 /*
2  * ffplay : Simple Media Player based on the FFmpeg libraries
3  * Copyright (c) 2003 Fabrice Bellard
4  *
5  * This file is part of FFmpeg.
6  *
7  * FFmpeg is free software; you can redistribute it and/or
8  * modify it under the terms of the GNU Lesser General Public
9  * License as published by the Free Software Foundation; either
10  * version 2.1 of the License, or (at your option) any later version.
11  *
12  * FFmpeg is distributed in the hope that it will be useful,
13  * but WITHOUT ANY WARRANTY; without even the implied warranty of
14  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
15  * Lesser General Public License for more details.
16  *
17  * You should have received a copy of the GNU Lesser General Public
18  * License along with FFmpeg; if not, write to the Free Software
19  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
20  */
21
22 #include "config.h"
23 #include <inttypes.h>
24 #include <math.h>
25 #include <limits.h>
26 #include "libavutil/avstring.h"
27 #include "libavutil/colorspace.h"
28 #include "libavutil/mathematics.h"
29 #include "libavutil/pixdesc.h"
30 #include "libavutil/imgutils.h"
31 #include "libavutil/dict.h"
32 #include "libavutil/parseutils.h"
33 #include "libavutil/samplefmt.h"
34 #include "libavutil/avassert.h"
35 #include "libavformat/avformat.h"
36 #include "libavdevice/avdevice.h"
37 #include "libswscale/swscale.h"
38 #include "libavcodec/audioconvert.h"
39 #include "libavutil/opt.h"
40 #include "libavcodec/avfft.h"
41
42 #if CONFIG_AVFILTER
43 # include "libavfilter/avcodec.h"
44 # include "libavfilter/avfilter.h"
45 # include "libavfilter/avfiltergraph.h"
46 # include "libavfilter/buffersink.h"
47 #endif
48
49 #include <SDL.h>
50 #include <SDL_thread.h>
51
52 #include "cmdutils.h"
53
54 #include <unistd.h>
55 #include <assert.h>
56
57 const char program_name[] = "ffplay";
58 const int program_birth_year = 2003;
59
60 #define MAX_QUEUE_SIZE (15 * 1024 * 1024)
61 #define MIN_AUDIOQ_SIZE (20 * 16 * 1024)
62 #define MIN_FRAMES 5
63
64 /* SDL audio buffer size, in samples. Should be small to have precise
65    A/V sync as SDL does not have hardware buffer fullness info. */
66 #define SDL_AUDIO_BUFFER_SIZE 1024
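/* For example, at a 44.1 kHz sample rate 1024 samples correspond to roughly
   23 ms of audio per SDL callback, which is the granularity at which sync
   corrections can take effect. */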
67
68 /* no AV sync correction is done if below the AV sync threshold */
69 #define AV_SYNC_THRESHOLD 0.01
70 /* no AV correction is done if the error is too big */
71 #define AV_NOSYNC_THRESHOLD 10.0
72
73 #define FRAME_SKIP_FACTOR 0.05
74
75 /* maximum audio speed change to get correct sync */
76 #define SAMPLE_CORRECTION_PERCENT_MAX 10
77
78 /* we use about AUDIO_DIFF_AVG_NB A-V differences to make the average */
79 #define AUDIO_DIFF_AVG_NB   20
80
81 /* NOTE: the size must be big enough to compensate for the hardware audio buffer size */
82 #define SAMPLE_ARRAY_SIZE (2*65536)
83
84 static int sws_flags = SWS_BICUBIC;
85
86 typedef struct PacketQueue {
87     AVPacketList *first_pkt, *last_pkt;
88     int nb_packets;
89     int size;
90     int abort_request;
91     SDL_mutex *mutex;
92     SDL_cond *cond;
93 } PacketQueue;
94
95 #define VIDEO_PICTURE_QUEUE_SIZE 2
96 #define SUBPICTURE_QUEUE_SIZE 4
97
98 typedef struct VideoPicture {
99     double pts;                                  ///<presentation time stamp for this picture
100     double target_clock;                         ///<av_gettime() time at which this should be displayed ideally
101     double duration;                             ///<expected duration of the frame
102     int64_t pos;                                 ///<byte position in file
103     SDL_Overlay *bmp;
104     int width, height; /* source height & width */
105     int allocated;
106     enum PixelFormat pix_fmt;
107
108 #if CONFIG_AVFILTER
109     AVFilterBufferRef *picref;
110 #endif
111 } VideoPicture;
112
113 typedef struct SubPicture {
114     double pts; /* presentation time stamp for this picture */
115     AVSubtitle sub;
116 } SubPicture;
117
118 enum {
119     AV_SYNC_AUDIO_MASTER, /* default choice */
120     AV_SYNC_VIDEO_MASTER,
121     AV_SYNC_EXTERNAL_CLOCK, /* synchronize to an external clock */
122 };
123
124 typedef struct VideoState {
125     SDL_Thread *read_tid;
126     SDL_Thread *video_tid;
127     SDL_Thread *refresh_tid;
128     AVInputFormat *iformat;
129     int no_background;
130     int abort_request;
131     int paused;
132     int last_paused;
133     int seek_req;
134     int seek_flags;
135     int64_t seek_pos;
136     int64_t seek_rel;
137     int read_pause_return;
138     AVFormatContext *ic;
139
140     int audio_stream;
141
142     int av_sync_type;
143     double external_clock; /* external clock base */
144     int64_t external_clock_time;
145
146     double audio_clock;
147     double audio_diff_cum; /* used for AV difference average computation */
148     double audio_diff_avg_coef;
149     double audio_diff_threshold;
150     int audio_diff_avg_count;
151     AVStream *audio_st;
152     PacketQueue audioq;
153     int audio_hw_buf_size;
154     /* samples output by the codec. We reserve more space for A/V sync
155        compensation. */
156     DECLARE_ALIGNED(16,uint8_t,audio_buf1)[(AVCODEC_MAX_AUDIO_FRAME_SIZE * 3) / 2];
157     DECLARE_ALIGNED(16,uint8_t,audio_buf2)[(AVCODEC_MAX_AUDIO_FRAME_SIZE * 3) / 2];
158     uint8_t *audio_buf;
159     unsigned int audio_buf_size; /* in bytes */
160     int audio_buf_index; /* in bytes */
161     int audio_write_buf_size;
162     AVPacket audio_pkt_temp;
163     AVPacket audio_pkt;
164     enum AVSampleFormat audio_src_fmt;
165     AVAudioConvert *reformat_ctx;
166     double audio_current_pts;
167     double audio_current_pts_drift;
168
169     enum ShowMode {
170         SHOW_MODE_NONE = -1, SHOW_MODE_VIDEO = 0, SHOW_MODE_WAVES, SHOW_MODE_RDFT, SHOW_MODE_NB
171     } show_mode;
172     int16_t sample_array[SAMPLE_ARRAY_SIZE];
173     int sample_array_index;
174     int last_i_start;
175     RDFTContext *rdft;
176     int rdft_bits;
177     FFTSample *rdft_data;
178     int xpos;
179
180     SDL_Thread *subtitle_tid;
181     int subtitle_stream;
182     int subtitle_stream_changed;
183     AVStream *subtitle_st;
184     PacketQueue subtitleq;
185     SubPicture subpq[SUBPICTURE_QUEUE_SIZE];
186     int subpq_size, subpq_rindex, subpq_windex;
187     SDL_mutex *subpq_mutex;
188     SDL_cond *subpq_cond;
189
190     double frame_timer;
191     double frame_last_pts;
192     double frame_last_delay;
193     double video_clock;                          ///<pts of last decoded frame / predicted pts of next decoded frame
194     int video_stream;
195     AVStream *video_st;
196     PacketQueue videoq;
197     double video_current_pts;                    ///<current displayed pts (different from video_clock if frame fifos are used)
198     double video_current_pts_drift;              ///<video_current_pts - time (av_gettime) at which we updated video_current_pts - used to have running video pts
199     int64_t video_current_pos;                   ///<current displayed file pos
200     VideoPicture pictq[VIDEO_PICTURE_QUEUE_SIZE];
201     int pictq_size, pictq_rindex, pictq_windex;
202     SDL_mutex *pictq_mutex;
203     SDL_cond *pictq_cond;
204 #if !CONFIG_AVFILTER
205     struct SwsContext *img_convert_ctx;
206 #endif
207
208     char filename[1024];
209     int width, height, xleft, ytop;
210     int step;
211
212 #if CONFIG_AVFILTER
213     AVFilterContext *out_video_filter;          ///<the last filter in the video chain
214 #endif
215
216     float skip_frames;
217     float skip_frames_index;
218     int refresh;
219 } VideoState;
220
221 static int opt_help(const char *opt, const char *arg);
222
223 /* options specified by the user */
224 static AVInputFormat *file_iformat;
225 static const char *input_filename;
226 static const char *window_title;
227 static int fs_screen_width;
228 static int fs_screen_height;
229 static int screen_width = 0;
230 static int screen_height = 0;
231 static int audio_disable;
232 static int video_disable;
233 static int wanted_stream[AVMEDIA_TYPE_NB]={
234     [AVMEDIA_TYPE_AUDIO]=-1,
235     [AVMEDIA_TYPE_VIDEO]=-1,
236     [AVMEDIA_TYPE_SUBTITLE]=-1,
237 };
238 static int seek_by_bytes=-1;
239 static int display_disable;
240 static int show_status = 1;
241 static int av_sync_type = AV_SYNC_AUDIO_MASTER;
242 static int64_t start_time = AV_NOPTS_VALUE;
243 static int64_t duration = AV_NOPTS_VALUE;
244 static int workaround_bugs = 1;
245 static int fast = 0;
246 static int genpts = 0;
247 static int lowres = 0;
248 static int idct = FF_IDCT_AUTO;
249 static enum AVDiscard skip_frame= AVDISCARD_DEFAULT;
250 static enum AVDiscard skip_idct= AVDISCARD_DEFAULT;
251 static enum AVDiscard skip_loop_filter= AVDISCARD_DEFAULT;
252 static int error_recognition = FF_ER_CAREFUL;
253 static int error_concealment = 3;
254 static int decoder_reorder_pts= -1;
255 static int autoexit;
256 static int exit_on_keydown;
257 static int exit_on_mousedown;
258 static int loop=1;
259 static int framedrop=-1;
260 static enum ShowMode show_mode = SHOW_MODE_NONE;
261 static const char *audio_codec_name;
262 static const char *subtitle_codec_name;
263 static const char *video_codec_name;
264
265 static int rdftspeed=20;
266 #if CONFIG_AVFILTER
267 static char *vfilters = NULL;
268 #endif
269
270 /* current context */
271 static int is_full_screen;
272 static int64_t audio_callback_time;
273
274 static AVPacket flush_pkt;
275
276 #define FF_ALLOC_EVENT   (SDL_USEREVENT)
277 #define FF_REFRESH_EVENT (SDL_USEREVENT + 1)
278 #define FF_QUIT_EVENT    (SDL_USEREVENT + 2)
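/* Custom SDL user events: FF_ALLOC_EVENT asks the main thread to (re)allocate a
   video overlay, FF_REFRESH_EVENT triggers a display refresh, and FF_QUIT_EVENT
   signals the event loop to quit. */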
279
280 static SDL_Surface *screen;
281
282 void exit_program(int ret)
283 {
284     exit(ret);
285 }
286
287 static int packet_queue_put(PacketQueue *q, AVPacket *pkt)
288 {
289     AVPacketList *pkt1;
290
291     /* duplicate the packet */
292     if (pkt!=&flush_pkt && av_dup_packet(pkt) < 0)
293         return -1;
294
295     pkt1 = av_malloc(sizeof(AVPacketList));
296     if (!pkt1)
297         return -1;
298     pkt1->pkt = *pkt;
299     pkt1->next = NULL;
300
301
302     SDL_LockMutex(q->mutex);
303
304     if (!q->last_pkt)
305
306         q->first_pkt = pkt1;
307     else
308         q->last_pkt->next = pkt1;
309     q->last_pkt = pkt1;
310     q->nb_packets++;
311     q->size += pkt1->pkt.size + sizeof(*pkt1);
312     /* XXX: should duplicate packet data in DV case */
313     SDL_CondSignal(q->cond);
314
315     SDL_UnlockMutex(q->mutex);
316     return 0;
317 }
318
319 /* packet queue handling */
320 static void packet_queue_init(PacketQueue *q)
321 {
322     memset(q, 0, sizeof(PacketQueue));
323     q->mutex = SDL_CreateMutex();
324     q->cond = SDL_CreateCond();
325     packet_queue_put(q, &flush_pkt);
326 }
327
328 static void packet_queue_flush(PacketQueue *q)
329 {
330     AVPacketList *pkt, *pkt1;
331
332     SDL_LockMutex(q->mutex);
333     for(pkt = q->first_pkt; pkt != NULL; pkt = pkt1) {
334         pkt1 = pkt->next;
335         av_free_packet(&pkt->pkt);
336         av_freep(&pkt);
337     }
338     q->last_pkt = NULL;
339     q->first_pkt = NULL;
340     q->nb_packets = 0;
341     q->size = 0;
342     SDL_UnlockMutex(q->mutex);
343 }
344
345 static void packet_queue_end(PacketQueue *q)
346 {
347     packet_queue_flush(q);
348     SDL_DestroyMutex(q->mutex);
349     SDL_DestroyCond(q->cond);
350 }
351
352 static void packet_queue_abort(PacketQueue *q)
353 {
354     SDL_LockMutex(q->mutex);
355
356     q->abort_request = 1;
357
358     SDL_CondSignal(q->cond);
359
360     SDL_UnlockMutex(q->mutex);
361 }
362
363 /* return < 0 if aborted, 0 if no packet and > 0 if packet.  */
364 static int packet_queue_get(PacketQueue *q, AVPacket *pkt, int block)
365 {
366     AVPacketList *pkt1;
367     int ret;
368
369     SDL_LockMutex(q->mutex);
370
371     for(;;) {
372         if (q->abort_request) {
373             ret = -1;
374             break;
375         }
376
377         pkt1 = q->first_pkt;
378         if (pkt1) {
379             q->first_pkt = pkt1->next;
380             if (!q->first_pkt)
381                 q->last_pkt = NULL;
382             q->nb_packets--;
383             q->size -= pkt1->pkt.size + sizeof(*pkt1);
384             *pkt = pkt1->pkt;
385             av_free(pkt1);
386             ret = 1;
387             break;
388         } else if (!block) {
389             ret = 0;
390             break;
391         } else {
392             SDL_CondWait(q->cond, q->mutex);
393         }
394     }
395     SDL_UnlockMutex(q->mutex);
396     return ret;
397 }
398
399 static inline void fill_rectangle(SDL_Surface *screen,
400                                   int x, int y, int w, int h, int color)
401 {
402     SDL_Rect rect;
403     rect.x = x;
404     rect.y = y;
405     rect.w = w;
406     rect.h = h;
407     SDL_FillRect(screen, &rect, color);
408 }
409
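/* ALPHA_BLEND mixes newp into oldp with alpha a (0..255); the shift s lets the
   caller pass a chroma value that is the sum of 2^s samples (as accumulated in
   the 2x2 loops below) without dividing it first. */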
410 #define ALPHA_BLEND(a, oldp, newp, s)\
411 ((((oldp << s) * (255 - (a))) + (newp * (a))) / (255 << s))
412
413 #define RGBA_IN(r, g, b, a, s)\
414 {\
415     unsigned int v = ((const uint32_t *)(s))[0];\
416     a = (v >> 24) & 0xff;\
417     r = (v >> 16) & 0xff;\
418     g = (v >> 8) & 0xff;\
419     b = v & 0xff;\
420 }
421
422 #define YUVA_IN(y, u, v, a, s, pal)\
423 {\
424     unsigned int val = ((const uint32_t *)(pal))[*(const uint8_t*)(s)];\
425     a = (val >> 24) & 0xff;\
426     y = (val >> 16) & 0xff;\
427     u = (val >> 8) & 0xff;\
428     v = val & 0xff;\
429 }
430
431 #define YUVA_OUT(d, y, u, v, a)\
432 {\
433     ((uint32_t *)(d))[0] = (a << 24) | (y << 16) | (u << 8) | v;\
434 }
435
436
437 #define BPP 1
438
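/* Alpha-blend one palettized subtitle rectangle (PAL8 data with a 32-bit
   palette, already converted to YUVA here) onto a YUV420P destination picture.
   The loops are unrolled 2x2 so each chroma sample is blended from the sum of
   the luma positions it covers, with special cases for odd offsets and sizes. */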
439 static void blend_subrect(AVPicture *dst, const AVSubtitleRect *rect, int imgw, int imgh)
440 {
441     int wrap, wrap3, width2, skip2;
442     int y, u, v, a, u1, v1, a1, w, h;
443     uint8_t *lum, *cb, *cr;
444     const uint8_t *p;
445     const uint32_t *pal;
446     int dstx, dsty, dstw, dsth;
447
448     dstw = av_clip(rect->w, 0, imgw);
449     dsth = av_clip(rect->h, 0, imgh);
450     dstx = av_clip(rect->x, 0, imgw - dstw);
451     dsty = av_clip(rect->y, 0, imgh - dsth);
452     lum = dst->data[0] + dsty * dst->linesize[0];
453     cb = dst->data[1] + (dsty >> 1) * dst->linesize[1];
454     cr = dst->data[2] + (dsty >> 1) * dst->linesize[2];
455
456     width2 = ((dstw + 1) >> 1) + (dstx & ~dstw & 1);
457     skip2 = dstx >> 1;
458     wrap = dst->linesize[0];
459     wrap3 = rect->pict.linesize[0];
460     p = rect->pict.data[0];
461     pal = (const uint32_t *)rect->pict.data[1];  /* Now in YCrCb! */
462
463     if (dsty & 1) {
464         lum += dstx;
465         cb += skip2;
466         cr += skip2;
467
468         if (dstx & 1) {
469             YUVA_IN(y, u, v, a, p, pal);
470             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
471             cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
472             cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
473             cb++;
474             cr++;
475             lum++;
476             p += BPP;
477         }
478         for(w = dstw - (dstx & 1); w >= 2; w -= 2) {
479             YUVA_IN(y, u, v, a, p, pal);
480             u1 = u;
481             v1 = v;
482             a1 = a;
483             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
484
485             YUVA_IN(y, u, v, a, p + BPP, pal);
486             u1 += u;
487             v1 += v;
488             a1 += a;
489             lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
490             cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
491             cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
492             cb++;
493             cr++;
494             p += 2 * BPP;
495             lum += 2;
496         }
497         if (w) {
498             YUVA_IN(y, u, v, a, p, pal);
499             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
500             cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
501             cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
502             p++;
503             lum++;
504         }
505         p += wrap3 - dstw * BPP;
506         lum += wrap - dstw - dstx;
507         cb += dst->linesize[1] - width2 - skip2;
508         cr += dst->linesize[2] - width2 - skip2;
509     }
510     for(h = dsth - (dsty & 1); h >= 2; h -= 2) {
511         lum += dstx;
512         cb += skip2;
513         cr += skip2;
514
515         if (dstx & 1) {
516             YUVA_IN(y, u, v, a, p, pal);
517             u1 = u;
518             v1 = v;
519             a1 = a;
520             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
521             p += wrap3;
522             lum += wrap;
523             YUVA_IN(y, u, v, a, p, pal);
524             u1 += u;
525             v1 += v;
526             a1 += a;
527             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
528             cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
529             cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
530             cb++;
531             cr++;
532             p += -wrap3 + BPP;
533             lum += -wrap + 1;
534         }
535         for(w = dstw - (dstx & 1); w >= 2; w -= 2) {
536             YUVA_IN(y, u, v, a, p, pal);
537             u1 = u;
538             v1 = v;
539             a1 = a;
540             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
541
542             YUVA_IN(y, u, v, a, p + BPP, pal);
543             u1 += u;
544             v1 += v;
545             a1 += a;
546             lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
547             p += wrap3;
548             lum += wrap;
549
550             YUVA_IN(y, u, v, a, p, pal);
551             u1 += u;
552             v1 += v;
553             a1 += a;
554             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
555
556             YUVA_IN(y, u, v, a, p + BPP, pal);
557             u1 += u;
558             v1 += v;
559             a1 += a;
560             lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
561
562             cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 2);
563             cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 2);
564
565             cb++;
566             cr++;
567             p += -wrap3 + 2 * BPP;
568             lum += -wrap + 2;
569         }
570         if (w) {
571             YUVA_IN(y, u, v, a, p, pal);
572             u1 = u;
573             v1 = v;
574             a1 = a;
575             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
576             p += wrap3;
577             lum += wrap;
578             YUVA_IN(y, u, v, a, p, pal);
579             u1 += u;
580             v1 += v;
581             a1 += a;
582             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
583             cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
584             cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
585             cb++;
586             cr++;
587             p += -wrap3 + BPP;
588             lum += -wrap + 1;
589         }
590         p += wrap3 + (wrap3 - dstw * BPP);
591         lum += wrap + (wrap - dstw - dstx);
592         cb += dst->linesize[1] - width2 - skip2;
593         cr += dst->linesize[2] - width2 - skip2;
594     }
595     /* handle odd height */
596     if (h) {
597         lum += dstx;
598         cb += skip2;
599         cr += skip2;
600
601         if (dstx & 1) {
602             YUVA_IN(y, u, v, a, p, pal);
603             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
604             cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
605             cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
606             cb++;
607             cr++;
608             lum++;
609             p += BPP;
610         }
611         for(w = dstw - (dstx & 1); w >= 2; w -= 2) {
612             YUVA_IN(y, u, v, a, p, pal);
613             u1 = u;
614             v1 = v;
615             a1 = a;
616             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
617
618             YUVA_IN(y, u, v, a, p + BPP, pal);
619             u1 += u;
620             v1 += v;
621             a1 += a;
622             lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
623             cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u, 1);
624             cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v, 1);
625             cb++;
626             cr++;
627             p += 2 * BPP;
628             lum += 2;
629         }
630         if (w) {
631             YUVA_IN(y, u, v, a, p, pal);
632             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
633             cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
634             cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
635         }
636     }
637 }
638
639 static void free_subpicture(SubPicture *sp)
640 {
641     avsubtitle_free(&sp->sub);
642 }
643
644 static void video_image_display(VideoState *is)
645 {
646     VideoPicture *vp;
647     SubPicture *sp;
648     AVPicture pict;
649     float aspect_ratio;
650     int width, height, x, y;
651     SDL_Rect rect;
652     int i;
653
654     vp = &is->pictq[is->pictq_rindex];
655     if (vp->bmp) {
656 #if CONFIG_AVFILTER
657          if (vp->picref->video->sample_aspect_ratio.num == 0)
658              aspect_ratio = 0;
659          else
660              aspect_ratio = av_q2d(vp->picref->video->sample_aspect_ratio);
661 #else
662
663         /* XXX: use variable in the frame */
664         if (is->video_st->sample_aspect_ratio.num)
665             aspect_ratio = av_q2d(is->video_st->sample_aspect_ratio);
666         else if (is->video_st->codec->sample_aspect_ratio.num)
667             aspect_ratio = av_q2d(is->video_st->codec->sample_aspect_ratio);
668         else
669             aspect_ratio = 0;
670 #endif
671         if (aspect_ratio <= 0.0)
672             aspect_ratio = 1.0;
673         aspect_ratio *= (float)vp->width / (float)vp->height;
674
675         if (is->subtitle_st) {
676             if (is->subpq_size > 0) {
677                 sp = &is->subpq[is->subpq_rindex];
678
679                 if (vp->pts >= sp->pts + ((float) sp->sub.start_display_time / 1000)) {
680                     SDL_LockYUVOverlay (vp->bmp);
681
682                     pict.data[0] = vp->bmp->pixels[0];
683                     pict.data[1] = vp->bmp->pixels[2];
684                     pict.data[2] = vp->bmp->pixels[1];
685
686                     pict.linesize[0] = vp->bmp->pitches[0];
687                     pict.linesize[1] = vp->bmp->pitches[2];
688                     pict.linesize[2] = vp->bmp->pitches[1];
689
690                     for (i = 0; i < sp->sub.num_rects; i++)
691                         blend_subrect(&pict, sp->sub.rects[i],
692                                       vp->bmp->w, vp->bmp->h);
693
694                     SDL_UnlockYUVOverlay (vp->bmp);
695                 }
696             }
697         }
698
699
700         /* XXX: we assume the screen has a 1:1 pixel aspect ratio */
701         height = is->height;
702         width = ((int)rint(height * aspect_ratio)) & ~1;
703         if (width > is->width) {
704             width = is->width;
705             height = ((int)rint(width / aspect_ratio)) & ~1;
706         }
707         x = (is->width - width) / 2;
708         y = (is->height - height) / 2;
709         is->no_background = 0;
710         rect.x = is->xleft + x;
711         rect.y = is->ytop  + y;
712         rect.w = FFMAX(width,  1);
713         rect.h = FFMAX(height, 1);
714         SDL_DisplayYUVOverlay(vp->bmp, &rect);
715     }
716 }
717
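/* C's % operator can return a negative result for a negative dividend; this
   wraps the value into [0, b) so it can be used as a ring-buffer index. */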
718 static inline int compute_mod(int a, int b)
719 {
720     return a < 0 ? a%b + b : a%b;
721 }
722
723 static void video_audio_display(VideoState *s)
724 {
725     int i, i_start, x, y1, y, ys, delay, n, nb_display_channels;
726     int ch, channels, h, h2, bgcolor, fgcolor;
727     int64_t time_diff;
728     int rdft_bits, nb_freq;
729
730     for(rdft_bits=1; (1<<rdft_bits)<2*s->height; rdft_bits++)
731         ;
732     nb_freq= 1<<(rdft_bits-1);
733
734     /* compute display index : center on currently output samples */
735     channels = s->audio_st->codec->channels;
736     nb_display_channels = channels;
737     if (!s->paused) {
738         int data_used= s->show_mode == SHOW_MODE_WAVES ? s->width : (2*nb_freq);
739         n = 2 * channels;
740         delay = s->audio_write_buf_size;
741         delay /= n;
742
743         /* to be more precise, we take into account the time spent since
744            the last buffer computation */
745         if (audio_callback_time) {
746             time_diff = av_gettime() - audio_callback_time;
747             delay -= (time_diff * s->audio_st->codec->sample_rate) / 1000000;
748         }
749
750         delay += 2*data_used;
751         if (delay < data_used)
752             delay = data_used;
753
754         i_start= x = compute_mod(s->sample_array_index - delay * channels, SAMPLE_ARRAY_SIZE);
755         if (s->show_mode == SHOW_MODE_WAVES) {
756             h= INT_MIN;
757             for(i=0; i<1000; i+=channels){
758                 int idx= (SAMPLE_ARRAY_SIZE + x - i) % SAMPLE_ARRAY_SIZE;
759                 int a= s->sample_array[idx];
760                 int b= s->sample_array[(idx + 4*channels)%SAMPLE_ARRAY_SIZE];
761                 int c= s->sample_array[(idx + 5*channels)%SAMPLE_ARRAY_SIZE];
762                 int d= s->sample_array[(idx + 9*channels)%SAMPLE_ARRAY_SIZE];
763                 int score= a-d;
764                 if(h<score && (b^c)<0){
765                     h= score;
766                     i_start= idx;
767                 }
768             }
769         }
770
771         s->last_i_start = i_start;
772     } else {
773         i_start = s->last_i_start;
774     }
775
776     bgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0x00);
777     if (s->show_mode == SHOW_MODE_WAVES) {
778         fill_rectangle(screen,
779                        s->xleft, s->ytop, s->width, s->height,
780                        bgcolor);
781
782         fgcolor = SDL_MapRGB(screen->format, 0xff, 0xff, 0xff);
783
784         /* total height for one channel */
785         h = s->height / nb_display_channels;
786         /* graph height / 2 */
787         h2 = (h * 9) / 20;
788         for(ch = 0;ch < nb_display_channels; ch++) {
789             i = i_start + ch;
790             y1 = s->ytop + ch * h + (h / 2); /* position of center line */
791             for(x = 0; x < s->width; x++) {
792                 y = (s->sample_array[i] * h2) >> 15;
793                 if (y < 0) {
794                     y = -y;
795                     ys = y1 - y;
796                 } else {
797                     ys = y1;
798                 }
799                 fill_rectangle(screen,
800                                s->xleft + x, ys, 1, y,
801                                fgcolor);
802                 i += channels;
803                 if (i >= SAMPLE_ARRAY_SIZE)
804                     i -= SAMPLE_ARRAY_SIZE;
805             }
806         }
807
808         fgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0xff);
809
810         for(ch = 1;ch < nb_display_channels; ch++) {
811             y = s->ytop + ch * h;
812             fill_rectangle(screen,
813                            s->xleft, y, s->width, 1,
814                            fgcolor);
815         }
816         SDL_UpdateRect(screen, s->xleft, s->ytop, s->width, s->height);
817     }else{
818         nb_display_channels= FFMIN(nb_display_channels, 2);
819         if(rdft_bits != s->rdft_bits){
820             av_rdft_end(s->rdft);
821             av_free(s->rdft_data);
822             s->rdft = av_rdft_init(rdft_bits, DFT_R2C);
823             s->rdft_bits= rdft_bits;
824             s->rdft_data= av_malloc(4*nb_freq*sizeof(*s->rdft_data));
825         }
826         {
827             FFTSample *data[2];
828             for(ch = 0;ch < nb_display_channels; ch++) {
829                 data[ch] = s->rdft_data + 2*nb_freq*ch;
830                 i = i_start + ch;
831                 for(x = 0; x < 2*nb_freq; x++) {
832                     double w= (x-nb_freq)*(1.0/nb_freq);
833                     data[ch][x]= s->sample_array[i]*(1.0-w*w);
834                     i += channels;
835                     if (i >= SAMPLE_ARRAY_SIZE)
836                         i -= SAMPLE_ARRAY_SIZE;
837                 }
838                 av_rdft_calc(s->rdft, data[ch]);
839             }
840             // Least efficient way to do this; we should of course access the data directly, but it's more than fast enough
841             for(y=0; y<s->height; y++){
842                 double w= 1/sqrt(nb_freq);
843                 int a= sqrt(w*sqrt(data[0][2*y+0]*data[0][2*y+0] + data[0][2*y+1]*data[0][2*y+1]));
844                 int b= (nb_display_channels == 2 ) ? sqrt(w*sqrt(data[1][2*y+0]*data[1][2*y+0]
845                        + data[1][2*y+1]*data[1][2*y+1])) : a;
846                 a= FFMIN(a,255);
847                 b= FFMIN(b,255);
848                 fgcolor = SDL_MapRGB(screen->format, a, b, (a+b)/2);
849
850                 fill_rectangle(screen,
851                             s->xpos, s->height-y, 1, 1,
852                             fgcolor);
853             }
854         }
855         SDL_UpdateRect(screen, s->xpos, s->ytop, 1, s->height);
856         s->xpos++;
857         if(s->xpos >= s->width)
858             s->xpos= s->xleft;
859     }
860 }
861
862 static void stream_close(VideoState *is)
863 {
864     VideoPicture *vp;
865     int i;
866     /* XXX: use a special url_shutdown call to abort parse cleanly */
867     is->abort_request = 1;
868     SDL_WaitThread(is->read_tid, NULL);
869     SDL_WaitThread(is->refresh_tid, NULL);
870
871     /* free all pictures */
872     for(i=0;i<VIDEO_PICTURE_QUEUE_SIZE; i++) {
873         vp = &is->pictq[i];
874 #if CONFIG_AVFILTER
875         if (vp->picref) {
876             avfilter_unref_buffer(vp->picref);
877             vp->picref = NULL;
878         }
879 #endif
880         if (vp->bmp) {
881             SDL_FreeYUVOverlay(vp->bmp);
882             vp->bmp = NULL;
883         }
884     }
885     SDL_DestroyMutex(is->pictq_mutex);
886     SDL_DestroyCond(is->pictq_cond);
887     SDL_DestroyMutex(is->subpq_mutex);
888     SDL_DestroyCond(is->subpq_cond);
889 #if !CONFIG_AVFILTER
890     if (is->img_convert_ctx)
891         sws_freeContext(is->img_convert_ctx);
892 #endif
893     av_free(is);
894 }
895
896 static void do_exit(VideoState *is)
897 {
898     if (is) {
899         stream_close(is);
900     }
901     av_lockmgr_register(NULL);
902     uninit_opts();
903 #if CONFIG_AVFILTER
904     avfilter_uninit();
905 #endif
906     if (show_status)
907         printf("\n");
908     SDL_Quit();
909     av_log(NULL, AV_LOG_QUIET, "%s", "");
910     exit(0);
911 }
912
913 static int video_open(VideoState *is){
914     int flags = SDL_HWSURFACE|SDL_ASYNCBLIT|SDL_HWACCEL;
915     int w,h;
916
917     if(is_full_screen) flags |= SDL_FULLSCREEN;
918     else               flags |= SDL_RESIZABLE;
919
920     if (is_full_screen && fs_screen_width) {
921         w = fs_screen_width;
922         h = fs_screen_height;
923     } else if(!is_full_screen && screen_width){
924         w = screen_width;
925         h = screen_height;
926 #if CONFIG_AVFILTER
927     }else if (is->out_video_filter && is->out_video_filter->inputs[0]){
928         w = is->out_video_filter->inputs[0]->w;
929         h = is->out_video_filter->inputs[0]->h;
930 #else
931     }else if (is->video_st && is->video_st->codec->width){
932         w = is->video_st->codec->width;
933         h = is->video_st->codec->height;
934 #endif
935     } else {
936         w = 640;
937         h = 480;
938     }
939     if(screen && is->width == screen->w && screen->w == w
940        && is->height== screen->h && screen->h == h)
941         return 0;
942
943 #ifndef __APPLE__
944     screen = SDL_SetVideoMode(w, h, 0, flags);
945 #else
946     /* setting bits_per_pixel = 0 or 32 causes blank video on OS X */
947     screen = SDL_SetVideoMode(w, h, 24, flags);
948 #endif
949     if (!screen) {
950         fprintf(stderr, "SDL: could not set video mode - exiting\n");
951         do_exit(is);
952     }
953     if (!window_title)
954         window_title = input_filename;
955     SDL_WM_SetCaption(window_title, window_title);
956
957     is->width = screen->w;
958     is->height = screen->h;
959
960     return 0;
961 }
962
963 /* display the current picture, if any */
964 static void video_display(VideoState *is)
965 {
966     if(!screen)
967         video_open(is);
968     if (is->audio_st && is->show_mode != SHOW_MODE_VIDEO)
969         video_audio_display(is);
970     else if (is->video_st)
971         video_image_display(is);
972 }
973
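/* Runs in its own thread and periodically posts FF_REFRESH_EVENT so that all
   actual drawing happens in the main SDL event loop; is->refresh keeps more
   than one refresh event from piling up in the queue. */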
974 static int refresh_thread(void *opaque)
975 {
976     VideoState *is= opaque;
977     while(!is->abort_request){
978         SDL_Event event;
979         event.type = FF_REFRESH_EVENT;
980         event.user.data1 = opaque;
981         if(!is->refresh){
982             is->refresh=1;
983             SDL_PushEvent(&event);
984         }
985         // FIXME: ideally we should wait the correct time, but SDL's event passing is so slow it would be silly
986         usleep(is->audio_st && is->show_mode != SHOW_MODE_VIDEO ? rdftspeed*1000 : 5000);
987     }
988     return 0;
989 }
990
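/* The *_pts_drift fields store "pts minus the wall-clock time (av_gettime()/1e6)
   at which that pts was last updated", so adding the current time back in
   extrapolates the clock between updates without querying the decoder thread. */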
991 /* get the current audio clock value */
992 static double get_audio_clock(VideoState *is)
993 {
994     if (is->paused) {
995         return is->audio_current_pts;
996     } else {
997         return is->audio_current_pts_drift + av_gettime() / 1000000.0;
998     }
999 }
1000
1001 /* get the current video clock value */
1002 static double get_video_clock(VideoState *is)
1003 {
1004     if (is->paused) {
1005         return is->video_current_pts;
1006     } else {
1007         return is->video_current_pts_drift + av_gettime() / 1000000.0;
1008     }
1009 }
1010
1011 /* get the current external clock value */
1012 static double get_external_clock(VideoState *is)
1013 {
1014     int64_t ti;
1015     ti = av_gettime();
1016     return is->external_clock + ((ti - is->external_clock_time) * 1e-6);
1017 }
1018
1019 /* get the current master clock value */
1020 static double get_master_clock(VideoState *is)
1021 {
1022     double val;
1023
1024     if (is->av_sync_type == AV_SYNC_VIDEO_MASTER) {
1025         if (is->video_st)
1026             val = get_video_clock(is);
1027         else
1028             val = get_audio_clock(is);
1029     } else if (is->av_sync_type == AV_SYNC_AUDIO_MASTER) {
1030         if (is->audio_st)
1031             val = get_audio_clock(is);
1032         else
1033             val = get_video_clock(is);
1034     } else {
1035         val = get_external_clock(is);
1036     }
1037     return val;
1038 }
1039
1040 /* seek in the stream */
1041 static void stream_seek(VideoState *is, int64_t pos, int64_t rel, int seek_by_bytes)
1042 {
1043     if (!is->seek_req) {
1044         is->seek_pos = pos;
1045         is->seek_rel = rel;
1046         is->seek_flags &= ~AVSEEK_FLAG_BYTE;
1047         if (seek_by_bytes)
1048             is->seek_flags |= AVSEEK_FLAG_BYTE;
1049         is->seek_req = 1;
1050     }
1051 }
1052
1053 /* pause or resume the video */
1054 static void stream_toggle_pause(VideoState *is)
1055 {
1056     if (is->paused) {
1057         is->frame_timer += av_gettime() / 1000000.0 + is->video_current_pts_drift - is->video_current_pts;
1058         if(is->read_pause_return != AVERROR(ENOSYS)){
1059             is->video_current_pts = is->video_current_pts_drift + av_gettime() / 1000000.0;
1060         }
1061         is->video_current_pts_drift = is->video_current_pts - av_gettime() / 1000000.0;
1062     }
1063     is->paused = !is->paused;
1064 }
1065
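/* Compute the absolute time (seconds, same base as av_gettime()/1e6) at which
   the frame with the given pts should be displayed: nominally the previous
   target plus the pts delta, with the delay dropped to zero when video is
   behind the master clock and doubled when it is ahead, as long as the error
   stays below AV_NOSYNC_THRESHOLD. */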
1066 static double compute_target_time(double frame_current_pts, VideoState *is)
1067 {
1068     double delay, sync_threshold, diff;
1069
1070     /* compute nominal delay */
1071     delay = frame_current_pts - is->frame_last_pts;
1072     if (delay <= 0 || delay >= 10.0) {
1073         /* if incorrect delay, use previous one */
1074         delay = is->frame_last_delay;
1075     } else {
1076         is->frame_last_delay = delay;
1077     }
1078     is->frame_last_pts = frame_current_pts;
1079
1080     /* update delay to follow master synchronisation source */
1081     if (((is->av_sync_type == AV_SYNC_AUDIO_MASTER && is->audio_st) ||
1082          is->av_sync_type == AV_SYNC_EXTERNAL_CLOCK)) {
1083         /* if video is slave, we try to correct big delays by
1084            duplicating or deleting a frame */
1085         diff = get_video_clock(is) - get_master_clock(is);
1086
1087         /* skip or repeat frame. We take into account the
1088            delay to compute the threshold. I still don't know
1089            if it is the best guess */
1090         sync_threshold = FFMAX(AV_SYNC_THRESHOLD, delay);
1091         if (fabs(diff) < AV_NOSYNC_THRESHOLD) {
1092             if (diff <= -sync_threshold)
1093                 delay = 0;
1094             else if (diff >= sync_threshold)
1095                 delay = 2 * delay;
1096         }
1097     }
1098     is->frame_timer += delay;
1099
1100     av_dlog(NULL, "video: delay=%0.3f pts=%0.3f A-V=%f\n",
1101             delay, frame_current_pts, -diff);
1102
1103     return is->frame_timer;
1104 }
1105
1106 /* called to display each frame */
1107 static void video_refresh(void *opaque)
1108 {
1109     VideoState *is = opaque;
1110     VideoPicture *vp;
1111
1112     SubPicture *sp, *sp2;
1113
1114     if (is->video_st) {
1115 retry:
1116         if (is->pictq_size == 0) {
1117             // nothing to do, no picture to display in the queue
1118         } else {
1119             double time= av_gettime()/1000000.0;
1120             double next_target;
1121             /* dequeue the picture */
1122             vp = &is->pictq[is->pictq_rindex];
1123
1124             if(time < vp->target_clock)
1125                 return;
1126             /* update current video pts */
1127             is->video_current_pts = vp->pts;
1128             is->video_current_pts_drift = is->video_current_pts - time;
1129             is->video_current_pos = vp->pos;
1130             if(is->pictq_size > 1){
1131                 VideoPicture *nextvp= &is->pictq[(is->pictq_rindex+1)%VIDEO_PICTURE_QUEUE_SIZE];
1132                 assert(nextvp->target_clock >= vp->target_clock);
1133                 next_target= nextvp->target_clock;
1134             }else{
1135                 next_target= vp->target_clock + vp->duration;
1136             }
1137             if((framedrop>0 || (framedrop && is->audio_st)) && time > next_target){
1138                 is->skip_frames *= 1.0 + FRAME_SKIP_FACTOR;
1139                 if(is->pictq_size > 1){
1140                     /* update queue size and signal for next picture */
1141                     if (++is->pictq_rindex == VIDEO_PICTURE_QUEUE_SIZE)
1142                         is->pictq_rindex = 0;
1143
1144                     SDL_LockMutex(is->pictq_mutex);
1145                     is->pictq_size--;
1146                     SDL_CondSignal(is->pictq_cond);
1147                     SDL_UnlockMutex(is->pictq_mutex);
1148                     goto retry;
1149                 }
1150             }
1151
1152             if(is->subtitle_st) {
1153                 if (is->subtitle_stream_changed) {
1154                     SDL_LockMutex(is->subpq_mutex);
1155
1156                     while (is->subpq_size) {
1157                         free_subpicture(&is->subpq[is->subpq_rindex]);
1158
1159                         /* update queue size and signal for next picture */
1160                         if (++is->subpq_rindex == SUBPICTURE_QUEUE_SIZE)
1161                             is->subpq_rindex = 0;
1162
1163                         is->subpq_size--;
1164                     }
1165                     is->subtitle_stream_changed = 0;
1166
1167                     SDL_CondSignal(is->subpq_cond);
1168                     SDL_UnlockMutex(is->subpq_mutex);
1169                 } else {
1170                     if (is->subpq_size > 0) {
1171                         sp = &is->subpq[is->subpq_rindex];
1172
1173                         if (is->subpq_size > 1)
1174                             sp2 = &is->subpq[(is->subpq_rindex + 1) % SUBPICTURE_QUEUE_SIZE];
1175                         else
1176                             sp2 = NULL;
1177
1178                         if ((is->video_current_pts > (sp->pts + ((float) sp->sub.end_display_time / 1000)))
1179                                 || (sp2 && is->video_current_pts > (sp2->pts + ((float) sp2->sub.start_display_time / 1000))))
1180                         {
1181                             free_subpicture(sp);
1182
1183                             /* update queue size and signal for next picture */
1184                             if (++is->subpq_rindex == SUBPICTURE_QUEUE_SIZE)
1185                                 is->subpq_rindex = 0;
1186
1187                             SDL_LockMutex(is->subpq_mutex);
1188                             is->subpq_size--;
1189                             SDL_CondSignal(is->subpq_cond);
1190                             SDL_UnlockMutex(is->subpq_mutex);
1191                         }
1192                     }
1193                 }
1194             }
1195
1196             /* display picture */
1197             if (!display_disable)
1198                 video_display(is);
1199
1200             /* update queue size and signal for next picture */
1201             if (++is->pictq_rindex == VIDEO_PICTURE_QUEUE_SIZE)
1202                 is->pictq_rindex = 0;
1203
1204             SDL_LockMutex(is->pictq_mutex);
1205             is->pictq_size--;
1206             SDL_CondSignal(is->pictq_cond);
1207             SDL_UnlockMutex(is->pictq_mutex);
1208         }
1209     } else if (is->audio_st) {
1210         /* draw the next audio frame */
1211
1212         /* if there is only an audio stream, display the audio bars (better
1213            than nothing, just to test the implementation) */
1214
1215         /* display picture */
1216         if (!display_disable)
1217             video_display(is);
1218     }
1219     if (show_status) {
1220         static int64_t last_time;
1221         int64_t cur_time;
1222         int aqsize, vqsize, sqsize;
1223         double av_diff;
1224
1225         cur_time = av_gettime();
1226         if (!last_time || (cur_time - last_time) >= 30000) {
1227             aqsize = 0;
1228             vqsize = 0;
1229             sqsize = 0;
1230             if (is->audio_st)
1231                 aqsize = is->audioq.size;
1232             if (is->video_st)
1233                 vqsize = is->videoq.size;
1234             if (is->subtitle_st)
1235                 sqsize = is->subtitleq.size;
1236             av_diff = 0;
1237             if (is->audio_st && is->video_st)
1238                 av_diff = get_audio_clock(is) - get_video_clock(is);
1239             printf("%7.2f A-V:%7.3f s:%3.1f aq=%5dKB vq=%5dKB sq=%5dB f=%"PRId64"/%"PRId64"   \r",
1240                    get_master_clock(is),
1241                    av_diff,
1242                    FFMAX(is->skip_frames-1, 0),
1243                    aqsize / 1024,
1244                    vqsize / 1024,
1245                    sqsize,
1246                    is->video_st ? is->video_st->codec->pts_correction_num_faulty_dts : 0,
1247                    is->video_st ? is->video_st->codec->pts_correction_num_faulty_pts : 0);
1248             fflush(stdout);
1249             last_time = cur_time;
1250         }
1251     }
1252 }
1253
1254 /* allocate a picture (this needs to be done in the main thread to avoid
1255    potential locking problems) */
1256 static void alloc_picture(void *opaque)
1257 {
1258     VideoState *is = opaque;
1259     VideoPicture *vp;
1260
1261     vp = &is->pictq[is->pictq_windex];
1262
1263     if (vp->bmp)
1264         SDL_FreeYUVOverlay(vp->bmp);
1265
1266 #if CONFIG_AVFILTER
1267     if (vp->picref)
1268         avfilter_unref_buffer(vp->picref);
1269     vp->picref = NULL;
1270
1271     vp->width   = is->out_video_filter->inputs[0]->w;
1272     vp->height  = is->out_video_filter->inputs[0]->h;
1273     vp->pix_fmt = is->out_video_filter->inputs[0]->format;
1274 #else
1275     vp->width   = is->video_st->codec->width;
1276     vp->height  = is->video_st->codec->height;
1277     vp->pix_fmt = is->video_st->codec->pix_fmt;
1278 #endif
1279
1280     vp->bmp = SDL_CreateYUVOverlay(vp->width, vp->height,
1281                                    SDL_YV12_OVERLAY,
1282                                    screen);
1283     if (!vp->bmp || vp->bmp->pitches[0] < vp->width) {
1284         /* SDL allocates a buffer smaller than requested if the video
1285          * overlay hardware is unable to support the requested size. */
1286         fprintf(stderr, "Error: the video system does not support an image\n"
1287                         "size of %dx%d pixels. Try using -lowres or -vf \"scale=w:h\"\n"
1288                         "to reduce the image size.\n", vp->width, vp->height );
1289         do_exit(is);
1290     }
1291
1292     SDL_LockMutex(is->pictq_mutex);
1293     vp->allocated = 1;
1294     SDL_CondSignal(is->pictq_cond);
1295     SDL_UnlockMutex(is->pictq_mutex);
1296 }
1297
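/* Take a decoded frame, wait for a free slot in the picture queue (requesting
   an SDL overlay (re)allocation from the main thread if needed), copy or
   convert the frame into the overlay, and stamp it with its target display
   time. */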
1298 static int queue_picture(VideoState *is, AVFrame *src_frame, double pts1, int64_t pos)
1299 {
1300     VideoPicture *vp;
1301     double frame_delay, pts = pts1;
1302
1303     /* compute the exact PTS for the picture if it is omitted in the stream
1304      * pts1 is the dts of the pkt / pts of the frame */
1305     if (pts != 0) {
1306         /* update video clock with pts, if present */
1307         is->video_clock = pts;
1308     } else {
1309         pts = is->video_clock;
1310     }
1311     /* update video clock for next frame */
1312     frame_delay = av_q2d(is->video_st->codec->time_base);
1313     /* for MPEG2, the frame can be repeated, so we update the
1314        clock accordingly */
1315     frame_delay += src_frame->repeat_pict * (frame_delay * 0.5);
1316     is->video_clock += frame_delay;
1317
1318 #if defined(DEBUG_SYNC) && 0
1319     printf("frame_type=%c clock=%0.3f pts=%0.3f\n",
1320            av_get_picture_type_char(src_frame->pict_type), pts, pts1);
1321 #endif
1322
1323     /* wait until we have space to put a new picture */
1324     SDL_LockMutex(is->pictq_mutex);
1325
1326     if(is->pictq_size>=VIDEO_PICTURE_QUEUE_SIZE && !is->refresh)
1327         is->skip_frames= FFMAX(1.0 - FRAME_SKIP_FACTOR, is->skip_frames * (1.0-FRAME_SKIP_FACTOR));
1328
1329     while (is->pictq_size >= VIDEO_PICTURE_QUEUE_SIZE &&
1330            !is->videoq.abort_request) {
1331         SDL_CondWait(is->pictq_cond, is->pictq_mutex);
1332     }
1333     SDL_UnlockMutex(is->pictq_mutex);
1334
1335     if (is->videoq.abort_request)
1336         return -1;
1337
1338     vp = &is->pictq[is->pictq_windex];
1339
1340     vp->duration = frame_delay;
1341
1342     /* alloc or resize hardware picture buffer */
1343     if (!vp->bmp ||
1344 #if CONFIG_AVFILTER
1345         vp->width  != is->out_video_filter->inputs[0]->w ||
1346         vp->height != is->out_video_filter->inputs[0]->h) {
1347 #else
1348         vp->width != is->video_st->codec->width ||
1349         vp->height != is->video_st->codec->height) {
1350 #endif
1351         SDL_Event event;
1352
1353         vp->allocated = 0;
1354
1355         /* the allocation must be done in the main thread to avoid
1356            locking problems */
1357         event.type = FF_ALLOC_EVENT;
1358         event.user.data1 = is;
1359         SDL_PushEvent(&event);
1360
1361         /* wait until the picture is allocated */
1362         SDL_LockMutex(is->pictq_mutex);
1363         while (!vp->allocated && !is->videoq.abort_request) {
1364             SDL_CondWait(is->pictq_cond, is->pictq_mutex);
1365         }
1366         /* if the queue is aborted, we have to pop the pending ALLOC event or wait for the allocation to complete */
1367         if (is->videoq.abort_request && SDL_PeepEvents(&event, 1, SDL_GETEVENT, SDL_EVENTMASK(FF_ALLOC_EVENT)) != 1) {
1368             while (!vp->allocated) {
1369                 SDL_CondWait(is->pictq_cond, is->pictq_mutex);
1370             }
1371         }
1372         SDL_UnlockMutex(is->pictq_mutex);
1373
1374         if (is->videoq.abort_request)
1375             return -1;
1376     }
1377
1378     /* if the frame is not skipped, then display it */
1379     if (vp->bmp) {
1380         AVPicture pict;
1381 #if CONFIG_AVFILTER
1382         if(vp->picref)
1383             avfilter_unref_buffer(vp->picref);
1384         vp->picref = src_frame->opaque;
1385 #endif
1386
1387         /* get a pointer to the bitmap */
1388         SDL_LockYUVOverlay (vp->bmp);
1389
1390         memset(&pict,0,sizeof(AVPicture));
1391         pict.data[0] = vp->bmp->pixels[0];
1392         pict.data[1] = vp->bmp->pixels[2];
1393         pict.data[2] = vp->bmp->pixels[1];
1394
1395         pict.linesize[0] = vp->bmp->pitches[0];
1396         pict.linesize[1] = vp->bmp->pitches[2];
1397         pict.linesize[2] = vp->bmp->pitches[1];
1398
1399 #if CONFIG_AVFILTER
1400         //FIXME use direct rendering
1401         av_picture_copy(&pict, (AVPicture *)src_frame,
1402                         vp->pix_fmt, vp->width, vp->height);
1403 #else
1404         sws_flags = av_get_int(sws_opts, "sws_flags", NULL);
1405         is->img_convert_ctx = sws_getCachedContext(is->img_convert_ctx,
1406             vp->width, vp->height, vp->pix_fmt, vp->width, vp->height,
1407             PIX_FMT_YUV420P, sws_flags, NULL, NULL, NULL);
1408         if (is->img_convert_ctx == NULL) {
1409             fprintf(stderr, "Cannot initialize the conversion context\n");
1410             exit(1);
1411         }
1412         sws_scale(is->img_convert_ctx, src_frame->data, src_frame->linesize,
1413                   0, vp->height, pict.data, pict.linesize);
1414 #endif
1415         /* update the bitmap content */
1416         SDL_UnlockYUVOverlay(vp->bmp);
1417
1418         vp->pts = pts;
1419         vp->pos = pos;
1420
1421         /* now we can update the picture count */
1422         if (++is->pictq_windex == VIDEO_PICTURE_QUEUE_SIZE)
1423             is->pictq_windex = 0;
1424         SDL_LockMutex(is->pictq_mutex);
1425         vp->target_clock= compute_target_time(vp->pts, is);
1426
1427         is->pictq_size++;
1428         SDL_UnlockMutex(is->pictq_mutex);
1429     }
1430     return 0;
1431 }
1432
1433 static int get_video_frame(VideoState *is, AVFrame *frame, int64_t *pts, AVPacket *pkt)
1434 {
1435     int got_picture, i;
1436
1437     if (packet_queue_get(&is->videoq, pkt, 1) < 0)
1438         return -1;
1439
1440     if (pkt->data == flush_pkt.data) {
1441         avcodec_flush_buffers(is->video_st->codec);
1442
1443         SDL_LockMutex(is->pictq_mutex);
1444         // Make sure there are no long-delay timers (ideally we should just flush the queue, but that's harder)
1445         for (i = 0; i < VIDEO_PICTURE_QUEUE_SIZE; i++) {
1446             is->pictq[i].target_clock= 0;
1447         }
1448         while (is->pictq_size && !is->videoq.abort_request) {
1449             SDL_CondWait(is->pictq_cond, is->pictq_mutex);
1450         }
1451         is->video_current_pos = -1;
1452         SDL_UnlockMutex(is->pictq_mutex);
1453
1454         is->frame_last_pts = AV_NOPTS_VALUE;
1455         is->frame_last_delay = 0;
1456         is->frame_timer = (double)av_gettime() / 1000000.0;
1457         is->skip_frames = 1;
1458         is->skip_frames_index = 0;
1459         return 0;
1460     }
1461
1462     avcodec_decode_video2(is->video_st->codec, frame, &got_picture, pkt);
1463
1464     if (got_picture) {
1465         if (decoder_reorder_pts == -1) {
1466             *pts = frame->best_effort_timestamp;
1467         } else if (decoder_reorder_pts) {
1468             *pts = frame->pkt_pts;
1469         } else {
1470             *pts = frame->pkt_dts;
1471         }
1472
1473         if (*pts == AV_NOPTS_VALUE) {
1474             *pts = 0;
1475         }
1476
1477         is->skip_frames_index += 1;
1478         if(is->skip_frames_index >= is->skip_frames){
1479             is->skip_frames_index -= FFMAX(is->skip_frames, 1.0);
1480             return 1;
1481         }
1482
1483     }
1484     return 0;
1485 }
1486
1487 #if CONFIG_AVFILTER
1488 typedef struct {
1489     VideoState *is;
1490     AVFrame *frame;
1491     int use_dr1;
1492 } FilterPriv;
1493
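/* get_buffer() callback used when the decoder supports direct rendering
   (CODEC_CAP_DR1): the decoder writes straight into an AVFilterBufferRef
   obtained from the filter graph, so input_request_frame() can reference that
   buffer instead of copying the frame (see use_dr1). */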
1494 static int input_get_buffer(AVCodecContext *codec, AVFrame *pic)
1495 {
1496     AVFilterContext *ctx = codec->opaque;
1497     AVFilterBufferRef  *ref;
1498     int perms = AV_PERM_WRITE;
1499     int i, w, h, stride[4];
1500     unsigned edge;
1501     int pixel_size;
1502
1503     av_assert0(codec->flags & CODEC_FLAG_EMU_EDGE);
1504
1505     if (codec->codec->capabilities & CODEC_CAP_NEG_LINESIZES)
1506         perms |= AV_PERM_NEG_LINESIZES;
1507
1508     if(pic->buffer_hints & FF_BUFFER_HINTS_VALID) {
1509         if(pic->buffer_hints & FF_BUFFER_HINTS_READABLE) perms |= AV_PERM_READ;
1510         if(pic->buffer_hints & FF_BUFFER_HINTS_PRESERVE) perms |= AV_PERM_PRESERVE;
1511         if(pic->buffer_hints & FF_BUFFER_HINTS_REUSABLE) perms |= AV_PERM_REUSE2;
1512     }
1513     if(pic->reference) perms |= AV_PERM_READ | AV_PERM_PRESERVE;
1514
1515     w = codec->width;
1516     h = codec->height;
1517
1518     if(av_image_check_size(w, h, 0, codec))
1519         return -1;
1520
1521     avcodec_align_dimensions2(codec, &w, &h, stride);
1522     edge = codec->flags & CODEC_FLAG_EMU_EDGE ? 0 : avcodec_get_edge_width();
1523     w += edge << 1;
1524     h += edge << 1;
1525
1526     if(!(ref = avfilter_get_video_buffer(ctx->outputs[0], perms, w, h)))
1527         return -1;
1528
1529     pixel_size = av_pix_fmt_descriptors[ref->format].comp[0].step_minus1+1;
1530     ref->video->w = codec->width;
1531     ref->video->h = codec->height;
1532     for(i = 0; i < 4; i ++) {
1533         unsigned hshift = (i == 1 || i == 2) ? av_pix_fmt_descriptors[ref->format].log2_chroma_w : 0;
1534         unsigned vshift = (i == 1 || i == 2) ? av_pix_fmt_descriptors[ref->format].log2_chroma_h : 0;
1535
1536         if (ref->data[i]) {
1537             ref->data[i]    += ((edge * pixel_size) >> hshift) + ((edge * ref->linesize[i]) >> vshift);
1538         }
1539         pic->data[i]     = ref->data[i];
1540         pic->linesize[i] = ref->linesize[i];
1541     }
1542     pic->opaque = ref;
1543     pic->age    = INT_MAX;
1544     pic->type   = FF_BUFFER_TYPE_USER;
1545     pic->reordered_opaque = codec->reordered_opaque;
1546     if(codec->pkt) pic->pkt_pts = codec->pkt->pts;
1547     else           pic->pkt_pts = AV_NOPTS_VALUE;
1548     return 0;
1549 }
1550
1551 static void input_release_buffer(AVCodecContext *codec, AVFrame *pic)
1552 {
1553     memset(pic->data, 0, sizeof(pic->data));
1554     avfilter_unref_buffer(pic->opaque);
1555 }
1556
1557 static int input_reget_buffer(AVCodecContext *codec, AVFrame *pic)
1558 {
1559     AVFilterBufferRef *ref = pic->opaque;
1560
1561     if (pic->data[0] == NULL) {
1562         pic->buffer_hints |= FF_BUFFER_HINTS_READABLE;
1563         return codec->get_buffer(codec, pic);
1564     }
1565
1566     if ((codec->width != ref->video->w) || (codec->height != ref->video->h) ||
1567         (codec->pix_fmt != ref->format)) {
1568         av_log(codec, AV_LOG_ERROR, "Picture properties changed.\n");
1569         return -1;
1570     }
1571
1572     pic->reordered_opaque = codec->reordered_opaque;
1573     if(codec->pkt) pic->pkt_pts = codec->pkt->pts;
1574     else           pic->pkt_pts = AV_NOPTS_VALUE;
1575     return 0;
1576 }
1577
1578 static int input_init(AVFilterContext *ctx, const char *args, void *opaque)
1579 {
1580     FilterPriv *priv = ctx->priv;
1581     AVCodecContext *codec;
1582     if(!opaque) return -1;
1583
1584     priv->is = opaque;
1585     codec    = priv->is->video_st->codec;
1586     codec->opaque = ctx;
1587     if((codec->codec->capabilities & CODEC_CAP_DR1)
1588     ) {
1589         av_assert0(codec->flags & CODEC_FLAG_EMU_EDGE);
1590         priv->use_dr1 = 1;
1591         codec->get_buffer     = input_get_buffer;
1592         codec->release_buffer = input_release_buffer;
1593         codec->reget_buffer   = input_reget_buffer;
1594         codec->thread_safe_callbacks = 1;
1595     }
1596
1597     priv->frame = avcodec_alloc_frame();
1598
1599     return 0;
1600 }
1601
1602 static void input_uninit(AVFilterContext *ctx)
1603 {
1604     FilterPriv *priv = ctx->priv;
1605     av_free(priv->frame);
1606 }
1607
1608 static int input_request_frame(AVFilterLink *link)
1609 {
1610     FilterPriv *priv = link->src->priv;
1611     AVFilterBufferRef *picref;
1612     int64_t pts = 0;
1613     AVPacket pkt;
1614     int ret;
1615
1616     while (!(ret = get_video_frame(priv->is, priv->frame, &pts, &pkt)))
1617         av_free_packet(&pkt);
1618     if (ret < 0)
1619         return -1;
1620
1621     if(priv->use_dr1 && priv->frame->opaque) {
1622         picref = avfilter_ref_buffer(priv->frame->opaque, ~0);
1623     } else {
1624         picref = avfilter_get_video_buffer(link, AV_PERM_WRITE, link->w, link->h);
1625         av_image_copy(picref->data, picref->linesize,
1626                       priv->frame->data, priv->frame->linesize,
1627                       picref->format, link->w, link->h);
1628     }
1629     av_free_packet(&pkt);
1630
1631     avfilter_copy_frame_props(picref, priv->frame);
1632     picref->pts = pts;
1633
1634     avfilter_start_frame(link, picref);
1635     avfilter_draw_slice(link, 0, link->h, 1);
1636     avfilter_end_frame(link);
1637
1638     return 0;
1639 }
1640
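/* the source filter outputs exactly one pixel format: the decoder's */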
1641 static int input_query_formats(AVFilterContext *ctx)
1642 {
1643     FilterPriv *priv = ctx->priv;
1644     enum PixelFormat pix_fmts[] = {
1645         priv->is->video_st->codec->pix_fmt, PIX_FMT_NONE
1646     };
1647
1648     avfilter_set_common_pixel_formats(ctx, avfilter_make_format_list(pix_fmts));
1649     return 0;
1650 }
1651
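/* propagate the stream's dimensions, sample aspect ratio and time base to
   the source filter's output link */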
1652 static int input_config_props(AVFilterLink *link)
1653 {
1654     FilterPriv *priv  = link->src->priv;
1655     AVStream *s = priv->is->video_st;
1656
1657     link->w = s->codec->width;
1658     link->h = s->codec->height;
1659     link->sample_aspect_ratio = s->sample_aspect_ratio.num ?
1660         s->sample_aspect_ratio : s->codec->sample_aspect_ratio;
1661     link->time_base = s->time_base;
1662
1663     return 0;
1664 }
1665
1666 static AVFilter input_filter =
1667 {
1668     .name      = "ffplay_input",
1669
1670     .priv_size = sizeof(FilterPriv),
1671
1672     .init      = input_init,
1673     .uninit    = input_uninit,
1674
1675     .query_formats = input_query_formats,
1676
1677     .inputs    = (AVFilterPad[]) {{ .name = NULL }},
1678     .outputs   = (AVFilterPad[]) {{ .name = "default",
1679                                     .type = AVMEDIA_TYPE_VIDEO,
1680                                     .request_frame = input_request_frame,
1681                                     .config_props  = input_config_props, },
1682                                   { .name = NULL }},
1683 };
1684
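/* build the video filter graph: ffplay_input -> [user -vf filters] -> buffersink;
   without a user filter chain, the source is linked directly to the sink */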
1685 static int configure_video_filters(AVFilterGraph *graph, VideoState *is, const char *vfilters)
1686 {
1687     char sws_flags_str[128];
1688     int ret;
1689     enum PixelFormat pix_fmts[] = { PIX_FMT_YUV420P, PIX_FMT_NONE };
1690     AVBufferSinkParams *buffersink_params = av_buffersink_params_alloc();
1691     AVFilterContext *filt_src = NULL, *filt_out = NULL;
1692     snprintf(sws_flags_str, sizeof(sws_flags_str), "flags=%d", sws_flags);
1693     graph->scale_sws_opts = av_strdup(sws_flags_str);
1694
1695     if ((ret = avfilter_graph_create_filter(&filt_src, &input_filter, "src",
1696                                             NULL, is, graph)) < 0)
1697         return ret;
1698 #if FF_API_OLD_VSINK_API
1699     ret = avfilter_graph_create_filter(&filt_out, avfilter_get_by_name("buffersink"), "out",
1700                                        NULL, pix_fmts, graph);
1701 #else
1702     buffersink_params->pixel_fmts = pix_fmts;
1703     ret = avfilter_graph_create_filter(&filt_out, avfilter_get_by_name("buffersink"), "out",
1704                                        NULL, buffersink_params, graph);
1705 #endif
1706     av_freep(&buffersink_params);
1707     if (ret < 0)
1708         return ret;
1709
1710     if(vfilters) {
1711         AVFilterInOut *outputs = avfilter_inout_alloc();
1712         AVFilterInOut *inputs  = avfilter_inout_alloc();
1713
1714         outputs->name    = av_strdup("in");
1715         outputs->filter_ctx = filt_src;
1716         outputs->pad_idx = 0;
1717         outputs->next    = NULL;
1718
1719         inputs->name    = av_strdup("out");
1720         inputs->filter_ctx = filt_out;
1721         inputs->pad_idx = 0;
1722         inputs->next    = NULL;
1723
1724         if ((ret = avfilter_graph_parse(graph, vfilters, &inputs, &outputs, NULL)) < 0)
1725             return ret;
1726     } else {
1727         if ((ret = avfilter_link(filt_src, 0, filt_out, 0)) < 0)
1728             return ret;
1729     }
1730
1731     if ((ret = avfilter_graph_config(graph, NULL)) < 0)
1732         return ret;
1733
1734     is->out_video_filter = filt_out;
1735
1736     return ret;
1737 }
1738
1739 #endif  /* CONFIG_AVFILTER */
1740
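/* video decoding thread: pulls decoded (and, with CONFIG_AVFILTER, filtered)
   frames, rescales their pts to seconds and hands them to the picture queue */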
1741 static int video_thread(void *arg)
1742 {
1743     VideoState *is = arg;
1744     AVFrame *frame= avcodec_alloc_frame();
1745     int64_t pts_int = AV_NOPTS_VALUE, pos = -1;
1746     double pts;
1747     int ret;
1748
1749 #if CONFIG_AVFILTER
1750     AVFilterGraph *graph = avfilter_graph_alloc();
1751     AVFilterContext *filt_out = NULL;
1752     int last_w = is->video_st->codec->width;
1753     int last_h = is->video_st->codec->height;
1754
1755     if ((ret = configure_video_filters(graph, is, vfilters)) < 0)
1756         goto the_end;
1757     filt_out = is->out_video_filter;
1758 #endif
1759
1760     for(;;) {
1761 #if !CONFIG_AVFILTER
1762         AVPacket pkt;
1763 #else
1764         AVFilterBufferRef *picref;
1765         AVRational tb = filt_out->inputs[0]->time_base;
1766 #endif
1767         while (is->paused && !is->videoq.abort_request)
1768             SDL_Delay(10);
1769 #if CONFIG_AVFILTER
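        /* the filter graph was configured for a fixed frame size; rebuild it
           if the decoder now reports a different size */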
1770         if (   last_w != is->video_st->codec->width
1771             || last_h != is->video_st->codec->height) {
1772             av_log(NULL, AV_LOG_INFO, "Frame changed from size:%dx%d to size:%dx%d\n",
1773                    last_w, last_h, is->video_st->codec->width, is->video_st->codec->height);
1774             avfilter_graph_free(&graph);
1775             graph = avfilter_graph_alloc();
1776             if ((ret = configure_video_filters(graph, is, vfilters)) < 0)
1777                 goto the_end;
1778             filt_out = is->out_video_filter;
1779             last_w = is->video_st->codec->width;
1780             last_h = is->video_st->codec->height;
1781         }
1782         ret = av_buffersink_get_buffer_ref(filt_out, &picref, 0);
1783         if (picref) {
1784             avfilter_fill_frame_from_video_buffer_ref(frame, picref);
1785             pts_int = picref->pts;
1786             pos     = picref->pos;
1787             frame->opaque = picref;
1788         }
1789
1790         if (av_cmp_q(tb, is->video_st->time_base)) {
1791             av_unused int64_t pts1 = pts_int;
1792             pts_int = av_rescale_q(pts_int, tb, is->video_st->time_base);
1793             av_dlog(NULL, "video_thread(): "
1794                     "tb:%d/%d pts:%"PRId64" -> tb:%d/%d pts:%"PRId64"\n",
1795                     tb.num, tb.den, pts1,
1796                     is->video_st->time_base.num, is->video_st->time_base.den, pts_int);
1797         }
1798 #else
1799         ret = get_video_frame(is, frame, &pts_int, &pkt);
1800         pos = pkt.pos;
1801         av_free_packet(&pkt);
1802 #endif
1803
1804         if (ret < 0) goto the_end;
1805
1806 #if CONFIG_AVFILTER
1807         if (!picref)
1808             continue;
1809 #endif
1810
1811         pts = pts_int*av_q2d(is->video_st->time_base);
1812
1813         ret = queue_picture(is, frame, pts, pos);
1814
1815         if (ret < 0)
1816             goto the_end;
1817
1818         if (is->step)
1819             stream_toggle_pause(is);
1820     }
1821  the_end:
1822 #if CONFIG_AVFILTER
1823     avfilter_graph_free(&graph);
1824 #endif
1825     av_free(frame);
1826     return 0;
1827 }
1828
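/* subtitle decoding thread: decodes queued subtitle packets and converts the
   palette of bitmap subtitles from RGBA to YUVA before queuing them for display */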
1829 static int subtitle_thread(void *arg)
1830 {
1831     VideoState *is = arg;
1832     SubPicture *sp;
1833     AVPacket pkt1, *pkt = &pkt1;
1834     int got_subtitle;
1835     double pts;
1836     int i, j;
1837     int r, g, b, y, u, v, a;
1838
1839     for(;;) {
1840         while (is->paused && !is->subtitleq.abort_request) {
1841             SDL_Delay(10);
1842         }
1843         if (packet_queue_get(&is->subtitleq, pkt, 1) < 0)
1844             break;
1845
1846         if(pkt->data == flush_pkt.data){
1847             avcodec_flush_buffers(is->subtitle_st->codec);
1848             continue;
1849         }
1850         SDL_LockMutex(is->subpq_mutex);
1851         while (is->subpq_size >= SUBPICTURE_QUEUE_SIZE &&
1852                !is->subtitleq.abort_request) {
1853             SDL_CondWait(is->subpq_cond, is->subpq_mutex);
1854         }
1855         SDL_UnlockMutex(is->subpq_mutex);
1856
1857         if (is->subtitleq.abort_request)
1858             return 0;
1859
1860         sp = &is->subpq[is->subpq_windex];
1861
1862         /* NOTE: pts is the PTS of the _first_ subtitle beginning in
1863            this packet, if any */
1864         pts = 0;
1865         if (pkt->pts != AV_NOPTS_VALUE)
1866             pts = av_q2d(is->subtitle_st->time_base)*pkt->pts;
1867
1868         avcodec_decode_subtitle2(is->subtitle_st->codec, &sp->sub,
1869                                  &got_subtitle, pkt);
1870
1871         if (got_subtitle && sp->sub.format == 0) {
1872             sp->pts = pts;
1873
1874             for (i = 0; i < sp->sub.num_rects; i++)
1875             {
1876                 for (j = 0; j < sp->sub.rects[i]->nb_colors; j++)
1877                 {
1878                     RGBA_IN(r, g, b, a, (uint32_t*)sp->sub.rects[i]->pict.data[1] + j);
1879                     y = RGB_TO_Y_CCIR(r, g, b);
1880                     u = RGB_TO_U_CCIR(r, g, b, 0);
1881                     v = RGB_TO_V_CCIR(r, g, b, 0);
1882                     YUVA_OUT((uint32_t*)sp->sub.rects[i]->pict.data[1] + j, y, u, v, a);
1883                 }
1884             }
1885
1886             /* now we can update the picture count */
1887             if (++is->subpq_windex == SUBPICTURE_QUEUE_SIZE)
1888                 is->subpq_windex = 0;
1889             SDL_LockMutex(is->subpq_mutex);
1890             is->subpq_size++;
1891             SDL_UnlockMutex(is->subpq_mutex);
1892         }
1893         av_free_packet(pkt);
1894     }
1895     return 0;
1896 }
1897
1898 /* copy samples into the circular buffer used by the audio visualization (waves/RDFT) display */
1899 static void update_sample_display(VideoState *is, short *samples, int samples_size)
1900 {
1901     int size, len;
1902
1903     size = samples_size / sizeof(short);
1904     while (size > 0) {
1905         len = SAMPLE_ARRAY_SIZE - is->sample_array_index;
1906         if (len > size)
1907             len = size;
1908         memcpy(is->sample_array + is->sample_array_index, samples, len * sizeof(short));
1909         samples += len;
1910         is->sample_array_index += len;
1911         if (is->sample_array_index >= SAMPLE_ARRAY_SIZE)
1912             is->sample_array_index = 0;
1913         size -= len;
1914     }
1915 }
1916
1917 /* return the new audio buffer size (samples can be added or removed
1918    to improve sync when the video or an external clock is the master) */
1919 static int synchronize_audio(VideoState *is, short *samples,
1920                              int samples_size1, double pts)
1921 {
1922     int n, samples_size;
1923     double ref_clock;
1924
1925     n = 2 * is->audio_st->codec->channels;
1926     samples_size = samples_size1;
1927
1928     /* if not master, then we try to remove or add samples to correct the clock */
1929     if (((is->av_sync_type == AV_SYNC_VIDEO_MASTER && is->video_st) ||
1930          is->av_sync_type == AV_SYNC_EXTERNAL_CLOCK)) {
1931         double diff, avg_diff;
1932         int wanted_size, min_size, max_size, nb_samples;
1933
1934         ref_clock = get_master_clock(is);
1935         diff = get_audio_clock(is) - ref_clock;
1936
1937         if (diff < AV_NOSYNC_THRESHOLD) {
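            /* audio_diff_cum is an exponentially weighted sum of past A-V
               differences (cum = diff + coef * cum); multiplying it by
               (1 - coef) below turns it into a weighted average. With
               coef = exp(log(0.01) / AUDIO_DIFF_AVG_NB), the sample from
               AUDIO_DIFF_AVG_NB calls back carries only ~1% of the weight
               of the newest one. */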
1938             is->audio_diff_cum = diff + is->audio_diff_avg_coef * is->audio_diff_cum;
1939             if (is->audio_diff_avg_count < AUDIO_DIFF_AVG_NB) {
1940                 /* not enough measurements to have a correct estimate */
1941                 is->audio_diff_avg_count++;
1942             } else {
1943                 /* estimate the A-V difference */
1944                 avg_diff = is->audio_diff_cum * (1.0 - is->audio_diff_avg_coef);
1945
1946                 if (fabs(avg_diff) >= is->audio_diff_threshold) {
1947                     wanted_size = samples_size + ((int)(diff * is->audio_st->codec->sample_rate) * n);
1948                     nb_samples = samples_size / n;
1949
1950                     min_size = ((nb_samples * (100 - SAMPLE_CORRECTION_PERCENT_MAX)) / 100) * n;
1951                     max_size = ((nb_samples * (100 + SAMPLE_CORRECTION_PERCENT_MAX)) / 100) * n;
1952                     if (wanted_size < min_size)
1953                         wanted_size = min_size;
1954                     else if (wanted_size > max_size)
1955                         wanted_size = max_size;
1956
1957                     /* add or remove samples to correct the synchronization */
1958                     if (wanted_size < samples_size) {
1959                         /* remove samples */
1960                         samples_size = wanted_size;
1961                     } else if (wanted_size > samples_size) {
1962                         uint8_t *samples_end, *q;
1963                         int nb;
1964
1965                         /* add samples */
1966                         nb = (wanted_size - samples_size);
1967                         samples_end = (uint8_t *)samples + samples_size - n;
1968                         q = samples_end + n;
1969                         while (nb > 0) {
1970                             memcpy(q, samples_end, n);
1971                             q += n;
1972                             nb -= n;
1973                         }
1974                         samples_size = wanted_size;
1975                     }
1976                 }
1977                 av_dlog(NULL, "diff=%f adiff=%f sample_diff=%d apts=%0.3f vpts=%0.3f %f\n",
1978                         diff, avg_diff, samples_size - samples_size1,
1979                         is->audio_clock, is->video_clock, is->audio_diff_threshold);
1980             }
1981         } else {
1982             /* difference is too large: probably initial PTS errors, so
1983                reset the A-V filter */
1984             is->audio_diff_avg_count = 0;
1985             is->audio_diff_cum = 0;
1986         }
1987     }
1988
1989     return samples_size;
1990 }
1991
1992 /* decode one audio frame and return its uncompressed size */
1993 static int audio_decode_frame(VideoState *is, double *pts_ptr)
1994 {
1995     AVPacket *pkt_temp = &is->audio_pkt_temp;
1996     AVPacket *pkt = &is->audio_pkt;
1997     AVCodecContext *dec= is->audio_st->codec;
1998     int n, len1, data_size;
1999     double pts;
2000     int new_packet = 0;
2001     int flush_complete = 0;
2002
2003     for(;;) {
2004         /* NOTE: the audio packet can contain several frames */
2005         while (pkt_temp->size > 0 || (!pkt_temp->data && new_packet)) {
2006             if (flush_complete)
2007                 break;
2008             new_packet = 0;
2009             data_size = sizeof(is->audio_buf1);
2010             len1 = avcodec_decode_audio3(dec,
2011                                         (int16_t *)is->audio_buf1, &data_size,
2012                                         pkt_temp);
2013             if (len1 < 0) {
2014                 /* if error, we skip the frame */
2015                 pkt_temp->size = 0;
2016                 break;
2017             }
2018
2019             pkt_temp->data += len1;
2020             pkt_temp->size -= len1;
2021
2022             if (data_size <= 0) {
2023                 /* stop sending empty packets if the decoder is finished */
2024                 if (!pkt_temp->data && dec->codec->capabilities & CODEC_CAP_DELAY)
2025                     flush_complete = 1;
2026                 continue;
2027             }
2028
2029             if (dec->sample_fmt != is->audio_src_fmt) {
2030                 if (is->reformat_ctx)
2031                     av_audio_convert_free(is->reformat_ctx);
2032                 is->reformat_ctx= av_audio_convert_alloc(AV_SAMPLE_FMT_S16, 1,
2033                                                          dec->sample_fmt, 1, NULL, 0);
2034                 if (!is->reformat_ctx) {
2035                     fprintf(stderr, "Cannot convert %s sample format to %s sample format\n",
2036                         av_get_sample_fmt_name(dec->sample_fmt),
2037                         av_get_sample_fmt_name(AV_SAMPLE_FMT_S16));
2038                         break;
2039                 }
2040                 is->audio_src_fmt= dec->sample_fmt;
2041             }
2042
2043             if (is->reformat_ctx) {
2044                 const void *ibuf[6]= {is->audio_buf1};
2045                 void *obuf[6]= {is->audio_buf2};
2046                 int istride[6]= {av_get_bytes_per_sample(dec->sample_fmt)};
2047                 int ostride[6]= {2};
2048                 int len= data_size/istride[0];
2049                 if (av_audio_convert(is->reformat_ctx, obuf, ostride, ibuf, istride, len)<0) {
2050                     printf("av_audio_convert() failed\n");
2051                     break;
2052                 }
2053                 is->audio_buf= is->audio_buf2;
2054                 /* FIXME: existing code assumes that data_size equals framesize*channels*2;
2055                           remove this legacy cruft */
2056                 data_size= len*2;
2057             }else{
2058                 is->audio_buf= is->audio_buf1;
2059             }
2060
2061             /* if no pts, then compute it */
2062             pts = is->audio_clock;
2063             *pts_ptr = pts;
2064             n = 2 * dec->channels;
2065             is->audio_clock += (double)data_size /
2066                 (double)(n * dec->sample_rate);
2067 #ifdef DEBUG
2068             {
2069                 static double last_clock;
2070                 printf("audio: delay=%0.3f clock=%0.3f pts=%0.3f\n",
2071                        is->audio_clock - last_clock,
2072                        is->audio_clock, pts);
2073                 last_clock = is->audio_clock;
2074             }
2075 #endif
2076             return data_size;
2077         }
2078
2079         /* free the current packet */
2080         if (pkt->data)
2081             av_free_packet(pkt);
2082
2083         if (is->paused || is->audioq.abort_request) {
2084             return -1;
2085         }
2086
2087         /* read next packet */
2088         if ((new_packet = packet_queue_get(&is->audioq, pkt, 1)) < 0)
2089             return -1;
2090
2091         if (pkt->data == flush_pkt.data)
2092             avcodec_flush_buffers(dec);
2093
2094         pkt_temp->data = pkt->data;
2095         pkt_temp->size = pkt->size;
2096
2097         /* update the audio clock with the packet pts, if available */
2098         if (pkt->pts != AV_NOPTS_VALUE) {
2099             is->audio_clock = av_q2d(is->audio_st->time_base)*pkt->pts;
2100         }
2101     }
2102 }
2103
2104 /* prepare a new audio buffer */
2105 static void sdl_audio_callback(void *opaque, Uint8 *stream, int len)
2106 {
2107     VideoState *is = opaque;
2108     int audio_size, len1;
2109     int bytes_per_sec;
2110     double pts;
2111
2112     audio_callback_time = av_gettime();
2113
2114     while (len > 0) {
2115         if (is->audio_buf_index >= is->audio_buf_size) {
2116            audio_size = audio_decode_frame(is, &pts);
2117            if (audio_size < 0) {
2118                 /* if error, just output silence */
2119                is->audio_buf = is->audio_buf1;
2120                is->audio_buf_size = 1024;
2121                memset(is->audio_buf, 0, is->audio_buf_size);
2122            } else {
2123                if (is->show_mode != SHOW_MODE_VIDEO)
2124                    update_sample_display(is, (int16_t *)is->audio_buf, audio_size);
2125                audio_size = synchronize_audio(is, (int16_t *)is->audio_buf, audio_size,
2126                                               pts);
2127                is->audio_buf_size = audio_size;
2128            }
2129            is->audio_buf_index = 0;
2130         }
2131         len1 = is->audio_buf_size - is->audio_buf_index;
2132         if (len1 > len)
2133             len1 = len;
2134         memcpy(stream, (uint8_t *)is->audio_buf + is->audio_buf_index, len1);
2135         len -= len1;
2136         stream += len1;
2137         is->audio_buf_index += len1;
2138     }
2139     bytes_per_sec = is->audio_st->codec->sample_rate *
2140             2 * is->audio_st->codec->channels;
2141     is->audio_write_buf_size = is->audio_buf_size - is->audio_buf_index;
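    /* audio_clock refers to the end of the data decoded so far; subtract the
       bytes still buffered (hardware buffer plus the unplayed part of
       audio_buf) to estimate the pts of the sample currently being heard */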
2142     /* Let's assume the audio driver that is used by SDL has two periods. */
2143     is->audio_current_pts = is->audio_clock - (double)(2 * is->audio_hw_buf_size + is->audio_write_buf_size) / bytes_per_sec;
2144     is->audio_current_pts_drift = is->audio_current_pts - audio_callback_time / 1000000.0;
2145 }
2146
2147 /* open a given stream. Return 0 if OK */
2148 static int stream_component_open(VideoState *is, int stream_index)
2149 {
2150     AVFormatContext *ic = is->ic;
2151     AVCodecContext *avctx;
2152     AVCodec *codec;
2153     SDL_AudioSpec wanted_spec, spec;
2154     AVDictionary *opts;
2155     AVDictionaryEntry *t = NULL;
2156
2157     if (stream_index < 0 || stream_index >= ic->nb_streams)
2158         return -1;
2159     avctx = ic->streams[stream_index]->codec;
2160
2161     opts = filter_codec_opts(codec_opts, avctx->codec_id, ic, ic->streams[stream_index]);
2162
2163     /* prepare audio output */
2164     if (avctx->codec_type == AVMEDIA_TYPE_AUDIO) {
2165         if (avctx->channels > 0) {
2166             avctx->request_channels = FFMIN(2, avctx->channels);
2167         } else {
2168             avctx->request_channels = 2;
2169         }
2170     }
2171
2172     codec = avcodec_find_decoder(avctx->codec_id);
2173     switch(avctx->codec_type){
2174         case AVMEDIA_TYPE_AUDIO   : if(audio_codec_name   ) codec= avcodec_find_decoder_by_name(   audio_codec_name); break;
2175         case AVMEDIA_TYPE_SUBTITLE: if(subtitle_codec_name) codec= avcodec_find_decoder_by_name(subtitle_codec_name); break;
2176         case AVMEDIA_TYPE_VIDEO   : if(video_codec_name   ) codec= avcodec_find_decoder_by_name(   video_codec_name); break;
2177     }
2178     if (!codec)
2179         return -1;
2180
2181     avctx->workaround_bugs = workaround_bugs;
2182     avctx->lowres = lowres;
2183     if(lowres) avctx->flags |= CODEC_FLAG_EMU_EDGE;
2184     avctx->idct_algo= idct;
2185     if(fast) avctx->flags2 |= CODEC_FLAG2_FAST;
2186     avctx->skip_frame= skip_frame;
2187     avctx->skip_idct= skip_idct;
2188     avctx->skip_loop_filter= skip_loop_filter;
2189     avctx->error_recognition= error_recognition;
2190     avctx->error_concealment= error_concealment;
2191
2192     if(codec->capabilities & CODEC_CAP_DR1)
2193         avctx->flags |= CODEC_FLAG_EMU_EDGE;
2194
2195     wanted_spec.freq = avctx->sample_rate;
2196     wanted_spec.channels = avctx->channels;
2197     if (!codec ||
2198         avcodec_open2(avctx, codec, &opts) < 0)
2199         return -1;
2200     if ((t = av_dict_get(opts, "", NULL, AV_DICT_IGNORE_SUFFIX))) {
2201         av_log(NULL, AV_LOG_ERROR, "Option %s not found.\n", t->key);
2202         return AVERROR_OPTION_NOT_FOUND;
2203     }
2204
2205     /* prepare audio output */
2206     if (avctx->codec_type == AVMEDIA_TYPE_AUDIO) {
2207         if(avctx->sample_rate <= 0 || avctx->channels <= 0){
2208             fprintf(stderr, "Invalid sample rate or channel count\n");
2209             return -1;
2210         }
2211         wanted_spec.format = AUDIO_S16SYS;
2212         wanted_spec.silence = 0;
2213         wanted_spec.samples = SDL_AUDIO_BUFFER_SIZE;
2214         wanted_spec.callback = sdl_audio_callback;
2215         wanted_spec.userdata = is;
2216         if (SDL_OpenAudio(&wanted_spec, &spec) < 0) {
2217             fprintf(stderr, "SDL_OpenAudio: %s\n", SDL_GetError());
2218             return -1;
2219         }
2220         is->audio_hw_buf_size = spec.size;
2221         is->audio_src_fmt= AV_SAMPLE_FMT_S16;
2222     }
2223
2224     ic->streams[stream_index]->discard = AVDISCARD_DEFAULT;
2225     switch(avctx->codec_type) {
2226     case AVMEDIA_TYPE_AUDIO:
2227         is->audio_stream = stream_index;
2228         is->audio_st = ic->streams[stream_index];
2229         is->audio_buf_size = 0;
2230         is->audio_buf_index = 0;
2231
2232         /* init averaging filter */
2233         is->audio_diff_avg_coef = exp(log(0.01) / AUDIO_DIFF_AVG_NB);
2234         is->audio_diff_avg_count = 0;
2235         /* since we do not have a precise enough audio FIFO fullness,
2236            we correct audio sync only if the error is larger than this threshold */
2237         is->audio_diff_threshold = 2.0 * SDL_AUDIO_BUFFER_SIZE / avctx->sample_rate;
2238
2239         memset(&is->audio_pkt, 0, sizeof(is->audio_pkt));
2240         packet_queue_init(&is->audioq);
2241         SDL_PauseAudio(0);
2242         break;
2243     case AVMEDIA_TYPE_VIDEO:
2244         is->video_stream = stream_index;
2245         is->video_st = ic->streams[stream_index];
2246
2247         packet_queue_init(&is->videoq);
2248         is->video_tid = SDL_CreateThread(video_thread, is);
2249         break;
2250     case AVMEDIA_TYPE_SUBTITLE:
2251         is->subtitle_stream = stream_index;
2252         is->subtitle_st = ic->streams[stream_index];
2253         packet_queue_init(&is->subtitleq);
2254
2255         is->subtitle_tid = SDL_CreateThread(subtitle_thread, is);
2256         break;
2257     default:
2258         break;
2259     }
2260     return 0;
2261 }
2262
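/* close one stream component: abort and drain its packet queue, stop its
   decoding thread (video/subtitle) or the SDL audio device (audio), then
   close the codec and mark the stream as discarded */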
2263 static void stream_component_close(VideoState *is, int stream_index)
2264 {
2265     AVFormatContext *ic = is->ic;
2266     AVCodecContext *avctx;
2267
2268     if (stream_index < 0 || stream_index >= ic->nb_streams)
2269         return;
2270     avctx = ic->streams[stream_index]->codec;
2271
2272     switch(avctx->codec_type) {
2273     case AVMEDIA_TYPE_AUDIO:
2274         packet_queue_abort(&is->audioq);
2275
2276         SDL_CloseAudio();
2277
2278         packet_queue_end(&is->audioq);
2279         if (is->reformat_ctx)
2280             av_audio_convert_free(is->reformat_ctx);
2281         is->reformat_ctx = NULL;
2282         break;
2283     case AVMEDIA_TYPE_VIDEO:
2284         packet_queue_abort(&is->videoq);
2285
2286         /* note: we also signal this mutex to make sure we unblock the
2287            video thread in all cases */
2288         SDL_LockMutex(is->pictq_mutex);
2289         SDL_CondSignal(is->pictq_cond);
2290         SDL_UnlockMutex(is->pictq_mutex);
2291
2292         SDL_WaitThread(is->video_tid, NULL);
2293
2294         packet_queue_end(&is->videoq);
2295         break;
2296     case AVMEDIA_TYPE_SUBTITLE:
2297         packet_queue_abort(&is->subtitleq);
2298
2299         /* note: we also signal this mutex to make sure we unblock the
2300            subtitle thread in all cases */
2301         SDL_LockMutex(is->subpq_mutex);
2302         is->subtitle_stream_changed = 1;
2303
2304         SDL_CondSignal(is->subpq_cond);
2305         SDL_UnlockMutex(is->subpq_mutex);
2306
2307         SDL_WaitThread(is->subtitle_tid, NULL);
2308
2309         packet_queue_end(&is->subtitleq);
2310         break;
2311     default:
2312         break;
2313     }
2314
2315     ic->streams[stream_index]->discard = AVDISCARD_ALL;
2316     avcodec_close(avctx);
2317     switch(avctx->codec_type) {
2318     case AVMEDIA_TYPE_AUDIO:
2319         is->audio_st = NULL;
2320         is->audio_stream = -1;
2321         break;
2322     case AVMEDIA_TYPE_VIDEO:
2323         is->video_st = NULL;
2324         is->video_stream = -1;
2325         break;
2326     case AVMEDIA_TYPE_SUBTITLE:
2327         is->subtitle_st = NULL;
2328         is->subtitle_stream = -1;
2329         break;
2330     default:
2331         break;
2332     }
2333 }
2334
2335 /* since we have only one decoding thread, we can use a global
2336    variable instead of a thread local variable */
2337 static VideoState *global_video_state;
2338
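/* avio interrupt callback: abort blocking I/O in libavformat once the user
   has requested to quit */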
2339 static int decode_interrupt_cb(void)
2340 {
2341     return (global_video_state && global_video_state->abort_request);
2342 }
2343
2344 /* this thread gets the stream from the disk or the network */
2345 static int read_thread(void *arg)
2346 {
2347     VideoState *is = arg;
2348     AVFormatContext *ic = NULL;
2349     int err, i, ret;
2350     int st_index[AVMEDIA_TYPE_NB];
2351     AVPacket pkt1, *pkt = &pkt1;
2352     int eof=0;
2353     int pkt_in_play_range = 0;
2354     AVDictionaryEntry *t;
2355     AVDictionary **opts;
2356     int orig_nb_streams;
2357
2358     memset(st_index, -1, sizeof(st_index));
2359     is->video_stream = -1;
2360     is->audio_stream = -1;
2361     is->subtitle_stream = -1;
2362
2363     global_video_state = is;
2364     avio_set_interrupt_cb(decode_interrupt_cb);
2365
2366     err = avformat_open_input(&ic, is->filename, is->iformat, &format_opts);
2367     if (err < 0) {
2368         print_error(is->filename, err);
2369         ret = -1;
2370         goto fail;
2371     }
2372     if ((t = av_dict_get(format_opts, "", NULL, AV_DICT_IGNORE_SUFFIX))) {
2373         av_log(NULL, AV_LOG_ERROR, "Option %s not found.\n", t->key);
2374         ret = AVERROR_OPTION_NOT_FOUND;
2375         goto fail;
2376     }
2377     is->ic = ic;
2378
2379     if(genpts)
2380         ic->flags |= AVFMT_FLAG_GENPTS;
2381
2382     opts = setup_find_stream_info_opts(ic, codec_opts);
2383     orig_nb_streams = ic->nb_streams;
2384
2385     err = avformat_find_stream_info(ic, opts);
2386     if (err < 0) {
2387         fprintf(stderr, "%s: could not find codec parameters\n", is->filename);
2388         ret = -1;
2389         goto fail;
2390     }
2391     for (i = 0; i < orig_nb_streams; i++)
2392         av_dict_free(&opts[i]);
2393     av_freep(&opts);
2394
2395     if(ic->pb)
2396         ic->pb->eof_reached= 0; //FIXME hack, ffplay should probably not use url_feof() to test for the end
2397
2398     if(seek_by_bytes<0)
2399         seek_by_bytes= !!(ic->iformat->flags & AVFMT_TS_DISCONT);
2400
2401     /* if a seek was requested, execute it now */
2402     if (start_time != AV_NOPTS_VALUE) {
2403         int64_t timestamp;
2404
2405         timestamp = start_time;
2406         /* add the stream start time */
2407         if (ic->start_time != AV_NOPTS_VALUE)
2408             timestamp += ic->start_time;
2409         ret = avformat_seek_file(ic, -1, INT64_MIN, timestamp, INT64_MAX, 0);
2410         if (ret < 0) {
2411             fprintf(stderr, "%s: could not seek to position %0.3f\n",
2412                     is->filename, (double)timestamp / AV_TIME_BASE);
2413         }
2414     }
2415
2416     for (i = 0; i < ic->nb_streams; i++)
2417         ic->streams[i]->discard = AVDISCARD_ALL;
2418     if (!video_disable)
2419         st_index[AVMEDIA_TYPE_VIDEO] =
2420             av_find_best_stream(ic, AVMEDIA_TYPE_VIDEO,
2421                                 wanted_stream[AVMEDIA_TYPE_VIDEO], -1, NULL, 0);
2422     if (!audio_disable)
2423         st_index[AVMEDIA_TYPE_AUDIO] =
2424             av_find_best_stream(ic, AVMEDIA_TYPE_AUDIO,
2425                                 wanted_stream[AVMEDIA_TYPE_AUDIO],
2426                                 st_index[AVMEDIA_TYPE_VIDEO],
2427                                 NULL, 0);
2428     if (!video_disable)
2429         st_index[AVMEDIA_TYPE_SUBTITLE] =
2430             av_find_best_stream(ic, AVMEDIA_TYPE_SUBTITLE,
2431                                 wanted_stream[AVMEDIA_TYPE_SUBTITLE],
2432                                 (st_index[AVMEDIA_TYPE_AUDIO] >= 0 ?
2433                                  st_index[AVMEDIA_TYPE_AUDIO] :
2434                                  st_index[AVMEDIA_TYPE_VIDEO]),
2435                                 NULL, 0);
2436     if (show_status) {
2437         av_dump_format(ic, 0, is->filename, 0);
2438     }
2439
2440     is->show_mode = show_mode;
2441
2442     /* open the streams */
2443     if (st_index[AVMEDIA_TYPE_AUDIO] >= 0) {
2444         stream_component_open(is, st_index[AVMEDIA_TYPE_AUDIO]);
2445     }
2446
2447     ret=-1;
2448     if (st_index[AVMEDIA_TYPE_VIDEO] >= 0) {
2449         ret= stream_component_open(is, st_index[AVMEDIA_TYPE_VIDEO]);
2450     }
2451     is->refresh_tid = SDL_CreateThread(refresh_thread, is);
2452     if (is->show_mode == SHOW_MODE_NONE)
2453         is->show_mode = ret >= 0 ? SHOW_MODE_VIDEO : SHOW_MODE_RDFT;
2454
2455     if (st_index[AVMEDIA_TYPE_SUBTITLE] >= 0) {
2456         stream_component_open(is, st_index[AVMEDIA_TYPE_SUBTITLE]);
2457     }
2458
2459     if (is->video_stream < 0 && is->audio_stream < 0) {
2460         fprintf(stderr, "%s: could not open codecs\n", is->filename);
2461         ret = -1;
2462         goto fail;
2463     }
2464
2465     for(;;) {
2466         if (is->abort_request)
2467             break;
2468         if (is->paused != is->last_paused) {
2469             is->last_paused = is->paused;
2470             if (is->paused)
2471                 is->read_pause_return= av_read_pause(ic);
2472             else
2473                 av_read_play(ic);
2474         }
2475 #if CONFIG_RTSP_DEMUXER
2476         if (is->paused && !strcmp(ic->iformat->name, "rtsp")) {
2477             /* wait 10 ms to avoid trying to get another packet */
2478             /* XXX: horrible */
2479             SDL_Delay(10);
2480             continue;
2481         }
2482 #endif
2483         if (is->seek_req) {
2484             int64_t seek_target= is->seek_pos;
2485             int64_t seek_min= is->seek_rel > 0 ? seek_target - is->seek_rel + 2: INT64_MIN;
2486             int64_t seek_max= is->seek_rel < 0 ? seek_target - is->seek_rel - 2: INT64_MAX;
2487 //FIXME the +-2 is due to rounding not being done in the correct direction when
2488 //      the seek_pos/seek_rel variables are generated
2489
2490             ret = avformat_seek_file(is->ic, -1, seek_min, seek_target, seek_max, is->seek_flags);
2491             if (ret < 0) {
2492                 fprintf(stderr, "%s: error while seeking\n", is->ic->filename);
2493             }else{
2494                 if (is->audio_stream >= 0) {
2495                     packet_queue_flush(&is->audioq);
2496                     packet_queue_put(&is->audioq, &flush_pkt);
2497                 }
2498                 if (is->subtitle_stream >= 0) {
2499                     packet_queue_flush(&is->subtitleq);
2500                     packet_queue_put(&is->subtitleq, &flush_pkt);
2501                 }
2502                 if (is->video_stream >= 0) {
2503                     packet_queue_flush(&is->videoq);
2504                     packet_queue_put(&is->videoq, &flush_pkt);
2505                 }
2506             }
2507             is->seek_req = 0;
2508             eof= 0;
2509         }
2510
2511         /* if the queues are full, no need to read more */
2512         if (   is->audioq.size + is->videoq.size + is->subtitleq.size > MAX_QUEUE_SIZE
2513             || (   (is->audioq   .size  > MIN_AUDIOQ_SIZE || is->audio_stream<0)
2514                 && (is->videoq   .nb_packets > MIN_FRAMES || is->video_stream<0)
2515                 && (is->subtitleq.nb_packets > MIN_FRAMES || is->subtitle_stream<0))) {
2516             /* wait 10 ms */
2517             SDL_Delay(10);
2518             continue;
2519         }
2520         if(eof) {
2521             if(is->video_stream >= 0){
2522                 av_init_packet(pkt);
2523                 pkt->data=NULL;
2524                 pkt->size=0;
2525                 pkt->stream_index= is->video_stream;
2526                 packet_queue_put(&is->videoq, pkt);
2527             }
2528             if (is->audio_stream >= 0 &&
2529                 is->audio_st->codec->codec->capabilities & CODEC_CAP_DELAY) {
2530                 av_init_packet(pkt);
2531                 pkt->data = NULL;
2532                 pkt->size = 0;
2533                 pkt->stream_index = is->audio_stream;
2534                 packet_queue_put(&is->audioq, pkt);
2535             }
2536             SDL_Delay(10);
2537             if(is->audioq.size + is->videoq.size + is->subtitleq.size ==0){
2538                 if(loop!=1 && (!loop || --loop)){
2539                     stream_seek(is, start_time != AV_NOPTS_VALUE ? start_time : 0, 0, 0);
2540                 }else if(autoexit){
2541                     ret=AVERROR_EOF;
2542                     goto fail;
2543                 }
2544             }
2545             eof=0;
2546             continue;
2547         }
2548         ret = av_read_frame(ic, pkt);
2549         if (ret < 0) {
2550             if (ret == AVERROR_EOF || url_feof(ic->pb))
2551                 eof=1;
2552             if (ic->pb && ic->pb->error)
2553                 break;
2554             SDL_Delay(100); /* wait for user event */
2555             continue;
2556         }
2557         /* check if packet is in play range specified by user, then queue, otherwise discard */
2558         pkt_in_play_range = duration == AV_NOPTS_VALUE ||
2559                 (pkt->pts - ic->streams[pkt->stream_index]->start_time) *
2560                 av_q2d(ic->streams[pkt->stream_index]->time_base) -
2561                 (double)(start_time != AV_NOPTS_VALUE ? start_time : 0)/1000000
2562                 <= ((double)duration/1000000);
2563         if (pkt->stream_index == is->audio_stream && pkt_in_play_range) {
2564             packet_queue_put(&is->audioq, pkt);
2565         } else if (pkt->stream_index == is->video_stream && pkt_in_play_range) {
2566             packet_queue_put(&is->videoq, pkt);
2567         } else if (pkt->stream_index == is->subtitle_stream && pkt_in_play_range) {
2568             packet_queue_put(&is->subtitleq, pkt);
2569         } else {
2570             av_free_packet(pkt);
2571         }
2572     }
2573     /* wait until the end */
2574     while (!is->abort_request) {
2575         SDL_Delay(100);
2576     }
2577
2578     ret = 0;
2579  fail:
2580     /* disable interrupting */
2581     global_video_state = NULL;
2582
2583     /* close each stream */
2584     if (is->audio_stream >= 0)
2585         stream_component_close(is, is->audio_stream);
2586     if (is->video_stream >= 0)
2587         stream_component_close(is, is->video_stream);
2588     if (is->subtitle_stream >= 0)
2589         stream_component_close(is, is->subtitle_stream);
2590     if (is->ic) {
2591         av_close_input_file(is->ic);
2592         is->ic = NULL; /* safety */
2593     }
2594     avio_set_interrupt_cb(NULL);
2595
2596     if (ret != 0) {
2597         SDL_Event event;
2598
2599         event.type = FF_QUIT_EVENT;
2600         event.user.data1 = is;
2601         SDL_PushEvent(&event);
2602     }
2603     return 0;
2604 }
2605
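/* allocate a VideoState for the given input, create the picture and subtitle
   queue synchronization primitives and start the read thread; returns NULL
   on failure */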
2606 static VideoState *stream_open(const char *filename, AVInputFormat *iformat)
2607 {
2608     VideoState *is;
2609
2610     is = av_mallocz(sizeof(VideoState));
2611     if (!is)
2612         return NULL;
2613     av_strlcpy(is->filename, filename, sizeof(is->filename));
2614     is->iformat = iformat;
2615     is->ytop = 0;
2616     is->xleft = 0;
2617
2618     /* start video display */
2619     is->pictq_mutex = SDL_CreateMutex();
2620     is->pictq_cond = SDL_CreateCond();
2621
2622     is->subpq_mutex = SDL_CreateMutex();
2623     is->subpq_cond = SDL_CreateCond();
2624
2625     is->av_sync_type = av_sync_type;
2626     is->read_tid = SDL_CreateThread(read_thread, is);
2627     if (!is->read_tid) {
2628         av_free(is);
2629         return NULL;
2630     }
2631     return is;
2632 }
2633
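/* switch to the next stream of the given type, wrapping around; for
   subtitles, wrapping past the last stream disables subtitles entirely */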
2634 static void stream_cycle_channel(VideoState *is, int codec_type)
2635 {
2636     AVFormatContext *ic = is->ic;
2637     int start_index, stream_index;
2638     AVStream *st;
2639
2640     if (codec_type == AVMEDIA_TYPE_VIDEO)
2641         start_index = is->video_stream;
2642     else if (codec_type == AVMEDIA_TYPE_AUDIO)
2643         start_index = is->audio_stream;
2644     else
2645         start_index = is->subtitle_stream;
2646     if (start_index < (codec_type == AVMEDIA_TYPE_SUBTITLE ? -1 : 0))
2647         return;
2648     stream_index = start_index;
2649     for(;;) {
2650         if (++stream_index >= is->ic->nb_streams)
2651         {
2652             if (codec_type == AVMEDIA_TYPE_SUBTITLE)
2653             {
2654                 stream_index = -1;
2655                 goto the_end;
2656             } else
2657                 stream_index = 0;
2658         }
2659         if (stream_index == start_index)
2660             return;
2661         st = ic->streams[stream_index];
2662         if (st->codec->codec_type == codec_type) {
2663             /* check that parameters are OK */
2664             switch(codec_type) {
2665             case AVMEDIA_TYPE_AUDIO:
2666                 if (st->codec->sample_rate != 0 &&
2667                     st->codec->channels != 0)
2668                     goto the_end;
2669                 break;
2670             case AVMEDIA_TYPE_VIDEO:
2671             case AVMEDIA_TYPE_SUBTITLE:
2672                 goto the_end;
2673             default:
2674                 break;
2675             }
2676         }
2677     }
2678  the_end:
2679     stream_component_close(is, start_index);
2680     stream_component_open(is, stream_index);
2681 }
2682
2683
2684 static void toggle_full_screen(VideoState *is)
2685 {
2686     is_full_screen = !is_full_screen;
2687     video_open(is);
2688 }
2689
2690 static void toggle_pause(VideoState *is)
2691 {
2692     stream_toggle_pause(is);
2693     is->step = 0;
2694 }
2695
2696 static void step_to_next_frame(VideoState *is)
2697 {
2698     /* if the stream is paused, unpause it, then step */
2699     if (is->paused)
2700         stream_toggle_pause(is);
2701     is->step = 1;
2702 }
2703
2704 static void toggle_audio_display(VideoState *is)
2705 {
2706     int bgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0x00);
2707     is->show_mode = (is->show_mode + 1) % SHOW_MODE_NB;
2708     fill_rectangle(screen,
2709                 is->xleft, is->ytop, is->width, is->height,
2710                 bgcolor);
2711     SDL_UpdateRect(screen, is->xleft, is->ytop, is->width, is->height);
2712 }
2713
2714 /* handle an event sent by the GUI */
2715 static void event_loop(VideoState *cur_stream)
2716 {
2717     SDL_Event event;
2718     double incr, pos, frac;
2719
2720     for(;;) {
2721         double x;
2722         SDL_WaitEvent(&event);
2723         switch(event.type) {
2724         case SDL_KEYDOWN:
2725             if (exit_on_keydown) {
2726                 do_exit(cur_stream);
2727                 break;
2728             }
2729             switch(event.key.keysym.sym) {
2730             case SDLK_ESCAPE:
2731             case SDLK_q:
2732                 do_exit(cur_stream);
2733                 break;
2734             case SDLK_f:
2735                 toggle_full_screen(cur_stream);
2736                 break;
2737             case SDLK_p:
2738             case SDLK_SPACE:
2739                 toggle_pause(cur_stream);
2740                 break;
2741             case SDLK_s: //S: Step to next frame
2742                 step_to_next_frame(cur_stream);
2743                 break;
2744             case SDLK_a:
2745                 stream_cycle_channel(cur_stream, AVMEDIA_TYPE_AUDIO);
2746                 break;
2747             case SDLK_v:
2748                 stream_cycle_channel(cur_stream, AVMEDIA_TYPE_VIDEO);
2749                 break;
2750             case SDLK_t:
2751                 stream_cycle_channel(cur_stream, AVMEDIA_TYPE_SUBTITLE);
2752                 break;
2753             case SDLK_w:
2754                 toggle_audio_display(cur_stream);
2755                 break;
2756             case SDLK_LEFT:
2757                 incr = -10.0;
2758                 goto do_seek;
2759             case SDLK_RIGHT:
2760                 incr = 10.0;
2761                 goto do_seek;
2762             case SDLK_UP:
2763                 incr = 60.0;
2764                 goto do_seek;
2765             case SDLK_DOWN:
2766                 incr = -60.0;
2767             do_seek:
2768                 if (seek_by_bytes) {
2769                     if (cur_stream->video_stream >= 0 && cur_stream->video_current_pos>=0){
2770                         pos= cur_stream->video_current_pos;
2771                     }else if(cur_stream->audio_stream >= 0 && cur_stream->audio_pkt.pos>=0){
2772                         pos= cur_stream->audio_pkt.pos;
2773                     }else
2774                         pos = avio_tell(cur_stream->ic->pb);
2775                     if (cur_stream->ic->bit_rate)
2776                         incr *= cur_stream->ic->bit_rate / 8.0;
2777                     else
2778                         incr *= 180000.0;
2779                     pos += incr;
2780                     stream_seek(cur_stream, pos, incr, 1);
2781                 } else {
2782                     pos = get_master_clock(cur_stream);
2783                     pos += incr;
2784                     stream_seek(cur_stream, (int64_t)(pos * AV_TIME_BASE), (int64_t)(incr * AV_TIME_BASE), 0);
2785                 }
2786                 break;
2787             default:
2788                 break;
2789             }
2790             break;
2791         case SDL_MOUSEBUTTONDOWN:
2792             if (exit_on_mousedown) {
2793                 do_exit(cur_stream);
2794                 break;
2795             }
2796         case SDL_MOUSEMOTION:
2797             if(event.type ==SDL_MOUSEBUTTONDOWN){
2798                 x= event.button.x;
2799             }else{
2800                 if(event.motion.state != SDL_PRESSED)
2801                     break;
2802                 x= event.motion.x;
2803             }
2804             if(seek_by_bytes || cur_stream->ic->duration<=0){
2805                 uint64_t size=  avio_size(cur_stream->ic->pb);
2806                 stream_seek(cur_stream, size*x/cur_stream->width, 0, 1);
2807             }else{
2808                 int64_t ts;
2809                 int ns, hh, mm, ss;
2810                 int tns, thh, tmm, tss;
2811                 tns = cur_stream->ic->duration/1000000LL;
2812                 thh = tns/3600;
2813                 tmm = (tns%3600)/60;
2814                 tss = (tns%60);
2815                 frac = x/cur_stream->width;
2816                 ns = frac*tns;
2817                 hh = ns/3600;
2818                 mm = (ns%3600)/60;
2819                 ss = (ns%60);
2820                 fprintf(stderr, "Seek to %2.0f%% (%2d:%02d:%02d) of total duration (%2d:%02d:%02d)       \n", frac*100,
2821                         hh, mm, ss, thh, tmm, tss);
2822                 ts = frac*cur_stream->ic->duration;
2823                 if (cur_stream->ic->start_time != AV_NOPTS_VALUE)
2824                     ts += cur_stream->ic->start_time;
2825                 stream_seek(cur_stream, ts, 0, 0);
2826             }
2827             break;
2828         case SDL_VIDEORESIZE:
2829             screen = SDL_SetVideoMode(event.resize.w, event.resize.h, 0,
2830                                       SDL_HWSURFACE|SDL_RESIZABLE|SDL_ASYNCBLIT|SDL_HWACCEL);
2831             screen_width = cur_stream->width = event.resize.w;
2832             screen_height= cur_stream->height= event.resize.h;
2833             break;
2834         case SDL_QUIT:
2835         case FF_QUIT_EVENT:
2836             do_exit(cur_stream);
2837             break;
2838         case FF_ALLOC_EVENT:
2839             video_open(event.user.data1);
2840             alloc_picture(event.user.data1);
2841             break;
2842         case FF_REFRESH_EVENT:
2843             video_refresh(event.user.data1);
2844             cur_stream->refresh=0;
2845             break;
2846         default:
2847             break;
2848         }
2849     }
2850 }
2851
2852 static int opt_frame_size(const char *opt, const char *arg)
2853 {
2854     av_log(NULL, AV_LOG_WARNING, "Option -s is deprecated, use -video_size.\n");
2855     return opt_default("video_size", arg);
2856 }
2857
2858 static int opt_width(const char *opt, const char *arg)
2859 {
2860     screen_width = parse_number_or_die(opt, arg, OPT_INT64, 1, INT_MAX);
2861     return 0;
2862 }
2863
2864 static int opt_height(const char *opt, const char *arg)
2865 {
2866     screen_height = parse_number_or_die(opt, arg, OPT_INT64, 1, INT_MAX);
2867     return 0;
2868 }
2869
2870 static int opt_format(const char *opt, const char *arg)
2871 {
2872     file_iformat = av_find_input_format(arg);
2873     if (!file_iformat) {
2874         fprintf(stderr, "Unknown input format: %s\n", arg);
2875         return AVERROR(EINVAL);
2876     }
2877     return 0;
2878 }
2879
2880 static int opt_frame_pix_fmt(const char *opt, const char *arg)
2881 {
2882     av_log(NULL, AV_LOG_WARNING, "Option -pix_fmt is deprecated, use -pixel_format.\n");
2883     return opt_default("pixel_format", arg);
2884 }
2885
2886 static int opt_sync(const char *opt, const char *arg)
2887 {
2888     if (!strcmp(arg, "audio"))
2889         av_sync_type = AV_SYNC_AUDIO_MASTER;
2890     else if (!strcmp(arg, "video"))
2891         av_sync_type = AV_SYNC_VIDEO_MASTER;
2892     else if (!strcmp(arg, "ext"))
2893         av_sync_type = AV_SYNC_EXTERNAL_CLOCK;
2894     else {
2895         fprintf(stderr, "Unknown value for %s: %s\n", opt, arg);
2896         exit(1);
2897     }
2898     return 0;
2899 }
2900
2901 static int opt_seek(const char *opt, const char *arg)
2902 {
2903     start_time = parse_time_or_die(opt, arg, 1);
2904     return 0;
2905 }
2906
2907 static int opt_duration(const char *opt, const char *arg)
2908 {
2909     duration = parse_time_or_die(opt, arg, 1);
2910     return 0;
2911 }
2912
2913 static int opt_show_mode(const char *opt, const char *arg)
2914 {
2915     show_mode = !strcmp(arg, "video") ? SHOW_MODE_VIDEO :
2916                 !strcmp(arg, "waves") ? SHOW_MODE_WAVES :
2917                 !strcmp(arg, "rdft" ) ? SHOW_MODE_RDFT  :
2918                 parse_number_or_die(opt, arg, OPT_INT, 0, SHOW_MODE_NB-1);
2919     return 0;
2920 }
2921
2922 static void opt_input_file(void *optctx, const char *filename)
2923 {
2924     if (input_filename) {
2925         fprintf(stderr, "Argument '%s' provided as input filename, but '%s' was already specified.\n",
2926                 filename, input_filename);
2927         exit_program(1);
2928     }
2929     if (!strcmp(filename, "-"))
2930         filename = "pipe:";
2931     input_filename = filename;
2932 }
2933
2934 static int opt_codec(void *o, const char *opt, const char *arg)
2935 {
2936     switch(opt[strlen(opt)-1]){
2937     case 'a' :    audio_codec_name = arg; break;
2938     case 's' : subtitle_codec_name = arg; break;
2939     case 'v' :    video_codec_name = arg; break;
2940     }
2941     return 0;
2942 }
2943
2944 static int dummy;
2945
2946 static const OptionDef options[] = {
2947 #include "cmdutils_common_opts.h"
2948     { "x", HAS_ARG, {(void*)opt_width}, "force displayed width", "width" },
2949     { "y", HAS_ARG, {(void*)opt_height}, "force displayed height", "height" },
2950     { "s", HAS_ARG | OPT_VIDEO, {(void*)opt_frame_size}, "set frame size (WxH or abbreviation)", "size" },
2951     { "fs", OPT_BOOL, {(void*)&is_full_screen}, "force full screen" },
2952     { "an", OPT_BOOL, {(void*)&audio_disable}, "disable audio" },
2953     { "vn", OPT_BOOL, {(void*)&video_disable}, "disable video" },
2954     { "ast", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&wanted_stream[AVMEDIA_TYPE_AUDIO]}, "select desired audio stream", "stream_number" },
2955     { "vst", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&wanted_stream[AVMEDIA_TYPE_VIDEO]}, "select desired video stream", "stream_number" },
2956     { "sst", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&wanted_stream[AVMEDIA_TYPE_SUBTITLE]}, "select desired subtitle stream", "stream_number" },
2957     { "ss", HAS_ARG, {(void*)&opt_seek}, "seek to a given position in seconds", "pos" },
2958     { "t", HAS_ARG, {(void*)&opt_duration}, "play \"duration\" seconds of audio/video", "duration" },
2959     { "bytes", OPT_INT | HAS_ARG, {(void*)&seek_by_bytes}, "seek by bytes 0=off 1=on -1=auto", "val" },
2960     { "nodisp", OPT_BOOL, {(void*)&display_disable}, "disable graphical display" },
2961     { "f", HAS_ARG, {(void*)opt_format}, "force format", "fmt" },
2962     { "pix_fmt", HAS_ARG | OPT_EXPERT | OPT_VIDEO, {(void*)opt_frame_pix_fmt}, "set pixel format", "format" },
2963     { "stats", OPT_BOOL | OPT_EXPERT, {(void*)&show_status}, "show status", "" },
2964     { "bug", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&workaround_bugs}, "workaround bugs", "" },
2965     { "fast", OPT_BOOL | OPT_EXPERT, {(void*)&fast}, "non spec compliant optimizations", "" },
2966     { "genpts", OPT_BOOL | OPT_EXPERT, {(void*)&genpts}, "generate pts", "" },
2967     { "drp", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&decoder_reorder_pts}, "let decoder reorder pts 0=off 1=on -1=auto", ""},
2968     { "lowres", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&lowres}, "", "" },
2969     { "skiploop", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&skip_loop_filter}, "", "" },
2970     { "skipframe", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&skip_frame}, "", "" },
2971     { "skipidct", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&skip_idct}, "", "" },
2972     { "idct", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&idct}, "set idct algo",  "algo" },
2973     { "er", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&error_recognition}, "set error detection threshold (0-4)",  "threshold" },
2974     { "ec", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&error_concealment}, "set error concealment options",  "bit_mask" },
2975     { "sync", HAS_ARG | OPT_EXPERT, {(void*)opt_sync}, "set audio-video sync. type (type=audio/video/ext)", "type" },
2976     { "autoexit", OPT_BOOL | OPT_EXPERT, {(void*)&autoexit}, "exit at the end", "" },
2977     { "exitonkeydown", OPT_BOOL | OPT_EXPERT, {(void*)&exit_on_keydown}, "exit on key down", "" },
2978     { "exitonmousedown", OPT_BOOL | OPT_EXPERT, {(void*)&exit_on_mousedown}, "exit on mouse down", "" },
2979     { "loop", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&loop}, "set number of times the playback shall be looped", "loop count" },
2980     { "framedrop", OPT_BOOL | OPT_EXPERT, {(void*)&framedrop}, "drop frames when cpu is too slow", "" },
2981     { "window_title", OPT_STRING | HAS_ARG, {(void*)&window_title}, "set window title", "window title" },
2982 #if CONFIG_AVFILTER
2983     { "vf", OPT_STRING | HAS_ARG, {(void*)&vfilters}, "video filters", "filter list" },
2984 #endif
2985     { "rdftspeed", OPT_INT | HAS_ARG| OPT_AUDIO | OPT_EXPERT, {(void*)&rdftspeed}, "rdft speed", "msecs" },
2986     { "showmode", HAS_ARG, {(void*)opt_show_mode}, "select show mode (0 = video, 1 = waves, 2 = RDFT)", "mode" },
2987     { "default", HAS_ARG | OPT_AUDIO | OPT_VIDEO | OPT_EXPERT, {(void*)opt_default}, "generic catch all option", "" },
2988     { "i", OPT_BOOL, {(void *)&dummy}, "read specified file", "input_file"},
2989     { "codec", HAS_ARG | OPT_FUNC2, {(void*)opt_codec}, "force decoder", "decoder" },
2990     { NULL, },
2991 };
2992
2993 static void show_usage(void)
2994 {
2995     printf("Simple media player\n");
2996     printf("usage: %s [options] input_file\n", program_name);
2997     printf("\n");
2998 }
2999
3000 static int opt_help(const char *opt, const char *arg)
3001 {
3002     const AVClass *class;
3003     av_log_set_callback(log_callback_help);
3004     show_usage();
3005     show_help_options(options, "Main options:\n",
3006                       OPT_EXPERT, 0);
3007     show_help_options(options, "\nAdvanced options:\n",
3008                       OPT_EXPERT, OPT_EXPERT);
3009     printf("\n");
3010     class = avcodec_get_class();
3011     av_opt_show2(&class, NULL,
3012                  AV_OPT_FLAG_DECODING_PARAM, 0);
3013     printf("\n");
3014     class = avformat_get_class();
3015     av_opt_show2(&class, NULL,
3016                  AV_OPT_FLAG_DECODING_PARAM, 0);
3017 #if !CONFIG_AVFILTER
3018     printf("\n");
3019     class = sws_get_class();
3020     av_opt_show2(&class, NULL,
3021                  AV_OPT_FLAG_ENCODING_PARAM, 0);
3022 #endif
3023     printf("\nWhile playing:\n"
3024            "q, ESC              quit\n"
3025            "f                   toggle full screen\n"
3026            "p, SPC              pause\n"
3027            "a                   cycle audio channel\n"
3028            "v                   cycle video channel\n"
3029            "t                   cycle subtitle channel\n"
3030            "w                   show audio waves\n"
3031            "s                   activate frame-step mode\n"
3032            "left/right          seek backward/forward 10 seconds\n"
3033            "down/up             seek backward/forward 1 minute\n"
3034            "mouse click         seek to percentage in file corresponding to fraction of width\n"
3035            );
3036     return 0;
3037 }
3038
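/* lock manager callback registered with libavcodec, backing its locking
   needs with SDL mutexes */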
3039 static int lockmgr(void **mtx, enum AVLockOp op)
3040 {
3041    switch(op) {
3042       case AV_LOCK_CREATE:
3043           *mtx = SDL_CreateMutex();
3044           if(!*mtx)
3045               return 1;
3046           return 0;
3047       case AV_LOCK_OBTAIN:
3048           return !!SDL_LockMutex(*mtx);
3049       case AV_LOCK_RELEASE:
3050           return !!SDL_UnlockMutex(*mtx);
3051       case AV_LOCK_DESTROY:
3052           SDL_DestroyMutex(*mtx);
3053           return 0;
3054    }
3055    return 1;
3056 }
3057
3058 /* program entry point */
3059 int main(int argc, char **argv)
3060 {
3061     int flags;
3062     VideoState *is;
3063
3064     av_log_set_flags(AV_LOG_SKIP_REPEATED);
3065     parse_loglevel(argc, argv, options);
3066
3067     /* register all codecs, demuxers and protocols */
3068     avcodec_register_all();
3069 #if CONFIG_AVDEVICE
3070     avdevice_register_all();
3071 #endif
3072 #if CONFIG_AVFILTER
3073     avfilter_register_all();
3074 #endif
3075     av_register_all();
3076
3077     init_opts();
3078
3079     show_banner();
3080
3081     parse_options(NULL, argc, argv, options, opt_input_file);
3082
3083     if (!input_filename) {
3084         show_usage();
3085         fprintf(stderr, "An input file must be specified\n");
3086         fprintf(stderr, "Use -h to get full help or, even better, run 'man %s'\n", program_name);
3087         exit(1);
3088     }
3089
3090     if (display_disable) {
3091         video_disable = 1;
3092     }
3093     flags = SDL_INIT_VIDEO | SDL_INIT_AUDIO | SDL_INIT_TIMER;
3094     if (audio_disable)
3095         flags &= ~SDL_INIT_AUDIO;
3096 #if !defined(__MINGW32__) && !defined(__APPLE__)
3097     flags |= SDL_INIT_EVENTTHREAD; /* Not supported on Windows or Mac OS X */
3098 #endif
3099     if (SDL_Init (flags)) {
3100         fprintf(stderr, "Could not initialize SDL - %s\n", SDL_GetError());
3101         fprintf(stderr, "(Did you set the DISPLAY variable?)\n");
3102         exit(1);
3103     }
3104
3105     if (!display_disable) {
3106 #if HAVE_SDL_VIDEO_SIZE
3107         const SDL_VideoInfo *vi = SDL_GetVideoInfo();
3108         fs_screen_width = vi->current_w;
3109         fs_screen_height = vi->current_h;
3110 #endif
3111     }
3112
3113     SDL_EventState(SDL_ACTIVEEVENT, SDL_IGNORE);
3114     SDL_EventState(SDL_SYSWMEVENT, SDL_IGNORE);
3115     SDL_EventState(SDL_USEREVENT, SDL_IGNORE);
3116
3117     if (av_lockmgr_register(lockmgr)) {
3118         fprintf(stderr, "Could not initialize lock manager!\n");
3119         do_exit(NULL);
3120     }
3121
3122     av_init_packet(&flush_pkt);
3123     flush_pkt.data= "FLUSH";
3124
3125     is = stream_open(input_filename, file_iformat);
3126     if (!is) {
3127         fprintf(stderr, "Failed to initialize VideoState!\n");
3128         do_exit(NULL);
3129     }
3130
3131     event_loop(is);
3132
3133     /* never returns */
3134
3135     return 0;
3136 }