1 /*
2  * ffplay: Simple Media Player based on the FFmpeg libraries
3  * Copyright (c) 2003 Fabrice Bellard
4  *
5  * This file is part of FFmpeg.
6  *
7  * FFmpeg is free software; you can redistribute it and/or
8  * modify it under the terms of the GNU Lesser General Public
9  * License as published by the Free Software Foundation; either
10  * version 2.1 of the License, or (at your option) any later version.
11  *
12  * FFmpeg is distributed in the hope that it will be useful,
13  * but WITHOUT ANY WARRANTY; without even the implied warranty of
14  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
15  * Lesser General Public License for more details.
16  *
17  * You should have received a copy of the GNU Lesser General Public
18  * License along with FFmpeg; if not, write to the Free Software
19  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
20  */
21
22 #include "config.h"
23 #include <inttypes.h>
24 #include <math.h>
25 #include <limits.h>
26 #include "libavutil/avstring.h"
27 #include "libavutil/colorspace.h"
28 #include "libavutil/mathematics.h"
29 #include "libavutil/pixdesc.h"
30 #include "libavutil/imgutils.h"
31 #include "libavutil/dict.h"
32 #include "libavutil/parseutils.h"
33 #include "libavutil/samplefmt.h"
34 #include "libavutil/avassert.h"
35 #include "libavformat/avformat.h"
36 #include "libavdevice/avdevice.h"
37 #include "libswscale/swscale.h"
38 #include "libavcodec/audioconvert.h"
39 #include "libavutil/opt.h"
40 #include "libavcodec/avfft.h"
41 #include "libswresample/swresample.h"
42
43 #if CONFIG_AVFILTER
44 # include "libavfilter/avcodec.h"
45 # include "libavfilter/avfilter.h"
46 # include "libavfilter/avfiltergraph.h"
47 # include "libavfilter/buffersink.h"
48 #endif
49
50 #include <SDL.h>
51 #include <SDL_thread.h>
52
53 #include "cmdutils.h"
54
55 #include <unistd.h>
56 #include <assert.h>
57
58 const char program_name[] = "ffplay";
59 const int program_birth_year = 2003;
60
61 #define MAX_QUEUE_SIZE (15 * 1024 * 1024)
62 #define MIN_AUDIOQ_SIZE (20 * 16 * 1024)
63 #define MIN_FRAMES 5
64
65 /* SDL audio buffer size, in samples. Should be small to have precise
66    A/V sync as SDL does not have hardware buffer fullness info. */
67 #define SDL_AUDIO_BUFFER_SIZE 1024
68
69 /* no AV sync correction is done if below the AV sync threshold */
70 #define AV_SYNC_THRESHOLD 0.01
71 /* no AV correction is done if too big error */
72 #define AV_NOSYNC_THRESHOLD 10.0
73
74 /* maximum audio speed change to get correct sync */
75 #define SAMPLE_CORRECTION_PERCENT_MAX 10
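/* Illustrative numbers (assuming 44.1 kHz output): 1024 samples per SDL
   callback is roughly 23 ms of audio, so the 10 ms sync threshold is finer
   than one callback period, while corrections are skipped entirely once the
   A-V error exceeds 10 seconds. A 10% speed change on a 1024-sample request
   adds or drops at most about 102 samples. */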
76
77 /* we use about AUDIO_DIFF_AVG_NB A-V differences to make the average */
78 #define AUDIO_DIFF_AVG_NB   20
79
80 /* NOTE: the size must be big enough to compensate for the hardware audio buffer size */
81 #define SAMPLE_ARRAY_SIZE (2*65536)
82
83 static int sws_flags = SWS_BICUBIC;
84
85 typedef struct PacketQueue {
86     AVPacketList *first_pkt, *last_pkt;
87     int nb_packets;
88     int size;
89     int abort_request;
90     SDL_mutex *mutex;
91     SDL_cond *cond;
92 } PacketQueue;
93
94 #define VIDEO_PICTURE_QUEUE_SIZE 2
95 #define SUBPICTURE_QUEUE_SIZE 4
96
97 typedef struct VideoPicture {
98     double pts;                                  ///<presentation time stamp for this picture
99     double duration;                             ///<expected duration of the frame
100     int64_t pos;                                 ///<byte position in file
101     int skip;
102     SDL_Overlay *bmp;
103     int width, height; /* source height & width */
104     int allocated;
105     enum PixelFormat pix_fmt;
106
107 #if CONFIG_AVFILTER
108     AVFilterBufferRef *picref;
109 #endif
110 } VideoPicture;
111
112 typedef struct SubPicture {
113     double pts; /* presentation time stamp for this picture */
114     AVSubtitle sub;
115 } SubPicture;
116
117 enum {
118     AV_SYNC_AUDIO_MASTER, /* default choice */
119     AV_SYNC_VIDEO_MASTER,
120     AV_SYNC_EXTERNAL_CLOCK, /* synchronize to an external clock */
121 };
122
123 typedef struct VideoState {
124     SDL_Thread *read_tid;
125     SDL_Thread *video_tid;
126     SDL_Thread *refresh_tid;
127     AVInputFormat *iformat;
128     int no_background;
129     int abort_request;
130     int paused;
131     int last_paused;
132     int seek_req;
133     int seek_flags;
134     int64_t seek_pos;
135     int64_t seek_rel;
136     int read_pause_return;
137     AVFormatContext *ic;
138
139     int audio_stream;
140
141     int av_sync_type;
142     double external_clock; /* external clock base */
143     int64_t external_clock_time;
144
145     double audio_clock;
146     double audio_diff_cum; /* used for AV difference average computation */
147     double audio_diff_avg_coef;
148     double audio_diff_threshold;
149     int audio_diff_avg_count;
150     AVStream *audio_st;
151     PacketQueue audioq;
152     int audio_hw_buf_size;
153     /* samples output by the codec. we reserve more space for avsync
154        compensation, resampling and format conversion */
155     DECLARE_ALIGNED(16,uint8_t,audio_buf1)[AVCODEC_MAX_AUDIO_FRAME_SIZE * 4];
156     DECLARE_ALIGNED(16,uint8_t,audio_buf2)[AVCODEC_MAX_AUDIO_FRAME_SIZE * 4];
157     uint8_t *audio_buf;
158     unsigned int audio_buf_size; /* in bytes */
159     int audio_buf_index; /* in bytes */
160     int audio_write_buf_size;
161     AVPacket audio_pkt_temp;
162     AVPacket audio_pkt;
163     enum AVSampleFormat audio_src_fmt;
164     enum AVSampleFormat audio_tgt_fmt;
165     int audio_src_channels;
166     int audio_tgt_channels;
167     int64_t audio_src_channel_layout;
168     int64_t audio_tgt_channel_layout;
169     int audio_src_freq;
170     int audio_tgt_freq;
171     struct SwrContext *swr_ctx;
172     double audio_current_pts;
173     double audio_current_pts_drift;
174
175     enum ShowMode {
176         SHOW_MODE_NONE = -1, SHOW_MODE_VIDEO = 0, SHOW_MODE_WAVES, SHOW_MODE_RDFT, SHOW_MODE_NB
177     } show_mode;
178     int16_t sample_array[SAMPLE_ARRAY_SIZE];
179     int sample_array_index;
180     int last_i_start;
181     RDFTContext *rdft;
182     int rdft_bits;
183     FFTSample *rdft_data;
184     int xpos;
185
186     SDL_Thread *subtitle_tid;
187     int subtitle_stream;
188     int subtitle_stream_changed;
189     AVStream *subtitle_st;
190     PacketQueue subtitleq;
191     SubPicture subpq[SUBPICTURE_QUEUE_SIZE];
192     int subpq_size, subpq_rindex, subpq_windex;
193     SDL_mutex *subpq_mutex;
194     SDL_cond *subpq_cond;
195
196     double frame_timer;
197     double frame_last_pts;
198     double frame_last_duration;
199     double video_clock;                          ///<pts of last decoded frame / predicted pts of next decoded frame
200     int video_stream;
201     AVStream *video_st;
202     PacketQueue videoq;
203     double video_current_pts;                    ///<current displayed pts (different from video_clock if frame fifos are used)
204     double video_current_pts_drift;              ///<video_current_pts - time (av_gettime) at which we updated video_current_pts - used to have running video pts
205     int64_t video_current_pos;                   ///<current displayed file pos
206     VideoPicture pictq[VIDEO_PICTURE_QUEUE_SIZE];
207     int pictq_size, pictq_rindex, pictq_windex;
208     SDL_mutex *pictq_mutex;
209     SDL_cond *pictq_cond;
210 #if !CONFIG_AVFILTER
211     struct SwsContext *img_convert_ctx;
212 #endif
213
214     char filename[1024];
215     int width, height, xleft, ytop;
216     int step;
217
218 #if CONFIG_AVFILTER
219     AVFilterContext *out_video_filter;          ///<the last filter in the video chain
220 #endif
221
222     int refresh;
223 } VideoState;
224
225 static int opt_help(const char *opt, const char *arg);
226
227 /* options specified by the user */
228 static AVInputFormat *file_iformat;
229 static const char *input_filename;
230 static const char *window_title;
231 static int fs_screen_width;
232 static int fs_screen_height;
233 static int screen_width = 0;
234 static int screen_height = 0;
235 static int audio_disable;
236 static int video_disable;
237 static int wanted_stream[AVMEDIA_TYPE_NB]={
238     [AVMEDIA_TYPE_AUDIO]=-1,
239     [AVMEDIA_TYPE_VIDEO]=-1,
240     [AVMEDIA_TYPE_SUBTITLE]=-1,
241 };
242 static int seek_by_bytes=-1;
243 static int display_disable;
244 static int show_status = 1;
245 static int av_sync_type = AV_SYNC_AUDIO_MASTER;
246 static int64_t start_time = AV_NOPTS_VALUE;
247 static int64_t duration = AV_NOPTS_VALUE;
248 static int workaround_bugs = 1;
249 static int fast = 0;
250 static int genpts = 0;
251 static int lowres = 0;
252 static int idct = FF_IDCT_AUTO;
253 static enum AVDiscard skip_frame= AVDISCARD_DEFAULT;
254 static enum AVDiscard skip_idct= AVDISCARD_DEFAULT;
255 static enum AVDiscard skip_loop_filter= AVDISCARD_DEFAULT;
256 static int error_recognition = FF_ER_CAREFUL;
257 static int error_concealment = 3;
258 static int decoder_reorder_pts= -1;
259 static int autoexit;
260 static int exit_on_keydown;
261 static int exit_on_mousedown;
262 static int loop=1;
263 static int framedrop=-1;
264 static enum ShowMode show_mode = SHOW_MODE_NONE;
265 static const char *audio_codec_name;
266 static const char *subtitle_codec_name;
267 static const char *video_codec_name;
268
269 static int rdftspeed=20;
270 #if CONFIG_AVFILTER
271 static char *vfilters = NULL;
272 #endif
273
274 /* current context */
275 static int is_full_screen;
276 static int64_t audio_callback_time;
277
278 static AVPacket flush_pkt;
279
280 #define FF_ALLOC_EVENT   (SDL_USEREVENT)
281 #define FF_REFRESH_EVENT (SDL_USEREVENT + 1)
282 #define FF_QUIT_EVENT    (SDL_USEREVENT + 2)
283
284 static SDL_Surface *screen;
285
286 void exit_program(int ret)
287 {
288     exit(ret);
289 }
290
291 static int packet_queue_put(PacketQueue *q, AVPacket *pkt)
292 {
293     AVPacketList *pkt1;
294
295     /* duplicate the packet */
296     if (pkt!=&flush_pkt && av_dup_packet(pkt) < 0)
297         return -1;
298
299     pkt1 = av_malloc(sizeof(AVPacketList));
300     if (!pkt1)
301         return -1;
302     pkt1->pkt = *pkt;
303     pkt1->next = NULL;
304
305
306     SDL_LockMutex(q->mutex);
307
308     if (!q->last_pkt)
309
310         q->first_pkt = pkt1;
311     else
312         q->last_pkt->next = pkt1;
313     q->last_pkt = pkt1;
314     q->nb_packets++;
315     q->size += pkt1->pkt.size + sizeof(*pkt1);
316     /* XXX: should duplicate packet data in DV case */
317     SDL_CondSignal(q->cond);
318
319     SDL_UnlockMutex(q->mutex);
320     return 0;
321 }
322
323 /* packet queue handling */
324 static void packet_queue_init(PacketQueue *q)
325 {
326     memset(q, 0, sizeof(PacketQueue));
327     q->mutex = SDL_CreateMutex();
328     q->cond = SDL_CreateCond();
329     packet_queue_put(q, &flush_pkt);
330 }
331
332 static void packet_queue_flush(PacketQueue *q)
333 {
334     AVPacketList *pkt, *pkt1;
335
336     SDL_LockMutex(q->mutex);
337     for(pkt = q->first_pkt; pkt != NULL; pkt = pkt1) {
338         pkt1 = pkt->next;
339         av_free_packet(&pkt->pkt);
340         av_freep(&pkt);
341     }
342     q->last_pkt = NULL;
343     q->first_pkt = NULL;
344     q->nb_packets = 0;
345     q->size = 0;
346     SDL_UnlockMutex(q->mutex);
347 }
348
349 static void packet_queue_end(PacketQueue *q)
350 {
351     packet_queue_flush(q);
352     SDL_DestroyMutex(q->mutex);
353     SDL_DestroyCond(q->cond);
354 }
355
356 static void packet_queue_abort(PacketQueue *q)
357 {
358     SDL_LockMutex(q->mutex);
359
360     q->abort_request = 1;
361
362     SDL_CondSignal(q->cond);
363
364     SDL_UnlockMutex(q->mutex);
365 }
366
367 /* return < 0 if aborted, 0 if no packet and > 0 if packet.  */
368 static int packet_queue_get(PacketQueue *q, AVPacket *pkt, int block)
369 {
370     AVPacketList *pkt1;
371     int ret;
372
373     SDL_LockMutex(q->mutex);
374
375     for(;;) {
376         if (q->abort_request) {
377             ret = -1;
378             break;
379         }
380
381         pkt1 = q->first_pkt;
382         if (pkt1) {
383             q->first_pkt = pkt1->next;
384             if (!q->first_pkt)
385                 q->last_pkt = NULL;
386             q->nb_packets--;
387             q->size -= pkt1->pkt.size + sizeof(*pkt1);
388             *pkt = pkt1->pkt;
389             av_free(pkt1);
390             ret = 1;
391             break;
392         } else if (!block) {
393             ret = 0;
394             break;
395         } else {
396             SDL_CondWait(q->cond, q->mutex);
397         }
398     }
399     SDL_UnlockMutex(q->mutex);
400     return ret;
401 }
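/* Typical queue usage (sketch): the read thread calls packet_queue_put()
   while each decoder thread drains its queue, e.g.:

       AVPacket pkt;
       if (packet_queue_get(&is->videoq, &pkt, 1) < 0)  // blocking; < 0 on abort
           return -1;
       ...decode pkt...
       av_free_packet(&pkt);

   packet_queue_abort() wakes up any consumer blocked in SDL_CondWait() so
   the decoder threads can exit cleanly. */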
402
403 static inline void fill_rectangle(SDL_Surface *screen,
404                                   int x, int y, int w, int h, int color)
405 {
406     SDL_Rect rect;
407     rect.x = x;
408     rect.y = y;
409     rect.w = w;
410     rect.h = h;
411     SDL_FillRect(screen, &rect, color);
412 }
413
414 #define ALPHA_BLEND(a, oldp, newp, s)\
415 ((((oldp << s) * (255 - (a))) + (newp * (a))) / (255 << s))
416
417 #define RGBA_IN(r, g, b, a, s)\
418 {\
419     unsigned int v = ((const uint32_t *)(s))[0];\
420     a = (v >> 24) & 0xff;\
421     r = (v >> 16) & 0xff;\
422     g = (v >> 8) & 0xff;\
423     b = v & 0xff;\
424 }
425
426 #define YUVA_IN(y, u, v, a, s, pal)\
427 {\
428     unsigned int val = ((const uint32_t *)(pal))[*(const uint8_t*)(s)];\
429     a = (val >> 24) & 0xff;\
430     y = (val >> 16) & 0xff;\
431     u = (val >> 8) & 0xff;\
432     v = val & 0xff;\
433 }
434
435 #define YUVA_OUT(d, y, u, v, a)\
436 {\
437     ((uint32_t *)(d))[0] = (a << 24) | (y << 16) | (u << 8) | v;\
438 }
439
440
441 #define BPP 1
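/* ALPHA_BLEND mixes a new sample into an existing one; "s" says how many
   source samples were summed into newp, expressed as a shift. For example,
   ALPHA_BLEND(a, oldp, newp, 1) treats newp as the sum of two samples, so
   with a == 0 it returns oldp unchanged and with a == 255 it returns their
   plain average (newp >> 1). The subtitle bitmaps are palettized with one
   byte per pixel, hence BPP == 1. */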
442
443 static void blend_subrect(AVPicture *dst, const AVSubtitleRect *rect, int imgw, int imgh)
444 {
445     int wrap, wrap3, width2, skip2;
446     int y, u, v, a, u1, v1, a1, w, h;
447     uint8_t *lum, *cb, *cr;
448     const uint8_t *p;
449     const uint32_t *pal;
450     int dstx, dsty, dstw, dsth;
451
452     dstw = av_clip(rect->w, 0, imgw);
453     dsth = av_clip(rect->h, 0, imgh);
454     dstx = av_clip(rect->x, 0, imgw - dstw);
455     dsty = av_clip(rect->y, 0, imgh - dsth);
456     lum = dst->data[0] + dsty * dst->linesize[0];
457     cb = dst->data[1] + (dsty >> 1) * dst->linesize[1];
458     cr = dst->data[2] + (dsty >> 1) * dst->linesize[2];
459
460     width2 = ((dstw + 1) >> 1) + (dstx & ~dstw & 1);
461     skip2 = dstx >> 1;
462     wrap = dst->linesize[0];
463     wrap3 = rect->pict.linesize[0];
464     p = rect->pict.data[0];
465     pal = (const uint32_t *)rect->pict.data[1];  /* Now in YCrCb! */
466
467     if (dsty & 1) {
468         lum += dstx;
469         cb += skip2;
470         cr += skip2;
471
472         if (dstx & 1) {
473             YUVA_IN(y, u, v, a, p, pal);
474             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
475             cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
476             cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
477             cb++;
478             cr++;
479             lum++;
480             p += BPP;
481         }
482         for(w = dstw - (dstx & 1); w >= 2; w -= 2) {
483             YUVA_IN(y, u, v, a, p, pal);
484             u1 = u;
485             v1 = v;
486             a1 = a;
487             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
488
489             YUVA_IN(y, u, v, a, p + BPP, pal);
490             u1 += u;
491             v1 += v;
492             a1 += a;
493             lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
494             cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
495             cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
496             cb++;
497             cr++;
498             p += 2 * BPP;
499             lum += 2;
500         }
501         if (w) {
502             YUVA_IN(y, u, v, a, p, pal);
503             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
504             cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
505             cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
506             p++;
507             lum++;
508         }
509         p += wrap3 - dstw * BPP;
510         lum += wrap - dstw - dstx;
511         cb += dst->linesize[1] - width2 - skip2;
512         cr += dst->linesize[2] - width2 - skip2;
513     }
514     for(h = dsth - (dsty & 1); h >= 2; h -= 2) {
515         lum += dstx;
516         cb += skip2;
517         cr += skip2;
518
519         if (dstx & 1) {
520             YUVA_IN(y, u, v, a, p, pal);
521             u1 = u;
522             v1 = v;
523             a1 = a;
524             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
525             p += wrap3;
526             lum += wrap;
527             YUVA_IN(y, u, v, a, p, pal);
528             u1 += u;
529             v1 += v;
530             a1 += a;
531             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
532             cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
533             cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
534             cb++;
535             cr++;
536             p += -wrap3 + BPP;
537             lum += -wrap + 1;
538         }
539         for(w = dstw - (dstx & 1); w >= 2; w -= 2) {
540             YUVA_IN(y, u, v, a, p, pal);
541             u1 = u;
542             v1 = v;
543             a1 = a;
544             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
545
546             YUVA_IN(y, u, v, a, p + BPP, pal);
547             u1 += u;
548             v1 += v;
549             a1 += a;
550             lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
551             p += wrap3;
552             lum += wrap;
553
554             YUVA_IN(y, u, v, a, p, pal);
555             u1 += u;
556             v1 += v;
557             a1 += a;
558             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
559
560             YUVA_IN(y, u, v, a, p + BPP, pal);
561             u1 += u;
562             v1 += v;
563             a1 += a;
564             lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
565
566             cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 2);
567             cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 2);
568
569             cb++;
570             cr++;
571             p += -wrap3 + 2 * BPP;
572             lum += -wrap + 2;
573         }
574         if (w) {
575             YUVA_IN(y, u, v, a, p, pal);
576             u1 = u;
577             v1 = v;
578             a1 = a;
579             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
580             p += wrap3;
581             lum += wrap;
582             YUVA_IN(y, u, v, a, p, pal);
583             u1 += u;
584             v1 += v;
585             a1 += a;
586             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
587             cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
588             cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
589             cb++;
590             cr++;
591             p += -wrap3 + BPP;
592             lum += -wrap + 1;
593         }
594         p += wrap3 + (wrap3 - dstw * BPP);
595         lum += wrap + (wrap - dstw - dstx);
596         cb += dst->linesize[1] - width2 - skip2;
597         cr += dst->linesize[2] - width2 - skip2;
598     }
599     /* handle odd height */
600     if (h) {
601         lum += dstx;
602         cb += skip2;
603         cr += skip2;
604
605         if (dstx & 1) {
606             YUVA_IN(y, u, v, a, p, pal);
607             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
608             cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
609             cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
610             cb++;
611             cr++;
612             lum++;
613             p += BPP;
614         }
615         for(w = dstw - (dstx & 1); w >= 2; w -= 2) {
616             YUVA_IN(y, u, v, a, p, pal);
617             u1 = u;
618             v1 = v;
619             a1 = a;
620             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
621
622             YUVA_IN(y, u, v, a, p + BPP, pal);
623             u1 += u;
624             v1 += v;
625             a1 += a;
626             lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
627             cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
628             cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
629             cb++;
630             cr++;
631             p += 2 * BPP;
632             lum += 2;
633         }
634         if (w) {
635             YUVA_IN(y, u, v, a, p, pal);
636             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
637             cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
638             cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
639         }
640     }
641 }
642
643 static void free_subpicture(SubPicture *sp)
644 {
645     avsubtitle_free(&sp->sub);
646 }
647
648 static void video_image_display(VideoState *is)
649 {
650     VideoPicture *vp;
651     SubPicture *sp;
652     AVPicture pict;
653     float aspect_ratio;
654     int width, height, x, y;
655     SDL_Rect rect;
656     int i;
657
658     vp = &is->pictq[is->pictq_rindex];
659     if (vp->bmp) {
660 #if CONFIG_AVFILTER
661          if (vp->picref->video->sample_aspect_ratio.num == 0)
662              aspect_ratio = 0;
663          else
664              aspect_ratio = av_q2d(vp->picref->video->sample_aspect_ratio);
665 #else
666
667         /* XXX: use variable in the frame */
668         if (is->video_st->sample_aspect_ratio.num)
669             aspect_ratio = av_q2d(is->video_st->sample_aspect_ratio);
670         else if (is->video_st->codec->sample_aspect_ratio.num)
671             aspect_ratio = av_q2d(is->video_st->codec->sample_aspect_ratio);
672         else
673             aspect_ratio = 0;
674 #endif
675         if (aspect_ratio <= 0.0)
676             aspect_ratio = 1.0;
677         aspect_ratio *= (float)vp->width / (float)vp->height;
678
679         if (is->subtitle_st) {
680             if (is->subpq_size > 0) {
681                 sp = &is->subpq[is->subpq_rindex];
682
683                 if (vp->pts >= sp->pts + ((float) sp->sub.start_display_time / 1000)) {
684                     SDL_LockYUVOverlay (vp->bmp);
685
686                     pict.data[0] = vp->bmp->pixels[0];
687                     pict.data[1] = vp->bmp->pixels[2];
688                     pict.data[2] = vp->bmp->pixels[1];
689
690                     pict.linesize[0] = vp->bmp->pitches[0];
691                     pict.linesize[1] = vp->bmp->pitches[2];
692                     pict.linesize[2] = vp->bmp->pitches[1];
693
694                     for (i = 0; i < sp->sub.num_rects; i++)
695                         blend_subrect(&pict, sp->sub.rects[i],
696                                       vp->bmp->w, vp->bmp->h);
697
698                     SDL_UnlockYUVOverlay (vp->bmp);
699                 }
700             }
701         }
702
703
704         /* XXX: we suppose the screen has a 1.0 pixel ratio */
705         height = is->height;
706         width = ((int)rint(height * aspect_ratio)) & ~1;
707         if (width > is->width) {
708             width = is->width;
709             height = ((int)rint(width / aspect_ratio)) & ~1;
710         }
711         x = (is->width - width) / 2;
712         y = (is->height - height) / 2;
713         is->no_background = 0;
714         rect.x = is->xleft + x;
715         rect.y = is->ytop  + y;
716         rect.w = FFMAX(width,  1);
717         rect.h = FFMAX(height, 1);
718         SDL_DisplayYUVOverlay(vp->bmp, &rect);
719     }
720 }
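/* Example of the aspect ratio math above (numbers for illustration only):
   a 720x480 stream with a sample aspect ratio of 8:9 gives
   aspect_ratio = 8/9 * 720/480 = 4/3, so in a 1024x768 window the picture
   is shown at 1024x768, while in a 1000x768 window it becomes 1000x750,
   centered via the x/y offsets. */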
721
722 static inline int compute_mod(int a, int b)
723 {
724     return a < 0 ? a%b + b : a%b;
725 }
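/* compute_mod() is a mathematical modulo that stays non-negative, e.g.
   compute_mod(-3, 10) == 7, which keeps sample_array indices valid when
   stepping backwards through the circular buffer. */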
726
727 static void video_audio_display(VideoState *s)
728 {
729     int i, i_start, x, y1, y, ys, delay, n, nb_display_channels;
730     int ch, channels, h, h2, bgcolor, fgcolor;
731     int64_t time_diff;
732     int rdft_bits, nb_freq;
733
734     for(rdft_bits=1; (1<<rdft_bits)<2*s->height; rdft_bits++)
735         ;
736     nb_freq= 1<<(rdft_bits-1);
737
738     /* compute display index : center on currently output samples */
739     channels = s->audio_tgt_channels;
740     nb_display_channels = channels;
741     if (!s->paused) {
742         int data_used= s->show_mode == SHOW_MODE_WAVES ? s->width : (2*nb_freq);
743         n = 2 * channels;
744         delay = s->audio_write_buf_size;
745         delay /= n;
746
747         /* to be more precise, we take into account the time spent since
748            the last buffer computation */
749         if (audio_callback_time) {
750             time_diff = av_gettime() - audio_callback_time;
751             delay -= (time_diff * s->audio_tgt_freq) / 1000000;
752         }
753
754         delay += 2*data_used;
755         if (delay < data_used)
756             delay = data_used;
757
758         i_start= x = compute_mod(s->sample_array_index - delay * channels, SAMPLE_ARRAY_SIZE);
759         if (s->show_mode == SHOW_MODE_WAVES) {
760             h= INT_MIN;
761             for(i=0; i<1000; i+=channels){
762                 int idx= (SAMPLE_ARRAY_SIZE + x - i) % SAMPLE_ARRAY_SIZE;
763                 int a= s->sample_array[idx];
764                 int b= s->sample_array[(idx + 4*channels)%SAMPLE_ARRAY_SIZE];
765                 int c= s->sample_array[(idx + 5*channels)%SAMPLE_ARRAY_SIZE];
766                 int d= s->sample_array[(idx + 9*channels)%SAMPLE_ARRAY_SIZE];
767                 int score= a-d;
768                 if(h<score && (b^c)<0){
769                     h= score;
770                     i_start= idx;
771                 }
772             }
773         }
774
775         s->last_i_start = i_start;
776     } else {
777         i_start = s->last_i_start;
778     }
779
780     bgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0x00);
781     if (s->show_mode == SHOW_MODE_WAVES) {
782         fill_rectangle(screen,
783                        s->xleft, s->ytop, s->width, s->height,
784                        bgcolor);
785
786         fgcolor = SDL_MapRGB(screen->format, 0xff, 0xff, 0xff);
787
788         /* total height for one channel */
789         h = s->height / nb_display_channels;
790         /* graph height / 2 */
791         h2 = (h * 9) / 20;
792         for(ch = 0;ch < nb_display_channels; ch++) {
793             i = i_start + ch;
794             y1 = s->ytop + ch * h + (h / 2); /* position of center line */
795             for(x = 0; x < s->width; x++) {
796                 y = (s->sample_array[i] * h2) >> 15;
797                 if (y < 0) {
798                     y = -y;
799                     ys = y1 - y;
800                 } else {
801                     ys = y1;
802                 }
803                 fill_rectangle(screen,
804                                s->xleft + x, ys, 1, y,
805                                fgcolor);
806                 i += channels;
807                 if (i >= SAMPLE_ARRAY_SIZE)
808                     i -= SAMPLE_ARRAY_SIZE;
809             }
810         }
811
812         fgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0xff);
813
814         for(ch = 1;ch < nb_display_channels; ch++) {
815             y = s->ytop + ch * h;
816             fill_rectangle(screen,
817                            s->xleft, y, s->width, 1,
818                            fgcolor);
819         }
820         SDL_UpdateRect(screen, s->xleft, s->ytop, s->width, s->height);
821     }else{
822         nb_display_channels= FFMIN(nb_display_channels, 2);
823         if(rdft_bits != s->rdft_bits){
824             av_rdft_end(s->rdft);
825             av_free(s->rdft_data);
826             s->rdft = av_rdft_init(rdft_bits, DFT_R2C);
827             s->rdft_bits= rdft_bits;
828             s->rdft_data= av_malloc(4*nb_freq*sizeof(*s->rdft_data));
829         }
830         {
831             FFTSample *data[2];
832             for(ch = 0;ch < nb_display_channels; ch++) {
833                 data[ch] = s->rdft_data + 2*nb_freq*ch;
834                 i = i_start + ch;
835                 for(x = 0; x < 2*nb_freq; x++) {
836                     double w= (x-nb_freq)*(1.0/nb_freq);
837                     data[ch][x]= s->sample_array[i]*(1.0-w*w);
838                     i += channels;
839                     if (i >= SAMPLE_ARRAY_SIZE)
840                         i -= SAMPLE_ARRAY_SIZE;
841                 }
842                 av_rdft_calc(s->rdft, data[ch]);
843             }
844             // Least efficient way to do this; we should of course access the data directly, but it's more than fast enough
845             for(y=0; y<s->height; y++){
846                 double w= 1/sqrt(nb_freq);
847                 int a= sqrt(w*sqrt(data[0][2*y+0]*data[0][2*y+0] + data[0][2*y+1]*data[0][2*y+1]));
848                 int b= (nb_display_channels == 2 ) ? sqrt(w*sqrt(data[1][2*y+0]*data[1][2*y+0]
849                        + data[1][2*y+1]*data[1][2*y+1])) : a;
850                 a= FFMIN(a,255);
851                 b= FFMIN(b,255);
852                 fgcolor = SDL_MapRGB(screen->format, a, b, (a+b)/2);
853
854                 fill_rectangle(screen,
855                             s->xpos, s->height-y, 1, 1,
856                             fgcolor);
857             }
858         }
859         SDL_UpdateRect(screen, s->xpos, s->ytop, 1, s->height);
860         s->xpos++;
861         if(s->xpos >= s->width)
862             s->xpos= s->xleft;
863     }
864 }
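/* Notes on the two audio visualizations above: in SHOW_MODE_WAVES the
   i_start search scores candidate offsets with a-d and requires (b^c) < 0,
   i.e. a sign change a few samples apart, so the trace is re-anchored on a
   similar zero crossing at each refresh and does not appear to scroll. In
   RDFT mode each column at xpos is one transform of the most recent samples,
   windowed by (1 - w*w), with the per-bin magnitude compressed through
   sqrt() before being mapped to a color. */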
865
866 static void stream_close(VideoState *is)
867 {
868     VideoPicture *vp;
869     int i;
870     /* XXX: use a special url_shutdown call to abort parse cleanly */
871     is->abort_request = 1;
872     SDL_WaitThread(is->read_tid, NULL);
873     SDL_WaitThread(is->refresh_tid, NULL);
874
875     /* free all pictures */
876     for(i=0;i<VIDEO_PICTURE_QUEUE_SIZE; i++) {
877         vp = &is->pictq[i];
878 #if CONFIG_AVFILTER
879         if (vp->picref) {
880             avfilter_unref_buffer(vp->picref);
881             vp->picref = NULL;
882         }
883 #endif
884         if (vp->bmp) {
885             SDL_FreeYUVOverlay(vp->bmp);
886             vp->bmp = NULL;
887         }
888     }
889     SDL_DestroyMutex(is->pictq_mutex);
890     SDL_DestroyCond(is->pictq_cond);
891     SDL_DestroyMutex(is->subpq_mutex);
892     SDL_DestroyCond(is->subpq_cond);
893 #if !CONFIG_AVFILTER
894     if (is->img_convert_ctx)
895         sws_freeContext(is->img_convert_ctx);
896 #endif
897     av_free(is);
898 }
899
900 static void do_exit(VideoState *is)
901 {
902     if (is) {
903         stream_close(is);
904     }
905     av_lockmgr_register(NULL);
906     uninit_opts();
907 #if CONFIG_AVFILTER
908     avfilter_uninit();
909 #endif
910     if (show_status)
911         printf("\n");
912     SDL_Quit();
913     av_log(NULL, AV_LOG_QUIET, "%s", "");
914     exit(0);
915 }
916
917 static int video_open(VideoState *is){
918     int flags = SDL_HWSURFACE|SDL_ASYNCBLIT|SDL_HWACCEL;
919     int w,h;
920
921     if(is_full_screen) flags |= SDL_FULLSCREEN;
922     else               flags |= SDL_RESIZABLE;
923
924     if (is_full_screen && fs_screen_width) {
925         w = fs_screen_width;
926         h = fs_screen_height;
927     } else if(!is_full_screen && screen_width){
928         w = screen_width;
929         h = screen_height;
930 #if CONFIG_AVFILTER
931     }else if (is->out_video_filter && is->out_video_filter->inputs[0]){
932         w = is->out_video_filter->inputs[0]->w;
933         h = is->out_video_filter->inputs[0]->h;
934 #else
935     }else if (is->video_st && is->video_st->codec->width){
936         w = is->video_st->codec->width;
937         h = is->video_st->codec->height;
938 #endif
939     } else {
940         w = 640;
941         h = 480;
942     }
943     if(screen && is->width == screen->w && screen->w == w
944        && is->height== screen->h && screen->h == h)
945         return 0;
946
947 #ifndef __APPLE__
948     screen = SDL_SetVideoMode(w, h, 0, flags);
949 #else
950     /* setting bits_per_pixel = 0 or 32 causes blank video on OS X */
951     screen = SDL_SetVideoMode(w, h, 24, flags);
952 #endif
953     if (!screen) {
954         fprintf(stderr, "SDL: could not set video mode - exiting\n");
955         do_exit(is);
956     }
957     if (!window_title)
958         window_title = input_filename;
959     SDL_WM_SetCaption(window_title, window_title);
960
961     is->width = screen->w;
962     is->height = screen->h;
963
964     return 0;
965 }
966
967 /* display the current picture, if any */
968 static void video_display(VideoState *is)
969 {
970     if(!screen)
971         video_open(is);
972     if (is->audio_st && is->show_mode != SHOW_MODE_VIDEO)
973         video_audio_display(is);
974     else if (is->video_st)
975         video_image_display(is);
976 }
977
978 static int refresh_thread(void *opaque)
979 {
980     VideoState *is= opaque;
981     while(!is->abort_request){
982         SDL_Event event;
983         event.type = FF_REFRESH_EVENT;
984         event.user.data1 = opaque;
985         if(!is->refresh){
986             is->refresh=1;
987             SDL_PushEvent(&event);
988         }
989         //FIXME ideally we should wait for the correct time, but SDL's event passing is so slow it would be silly
990         usleep(is->audio_st && is->show_mode != SHOW_MODE_VIDEO ? rdftspeed*1000 : 5000);
991     }
992     return 0;
993 }
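/* refresh_thread only schedules work: it posts FF_REFRESH_EVENT to the SDL
   event loop (at most one outstanding at a time, tracked by is->refresh),
   and the event handler in the main thread is what actually calls
   video_refresh(). */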
994
995 /* get the current audio clock value */
996 static double get_audio_clock(VideoState *is)
997 {
998     if (is->paused) {
999         return is->audio_current_pts;
1000     } else {
1001         return is->audio_current_pts_drift + av_gettime() / 1000000.0;
1002     }
1003 }
1004
1005 /* get the current video clock value */
1006 static double get_video_clock(VideoState *is)
1007 {
1008     if (is->paused) {
1009         return is->video_current_pts;
1010     } else {
1011         return is->video_current_pts_drift + av_gettime() / 1000000.0;
1012     }
1013 }
1014
1015 /* get the current external clock value */
1016 static double get_external_clock(VideoState *is)
1017 {
1018     int64_t ti;
1019     ti = av_gettime();
1020     return is->external_clock + ((ti - is->external_clock_time) * 1e-6);
1021 }
1022
1023 /* get the current master clock value */
1024 static double get_master_clock(VideoState *is)
1025 {
1026     double val;
1027
1028     if (is->av_sync_type == AV_SYNC_VIDEO_MASTER) {
1029         if (is->video_st)
1030             val = get_video_clock(is);
1031         else
1032             val = get_audio_clock(is);
1033     } else if (is->av_sync_type == AV_SYNC_AUDIO_MASTER) {
1034         if (is->audio_st)
1035             val = get_audio_clock(is);
1036         else
1037             val = get_video_clock(is);
1038     } else {
1039         val = get_external_clock(is);
1040     }
1041     return val;
1042 }
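/* The clocks above are stored as a drift against the system time:
   xxx_pts_drift = pts - av_gettime()/1e6 at the moment of the last update,
   so adding the current time back yields a value that keeps advancing in
   real time between updates. For example, if a frame with pts 10.0 s was
   shown 0.2 s ago, get_video_clock() returns 10.2 while playing and the
   frozen 10.0 while paused. */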
1043
1044 /* seek in the stream */
1045 static void stream_seek(VideoState *is, int64_t pos, int64_t rel, int seek_by_bytes)
1046 {
1047     if (!is->seek_req) {
1048         is->seek_pos = pos;
1049         is->seek_rel = rel;
1050         is->seek_flags &= ~AVSEEK_FLAG_BYTE;
1051         if (seek_by_bytes)
1052             is->seek_flags |= AVSEEK_FLAG_BYTE;
1053         is->seek_req = 1;
1054     }
1055 }
1056
1057 /* pause or resume the video */
1058 static void stream_toggle_pause(VideoState *is)
1059 {
1060     if (is->paused) {
1061         is->frame_timer += av_gettime() / 1000000.0 + is->video_current_pts_drift - is->video_current_pts;
1062         if(is->read_pause_return != AVERROR(ENOSYS)){
1063             is->video_current_pts = is->video_current_pts_drift + av_gettime() / 1000000.0;
1064         }
1065         is->video_current_pts_drift = is->video_current_pts - av_gettime() / 1000000.0;
1066     }
1067     is->paused = !is->paused;
1068 }
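/* On unpause, frame_timer is pushed forward by essentially the time spent
   paused (av_gettime()/1e6 + drift - pts is the interval since the video
   clock was last updated, and it stopped being updated when playback
   stopped), so the next frame is not considered hopelessly late the moment
   playback resumes. */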
1069
1070 static double compute_target_delay(double delay, VideoState *is)
1071 {
1072     double sync_threshold, diff = 0;
1073
1074     /* update delay to follow master synchronisation source */
1075     if (((is->av_sync_type == AV_SYNC_AUDIO_MASTER && is->audio_st) ||
1076          is->av_sync_type == AV_SYNC_EXTERNAL_CLOCK)) {
1077         /* if video is slave, we try to correct big delays by
1078            duplicating or deleting a frame */
1079         diff = get_video_clock(is) - get_master_clock(is);
1080
1081         /* skip or repeat frame. We take into account the
1082            delay to compute the threshold. I still don't know
1083            if it is the best guess */
1084         sync_threshold = FFMAX(AV_SYNC_THRESHOLD, delay);
1085         if (fabs(diff) < AV_NOSYNC_THRESHOLD) {
1086             if (diff <= -sync_threshold)
1087                 delay = 0;
1088             else if (diff >= sync_threshold)
1089                 delay = 2 * delay;
1090         }
1091     }
1092
1093     av_dlog(NULL, "video: delay=%0.3f A-V=%f\n",
1094             delay, -diff);
1095
1096     return delay;
1097 }
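/* Worked example (illustrative): with a nominal delay of 40 ms, the sync
   threshold is max(10 ms, 40 ms) = 40 ms. If the video clock is 60 ms behind
   the master, the delay collapses to 0 and the next frame is shown at once;
   if it is 60 ms ahead, the delay is doubled to 80 ms; smaller errors leave
   the delay untouched. */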
1098
1099 static void pictq_next_picture(VideoState *is) {
1100     /* update queue size and signal for next picture */
1101     if (++is->pictq_rindex == VIDEO_PICTURE_QUEUE_SIZE)
1102         is->pictq_rindex = 0;
1103
1104     SDL_LockMutex(is->pictq_mutex);
1105     is->pictq_size--;
1106     SDL_CondSignal(is->pictq_cond);
1107     SDL_UnlockMutex(is->pictq_mutex);
1108 }
1109
1110 /* called to display each frame */
1111 static void video_refresh(void *opaque)
1112 {
1113     VideoState *is = opaque;
1114     VideoPicture *vp;
1115
1116     SubPicture *sp, *sp2;
1117
1118     if (is->video_st) {
1119 retry:
1120         if (is->pictq_size == 0) {
1121             // nothing to do, no picture to display in the queue
1122         } else {
1123             double time= av_gettime()/1000000.0;
1124             double last_duration, duration, delay;
1125             /* dequeue the picture */
1126             vp = &is->pictq[is->pictq_rindex];
1127
1128             if (vp->skip) {
1129                 pictq_next_picture(is);
1130                 goto retry;
1131             }
1132
1133             /* compute nominal last_duration */
1134             last_duration = vp->pts - is->frame_last_pts;
1135             if (last_duration > 0 && last_duration < 10.0) {
1136                 /* if duration of the last frame was sane, update last_duration in video state */
1137                 is->frame_last_duration = last_duration;
1138             }
1139             delay = compute_target_delay(is->frame_last_duration, is);
1140
1141             if(time < is->frame_timer + delay)
1142                 return;
1143
1144             is->frame_last_pts = vp->pts;
1145             is->frame_timer += delay;
1146
1147             /* update current video pts */
1148             is->video_current_pts = vp->pts;
1149             is->video_current_pts_drift = is->video_current_pts - time;
1150             is->video_current_pos = vp->pos;
1151
1152             if(is->pictq_size > 1) {
1153                  VideoPicture *nextvp= &is->pictq[(is->pictq_rindex+1)%VIDEO_PICTURE_QUEUE_SIZE];
1154                  duration = nextvp->pts - vp->pts; // more accurate this way, as 1/time_base often does not reflect the real frame rate
1155             } else {
1156                  duration = vp->duration;
1157             }
1158
1159             if((framedrop>0 || (framedrop && is->audio_st)) && time > is->frame_timer + duration){
1160                 if(is->pictq_size > 1){
1161                     pictq_next_picture(is);
1162                     goto retry;
1163                 }
1164             }
1165
1166             if(is->subtitle_st) {
1167                 if (is->subtitle_stream_changed) {
1168                     SDL_LockMutex(is->subpq_mutex);
1169
1170                     while (is->subpq_size) {
1171                         free_subpicture(&is->subpq[is->subpq_rindex]);
1172
1173                         /* update queue size and signal for next picture */
1174                         if (++is->subpq_rindex == SUBPICTURE_QUEUE_SIZE)
1175                             is->subpq_rindex = 0;
1176
1177                         is->subpq_size--;
1178                     }
1179                     is->subtitle_stream_changed = 0;
1180
1181                     SDL_CondSignal(is->subpq_cond);
1182                     SDL_UnlockMutex(is->subpq_mutex);
1183                 } else {
1184                     if (is->subpq_size > 0) {
1185                         sp = &is->subpq[is->subpq_rindex];
1186
1187                         if (is->subpq_size > 1)
1188                             sp2 = &is->subpq[(is->subpq_rindex + 1) % SUBPICTURE_QUEUE_SIZE];
1189                         else
1190                             sp2 = NULL;
1191
1192                         if ((is->video_current_pts > (sp->pts + ((float) sp->sub.end_display_time / 1000)))
1193                                 || (sp2 && is->video_current_pts > (sp2->pts + ((float) sp2->sub.start_display_time / 1000))))
1194                         {
1195                             free_subpicture(sp);
1196
1197                             /* update queue size and signal for next picture */
1198                             if (++is->subpq_rindex == SUBPICTURE_QUEUE_SIZE)
1199                                 is->subpq_rindex = 0;
1200
1201                             SDL_LockMutex(is->subpq_mutex);
1202                             is->subpq_size--;
1203                             SDL_CondSignal(is->subpq_cond);
1204                             SDL_UnlockMutex(is->subpq_mutex);
1205                         }
1206                     }
1207                 }
1208             }
1209
1210             /* display picture */
1211             if (!display_disable)
1212                 video_display(is);
1213
1214             pictq_next_picture(is);
1215         }
1216     } else if (is->audio_st) {
1217         /* draw the next audio frame */
1218
1219         /* if there is only an audio stream, then display the audio bars (better
1220            than nothing, just to test the implementation) */
1221
1222         /* display picture */
1223         if (!display_disable)
1224             video_display(is);
1225     }
1226     if (show_status) {
1227         static int64_t last_time;
1228         int64_t cur_time;
1229         int aqsize, vqsize, sqsize;
1230         double av_diff;
1231
1232         cur_time = av_gettime();
1233         if (!last_time || (cur_time - last_time) >= 30000) {
1234             aqsize = 0;
1235             vqsize = 0;
1236             sqsize = 0;
1237             if (is->audio_st)
1238                 aqsize = is->audioq.size;
1239             if (is->video_st)
1240                 vqsize = is->videoq.size;
1241             if (is->subtitle_st)
1242                 sqsize = is->subtitleq.size;
1243             av_diff = 0;
1244             if (is->audio_st && is->video_st)
1245                 av_diff = get_audio_clock(is) - get_video_clock(is);
1246             printf("%7.2f A-V:%7.3f aq=%5dKB vq=%5dKB sq=%5dB f=%"PRId64"/%"PRId64"   \r",
1247                    get_master_clock(is),
1248                    av_diff,
1249                    aqsize / 1024,
1250                    vqsize / 1024,
1251                    sqsize,
1252                    is->video_st ? is->video_st->codec->pts_correction_num_faulty_dts : 0,
1253                    is->video_st ? is->video_st->codec->pts_correction_num_faulty_pts : 0);
1254             fflush(stdout);
1255             last_time = cur_time;
1256         }
1257     }
1258 }
1259
1260 /* allocate a picture (needs to be done in the main thread to avoid
1261    potential locking problems) */
1262 static void alloc_picture(void *opaque)
1263 {
1264     VideoState *is = opaque;
1265     VideoPicture *vp;
1266
1267     vp = &is->pictq[is->pictq_windex];
1268
1269     if (vp->bmp)
1270         SDL_FreeYUVOverlay(vp->bmp);
1271
1272 #if CONFIG_AVFILTER
1273     if (vp->picref)
1274         avfilter_unref_buffer(vp->picref);
1275     vp->picref = NULL;
1276
1277     vp->width   = is->out_video_filter->inputs[0]->w;
1278     vp->height  = is->out_video_filter->inputs[0]->h;
1279     vp->pix_fmt = is->out_video_filter->inputs[0]->format;
1280 #else
1281     vp->width   = is->video_st->codec->width;
1282     vp->height  = is->video_st->codec->height;
1283     vp->pix_fmt = is->video_st->codec->pix_fmt;
1284 #endif
1285
1286     vp->bmp = SDL_CreateYUVOverlay(vp->width, vp->height,
1287                                    SDL_YV12_OVERLAY,
1288                                    screen);
1289     if (!vp->bmp || vp->bmp->pitches[0] < vp->width) {
1290         /* SDL allocates a buffer smaller than requested if the video
1291          * overlay hardware is unable to support the requested size. */
1292         fprintf(stderr, "Error: the video system does not support an image\n"
1293                         "size of %dx%d pixels. Try using -lowres or -vf \"scale=w:h\"\n"
1294                         "to reduce the image size.\n", vp->width, vp->height );
1295         do_exit(is);
1296     }
1297
1298     SDL_LockMutex(is->pictq_mutex);
1299     vp->allocated = 1;
1300     SDL_CondSignal(is->pictq_cond);
1301     SDL_UnlockMutex(is->pictq_mutex);
1302 }
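/* alloc_picture() runs in the main thread in response to FF_ALLOC_EVENT:
   queue_picture() below pushes that event and then blocks on pictq_cond
   until vp->allocated is set here, since (as noted above) the overlay must
   be allocated from the main thread to avoid locking problems. */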
1303
1304 static int queue_picture(VideoState *is, AVFrame *src_frame, double pts1, int64_t pos)
1305 {
1306     VideoPicture *vp;
1307     double frame_delay, pts = pts1;
1308
1309     /* compute the exact PTS for the picture if it is omitted in the stream
1310      * pts1 is the dts of the pkt / pts of the frame */
1311     if (pts != 0) {
1312         /* update video clock with pts, if present */
1313         is->video_clock = pts;
1314     } else {
1315         pts = is->video_clock;
1316     }
1317     /* update video clock for next frame */
1318     frame_delay = av_q2d(is->video_st->codec->time_base);
1319     /* for MPEG2, the frame can be repeated, so we update the
1320        clock accordingly */
1321     frame_delay += src_frame->repeat_pict * (frame_delay * 0.5);
1322     is->video_clock += frame_delay;
1323
1324 #if defined(DEBUG_SYNC) && 0
1325     printf("frame_type=%c clock=%0.3f pts=%0.3f\n",
1326            av_get_picture_type_char(src_frame->pict_type), pts, pts1);
1327 #endif
1328
1329     /* wait until we have space to put a new picture */
1330     SDL_LockMutex(is->pictq_mutex);
1331
1332     while (is->pictq_size >= VIDEO_PICTURE_QUEUE_SIZE &&
1333            !is->videoq.abort_request) {
1334         SDL_CondWait(is->pictq_cond, is->pictq_mutex);
1335     }
1336     SDL_UnlockMutex(is->pictq_mutex);
1337
1338     if (is->videoq.abort_request)
1339         return -1;
1340
1341     vp = &is->pictq[is->pictq_windex];
1342
1343     vp->duration = frame_delay;
1344
1345     /* alloc or resize hardware picture buffer */
1346     if (!vp->bmp ||
1347 #if CONFIG_AVFILTER
1348         vp->width  != is->out_video_filter->inputs[0]->w ||
1349         vp->height != is->out_video_filter->inputs[0]->h) {
1350 #else
1351         vp->width != is->video_st->codec->width ||
1352         vp->height != is->video_st->codec->height) {
1353 #endif
1354         SDL_Event event;
1355
1356         vp->allocated = 0;
1357
1358         /* the allocation must be done in the main thread to avoid
1359            locking problems */
1360         event.type = FF_ALLOC_EVENT;
1361         event.user.data1 = is;
1362         SDL_PushEvent(&event);
1363
1364         /* wait until the picture is allocated */
1365         SDL_LockMutex(is->pictq_mutex);
1366         while (!vp->allocated && !is->videoq.abort_request) {
1367             SDL_CondWait(is->pictq_cond, is->pictq_mutex);
1368         }
1369         /* if the queue is aborted, we have to pop the pending ALLOC event or wait for the allocation to complete */
1370         if (is->videoq.abort_request && SDL_PeepEvents(&event, 1, SDL_GETEVENT, SDL_EVENTMASK(FF_ALLOC_EVENT)) != 1) {
1371             while (!vp->allocated) {
1372                 SDL_CondWait(is->pictq_cond, is->pictq_mutex);
1373             }
1374         }
1375         SDL_UnlockMutex(is->pictq_mutex);
1376
1377         if (is->videoq.abort_request)
1378             return -1;
1379     }
1380
1381     /* if the frame is not skipped, then display it */
1382     if (vp->bmp) {
1383         AVPicture pict;
1384 #if CONFIG_AVFILTER
1385         if(vp->picref)
1386             avfilter_unref_buffer(vp->picref);
1387         vp->picref = src_frame->opaque;
1388 #endif
1389
1390         /* get a pointer on the bitmap */
1391         SDL_LockYUVOverlay (vp->bmp);
1392
1393         memset(&pict,0,sizeof(AVPicture));
1394         pict.data[0] = vp->bmp->pixels[0];
1395         pict.data[1] = vp->bmp->pixels[2];
1396         pict.data[2] = vp->bmp->pixels[1];
1397
1398         pict.linesize[0] = vp->bmp->pitches[0];
1399         pict.linesize[1] = vp->bmp->pitches[2];
1400         pict.linesize[2] = vp->bmp->pitches[1];
1401
1402 #if CONFIG_AVFILTER
1403         //FIXME use direct rendering
1404         av_picture_copy(&pict, (AVPicture *)src_frame,
1405                         vp->pix_fmt, vp->width, vp->height);
1406 #else
1407         sws_flags = av_get_int(sws_opts, "sws_flags", NULL);
1408         is->img_convert_ctx = sws_getCachedContext(is->img_convert_ctx,
1409             vp->width, vp->height, vp->pix_fmt, vp->width, vp->height,
1410             PIX_FMT_YUV420P, sws_flags, NULL, NULL, NULL);
1411         if (is->img_convert_ctx == NULL) {
1412             fprintf(stderr, "Cannot initialize the conversion context\n");
1413             exit(1);
1414         }
1415         sws_scale(is->img_convert_ctx, src_frame->data, src_frame->linesize,
1416                   0, vp->height, pict.data, pict.linesize);
1417 #endif
1418         /* update the bitmap content */
1419         SDL_UnlockYUVOverlay(vp->bmp);
1420
1421         vp->pts = pts;
1422         vp->pos = pos;
1423         vp->skip = 0;
1424
1425         /* now we can update the picture count */
1426         if (++is->pictq_windex == VIDEO_PICTURE_QUEUE_SIZE)
1427             is->pictq_windex = 0;
1428         SDL_LockMutex(is->pictq_mutex);
1429         is->pictq_size++;
1430         SDL_UnlockMutex(is->pictq_mutex);
1431     }
1432     return 0;
1433 }
1434
1435 static int get_video_frame(VideoState *is, AVFrame *frame, int64_t *pts, AVPacket *pkt)
1436 {
1437     int got_picture, i;
1438
1439     if (packet_queue_get(&is->videoq, pkt, 1) < 0)
1440         return -1;
1441
1442     if (pkt->data == flush_pkt.data) {
1443         avcodec_flush_buffers(is->video_st->codec);
1444
1445         SDL_LockMutex(is->pictq_mutex);
1446         //Make sure there are no long delay timers (ideally we should just flush the queue, but that's harder)
1447         for (i = 0; i < VIDEO_PICTURE_QUEUE_SIZE; i++) {
1448             is->pictq[i].skip = 1;
1449         }
1450         while (is->pictq_size && !is->videoq.abort_request) {
1451             SDL_CondWait(is->pictq_cond, is->pictq_mutex);
1452         }
1453         is->video_current_pos = -1;
1454         SDL_UnlockMutex(is->pictq_mutex);
1455
1456         is->frame_last_pts = AV_NOPTS_VALUE;
1457         is->frame_last_duration = 0;
1458         is->frame_timer = (double)av_gettime() / 1000000.0;
1459         return 0;
1460     }
1461
1462     avcodec_decode_video2(is->video_st->codec, frame, &got_picture, pkt);
1463
1464     if (got_picture) {
1465         if (decoder_reorder_pts == -1) {
1466             *pts = frame->best_effort_timestamp;
1467         } else if (decoder_reorder_pts) {
1468             *pts = frame->pkt_pts;
1469         } else {
1470             *pts = frame->pkt_dts;
1471         }
1472
1473         if (*pts == AV_NOPTS_VALUE) {
1474             *pts = 0;
1475         }
1476
1477         return 1;
1478
1479     }
1480     return 0;
1481 }
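/* A packet whose data pointer equals flush_pkt.data is the flush marker
   queued around seeks (and by packet_queue_init()): the decoder is flushed,
   every queued picture is marked skip, and the frame timer is reset so
   timing restarts cleanly. For normal packets the returned pts is chosen by
   decoder_reorder_pts: best_effort_timestamp by default (-1), pkt_pts when
   reordering is trusted, pkt_dts otherwise. */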
1482
1483 #if CONFIG_AVFILTER
1484 typedef struct {
1485     VideoState *is;
1486     AVFrame *frame;
1487     int use_dr1;
1488 } FilterPriv;
1489
1490 static int input_get_buffer(AVCodecContext *codec, AVFrame *pic)
1491 {
1492     AVFilterContext *ctx = codec->opaque;
1493     AVFilterBufferRef  *ref;
1494     int perms = AV_PERM_WRITE;
1495     int i, w, h, stride[4];
1496     unsigned edge;
1497     int pixel_size;
1498
1499     av_assert0(codec->flags & CODEC_FLAG_EMU_EDGE);
1500
1501     if (codec->codec->capabilities & CODEC_CAP_NEG_LINESIZES)
1502         perms |= AV_PERM_NEG_LINESIZES;
1503
1504     if(pic->buffer_hints & FF_BUFFER_HINTS_VALID) {
1505         if(pic->buffer_hints & FF_BUFFER_HINTS_READABLE) perms |= AV_PERM_READ;
1506         if(pic->buffer_hints & FF_BUFFER_HINTS_PRESERVE) perms |= AV_PERM_PRESERVE;
1507         if(pic->buffer_hints & FF_BUFFER_HINTS_REUSABLE) perms |= AV_PERM_REUSE2;
1508     }
1509     if(pic->reference) perms |= AV_PERM_READ | AV_PERM_PRESERVE;
1510
1511     w = codec->width;
1512     h = codec->height;
1513
1514     if(av_image_check_size(w, h, 0, codec))
1515         return -1;
1516
1517     avcodec_align_dimensions2(codec, &w, &h, stride);
1518     edge = codec->flags & CODEC_FLAG_EMU_EDGE ? 0 : avcodec_get_edge_width();
1519     w += edge << 1;
1520     h += edge << 1;
1521
1522     if(!(ref = avfilter_get_video_buffer(ctx->outputs[0], perms, w, h)))
1523         return -1;
1524
1525     pixel_size = av_pix_fmt_descriptors[ref->format].comp[0].step_minus1+1;
1526     ref->video->w = codec->width;
1527     ref->video->h = codec->height;
1528     for(i = 0; i < 4; i ++) {
1529         unsigned hshift = (i == 1 || i == 2) ? av_pix_fmt_descriptors[ref->format].log2_chroma_w : 0;
1530         unsigned vshift = (i == 1 || i == 2) ? av_pix_fmt_descriptors[ref->format].log2_chroma_h : 0;
1531
1532         if (ref->data[i]) {
1533             ref->data[i]    += ((edge * pixel_size) >> hshift) + ((edge * ref->linesize[i]) >> vshift);
1534         }
1535         pic->data[i]     = ref->data[i];
1536         pic->linesize[i] = ref->linesize[i];
1537     }
1538     pic->opaque = ref;
1539     pic->age    = INT_MAX;
1540     pic->type   = FF_BUFFER_TYPE_USER;
1541     pic->reordered_opaque = codec->reordered_opaque;
1542     if(codec->pkt) pic->pkt_pts = codec->pkt->pts;
1543     else           pic->pkt_pts = AV_NOPTS_VALUE;
1544     return 0;
1545 }
1546
1547 static void input_release_buffer(AVCodecContext *codec, AVFrame *pic)
1548 {
1549     memset(pic->data, 0, sizeof(pic->data));
1550     avfilter_unref_buffer(pic->opaque);
1551 }
1552
1553 static int input_reget_buffer(AVCodecContext *codec, AVFrame *pic)
1554 {
1555     AVFilterBufferRef *ref = pic->opaque;
1556
1557     if (pic->data[0] == NULL) {
1558         pic->buffer_hints |= FF_BUFFER_HINTS_READABLE;
1559         return codec->get_buffer(codec, pic);
1560     }
1561
1562     if ((codec->width != ref->video->w) || (codec->height != ref->video->h) ||
1563         (codec->pix_fmt != ref->format)) {
1564         av_log(codec, AV_LOG_ERROR, "Picture properties changed.\n");
1565         return -1;
1566     }
1567
1568     pic->reordered_opaque = codec->reordered_opaque;
1569     if(codec->pkt) pic->pkt_pts = codec->pkt->pts;
1570     else           pic->pkt_pts = AV_NOPTS_VALUE;
1571     return 0;
1572 }
1573
1574 static int input_init(AVFilterContext *ctx, const char *args, void *opaque)
1575 {
1576     FilterPriv *priv = ctx->priv;
1577     AVCodecContext *codec;
1578     if(!opaque) return -1;
1579
1580     priv->is = opaque;
1581     codec    = priv->is->video_st->codec;
1582     codec->opaque = ctx;
1583     if((codec->codec->capabilities & CODEC_CAP_DR1)
1584     ) {
1585         av_assert0(codec->flags & CODEC_FLAG_EMU_EDGE);
1586         priv->use_dr1 = 1;
1587         codec->get_buffer     = input_get_buffer;
1588         codec->release_buffer = input_release_buffer;
1589         codec->reget_buffer   = input_reget_buffer;
1590         codec->thread_safe_callbacks = 1;
1591     }
1592
1593     priv->frame = avcodec_alloc_frame();
1594
1595     return 0;
1596 }
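/* With CODEC_CAP_DR1 the decoder writes straight into buffers obtained from
   the filter graph (input_get_buffer() above wraps an AVFilterBufferRef in
   an AVFrame), so input_request_frame() can pass frame->opaque on without
   an extra av_image_copy(); otherwise the decoded frame is copied into a
   freshly allocated filter buffer. */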
1597
1598 static void input_uninit(AVFilterContext *ctx)
1599 {
1600     FilterPriv *priv = ctx->priv;
1601     av_free(priv->frame);
1602 }
1603
1604 static int input_request_frame(AVFilterLink *link)
1605 {
1606     FilterPriv *priv = link->src->priv;
1607     AVFilterBufferRef *picref;
1608     int64_t pts = 0;
1609     AVPacket pkt;
1610     int ret;
1611
1612     while (!(ret = get_video_frame(priv->is, priv->frame, &pts, &pkt)))
1613         av_free_packet(&pkt);
1614     if (ret < 0)
1615         return -1;
1616
1617     if(priv->use_dr1 && priv->frame->opaque) {
1618         picref = avfilter_ref_buffer(priv->frame->opaque, ~0);
1619     } else {
1620         picref = avfilter_get_video_buffer(link, AV_PERM_WRITE, link->w, link->h);
1621         av_image_copy(picref->data, picref->linesize,
1622                       priv->frame->data, priv->frame->linesize,
1623                       picref->format, link->w, link->h);
1624     }
1625     av_free_packet(&pkt);
1626
1627     avfilter_copy_frame_props(picref, priv->frame);
1628     picref->pts = pts;
1629
1630     avfilter_start_frame(link, picref);
1631     avfilter_draw_slice(link, 0, link->h, 1);
1632     avfilter_end_frame(link);
1633
1634     return 0;
1635 }
1636
1637 static int input_query_formats(AVFilterContext *ctx)
1638 {
1639     FilterPriv *priv = ctx->priv;
1640     enum PixelFormat pix_fmts[] = {
1641         priv->is->video_st->codec->pix_fmt, PIX_FMT_NONE
1642     };
1643
1644     avfilter_set_common_pixel_formats(ctx, avfilter_make_format_list(pix_fmts));
1645     return 0;
1646 }
1647
1648 static int input_config_props(AVFilterLink *link)
1649 {
1650     FilterPriv *priv  = link->src->priv;
1651     AVStream *s = priv->is->video_st;
1652
1653     link->w = s->codec->width;
1654     link->h = s->codec->height;
1655     link->sample_aspect_ratio = s->sample_aspect_ratio.num ?
1656         s->sample_aspect_ratio : s->codec->sample_aspect_ratio;
1657     link->time_base = s->time_base;
1658
1659     return 0;
1660 }
1661
1662 static AVFilter input_filter =
1663 {
1664     .name      = "ffplay_input",
1665
1666     .priv_size = sizeof(FilterPriv),
1667
1668     .init      = input_init,
1669     .uninit    = input_uninit,
1670
1671     .query_formats = input_query_formats,
1672
1673     .inputs    = (AVFilterPad[]) {{ .name = NULL }},
1674     .outputs   = (AVFilterPad[]) {{ .name = "default",
1675                                     .type = AVMEDIA_TYPE_VIDEO,
1676                                     .request_frame = input_request_frame,
1677                                     .config_props  = input_config_props, },
1678                                   { .name = NULL }},
1679 };
1680
1681 static int configure_video_filters(AVFilterGraph *graph, VideoState *is, const char *vfilters)
1682 {
1683     char sws_flags_str[128];
1684     int ret;
1685     enum PixelFormat pix_fmts[] = { PIX_FMT_YUV420P, PIX_FMT_NONE };
1686     AVBufferSinkParams *buffersink_params = av_buffersink_params_alloc();
1687     AVFilterContext *filt_src = NULL, *filt_out = NULL;
1688     snprintf(sws_flags_str, sizeof(sws_flags_str), "flags=%d", sws_flags);
1689     graph->scale_sws_opts = av_strdup(sws_flags_str);
1690
1691     if ((ret = avfilter_graph_create_filter(&filt_src, &input_filter, "src",
1692                                             NULL, is, graph)) < 0)
1693         return ret;
1694 #if FF_API_OLD_VSINK_API
1695     ret = avfilter_graph_create_filter(&filt_out, avfilter_get_by_name("buffersink"), "out",
1696                                        NULL, pix_fmts, graph);
1697 #else
1698     buffersink_params->pixel_fmts = pix_fmts;
1699     ret = avfilter_graph_create_filter(&filt_out, avfilter_get_by_name("buffersink"), "out",
1700                                        NULL, buffersink_params, graph);
1701 #endif
1702     av_freep(&buffersink_params);
1703     if (ret < 0)
1704         return ret;
1705
1706     if(vfilters) {
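        /* The labels below follow avfilter_graph_parse()'s convention: "in"
           is attached to the open output pad of our source filter and "out"
           to the open input pad of the buffersink, so a user chain such as
           -vf "yadif,scale=640:-1" (a hypothetical example) ends up as
           src -> yadif -> scale -> buffersink. */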
1707         AVFilterInOut *outputs = avfilter_inout_alloc();
1708         AVFilterInOut *inputs  = avfilter_inout_alloc();
1709
1710         outputs->name    = av_strdup("in");
1711         outputs->filter_ctx = filt_src;
1712         outputs->pad_idx = 0;
1713         outputs->next    = NULL;
1714
1715         inputs->name    = av_strdup("out");
1716         inputs->filter_ctx = filt_out;
1717         inputs->pad_idx = 0;
1718         inputs->next    = NULL;
1719
1720         if ((ret = avfilter_graph_parse(graph, vfilters, &inputs, &outputs, NULL)) < 0)
1721             return ret;
1722     } else {
1723         if ((ret = avfilter_link(filt_src, 0, filt_out, 0)) < 0)
1724             return ret;
1725     }
1726
1727     if ((ret = avfilter_graph_config(graph, NULL)) < 0)
1728         return ret;
1729
1730     is->out_video_filter = filt_out;
1731
1732     return ret;
1733 }
1734
1735 #endif  /* CONFIG_AVFILTER */
1736
1737 static int video_thread(void *arg)
1738 {
1739     VideoState *is = arg;
1740     AVFrame *frame= avcodec_alloc_frame();
1741     int64_t pts_int = AV_NOPTS_VALUE, pos = -1;
1742     double pts;
1743     int ret;
1744
1745 #if CONFIG_AVFILTER
1746     AVFilterGraph *graph = avfilter_graph_alloc();
1747     AVFilterContext *filt_out = NULL;
1748     int last_w = is->video_st->codec->width;
1749     int last_h = is->video_st->codec->height;
1750
1751     if ((ret = configure_video_filters(graph, is, vfilters)) < 0)
1752         goto the_end;
1753     filt_out = is->out_video_filter;
1754 #endif
1755
1756     for(;;) {
1757 #if !CONFIG_AVFILTER
1758         AVPacket pkt;
1759 #else
1760         AVFilterBufferRef *picref;
1761         AVRational tb = filt_out->inputs[0]->time_base;
1762 #endif
1763         while (is->paused && !is->videoq.abort_request)
1764             SDL_Delay(10);
1765 #if CONFIG_AVFILTER
1766         if (   last_w != is->video_st->codec->width
1767             || last_h != is->video_st->codec->height) {
1768             av_log(NULL, AV_LOG_INFO, "Frame changed from size:%dx%d to size:%dx%d\n",
1769                    last_w, last_h, is->video_st->codec->width, is->video_st->codec->height);
1770             avfilter_graph_free(&graph);
1771             graph = avfilter_graph_alloc();
1772             if ((ret = configure_video_filters(graph, is, vfilters)) < 0)
1773                 goto the_end;
1774             filt_out = is->out_video_filter;
1775             last_w = is->video_st->codec->width;
1776             last_h = is->video_st->codec->height;
1777         }
1778         ret = av_buffersink_get_buffer_ref(filt_out, &picref, 0);
1779         if (picref) {
1780             avfilter_fill_frame_from_video_buffer_ref(frame, picref);
1781             pts_int = picref->pts;
1782             pos     = picref->pos;
1783             frame->opaque = picref;
1784         }
1785
1786         if (av_cmp_q(tb, is->video_st->time_base)) {
1787             av_unused int64_t pts1 = pts_int;
1788             pts_int = av_rescale_q(pts_int, tb, is->video_st->time_base);
1789             av_dlog(NULL, "video_thread(): "
1790                     "tb:%d/%d pts:%"PRId64" -> tb:%d/%d pts:%"PRId64"\n",
1791                     tb.num, tb.den, pts1,
1792                     is->video_st->time_base.num, is->video_st->time_base.den, pts_int);
1793         }
1794 #else
1795         ret = get_video_frame(is, frame, &pts_int, &pkt);
1796         pos = pkt.pos;
1797         av_free_packet(&pkt);
1798 #endif
1799
1800         if (ret < 0) goto the_end;
1801
1802 #if CONFIG_AVFILTER
1803         if (!picref)
1804             continue;
1805 #endif
1806
1807         pts = pts_int*av_q2d(is->video_st->time_base);
1808
1809         ret = queue_picture(is, frame, pts, pos);
1810
1811         if (ret < 0)
1812             goto the_end;
1813
1814         if (is->step)
1815             stream_toggle_pause(is);
1816     }
1817  the_end:
1818 #if CONFIG_AVFILTER
1819     avfilter_graph_free(&graph);
1820 #endif
1821     av_free(frame);
1822     return 0;
1823 }
1824
1825 static int subtitle_thread(void *arg)
1826 {
1827     VideoState *is = arg;
1828     SubPicture *sp;
1829     AVPacket pkt1, *pkt = &pkt1;
1830     int got_subtitle;
1831     double pts;
1832     int i, j;
1833     int r, g, b, y, u, v, a;
1834
1835     for(;;) {
1836         while (is->paused && !is->subtitleq.abort_request) {
1837             SDL_Delay(10);
1838         }
1839         if (packet_queue_get(&is->subtitleq, pkt, 1) < 0)
1840             break;
1841
1842         if(pkt->data == flush_pkt.data){
1843             avcodec_flush_buffers(is->subtitle_st->codec);
1844             continue;
1845         }
1846         SDL_LockMutex(is->subpq_mutex);
1847         while (is->subpq_size >= SUBPICTURE_QUEUE_SIZE &&
1848                !is->subtitleq.abort_request) {
1849             SDL_CondWait(is->subpq_cond, is->subpq_mutex);
1850         }
1851         SDL_UnlockMutex(is->subpq_mutex);
1852
1853         if (is->subtitleq.abort_request)
1854             return 0;
1855
1856         sp = &is->subpq[is->subpq_windex];
1857
1858         /* NOTE: pts is the PTS of the _first_ subtitle beginning in
1859            this packet, if any */
1860         pts = 0;
1861         if (pkt->pts != AV_NOPTS_VALUE)
1862             pts = av_q2d(is->subtitle_st->time_base)*pkt->pts;
1863
1864         avcodec_decode_subtitle2(is->subtitle_st->codec, &sp->sub,
1865                                  &got_subtitle, pkt);
1866
1867         if (got_subtitle && sp->sub.format == 0) {
1868             sp->pts = pts;
1869
1870             for (i = 0; i < sp->sub.num_rects; i++)
1871             {
1872                 for (j = 0; j < sp->sub.rects[i]->nb_colors; j++)
1873                 {
1874                     RGBA_IN(r, g, b, a, (uint32_t*)sp->sub.rects[i]->pict.data[1] + j);
1875                     y = RGB_TO_Y_CCIR(r, g, b);
1876                     u = RGB_TO_U_CCIR(r, g, b, 0);
1877                     v = RGB_TO_V_CCIR(r, g, b, 0);
1878                     YUVA_OUT((uint32_t*)sp->sub.rects[i]->pict.data[1] + j, y, u, v, a);
1879                 }
1880             }
1881
1882             /* now we can update the picture count */
1883             if (++is->subpq_windex == SUBPICTURE_QUEUE_SIZE)
1884                 is->subpq_windex = 0;
1885             SDL_LockMutex(is->subpq_mutex);
1886             is->subpq_size++;
1887             SDL_UnlockMutex(is->subpq_mutex);
1888         }
1889         av_free_packet(pkt);
1890     }
1891     return 0;
1892 }
1893
1894 /* copy samples into the sample array for the audio wave / spectrum display */
1895 static void update_sample_display(VideoState *is, short *samples, int samples_size)
1896 {
1897     int size, len;
1898
1899     size = samples_size / sizeof(short);
1900     while (size > 0) {
1901         len = SAMPLE_ARRAY_SIZE - is->sample_array_index;
1902         if (len > size)
1903             len = size;
1904         memcpy(is->sample_array + is->sample_array_index, samples, len * sizeof(short));
1905         samples += len;
1906         is->sample_array_index += len;
1907         if (is->sample_array_index >= SAMPLE_ARRAY_SIZE)
1908             is->sample_array_index = 0;
1909         size -= len;
1910     }
1911 }
1912
1913 /* return the new audio buffer size (samples can be added or removed
1914    to get better sync when video or an external clock is the master) */
1915 static int synchronize_audio(VideoState *is, short *samples,
1916                              int samples_size1, double pts)
1917 {
1918     int n, samples_size;
1919     double ref_clock;
1920
1921     n = av_get_bytes_per_sample(is->audio_tgt_fmt) * is->audio_tgt_channels;
1922     samples_size = samples_size1;
1923
1924     /* if not master, then we try to remove or add samples to correct the clock */
1925     if (((is->av_sync_type == AV_SYNC_VIDEO_MASTER && is->video_st) ||
1926          is->av_sync_type == AV_SYNC_EXTERNAL_CLOCK)) {
1927         double diff, avg_diff;
1928         int wanted_size, min_size, max_size, nb_samples;
1929
1930         ref_clock = get_master_clock(is);
1931         diff = get_audio_clock(is) - ref_clock;
1932
1933         if (diff < AV_NOSYNC_THRESHOLD) {
1934             is->audio_diff_cum = diff + is->audio_diff_avg_coef * is->audio_diff_cum;
1935             if (is->audio_diff_avg_count < AUDIO_DIFF_AVG_NB) {
1936                 /* not enough measurements to make a correct estimate */
1937                 is->audio_diff_avg_count++;
1938             } else {
1939                 /* estimate the A-V difference */
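                /* audio_diff_cum follows the recurrence cum = diff + coef * cum,
                   i.e. an exponentially weighted sum whose weights total
                   1/(1 - coef) in the limit, so multiplying by (1 - coef)
                   turns it into a weighted average of the recent differences. */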
1940                 avg_diff = is->audio_diff_cum * (1.0 - is->audio_diff_avg_coef);
1941
1942                 if (fabs(avg_diff) >= is->audio_diff_threshold) {
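                    /* wanted_size is the current buffer plus diff seconds worth
                       of audio (diff * sample_rate frames of n bytes each),
                       clamped to at most SAMPLE_CORRECTION_PERCENT_MAX percent
                       away from the original size and to the static buffers. */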
1943                     wanted_size = samples_size + ((int)(diff * is->audio_tgt_freq) * n);
1944                     nb_samples = samples_size / n;
1945
1946                     min_size = ((nb_samples * (100 - SAMPLE_CORRECTION_PERCENT_MAX)) / 100) * n;
1947                     max_size = ((nb_samples * (100 + SAMPLE_CORRECTION_PERCENT_MAX)) / 100) * n;
1948                     if (wanted_size < min_size)
1949                         wanted_size = min_size;
1950                     else if (wanted_size > FFMIN3(max_size, sizeof(is->audio_buf1), sizeof(is->audio_buf2)))
1951                         wanted_size = FFMIN3(max_size, sizeof(is->audio_buf1), sizeof(is->audio_buf2));
1952
1953                     /* add or remove samples to correct the synchronization */
1954                     if (wanted_size < samples_size) {
1955                         /* remove samples */
1956                         samples_size = wanted_size;
1957                     } else if (wanted_size > samples_size) {
1958                         uint8_t *samples_end, *q;
1959                         int nb;
1960
1961                         /* add samples */
1962                         nb = wanted_size - samples_size;
1963                         samples_end = (uint8_t *)samples + samples_size - n;
1964                         q = samples_end + n;
1965                         while (nb > 0) {
1966                             memcpy(q, samples_end, n);
1967                             q += n;
1968                             nb -= n;
1969                         }
1970                         samples_size = wanted_size;
1971                     }
1972                 }
1973                 av_dlog(NULL, "diff=%f adiff=%f sample_diff=%d apts=%0.3f vpts=%0.3f %f\n",
1974                         diff, avg_diff, samples_size - samples_size1,
1975                         is->audio_clock, is->video_clock, is->audio_diff_threshold);
1976             }
1977         } else {
1978             /* too big difference : may be initial PTS errors, so
1979                reset A-V filter */
1980             is->audio_diff_avg_count = 0;
1981             is->audio_diff_cum = 0;
1982         }
1983     }
1984
1985     return samples_size;
1986 }
1987
1988 /* decode one audio frame and return its uncompressed size */
1989 static int audio_decode_frame(VideoState *is, double *pts_ptr)
1990 {
1991     AVPacket *pkt_temp = &is->audio_pkt_temp;
1992     AVPacket *pkt = &is->audio_pkt;
1993     AVCodecContext *dec= is->audio_st->codec;
1994     int len1, len2, data_size, resampled_data_size;
1995     int64_t dec_channel_layout;
1996     double pts;
1997     int new_packet = 0;
1998     int flush_complete = 0;
1999
2000     for(;;) {
2001         /* NOTE: the audio packet can contain several frames */
2002         while (pkt_temp->size > 0 || (!pkt_temp->data && new_packet)) {
2003             if (flush_complete)
2004                 break;
2005             new_packet = 0;
2006             data_size = sizeof(is->audio_buf1);
2007             len1 = avcodec_decode_audio3(dec,
2008                                         (int16_t *)is->audio_buf1, &data_size,
2009                                         pkt_temp);
2010             if (len1 < 0) {
2011                 /* if error, we skip the frame */
2012                 pkt_temp->size = 0;
2013                 break;
2014             }
2015
2016             pkt_temp->data += len1;
2017             pkt_temp->size -= len1;
2018
2019             if (data_size <= 0) {
2020                 /* stop sending empty packets if the decoder is finished */
2021                 if (!pkt_temp->data && dec->codec->capabilities & CODEC_CAP_DELAY)
2022                     flush_complete = 1;
2023                 continue;
2024             }
2025
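            /* only trust the decoder's channel_layout if it is consistent with
               its channel count; otherwise derive a default layout from the
               count alone */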
2026             dec_channel_layout = (dec->channel_layout && dec->channels == av_get_channel_layout_nb_channels(dec->channel_layout)) ? dec->channel_layout : av_get_default_channel_layout(dec->channels);
2027
2028             if (dec->sample_fmt != is->audio_src_fmt || dec_channel_layout != is->audio_src_channel_layout || dec->sample_rate != is->audio_src_freq) {
2029                 if (is->swr_ctx)
2030                     swr_free(&is->swr_ctx);
2031                 is->swr_ctx = swr_alloc2(NULL, is->audio_tgt_channel_layout, is->audio_tgt_fmt, is->audio_tgt_freq,
2032                                                dec_channel_layout,          dec->sample_fmt,   dec->sample_rate,
2033                                                0, NULL);
2034                 if (!is->swr_ctx || swr_init(is->swr_ctx) < 0) {
2035                     fprintf(stderr, "Cannot create sample rate converter for conversion of %d Hz %s %d channels to %d Hz %s %d channels!\n",
2036                         dec->sample_rate,
2037                         av_get_sample_fmt_name(dec->sample_fmt),
2038                         dec->channels,
2039                         is->audio_tgt_freq,
2040                         av_get_sample_fmt_name(is->audio_tgt_fmt),
2041                         is->audio_tgt_channels);
2042                     break;
2043                 }
2044                 is->audio_src_channel_layout = dec_channel_layout;
2045                 is->audio_src_channels = dec->channels;
2046                 is->audio_src_freq = dec->sample_rate;
2047                 is->audio_src_fmt = dec->sample_fmt;
2048             }
2049
2050             resampled_data_size = data_size;
2051             if (is->swr_ctx) {
2052                 const uint8_t *in[] = {is->audio_buf1};
2053                 uint8_t *out[] = {is->audio_buf2};
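                /* swr_convert() counts in samples per channel, which is why the
                   byte sizes are divided by the channel count and the bytes per
                   sample; len2 is likewise the number of output samples per
                   channel. */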
2054                 len2 = swr_convert(is->swr_ctx, out, sizeof(is->audio_buf2) / is->audio_tgt_channels / av_get_bytes_per_sample(is->audio_tgt_fmt),
2055                                                 in, data_size / dec->channels / av_get_bytes_per_sample(dec->sample_fmt));
2056                 if (len2 < 0) {
2057                     fprintf(stderr, "audio_resample() failed\n");
2058                     break;
2059                 }
2060                 if (len2 == sizeof(is->audio_buf2) / is->audio_tgt_channels / av_get_bytes_per_sample(is->audio_tgt_fmt)) {
2061                     fprintf(stderr, "warning: audio buffer is probably too small\n");
2062                     swr_init(is->swr_ctx);
2063                 }
2064                 is->audio_buf = is->audio_buf2;
2065                 resampled_data_size = len2 * is->audio_tgt_channels * av_get_bytes_per_sample(is->audio_tgt_fmt);
2066             } else {
2067                 is->audio_buf= is->audio_buf1;
2068             }
2069
2070             /* if no pts, then compute it */
2071             pts = is->audio_clock;
2072             *pts_ptr = pts;
2073             is->audio_clock += (double)data_size / (dec->channels * dec->sample_rate * av_get_bytes_per_sample(dec->sample_fmt));
2074 #ifdef DEBUG
2075             {
2076                 static double last_clock;
2077                 printf("audio: delay=%0.3f clock=%0.3f pts=%0.3f\n",
2078                        is->audio_clock - last_clock,
2079                        is->audio_clock, pts);
2080                 last_clock = is->audio_clock;
2081             }
2082 #endif
2083             return resampled_data_size;
2084         }
2085
2086         /* free the current packet */
2087         if (pkt->data)
2088             av_free_packet(pkt);
2089
2090         if (is->paused || is->audioq.abort_request) {
2091             return -1;
2092         }
2093
2094         /* read next packet */
2095         if ((new_packet = packet_queue_get(&is->audioq, pkt, 1)) < 0)
2096             return -1;
2097
2098         if (pkt->data == flush_pkt.data)
2099             avcodec_flush_buffers(dec);
2100
2101         pkt_temp->data = pkt->data;
2102         pkt_temp->size = pkt->size;
2103
2104         /* update the audio clock with the pts, if available */
2105         if (pkt->pts != AV_NOPTS_VALUE) {
2106             is->audio_clock = av_q2d(is->audio_st->time_base)*pkt->pts;
2107         }
2108     }
2109 }
2110
2111 /* prepare a new audio buffer */
2112 static void sdl_audio_callback(void *opaque, Uint8 *stream, int len)
2113 {
2114     VideoState *is = opaque;
2115     int audio_size, len1;
2116     int bytes_per_sec;
2117     double pts;
2118
2119     audio_callback_time = av_gettime();
2120
2121     while (len > 0) {
2122         if (is->audio_buf_index >= is->audio_buf_size) {
2123            audio_size = audio_decode_frame(is, &pts);
2124            if (audio_size < 0) {
2125                 /* if error, just output silence */
2126                is->audio_buf = is->audio_buf1;
2127                is->audio_buf_size = 256 * is->audio_tgt_channels * av_get_bytes_per_sample(is->audio_tgt_fmt);
2128                memset(is->audio_buf, 0, is->audio_buf_size);
2129            } else {
2130                if (is->show_mode != SHOW_MODE_VIDEO)
2131                    update_sample_display(is, (int16_t *)is->audio_buf, audio_size);
2132                audio_size = synchronize_audio(is, (int16_t *)is->audio_buf, audio_size,
2133                                               pts);
2134                is->audio_buf_size = audio_size;
2135            }
2136            is->audio_buf_index = 0;
2137         }
2138         len1 = is->audio_buf_size - is->audio_buf_index;
2139         if (len1 > len)
2140             len1 = len;
2141         memcpy(stream, (uint8_t *)is->audio_buf + is->audio_buf_index, len1);
2142         len -= len1;
2143         stream += len1;
2144         is->audio_buf_index += len1;
2145     }
2146     bytes_per_sec = is->audio_tgt_freq * is->audio_tgt_channels * av_get_bytes_per_sample(is->audio_tgt_fmt);
2147     is->audio_write_buf_size = is->audio_buf_size - is->audio_buf_index;
2148     /* Let's assume the audio driver that is used by SDL has two periods. */
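    /* audio_clock refers to the end of the data handed out so far; subtracting
       what is still queued (two assumed hardware periods plus our unconsumed
       buffer) gives the pts of the sample currently being played. The drift
       against the callback time lets the audio clock be extrapolated later
       without waiting for the next callback. */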
2149     is->audio_current_pts = is->audio_clock - (double)(2 * is->audio_hw_buf_size + is->audio_write_buf_size) / bytes_per_sec;
2150     is->audio_current_pts_drift = is->audio_current_pts - audio_callback_time / 1000000.0;
2151 }
2152
2153 /* open a given stream. Return 0 if OK */
2154 static int stream_component_open(VideoState *is, int stream_index)
2155 {
2156     AVFormatContext *ic = is->ic;
2157     AVCodecContext *avctx;
2158     AVCodec *codec;
2159     SDL_AudioSpec wanted_spec, spec;
2160     AVDictionary *opts;
2161     AVDictionaryEntry *t = NULL;
2162     int64_t wanted_channel_layout = 0;
2163
2164     if (stream_index < 0 || stream_index >= ic->nb_streams)
2165         return -1;
2166     avctx = ic->streams[stream_index]->codec;
2167
2168     opts = filter_codec_opts(codec_opts, avctx->codec_id, ic, ic->streams[stream_index]);
2169
2170     codec = avcodec_find_decoder(avctx->codec_id);
2171     switch(avctx->codec_type){
2172         case AVMEDIA_TYPE_AUDIO   : if(audio_codec_name   ) codec= avcodec_find_decoder_by_name(   audio_codec_name); break;
2173         case AVMEDIA_TYPE_SUBTITLE: if(subtitle_codec_name) codec= avcodec_find_decoder_by_name(subtitle_codec_name); break;
2174         case AVMEDIA_TYPE_VIDEO   : if(video_codec_name   ) codec= avcodec_find_decoder_by_name(   video_codec_name); break;
2175     }
2176     if (!codec)
2177         return -1;
2178
2179     avctx->workaround_bugs = workaround_bugs;
2180     avctx->lowres = lowres;
2181     if(lowres) avctx->flags |= CODEC_FLAG_EMU_EDGE;
2182     avctx->idct_algo= idct;
2183     if(fast) avctx->flags2 |= CODEC_FLAG2_FAST;
2184     avctx->skip_frame= skip_frame;
2185     avctx->skip_idct= skip_idct;
2186     avctx->skip_loop_filter= skip_loop_filter;
2187     avctx->error_recognition= error_recognition;
2188     avctx->error_concealment= error_concealment;
2189
2190     if(codec->capabilities & CODEC_CAP_DR1)
2191         avctx->flags |= CODEC_FLAG_EMU_EDGE;
2192
2193     if (avctx->codec_type == AVMEDIA_TYPE_AUDIO) {
2194         wanted_channel_layout = (avctx->channel_layout && avctx->channels == av_get_channel_layout_nb_channels(avctx->channel_layout)) ? avctx->channel_layout : av_get_default_channel_layout(avctx->channels);
2195         wanted_channel_layout &= ~AV_CH_LAYOUT_STEREO_DOWNMIX;
2196         wanted_spec.channels = av_get_channel_layout_nb_channels(wanted_channel_layout);
2197         wanted_spec.freq = avctx->sample_rate;
2198         if (wanted_spec.freq <= 0 || wanted_spec.channels <= 0) {
2199             fprintf(stderr, "Invalid sample rate or channel count!\n");
2200             return -1;
2201         }
2202     }
2203
2204     if (!codec ||
2205         avcodec_open2(avctx, codec, &opts) < 0)
2206         return -1;
2207     if ((t = av_dict_get(opts, "", NULL, AV_DICT_IGNORE_SUFFIX))) {
2208         av_log(NULL, AV_LOG_ERROR, "Option %s not found.\n", t->key);
2209         return AVERROR_OPTION_NOT_FOUND;
2210     }
2211
2212     /* prepare audio output */
2213     if (avctx->codec_type == AVMEDIA_TYPE_AUDIO) {
2214         wanted_spec.format = AUDIO_S16SYS;
2215         wanted_spec.silence = 0;
2216         wanted_spec.samples = SDL_AUDIO_BUFFER_SIZE;
2217         wanted_spec.callback = sdl_audio_callback;
2218         wanted_spec.userdata = is;
2219         if (SDL_OpenAudio(&wanted_spec, &spec) < 0) {
2220             fprintf(stderr, "SDL_OpenAudio: %s\n", SDL_GetError());
2221             return -1;
2222         }
2223         is->audio_hw_buf_size = spec.size;
2224         if (spec.format != AUDIO_S16SYS) {
2225             fprintf(stderr, "SDL advised audio format %d is not supported!\n", spec.format);
2226             return -1;
2227         }
2228         if (spec.channels != wanted_spec.channels) {
2229             wanted_channel_layout = av_get_default_channel_layout(spec.channels);
2230             if (!wanted_channel_layout) {
2231                 fprintf(stderr, "SDL advised channel count %d is not supported!\n", spec.channels);
2232                 return -1;
2233             }
2234         }
2235         is->audio_src_fmt = is->audio_tgt_fmt = AV_SAMPLE_FMT_S16;
2236         is->audio_src_freq = is->audio_tgt_freq = spec.freq;
2237         is->audio_src_channel_layout = is->audio_tgt_channel_layout = wanted_channel_layout;
2238         is->audio_src_channels = is->audio_tgt_channels = spec.channels;
2239     }
2240
2241     ic->streams[stream_index]->discard = AVDISCARD_DEFAULT;
2242     switch(avctx->codec_type) {
2243     case AVMEDIA_TYPE_AUDIO:
2244         is->audio_stream = stream_index;
2245         is->audio_st = ic->streams[stream_index];
2246         is->audio_buf_size = 0;
2247         is->audio_buf_index = 0;
2248
2249         /* init averaging filter */
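        /* the coefficient is 0.01^(1/AUDIO_DIFF_AVG_NB), so a difference's
           weight in the running average decays to roughly 1% after
           AUDIO_DIFF_AVG_NB further measurements */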
2250         is->audio_diff_avg_coef = exp(log(0.01) / AUDIO_DIFF_AVG_NB);
2251         is->audio_diff_avg_count = 0;
2252         /* since we do not have a precise enough audio FIFO fullness measure,
2253            we only correct audio sync when the error is larger than this threshold */
2254         is->audio_diff_threshold = 2.0 * SDL_AUDIO_BUFFER_SIZE / wanted_spec.freq;
2255
2256         memset(&is->audio_pkt, 0, sizeof(is->audio_pkt));
2257         packet_queue_init(&is->audioq);
2258         SDL_PauseAudio(0);
2259         break;
2260     case AVMEDIA_TYPE_VIDEO:
2261         is->video_stream = stream_index;
2262         is->video_st = ic->streams[stream_index];
2263
2264         packet_queue_init(&is->videoq);
2265         is->video_tid = SDL_CreateThread(video_thread, is);
2266         break;
2267     case AVMEDIA_TYPE_SUBTITLE:
2268         is->subtitle_stream = stream_index;
2269         is->subtitle_st = ic->streams[stream_index];
2270         packet_queue_init(&is->subtitleq);
2271
2272         is->subtitle_tid = SDL_CreateThread(subtitle_thread, is);
2273         break;
2274     default:
2275         break;
2276     }
2277     return 0;
2278 }
2279
2280 static void stream_component_close(VideoState *is, int stream_index)
2281 {
2282     AVFormatContext *ic = is->ic;
2283     AVCodecContext *avctx;
2284
2285     if (stream_index < 0 || stream_index >= ic->nb_streams)
2286         return;
2287     avctx = ic->streams[stream_index]->codec;
2288
2289     switch(avctx->codec_type) {
2290     case AVMEDIA_TYPE_AUDIO:
2291         packet_queue_abort(&is->audioq);
2292
2293         SDL_CloseAudio();
2294
2295         packet_queue_end(&is->audioq);
2296         if (is->swr_ctx)
2297             swr_free(&is->swr_ctx);
2298         av_free_packet(&is->audio_pkt);
2299
2300         if (is->rdft) {
2301             av_rdft_end(is->rdft);
2302             av_freep(&is->rdft_data);
2303         }
2304         break;
2305     case AVMEDIA_TYPE_VIDEO:
2306         packet_queue_abort(&is->videoq);
2307
2308         /* note: we also signal this mutex to make sure we unblock the
2309            video thread in all cases */
2310         SDL_LockMutex(is->pictq_mutex);
2311         SDL_CondSignal(is->pictq_cond);
2312         SDL_UnlockMutex(is->pictq_mutex);
2313
2314         SDL_WaitThread(is->video_tid, NULL);
2315
2316         packet_queue_end(&is->videoq);
2317         break;
2318     case AVMEDIA_TYPE_SUBTITLE:
2319         packet_queue_abort(&is->subtitleq);
2320
2321         /* note: we also signal this mutex to make sure we unblock the
2322            subtitle thread in all cases */
2323         SDL_LockMutex(is->subpq_mutex);
2324         is->subtitle_stream_changed = 1;
2325
2326         SDL_CondSignal(is->subpq_cond);
2327         SDL_UnlockMutex(is->subpq_mutex);
2328
2329         SDL_WaitThread(is->subtitle_tid, NULL);
2330
2331         packet_queue_end(&is->subtitleq);
2332         break;
2333     default:
2334         break;
2335     }
2336
2337     ic->streams[stream_index]->discard = AVDISCARD_ALL;
2338     avcodec_close(avctx);
2339     switch(avctx->codec_type) {
2340     case AVMEDIA_TYPE_AUDIO:
2341         is->audio_st = NULL;
2342         is->audio_stream = -1;
2343         break;
2344     case AVMEDIA_TYPE_VIDEO:
2345         is->video_st = NULL;
2346         is->video_stream = -1;
2347         break;
2348     case AVMEDIA_TYPE_SUBTITLE:
2349         is->subtitle_st = NULL;
2350         is->subtitle_stream = -1;
2351         break;
2352     default:
2353         break;
2354     }
2355 }
2356
2357 /* since we have only one decoding thread, we can use a global
2358    variable instead of a thread local variable */
2359 static VideoState *global_video_state;
2360
2361 static int decode_interrupt_cb(void)
2362 {
2363     return (global_video_state && global_video_state->abort_request);
2364 }
2365
2366 /* this thread gets the stream from the disk or the network */
2367 static int read_thread(void *arg)
2368 {
2369     VideoState *is = arg;
2370     AVFormatContext *ic = NULL;
2371     int err, i, ret;
2372     int st_index[AVMEDIA_TYPE_NB];
2373     AVPacket pkt1, *pkt = &pkt1;
2374     int eof=0;
2375     int pkt_in_play_range = 0;
2376     AVDictionaryEntry *t;
2377     AVDictionary **opts;
2378     int orig_nb_streams;
2379
2380     memset(st_index, -1, sizeof(st_index));
2381     is->video_stream = -1;
2382     is->audio_stream = -1;
2383     is->subtitle_stream = -1;
2384
2385     global_video_state = is;
2386     avio_set_interrupt_cb(decode_interrupt_cb);
2387
2388     err = avformat_open_input(&ic, is->filename, is->iformat, &format_opts);
2389     if (err < 0) {
2390         print_error(is->filename, err);
2391         ret = -1;
2392         goto fail;
2393     }
2394     if ((t = av_dict_get(format_opts, "", NULL, AV_DICT_IGNORE_SUFFIX))) {
2395         av_log(NULL, AV_LOG_ERROR, "Option %s not found.\n", t->key);
2396         ret = AVERROR_OPTION_NOT_FOUND;
2397         goto fail;
2398     }
2399     is->ic = ic;
2400
2401     if(genpts)
2402         ic->flags |= AVFMT_FLAG_GENPTS;
2403
2404     av_dict_set(&codec_opts, "request_channels", "2", 0);
2405
2406     opts = setup_find_stream_info_opts(ic, codec_opts);
2407     orig_nb_streams = ic->nb_streams;
2408
2409     err = avformat_find_stream_info(ic, opts);
2410     if (err < 0) {
2411         fprintf(stderr, "%s: could not find codec parameters\n", is->filename);
2412         ret = -1;
2413         goto fail;
2414     }
2415     for (i = 0; i < orig_nb_streams; i++)
2416         av_dict_free(&opts[i]);
2417     av_freep(&opts);
2418
2419     if(ic->pb)
2420         ic->pb->eof_reached= 0; //FIXME hack, ffplay maybe should not use url_feof() to test for the end
2421
2422     if(seek_by_bytes<0)
2423         seek_by_bytes= !!(ic->iformat->flags & AVFMT_TS_DISCONT);
2424
2425     /* if seeking requested, we execute it */
2426     if (start_time != AV_NOPTS_VALUE) {
2427         int64_t timestamp;
2428
2429         timestamp = start_time;
2430         /* add the stream start time */
2431         if (ic->start_time != AV_NOPTS_VALUE)
2432             timestamp += ic->start_time;
2433         ret = avformat_seek_file(ic, -1, INT64_MIN, timestamp, INT64_MAX, 0);
2434         if (ret < 0) {
2435             fprintf(stderr, "%s: could not seek to position %0.3f\n",
2436                     is->filename, (double)timestamp / AV_TIME_BASE);
2437         }
2438     }
2439
2440     for (i = 0; i < ic->nb_streams; i++)
2441         ic->streams[i]->discard = AVDISCARD_ALL;
2442     if (!video_disable)
2443         st_index[AVMEDIA_TYPE_VIDEO] =
2444             av_find_best_stream(ic, AVMEDIA_TYPE_VIDEO,
2445                                 wanted_stream[AVMEDIA_TYPE_VIDEO], -1, NULL, 0);
2446     if (!audio_disable)
2447         st_index[AVMEDIA_TYPE_AUDIO] =
2448             av_find_best_stream(ic, AVMEDIA_TYPE_AUDIO,
2449                                 wanted_stream[AVMEDIA_TYPE_AUDIO],
2450                                 st_index[AVMEDIA_TYPE_VIDEO],
2451                                 NULL, 0);
2452     if (!video_disable)
2453         st_index[AVMEDIA_TYPE_SUBTITLE] =
2454             av_find_best_stream(ic, AVMEDIA_TYPE_SUBTITLE,
2455                                 wanted_stream[AVMEDIA_TYPE_SUBTITLE],
2456                                 (st_index[AVMEDIA_TYPE_AUDIO] >= 0 ?
2457                                  st_index[AVMEDIA_TYPE_AUDIO] :
2458                                  st_index[AVMEDIA_TYPE_VIDEO]),
2459                                 NULL, 0);
2460     if (show_status) {
2461         av_dump_format(ic, 0, is->filename, 0);
2462     }
2463
2464     is->show_mode = show_mode;
2465
2466     /* open the streams */
2467     if (st_index[AVMEDIA_TYPE_AUDIO] >= 0) {
2468         stream_component_open(is, st_index[AVMEDIA_TYPE_AUDIO]);
2469     }
2470
2471     ret=-1;
2472     if (st_index[AVMEDIA_TYPE_VIDEO] >= 0) {
2473         ret= stream_component_open(is, st_index[AVMEDIA_TYPE_VIDEO]);
2474     }
2475     is->refresh_tid = SDL_CreateThread(refresh_thread, is);
2476     if (is->show_mode == SHOW_MODE_NONE)
2477         is->show_mode = ret >= 0 ? SHOW_MODE_VIDEO : SHOW_MODE_RDFT;
2478
2479     if (st_index[AVMEDIA_TYPE_SUBTITLE] >= 0) {
2480         stream_component_open(is, st_index[AVMEDIA_TYPE_SUBTITLE]);
2481     }
2482
2483     if (is->video_stream < 0 && is->audio_stream < 0) {
2484         fprintf(stderr, "%s: could not open codecs\n", is->filename);
2485         ret = -1;
2486         goto fail;
2487     }
2488
2489     for(;;) {
2490         if (is->abort_request)
2491             break;
2492         if (is->paused != is->last_paused) {
2493             is->last_paused = is->paused;
2494             if (is->paused)
2495                 is->read_pause_return= av_read_pause(ic);
2496             else
2497                 av_read_play(ic);
2498         }
2499 #if CONFIG_RTSP_DEMUXER
2500         if (is->paused && !strcmp(ic->iformat->name, "rtsp")) {
2501             /* wait 10 ms to avoid trying to get another packet */
2502             /* XXX: horrible */
2503             SDL_Delay(10);
2504             continue;
2505         }
2506 #endif
2507         if (is->seek_req) {
2508             int64_t seek_target= is->seek_pos;
2509             int64_t seek_min= is->seek_rel > 0 ? seek_target - is->seek_rel + 2: INT64_MIN;
2510             int64_t seek_max= is->seek_rel < 0 ? seek_target - is->seek_rel - 2: INT64_MAX;
2511 //FIXME the +-2 is due to rounding being not done in the correct direction in generation
2512 //      of the seek_pos/seek_rel variables
2513
2514             ret = avformat_seek_file(is->ic, -1, seek_min, seek_target, seek_max, is->seek_flags);
2515             if (ret < 0) {
2516                 fprintf(stderr, "%s: error while seeking\n", is->ic->filename);
2517             }else{
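                /* after a successful seek, flush every packet queue and push
                   the flush_pkt sentinel so that each decoder thread calls
                   avcodec_flush_buffers() before decoding post-seek data */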
2518                 if (is->audio_stream >= 0) {
2519                     packet_queue_flush(&is->audioq);
2520                     packet_queue_put(&is->audioq, &flush_pkt);
2521                 }
2522                 if (is->subtitle_stream >= 0) {
2523                     packet_queue_flush(&is->subtitleq);
2524                     packet_queue_put(&is->subtitleq, &flush_pkt);
2525                 }
2526                 if (is->video_stream >= 0) {
2527                     packet_queue_flush(&is->videoq);
2528                     packet_queue_put(&is->videoq, &flush_pkt);
2529                 }
2530             }
2531             is->seek_req = 0;
2532             eof= 0;
2533         }
2534
2535         /* if the queues are full, no need to read more */
2536         if (   is->audioq.size + is->videoq.size + is->subtitleq.size > MAX_QUEUE_SIZE
2537             || (   (is->audioq   .size  > MIN_AUDIOQ_SIZE || is->audio_stream<0)
2538                 && (is->videoq   .nb_packets > MIN_FRAMES || is->video_stream<0)
2539                 && (is->subtitleq.nb_packets > MIN_FRAMES || is->subtitle_stream<0))) {
2540             /* wait 10 ms */
2541             SDL_Delay(10);
2542             continue;
2543         }
2544         if(eof) {
2545             if(is->video_stream >= 0){
2546                 av_init_packet(pkt);
2547                 pkt->data=NULL;
2548                 pkt->size=0;
2549                 pkt->stream_index= is->video_stream;
2550                 packet_queue_put(&is->videoq, pkt);
2551             }
2552             if (is->audio_stream >= 0 &&
2553                 is->audio_st->codec->codec->capabilities & CODEC_CAP_DELAY) {
2554                 av_init_packet(pkt);
2555                 pkt->data = NULL;
2556                 pkt->size = 0;
2557                 pkt->stream_index = is->audio_stream;
2558                 packet_queue_put(&is->audioq, pkt);
2559             }
2560             SDL_Delay(10);
2561             if(is->audioq.size + is->videoq.size + is->subtitleq.size ==0){
2562                 if(loop!=1 && (!loop || --loop)){
2563                     stream_seek(is, start_time != AV_NOPTS_VALUE ? start_time : 0, 0, 0);
2564                 }else if(autoexit){
2565                     ret=AVERROR_EOF;
2566                     goto fail;
2567                 }
2568             }
2569             eof=0;
2570             continue;
2571         }
2572         ret = av_read_frame(ic, pkt);
2573         if (ret < 0) {
2574             if (ret == AVERROR_EOF || url_feof(ic->pb))
2575                 eof=1;
2576             if (ic->pb && ic->pb->error)
2577                 break;
2578             SDL_Delay(100); /* wait for user event */
2579             continue;
2580         }
2581         /* check if packet is in play range specified by user, then queue, otherwise discard */
2582         pkt_in_play_range = duration == AV_NOPTS_VALUE ||
2583                 (pkt->pts - ic->streams[pkt->stream_index]->start_time) *
2584                 av_q2d(ic->streams[pkt->stream_index]->time_base) -
2585                 (double)(start_time != AV_NOPTS_VALUE ? start_time : 0)/1000000
2586                 <= ((double)duration/1000000);
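        /* the test above rescales pkt->pts to seconds relative to the stream
           start and compares it against the -t duration; start_time and
           duration are in AV_TIME_BASE units (microseconds), hence the
           divisions by 1000000 */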
2587         if (pkt->stream_index == is->audio_stream && pkt_in_play_range) {
2588             packet_queue_put(&is->audioq, pkt);
2589         } else if (pkt->stream_index == is->video_stream && pkt_in_play_range) {
2590             packet_queue_put(&is->videoq, pkt);
2591         } else if (pkt->stream_index == is->subtitle_stream && pkt_in_play_range) {
2592             packet_queue_put(&is->subtitleq, pkt);
2593         } else {
2594             av_free_packet(pkt);
2595         }
2596     }
2597     /* wait until the end */
2598     while (!is->abort_request) {
2599         SDL_Delay(100);
2600     }
2601
2602     ret = 0;
2603  fail:
2604     /* disable interrupting */
2605     global_video_state = NULL;
2606
2607     /* close each stream */
2608     if (is->audio_stream >= 0)
2609         stream_component_close(is, is->audio_stream);
2610     if (is->video_stream >= 0)
2611         stream_component_close(is, is->video_stream);
2612     if (is->subtitle_stream >= 0)
2613         stream_component_close(is, is->subtitle_stream);
2614     if (is->ic) {
2615         av_close_input_file(is->ic);
2616         is->ic = NULL; /* safety */
2617     }
2618     avio_set_interrupt_cb(NULL);
2619
2620     if (ret != 0) {
2621         SDL_Event event;
2622
2623         event.type = FF_QUIT_EVENT;
2624         event.user.data1 = is;
2625         SDL_PushEvent(&event);
2626     }
2627     return 0;
2628 }
2629
2630 static VideoState *stream_open(const char *filename, AVInputFormat *iformat)
2631 {
2632     VideoState *is;
2633
2634     is = av_mallocz(sizeof(VideoState));
2635     if (!is)
2636         return NULL;
2637     av_strlcpy(is->filename, filename, sizeof(is->filename));
2638     is->iformat = iformat;
2639     is->ytop = 0;
2640     is->xleft = 0;
2641
2642     /* start video display */
2643     is->pictq_mutex = SDL_CreateMutex();
2644     is->pictq_cond = SDL_CreateCond();
2645
2646     is->subpq_mutex = SDL_CreateMutex();
2647     is->subpq_cond = SDL_CreateCond();
2648
2649     is->av_sync_type = av_sync_type;
2650     is->read_tid = SDL_CreateThread(read_thread, is);
2651     if (!is->read_tid) {
2652         av_free(is);
2653         return NULL;
2654     }
2655     return is;
2656 }
2657
2658 static void stream_cycle_channel(VideoState *is, int codec_type)
2659 {
2660     AVFormatContext *ic = is->ic;
2661     int start_index, stream_index;
2662     AVStream *st;
2663
2664     if (codec_type == AVMEDIA_TYPE_VIDEO)
2665         start_index = is->video_stream;
2666     else if (codec_type == AVMEDIA_TYPE_AUDIO)
2667         start_index = is->audio_stream;
2668     else
2669         start_index = is->subtitle_stream;
2670     if (start_index < (codec_type == AVMEDIA_TYPE_SUBTITLE ? -1 : 0))
2671         return;
2672     stream_index = start_index;
2673     for(;;) {
2674         if (++stream_index >= is->ic->nb_streams)
2675         {
2676             if (codec_type == AVMEDIA_TYPE_SUBTITLE)
2677             {
2678                 stream_index = -1;
2679                 goto the_end;
2680             } else
2681                 stream_index = 0;
2682         }
2683         if (stream_index == start_index)
2684             return;
2685         st = ic->streams[stream_index];
2686         if (st->codec->codec_type == codec_type) {
2687             /* check that parameters are OK */
2688             switch(codec_type) {
2689             case AVMEDIA_TYPE_AUDIO:
2690                 if (st->codec->sample_rate != 0 &&
2691                     st->codec->channels != 0)
2692                     goto the_end;
2693                 break;
2694             case AVMEDIA_TYPE_VIDEO:
2695             case AVMEDIA_TYPE_SUBTITLE:
2696                 goto the_end;
2697             default:
2698                 break;
2699             }
2700         }
2701     }
2702  the_end:
2703     stream_component_close(is, start_index);
2704     stream_component_open(is, stream_index);
2705 }
2706
2707
2708 static void toggle_full_screen(VideoState *is)
2709 {
2710     is_full_screen = !is_full_screen;
2711     video_open(is);
2712 }
2713
2714 static void toggle_pause(VideoState *is)
2715 {
2716     stream_toggle_pause(is);
2717     is->step = 0;
2718 }
2719
2720 static void step_to_next_frame(VideoState *is)
2721 {
2722     /* if the stream is paused, unpause it, then step */
2723     if (is->paused)
2724         stream_toggle_pause(is);
2725     is->step = 1;
2726 }
2727
2728 static void toggle_audio_display(VideoState *is)
2729 {
2730     int bgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0x00);
2731     is->show_mode = (is->show_mode + 1) % SHOW_MODE_NB;
2732     fill_rectangle(screen,
2733                 is->xleft, is->ytop, is->width, is->height,
2734                 bgcolor);
2735     SDL_UpdateRect(screen, is->xleft, is->ytop, is->width, is->height);
2736 }
2737
2738 /* handle an event sent by the GUI */
2739 static void event_loop(VideoState *cur_stream)
2740 {
2741     SDL_Event event;
2742     double incr, pos, frac;
2743
2744     for(;;) {
2745         double x;
2746         SDL_WaitEvent(&event);
2747         switch(event.type) {
2748         case SDL_KEYDOWN:
2749             if (exit_on_keydown) {
2750                 do_exit(cur_stream);
2751                 break;
2752             }
2753             switch(event.key.keysym.sym) {
2754             case SDLK_ESCAPE:
2755             case SDLK_q:
2756                 do_exit(cur_stream);
2757                 break;
2758             case SDLK_f:
2759                 toggle_full_screen(cur_stream);
2760                 break;
2761             case SDLK_p:
2762             case SDLK_SPACE:
2763                 toggle_pause(cur_stream);
2764                 break;
2765             case SDLK_s: //S: Step to next frame
2766                 step_to_next_frame(cur_stream);
2767                 break;
2768             case SDLK_a:
2769                 stream_cycle_channel(cur_stream, AVMEDIA_TYPE_AUDIO);
2770                 break;
2771             case SDLK_v:
2772                 stream_cycle_channel(cur_stream, AVMEDIA_TYPE_VIDEO);
2773                 break;
2774             case SDLK_t:
2775                 stream_cycle_channel(cur_stream, AVMEDIA_TYPE_SUBTITLE);
2776                 break;
2777             case SDLK_w:
2778                 toggle_audio_display(cur_stream);
2779                 break;
2780             case SDLK_LEFT:
2781                 incr = -10.0;
2782                 goto do_seek;
2783             case SDLK_RIGHT:
2784                 incr = 10.0;
2785                 goto do_seek;
2786             case SDLK_UP:
2787                 incr = 60.0;
2788                 goto do_seek;
2789             case SDLK_DOWN:
2790                 incr = -60.0;
2791             do_seek:
2792                 if (seek_by_bytes) {
2793                     if (cur_stream->video_stream >= 0 && cur_stream->video_current_pos>=0){
2794                         pos= cur_stream->video_current_pos;
2795                     }else if(cur_stream->audio_stream >= 0 && cur_stream->audio_pkt.pos>=0){
2796                         pos= cur_stream->audio_pkt.pos;
2797                     }else
2798                         pos = avio_tell(cur_stream->ic->pb);
2799                     if (cur_stream->ic->bit_rate)
2800                         incr *= cur_stream->ic->bit_rate / 8.0;
2801                     else
2802                         incr *= 180000.0;
2803                     pos += incr;
2804                     stream_seek(cur_stream, pos, incr, 1);
2805                 } else {
2806                     pos = get_master_clock(cur_stream);
2807                     pos += incr;
2808                     stream_seek(cur_stream, (int64_t)(pos * AV_TIME_BASE), (int64_t)(incr * AV_TIME_BASE), 0);
2809                 }
2810                 break;
2811             default:
2812                 break;
2813             }
2814             break;
2815         case SDL_MOUSEBUTTONDOWN:
2816             if (exit_on_mousedown) {
2817                 do_exit(cur_stream);
2818                 break;
2819             }
2820         case SDL_MOUSEMOTION:
2821             if(event.type ==SDL_MOUSEBUTTONDOWN){
2822                 x= event.button.x;
2823             }else{
2824                 if(event.motion.state != SDL_PRESSED)
2825                     break;
2826                 x= event.motion.x;
2827             }
2828             if(seek_by_bytes || cur_stream->ic->duration<=0){
2829                 uint64_t size=  avio_size(cur_stream->ic->pb);
2830                 stream_seek(cur_stream, size*x/cur_stream->width, 0, 1);
2831             }else{
2832                 int64_t ts;
2833                 int ns, hh, mm, ss;
2834                 int tns, thh, tmm, tss;
2835                 tns = cur_stream->ic->duration/1000000LL;
2836                 thh = tns/3600;
2837                 tmm = (tns%3600)/60;
2838                 tss = (tns%60);
2839                 frac = x/cur_stream->width;
2840                 ns = frac*tns;
2841                 hh = ns/3600;
2842                 mm = (ns%3600)/60;
2843                 ss = (ns%60);
2844                 fprintf(stderr, "Seek to %2.0f%% (%2d:%02d:%02d) of total duration (%2d:%02d:%02d)       \n", frac*100,
2845                         hh, mm, ss, thh, tmm, tss);
2846                 ts = frac*cur_stream->ic->duration;
2847                 if (cur_stream->ic->start_time != AV_NOPTS_VALUE)
2848                     ts += cur_stream->ic->start_time;
2849                 stream_seek(cur_stream, ts, 0, 0);
2850             }
2851             break;
2852         case SDL_VIDEORESIZE:
2853             screen = SDL_SetVideoMode(event.resize.w, event.resize.h, 0,
2854                                       SDL_HWSURFACE|SDL_RESIZABLE|SDL_ASYNCBLIT|SDL_HWACCEL);
2855             screen_width = cur_stream->width = event.resize.w;
2856             screen_height= cur_stream->height= event.resize.h;
2857             break;
2858         case SDL_QUIT:
2859         case FF_QUIT_EVENT:
2860             do_exit(cur_stream);
2861             break;
2862         case FF_ALLOC_EVENT:
2863             video_open(event.user.data1);
2864             alloc_picture(event.user.data1);
2865             break;
2866         case FF_REFRESH_EVENT:
2867             video_refresh(event.user.data1);
2868             cur_stream->refresh=0;
2869             break;
2870         default:
2871             break;
2872         }
2873     }
2874 }
2875
2876 static int opt_frame_size(const char *opt, const char *arg)
2877 {
2878     av_log(NULL, AV_LOG_WARNING, "Option -s is deprecated, use -video_size.\n");
2879     return opt_default("video_size", arg);
2880 }
2881
2882 static int opt_width(const char *opt, const char *arg)
2883 {
2884     screen_width = parse_number_or_die(opt, arg, OPT_INT64, 1, INT_MAX);
2885     return 0;
2886 }
2887
2888 static int opt_height(const char *opt, const char *arg)
2889 {
2890     screen_height = parse_number_or_die(opt, arg, OPT_INT64, 1, INT_MAX);
2891     return 0;
2892 }
2893
2894 static int opt_format(const char *opt, const char *arg)
2895 {
2896     file_iformat = av_find_input_format(arg);
2897     if (!file_iformat) {
2898         fprintf(stderr, "Unknown input format: %s\n", arg);
2899         return AVERROR(EINVAL);
2900     }
2901     return 0;
2902 }
2903
2904 static int opt_frame_pix_fmt(const char *opt, const char *arg)
2905 {
2906     av_log(NULL, AV_LOG_WARNING, "Option -pix_fmt is deprecated, use -pixel_format.\n");
2907     return opt_default("pixel_format", arg);
2908 }
2909
2910 static int opt_sync(const char *opt, const char *arg)
2911 {
2912     if (!strcmp(arg, "audio"))
2913         av_sync_type = AV_SYNC_AUDIO_MASTER;
2914     else if (!strcmp(arg, "video"))
2915         av_sync_type = AV_SYNC_VIDEO_MASTER;
2916     else if (!strcmp(arg, "ext"))
2917         av_sync_type = AV_SYNC_EXTERNAL_CLOCK;
2918     else {
2919         fprintf(stderr, "Unknown value for %s: %s\n", opt, arg);
2920         exit(1);
2921     }
2922     return 0;
2923 }
2924
2925 static int opt_seek(const char *opt, const char *arg)
2926 {
2927     start_time = parse_time_or_die(opt, arg, 1);
2928     return 0;
2929 }
2930
2931 static int opt_duration(const char *opt, const char *arg)
2932 {
2933     duration = parse_time_or_die(opt, arg, 1);
2934     return 0;
2935 }
2936
2937 static int opt_show_mode(const char *opt, const char *arg)
2938 {
2939     show_mode = !strcmp(arg, "video") ? SHOW_MODE_VIDEO :
2940                 !strcmp(arg, "waves") ? SHOW_MODE_WAVES :
2941                 !strcmp(arg, "rdft" ) ? SHOW_MODE_RDFT  :
2942                 parse_number_or_die(opt, arg, OPT_INT, 0, SHOW_MODE_NB-1);
2943     return 0;
2944 }
2945
2946 static void opt_input_file(void *optctx, const char *filename)
2947 {
2948     if (input_filename) {
2949         fprintf(stderr, "Argument '%s' provided as input filename, but '%s' was already specified.\n",
2950                 filename, input_filename);
2951         exit_program(1);
2952     }
2953     if (!strcmp(filename, "-"))
2954         filename = "pipe:";
2955     input_filename = filename;
2956 }
2957
2958 static int opt_codec(void *o, const char *opt, const char *arg)
2959 {
2960     switch(opt[strlen(opt)-1]){
2961     case 'a' :    audio_codec_name = arg; break;
2962     case 's' : subtitle_codec_name = arg; break;
2963     case 'v' :    video_codec_name = arg; break;
2964     }
2965     return 0;
2966 }
2967
2968 static int dummy;
2969
2970 static const OptionDef options[] = {
2971 #include "cmdutils_common_opts.h"
2972     { "x", HAS_ARG, {(void*)opt_width}, "force displayed width", "width" },
2973     { "y", HAS_ARG, {(void*)opt_height}, "force displayed height", "height" },
2974     { "s", HAS_ARG | OPT_VIDEO, {(void*)opt_frame_size}, "set frame size (WxH or abbreviation)", "size" },
2975     { "fs", OPT_BOOL, {(void*)&is_full_screen}, "force full screen" },
2976     { "an", OPT_BOOL, {(void*)&audio_disable}, "disable audio" },
2977     { "vn", OPT_BOOL, {(void*)&video_disable}, "disable video" },
2978     { "ast", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&wanted_stream[AVMEDIA_TYPE_AUDIO]}, "select desired audio stream", "stream_number" },
2979     { "vst", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&wanted_stream[AVMEDIA_TYPE_VIDEO]}, "select desired video stream", "stream_number" },
2980     { "sst", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&wanted_stream[AVMEDIA_TYPE_SUBTITLE]}, "select desired subtitle stream", "stream_number" },
2981     { "ss", HAS_ARG, {(void*)&opt_seek}, "seek to a given position in seconds", "pos" },
2982     { "t", HAS_ARG, {(void*)&opt_duration}, "play  \"duration\" seconds of audio/video", "duration" },
2983     { "bytes", OPT_INT | HAS_ARG, {(void*)&seek_by_bytes}, "seek by bytes 0=off 1=on -1=auto", "val" },
2984     { "nodisp", OPT_BOOL, {(void*)&display_disable}, "disable graphical display" },
2985     { "f", HAS_ARG, {(void*)opt_format}, "force format", "fmt" },
2986     { "pix_fmt", HAS_ARG | OPT_EXPERT | OPT_VIDEO, {(void*)opt_frame_pix_fmt}, "set pixel format", "format" },
2987     { "stats", OPT_BOOL | OPT_EXPERT, {(void*)&show_status}, "show status", "" },
2988     { "bug", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&workaround_bugs}, "workaround bugs", "" },
2989     { "fast", OPT_BOOL | OPT_EXPERT, {(void*)&fast}, "non spec compliant optimizations", "" },
2990     { "genpts", OPT_BOOL | OPT_EXPERT, {(void*)&genpts}, "generate pts", "" },
2991     { "drp", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&decoder_reorder_pts}, "let decoder reorder pts 0=off 1=on -1=auto", ""},
2992     { "lowres", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&lowres}, "", "" },
2993     { "skiploop", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&skip_loop_filter}, "", "" },
2994     { "skipframe", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&skip_frame}, "", "" },
2995     { "skipidct", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&skip_idct}, "", "" },
2996     { "idct", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&idct}, "set idct algo",  "algo" },
2997     { "er", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&error_recognition}, "set error detection threshold (0-4)",  "threshold" },
2998     { "ec", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&error_concealment}, "set error concealment options",  "bit_mask" },
2999     { "sync", HAS_ARG | OPT_EXPERT, {(void*)opt_sync}, "set audio-video sync. type (type=audio/video/ext)", "type" },
3000     { "autoexit", OPT_BOOL | OPT_EXPERT, {(void*)&autoexit}, "exit at the end", "" },
3001     { "exitonkeydown", OPT_BOOL | OPT_EXPERT, {(void*)&exit_on_keydown}, "exit on key down", "" },
3002     { "exitonmousedown", OPT_BOOL | OPT_EXPERT, {(void*)&exit_on_mousedown}, "exit on mouse down", "" },
3003     { "loop", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&loop}, "set number of times the playback shall be looped", "loop count" },
3004     { "framedrop", OPT_BOOL | OPT_EXPERT, {(void*)&framedrop}, "drop frames when cpu is too slow", "" },
3005     { "window_title", OPT_STRING | HAS_ARG, {(void*)&window_title}, "set window title", "window title" },
3006 #if CONFIG_AVFILTER
3007     { "vf", OPT_STRING | HAS_ARG, {(void*)&vfilters}, "video filters", "filter list" },
3008 #endif
3009     { "rdftspeed", OPT_INT | HAS_ARG| OPT_AUDIO | OPT_EXPERT, {(void*)&rdftspeed}, "rdft speed", "msecs" },
3010     { "showmode", HAS_ARG, {(void*)opt_show_mode}, "select show mode (0 = video, 1 = waves, 2 = RDFT)", "mode" },
3011     { "default", HAS_ARG | OPT_AUDIO | OPT_VIDEO | OPT_EXPERT, {(void*)opt_default}, "generic catch all option", "" },
3012     { "i", OPT_BOOL, {(void *)&dummy}, "read specified file", "input_file"},
3013     { "codec", HAS_ARG | OPT_FUNC2, {(void*)opt_codec}, "force decoder", "decoder" },
3014     { NULL, },
3015 };
3016
3017 static void show_usage(void)
3018 {
3019     printf("Simple media player\n");
3020     printf("usage: %s [options] input_file\n", program_name);
3021     printf("\n");
3022 }
3023
3024 static int opt_help(const char *opt, const char *arg)
3025 {
3026     av_log_set_callback(log_callback_help);
3027     show_usage();
3028     show_help_options(options, "Main options:\n",
3029                       OPT_EXPERT, 0);
3030     show_help_options(options, "\nAdvanced options:\n",
3031                       OPT_EXPERT, OPT_EXPERT);
3032     printf("\n");
3033     show_help_children(avcodec_get_class(), AV_OPT_FLAG_DECODING_PARAM);
3034     show_help_children(avformat_get_class(), AV_OPT_FLAG_DECODING_PARAM);
3035 #if !CONFIG_AVFILTER
3036     show_help_children(sws_get_class(), AV_OPT_FLAG_ENCODING_PARAM);
3037 #endif
3038     printf("\nWhile playing:\n"
3039            "q, ESC              quit\n"
3040            "f                   toggle full screen\n"
3041            "p, SPC              pause\n"
3042            "a                   cycle audio channel\n"
3043            "v                   cycle video channel\n"
3044            "t                   cycle subtitle channel\n"
3045            "w                   show audio waves\n"
3046            "s                   activate frame-step mode\n"
3047            "left/right          seek backward/forward 10 seconds\n"
3048            "down/up             seek backward/forward 1 minute\n"
3049            "mouse click         seek to percentage in file corresponding to fraction of width\n"
3050            );
3051     return 0;
3052 }
3053
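/* Lock manager callback registered with av_lockmgr_register() in main():
 * libavcodec invokes it to create, obtain, release and destroy mutexes,
 * which we back with SDL mutexes. It must return 0 on success and a
 * nonzero value on failure. */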
3054 static int lockmgr(void **mtx, enum AVLockOp op)
3055 {
3056     switch (op) {
3057     case AV_LOCK_CREATE:
3058         *mtx = SDL_CreateMutex();
3059         if (!*mtx)
3060             return 1;
3061         return 0;
3062     case AV_LOCK_OBTAIN:
3063         return !!SDL_LockMutex(*mtx);
3064     case AV_LOCK_RELEASE:
3065         return !!SDL_UnlockMutex(*mtx);
3066     case AV_LOCK_DESTROY:
3067         SDL_DestroyMutex(*mtx);
3068         return 0;
3069     }
3070     return 1;
3071 }
3072
3073 /* Entry point: parse the command line, initialize FFmpeg and SDL, open the input and run the event loop. */
3074 int main(int argc, char **argv)
3075 {
3076     int flags;
3077     VideoState *is;
3078
3079     av_log_set_flags(AV_LOG_SKIP_REPEATED);
3080     parse_loglevel(argc, argv, options);
3081
3082     /* register all codecs, demuxers and protocols */
3083     avcodec_register_all();
3084 #if CONFIG_AVDEVICE
3085     avdevice_register_all();
3086 #endif
3087 #if CONFIG_AVFILTER
3088     avfilter_register_all();
3089 #endif
3090     av_register_all();
3091
3092     init_opts();
3093
3094     show_banner();
3095
3096     parse_options(NULL, argc, argv, options, opt_input_file);
3097
3098     if (!input_filename) {
3099         show_usage();
3100         fprintf(stderr, "An input file must be specified\n");
3101         fprintf(stderr, "Use -h to get full help or, even better, run 'man %s'\n", program_name);
3102         exit(1);
3103     }
3104
3105     if (display_disable) {
3106         video_disable = 1;
3107     }
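    /* Request only the SDL subsystems the player uses; audio is dropped
     * below when it was disabled on the command line. */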
3108     flags = SDL_INIT_VIDEO | SDL_INIT_AUDIO | SDL_INIT_TIMER;
3109     if (audio_disable)
3110         flags &= ~SDL_INIT_AUDIO;
3111 #if !defined(__MINGW32__) && !defined(__APPLE__)
3112     flags |= SDL_INIT_EVENTTHREAD; /* Not supported on Windows or Mac OS X */
3113 #endif
3114     if (SDL_Init(flags)) {
3115         fprintf(stderr, "Could not initialize SDL - %s\n", SDL_GetError());
3116         fprintf(stderr, "(Did you set the DISPLAY variable?)\n");
3117         exit(1);
3118     }
3119
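    /* Record the desktop dimensions (when SDL exposes them) so toggling
     * full screen can use the native resolution. */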
3120     if (!display_disable) {
3121 #if HAVE_SDL_VIDEO_SIZE
3122         const SDL_VideoInfo *vi = SDL_GetVideoInfo();
3123         fs_screen_width = vi->current_w;
3124         fs_screen_height = vi->current_h;
3125 #endif
3126     }
3127
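    /* Ignore SDL event types the player never processes so they do not
     * clutter the event queue. */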
3128     SDL_EventState(SDL_ACTIVEEVENT, SDL_IGNORE);
3129     SDL_EventState(SDL_SYSWMEVENT, SDL_IGNORE);
3130     SDL_EventState(SDL_USEREVENT, SDL_IGNORE);
3131
3132     if (av_lockmgr_register(lockmgr)) {
3133         fprintf(stderr, "Could not initialize lock manager!\n");
3134         do_exit(NULL);
3135     }
3136
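    /* flush_pkt is a sentinel packet: it is queued on seeks so that the
     * decoder threads know to flush their codec buffers before decoding
     * the data that follows. */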
3137     av_init_packet(&flush_pkt);
3138     flush_pkt.data = (uint8_t *)"FLUSH";
3139
3140     is = stream_open(input_filename, file_iformat);
3141     if (!is) {
3142         fprintf(stderr, "Failed to initialize VideoState!\n");
3143         do_exit(NULL);
3144     }
3145
3146     event_loop(is);
3147
3148     /* never reached: event_loop() does not return; quitting goes through do_exit() */
3149
3150     return 0;
3151 }