1 /*
2  * ffplay: Simple Media Player based on the FFmpeg libraries
3  * Copyright (c) 2003 Fabrice Bellard
4  *
5  * This file is part of FFmpeg.
6  *
7  * FFmpeg is free software; you can redistribute it and/or
8  * modify it under the terms of the GNU Lesser General Public
9  * License as published by the Free Software Foundation; either
10  * version 2.1 of the License, or (at your option) any later version.
11  *
12  * FFmpeg is distributed in the hope that it will be useful,
13  * but WITHOUT ANY WARRANTY; without even the implied warranty of
14  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
15  * Lesser General Public License for more details.
16  *
17  * You should have received a copy of the GNU Lesser General Public
18  * License along with FFmpeg; if not, write to the Free Software
19  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
20  */
21
22 #include "config.h"
23 #include <inttypes.h>
24 #include <math.h>
25 #include <limits.h>
26 #include "libavutil/avstring.h"
27 #include "libavutil/colorspace.h"
28 #include "libavutil/mathematics.h"
29 #include "libavutil/pixdesc.h"
30 #include "libavutil/imgutils.h"
31 #include "libavutil/dict.h"
32 #include "libavutil/parseutils.h"
33 #include "libavutil/samplefmt.h"
34 #include "libavutil/avassert.h"
35 #include "libavformat/avformat.h"
36 #include "libavdevice/avdevice.h"
37 #include "libswscale/swscale.h"
38 #include "libavcodec/audioconvert.h"
39 #include "libavutil/opt.h"
40 #include "libavcodec/avfft.h"
41 #include "libswresample/swresample.h"
42
43 #if CONFIG_AVFILTER
44 # include "libavfilter/avcodec.h"
45 # include "libavfilter/avfilter.h"
46 # include "libavfilter/avfiltergraph.h"
47 # include "libavfilter/buffersink.h"
48 #endif
49
50 #include <SDL.h>
51 #include <SDL_thread.h>
52
53 #include "cmdutils.h"
54
55 #include <unistd.h>
56 #include <assert.h>
57
58 const char program_name[] = "ffplay";
59 const int program_birth_year = 2003;
60
61 #define MAX_QUEUE_SIZE (15 * 1024 * 1024)
62 #define MIN_AUDIOQ_SIZE (20 * 16 * 1024)
63 #define MIN_FRAMES 5
64
65 /* SDL audio buffer size, in samples. Should be small to have precise
66    A/V sync as SDL does not have hardware buffer fullness info. */
67 #define SDL_AUDIO_BUFFER_SIZE 1024
68
69 /* no AV sync correction is done if below the AV sync threshold */
70 #define AV_SYNC_THRESHOLD 0.01
71 /* no AV correction is done if too big error */
72 #define AV_NOSYNC_THRESHOLD 10.0
73
74 #define FRAME_SKIP_FACTOR 0.05
75
76 /* maximum audio speed change to get correct sync */
77 #define SAMPLE_CORRECTION_PERCENT_MAX 10
78
79 /* we use about AUDIO_DIFF_AVG_NB A-V differences to compute the average */
80 #define AUDIO_DIFF_AVG_NB   20
81
82 /* NOTE: the size must be big enough to compensate for the hardware audio buffer size */
83 #define SAMPLE_ARRAY_SIZE (2*65536)
84
85 static int sws_flags = SWS_BICUBIC;
86
87 typedef struct PacketQueue {
88     AVPacketList *first_pkt, *last_pkt;
89     int nb_packets;
90     int size;
91     int abort_request;
92     SDL_mutex *mutex;
93     SDL_cond *cond;
94 } PacketQueue;
95
96 #define VIDEO_PICTURE_QUEUE_SIZE 2
97 #define SUBPICTURE_QUEUE_SIZE 4
98
99 typedef struct VideoPicture {
100     double pts;                                  ///<presentation time stamp for this picture
101     double target_clock;                         ///<av_gettime() time at which this should be displayed ideally
102     double duration;                             ///<expected duration of the frame
103     int64_t pos;                                 ///<byte position in file
104     SDL_Overlay *bmp;
105     int width, height; /* source height & width */
106     int allocated;
107     enum PixelFormat pix_fmt;
108
109 #if CONFIG_AVFILTER
110     AVFilterBufferRef *picref;
111 #endif
112 } VideoPicture;
113
114 typedef struct SubPicture {
115     double pts; /* presentation time stamp for this picture */
116     AVSubtitle sub;
117 } SubPicture;
118
119 enum {
120     AV_SYNC_AUDIO_MASTER, /* default choice */
121     AV_SYNC_VIDEO_MASTER,
122     AV_SYNC_EXTERNAL_CLOCK, /* synchronize to an external clock */
123 };
124
125 typedef struct VideoState {
126     SDL_Thread *read_tid;
127     SDL_Thread *video_tid;
128     SDL_Thread *refresh_tid;
129     AVInputFormat *iformat;
130     int no_background;
131     int abort_request;
132     int paused;
133     int last_paused;
134     int seek_req;
135     int seek_flags;
136     int64_t seek_pos;
137     int64_t seek_rel;
138     int read_pause_return;
139     AVFormatContext *ic;
140
141     int audio_stream;
142
143     int av_sync_type;
144     double external_clock; /* external clock base */
145     int64_t external_clock_time;
146
147     double audio_clock;
148     double audio_diff_cum; /* used for AV difference average computation */
149     double audio_diff_avg_coef;
150     double audio_diff_threshold;
151     int audio_diff_avg_count;
152     AVStream *audio_st;
153     PacketQueue audioq;
154     int audio_hw_buf_size;
155     /* samples output by the codec. we reserve more space for avsync
156        compensation, resampling and format conversion */
157     DECLARE_ALIGNED(16,uint8_t,audio_buf1)[AVCODEC_MAX_AUDIO_FRAME_SIZE * 4];
158     DECLARE_ALIGNED(16,uint8_t,audio_buf2)[AVCODEC_MAX_AUDIO_FRAME_SIZE * 4];
159     uint8_t *audio_buf;
160     unsigned int audio_buf_size; /* in bytes */
161     int audio_buf_index; /* in bytes */
162     int audio_write_buf_size;
163     AVPacket audio_pkt_temp;
164     AVPacket audio_pkt;
165     enum AVSampleFormat audio_src_fmt;
166     enum AVSampleFormat audio_tgt_fmt;
167     int audio_src_channels;
168     int audio_tgt_channels;
169     int64_t audio_src_channel_layout;
170     int64_t audio_tgt_channel_layout;
171     int audio_src_freq;
172     int audio_tgt_freq;
173     struct SwrContext *swr_ctx;
174     double audio_current_pts;
175     double audio_current_pts_drift;
176
177     enum ShowMode {
178         SHOW_MODE_NONE = -1, SHOW_MODE_VIDEO = 0, SHOW_MODE_WAVES, SHOW_MODE_RDFT, SHOW_MODE_NB
179     } show_mode;
180     int16_t sample_array[SAMPLE_ARRAY_SIZE];
181     int sample_array_index;
182     int last_i_start;
183     RDFTContext *rdft;
184     int rdft_bits;
185     FFTSample *rdft_data;
186     int xpos;
187
188     SDL_Thread *subtitle_tid;
189     int subtitle_stream;
190     int subtitle_stream_changed;
191     AVStream *subtitle_st;
192     PacketQueue subtitleq;
193     SubPicture subpq[SUBPICTURE_QUEUE_SIZE];
194     int subpq_size, subpq_rindex, subpq_windex;
195     SDL_mutex *subpq_mutex;
196     SDL_cond *subpq_cond;
197
198     double frame_timer;
199     double frame_last_pts;
200     double frame_last_delay;
201     double video_clock;                          ///<pts of last decoded frame / predicted pts of next decoded frame
202     int video_stream;
203     AVStream *video_st;
204     PacketQueue videoq;
205     double video_current_pts;                    ///<current displayed pts (different from video_clock if frame fifos are used)
206     double video_current_pts_drift;              ///<video_current_pts - time (av_gettime) at which we updated video_current_pts - used to have running video pts
207     int64_t video_current_pos;                   ///<current displayed file pos
208     VideoPicture pictq[VIDEO_PICTURE_QUEUE_SIZE];
209     int pictq_size, pictq_rindex, pictq_windex;
210     SDL_mutex *pictq_mutex;
211     SDL_cond *pictq_cond;
212 #if !CONFIG_AVFILTER
213     struct SwsContext *img_convert_ctx;
214 #endif
215
216     char filename[1024];
217     int width, height, xleft, ytop;
218     int step;
219
220 #if CONFIG_AVFILTER
221     AVFilterContext *out_video_filter;          ///<the last filter in the video chain
222 #endif
223
224     float skip_frames;
225     float skip_frames_index;
226     int refresh;
227 } VideoState;
228
229 static int opt_help(const char *opt, const char *arg);
230
231 /* options specified by the user */
232 static AVInputFormat *file_iformat;
233 static const char *input_filename;
234 static const char *window_title;
235 static int fs_screen_width;
236 static int fs_screen_height;
237 static int screen_width = 0;
238 static int screen_height = 0;
239 static int audio_disable;
240 static int video_disable;
241 static int wanted_stream[AVMEDIA_TYPE_NB]={
242     [AVMEDIA_TYPE_AUDIO]=-1,
243     [AVMEDIA_TYPE_VIDEO]=-1,
244     [AVMEDIA_TYPE_SUBTITLE]=-1,
245 };
246 static int seek_by_bytes=-1;
247 static int display_disable;
248 static int show_status = 1;
249 static int av_sync_type = AV_SYNC_AUDIO_MASTER;
250 static int64_t start_time = AV_NOPTS_VALUE;
251 static int64_t duration = AV_NOPTS_VALUE;
252 static int workaround_bugs = 1;
253 static int fast = 0;
254 static int genpts = 0;
255 static int lowres = 0;
256 static int idct = FF_IDCT_AUTO;
257 static enum AVDiscard skip_frame= AVDISCARD_DEFAULT;
258 static enum AVDiscard skip_idct= AVDISCARD_DEFAULT;
259 static enum AVDiscard skip_loop_filter= AVDISCARD_DEFAULT;
260 static int error_recognition = FF_ER_CAREFUL;
261 static int error_concealment = 3;
262 static int decoder_reorder_pts= -1;
263 static int autoexit;
264 static int exit_on_keydown;
265 static int exit_on_mousedown;
266 static int loop=1;
267 static int framedrop=-1;
268 static enum ShowMode show_mode = SHOW_MODE_NONE;
269 static const char *audio_codec_name;
270 static const char *subtitle_codec_name;
271 static const char *video_codec_name;
272
273 static int rdftspeed=20;
274 #if CONFIG_AVFILTER
275 static char *vfilters = NULL;
276 #endif
277
278 /* current context */
279 static int is_full_screen;
280 static int64_t audio_callback_time;
281
282 static AVPacket flush_pkt;
283
284 #define FF_ALLOC_EVENT   (SDL_USEREVENT)
285 #define FF_REFRESH_EVENT (SDL_USEREVENT + 1)
286 #define FF_QUIT_EVENT    (SDL_USEREVENT + 2)
287
288 static SDL_Surface *screen;
289
290 void exit_program(int ret)
291 {
292     exit(ret);
293 }
294
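/* Append a packet to the queue: the packet data is duplicated (except for
   the flush packet), the queue byte count is updated and a waiting reader,
   if any, is woken up. */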
295 static int packet_queue_put(PacketQueue *q, AVPacket *pkt)
296 {
297     AVPacketList *pkt1;
298
299     /* duplicate the packet */
300     if (pkt!=&flush_pkt && av_dup_packet(pkt) < 0)
301         return -1;
302
303     pkt1 = av_malloc(sizeof(AVPacketList));
304     if (!pkt1)
305         return -1;
306     pkt1->pkt = *pkt;
307     pkt1->next = NULL;
308
309
310     SDL_LockMutex(q->mutex);
311
312     if (!q->last_pkt)
313
314         q->first_pkt = pkt1;
315     else
316         q->last_pkt->next = pkt1;
317     q->last_pkt = pkt1;
318     q->nb_packets++;
319     q->size += pkt1->pkt.size + sizeof(*pkt1);
320     /* XXX: should duplicate packet data in DV case */
321     SDL_CondSignal(q->cond);
322
323     SDL_UnlockMutex(q->mutex);
324     return 0;
325 }
326
327 /* packet queue handling */
328 static void packet_queue_init(PacketQueue *q)
329 {
330     memset(q, 0, sizeof(PacketQueue));
331     q->mutex = SDL_CreateMutex();
332     q->cond = SDL_CreateCond();
333     packet_queue_put(q, &flush_pkt);
334 }
335
336 static void packet_queue_flush(PacketQueue *q)
337 {
338     AVPacketList *pkt, *pkt1;
339
340     SDL_LockMutex(q->mutex);
341     for(pkt = q->first_pkt; pkt != NULL; pkt = pkt1) {
342         pkt1 = pkt->next;
343         av_free_packet(&pkt->pkt);
344         av_freep(&pkt);
345     }
346     q->last_pkt = NULL;
347     q->first_pkt = NULL;
348     q->nb_packets = 0;
349     q->size = 0;
350     SDL_UnlockMutex(q->mutex);
351 }
352
353 static void packet_queue_end(PacketQueue *q)
354 {
355     packet_queue_flush(q);
356     SDL_DestroyMutex(q->mutex);
357     SDL_DestroyCond(q->cond);
358 }
359
360 static void packet_queue_abort(PacketQueue *q)
361 {
362     SDL_LockMutex(q->mutex);
363
364     q->abort_request = 1;
365
366     SDL_CondSignal(q->cond);
367
368     SDL_UnlockMutex(q->mutex);
369 }
370
371 /* return < 0 if aborted, 0 if no packet and > 0 if packet.  */
372 static int packet_queue_get(PacketQueue *q, AVPacket *pkt, int block)
373 {
374     AVPacketList *pkt1;
375     int ret;
376
377     SDL_LockMutex(q->mutex);
378
379     for(;;) {
380         if (q->abort_request) {
381             ret = -1;
382             break;
383         }
384
385         pkt1 = q->first_pkt;
386         if (pkt1) {
387             q->first_pkt = pkt1->next;
388             if (!q->first_pkt)
389                 q->last_pkt = NULL;
390             q->nb_packets--;
391             q->size -= pkt1->pkt.size + sizeof(*pkt1);
392             *pkt = pkt1->pkt;
393             av_free(pkt1);
394             ret = 1;
395             break;
396         } else if (!block) {
397             ret = 0;
398             break;
399         } else {
400             SDL_CondWait(q->cond, q->mutex);
401         }
402     }
403     SDL_UnlockMutex(q->mutex);
404     return ret;
405 }
406
407 static inline void fill_rectangle(SDL_Surface *screen,
408                                   int x, int y, int w, int h, int color)
409 {
410     SDL_Rect rect;
411     rect.x = x;
412     rect.y = y;
413     rect.w = w;
414     rect.h = h;
415     SDL_FillRect(screen, &rect, color);
416 }
417
418 #define ALPHA_BLEND(a, oldp, newp, s)\
419 ((((oldp << s) * (255 - (a))) + (newp * (a))) / (255 << s))
420
421 #define RGBA_IN(r, g, b, a, s)\
422 {\
423     unsigned int v = ((const uint32_t *)(s))[0];\
424     a = (v >> 24) & 0xff;\
425     r = (v >> 16) & 0xff;\
426     g = (v >> 8) & 0xff;\
427     b = v & 0xff;\
428 }
429
430 #define YUVA_IN(y, u, v, a, s, pal)\
431 {\
432     unsigned int val = ((const uint32_t *)(pal))[*(const uint8_t*)(s)];\
433     a = (val >> 24) & 0xff;\
434     y = (val >> 16) & 0xff;\
435     u = (val >> 8) & 0xff;\
436     v = val & 0xff;\
437 }
438
439 #define YUVA_OUT(d, y, u, v, a)\
440 {\
441     ((uint32_t *)(d))[0] = (a << 24) | (y << 16) | (u << 8) | v;\
442 }
443
444
445 #define BPP 1
446
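/* Alpha-blend a palettized (PAL8) subtitle rectangle onto a YUV420P
   destination picture, clipping the rectangle to the image bounds. */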
447 static void blend_subrect(AVPicture *dst, const AVSubtitleRect *rect, int imgw, int imgh)
448 {
449     int wrap, wrap3, width2, skip2;
450     int y, u, v, a, u1, v1, a1, w, h;
451     uint8_t *lum, *cb, *cr;
452     const uint8_t *p;
453     const uint32_t *pal;
454     int dstx, dsty, dstw, dsth;
455
456     dstw = av_clip(rect->w, 0, imgw);
457     dsth = av_clip(rect->h, 0, imgh);
458     dstx = av_clip(rect->x, 0, imgw - dstw);
459     dsty = av_clip(rect->y, 0, imgh - dsth);
460     lum = dst->data[0] + dsty * dst->linesize[0];
461     cb = dst->data[1] + (dsty >> 1) * dst->linesize[1];
462     cr = dst->data[2] + (dsty >> 1) * dst->linesize[2];
463
464     width2 = ((dstw + 1) >> 1) + (dstx & ~dstw & 1);
465     skip2 = dstx >> 1;
466     wrap = dst->linesize[0];
467     wrap3 = rect->pict.linesize[0];
468     p = rect->pict.data[0];
469     pal = (const uint32_t *)rect->pict.data[1];  /* Now in YCrCb! */
470
471     if (dsty & 1) {
472         lum += dstx;
473         cb += skip2;
474         cr += skip2;
475
476         if (dstx & 1) {
477             YUVA_IN(y, u, v, a, p, pal);
478             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
479             cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
480             cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
481             cb++;
482             cr++;
483             lum++;
484             p += BPP;
485         }
486         for(w = dstw - (dstx & 1); w >= 2; w -= 2) {
487             YUVA_IN(y, u, v, a, p, pal);
488             u1 = u;
489             v1 = v;
490             a1 = a;
491             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
492
493             YUVA_IN(y, u, v, a, p + BPP, pal);
494             u1 += u;
495             v1 += v;
496             a1 += a;
497             lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
498             cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
499             cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
500             cb++;
501             cr++;
502             p += 2 * BPP;
503             lum += 2;
504         }
505         if (w) {
506             YUVA_IN(y, u, v, a, p, pal);
507             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
508             cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
509             cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
510             p++;
511             lum++;
512         }
513         p += wrap3 - dstw * BPP;
514         lum += wrap - dstw - dstx;
515         cb += dst->linesize[1] - width2 - skip2;
516         cr += dst->linesize[2] - width2 - skip2;
517     }
518     for(h = dsth - (dsty & 1); h >= 2; h -= 2) {
519         lum += dstx;
520         cb += skip2;
521         cr += skip2;
522
523         if (dstx & 1) {
524             YUVA_IN(y, u, v, a, p, pal);
525             u1 = u;
526             v1 = v;
527             a1 = a;
528             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
529             p += wrap3;
530             lum += wrap;
531             YUVA_IN(y, u, v, a, p, pal);
532             u1 += u;
533             v1 += v;
534             a1 += a;
535             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
536             cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
537             cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
538             cb++;
539             cr++;
540             p += -wrap3 + BPP;
541             lum += -wrap + 1;
542         }
543         for(w = dstw - (dstx & 1); w >= 2; w -= 2) {
544             YUVA_IN(y, u, v, a, p, pal);
545             u1 = u;
546             v1 = v;
547             a1 = a;
548             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
549
550             YUVA_IN(y, u, v, a, p + BPP, pal);
551             u1 += u;
552             v1 += v;
553             a1 += a;
554             lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
555             p += wrap3;
556             lum += wrap;
557
558             YUVA_IN(y, u, v, a, p, pal);
559             u1 += u;
560             v1 += v;
561             a1 += a;
562             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
563
564             YUVA_IN(y, u, v, a, p + BPP, pal);
565             u1 += u;
566             v1 += v;
567             a1 += a;
568             lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
569
570             cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 2);
571             cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 2);
572
573             cb++;
574             cr++;
575             p += -wrap3 + 2 * BPP;
576             lum += -wrap + 2;
577         }
578         if (w) {
579             YUVA_IN(y, u, v, a, p, pal);
580             u1 = u;
581             v1 = v;
582             a1 = a;
583             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
584             p += wrap3;
585             lum += wrap;
586             YUVA_IN(y, u, v, a, p, pal);
587             u1 += u;
588             v1 += v;
589             a1 += a;
590             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
591             cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
592             cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
593             cb++;
594             cr++;
595             p += -wrap3 + BPP;
596             lum += -wrap + 1;
597         }
598         p += wrap3 + (wrap3 - dstw * BPP);
599         lum += wrap + (wrap - dstw - dstx);
600         cb += dst->linesize[1] - width2 - skip2;
601         cr += dst->linesize[2] - width2 - skip2;
602     }
603     /* handle odd height */
604     if (h) {
605         lum += dstx;
606         cb += skip2;
607         cr += skip2;
608
609         if (dstx & 1) {
610             YUVA_IN(y, u, v, a, p, pal);
611             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
612             cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
613             cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
614             cb++;
615             cr++;
616             lum++;
617             p += BPP;
618         }
619         for(w = dstw - (dstx & 1); w >= 2; w -= 2) {
620             YUVA_IN(y, u, v, a, p, pal);
621             u1 = u;
622             v1 = v;
623             a1 = a;
624             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
625
626             YUVA_IN(y, u, v, a, p + BPP, pal);
627             u1 += u;
628             v1 += v;
629             a1 += a;
630             lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
631             cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
632             cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
633             cb++;
634             cr++;
635             p += 2 * BPP;
636             lum += 2;
637         }
638         if (w) {
639             YUVA_IN(y, u, v, a, p, pal);
640             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
641             cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
642             cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
643         }
644     }
645 }
646
647 static void free_subpicture(SubPicture *sp)
648 {
649     avsubtitle_free(&sp->sub);
650 }
651
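/* Display the picture at the read index of the picture queue: blend any
   subtitle that is due into the overlay, then show it centered and
   letterboxed according to the source aspect ratio. */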
652 static void video_image_display(VideoState *is)
653 {
654     VideoPicture *vp;
655     SubPicture *sp;
656     AVPicture pict;
657     float aspect_ratio;
658     int width, height, x, y;
659     SDL_Rect rect;
660     int i;
661
662     vp = &is->pictq[is->pictq_rindex];
663     if (vp->bmp) {
664 #if CONFIG_AVFILTER
665          if (vp->picref->video->sample_aspect_ratio.num == 0)
666              aspect_ratio = 0;
667          else
668              aspect_ratio = av_q2d(vp->picref->video->sample_aspect_ratio);
669 #else
670
671         /* XXX: use the sample aspect ratio stored in the frame */
672         if (is->video_st->sample_aspect_ratio.num)
673             aspect_ratio = av_q2d(is->video_st->sample_aspect_ratio);
674         else if (is->video_st->codec->sample_aspect_ratio.num)
675             aspect_ratio = av_q2d(is->video_st->codec->sample_aspect_ratio);
676         else
677             aspect_ratio = 0;
678 #endif
679         if (aspect_ratio <= 0.0)
680             aspect_ratio = 1.0;
681         aspect_ratio *= (float)vp->width / (float)vp->height;
682
683         if (is->subtitle_st) {
684             if (is->subpq_size > 0) {
685                 sp = &is->subpq[is->subpq_rindex];
686
687                 if (vp->pts >= sp->pts + ((float) sp->sub.start_display_time / 1000)) {
688                     SDL_LockYUVOverlay (vp->bmp);
689
690                     pict.data[0] = vp->bmp->pixels[0];
691                     pict.data[1] = vp->bmp->pixels[2];
692                     pict.data[2] = vp->bmp->pixels[1];
693
694                     pict.linesize[0] = vp->bmp->pitches[0];
695                     pict.linesize[1] = vp->bmp->pitches[2];
696                     pict.linesize[2] = vp->bmp->pitches[1];
697
698                     for (i = 0; i < sp->sub.num_rects; i++)
699                         blend_subrect(&pict, sp->sub.rects[i],
700                                       vp->bmp->w, vp->bmp->h);
701
702                     SDL_UnlockYUVOverlay (vp->bmp);
703                 }
704             }
705         }
706
707
708         /* XXX: we assume the screen has a 1:1 pixel aspect ratio */
709         height = is->height;
710         width = ((int)rint(height * aspect_ratio)) & ~1;
711         if (width > is->width) {
712             width = is->width;
713             height = ((int)rint(width / aspect_ratio)) & ~1;
714         }
715         x = (is->width - width) / 2;
716         y = (is->height - height) / 2;
717         is->no_background = 0;
718         rect.x = is->xleft + x;
719         rect.y = is->ytop  + y;
720         rect.w = FFMAX(width,  1);
721         rect.h = FFMAX(height, 1);
722         SDL_DisplayYUVOverlay(vp->bmp, &rect);
723     }
724 }
725
726 static inline int compute_mod(int a, int b)
727 {
728     return a < 0 ? a%b + b : a%b;
729 }
730
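/* Audio-only visualization: draw either the waveform of the most recently
   played samples or one column of an RDFT spectrogram, depending on the
   current show_mode. */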
731 static void video_audio_display(VideoState *s)
732 {
733     int i, i_start, x, y1, y, ys, delay, n, nb_display_channels;
734     int ch, channels, h, h2, bgcolor, fgcolor;
735     int64_t time_diff;
736     int rdft_bits, nb_freq;
737
738     for(rdft_bits=1; (1<<rdft_bits)<2*s->height; rdft_bits++)
739         ;
740     nb_freq= 1<<(rdft_bits-1);
741
742     /* compute display index: center on currently output samples */
743     channels = s->audio_tgt_channels;
744     nb_display_channels = channels;
745     if (!s->paused) {
746         int data_used= s->show_mode == SHOW_MODE_WAVES ? s->width : (2*nb_freq);
747         n = 2 * channels;
748         delay = s->audio_write_buf_size;
749         delay /= n;
750
751         /* to be more precise, we take into account the time spent since
752            the last buffer computation */
753         if (audio_callback_time) {
754             time_diff = av_gettime() - audio_callback_time;
755             delay -= (time_diff * s->audio_tgt_freq) / 1000000;
756         }
757
758         delay += 2*data_used;
759         if (delay < data_used)
760             delay = data_used;
761
762         i_start= x = compute_mod(s->sample_array_index - delay * channels, SAMPLE_ARRAY_SIZE);
763         if (s->show_mode == SHOW_MODE_WAVES) {
764             h= INT_MIN;
765             for(i=0; i<1000; i+=channels){
766                 int idx= (SAMPLE_ARRAY_SIZE + x - i) % SAMPLE_ARRAY_SIZE;
767                 int a= s->sample_array[idx];
768                 int b= s->sample_array[(idx + 4*channels)%SAMPLE_ARRAY_SIZE];
769                 int c= s->sample_array[(idx + 5*channels)%SAMPLE_ARRAY_SIZE];
770                 int d= s->sample_array[(idx + 9*channels)%SAMPLE_ARRAY_SIZE];
771                 int score= a-d;
772                 if(h<score && (b^c)<0){
773                     h= score;
774                     i_start= idx;
775                 }
776             }
777         }
778
779         s->last_i_start = i_start;
780     } else {
781         i_start = s->last_i_start;
782     }
783
784     bgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0x00);
785     if (s->show_mode == SHOW_MODE_WAVES) {
786         fill_rectangle(screen,
787                        s->xleft, s->ytop, s->width, s->height,
788                        bgcolor);
789
790         fgcolor = SDL_MapRGB(screen->format, 0xff, 0xff, 0xff);
791
792         /* total height for one channel */
793         h = s->height / nb_display_channels;
794         /* graph height / 2 */
795         h2 = (h * 9) / 20;
796         for(ch = 0;ch < nb_display_channels; ch++) {
797             i = i_start + ch;
798             y1 = s->ytop + ch * h + (h / 2); /* position of center line */
799             for(x = 0; x < s->width; x++) {
800                 y = (s->sample_array[i] * h2) >> 15;
801                 if (y < 0) {
802                     y = -y;
803                     ys = y1 - y;
804                 } else {
805                     ys = y1;
806                 }
807                 fill_rectangle(screen,
808                                s->xleft + x, ys, 1, y,
809                                fgcolor);
810                 i += channels;
811                 if (i >= SAMPLE_ARRAY_SIZE)
812                     i -= SAMPLE_ARRAY_SIZE;
813             }
814         }
815
816         fgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0xff);
817
818         for(ch = 1;ch < nb_display_channels; ch++) {
819             y = s->ytop + ch * h;
820             fill_rectangle(screen,
821                            s->xleft, y, s->width, 1,
822                            fgcolor);
823         }
824         SDL_UpdateRect(screen, s->xleft, s->ytop, s->width, s->height);
825     }else{
826         nb_display_channels= FFMIN(nb_display_channels, 2);
827         if(rdft_bits != s->rdft_bits){
828             av_rdft_end(s->rdft);
829             av_free(s->rdft_data);
830             s->rdft = av_rdft_init(rdft_bits, DFT_R2C);
831             s->rdft_bits= rdft_bits;
832             s->rdft_data= av_malloc(4*nb_freq*sizeof(*s->rdft_data));
833         }
834         {
835             FFTSample *data[2];
836             for(ch = 0;ch < nb_display_channels; ch++) {
837                 data[ch] = s->rdft_data + 2*nb_freq*ch;
838                 i = i_start + ch;
839                 for(x = 0; x < 2*nb_freq; x++) {
840                     double w= (x-nb_freq)*(1.0/nb_freq);
841                     data[ch][x]= s->sample_array[i]*(1.0-w*w);
842                     i += channels;
843                     if (i >= SAMPLE_ARRAY_SIZE)
844                         i -= SAMPLE_ARRAY_SIZE;
845                 }
846                 av_rdft_calc(s->rdft, data[ch]);
847             }
848             // Least efficient way to do this; we should access the data directly, but it's more than fast enough
849             for(y=0; y<s->height; y++){
850                 double w= 1/sqrt(nb_freq);
851                 int a= sqrt(w*sqrt(data[0][2*y+0]*data[0][2*y+0] + data[0][2*y+1]*data[0][2*y+1]));
852                 int b= (nb_display_channels == 2 ) ? sqrt(w*sqrt(data[1][2*y+0]*data[1][2*y+0]
853                        + data[1][2*y+1]*data[1][2*y+1])) : a;
854                 a= FFMIN(a,255);
855                 b= FFMIN(b,255);
856                 fgcolor = SDL_MapRGB(screen->format, a, b, (a+b)/2);
857
858                 fill_rectangle(screen,
859                             s->xpos, s->height-y, 1, 1,
860                             fgcolor);
861             }
862         }
863         SDL_UpdateRect(screen, s->xpos, s->ytop, 1, s->height);
864         s->xpos++;
865         if(s->xpos >= s->width)
866             s->xpos= s->xleft;
867     }
868 }
869
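/* Tear down a VideoState: stop the read and refresh threads, free the
   queued pictures, destroy the mutexes/conditions and the scaler context,
   and release the state itself. */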
870 static void stream_close(VideoState *is)
871 {
872     VideoPicture *vp;
873     int i;
874     /* XXX: use a special url_shutdown call to abort parse cleanly */
875     is->abort_request = 1;
876     SDL_WaitThread(is->read_tid, NULL);
877     SDL_WaitThread(is->refresh_tid, NULL);
878
879     /* free all pictures */
880     for(i=0;i<VIDEO_PICTURE_QUEUE_SIZE; i++) {
881         vp = &is->pictq[i];
882 #if CONFIG_AVFILTER
883         if (vp->picref) {
884             avfilter_unref_buffer(vp->picref);
885             vp->picref = NULL;
886         }
887 #endif
888         if (vp->bmp) {
889             SDL_FreeYUVOverlay(vp->bmp);
890             vp->bmp = NULL;
891         }
892     }
893     SDL_DestroyMutex(is->pictq_mutex);
894     SDL_DestroyCond(is->pictq_cond);
895     SDL_DestroyMutex(is->subpq_mutex);
896     SDL_DestroyCond(is->subpq_cond);
897 #if !CONFIG_AVFILTER
898     if (is->img_convert_ctx)
899         sws_freeContext(is->img_convert_ctx);
900 #endif
901     av_free(is);
902 }
903
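/* Global cleanup and exit: close the stream, unregister the lock manager,
   free option and filter state, shut down SDL and terminate the process. */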
904 static void do_exit(VideoState *is)
905 {
906     if (is) {
907         stream_close(is);
908     }
909     av_lockmgr_register(NULL);
910     uninit_opts();
911 #if CONFIG_AVFILTER
912     avfilter_uninit();
913 #endif
914     if (show_status)
915         printf("\n");
916     SDL_Quit();
917     av_log(NULL, AV_LOG_QUIET, "%s", "");
918     exit(0);
919 }
920
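/* Create or resize the SDL video surface (a no-op if the current surface
   already matches): the size comes from the fullscreen resolution, the
   user-requested dimensions, the filter/codec picture size, or a 640x480
   fallback. Also sets the window caption. */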
921 static int video_open(VideoState *is){
922     int flags = SDL_HWSURFACE|SDL_ASYNCBLIT|SDL_HWACCEL;
923     int w,h;
924
925     if(is_full_screen) flags |= SDL_FULLSCREEN;
926     else               flags |= SDL_RESIZABLE;
927
928     if (is_full_screen && fs_screen_width) {
929         w = fs_screen_width;
930         h = fs_screen_height;
931     } else if(!is_full_screen && screen_width){
932         w = screen_width;
933         h = screen_height;
934 #if CONFIG_AVFILTER
935     }else if (is->out_video_filter && is->out_video_filter->inputs[0]){
936         w = is->out_video_filter->inputs[0]->w;
937         h = is->out_video_filter->inputs[0]->h;
938 #else
939     }else if (is->video_st && is->video_st->codec->width){
940         w = is->video_st->codec->width;
941         h = is->video_st->codec->height;
942 #endif
943     } else {
944         w = 640;
945         h = 480;
946     }
947     if(screen && is->width == screen->w && screen->w == w
948        && is->height== screen->h && screen->h == h)
949         return 0;
950
951 #ifndef __APPLE__
952     screen = SDL_SetVideoMode(w, h, 0, flags);
953 #else
954     /* setting bits_per_pixel = 0 or 32 causes blank video on OS X */
955     screen = SDL_SetVideoMode(w, h, 24, flags);
956 #endif
957     if (!screen) {
958         fprintf(stderr, "SDL: could not set video mode - exiting\n");
959         do_exit(is);
960     }
961     if (!window_title)
962         window_title = input_filename;
963     SDL_WM_SetCaption(window_title, window_title);
964
965     is->width = screen->w;
966     is->height = screen->h;
967
968     return 0;
969 }
970
971 /* display the current picture, if any */
972 static void video_display(VideoState *is)
973 {
974     if(!screen)
975         video_open(is);
976     if (is->audio_st && is->show_mode != SHOW_MODE_VIDEO)
977         video_audio_display(is);
978     else if (is->video_st)
979         video_image_display(is);
980 }
981
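/* Thread that periodically pushes an FF_REFRESH_EVENT (at most one
   outstanding, tracked by is->refresh) so the event loop redraws the
   display; it sleeps ~5 ms between polls for video, or rdftspeed ms for
   audio visualizations. */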
982 static int refresh_thread(void *opaque)
983 {
984     VideoState *is= opaque;
985     while(!is->abort_request){
986         SDL_Event event;
987         event.type = FF_REFRESH_EVENT;
988         event.user.data1 = opaque;
989         if(!is->refresh){
990             is->refresh=1;
991             SDL_PushEvent(&event);
992         }
993         // FIXME: ideally we should wait the correct time, but SDL's event passing is so slow it would be silly
994         usleep(is->audio_st && is->show_mode != SHOW_MODE_VIDEO ? rdftspeed*1000 : 5000);
995     }
996     return 0;
997 }
998
999 /* get the current audio clock value */
1000 static double get_audio_clock(VideoState *is)
1001 {
1002     if (is->paused) {
1003         return is->audio_current_pts;
1004     } else {
1005         return is->audio_current_pts_drift + av_gettime() / 1000000.0;
1006     }
1007 }
1008
1009 /* get the current video clock value */
1010 static double get_video_clock(VideoState *is)
1011 {
1012     if (is->paused) {
1013         return is->video_current_pts;
1014     } else {
1015         return is->video_current_pts_drift + av_gettime() / 1000000.0;
1016     }
1017 }
1018
1019 /* get the current external clock value */
1020 static double get_external_clock(VideoState *is)
1021 {
1022     int64_t ti;
1023     ti = av_gettime();
1024     return is->external_clock + ((ti - is->external_clock_time) * 1e-6);
1025 }
1026
1027 /* get the current master clock value */
1028 static double get_master_clock(VideoState *is)
1029 {
1030     double val;
1031
1032     if (is->av_sync_type == AV_SYNC_VIDEO_MASTER) {
1033         if (is->video_st)
1034             val = get_video_clock(is);
1035         else
1036             val = get_audio_clock(is);
1037     } else if (is->av_sync_type == AV_SYNC_AUDIO_MASTER) {
1038         if (is->audio_st)
1039             val = get_audio_clock(is);
1040         else
1041             val = get_video_clock(is);
1042     } else {
1043         val = get_external_clock(is);
1044     }
1045     return val;
1046 }
1047
1048 /* seek in the stream */
1049 static void stream_seek(VideoState *is, int64_t pos, int64_t rel, int seek_by_bytes)
1050 {
1051     if (!is->seek_req) {
1052         is->seek_pos = pos;
1053         is->seek_rel = rel;
1054         is->seek_flags &= ~AVSEEK_FLAG_BYTE;
1055         if (seek_by_bytes)
1056             is->seek_flags |= AVSEEK_FLAG_BYTE;
1057         is->seek_req = 1;
1058     }
1059 }
1060
1061 /* pause or resume the video */
1062 static void stream_toggle_pause(VideoState *is)
1063 {
1064     if (is->paused) {
1065         is->frame_timer += av_gettime() / 1000000.0 + is->video_current_pts_drift - is->video_current_pts;
1066         if(is->read_pause_return != AVERROR(ENOSYS)){
1067             is->video_current_pts = is->video_current_pts_drift + av_gettime() / 1000000.0;
1068         }
1069         is->video_current_pts_drift = is->video_current_pts - av_gettime() / 1000000.0;
1070     }
1071     is->paused = !is->paused;
1072 }
1073
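/* Compute the absolute time at which the given frame should be displayed:
   take the pts delta to the previous frame as the nominal delay, drop or
   double it to follow the master clock when video is not the master, and
   return the advanced frame_timer. */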
1074 static double compute_target_time(double frame_current_pts, VideoState *is)
1075 {
1076     double delay, sync_threshold, diff;
1077
1078     /* compute nominal delay */
1079     delay = frame_current_pts - is->frame_last_pts;
1080     if (delay <= 0 || delay >= 10.0) {
1081         /* if incorrect delay, use previous one */
1082         delay = is->frame_last_delay;
1083     } else {
1084         is->frame_last_delay = delay;
1085     }
1086     is->frame_last_pts = frame_current_pts;
1087
1088     /* update delay to follow master synchronisation source */
1089     if (((is->av_sync_type == AV_SYNC_AUDIO_MASTER && is->audio_st) ||
1090          is->av_sync_type == AV_SYNC_EXTERNAL_CLOCK)) {
1091         /* if video is slave, we try to correct big delays by
1092            duplicating or deleting a frame */
1093         diff = get_video_clock(is) - get_master_clock(is);
1094
1095         /* skip or repeat frame. We take into account the
1096            delay to compute the threshold. I still don't know
1097            if it is the best guess */
1098         sync_threshold = FFMAX(AV_SYNC_THRESHOLD, delay);
1099         if (fabs(diff) < AV_NOSYNC_THRESHOLD) {
1100             if (diff <= -sync_threshold)
1101                 delay = 0;
1102             else if (diff >= sync_threshold)
1103                 delay = 2 * delay;
1104         }
1105     }
1106     is->frame_timer += delay;
1107
1108     av_dlog(NULL, "video: delay=%0.3f pts=%0.3f A-V=%f\n",
1109             delay, frame_current_pts, -diff);
1110
1111     return is->frame_timer;
1112 }
1113
1114 /* called to display each frame */
1115 static void video_refresh(void *opaque)
1116 {
1117     VideoState *is = opaque;
1118     VideoPicture *vp;
1119
1120     SubPicture *sp, *sp2;
1121
1122     if (is->video_st) {
1123 retry:
1124         if (is->pictq_size == 0) {
1125             // nothing to do, no picture to display in the queue
1126         } else {
1127             double time= av_gettime()/1000000.0;
1128             double next_target;
1129             /* dequeue the picture */
1130             vp = &is->pictq[is->pictq_rindex];
1131
1132             if(time < vp->target_clock)
1133                 return;
1134             /* update current video pts */
1135             is->video_current_pts = vp->pts;
1136             is->video_current_pts_drift = is->video_current_pts - time;
1137             is->video_current_pos = vp->pos;
1138             if(is->pictq_size > 1){
1139                 VideoPicture *nextvp= &is->pictq[(is->pictq_rindex+1)%VIDEO_PICTURE_QUEUE_SIZE];
1140                 assert(nextvp->target_clock >= vp->target_clock);
1141                 next_target= nextvp->target_clock;
1142             }else{
1143                 next_target= vp->target_clock + vp->duration;
1144             }
1145             if((framedrop>0 || (framedrop && is->audio_st)) && time > next_target){
1146                 is->skip_frames *= 1.0 + FRAME_SKIP_FACTOR;
1147                 if(is->pictq_size > 1){
1148                     /* update queue size and signal for next picture */
1149                     if (++is->pictq_rindex == VIDEO_PICTURE_QUEUE_SIZE)
1150                         is->pictq_rindex = 0;
1151
1152                     SDL_LockMutex(is->pictq_mutex);
1153                     is->pictq_size--;
1154                     SDL_CondSignal(is->pictq_cond);
1155                     SDL_UnlockMutex(is->pictq_mutex);
1156                     goto retry;
1157                 }
1158             }
1159
1160             if(is->subtitle_st) {
1161                 if (is->subtitle_stream_changed) {
1162                     SDL_LockMutex(is->subpq_mutex);
1163
1164                     while (is->subpq_size) {
1165                         free_subpicture(&is->subpq[is->subpq_rindex]);
1166
1167                         /* update queue size and signal for next picture */
1168                         if (++is->subpq_rindex == SUBPICTURE_QUEUE_SIZE)
1169                             is->subpq_rindex = 0;
1170
1171                         is->subpq_size--;
1172                     }
1173                     is->subtitle_stream_changed = 0;
1174
1175                     SDL_CondSignal(is->subpq_cond);
1176                     SDL_UnlockMutex(is->subpq_mutex);
1177                 } else {
1178                     if (is->subpq_size > 0) {
1179                         sp = &is->subpq[is->subpq_rindex];
1180
1181                         if (is->subpq_size > 1)
1182                             sp2 = &is->subpq[(is->subpq_rindex + 1) % SUBPICTURE_QUEUE_SIZE];
1183                         else
1184                             sp2 = NULL;
1185
1186                         if ((is->video_current_pts > (sp->pts + ((float) sp->sub.end_display_time / 1000)))
1187                                 || (sp2 && is->video_current_pts > (sp2->pts + ((float) sp2->sub.start_display_time / 1000))))
1188                         {
1189                             free_subpicture(sp);
1190
1191                             /* update queue size and signal for next picture */
1192                             if (++is->subpq_rindex == SUBPICTURE_QUEUE_SIZE)
1193                                 is->subpq_rindex = 0;
1194
1195                             SDL_LockMutex(is->subpq_mutex);
1196                             is->subpq_size--;
1197                             SDL_CondSignal(is->subpq_cond);
1198                             SDL_UnlockMutex(is->subpq_mutex);
1199                         }
1200                     }
1201                 }
1202             }
1203
1204             /* display picture */
1205             if (!display_disable)
1206                 video_display(is);
1207
1208             /* update queue size and signal for next picture */
1209             if (++is->pictq_rindex == VIDEO_PICTURE_QUEUE_SIZE)
1210                 is->pictq_rindex = 0;
1211
1212             SDL_LockMutex(is->pictq_mutex);
1213             is->pictq_size--;
1214             SDL_CondSignal(is->pictq_cond);
1215             SDL_UnlockMutex(is->pictq_mutex);
1216         }
1217     } else if (is->audio_st) {
1218         /* draw the next audio frame */
1219
1220         /* if there is only an audio stream, display the audio bars (better
1221            than nothing, just to test the implementation) */
1222
1223         /* display picture */
1224         if (!display_disable)
1225             video_display(is);
1226     }
1227     if (show_status) {
1228         static int64_t last_time;
1229         int64_t cur_time;
1230         int aqsize, vqsize, sqsize;
1231         double av_diff;
1232
1233         cur_time = av_gettime();
1234         if (!last_time || (cur_time - last_time) >= 30000) {
1235             aqsize = 0;
1236             vqsize = 0;
1237             sqsize = 0;
1238             if (is->audio_st)
1239                 aqsize = is->audioq.size;
1240             if (is->video_st)
1241                 vqsize = is->videoq.size;
1242             if (is->subtitle_st)
1243                 sqsize = is->subtitleq.size;
1244             av_diff = 0;
1245             if (is->audio_st && is->video_st)
1246                 av_diff = get_audio_clock(is) - get_video_clock(is);
1247             printf("%7.2f A-V:%7.3f s:%3.1f aq=%5dKB vq=%5dKB sq=%5dB f=%"PRId64"/%"PRId64"   \r",
1248                    get_master_clock(is),
1249                    av_diff,
1250                    FFMAX(is->skip_frames-1, 0),
1251                    aqsize / 1024,
1252                    vqsize / 1024,
1253                    sqsize,
1254                    is->video_st ? is->video_st->codec->pts_correction_num_faulty_dts : 0,
1255                    is->video_st ? is->video_st->codec->pts_correction_num_faulty_pts : 0);
1256             fflush(stdout);
1257             last_time = cur_time;
1258         }
1259     }
1260 }
1261
1262 /* allocate a picture (this must be done in the main thread to avoid
1263    potential locking problems) */
1264 static void alloc_picture(void *opaque)
1265 {
1266     VideoState *is = opaque;
1267     VideoPicture *vp;
1268
1269     vp = &is->pictq[is->pictq_windex];
1270
1271     if (vp->bmp)
1272         SDL_FreeYUVOverlay(vp->bmp);
1273
1274 #if CONFIG_AVFILTER
1275     if (vp->picref)
1276         avfilter_unref_buffer(vp->picref);
1277     vp->picref = NULL;
1278
1279     vp->width   = is->out_video_filter->inputs[0]->w;
1280     vp->height  = is->out_video_filter->inputs[0]->h;
1281     vp->pix_fmt = is->out_video_filter->inputs[0]->format;
1282 #else
1283     vp->width   = is->video_st->codec->width;
1284     vp->height  = is->video_st->codec->height;
1285     vp->pix_fmt = is->video_st->codec->pix_fmt;
1286 #endif
1287
1288     vp->bmp = SDL_CreateYUVOverlay(vp->width, vp->height,
1289                                    SDL_YV12_OVERLAY,
1290                                    screen);
1291     if (!vp->bmp || vp->bmp->pitches[0] < vp->width) {
1292         /* SDL allocates a buffer smaller than requested if the video
1293          * overlay hardware is unable to support the requested size. */
1294         fprintf(stderr, "Error: the video system does not support an image\n"
1295                         "size of %dx%d pixels. Try using -lowres or -vf \"scale=w:h\"\n"
1296                         "to reduce the image size.\n", vp->width, vp->height );
1297         do_exit(is);
1298     }
1299
1300     SDL_LockMutex(is->pictq_mutex);
1301     vp->allocated = 1;
1302     SDL_CondSignal(is->pictq_cond);
1303     SDL_UnlockMutex(is->pictq_mutex);
1304 }
1305
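/* Add a decoded frame to the picture queue: update the video clock from
   its pts, wait for a free slot, (re)allocate the SDL overlay if the size
   changed, copy/convert the frame into it and record its target display
   time. */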
1306 static int queue_picture(VideoState *is, AVFrame *src_frame, double pts1, int64_t pos)
1307 {
1308     VideoPicture *vp;
1309     double frame_delay, pts = pts1;
1310
1311     /* compute the exact PTS for the picture if it is omitted in the stream
1312      * pts1 is the dts of the pkt / pts of the frame */
1313     if (pts != 0) {
1314         /* update video clock with pts, if present */
1315         is->video_clock = pts;
1316     } else {
1317         pts = is->video_clock;
1318     }
1319     /* update video clock for next frame */
1320     frame_delay = av_q2d(is->video_st->codec->time_base);
1321     /* for MPEG2, the frame can be repeated, so we update the
1322        clock accordingly */
1323     frame_delay += src_frame->repeat_pict * (frame_delay * 0.5);
1324     is->video_clock += frame_delay;
1325
1326 #if defined(DEBUG_SYNC) && 0
1327     printf("frame_type=%c clock=%0.3f pts=%0.3f\n",
1328            av_get_picture_type_char(src_frame->pict_type), pts, pts1);
1329 #endif
1330
1331     /* wait until we have space to put a new picture */
1332     SDL_LockMutex(is->pictq_mutex);
1333
1334     if(is->pictq_size>=VIDEO_PICTURE_QUEUE_SIZE && !is->refresh)
1335         is->skip_frames= FFMAX(1.0 - FRAME_SKIP_FACTOR, is->skip_frames * (1.0-FRAME_SKIP_FACTOR));
1336
1337     while (is->pictq_size >= VIDEO_PICTURE_QUEUE_SIZE &&
1338            !is->videoq.abort_request) {
1339         SDL_CondWait(is->pictq_cond, is->pictq_mutex);
1340     }
1341     SDL_UnlockMutex(is->pictq_mutex);
1342
1343     if (is->videoq.abort_request)
1344         return -1;
1345
1346     vp = &is->pictq[is->pictq_windex];
1347
1348     vp->duration = frame_delay;
1349
1350     /* alloc or resize hardware picture buffer */
1351     if (!vp->bmp ||
1352 #if CONFIG_AVFILTER
1353         vp->width  != is->out_video_filter->inputs[0]->w ||
1354         vp->height != is->out_video_filter->inputs[0]->h) {
1355 #else
1356         vp->width != is->video_st->codec->width ||
1357         vp->height != is->video_st->codec->height) {
1358 #endif
1359         SDL_Event event;
1360
1361         vp->allocated = 0;
1362
1363         /* the allocation must be done in the main thread to avoid
1364            locking problems */
1365         event.type = FF_ALLOC_EVENT;
1366         event.user.data1 = is;
1367         SDL_PushEvent(&event);
1368
1369         /* wait until the picture is allocated */
1370         SDL_LockMutex(is->pictq_mutex);
1371         while (!vp->allocated && !is->videoq.abort_request) {
1372             SDL_CondWait(is->pictq_cond, is->pictq_mutex);
1373         }
1374         /* if the queue is aborted, we have to pop the pending ALLOC event or wait for the allocation to complete */
1375         if (is->videoq.abort_request && SDL_PeepEvents(&event, 1, SDL_GETEVENT, SDL_EVENTMASK(FF_ALLOC_EVENT)) != 1) {
1376             while (!vp->allocated) {
1377                 SDL_CondWait(is->pictq_cond, is->pictq_mutex);
1378             }
1379         }
1380         SDL_UnlockMutex(is->pictq_mutex);
1381
1382         if (is->videoq.abort_request)
1383             return -1;
1384     }
1385
1386     /* if the frame is not skipped, then display it */
1387     if (vp->bmp) {
1388         AVPicture pict;
1389 #if CONFIG_AVFILTER
1390         if(vp->picref)
1391             avfilter_unref_buffer(vp->picref);
1392         vp->picref = src_frame->opaque;
1393 #endif
1394
1395         /* get a pointer to the bitmap */
1396         SDL_LockYUVOverlay (vp->bmp);
1397
1398         memset(&pict,0,sizeof(AVPicture));
1399         pict.data[0] = vp->bmp->pixels[0];
1400         pict.data[1] = vp->bmp->pixels[2];
1401         pict.data[2] = vp->bmp->pixels[1];
1402
1403         pict.linesize[0] = vp->bmp->pitches[0];
1404         pict.linesize[1] = vp->bmp->pitches[2];
1405         pict.linesize[2] = vp->bmp->pitches[1];
1406
1407 #if CONFIG_AVFILTER
1408         //FIXME use direct rendering
1409         av_picture_copy(&pict, (AVPicture *)src_frame,
1410                         vp->pix_fmt, vp->width, vp->height);
1411 #else
1412         sws_flags = av_get_int(sws_opts, "sws_flags", NULL);
1413         is->img_convert_ctx = sws_getCachedContext(is->img_convert_ctx,
1414             vp->width, vp->height, vp->pix_fmt, vp->width, vp->height,
1415             PIX_FMT_YUV420P, sws_flags, NULL, NULL, NULL);
1416         if (is->img_convert_ctx == NULL) {
1417             fprintf(stderr, "Cannot initialize the conversion context\n");
1418             exit(1);
1419         }
1420         sws_scale(is->img_convert_ctx, src_frame->data, src_frame->linesize,
1421                   0, vp->height, pict.data, pict.linesize);
1422 #endif
1423         /* update the bitmap content */
1424         SDL_UnlockYUVOverlay(vp->bmp);
1425
1426         vp->pts = pts;
1427         vp->pos = pos;
1428
1429         /* now we can update the picture count */
1430         if (++is->pictq_windex == VIDEO_PICTURE_QUEUE_SIZE)
1431             is->pictq_windex = 0;
1432         SDL_LockMutex(is->pictq_mutex);
1433         vp->target_clock= compute_target_time(vp->pts, is);
1434
1435         is->pictq_size++;
1436         SDL_UnlockMutex(is->pictq_mutex);
1437     }
1438     return 0;
1439 }
1440
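/* Fetch the next packet from the video queue and decode it. A flush packet
   resets the decoder and the timing state. Returns 1 when a frame is ready
   for display, 0 when no frame was produced or it was skipped, and -1 on
   abort. */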
1441 static int get_video_frame(VideoState *is, AVFrame *frame, int64_t *pts, AVPacket *pkt)
1442 {
1443     int got_picture, i;
1444
1445     if (packet_queue_get(&is->videoq, pkt, 1) < 0)
1446         return -1;
1447
1448     if (pkt->data == flush_pkt.data) {
1449         avcodec_flush_buffers(is->video_st->codec);
1450
1451         SDL_LockMutex(is->pictq_mutex);
1452         // Make sure there are no long-delay timers (ideally we should just flush the queue, but that's harder)
1453         for (i = 0; i < VIDEO_PICTURE_QUEUE_SIZE; i++) {
1454             is->pictq[i].target_clock= 0;
1455         }
1456         while (is->pictq_size && !is->videoq.abort_request) {
1457             SDL_CondWait(is->pictq_cond, is->pictq_mutex);
1458         }
1459         is->video_current_pos = -1;
1460         SDL_UnlockMutex(is->pictq_mutex);
1461
1462         is->frame_last_pts = AV_NOPTS_VALUE;
1463         is->frame_last_delay = 0;
1464         is->frame_timer = (double)av_gettime() / 1000000.0;
1465         is->skip_frames = 1;
1466         is->skip_frames_index = 0;
1467         return 0;
1468     }
1469
1470     avcodec_decode_video2(is->video_st->codec, frame, &got_picture, pkt);
1471
1472     if (got_picture) {
1473         if (decoder_reorder_pts == -1) {
1474             *pts = frame->best_effort_timestamp;
1475         } else if (decoder_reorder_pts) {
1476             *pts = frame->pkt_pts;
1477         } else {
1478             *pts = frame->pkt_dts;
1479         }
1480
1481         if (*pts == AV_NOPTS_VALUE) {
1482             *pts = 0;
1483         }
1484
1485         is->skip_frames_index += 1;
1486         if(is->skip_frames_index >= is->skip_frames){
1487             is->skip_frames_index -= FFMAX(is->skip_frames, 1.0);
1488             return 1;
1489         }
1490
1491     }
1492     return 0;
1493 }
1494
1495 #if CONFIG_AVFILTER
1496 typedef struct {
1497     VideoState *is;
1498     AVFrame *frame;
1499     int use_dr1;
1500 } FilterPriv;
1501
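/* get_buffer() callback used for direct rendering: the decoder writes
   straight into an avfilter video buffer obtained from the filter's output
   link, avoiding an extra copy into the filter graph. */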
1502 static int input_get_buffer(AVCodecContext *codec, AVFrame *pic)
1503 {
1504     AVFilterContext *ctx = codec->opaque;
1505     AVFilterBufferRef  *ref;
1506     int perms = AV_PERM_WRITE;
1507     int i, w, h, stride[4];
1508     unsigned edge;
1509     int pixel_size;
1510
1511     av_assert0(codec->flags & CODEC_FLAG_EMU_EDGE);
1512
1513     if (codec->codec->capabilities & CODEC_CAP_NEG_LINESIZES)
1514         perms |= AV_PERM_NEG_LINESIZES;
1515
1516     if(pic->buffer_hints & FF_BUFFER_HINTS_VALID) {
1517         if(pic->buffer_hints & FF_BUFFER_HINTS_READABLE) perms |= AV_PERM_READ;
1518         if(pic->buffer_hints & FF_BUFFER_HINTS_PRESERVE) perms |= AV_PERM_PRESERVE;
1519         if(pic->buffer_hints & FF_BUFFER_HINTS_REUSABLE) perms |= AV_PERM_REUSE2;
1520     }
1521     if(pic->reference) perms |= AV_PERM_READ | AV_PERM_PRESERVE;
1522
1523     w = codec->width;
1524     h = codec->height;
1525
1526     if(av_image_check_size(w, h, 0, codec))
1527         return -1;
1528
1529     avcodec_align_dimensions2(codec, &w, &h, stride);
1530     edge = codec->flags & CODEC_FLAG_EMU_EDGE ? 0 : avcodec_get_edge_width();
1531     w += edge << 1;
1532     h += edge << 1;
1533
1534     if(!(ref = avfilter_get_video_buffer(ctx->outputs[0], perms, w, h)))
1535         return -1;
1536
1537     pixel_size = av_pix_fmt_descriptors[ref->format].comp[0].step_minus1+1;
1538     ref->video->w = codec->width;
1539     ref->video->h = codec->height;
1540     for(i = 0; i < 4; i ++) {
1541         unsigned hshift = (i == 1 || i == 2) ? av_pix_fmt_descriptors[ref->format].log2_chroma_w : 0;
1542         unsigned vshift = (i == 1 || i == 2) ? av_pix_fmt_descriptors[ref->format].log2_chroma_h : 0;
1543
1544         if (ref->data[i]) {
1545             ref->data[i]    += ((edge * pixel_size) >> hshift) + ((edge * ref->linesize[i]) >> vshift);
1546         }
1547         pic->data[i]     = ref->data[i];
1548         pic->linesize[i] = ref->linesize[i];
1549     }
1550     pic->opaque = ref;
1551     pic->age    = INT_MAX;
1552     pic->type   = FF_BUFFER_TYPE_USER;
1553     pic->reordered_opaque = codec->reordered_opaque;
1554     if(codec->pkt) pic->pkt_pts = codec->pkt->pts;
1555     else           pic->pkt_pts = AV_NOPTS_VALUE;
1556     return 0;
1557 }
1558
1559 static void input_release_buffer(AVCodecContext *codec, AVFrame *pic)
1560 {
1561     memset(pic->data, 0, sizeof(pic->data));
1562     avfilter_unref_buffer(pic->opaque);
1563 }
1564
1565 static int input_reget_buffer(AVCodecContext *codec, AVFrame *pic)
1566 {
1567     AVFilterBufferRef *ref = pic->opaque;
1568
1569     if (pic->data[0] == NULL) {
1570         pic->buffer_hints |= FF_BUFFER_HINTS_READABLE;
1571         return codec->get_buffer(codec, pic);
1572     }
1573
1574     if ((codec->width != ref->video->w) || (codec->height != ref->video->h) ||
1575         (codec->pix_fmt != ref->format)) {
1576         av_log(codec, AV_LOG_ERROR, "Picture properties changed.\n");
1577         return -1;
1578     }
1579
1580     pic->reordered_opaque = codec->reordered_opaque;
1581     if(codec->pkt) pic->pkt_pts = codec->pkt->pts;
1582     else           pic->pkt_pts = AV_NOPTS_VALUE;
1583     return 0;
1584 }
1585
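/* init callback of the "ffplay_input" source filter: attach the VideoState
   and, if the decoder supports DR1, install the get/release/reget buffer
   callbacks so decoded pictures land directly in filter buffers. */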
1586 static int input_init(AVFilterContext *ctx, const char *args, void *opaque)
1587 {
1588     FilterPriv *priv = ctx->priv;
1589     AVCodecContext *codec;
1590     if(!opaque) return -1;
1591
1592     priv->is = opaque;
1593     codec    = priv->is->video_st->codec;
1594     codec->opaque = ctx;
1595     if((codec->codec->capabilities & CODEC_CAP_DR1)
1596     ) {
1597         av_assert0(codec->flags & CODEC_FLAG_EMU_EDGE);
1598         priv->use_dr1 = 1;
1599         codec->get_buffer     = input_get_buffer;
1600         codec->release_buffer = input_release_buffer;
1601         codec->reget_buffer   = input_reget_buffer;
1602         codec->thread_safe_callbacks = 1;
1603     }
1604
1605     priv->frame = avcodec_alloc_frame();
1606
1607     return 0;
1608 }
1609
1610 static void input_uninit(AVFilterContext *ctx)
1611 {
1612     FilterPriv *priv = ctx->priv;
1613     av_free(priv->frame);
1614 }
1615
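/* request_frame() of the "ffplay_input" source filter: decode until a
   displayable frame is obtained, wrap it in a buffer reference (reusing
   the DR1 buffer when possible) and push it downstream as a single slice. */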
1616 static int input_request_frame(AVFilterLink *link)
1617 {
1618     FilterPriv *priv = link->src->priv;
1619     AVFilterBufferRef *picref;
1620     int64_t pts = 0;
1621     AVPacket pkt;
1622     int ret;
1623
1624     while (!(ret = get_video_frame(priv->is, priv->frame, &pts, &pkt)))
1625         av_free_packet(&pkt);
1626     if (ret < 0)
1627         return -1;
1628
1629     if(priv->use_dr1 && priv->frame->opaque) {
1630         picref = avfilter_ref_buffer(priv->frame->opaque, ~0);
1631     } else {
1632         picref = avfilter_get_video_buffer(link, AV_PERM_WRITE, link->w, link->h);
1633         av_image_copy(picref->data, picref->linesize,
1634                       priv->frame->data, priv->frame->linesize,
1635                       picref->format, link->w, link->h);
1636     }
1637     av_free_packet(&pkt);
1638
1639     avfilter_copy_frame_props(picref, priv->frame);
1640     picref->pts = pts;
1641
1642     avfilter_start_frame(link, picref);
1643     avfilter_draw_slice(link, 0, link->h, 1);
1644     avfilter_end_frame(link);
1645
1646     return 0;
1647 }
1648
1649 static int input_query_formats(AVFilterContext *ctx)
1650 {
1651     FilterPriv *priv = ctx->priv;
1652     enum PixelFormat pix_fmts[] = {
1653         priv->is->video_st->codec->pix_fmt, PIX_FMT_NONE
1654     };
1655
1656     avfilter_set_common_pixel_formats(ctx, avfilter_make_format_list(pix_fmts));
1657     return 0;
1658 }
1659
1660 static int input_config_props(AVFilterLink *link)
1661 {
1662     FilterPriv *priv  = link->src->priv;
1663     AVStream *s = priv->is->video_st;
1664
1665     link->w = s->codec->width;
1666     link->h = s->codec->height;
1667     link->sample_aspect_ratio = s->sample_aspect_ratio.num ?
1668         s->sample_aspect_ratio : s->codec->sample_aspect_ratio;
1669     link->time_base = s->time_base;
1670
1671     return 0;
1672 }
1673
1674 static AVFilter input_filter =
1675 {
1676     .name      = "ffplay_input",
1677
1678     .priv_size = sizeof(FilterPriv),
1679
1680     .init      = input_init,
1681     .uninit    = input_uninit,
1682
1683     .query_formats = input_query_formats,
1684
1685     .inputs    = (AVFilterPad[]) {{ .name = NULL }},
1686     .outputs   = (AVFilterPad[]) {{ .name = "default",
1687                                     .type = AVMEDIA_TYPE_VIDEO,
1688                                     .request_frame = input_request_frame,
1689                                     .config_props  = input_config_props, },
1690                                   { .name = NULL }},
1691 };
1692
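     /* Build the video filter graph: ffplay_input (source) -> optional user
      * chain from -vf -> buffersink (output), which is restricted to
      * PIX_FMT_YUV420P for display.  A user chain is parsed with
      * avfilter_graph_parse(); an illustrative invocation would be:
      *
      *     ffplay -vf "yadif,scale=640:-1" input.mkv
      */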
1693 static int configure_video_filters(AVFilterGraph *graph, VideoState *is, const char *vfilters)
1694 {
1695     char sws_flags_str[128];
1696     int ret;
1697     enum PixelFormat pix_fmts[] = { PIX_FMT_YUV420P, PIX_FMT_NONE };
1698     AVBufferSinkParams *buffersink_params = av_buffersink_params_alloc();
1699     AVFilterContext *filt_src = NULL, *filt_out = NULL;
1700     snprintf(sws_flags_str, sizeof(sws_flags_str), "flags=%d", sws_flags);
1701     graph->scale_sws_opts = av_strdup(sws_flags_str);
1702
1703     if ((ret = avfilter_graph_create_filter(&filt_src, &input_filter, "src",
1704                                             NULL, is, graph)) < 0)
1705         return ret;
1706 #if FF_API_OLD_VSINK_API
1707     ret = avfilter_graph_create_filter(&filt_out, avfilter_get_by_name("buffersink"), "out",
1708                                        NULL, pix_fmts, graph);
1709 #else
1710     buffersink_params->pixel_fmts = pix_fmts;
1711     ret = avfilter_graph_create_filter(&filt_out, avfilter_get_by_name("buffersink"), "out",
1712                                        NULL, buffersink_params, graph);
1713 #endif
1714     av_freep(&buffersink_params);
1715     if (ret < 0)
1716         return ret;
1717
1718     if(vfilters) {
1719         AVFilterInOut *outputs = avfilter_inout_alloc();
1720         AVFilterInOut *inputs  = avfilter_inout_alloc();
1721
1722         outputs->name    = av_strdup("in");
1723         outputs->filter_ctx = filt_src;
1724         outputs->pad_idx = 0;
1725         outputs->next    = NULL;
1726
1727         inputs->name    = av_strdup("out");
1728         inputs->filter_ctx = filt_out;
1729         inputs->pad_idx = 0;
1730         inputs->next    = NULL;
1731
1732         if ((ret = avfilter_graph_parse(graph, vfilters, &inputs, &outputs, NULL)) < 0)
1733             return ret;
1734     } else {
1735         if ((ret = avfilter_link(filt_src, 0, filt_out, 0)) < 0)
1736             return ret;
1737     }
1738
1739     if ((ret = avfilter_graph_config(graph, NULL)) < 0)
1740         return ret;
1741
1742     is->out_video_filter = filt_out;
1743
1744     return ret;
1745 }
1746
1747 #endif  /* CONFIG_AVFILTER */
1748
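     /* Video decoding thread: fetches frames (through the filter graph when
      * built with avfilter, or straight from get_video_frame() otherwise),
      * rescales their pts to seconds and passes them to queue_picture() for
      * later display by the refresh timer. */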
1749 static int video_thread(void *arg)
1750 {
1751     VideoState *is = arg;
1752     AVFrame *frame= avcodec_alloc_frame();
1753     int64_t pts_int = AV_NOPTS_VALUE, pos = -1;
1754     double pts;
1755     int ret;
1756
1757 #if CONFIG_AVFILTER
1758     AVFilterGraph *graph = avfilter_graph_alloc();
1759     AVFilterContext *filt_out = NULL;
1760     int last_w = is->video_st->codec->width;
1761     int last_h = is->video_st->codec->height;
1762
1763     if ((ret = configure_video_filters(graph, is, vfilters)) < 0)
1764         goto the_end;
1765     filt_out = is->out_video_filter;
1766 #endif
1767
1768     for(;;) {
1769 #if !CONFIG_AVFILTER
1770         AVPacket pkt;
1771 #else
1772         AVFilterBufferRef *picref;
1773         AVRational tb = filt_out->inputs[0]->time_base;
1774 #endif
1775         while (is->paused && !is->videoq.abort_request)
1776             SDL_Delay(10);
1777 #if CONFIG_AVFILTER
1778         if (   last_w != is->video_st->codec->width
1779             || last_h != is->video_st->codec->height) {
1780             av_log(NULL, AV_LOG_INFO, "Frame changed from size:%dx%d to size:%dx%d\n",
1781                    last_w, last_h, is->video_st->codec->width, is->video_st->codec->height);
1782             avfilter_graph_free(&graph);
1783             graph = avfilter_graph_alloc();
1784             if ((ret = configure_video_filters(graph, is, vfilters)) < 0)
1785                 goto the_end;
1786             filt_out = is->out_video_filter;
1787             last_w = is->video_st->codec->width;
1788             last_h = is->video_st->codec->height;
1789         }
1790         ret = av_buffersink_get_buffer_ref(filt_out, &picref, 0);
1791         if (picref) {
1792             avfilter_fill_frame_from_video_buffer_ref(frame, picref);
1793             pts_int = picref->pts;
1794             pos     = picref->pos;
1795             frame->opaque = picref;
1796         }
1797
1798         if (av_cmp_q(tb, is->video_st->time_base)) {
1799             av_unused int64_t pts1 = pts_int;
1800             pts_int = av_rescale_q(pts_int, tb, is->video_st->time_base);
1801             av_dlog(NULL, "video_thread(): "
1802                     "tb:%d/%d pts:%"PRId64" -> tb:%d/%d pts:%"PRId64"\n",
1803                     tb.num, tb.den, pts1,
1804                     is->video_st->time_base.num, is->video_st->time_base.den, pts_int);
1805         }
1806 #else
1807         ret = get_video_frame(is, frame, &pts_int, &pkt);
1808         pos = pkt.pos;
1809         av_free_packet(&pkt);
1810 #endif
1811
1812         if (ret < 0) goto the_end;
1813
1814 #if CONFIG_AVFILTER
1815         if (!picref)
1816             continue;
1817 #endif
1818
1819         pts = pts_int*av_q2d(is->video_st->time_base);
1820
1821         ret = queue_picture(is, frame, pts, pos);
1822
1823         if (ret < 0)
1824             goto the_end;
1825
1826         if (is->step)
1827             stream_toggle_pause(is);
1828     }
1829  the_end:
1830 #if CONFIG_AVFILTER
1831     avfilter_graph_free(&graph);
1832 #endif
1833     av_free(frame);
1834     return 0;
1835 }
1836
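     /* Subtitle decoding thread: decodes packets from the subtitle queue and
      * stores bitmap subtitles (sub.format == 0) in the subpicture queue,
      * converting each palette entry from RGBA to the YUVA values used when
      * blending onto the video. */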
1837 static int subtitle_thread(void *arg)
1838 {
1839     VideoState *is = arg;
1840     SubPicture *sp;
1841     AVPacket pkt1, *pkt = &pkt1;
1842     int got_subtitle;
1843     double pts;
1844     int i, j;
1845     int r, g, b, y, u, v, a;
1846
1847     for(;;) {
1848         while (is->paused && !is->subtitleq.abort_request) {
1849             SDL_Delay(10);
1850         }
1851         if (packet_queue_get(&is->subtitleq, pkt, 1) < 0)
1852             break;
1853
1854         if(pkt->data == flush_pkt.data){
1855             avcodec_flush_buffers(is->subtitle_st->codec);
1856             continue;
1857         }
1858         SDL_LockMutex(is->subpq_mutex);
1859         while (is->subpq_size >= SUBPICTURE_QUEUE_SIZE &&
1860                !is->subtitleq.abort_request) {
1861             SDL_CondWait(is->subpq_cond, is->subpq_mutex);
1862         }
1863         SDL_UnlockMutex(is->subpq_mutex);
1864
1865         if (is->subtitleq.abort_request)
1866             return 0;
1867
1868         sp = &is->subpq[is->subpq_windex];
1869
1870         /* NOTE: pts is the PTS of the _first_ subtitle picture beginning
1871            in this packet, if any */
1872         pts = 0;
1873         if (pkt->pts != AV_NOPTS_VALUE)
1874             pts = av_q2d(is->subtitle_st->time_base)*pkt->pts;
1875
1876         avcodec_decode_subtitle2(is->subtitle_st->codec, &sp->sub,
1877                                  &got_subtitle, pkt);
1878
1879         if (got_subtitle && sp->sub.format == 0) {
1880             sp->pts = pts;
1881
1882             for (i = 0; i < sp->sub.num_rects; i++)
1883             {
1884                 for (j = 0; j < sp->sub.rects[i]->nb_colors; j++)
1885                 {
1886                     RGBA_IN(r, g, b, a, (uint32_t*)sp->sub.rects[i]->pict.data[1] + j);
1887                     y = RGB_TO_Y_CCIR(r, g, b);
1888                     u = RGB_TO_U_CCIR(r, g, b, 0);
1889                     v = RGB_TO_V_CCIR(r, g, b, 0);
1890                     YUVA_OUT((uint32_t*)sp->sub.rects[i]->pict.data[1] + j, y, u, v, a);
1891                 }
1892             }
1893
1894             /* now we can update the picture count */
1895             if (++is->subpq_windex == SUBPICTURE_QUEUE_SIZE)
1896                 is->subpq_windex = 0;
1897             SDL_LockMutex(is->subpq_mutex);
1898             is->subpq_size++;
1899             SDL_UnlockMutex(is->subpq_mutex);
1900         }
1901         av_free_packet(pkt);
1902     }
1903     return 0;
1904 }
1905
1906 /* copy samples into the circular sample_array used by the audio visualization (waves/RDFT) display */
1907 static void update_sample_display(VideoState *is, short *samples, int samples_size)
1908 {
1909     int size, len;
1910
1911     size = samples_size / sizeof(short);
1912     while (size > 0) {
1913         len = SAMPLE_ARRAY_SIZE - is->sample_array_index;
1914         if (len > size)
1915             len = size;
1916         memcpy(is->sample_array + is->sample_array_index, samples, len * sizeof(short));
1917         samples += len;
1918         is->sample_array_index += len;
1919         if (is->sample_array_index >= SAMPLE_ARRAY_SIZE)
1920             is->sample_array_index = 0;
1921         size -= len;
1922     }
1923 }
1924
1925 /* return the new audio buffer size (samples can be added or removed
1926    to get better sync when video or the external clock is the master) */
1927 static int synchronize_audio(VideoState *is, short *samples,
1928                              int samples_size1, double pts)
1929 {
1930     int n, samples_size;
1931     double ref_clock;
1932
1933     n = av_get_bytes_per_sample(is->audio_tgt_fmt) * is->audio_tgt_channels;
1934     samples_size = samples_size1;
1935
1936     /* if audio is not the master clock, try to remove or add samples to correct it */
1937     if (((is->av_sync_type == AV_SYNC_VIDEO_MASTER && is->video_st) ||
1938          is->av_sync_type == AV_SYNC_EXTERNAL_CLOCK)) {
1939         double diff, avg_diff;
1940         int wanted_size, min_size, max_size, nb_samples;
1941
1942         ref_clock = get_master_clock(is);
1943         diff = get_audio_clock(is) - ref_clock;
1944
1945         if (fabs(diff) < AV_NOSYNC_THRESHOLD) {
1946             is->audio_diff_cum = diff + is->audio_diff_avg_coef * is->audio_diff_cum;
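                 /* audio_diff_cum accumulates past A-V differences with
                  * geometric weights; multiplying by (1 - audio_diff_avg_coef)
                  * below turns it into an exponentially weighted average whose
                  * weights decay to roughly 1% after AUDIO_DIFF_AVG_NB frames. */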
1947             if (is->audio_diff_avg_count < AUDIO_DIFF_AVG_NB) {
1948                 /* not enough measurements yet for a reliable estimate */
1949                 is->audio_diff_avg_count++;
1950             } else {
1951                 /* estimate the A-V difference */
1952                 avg_diff = is->audio_diff_cum * (1.0 - is->audio_diff_avg_coef);
1953
1954                 if (fabs(avg_diff) >= is->audio_diff_threshold) {
1955                     wanted_size = samples_size + ((int)(diff * is->audio_tgt_freq) * n);
1956                     nb_samples = samples_size / n;
1957
1958                     min_size = ((nb_samples * (100 - SAMPLE_CORRECTION_PERCENT_MAX)) / 100) * n;
1959                     max_size = ((nb_samples * (100 + SAMPLE_CORRECTION_PERCENT_MAX)) / 100) * n;
1960                     if (wanted_size < min_size)
1961                         wanted_size = min_size;
1962                     else if (wanted_size > FFMIN3(max_size, sizeof(is->audio_buf1), sizeof(is->audio_buf2)))
1963                         wanted_size = FFMIN3(max_size, sizeof(is->audio_buf1), sizeof(is->audio_buf2));
1964
1965                     /* add or remove samples to correct the synchronization */
1966                     if (wanted_size < samples_size) {
1967                         /* remove samples */
1968                         samples_size = wanted_size;
1969                     } else if (wanted_size > samples_size) {
1970                         uint8_t *samples_end, *q;
1971                         int nb;
1972
1973                         /* add samples */
1974                         nb = wanted_size - samples_size; /* bytes to append */
1975                         samples_end = (uint8_t *)samples + samples_size - n;
1976                         q = samples_end + n;
1977                         while (nb > 0) {
1978                             memcpy(q, samples_end, n);
1979                             q += n;
1980                             nb -= n;
1981                         }
1982                         samples_size = wanted_size;
1983                     }
1984                 }
1985                 av_dlog(NULL, "diff=%f adiff=%f sample_diff=%d apts=%0.3f vpts=%0.3f %f\n",
1986                         diff, avg_diff, samples_size - samples_size1,
1987                         is->audio_clock, is->video_clock, is->audio_diff_threshold);
1988             }
1989         } else {
1990             /* difference is too large: probably initial PTS errors, so
1991                reset the A-V difference filter */
1992             is->audio_diff_avg_count = 0;
1993             is->audio_diff_cum = 0;
1994         }
1995     }
1996
1997     return samples_size;
1998 }
1999
2000 /* decode one audio frame and return its uncompressed size in bytes */
2001 static int audio_decode_frame(VideoState *is, double *pts_ptr)
2002 {
2003     AVPacket *pkt_temp = &is->audio_pkt_temp;
2004     AVPacket *pkt = &is->audio_pkt;
2005     AVCodecContext *dec= is->audio_st->codec;
2006     int len1, len2, data_size, resampled_data_size;
2007     int64_t dec_channel_layout;
2008     double pts;
2009     int new_packet = 0;
2010     int flush_complete = 0;
2011
2012     for(;;) {
2013         /* NOTE: the audio packet can contain several frames */
2014         while (pkt_temp->size > 0 || (!pkt_temp->data && new_packet)) {
2015             if (flush_complete)
2016                 break;
2017             new_packet = 0;
2018             data_size = sizeof(is->audio_buf1);
2019             len1 = avcodec_decode_audio3(dec,
2020                                         (int16_t *)is->audio_buf1, &data_size,
2021                                         pkt_temp);
2022             if (len1 < 0) {
2023                 /* if error, we skip the frame */
2024                 pkt_temp->size = 0;
2025                 break;
2026             }
2027
2028             pkt_temp->data += len1;
2029             pkt_temp->size -= len1;
2030
2031             if (data_size <= 0) {
2032                 /* stop sending empty packets if the decoder is finished */
2033                 if (!pkt_temp->data && dec->codec->capabilities & CODEC_CAP_DELAY)
2034                     flush_complete = 1;
2035                 continue;
2036             }
2037
2038             dec_channel_layout = (dec->channel_layout && dec->channels == av_get_channel_layout_nb_channels(dec->channel_layout)) ? dec->channel_layout : av_get_default_channel_layout(dec->channels);
2039
2040             if (dec->sample_fmt != is->audio_src_fmt || dec_channel_layout != is->audio_src_channel_layout || dec->sample_rate != is->audio_src_freq) {
2041                 if (is->swr_ctx)
2042                     swr_free(&is->swr_ctx);
2043                 is->swr_ctx = swr_alloc2(NULL, is->audio_tgt_channel_layout, is->audio_tgt_fmt, is->audio_tgt_freq,
2044                                                dec_channel_layout,          dec->sample_fmt,   dec->sample_rate,
2045                                                0, NULL);
2046                 if (!is->swr_ctx || swr_init(is->swr_ctx) < 0) {
2047                     fprintf(stderr, "Cannot create sample rate converter for conversion of %d Hz %s %d channels to %d Hz %s %d channels!\n",
2048                         dec->sample_rate,
2049                         av_get_sample_fmt_name(dec->sample_fmt),
2050                         dec->channels,
2051                         is->audio_tgt_freq,
2052                         av_get_sample_fmt_name(is->audio_tgt_fmt),
2053                         is->audio_tgt_channels);
2054                     break;
2055                 }
2056                 is->audio_src_channel_layout = dec_channel_layout;
2057                 is->audio_src_channels = dec->channels;
2058                 is->audio_src_freq = dec->sample_rate;
2059                 is->audio_src_fmt = dec->sample_fmt;
2060             }
2061
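                 /* Convert the decoded samples to the format/rate/layout the
                  * SDL audio device was opened with.  The swr_convert() counts
                  * are samples per channel: the output capacity comes from
                  * sizeof(audio_buf2) and the input count from data_size. */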
2062             resampled_data_size = data_size;
2063             if (is->swr_ctx) {
2064                 const uint8_t *in[] = {is->audio_buf1};
2065                 uint8_t *out[] = {is->audio_buf2};
2066                 len2 = swr_convert(is->swr_ctx, out, sizeof(is->audio_buf2) / is->audio_tgt_channels / av_get_bytes_per_sample(is->audio_tgt_fmt),
2067                                                 in, data_size / dec->channels / av_get_bytes_per_sample(dec->sample_fmt));
2068                 if (len2 < 0) {
2069                     fprintf(stderr, "audio_resample() failed\n");
2070                     break;
2071                 }
2072                 if (len2 == sizeof(is->audio_buf2) / is->audio_tgt_channels / av_get_bytes_per_sample(is->audio_tgt_fmt)) {
2073                     fprintf(stderr, "warning: audio buffer is probably too small\n");
2074                     swr_init(is->swr_ctx);
2075                 }
2076                 is->audio_buf = is->audio_buf2;
2077                 resampled_data_size = len2 * is->audio_tgt_channels * av_get_bytes_per_sample(is->audio_tgt_fmt);
2078             } else {
2079                 is->audio_buf= is->audio_buf1;
2080             }
2081
2082             /* the pts of this buffer is the current audio clock; advance the clock by the decoded duration */
2083             pts = is->audio_clock;
2084             *pts_ptr = pts;
2085             is->audio_clock += (double)data_size / (dec->channels * dec->sample_rate * av_get_bytes_per_sample(dec->sample_fmt));
2086 #ifdef DEBUG
2087             {
2088                 static double last_clock;
2089                 printf("audio: delay=%0.3f clock=%0.3f pts=%0.3f\n",
2090                        is->audio_clock - last_clock,
2091                        is->audio_clock, pts);
2092                 last_clock = is->audio_clock;
2093             }
2094 #endif
2095             return resampled_data_size;
2096         }
2097
2098         /* free the current packet */
2099         if (pkt->data)
2100             av_free_packet(pkt);
2101
2102         if (is->paused || is->audioq.abort_request) {
2103             return -1;
2104         }
2105
2106         /* read next packet */
2107         if ((new_packet = packet_queue_get(&is->audioq, pkt, 1)) < 0)
2108             return -1;
2109
2110         if (pkt->data == flush_pkt.data)
2111             avcodec_flush_buffers(dec);
2112
2113         pkt_temp->data = pkt->data;
2114         pkt_temp->size = pkt->size;
2115
2116         /* update the audio clock with the packet pts, if available */
2117         if (pkt->pts != AV_NOPTS_VALUE) {
2118             is->audio_clock = av_q2d(is->audio_st->time_base)*pkt->pts;
2119         }
2120     }
2121 }
2122
2123 /* prepare a new audio buffer */
2124 static void sdl_audio_callback(void *opaque, Uint8 *stream, int len)
2125 {
2126     VideoState *is = opaque;
2127     int audio_size, len1;
2128     int bytes_per_sec;
2129     double pts;
2130
2131     audio_callback_time = av_gettime();
2132
2133     while (len > 0) {
2134         if (is->audio_buf_index >= is->audio_buf_size) {
2135             audio_size = audio_decode_frame(is, &pts);
2136             if (audio_size < 0) {
2137                 /* if error, just output silence */
2138                 is->audio_buf = is->audio_buf1;
2139                 is->audio_buf_size = 256 * is->audio_tgt_channels * av_get_bytes_per_sample(is->audio_tgt_fmt);
2140                 memset(is->audio_buf, 0, is->audio_buf_size);
2141             } else {
2142                 if (is->show_mode != SHOW_MODE_VIDEO)
2143                     update_sample_display(is, (int16_t *)is->audio_buf, audio_size);
2144                 audio_size = synchronize_audio(is, (int16_t *)is->audio_buf, audio_size,
2145                                                pts);
2146                 is->audio_buf_size = audio_size;
2147             }
2148             is->audio_buf_index = 0;
2149         }
2150         len1 = is->audio_buf_size - is->audio_buf_index;
2151         if (len1 > len)
2152             len1 = len;
2153         memcpy(stream, (uint8_t *)is->audio_buf + is->audio_buf_index, len1);
2154         len -= len1;
2155         stream += len1;
2156         is->audio_buf_index += len1;
2157     }
2158     bytes_per_sec = is->audio_tgt_freq * is->audio_tgt_channels * av_get_bytes_per_sample(is->audio_tgt_fmt);
2159     is->audio_write_buf_size = is->audio_buf_size - is->audio_buf_index;
2160     /* Let's assume the audio driver that is used by SDL has two periods. */
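         /* audio_current_pts therefore lags audio_clock by the data still
          * queued for the hardware (two hardware buffers assumed) plus the
          * part of the current decoded buffer not yet copied out, converted
          * to seconds via bytes_per_sec. */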
2161     is->audio_current_pts = is->audio_clock - (double)(2 * is->audio_hw_buf_size + is->audio_write_buf_size) / bytes_per_sec;
2162     is->audio_current_pts_drift = is->audio_current_pts - audio_callback_time / 1000000.0;
2163 }
2164
2165 /* open a given stream. Return 0 if OK */
2166 static int stream_component_open(VideoState *is, int stream_index)
2167 {
2168     AVFormatContext *ic = is->ic;
2169     AVCodecContext *avctx;
2170     AVCodec *codec;
2171     SDL_AudioSpec wanted_spec, spec;
2172     AVDictionary *opts;
2173     AVDictionaryEntry *t = NULL;
2174     int64_t wanted_channel_layout = 0;
2175
2176     if (stream_index < 0 || stream_index >= ic->nb_streams)
2177         return -1;
2178     avctx = ic->streams[stream_index]->codec;
2179
2180     opts = filter_codec_opts(codec_opts, avctx->codec_id, ic, ic->streams[stream_index]);
2181
2182     codec = avcodec_find_decoder(avctx->codec_id);
2183     switch(avctx->codec_type){
2184         case AVMEDIA_TYPE_AUDIO   : if(audio_codec_name   ) codec= avcodec_find_decoder_by_name(   audio_codec_name); break;
2185         case AVMEDIA_TYPE_SUBTITLE: if(subtitle_codec_name) codec= avcodec_find_decoder_by_name(subtitle_codec_name); break;
2186         case AVMEDIA_TYPE_VIDEO   : if(video_codec_name   ) codec= avcodec_find_decoder_by_name(   video_codec_name); break;
2187     }
2188     if (!codec)
2189         return -1;
2190
2191     avctx->workaround_bugs = workaround_bugs;
2192     avctx->lowres = lowres;
2193     if(lowres) avctx->flags |= CODEC_FLAG_EMU_EDGE;
2194     avctx->idct_algo= idct;
2195     if(fast) avctx->flags2 |= CODEC_FLAG2_FAST;
2196     avctx->skip_frame= skip_frame;
2197     avctx->skip_idct= skip_idct;
2198     avctx->skip_loop_filter= skip_loop_filter;
2199     avctx->error_recognition= error_recognition;
2200     avctx->error_concealment= error_concealment;
2201
2202     if(codec->capabilities & CODEC_CAP_DR1)
2203         avctx->flags |= CODEC_FLAG_EMU_EDGE;
2204
2205     if (avctx->codec_type == AVMEDIA_TYPE_AUDIO) {
2206         wanted_channel_layout = (avctx->channel_layout && avctx->channels == av_get_channel_layout_nb_channels(avctx->channel_layout)) ? avctx->channel_layout : av_get_default_channel_layout(avctx->channels);
2207         wanted_channel_layout &= ~AV_CH_LAYOUT_STEREO_DOWNMIX;
2208         wanted_spec.channels = av_get_channel_layout_nb_channels(wanted_channel_layout);
2209         wanted_spec.freq = avctx->sample_rate;
2210         if (wanted_spec.freq <= 0 || wanted_spec.channels <= 0) {
2211             fprintf(stderr, "Invalid sample rate or channel count!\n");
2212             return -1;
2213         }
2214     }
2215
2216     if (!codec ||
2217         avcodec_open2(avctx, codec, &opts) < 0)
2218         return -1;
2219     if ((t = av_dict_get(opts, "", NULL, AV_DICT_IGNORE_SUFFIX))) {
2220         av_log(NULL, AV_LOG_ERROR, "Option %s not found.\n", t->key);
2221         return AVERROR_OPTION_NOT_FOUND;
2222     }
2223
2224     /* prepare audio output */
2225     if (avctx->codec_type == AVMEDIA_TYPE_AUDIO) {
2226         wanted_spec.format = AUDIO_S16SYS;
2227         wanted_spec.silence = 0;
2228         wanted_spec.samples = SDL_AUDIO_BUFFER_SIZE;
2229         wanted_spec.callback = sdl_audio_callback;
2230         wanted_spec.userdata = is;
2231         if (SDL_OpenAudio(&wanted_spec, &spec) < 0) {
2232             fprintf(stderr, "SDL_OpenAudio: %s\n", SDL_GetError());
2233             return -1;
2234         }
2235         is->audio_hw_buf_size = spec.size;
2236         if (spec.format != AUDIO_S16SYS) {
2237             fprintf(stderr, "SDL advised audio format %d is not supported!\n", spec.format);
2238             return -1;
2239         }
2240         if (spec.channels != wanted_spec.channels) {
2241             wanted_channel_layout = av_get_default_channel_layout(spec.channels);
2242             if (!wanted_channel_layout) {
2243                 fprintf(stderr, "SDL advised channel count %d is not supported!\n", spec.channels);
2244                 return -1;
2245             }
2246         }
2247         is->audio_src_fmt = is->audio_tgt_fmt = AV_SAMPLE_FMT_S16;
2248         is->audio_src_freq = is->audio_tgt_freq = spec.freq;
2249         is->audio_src_channel_layout = is->audio_tgt_channel_layout = wanted_channel_layout;
2250         is->audio_src_channels = is->audio_tgt_channels = spec.channels;
2251     }
2252
2253     ic->streams[stream_index]->discard = AVDISCARD_DEFAULT;
2254     switch(avctx->codec_type) {
2255     case AVMEDIA_TYPE_AUDIO:
2256         is->audio_stream = stream_index;
2257         is->audio_st = ic->streams[stream_index];
2258         is->audio_buf_size = 0;
2259         is->audio_buf_index = 0;
2260
2261         /* init averaging filter */
2262         is->audio_diff_avg_coef = exp(log(0.01) / AUDIO_DIFF_AVG_NB);
2263         is->audio_diff_avg_count = 0;
2264         /* since we do not have a precise enough audio FIFO fullness measure,
2265            we correct audio sync only if the error is larger than this threshold */
2266         is->audio_diff_threshold = 2.0 * SDL_AUDIO_BUFFER_SIZE / wanted_spec.freq;
2267
2268         memset(&is->audio_pkt, 0, sizeof(is->audio_pkt));
2269         packet_queue_init(&is->audioq);
2270         SDL_PauseAudio(0);
2271         break;
2272     case AVMEDIA_TYPE_VIDEO:
2273         is->video_stream = stream_index;
2274         is->video_st = ic->streams[stream_index];
2275
2276         packet_queue_init(&is->videoq);
2277         is->video_tid = SDL_CreateThread(video_thread, is);
2278         break;
2279     case AVMEDIA_TYPE_SUBTITLE:
2280         is->subtitle_stream = stream_index;
2281         is->subtitle_st = ic->streams[stream_index];
2282         packet_queue_init(&is->subtitleq);
2283
2284         is->subtitle_tid = SDL_CreateThread(subtitle_thread, is);
2285         break;
2286     default:
2287         break;
2288     }
2289     return 0;
2290 }
2291
2292 static void stream_component_close(VideoState *is, int stream_index)
2293 {
2294     AVFormatContext *ic = is->ic;
2295     AVCodecContext *avctx;
2296
2297     if (stream_index < 0 || stream_index >= ic->nb_streams)
2298         return;
2299     avctx = ic->streams[stream_index]->codec;
2300
2301     switch(avctx->codec_type) {
2302     case AVMEDIA_TYPE_AUDIO:
2303         packet_queue_abort(&is->audioq);
2304
2305         SDL_CloseAudio();
2306
2307         packet_queue_end(&is->audioq);
2308         if (is->swr_ctx)
2309             swr_free(&is->swr_ctx);
2310         break;
2311     case AVMEDIA_TYPE_VIDEO:
2312         packet_queue_abort(&is->videoq);
2313
2314         /* note: we also signal this condition to make sure we unblock the
2315            video thread in all cases */
2316         SDL_LockMutex(is->pictq_mutex);
2317         SDL_CondSignal(is->pictq_cond);
2318         SDL_UnlockMutex(is->pictq_mutex);
2319
2320         SDL_WaitThread(is->video_tid, NULL);
2321
2322         packet_queue_end(&is->videoq);
2323         break;
2324     case AVMEDIA_TYPE_SUBTITLE:
2325         packet_queue_abort(&is->subtitleq);
2326
2327         /* note: we also signal this condition to make sure we unblock the
2328            subtitle thread in all cases */
2329         SDL_LockMutex(is->subpq_mutex);
2330         is->subtitle_stream_changed = 1;
2331
2332         SDL_CondSignal(is->subpq_cond);
2333         SDL_UnlockMutex(is->subpq_mutex);
2334
2335         SDL_WaitThread(is->subtitle_tid, NULL);
2336
2337         packet_queue_end(&is->subtitleq);
2338         break;
2339     default:
2340         break;
2341     }
2342
2343     ic->streams[stream_index]->discard = AVDISCARD_ALL;
2344     avcodec_close(avctx);
2345     switch(avctx->codec_type) {
2346     case AVMEDIA_TYPE_AUDIO:
2347         is->audio_st = NULL;
2348         is->audio_stream = -1;
2349         break;
2350     case AVMEDIA_TYPE_VIDEO:
2351         is->video_st = NULL;
2352         is->video_stream = -1;
2353         break;
2354     case AVMEDIA_TYPE_SUBTITLE:
2355         is->subtitle_st = NULL;
2356         is->subtitle_stream = -1;
2357         break;
2358     default:
2359         break;
2360     }
2361 }
2362
2363 /* since we have only one decoding thread, we can use a global
2364    variable instead of a thread local variable */
2365 static VideoState *global_video_state;
2366
2367 static int decode_interrupt_cb(void)
2368 {
2369     return (global_video_state && global_video_state->abort_request);
2370 }
2371
2372 /* this thread gets the stream from the disk or the network */
2373 static int read_thread(void *arg)
2374 {
2375     VideoState *is = arg;
2376     AVFormatContext *ic = NULL;
2377     int err, i, ret;
2378     int st_index[AVMEDIA_TYPE_NB];
2379     AVPacket pkt1, *pkt = &pkt1;
2380     int eof=0;
2381     int pkt_in_play_range = 0;
2382     AVDictionaryEntry *t;
2383     AVDictionary **opts;
2384     int orig_nb_streams;
2385
2386     memset(st_index, -1, sizeof(st_index));
2387     is->video_stream = -1;
2388     is->audio_stream = -1;
2389     is->subtitle_stream = -1;
2390
2391     global_video_state = is;
2392     avio_set_interrupt_cb(decode_interrupt_cb);
2393
2394     err = avformat_open_input(&ic, is->filename, is->iformat, &format_opts);
2395     if (err < 0) {
2396         print_error(is->filename, err);
2397         ret = -1;
2398         goto fail;
2399     }
2400     if ((t = av_dict_get(format_opts, "", NULL, AV_DICT_IGNORE_SUFFIX))) {
2401         av_log(NULL, AV_LOG_ERROR, "Option %s not found.\n", t->key);
2402         ret = AVERROR_OPTION_NOT_FOUND;
2403         goto fail;
2404     }
2405     is->ic = ic;
2406
2407     if(genpts)
2408         ic->flags |= AVFMT_FLAG_GENPTS;
2409
2410     av_dict_set(&codec_opts, "request_channels", "2", 0);
2411
2412     opts = setup_find_stream_info_opts(ic, codec_opts);
2413     orig_nb_streams = ic->nb_streams;
2414
2415     err = avformat_find_stream_info(ic, opts);
2416     if (err < 0) {
2417         fprintf(stderr, "%s: could not find codec parameters\n", is->filename);
2418         ret = -1;
2419         goto fail;
2420     }
2421     for (i = 0; i < orig_nb_streams; i++)
2422         av_dict_free(&opts[i]);
2423     av_freep(&opts);
2424
2425     if(ic->pb)
2426         ic->pb->eof_reached= 0; //FIXME hack, ffplay maybe should not use url_feof() to test for the end
2427
2428     if(seek_by_bytes<0)
2429         seek_by_bytes= !!(ic->iformat->flags & AVFMT_TS_DISCONT);
2430
2431     /* if seeking requested, we execute it */
2432     if (start_time != AV_NOPTS_VALUE) {
2433         int64_t timestamp;
2434
2435         timestamp = start_time;
2436         /* add the stream start time */
2437         if (ic->start_time != AV_NOPTS_VALUE)
2438             timestamp += ic->start_time;
2439         ret = avformat_seek_file(ic, -1, INT64_MIN, timestamp, INT64_MAX, 0);
2440         if (ret < 0) {
2441             fprintf(stderr, "%s: could not seek to position %0.3f\n",
2442                     is->filename, (double)timestamp / AV_TIME_BASE);
2443         }
2444     }
2445
2446     for (i = 0; i < ic->nb_streams; i++)
2447         ic->streams[i]->discard = AVDISCARD_ALL;
2448     if (!video_disable)
2449         st_index[AVMEDIA_TYPE_VIDEO] =
2450             av_find_best_stream(ic, AVMEDIA_TYPE_VIDEO,
2451                                 wanted_stream[AVMEDIA_TYPE_VIDEO], -1, NULL, 0);
2452     if (!audio_disable)
2453         st_index[AVMEDIA_TYPE_AUDIO] =
2454             av_find_best_stream(ic, AVMEDIA_TYPE_AUDIO,
2455                                 wanted_stream[AVMEDIA_TYPE_AUDIO],
2456                                 st_index[AVMEDIA_TYPE_VIDEO],
2457                                 NULL, 0);
2458     if (!video_disable)
2459         st_index[AVMEDIA_TYPE_SUBTITLE] =
2460             av_find_best_stream(ic, AVMEDIA_TYPE_SUBTITLE,
2461                                 wanted_stream[AVMEDIA_TYPE_SUBTITLE],
2462                                 (st_index[AVMEDIA_TYPE_AUDIO] >= 0 ?
2463                                  st_index[AVMEDIA_TYPE_AUDIO] :
2464                                  st_index[AVMEDIA_TYPE_VIDEO]),
2465                                 NULL, 0);
2466     if (show_status) {
2467         av_dump_format(ic, 0, is->filename, 0);
2468     }
2469
2470     is->show_mode = show_mode;
2471
2472     /* open the streams */
2473     if (st_index[AVMEDIA_TYPE_AUDIO] >= 0) {
2474         stream_component_open(is, st_index[AVMEDIA_TYPE_AUDIO]);
2475     }
2476
2477     ret=-1;
2478     if (st_index[AVMEDIA_TYPE_VIDEO] >= 0) {
2479         ret= stream_component_open(is, st_index[AVMEDIA_TYPE_VIDEO]);
2480     }
2481     is->refresh_tid = SDL_CreateThread(refresh_thread, is);
2482     if (is->show_mode == SHOW_MODE_NONE)
2483         is->show_mode = ret >= 0 ? SHOW_MODE_VIDEO : SHOW_MODE_RDFT;
2484
2485     if (st_index[AVMEDIA_TYPE_SUBTITLE] >= 0) {
2486         stream_component_open(is, st_index[AVMEDIA_TYPE_SUBTITLE]);
2487     }
2488
2489     if (is->video_stream < 0 && is->audio_stream < 0) {
2490         fprintf(stderr, "%s: could not open codecs\n", is->filename);
2491         ret = -1;
2492         goto fail;
2493     }
2494
2495     for(;;) {
2496         if (is->abort_request)
2497             break;
2498         if (is->paused != is->last_paused) {
2499             is->last_paused = is->paused;
2500             if (is->paused)
2501                 is->read_pause_return= av_read_pause(ic);
2502             else
2503                 av_read_play(ic);
2504         }
2505 #if CONFIG_RTSP_DEMUXER
2506         if (is->paused && !strcmp(ic->iformat->name, "rtsp")) {
2507             /* wait 10 ms to avoid trying to get another packet */
2508             /* XXX: horrible */
2509             SDL_Delay(10);
2510             continue;
2511         }
2512 #endif
2513         if (is->seek_req) {
2514             int64_t seek_target= is->seek_pos;
2515             int64_t seek_min= is->seek_rel > 0 ? seek_target - is->seek_rel + 2: INT64_MIN;
2516             int64_t seek_max= is->seek_rel < 0 ? seek_target - is->seek_rel - 2: INT64_MAX;
2517 //FIXME the +-2 is due to rounding being not done in the correct direction in generation
2518 //      of the seek_pos/seek_rel variables
2519
2520             ret = avformat_seek_file(is->ic, -1, seek_min, seek_target, seek_max, is->seek_flags);
2521             if (ret < 0) {
2522                 fprintf(stderr, "%s: error while seeking\n", is->ic->filename);
2523             }else{
2524                 if (is->audio_stream >= 0) {
2525                     packet_queue_flush(&is->audioq);
2526                     packet_queue_put(&is->audioq, &flush_pkt);
2527                 }
2528                 if (is->subtitle_stream >= 0) {
2529                     packet_queue_flush(&is->subtitleq);
2530                     packet_queue_put(&is->subtitleq, &flush_pkt);
2531                 }
2532                 if (is->video_stream >= 0) {
2533                     packet_queue_flush(&is->videoq);
2534                     packet_queue_put(&is->videoq, &flush_pkt);
2535                 }
2536             }
2537             is->seek_req = 0;
2538             eof= 0;
2539         }
2540
2541         /* if the queues are full, no need to read more */
2542         if (   is->audioq.size + is->videoq.size + is->subtitleq.size > MAX_QUEUE_SIZE
2543             || (   (is->audioq   .size  > MIN_AUDIOQ_SIZE || is->audio_stream<0)
2544                 && (is->videoq   .nb_packets > MIN_FRAMES || is->video_stream<0)
2545                 && (is->subtitleq.nb_packets > MIN_FRAMES || is->subtitle_stream<0))) {
2546             /* wait 10 ms */
2547             SDL_Delay(10);
2548             continue;
2549         }
2550         if(eof) {
2551             if(is->video_stream >= 0){
2552                 av_init_packet(pkt);
2553                 pkt->data=NULL;
2554                 pkt->size=0;
2555                 pkt->stream_index= is->video_stream;
2556                 packet_queue_put(&is->videoq, pkt);
2557             }
2558             if (is->audio_stream >= 0 &&
2559                 is->audio_st->codec->codec->capabilities & CODEC_CAP_DELAY) {
2560                 av_init_packet(pkt);
2561                 pkt->data = NULL;
2562                 pkt->size = 0;
2563                 pkt->stream_index = is->audio_stream;
2564                 packet_queue_put(&is->audioq, pkt);
2565             }
2566             SDL_Delay(10);
2567             if(is->audioq.size + is->videoq.size + is->subtitleq.size ==0){
2568                 if(loop!=1 && (!loop || --loop)){
2569                     stream_seek(is, start_time != AV_NOPTS_VALUE ? start_time : 0, 0, 0);
2570                 }else if(autoexit){
2571                     ret=AVERROR_EOF;
2572                     goto fail;
2573                 }
2574             }
2575             eof=0;
2576             continue;
2577         }
2578         ret = av_read_frame(ic, pkt);
2579         if (ret < 0) {
2580             if (ret == AVERROR_EOF || url_feof(ic->pb))
2581                 eof=1;
2582             if (ic->pb && ic->pb->error)
2583                 break;
2584             SDL_Delay(100); /* wait for user event */
2585             continue;
2586         }
2587         /* check if packet is in play range specified by user, then queue, otherwise discard */
2588         pkt_in_play_range = duration == AV_NOPTS_VALUE ||
2589                 (pkt->pts - ic->streams[pkt->stream_index]->start_time) *
2590                 av_q2d(ic->streams[pkt->stream_index]->time_base) -
2591                 (double)(start_time != AV_NOPTS_VALUE ? start_time : 0)/1000000
2592                 <= ((double)duration/1000000);
2593         if (pkt->stream_index == is->audio_stream && pkt_in_play_range) {
2594             packet_queue_put(&is->audioq, pkt);
2595         } else if (pkt->stream_index == is->video_stream && pkt_in_play_range) {
2596             packet_queue_put(&is->videoq, pkt);
2597         } else if (pkt->stream_index == is->subtitle_stream && pkt_in_play_range) {
2598             packet_queue_put(&is->subtitleq, pkt);
2599         } else {
2600             av_free_packet(pkt);
2601         }
2602     }
2603     /* wait until the end */
2604     while (!is->abort_request) {
2605         SDL_Delay(100);
2606     }
2607
2608     ret = 0;
2609  fail:
2610     /* disable interrupting */
2611     global_video_state = NULL;
2612
2613     /* close each stream */
2614     if (is->audio_stream >= 0)
2615         stream_component_close(is, is->audio_stream);
2616     if (is->video_stream >= 0)
2617         stream_component_close(is, is->video_stream);
2618     if (is->subtitle_stream >= 0)
2619         stream_component_close(is, is->subtitle_stream);
2620     if (is->ic) {
2621         av_close_input_file(is->ic);
2622         is->ic = NULL; /* safety */
2623     }
2624     avio_set_interrupt_cb(NULL);
2625
2626     if (ret != 0) {
2627         SDL_Event event;
2628
2629         event.type = FF_QUIT_EVENT;
2630         event.user.data1 = is;
2631         SDL_PushEvent(&event);
2632     }
2633     return 0;
2634 }
2635
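     /* Allocate and initialize a VideoState for the given file, create the
      * picture/subpicture synchronization primitives and spawn read_thread(),
      * which performs the actual demuxing and stream setup. */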
2636 static VideoState *stream_open(const char *filename, AVInputFormat *iformat)
2637 {
2638     VideoState *is;
2639
2640     is = av_mallocz(sizeof(VideoState));
2641     if (!is)
2642         return NULL;
2643     av_strlcpy(is->filename, filename, sizeof(is->filename));
2644     is->iformat = iformat;
2645     is->ytop = 0;
2646     is->xleft = 0;
2647
2648     /* start video display */
2649     is->pictq_mutex = SDL_CreateMutex();
2650     is->pictq_cond = SDL_CreateCond();
2651
2652     is->subpq_mutex = SDL_CreateMutex();
2653     is->subpq_cond = SDL_CreateCond();
2654
2655     is->av_sync_type = av_sync_type;
2656     is->read_tid = SDL_CreateThread(read_thread, is);
2657     if (!is->read_tid) {
2658         av_free(is);
2659         return NULL;
2660     }
2661     return is;
2662 }
2663
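     /* Cycle to the next stream of the given type (bound to the 'a', 'v' and
      * 't' keys): close the currently open stream of that type and open the
      * next usable one, wrapping around; for subtitles the cycle may also end
      * on "no subtitle stream". */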
2664 static void stream_cycle_channel(VideoState *is, int codec_type)
2665 {
2666     AVFormatContext *ic = is->ic;
2667     int start_index, stream_index;
2668     AVStream *st;
2669
2670     if (codec_type == AVMEDIA_TYPE_VIDEO)
2671         start_index = is->video_stream;
2672     else if (codec_type == AVMEDIA_TYPE_AUDIO)
2673         start_index = is->audio_stream;
2674     else
2675         start_index = is->subtitle_stream;
2676     if (start_index < (codec_type == AVMEDIA_TYPE_SUBTITLE ? -1 : 0))
2677         return;
2678     stream_index = start_index;
2679     for(;;) {
2680         if (++stream_index >= is->ic->nb_streams)
2681         {
2682             if (codec_type == AVMEDIA_TYPE_SUBTITLE)
2683             {
2684                 stream_index = -1;
2685                 goto the_end;
2686             } else
2687                 stream_index = 0;
2688         }
2689         if (stream_index == start_index)
2690             return;
2691         st = ic->streams[stream_index];
2692         if (st->codec->codec_type == codec_type) {
2693             /* check that parameters are OK */
2694             switch(codec_type) {
2695             case AVMEDIA_TYPE_AUDIO:
2696                 if (st->codec->sample_rate != 0 &&
2697                     st->codec->channels != 0)
2698                     goto the_end;
2699                 break;
2700             case AVMEDIA_TYPE_VIDEO:
2701             case AVMEDIA_TYPE_SUBTITLE:
2702                 goto the_end;
2703             default:
2704                 break;
2705             }
2706         }
2707     }
2708  the_end:
2709     stream_component_close(is, start_index);
2710     stream_component_open(is, stream_index);
2711 }
2712
2713
2714 static void toggle_full_screen(VideoState *is)
2715 {
2716     is_full_screen = !is_full_screen;
2717     video_open(is);
2718 }
2719
2720 static void toggle_pause(VideoState *is)
2721 {
2722     stream_toggle_pause(is);
2723     is->step = 0;
2724 }
2725
2726 static void step_to_next_frame(VideoState *is)
2727 {
2728     /* if the stream is paused, unpause it, then step */
2729     if (is->paused)
2730         stream_toggle_pause(is);
2731     is->step = 1;
2732 }
2733
2734 static void toggle_audio_display(VideoState *is)
2735 {
2736     int bgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0x00);
2737     is->show_mode = (is->show_mode + 1) % SHOW_MODE_NB;
2738     fill_rectangle(screen,
2739                 is->xleft, is->ytop, is->width, is->height,
2740                 bgcolor);
2741     SDL_UpdateRect(screen, is->xleft, is->ytop, is->width, is->height);
2742 }
2743
2744 /* handle an event sent by the GUI */
2745 static void event_loop(VideoState *cur_stream)
2746 {
2747     SDL_Event event;
2748     double incr, pos, frac;
2749
2750     for(;;) {
2751         double x;
2752         SDL_WaitEvent(&event);
2753         switch(event.type) {
2754         case SDL_KEYDOWN:
2755             if (exit_on_keydown) {
2756                 do_exit(cur_stream);
2757                 break;
2758             }
2759             switch(event.key.keysym.sym) {
2760             case SDLK_ESCAPE:
2761             case SDLK_q:
2762                 do_exit(cur_stream);
2763                 break;
2764             case SDLK_f:
2765                 toggle_full_screen(cur_stream);
2766                 break;
2767             case SDLK_p:
2768             case SDLK_SPACE:
2769                 toggle_pause(cur_stream);
2770                 break;
2771             case SDLK_s: //S: Step to next frame
2772                 step_to_next_frame(cur_stream);
2773                 break;
2774             case SDLK_a:
2775                 stream_cycle_channel(cur_stream, AVMEDIA_TYPE_AUDIO);
2776                 break;
2777             case SDLK_v:
2778                 stream_cycle_channel(cur_stream, AVMEDIA_TYPE_VIDEO);
2779                 break;
2780             case SDLK_t:
2781                 stream_cycle_channel(cur_stream, AVMEDIA_TYPE_SUBTITLE);
2782                 break;
2783             case SDLK_w:
2784                 toggle_audio_display(cur_stream);
2785                 break;
2786             case SDLK_LEFT:
2787                 incr = -10.0;
2788                 goto do_seek;
2789             case SDLK_RIGHT:
2790                 incr = 10.0;
2791                 goto do_seek;
2792             case SDLK_UP:
2793                 incr = 60.0;
2794                 goto do_seek;
2795             case SDLK_DOWN:
2796                 incr = -60.0;
2797             do_seek:
2798                 if (seek_by_bytes) {
2799                     if (cur_stream->video_stream >= 0 && cur_stream->video_current_pos>=0){
2800                         pos= cur_stream->video_current_pos;
2801                     }else if(cur_stream->audio_stream >= 0 && cur_stream->audio_pkt.pos>=0){
2802                         pos= cur_stream->audio_pkt.pos;
2803                     }else
2804                         pos = avio_tell(cur_stream->ic->pb);
2805                     if (cur_stream->ic->bit_rate)
2806                         incr *= cur_stream->ic->bit_rate / 8.0;
2807                     else
2808                         incr *= 180000.0;
2809                     pos += incr;
2810                     stream_seek(cur_stream, pos, incr, 1);
2811                 } else {
2812                     pos = get_master_clock(cur_stream);
2813                     pos += incr;
2814                     stream_seek(cur_stream, (int64_t)(pos * AV_TIME_BASE), (int64_t)(incr * AV_TIME_BASE), 0);
2815                 }
2816                 break;
2817             default:
2818                 break;
2819             }
2820             break;
2821         case SDL_MOUSEBUTTONDOWN:
2822             if (exit_on_mousedown) {
2823                 do_exit(cur_stream);
2824                 break;
2825             }
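             /* no break: a mouse click falls through and is handled like a
              * mouse motion event below, seeking to the clicked x position */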
2826         case SDL_MOUSEMOTION:
2827             if(event.type ==SDL_MOUSEBUTTONDOWN){
2828                 x= event.button.x;
2829             }else{
2830                 if(event.motion.state != SDL_PRESSED)
2831                     break;
2832                 x= event.motion.x;
2833             }
2834             if(seek_by_bytes || cur_stream->ic->duration<=0){
2835                 uint64_t size=  avio_size(cur_stream->ic->pb);
2836                 stream_seek(cur_stream, size*x/cur_stream->width, 0, 1);
2837             }else{
2838                 int64_t ts;
2839                 int ns, hh, mm, ss;
2840                 int tns, thh, tmm, tss;
2841                 tns = cur_stream->ic->duration/1000000LL;
2842                 thh = tns/3600;
2843                 tmm = (tns%3600)/60;
2844                 tss = (tns%60);
2845                 frac = x/cur_stream->width;
2846                 ns = frac*tns;
2847                 hh = ns/3600;
2848                 mm = (ns%3600)/60;
2849                 ss = (ns%60);
2850                 fprintf(stderr, "Seek to %2.0f%% (%2d:%02d:%02d) of total duration (%2d:%02d:%02d)       \n", frac*100,
2851                         hh, mm, ss, thh, tmm, tss);
2852                 ts = frac*cur_stream->ic->duration;
2853                 if (cur_stream->ic->start_time != AV_NOPTS_VALUE)
2854                     ts += cur_stream->ic->start_time;
2855                 stream_seek(cur_stream, ts, 0, 0);
2856             }
2857             break;
2858         case SDL_VIDEORESIZE:
2859             screen = SDL_SetVideoMode(event.resize.w, event.resize.h, 0,
2860                                       SDL_HWSURFACE|SDL_RESIZABLE|SDL_ASYNCBLIT|SDL_HWACCEL);
2861             screen_width = cur_stream->width = event.resize.w;
2862             screen_height= cur_stream->height= event.resize.h;
2863             break;
2864         case SDL_QUIT:
2865         case FF_QUIT_EVENT:
2866             do_exit(cur_stream);
2867             break;
2868         case FF_ALLOC_EVENT:
2869             video_open(event.user.data1);
2870             alloc_picture(event.user.data1);
2871             break;
2872         case FF_REFRESH_EVENT:
2873             video_refresh(event.user.data1);
2874             cur_stream->refresh=0;
2875             break;
2876         default:
2877             break;
2878         }
2879     }
2880 }
2881
2882 static int opt_frame_size(const char *opt, const char *arg)
2883 {
2884     av_log(NULL, AV_LOG_WARNING, "Option -s is deprecated, use -video_size.\n");
2885     return opt_default("video_size", arg);
2886 }
2887
2888 static int opt_width(const char *opt, const char *arg)
2889 {
2890     screen_width = parse_number_or_die(opt, arg, OPT_INT64, 1, INT_MAX);
2891     return 0;
2892 }
2893
2894 static int opt_height(const char *opt, const char *arg)
2895 {
2896     screen_height = parse_number_or_die(opt, arg, OPT_INT64, 1, INT_MAX);
2897     return 0;
2898 }
2899
2900 static int opt_format(const char *opt, const char *arg)
2901 {
2902     file_iformat = av_find_input_format(arg);
2903     if (!file_iformat) {
2904         fprintf(stderr, "Unknown input format: %s\n", arg);
2905         return AVERROR(EINVAL);
2906     }
2907     return 0;
2908 }
2909
2910 static int opt_frame_pix_fmt(const char *opt, const char *arg)
2911 {
2912     av_log(NULL, AV_LOG_WARNING, "Option -pix_fmt is deprecated, use -pixel_format.\n");
2913     return opt_default("pixel_format", arg);
2914 }
2915
2916 static int opt_sync(const char *opt, const char *arg)
2917 {
2918     if (!strcmp(arg, "audio"))
2919         av_sync_type = AV_SYNC_AUDIO_MASTER;
2920     else if (!strcmp(arg, "video"))
2921         av_sync_type = AV_SYNC_VIDEO_MASTER;
2922     else if (!strcmp(arg, "ext"))
2923         av_sync_type = AV_SYNC_EXTERNAL_CLOCK;
2924     else {
2925         fprintf(stderr, "Unknown value for %s: %s\n", opt, arg);
2926         exit(1);
2927     }
2928     return 0;
2929 }
2930
2931 static int opt_seek(const char *opt, const char *arg)
2932 {
2933     start_time = parse_time_or_die(opt, arg, 1);
2934     return 0;
2935 }
2936
2937 static int opt_duration(const char *opt, const char *arg)
2938 {
2939     duration = parse_time_or_die(opt, arg, 1);
2940     return 0;
2941 }
2942
2943 static int opt_show_mode(const char *opt, const char *arg)
2944 {
2945     show_mode = !strcmp(arg, "video") ? SHOW_MODE_VIDEO :
2946                 !strcmp(arg, "waves") ? SHOW_MODE_WAVES :
2947                 !strcmp(arg, "rdft" ) ? SHOW_MODE_RDFT  :
2948                 parse_number_or_die(opt, arg, OPT_INT, 0, SHOW_MODE_NB-1);
2949     return 0;
2950 }
2951
2952 static void opt_input_file(void *optctx, const char *filename)
2953 {
2954     if (input_filename) {
2955         fprintf(stderr, "Argument '%s' provided as input filename, but '%s' was already specified.\n",
2956                 filename, input_filename);
2957         exit_program(1);
2958     }
2959     if (!strcmp(filename, "-"))
2960         filename = "pipe:";
2961     input_filename = filename;
2962 }
2963
2964 static int opt_codec(void *o, const char *opt, const char *arg)
2965 {
2966     switch(opt[strlen(opt)-1]){
2967     case 'a' :    audio_codec_name = arg; break;
2968     case 's' : subtitle_codec_name = arg; break;
2969     case 'v' :    video_codec_name = arg; break;
2970     }
2971     return 0;
2972 }
2973
2974 static int dummy;
2975
2976 static const OptionDef options[] = {
2977 #include "cmdutils_common_opts.h"
2978     { "x", HAS_ARG, {(void*)opt_width}, "force displayed width", "width" },
2979     { "y", HAS_ARG, {(void*)opt_height}, "force displayed height", "height" },
2980     { "s", HAS_ARG | OPT_VIDEO, {(void*)opt_frame_size}, "set frame size (WxH or abbreviation)", "size" },
2981     { "fs", OPT_BOOL, {(void*)&is_full_screen}, "force full screen" },
2982     { "an", OPT_BOOL, {(void*)&audio_disable}, "disable audio" },
2983     { "vn", OPT_BOOL, {(void*)&video_disable}, "disable video" },
2984     { "ast", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&wanted_stream[AVMEDIA_TYPE_AUDIO]}, "select desired audio stream", "stream_number" },
2985     { "vst", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&wanted_stream[AVMEDIA_TYPE_VIDEO]}, "select desired video stream", "stream_number" },
2986     { "sst", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&wanted_stream[AVMEDIA_TYPE_SUBTITLE]}, "select desired subtitle stream", "stream_number" },
2987     { "ss", HAS_ARG, {(void*)&opt_seek}, "seek to a given position in seconds", "pos" },
2988     { "t", HAS_ARG, {(void*)&opt_duration}, "play \"duration\" seconds of audio/video", "duration" },
2989     { "bytes", OPT_INT | HAS_ARG, {(void*)&seek_by_bytes}, "seek by bytes 0=off 1=on -1=auto", "val" },
2990     { "nodisp", OPT_BOOL, {(void*)&display_disable}, "disable graphical display" },
2991     { "f", HAS_ARG, {(void*)opt_format}, "force format", "fmt" },
2992     { "pix_fmt", HAS_ARG | OPT_EXPERT | OPT_VIDEO, {(void*)opt_frame_pix_fmt}, "set pixel format", "format" },
2993     { "stats", OPT_BOOL | OPT_EXPERT, {(void*)&show_status}, "show status", "" },
2994     { "bug", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&workaround_bugs}, "workaround bugs", "" },
2995     { "fast", OPT_BOOL | OPT_EXPERT, {(void*)&fast}, "non spec compliant optimizations", "" },
2996     { "genpts", OPT_BOOL | OPT_EXPERT, {(void*)&genpts}, "generate pts", "" },
2997     { "drp", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&decoder_reorder_pts}, "let decoder reorder pts 0=off 1=on -1=auto", ""},
2998     { "lowres", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&lowres}, "", "" },
2999     { "skiploop", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&skip_loop_filter}, "", "" },
3000     { "skipframe", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&skip_frame}, "", "" },
3001     { "skipidct", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&skip_idct}, "", "" },
3002     { "idct", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&idct}, "set idct algo",  "algo" },
3003     { "er", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&error_recognition}, "set error detection threshold (0-4)",  "threshold" },
3004     { "ec", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&error_concealment}, "set error concealment options",  "bit_mask" },
3005     { "sync", HAS_ARG | OPT_EXPERT, {(void*)opt_sync}, "set audio-video sync. type (type=audio/video/ext)", "type" },
3006     { "autoexit", OPT_BOOL | OPT_EXPERT, {(void*)&autoexit}, "exit at the end", "" },
3007     { "exitonkeydown", OPT_BOOL | OPT_EXPERT, {(void*)&exit_on_keydown}, "exit on key down", "" },
3008     { "exitonmousedown", OPT_BOOL | OPT_EXPERT, {(void*)&exit_on_mousedown}, "exit on mouse down", "" },
3009     { "loop", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&loop}, "set number of times the playback shall be looped", "loop count" },
3010     { "framedrop", OPT_BOOL | OPT_EXPERT, {(void*)&framedrop}, "drop frames when cpu is too slow", "" },
3011     { "window_title", OPT_STRING | HAS_ARG, {(void*)&window_title}, "set window title", "window title" },
3012 #if CONFIG_AVFILTER
3013     { "vf", OPT_STRING | HAS_ARG, {(void*)&vfilters}, "video filters", "filter list" },
3014 #endif
3015     { "rdftspeed", OPT_INT | HAS_ARG| OPT_AUDIO | OPT_EXPERT, {(void*)&rdftspeed}, "rdft speed", "msecs" },
3016     { "showmode", HAS_ARG, {(void*)opt_show_mode}, "select show mode (0 = video, 1 = waves, 2 = RDFT)", "mode" },
3017     { "default", HAS_ARG | OPT_AUDIO | OPT_VIDEO | OPT_EXPERT, {(void*)opt_default}, "generic catch all option", "" },
3018     { "i", OPT_BOOL, {(void *)&dummy}, "read specified file", "input_file"},
3019     { "codec", HAS_ARG | OPT_FUNC2, {(void*)opt_codec}, "force decoder", "decoder" },
3020     { NULL, },
3021 };
3022
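/* Example invocations using the options above (illustrative only; the file
 * names are placeholders):
 *   ffplay -an -ss 30 -t 10 input.mkv      play 10 seconds of video starting at 0:30, audio disabled
 *   ffplay -nodisp -autoexit music.flac    audio only, quit when the file ends
 */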
3023 static void show_usage(void)
3024 {
3025     printf("Simple media player\n");
3026     printf("usage: %s [options] input_file\n", program_name);
3027     printf("\n");
3028 }
3029
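/* -h handler: prints the usage line, the main and advanced option tables
   above, the AVOptions of libavcodec and libavformat, and the interactive
   key bindings */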
3030 static int opt_help(const char *opt, const char *arg)
3031 {
3032     const AVClass *class;
3033     av_log_set_callback(log_callback_help);
3034     show_usage();
3035     show_help_options(options, "Main options:\n",
3036                       OPT_EXPERT, 0);
3037     show_help_options(options, "\nAdvanced options:\n",
3038                       OPT_EXPERT, OPT_EXPERT);
3039     printf("\n");
3040     class = avcodec_get_class();
3041     av_opt_show2(&class, NULL,
3042                  AV_OPT_FLAG_DECODING_PARAM, 0);
3043     printf("\n");
3044     class = avformat_get_class();
3045     av_opt_show2(&class, NULL,
3046                  AV_OPT_FLAG_DECODING_PARAM, 0);
3047 #if !CONFIG_AVFILTER
3048     printf("\n");
3049     class = sws_get_class();
3050     av_opt_show2(&class, NULL,
3051                  AV_OPT_FLAG_ENCODING_PARAM, 0);
3052 #endif
3053     printf("\nWhile playing:\n"
3054            "q, ESC              quit\n"
3055            "f                   toggle full screen\n"
3056            "p, SPC              pause\n"
3057            "a                   cycle audio channel\n"
3058            "v                   cycle video channel\n"
3059            "t                   cycle subtitle channel\n"
3060            "w                   show audio waves\n"
3061            "s                   activate frame-step mode\n"
3062            "left/right          seek backward/forward 10 seconds\n"
3063            "down/up             seek backward/forward 1 minute\n"
3064            "mouse click         seek to percentage in file corresponding to fraction of width\n"
3065            );
3066     return 0;
3067 }
3068
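/* lock manager callback for av_lockmgr_register(): backs FFmpeg's
   create/obtain/release/destroy lock operations with SDL mutexes,
   returning 0 on success and nonzero on failure */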
3069 static int lockmgr(void **mtx, enum AVLockOp op)
3070 {
3071     switch (op) {
3072     case AV_LOCK_CREATE:
3073         *mtx = SDL_CreateMutex();
3074         if (!*mtx)
3075             return 1;
3076         return 0;
3077     case AV_LOCK_OBTAIN:
3078         return !!SDL_LockMutex(*mtx);
3079     case AV_LOCK_RELEASE:
3080         return !!SDL_UnlockMutex(*mtx);
3081     case AV_LOCK_DESTROY:
3082         SDL_DestroyMutex(*mtx);
3083         return 0;
3084     }
3085     return 1;
3086 }
3087
3088 /* entry point: initialize the libraries and SDL, open the input stream and run the event loop */
3089 int main(int argc, char **argv)
3090 {
3091     int flags;
3092     VideoState *is;
3093
3094     av_log_set_flags(AV_LOG_SKIP_REPEATED);
3095     parse_loglevel(argc, argv, options);
3096
3097     /* register all codecs, demuxers and protocols */
3098     avcodec_register_all();
3099 #if CONFIG_AVDEVICE
3100     avdevice_register_all();
3101 #endif
3102 #if CONFIG_AVFILTER
3103     avfilter_register_all();
3104 #endif
3105     av_register_all();
3106
3107     init_opts();
3108
3109     show_banner();
3110
3111     parse_options(NULL, argc, argv, options, opt_input_file);
3112
3113     if (!input_filename) {
3114         show_usage();
3115         fprintf(stderr, "An input file must be specified\n");
3116         fprintf(stderr, "Use -h to get full help or, even better, run 'man %s'\n", program_name);
3117         exit(1);
3118     }
3119
3120     if (display_disable) {
3121         video_disable = 1;
3122     }
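    /* initialize the SDL subsystems we need: video, audio and timer;
       audio is dropped again below if it was disabled on the command line */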
3123     flags = SDL_INIT_VIDEO | SDL_INIT_AUDIO | SDL_INIT_TIMER;
3124     if (audio_disable)
3125         flags &= ~SDL_INIT_AUDIO;
3126 #if !defined(__MINGW32__) && !defined(__APPLE__)
3127     flags |= SDL_INIT_EVENTTHREAD; /* Not supported on Windows or Mac OS X */
3128 #endif
3129     if (SDL_Init(flags)) {
3130         fprintf(stderr, "Could not initialize SDL - %s\n", SDL_GetError());
3131         fprintf(stderr, "(Did you set the DISPLAY variable?)\n");
3132         exit(1);
3133     }
3134
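    /* query the desktop resolution (when SDL exposes it) so full-screen
       mode can cover the whole display */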
3135     if (!display_disable) {
3136 #if HAVE_SDL_VIDEO_SIZE
3137         const SDL_VideoInfo *vi = SDL_GetVideoInfo();
3138         fs_screen_width = vi->current_w;
3139         fs_screen_height = vi->current_h;
3140 #endif
3141     }
3142
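    /* ignore SDL events this player never handles so they do not pile up
       in the event queue */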
3143     SDL_EventState(SDL_ACTIVEEVENT, SDL_IGNORE);
3144     SDL_EventState(SDL_SYSWMEVENT, SDL_IGNORE);
3145     SDL_EventState(SDL_USEREVENT, SDL_IGNORE);
3146
3147     if (av_lockmgr_register(lockmgr)) {
3148         fprintf(stderr, "Could not initialize lock manager!\n");
3149         do_exit(NULL);
3150     }
3151
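    /* flush_pkt is a sentinel packet queued on seeks; the decoder threads
       recognize it by its data pointer and flush their codec buffers */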
3152     av_init_packet(&flush_pkt);
3153     flush_pkt.data = (uint8_t *)"FLUSH";
3154
3155     is = stream_open(input_filename, file_iformat);
3156     if (!is) {
3157         fprintf(stderr, "Failed to initialize VideoState!\n");
3158         do_exit(NULL);
3159     }
3160
3161     event_loop(is);
3162
3163     /* never returns: event_loop() exits through do_exit() */
3164
3165     return 0;
3166 }