1 /*
2  * ffplay : Simple Media Player based on the FFmpeg libraries
3  * Copyright (c) 2003 Fabrice Bellard
4  *
5  * This file is part of FFmpeg.
6  *
7  * FFmpeg is free software; you can redistribute it and/or
8  * modify it under the terms of the GNU Lesser General Public
9  * License as published by the Free Software Foundation; either
10  * version 2.1 of the License, or (at your option) any later version.
11  *
12  * FFmpeg is distributed in the hope that it will be useful,
13  * but WITHOUT ANY WARRANTY; without even the implied warranty of
14  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
15  * Lesser General Public License for more details.
16  *
17  * You should have received a copy of the GNU Lesser General Public
18  * License along with FFmpeg; if not, write to the Free Software
19  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
20  */
21
22 #include "config.h"
23 #include <inttypes.h>
24 #include <math.h>
25 #include <limits.h>
26 #include "libavutil/avstring.h"
27 #include "libavutil/colorspace.h"
28 #include "libavutil/mathematics.h"
29 #include "libavutil/pixdesc.h"
30 #include "libavutil/imgutils.h"
31 #include "libavutil/dict.h"
32 #include "libavutil/parseutils.h"
33 #include "libavutil/samplefmt.h"
34 #include "libavutil/avassert.h"
35 #include "libavformat/avformat.h"
36 #include "libavdevice/avdevice.h"
37 #include "libswscale/swscale.h"
38 #include "libavcodec/audioconvert.h"
39 #include "libavutil/opt.h"
40 #include "libavcodec/avfft.h"
41 #include "libswresample/swresample.h"
42
43 #if CONFIG_AVFILTER
44 # include "libavfilter/avcodec.h"
45 # include "libavfilter/avfilter.h"
46 # include "libavfilter/avfiltergraph.h"
47 # include "libavfilter/buffersink.h"
48 #endif
49
50 #include <SDL.h>
51 #include <SDL_thread.h>
52
53 #include "cmdutils.h"
54
55 #include <unistd.h>
56 #include <assert.h>
57
58 const char program_name[] = "ffplay";
59 const int program_birth_year = 2003;
60
61 #define MAX_QUEUE_SIZE (15 * 1024 * 1024)
62 #define MIN_AUDIOQ_SIZE (20 * 16 * 1024)
63 #define MIN_FRAMES 5
64
65 /* SDL audio buffer size, in samples. Should be small to have precise
66    A/V sync as SDL does not have hardware buffer fullness info. */
67 #define SDL_AUDIO_BUFFER_SIZE 1024
68
69 /* no AV sync correction is done if below the AV sync threshold */
70 #define AV_SYNC_THRESHOLD 0.01
71 /* no AV correction is done if the error is too big */
72 #define AV_NOSYNC_THRESHOLD 10.0
73
74 /* maximum audio speed change to get correct sync */
75 #define SAMPLE_CORRECTION_PERCENT_MAX 10
76
77 /* we use about AUDIO_DIFF_AVG_NB A-V differences to compute the average */
78 #define AUDIO_DIFF_AVG_NB   20
79
80 /* NOTE: the size must be big enough to compensate for the hardware audio buffer size */
81 #define SAMPLE_ARRAY_SIZE (2*65536)
82
83 static int sws_flags = SWS_BICUBIC;
84
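/* Thread-safe FIFO of demuxed AVPackets, shared between the demuxing thread
 * (producer) and the decoding threads (consumers). */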
85 typedef struct PacketQueue {
86     AVPacketList *first_pkt, *last_pkt;
87     int nb_packets;
88     int size;
89     int abort_request;
90     SDL_mutex *mutex;
91     SDL_cond *cond;
92 } PacketQueue;
93
94 #define VIDEO_PICTURE_QUEUE_SIZE 2
95 #define SUBPICTURE_QUEUE_SIZE 4
96
97 typedef struct VideoPicture {
98     double pts;                                  ///<presentation time stamp for this picture
99     double duration;                             ///<expected duration of the frame
100     int64_t pos;                                 ///<byte position in file
101     int skip;
102     SDL_Overlay *bmp;
103     int width, height; /* source height & width */
104     int allocated;
105     int reallocate;
106     enum PixelFormat pix_fmt;
107
108 #if CONFIG_AVFILTER
109     AVFilterBufferRef *picref;
110 #endif
111 } VideoPicture;
112
113 typedef struct SubPicture {
114     double pts; /* presentation time stamp for this picture */
115     AVSubtitle sub;
116 } SubPicture;
117
118 enum {
119     AV_SYNC_AUDIO_MASTER, /* default choice */
120     AV_SYNC_VIDEO_MASTER,
121     AV_SYNC_EXTERNAL_CLOCK, /* synchronize to an external clock */
122 };
123
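/* Global player state: demuxer context, per-stream decoding state, the
 * packet/picture/subtitle queues and the clocks used for A/V synchronization. */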
124 typedef struct VideoState {
125     SDL_Thread *read_tid;
126     SDL_Thread *video_tid;
127     SDL_Thread *refresh_tid;
128     AVInputFormat *iformat;
129     int no_background;
130     int abort_request;
131     int paused;
132     int last_paused;
133     int seek_req;
134     int seek_flags;
135     int64_t seek_pos;
136     int64_t seek_rel;
137     int read_pause_return;
138     AVFormatContext *ic;
139
140     int audio_stream;
141
142     int av_sync_type;
143     double external_clock; /* external clock base */
144     int64_t external_clock_time;
145
146     double audio_clock;
147     double audio_diff_cum; /* used for AV difference average computation */
148     double audio_diff_avg_coef;
149     double audio_diff_threshold;
150     int audio_diff_avg_count;
151     AVStream *audio_st;
152     PacketQueue audioq;
153     int audio_hw_buf_size;
154     DECLARE_ALIGNED(16,uint8_t,audio_buf2)[AVCODEC_MAX_AUDIO_FRAME_SIZE * 4];
155     uint8_t silence_buf[SDL_AUDIO_BUFFER_SIZE];
156     uint8_t *audio_buf;
157     uint8_t *audio_buf1;
158     unsigned int audio_buf_size; /* in bytes */
159     int audio_buf_index; /* in bytes */
160     int audio_write_buf_size;
161     AVPacket audio_pkt_temp;
162     AVPacket audio_pkt;
163     enum AVSampleFormat audio_src_fmt;
164     enum AVSampleFormat audio_tgt_fmt;
165     int audio_src_channels;
166     int audio_tgt_channels;
167     int64_t audio_src_channel_layout;
168     int64_t audio_tgt_channel_layout;
169     int audio_src_freq;
170     int audio_tgt_freq;
171     struct SwrContext *swr_ctx;
172     double audio_current_pts;
173     double audio_current_pts_drift;
174     int frame_drops_early;
175     int frame_drops_late;
176     AVFrame *frame;
177
178     enum ShowMode {
179         SHOW_MODE_NONE = -1, SHOW_MODE_VIDEO = 0, SHOW_MODE_WAVES, SHOW_MODE_RDFT, SHOW_MODE_NB
180     } show_mode;
181     int16_t sample_array[SAMPLE_ARRAY_SIZE];
182     int sample_array_index;
183     int last_i_start;
184     RDFTContext *rdft;
185     int rdft_bits;
186     FFTSample *rdft_data;
187     int xpos;
188
189     SDL_Thread *subtitle_tid;
190     int subtitle_stream;
191     int subtitle_stream_changed;
192     AVStream *subtitle_st;
193     PacketQueue subtitleq;
194     SubPicture subpq[SUBPICTURE_QUEUE_SIZE];
195     int subpq_size, subpq_rindex, subpq_windex;
196     SDL_mutex *subpq_mutex;
197     SDL_cond *subpq_cond;
198
199     double frame_timer;
200     double frame_last_pts;
201     double frame_last_duration;
202     double frame_last_dropped_pts;
203     double frame_last_returned_time;
204     double frame_last_filter_delay;
205     int64_t frame_last_dropped_pos;
206     double video_clock;                          ///<pts of last decoded frame / predicted pts of next decoded frame
207     int video_stream;
208     AVStream *video_st;
209     PacketQueue videoq;
210     double video_current_pts;                    ///<current displayed pts (different from video_clock if frame fifos are used)
211     double video_current_pts_drift;              ///<video_current_pts - time (av_gettime) at which we updated video_current_pts - used to have running video pts
212     int64_t video_current_pos;                   ///<current displayed file pos
213     VideoPicture pictq[VIDEO_PICTURE_QUEUE_SIZE];
214     int pictq_size, pictq_rindex, pictq_windex;
215     SDL_mutex *pictq_mutex;
216     SDL_cond *pictq_cond;
217 #if !CONFIG_AVFILTER
218     struct SwsContext *img_convert_ctx;
219 #endif
220
221     char filename[1024];
222     int width, height, xleft, ytop;
223     int step;
224
225 #if CONFIG_AVFILTER
226     AVFilterContext *out_video_filter;          ///<the last filter in the video chain
227 #endif
228
229     int refresh;
230 } VideoState;
231
232 static int opt_help(const char *opt, const char *arg);
233
234 /* options specified by the user */
235 static AVInputFormat *file_iformat;
236 static const char *input_filename;
237 static const char *window_title;
238 static int fs_screen_width;
239 static int fs_screen_height;
240 static int screen_width = 0;
241 static int screen_height = 0;
242 static int audio_disable;
243 static int video_disable;
244 static int wanted_stream[AVMEDIA_TYPE_NB]={
245     [AVMEDIA_TYPE_AUDIO]=-1,
246     [AVMEDIA_TYPE_VIDEO]=-1,
247     [AVMEDIA_TYPE_SUBTITLE]=-1,
248 };
249 static int seek_by_bytes=-1;
250 static int display_disable;
251 static int show_status = 1;
252 static int av_sync_type = AV_SYNC_AUDIO_MASTER;
253 static int64_t start_time = AV_NOPTS_VALUE;
254 static int64_t duration = AV_NOPTS_VALUE;
255 static int workaround_bugs = 1;
256 static int fast = 0;
257 static int genpts = 0;
258 static int lowres = 0;
259 static int idct = FF_IDCT_AUTO;
260 static enum AVDiscard skip_frame= AVDISCARD_DEFAULT;
261 static enum AVDiscard skip_idct= AVDISCARD_DEFAULT;
262 static enum AVDiscard skip_loop_filter= AVDISCARD_DEFAULT;
263 static int error_recognition = FF_ER_CAREFUL;
264 static int error_concealment = 3;
265 static int decoder_reorder_pts= -1;
266 static int autoexit;
267 static int exit_on_keydown;
268 static int exit_on_mousedown;
269 static int loop=1;
270 static int framedrop=-1;
271 static enum ShowMode show_mode = SHOW_MODE_NONE;
272 static const char *audio_codec_name;
273 static const char *subtitle_codec_name;
274 static const char *video_codec_name;
275
276 static int rdftspeed=20;
277 #if CONFIG_AVFILTER
278 static char *vfilters = NULL;
279 #endif
280
281 /* current context */
282 static int is_full_screen;
283 static int64_t audio_callback_time;
284
285 static AVPacket flush_pkt;
286
287 #define FF_ALLOC_EVENT   (SDL_USEREVENT)
288 #define FF_REFRESH_EVENT (SDL_USEREVENT + 1)
289 #define FF_QUIT_EVENT    (SDL_USEREVENT + 2)
290
291 static SDL_Surface *screen;
292
293 void av_noreturn exit_program(int ret)
294 {
295     exit(ret);
296 }
297
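/* Append a packet to the queue (duplicating its data unless it is the special
 * flush packet) and wake up one waiting consumer. */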
298 static int packet_queue_put(PacketQueue *q, AVPacket *pkt)
299 {
300     AVPacketList *pkt1;
301
302     /* duplicate the packet */
303     if (pkt!=&flush_pkt && av_dup_packet(pkt) < 0)
304         return -1;
305
306     pkt1 = av_malloc(sizeof(AVPacketList));
307     if (!pkt1)
308         return -1;
309     pkt1->pkt = *pkt;
310     pkt1->next = NULL;
311
312
313     SDL_LockMutex(q->mutex);
314
315     if (!q->last_pkt)
316
317         q->first_pkt = pkt1;
318     else
319         q->last_pkt->next = pkt1;
320     q->last_pkt = pkt1;
321     q->nb_packets++;
322     q->size += pkt1->pkt.size + sizeof(*pkt1);
323     /* XXX: should duplicate packet data in DV case */
324     SDL_CondSignal(q->cond);
325
326     SDL_UnlockMutex(q->mutex);
327     return 0;
328 }
329
330 /* packet queue handling */
331 static void packet_queue_init(PacketQueue *q)
332 {
333     memset(q, 0, sizeof(PacketQueue));
334     q->mutex = SDL_CreateMutex();
335     q->cond = SDL_CreateCond();
336     packet_queue_put(q, &flush_pkt);
337 }
338
339 static void packet_queue_flush(PacketQueue *q)
340 {
341     AVPacketList *pkt, *pkt1;
342
343     SDL_LockMutex(q->mutex);
344     for(pkt = q->first_pkt; pkt != NULL; pkt = pkt1) {
345         pkt1 = pkt->next;
346         av_free_packet(&pkt->pkt);
347         av_freep(&pkt);
348     }
349     q->last_pkt = NULL;
350     q->first_pkt = NULL;
351     q->nb_packets = 0;
352     q->size = 0;
353     SDL_UnlockMutex(q->mutex);
354 }
355
356 static void packet_queue_end(PacketQueue *q)
357 {
358     packet_queue_flush(q);
359     SDL_DestroyMutex(q->mutex);
360     SDL_DestroyCond(q->cond);
361 }
362
363 static void packet_queue_abort(PacketQueue *q)
364 {
365     SDL_LockMutex(q->mutex);
366
367     q->abort_request = 1;
368
369     SDL_CondSignal(q->cond);
370
371     SDL_UnlockMutex(q->mutex);
372 }
373
374 /* return < 0 if aborted, 0 if no packet and > 0 if packet.  */
375 static int packet_queue_get(PacketQueue *q, AVPacket *pkt, int block)
376 {
377     AVPacketList *pkt1;
378     int ret;
379
380     SDL_LockMutex(q->mutex);
381
382     for(;;) {
383         if (q->abort_request) {
384             ret = -1;
385             break;
386         }
387
388         pkt1 = q->first_pkt;
389         if (pkt1) {
390             q->first_pkt = pkt1->next;
391             if (!q->first_pkt)
392                 q->last_pkt = NULL;
393             q->nb_packets--;
394             q->size -= pkt1->pkt.size + sizeof(*pkt1);
395             *pkt = pkt1->pkt;
396             av_free(pkt1);
397             ret = 1;
398             break;
399         } else if (!block) {
400             ret = 0;
401             break;
402         } else {
403             SDL_CondWait(q->cond, q->mutex);
404         }
405     }
406     SDL_UnlockMutex(q->mutex);
407     return ret;
408 }
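
/* Typical usage (a sketch, not a literal excerpt from this file): the demuxing
 * thread calls packet_queue_put() for every packet it reads, while a decoder
 * loops on packet_queue_get(q, &pkt, 1) until it returns < 0 on abort. */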
409
410 static inline void fill_rectangle(SDL_Surface *screen,
411                                   int x, int y, int w, int h, int color)
412 {
413     SDL_Rect rect;
414     rect.x = x;
415     rect.y = y;
416     rect.w = w;
417     rect.h = h;
418     SDL_FillRect(screen, &rect, color);
419 }
420
421 #define ALPHA_BLEND(a, oldp, newp, s)\
422 ((((oldp << s) * (255 - (a))) + (newp * (a))) / (255 << s))
423
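/* Linear blend of oldp and newp weighted by the alpha value a (0..255), where
 * newp is expected to be the sum of 2^s source samples;
 * e.g. ALPHA_BLEND(255, oldp, newp, 0) == newp and ALPHA_BLEND(0, oldp, newp, 0) == oldp. */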
424 #define RGBA_IN(r, g, b, a, s)\
425 {\
426     unsigned int v = ((const uint32_t *)(s))[0];\
427     a = (v >> 24) & 0xff;\
428     r = (v >> 16) & 0xff;\
429     g = (v >> 8) & 0xff;\
430     b = v & 0xff;\
431 }
432
433 #define YUVA_IN(y, u, v, a, s, pal)\
434 {\
435     unsigned int val = ((const uint32_t *)(pal))[*(const uint8_t*)(s)];\
436     a = (val >> 24) & 0xff;\
437     y = (val >> 16) & 0xff;\
438     u = (val >> 8) & 0xff;\
439     v = val & 0xff;\
440 }
441
442 #define YUVA_OUT(d, y, u, v, a)\
443 {\
444     ((uint32_t *)(d))[0] = (a << 24) | (y << 16) | (u << 8) | v;\
445 }
446
447
448 #define BPP 1
449
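/* Alpha-blend one palettized subtitle rectangle (PAL8 data with a 32-bit AYUV
 * palette) onto the destination YUV420 picture, handling the 2x2 chroma
 * subsampling and the odd edge rows/columns separately. */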
450 static void blend_subrect(AVPicture *dst, const AVSubtitleRect *rect, int imgw, int imgh)
451 {
452     int wrap, wrap3, width2, skip2;
453     int y, u, v, a, u1, v1, a1, w, h;
454     uint8_t *lum, *cb, *cr;
455     const uint8_t *p;
456     const uint32_t *pal;
457     int dstx, dsty, dstw, dsth;
458
459     dstw = av_clip(rect->w, 0, imgw);
460     dsth = av_clip(rect->h, 0, imgh);
461     dstx = av_clip(rect->x, 0, imgw - dstw);
462     dsty = av_clip(rect->y, 0, imgh - dsth);
463     lum = dst->data[0] + dsty * dst->linesize[0];
464     cb = dst->data[1] + (dsty >> 1) * dst->linesize[1];
465     cr = dst->data[2] + (dsty >> 1) * dst->linesize[2];
466
467     width2 = ((dstw + 1) >> 1) + (dstx & ~dstw & 1);
468     skip2 = dstx >> 1;
469     wrap = dst->linesize[0];
470     wrap3 = rect->pict.linesize[0];
471     p = rect->pict.data[0];
472     pal = (const uint32_t *)rect->pict.data[1];  /* Now in YCrCb! */
473
474     if (dsty & 1) {
475         lum += dstx;
476         cb += skip2;
477         cr += skip2;
478
479         if (dstx & 1) {
480             YUVA_IN(y, u, v, a, p, pal);
481             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
482             cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
483             cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
484             cb++;
485             cr++;
486             lum++;
487             p += BPP;
488         }
489         for(w = dstw - (dstx & 1); w >= 2; w -= 2) {
490             YUVA_IN(y, u, v, a, p, pal);
491             u1 = u;
492             v1 = v;
493             a1 = a;
494             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
495
496             YUVA_IN(y, u, v, a, p + BPP, pal);
497             u1 += u;
498             v1 += v;
499             a1 += a;
500             lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
501             cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
502             cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
503             cb++;
504             cr++;
505             p += 2 * BPP;
506             lum += 2;
507         }
508         if (w) {
509             YUVA_IN(y, u, v, a, p, pal);
510             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
511             cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
512             cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
513             p++;
514             lum++;
515         }
516         p += wrap3 - dstw * BPP;
517         lum += wrap - dstw - dstx;
518         cb += dst->linesize[1] - width2 - skip2;
519         cr += dst->linesize[2] - width2 - skip2;
520     }
521     for(h = dsth - (dsty & 1); h >= 2; h -= 2) {
522         lum += dstx;
523         cb += skip2;
524         cr += skip2;
525
526         if (dstx & 1) {
527             YUVA_IN(y, u, v, a, p, pal);
528             u1 = u;
529             v1 = v;
530             a1 = a;
531             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
532             p += wrap3;
533             lum += wrap;
534             YUVA_IN(y, u, v, a, p, pal);
535             u1 += u;
536             v1 += v;
537             a1 += a;
538             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
539             cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
540             cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
541             cb++;
542             cr++;
543             p += -wrap3 + BPP;
544             lum += -wrap + 1;
545         }
546         for(w = dstw - (dstx & 1); w >= 2; w -= 2) {
547             YUVA_IN(y, u, v, a, p, pal);
548             u1 = u;
549             v1 = v;
550             a1 = a;
551             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
552
553             YUVA_IN(y, u, v, a, p + BPP, pal);
554             u1 += u;
555             v1 += v;
556             a1 += a;
557             lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
558             p += wrap3;
559             lum += wrap;
560
561             YUVA_IN(y, u, v, a, p, pal);
562             u1 += u;
563             v1 += v;
564             a1 += a;
565             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
566
567             YUVA_IN(y, u, v, a, p + BPP, pal);
568             u1 += u;
569             v1 += v;
570             a1 += a;
571             lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
572
573             cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 2);
574             cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 2);
575
576             cb++;
577             cr++;
578             p += -wrap3 + 2 * BPP;
579             lum += -wrap + 2;
580         }
581         if (w) {
582             YUVA_IN(y, u, v, a, p, pal);
583             u1 = u;
584             v1 = v;
585             a1 = a;
586             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
587             p += wrap3;
588             lum += wrap;
589             YUVA_IN(y, u, v, a, p, pal);
590             u1 += u;
591             v1 += v;
592             a1 += a;
593             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
594             cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
595             cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
596             cb++;
597             cr++;
598             p += -wrap3 + BPP;
599             lum += -wrap + 1;
600         }
601         p += wrap3 + (wrap3 - dstw * BPP);
602         lum += wrap + (wrap - dstw - dstx);
603         cb += dst->linesize[1] - width2 - skip2;
604         cr += dst->linesize[2] - width2 - skip2;
605     }
606     /* handle odd height */
607     if (h) {
608         lum += dstx;
609         cb += skip2;
610         cr += skip2;
611
612         if (dstx & 1) {
613             YUVA_IN(y, u, v, a, p, pal);
614             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
615             cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
616             cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
617             cb++;
618             cr++;
619             lum++;
620             p += BPP;
621         }
622         for(w = dstw - (dstx & 1); w >= 2; w -= 2) {
623             YUVA_IN(y, u, v, a, p, pal);
624             u1 = u;
625             v1 = v;
626             a1 = a;
627             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
628
629             YUVA_IN(y, u, v, a, p + BPP, pal);
630             u1 += u;
631             v1 += v;
632             a1 += a;
633             lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
634             cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
635             cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
636             cb++;
637             cr++;
638             p += 2 * BPP;
639             lum += 2;
640         }
641         if (w) {
642             YUVA_IN(y, u, v, a, p, pal);
643             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
644             cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
645             cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
646         }
647     }
648 }
649
650 static void free_subpicture(SubPicture *sp)
651 {
652     avsubtitle_free(&sp->sub);
653 }
654
655 static void video_image_display(VideoState *is)
656 {
657     VideoPicture *vp;
658     SubPicture *sp;
659     AVPicture pict;
660     float aspect_ratio;
661     int width, height, x, y;
662     SDL_Rect rect;
663     int i;
664
665     vp = &is->pictq[is->pictq_rindex];
666     if (vp->bmp) {
667 #if CONFIG_AVFILTER
668          if (vp->picref->video->sample_aspect_ratio.num == 0)
669              aspect_ratio = 0;
670          else
671              aspect_ratio = av_q2d(vp->picref->video->sample_aspect_ratio);
672 #else
673
674         /* XXX: use variable in the frame */
675         if (is->video_st->sample_aspect_ratio.num)
676             aspect_ratio = av_q2d(is->video_st->sample_aspect_ratio);
677         else if (is->video_st->codec->sample_aspect_ratio.num)
678             aspect_ratio = av_q2d(is->video_st->codec->sample_aspect_ratio);
679         else
680             aspect_ratio = 0;
681 #endif
682         if (aspect_ratio <= 0.0)
683             aspect_ratio = 1.0;
684         aspect_ratio *= (float)vp->width / (float)vp->height;
685
686         if (is->subtitle_st) {
687             if (is->subpq_size > 0) {
688                 sp = &is->subpq[is->subpq_rindex];
689
690                 if (vp->pts >= sp->pts + ((float) sp->sub.start_display_time / 1000)) {
691                     SDL_LockYUVOverlay (vp->bmp);
692
693                     pict.data[0] = vp->bmp->pixels[0];
694                     pict.data[1] = vp->bmp->pixels[2];
695                     pict.data[2] = vp->bmp->pixels[1];
696
697                     pict.linesize[0] = vp->bmp->pitches[0];
698                     pict.linesize[1] = vp->bmp->pitches[2];
699                     pict.linesize[2] = vp->bmp->pitches[1];
700
701                     for (i = 0; i < sp->sub.num_rects; i++)
702                         blend_subrect(&pict, sp->sub.rects[i],
703                                       vp->bmp->w, vp->bmp->h);
704
705                     SDL_UnlockYUVOverlay (vp->bmp);
706                 }
707             }
708         }
709
710
711         /* XXX: we assume the screen has a 1:1 pixel aspect ratio */
712         height = is->height;
713         width = ((int)rint(height * aspect_ratio)) & ~1;
714         if (width > is->width) {
715             width = is->width;
716             height = ((int)rint(width / aspect_ratio)) & ~1;
717         }
718         x = (is->width - width) / 2;
719         y = (is->height - height) / 2;
720         is->no_background = 0;
721         rect.x = is->xleft + x;
722         rect.y = is->ytop  + y;
723         rect.w = FFMAX(width,  1);
724         rect.h = FFMAX(height, 1);
725         SDL_DisplayYUVOverlay(vp->bmp, &rect);
726     }
727 }
728
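/* Mathematical modulo: the result is always in [0, b), even for negative a;
 * e.g. compute_mod(-3, 10) == 7. */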
729 static inline int compute_mod(int a, int b)
730 {
731     return a < 0 ? a%b + b : a%b;
732 }
733
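/* Audio-only visualization: draw either the waveform of the recently played
 * samples (SHOW_MODE_WAVES) or a scrolling RDFT spectrogram (SHOW_MODE_RDFT)
 * directly onto the SDL surface. */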
734 static void video_audio_display(VideoState *s)
735 {
736     int i, i_start, x, y1, y, ys, delay, n, nb_display_channels;
737     int ch, channels, h, h2, bgcolor, fgcolor;
738     int64_t time_diff;
739     int rdft_bits, nb_freq;
740
741     for(rdft_bits=1; (1<<rdft_bits)<2*s->height; rdft_bits++)
742         ;
743     nb_freq= 1<<(rdft_bits-1);
744
745     /* compute display index: center on currently output samples */
746     channels = s->audio_tgt_channels;
747     nb_display_channels = channels;
748     if (!s->paused) {
749         int data_used= s->show_mode == SHOW_MODE_WAVES ? s->width : (2*nb_freq);
750         n = 2 * channels;
751         delay = s->audio_write_buf_size;
752         delay /= n;
753
754         /* to be more precise, we take into account the time spent since
755            the last buffer computation */
756         if (audio_callback_time) {
757             time_diff = av_gettime() - audio_callback_time;
758             delay -= (time_diff * s->audio_tgt_freq) / 1000000;
759         }
760
761         delay += 2*data_used;
762         if (delay < data_used)
763             delay = data_used;
764
765         i_start= x = compute_mod(s->sample_array_index - delay * channels, SAMPLE_ARRAY_SIZE);
766         if (s->show_mode == SHOW_MODE_WAVES) {
767             h= INT_MIN;
768             for(i=0; i<1000; i+=channels){
769                 int idx= (SAMPLE_ARRAY_SIZE + x - i) % SAMPLE_ARRAY_SIZE;
770                 int a= s->sample_array[idx];
771                 int b= s->sample_array[(idx + 4*channels)%SAMPLE_ARRAY_SIZE];
772                 int c= s->sample_array[(idx + 5*channels)%SAMPLE_ARRAY_SIZE];
773                 int d= s->sample_array[(idx + 9*channels)%SAMPLE_ARRAY_SIZE];
774                 int score= a-d;
775                 if(h<score && (b^c)<0){
776                     h= score;
777                     i_start= idx;
778                 }
779             }
780         }
781
782         s->last_i_start = i_start;
783     } else {
784         i_start = s->last_i_start;
785     }
786
787     bgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0x00);
788     if (s->show_mode == SHOW_MODE_WAVES) {
789         fill_rectangle(screen,
790                        s->xleft, s->ytop, s->width, s->height,
791                        bgcolor);
792
793         fgcolor = SDL_MapRGB(screen->format, 0xff, 0xff, 0xff);
794
795         /* total height for one channel */
796         h = s->height / nb_display_channels;
797         /* graph height / 2 */
798         h2 = (h * 9) / 20;
799         for(ch = 0;ch < nb_display_channels; ch++) {
800             i = i_start + ch;
801             y1 = s->ytop + ch * h + (h / 2); /* position of center line */
802             for(x = 0; x < s->width; x++) {
803                 y = (s->sample_array[i] * h2) >> 15;
804                 if (y < 0) {
805                     y = -y;
806                     ys = y1 - y;
807                 } else {
808                     ys = y1;
809                 }
810                 fill_rectangle(screen,
811                                s->xleft + x, ys, 1, y,
812                                fgcolor);
813                 i += channels;
814                 if (i >= SAMPLE_ARRAY_SIZE)
815                     i -= SAMPLE_ARRAY_SIZE;
816             }
817         }
818
819         fgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0xff);
820
821         for(ch = 1;ch < nb_display_channels; ch++) {
822             y = s->ytop + ch * h;
823             fill_rectangle(screen,
824                            s->xleft, y, s->width, 1,
825                            fgcolor);
826         }
827         SDL_UpdateRect(screen, s->xleft, s->ytop, s->width, s->height);
828     }else{
829         nb_display_channels= FFMIN(nb_display_channels, 2);
830         if(rdft_bits != s->rdft_bits){
831             av_rdft_end(s->rdft);
832             av_free(s->rdft_data);
833             s->rdft = av_rdft_init(rdft_bits, DFT_R2C);
834             s->rdft_bits= rdft_bits;
835             s->rdft_data= av_malloc(4*nb_freq*sizeof(*s->rdft_data));
836         }
837         {
838             FFTSample *data[2];
839             for(ch = 0;ch < nb_display_channels; ch++) {
840                 data[ch] = s->rdft_data + 2*nb_freq*ch;
841                 i = i_start + ch;
842                 for(x = 0; x < 2*nb_freq; x++) {
843                     double w= (x-nb_freq)*(1.0/nb_freq);
844                     data[ch][x]= s->sample_array[i]*(1.0-w*w);
845                     i += channels;
846                     if (i >= SAMPLE_ARRAY_SIZE)
847                         i -= SAMPLE_ARRAY_SIZE;
848                 }
849                 av_rdft_calc(s->rdft, data[ch]);
850             }
851             // least efficient way to do this; we should of course access it directly, but it's more than fast enough
852             for(y=0; y<s->height; y++){
853                 double w= 1/sqrt(nb_freq);
854                 int a= sqrt(w*sqrt(data[0][2*y+0]*data[0][2*y+0] + data[0][2*y+1]*data[0][2*y+1]));
855                 int b= (nb_display_channels == 2 ) ? sqrt(w*sqrt(data[1][2*y+0]*data[1][2*y+0]
856                        + data[1][2*y+1]*data[1][2*y+1])) : a;
857                 a= FFMIN(a,255);
858                 b= FFMIN(b,255);
859                 fgcolor = SDL_MapRGB(screen->format, a, b, (a+b)/2);
860
861                 fill_rectangle(screen,
862                             s->xpos, s->height-y, 1, 1,
863                             fgcolor);
864             }
865         }
866         SDL_UpdateRect(screen, s->xpos, s->ytop, 1, s->height);
867         s->xpos++;
868         if(s->xpos >= s->width)
869             s->xpos= s->xleft;
870     }
871 }
872
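/* Tear down a VideoState: stop the read and refresh threads, free all queued
 * pictures and the SDL mutexes/condition variables. */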
873 static void stream_close(VideoState *is)
874 {
875     VideoPicture *vp;
876     int i;
877     /* XXX: use a special url_shutdown call to abort parse cleanly */
878     is->abort_request = 1;
879     SDL_WaitThread(is->read_tid, NULL);
880     SDL_WaitThread(is->refresh_tid, NULL);
881
882     /* free all pictures */
883     for(i=0;i<VIDEO_PICTURE_QUEUE_SIZE; i++) {
884         vp = &is->pictq[i];
885 #if CONFIG_AVFILTER
886         if (vp->picref) {
887             avfilter_unref_buffer(vp->picref);
888             vp->picref = NULL;
889         }
890 #endif
891         if (vp->bmp) {
892             SDL_FreeYUVOverlay(vp->bmp);
893             vp->bmp = NULL;
894         }
895     }
896     SDL_DestroyMutex(is->pictq_mutex);
897     SDL_DestroyCond(is->pictq_cond);
898     SDL_DestroyMutex(is->subpq_mutex);
899     SDL_DestroyCond(is->subpq_cond);
900 #if !CONFIG_AVFILTER
901     if (is->img_convert_ctx)
902         sws_freeContext(is->img_convert_ctx);
903 #endif
904     av_free(is);
905 }
906
907 static void do_exit(VideoState *is)
908 {
909     if (is) {
910         stream_close(is);
911     }
912     av_lockmgr_register(NULL);
913     uninit_opts();
914 #if CONFIG_AVFILTER
915     avfilter_uninit();
916 #endif
917     avformat_network_deinit();
918     if (show_status)
919         printf("\n");
920     SDL_Quit();
921     av_log(NULL, AV_LOG_QUIET, "%s", "");
922     exit(0);
923 }
924
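/* (Re)open the SDL video surface. The window size is taken, in order of
 * preference, from the command-line options, the filter chain / codec
 * dimensions, or a 640x480 fallback. */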
925 static int video_open(VideoState *is, int force_set_video_mode){
926     int flags = SDL_HWSURFACE|SDL_ASYNCBLIT|SDL_HWACCEL;
927     int w,h;
928
929     if(is_full_screen) flags |= SDL_FULLSCREEN;
930     else               flags |= SDL_RESIZABLE;
931
932     if (is_full_screen && fs_screen_width) {
933         w = fs_screen_width;
934         h = fs_screen_height;
935     } else if(!is_full_screen && screen_width){
936         w = screen_width;
937         h = screen_height;
938 #if CONFIG_AVFILTER
939     }else if (is->out_video_filter && is->out_video_filter->inputs[0]){
940         w = is->out_video_filter->inputs[0]->w;
941         h = is->out_video_filter->inputs[0]->h;
942 #else
943     }else if (is->video_st && is->video_st->codec->width){
944         w = is->video_st->codec->width;
945         h = is->video_st->codec->height;
946 #endif
947     } else {
948         w = 640;
949         h = 480;
950     }
951     if(screen && is->width == screen->w && screen->w == w
952        && is->height== screen->h && screen->h == h && !force_set_video_mode)
953         return 0;
954     screen = SDL_SetVideoMode(w, h, 0, flags);
955     if (!screen) {
956         fprintf(stderr, "SDL: could not set video mode - exiting\n");
957         do_exit(is);
958     }
959     if (!window_title)
960         window_title = input_filename;
961     SDL_WM_SetCaption(window_title, window_title);
962
963     is->width = screen->w;
964     is->height = screen->h;
965
966     return 0;
967 }
968
969 /* display the current picture, if any */
970 static void video_display(VideoState *is)
971 {
972     if(!screen)
973         video_open(is, 0);
974     if (is->audio_st && is->show_mode != SHOW_MODE_VIDEO)
975         video_audio_display(is);
976     else if (is->video_st)
977         video_image_display(is);
978 }
979
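/* Periodically push an FF_REFRESH_EVENT so the main loop redraws the display;
 * the polling interval is rdftspeed ms in audio-visualization mode and 5 ms
 * otherwise. */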
980 static int refresh_thread(void *opaque)
981 {
982     VideoState *is= opaque;
983     while(!is->abort_request){
984         SDL_Event event;
985         event.type = FF_REFRESH_EVENT;
986         event.user.data1 = opaque;
987         if(!is->refresh){
988             is->refresh=1;
989             SDL_PushEvent(&event);
990         }
991         // FIXME: ideally we should wait the correct time, but SDL's event passing is so slow it would be silly
992         usleep(is->audio_st && is->show_mode != SHOW_MODE_VIDEO ? rdftspeed*1000 : 5000);
993     }
994     return 0;
995 }
996
997 /* get the current audio clock value */
998 static double get_audio_clock(VideoState *is)
999 {
1000     if (is->paused) {
1001         return is->audio_current_pts;
1002     } else {
1003         return is->audio_current_pts_drift + av_gettime() / 1000000.0;
1004     }
1005 }
1006
1007 /* get the current video clock value */
1008 static double get_video_clock(VideoState *is)
1009 {
1010     if (is->paused) {
1011         return is->video_current_pts;
1012     } else {
1013         return is->video_current_pts_drift + av_gettime() / 1000000.0;
1014     }
1015 }
1016
1017 /* get the current external clock value */
1018 static double get_external_clock(VideoState *is)
1019 {
1020     int64_t ti;
1021     ti = av_gettime();
1022     return is->external_clock + ((ti - is->external_clock_time) * 1e-6);
1023 }
1024
1025 /* get the current master clock value */
1026 static double get_master_clock(VideoState *is)
1027 {
1028     double val;
1029
1030     if (is->av_sync_type == AV_SYNC_VIDEO_MASTER) {
1031         if (is->video_st)
1032             val = get_video_clock(is);
1033         else
1034             val = get_audio_clock(is);
1035     } else if (is->av_sync_type == AV_SYNC_AUDIO_MASTER) {
1036         if (is->audio_st)
1037             val = get_audio_clock(is);
1038         else
1039             val = get_video_clock(is);
1040     } else {
1041         val = get_external_clock(is);
1042     }
1043     return val;
1044 }
1045
1046 /* seek in the stream */
1047 static void stream_seek(VideoState *is, int64_t pos, int64_t rel, int seek_by_bytes)
1048 {
1049     if (!is->seek_req) {
1050         is->seek_pos = pos;
1051         is->seek_rel = rel;
1052         is->seek_flags &= ~AVSEEK_FLAG_BYTE;
1053         if (seek_by_bytes)
1054             is->seek_flags |= AVSEEK_FLAG_BYTE;
1055         is->seek_req = 1;
1056     }
1057 }
1058
1059 /* pause or resume the video */
1060 static void stream_toggle_pause(VideoState *is)
1061 {
1062     if (is->paused) {
1063         is->frame_timer += av_gettime() / 1000000.0 + is->video_current_pts_drift - is->video_current_pts;
1064         if(is->read_pause_return != AVERROR(ENOSYS)){
1065             is->video_current_pts = is->video_current_pts_drift + av_gettime() / 1000000.0;
1066         }
1067         is->video_current_pts_drift = is->video_current_pts - av_gettime() / 1000000.0;
1068     }
1069     is->paused = !is->paused;
1070 }
1071
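/* Adjust the nominal frame delay so the video clock follows the master clock:
 * the delay is cut to zero when video lags behind and doubled when it runs
 * ahead, but only while the A-V difference stays below AV_NOSYNC_THRESHOLD. */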
1072 static double compute_target_delay(double delay, VideoState *is)
1073 {
1074     double sync_threshold, diff = 0;
1075
1076     /* update delay to follow master synchronisation source */
1077     if (((is->av_sync_type == AV_SYNC_AUDIO_MASTER && is->audio_st) ||
1078          is->av_sync_type == AV_SYNC_EXTERNAL_CLOCK)) {
1079         /* if video is slave, we try to correct big delays by
1080            duplicating or deleting a frame */
1081         diff = get_video_clock(is) - get_master_clock(is);
1082
1083         /* skip or repeat frame. We take into account the
1084            delay to compute the threshold. I still don't know
1085            if it is the best guess */
1086         sync_threshold = FFMAX(AV_SYNC_THRESHOLD, delay);
1087         if (fabs(diff) < AV_NOSYNC_THRESHOLD) {
1088             if (diff <= -sync_threshold)
1089                 delay = 0;
1090             else if (diff >= sync_threshold)
1091                 delay = 2 * delay;
1092         }
1093     }
1094
1095     av_dlog(NULL, "video: delay=%0.3f A-V=%f\n",
1096             delay, -diff);
1097
1098     return delay;
1099 }
1100
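/* Release the current read slot of the picture queue and wake up the decoder
 * thread waiting for free space. */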
1101 static void pictq_next_picture(VideoState *is) {
1102     /* update queue size and signal for next picture */
1103     if (++is->pictq_rindex == VIDEO_PICTURE_QUEUE_SIZE)
1104         is->pictq_rindex = 0;
1105
1106     SDL_LockMutex(is->pictq_mutex);
1107     is->pictq_size--;
1108     SDL_CondSignal(is->pictq_cond);
1109     SDL_UnlockMutex(is->pictq_mutex);
1110 }
1111
1112 static void update_video_pts(VideoState *is, double pts, int64_t pos) {
1113     double time = av_gettime() / 1000000.0;
1114     /* update current video pts */
1115     is->video_current_pts = pts;
1116     is->video_current_pts_drift = is->video_current_pts - time;
1117     is->video_current_pos = pos;
1118     is->frame_last_pts = pts;
1119 }
1120
1121 /* called to display each frame */
1122 static void video_refresh(void *opaque)
1123 {
1124     VideoState *is = opaque;
1125     VideoPicture *vp;
1126     double time;
1127
1128     SubPicture *sp, *sp2;
1129
1130     if (is->video_st) {
1131 retry:
1132         if (is->pictq_size == 0) {
1133             SDL_LockMutex(is->pictq_mutex);
1134             if (is->frame_last_dropped_pts != AV_NOPTS_VALUE && is->frame_last_dropped_pts > is->frame_last_pts) {
1135                 update_video_pts(is, is->frame_last_dropped_pts, is->frame_last_dropped_pos);
1136                 is->frame_last_dropped_pts = AV_NOPTS_VALUE;
1137             }
1138             SDL_UnlockMutex(is->pictq_mutex);
1139             // nothing to do, no picture to display in the queue
1140         } else {
1141             double last_duration, duration, delay;
1142             /* dequeue the picture */
1143             vp = &is->pictq[is->pictq_rindex];
1144
1145             if (vp->skip) {
1146                 pictq_next_picture(is);
1147                 goto retry;
1148             }
1149
1150             /* compute nominal last_duration */
1151             last_duration = vp->pts - is->frame_last_pts;
1152             if (last_duration > 0 && last_duration < 10.0) {
1153                 /* if duration of the last frame was sane, update last_duration in video state */
1154                 is->frame_last_duration = last_duration;
1155             }
1156             delay = compute_target_delay(is->frame_last_duration, is);
1157
1158             time= av_gettime()/1000000.0;
1159             if(time < is->frame_timer + delay)
1160                 return;
1161
1162             if (delay > 0)
1163                 is->frame_timer += delay * FFMAX(1, floor((time-is->frame_timer) / delay));
1164
1165             SDL_LockMutex(is->pictq_mutex);
1166             update_video_pts(is, vp->pts, vp->pos);
1167             SDL_UnlockMutex(is->pictq_mutex);
1168
1169             if(is->pictq_size > 1) {
1170                  VideoPicture *nextvp= &is->pictq[(is->pictq_rindex+1)%VIDEO_PICTURE_QUEUE_SIZE];
1171                  duration = nextvp->pts - vp->pts; // More accurate this way; 1/time_base often does not reflect the real FPS
1172             } else {
1173                  duration = vp->duration;
1174             }
1175
1176             if((framedrop>0 || (framedrop && is->audio_st)) && time > is->frame_timer + duration){
1177                 if(is->pictq_size > 1){
1178                     is->frame_drops_late++;
1179                     pictq_next_picture(is);
1180                     goto retry;
1181                 }
1182             }
1183
1184             if(is->subtitle_st) {
1185                 if (is->subtitle_stream_changed) {
1186                     SDL_LockMutex(is->subpq_mutex);
1187
1188                     while (is->subpq_size) {
1189                         free_subpicture(&is->subpq[is->subpq_rindex]);
1190
1191                         /* update queue size and signal for next picture */
1192                         if (++is->subpq_rindex == SUBPICTURE_QUEUE_SIZE)
1193                             is->subpq_rindex = 0;
1194
1195                         is->subpq_size--;
1196                     }
1197                     is->subtitle_stream_changed = 0;
1198
1199                     SDL_CondSignal(is->subpq_cond);
1200                     SDL_UnlockMutex(is->subpq_mutex);
1201                 } else {
1202                     if (is->subpq_size > 0) {
1203                         sp = &is->subpq[is->subpq_rindex];
1204
1205                         if (is->subpq_size > 1)
1206                             sp2 = &is->subpq[(is->subpq_rindex + 1) % SUBPICTURE_QUEUE_SIZE];
1207                         else
1208                             sp2 = NULL;
1209
1210                         if ((is->video_current_pts > (sp->pts + ((float) sp->sub.end_display_time / 1000)))
1211                                 || (sp2 && is->video_current_pts > (sp2->pts + ((float) sp2->sub.start_display_time / 1000))))
1212                         {
1213                             free_subpicture(sp);
1214
1215                             /* update queue size and signal for next picture */
1216                             if (++is->subpq_rindex == SUBPICTURE_QUEUE_SIZE)
1217                                 is->subpq_rindex = 0;
1218
1219                             SDL_LockMutex(is->subpq_mutex);
1220                             is->subpq_size--;
1221                             SDL_CondSignal(is->subpq_cond);
1222                             SDL_UnlockMutex(is->subpq_mutex);
1223                         }
1224                     }
1225                 }
1226             }
1227
1228             /* display picture */
1229             if (!display_disable)
1230                 video_display(is);
1231
1232             pictq_next_picture(is);
1233         }
1234     } else if (is->audio_st) {
1235         /* draw the next audio frame */
1236
1237         /* if there is only an audio stream, then display the audio bars (better
1238            than nothing, just to test the implementation) */
1239
1240         /* display picture */
1241         if (!display_disable)
1242             video_display(is);
1243     }
1244     if (show_status) {
1245         static int64_t last_time;
1246         int64_t cur_time;
1247         int aqsize, vqsize, sqsize;
1248         double av_diff;
1249
1250         cur_time = av_gettime();
1251         if (!last_time || (cur_time - last_time) >= 30000) {
1252             aqsize = 0;
1253             vqsize = 0;
1254             sqsize = 0;
1255             if (is->audio_st)
1256                 aqsize = is->audioq.size;
1257             if (is->video_st)
1258                 vqsize = is->videoq.size;
1259             if (is->subtitle_st)
1260                 sqsize = is->subtitleq.size;
1261             av_diff = 0;
1262             if (is->audio_st && is->video_st)
1263                 av_diff = get_audio_clock(is) - get_video_clock(is);
1264             printf("%7.2f A-V:%7.3f fd=%4d aq=%5dKB vq=%5dKB sq=%5dB f=%"PRId64"/%"PRId64"   \r",
1265                    get_master_clock(is),
1266                    av_diff,
1267                    is->frame_drops_early + is->frame_drops_late,
1268                    aqsize / 1024,
1269                    vqsize / 1024,
1270                    sqsize,
1271                    is->video_st ? is->video_st->codec->pts_correction_num_faulty_dts : 0,
1272                    is->video_st ? is->video_st->codec->pts_correction_num_faulty_pts : 0);
1273             fflush(stdout);
1274             last_time = cur_time;
1275         }
1276     }
1277 }
1278
1279 /* allocate a picture (this needs to be done in the main thread to avoid
1280    potential locking problems) */
1281 static void alloc_picture(void *opaque)
1282 {
1283     VideoState *is = opaque;
1284     VideoPicture *vp;
1285
1286     vp = &is->pictq[is->pictq_windex];
1287
1288     if (vp->bmp)
1289         SDL_FreeYUVOverlay(vp->bmp);
1290
1291 #if CONFIG_AVFILTER
1292     if (vp->picref)
1293         avfilter_unref_buffer(vp->picref);
1294     vp->picref = NULL;
1295
1296     vp->width   = is->out_video_filter->inputs[0]->w;
1297     vp->height  = is->out_video_filter->inputs[0]->h;
1298     vp->pix_fmt = is->out_video_filter->inputs[0]->format;
1299 #else
1300     vp->width   = is->video_st->codec->width;
1301     vp->height  = is->video_st->codec->height;
1302     vp->pix_fmt = is->video_st->codec->pix_fmt;
1303 #endif
1304
1305     vp->bmp = SDL_CreateYUVOverlay(vp->width, vp->height,
1306                                    SDL_YV12_OVERLAY,
1307                                    screen);
1308     if (!vp->bmp || vp->bmp->pitches[0] < vp->width) {
1309         /* SDL allocates a buffer smaller than requested if the video
1310          * overlay hardware is unable to support the requested size. */
1311         fprintf(stderr, "Error: the video system does not support an image\n"
1312                         "size of %dx%d pixels. Try using -lowres or -vf \"scale=w:h\"\n"
1313                         "to reduce the image size.\n", vp->width, vp->height );
1314         do_exit(is);
1315     }
1316
1317     SDL_LockMutex(is->pictq_mutex);
1318     vp->allocated = 1;
1319     SDL_CondSignal(is->pictq_cond);
1320     SDL_UnlockMutex(is->pictq_mutex);
1321 }
1322
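/* Convert a decoded frame to the SDL YUV overlay and append it to the picture
 * queue, blocking while the queue is full; also advances the internal video
 * clock used when a frame carries no usable PTS. */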
1323 static int queue_picture(VideoState *is, AVFrame *src_frame, double pts1, int64_t pos)
1324 {
1325     VideoPicture *vp;
1326     double frame_delay, pts = pts1;
1327
1328     /* compute the exact PTS for the picture if it is omitted in the stream
1329      * pts1 is the dts of the pkt / pts of the frame */
1330     if (pts != 0) {
1331         /* update video clock with pts, if present */
1332         is->video_clock = pts;
1333     } else {
1334         pts = is->video_clock;
1335     }
1336     /* update video clock for next frame */
1337     frame_delay = av_q2d(is->video_st->codec->time_base);
1338     /* for MPEG2, the frame can be repeated, so we update the
1339        clock accordingly */
1340     frame_delay += src_frame->repeat_pict * (frame_delay * 0.5);
1341     is->video_clock += frame_delay;
1342
1343 #if defined(DEBUG_SYNC) && 0
1344     printf("frame_type=%c clock=%0.3f pts=%0.3f\n",
1345            av_get_picture_type_char(src_frame->pict_type), pts, pts1);
1346 #endif
1347
1348     /* wait until we have space to put a new picture */
1349     SDL_LockMutex(is->pictq_mutex);
1350
1351     while (is->pictq_size >= VIDEO_PICTURE_QUEUE_SIZE &&
1352            !is->videoq.abort_request) {
1353         SDL_CondWait(is->pictq_cond, is->pictq_mutex);
1354     }
1355     SDL_UnlockMutex(is->pictq_mutex);
1356
1357     if (is->videoq.abort_request)
1358         return -1;
1359
1360     vp = &is->pictq[is->pictq_windex];
1361
1362     vp->duration = frame_delay;
1363
1364     /* alloc or resize hardware picture buffer */
1365     if (!vp->bmp || vp->reallocate ||
1366 #if CONFIG_AVFILTER
1367         vp->width  != is->out_video_filter->inputs[0]->w ||
1368         vp->height != is->out_video_filter->inputs[0]->h) {
1369 #else
1370         vp->width != is->video_st->codec->width ||
1371         vp->height != is->video_st->codec->height) {
1372 #endif
1373         SDL_Event event;
1374
1375         vp->allocated  = 0;
1376         vp->reallocate = 0;
1377
1378         /* the allocation must be done in the main thread to avoid
1379            locking problems */
1380         event.type = FF_ALLOC_EVENT;
1381         event.user.data1 = is;
1382         SDL_PushEvent(&event);
1383
1384         /* wait until the picture is allocated */
1385         SDL_LockMutex(is->pictq_mutex);
1386         while (!vp->allocated && !is->videoq.abort_request) {
1387             SDL_CondWait(is->pictq_cond, is->pictq_mutex);
1388         }
1389         /* if the queue is aborted, we have to pop the pending ALLOC event or wait for the allocation to complete */
1390         if (is->videoq.abort_request && SDL_PeepEvents(&event, 1, SDL_GETEVENT, SDL_EVENTMASK(FF_ALLOC_EVENT)) != 1) {
1391             while (!vp->allocated) {
1392                 SDL_CondWait(is->pictq_cond, is->pictq_mutex);
1393             }
1394         }
1395         SDL_UnlockMutex(is->pictq_mutex);
1396
1397         if (is->videoq.abort_request)
1398             return -1;
1399     }
1400
1401     /* if the frame is not skipped, then display it */
1402     if (vp->bmp) {
1403         AVPicture pict;
1404 #if CONFIG_AVFILTER
1405         if(vp->picref)
1406             avfilter_unref_buffer(vp->picref);
1407         vp->picref = src_frame->opaque;
1408 #endif
1409
1410         /* get a pointer to the bitmap */
1411         SDL_LockYUVOverlay (vp->bmp);
1412
1413         memset(&pict,0,sizeof(AVPicture));
1414         pict.data[0] = vp->bmp->pixels[0];
1415         pict.data[1] = vp->bmp->pixels[2];
1416         pict.data[2] = vp->bmp->pixels[1];
1417
1418         pict.linesize[0] = vp->bmp->pitches[0];
1419         pict.linesize[1] = vp->bmp->pitches[2];
1420         pict.linesize[2] = vp->bmp->pitches[1];
1421
1422 #if CONFIG_AVFILTER
1423         //FIXME use direct rendering
1424         av_picture_copy(&pict, (AVPicture *)src_frame,
1425                         vp->pix_fmt, vp->width, vp->height);
1426 #else
1427         sws_flags = av_get_int(sws_opts, "sws_flags", NULL);
1428         is->img_convert_ctx = sws_getCachedContext(is->img_convert_ctx,
1429             vp->width, vp->height, vp->pix_fmt, vp->width, vp->height,
1430             PIX_FMT_YUV420P, sws_flags, NULL, NULL, NULL);
1431         if (is->img_convert_ctx == NULL) {
1432             fprintf(stderr, "Cannot initialize the conversion context\n");
1433             exit(1);
1434         }
1435         sws_scale(is->img_convert_ctx, src_frame->data, src_frame->linesize,
1436                   0, vp->height, pict.data, pict.linesize);
1437 #endif
1438         /* update the bitmap content */
1439         SDL_UnlockYUVOverlay(vp->bmp);
1440
1441         vp->pts = pts;
1442         vp->pos = pos;
1443         vp->skip = 0;
1444
1445         /* now we can update the picture count */
1446         if (++is->pictq_windex == VIDEO_PICTURE_QUEUE_SIZE)
1447             is->pictq_windex = 0;
1448         SDL_LockMutex(is->pictq_mutex);
1449         is->pictq_size++;
1450         SDL_UnlockMutex(is->pictq_mutex);
1451     }
1452     return 0;
1453 }
1454
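/* Pull the next packet from the video queue and decode it. Returns 1 when a
 * displayable frame was produced, 0 when it was dropped or no frame was
 * output, and < 0 on abort; *pts receives the chosen timestamp in stream
 * time_base units. */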
1455 static int get_video_frame(VideoState *is, AVFrame *frame, int64_t *pts, AVPacket *pkt)
1456 {
1457     int got_picture, i;
1458
1459     if (packet_queue_get(&is->videoq, pkt, 1) < 0)
1460         return -1;
1461
1462     if (pkt->data == flush_pkt.data) {
1463         avcodec_flush_buffers(is->video_st->codec);
1464
1465         SDL_LockMutex(is->pictq_mutex);
1466         // Make sure there are no long delay timers (ideally we should just flush the queue, but that's harder)
1467         for (i = 0; i < VIDEO_PICTURE_QUEUE_SIZE; i++) {
1468             is->pictq[i].skip = 1;
1469         }
1470         while (is->pictq_size && !is->videoq.abort_request) {
1471             SDL_CondWait(is->pictq_cond, is->pictq_mutex);
1472         }
1473         is->video_current_pos = -1;
1474         is->frame_last_pts = AV_NOPTS_VALUE;
1475         is->frame_last_duration = 0;
1476         is->frame_timer = (double)av_gettime() / 1000000.0;
1477         is->frame_last_dropped_pts = AV_NOPTS_VALUE;
1478         SDL_UnlockMutex(is->pictq_mutex);
1479
1480         return 0;
1481     }
1482
1483     avcodec_decode_video2(is->video_st->codec, frame, &got_picture, pkt);
1484
1485     if (got_picture) {
1486         int ret = 1;
1487
1488         if (decoder_reorder_pts == -1) {
1489             *pts = *(int64_t*)av_opt_ptr(avcodec_get_frame_class(), frame, "best_effort_timestamp");
1490         } else if (decoder_reorder_pts) {
1491             *pts = frame->pkt_pts;
1492         } else {
1493             *pts = frame->pkt_dts;
1494         }
1495
1496         if (*pts == AV_NOPTS_VALUE) {
1497             *pts = 0;
1498         }
1499
1500         if (((is->av_sync_type == AV_SYNC_AUDIO_MASTER && is->audio_st) || is->av_sync_type == AV_SYNC_EXTERNAL_CLOCK) &&
1501              (framedrop>0 || (framedrop && is->audio_st))) {
1502             SDL_LockMutex(is->pictq_mutex);
1503             if (is->frame_last_pts != AV_NOPTS_VALUE && *pts) {
1504                 double clockdiff = get_video_clock(is) - get_master_clock(is);
1505                 double dpts = av_q2d(is->video_st->time_base) * *pts;
1506                 double ptsdiff = dpts - is->frame_last_pts;
1507                 if (fabs(clockdiff) < AV_NOSYNC_THRESHOLD &&
1508                      ptsdiff > 0 && ptsdiff < AV_NOSYNC_THRESHOLD &&
1509                      clockdiff + ptsdiff - is->frame_last_filter_delay < 0) {
1510                     is->frame_last_dropped_pos = pkt->pos;
1511                     is->frame_last_dropped_pts = dpts;
1512                     is->frame_drops_early++;
1513                     ret = 0;
1514                 }
1515             }
1516             SDL_UnlockMutex(is->pictq_mutex);
1517         }
1518
1519         if (ret)
1520             is->frame_last_returned_time = av_gettime() / 1000000.0;
1521
1522         return ret;
1523     }
1524     return 0;
1525 }
1526
1527 #if CONFIG_AVFILTER
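/* Private context of the "ffplay_input" source filter defined below: it wraps
 * the video decoder so decoded frames are fed into the libavfilter graph,
 * optionally using direct rendering (DR1) into filter-allocated buffers. */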
1528 typedef struct {
1529     VideoState *is;
1530     AVFrame *frame;
1531     int use_dr1;
1532 } FilterPriv;
1533
1534 static int input_get_buffer(AVCodecContext *codec, AVFrame *pic)
1535 {
1536     AVFilterContext *ctx = codec->opaque;
1537     AVFilterBufferRef  *ref;
1538     int perms = AV_PERM_WRITE;
1539     int i, w, h, stride[4];
1540     unsigned edge;
1541     int pixel_size;
1542
1543     av_assert0(codec->flags & CODEC_FLAG_EMU_EDGE);
1544
1545     if (codec->codec->capabilities & CODEC_CAP_NEG_LINESIZES)
1546         perms |= AV_PERM_NEG_LINESIZES;
1547
1548     if(pic->buffer_hints & FF_BUFFER_HINTS_VALID) {
1549         if(pic->buffer_hints & FF_BUFFER_HINTS_READABLE) perms |= AV_PERM_READ;
1550         if(pic->buffer_hints & FF_BUFFER_HINTS_PRESERVE) perms |= AV_PERM_PRESERVE;
1551         if(pic->buffer_hints & FF_BUFFER_HINTS_REUSABLE) perms |= AV_PERM_REUSE2;
1552     }
1553     if(pic->reference) perms |= AV_PERM_READ | AV_PERM_PRESERVE;
1554
1555     w = codec->width;
1556     h = codec->height;
1557
1558     if(av_image_check_size(w, h, 0, codec))
1559         return -1;
1560
1561     avcodec_align_dimensions2(codec, &w, &h, stride);
1562     edge = codec->flags & CODEC_FLAG_EMU_EDGE ? 0 : avcodec_get_edge_width();
1563     w += edge << 1;
1564     h += edge << 1;
1565     if (codec->pix_fmt != ctx->outputs[0]->format) {
1566         av_log(codec, AV_LOG_ERROR, "Pixel format mismatches %d %d\n", codec->pix_fmt, ctx->outputs[0]->format);
1567         return -1;
1568     }
1569     if(!(ref = avfilter_get_video_buffer(ctx->outputs[0], perms, w, h)))
1570         return -1;
1571
1572     pixel_size = av_pix_fmt_descriptors[ref->format].comp[0].step_minus1+1;
1573     ref->video->w = codec->width;
1574     ref->video->h = codec->height;
1575     for(i = 0; i < 4; i ++) {
1576         unsigned hshift = (i == 1 || i == 2) ? av_pix_fmt_descriptors[ref->format].log2_chroma_w : 0;
1577         unsigned vshift = (i == 1 || i == 2) ? av_pix_fmt_descriptors[ref->format].log2_chroma_h : 0;
1578
1579         if (ref->data[i]) {
1580             ref->data[i]    += ((edge * pixel_size) >> hshift) + ((edge * ref->linesize[i]) >> vshift);
1581         }
1582         pic->data[i]     = ref->data[i];
1583         pic->linesize[i] = ref->linesize[i];
1584     }
1585     pic->opaque = ref;
1586     pic->age    = INT_MAX;
1587     pic->type   = FF_BUFFER_TYPE_USER;
1588     pic->reordered_opaque = codec->reordered_opaque;
1589     if(codec->pkt) pic->pkt_pts = codec->pkt->pts;
1590     else           pic->pkt_pts = AV_NOPTS_VALUE;
1591     return 0;
1592 }
1593
1594 static void input_release_buffer(AVCodecContext *codec, AVFrame *pic)
1595 {
1596     memset(pic->data, 0, sizeof(pic->data));
1597     avfilter_unref_buffer(pic->opaque);
1598 }
1599
1600 static int input_reget_buffer(AVCodecContext *codec, AVFrame *pic)
1601 {
1602     AVFilterBufferRef *ref = pic->opaque;
1603
1604     if (pic->data[0] == NULL) {
1605         pic->buffer_hints |= FF_BUFFER_HINTS_READABLE;
1606         return codec->get_buffer(codec, pic);
1607     }
1608
1609     if ((codec->width != ref->video->w) || (codec->height != ref->video->h) ||
1610         (codec->pix_fmt != ref->format)) {
1611         av_log(codec, AV_LOG_ERROR, "Picture properties changed.\n");
1612         return -1;
1613     }
1614
1615     pic->reordered_opaque = codec->reordered_opaque;
1616     if(codec->pkt) pic->pkt_pts = codec->pkt->pts;
1617     else           pic->pkt_pts = AV_NOPTS_VALUE;
1618     return 0;
1619 }
1620
1621 static int input_init(AVFilterContext *ctx, const char *args, void *opaque)
1622 {
1623     FilterPriv *priv = ctx->priv;
1624     AVCodecContext *codec;
1625     if(!opaque) return -1;
1626
1627     priv->is = opaque;
1628     codec    = priv->is->video_st->codec;
1629     codec->opaque = ctx;
1630     if((codec->codec->capabilities & CODEC_CAP_DR1)
1631     ) {
1632         av_assert0(codec->flags & CODEC_FLAG_EMU_EDGE);
1633         priv->use_dr1 = 1;
1634         codec->get_buffer     = input_get_buffer;
1635         codec->release_buffer = input_release_buffer;
1636         codec->reget_buffer   = input_reget_buffer;
1637         codec->thread_safe_callbacks = 1;
1638     }
1639
1640     priv->frame = avcodec_alloc_frame();
1641
1642     return 0;
1643 }
1644
1645 static void input_uninit(AVFilterContext *ctx)
1646 {
1647     FilterPriv *priv = ctx->priv;
1648     av_free(priv->frame);
1649 }
1650
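     /* request_frame() of the source pad: pull one decoded frame from the
      * video packet queue and push it into the graph. With DR1 the frame
      * already lives in a filter buffer and is only re-referenced; otherwise
      * it is copied into a freshly allocated buffer. */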
1651 static int input_request_frame(AVFilterLink *link)
1652 {
1653     FilterPriv *priv = link->src->priv;
1654     AVFilterBufferRef *picref;
1655     int64_t pts = 0;
1656     AVPacket pkt;
1657     int ret;
1658
1659     while (!(ret = get_video_frame(priv->is, priv->frame, &pts, &pkt)))
1660         av_free_packet(&pkt);
1661     if (ret < 0)
1662         return -1;
1663
1664     if(priv->use_dr1 && priv->frame->opaque) {
1665         picref = avfilter_ref_buffer(priv->frame->opaque, ~0);
1666     } else {
1667         picref = avfilter_get_video_buffer(link, AV_PERM_WRITE, link->w, link->h);
1668         av_image_copy(picref->data, picref->linesize,
1669                       priv->frame->data, priv->frame->linesize,
1670                       picref->format, link->w, link->h);
1671     }
1672     av_free_packet(&pkt);
1673
1674     avfilter_copy_frame_props(picref, priv->frame);
1675     picref->pts = pts;
1676
1677     avfilter_start_frame(link, picref);
1678     avfilter_draw_slice(link, 0, link->h, 1);
1679     avfilter_end_frame(link);
1680
1681     return 0;
1682 }
1683
1684 static int input_query_formats(AVFilterContext *ctx)
1685 {
1686     FilterPriv *priv = ctx->priv;
1687     enum PixelFormat pix_fmts[] = {
1688         priv->is->video_st->codec->pix_fmt, PIX_FMT_NONE
1689     };
1690
1691     avfilter_set_common_pixel_formats(ctx, avfilter_make_format_list(pix_fmts));
1692     return 0;
1693 }
1694
1695 static int input_config_props(AVFilterLink *link)
1696 {
1697     FilterPriv *priv  = link->src->priv;
1698     AVStream *s = priv->is->video_st;
1699
1700     link->w = s->codec->width;
1701     link->h = s->codec->height;
1702     link->sample_aspect_ratio = s->sample_aspect_ratio.num ?
1703         s->sample_aspect_ratio : s->codec->sample_aspect_ratio;
1704     link->time_base = s->time_base;
1705
1706     return 0;
1707 }
1708
1709 static AVFilter input_filter =
1710 {
1711     .name      = "ffplay_input",
1712
1713     .priv_size = sizeof(FilterPriv),
1714
1715     .init      = input_init,
1716     .uninit    = input_uninit,
1717
1718     .query_formats = input_query_formats,
1719
1720     .inputs    = (AVFilterPad[]) {{ .name = NULL }},
1721     .outputs   = (AVFilterPad[]) {{ .name = "default",
1722                                     .type = AVMEDIA_TYPE_VIDEO,
1723                                     .request_frame = input_request_frame,
1724                                     .config_props  = input_config_props, },
1725                                   { .name = NULL }},
1726 };
1727
1728 static int configure_video_filters(AVFilterGraph *graph, VideoState *is, const char *vfilters)
1729 {
1730     char sws_flags_str[128];
1731     int ret;
1732     enum PixelFormat pix_fmts[] = { PIX_FMT_YUV420P, PIX_FMT_NONE };
1733     AVBufferSinkParams *buffersink_params = av_buffersink_params_alloc();
1734     AVFilterContext *filt_src = NULL, *filt_out = NULL;
1735     snprintf(sws_flags_str, sizeof(sws_flags_str), "flags=%d", sws_flags);
1736     graph->scale_sws_opts = av_strdup(sws_flags_str);
1737
1738     if ((ret = avfilter_graph_create_filter(&filt_src, &input_filter, "src",
1739                                             NULL, is, graph)) < 0)
1740         return ret;
1741 #if FF_API_OLD_VSINK_API
1742     ret = avfilter_graph_create_filter(&filt_out, avfilter_get_by_name("buffersink"), "out",
1743                                        NULL, pix_fmts, graph);
1744 #else
1745     buffersink_params->pixel_fmts = pix_fmts;
1746     ret = avfilter_graph_create_filter(&filt_out, avfilter_get_by_name("buffersink"), "out",
1747                                        NULL, buffersink_params, graph);
1748 #endif
1749     av_freep(&buffersink_params);
1750     if (ret < 0)
1751         return ret;
1752
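         /* The graph is always ffplay_input ("src") -> [optional user chain] ->
            buffersink ("out"): with -vf the user chain is parsed between the
            labels "in" and "out", otherwise source and sink are linked directly. */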
1753     if(vfilters) {
1754         AVFilterInOut *outputs = avfilter_inout_alloc();
1755         AVFilterInOut *inputs  = avfilter_inout_alloc();
1756
1757         outputs->name    = av_strdup("in");
1758         outputs->filter_ctx = filt_src;
1759         outputs->pad_idx = 0;
1760         outputs->next    = NULL;
1761
1762         inputs->name    = av_strdup("out");
1763         inputs->filter_ctx = filt_out;
1764         inputs->pad_idx = 0;
1765         inputs->next    = NULL;
1766
1767         if ((ret = avfilter_graph_parse(graph, vfilters, &inputs, &outputs, NULL)) < 0)
1768             return ret;
1769     } else {
1770         if ((ret = avfilter_link(filt_src, 0, filt_out, 0)) < 0)
1771             return ret;
1772     }
1773
1774     if ((ret = avfilter_graph_config(graph, NULL)) < 0)
1775         return ret;
1776
1777     is->out_video_filter = filt_out;
1778
1779     return ret;
1780 }
1781
1782 #endif  /* CONFIG_AVFILTER */
1783
1784 static int video_thread(void *arg)
1785 {
1786     VideoState *is = arg;
1787     AVFrame *frame= avcodec_alloc_frame();
1788     int64_t pts_int = AV_NOPTS_VALUE, pos = -1;
1789     double pts;
1790     int ret;
1791
1792 #if CONFIG_AVFILTER
1793     AVFilterGraph *graph = avfilter_graph_alloc();
1794     AVFilterContext *filt_out = NULL;
1795     int last_w = is->video_st->codec->width;
1796     int last_h = is->video_st->codec->height;
1797
1798     if ((ret = configure_video_filters(graph, is, vfilters)) < 0)
1799         goto the_end;
1800     filt_out = is->out_video_filter;
1801 #endif
1802
1803     for(;;) {
1804 #if !CONFIG_AVFILTER
1805         AVPacket pkt;
1806 #else
1807         AVFilterBufferRef *picref;
1808         AVRational tb = filt_out->inputs[0]->time_base;
1809 #endif
1810         while (is->paused && !is->videoq.abort_request)
1811             SDL_Delay(10);
1812 #if CONFIG_AVFILTER
1813         if (   last_w != is->video_st->codec->width
1814             || last_h != is->video_st->codec->height) {
1815             av_log(NULL, AV_LOG_INFO, "Frame changed from size:%dx%d to size:%dx%d\n",
1816                    last_w, last_h, is->video_st->codec->width, is->video_st->codec->height);
1817             avfilter_graph_free(&graph);
1818             graph = avfilter_graph_alloc();
1819             if ((ret = configure_video_filters(graph, is, vfilters)) < 0)
1820                 goto the_end;
1821             filt_out = is->out_video_filter;
1822             last_w = is->video_st->codec->width;
1823             last_h = is->video_st->codec->height;
1824         }
1825         ret = av_buffersink_get_buffer_ref(filt_out, &picref, 0);
1826         if (picref) {
1827             avfilter_fill_frame_from_video_buffer_ref(frame, picref);
1828             pts_int = picref->pts;
1829             pos     = picref->pos;
1830             frame->opaque = picref;
1831         }
1832
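                 /* buffersink returns timestamps in the output link's time base;
                    rescale them to the stream time base used by the rest of ffplay. */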
1833         if (av_cmp_q(tb, is->video_st->time_base)) {
1834             av_unused int64_t pts1 = pts_int;
1835             pts_int = av_rescale_q(pts_int, tb, is->video_st->time_base);
1836             av_dlog(NULL, "video_thread(): "
1837                     "tb:%d/%d pts:%"PRId64" -> tb:%d/%d pts:%"PRId64"\n",
1838                     tb.num, tb.den, pts1,
1839                     is->video_st->time_base.num, is->video_st->time_base.den, pts_int);
1840         }
1841 #else
1842         ret = get_video_frame(is, frame, &pts_int, &pkt);
1843         pos = pkt.pos;
1844         av_free_packet(&pkt);
1845 #endif
1846
1847         if (ret < 0) goto the_end;
1848
1849         is->frame_last_filter_delay = av_gettime() / 1000000.0 - is->frame_last_returned_time;
1850         if (fabs(is->frame_last_filter_delay) > AV_NOSYNC_THRESHOLD / 10.0)
1851             is->frame_last_filter_delay = 0;
1852
1853 #if CONFIG_AVFILTER
1854         if (!picref)
1855             continue;
1856 #endif
1857
1858         pts = pts_int*av_q2d(is->video_st->time_base);
1859
1860         ret = queue_picture(is, frame, pts, pos);
1861
1862         if (ret < 0)
1863             goto the_end;
1864
1865         if (is->step)
1866             stream_toggle_pause(is);
1867     }
1868  the_end:
1869 #if CONFIG_AVFILTER
1870     avfilter_graph_free(&graph);
1871 #endif
1872     av_free(frame);
1873     return 0;
1874 }
1875
1876 static int subtitle_thread(void *arg)
1877 {
1878     VideoState *is = arg;
1879     SubPicture *sp;
1880     AVPacket pkt1, *pkt = &pkt1;
1881     int got_subtitle;
1882     double pts;
1883     int i, j;
1884     int r, g, b, y, u, v, a;
1885
1886     for(;;) {
1887         while (is->paused && !is->subtitleq.abort_request) {
1888             SDL_Delay(10);
1889         }
1890         if (packet_queue_get(&is->subtitleq, pkt, 1) < 0)
1891             break;
1892
1893         if(pkt->data == flush_pkt.data){
1894             avcodec_flush_buffers(is->subtitle_st->codec);
1895             continue;
1896         }
1897         SDL_LockMutex(is->subpq_mutex);
1898         while (is->subpq_size >= SUBPICTURE_QUEUE_SIZE &&
1899                !is->subtitleq.abort_request) {
1900             SDL_CondWait(is->subpq_cond, is->subpq_mutex);
1901         }
1902         SDL_UnlockMutex(is->subpq_mutex);
1903
1904         if (is->subtitleq.abort_request)
1905             return 0;
1906
1907         sp = &is->subpq[is->subpq_windex];
1908
1909         /* NOTE: pts is the PTS of the _first_ subtitle picture beginning in
1910            this packet, if any */
1911         pts = 0;
1912         if (pkt->pts != AV_NOPTS_VALUE)
1913             pts = av_q2d(is->subtitle_st->time_base)*pkt->pts;
1914
1915         avcodec_decode_subtitle2(is->subtitle_st->codec, &sp->sub,
1916                                  &got_subtitle, pkt);
1917
1918         if (got_subtitle && sp->sub.format == 0) {
1919             sp->pts = pts;
1920
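                 /* convert each bitmap subtitle's RGBA palette to YUVA in place
                    so it can later be blended directly onto the YUV overlay */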
1921             for (i = 0; i < sp->sub.num_rects; i++)
1922             {
1923                 for (j = 0; j < sp->sub.rects[i]->nb_colors; j++)
1924                 {
1925                     RGBA_IN(r, g, b, a, (uint32_t*)sp->sub.rects[i]->pict.data[1] + j);
1926                     y = RGB_TO_Y_CCIR(r, g, b);
1927                     u = RGB_TO_U_CCIR(r, g, b, 0);
1928                     v = RGB_TO_V_CCIR(r, g, b, 0);
1929                     YUVA_OUT((uint32_t*)sp->sub.rects[i]->pict.data[1] + j, y, u, v, a);
1930                 }
1931             }
1932
1933             /* now we can update the picture count */
1934             if (++is->subpq_windex == SUBPICTURE_QUEUE_SIZE)
1935                 is->subpq_windex = 0;
1936             SDL_LockMutex(is->subpq_mutex);
1937             is->subpq_size++;
1938             SDL_UnlockMutex(is->subpq_mutex);
1939         }
1940         av_free_packet(pkt);
1941     }
1942     return 0;
1943 }
1944
1945 /* copy samples into the array used by the wave/RDFT display */
1946 static void update_sample_display(VideoState *is, short *samples, int samples_size)
1947 {
1948     int size, len;
1949
1950     size = samples_size / sizeof(short);
1951     while (size > 0) {
1952         len = SAMPLE_ARRAY_SIZE - is->sample_array_index;
1953         if (len > size)
1954             len = size;
1955         memcpy(is->sample_array + is->sample_array_index, samples, len * sizeof(short));
1956         samples += len;
1957         is->sample_array_index += len;
1958         if (is->sample_array_index >= SAMPLE_ARRAY_SIZE)
1959             is->sample_array_index = 0;
1960         size -= len;
1961     }
1962 }
1963
1964 /* return the new audio buffer size (samples can be added or removed
1965    to get better sync when video or an external clock is the master) */
1966 static int synchronize_audio(VideoState *is, short *samples,
1967                              int samples_size1, double pts)
1968 {
1969     int n, samples_size;
1970     double ref_clock;
1971
1972     n = av_get_bytes_per_sample(is->audio_tgt_fmt) * is->audio_tgt_channels;
1973     samples_size = samples_size1;
1974
1975     /* if not master, then we try to remove or add samples to correct the clock */
1976     if (((is->av_sync_type == AV_SYNC_VIDEO_MASTER && is->video_st) ||
1977          is->av_sync_type == AV_SYNC_EXTERNAL_CLOCK)) {
1978         double diff, avg_diff;
1979         int wanted_size, min_size, max_size, nb_samples;
1980
1981         ref_clock = get_master_clock(is);
1982         diff = get_audio_clock(is) - ref_clock;
1983
1984             if (fabs(diff) < AV_NOSYNC_THRESHOLD) {
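                 /* audio_diff_cum is a sum with geometric decay
                    (coef = exp(log(0.01) / AUDIO_DIFF_AVG_NB), see
                    stream_component_open()); multiplying it by (1 - coef) below
                    gives an exponentially weighted average of the recent A-V
                    differences, where the term AUDIO_DIFF_AVG_NB frames back
                    carries only about 1% of the weight of the newest one. */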
1985             is->audio_diff_cum = diff + is->audio_diff_avg_coef * is->audio_diff_cum;
1986             if (is->audio_diff_avg_count < AUDIO_DIFF_AVG_NB) {
1987                 /* not enough measures to have a correct estimate */
1988                 is->audio_diff_avg_count++;
1989             } else {
1990                 /* estimate the A-V difference */
1991                 avg_diff = is->audio_diff_cum * (1.0 - is->audio_diff_avg_coef);
1992
1993                 if (fabs(avg_diff) >= is->audio_diff_threshold) {
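                         /* diff seconds of clock error correspond to
                            diff * freq sample frames (diff * freq * n bytes);
                            grow or shrink this buffer accordingly, clamped by
                            SAMPLE_CORRECTION_PERCENT_MAX and the audio_buf2 size. */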
1994                     wanted_size = samples_size + ((int)(diff * is->audio_tgt_freq) * n);
1995                     nb_samples = samples_size / n;
1996
1997                     min_size = ((nb_samples * (100 - SAMPLE_CORRECTION_PERCENT_MAX)) / 100) * n;
1998                     max_size = ((nb_samples * (100 + SAMPLE_CORRECTION_PERCENT_MAX)) / 100) * n;
1999                     if (wanted_size < min_size)
2000                         wanted_size = min_size;
2001                     else if (wanted_size > FFMIN3(max_size, samples_size, sizeof(is->audio_buf2)))
2002                         wanted_size = FFMIN3(max_size, samples_size, sizeof(is->audio_buf2));
2003
2004                     /* add or remove samples to correct the sync */
2005                     if (wanted_size < samples_size) {
2006                         /* remove samples */
2007                         samples_size = wanted_size;
2008                     } else if (wanted_size > samples_size) {
2009                         uint8_t *samples_end, *q;
2010                         int nb;
2011
2012                         /* add samples */
2013                         nb = (wanted_size - samples_size);
2014                         samples_end = (uint8_t *)samples + samples_size - n;
2015                         q = samples_end + n;
2016                         while (nb > 0) {
2017                             memcpy(q, samples_end, n);
2018                             q += n;
2019                             nb -= n;
2020                         }
2021                         samples_size = wanted_size;
2022                     }
2023                 }
2024                 av_dlog(NULL, "diff=%f adiff=%f sample_diff=%d apts=%0.3f vpts=%0.3f %f\n",
2025                         diff, avg_diff, samples_size - samples_size1,
2026                         is->audio_clock, is->video_clock, is->audio_diff_threshold);
2027             }
2028         } else {
2029             /* difference is too large: probably initial PTS errors, so
2030                reset the A-V filter */
2031             is->audio_diff_avg_count = 0;
2032             is->audio_diff_cum = 0;
2033         }
2034     }
2035
2036     return samples_size;
2037 }
2038
2039 /* decode one audio frame and return its uncompressed size */
2040 static int audio_decode_frame(VideoState *is, double *pts_ptr)
2041 {
2042     AVPacket *pkt_temp = &is->audio_pkt_temp;
2043     AVPacket *pkt = &is->audio_pkt;
2044     AVCodecContext *dec= is->audio_st->codec;
2045     int len1, len2, data_size, resampled_data_size;
2046     int64_t dec_channel_layout;
2047     int got_frame;
2048     double pts;
2049     int new_packet = 0;
2050     int flush_complete = 0;
2051
2052     for(;;) {
2053         /* NOTE: the audio packet can contain several frames */
2054         while (pkt_temp->size > 0 || (!pkt_temp->data && new_packet)) {
2055             if (!is->frame) {
2056                 if (!(is->frame = avcodec_alloc_frame()))
2057                     return AVERROR(ENOMEM);
2058             } else
2059                 avcodec_get_frame_defaults(is->frame);
2060
2061             if (flush_complete)
2062                 break;
2063             new_packet = 0;
2064             len1 = avcodec_decode_audio4(dec, is->frame, &got_frame, pkt_temp);
2065             if (len1 < 0) {
2066                 /* if error, we skip the frame */
2067                 pkt_temp->size = 0;
2068                 break;
2069             }
2070
2071             pkt_temp->data += len1;
2072             pkt_temp->size -= len1;
2073
2074             if (!got_frame) {
2075                 /* stop sending empty packets if the decoder is finished */
2076                 if (!pkt_temp->data && dec->codec->capabilities & CODEC_CAP_DELAY)
2077                     flush_complete = 1;
2078                 continue;
2079             }
2080             data_size = av_samples_get_buffer_size(NULL, dec->channels,
2081                                                    is->frame->nb_samples,
2082                                                    dec->sample_fmt, 1);
2083
2084             dec_channel_layout = (dec->channel_layout && dec->channels == av_get_channel_layout_nb_channels(dec->channel_layout)) ? dec->channel_layout : av_get_default_channel_layout(dec->channels);
2085
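                 /* (re)configure swresample whenever the decoder's sample format,
                    channel layout or rate stops matching what the converter was
                    set up for; the target is the fixed S16 layout negotiated with
                    SDL in stream_component_open(). */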
2086             if (dec->sample_fmt != is->audio_src_fmt || dec_channel_layout != is->audio_src_channel_layout || dec->sample_rate != is->audio_src_freq) {
2087                 if (is->swr_ctx)
2088                     swr_free(&is->swr_ctx);
2089                 is->swr_ctx = swr_alloc_set_opts(NULL,
2090                                                  is->audio_tgt_channel_layout, is->audio_tgt_fmt, is->audio_tgt_freq,
2091                                                  dec_channel_layout,           dec->sample_fmt,   dec->sample_rate,
2092                                                  0, NULL);
2093                 if (!is->swr_ctx || swr_init(is->swr_ctx) < 0) {
2094                     fprintf(stderr, "Cannot create sample rate converter for conversion of %d Hz %s %d channels to %d Hz %s %d channels!\n",
2095                         dec->sample_rate,
2096                         av_get_sample_fmt_name(dec->sample_fmt),
2097                         dec->channels,
2098                         is->audio_tgt_freq,
2099                         av_get_sample_fmt_name(is->audio_tgt_fmt),
2100                         is->audio_tgt_channels);
2101                     break;
2102                 }
2103                 is->audio_src_channel_layout = dec_channel_layout;
2104                 is->audio_src_channels = dec->channels;
2105                 is->audio_src_freq = dec->sample_rate;
2106                 is->audio_src_fmt = dec->sample_fmt;
2107             }
2108
2109             resampled_data_size = data_size;
2110             if (is->swr_ctx) {
2111                 const uint8_t *in[] = { is->frame->data[0] };
2112                 uint8_t *out[] = {is->audio_buf2};
2113                 len2 = swr_convert(is->swr_ctx, out, sizeof(is->audio_buf2) / is->audio_tgt_channels / av_get_bytes_per_sample(is->audio_tgt_fmt),
2114                                                 in, data_size / dec->channels / av_get_bytes_per_sample(dec->sample_fmt));
2115                 if (len2 < 0) {
2116                     fprintf(stderr, "swr_convert() failed\n");
2117                     break;
2118                 }
2119                 if (len2 == sizeof(is->audio_buf2) / is->audio_tgt_channels / av_get_bytes_per_sample(is->audio_tgt_fmt)) {
2120                     fprintf(stderr, "warning: audio buffer is probably too small\n");
2121                     swr_init(is->swr_ctx);
2122                 }
2123                 is->audio_buf = is->audio_buf2;
2124                 resampled_data_size = len2 * is->audio_tgt_channels * av_get_bytes_per_sample(is->audio_tgt_fmt);
2125             } else {
2126                 is->audio_buf = is->frame->data[0];
2127             }
2128
2129             /* use the running audio clock as this frame's pts */
2130             pts = is->audio_clock;
2131             *pts_ptr = pts;
2132             is->audio_clock += (double)data_size / (dec->channels * dec->sample_rate * av_get_bytes_per_sample(dec->sample_fmt));
2133 #ifdef DEBUG
2134             {
2135                 static double last_clock;
2136                 printf("audio: delay=%0.3f clock=%0.3f pts=%0.3f\n",
2137                        is->audio_clock - last_clock,
2138                        is->audio_clock, pts);
2139                 last_clock = is->audio_clock;
2140             }
2141 #endif
2142             return resampled_data_size;
2143         }
2144
2145         /* free the current packet */
2146         if (pkt->data)
2147             av_free_packet(pkt);
2148         memset(pkt_temp, 0, sizeof(*pkt_temp));
2149
2150         if (is->paused || is->audioq.abort_request) {
2151             return -1;
2152         }
2153
2154         /* read next packet */
2155         if ((new_packet = packet_queue_get(&is->audioq, pkt, 1)) < 0)
2156             return -1;
2157
2158         if (pkt->data == flush_pkt.data)
2159             avcodec_flush_buffers(dec);
2160
2161         *pkt_temp = *pkt;
2162
2163         /* update the audio clock with the packet pts, if available */
2164         if (pkt->pts != AV_NOPTS_VALUE) {
2165             is->audio_clock = av_q2d(is->audio_st->time_base)*pkt->pts;
2166         }
2167     }
2168 }
2169
2170 /* prepare a new audio buffer */
2171 static void sdl_audio_callback(void *opaque, Uint8 *stream, int len)
2172 {
2173     VideoState *is = opaque;
2174     int audio_size, len1;
2175     int bytes_per_sec;
2176     double pts;
2177
2178     audio_callback_time = av_gettime();
2179
2180     while (len > 0) {
2181         if (is->audio_buf_index >= is->audio_buf_size) {
2182            audio_size = audio_decode_frame(is, &pts);
2183            if (audio_size < 0) {
2184                 /* if error, just output silence */
2185                is->audio_buf      = is->silence_buf;
2186                is->audio_buf_size = sizeof(is->silence_buf);
2187            } else {
2188                if (is->show_mode != SHOW_MODE_VIDEO)
2189                    update_sample_display(is, (int16_t *)is->audio_buf, audio_size);
2190                audio_size = synchronize_audio(is, (int16_t *)is->audio_buf, audio_size,
2191                                               pts);
2192                is->audio_buf_size = audio_size;
2193            }
2194            is->audio_buf_index = 0;
2195         }
2196         len1 = is->audio_buf_size - is->audio_buf_index;
2197         if (len1 > len)
2198             len1 = len;
2199         memcpy(stream, (uint8_t *)is->audio_buf + is->audio_buf_index, len1);
2200         len -= len1;
2201         stream += len1;
2202         is->audio_buf_index += len1;
2203     }
2204     bytes_per_sec = is->audio_tgt_freq * is->audio_tgt_channels * av_get_bytes_per_sample(is->audio_tgt_fmt);
2205     is->audio_write_buf_size = is->audio_buf_size - is->audio_buf_index;
2206     /* Let's assume the audio driver that is used by SDL has two periods. */
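         /* audio_clock marks the end of the data decoded so far; subtracting the
            bytes still unplayed (our unwritten buffer plus the assumed two
            hardware periods) gives the pts of what the user is hearing now. */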
2207     is->audio_current_pts = is->audio_clock - (double)(2 * is->audio_hw_buf_size + is->audio_write_buf_size) / bytes_per_sec;
2208     is->audio_current_pts_drift = is->audio_current_pts - audio_callback_time / 1000000.0;
2209 }
2210
2211 /* open a given stream. Return 0 if OK */
2212 static int stream_component_open(VideoState *is, int stream_index)
2213 {
2214     AVFormatContext *ic = is->ic;
2215     AVCodecContext *avctx;
2216     AVCodec *codec;
2217     SDL_AudioSpec wanted_spec, spec;
2218     AVDictionary *opts;
2219     AVDictionaryEntry *t = NULL;
2220     int64_t wanted_channel_layout = 0;
2221
2222     if (stream_index < 0 || stream_index >= ic->nb_streams)
2223         return -1;
2224     avctx = ic->streams[stream_index]->codec;
2225
2226     codec = avcodec_find_decoder(avctx->codec_id);
2227     opts = filter_codec_opts(codec_opts, codec, ic, ic->streams[stream_index]);
2228
2229     switch(avctx->codec_type){
2230         case AVMEDIA_TYPE_AUDIO   : if(audio_codec_name   ) codec= avcodec_find_decoder_by_name(   audio_codec_name); break;
2231         case AVMEDIA_TYPE_SUBTITLE: if(subtitle_codec_name) codec= avcodec_find_decoder_by_name(subtitle_codec_name); break;
2232         case AVMEDIA_TYPE_VIDEO   : if(video_codec_name   ) codec= avcodec_find_decoder_by_name(   video_codec_name); break;
2233     }
2234     if (!codec)
2235         return -1;
2236
2237     avctx->workaround_bugs = workaround_bugs;
2238     avctx->lowres = lowres;
2239     if(avctx->lowres > codec->max_lowres){
2240         av_log(avctx, AV_LOG_WARNING, "The maximum value for lowres supported by the decoder is %d\n",
2241                 codec->max_lowres);
2242         avctx->lowres= codec->max_lowres;
2243     }
2244     if(avctx->lowres) avctx->flags |= CODEC_FLAG_EMU_EDGE;
2245     avctx->idct_algo= idct;
2246     if(fast) avctx->flags2 |= CODEC_FLAG2_FAST;
2247     avctx->skip_frame= skip_frame;
2248     avctx->skip_idct= skip_idct;
2249     avctx->skip_loop_filter= skip_loop_filter;
2250     avctx->error_recognition= error_recognition;
2251     avctx->error_concealment= error_concealment;
2252
2253     if(codec->capabilities & CODEC_CAP_DR1)
2254         avctx->flags |= CODEC_FLAG_EMU_EDGE;
2255
2256     if (avctx->codec_type == AVMEDIA_TYPE_AUDIO) {
2257         wanted_channel_layout = (avctx->channel_layout && avctx->channels == av_get_channel_layout_nb_channels(avctx->channel_layout)) ? avctx->channel_layout : av_get_default_channel_layout(avctx->channels);
2258         wanted_channel_layout &= ~AV_CH_LAYOUT_STEREO_DOWNMIX;
2259         wanted_spec.channels = av_get_channel_layout_nb_channels(wanted_channel_layout);
2260         wanted_spec.freq = avctx->sample_rate;
2261         if (wanted_spec.freq <= 0 || wanted_spec.channels <= 0) {
2262             fprintf(stderr, "Invalid sample rate or channel count!\n");
2263             return -1;
2264         }
2265     }
2266
2267     if (!codec ||
2268         avcodec_open2(avctx, codec, &opts) < 0)
2269         return -1;
2270     if ((t = av_dict_get(opts, "", NULL, AV_DICT_IGNORE_SUFFIX))) {
2271         av_log(NULL, AV_LOG_ERROR, "Option %s not found.\n", t->key);
2272         return AVERROR_OPTION_NOT_FOUND;
2273     }
2274
2275     /* prepare audio output */
2276     if (avctx->codec_type == AVMEDIA_TYPE_AUDIO) {
2277         wanted_spec.format = AUDIO_S16SYS;
2278         wanted_spec.silence = 0;
2279         wanted_spec.samples = SDL_AUDIO_BUFFER_SIZE;
2280         wanted_spec.callback = sdl_audio_callback;
2281         wanted_spec.userdata = is;
2282         if (SDL_OpenAudio(&wanted_spec, &spec) < 0) {
2283             fprintf(stderr, "SDL_OpenAudio: %s\n", SDL_GetError());
2284             return -1;
2285         }
2286         is->audio_hw_buf_size = spec.size;
2287         if (spec.format != AUDIO_S16SYS) {
2288             fprintf(stderr, "SDL advised audio format %d is not supported!\n", spec.format);
2289             return -1;
2290         }
2291         if (spec.channels != wanted_spec.channels) {
2292             wanted_channel_layout = av_get_default_channel_layout(spec.channels);
2293             if (!wanted_channel_layout) {
2294                 fprintf(stderr, "SDL advised channel count %d is not supported!\n", spec.channels);
2295                 return -1;
2296             }
2297         }
2298         is->audio_src_fmt = is->audio_tgt_fmt = AV_SAMPLE_FMT_S16;
2299         is->audio_src_freq = is->audio_tgt_freq = spec.freq;
2300         is->audio_src_channel_layout = is->audio_tgt_channel_layout = wanted_channel_layout;
2301         is->audio_src_channels = is->audio_tgt_channels = spec.channels;
2302     }
2303
2304     ic->streams[stream_index]->discard = AVDISCARD_DEFAULT;
2305     switch(avctx->codec_type) {
2306     case AVMEDIA_TYPE_AUDIO:
2307         is->audio_stream = stream_index;
2308         is->audio_st = ic->streams[stream_index];
2309         is->audio_buf_size = 0;
2310         is->audio_buf_index = 0;
2311
2312         /* init averaging filter */
2313         is->audio_diff_avg_coef = exp(log(0.01) / AUDIO_DIFF_AVG_NB);
2314         is->audio_diff_avg_count = 0;
2315         /* since we do not have a precise enough measure of the audio FIFO fullness,
2316            we only correct audio sync when the error exceeds this threshold */
2317         is->audio_diff_threshold = 2.0 * SDL_AUDIO_BUFFER_SIZE / wanted_spec.freq;
2318
2319         memset(&is->audio_pkt, 0, sizeof(is->audio_pkt));
2320         packet_queue_init(&is->audioq);
2321         SDL_PauseAudio(0);
2322         break;
2323     case AVMEDIA_TYPE_VIDEO:
2324         is->video_stream = stream_index;
2325         is->video_st = ic->streams[stream_index];
2326
2327         packet_queue_init(&is->videoq);
2328         is->video_tid = SDL_CreateThread(video_thread, is);
2329         break;
2330     case AVMEDIA_TYPE_SUBTITLE:
2331         is->subtitle_stream = stream_index;
2332         is->subtitle_st = ic->streams[stream_index];
2333         packet_queue_init(&is->subtitleq);
2334
2335         is->subtitle_tid = SDL_CreateThread(subtitle_thread, is);
2336         break;
2337     default:
2338         break;
2339     }
2340     return 0;
2341 }
2342
2343 static void stream_component_close(VideoState *is, int stream_index)
2344 {
2345     AVFormatContext *ic = is->ic;
2346     AVCodecContext *avctx;
2347
2348     if (stream_index < 0 || stream_index >= ic->nb_streams)
2349         return;
2350     avctx = ic->streams[stream_index]->codec;
2351
2352     switch(avctx->codec_type) {
2353     case AVMEDIA_TYPE_AUDIO:
2354         packet_queue_abort(&is->audioq);
2355
2356         SDL_CloseAudio();
2357
2358         packet_queue_end(&is->audioq);
2359         if (is->swr_ctx)
2360             swr_free(&is->swr_ctx);
2361         av_free_packet(&is->audio_pkt);
2362         av_freep(&is->audio_buf1);
2363         is->audio_buf = NULL;
2364         av_freep(&is->frame);
2365
2366         if (is->rdft) {
2367             av_rdft_end(is->rdft);
2368             av_freep(&is->rdft_data);
2369             is->rdft = NULL;
2370             is->rdft_bits = 0;
2371         }
2372         break;
2373     case AVMEDIA_TYPE_VIDEO:
2374         packet_queue_abort(&is->videoq);
2375
2376         /* note: we also signal the condition to make sure we unblock the
2377            video thread in all cases */
2378         SDL_LockMutex(is->pictq_mutex);
2379         SDL_CondSignal(is->pictq_cond);
2380         SDL_UnlockMutex(is->pictq_mutex);
2381
2382         SDL_WaitThread(is->video_tid, NULL);
2383
2384         packet_queue_end(&is->videoq);
2385         break;
2386     case AVMEDIA_TYPE_SUBTITLE:
2387         packet_queue_abort(&is->subtitleq);
2388
2389         /* note: we also signal the condition to make sure we unblock the
2390            subtitle thread in all cases */
2391         SDL_LockMutex(is->subpq_mutex);
2392         is->subtitle_stream_changed = 1;
2393
2394         SDL_CondSignal(is->subpq_cond);
2395         SDL_UnlockMutex(is->subpq_mutex);
2396
2397         SDL_WaitThread(is->subtitle_tid, NULL);
2398
2399         packet_queue_end(&is->subtitleq);
2400         break;
2401     default:
2402         break;
2403     }
2404
2405     ic->streams[stream_index]->discard = AVDISCARD_ALL;
2406     avcodec_close(avctx);
2407     switch(avctx->codec_type) {
2408     case AVMEDIA_TYPE_AUDIO:
2409         is->audio_st = NULL;
2410         is->audio_stream = -1;
2411         break;
2412     case AVMEDIA_TYPE_VIDEO:
2413         is->video_st = NULL;
2414         is->video_stream = -1;
2415         break;
2416     case AVMEDIA_TYPE_SUBTITLE:
2417         is->subtitle_st = NULL;
2418         is->subtitle_stream = -1;
2419         break;
2420     default:
2421         break;
2422     }
2423 }
2424
2425 /* since we have only one decoding thread, we can use a global
2426    variable instead of a thread local variable */
2427 static VideoState *global_video_state;
2428
2429 static int decode_interrupt_cb(void *ctx)
2430 {
2431     return (global_video_state && global_video_state->abort_request);
2432 }
2433
2434 /* this thread gets the stream from the disk or the network */
2435 static int read_thread(void *arg)
2436 {
2437     VideoState *is = arg;
2438     AVFormatContext *ic = NULL;
2439     int err, i, ret;
2440     int st_index[AVMEDIA_TYPE_NB];
2441     AVPacket pkt1, *pkt = &pkt1;
2442     int eof=0;
2443     int pkt_in_play_range = 0;
2444     AVDictionaryEntry *t;
2445     AVDictionary **opts;
2446     int orig_nb_streams;
2447
2448     memset(st_index, -1, sizeof(st_index));
2449     is->video_stream = -1;
2450     is->audio_stream = -1;
2451     is->subtitle_stream = -1;
2452
2453     global_video_state = is;
2454
2455     ic = avformat_alloc_context();
2456     ic->interrupt_callback.callback = decode_interrupt_cb;
2457     err = avformat_open_input(&ic, is->filename, is->iformat, &format_opts);
2458     if (err < 0) {
2459         print_error(is->filename, err);
2460         ret = -1;
2461         goto fail;
2462     }
2463     if ((t = av_dict_get(format_opts, "", NULL, AV_DICT_IGNORE_SUFFIX))) {
2464         av_log(NULL, AV_LOG_ERROR, "Option %s not found.\n", t->key);
2465         ret = AVERROR_OPTION_NOT_FOUND;
2466         goto fail;
2467     }
2468     is->ic = ic;
2469
2470     if(genpts)
2471         ic->flags |= AVFMT_FLAG_GENPTS;
2472
2473     opts = setup_find_stream_info_opts(ic, codec_opts);
2474     orig_nb_streams = ic->nb_streams;
2475
2476     err = avformat_find_stream_info(ic, opts);
2477     if (err < 0) {
2478         fprintf(stderr, "%s: could not find codec parameters\n", is->filename);
2479         ret = -1;
2480         goto fail;
2481     }
2482     for (i = 0; i < orig_nb_streams; i++)
2483         av_dict_free(&opts[i]);
2484     av_freep(&opts);
2485
2486     if(ic->pb)
2487         ic->pb->eof_reached= 0; //FIXME hack, ffplay maybe should not use url_feof() to test for the end
2488
2489     if(seek_by_bytes<0)
2490         seek_by_bytes= !!(ic->iformat->flags & AVFMT_TS_DISCONT);
2491
2492     /* if seeking requested, we execute it */
2493     if (start_time != AV_NOPTS_VALUE) {
2494         int64_t timestamp;
2495
2496         timestamp = start_time;
2497         /* add the stream start time */
2498         if (ic->start_time != AV_NOPTS_VALUE)
2499             timestamp += ic->start_time;
2500         ret = avformat_seek_file(ic, -1, INT64_MIN, timestamp, INT64_MAX, 0);
2501         if (ret < 0) {
2502             fprintf(stderr, "%s: could not seek to position %0.3f\n",
2503                     is->filename, (double)timestamp / AV_TIME_BASE);
2504         }
2505     }
2506
2507     for (i = 0; i < ic->nb_streams; i++)
2508         ic->streams[i]->discard = AVDISCARD_ALL;
2509     if (!video_disable)
2510         st_index[AVMEDIA_TYPE_VIDEO] =
2511             av_find_best_stream(ic, AVMEDIA_TYPE_VIDEO,
2512                                 wanted_stream[AVMEDIA_TYPE_VIDEO], -1, NULL, 0);
2513     if (!audio_disable)
2514         st_index[AVMEDIA_TYPE_AUDIO] =
2515             av_find_best_stream(ic, AVMEDIA_TYPE_AUDIO,
2516                                 wanted_stream[AVMEDIA_TYPE_AUDIO],
2517                                 st_index[AVMEDIA_TYPE_VIDEO],
2518                                 NULL, 0);
2519     if (!video_disable)
2520         st_index[AVMEDIA_TYPE_SUBTITLE] =
2521             av_find_best_stream(ic, AVMEDIA_TYPE_SUBTITLE,
2522                                 wanted_stream[AVMEDIA_TYPE_SUBTITLE],
2523                                 (st_index[AVMEDIA_TYPE_AUDIO] >= 0 ?
2524                                  st_index[AVMEDIA_TYPE_AUDIO] :
2525                                  st_index[AVMEDIA_TYPE_VIDEO]),
2526                                 NULL, 0);
2527     if (show_status) {
2528         av_dump_format(ic, 0, is->filename, 0);
2529     }
2530
2531     is->show_mode = show_mode;
2532
2533     /* open the streams */
2534     if (st_index[AVMEDIA_TYPE_AUDIO] >= 0) {
2535         stream_component_open(is, st_index[AVMEDIA_TYPE_AUDIO]);
2536     }
2537
2538     ret=-1;
2539     if (st_index[AVMEDIA_TYPE_VIDEO] >= 0) {
2540         ret= stream_component_open(is, st_index[AVMEDIA_TYPE_VIDEO]);
2541     }
2542     is->refresh_tid = SDL_CreateThread(refresh_thread, is);
2543     if (is->show_mode == SHOW_MODE_NONE)
2544         is->show_mode = ret >= 0 ? SHOW_MODE_VIDEO : SHOW_MODE_RDFT;
2545
2546     if (st_index[AVMEDIA_TYPE_SUBTITLE] >= 0) {
2547         stream_component_open(is, st_index[AVMEDIA_TYPE_SUBTITLE]);
2548     }
2549
2550     if (is->video_stream < 0 && is->audio_stream < 0) {
2551         fprintf(stderr, "%s: could not open codecs\n", is->filename);
2552         ret = -1;
2553         goto fail;
2554     }
2555
2556     for(;;) {
2557         if (is->abort_request)
2558             break;
2559         if (is->paused != is->last_paused) {
2560             is->last_paused = is->paused;
2561             if (is->paused)
2562                 is->read_pause_return= av_read_pause(ic);
2563             else
2564                 av_read_play(ic);
2565         }
2566 #if CONFIG_RTSP_DEMUXER || CONFIG_MMSH_PROTOCOL
2567         if (is->paused &&
2568                 (!strcmp(ic->iformat->name, "rtsp") ||
2569                  (ic->pb && !strncmp(input_filename, "mmsh:", 5)))) {
2570             /* wait 10 ms to avoid trying to get another packet */
2571             /* XXX: horrible */
2572             SDL_Delay(10);
2573             continue;
2574         }
2575 #endif
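         /* seeks requested from the event loop are executed here in the read
            thread; on success all packet queues are flushed and a flush packet
            is queued so each decoder thread resets its codec state */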
2576         if (is->seek_req) {
2577             int64_t seek_target= is->seek_pos;
2578             int64_t seek_min= is->seek_rel > 0 ? seek_target - is->seek_rel + 2: INT64_MIN;
2579             int64_t seek_max= is->seek_rel < 0 ? seek_target - is->seek_rel - 2: INT64_MAX;
2580 //FIXME the +-2 is due to rounding being not done in the correct direction in generation
2581 //      of the seek_pos/seek_rel variables
2582
2583             ret = avformat_seek_file(is->ic, -1, seek_min, seek_target, seek_max, is->seek_flags);
2584             if (ret < 0) {
2585                 fprintf(stderr, "%s: error while seeking\n", is->ic->filename);
2586             }else{
2587                 if (is->audio_stream >= 0) {
2588                     packet_queue_flush(&is->audioq);
2589                     packet_queue_put(&is->audioq, &flush_pkt);
2590                 }
2591                 if (is->subtitle_stream >= 0) {
2592                     packet_queue_flush(&is->subtitleq);
2593                     packet_queue_put(&is->subtitleq, &flush_pkt);
2594                 }
2595                 if (is->video_stream >= 0) {
2596                     packet_queue_flush(&is->videoq);
2597                     packet_queue_put(&is->videoq, &flush_pkt);
2598                 }
2599             }
2600             is->seek_req = 0;
2601             eof= 0;
2602         }
2603
2604         /* if the queues are full, no need to read more */
2605         if (   is->audioq.size + is->videoq.size + is->subtitleq.size > MAX_QUEUE_SIZE
2606             || (   (is->audioq   .size  > MIN_AUDIOQ_SIZE || is->audio_stream<0)
2607                 && (is->videoq   .nb_packets > MIN_FRAMES || is->video_stream<0)
2608                 && (is->subtitleq.nb_packets > MIN_FRAMES || is->subtitle_stream<0))) {
2609             /* wait 10 ms */
2610             SDL_Delay(10);
2611             continue;
2612         }
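         /* at EOF, queue empty packets so decoders with CODEC_CAP_DELAY can
            flush their remaining frames, then loop or auto-exit once all the
            queues have drained */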
2613         if(eof) {
2614             if(is->video_stream >= 0){
2615                 av_init_packet(pkt);
2616                 pkt->data=NULL;
2617                 pkt->size=0;
2618                 pkt->stream_index= is->video_stream;
2619                 packet_queue_put(&is->videoq, pkt);
2620             }
2621             if (is->audio_stream >= 0 &&
2622                 is->audio_st->codec->codec->capabilities & CODEC_CAP_DELAY) {
2623                 av_init_packet(pkt);
2624                 pkt->data = NULL;
2625                 pkt->size = 0;
2626                 pkt->stream_index = is->audio_stream;
2627                 packet_queue_put(&is->audioq, pkt);
2628             }
2629             SDL_Delay(10);
2630             if(is->audioq.size + is->videoq.size + is->subtitleq.size ==0){
2631                 if(loop!=1 && (!loop || --loop)){
2632                     stream_seek(is, start_time != AV_NOPTS_VALUE ? start_time : 0, 0, 0);
2633                 }else if(autoexit){
2634                     ret=AVERROR_EOF;
2635                     goto fail;
2636                 }
2637             }
2638             eof=0;
2639             continue;
2640         }
2641         ret = av_read_frame(ic, pkt);
2642         if (ret < 0) {
2643             if (ret == AVERROR_EOF || url_feof(ic->pb))
2644                 eof=1;
2645             if (ic->pb && ic->pb->error)
2646                 break;
2647             SDL_Delay(100); /* wait for user event */
2648             continue;
2649         }
2650         /* check if packet is in play range specified by user, then queue, otherwise discard */
2651         pkt_in_play_range = duration == AV_NOPTS_VALUE ||
2652                 (pkt->pts - ic->streams[pkt->stream_index]->start_time) *
2653                 av_q2d(ic->streams[pkt->stream_index]->time_base) -
2654                 (double)(start_time != AV_NOPTS_VALUE ? start_time : 0)/1000000
2655                 <= ((double)duration/1000000);
2656         if (pkt->stream_index == is->audio_stream && pkt_in_play_range) {
2657             packet_queue_put(&is->audioq, pkt);
2658         } else if (pkt->stream_index == is->video_stream && pkt_in_play_range) {
2659             packet_queue_put(&is->videoq, pkt);
2660         } else if (pkt->stream_index == is->subtitle_stream && pkt_in_play_range) {
2661             packet_queue_put(&is->subtitleq, pkt);
2662         } else {
2663             av_free_packet(pkt);
2664         }
2665     }
2666     /* wait until the end */
2667     while (!is->abort_request) {
2668         SDL_Delay(100);
2669     }
2670
2671     ret = 0;
2672  fail:
2673     /* disable interrupting */
2674     global_video_state = NULL;
2675
2676     /* close each stream */
2677     if (is->audio_stream >= 0)
2678         stream_component_close(is, is->audio_stream);
2679     if (is->video_stream >= 0)
2680         stream_component_close(is, is->video_stream);
2681     if (is->subtitle_stream >= 0)
2682         stream_component_close(is, is->subtitle_stream);
2683     if (is->ic) {
2684         avformat_close_input(&is->ic);
2685     }
2686     avio_set_interrupt_cb(NULL);
2687
2688     if (ret != 0) {
2689         SDL_Event event;
2690
2691         event.type = FF_QUIT_EVENT;
2692         event.user.data1 = is;
2693         SDL_PushEvent(&event);
2694     }
2695     return 0;
2696 }
2697
2698 static VideoState *stream_open(const char *filename, AVInputFormat *iformat)
2699 {
2700     VideoState *is;
2701
2702     is = av_mallocz(sizeof(VideoState));
2703     if (!is)
2704         return NULL;
2705     av_strlcpy(is->filename, filename, sizeof(is->filename));
2706     is->iformat = iformat;
2707     is->ytop = 0;
2708     is->xleft = 0;
2709
2710     /* start video display */
2711     is->pictq_mutex = SDL_CreateMutex();
2712     is->pictq_cond = SDL_CreateCond();
2713
2714     is->subpq_mutex = SDL_CreateMutex();
2715     is->subpq_cond = SDL_CreateCond();
2716
2717     is->av_sync_type = av_sync_type;
2718     is->read_tid = SDL_CreateThread(read_thread, is);
2719     if (!is->read_tid) {
2720         av_free(is);
2721         return NULL;
2722     }
2723     return is;
2724 }
2725
2726 static void stream_cycle_channel(VideoState *is, int codec_type)
2727 {
2728     AVFormatContext *ic = is->ic;
2729     int start_index, stream_index;
2730     AVStream *st;
2731
2732     if (codec_type == AVMEDIA_TYPE_VIDEO)
2733         start_index = is->video_stream;
2734     else if (codec_type == AVMEDIA_TYPE_AUDIO)
2735         start_index = is->audio_stream;
2736     else
2737         start_index = is->subtitle_stream;
2738     if (start_index < (codec_type == AVMEDIA_TYPE_SUBTITLE ? -1 : 0))
2739         return;
2740     stream_index = start_index;
2741     for(;;) {
2742         if (++stream_index >= is->ic->nb_streams)
2743         {
2744             if (codec_type == AVMEDIA_TYPE_SUBTITLE)
2745             {
2746                 stream_index = -1;
2747                 goto the_end;
2748             } else
2749                 stream_index = 0;
2750         }
2751         if (stream_index == start_index)
2752             return;
2753         st = ic->streams[stream_index];
2754         if (st->codec->codec_type == codec_type) {
2755             /* check that parameters are OK */
2756             switch(codec_type) {
2757             case AVMEDIA_TYPE_AUDIO:
2758                 if (st->codec->sample_rate != 0 &&
2759                     st->codec->channels != 0)
2760                     goto the_end;
2761                 break;
2762             case AVMEDIA_TYPE_VIDEO:
2763             case AVMEDIA_TYPE_SUBTITLE:
2764                 goto the_end;
2765             default:
2766                 break;
2767             }
2768         }
2769     }
2770  the_end:
2771     stream_component_close(is, start_index);
2772     stream_component_open(is, stream_index);
2773 }
2774
2775
2776 static void toggle_full_screen(VideoState *is)
2777 {
2778     int i;
2779     is_full_screen = !is_full_screen;
2780 #if defined(__APPLE__) && SDL_VERSION_ATLEAST(1, 2, 14)
2781     /* OS X needs to reallocate the SDL overlays */
2782     for (i = 0; i < VIDEO_PICTURE_QUEUE_SIZE; i++) {
2783         is->pictq[i].reallocate = 1;
2784     }
2785 #endif
2786     video_open(is, 1);
2787 }
2788
2789 static void toggle_pause(VideoState *is)
2790 {
2791     stream_toggle_pause(is);
2792     is->step = 0;
2793 }
2794
2795 static void step_to_next_frame(VideoState *is)
2796 {
2797     /* if the stream is paused, unpause it, then step */
2798     if (is->paused)
2799         stream_toggle_pause(is);
2800     is->step = 1;
2801 }
2802
2803 static void toggle_audio_display(VideoState *is)
2804 {
2805     int bgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0x00);
2806     is->show_mode = (is->show_mode + 1) % SHOW_MODE_NB;
2807     fill_rectangle(screen,
2808                 is->xleft, is->ytop, is->width, is->height,
2809                 bgcolor);
2810     SDL_UpdateRect(screen, is->xleft, is->ytop, is->width, is->height);
2811 }
2812
2813 /* handle an event sent by the GUI */
2814 static void event_loop(VideoState *cur_stream)
2815 {
2816     SDL_Event event;
2817     double incr, pos, frac;
2818
2819     for(;;) {
2820         double x;
2821         SDL_WaitEvent(&event);
2822         switch(event.type) {
2823         case SDL_KEYDOWN:
2824             if (exit_on_keydown) {
2825                 do_exit(cur_stream);
2826                 break;
2827             }
2828             switch(event.key.keysym.sym) {
2829             case SDLK_ESCAPE:
2830             case SDLK_q:
2831                 do_exit(cur_stream);
2832                 break;
2833             case SDLK_f:
2834                 toggle_full_screen(cur_stream);
2835                 break;
2836             case SDLK_p:
2837             case SDLK_SPACE:
2838                 toggle_pause(cur_stream);
2839                 break;
2840             case SDLK_s: //S: Step to next frame
2841                 step_to_next_frame(cur_stream);
2842                 break;
2843             case SDLK_a:
2844                 stream_cycle_channel(cur_stream, AVMEDIA_TYPE_AUDIO);
2845                 break;
2846             case SDLK_v:
2847                 stream_cycle_channel(cur_stream, AVMEDIA_TYPE_VIDEO);
2848                 break;
2849             case SDLK_t:
2850                 stream_cycle_channel(cur_stream, AVMEDIA_TYPE_SUBTITLE);
2851                 break;
2852             case SDLK_w:
2853                 toggle_audio_display(cur_stream);
2854                 break;
2855             case SDLK_PAGEUP:
2856                 incr = 600.0;
2857                 goto do_seek;
2858             case SDLK_PAGEDOWN:
2859                 incr = -600.0;
2860                 goto do_seek;
2861             case SDLK_LEFT:
2862                 incr = -10.0;
2863                 goto do_seek;
2864             case SDLK_RIGHT:
2865                 incr = 10.0;
2866                 goto do_seek;
2867             case SDLK_UP:
2868                 incr = 60.0;
2869                 goto do_seek;
2870             case SDLK_DOWN:
2871                 incr = -60.0;
2872             do_seek:
2873                 if (seek_by_bytes) {
2874                     if (cur_stream->video_stream >= 0 && cur_stream->video_current_pos>=0){
2875                         pos= cur_stream->video_current_pos;
2876                     }else if(cur_stream->audio_stream >= 0 && cur_stream->audio_pkt.pos>=0){
2877                         pos= cur_stream->audio_pkt.pos;
2878                     }else
2879                         pos = avio_tell(cur_stream->ic->pb);
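                     /* convert the seek step from seconds to bytes using the
                        container bit rate, or a guess of ~180 kB/s if unknown */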
2880                     if (cur_stream->ic->bit_rate)
2881                         incr *= cur_stream->ic->bit_rate / 8.0;
2882                     else
2883                         incr *= 180000.0;
2884                     pos += incr;
2885                     stream_seek(cur_stream, pos, incr, 1);
2886                 } else {
2887                     pos = get_master_clock(cur_stream);
2888                     pos += incr;
2889                     stream_seek(cur_stream, (int64_t)(pos * AV_TIME_BASE), (int64_t)(incr * AV_TIME_BASE), 0);
2890                 }
2891                 break;
2892             default:
2893                 break;
2894             }
2895             break;
2896         case SDL_MOUSEBUTTONDOWN:
2897             if (exit_on_mousedown) {
2898                 do_exit(cur_stream);
2899                 break;
2900             }
2901         case SDL_MOUSEMOTION:
2902             if(event.type ==SDL_MOUSEBUTTONDOWN){
2903                 x= event.button.x;
2904             }else{
2905                 if(event.motion.state != SDL_PRESSED)
2906                     break;
2907                 x= event.motion.x;
2908             }
2909             if(seek_by_bytes || cur_stream->ic->duration<=0){
2910                 uint64_t size=  avio_size(cur_stream->ic->pb);
2911                 stream_seek(cur_stream, size*x/cur_stream->width, 0, 1);
2912             }else{
2913                 int64_t ts;
2914                 int ns, hh, mm, ss;
2915                 int tns, thh, tmm, tss;
2916                 tns = cur_stream->ic->duration/1000000LL;
2917                 thh = tns/3600;
2918                 tmm = (tns%3600)/60;
2919                 tss = (tns%60);
2920                 frac = x/cur_stream->width;
2921                 ns = frac*tns;
2922                 hh = ns/3600;
2923                 mm = (ns%3600)/60;
2924                 ss = (ns%60);
2925                 fprintf(stderr, "Seek to %2.0f%% (%2d:%02d:%02d) of total duration (%2d:%02d:%02d)       \n", frac*100,
2926                         hh, mm, ss, thh, tmm, tss);
2927                 ts = frac*cur_stream->ic->duration;
2928                 if (cur_stream->ic->start_time != AV_NOPTS_VALUE)
2929                     ts += cur_stream->ic->start_time;
2930                 stream_seek(cur_stream, ts, 0, 0);
2931             }
2932             break;
2933         case SDL_VIDEORESIZE:
2934             screen = SDL_SetVideoMode(event.resize.w, event.resize.h, 0,
2935                                       SDL_HWSURFACE|SDL_RESIZABLE|SDL_ASYNCBLIT|SDL_HWACCEL);
2936             screen_width = cur_stream->width = event.resize.w;
2937             screen_height= cur_stream->height= event.resize.h;
2938             break;
2939         case SDL_QUIT:
2940         case FF_QUIT_EVENT:
2941             do_exit(cur_stream);
2942             break;
2943         case FF_ALLOC_EVENT:
2944             video_open(event.user.data1, 0);
2945             alloc_picture(event.user.data1);
2946             break;
2947         case FF_REFRESH_EVENT:
2948             video_refresh(event.user.data1);
2949             cur_stream->refresh=0;
2950             break;
2951         default:
2952             break;
2953         }
2954     }
2955 }
2956
2957 static int opt_frame_size(const char *opt, const char *arg)
2958 {
2959     av_log(NULL, AV_LOG_WARNING, "Option -s is deprecated, use -video_size.\n");
2960     return opt_default("video_size", arg);
2961 }
2962
2963 static int opt_width(const char *opt, const char *arg)
2964 {
2965     screen_width = parse_number_or_die(opt, arg, OPT_INT64, 1, INT_MAX);
2966     return 0;
2967 }
2968
2969 static int opt_height(const char *opt, const char *arg)
2970 {
2971     screen_height = parse_number_or_die(opt, arg, OPT_INT64, 1, INT_MAX);
2972     return 0;
2973 }
2974
2975 static int opt_format(const char *opt, const char *arg)
2976 {
2977     file_iformat = av_find_input_format(arg);
2978     if (!file_iformat) {
2979         fprintf(stderr, "Unknown input format: %s\n", arg);
2980         return AVERROR(EINVAL);
2981     }
2982     return 0;
2983 }
2984
2985 static int opt_frame_pix_fmt(const char *opt, const char *arg)
2986 {
2987     av_log(NULL, AV_LOG_WARNING, "Option -pix_fmt is deprecated, use -pixel_format.\n");
2988     return opt_default("pixel_format", arg);
2989 }
2990
2991 static int opt_sync(const char *opt, const char *arg)
2992 {
2993     if (!strcmp(arg, "audio"))
2994         av_sync_type = AV_SYNC_AUDIO_MASTER;
2995     else if (!strcmp(arg, "video"))
2996         av_sync_type = AV_SYNC_VIDEO_MASTER;
2997     else if (!strcmp(arg, "ext"))
2998         av_sync_type = AV_SYNC_EXTERNAL_CLOCK;
2999     else {
3000         fprintf(stderr, "Unknown value for %s: %s\n", opt, arg);
3001         exit(1);
3002     }
3003     return 0;
3004 }
3005
3006 static int opt_seek(const char *opt, const char *arg)
3007 {
3008     start_time = parse_time_or_die(opt, arg, 1);
3009     return 0;
3010 }
3011
3012 static int opt_duration(const char *opt, const char *arg)
3013 {
3014     duration = parse_time_or_die(opt, arg, 1);
3015     return 0;
3016 }
3017
3018 static int opt_show_mode(const char *opt, const char *arg)
3019 {
3020     show_mode = !strcmp(arg, "video") ? SHOW_MODE_VIDEO :
3021                 !strcmp(arg, "waves") ? SHOW_MODE_WAVES :
3022                 !strcmp(arg, "rdft" ) ? SHOW_MODE_RDFT  :
3023                 parse_number_or_die(opt, arg, OPT_INT, 0, SHOW_MODE_NB-1);
3024     return 0;
3025 }
3026
3027 static void opt_input_file(void *optctx, const char *filename)
3028 {
3029     if (input_filename) {
3030         fprintf(stderr, "Argument '%s' provided as input filename, but '%s' was already specified.\n",
3031                 filename, input_filename);
3032         exit_program(1);
3033     }
3034     if (!strcmp(filename, "-"))
3035         filename = "pipe:";
3036     input_filename = filename;
3037 }
3038
3039 static int opt_codec(void *o, const char *opt, const char *arg)
3040 {
3041     switch(opt[strlen(opt)-1]){
3042     case 'a' :    audio_codec_name = arg; break;
3043     case 's' : subtitle_codec_name = arg; break;
3044     case 'v' :    video_codec_name = arg; break;
3045     }
3046     return 0;
3047 }
3048
3049 static int dummy;
3050
3051 static const OptionDef options[] = {
3052 #include "cmdutils_common_opts.h"
3053     { "x", HAS_ARG, {(void*)opt_width}, "force displayed width", "width" },
3054     { "y", HAS_ARG, {(void*)opt_height}, "force displayed height", "height" },
3055     { "s", HAS_ARG | OPT_VIDEO, {(void*)opt_frame_size}, "set frame size (WxH or abbreviation)", "size" },
3056     { "fs", OPT_BOOL, {(void*)&is_full_screen}, "force full screen" },
3057     { "an", OPT_BOOL, {(void*)&audio_disable}, "disable audio" },
3058     { "vn", OPT_BOOL, {(void*)&video_disable}, "disable video" },
3059     { "ast", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&wanted_stream[AVMEDIA_TYPE_AUDIO]}, "select desired audio stream", "stream_number" },
3060     { "vst", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&wanted_stream[AVMEDIA_TYPE_VIDEO]}, "select desired video stream", "stream_number" },
3061     { "sst", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&wanted_stream[AVMEDIA_TYPE_SUBTITLE]}, "select desired subtitle stream", "stream_number" },
3062     { "ss", HAS_ARG, {(void*)opt_seek}, "seek to a given position in seconds", "pos" },
3063     { "t", HAS_ARG, {(void*)opt_duration}, "play \"duration\" seconds of audio/video", "duration" },
3064     { "bytes", OPT_INT | HAS_ARG, {(void*)&seek_by_bytes}, "seek by bytes 0=off 1=on -1=auto", "val" },
3065     { "nodisp", OPT_BOOL, {(void*)&display_disable}, "disable graphical display" },
3066     { "f", HAS_ARG, {(void*)opt_format}, "force format", "fmt" },
3067     { "pix_fmt", HAS_ARG | OPT_EXPERT | OPT_VIDEO, {(void*)opt_frame_pix_fmt}, "set pixel format", "format" },
3068     { "stats", OPT_BOOL | OPT_EXPERT, {(void*)&show_status}, "show status", "" },
3069     { "bug", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&workaround_bugs}, "workaround bugs", "" },
3070     { "fast", OPT_BOOL | OPT_EXPERT, {(void*)&fast}, "non spec compliant optimizations", "" },
3071     { "genpts", OPT_BOOL | OPT_EXPERT, {(void*)&genpts}, "generate pts", "" },
3072     { "drp", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&decoder_reorder_pts}, "let decoder reorder pts 0=off 1=on -1=auto", ""},
3073     { "lowres", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&lowres}, "decode at a lowered resolution (1=half, 2=quarter size)", "factor" },
3074     { "skiploop", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&skip_loop_filter}, "skip loop filtering for the selected frame types", "mode" },
3075     { "skipframe", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&skip_frame}, "skip decoding of the selected frame types", "mode" },
3076     { "skipidct", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&skip_idct}, "skip the IDCT for the selected frame types", "mode" },
3077     { "idct", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&idct}, "set idct algo",  "algo" },
3078     { "er", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&error_recognition}, "set error detection threshold (0-4)",  "threshold" },
3079     { "ec", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&error_concealment}, "set error concealment options",  "bit_mask" },
3080     { "sync", HAS_ARG | OPT_EXPERT, {(void*)opt_sync}, "set audio-video sync. type (type=audio/video/ext)", "type" },
3081     { "autoexit", OPT_BOOL | OPT_EXPERT, {(void*)&autoexit}, "exit at the end", "" },
3082     { "exitonkeydown", OPT_BOOL | OPT_EXPERT, {(void*)&exit_on_keydown}, "exit on key down", "" },
3083     { "exitonmousedown", OPT_BOOL | OPT_EXPERT, {(void*)&exit_on_mousedown}, "exit on mouse down", "" },
3084     { "loop", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&loop}, "set number of times the playback shall be looped", "loop count" },
3085     { "framedrop", OPT_BOOL | OPT_EXPERT, {(void*)&framedrop}, "drop frames when cpu is too slow", "" },
3086     { "window_title", OPT_STRING | HAS_ARG, {(void*)&window_title}, "set window title", "window title" },
3087 #if CONFIG_AVFILTER
3088     { "vf", OPT_STRING | HAS_ARG, {(void*)&vfilters}, "video filters", "filter list" },
3089 #endif
3090     { "rdftspeed", OPT_INT | HAS_ARG | OPT_AUDIO | OPT_EXPERT, {(void*)&rdftspeed}, "set the RDFT display refresh interval", "msecs" },
3091     { "showmode", HAS_ARG, {(void*)opt_show_mode}, "select show mode (0 = video, 1 = waves, 2 = RDFT)", "mode" },
3092     { "default", HAS_ARG | OPT_AUDIO | OPT_VIDEO | OPT_EXPERT, {(void*)opt_default}, "generic catch all option", "" },
3093     { "i", OPT_BOOL, {(void *)&dummy}, "read specified file", "input_file"},
3094     { "codec", HAS_ARG | OPT_FUNC2, {(void*)opt_codec}, "force decoder", "decoder" },
3095     { NULL, },
3096 };
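/* A few illustrative invocations built from the options above ("input.mkv"
 * and "input.mp3" are placeholder filenames):
 *
 *   ffplay input.mkv                     play a file with default settings
 *   ffplay -ss 30 -t 10 input.mkv        play 10 seconds starting at 0:30
 *   ffplay -nodisp -autoexit input.mp3   audio only, quit when playback ends
 */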
3097
3098 static void show_usage(void)
3099 {
3100     printf("Simple media player\n");
3101     printf("usage: %s [options] input_file\n", program_name);
3102     printf("\n");
3103 }
3104
3105 static int opt_help(const char *opt, const char *arg)
3106 {
3107     av_log_set_callback(log_callback_help);
3108     show_usage();
3109     show_help_options(options, "Main options:\n",
3110                       OPT_EXPERT, 0);
3111     show_help_options(options, "\nAdvanced options:\n",
3112                       OPT_EXPERT, OPT_EXPERT);
3113     printf("\n");
3114     show_help_children(avcodec_get_class(), AV_OPT_FLAG_DECODING_PARAM);
3115     show_help_children(avformat_get_class(), AV_OPT_FLAG_DECODING_PARAM);
3116 #if !CONFIG_AVFILTER
3117     show_help_children(sws_get_class(), AV_OPT_FLAG_ENCODING_PARAM);
3118 #endif
3119     printf("\nWhile playing:\n"
3120            "q, ESC              quit\n"
3121            "f                   toggle full screen\n"
3122            "p, SPC              pause\n"
3123            "a                   cycle audio channel\n"
3124            "v                   cycle video channel\n"
3125            "t                   cycle subtitle channel\n"
3126            "w                   show audio waves\n"
3127            "s                   activate frame-step mode\n"
3128            "left/right          seek backward/forward 10 seconds\n"
3129            "down/up             seek backward/forward 1 minute\n"
3130            "page down/page up   seek backward/forward 10 minutes\n"
3131            "mouse click         seek to percentage in file corresponding to fraction of width\n"
3132            );
3133     return 0;
3134 }
3135
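/* Lock manager callback registered with av_lockmgr_register(): implements
 * the AVLockOp operations on top of SDL mutexes.  It must return 0 on
 * success and nonzero on failure. */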
3136 static int lockmgr(void **mtx, enum AVLockOp op)
3137 {
3138     switch (op) {
3139     case AV_LOCK_CREATE:
3140         *mtx = SDL_CreateMutex();
3141         if (!*mtx)
3142             return 1;
3143         return 0;
3144     case AV_LOCK_OBTAIN:
3145         return !!SDL_LockMutex(*mtx);
3146     case AV_LOCK_RELEASE:
3147         return !!SDL_UnlockMutex(*mtx);
3148     case AV_LOCK_DESTROY:
3149         SDL_DestroyMutex(*mtx);
3150         return 0;
3151     }
3152     return 1;
3153 }
3154
3155 /* Program entry point: initialize FFmpeg and SDL, open the input stream and run the event loop */
3156 int main(int argc, char **argv)
3157 {
3158     int flags;
3159     VideoState *is;
3160
3161     av_log_set_flags(AV_LOG_SKIP_REPEATED);
3162     parse_loglevel(argc, argv, options);
3163
3164     /* register all codecs, demuxers and protocols */
3165     avcodec_register_all();
3166 #if CONFIG_AVDEVICE
3167     avdevice_register_all();
3168 #endif
3169 #if CONFIG_AVFILTER
3170     avfilter_register_all();
3171 #endif
3172     av_register_all();
3173     avformat_network_init();
3174
3175     init_opts();
3176
3177     show_banner();
3178
3179     parse_options(NULL, argc, argv, options, opt_input_file);
3180
3181     if (!input_filename) {
3182         show_usage();
3183         fprintf(stderr, "An input file must be specified\n");
3184         fprintf(stderr, "Use -h to get full help or, even better, run 'man %s'\n", program_name);
3185         exit(1);
3186     }
3187
3188     if (display_disable) {
3189         video_disable = 1;
3190     }
3191     flags = SDL_INIT_VIDEO | SDL_INIT_AUDIO | SDL_INIT_TIMER;
3192     if (audio_disable)
3193         flags &= ~SDL_INIT_AUDIO;
3194 #if !defined(__MINGW32__) && !defined(__APPLE__)
3195     flags |= SDL_INIT_EVENTTHREAD; /* Not supported on Windows or Mac OS X */
3196 #endif
3197     if (SDL_Init (flags)) {
3198         fprintf(stderr, "Could not initialize SDL - %s\n", SDL_GetError());
3199         fprintf(stderr, "(Did you set the DISPLAY variable?)\n");
3200         exit(1);
3201     }
3202
3203     if (!display_disable) {
3204 #if HAVE_SDL_VIDEO_SIZE
3205         const SDL_VideoInfo *vi = SDL_GetVideoInfo();
3206         fs_screen_width = vi->current_w;
3207         fs_screen_height = vi->current_h;
3208 #endif
3209     }
3210
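    /* SDL_IGNORE tells SDL to drop these event types instead of delivering
       them through the event queue. */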
3211     SDL_EventState(SDL_ACTIVEEVENT, SDL_IGNORE);
3212     SDL_EventState(SDL_SYSWMEVENT, SDL_IGNORE);
3213     SDL_EventState(SDL_USEREVENT, SDL_IGNORE);
3214
3215     if (av_lockmgr_register(lockmgr)) {
3216         fprintf(stderr, "Could not initialize lock manager!\n");
3217         do_exit(NULL);
3218     }
3219
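    /* flush_pkt is a sentinel packet: it is put into the packet queues when
       seeking so that the decoding threads know to flush their codec state;
       only its unique data pointer matters, not the string contents. */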
3220     av_init_packet(&flush_pkt);
3221     flush_pkt.data = (uint8_t *)"FLUSH";
3222
3223     is = stream_open(input_filename, file_iformat);
3224     if (!is) {
3225         fprintf(stderr, "Failed to initialize VideoState!\n");
3226         do_exit(NULL);
3227     }
3228
3229     event_loop(is);
3230
3231     /* event_loop() does not return; playback terminates via do_exit() */
3232
3233     return 0;
3234 }