1 /*
2  * ffplay : Simple Media Player based on the FFmpeg libraries
3  * Copyright (c) 2003 Fabrice Bellard
4  *
5  * This file is part of FFmpeg.
6  *
7  * FFmpeg is free software; you can redistribute it and/or
8  * modify it under the terms of the GNU Lesser General Public
9  * License as published by the Free Software Foundation; either
10  * version 2.1 of the License, or (at your option) any later version.
11  *
12  * FFmpeg is distributed in the hope that it will be useful,
13  * but WITHOUT ANY WARRANTY; without even the implied warranty of
14  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
15  * Lesser General Public License for more details.
16  *
17  * You should have received a copy of the GNU Lesser General Public
18  * License along with FFmpeg; if not, write to the Free Software
19  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
20  */
21
22 #include "config.h"
23 #include <inttypes.h>
24 #include <math.h>
25 #include <limits.h>
26 #include "libavutil/avstring.h"
27 #include "libavutil/colorspace.h"
28 #include "libavutil/mathematics.h"
29 #include "libavutil/pixdesc.h"
30 #include "libavutil/imgutils.h"
31 #include "libavutil/dict.h"
32 #include "libavutil/parseutils.h"
33 #include "libavutil/samplefmt.h"
34 #include "libavutil/avassert.h"
35 #include "libavformat/avformat.h"
36 #include "libavdevice/avdevice.h"
37 #include "libswscale/swscale.h"
38 #include "libavcodec/audioconvert.h"
39 #include "libavutil/opt.h"
40 #include "libavcodec/avfft.h"
41 #include "libswresample/swresample.h"
42
43 #if CONFIG_AVFILTER
44 # include "libavfilter/avcodec.h"
45 # include "libavfilter/avfilter.h"
46 # include "libavfilter/avfiltergraph.h"
47 # include "libavfilter/buffersink.h"
48 #endif
49
50 #include <SDL.h>
51 #include <SDL_thread.h>
52
53 #include "cmdutils.h"
54
55 #include <unistd.h>
56 #include <assert.h>
57
58 const char program_name[] = "ffplay";
59 const int program_birth_year = 2003;
60
61 #define MAX_QUEUE_SIZE (15 * 1024 * 1024)
62 #define MIN_AUDIOQ_SIZE (20 * 16 * 1024)
63 #define MIN_FRAMES 5
64
65 /* SDL audio buffer size, in samples. Should be small to have precise
66    A/V sync as SDL does not have hardware buffer fullness info. */
67 #define SDL_AUDIO_BUFFER_SIZE 1024
68
69 /* no AV sync correction is done if below the AV sync threshold */
70 #define AV_SYNC_THRESHOLD 0.01
71 /* no AV correction is done if the error is too big */
72 #define AV_NOSYNC_THRESHOLD 10.0
73
74 /* maximum audio speed change to get correct sync */
75 #define SAMPLE_CORRECTION_PERCENT_MAX 10
76
77 /* we use about AUDIO_DIFF_AVG_NB A-V differences to compute the average */
78 #define AUDIO_DIFF_AVG_NB   20
79
80 /* NOTE: the size must be big enough to compensate for the hardware audio buffer size */
81 #define SAMPLE_ARRAY_SIZE (2*65536)
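/* sample_array (see VideoState below) is a circular buffer of the most
   recently output audio samples; video_audio_display() rewinds through it by
   the estimated hardware delay, which is why it has to be this large. */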
82
83 static int sws_flags = SWS_BICUBIC;
84
85 typedef struct PacketQueue {
86     AVPacketList *first_pkt, *last_pkt;
87     int nb_packets;
88     int size;
89     int abort_request;
90     SDL_mutex *mutex;
91     SDL_cond *cond;
92 } PacketQueue;
93
94 #define VIDEO_PICTURE_QUEUE_SIZE 2
95 #define SUBPICTURE_QUEUE_SIZE 4
96
97 typedef struct VideoPicture {
98     double pts;                                  ///<presentation time stamp for this picture
99     double duration;                             ///<expected duration of the frame
100     int64_t pos;                                 ///<byte position in file
101     int skip;
102     SDL_Overlay *bmp;
103     int width, height; /* source height & width */
104     int allocated;
105     int reallocate;
106     enum PixelFormat pix_fmt;
107
108 #if CONFIG_AVFILTER
109     AVFilterBufferRef *picref;
110 #endif
111 } VideoPicture;
112
113 typedef struct SubPicture {
114     double pts; /* presentation time stamp for this picture */
115     AVSubtitle sub;
116 } SubPicture;
117
118 enum {
119     AV_SYNC_AUDIO_MASTER, /* default choice */
120     AV_SYNC_VIDEO_MASTER,
121     AV_SYNC_EXTERNAL_CLOCK, /* synchronize to an external clock */
122 };
123
124 typedef struct VideoState {
125     SDL_Thread *read_tid;
126     SDL_Thread *video_tid;
127     SDL_Thread *refresh_tid;
128     AVInputFormat *iformat;
129     int no_background;
130     int abort_request;
131     int paused;
132     int last_paused;
133     int seek_req;
134     int seek_flags;
135     int64_t seek_pos;
136     int64_t seek_rel;
137     int read_pause_return;
138     AVFormatContext *ic;
139
140     int audio_stream;
141
142     int av_sync_type;
143     double external_clock; /* external clock base */
144     int64_t external_clock_time;
145
146     double audio_clock;
147     double audio_diff_cum; /* used for AV difference average computation */
148     double audio_diff_avg_coef;
149     double audio_diff_threshold;
150     int audio_diff_avg_count;
151     AVStream *audio_st;
152     PacketQueue audioq;
153     int audio_hw_buf_size;
154     /* samples output by the codec. We reserve more space for A/V sync
155        compensation, resampling and format conversion */
156     DECLARE_ALIGNED(16,uint8_t,audio_buf1)[AVCODEC_MAX_AUDIO_FRAME_SIZE * 4];
157     DECLARE_ALIGNED(16,uint8_t,audio_buf2)[AVCODEC_MAX_AUDIO_FRAME_SIZE * 4];
158     uint8_t *audio_buf;
159     unsigned int audio_buf_size; /* in bytes */
160     int audio_buf_index; /* in bytes */
161     int audio_write_buf_size;
162     AVPacket audio_pkt_temp;
163     AVPacket audio_pkt;
164     enum AVSampleFormat audio_src_fmt;
165     enum AVSampleFormat audio_tgt_fmt;
166     int audio_src_channels;
167     int audio_tgt_channels;
168     int64_t audio_src_channel_layout;
169     int64_t audio_tgt_channel_layout;
170     int audio_src_freq;
171     int audio_tgt_freq;
172     struct SwrContext *swr_ctx;
173     double audio_current_pts;
174     double audio_current_pts_drift;
175
176     enum ShowMode {
177         SHOW_MODE_NONE = -1, SHOW_MODE_VIDEO = 0, SHOW_MODE_WAVES, SHOW_MODE_RDFT, SHOW_MODE_NB
178     } show_mode;
179     int16_t sample_array[SAMPLE_ARRAY_SIZE];
180     int sample_array_index;
181     int last_i_start;
182     RDFTContext *rdft;
183     int rdft_bits;
184     FFTSample *rdft_data;
185     int xpos;
186
187     SDL_Thread *subtitle_tid;
188     int subtitle_stream;
189     int subtitle_stream_changed;
190     AVStream *subtitle_st;
191     PacketQueue subtitleq;
192     SubPicture subpq[SUBPICTURE_QUEUE_SIZE];
193     int subpq_size, subpq_rindex, subpq_windex;
194     SDL_mutex *subpq_mutex;
195     SDL_cond *subpq_cond;
196
197     double frame_timer;
198     double frame_last_pts;
199     double frame_last_duration;
200     double video_clock;                          ///<pts of last decoded frame / predicted pts of next decoded frame
201     int video_stream;
202     AVStream *video_st;
203     PacketQueue videoq;
204     double video_current_pts;                    ///<current displayed pts (different from video_clock if frame fifos are used)
205     double video_current_pts_drift;              ///<video_current_pts - time (av_gettime) at which we updated video_current_pts - used to have running video pts
206     int64_t video_current_pos;                   ///<current displayed file pos
207     VideoPicture pictq[VIDEO_PICTURE_QUEUE_SIZE];
208     int pictq_size, pictq_rindex, pictq_windex;
209     SDL_mutex *pictq_mutex;
210     SDL_cond *pictq_cond;
211 #if !CONFIG_AVFILTER
212     struct SwsContext *img_convert_ctx;
213 #endif
214
215     char filename[1024];
216     int width, height, xleft, ytop;
217     int step;
218
219 #if CONFIG_AVFILTER
220     AVFilterContext *out_video_filter;          ///<the last filter in the video chain
221 #endif
222
223     int refresh;
224 } VideoState;
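/* Rough threading model (a summary of what follows in this file): the read
 * thread (read_tid) demuxes packets into audioq/videoq/subtitleq; video_tid
 * and subtitle_tid decode those queues into pictq and subpq; the SDL audio
 * callback (defined further down) drains audioq and resamples into audio_buf;
 * refresh_tid periodically posts FF_REFRESH_EVENT so the main event loop calls
 * video_refresh() to pace and display pictures. */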
225
226 static int opt_help(const char *opt, const char *arg);
227
228 /* options specified by the user */
229 static AVInputFormat *file_iformat;
230 static const char *input_filename;
231 static const char *window_title;
232 static int fs_screen_width;
233 static int fs_screen_height;
234 static int screen_width = 0;
235 static int screen_height = 0;
236 static int audio_disable;
237 static int video_disable;
238 static int wanted_stream[AVMEDIA_TYPE_NB]={
239     [AVMEDIA_TYPE_AUDIO]=-1,
240     [AVMEDIA_TYPE_VIDEO]=-1,
241     [AVMEDIA_TYPE_SUBTITLE]=-1,
242 };
243 static int seek_by_bytes=-1;
244 static int display_disable;
245 static int show_status = 1;
246 static int av_sync_type = AV_SYNC_AUDIO_MASTER;
247 static int64_t start_time = AV_NOPTS_VALUE;
248 static int64_t duration = AV_NOPTS_VALUE;
249 static int workaround_bugs = 1;
250 static int fast = 0;
251 static int genpts = 0;
252 static int lowres = 0;
253 static int idct = FF_IDCT_AUTO;
254 static enum AVDiscard skip_frame= AVDISCARD_DEFAULT;
255 static enum AVDiscard skip_idct= AVDISCARD_DEFAULT;
256 static enum AVDiscard skip_loop_filter= AVDISCARD_DEFAULT;
257 static int error_recognition = FF_ER_CAREFUL;
258 static int error_concealment = 3;
259 static int decoder_reorder_pts= -1;
260 static int autoexit;
261 static int exit_on_keydown;
262 static int exit_on_mousedown;
263 static int loop=1;
264 static int framedrop=-1;
265 static enum ShowMode show_mode = SHOW_MODE_NONE;
266 static const char *audio_codec_name;
267 static const char *subtitle_codec_name;
268 static const char *video_codec_name;
269
270 static int rdftspeed=20;
271 #if CONFIG_AVFILTER
272 static char *vfilters = NULL;
273 #endif
274
275 /* current context */
276 static int is_full_screen;
277 static int64_t audio_callback_time;
278
279 static AVPacket flush_pkt;
280
281 #define FF_ALLOC_EVENT   (SDL_USEREVENT)
282 #define FF_REFRESH_EVENT (SDL_USEREVENT + 1)
283 #define FF_QUIT_EVENT    (SDL_USEREVENT + 2)
284
285 static SDL_Surface *screen;
286
287 void exit_program(int ret)
288 {
289     exit(ret);
290 }
291
292 static int packet_queue_put(PacketQueue *q, AVPacket *pkt)
293 {
294     AVPacketList *pkt1;
295
296     /* duplicate the packet */
297     if (pkt!=&flush_pkt && av_dup_packet(pkt) < 0)
298         return -1;
299
300     pkt1 = av_malloc(sizeof(AVPacketList));
301     if (!pkt1)
302         return -1;
303     pkt1->pkt = *pkt;
304     pkt1->next = NULL;
305
306
307     SDL_LockMutex(q->mutex);
308
309     if (!q->last_pkt)
310
311         q->first_pkt = pkt1;
312     else
313         q->last_pkt->next = pkt1;
314     q->last_pkt = pkt1;
315     q->nb_packets++;
316     q->size += pkt1->pkt.size + sizeof(*pkt1);
317     /* XXX: should duplicate packet data in DV case */
318     SDL_CondSignal(q->cond);
319
320     SDL_UnlockMutex(q->mutex);
321     return 0;
322 }
323
324 /* packet queue handling */
325 static void packet_queue_init(PacketQueue *q)
326 {
327     memset(q, 0, sizeof(PacketQueue));
328     q->mutex = SDL_CreateMutex();
329     q->cond = SDL_CreateCond();
330     packet_queue_put(q, &flush_pkt);
331 }
332
333 static void packet_queue_flush(PacketQueue *q)
334 {
335     AVPacketList *pkt, *pkt1;
336
337     SDL_LockMutex(q->mutex);
338     for(pkt = q->first_pkt; pkt != NULL; pkt = pkt1) {
339         pkt1 = pkt->next;
340         av_free_packet(&pkt->pkt);
341         av_freep(&pkt);
342     }
343     q->last_pkt = NULL;
344     q->first_pkt = NULL;
345     q->nb_packets = 0;
346     q->size = 0;
347     SDL_UnlockMutex(q->mutex);
348 }
349
350 static void packet_queue_end(PacketQueue *q)
351 {
352     packet_queue_flush(q);
353     SDL_DestroyMutex(q->mutex);
354     SDL_DestroyCond(q->cond);
355 }
356
357 static void packet_queue_abort(PacketQueue *q)
358 {
359     SDL_LockMutex(q->mutex);
360
361     q->abort_request = 1;
362
363     SDL_CondSignal(q->cond);
364
365     SDL_UnlockMutex(q->mutex);
366 }
367
368 /* return < 0 if aborted, 0 if no packet and > 0 if packet.  */
369 static int packet_queue_get(PacketQueue *q, AVPacket *pkt, int block)
370 {
371     AVPacketList *pkt1;
372     int ret;
373
374     SDL_LockMutex(q->mutex);
375
376     for(;;) {
377         if (q->abort_request) {
378             ret = -1;
379             break;
380         }
381
382         pkt1 = q->first_pkt;
383         if (pkt1) {
384             q->first_pkt = pkt1->next;
385             if (!q->first_pkt)
386                 q->last_pkt = NULL;
387             q->nb_packets--;
388             q->size -= pkt1->pkt.size + sizeof(*pkt1);
389             *pkt = pkt1->pkt;
390             av_free(pkt1);
391             ret = 1;
392             break;
393         } else if (!block) {
394             ret = 0;
395             break;
396         } else {
397             SDL_CondWait(q->cond, q->mutex);
398         }
399     }
400     SDL_UnlockMutex(q->mutex);
401     return ret;
402 }
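/* Typical consumer pattern (used by the decoder threads below):
 *
 *     AVPacket pkt;
 *     if (packet_queue_get(&is->videoq, &pkt, 1) < 0)
 *         return -1;          // the queue was aborted
 *     ...decode pkt...
 *     av_free_packet(&pkt);
 *
 * With block != 0 the call waits on the condition variable until a packet
 * arrives or packet_queue_abort() is called. */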
403
404 static inline void fill_rectangle(SDL_Surface *screen,
405                                   int x, int y, int w, int h, int color)
406 {
407     SDL_Rect rect;
408     rect.x = x;
409     rect.y = y;
410     rect.w = w;
411     rect.h = h;
412     SDL_FillRect(screen, &rect, color);
413 }
414
415 #define ALPHA_BLEND(a, oldp, newp, s)\
416 ((((oldp << s) * (255 - (a))) + (newp * (a))) / (255 << s))
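/* ALPHA_BLEND computes oldp*(255-a)/255 + newp*a/255, i.e. a standard alpha
 * blend (a = 0 keeps oldp, a = 255 replaces it with newp). The shift s allows
 * newp to be the *sum* of 2^s accumulated samples (see the u1/v1 chroma sums
 * in blend_subrect below): oldp is scaled up by 2^s and the divisor by the
 * same factor, so the result is blended against the average of that sum. */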
417
418 #define RGBA_IN(r, g, b, a, s)\
419 {\
420     unsigned int v = ((const uint32_t *)(s))[0];\
421     a = (v >> 24) & 0xff;\
422     r = (v >> 16) & 0xff;\
423     g = (v >> 8) & 0xff;\
424     b = v & 0xff;\
425 }
426
427 #define YUVA_IN(y, u, v, a, s, pal)\
428 {\
429     unsigned int val = ((const uint32_t *)(pal))[*(const uint8_t*)(s)];\
430     a = (val >> 24) & 0xff;\
431     y = (val >> 16) & 0xff;\
432     u = (val >> 8) & 0xff;\
433     v = val & 0xff;\
434 }
435
436 #define YUVA_OUT(d, y, u, v, a)\
437 {\
438     ((uint32_t *)(d))[0] = (a << 24) | (y << 16) | (u << 8) | v;\
439 }
440
441
442 #define BPP 1
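/* Subtitle bitmaps are 8-bit paletted: each pixel is one byte (hence BPP == 1)
   indexing pal[], whose 32-bit entries are unpacked with YUVA_IN above. */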
443
444 static void blend_subrect(AVPicture *dst, const AVSubtitleRect *rect, int imgw, int imgh)
445 {
446     int wrap, wrap3, width2, skip2;
447     int y, u, v, a, u1, v1, a1, w, h;
448     uint8_t *lum, *cb, *cr;
449     const uint8_t *p;
450     const uint32_t *pal;
451     int dstx, dsty, dstw, dsth;
452
453     dstw = av_clip(rect->w, 0, imgw);
454     dsth = av_clip(rect->h, 0, imgh);
455     dstx = av_clip(rect->x, 0, imgw - dstw);
456     dsty = av_clip(rect->y, 0, imgh - dsth);
457     lum = dst->data[0] + dsty * dst->linesize[0];
458     cb = dst->data[1] + (dsty >> 1) * dst->linesize[1];
459     cr = dst->data[2] + (dsty >> 1) * dst->linesize[2];
460
461     width2 = ((dstw + 1) >> 1) + (dstx & ~dstw & 1);
462     skip2 = dstx >> 1;
463     wrap = dst->linesize[0];
464     wrap3 = rect->pict.linesize[0];
465     p = rect->pict.data[0];
466     pal = (const uint32_t *)rect->pict.data[1];  /* Now in YCrCb! */
467
468     if (dsty & 1) {
469         lum += dstx;
470         cb += skip2;
471         cr += skip2;
472
473         if (dstx & 1) {
474             YUVA_IN(y, u, v, a, p, pal);
475             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
476             cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
477             cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
478             cb++;
479             cr++;
480             lum++;
481             p += BPP;
482         }
483         for(w = dstw - (dstx & 1); w >= 2; w -= 2) {
484             YUVA_IN(y, u, v, a, p, pal);
485             u1 = u;
486             v1 = v;
487             a1 = a;
488             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
489
490             YUVA_IN(y, u, v, a, p + BPP, pal);
491             u1 += u;
492             v1 += v;
493             a1 += a;
494             lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
495             cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
496             cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
497             cb++;
498             cr++;
499             p += 2 * BPP;
500             lum += 2;
501         }
502         if (w) {
503             YUVA_IN(y, u, v, a, p, pal);
504             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
505             cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
506             cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
507             p++;
508             lum++;
509         }
510         p += wrap3 - dstw * BPP;
511         lum += wrap - dstw - dstx;
512         cb += dst->linesize[1] - width2 - skip2;
513         cr += dst->linesize[2] - width2 - skip2;
514     }
515     for(h = dsth - (dsty & 1); h >= 2; h -= 2) {
516         lum += dstx;
517         cb += skip2;
518         cr += skip2;
519
520         if (dstx & 1) {
521             YUVA_IN(y, u, v, a, p, pal);
522             u1 = u;
523             v1 = v;
524             a1 = a;
525             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
526             p += wrap3;
527             lum += wrap;
528             YUVA_IN(y, u, v, a, p, pal);
529             u1 += u;
530             v1 += v;
531             a1 += a;
532             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
533             cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
534             cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
535             cb++;
536             cr++;
537             p += -wrap3 + BPP;
538             lum += -wrap + 1;
539         }
540         for(w = dstw - (dstx & 1); w >= 2; w -= 2) {
541             YUVA_IN(y, u, v, a, p, pal);
542             u1 = u;
543             v1 = v;
544             a1 = a;
545             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
546
547             YUVA_IN(y, u, v, a, p + BPP, pal);
548             u1 += u;
549             v1 += v;
550             a1 += a;
551             lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
552             p += wrap3;
553             lum += wrap;
554
555             YUVA_IN(y, u, v, a, p, pal);
556             u1 += u;
557             v1 += v;
558             a1 += a;
559             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
560
561             YUVA_IN(y, u, v, a, p + BPP, pal);
562             u1 += u;
563             v1 += v;
564             a1 += a;
565             lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
566
567             cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 2);
568             cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 2);
569
570             cb++;
571             cr++;
572             p += -wrap3 + 2 * BPP;
573             lum += -wrap + 2;
574         }
575         if (w) {
576             YUVA_IN(y, u, v, a, p, pal);
577             u1 = u;
578             v1 = v;
579             a1 = a;
580             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
581             p += wrap3;
582             lum += wrap;
583             YUVA_IN(y, u, v, a, p, pal);
584             u1 += u;
585             v1 += v;
586             a1 += a;
587             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
588             cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
589             cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
590             cb++;
591             cr++;
592             p += -wrap3 + BPP;
593             lum += -wrap + 1;
594         }
595         p += wrap3 + (wrap3 - dstw * BPP);
596         lum += wrap + (wrap - dstw - dstx);
597         cb += dst->linesize[1] - width2 - skip2;
598         cr += dst->linesize[2] - width2 - skip2;
599     }
600     /* handle odd height */
601     if (h) {
602         lum += dstx;
603         cb += skip2;
604         cr += skip2;
605
606         if (dstx & 1) {
607             YUVA_IN(y, u, v, a, p, pal);
608             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
609             cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
610             cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
611             cb++;
612             cr++;
613             lum++;
614             p += BPP;
615         }
616         for(w = dstw - (dstx & 1); w >= 2; w -= 2) {
617             YUVA_IN(y, u, v, a, p, pal);
618             u1 = u;
619             v1 = v;
620             a1 = a;
621             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
622
623             YUVA_IN(y, u, v, a, p + BPP, pal);
624             u1 += u;
625             v1 += v;
626             a1 += a;
627             lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
628             cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u, 1);
629             cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v, 1);
630             cb++;
631             cr++;
632             p += 2 * BPP;
633             lum += 2;
634         }
635         if (w) {
636             YUVA_IN(y, u, v, a, p, pal);
637             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
638             cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
639             cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
640         }
641     }
642 }
643
644 static void free_subpicture(SubPicture *sp)
645 {
646     avsubtitle_free(&sp->sub);
647 }
648
649 static void video_image_display(VideoState *is)
650 {
651     VideoPicture *vp;
652     SubPicture *sp;
653     AVPicture pict;
654     float aspect_ratio;
655     int width, height, x, y;
656     SDL_Rect rect;
657     int i;
658
659     vp = &is->pictq[is->pictq_rindex];
660     if (vp->bmp) {
661 #if CONFIG_AVFILTER
662          if (vp->picref->video->sample_aspect_ratio.num == 0)
663              aspect_ratio = 0;
664          else
665              aspect_ratio = av_q2d(vp->picref->video->sample_aspect_ratio);
666 #else
667
668         /* XXX: use variable in the frame */
669         if (is->video_st->sample_aspect_ratio.num)
670             aspect_ratio = av_q2d(is->video_st->sample_aspect_ratio);
671         else if (is->video_st->codec->sample_aspect_ratio.num)
672             aspect_ratio = av_q2d(is->video_st->codec->sample_aspect_ratio);
673         else
674             aspect_ratio = 0;
675 #endif
676         if (aspect_ratio <= 0.0)
677             aspect_ratio = 1.0;
678         aspect_ratio *= (float)vp->width / (float)vp->height;
679
680         if (is->subtitle_st) {
681             if (is->subpq_size > 0) {
682                 sp = &is->subpq[is->subpq_rindex];
683
684                 if (vp->pts >= sp->pts + ((float) sp->sub.start_display_time / 1000)) {
685                     SDL_LockYUVOverlay (vp->bmp);
686
687                     pict.data[0] = vp->bmp->pixels[0];
688                     pict.data[1] = vp->bmp->pixels[2];
689                     pict.data[2] = vp->bmp->pixels[1];
690
691                     pict.linesize[0] = vp->bmp->pitches[0];
692                     pict.linesize[1] = vp->bmp->pitches[2];
693                     pict.linesize[2] = vp->bmp->pitches[1];
694
695                     for (i = 0; i < sp->sub.num_rects; i++)
696                         blend_subrect(&pict, sp->sub.rects[i],
697                                       vp->bmp->w, vp->bmp->h);
698
699                     SDL_UnlockYUVOverlay (vp->bmp);
700                 }
701             }
702         }
703
704
705         /* XXX: we assume the screen has a 1.0 pixel ratio */
706         height = is->height;
707         width = ((int)rint(height * aspect_ratio)) & ~1;
708         if (width > is->width) {
709             width = is->width;
710             height = ((int)rint(width / aspect_ratio)) & ~1;
711         }
712         x = (is->width - width) / 2;
713         y = (is->height - height) / 2;
714         is->no_background = 0;
715         rect.x = is->xleft + x;
716         rect.y = is->ytop  + y;
717         rect.w = FFMAX(width,  1);
718         rect.h = FFMAX(height, 1);
719         SDL_DisplayYUVOverlay(vp->bmp, &rect);
720     }
721 }
722
723 static inline int compute_mod(int a, int b)
724 {
725     return a < 0 ? a%b + b : a%b;
726 }
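/* compute_mod() is a modulo that always returns a value in [0, b) even for
 * negative a (in C, -3 % 10 == -3, so compute_mod(-3, 10) == 7); it is used
 * to step backwards through the circular sample_array. */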
727
728 static void video_audio_display(VideoState *s)
729 {
730     int i, i_start, x, y1, y, ys, delay, n, nb_display_channels;
731     int ch, channels, h, h2, bgcolor, fgcolor;
732     int16_t time_diff;
733     int rdft_bits, nb_freq;
734
735     for(rdft_bits=1; (1<<rdft_bits)<2*s->height; rdft_bits++)
736         ;
737     nb_freq= 1<<(rdft_bits-1);
738
739     /* compute display index: center on currently output samples */
740     channels = s->audio_tgt_channels;
741     nb_display_channels = channels;
742     if (!s->paused) {
743         int data_used= s->show_mode == SHOW_MODE_WAVES ? s->width : (2*nb_freq);
744         n = 2 * channels;
745         delay = s->audio_write_buf_size;
746         delay /= n;
747
748         /* to be more precise, we take into account the time spent since
749            the last buffer computation */
750         if (audio_callback_time) {
751             time_diff = av_gettime() - audio_callback_time;
752             delay -= (time_diff * s->audio_tgt_freq) / 1000000;
753         }
754
755         delay += 2*data_used;
756         if (delay < data_used)
757             delay = data_used;
758
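        /* 'delay' now estimates, in sample frames, how far behind the write
           index the sample currently being heard is: the bytes still queued
           for SDL (audio_write_buf_size) divided by 2*channels bytes per
           frame, minus the frames played since the last audio callback, plus
           2*data_used so a full window of display data is available. */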
759         i_start= x = compute_mod(s->sample_array_index - delay * channels, SAMPLE_ARRAY_SIZE);
760         if (s->show_mode == SHOW_MODE_WAVES) {
761             h= INT_MIN;
762             for(i=0; i<1000; i+=channels){
763                 int idx= (SAMPLE_ARRAY_SIZE + x - i) % SAMPLE_ARRAY_SIZE;
764                 int a= s->sample_array[idx];
765                 int b= s->sample_array[(idx + 4*channels)%SAMPLE_ARRAY_SIZE];
766                 int c= s->sample_array[(idx + 5*channels)%SAMPLE_ARRAY_SIZE];
767                 int d= s->sample_array[(idx + 9*channels)%SAMPLE_ARRAY_SIZE];
768                 int score= a-d;
769                 if(h<score && (b^c)<0){
770                     h= score;
771                     i_start= idx;
772                 }
773             }
774         }
775
776         s->last_i_start = i_start;
777     } else {
778         i_start = s->last_i_start;
779     }
780
781     bgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0x00);
782     if (s->show_mode == SHOW_MODE_WAVES) {
783         fill_rectangle(screen,
784                        s->xleft, s->ytop, s->width, s->height,
785                        bgcolor);
786
787         fgcolor = SDL_MapRGB(screen->format, 0xff, 0xff, 0xff);
788
789         /* total height for one channel */
790         h = s->height / nb_display_channels;
791         /* graph height / 2 */
792         h2 = (h * 9) / 20;
793         for(ch = 0;ch < nb_display_channels; ch++) {
794             i = i_start + ch;
795             y1 = s->ytop + ch * h + (h / 2); /* position of center line */
796             for(x = 0; x < s->width; x++) {
797                 y = (s->sample_array[i] * h2) >> 15;
798                 if (y < 0) {
799                     y = -y;
800                     ys = y1 - y;
801                 } else {
802                     ys = y1;
803                 }
804                 fill_rectangle(screen,
805                                s->xleft + x, ys, 1, y,
806                                fgcolor);
807                 i += channels;
808                 if (i >= SAMPLE_ARRAY_SIZE)
809                     i -= SAMPLE_ARRAY_SIZE;
810             }
811         }
812
813         fgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0xff);
814
815         for(ch = 1;ch < nb_display_channels; ch++) {
816             y = s->ytop + ch * h;
817             fill_rectangle(screen,
818                            s->xleft, y, s->width, 1,
819                            fgcolor);
820         }
821         SDL_UpdateRect(screen, s->xleft, s->ytop, s->width, s->height);
822     }else{
823         nb_display_channels= FFMIN(nb_display_channels, 2);
824         if(rdft_bits != s->rdft_bits){
825             av_rdft_end(s->rdft);
826             av_free(s->rdft_data);
827             s->rdft = av_rdft_init(rdft_bits, DFT_R2C);
828             s->rdft_bits= rdft_bits;
829             s->rdft_data= av_malloc(4*nb_freq*sizeof(*s->rdft_data));
830         }
831         {
832             FFTSample *data[2];
833             for(ch = 0;ch < nb_display_channels; ch++) {
834                 data[ch] = s->rdft_data + 2*nb_freq*ch;
835                 i = i_start + ch;
836                 for(x = 0; x < 2*nb_freq; x++) {
837                     double w= (x-nb_freq)*(1.0/nb_freq);
838                     data[ch][x]= s->sample_array[i]*(1.0-w*w);
839                     i += channels;
840                     if (i >= SAMPLE_ARRAY_SIZE)
841                         i -= SAMPLE_ARRAY_SIZE;
842                 }
843                 av_rdft_calc(s->rdft, data[ch]);
844             }
845             // Least efficient way to do this; we should of course access it directly, but it's more than fast enough
846             for(y=0; y<s->height; y++){
847                 double w= 1/sqrt(nb_freq);
848                 int a= sqrt(w*sqrt(data[0][2*y+0]*data[0][2*y+0] + data[0][2*y+1]*data[0][2*y+1]));
849                 int b= (nb_display_channels == 2 ) ? sqrt(w*sqrt(data[1][2*y+0]*data[1][2*y+0]
850                        + data[1][2*y+1]*data[1][2*y+1])) : a;
851                 a= FFMIN(a,255);
852                 b= FFMIN(b,255);
853                 fgcolor = SDL_MapRGB(screen->format, a, b, (a+b)/2);
854
855                 fill_rectangle(screen,
856                             s->xpos, s->height-y, 1, 1,
857                             fgcolor);
858             }
859         }
860         SDL_UpdateRect(screen, s->xpos, s->ytop, 1, s->height);
861         s->xpos++;
862         if(s->xpos >= s->width)
863             s->xpos= s->xleft;
864     }
865 }
866
867 static void stream_close(VideoState *is)
868 {
869     VideoPicture *vp;
870     int i;
871     /* XXX: use a special url_shutdown call to abort parse cleanly */
872     is->abort_request = 1;
873     SDL_WaitThread(is->read_tid, NULL);
874     SDL_WaitThread(is->refresh_tid, NULL);
875
876     /* free all pictures */
877     for(i=0;i<VIDEO_PICTURE_QUEUE_SIZE; i++) {
878         vp = &is->pictq[i];
879 #if CONFIG_AVFILTER
880         if (vp->picref) {
881             avfilter_unref_buffer(vp->picref);
882             vp->picref = NULL;
883         }
884 #endif
885         if (vp->bmp) {
886             SDL_FreeYUVOverlay(vp->bmp);
887             vp->bmp = NULL;
888         }
889     }
890     SDL_DestroyMutex(is->pictq_mutex);
891     SDL_DestroyCond(is->pictq_cond);
892     SDL_DestroyMutex(is->subpq_mutex);
893     SDL_DestroyCond(is->subpq_cond);
894 #if !CONFIG_AVFILTER
895     if (is->img_convert_ctx)
896         sws_freeContext(is->img_convert_ctx);
897 #endif
898     av_free(is);
899 }
900
901 static void do_exit(VideoState *is)
902 {
903     if (is) {
904         stream_close(is);
905     }
906     av_lockmgr_register(NULL);
907     uninit_opts();
908 #if CONFIG_AVFILTER
909     avfilter_uninit();
910 #endif
911     if (show_status)
912         printf("\n");
913     SDL_Quit();
914     av_log(NULL, AV_LOG_QUIET, "%s", "");
915     exit(0);
916 }
917
918 static int video_open(VideoState *is){
919     int flags = SDL_HWSURFACE|SDL_ASYNCBLIT|SDL_HWACCEL;
920     int w,h;
921
922     if(is_full_screen) flags |= SDL_FULLSCREEN;
923     else               flags |= SDL_RESIZABLE;
924
925     if (is_full_screen && fs_screen_width) {
926         w = fs_screen_width;
927         h = fs_screen_height;
928     } else if(!is_full_screen && screen_width){
929         w = screen_width;
930         h = screen_height;
931 #if CONFIG_AVFILTER
932     }else if (is->out_video_filter && is->out_video_filter->inputs[0]){
933         w = is->out_video_filter->inputs[0]->w;
934         h = is->out_video_filter->inputs[0]->h;
935 #else
936     }else if (is->video_st && is->video_st->codec->width){
937         w = is->video_st->codec->width;
938         h = is->video_st->codec->height;
939 #endif
940     } else {
941         w = 640;
942         h = 480;
943     }
944     if(screen && is->width == screen->w && screen->w == w
945        && is->height== screen->h && screen->h == h)
946         return 0;
947     screen = SDL_SetVideoMode(w, h, 0, flags);
948     if (!screen) {
949         fprintf(stderr, "SDL: could not set video mode - exiting\n");
950         do_exit(is);
951     }
952     if (!window_title)
953         window_title = input_filename;
954     SDL_WM_SetCaption(window_title, window_title);
955
956     is->width = screen->w;
957     is->height = screen->h;
958
959     return 0;
960 }
961
962 /* display the current picture, if any */
963 static void video_display(VideoState *is)
964 {
965     if(!screen)
966         video_open(is);
967     if (is->audio_st && is->show_mode != SHOW_MODE_VIDEO)
968         video_audio_display(is);
969     else if (is->video_st)
970         video_image_display(is);
971 }
972
973 static int refresh_thread(void *opaque)
974 {
975     VideoState *is= opaque;
976     while(!is->abort_request){
977         SDL_Event event;
978         event.type = FF_REFRESH_EVENT;
979         event.user.data1 = opaque;
980         if(!is->refresh){
981             is->refresh=1;
982             SDL_PushEvent(&event);
983         }
984         // FIXME: ideally we should wait the correct amount of time, but SDL's event passing is so slow it would be silly
985         usleep(is->audio_st && is->show_mode != SHOW_MODE_VIDEO ? rdftspeed*1000 : 5000);
986     }
987     return 0;
988 }
989
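/* The audio/video clocks below are stored as a "drift" value:
 *     drift = pts - system_time_at_last_update   (both in seconds)
 * so that the current clock is simply drift + av_gettime()/1e6. This lets the
 * clocks advance smoothly between updates, while the paused case returns the
 * frozen pts directly. */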
990 /* get the current audio clock value */
991 static double get_audio_clock(VideoState *is)
992 {
993     if (is->paused) {
994         return is->audio_current_pts;
995     } else {
996         return is->audio_current_pts_drift + av_gettime() / 1000000.0;
997     }
998 }
999
1000 /* get the current video clock value */
1001 static double get_video_clock(VideoState *is)
1002 {
1003     if (is->paused) {
1004         return is->video_current_pts;
1005     } else {
1006         return is->video_current_pts_drift + av_gettime() / 1000000.0;
1007     }
1008 }
1009
1010 /* get the current external clock value */
1011 static double get_external_clock(VideoState *is)
1012 {
1013     int64_t ti;
1014     ti = av_gettime();
1015     return is->external_clock + ((ti - is->external_clock_time) * 1e-6);
1016 }
1017
1018 /* get the current master clock value */
1019 static double get_master_clock(VideoState *is)
1020 {
1021     double val;
1022
1023     if (is->av_sync_type == AV_SYNC_VIDEO_MASTER) {
1024         if (is->video_st)
1025             val = get_video_clock(is);
1026         else
1027             val = get_audio_clock(is);
1028     } else if (is->av_sync_type == AV_SYNC_AUDIO_MASTER) {
1029         if (is->audio_st)
1030             val = get_audio_clock(is);
1031         else
1032             val = get_video_clock(is);
1033     } else {
1034         val = get_external_clock(is);
1035     }
1036     return val;
1037 }
1038
1039 /* seek in the stream */
1040 static void stream_seek(VideoState *is, int64_t pos, int64_t rel, int seek_by_bytes)
1041 {
1042     if (!is->seek_req) {
1043         is->seek_pos = pos;
1044         is->seek_rel = rel;
1045         is->seek_flags &= ~AVSEEK_FLAG_BYTE;
1046         if (seek_by_bytes)
1047             is->seek_flags |= AVSEEK_FLAG_BYTE;
1048         is->seek_req = 1;
1049     }
1050 }
1051
1052 /* pause or resume the video */
1053 static void stream_toggle_pause(VideoState *is)
1054 {
1055     if (is->paused) {
1056         is->frame_timer += av_gettime() / 1000000.0 + is->video_current_pts_drift - is->video_current_pts;
1057         if(is->read_pause_return != AVERROR(ENOSYS)){
1058             is->video_current_pts = is->video_current_pts_drift + av_gettime() / 1000000.0;
1059         }
1060         is->video_current_pts_drift = is->video_current_pts - av_gettime() / 1000000.0;
1061     }
1062     is->paused = !is->paused;
1063 }
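/* Note on un-pausing above: av_gettime()/1e6 + drift - video_current_pts is
 * the wall-clock time elapsed since video_current_pts was last updated, i.e.
 * (roughly) the time spent paused, so frame_timer is pushed forward by that
 * amount and playback resumes without trying to "catch up". */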
1064
1065 static double compute_target_delay(double delay, VideoState *is)
1066 {
1067     double sync_threshold, diff;
1068
1069     /* update delay to follow master synchronisation source */
1070     if (((is->av_sync_type == AV_SYNC_AUDIO_MASTER && is->audio_st) ||
1071          is->av_sync_type == AV_SYNC_EXTERNAL_CLOCK)) {
1072         /* if video is slave, we try to correct big delays by
1073            duplicating or deleting a frame */
1074         diff = get_video_clock(is) - get_master_clock(is);
1075
1076         /* skip or repeat frame. We take into account the
1077            delay to compute the threshold. I still don't know
1078            if it is the best guess */
1079         sync_threshold = FFMAX(AV_SYNC_THRESHOLD, delay);
1080         if (fabs(diff) < AV_NOSYNC_THRESHOLD) {
1081             if (diff <= -sync_threshold)
1082                 delay = 0;
1083             else if (diff >= sync_threshold)
1084                 delay = 2 * delay;
1085         }
1086     }
1087
1088     av_dlog(NULL, "video: delay=%0.3f A-V=%f\n",
1089             delay, -diff);
1090
1091     return delay;
1092 }
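/* Example: with a nominal frame delay of 40 ms the sync threshold is
 * FFMAX(AV_SYNC_THRESHOLD, 0.040) = 40 ms. If the video clock is more than
 * 40 ms behind the master clock, the pending frame is shown without waiting
 * (delay = 0); if it is more than 40 ms ahead, the delay is doubled to 80 ms
 * so the master clock can catch up. Differences larger than
 * AV_NOSYNC_THRESHOLD (10 s) are considered hopeless and not corrected here. */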
1093
1094 static void pictq_next_picture(VideoState *is) {
1095     /* update queue size and signal for next picture */
1096     if (++is->pictq_rindex == VIDEO_PICTURE_QUEUE_SIZE)
1097         is->pictq_rindex = 0;
1098
1099     SDL_LockMutex(is->pictq_mutex);
1100     is->pictq_size--;
1101     SDL_CondSignal(is->pictq_cond);
1102     SDL_UnlockMutex(is->pictq_mutex);
1103 }
1104
1105 /* called to display each frame */
1106 static void video_refresh(void *opaque)
1107 {
1108     VideoState *is = opaque;
1109     VideoPicture *vp;
1110
1111     SubPicture *sp, *sp2;
1112
1113     if (is->video_st) {
1114 retry:
1115         if (is->pictq_size == 0) {
1116             // nothing to do, no picture to display in the queue
1117         } else {
1118             double time= av_gettime()/1000000.0;
1119             double last_duration, duration, delay;
1120             /* dequeue the picture */
1121             vp = &is->pictq[is->pictq_rindex];
1122
1123             if (vp->skip) {
1124                 pictq_next_picture(is);
1125                 goto retry;
1126             }
1127
1128             /* compute nominal last_duration */
1129             last_duration = vp->pts - is->frame_last_pts;
1130             if (last_duration > 0 && last_duration < 10.0) {
1131                 /* if duration of the last frame was sane, update last_duration in video state */
1132                 is->frame_last_duration = last_duration;
1133             }
1134             delay = compute_target_delay(is->frame_last_duration, is);
1135
1136             if(time < is->frame_timer + delay)
1137                 return;
1138
1139             is->frame_last_pts = vp->pts;
1140             if (delay > 0)
1141                 is->frame_timer += delay * FFMAX(1, floor((time-is->frame_timer) / delay));
1142
1143             /* update current video pts */
1144             is->video_current_pts = vp->pts;
1145             is->video_current_pts_drift = is->video_current_pts - time;
1146             is->video_current_pos = vp->pos;
1147
1148             if(is->pictq_size > 1) {
1149                  VideoPicture *nextvp= &is->pictq[(is->pictq_rindex+1)%VIDEO_PICTURE_QUEUE_SIZE];
1150                  duration = nextvp->pts - vp->pts; // More accurate this way; 1/time_base often does not reflect the FPS
1151             } else {
1152                  duration = vp->duration;
1153             }
1154
1155             if((framedrop>0 || (framedrop && is->audio_st)) && time > is->frame_timer + duration){
1156                 if(is->pictq_size > 1){
1157                     pictq_next_picture(is);
1158                     goto retry;
1159                 }
1160             }
1161
1162             if(is->subtitle_st) {
1163                 if (is->subtitle_stream_changed) {
1164                     SDL_LockMutex(is->subpq_mutex);
1165
1166                     while (is->subpq_size) {
1167                         free_subpicture(&is->subpq[is->subpq_rindex]);
1168
1169                         /* update queue size and signal for next picture */
1170                         if (++is->subpq_rindex == SUBPICTURE_QUEUE_SIZE)
1171                             is->subpq_rindex = 0;
1172
1173                         is->subpq_size--;
1174                     }
1175                     is->subtitle_stream_changed = 0;
1176
1177                     SDL_CondSignal(is->subpq_cond);
1178                     SDL_UnlockMutex(is->subpq_mutex);
1179                 } else {
1180                     if (is->subpq_size > 0) {
1181                         sp = &is->subpq[is->subpq_rindex];
1182
1183                         if (is->subpq_size > 1)
1184                             sp2 = &is->subpq[(is->subpq_rindex + 1) % SUBPICTURE_QUEUE_SIZE];
1185                         else
1186                             sp2 = NULL;
1187
1188                         if ((is->video_current_pts > (sp->pts + ((float) sp->sub.end_display_time / 1000)))
1189                                 || (sp2 && is->video_current_pts > (sp2->pts + ((float) sp2->sub.start_display_time / 1000))))
1190                         {
1191                             free_subpicture(sp);
1192
1193                             /* update queue size and signal for next picture */
1194                             if (++is->subpq_rindex == SUBPICTURE_QUEUE_SIZE)
1195                                 is->subpq_rindex = 0;
1196
1197                             SDL_LockMutex(is->subpq_mutex);
1198                             is->subpq_size--;
1199                             SDL_CondSignal(is->subpq_cond);
1200                             SDL_UnlockMutex(is->subpq_mutex);
1201                         }
1202                     }
1203                 }
1204             }
1205
1206             /* display picture */
1207             if (!display_disable)
1208                 video_display(is);
1209
1210             pictq_next_picture(is);
1211         }
1212     } else if (is->audio_st) {
1213         /* draw the next audio frame */
1214
1215         /* if there is only an audio stream, display the audio bars (better
1216            than nothing, just to test the implementation) */
1217
1218         /* display picture */
1219         if (!display_disable)
1220             video_display(is);
1221     }
1222     if (show_status) {
1223         static int64_t last_time;
1224         int64_t cur_time;
1225         int aqsize, vqsize, sqsize;
1226         double av_diff;
1227
1228         cur_time = av_gettime();
1229         if (!last_time || (cur_time - last_time) >= 30000) {
1230             aqsize = 0;
1231             vqsize = 0;
1232             sqsize = 0;
1233             if (is->audio_st)
1234                 aqsize = is->audioq.size;
1235             if (is->video_st)
1236                 vqsize = is->videoq.size;
1237             if (is->subtitle_st)
1238                 sqsize = is->subtitleq.size;
1239             av_diff = 0;
1240             if (is->audio_st && is->video_st)
1241                 av_diff = get_audio_clock(is) - get_video_clock(is);
1242             printf("%7.2f A-V:%7.3f aq=%5dKB vq=%5dKB sq=%5dB f=%"PRId64"/%"PRId64"   \r",
1243                    get_master_clock(is),
1244                    av_diff,
1245                    aqsize / 1024,
1246                    vqsize / 1024,
1247                    sqsize,
1248                    is->video_st ? is->video_st->codec->pts_correction_num_faulty_dts : 0,
1249                    is->video_st ? is->video_st->codec->pts_correction_num_faulty_pts : 0);
1250             fflush(stdout);
1251             last_time = cur_time;
1252         }
1253     }
1254 }
1255
1256 /* allocate a picture (this needs to be done in the main thread to avoid
1257    potential locking problems) */
1258 static void alloc_picture(void *opaque)
1259 {
1260     VideoState *is = opaque;
1261     VideoPicture *vp;
1262
1263     vp = &is->pictq[is->pictq_windex];
1264
1265     if (vp->bmp)
1266         SDL_FreeYUVOverlay(vp->bmp);
1267
1268 #if CONFIG_AVFILTER
1269     if (vp->picref)
1270         avfilter_unref_buffer(vp->picref);
1271     vp->picref = NULL;
1272
1273     vp->width   = is->out_video_filter->inputs[0]->w;
1274     vp->height  = is->out_video_filter->inputs[0]->h;
1275     vp->pix_fmt = is->out_video_filter->inputs[0]->format;
1276 #else
1277     vp->width   = is->video_st->codec->width;
1278     vp->height  = is->video_st->codec->height;
1279     vp->pix_fmt = is->video_st->codec->pix_fmt;
1280 #endif
1281
1282     vp->bmp = SDL_CreateYUVOverlay(vp->width, vp->height,
1283                                    SDL_YV12_OVERLAY,
1284                                    screen);
1285     if (!vp->bmp || vp->bmp->pitches[0] < vp->width) {
1286         /* SDL allocates a buffer smaller than requested if the video
1287          * overlay hardware is unable to support the requested size. */
1288         fprintf(stderr, "Error: the video system does not support an image\n"
1289                         "size of %dx%d pixels. Try using -lowres or -vf \"scale=w:h\"\n"
1290                         "to reduce the image size.\n", vp->width, vp->height );
1291         do_exit(is);
1292     }
1293
1294     SDL_LockMutex(is->pictq_mutex);
1295     vp->allocated = 1;
1296     SDL_CondSignal(is->pictq_cond);
1297     SDL_UnlockMutex(is->pictq_mutex);
1298 }
1299
1300 static int queue_picture(VideoState *is, AVFrame *src_frame, double pts1, int64_t pos)
1301 {
1302     VideoPicture *vp;
1303     double frame_delay, pts = pts1;
1304
1305     /* compute the exact PTS for the picture if it is omitted in the stream
1306      * pts1 is the dts of the pkt / pts of the frame */
1307     if (pts != 0) {
1308         /* update video clock with pts, if present */
1309         is->video_clock = pts;
1310     } else {
1311         pts = is->video_clock;
1312     }
1313     /* update video clock for next frame */
1314     frame_delay = av_q2d(is->video_st->codec->time_base);
1315     /* for MPEG2, the frame can be repeated, so we update the
1316        clock accordingly */
1317     frame_delay += src_frame->repeat_pict * (frame_delay * 0.5);
1318     is->video_clock += frame_delay;
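    /* i.e. each decoded frame advances the clock by time_base*(1 + repeat_pict/2);
       for example repeat_pict == 1 (soft telecine) counts as 1.5 frame durations. */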
1319
1320 #if defined(DEBUG_SYNC) && 0
1321     printf("frame_type=%c clock=%0.3f pts=%0.3f\n",
1322            av_get_picture_type_char(src_frame->pict_type), pts, pts1);
1323 #endif
1324
1325     /* wait until we have space to put a new picture */
1326     SDL_LockMutex(is->pictq_mutex);
1327
1328     while (is->pictq_size >= VIDEO_PICTURE_QUEUE_SIZE &&
1329            !is->videoq.abort_request) {
1330         SDL_CondWait(is->pictq_cond, is->pictq_mutex);
1331     }
1332     SDL_UnlockMutex(is->pictq_mutex);
1333
1334     if (is->videoq.abort_request)
1335         return -1;
1336
1337     vp = &is->pictq[is->pictq_windex];
1338
1339     vp->duration = frame_delay;
1340
1341     /* alloc or resize hardware picture buffer */
1342     if (!vp->bmp || vp->reallocate ||
1343 #if CONFIG_AVFILTER
1344         vp->width  != is->out_video_filter->inputs[0]->w ||
1345         vp->height != is->out_video_filter->inputs[0]->h) {
1346 #else
1347         vp->width != is->video_st->codec->width ||
1348         vp->height != is->video_st->codec->height) {
1349 #endif
1350         SDL_Event event;
1351
1352         vp->allocated = 0;
1353         vp->reallocate = 0;
1354
1355         /* the allocation must be done in the main thread to avoid
1356            locking problems */
1357         event.type = FF_ALLOC_EVENT;
1358         event.user.data1 = is;
1359         SDL_PushEvent(&event);
1360
1361         /* wait until the picture is allocated */
1362         SDL_LockMutex(is->pictq_mutex);
1363         while (!vp->allocated && !is->videoq.abort_request) {
1364             SDL_CondWait(is->pictq_cond, is->pictq_mutex);
1365         }
1366         /* if the queue is aborted, we have to pop the pending ALLOC event or wait for the allocation to complete */
1367         if (is->videoq.abort_request && SDL_PeepEvents(&event, 1, SDL_GETEVENT, SDL_EVENTMASK(FF_ALLOC_EVENT)) != 1) {
1368             while (!vp->allocated) {
1369                 SDL_CondWait(is->pictq_cond, is->pictq_mutex);
1370             }
1371         }
1372         SDL_UnlockMutex(is->pictq_mutex);
1373
1374         if (is->videoq.abort_request)
1375             return -1;
1376     }
1377
1378     /* if the frame is not skipped, then display it */
1379     if (vp->bmp) {
1380         AVPicture pict;
1381 #if CONFIG_AVFILTER
1382         if(vp->picref)
1383             avfilter_unref_buffer(vp->picref);
1384         vp->picref = src_frame->opaque;
1385 #endif
1386
1387         /* get a pointer to the bitmap */
1388         SDL_LockYUVOverlay (vp->bmp);
1389
1390         memset(&pict,0,sizeof(AVPicture));
1391         pict.data[0] = vp->bmp->pixels[0];
1392         pict.data[1] = vp->bmp->pixels[2];
1393         pict.data[2] = vp->bmp->pixels[1];
1394
1395         pict.linesize[0] = vp->bmp->pitches[0];
1396         pict.linesize[1] = vp->bmp->pitches[2];
1397         pict.linesize[2] = vp->bmp->pitches[1];
1398
1399 #if CONFIG_AVFILTER
1400         //FIXME use direct rendering
1401         av_picture_copy(&pict, (AVPicture *)src_frame,
1402                         vp->pix_fmt, vp->width, vp->height);
1403 #else
1404         sws_flags = av_get_int(sws_opts, "sws_flags", NULL);
1405         is->img_convert_ctx = sws_getCachedContext(is->img_convert_ctx,
1406             vp->width, vp->height, vp->pix_fmt, vp->width, vp->height,
1407             PIX_FMT_YUV420P, sws_flags, NULL, NULL, NULL);
1408         if (is->img_convert_ctx == NULL) {
1409             fprintf(stderr, "Cannot initialize the conversion context\n");
1410             exit(1);
1411         }
1412         sws_scale(is->img_convert_ctx, src_frame->data, src_frame->linesize,
1413                   0, vp->height, pict.data, pict.linesize);
1414 #endif
1415         /* update the bitmap content */
1416         SDL_UnlockYUVOverlay(vp->bmp);
1417
1418         vp->pts = pts;
1419         vp->pos = pos;
1420         vp->skip = 0;
1421
1422         /* now we can update the picture count */
1423         if (++is->pictq_windex == VIDEO_PICTURE_QUEUE_SIZE)
1424             is->pictq_windex = 0;
1425         SDL_LockMutex(is->pictq_mutex);
1426         is->pictq_size++;
1427         SDL_UnlockMutex(is->pictq_mutex);
1428     }
1429     return 0;
1430 }
1431
1432 static int get_video_frame(VideoState *is, AVFrame *frame, int64_t *pts, AVPacket *pkt)
1433 {
1434     int got_picture, i;
1435
1436     if (packet_queue_get(&is->videoq, pkt, 1) < 0)
1437         return -1;
1438
1439     if (pkt->data == flush_pkt.data) {
1440         avcodec_flush_buffers(is->video_st->codec);
1441
1442         SDL_LockMutex(is->pictq_mutex);
1443         // Make sure there are no long-delay timers (ideally we should just flush the queue, but that's harder)
1444         for (i = 0; i < VIDEO_PICTURE_QUEUE_SIZE; i++) {
1445             is->pictq[i].skip = 1;
1446         }
1447         while (is->pictq_size && !is->videoq.abort_request) {
1448             SDL_CondWait(is->pictq_cond, is->pictq_mutex);
1449         }
1450         is->video_current_pos = -1;
1451         SDL_UnlockMutex(is->pictq_mutex);
1452
1453         is->frame_last_pts = AV_NOPTS_VALUE;
1454         is->frame_last_duration = 0;
1455         is->frame_timer = (double)av_gettime() / 1000000.0;
1456         return 0;
1457     }
1458
1459     avcodec_decode_video2(is->video_st->codec, frame, &got_picture, pkt);
1460
1461     if (got_picture) {
1462         if (decoder_reorder_pts == -1) {
1463             *pts = frame->best_effort_timestamp;
1464         } else if (decoder_reorder_pts) {
1465             *pts = frame->pkt_pts;
1466         } else {
1467             *pts = frame->pkt_dts;
1468         }
1469
1470         if (*pts == AV_NOPTS_VALUE) {
1471             *pts = 0;
1472         }
1473
1474         return 1;
1475
1476     }
1477     return 0;
1478 }
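/* Note on the pts choice above: decoder_reorder_pts == -1 (the default) trusts
 * frame->best_effort_timestamp, libavcodec's heuristic combination of pts and
 * dts; 1 forces the reordered pkt_pts; 0 forces pkt_dts. A missing timestamp
 * falls back to 0, which means "continue from the current video clock" in
 * queue_picture(). */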
1479
1480 #if CONFIG_AVFILTER
1481 typedef struct {
1482     VideoState *is;
1483     AVFrame *frame;
1484     int use_dr1;
1485 } FilterPriv;
1486
1487 static int input_get_buffer(AVCodecContext *codec, AVFrame *pic)
1488 {
1489     AVFilterContext *ctx = codec->opaque;
1490     AVFilterBufferRef  *ref;
1491     int perms = AV_PERM_WRITE;
1492     int i, w, h, stride[4];
1493     unsigned edge;
1494     int pixel_size;
1495
1496     av_assert0(codec->flags & CODEC_FLAG_EMU_EDGE);
1497
1498     if (codec->codec->capabilities & CODEC_CAP_NEG_LINESIZES)
1499         perms |= AV_PERM_NEG_LINESIZES;
1500
1501     if(pic->buffer_hints & FF_BUFFER_HINTS_VALID) {
1502         if(pic->buffer_hints & FF_BUFFER_HINTS_READABLE) perms |= AV_PERM_READ;
1503         if(pic->buffer_hints & FF_BUFFER_HINTS_PRESERVE) perms |= AV_PERM_PRESERVE;
1504         if(pic->buffer_hints & FF_BUFFER_HINTS_REUSABLE) perms |= AV_PERM_REUSE2;
1505     }
1506     if(pic->reference) perms |= AV_PERM_READ | AV_PERM_PRESERVE;
1507
1508     w = codec->width;
1509     h = codec->height;
1510
1511     if(av_image_check_size(w, h, 0, codec))
1512         return -1;
1513
1514     avcodec_align_dimensions2(codec, &w, &h, stride);
1515     edge = codec->flags & CODEC_FLAG_EMU_EDGE ? 0 : avcodec_get_edge_width();
1516     w += edge << 1;
1517     h += edge << 1;
1518
1519     if(!(ref = avfilter_get_video_buffer(ctx->outputs[0], perms, w, h)))
1520         return -1;
1521
1522     pixel_size = av_pix_fmt_descriptors[ref->format].comp[0].step_minus1+1;
1523     ref->video->w = codec->width;
1524     ref->video->h = codec->height;
1525     for(i = 0; i < 4; i ++) {
1526         unsigned hshift = (i == 1 || i == 2) ? av_pix_fmt_descriptors[ref->format].log2_chroma_w : 0;
1527         unsigned vshift = (i == 1 || i == 2) ? av_pix_fmt_descriptors[ref->format].log2_chroma_h : 0;
1528
1529         if (ref->data[i]) {
1530             ref->data[i]    += ((edge * pixel_size) >> hshift) + ((edge * ref->linesize[i]) >> vshift);
1531         }
1532         pic->data[i]     = ref->data[i];
1533         pic->linesize[i] = ref->linesize[i];
1534     }
1535     pic->opaque = ref;
1536     pic->age    = INT_MAX;
1537     pic->type   = FF_BUFFER_TYPE_USER;
1538     pic->reordered_opaque = codec->reordered_opaque;
1539     if(codec->pkt) pic->pkt_pts = codec->pkt->pts;
1540     else           pic->pkt_pts = AV_NOPTS_VALUE;
1541     return 0;
1542 }
1543
1544 static void input_release_buffer(AVCodecContext *codec, AVFrame *pic)
1545 {
1546     memset(pic->data, 0, sizeof(pic->data));
1547     avfilter_unref_buffer(pic->opaque);
1548 }
1549
1550 static int input_reget_buffer(AVCodecContext *codec, AVFrame *pic)
1551 {
1552     AVFilterBufferRef *ref = pic->opaque;
1553
1554     if (pic->data[0] == NULL) {
1555         pic->buffer_hints |= FF_BUFFER_HINTS_READABLE;
1556         return codec->get_buffer(codec, pic);
1557     }
1558
1559     if ((codec->width != ref->video->w) || (codec->height != ref->video->h) ||
1560         (codec->pix_fmt != ref->format)) {
1561         av_log(codec, AV_LOG_ERROR, "Picture properties changed.\n");
1562         return -1;
1563     }
1564
1565     pic->reordered_opaque = codec->reordered_opaque;
1566     if(codec->pkt) pic->pkt_pts = codec->pkt->pts;
1567     else           pic->pkt_pts = AV_NOPTS_VALUE;
1568     return 0;
1569 }
1570
1571 static int input_init(AVFilterContext *ctx, const char *args, void *opaque)
1572 {
1573     FilterPriv *priv = ctx->priv;
1574     AVCodecContext *codec;
1575     if(!opaque) return -1;
1576
1577     priv->is = opaque;
1578     codec    = priv->is->video_st->codec;
1579     codec->opaque = ctx;
1580     if((codec->codec->capabilities & CODEC_CAP_DR1)
1581     ) {
1582         av_assert0(codec->flags & CODEC_FLAG_EMU_EDGE);
1583         priv->use_dr1 = 1;
1584         codec->get_buffer     = input_get_buffer;
1585         codec->release_buffer = input_release_buffer;
1586         codec->reget_buffer   = input_reget_buffer;
1587         codec->thread_safe_callbacks = 1;
1588     }
1589
1590     priv->frame = avcodec_alloc_frame();
1591
1592     return 0;
1593 }
1594
1595 static void input_uninit(AVFilterContext *ctx)
1596 {
1597     FilterPriv *priv = ctx->priv;
1598     av_free(priv->frame);
1599 }
1600
1601 static int input_request_frame(AVFilterLink *link)
1602 {
1603     FilterPriv *priv = link->src->priv;
1604     AVFilterBufferRef *picref;
1605     int64_t pts = 0;
1606     AVPacket pkt;
1607     int ret;
1608
1609     while (!(ret = get_video_frame(priv->is, priv->frame, &pts, &pkt)))
1610         av_free_packet(&pkt);
1611     if (ret < 0)
1612         return -1;
1613
1614     if(priv->use_dr1 && priv->frame->opaque) {
1615         picref = avfilter_ref_buffer(priv->frame->opaque, ~0);
1616     } else {
1617         picref = avfilter_get_video_buffer(link, AV_PERM_WRITE, link->w, link->h);
1618         av_image_copy(picref->data, picref->linesize,
1619                       priv->frame->data, priv->frame->linesize,
1620                       picref->format, link->w, link->h);
1621     }
1622     av_free_packet(&pkt);
1623
1624     avfilter_copy_frame_props(picref, priv->frame);
1625     picref->pts = pts;
1626
1627     avfilter_start_frame(link, picref);
1628     avfilter_draw_slice(link, 0, link->h, 1);
1629     avfilter_end_frame(link);
1630
1631     return 0;
1632 }
1633
1634 static int input_query_formats(AVFilterContext *ctx)
1635 {
1636     FilterPriv *priv = ctx->priv;
1637     enum PixelFormat pix_fmts[] = {
1638         priv->is->video_st->codec->pix_fmt, PIX_FMT_NONE
1639     };
1640
1641     avfilter_set_common_pixel_formats(ctx, avfilter_make_format_list(pix_fmts));
1642     return 0;
1643 }
1644
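/* propagate the decoder's dimensions, sample aspect ratio and the stream
   time base to the output link */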
1645 static int input_config_props(AVFilterLink *link)
1646 {
1647     FilterPriv *priv  = link->src->priv;
1648     AVStream *s = priv->is->video_st;
1649
1650     link->w = s->codec->width;
1651     link->h = s->codec->height;
1652     link->sample_aspect_ratio = s->sample_aspect_ratio.num ?
1653         s->sample_aspect_ratio : s->codec->sample_aspect_ratio;
1654     link->time_base = s->time_base;
1655
1656     return 0;
1657 }
1658
1659 static AVFilter input_filter =
1660 {
1661     .name      = "ffplay_input",
1662
1663     .priv_size = sizeof(FilterPriv),
1664
1665     .init      = input_init,
1666     .uninit    = input_uninit,
1667
1668     .query_formats = input_query_formats,
1669
1670     .inputs    = (AVFilterPad[]) {{ .name = NULL }},
1671     .outputs   = (AVFilterPad[]) {{ .name = "default",
1672                                     .type = AVMEDIA_TYPE_VIDEO,
1673                                     .request_frame = input_request_frame,
1674                                     .config_props  = input_config_props, },
1675                                   { .name = NULL }},
1676 };
1677
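/* build the video filter graph: ffplay_input source -> optional user chain
   given with -vf -> buffersink restricted to PIX_FMT_YUV420P; the sink is
   stored in is->out_video_filter */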
1678 static int configure_video_filters(AVFilterGraph *graph, VideoState *is, const char *vfilters)
1679 {
1680     char sws_flags_str[128];
1681     int ret;
1682     enum PixelFormat pix_fmts[] = { PIX_FMT_YUV420P, PIX_FMT_NONE };
1683     AVBufferSinkParams *buffersink_params = av_buffersink_params_alloc();
1684     AVFilterContext *filt_src = NULL, *filt_out = NULL;
1685     snprintf(sws_flags_str, sizeof(sws_flags_str), "flags=%d", sws_flags);
1686     graph->scale_sws_opts = av_strdup(sws_flags_str);
1687
1688     if ((ret = avfilter_graph_create_filter(&filt_src, &input_filter, "src",
1689                                             NULL, is, graph)) < 0)
1690         return ret;
1691 #if FF_API_OLD_VSINK_API
1692     ret = avfilter_graph_create_filter(&filt_out, avfilter_get_by_name("buffersink"), "out",
1693                                        NULL, pix_fmts, graph);
1694 #else
1695     buffersink_params->pixel_fmts = pix_fmts;
1696     ret = avfilter_graph_create_filter(&filt_out, avfilter_get_by_name("buffersink"), "out",
1697                                        NULL, buffersink_params, graph);
1698 #endif
1699     av_freep(&buffersink_params);
1700     if (ret < 0)
1701         return ret;
1702
1703     if(vfilters) {
1704         AVFilterInOut *outputs = avfilter_inout_alloc();
1705         AVFilterInOut *inputs  = avfilter_inout_alloc();
1706
1707         outputs->name    = av_strdup("in");
1708         outputs->filter_ctx = filt_src;
1709         outputs->pad_idx = 0;
1710         outputs->next    = NULL;
1711
1712         inputs->name    = av_strdup("out");
1713         inputs->filter_ctx = filt_out;
1714         inputs->pad_idx = 0;
1715         inputs->next    = NULL;
1716
1717         if ((ret = avfilter_graph_parse(graph, vfilters, &inputs, &outputs, NULL)) < 0)
1718             return ret;
1719     } else {
1720         if ((ret = avfilter_link(filt_src, 0, filt_out, 0)) < 0)
1721             return ret;
1722     }
1723
1724     if ((ret = avfilter_graph_config(graph, NULL)) < 0)
1725         return ret;
1726
1727     is->out_video_filter = filt_out;
1728
1729     return ret;
1730 }
1731
1732 #endif  /* CONFIG_AVFILTER */
1733
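/* video decoding thread: pull decoded frames (through the filter graph when
   CONFIG_AVFILTER is set, rebuilding it on size changes), convert their pts
   to seconds and queue the pictures for display */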
1734 static int video_thread(void *arg)
1735 {
1736     VideoState *is = arg;
1737     AVFrame *frame= avcodec_alloc_frame();
1738     int64_t pts_int = AV_NOPTS_VALUE, pos = -1;
1739     double pts;
1740     int ret;
1741
1742 #if CONFIG_AVFILTER
1743     AVFilterGraph *graph = avfilter_graph_alloc();
1744     AVFilterContext *filt_out = NULL;
1745     int last_w = is->video_st->codec->width;
1746     int last_h = is->video_st->codec->height;
1747
1748     if ((ret = configure_video_filters(graph, is, vfilters)) < 0)
1749         goto the_end;
1750     filt_out = is->out_video_filter;
1751 #endif
1752
1753     for(;;) {
1754 #if !CONFIG_AVFILTER
1755         AVPacket pkt;
1756 #else
1757         AVFilterBufferRef *picref;
1758         AVRational tb = filt_out->inputs[0]->time_base;
1759 #endif
1760         while (is->paused && !is->videoq.abort_request)
1761             SDL_Delay(10);
1762 #if CONFIG_AVFILTER
1763         if (   last_w != is->video_st->codec->width
1764             || last_h != is->video_st->codec->height) {
1765             av_log(NULL, AV_LOG_INFO, "Frame changed from size:%dx%d to size:%dx%d\n",
1766                    last_w, last_h, is->video_st->codec->width, is->video_st->codec->height);
1767             avfilter_graph_free(&graph);
1768             graph = avfilter_graph_alloc();
1769             if ((ret = configure_video_filters(graph, is, vfilters)) < 0)
1770                 goto the_end;
1771             filt_out = is->out_video_filter;
1772             last_w = is->video_st->codec->width;
1773             last_h = is->video_st->codec->height;
1774         }
1775         ret = av_buffersink_get_buffer_ref(filt_out, &picref, 0);
1776         if (picref) {
1777             avfilter_fill_frame_from_video_buffer_ref(frame, picref);
1778             pts_int = picref->pts;
1779             pos     = picref->pos;
1780             frame->opaque = picref;
1781         }
1782
1783         if (av_cmp_q(tb, is->video_st->time_base)) {
1784             av_unused int64_t pts1 = pts_int;
1785             pts_int = av_rescale_q(pts_int, tb, is->video_st->time_base);
1786             av_dlog(NULL, "video_thread(): "
1787                     "tb:%d/%d pts:%"PRId64" -> tb:%d/%d pts:%"PRId64"\n",
1788                     tb.num, tb.den, pts1,
1789                     is->video_st->time_base.num, is->video_st->time_base.den, pts_int);
1790         }
1791 #else
1792         ret = get_video_frame(is, frame, &pts_int, &pkt);
1793         pos = pkt.pos;
1794         av_free_packet(&pkt);
1795 #endif
1796
1797         if (ret < 0) goto the_end;
1798
1799 #if CONFIG_AVFILTER
1800         if (!picref)
1801             continue;
1802 #endif
1803
1804         pts = pts_int*av_q2d(is->video_st->time_base);
1805
1806         ret = queue_picture(is, frame, pts, pos);
1807
1808         if (ret < 0)
1809             goto the_end;
1810
1811         if (is->step)
1812             stream_toggle_pause(is);
1813     }
1814  the_end:
1815 #if CONFIG_AVFILTER
1816     avfilter_graph_free(&graph);
1817 #endif
1818     av_free(frame);
1819     return 0;
1820 }
1821
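/* subtitle decoding thread: decode subtitle packets, convert bitmap palettes
   from RGBA to YUVA and append the result to the subpicture queue */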
1822 static int subtitle_thread(void *arg)
1823 {
1824     VideoState *is = arg;
1825     SubPicture *sp;
1826     AVPacket pkt1, *pkt = &pkt1;
1827     int got_subtitle;
1828     double pts;
1829     int i, j;
1830     int r, g, b, y, u, v, a;
1831
1832     for(;;) {
1833         while (is->paused && !is->subtitleq.abort_request) {
1834             SDL_Delay(10);
1835         }
1836         if (packet_queue_get(&is->subtitleq, pkt, 1) < 0)
1837             break;
1838
1839         if(pkt->data == flush_pkt.data){
1840             avcodec_flush_buffers(is->subtitle_st->codec);
1841             continue;
1842         }
1843         SDL_LockMutex(is->subpq_mutex);
1844         while (is->subpq_size >= SUBPICTURE_QUEUE_SIZE &&
1845                !is->subtitleq.abort_request) {
1846             SDL_CondWait(is->subpq_cond, is->subpq_mutex);
1847         }
1848         SDL_UnlockMutex(is->subpq_mutex);
1849
1850         if (is->subtitleq.abort_request)
1851             return 0;
1852
1853         sp = &is->subpq[is->subpq_windex];
1854
1855         /* NOTE: pts is the PTS of the _first_ subtitle beginning in
1856            this packet, if any */
1857         pts = 0;
1858         if (pkt->pts != AV_NOPTS_VALUE)
1859             pts = av_q2d(is->subtitle_st->time_base)*pkt->pts;
1860
1861         avcodec_decode_subtitle2(is->subtitle_st->codec, &sp->sub,
1862                                  &got_subtitle, pkt);
1863
1864         if (got_subtitle && sp->sub.format == 0) {
1865             sp->pts = pts;
1866
1867             for (i = 0; i < sp->sub.num_rects; i++)
1868             {
1869                 for (j = 0; j < sp->sub.rects[i]->nb_colors; j++)
1870                 {
1871                     RGBA_IN(r, g, b, a, (uint32_t*)sp->sub.rects[i]->pict.data[1] + j);
1872                     y = RGB_TO_Y_CCIR(r, g, b);
1873                     u = RGB_TO_U_CCIR(r, g, b, 0);
1874                     v = RGB_TO_V_CCIR(r, g, b, 0);
1875                     YUVA_OUT((uint32_t*)sp->sub.rects[i]->pict.data[1] + j, y, u, v, a);
1876                 }
1877             }
1878
1879             /* now we can update the picture count */
1880             if (++is->subpq_windex == SUBPICTURE_QUEUE_SIZE)
1881                 is->subpq_windex = 0;
1882             SDL_LockMutex(is->subpq_mutex);
1883             is->subpq_size++;
1884             SDL_UnlockMutex(is->subpq_mutex);
1885         }
1886         av_free_packet(pkt);
1887     }
1888     return 0;
1889 }
1890
1891 /* copy samples into the circular buffer used by the wave/RDFT display */
1892 static void update_sample_display(VideoState *is, short *samples, int samples_size)
1893 {
1894     int size, len;
1895
1896     size = samples_size / sizeof(short);
1897     while (size > 0) {
1898         len = SAMPLE_ARRAY_SIZE - is->sample_array_index;
1899         if (len > size)
1900             len = size;
1901         memcpy(is->sample_array + is->sample_array_index, samples, len * sizeof(short));
1902         samples += len;
1903         is->sample_array_index += len;
1904         if (is->sample_array_index >= SAMPLE_ARRAY_SIZE)
1905             is->sample_array_index = 0;
1906         size -= len;
1907     }
1908 }
1909
1910 /* return the new audio buffer size (samples can be added or deleted
1911    to get better sync if the video or external clock is the master) */
1912 static int synchronize_audio(VideoState *is, short *samples,
1913                              int samples_size1, double pts)
1914 {
1915     int n, samples_size;
1916     double ref_clock;
1917
1918     n = av_get_bytes_per_sample(is->audio_tgt_fmt) * is->audio_tgt_channels;
1919     samples_size = samples_size1;
1920
1921     /* if not master, then we try to remove or add samples to correct the clock */
1922     if (((is->av_sync_type == AV_SYNC_VIDEO_MASTER && is->video_st) ||
1923          is->av_sync_type == AV_SYNC_EXTERNAL_CLOCK)) {
1924         double diff, avg_diff;
1925         int wanted_size, min_size, max_size, nb_samples;
1926
1927         ref_clock = get_master_clock(is);
1928         diff = get_audio_clock(is) - ref_clock;
1929
1930         if (diff < AV_NOSYNC_THRESHOLD) {
1931             is->audio_diff_cum = diff + is->audio_diff_avg_coef * is->audio_diff_cum;
1932             if (is->audio_diff_avg_count < AUDIO_DIFF_AVG_NB) {
1933                 /* not enough measurements to have a correct estimate */
1934                 is->audio_diff_avg_count++;
1935             } else {
1936                 /* estimate the A-V difference */
1937                 avg_diff = is->audio_diff_cum * (1.0 - is->audio_diff_avg_coef);
1938
1939                 if (fabs(avg_diff) >= is->audio_diff_threshold) {
1940                     wanted_size = samples_size + ((int)(diff * is->audio_tgt_freq) * n);
1941                     nb_samples = samples_size / n;
1942
1943                     min_size = ((nb_samples * (100 - SAMPLE_CORRECTION_PERCENT_MAX)) / 100) * n;
1944                     max_size = ((nb_samples * (100 + SAMPLE_CORRECTION_PERCENT_MAX)) / 100) * n;
1945                     if (wanted_size < min_size)
1946                         wanted_size = min_size;
1947                     else if (wanted_size > FFMIN3(max_size, sizeof(is->audio_buf1), sizeof(is->audio_buf2)))
1948                         wanted_size = FFMIN3(max_size, sizeof(is->audio_buf1), sizeof(is->audio_buf2));
1949
1950                     /* add or remove samples to correct the sync */
1951                     if (wanted_size < samples_size) {
1952                         /* remove samples */
1953                         samples_size = wanted_size;
1954                     } else if (wanted_size > samples_size) {
1955                         uint8_t *samples_end, *q;
1956                         int nb;
1957
1958                         /* add samples */
1959                         nb = (wanted_size - samples_size);
1960                         samples_end = (uint8_t *)samples + samples_size - n;
1961                         q = samples_end + n;
1962                         while (nb > 0) {
1963                             memcpy(q, samples_end, n);
1964                             q += n;
1965                             nb -= n;
1966                         }
1967                         samples_size = wanted_size;
1968                     }
1969                 }
1970                 av_dlog(NULL, "diff=%f adiff=%f sample_diff=%d apts=%0.3f vpts=%0.3f %f\n",
1971                         diff, avg_diff, samples_size - samples_size1,
1972                         is->audio_clock, is->video_clock, is->audio_diff_threshold);
1973             }
1974         } else {
1975             /* too big a difference: probably initial PTS errors, so
1976                reset the A-V filter */
1977             is->audio_diff_avg_count = 0;
1978             is->audio_diff_cum = 0;
1979         }
1980     }
1981
1982     return samples_size;
1983 }
1984
1985 /* decode one audio frame and return its uncompressed size */
1986 static int audio_decode_frame(VideoState *is, double *pts_ptr)
1987 {
1988     AVPacket *pkt_temp = &is->audio_pkt_temp;
1989     AVPacket *pkt = &is->audio_pkt;
1990     AVCodecContext *dec= is->audio_st->codec;
1991     int len1, len2, data_size, resampled_data_size;
1992     int64_t dec_channel_layout;
1993     double pts;
1994     int new_packet = 0;
1995     int flush_complete = 0;
1996
1997     for(;;) {
1998         /* NOTE: the audio packet can contain several frames */
1999         while (pkt_temp->size > 0 || (!pkt_temp->data && new_packet)) {
2000             if (flush_complete)
2001                 break;
2002             new_packet = 0;
2003             data_size = sizeof(is->audio_buf1);
2004             len1 = avcodec_decode_audio3(dec,
2005                                         (int16_t *)is->audio_buf1, &data_size,
2006                                         pkt_temp);
2007             if (len1 < 0) {
2008                 /* if error, we skip the frame */
2009                 pkt_temp->size = 0;
2010                 break;
2011             }
2012
2013             pkt_temp->data += len1;
2014             pkt_temp->size -= len1;
2015
2016             if (data_size <= 0) {
2017                 /* stop sending empty packets if the decoder is finished */
2018                 if (!pkt_temp->data && dec->codec->capabilities & CODEC_CAP_DELAY)
2019                     flush_complete = 1;
2020                 continue;
2021             }
2022
2023             dec_channel_layout = (dec->channel_layout && dec->channels == av_get_channel_layout_nb_channels(dec->channel_layout)) ? dec->channel_layout : av_get_default_channel_layout(dec->channels);
2024
2025             if (dec->sample_fmt != is->audio_src_fmt || dec_channel_layout != is->audio_src_channel_layout || dec->sample_rate != is->audio_src_freq) {
2026                 if (is->swr_ctx)
2027                     swr_free(&is->swr_ctx);
2028                 is->swr_ctx = swr_alloc2(NULL, is->audio_tgt_channel_layout, is->audio_tgt_fmt, is->audio_tgt_freq,
2029                                                dec_channel_layout,          dec->sample_fmt,   dec->sample_rate,
2030                                                0, NULL);
2031                 if (!is->swr_ctx || swr_init(is->swr_ctx) < 0) {
2032                     fprintf(stderr, "Cannot create sample rate converter for conversion of %d Hz %s %d channels to %d Hz %s %d channels!\n",
2033                         dec->sample_rate,
2034                         av_get_sample_fmt_name(dec->sample_fmt),
2035                         dec->channels,
2036                         is->audio_tgt_freq,
2037                         av_get_sample_fmt_name(is->audio_tgt_fmt),
2038                         is->audio_tgt_channels);
2039                     break;
2040                 }
2041                 is->audio_src_channel_layout = dec_channel_layout;
2042                 is->audio_src_channels = dec->channels;
2043                 is->audio_src_freq = dec->sample_rate;
2044                 is->audio_src_fmt = dec->sample_fmt;
2045             }
2046
2047             resampled_data_size = data_size;
2048             if (is->swr_ctx) {
2049                 const uint8_t *in[] = {is->audio_buf1};
2050                 uint8_t *out[] = {is->audio_buf2};
2051                 len2 = swr_convert(is->swr_ctx, out, sizeof(is->audio_buf2) / is->audio_tgt_channels / av_get_bytes_per_sample(is->audio_tgt_fmt),
2052                                                 in, data_size / dec->channels / av_get_bytes_per_sample(dec->sample_fmt));
2053                 if (len2 < 0) {
2054                     fprintf(stderr, "audio_resample() failed\n");
2055                     break;
2056                 }
2057                 if (len2 == sizeof(is->audio_buf2) / is->audio_tgt_channels / av_get_bytes_per_sample(is->audio_tgt_fmt)) {
2058                     fprintf(stderr, "warning: audio buffer is probably too small\n");
2059                     swr_init(is->swr_ctx);
2060                 }
2061                 is->audio_buf = is->audio_buf2;
2062                 resampled_data_size = len2 * is->audio_tgt_channels * av_get_bytes_per_sample(is->audio_tgt_fmt);
2063             } else {
2064                 is->audio_buf= is->audio_buf1;
2065             }
2066
2067             /* use the accumulated audio clock as this frame's pts */
2068             pts = is->audio_clock;
2069             *pts_ptr = pts;
2070             is->audio_clock += (double)data_size / (dec->channels * dec->sample_rate * av_get_bytes_per_sample(dec->sample_fmt));
2071 #ifdef DEBUG
2072             {
2073                 static double last_clock;
2074                 printf("audio: delay=%0.3f clock=%0.3f pts=%0.3f\n",
2075                        is->audio_clock - last_clock,
2076                        is->audio_clock, pts);
2077                 last_clock = is->audio_clock;
2078             }
2079 #endif
2080             return resampled_data_size;
2081         }
2082
2083         /* free the current packet */
2084         if (pkt->data)
2085             av_free_packet(pkt);
2086
2087         if (is->paused || is->audioq.abort_request) {
2088             return -1;
2089         }
2090
2091         /* read next packet */
2092         if ((new_packet = packet_queue_get(&is->audioq, pkt, 1)) < 0)
2093             return -1;
2094
2095         if (pkt->data == flush_pkt.data)
2096             avcodec_flush_buffers(dec);
2097
2098         pkt_temp->data = pkt->data;
2099         pkt_temp->size = pkt->size;
2100
2101         /* update the audio clock with the packet pts, if available */
2102         if (pkt->pts != AV_NOPTS_VALUE) {
2103             is->audio_clock = av_q2d(is->audio_st->time_base)*pkt->pts;
2104         }
2105     }
2106 }
2107
2108 /* prepare a new audio buffer */
2109 static void sdl_audio_callback(void *opaque, Uint8 *stream, int len)
2110 {
2111     VideoState *is = opaque;
2112     int audio_size, len1;
2113     int bytes_per_sec;
2114     double pts;
2115
2116     audio_callback_time = av_gettime();
2117
2118     while (len > 0) {
2119         if (is->audio_buf_index >= is->audio_buf_size) {
2120             audio_size = audio_decode_frame(is, &pts);
2121             if (audio_size < 0) {
2122                 /* if error, just output silence */
2123                 is->audio_buf = is->audio_buf1;
2124                 is->audio_buf_size = 256 * is->audio_tgt_channels * av_get_bytes_per_sample(is->audio_tgt_fmt);
2125                 memset(is->audio_buf, 0, is->audio_buf_size);
2126             } else {
2127                 if (is->show_mode != SHOW_MODE_VIDEO)
2128                     update_sample_display(is, (int16_t *)is->audio_buf, audio_size);
2129                 audio_size = synchronize_audio(is, (int16_t *)is->audio_buf, audio_size,
2130                                                pts);
2131                 is->audio_buf_size = audio_size;
2132             }
2133             is->audio_buf_index = 0;
2134         }
2135         len1 = is->audio_buf_size - is->audio_buf_index;
2136         if (len1 > len)
2137             len1 = len;
2138         memcpy(stream, (uint8_t *)is->audio_buf + is->audio_buf_index, len1);
2139         len -= len1;
2140         stream += len1;
2141         is->audio_buf_index += len1;
2142     }
2143     bytes_per_sec = is->audio_tgt_freq * is->audio_tgt_channels * av_get_bytes_per_sample(is->audio_tgt_fmt);
2144     is->audio_write_buf_size = is->audio_buf_size - is->audio_buf_index;
2145     /* Let's assume the audio driver that is used by SDL has two periods. */
2146     is->audio_current_pts = is->audio_clock - (double)(2 * is->audio_hw_buf_size + is->audio_write_buf_size) / bytes_per_sec;
2147     is->audio_current_pts_drift = is->audio_current_pts - audio_callback_time / 1000000.0;
2148 }
2149
2150 /* open a given stream. Return 0 if OK */
2151 static int stream_component_open(VideoState *is, int stream_index)
2152 {
2153     AVFormatContext *ic = is->ic;
2154     AVCodecContext *avctx;
2155     AVCodec *codec;
2156     SDL_AudioSpec wanted_spec, spec;
2157     AVDictionary *opts;
2158     AVDictionaryEntry *t = NULL;
2159     int64_t wanted_channel_layout = 0;
2160
2161     if (stream_index < 0 || stream_index >= ic->nb_streams)
2162         return -1;
2163     avctx = ic->streams[stream_index]->codec;
2164
2165     opts = filter_codec_opts(codec_opts, avctx->codec_id, ic, ic->streams[stream_index]);
2166
2167     codec = avcodec_find_decoder(avctx->codec_id);
2168     switch(avctx->codec_type){
2169         case AVMEDIA_TYPE_AUDIO   : if(audio_codec_name   ) codec= avcodec_find_decoder_by_name(   audio_codec_name); break;
2170         case AVMEDIA_TYPE_SUBTITLE: if(subtitle_codec_name) codec= avcodec_find_decoder_by_name(subtitle_codec_name); break;
2171         case AVMEDIA_TYPE_VIDEO   : if(video_codec_name   ) codec= avcodec_find_decoder_by_name(   video_codec_name); break;
2172     }
2173     if (!codec)
2174         return -1;
2175
2176     avctx->workaround_bugs = workaround_bugs;
2177     avctx->lowres = lowres;
2178     if(lowres) avctx->flags |= CODEC_FLAG_EMU_EDGE;
2179     avctx->idct_algo= idct;
2180     if(fast) avctx->flags2 |= CODEC_FLAG2_FAST;
2181     avctx->skip_frame= skip_frame;
2182     avctx->skip_idct= skip_idct;
2183     avctx->skip_loop_filter= skip_loop_filter;
2184     avctx->error_recognition= error_recognition;
2185     avctx->error_concealment= error_concealment;
2186
2187     if(codec->capabilities & CODEC_CAP_DR1)
2188         avctx->flags |= CODEC_FLAG_EMU_EDGE;
2189
2190     if (avctx->codec_type == AVMEDIA_TYPE_AUDIO) {
2191         wanted_channel_layout = (avctx->channel_layout && avctx->channels == av_get_channel_layout_nb_channels(avctx->channel_layout)) ? avctx->channel_layout : av_get_default_channel_layout(avctx->channels);
2192         wanted_channel_layout &= ~AV_CH_LAYOUT_STEREO_DOWNMIX;
2193         wanted_spec.channels = av_get_channel_layout_nb_channels(wanted_channel_layout);
2194         wanted_spec.freq = avctx->sample_rate;
2195         if (wanted_spec.freq <= 0 || wanted_spec.channels <= 0) {
2196             fprintf(stderr, "Invalid sample rate or channel count!\n");
2197             return -1;
2198         }
2199     }
2200
2201     if (!codec ||
2202         avcodec_open2(avctx, codec, &opts) < 0)
2203         return -1;
2204     if ((t = av_dict_get(opts, "", NULL, AV_DICT_IGNORE_SUFFIX))) {
2205         av_log(NULL, AV_LOG_ERROR, "Option %s not found.\n", t->key);
2206         return AVERROR_OPTION_NOT_FOUND;
2207     }
2208
2209     /* prepare audio output */
2210     if (avctx->codec_type == AVMEDIA_TYPE_AUDIO) {
2211         wanted_spec.format = AUDIO_S16SYS;
2212         wanted_spec.silence = 0;
2213         wanted_spec.samples = SDL_AUDIO_BUFFER_SIZE;
2214         wanted_spec.callback = sdl_audio_callback;
2215         wanted_spec.userdata = is;
2216         if (SDL_OpenAudio(&wanted_spec, &spec) < 0) {
2217             fprintf(stderr, "SDL_OpenAudio: %s\n", SDL_GetError());
2218             return -1;
2219         }
2220         is->audio_hw_buf_size = spec.size;
2221         if (spec.format != AUDIO_S16SYS) {
2222             fprintf(stderr, "SDL advised audio format %d is not supported!\n", spec.format);
2223             return -1;
2224         }
2225         if (spec.channels != wanted_spec.channels) {
2226             wanted_channel_layout = av_get_default_channel_layout(spec.channels);
2227             if (!wanted_channel_layout) {
2228                 fprintf(stderr, "SDL advised channel count %d is not supported!\n", spec.channels);
2229                 return -1;
2230             }
2231         }
2232         is->audio_src_fmt = is->audio_tgt_fmt = AV_SAMPLE_FMT_S16;
2233         is->audio_src_freq = is->audio_tgt_freq = spec.freq;
2234         is->audio_src_channel_layout = is->audio_tgt_channel_layout = wanted_channel_layout;
2235         is->audio_src_channels = is->audio_tgt_channels = spec.channels;
2236     }
2237
2238     ic->streams[stream_index]->discard = AVDISCARD_DEFAULT;
2239     switch(avctx->codec_type) {
2240     case AVMEDIA_TYPE_AUDIO:
2241         is->audio_stream = stream_index;
2242         is->audio_st = ic->streams[stream_index];
2243         is->audio_buf_size = 0;
2244         is->audio_buf_index = 0;
2245
2246         /* init averaging filter */
2247         is->audio_diff_avg_coef = exp(log(0.01) / AUDIO_DIFF_AVG_NB);
2248         is->audio_diff_avg_count = 0;
2249         /* since we do not have a precise enough audio FIFO fullness measure,
2250            we correct audio sync only if the error is larger than this threshold */
2251         is->audio_diff_threshold = 2.0 * SDL_AUDIO_BUFFER_SIZE / wanted_spec.freq;
2252
2253         memset(&is->audio_pkt, 0, sizeof(is->audio_pkt));
2254         packet_queue_init(&is->audioq);
2255         SDL_PauseAudio(0);
2256         break;
2257     case AVMEDIA_TYPE_VIDEO:
2258         is->video_stream = stream_index;
2259         is->video_st = ic->streams[stream_index];
2260
2261         packet_queue_init(&is->videoq);
2262         is->video_tid = SDL_CreateThread(video_thread, is);
2263         break;
2264     case AVMEDIA_TYPE_SUBTITLE:
2265         is->subtitle_stream = stream_index;
2266         is->subtitle_st = ic->streams[stream_index];
2267         packet_queue_init(&is->subtitleq);
2268
2269         is->subtitle_tid = SDL_CreateThread(subtitle_thread, is);
2270         break;
2271     default:
2272         break;
2273     }
2274     return 0;
2275 }
2276
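/* close a stream component: abort its packet queue, stop the associated
   decoding thread or audio device, free per-stream resources and mark the
   stream as discarded */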
2277 static void stream_component_close(VideoState *is, int stream_index)
2278 {
2279     AVFormatContext *ic = is->ic;
2280     AVCodecContext *avctx;
2281
2282     if (stream_index < 0 || stream_index >= ic->nb_streams)
2283         return;
2284     avctx = ic->streams[stream_index]->codec;
2285
2286     switch(avctx->codec_type) {
2287     case AVMEDIA_TYPE_AUDIO:
2288         packet_queue_abort(&is->audioq);
2289
2290         SDL_CloseAudio();
2291
2292         packet_queue_end(&is->audioq);
2293         if (is->swr_ctx)
2294             swr_free(&is->swr_ctx);
2295         av_free_packet(&is->audio_pkt);
2296
2297         if (is->rdft) {
2298             av_rdft_end(is->rdft);
2299             av_freep(&is->rdft_data);
2300         }
2301         break;
2302     case AVMEDIA_TYPE_VIDEO:
2303         packet_queue_abort(&is->videoq);
2304
2305         /* note: we also signal the condition to make sure we unblock the
2306            video thread in all cases */
2307         SDL_LockMutex(is->pictq_mutex);
2308         SDL_CondSignal(is->pictq_cond);
2309         SDL_UnlockMutex(is->pictq_mutex);
2310
2311         SDL_WaitThread(is->video_tid, NULL);
2312
2313         packet_queue_end(&is->videoq);
2314         break;
2315     case AVMEDIA_TYPE_SUBTITLE:
2316         packet_queue_abort(&is->subtitleq);
2317
2318         /* note: we also signal the condition to make sure we unblock the
2319            subtitle thread in all cases */
2320         SDL_LockMutex(is->subpq_mutex);
2321         is->subtitle_stream_changed = 1;
2322
2323         SDL_CondSignal(is->subpq_cond);
2324         SDL_UnlockMutex(is->subpq_mutex);
2325
2326         SDL_WaitThread(is->subtitle_tid, NULL);
2327
2328         packet_queue_end(&is->subtitleq);
2329         break;
2330     default:
2331         break;
2332     }
2333
2334     ic->streams[stream_index]->discard = AVDISCARD_ALL;
2335     avcodec_close(avctx);
2336     switch(avctx->codec_type) {
2337     case AVMEDIA_TYPE_AUDIO:
2338         is->audio_st = NULL;
2339         is->audio_stream = -1;
2340         break;
2341     case AVMEDIA_TYPE_VIDEO:
2342         is->video_st = NULL;
2343         is->video_stream = -1;
2344         break;
2345     case AVMEDIA_TYPE_SUBTITLE:
2346         is->subtitle_st = NULL;
2347         is->subtitle_stream = -1;
2348         break;
2349     default:
2350         break;
2351     }
2352 }
2353
2354 /* since we have only one decoding thread, we can use a global
2355    variable instead of a thread local variable */
2356 static VideoState *global_video_state;
2357
2358 static int decode_interrupt_cb(void)
2359 {
2360     return (global_video_state && global_video_state->abort_request);
2361 }
2362
2363 /* this thread gets the stream from the disk or the network */
2364 static int read_thread(void *arg)
2365 {
2366     VideoState *is = arg;
2367     AVFormatContext *ic = NULL;
2368     int err, i, ret;
2369     int st_index[AVMEDIA_TYPE_NB];
2370     AVPacket pkt1, *pkt = &pkt1;
2371     int eof=0;
2372     int pkt_in_play_range = 0;
2373     AVDictionaryEntry *t;
2374     AVDictionary **opts;
2375     int orig_nb_streams;
2376
2377     memset(st_index, -1, sizeof(st_index));
2378     is->video_stream = -1;
2379     is->audio_stream = -1;
2380     is->subtitle_stream = -1;
2381
2382     global_video_state = is;
2383     avio_set_interrupt_cb(decode_interrupt_cb);
2384
2385     err = avformat_open_input(&ic, is->filename, is->iformat, &format_opts);
2386     if (err < 0) {
2387         print_error(is->filename, err);
2388         ret = -1;
2389         goto fail;
2390     }
2391     if ((t = av_dict_get(format_opts, "", NULL, AV_DICT_IGNORE_SUFFIX))) {
2392         av_log(NULL, AV_LOG_ERROR, "Option %s not found.\n", t->key);
2393         ret = AVERROR_OPTION_NOT_FOUND;
2394         goto fail;
2395     }
2396     is->ic = ic;
2397
2398     if(genpts)
2399         ic->flags |= AVFMT_FLAG_GENPTS;
2400
2401     av_dict_set(&codec_opts, "request_channels", "2", 0);
2402
2403     opts = setup_find_stream_info_opts(ic, codec_opts);
2404     orig_nb_streams = ic->nb_streams;
2405
2406     err = avformat_find_stream_info(ic, opts);
2407     if (err < 0) {
2408         fprintf(stderr, "%s: could not find codec parameters\n", is->filename);
2409         ret = -1;
2410         goto fail;
2411     }
2412     for (i = 0; i < orig_nb_streams; i++)
2413         av_dict_free(&opts[i]);
2414     av_freep(&opts);
2415
2416     if(ic->pb)
2417         ic->pb->eof_reached= 0; //FIXME hack, ffplay maybe should not use url_feof() to test for the end
2418
2419     if(seek_by_bytes<0)
2420         seek_by_bytes= !!(ic->iformat->flags & AVFMT_TS_DISCONT);
2421
2422     /* if seeking requested, we execute it */
2423     if (start_time != AV_NOPTS_VALUE) {
2424         int64_t timestamp;
2425
2426         timestamp = start_time;
2427         /* add the stream start time */
2428         if (ic->start_time != AV_NOPTS_VALUE)
2429             timestamp += ic->start_time;
2430         ret = avformat_seek_file(ic, -1, INT64_MIN, timestamp, INT64_MAX, 0);
2431         if (ret < 0) {
2432             fprintf(stderr, "%s: could not seek to position %0.3f\n",
2433                     is->filename, (double)timestamp / AV_TIME_BASE);
2434         }
2435     }
2436
2437     for (i = 0; i < ic->nb_streams; i++)
2438         ic->streams[i]->discard = AVDISCARD_ALL;
2439     if (!video_disable)
2440         st_index[AVMEDIA_TYPE_VIDEO] =
2441             av_find_best_stream(ic, AVMEDIA_TYPE_VIDEO,
2442                                 wanted_stream[AVMEDIA_TYPE_VIDEO], -1, NULL, 0);
2443     if (!audio_disable)
2444         st_index[AVMEDIA_TYPE_AUDIO] =
2445             av_find_best_stream(ic, AVMEDIA_TYPE_AUDIO,
2446                                 wanted_stream[AVMEDIA_TYPE_AUDIO],
2447                                 st_index[AVMEDIA_TYPE_VIDEO],
2448                                 NULL, 0);
2449     if (!video_disable)
2450         st_index[AVMEDIA_TYPE_SUBTITLE] =
2451             av_find_best_stream(ic, AVMEDIA_TYPE_SUBTITLE,
2452                                 wanted_stream[AVMEDIA_TYPE_SUBTITLE],
2453                                 (st_index[AVMEDIA_TYPE_AUDIO] >= 0 ?
2454                                  st_index[AVMEDIA_TYPE_AUDIO] :
2455                                  st_index[AVMEDIA_TYPE_VIDEO]),
2456                                 NULL, 0);
2457     if (show_status) {
2458         av_dump_format(ic, 0, is->filename, 0);
2459     }
2460
2461     is->show_mode = show_mode;
2462
2463     /* open the streams */
2464     if (st_index[AVMEDIA_TYPE_AUDIO] >= 0) {
2465         stream_component_open(is, st_index[AVMEDIA_TYPE_AUDIO]);
2466     }
2467
2468     ret=-1;
2469     if (st_index[AVMEDIA_TYPE_VIDEO] >= 0) {
2470         ret= stream_component_open(is, st_index[AVMEDIA_TYPE_VIDEO]);
2471     }
2472     is->refresh_tid = SDL_CreateThread(refresh_thread, is);
2473     if (is->show_mode == SHOW_MODE_NONE)
2474         is->show_mode = ret >= 0 ? SHOW_MODE_VIDEO : SHOW_MODE_RDFT;
2475
2476     if (st_index[AVMEDIA_TYPE_SUBTITLE] >= 0) {
2477         stream_component_open(is, st_index[AVMEDIA_TYPE_SUBTITLE]);
2478     }
2479
2480     if (is->video_stream < 0 && is->audio_stream < 0) {
2481         fprintf(stderr, "%s: could not open codecs\n", is->filename);
2482         ret = -1;
2483         goto fail;
2484     }
2485
2486     for(;;) {
2487         if (is->abort_request)
2488             break;
2489         if (is->paused != is->last_paused) {
2490             is->last_paused = is->paused;
2491             if (is->paused)
2492                 is->read_pause_return= av_read_pause(ic);
2493             else
2494                 av_read_play(ic);
2495         }
2496 #if CONFIG_RTSP_DEMUXER
2497         if (is->paused && !strcmp(ic->iformat->name, "rtsp")) {
2498             /* wait 10 ms to avoid trying to get another packet */
2499             /* XXX: horrible */
2500             SDL_Delay(10);
2501             continue;
2502         }
2503 #endif
2504         if (is->seek_req) {
2505             int64_t seek_target= is->seek_pos;
2506             int64_t seek_min= is->seek_rel > 0 ? seek_target - is->seek_rel + 2: INT64_MIN;
2507             int64_t seek_max= is->seek_rel < 0 ? seek_target - is->seek_rel - 2: INT64_MAX;
2508 //FIXME the +-2 is due to rounding not being done in the correct direction
2509 //      when generating the seek_pos/seek_rel variables
2510
2511             ret = avformat_seek_file(is->ic, -1, seek_min, seek_target, seek_max, is->seek_flags);
2512             if (ret < 0) {
2513                 fprintf(stderr, "%s: error while seeking\n", is->ic->filename);
2514             }else{
2515                 if (is->audio_stream >= 0) {
2516                     packet_queue_flush(&is->audioq);
2517                     packet_queue_put(&is->audioq, &flush_pkt);
2518                 }
2519                 if (is->subtitle_stream >= 0) {
2520                     packet_queue_flush(&is->subtitleq);
2521                     packet_queue_put(&is->subtitleq, &flush_pkt);
2522                 }
2523                 if (is->video_stream >= 0) {
2524                     packet_queue_flush(&is->videoq);
2525                     packet_queue_put(&is->videoq, &flush_pkt);
2526                 }
2527             }
2528             is->seek_req = 0;
2529             eof= 0;
2530         }
2531
2532         /* if the queues are full, no need to read more */
2533         if (   is->audioq.size + is->videoq.size + is->subtitleq.size > MAX_QUEUE_SIZE
2534             || (   (is->audioq   .size  > MIN_AUDIOQ_SIZE || is->audio_stream<0)
2535                 && (is->videoq   .nb_packets > MIN_FRAMES || is->video_stream<0)
2536                 && (is->subtitleq.nb_packets > MIN_FRAMES || is->subtitle_stream<0))) {
2537             /* wait 10 ms */
2538             SDL_Delay(10);
2539             continue;
2540         }
2541         if(eof) {
2542             if(is->video_stream >= 0){
2543                 av_init_packet(pkt);
2544                 pkt->data=NULL;
2545                 pkt->size=0;
2546                 pkt->stream_index= is->video_stream;
2547                 packet_queue_put(&is->videoq, pkt);
2548             }
2549             if (is->audio_stream >= 0 &&
2550                 is->audio_st->codec->codec->capabilities & CODEC_CAP_DELAY) {
2551                 av_init_packet(pkt);
2552                 pkt->data = NULL;
2553                 pkt->size = 0;
2554                 pkt->stream_index = is->audio_stream;
2555                 packet_queue_put(&is->audioq, pkt);
2556             }
2557             SDL_Delay(10);
2558             if(is->audioq.size + is->videoq.size + is->subtitleq.size ==0){
2559                 if(loop!=1 && (!loop || --loop)){
2560                     stream_seek(is, start_time != AV_NOPTS_VALUE ? start_time : 0, 0, 0);
2561                 }else if(autoexit){
2562                     ret=AVERROR_EOF;
2563                     goto fail;
2564                 }
2565             }
2566             eof=0;
2567             continue;
2568         }
2569         ret = av_read_frame(ic, pkt);
2570         if (ret < 0) {
2571             if (ret == AVERROR_EOF || url_feof(ic->pb))
2572                 eof=1;
2573             if (ic->pb && ic->pb->error)
2574                 break;
2575             SDL_Delay(100); /* wait for user event */
2576             continue;
2577         }
2578         /* check if packet is in play range specified by user, then queue, otherwise discard */
2579         pkt_in_play_range = duration == AV_NOPTS_VALUE ||
2580                 (pkt->pts - ic->streams[pkt->stream_index]->start_time) *
2581                 av_q2d(ic->streams[pkt->stream_index]->time_base) -
2582                 (double)(start_time != AV_NOPTS_VALUE ? start_time : 0)/1000000
2583                 <= ((double)duration/1000000);
2584         if (pkt->stream_index == is->audio_stream && pkt_in_play_range) {
2585             packet_queue_put(&is->audioq, pkt);
2586         } else if (pkt->stream_index == is->video_stream && pkt_in_play_range) {
2587             packet_queue_put(&is->videoq, pkt);
2588         } else if (pkt->stream_index == is->subtitle_stream && pkt_in_play_range) {
2589             packet_queue_put(&is->subtitleq, pkt);
2590         } else {
2591             av_free_packet(pkt);
2592         }
2593     }
2594     /* wait until the end */
2595     while (!is->abort_request) {
2596         SDL_Delay(100);
2597     }
2598
2599     ret = 0;
2600  fail:
2601     /* disable interrupting */
2602     global_video_state = NULL;
2603
2604     /* close each stream */
2605     if (is->audio_stream >= 0)
2606         stream_component_close(is, is->audio_stream);
2607     if (is->video_stream >= 0)
2608         stream_component_close(is, is->video_stream);
2609     if (is->subtitle_stream >= 0)
2610         stream_component_close(is, is->subtitle_stream);
2611     if (is->ic) {
2612         av_close_input_file(is->ic);
2613         is->ic = NULL; /* safety */
2614     }
2615     avio_set_interrupt_cb(NULL);
2616
2617     if (ret != 0) {
2618         SDL_Event event;
2619
2620         event.type = FF_QUIT_EVENT;
2621         event.user.data1 = is;
2622         SDL_PushEvent(&event);
2623     }
2624     return 0;
2625 }
2626
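/* allocate a VideoState, create the picture/subpicture synchronization
   primitives and spawn the read_thread that demuxes the file and opens the
   stream components */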
2627 static VideoState *stream_open(const char *filename, AVInputFormat *iformat)
2628 {
2629     VideoState *is;
2630
2631     is = av_mallocz(sizeof(VideoState));
2632     if (!is)
2633         return NULL;
2634     av_strlcpy(is->filename, filename, sizeof(is->filename));
2635     is->iformat = iformat;
2636     is->ytop = 0;
2637     is->xleft = 0;
2638
2639     /* start video display */
2640     is->pictq_mutex = SDL_CreateMutex();
2641     is->pictq_cond = SDL_CreateCond();
2642
2643     is->subpq_mutex = SDL_CreateMutex();
2644     is->subpq_cond = SDL_CreateCond();
2645
2646     is->av_sync_type = av_sync_type;
2647     is->read_tid = SDL_CreateThread(read_thread, is);
2648     if (!is->read_tid) {
2649         av_free(is);
2650         return NULL;
2651     }
2652     return is;
2653 }
2654
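/* switch to the next stream of the given type, wrapping around; for
   subtitles, cycling past the last stream disables them */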
2655 static void stream_cycle_channel(VideoState *is, int codec_type)
2656 {
2657     AVFormatContext *ic = is->ic;
2658     int start_index, stream_index;
2659     AVStream *st;
2660
2661     if (codec_type == AVMEDIA_TYPE_VIDEO)
2662         start_index = is->video_stream;
2663     else if (codec_type == AVMEDIA_TYPE_AUDIO)
2664         start_index = is->audio_stream;
2665     else
2666         start_index = is->subtitle_stream;
2667     if (start_index < (codec_type == AVMEDIA_TYPE_SUBTITLE ? -1 : 0))
2668         return;
2669     stream_index = start_index;
2670     for(;;) {
2671         if (++stream_index >= is->ic->nb_streams)
2672         {
2673             if (codec_type == AVMEDIA_TYPE_SUBTITLE)
2674             {
2675                 stream_index = -1;
2676                 goto the_end;
2677             } else
2678                 stream_index = 0;
2679         }
2680         if (stream_index == start_index)
2681             return;
2682         st = ic->streams[stream_index];
2683         if (st->codec->codec_type == codec_type) {
2684             /* check that parameters are OK */
2685             switch(codec_type) {
2686             case AVMEDIA_TYPE_AUDIO:
2687                 if (st->codec->sample_rate != 0 &&
2688                     st->codec->channels != 0)
2689                     goto the_end;
2690                 break;
2691             case AVMEDIA_TYPE_VIDEO:
2692             case AVMEDIA_TYPE_SUBTITLE:
2693                 goto the_end;
2694             default:
2695                 break;
2696             }
2697         }
2698     }
2699  the_end:
2700     stream_component_close(is, start_index);
2701     stream_component_open(is, stream_index);
2702 }
2703
2704
2705 static void toggle_full_screen(VideoState *is)
2706 {
2707     is_full_screen = !is_full_screen;
2708 #if defined(__APPLE__) && SDL_VERSION_ATLEAST(1, 2, 14)
2709     /* OSX needs to reallocate the SDL overlays */
2710     for (int i = 0; i < VIDEO_PICTURE_QUEUE_SIZE; i++) {
2711         is->pictq[i].reallocate = 1;
2712     }
2713 #endif
2714     video_open(is);
2715 }
2716
2717 static void toggle_pause(VideoState *is)
2718 {
2719     stream_toggle_pause(is);
2720     is->step = 0;
2721 }
2722
2723 static void step_to_next_frame(VideoState *is)
2724 {
2725     /* if the stream is paused, unpause it, then step */
2726     if (is->paused)
2727         stream_toggle_pause(is);
2728     is->step = 1;
2729 }
2730
2731 static void toggle_audio_display(VideoState *is)
2732 {
2733     int bgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0x00);
2734     is->show_mode = (is->show_mode + 1) % SHOW_MODE_NB;
2735     fill_rectangle(screen,
2736                 is->xleft, is->ytop, is->width, is->height,
2737                 bgcolor);
2738     SDL_UpdateRect(screen, is->xleft, is->ytop, is->width, is->height);
2739 }
2740
2741 /* handle an event sent by the GUI */
2742 static void event_loop(VideoState *cur_stream)
2743 {
2744     SDL_Event event;
2745     double incr, pos, frac;
2746
2747     for(;;) {
2748         double x;
2749         SDL_WaitEvent(&event);
2750         switch(event.type) {
2751         case SDL_KEYDOWN:
2752             if (exit_on_keydown) {
2753                 do_exit(cur_stream);
2754                 break;
2755             }
2756             switch(event.key.keysym.sym) {
2757             case SDLK_ESCAPE:
2758             case SDLK_q:
2759                 do_exit(cur_stream);
2760                 break;
2761             case SDLK_f:
2762                 toggle_full_screen(cur_stream);
2763                 break;
2764             case SDLK_p:
2765             case SDLK_SPACE:
2766                 toggle_pause(cur_stream);
2767                 break;
2768             case SDLK_s: //S: Step to next frame
2769                 step_to_next_frame(cur_stream);
2770                 break;
2771             case SDLK_a:
2772                 stream_cycle_channel(cur_stream, AVMEDIA_TYPE_AUDIO);
2773                 break;
2774             case SDLK_v:
2775                 stream_cycle_channel(cur_stream, AVMEDIA_TYPE_VIDEO);
2776                 break;
2777             case SDLK_t:
2778                 stream_cycle_channel(cur_stream, AVMEDIA_TYPE_SUBTITLE);
2779                 break;
2780             case SDLK_w:
2781                 toggle_audio_display(cur_stream);
2782                 break;
2783             case SDLK_LEFT:
2784                 incr = -10.0;
2785                 goto do_seek;
2786             case SDLK_RIGHT:
2787                 incr = 10.0;
2788                 goto do_seek;
2789             case SDLK_UP:
2790                 incr = 60.0;
2791                 goto do_seek;
2792             case SDLK_DOWN:
2793                 incr = -60.0;
2794             do_seek:
2795                 if (seek_by_bytes) {
2796                     if (cur_stream->video_stream >= 0 && cur_stream->video_current_pos>=0){
2797                         pos= cur_stream->video_current_pos;
2798                     }else if(cur_stream->audio_stream >= 0 && cur_stream->audio_pkt.pos>=0){
2799                         pos= cur_stream->audio_pkt.pos;
2800                     }else
2801                         pos = avio_tell(cur_stream->ic->pb);
2802                     if (cur_stream->ic->bit_rate)
2803                         incr *= cur_stream->ic->bit_rate / 8.0;
2804                     else
2805                         incr *= 180000.0;
2806                     pos += incr;
2807                     stream_seek(cur_stream, pos, incr, 1);
2808                 } else {
2809                     pos = get_master_clock(cur_stream);
2810                     pos += incr;
2811                     stream_seek(cur_stream, (int64_t)(pos * AV_TIME_BASE), (int64_t)(incr * AV_TIME_BASE), 0);
2812                 }
2813                 break;
2814             default:
2815                 break;
2816             }
2817             break;
2818         case SDL_MOUSEBUTTONDOWN:
2819             if (exit_on_mousedown) {
2820                 do_exit(cur_stream);
2821                 break;
2822             }
2823         case SDL_MOUSEMOTION:
2824             if(event.type ==SDL_MOUSEBUTTONDOWN){
2825                 x= event.button.x;
2826             }else{
2827                 if(event.motion.state != SDL_PRESSED)
2828                     break;
2829                 x= event.motion.x;
2830             }
2831             if(seek_by_bytes || cur_stream->ic->duration<=0){
2832                 uint64_t size=  avio_size(cur_stream->ic->pb);
2833                 stream_seek(cur_stream, size*x/cur_stream->width, 0, 1);
2834             }else{
2835                 int64_t ts;
2836                 int ns, hh, mm, ss;
2837                 int tns, thh, tmm, tss;
2838                 tns = cur_stream->ic->duration/1000000LL;
2839                 thh = tns/3600;
2840                 tmm = (tns%3600)/60;
2841                 tss = (tns%60);
2842                 frac = x/cur_stream->width;
2843                 ns = frac*tns;
2844                 hh = ns/3600;
2845                 mm = (ns%3600)/60;
2846                 ss = (ns%60);
2847                 fprintf(stderr, "Seek to %2.0f%% (%2d:%02d:%02d) of total duration (%2d:%02d:%02d)       \n", frac*100,
2848                         hh, mm, ss, thh, tmm, tss);
2849                 ts = frac*cur_stream->ic->duration;
2850                 if (cur_stream->ic->start_time != AV_NOPTS_VALUE)
2851                     ts += cur_stream->ic->start_time;
2852                 stream_seek(cur_stream, ts, 0, 0);
2853             }
2854             break;
2855         case SDL_VIDEORESIZE:
2856             screen = SDL_SetVideoMode(event.resize.w, event.resize.h, 0,
2857                                       SDL_HWSURFACE|SDL_RESIZABLE|SDL_ASYNCBLIT|SDL_HWACCEL);
2858             screen_width = cur_stream->width = event.resize.w;
2859             screen_height= cur_stream->height= event.resize.h;
2860             break;
2861         case SDL_QUIT:
2862         case FF_QUIT_EVENT:
2863             do_exit(cur_stream);
2864             break;
2865         case FF_ALLOC_EVENT:
2866             video_open(event.user.data1);
2867             alloc_picture(event.user.data1);
2868             break;
2869         case FF_REFRESH_EVENT:
2870             video_refresh(event.user.data1);
2871             cur_stream->refresh=0;
2872             break;
2873         default:
2874             break;
2875         }
2876     }
2877 }
2878
2879 static int opt_frame_size(const char *opt, const char *arg)
2880 {
2881     av_log(NULL, AV_LOG_WARNING, "Option -s is deprecated, use -video_size.\n");
2882     return opt_default("video_size", arg);
2883 }
2884
2885 static int opt_width(const char *opt, const char *arg)
2886 {
2887     screen_width = parse_number_or_die(opt, arg, OPT_INT64, 1, INT_MAX);
2888     return 0;
2889 }
2890
2891 static int opt_height(const char *opt, const char *arg)
2892 {
2893     screen_height = parse_number_or_die(opt, arg, OPT_INT64, 1, INT_MAX);
2894     return 0;
2895 }
2896
2897 static int opt_format(const char *opt, const char *arg)
2898 {
2899     file_iformat = av_find_input_format(arg);
2900     if (!file_iformat) {
2901         fprintf(stderr, "Unknown input format: %s\n", arg);
2902         return AVERROR(EINVAL);
2903     }
2904     return 0;
2905 }
2906
2907 static int opt_frame_pix_fmt(const char *opt, const char *arg)
2908 {
2909     av_log(NULL, AV_LOG_WARNING, "Option -pix_fmt is deprecated, use -pixel_format.\n");
2910     return opt_default("pixel_format", arg);
2911 }
2912
2913 static int opt_sync(const char *opt, const char *arg)
2914 {
2915     if (!strcmp(arg, "audio"))
2916         av_sync_type = AV_SYNC_AUDIO_MASTER;
2917     else if (!strcmp(arg, "video"))
2918         av_sync_type = AV_SYNC_VIDEO_MASTER;
2919     else if (!strcmp(arg, "ext"))
2920         av_sync_type = AV_SYNC_EXTERNAL_CLOCK;
2921     else {
2922         fprintf(stderr, "Unknown value for %s: %s\n", opt, arg);
2923         exit(1);
2924     }
2925     return 0;
2926 }
2927
2928 static int opt_seek(const char *opt, const char *arg)
2929 {
2930     start_time = parse_time_or_die(opt, arg, 1);
2931     return 0;
2932 }
2933
2934 static int opt_duration(const char *opt, const char *arg)
2935 {
2936     duration = parse_time_or_die(opt, arg, 1);
2937     return 0;
2938 }
2939
2940 static int opt_show_mode(const char *opt, const char *arg)
2941 {
2942     show_mode = !strcmp(arg, "video") ? SHOW_MODE_VIDEO :
2943                 !strcmp(arg, "waves") ? SHOW_MODE_WAVES :
2944                 !strcmp(arg, "rdft" ) ? SHOW_MODE_RDFT  :
2945                 parse_number_or_die(opt, arg, OPT_INT, 0, SHOW_MODE_NB-1);
2946     return 0;
2947 }
2948
2949 static void opt_input_file(void *optctx, const char *filename)
2950 {
2951     if (input_filename) {
2952         fprintf(stderr, "Argument '%s' provided as input filename, but '%s' was already specified.\n",
2953                 filename, input_filename);
2954         exit_program(1);
2955     }
2956     if (!strcmp(filename, "-"))
2957         filename = "pipe:";
2958     input_filename = filename;
2959 }
2960
2961 static int opt_codec(void *o, const char *opt, const char *arg)
2962 {
2963     switch(opt[strlen(opt)-1]){
2964     case 'a' :    audio_codec_name = arg; break;
2965     case 's' : subtitle_codec_name = arg; break;
2966     case 'v' :    video_codec_name = arg; break;
2967     }
2968     return 0;
2969 }
2970
2971 static int dummy;
2972
2973 static const OptionDef options[] = {
2974 #include "cmdutils_common_opts.h"
2975     { "x", HAS_ARG, {(void*)opt_width}, "force displayed width", "width" },
2976     { "y", HAS_ARG, {(void*)opt_height}, "force displayed height", "height" },
2977     { "s", HAS_ARG | OPT_VIDEO, {(void*)opt_frame_size}, "set frame size (WxH or abbreviation)", "size" },
2978     { "fs", OPT_BOOL, {(void*)&is_full_screen}, "force full screen" },
2979     { "an", OPT_BOOL, {(void*)&audio_disable}, "disable audio" },
2980     { "vn", OPT_BOOL, {(void*)&video_disable}, "disable video" },
2981     { "ast", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&wanted_stream[AVMEDIA_TYPE_AUDIO]}, "select desired audio stream", "stream_number" },
2982     { "vst", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&wanted_stream[AVMEDIA_TYPE_VIDEO]}, "select desired video stream", "stream_number" },
2983     { "sst", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&wanted_stream[AVMEDIA_TYPE_SUBTITLE]}, "select desired subtitle stream", "stream_number" },
2984     { "ss", HAS_ARG, {(void*)&opt_seek}, "seek to a given position in seconds", "pos" },
2985     { "t", HAS_ARG, {(void*)&opt_duration}, "play \"duration\" seconds of audio/video", "duration" },
2986     { "bytes", OPT_INT | HAS_ARG, {(void*)&seek_by_bytes}, "seek by bytes 0=off 1=on -1=auto", "val" },
2987     { "nodisp", OPT_BOOL, {(void*)&display_disable}, "disable graphical display" },
2988     { "f", HAS_ARG, {(void*)opt_format}, "force format", "fmt" },
2989     { "pix_fmt", HAS_ARG | OPT_EXPERT | OPT_VIDEO, {(void*)opt_frame_pix_fmt}, "set pixel format", "format" },
2990     { "stats", OPT_BOOL | OPT_EXPERT, {(void*)&show_status}, "show status", "" },
2991     { "bug", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&workaround_bugs}, "workaround bugs", "" },
2992     { "fast", OPT_BOOL | OPT_EXPERT, {(void*)&fast}, "non spec compliant optimizations", "" },
2993     { "genpts", OPT_BOOL | OPT_EXPERT, {(void*)&genpts}, "generate pts", "" },
2994     { "drp", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&decoder_reorder_pts}, "let decoder reorder pts 0=off 1=on -1=auto", ""},
2995     { "lowres", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&lowres}, "", "" },
2996     { "skiploop", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&skip_loop_filter}, "", "" },
2997     { "skipframe", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&skip_frame}, "", "" },
2998     { "skipidct", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&skip_idct}, "", "" },
2999     { "idct", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&idct}, "set idct algo",  "algo" },
3000     { "er", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&error_recognition}, "set error detection threshold (0-4)",  "threshold" },
3001     { "ec", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&error_concealment}, "set error concealment options",  "bit_mask" },
3002     { "sync", HAS_ARG | OPT_EXPERT, {(void*)opt_sync}, "set audio-video sync. type (type=audio/video/ext)", "type" },
3003     { "autoexit", OPT_BOOL | OPT_EXPERT, {(void*)&autoexit}, "exit at the end", "" },
3004     { "exitonkeydown", OPT_BOOL | OPT_EXPERT, {(void*)&exit_on_keydown}, "exit on key down", "" },
3005     { "exitonmousedown", OPT_BOOL | OPT_EXPERT, {(void*)&exit_on_mousedown}, "exit on mouse down", "" },
3006     { "loop", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&loop}, "set number of times the playback shall be looped", "loop count" },
3007     { "framedrop", OPT_BOOL | OPT_EXPERT, {(void*)&framedrop}, "drop frames when cpu is too slow", "" },
3008     { "window_title", OPT_STRING | HAS_ARG, {(void*)&window_title}, "set window title", "window title" },
3009 #if CONFIG_AVFILTER
3010     { "vf", OPT_STRING | HAS_ARG, {(void*)&vfilters}, "video filters", "filter list" },
3011 #endif
3012     { "rdftspeed", OPT_INT | HAS_ARG| OPT_AUDIO | OPT_EXPERT, {(void*)&rdftspeed}, "rdft speed", "msecs" },
3013     { "showmode", HAS_ARG, {(void*)opt_show_mode}, "select show mode (0 = video, 1 = waves, 2 = RDFT)", "mode" },
3014     { "default", HAS_ARG | OPT_AUDIO | OPT_VIDEO | OPT_EXPERT, {(void*)opt_default}, "generic catch all option", "" },
3015     { "i", OPT_BOOL, {(void *)&dummy}, "read specified file", "input_file"},
3016     { "codec", HAS_ARG | OPT_FUNC2, {(void*)opt_codec}, "force decoder", "decoder" },
3017     { NULL, },
3018 };
3019
3020 static void show_usage(void)
3021 {
3022     printf("Simple media player\n");
3023     printf("usage: %s [options] input_file\n", program_name);
3024     printf("\n");
3025 }
3026
3027 static int opt_help(const char *opt, const char *arg)
3028 {
3029     av_log_set_callback(log_callback_help);
3030     show_usage();
3031     show_help_options(options, "Main options:\n",
3032                       OPT_EXPERT, 0);
3033     show_help_options(options, "\nAdvanced options:\n",
3034                       OPT_EXPERT, OPT_EXPERT);
3035     printf("\n");
3036     show_help_children(avcodec_get_class(), AV_OPT_FLAG_DECODING_PARAM);
3037     show_help_children(avformat_get_class(), AV_OPT_FLAG_DECODING_PARAM);
3038 #if !CONFIG_AVFILTER
3039     show_help_children(sws_get_class(), AV_OPT_FLAG_ENCODING_PARAM);
3040 #endif
3041     printf("\nWhile playing:\n"
3042            "q, ESC              quit\n"
3043            "f                   toggle full screen\n"
3044            "p, SPC              pause\n"
3045            "a                   cycle audio channel\n"
3046            "v                   cycle video channel\n"
3047            "t                   cycle subtitle channel\n"
3048            "w                   show audio waves\n"
3049            "s                   activate frame-step mode\n"
3050            "left/right          seek backward/forward 10 seconds\n"
3051            "down/up             seek backward/forward 1 minute\n"
3052            "mouse click         seek to percentage in file corresponding to fraction of width\n"
3053            );
3054     return 0;
3055 }
3056
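     /* Lock manager callback for av_lockmgr_register(): libavcodec uses it to
      * serialize its non-thread-safe paths.  The locks are backed by SDL mutexes;
      * the callback must return 0 on success and nonzero on failure, hence the !!
      * around SDL_LockMutex()/SDL_UnlockMutex(), which return 0 on success. */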
3057 static int lockmgr(void **mtx, enum AVLockOp op)
3058 {
3059     switch (op) {
3060     case AV_LOCK_CREATE:
3061         *mtx = SDL_CreateMutex();
3062         if (!*mtx)
3063             return 1;
3064         return 0;
3065     case AV_LOCK_OBTAIN:
3066         return !!SDL_LockMutex(*mtx);
3067     case AV_LOCK_RELEASE:
3068         return !!SDL_UnlockMutex(*mtx);
3069     case AV_LOCK_DESTROY:
3070         SDL_DestroyMutex(*mtx);
3071         return 0;
3072     }
3073     return 1;
3074 }
3075
3076 /* program entry point */
3077 int main(int argc, char **argv)
3078 {
3079     int flags;
3080     VideoState *is;
3081
3082     av_log_set_flags(AV_LOG_SKIP_REPEATED);
3083     parse_loglevel(argc, argv, options);
3084
3085     /* register all codecs, demuxers and protocols */
3086     avcodec_register_all();
3087 #if CONFIG_AVDEVICE
3088     avdevice_register_all();
3089 #endif
3090 #if CONFIG_AVFILTER
3091     avfilter_register_all();
3092 #endif
3093     av_register_all();
3094
3095     init_opts();
3096
3097     show_banner();
3098
3099     parse_options(NULL, argc, argv, options, opt_input_file);
3100
3101     if (!input_filename) {
3102         show_usage();
3103         fprintf(stderr, "An input file must be specified\n");
3104         fprintf(stderr, "Use -h to get full help or, even better, run 'man %s'\n", program_name);
3105         exit(1);
3106     }
3107
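         /* with -nodisp there is nothing to render, so do not select a video stream either */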
3108     if (display_disable) {
3109         video_disable = 1;
3110     }
3111     flags = SDL_INIT_VIDEO | SDL_INIT_AUDIO | SDL_INIT_TIMER;
3112     if (audio_disable)
3113         flags &= ~SDL_INIT_AUDIO;
3114 #if !defined(__MINGW32__) && !defined(__APPLE__)
3115     flags |= SDL_INIT_EVENTTHREAD; /* Not supported on Windows or Mac OS X */
3116 #endif
3117     if (SDL_Init (flags)) {
3118         fprintf(stderr, "Could not initialize SDL - %s\n", SDL_GetError());
3119         fprintf(stderr, "(Did you set the DISPLAY variable?)\n");
3120         exit(1);
3121     }
3122
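         /* remember the desktop resolution so full-screen mode can match it */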
3123     if (!display_disable) {
3124 #if HAVE_SDL_VIDEO_SIZE
3125         const SDL_VideoInfo *vi = SDL_GetVideoInfo();
3126         fs_screen_width = vi->current_w;
3127         fs_screen_height = vi->current_h;
3128 #endif
3129     }
3130
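         /* drop event types the event loop never reads so they do not pile up in SDL's queue */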
3131     SDL_EventState(SDL_ACTIVEEVENT, SDL_IGNORE);
3132     SDL_EventState(SDL_SYSWMEVENT, SDL_IGNORE);
3133     SDL_EventState(SDL_USEREVENT, SDL_IGNORE);
3134
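         /* install the SDL-based lock manager before any codec is opened */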
3135     if (av_lockmgr_register(lockmgr)) {
3136         fprintf(stderr, "Could not initialize lock manager!\n");
3137         do_exit(NULL);
3138     }
3139
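         /* flush_pkt is a sentinel packet: it is pushed into the packet queues on
            seek, and the decoder threads recognize it by its data pointer and flush
            their codec buffers before continuing */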
3140     av_init_packet(&flush_pkt);
3141     flush_pkt.data = (uint8_t *)"FLUSH";
3142
3143     is = stream_open(input_filename, file_iformat);
3144     if (!is) {
3145         fprintf(stderr, "Failed to initialize VideoState!\n");
3146         do_exit(NULL);
3147     }
3148
3149     event_loop(is);
3150
3151     /* event_loop() never returns; playback ends via do_exit() */
3152
3153     return 0;
3154 }