1 /*
2  * ffplay : Simple Media Player based on the FFmpeg libraries
3  * Copyright (c) 2003 Fabrice Bellard
4  *
5  * This file is part of FFmpeg.
6  *
7  * FFmpeg is free software; you can redistribute it and/or
8  * modify it under the terms of the GNU Lesser General Public
9  * License as published by the Free Software Foundation; either
10  * version 2.1 of the License, or (at your option) any later version.
11  *
12  * FFmpeg is distributed in the hope that it will be useful,
13  * but WITHOUT ANY WARRANTY; without even the implied warranty of
14  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
15  * Lesser General Public License for more details.
16  *
17  * You should have received a copy of the GNU Lesser General Public
18  * License along with FFmpeg; if not, write to the Free Software
19  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
20  */
21
22 #include "config.h"
23 #include <inttypes.h>
24 #include <math.h>
25 #include <limits.h>
26 #include "libavutil/avstring.h"
27 #include "libavutil/colorspace.h"
28 #include "libavutil/mathematics.h"
29 #include "libavutil/pixdesc.h"
30 #include "libavutil/imgutils.h"
31 #include "libavutil/dict.h"
32 #include "libavutil/parseutils.h"
33 #include "libavutil/samplefmt.h"
34 #include "libavutil/avassert.h"
35 #include "libavformat/avformat.h"
36 #include "libavdevice/avdevice.h"
37 #include "libswscale/swscale.h"
38 #include "libavcodec/audioconvert.h"
39 #include "libavutil/opt.h"
40 #include "libavcodec/avfft.h"
41 #include "libswresample/swresample.h"
42
43 #if CONFIG_AVFILTER
44 # include "libavfilter/avcodec.h"
45 # include "libavfilter/avfilter.h"
46 # include "libavfilter/avfiltergraph.h"
47 # include "libavfilter/buffersink.h"
48 #endif
49
50 #include <SDL.h>
51 #include <SDL_thread.h>
52
53 #include "cmdutils.h"
54
55 #include <unistd.h>
56 #include <assert.h>
57
58 const char program_name[] = "ffplay";
59 const int program_birth_year = 2003;
60
61 #define MAX_QUEUE_SIZE (15 * 1024 * 1024)
62 #define MIN_AUDIOQ_SIZE (20 * 16 * 1024)
63 #define MIN_FRAMES 5
64
65 /* SDL audio buffer size, in samples. Should be small to have precise
66    A/V sync as SDL does not have hardware buffer fullness info. */
67 #define SDL_AUDIO_BUFFER_SIZE 1024
68
69 /* no AV sync correction is done if below the AV sync threshold */
70 #define AV_SYNC_THRESHOLD 0.01
71 /* no AV correction is done if too big error */
72 #define AV_NOSYNC_THRESHOLD 10.0
73
74 /* maximum audio speed change to get correct sync */
75 #define SAMPLE_CORRECTION_PERCENT_MAX 10
76
77 /* we use about AUDIO_DIFF_AVG_NB A-V differences to compute the average */
78 #define AUDIO_DIFF_AVG_NB   20
79
80 /* NOTE: the size must be big enough to compensate for the hardware audio buffer size */
81 #define SAMPLE_ARRAY_SIZE (2*65536)
82
83 static int sws_flags = SWS_BICUBIC;
84
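/* FIFO of demuxed packets, shared between the read thread and the decoder threads */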
85 typedef struct PacketQueue {
86     AVPacketList *first_pkt, *last_pkt;
87     int nb_packets;
88     int size;
89     int abort_request;
90     SDL_mutex *mutex;
91     SDL_cond *cond;
92 } PacketQueue;
93
94 #define VIDEO_PICTURE_QUEUE_SIZE 2
95 #define SUBPICTURE_QUEUE_SIZE 4
96
97 typedef struct VideoPicture {
98     double pts;                                  ///<presentation time stamp for this picture
99     double duration;                             ///<expected duration of the frame
100     int64_t pos;                                 ///<byte position in file
101     int skip;
102     SDL_Overlay *bmp;
103     int width, height; /* source height & width */
104     int allocated;
105     enum PixelFormat pix_fmt;
106
107 #if CONFIG_AVFILTER
108     AVFilterBufferRef *picref;
109 #endif
110 } VideoPicture;
111
112 typedef struct SubPicture {
113     double pts; /* presentation time stamp for this picture */
114     AVSubtitle sub;
115 } SubPicture;
116
117 enum {
118     AV_SYNC_AUDIO_MASTER, /* default choice */
119     AV_SYNC_VIDEO_MASTER,
120     AV_SYNC_EXTERNAL_CLOCK, /* synchronize to an external clock */
121 };
122
123 typedef struct VideoState {
124     SDL_Thread *read_tid;
125     SDL_Thread *video_tid;
126     SDL_Thread *refresh_tid;
127     AVInputFormat *iformat;
128     int no_background;
129     int abort_request;
130     int paused;
131     int last_paused;
132     int seek_req;
133     int seek_flags;
134     int64_t seek_pos;
135     int64_t seek_rel;
136     int read_pause_return;
137     AVFormatContext *ic;
138
139     int audio_stream;
140
141     int av_sync_type;
142     double external_clock; /* external clock base */
143     int64_t external_clock_time;
144
145     double audio_clock;
146     double audio_diff_cum; /* used for AV difference average computation */
147     double audio_diff_avg_coef;
148     double audio_diff_threshold;
149     int audio_diff_avg_count;
150     AVStream *audio_st;
151     PacketQueue audioq;
152     int audio_hw_buf_size;
153     /* Samples output by the codec. We reserve more space for A/V sync
154        compensation, resampling and format conversion. */
155     DECLARE_ALIGNED(16,uint8_t,audio_buf1)[AVCODEC_MAX_AUDIO_FRAME_SIZE * 4];
156     DECLARE_ALIGNED(16,uint8_t,audio_buf2)[AVCODEC_MAX_AUDIO_FRAME_SIZE * 4];
157     uint8_t *audio_buf;
158     unsigned int audio_buf_size; /* in bytes */
159     int audio_buf_index; /* in bytes */
160     int audio_write_buf_size;
161     AVPacket audio_pkt_temp;
162     AVPacket audio_pkt;
163     enum AVSampleFormat audio_src_fmt;
164     enum AVSampleFormat audio_tgt_fmt;
165     int audio_src_channels;
166     int audio_tgt_channels;
167     int64_t audio_src_channel_layout;
168     int64_t audio_tgt_channel_layout;
169     int audio_src_freq;
170     int audio_tgt_freq;
171     struct SwrContext *swr_ctx;
172     double audio_current_pts;
173     double audio_current_pts_drift;
174
175     enum ShowMode {
176         SHOW_MODE_NONE = -1, SHOW_MODE_VIDEO = 0, SHOW_MODE_WAVES, SHOW_MODE_RDFT, SHOW_MODE_NB
177     } show_mode;
178     int16_t sample_array[SAMPLE_ARRAY_SIZE];
179     int sample_array_index;
180     int last_i_start;
181     RDFTContext *rdft;
182     int rdft_bits;
183     FFTSample *rdft_data;
184     int xpos;
185
186     SDL_Thread *subtitle_tid;
187     int subtitle_stream;
188     int subtitle_stream_changed;
189     AVStream *subtitle_st;
190     PacketQueue subtitleq;
191     SubPicture subpq[SUBPICTURE_QUEUE_SIZE];
192     int subpq_size, subpq_rindex, subpq_windex;
193     SDL_mutex *subpq_mutex;
194     SDL_cond *subpq_cond;
195
196     double frame_timer;
197     double frame_last_pts;
198     double frame_last_duration;
199     double video_clock;                          ///<pts of last decoded frame / predicted pts of next decoded frame
200     int video_stream;
201     AVStream *video_st;
202     PacketQueue videoq;
203     double video_current_pts;                    ///<current displayed pts (different from video_clock if frame fifos are used)
204     double video_current_pts_drift;              ///<video_current_pts - time (av_gettime) at which we updated video_current_pts - used to have running video pts
205     int64_t video_current_pos;                   ///<current displayed file pos
206     VideoPicture pictq[VIDEO_PICTURE_QUEUE_SIZE];
207     int pictq_size, pictq_rindex, pictq_windex;
208     SDL_mutex *pictq_mutex;
209     SDL_cond *pictq_cond;
210 #if !CONFIG_AVFILTER
211     struct SwsContext *img_convert_ctx;
212 #endif
213
214     char filename[1024];
215     int width, height, xleft, ytop;
216     int step;
217
218 #if CONFIG_AVFILTER
219     AVFilterContext *out_video_filter;          ///<the last filter in the video chain
220 #endif
221
222     int refresh;
223 } VideoState;
224
225 static int opt_help(const char *opt, const char *arg);
226
227 /* options specified by the user */
228 static AVInputFormat *file_iformat;
229 static const char *input_filename;
230 static const char *window_title;
231 static int fs_screen_width;
232 static int fs_screen_height;
233 static int screen_width = 0;
234 static int screen_height = 0;
235 static int audio_disable;
236 static int video_disable;
237 static int wanted_stream[AVMEDIA_TYPE_NB]={
238     [AVMEDIA_TYPE_AUDIO]=-1,
239     [AVMEDIA_TYPE_VIDEO]=-1,
240     [AVMEDIA_TYPE_SUBTITLE]=-1,
241 };
242 static int seek_by_bytes=-1;
243 static int display_disable;
244 static int show_status = 1;
245 static int av_sync_type = AV_SYNC_AUDIO_MASTER;
246 static int64_t start_time = AV_NOPTS_VALUE;
247 static int64_t duration = AV_NOPTS_VALUE;
248 static int workaround_bugs = 1;
249 static int fast = 0;
250 static int genpts = 0;
251 static int lowres = 0;
252 static int idct = FF_IDCT_AUTO;
253 static enum AVDiscard skip_frame= AVDISCARD_DEFAULT;
254 static enum AVDiscard skip_idct= AVDISCARD_DEFAULT;
255 static enum AVDiscard skip_loop_filter= AVDISCARD_DEFAULT;
256 static int error_recognition = FF_ER_CAREFUL;
257 static int error_concealment = 3;
258 static int decoder_reorder_pts= -1;
259 static int autoexit;
260 static int exit_on_keydown;
261 static int exit_on_mousedown;
262 static int loop=1;
263 static int framedrop=-1;
264 static enum ShowMode show_mode = SHOW_MODE_NONE;
265 static const char *audio_codec_name;
266 static const char *subtitle_codec_name;
267 static const char *video_codec_name;
268
269 static int rdftspeed=20;
270 #if CONFIG_AVFILTER
271 static char *vfilters = NULL;
272 #endif
273
274 /* current context */
275 static int is_full_screen;
276 static int64_t audio_callback_time;
277
278 static AVPacket flush_pkt;
279
280 #define FF_ALLOC_EVENT   (SDL_USEREVENT)
281 #define FF_REFRESH_EVENT (SDL_USEREVENT + 1)
282 #define FF_QUIT_EVENT    (SDL_USEREVENT + 2)
283
284 static SDL_Surface *screen;
285
286 void exit_program(int ret)
287 {
288     exit(ret);
289 }
290
291 static int packet_queue_put(PacketQueue *q, AVPacket *pkt)
292 {
293     AVPacketList *pkt1;
294
295     /* duplicate the packet */
296     if (pkt!=&flush_pkt && av_dup_packet(pkt) < 0)
297         return -1;
298
299     pkt1 = av_malloc(sizeof(AVPacketList));
300     if (!pkt1)
301         return -1;
302     pkt1->pkt = *pkt;
303     pkt1->next = NULL;
304
305
306     SDL_LockMutex(q->mutex);
307
308     if (!q->last_pkt)
309
310         q->first_pkt = pkt1;
311     else
312         q->last_pkt->next = pkt1;
313     q->last_pkt = pkt1;
314     q->nb_packets++;
315     q->size += pkt1->pkt.size + sizeof(*pkt1);
316     /* XXX: should duplicate packet data in DV case */
317     SDL_CondSignal(q->cond);
318
319     SDL_UnlockMutex(q->mutex);
320     return 0;
321 }
322
323 /* packet queue handling */
324 static void packet_queue_init(PacketQueue *q)
325 {
326     memset(q, 0, sizeof(PacketQueue));
327     q->mutex = SDL_CreateMutex();
328     q->cond = SDL_CreateCond();
329     packet_queue_put(q, &flush_pkt);
330 }
331
332 static void packet_queue_flush(PacketQueue *q)
333 {
334     AVPacketList *pkt, *pkt1;
335
336     SDL_LockMutex(q->mutex);
337     for(pkt = q->first_pkt; pkt != NULL; pkt = pkt1) {
338         pkt1 = pkt->next;
339         av_free_packet(&pkt->pkt);
340         av_freep(&pkt);
341     }
342     q->last_pkt = NULL;
343     q->first_pkt = NULL;
344     q->nb_packets = 0;
345     q->size = 0;
346     SDL_UnlockMutex(q->mutex);
347 }
348
349 static void packet_queue_end(PacketQueue *q)
350 {
351     packet_queue_flush(q);
352     SDL_DestroyMutex(q->mutex);
353     SDL_DestroyCond(q->cond);
354 }
355
356 static void packet_queue_abort(PacketQueue *q)
357 {
358     SDL_LockMutex(q->mutex);
359
360     q->abort_request = 1;
361
362     SDL_CondSignal(q->cond);
363
364     SDL_UnlockMutex(q->mutex);
365 }
366
367 /* return < 0 if aborted, 0 if no packet and > 0 if packet.  */
368 static int packet_queue_get(PacketQueue *q, AVPacket *pkt, int block)
369 {
370     AVPacketList *pkt1;
371     int ret;
372
373     SDL_LockMutex(q->mutex);
374
375     for(;;) {
376         if (q->abort_request) {
377             ret = -1;
378             break;
379         }
380
381         pkt1 = q->first_pkt;
382         if (pkt1) {
383             q->first_pkt = pkt1->next;
384             if (!q->first_pkt)
385                 q->last_pkt = NULL;
386             q->nb_packets--;
387             q->size -= pkt1->pkt.size + sizeof(*pkt1);
388             *pkt = pkt1->pkt;
389             av_free(pkt1);
390             ret = 1;
391             break;
392         } else if (!block) {
393             ret = 0;
394             break;
395         } else {
396             SDL_CondWait(q->cond, q->mutex);
397         }
398     }
399     SDL_UnlockMutex(q->mutex);
400     return ret;
401 }
402
403 static inline void fill_rectangle(SDL_Surface *screen,
404                                   int x, int y, int w, int h, int color)
405 {
406     SDL_Rect rect;
407     rect.x = x;
408     rect.y = y;
409     rect.w = w;
410     rect.h = h;
411     SDL_FillRect(screen, &rect, color);
412 }
413
414 #define ALPHA_BLEND(a, oldp, newp, s)\
415 ((((oldp << s) * (255 - (a))) + (newp * (a))) / (255 << s))
416
417 #define RGBA_IN(r, g, b, a, s)\
418 {\
419     unsigned int v = ((const uint32_t *)(s))[0];\
420     a = (v >> 24) & 0xff;\
421     r = (v >> 16) & 0xff;\
422     g = (v >> 8) & 0xff;\
423     b = v & 0xff;\
424 }
425
426 #define YUVA_IN(y, u, v, a, s, pal)\
427 {\
428     unsigned int val = ((const uint32_t *)(pal))[*(const uint8_t*)(s)];\
429     a = (val >> 24) & 0xff;\
430     y = (val >> 16) & 0xff;\
431     u = (val >> 8) & 0xff;\
432     v = val & 0xff;\
433 }
434
435 #define YUVA_OUT(d, y, u, v, a)\
436 {\
437     ((uint32_t *)(d))[0] = (a << 24) | (y << 16) | (u << 8) | v;\
438 }
439
440
441 #define BPP 1
442
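/* Alpha-blend a palettized (PAL8 with a YUVA palette) subtitle rectangle onto a
   YUV420P destination picture; chroma is averaged over each 2x2 luma block. */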
443 static void blend_subrect(AVPicture *dst, const AVSubtitleRect *rect, int imgw, int imgh)
444 {
445     int wrap, wrap3, width2, skip2;
446     int y, u, v, a, u1, v1, a1, w, h;
447     uint8_t *lum, *cb, *cr;
448     const uint8_t *p;
449     const uint32_t *pal;
450     int dstx, dsty, dstw, dsth;
451
452     dstw = av_clip(rect->w, 0, imgw);
453     dsth = av_clip(rect->h, 0, imgh);
454     dstx = av_clip(rect->x, 0, imgw - dstw);
455     dsty = av_clip(rect->y, 0, imgh - dsth);
456     lum = dst->data[0] + dsty * dst->linesize[0];
457     cb = dst->data[1] + (dsty >> 1) * dst->linesize[1];
458     cr = dst->data[2] + (dsty >> 1) * dst->linesize[2];
459
460     width2 = ((dstw + 1) >> 1) + (dstx & ~dstw & 1);
461     skip2 = dstx >> 1;
462     wrap = dst->linesize[0];
463     wrap3 = rect->pict.linesize[0];
464     p = rect->pict.data[0];
465     pal = (const uint32_t *)rect->pict.data[1];  /* Now in YCrCb! */
466
467     if (dsty & 1) {
468         lum += dstx;
469         cb += skip2;
470         cr += skip2;
471
472         if (dstx & 1) {
473             YUVA_IN(y, u, v, a, p, pal);
474             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
475             cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
476             cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
477             cb++;
478             cr++;
479             lum++;
480             p += BPP;
481         }
482         for(w = dstw - (dstx & 1); w >= 2; w -= 2) {
483             YUVA_IN(y, u, v, a, p, pal);
484             u1 = u;
485             v1 = v;
486             a1 = a;
487             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
488
489             YUVA_IN(y, u, v, a, p + BPP, pal);
490             u1 += u;
491             v1 += v;
492             a1 += a;
493             lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
494             cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
495             cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
496             cb++;
497             cr++;
498             p += 2 * BPP;
499             lum += 2;
500         }
501         if (w) {
502             YUVA_IN(y, u, v, a, p, pal);
503             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
504             cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
505             cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
506             p++;
507             lum++;
508         }
509         p += wrap3 - dstw * BPP;
510         lum += wrap - dstw - dstx;
511         cb += dst->linesize[1] - width2 - skip2;
512         cr += dst->linesize[2] - width2 - skip2;
513     }
514     for(h = dsth - (dsty & 1); h >= 2; h -= 2) {
515         lum += dstx;
516         cb += skip2;
517         cr += skip2;
518
519         if (dstx & 1) {
520             YUVA_IN(y, u, v, a, p, pal);
521             u1 = u;
522             v1 = v;
523             a1 = a;
524             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
525             p += wrap3;
526             lum += wrap;
527             YUVA_IN(y, u, v, a, p, pal);
528             u1 += u;
529             v1 += v;
530             a1 += a;
531             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
532             cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
533             cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
534             cb++;
535             cr++;
536             p += -wrap3 + BPP;
537             lum += -wrap + 1;
538         }
539         for(w = dstw - (dstx & 1); w >= 2; w -= 2) {
540             YUVA_IN(y, u, v, a, p, pal);
541             u1 = u;
542             v1 = v;
543             a1 = a;
544             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
545
546             YUVA_IN(y, u, v, a, p + BPP, pal);
547             u1 += u;
548             v1 += v;
549             a1 += a;
550             lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
551             p += wrap3;
552             lum += wrap;
553
554             YUVA_IN(y, u, v, a, p, pal);
555             u1 += u;
556             v1 += v;
557             a1 += a;
558             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
559
560             YUVA_IN(y, u, v, a, p + BPP, pal);
561             u1 += u;
562             v1 += v;
563             a1 += a;
564             lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
565
566             cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 2);
567             cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 2);
568
569             cb++;
570             cr++;
571             p += -wrap3 + 2 * BPP;
572             lum += -wrap + 2;
573         }
574         if (w) {
575             YUVA_IN(y, u, v, a, p, pal);
576             u1 = u;
577             v1 = v;
578             a1 = a;
579             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
580             p += wrap3;
581             lum += wrap;
582             YUVA_IN(y, u, v, a, p, pal);
583             u1 += u;
584             v1 += v;
585             a1 += a;
586             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
587             cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
588             cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
589             cb++;
590             cr++;
591             p += -wrap3 + BPP;
592             lum += -wrap + 1;
593         }
594         p += wrap3 + (wrap3 - dstw * BPP);
595         lum += wrap + (wrap - dstw - dstx);
596         cb += dst->linesize[1] - width2 - skip2;
597         cr += dst->linesize[2] - width2 - skip2;
598     }
599     /* handle odd height */
600     if (h) {
601         lum += dstx;
602         cb += skip2;
603         cr += skip2;
604
605         if (dstx & 1) {
606             YUVA_IN(y, u, v, a, p, pal);
607             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
608             cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
609             cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
610             cb++;
611             cr++;
612             lum++;
613             p += BPP;
614         }
615         for(w = dstw - (dstx & 1); w >= 2; w -= 2) {
616             YUVA_IN(y, u, v, a, p, pal);
617             u1 = u;
618             v1 = v;
619             a1 = a;
620             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
621
622             YUVA_IN(y, u, v, a, p + BPP, pal);
623             u1 += u;
624             v1 += v;
625             a1 += a;
626             lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
627             cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
628             cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
629             cb++;
630             cr++;
631             p += 2 * BPP;
632             lum += 2;
633         }
634         if (w) {
635             YUVA_IN(y, u, v, a, p, pal);
636             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
637             cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
638             cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
639         }
640     }
641 }
642
643 static void free_subpicture(SubPicture *sp)
644 {
645     avsubtitle_free(&sp->sub);
646 }
647
648 static void video_image_display(VideoState *is)
649 {
650     VideoPicture *vp;
651     SubPicture *sp;
652     AVPicture pict;
653     float aspect_ratio;
654     int width, height, x, y;
655     SDL_Rect rect;
656     int i;
657
658     vp = &is->pictq[is->pictq_rindex];
659     if (vp->bmp) {
660 #if CONFIG_AVFILTER
661          if (vp->picref->video->sample_aspect_ratio.num == 0)
662              aspect_ratio = 0;
663          else
664              aspect_ratio = av_q2d(vp->picref->video->sample_aspect_ratio);
665 #else
666
667         /* XXX: use variable in the frame */
668         if (is->video_st->sample_aspect_ratio.num)
669             aspect_ratio = av_q2d(is->video_st->sample_aspect_ratio);
670         else if (is->video_st->codec->sample_aspect_ratio.num)
671             aspect_ratio = av_q2d(is->video_st->codec->sample_aspect_ratio);
672         else
673             aspect_ratio = 0;
674 #endif
675         if (aspect_ratio <= 0.0)
676             aspect_ratio = 1.0;
677         aspect_ratio *= (float)vp->width / (float)vp->height;
678
679         if (is->subtitle_st) {
680             if (is->subpq_size > 0) {
681                 sp = &is->subpq[is->subpq_rindex];
682
683                 if (vp->pts >= sp->pts + ((float) sp->sub.start_display_time / 1000)) {
684                     SDL_LockYUVOverlay (vp->bmp);
685
686                     pict.data[0] = vp->bmp->pixels[0];
687                     pict.data[1] = vp->bmp->pixels[2];
688                     pict.data[2] = vp->bmp->pixels[1];
689
690                     pict.linesize[0] = vp->bmp->pitches[0];
691                     pict.linesize[1] = vp->bmp->pitches[2];
692                     pict.linesize[2] = vp->bmp->pitches[1];
693
694                     for (i = 0; i < sp->sub.num_rects; i++)
695                         blend_subrect(&pict, sp->sub.rects[i],
696                                       vp->bmp->w, vp->bmp->h);
697
698                     SDL_UnlockYUVOverlay (vp->bmp);
699                 }
700             }
701         }
702
703
704         /* XXX: we assume the screen has a 1:1 pixel aspect ratio */
705         height = is->height;
706         width = ((int)rint(height * aspect_ratio)) & ~1;
707         if (width > is->width) {
708             width = is->width;
709             height = ((int)rint(width / aspect_ratio)) & ~1;
710         }
711         x = (is->width - width) / 2;
712         y = (is->height - height) / 2;
713         is->no_background = 0;
714         rect.x = is->xleft + x;
715         rect.y = is->ytop  + y;
716         rect.w = FFMAX(width,  1);
717         rect.h = FFMAX(height, 1);
718         SDL_DisplayYUVOverlay(vp->bmp, &rect);
719     }
720 }
721
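/* modulo that always returns a value in [0, b), even for negative a */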
722 static inline int compute_mod(int a, int b)
723 {
724     return a < 0 ? a%b + b : a%b;
725 }
726
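/* draw the waveform (SHOW_MODE_WAVES) or the RDFT spectrum of the samples
   currently being played */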
727 static void video_audio_display(VideoState *s)
728 {
729     int i, i_start, x, y1, y, ys, delay, n, nb_display_channels;
730     int ch, channels, h, h2, bgcolor, fgcolor;
731     int64_t time_diff;
732     int rdft_bits, nb_freq;
733
734     for(rdft_bits=1; (1<<rdft_bits)<2*s->height; rdft_bits++)
735         ;
736     nb_freq= 1<<(rdft_bits-1);
737
738     /* compute display index : center on currently output samples */
739     channels = s->audio_tgt_channels;
740     nb_display_channels = channels;
741     if (!s->paused) {
742         int data_used= s->show_mode == SHOW_MODE_WAVES ? s->width : (2*nb_freq);
743         n = 2 * channels;
744         delay = s->audio_write_buf_size;
745         delay /= n;
746
747         /* to be more precise, we take into account the time spent since
748            the last buffer computation */
749         if (audio_callback_time) {
750             time_diff = av_gettime() - audio_callback_time;
751             delay -= (time_diff * s->audio_tgt_freq) / 1000000;
752         }
753
754         delay += 2*data_used;
755         if (delay < data_used)
756             delay = data_used;
757
758         i_start= x = compute_mod(s->sample_array_index - delay * channels, SAMPLE_ARRAY_SIZE);
759         if (s->show_mode == SHOW_MODE_WAVES) {
760             h= INT_MIN;
761             for(i=0; i<1000; i+=channels){
762                 int idx= (SAMPLE_ARRAY_SIZE + x - i) % SAMPLE_ARRAY_SIZE;
763                 int a= s->sample_array[idx];
764                 int b= s->sample_array[(idx + 4*channels)%SAMPLE_ARRAY_SIZE];
765                 int c= s->sample_array[(idx + 5*channels)%SAMPLE_ARRAY_SIZE];
766                 int d= s->sample_array[(idx + 9*channels)%SAMPLE_ARRAY_SIZE];
767                 int score= a-d;
768                 if(h<score && (b^c)<0){
769                     h= score;
770                     i_start= idx;
771                 }
772             }
773         }
774
775         s->last_i_start = i_start;
776     } else {
777         i_start = s->last_i_start;
778     }
779
780     bgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0x00);
781     if (s->show_mode == SHOW_MODE_WAVES) {
782         fill_rectangle(screen,
783                        s->xleft, s->ytop, s->width, s->height,
784                        bgcolor);
785
786         fgcolor = SDL_MapRGB(screen->format, 0xff, 0xff, 0xff);
787
788         /* total height for one channel */
789         h = s->height / nb_display_channels;
790         /* graph height / 2 */
791         h2 = (h * 9) / 20;
792         for(ch = 0;ch < nb_display_channels; ch++) {
793             i = i_start + ch;
794             y1 = s->ytop + ch * h + (h / 2); /* position of center line */
795             for(x = 0; x < s->width; x++) {
796                 y = (s->sample_array[i] * h2) >> 15;
797                 if (y < 0) {
798                     y = -y;
799                     ys = y1 - y;
800                 } else {
801                     ys = y1;
802                 }
803                 fill_rectangle(screen,
804                                s->xleft + x, ys, 1, y,
805                                fgcolor);
806                 i += channels;
807                 if (i >= SAMPLE_ARRAY_SIZE)
808                     i -= SAMPLE_ARRAY_SIZE;
809             }
810         }
811
812         fgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0xff);
813
814         for(ch = 1;ch < nb_display_channels; ch++) {
815             y = s->ytop + ch * h;
816             fill_rectangle(screen,
817                            s->xleft, y, s->width, 1,
818                            fgcolor);
819         }
820         SDL_UpdateRect(screen, s->xleft, s->ytop, s->width, s->height);
821     }else{
822         nb_display_channels= FFMIN(nb_display_channels, 2);
823         if(rdft_bits != s->rdft_bits){
824             av_rdft_end(s->rdft);
825             av_free(s->rdft_data);
826             s->rdft = av_rdft_init(rdft_bits, DFT_R2C);
827             s->rdft_bits= rdft_bits;
828             s->rdft_data= av_malloc(4*nb_freq*sizeof(*s->rdft_data));
829         }
830         {
831             FFTSample *data[2];
832             for(ch = 0;ch < nb_display_channels; ch++) {
833                 data[ch] = s->rdft_data + 2*nb_freq*ch;
834                 i = i_start + ch;
835                 for(x = 0; x < 2*nb_freq; x++) {
836                     double w= (x-nb_freq)*(1.0/nb_freq);
837                     data[ch][x]= s->sample_array[i]*(1.0-w*w);
838                     i += channels;
839                     if (i >= SAMPLE_ARRAY_SIZE)
840                         i -= SAMPLE_ARRAY_SIZE;
841                 }
842                 av_rdft_calc(s->rdft, data[ch]);
843             }
844             // Least efficient way to do this; we should of course access it directly, but it's more than fast enough.
845             for(y=0; y<s->height; y++){
846                 double w= 1/sqrt(nb_freq);
847                 int a= sqrt(w*sqrt(data[0][2*y+0]*data[0][2*y+0] + data[0][2*y+1]*data[0][2*y+1]));
848                 int b= (nb_display_channels == 2 ) ? sqrt(w*sqrt(data[1][2*y+0]*data[1][2*y+0]
849                        + data[1][2*y+1]*data[1][2*y+1])) : a;
850                 a= FFMIN(a,255);
851                 b= FFMIN(b,255);
852                 fgcolor = SDL_MapRGB(screen->format, a, b, (a+b)/2);
853
854                 fill_rectangle(screen,
855                             s->xpos, s->height-y, 1, 1,
856                             fgcolor);
857             }
858         }
859         SDL_UpdateRect(screen, s->xpos, s->ytop, 1, s->height);
860         s->xpos++;
861         if(s->xpos >= s->width)
862             s->xpos= s->xleft;
863     }
864 }
865
866 static void stream_close(VideoState *is)
867 {
868     VideoPicture *vp;
869     int i;
870     /* XXX: use a special url_shutdown call to abort parse cleanly */
871     is->abort_request = 1;
872     SDL_WaitThread(is->read_tid, NULL);
873     SDL_WaitThread(is->refresh_tid, NULL);
874
875     /* free all pictures */
876     for(i=0;i<VIDEO_PICTURE_QUEUE_SIZE; i++) {
877         vp = &is->pictq[i];
878 #if CONFIG_AVFILTER
879         if (vp->picref) {
880             avfilter_unref_buffer(vp->picref);
881             vp->picref = NULL;
882         }
883 #endif
884         if (vp->bmp) {
885             SDL_FreeYUVOverlay(vp->bmp);
886             vp->bmp = NULL;
887         }
888     }
889     SDL_DestroyMutex(is->pictq_mutex);
890     SDL_DestroyCond(is->pictq_cond);
891     SDL_DestroyMutex(is->subpq_mutex);
892     SDL_DestroyCond(is->subpq_cond);
893 #if !CONFIG_AVFILTER
894     if (is->img_convert_ctx)
895         sws_freeContext(is->img_convert_ctx);
896 #endif
897     av_free(is);
898 }
899
900 static void do_exit(VideoState *is)
901 {
902     if (is) {
903         stream_close(is);
904     }
905     av_lockmgr_register(NULL);
906     uninit_opts();
907 #if CONFIG_AVFILTER
908     avfilter_uninit();
909 #endif
910     if (show_status)
911         printf("\n");
912     SDL_Quit();
913     av_log(NULL, AV_LOG_QUIET, "%s", "");
914     exit(0);
915 }
916
917 static int video_open(VideoState *is){
918     int flags = SDL_HWSURFACE|SDL_ASYNCBLIT|SDL_HWACCEL;
919     int w,h;
920
921     if(is_full_screen) flags |= SDL_FULLSCREEN;
922     else               flags |= SDL_RESIZABLE;
923
924     if (is_full_screen && fs_screen_width) {
925         w = fs_screen_width;
926         h = fs_screen_height;
927     } else if(!is_full_screen && screen_width){
928         w = screen_width;
929         h = screen_height;
930 #if CONFIG_AVFILTER
931     }else if (is->out_video_filter && is->out_video_filter->inputs[0]){
932         w = is->out_video_filter->inputs[0]->w;
933         h = is->out_video_filter->inputs[0]->h;
934 #else
935     }else if (is->video_st && is->video_st->codec->width){
936         w = is->video_st->codec->width;
937         h = is->video_st->codec->height;
938 #endif
939     } else {
940         w = 640;
941         h = 480;
942     }
943     if(screen && is->width == screen->w && screen->w == w
944        && is->height== screen->h && screen->h == h)
945         return 0;
946     screen = SDL_SetVideoMode(w, h, 0, flags);
947     if (!screen) {
948         fprintf(stderr, "SDL: could not set video mode - exiting\n");
949         do_exit(is);
950     }
951     if (!window_title)
952         window_title = input_filename;
953     SDL_WM_SetCaption(window_title, window_title);
954
955     is->width = screen->w;
956     is->height = screen->h;
957
958     return 0;
959 }
960
961 /* display the current picture, if any */
962 static void video_display(VideoState *is)
963 {
964     if(!screen)
965         video_open(is);
966     if (is->audio_st && is->show_mode != SHOW_MODE_VIDEO)
967         video_audio_display(is);
968     else if (is->video_st)
969         video_image_display(is);
970 }
971
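/* periodically push FF_REFRESH_EVENT to the main event loop so that
   video_refresh() is called at roughly the display rate */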
972 static int refresh_thread(void *opaque)
973 {
974     VideoState *is= opaque;
975     while(!is->abort_request){
976         SDL_Event event;
977         event.type = FF_REFRESH_EVENT;
978         event.user.data1 = opaque;
979         if(!is->refresh){
980             is->refresh=1;
981             SDL_PushEvent(&event);
982         }
983         // FIXME: ideally we should wait for the correct time, but SDL's event passing is so slow it would be silly
984         usleep(is->audio_st && is->show_mode != SHOW_MODE_VIDEO ? rdftspeed*1000 : 5000);
985     }
986     return 0;
987 }
988
989 /* get the current audio clock value */
990 static double get_audio_clock(VideoState *is)
991 {
992     if (is->paused) {
993         return is->audio_current_pts;
994     } else {
995         return is->audio_current_pts_drift + av_gettime() / 1000000.0;
996     }
997 }
998
999 /* get the current video clock value */
1000 static double get_video_clock(VideoState *is)
1001 {
1002     if (is->paused) {
1003         return is->video_current_pts;
1004     } else {
1005         return is->video_current_pts_drift + av_gettime() / 1000000.0;
1006     }
1007 }
1008
1009 /* get the current external clock value */
1010 static double get_external_clock(VideoState *is)
1011 {
1012     int64_t ti;
1013     ti = av_gettime();
1014     return is->external_clock + ((ti - is->external_clock_time) * 1e-6);
1015 }
1016
1017 /* get the current master clock value */
1018 static double get_master_clock(VideoState *is)
1019 {
1020     double val;
1021
1022     if (is->av_sync_type == AV_SYNC_VIDEO_MASTER) {
1023         if (is->video_st)
1024             val = get_video_clock(is);
1025         else
1026             val = get_audio_clock(is);
1027     } else if (is->av_sync_type == AV_SYNC_AUDIO_MASTER) {
1028         if (is->audio_st)
1029             val = get_audio_clock(is);
1030         else
1031             val = get_video_clock(is);
1032     } else {
1033         val = get_external_clock(is);
1034     }
1035     return val;
1036 }
1037
1038 /* seek in the stream */
1039 static void stream_seek(VideoState *is, int64_t pos, int64_t rel, int seek_by_bytes)
1040 {
1041     if (!is->seek_req) {
1042         is->seek_pos = pos;
1043         is->seek_rel = rel;
1044         is->seek_flags &= ~AVSEEK_FLAG_BYTE;
1045         if (seek_by_bytes)
1046             is->seek_flags |= AVSEEK_FLAG_BYTE;
1047         is->seek_req = 1;
1048     }
1049 }
1050
1051 /* pause or resume the video */
1052 static void stream_toggle_pause(VideoState *is)
1053 {
1054     if (is->paused) {
1055         is->frame_timer += av_gettime() / 1000000.0 + is->video_current_pts_drift - is->video_current_pts;
1056         if(is->read_pause_return != AVERROR(ENOSYS)){
1057             is->video_current_pts = is->video_current_pts_drift + av_gettime() / 1000000.0;
1058         }
1059         is->video_current_pts_drift = is->video_current_pts - av_gettime() / 1000000.0;
1060     }
1061     is->paused = !is->paused;
1062 }
1063
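/* adjust the nominal frame delay so that video follows the master clock when
   video is not itself the master: show the frame immediately if video is late,
   double the delay if it is ahead */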
1064 static double compute_target_delay(double delay, VideoState *is)
1065 {
1066     double sync_threshold, diff;
1067
1068     /* update delay to follow master synchronisation source */
1069     if (((is->av_sync_type == AV_SYNC_AUDIO_MASTER && is->audio_st) ||
1070          is->av_sync_type == AV_SYNC_EXTERNAL_CLOCK)) {
1071         /* if video is slave, we try to correct big delays by
1072            duplicating or deleting a frame */
1073         diff = get_video_clock(is) - get_master_clock(is);
1074
1075         /* skip or repeat frame. We take into account the
1076            delay to compute the threshold. I still don't know
1077            if it is the best guess */
1078         sync_threshold = FFMAX(AV_SYNC_THRESHOLD, delay);
1079         if (fabs(diff) < AV_NOSYNC_THRESHOLD) {
1080             if (diff <= -sync_threshold)
1081                 delay = 0;
1082             else if (diff >= sync_threshold)
1083                 delay = 2 * delay;
1084         }
1085     }
1086
1087     av_dlog(NULL, "video: delay=%0.3f A-V=%f\n",
1088             delay, -diff);
1089
1090     return delay;
1091 }
1092
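/* advance the read index of the picture queue and wake up the video thread
   if it is waiting for a free slot */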
1093 static void pictq_next_picture(VideoState *is) {
1094     /* update queue size and signal for next picture */
1095     if (++is->pictq_rindex == VIDEO_PICTURE_QUEUE_SIZE)
1096         is->pictq_rindex = 0;
1097
1098     SDL_LockMutex(is->pictq_mutex);
1099     is->pictq_size--;
1100     SDL_CondSignal(is->pictq_cond);
1101     SDL_UnlockMutex(is->pictq_mutex);
1102 }
1103
1104 /* called to display each frame */
1105 static void video_refresh(void *opaque)
1106 {
1107     VideoState *is = opaque;
1108     VideoPicture *vp;
1109
1110     SubPicture *sp, *sp2;
1111
1112     if (is->video_st) {
1113 retry:
1114         if (is->pictq_size == 0) {
1115             // nothing to do, no picture to display in the queue
1116         } else {
1117             double time= av_gettime()/1000000.0;
1118             double last_duration, duration, delay;
1119             /* dequeue the picture */
1120             vp = &is->pictq[is->pictq_rindex];
1121
1122             if (vp->skip) {
1123                 pictq_next_picture(is);
1124                 goto retry;
1125             }
1126
1127             /* compute nominal last_duration */
1128             last_duration = vp->pts - is->frame_last_pts;
1129             if (last_duration > 0 && last_duration < 10.0) {
1130                 /* if duration of the last frame was sane, update last_duration in video state */
1131                 is->frame_last_duration = last_duration;
1132             }
1133             delay = compute_target_delay(is->frame_last_duration, is);
1134
1135             if(time < is->frame_timer + delay)
1136                 return;
1137
1138             is->frame_last_pts = vp->pts;
1139             if (delay > 0)
1140                 is->frame_timer += delay * FFMAX(1, floor((time-is->frame_timer) / delay));
1141
1142             /* update current video pts */
1143             is->video_current_pts = vp->pts;
1144             is->video_current_pts_drift = is->video_current_pts - time;
1145             is->video_current_pos = vp->pos;
1146
1147             if(is->pictq_size > 1) {
1148                  VideoPicture *nextvp= &is->pictq[(is->pictq_rindex+1)%VIDEO_PICTURE_QUEUE_SIZE];
1149                  duration = nextvp->pts - vp->pts; // More accurate this way, as 1/time_base often does not reflect the real frame rate
1150             } else {
1151                  duration = vp->duration;
1152             }
1153
1154             if((framedrop>0 || (framedrop && is->audio_st)) && time > is->frame_timer + duration){
1155                 if(is->pictq_size > 1){
1156                     pictq_next_picture(is);
1157                     goto retry;
1158                 }
1159             }
1160
1161             if(is->subtitle_st) {
1162                 if (is->subtitle_stream_changed) {
1163                     SDL_LockMutex(is->subpq_mutex);
1164
1165                     while (is->subpq_size) {
1166                         free_subpicture(&is->subpq[is->subpq_rindex]);
1167
1168                         /* update queue size and signal for next picture */
1169                         if (++is->subpq_rindex == SUBPICTURE_QUEUE_SIZE)
1170                             is->subpq_rindex = 0;
1171
1172                         is->subpq_size--;
1173                     }
1174                     is->subtitle_stream_changed = 0;
1175
1176                     SDL_CondSignal(is->subpq_cond);
1177                     SDL_UnlockMutex(is->subpq_mutex);
1178                 } else {
1179                     if (is->subpq_size > 0) {
1180                         sp = &is->subpq[is->subpq_rindex];
1181
1182                         if (is->subpq_size > 1)
1183                             sp2 = &is->subpq[(is->subpq_rindex + 1) % SUBPICTURE_QUEUE_SIZE];
1184                         else
1185                             sp2 = NULL;
1186
1187                         if ((is->video_current_pts > (sp->pts + ((float) sp->sub.end_display_time / 1000)))
1188                                 || (sp2 && is->video_current_pts > (sp2->pts + ((float) sp2->sub.start_display_time / 1000))))
1189                         {
1190                             free_subpicture(sp);
1191
1192                             /* update queue size and signal for next picture */
1193                             if (++is->subpq_rindex == SUBPICTURE_QUEUE_SIZE)
1194                                 is->subpq_rindex = 0;
1195
1196                             SDL_LockMutex(is->subpq_mutex);
1197                             is->subpq_size--;
1198                             SDL_CondSignal(is->subpq_cond);
1199                             SDL_UnlockMutex(is->subpq_mutex);
1200                         }
1201                     }
1202                 }
1203             }
1204
1205             /* display picture */
1206             if (!display_disable)
1207                 video_display(is);
1208
1209             pictq_next_picture(is);
1210         }
1211     } else if (is->audio_st) {
1212         /* draw the next audio frame */
1213
1214         /* if there is only an audio stream, then display the audio bars
1215            (better than nothing, just to test the implementation) */
1216
1217         /* display picture */
1218         if (!display_disable)
1219             video_display(is);
1220     }
1221     if (show_status) {
1222         static int64_t last_time;
1223         int64_t cur_time;
1224         int aqsize, vqsize, sqsize;
1225         double av_diff;
1226
1227         cur_time = av_gettime();
1228         if (!last_time || (cur_time - last_time) >= 30000) {
1229             aqsize = 0;
1230             vqsize = 0;
1231             sqsize = 0;
1232             if (is->audio_st)
1233                 aqsize = is->audioq.size;
1234             if (is->video_st)
1235                 vqsize = is->videoq.size;
1236             if (is->subtitle_st)
1237                 sqsize = is->subtitleq.size;
1238             av_diff = 0;
1239             if (is->audio_st && is->video_st)
1240                 av_diff = get_audio_clock(is) - get_video_clock(is);
1241             printf("%7.2f A-V:%7.3f aq=%5dKB vq=%5dKB sq=%5dB f=%"PRId64"/%"PRId64"   \r",
1242                    get_master_clock(is),
1243                    av_diff,
1244                    aqsize / 1024,
1245                    vqsize / 1024,
1246                    sqsize,
1247                    is->video_st ? is->video_st->codec->pts_correction_num_faulty_dts : 0,
1248                    is->video_st ? is->video_st->codec->pts_correction_num_faulty_pts : 0);
1249             fflush(stdout);
1250             last_time = cur_time;
1251         }
1252     }
1253 }
1254
1255 /* allocate a picture (this must be done in the main thread to avoid
1256    potential locking problems) */
1257 static void alloc_picture(void *opaque)
1258 {
1259     VideoState *is = opaque;
1260     VideoPicture *vp;
1261
1262     vp = &is->pictq[is->pictq_windex];
1263
1264     if (vp->bmp)
1265         SDL_FreeYUVOverlay(vp->bmp);
1266
1267 #if CONFIG_AVFILTER
1268     if (vp->picref)
1269         avfilter_unref_buffer(vp->picref);
1270     vp->picref = NULL;
1271
1272     vp->width   = is->out_video_filter->inputs[0]->w;
1273     vp->height  = is->out_video_filter->inputs[0]->h;
1274     vp->pix_fmt = is->out_video_filter->inputs[0]->format;
1275 #else
1276     vp->width   = is->video_st->codec->width;
1277     vp->height  = is->video_st->codec->height;
1278     vp->pix_fmt = is->video_st->codec->pix_fmt;
1279 #endif
1280
1281     vp->bmp = SDL_CreateYUVOverlay(vp->width, vp->height,
1282                                    SDL_YV12_OVERLAY,
1283                                    screen);
1284     if (!vp->bmp || vp->bmp->pitches[0] < vp->width) {
1285         /* SDL allocates a buffer smaller than requested if the video
1286          * overlay hardware is unable to support the requested size. */
1287         fprintf(stderr, "Error: the video system does not support an image\n"
1288                         "size of %dx%d pixels. Try using -lowres or -vf \"scale=w:h\"\n"
1289                         "to reduce the image size.\n", vp->width, vp->height );
1290         do_exit(is);
1291     }
1292
1293     SDL_LockMutex(is->pictq_mutex);
1294     vp->allocated = 1;
1295     SDL_CondSignal(is->pictq_cond);
1296     SDL_UnlockMutex(is->pictq_mutex);
1297 }
1298
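/* queue a decoded frame for display: update the video clock, (re)allocate the
   SDL overlay if the size changed, and copy/convert the frame into it */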
1299 static int queue_picture(VideoState *is, AVFrame *src_frame, double pts1, int64_t pos)
1300 {
1301     VideoPicture *vp;
1302     double frame_delay, pts = pts1;
1303
1304     /* compute the exact PTS for the picture if it is omitted in the stream
1305      * pts1 is the dts of the pkt / pts of the frame */
1306     if (pts != 0) {
1307         /* update video clock with pts, if present */
1308         is->video_clock = pts;
1309     } else {
1310         pts = is->video_clock;
1311     }
1312     /* update video clock for next frame */
1313     frame_delay = av_q2d(is->video_st->codec->time_base);
1314     /* for MPEG2, the frame can be repeated, so we update the
1315        clock accordingly */
1316     frame_delay += src_frame->repeat_pict * (frame_delay * 0.5);
1317     is->video_clock += frame_delay;
1318
1319 #if defined(DEBUG_SYNC) && 0
1320     printf("frame_type=%c clock=%0.3f pts=%0.3f\n",
1321            av_get_picture_type_char(src_frame->pict_type), pts, pts1);
1322 #endif
1323
1324     /* wait until we have space to put a new picture */
1325     SDL_LockMutex(is->pictq_mutex);
1326
1327     while (is->pictq_size >= VIDEO_PICTURE_QUEUE_SIZE &&
1328            !is->videoq.abort_request) {
1329         SDL_CondWait(is->pictq_cond, is->pictq_mutex);
1330     }
1331     SDL_UnlockMutex(is->pictq_mutex);
1332
1333     if (is->videoq.abort_request)
1334         return -1;
1335
1336     vp = &is->pictq[is->pictq_windex];
1337
1338     vp->duration = frame_delay;
1339
1340     /* alloc or resize hardware picture buffer */
1341     if (!vp->bmp ||
1342 #if CONFIG_AVFILTER
1343         vp->width  != is->out_video_filter->inputs[0]->w ||
1344         vp->height != is->out_video_filter->inputs[0]->h) {
1345 #else
1346         vp->width != is->video_st->codec->width ||
1347         vp->height != is->video_st->codec->height) {
1348 #endif
1349         SDL_Event event;
1350
1351         vp->allocated = 0;
1352
1353         /* the allocation must be done in the main thread to avoid
1354            locking problems */
1355         event.type = FF_ALLOC_EVENT;
1356         event.user.data1 = is;
1357         SDL_PushEvent(&event);
1358
1359         /* wait until the picture is allocated */
1360         SDL_LockMutex(is->pictq_mutex);
1361         while (!vp->allocated && !is->videoq.abort_request) {
1362             SDL_CondWait(is->pictq_cond, is->pictq_mutex);
1363         }
1364         /* if the queue is aborted, we have to pop the pending ALLOC event or wait for the allocation to complete */
1365         if (is->videoq.abort_request && SDL_PeepEvents(&event, 1, SDL_GETEVENT, SDL_EVENTMASK(FF_ALLOC_EVENT)) != 1) {
1366             while (!vp->allocated) {
1367                 SDL_CondWait(is->pictq_cond, is->pictq_mutex);
1368             }
1369         }
1370         SDL_UnlockMutex(is->pictq_mutex);
1371
1372         if (is->videoq.abort_request)
1373             return -1;
1374     }
1375
1376     /* if the frame is not skipped, then display it */
1377     if (vp->bmp) {
1378         AVPicture pict;
1379 #if CONFIG_AVFILTER
1380         if(vp->picref)
1381             avfilter_unref_buffer(vp->picref);
1382         vp->picref = src_frame->opaque;
1383 #endif
1384
1385         /* get a pointer on the bitmap */
1386         SDL_LockYUVOverlay (vp->bmp);
1387
1388         memset(&pict,0,sizeof(AVPicture));
1389         pict.data[0] = vp->bmp->pixels[0];
1390         pict.data[1] = vp->bmp->pixels[2];
1391         pict.data[2] = vp->bmp->pixels[1];
1392
1393         pict.linesize[0] = vp->bmp->pitches[0];
1394         pict.linesize[1] = vp->bmp->pitches[2];
1395         pict.linesize[2] = vp->bmp->pitches[1];
1396
1397 #if CONFIG_AVFILTER
1398         //FIXME use direct rendering
1399         av_picture_copy(&pict, (AVPicture *)src_frame,
1400                         vp->pix_fmt, vp->width, vp->height);
1401 #else
1402         sws_flags = av_get_int(sws_opts, "sws_flags", NULL);
1403         is->img_convert_ctx = sws_getCachedContext(is->img_convert_ctx,
1404             vp->width, vp->height, vp->pix_fmt, vp->width, vp->height,
1405             PIX_FMT_YUV420P, sws_flags, NULL, NULL, NULL);
1406         if (is->img_convert_ctx == NULL) {
1407             fprintf(stderr, "Cannot initialize the conversion context\n");
1408             exit(1);
1409         }
1410         sws_scale(is->img_convert_ctx, src_frame->data, src_frame->linesize,
1411                   0, vp->height, pict.data, pict.linesize);
1412 #endif
1413         /* update the bitmap content */
1414         SDL_UnlockYUVOverlay(vp->bmp);
1415
1416         vp->pts = pts;
1417         vp->pos = pos;
1418         vp->skip = 0;
1419
1420         /* now we can update the picture count */
1421         if (++is->pictq_windex == VIDEO_PICTURE_QUEUE_SIZE)
1422             is->pictq_windex = 0;
1423         SDL_LockMutex(is->pictq_mutex);
1424         is->pictq_size++;
1425         SDL_UnlockMutex(is->pictq_mutex);
1426     }
1427     return 0;
1428 }
1429
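/* get the next decoded video frame from the video packet queue; returns 1 if a
   frame was decoded, 0 otherwise, < 0 if the queue was aborted. The special
   flush packet resets the decoder and marks queued pictures to be skipped. */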
1430 static int get_video_frame(VideoState *is, AVFrame *frame, int64_t *pts, AVPacket *pkt)
1431 {
1432     int got_picture, i;
1433
1434     if (packet_queue_get(&is->videoq, pkt, 1) < 0)
1435         return -1;
1436
1437     if (pkt->data == flush_pkt.data) {
1438         avcodec_flush_buffers(is->video_st->codec);
1439
1440         SDL_LockMutex(is->pictq_mutex);
1441         // Make sure there are no long-delay timers (ideally we should just flush the queue, but that's harder)
1442         for (i = 0; i < VIDEO_PICTURE_QUEUE_SIZE; i++) {
1443             is->pictq[i].skip = 1;
1444         }
1445         while (is->pictq_size && !is->videoq.abort_request) {
1446             SDL_CondWait(is->pictq_cond, is->pictq_mutex);
1447         }
1448         is->video_current_pos = -1;
1449         SDL_UnlockMutex(is->pictq_mutex);
1450
1451         is->frame_last_pts = AV_NOPTS_VALUE;
1452         is->frame_last_duration = 0;
1453         is->frame_timer = (double)av_gettime() / 1000000.0;
1454         return 0;
1455     }
1456
1457     avcodec_decode_video2(is->video_st->codec, frame, &got_picture, pkt);
1458
1459     if (got_picture) {
1460         if (decoder_reorder_pts == -1) {
1461             *pts = frame->best_effort_timestamp;
1462         } else if (decoder_reorder_pts) {
1463             *pts = frame->pkt_pts;
1464         } else {
1465             *pts = frame->pkt_dts;
1466         }
1467
1468         if (*pts == AV_NOPTS_VALUE) {
1469             *pts = 0;
1470         }
1471
1472         return 1;
1473
1474     }
1475     return 0;
1476 }
1477
1478 #if CONFIG_AVFILTER
1479 typedef struct {
1480     VideoState *is;
1481     AVFrame *frame;
1482     int use_dr1;
1483 } FilterPriv;
1484
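/* get_buffer() callback used for direct rendering: the decoder writes straight
   into a buffer obtained from the filter's output link, so input_request_frame()
   can pass the frame downstream without copying it */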
1485 static int input_get_buffer(AVCodecContext *codec, AVFrame *pic)
1486 {
1487     AVFilterContext *ctx = codec->opaque;
1488     AVFilterBufferRef  *ref;
1489     int perms = AV_PERM_WRITE;
1490     int i, w, h, stride[4];
1491     unsigned edge;
1492     int pixel_size;
1493
1494     av_assert0(codec->flags & CODEC_FLAG_EMU_EDGE);
1495
1496     if (codec->codec->capabilities & CODEC_CAP_NEG_LINESIZES)
1497         perms |= AV_PERM_NEG_LINESIZES;
1498
1499     if(pic->buffer_hints & FF_BUFFER_HINTS_VALID) {
1500         if(pic->buffer_hints & FF_BUFFER_HINTS_READABLE) perms |= AV_PERM_READ;
1501         if(pic->buffer_hints & FF_BUFFER_HINTS_PRESERVE) perms |= AV_PERM_PRESERVE;
1502         if(pic->buffer_hints & FF_BUFFER_HINTS_REUSABLE) perms |= AV_PERM_REUSE2;
1503     }
1504     if(pic->reference) perms |= AV_PERM_READ | AV_PERM_PRESERVE;
1505
1506     w = codec->width;
1507     h = codec->height;
1508
1509     if(av_image_check_size(w, h, 0, codec))
1510         return -1;
1511
1512     avcodec_align_dimensions2(codec, &w, &h, stride);
1513     edge = codec->flags & CODEC_FLAG_EMU_EDGE ? 0 : avcodec_get_edge_width();
1514     w += edge << 1;
1515     h += edge << 1;
1516
1517     if(!(ref = avfilter_get_video_buffer(ctx->outputs[0], perms, w, h)))
1518         return -1;
1519
1520     pixel_size = av_pix_fmt_descriptors[ref->format].comp[0].step_minus1+1;
1521     ref->video->w = codec->width;
1522     ref->video->h = codec->height;
1523     for(i = 0; i < 4; i ++) {
1524         unsigned hshift = (i == 1 || i == 2) ? av_pix_fmt_descriptors[ref->format].log2_chroma_w : 0;
1525         unsigned vshift = (i == 1 || i == 2) ? av_pix_fmt_descriptors[ref->format].log2_chroma_h : 0;
1526
1527         if (ref->data[i]) {
1528             ref->data[i]    += ((edge * pixel_size) >> hshift) + ((edge * ref->linesize[i]) >> vshift);
1529         }
1530         pic->data[i]     = ref->data[i];
1531         pic->linesize[i] = ref->linesize[i];
1532     }
1533     pic->opaque = ref;
1534     pic->age    = INT_MAX;
1535     pic->type   = FF_BUFFER_TYPE_USER;
1536     pic->reordered_opaque = codec->reordered_opaque;
1537     if(codec->pkt) pic->pkt_pts = codec->pkt->pts;
1538     else           pic->pkt_pts = AV_NOPTS_VALUE;
1539     return 0;
1540 }
1541
1542 static void input_release_buffer(AVCodecContext *codec, AVFrame *pic)
1543 {
1544     memset(pic->data, 0, sizeof(pic->data));
1545     avfilter_unref_buffer(pic->opaque);
1546 }
1547
1548 static int input_reget_buffer(AVCodecContext *codec, AVFrame *pic)
1549 {
1550     AVFilterBufferRef *ref = pic->opaque;
1551
1552     if (pic->data[0] == NULL) {
1553         pic->buffer_hints |= FF_BUFFER_HINTS_READABLE;
1554         return codec->get_buffer(codec, pic);
1555     }
1556
1557     if ((codec->width != ref->video->w) || (codec->height != ref->video->h) ||
1558         (codec->pix_fmt != ref->format)) {
1559         av_log(codec, AV_LOG_ERROR, "Picture properties changed.\n");
1560         return -1;
1561     }
1562
1563     pic->reordered_opaque = codec->reordered_opaque;
1564     if(codec->pkt) pic->pkt_pts = codec->pkt->pts;
1565     else           pic->pkt_pts = AV_NOPTS_VALUE;
1566     return 0;
1567 }
1568
1569 static int input_init(AVFilterContext *ctx, const char *args, void *opaque)
1570 {
1571     FilterPriv *priv = ctx->priv;
1572     AVCodecContext *codec;
1573     if(!opaque) return -1;
1574
1575     priv->is = opaque;
1576     codec    = priv->is->video_st->codec;
1577     codec->opaque = ctx;
1578     if((codec->codec->capabilities & CODEC_CAP_DR1)
1579     ) {
1580         av_assert0(codec->flags & CODEC_FLAG_EMU_EDGE);
1581         priv->use_dr1 = 1;
1582         codec->get_buffer     = input_get_buffer;
1583         codec->release_buffer = input_release_buffer;
1584         codec->reget_buffer   = input_reget_buffer;
1585         codec->thread_safe_callbacks = 1;
1586     }
1587
1588     priv->frame = avcodec_alloc_frame();
1589
1590     return 0;
1591 }
1592
1593 static void input_uninit(AVFilterContext *ctx)
1594 {
1595     FilterPriv *priv = ctx->priv;
1596     av_free(priv->frame);
1597 }
1598
1599 static int input_request_frame(AVFilterLink *link)
1600 {
1601     FilterPriv *priv = link->src->priv;
1602     AVFilterBufferRef *picref;
1603     int64_t pts = 0;
1604     AVPacket pkt;
1605     int ret;
1606
1607     while (!(ret = get_video_frame(priv->is, priv->frame, &pts, &pkt)))
1608         av_free_packet(&pkt);
1609     if (ret < 0)
1610         return -1;
1611
1612     if(priv->use_dr1 && priv->frame->opaque) {
1613         picref = avfilter_ref_buffer(priv->frame->opaque, ~0);
1614     } else {
1615         picref = avfilter_get_video_buffer(link, AV_PERM_WRITE, link->w, link->h);
1616         av_image_copy(picref->data, picref->linesize,
1617                       priv->frame->data, priv->frame->linesize,
1618                       picref->format, link->w, link->h);
1619     }
1620     av_free_packet(&pkt);
1621
1622     avfilter_copy_frame_props(picref, priv->frame);
1623     picref->pts = pts;
1624
1625     avfilter_start_frame(link, picref);
1626     avfilter_draw_slice(link, 0, link->h, 1);
1627     avfilter_end_frame(link);
1628
1629     return 0;
1630 }
1631
1632 static int input_query_formats(AVFilterContext *ctx)
1633 {
1634     FilterPriv *priv = ctx->priv;
1635     enum PixelFormat pix_fmts[] = {
1636         priv->is->video_st->codec->pix_fmt, PIX_FMT_NONE
1637     };
1638
1639     avfilter_set_common_pixel_formats(ctx, avfilter_make_format_list(pix_fmts));
1640     return 0;
1641 }
1642
1643 static int input_config_props(AVFilterLink *link)
1644 {
1645     FilterPriv *priv  = link->src->priv;
1646     AVStream *s = priv->is->video_st;
1647
1648     link->w = s->codec->width;
1649     link->h = s->codec->height;
1650     link->sample_aspect_ratio = s->sample_aspect_ratio.num ?
1651         s->sample_aspect_ratio : s->codec->sample_aspect_ratio;
1652     link->time_base = s->time_base;
1653
1654     return 0;
1655 }
1656
1657 static AVFilter input_filter =
1658 {
1659     .name      = "ffplay_input",
1660
1661     .priv_size = sizeof(FilterPriv),
1662
1663     .init      = input_init,
1664     .uninit    = input_uninit,
1665
1666     .query_formats = input_query_formats,
1667
1668     .inputs    = (AVFilterPad[]) {{ .name = NULL }},
1669     .outputs   = (AVFilterPad[]) {{ .name = "default",
1670                                     .type = AVMEDIA_TYPE_VIDEO,
1671                                     .request_frame = input_request_frame,
1672                                     .config_props  = input_config_props, },
1673                                   { .name = NULL }},
1674 };
1675
1676 static int configure_video_filters(AVFilterGraph *graph, VideoState *is, const char *vfilters)
1677 {
1678     char sws_flags_str[128];
1679     int ret;
1680     enum PixelFormat pix_fmts[] = { PIX_FMT_YUV420P, PIX_FMT_NONE };
1681     AVBufferSinkParams *buffersink_params = av_buffersink_params_alloc();
1682     AVFilterContext *filt_src = NULL, *filt_out = NULL;
1683     snprintf(sws_flags_str, sizeof(sws_flags_str), "flags=%d", sws_flags);
1684     graph->scale_sws_opts = av_strdup(sws_flags_str);
1685
1686     if ((ret = avfilter_graph_create_filter(&filt_src, &input_filter, "src",
1687                                             NULL, is, graph)) < 0)
1688         return ret;
1689 #if FF_API_OLD_VSINK_API
1690     ret = avfilter_graph_create_filter(&filt_out, avfilter_get_by_name("buffersink"), "out",
1691                                        NULL, pix_fmts, graph);
1692 #else
1693     buffersink_params->pixel_fmts = pix_fmts;
1694     ret = avfilter_graph_create_filter(&filt_out, avfilter_get_by_name("buffersink"), "out",
1695                                        NULL, buffersink_params, graph);
1696 #endif
1697     av_freep(&buffersink_params);
1698     if (ret < 0)
1699         return ret;
1700
1701     if(vfilters) {
1702         AVFilterInOut *outputs = avfilter_inout_alloc();
1703         AVFilterInOut *inputs  = avfilter_inout_alloc();
1704
1705         outputs->name    = av_strdup("in");
1706         outputs->filter_ctx = filt_src;
1707         outputs->pad_idx = 0;
1708         outputs->next    = NULL;
1709
1710         inputs->name    = av_strdup("out");
1711         inputs->filter_ctx = filt_out;
1712         inputs->pad_idx = 0;
1713         inputs->next    = NULL;
1714
1715         if ((ret = avfilter_graph_parse(graph, vfilters, &inputs, &outputs, NULL)) < 0)
1716             return ret;
1717     } else {
1718         if ((ret = avfilter_link(filt_src, 0, filt_out, 0)) < 0)
1719             return ret;
1720     }
1721
1722     if ((ret = avfilter_graph_config(graph, NULL)) < 0)
1723         return ret;
1724
1725     is->out_video_filter = filt_out;
1726
1727     return ret;
1728 }
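/*
 * Illustrative sketch (not part of ffplay): the wiring done above is what the
 * textual graph description would expand to.  Assuming a hypothetical
 * "-vf yadif" option, the parsed chain becomes
 *
 *     src ("ffplay_input") -> yadif -> filt_out ("buffersink")
 *
 * and an equivalent hand-built setup would look like:
 *
 *     AVFilterContext *filt_yadif;
 *     avfilter_graph_create_filter(&filt_yadif, avfilter_get_by_name("yadif"),
 *                                  "deint", NULL, NULL, graph);
 *     avfilter_link(filt_src,   0, filt_yadif, 0);
 *     avfilter_link(filt_yadif, 0, filt_out,   0);
 *     avfilter_graph_config(graph, NULL);
 *
 * With no -vf option the direct avfilter_link(filt_src, 0, filt_out, 0) above
 * already yields the minimal src -> sink chain.
 */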
1729
1730 #endif  /* CONFIG_AVFILTER */
1731
1732 static int video_thread(void *arg)
1733 {
1734     VideoState *is = arg;
1735     AVFrame *frame= avcodec_alloc_frame();
1736     int64_t pts_int = AV_NOPTS_VALUE, pos = -1;
1737     double pts;
1738     int ret;
1739
1740 #if CONFIG_AVFILTER
1741     AVFilterGraph *graph = avfilter_graph_alloc();
1742     AVFilterContext *filt_out = NULL;
1743     int last_w = is->video_st->codec->width;
1744     int last_h = is->video_st->codec->height;
1745
1746     if ((ret = configure_video_filters(graph, is, vfilters)) < 0)
1747         goto the_end;
1748     filt_out = is->out_video_filter;
1749 #endif
1750
1751     for(;;) {
1752 #if !CONFIG_AVFILTER
1753         AVPacket pkt;
1754 #else
1755         AVFilterBufferRef *picref;
1756         AVRational tb = filt_out->inputs[0]->time_base;
1757 #endif
1758         while (is->paused && !is->videoq.abort_request)
1759             SDL_Delay(10);
1760 #if CONFIG_AVFILTER
1761         if (   last_w != is->video_st->codec->width
1762             || last_h != is->video_st->codec->height) {
1763             av_log(NULL, AV_LOG_INFO, "Frame changed from size:%dx%d to size:%dx%d\n",
1764                    last_w, last_h, is->video_st->codec->width, is->video_st->codec->height);
1765             avfilter_graph_free(&graph);
1766             graph = avfilter_graph_alloc();
1767             if ((ret = configure_video_filters(graph, is, vfilters)) < 0)
1768                 goto the_end;
1769             filt_out = is->out_video_filter;
1770             last_w = is->video_st->codec->width;
1771             last_h = is->video_st->codec->height;
1772         }
1773         ret = av_buffersink_get_buffer_ref(filt_out, &picref, 0);
1774         if (picref) {
1775             avfilter_fill_frame_from_video_buffer_ref(frame, picref);
1776             pts_int = picref->pts;
1777             pos     = picref->pos;
1778             frame->opaque = picref;
1779         }
1780
1781         if (av_cmp_q(tb, is->video_st->time_base)) {
1782             av_unused int64_t pts1 = pts_int;
1783             pts_int = av_rescale_q(pts_int, tb, is->video_st->time_base);
1784             av_dlog(NULL, "video_thread(): "
1785                     "tb:%d/%d pts:%"PRId64" -> tb:%d/%d pts:%"PRId64"\n",
1786                     tb.num, tb.den, pts1,
1787                     is->video_st->time_base.num, is->video_st->time_base.den, pts_int);
1788         }
1789 #else
1790         ret = get_video_frame(is, frame, &pts_int, &pkt);
1791         pos = pkt.pos;
1792         av_free_packet(&pkt);
1793 #endif
1794
1795         if (ret < 0) goto the_end;
1796
1797 #if CONFIG_AVFILTER
1798         if (!picref)
1799             continue;
1800 #endif
1801
1802         pts = pts_int*av_q2d(is->video_st->time_base);
1803
1804         ret = queue_picture(is, frame, pts, pos);
1805
1806         if (ret < 0)
1807             goto the_end;
1808
1809         if (is->step)
1810             stream_toggle_pause(is);
1811     }
1812  the_end:
1813 #if CONFIG_AVFILTER
1814     avfilter_graph_free(&graph);
1815 #endif
1816     av_free(frame);
1817     return 0;
1818 }
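/*
 * Worked example (illustrative only, numbers are assumptions) for the
 * time-base handling in video_thread(): if the filter output time base tb is
 * 1/1000000 and the stream time base is 1/90000, a filtered pts of 40000
 * (40 ms) is rescaled with
 *
 *     pts_int = av_rescale_q(40000, (AVRational){1, 1000000},
 *                                   (AVRational){1, 90000});    // -> 3600
 *
 * and then converted to seconds for queue_picture() with
 *
 *     pts = pts_int * av_q2d(is->video_st->time_base);           // -> 0.040
 */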
1819
1820 static int subtitle_thread(void *arg)
1821 {
1822     VideoState *is = arg;
1823     SubPicture *sp;
1824     AVPacket pkt1, *pkt = &pkt1;
1825     int got_subtitle;
1826     double pts;
1827     int i, j;
1828     int r, g, b, y, u, v, a;
1829
1830     for(;;) {
1831         while (is->paused && !is->subtitleq.abort_request) {
1832             SDL_Delay(10);
1833         }
1834         if (packet_queue_get(&is->subtitleq, pkt, 1) < 0)
1835             break;
1836
1837         if(pkt->data == flush_pkt.data){
1838             avcodec_flush_buffers(is->subtitle_st->codec);
1839             continue;
1840         }
1841         SDL_LockMutex(is->subpq_mutex);
1842         while (is->subpq_size >= SUBPICTURE_QUEUE_SIZE &&
1843                !is->subtitleq.abort_request) {
1844             SDL_CondWait(is->subpq_cond, is->subpq_mutex);
1845         }
1846         SDL_UnlockMutex(is->subpq_mutex);
1847
1848         if (is->subtitleq.abort_request)
1849             return 0;
1850
1851         sp = &is->subpq[is->subpq_windex];
1852
1853         /* NOTE: pts is the PTS of the _first_ subtitle picture beginning in
1854            this packet, if any */
1855         pts = 0;
1856         if (pkt->pts != AV_NOPTS_VALUE)
1857             pts = av_q2d(is->subtitle_st->time_base)*pkt->pts;
1858
1859         avcodec_decode_subtitle2(is->subtitle_st->codec, &sp->sub,
1860                                  &got_subtitle, pkt);
1861
1862         if (got_subtitle && sp->sub.format == 0) {
1863             sp->pts = pts;
1864
1865             for (i = 0; i < sp->sub.num_rects; i++)
1866             {
1867                 for (j = 0; j < sp->sub.rects[i]->nb_colors; j++)
1868                 {
1869                     RGBA_IN(r, g, b, a, (uint32_t*)sp->sub.rects[i]->pict.data[1] + j);
1870                     y = RGB_TO_Y_CCIR(r, g, b);
1871                     u = RGB_TO_U_CCIR(r, g, b, 0);
1872                     v = RGB_TO_V_CCIR(r, g, b, 0);
1873                     YUVA_OUT((uint32_t*)sp->sub.rects[i]->pict.data[1] + j, y, u, v, a);
1874                 }
1875             }
1876
1877             /* now we can update the picture count */
1878             if (++is->subpq_windex == SUBPICTURE_QUEUE_SIZE)
1879                 is->subpq_windex = 0;
1880             SDL_LockMutex(is->subpq_mutex);
1881             is->subpq_size++;
1882             SDL_UnlockMutex(is->subpq_mutex);
1883         }
1884         av_free_packet(pkt);
1885     }
1886     return 0;
1887 }
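/*
 * Worked example (illustrative only) for the palette conversion in
 * subtitle_thread(): an opaque red RGBA palette entry converts to CCIR 601
 * limited-range YUV roughly as follows (values are approximate):
 *
 *     int r = 255, g = 0, b = 0, a = 255;
 *     int y = RGB_TO_Y_CCIR(r, g, b);        // ~81  (range 16..235)
 *     int u = RGB_TO_U_CCIR(r, g, b, 0);     // ~90
 *     int v = RGB_TO_V_CCIR(r, g, b, 0);     // ~240
 *     // the alpha value is carried through unchanged by YUVA_OUT()
 */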
1888
1889 /* copy samples for the audio waveform / spectrum display */
1890 static void update_sample_display(VideoState *is, short *samples, int samples_size)
1891 {
1892     int size, len;
1893
1894     size = samples_size / sizeof(short);
1895     while (size > 0) {
1896         len = SAMPLE_ARRAY_SIZE - is->sample_array_index;
1897         if (len > size)
1898             len = size;
1899         memcpy(is->sample_array + is->sample_array_index, samples, len * sizeof(short));
1900         samples += len;
1901         is->sample_array_index += len;
1902         if (is->sample_array_index >= SAMPLE_ARRAY_SIZE)
1903             is->sample_array_index = 0;
1904         size -= len;
1905     }
1906 }
1907
1908 /* return the new audio buffer size (samples can be added or removed
1909    to improve sync when video or an external clock is the master) */
1910 static int synchronize_audio(VideoState *is, short *samples,
1911                              int samples_size1, double pts)
1912 {
1913     int n, samples_size;
1914     double ref_clock;
1915
1916     n = av_get_bytes_per_sample(is->audio_tgt_fmt) * is->audio_tgt_channels;
1917     samples_size = samples_size1;
1918
1919     /* if not master, then we try to remove or add samples to correct the clock */
1920     if (((is->av_sync_type == AV_SYNC_VIDEO_MASTER && is->video_st) ||
1921          is->av_sync_type == AV_SYNC_EXTERNAL_CLOCK)) {
1922         double diff, avg_diff;
1923         int wanted_size, min_size, max_size, nb_samples;
1924
1925         ref_clock = get_master_clock(is);
1926         diff = get_audio_clock(is) - ref_clock;
1927
1928         if (diff < AV_NOSYNC_THRESHOLD) {
1929             is->audio_diff_cum = diff + is->audio_diff_avg_coef * is->audio_diff_cum;
1930             if (is->audio_diff_avg_count < AUDIO_DIFF_AVG_NB) {
1931                 /* not enough measurements yet for a reliable estimate */
1932                 is->audio_diff_avg_count++;
1933             } else {
1934                 /* estimate the A-V difference */
1935                 avg_diff = is->audio_diff_cum * (1.0 - is->audio_diff_avg_coef);
1936
1937                 if (fabs(avg_diff) >= is->audio_diff_threshold) {
1938                     wanted_size = samples_size + ((int)(diff * is->audio_tgt_freq) * n);
1939                     nb_samples = samples_size / n;
1940
1941                     min_size = ((nb_samples * (100 - SAMPLE_CORRECTION_PERCENT_MAX)) / 100) * n;
1942                     max_size = ((nb_samples * (100 + SAMPLE_CORRECTION_PERCENT_MAX)) / 100) * n;
1943                     if (wanted_size < min_size)
1944                         wanted_size = min_size;
1945                     else if (wanted_size > FFMIN3(max_size, sizeof(is->audio_buf1), sizeof(is->audio_buf2)))
1946                         wanted_size = FFMIN3(max_size, sizeof(is->audio_buf1), sizeof(is->audio_buf2));
1947
1948                     /* add or remove samples to correct the sync */
1949                     if (wanted_size < samples_size) {
1950                         /* remove samples */
1951                         samples_size = wanted_size;
1952                     } else if (wanted_size > samples_size) {
1953                         uint8_t *samples_end, *q;
1954                         int nb;
1955
1956                         /* add samples */
1957                         nb = (wanted_size - samples_size);
1958                         samples_end = (uint8_t *)samples + samples_size - n;
1959                         q = samples_end + n;
1960                         while (nb > 0) {
1961                             memcpy(q, samples_end, n);
1962                             q += n;
1963                             nb -= n;
1964                         }
1965                         samples_size = wanted_size;
1966                     }
1967                 }
1968                 av_dlog(NULL, "diff=%f adiff=%f sample_diff=%d apts=%0.3f vpts=%0.3f %f\n",
1969                         diff, avg_diff, samples_size - samples_size1,
1970                         is->audio_clock, is->video_clock, is->audio_diff_threshold);
1971             }
1972         } else {
1973             /* difference is too large: probably initial PTS errors, so
1974                reset the A-V filter */
1975             is->audio_diff_avg_count = 0;
1976             is->audio_diff_cum = 0;
1977         }
1978     }
1979
1980     return samples_size;
1981 }
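/*
 * Worked example (illustrative only, numbers are assumptions) for
 * synchronize_audio(): with a 48 kHz s16 stereo target (n = 4 bytes per
 * frame), a 4096-byte buffer (1024 frames) and an average A-V difference of
 * +20 ms,
 *
 *     wanted_size = 4096 + (int)(0.020 * 48000) * 4 = 7936 bytes
 *
 * which is clamped to max_size = (1024 * 110 / 100) * 4 = 4504 bytes, so
 * about 102 extra frames are appended by repeating the last frame; playback
 * is stretched slightly and the audio position drifts back toward the master
 * clock.
 */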
1982
1983 /* decode one audio frame and return its uncompressed size */
1984 static int audio_decode_frame(VideoState *is, double *pts_ptr)
1985 {
1986     AVPacket *pkt_temp = &is->audio_pkt_temp;
1987     AVPacket *pkt = &is->audio_pkt;
1988     AVCodecContext *dec= is->audio_st->codec;
1989     int len1, len2, data_size, resampled_data_size;
1990     int64_t dec_channel_layout;
1991     double pts;
1992     int new_packet = 0;
1993     int flush_complete = 0;
1994
1995     for(;;) {
1996         /* NOTE: the audio packet can contain several frames */
1997         while (pkt_temp->size > 0 || (!pkt_temp->data && new_packet)) {
1998             if (flush_complete)
1999                 break;
2000             new_packet = 0;
2001             data_size = sizeof(is->audio_buf1);
2002             len1 = avcodec_decode_audio3(dec,
2003                                         (int16_t *)is->audio_buf1, &data_size,
2004                                         pkt_temp);
2005             if (len1 < 0) {
2006                 /* if error, we skip the frame */
2007                 pkt_temp->size = 0;
2008                 break;
2009             }
2010
2011             pkt_temp->data += len1;
2012             pkt_temp->size -= len1;
2013
2014             if (data_size <= 0) {
2015                 /* stop sending empty packets if the decoder is finished */
2016                 if (!pkt_temp->data && dec->codec->capabilities & CODEC_CAP_DELAY)
2017                     flush_complete = 1;
2018                 continue;
2019             }
2020
2021             dec_channel_layout = (dec->channel_layout && dec->channels == av_get_channel_layout_nb_channels(dec->channel_layout)) ? dec->channel_layout : av_get_default_channel_layout(dec->channels);
2022
2023             if (dec->sample_fmt != is->audio_src_fmt || dec_channel_layout != is->audio_src_channel_layout || dec->sample_rate != is->audio_src_freq) {
2024                 if (is->swr_ctx)
2025                     swr_free(&is->swr_ctx);
2026                 is->swr_ctx = swr_alloc2(NULL, is->audio_tgt_channel_layout, is->audio_tgt_fmt, is->audio_tgt_freq,
2027                                                dec_channel_layout,          dec->sample_fmt,   dec->sample_rate,
2028                                                0, NULL);
2029                 if (!is->swr_ctx || swr_init(is->swr_ctx) < 0) {
2030                     fprintf(stderr, "Cannot create sample rate converter for conversion of %d Hz %s %d channels to %d Hz %s %d channels!\n",
2031                         dec->sample_rate,
2032                         av_get_sample_fmt_name(dec->sample_fmt),
2033                         dec->channels,
2034                         is->audio_tgt_freq,
2035                         av_get_sample_fmt_name(is->audio_tgt_fmt),
2036                         is->audio_tgt_channels);
2037                     break;
2038                 }
2039                 is->audio_src_channel_layout = dec_channel_layout;
2040                 is->audio_src_channels = dec->channels;
2041                 is->audio_src_freq = dec->sample_rate;
2042                 is->audio_src_fmt = dec->sample_fmt;
2043             }
2044
2045             resampled_data_size = data_size;
2046             if (is->swr_ctx) {
2047                 const uint8_t *in[] = {is->audio_buf1};
2048                 uint8_t *out[] = {is->audio_buf2};
2049                 len2 = swr_convert(is->swr_ctx, out, sizeof(is->audio_buf2) / is->audio_tgt_channels / av_get_bytes_per_sample(is->audio_tgt_fmt),
2050                                                 in, data_size / dec->channels / av_get_bytes_per_sample(dec->sample_fmt));
2051                 if (len2 < 0) {
2052                     fprintf(stderr, "audio_resample() failed\n");
2053                     break;
2054                 }
2055                 if (len2 == sizeof(is->audio_buf2) / is->audio_tgt_channels / av_get_bytes_per_sample(is->audio_tgt_fmt)) {
2056                     fprintf(stderr, "warning: audio buffer is probably too small\n");
2057                     swr_init(is->swr_ctx);
2058                 }
2059                 is->audio_buf = is->audio_buf2;
2060                 resampled_data_size = len2 * is->audio_tgt_channels * av_get_bytes_per_sample(is->audio_tgt_fmt);
2061             } else {
2062                 is->audio_buf= is->audio_buf1;
2063             }
2064
2065             /* derive the pts from the running audio clock, then advance the clock */
2066             pts = is->audio_clock;
2067             *pts_ptr = pts;
2068             is->audio_clock += (double)data_size / (dec->channels * dec->sample_rate * av_get_bytes_per_sample(dec->sample_fmt));
2069 #ifdef DEBUG
2070             {
2071                 static double last_clock;
2072                 printf("audio: delay=%0.3f clock=%0.3f pts=%0.3f\n",
2073                        is->audio_clock - last_clock,
2074                        is->audio_clock, pts);
2075                 last_clock = is->audio_clock;
2076             }
2077 #endif
2078             return resampled_data_size;
2079         }
2080
2081         /* free the current packet */
2082         if (pkt->data)
2083             av_free_packet(pkt);
2084
2085         if (is->paused || is->audioq.abort_request) {
2086             return -1;
2087         }
2088
2089         /* read next packet */
2090         if ((new_packet = packet_queue_get(&is->audioq, pkt, 1)) < 0)
2091             return -1;
2092
2093         if (pkt->data == flush_pkt.data)
2094             avcodec_flush_buffers(dec);
2095
2096         pkt_temp->data = pkt->data;
2097         pkt_temp->size = pkt->size;
2098
2099         /* update the audio clock with the packet pts, if available */
2100         if (pkt->pts != AV_NOPTS_VALUE) {
2101             is->audio_clock = av_q2d(is->audio_st->time_base)*pkt->pts;
2102         }
2103     }
2104 }
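/*
 * Worked example (illustrative only) for the audio clock update in
 * audio_decode_frame(): one decoded MP3 frame of 1152 samples, s16 stereo at
 * 44100 Hz, gives data_size = 1152 * 2 ch * 2 bytes = 4608 bytes, so
 *
 *     is->audio_clock += 4608.0 / (2 * 44100 * 2);   // ~0.0261 s
 *
 * i.e. the clock tracks the end of the decoded (not yet played) data;
 * sdl_audio_callback() later subtracts the still-buffered amount to obtain
 * the actual playback position.
 */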
2105
2106 /* prepare a new audio buffer */
2107 static void sdl_audio_callback(void *opaque, Uint8 *stream, int len)
2108 {
2109     VideoState *is = opaque;
2110     int audio_size, len1;
2111     int bytes_per_sec;
2112     double pts;
2113
2114     audio_callback_time = av_gettime();
2115
2116     while (len > 0) {
2117         if (is->audio_buf_index >= is->audio_buf_size) {
2118            audio_size = audio_decode_frame(is, &pts);
2119            if (audio_size < 0) {
2120                 /* if error, just output silence */
2121                is->audio_buf = is->audio_buf1;
2122                is->audio_buf_size = 256 * is->audio_tgt_channels * av_get_bytes_per_sample(is->audio_tgt_fmt);
2123                memset(is->audio_buf, 0, is->audio_buf_size);
2124            } else {
2125                if (is->show_mode != SHOW_MODE_VIDEO)
2126                    update_sample_display(is, (int16_t *)is->audio_buf, audio_size);
2127                audio_size = synchronize_audio(is, (int16_t *)is->audio_buf, audio_size,
2128                                               pts);
2129                is->audio_buf_size = audio_size;
2130            }
2131            is->audio_buf_index = 0;
2132         }
2133         len1 = is->audio_buf_size - is->audio_buf_index;
2134         if (len1 > len)
2135             len1 = len;
2136         memcpy(stream, (uint8_t *)is->audio_buf + is->audio_buf_index, len1);
2137         len -= len1;
2138         stream += len1;
2139         is->audio_buf_index += len1;
2140     }
2141     bytes_per_sec = is->audio_tgt_freq * is->audio_tgt_channels * av_get_bytes_per_sample(is->audio_tgt_fmt);
2142     is->audio_write_buf_size = is->audio_buf_size - is->audio_buf_index;
2143     /* Let's assume the audio driver that is used by SDL has two periods. */
2144     is->audio_current_pts = is->audio_clock - (double)(2 * is->audio_hw_buf_size + is->audio_write_buf_size) / bytes_per_sec;
2145     is->audio_current_pts_drift = is->audio_current_pts - audio_callback_time / 1000000.0;
2146 }
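/*
 * Worked example (illustrative only, numbers are assumptions) for the pts
 * computation in sdl_audio_callback(): with s16 stereo at 44100 Hz
 * (bytes_per_sec = 176400), an SDL hardware buffer of 4096 bytes and no
 * leftover bytes in audio_buf, an audio_clock of 10.000 s gives
 *
 *     audio_current_pts = 10.000 - (2 * 4096 + 0) / 176400.0;   // ~9.954 s
 *
 * i.e. roughly 46 ms of audio is assumed to still sit in the two driver
 * periods and has not been heard yet.
 */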
2147
2148 /* open a given stream. Return 0 if OK */
2149 static int stream_component_open(VideoState *is, int stream_index)
2150 {
2151     AVFormatContext *ic = is->ic;
2152     AVCodecContext *avctx;
2153     AVCodec *codec;
2154     SDL_AudioSpec wanted_spec, spec;
2155     AVDictionary *opts;
2156     AVDictionaryEntry *t = NULL;
2157     int64_t wanted_channel_layout = 0;
2158
2159     if (stream_index < 0 || stream_index >= ic->nb_streams)
2160         return -1;
2161     avctx = ic->streams[stream_index]->codec;
2162
2163     opts = filter_codec_opts(codec_opts, avctx->codec_id, ic, ic->streams[stream_index]);
2164
2165     codec = avcodec_find_decoder(avctx->codec_id);
2166     switch(avctx->codec_type){
2167         case AVMEDIA_TYPE_AUDIO   : if(audio_codec_name   ) codec= avcodec_find_decoder_by_name(   audio_codec_name); break;
2168         case AVMEDIA_TYPE_SUBTITLE: if(subtitle_codec_name) codec= avcodec_find_decoder_by_name(subtitle_codec_name); break;
2169         case AVMEDIA_TYPE_VIDEO   : if(video_codec_name   ) codec= avcodec_find_decoder_by_name(   video_codec_name); break;
2170     }
2171     if (!codec)
2172         return -1;
2173
2174     avctx->workaround_bugs = workaround_bugs;
2175     avctx->lowres = lowres;
2176     if(lowres) avctx->flags |= CODEC_FLAG_EMU_EDGE;
2177     avctx->idct_algo= idct;
2178     if(fast) avctx->flags2 |= CODEC_FLAG2_FAST;
2179     avctx->skip_frame= skip_frame;
2180     avctx->skip_idct= skip_idct;
2181     avctx->skip_loop_filter= skip_loop_filter;
2182     avctx->error_recognition= error_recognition;
2183     avctx->error_concealment= error_concealment;
2184
2185     if(codec->capabilities & CODEC_CAP_DR1)
2186         avctx->flags |= CODEC_FLAG_EMU_EDGE;
2187
2188     if (avctx->codec_type == AVMEDIA_TYPE_AUDIO) {
2189         wanted_channel_layout = (avctx->channel_layout && avctx->channels == av_get_channel_layout_nb_channels(avctx->channel_layout)) ? avctx->channel_layout : av_get_default_channel_layout(avctx->channels);
2190         wanted_channel_layout &= ~AV_CH_LAYOUT_STEREO_DOWNMIX;
2191         wanted_spec.channels = av_get_channel_layout_nb_channels(wanted_channel_layout);
2192         wanted_spec.freq = avctx->sample_rate;
2193         if (wanted_spec.freq <= 0 || wanted_spec.channels <= 0) {
2194             fprintf(stderr, "Invalid sample rate or channel count!\n");
2195             return -1;
2196         }
2197     }
2198
2199     if (!codec ||
2200         avcodec_open2(avctx, codec, &opts) < 0)
2201         return -1;
2202     if ((t = av_dict_get(opts, "", NULL, AV_DICT_IGNORE_SUFFIX))) {
2203         av_log(NULL, AV_LOG_ERROR, "Option %s not found.\n", t->key);
2204         return AVERROR_OPTION_NOT_FOUND;
2205     }
2206
2207     /* prepare audio output */
2208     if (avctx->codec_type == AVMEDIA_TYPE_AUDIO) {
2209         wanted_spec.format = AUDIO_S16SYS;
2210         wanted_spec.silence = 0;
2211         wanted_spec.samples = SDL_AUDIO_BUFFER_SIZE;
2212         wanted_spec.callback = sdl_audio_callback;
2213         wanted_spec.userdata = is;
2214         if (SDL_OpenAudio(&wanted_spec, &spec) < 0) {
2215             fprintf(stderr, "SDL_OpenAudio: %s\n", SDL_GetError());
2216             return -1;
2217         }
2218         is->audio_hw_buf_size = spec.size;
2219         if (spec.format != AUDIO_S16SYS) {
2220             fprintf(stderr, "SDL advised audio format %d is not supported!\n", spec.format);
2221             return -1;
2222         }
2223         if (spec.channels != wanted_spec.channels) {
2224             wanted_channel_layout = av_get_default_channel_layout(spec.channels);
2225             if (!wanted_channel_layout) {
2226                 fprintf(stderr, "SDL advised channel count %d is not supported!\n", spec.channels);
2227                 return -1;
2228             }
2229         }
2230         is->audio_src_fmt = is->audio_tgt_fmt = AV_SAMPLE_FMT_S16;
2231         is->audio_src_freq = is->audio_tgt_freq = spec.freq;
2232         is->audio_src_channel_layout = is->audio_tgt_channel_layout = wanted_channel_layout;
2233         is->audio_src_channels = is->audio_tgt_channels = spec.channels;
2234     }
2235
2236     ic->streams[stream_index]->discard = AVDISCARD_DEFAULT;
2237     switch(avctx->codec_type) {
2238     case AVMEDIA_TYPE_AUDIO:
2239         is->audio_stream = stream_index;
2240         is->audio_st = ic->streams[stream_index];
2241         is->audio_buf_size = 0;
2242         is->audio_buf_index = 0;
2243
2244         /* init averaging filter */
2245         is->audio_diff_avg_coef = exp(log(0.01) / AUDIO_DIFF_AVG_NB);
2246         is->audio_diff_avg_count = 0;
2247         /* since we do not have a precise enough audio FIFO fullness measure,
2248            we correct audio sync only if the error is larger than this threshold */
2249         is->audio_diff_threshold = 2.0 * SDL_AUDIO_BUFFER_SIZE / wanted_spec.freq;
2250
2251         memset(&is->audio_pkt, 0, sizeof(is->audio_pkt));
2252         packet_queue_init(&is->audioq);
2253         SDL_PauseAudio(0);
2254         break;
2255     case AVMEDIA_TYPE_VIDEO:
2256         is->video_stream = stream_index;
2257         is->video_st = ic->streams[stream_index];
2258
2259         packet_queue_init(&is->videoq);
2260         is->video_tid = SDL_CreateThread(video_thread, is);
2261         break;
2262     case AVMEDIA_TYPE_SUBTITLE:
2263         is->subtitle_stream = stream_index;
2264         is->subtitle_st = ic->streams[stream_index];
2265         packet_queue_init(&is->subtitleq);
2266
2267         is->subtitle_tid = SDL_CreateThread(subtitle_thread, is);
2268         break;
2269     default:
2270         break;
2271     }
2272     return 0;
2273 }
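/*
 * Worked example (illustrative only) for the sync threshold set in
 * stream_component_open(): with SDL_AUDIO_BUFFER_SIZE = 1024 samples and a
 * 44100 Hz device,
 *
 *     audio_diff_threshold = 2.0 * 1024 / 44100;    // ~0.046 s
 *
 * so A-V drift smaller than about two SDL audio buffers (~46 ms) is ignored,
 * since the hardware buffer fullness is not known more precisely than that.
 */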
2274
2275 static void stream_component_close(VideoState *is, int stream_index)
2276 {
2277     AVFormatContext *ic = is->ic;
2278     AVCodecContext *avctx;
2279
2280     if (stream_index < 0 || stream_index >= ic->nb_streams)
2281         return;
2282     avctx = ic->streams[stream_index]->codec;
2283
2284     switch(avctx->codec_type) {
2285     case AVMEDIA_TYPE_AUDIO:
2286         packet_queue_abort(&is->audioq);
2287
2288         SDL_CloseAudio();
2289
2290         packet_queue_end(&is->audioq);
2291         if (is->swr_ctx)
2292             swr_free(&is->swr_ctx);
2293         av_free_packet(&is->audio_pkt);
2294
2295         if (is->rdft) {
2296             av_rdft_end(is->rdft);
2297             av_freep(&is->rdft_data);
2298         }
2299         break;
2300     case AVMEDIA_TYPE_VIDEO:
2301         packet_queue_abort(&is->videoq);
2302
2303         /* note: we also signal this mutex to make sure we unblock the
2304            video thread in all cases */
2305         SDL_LockMutex(is->pictq_mutex);
2306         SDL_CondSignal(is->pictq_cond);
2307         SDL_UnlockMutex(is->pictq_mutex);
2308
2309         SDL_WaitThread(is->video_tid, NULL);
2310
2311         packet_queue_end(&is->videoq);
2312         break;
2313     case AVMEDIA_TYPE_SUBTITLE:
2314         packet_queue_abort(&is->subtitleq);
2315
2316         /* note: we also signal this mutex to make sure we unblock the
2317            subtitle thread in all cases */
2318         SDL_LockMutex(is->subpq_mutex);
2319         is->subtitle_stream_changed = 1;
2320
2321         SDL_CondSignal(is->subpq_cond);
2322         SDL_UnlockMutex(is->subpq_mutex);
2323
2324         SDL_WaitThread(is->subtitle_tid, NULL);
2325
2326         packet_queue_end(&is->subtitleq);
2327         break;
2328     default:
2329         break;
2330     }
2331
2332     ic->streams[stream_index]->discard = AVDISCARD_ALL;
2333     avcodec_close(avctx);
2334     switch(avctx->codec_type) {
2335     case AVMEDIA_TYPE_AUDIO:
2336         is->audio_st = NULL;
2337         is->audio_stream = -1;
2338         break;
2339     case AVMEDIA_TYPE_VIDEO:
2340         is->video_st = NULL;
2341         is->video_stream = -1;
2342         break;
2343     case AVMEDIA_TYPE_SUBTITLE:
2344         is->subtitle_st = NULL;
2345         is->subtitle_stream = -1;
2346         break;
2347     default:
2348         break;
2349     }
2350 }
2351
2352 /* since we have only one decoding thread, we can use a global
2353    variable instead of a thread local variable */
2354 static VideoState *global_video_state;
2355
2356 static int decode_interrupt_cb(void)
2357 {
2358     return (global_video_state && global_video_state->abort_request);
2359 }
2360
2361 /* this thread gets the stream from the disk or the network */
2362 static int read_thread(void *arg)
2363 {
2364     VideoState *is = arg;
2365     AVFormatContext *ic = NULL;
2366     int err, i, ret;
2367     int st_index[AVMEDIA_TYPE_NB];
2368     AVPacket pkt1, *pkt = &pkt1;
2369     int eof=0;
2370     int pkt_in_play_range = 0;
2371     AVDictionaryEntry *t;
2372     AVDictionary **opts;
2373     int orig_nb_streams;
2374
2375     memset(st_index, -1, sizeof(st_index));
2376     is->video_stream = -1;
2377     is->audio_stream = -1;
2378     is->subtitle_stream = -1;
2379
2380     global_video_state = is;
2381     avio_set_interrupt_cb(decode_interrupt_cb);
2382
2383     err = avformat_open_input(&ic, is->filename, is->iformat, &format_opts);
2384     if (err < 0) {
2385         print_error(is->filename, err);
2386         ret = -1;
2387         goto fail;
2388     }
2389     if ((t = av_dict_get(format_opts, "", NULL, AV_DICT_IGNORE_SUFFIX))) {
2390         av_log(NULL, AV_LOG_ERROR, "Option %s not found.\n", t->key);
2391         ret = AVERROR_OPTION_NOT_FOUND;
2392         goto fail;
2393     }
2394     is->ic = ic;
2395
2396     if(genpts)
2397         ic->flags |= AVFMT_FLAG_GENPTS;
2398
2399     av_dict_set(&codec_opts, "request_channels", "2", 0);
2400
2401     opts = setup_find_stream_info_opts(ic, codec_opts);
2402     orig_nb_streams = ic->nb_streams;
2403
2404     err = avformat_find_stream_info(ic, opts);
2405     if (err < 0) {
2406         fprintf(stderr, "%s: could not find codec parameters\n", is->filename);
2407         ret = -1;
2408         goto fail;
2409     }
2410     for (i = 0; i < orig_nb_streams; i++)
2411         av_dict_free(&opts[i]);
2412     av_freep(&opts);
2413
2414     if(ic->pb)
2415         ic->pb->eof_reached= 0; //FIXME hack, ffplay maybe should not use url_feof() to test for the end
2416
2417     if(seek_by_bytes<0)
2418         seek_by_bytes= !!(ic->iformat->flags & AVFMT_TS_DISCONT);
2419
2420     /* if seeking requested, we execute it */
2421     if (start_time != AV_NOPTS_VALUE) {
2422         int64_t timestamp;
2423
2424         timestamp = start_time;
2425         /* add the stream start time */
2426         if (ic->start_time != AV_NOPTS_VALUE)
2427             timestamp += ic->start_time;
2428         ret = avformat_seek_file(ic, -1, INT64_MIN, timestamp, INT64_MAX, 0);
2429         if (ret < 0) {
2430             fprintf(stderr, "%s: could not seek to position %0.3f\n",
2431                     is->filename, (double)timestamp / AV_TIME_BASE);
2432         }
2433     }
2434
2435     for (i = 0; i < ic->nb_streams; i++)
2436         ic->streams[i]->discard = AVDISCARD_ALL;
2437     if (!video_disable)
2438         st_index[AVMEDIA_TYPE_VIDEO] =
2439             av_find_best_stream(ic, AVMEDIA_TYPE_VIDEO,
2440                                 wanted_stream[AVMEDIA_TYPE_VIDEO], -1, NULL, 0);
2441     if (!audio_disable)
2442         st_index[AVMEDIA_TYPE_AUDIO] =
2443             av_find_best_stream(ic, AVMEDIA_TYPE_AUDIO,
2444                                 wanted_stream[AVMEDIA_TYPE_AUDIO],
2445                                 st_index[AVMEDIA_TYPE_VIDEO],
2446                                 NULL, 0);
2447     if (!video_disable)
2448         st_index[AVMEDIA_TYPE_SUBTITLE] =
2449             av_find_best_stream(ic, AVMEDIA_TYPE_SUBTITLE,
2450                                 wanted_stream[AVMEDIA_TYPE_SUBTITLE],
2451                                 (st_index[AVMEDIA_TYPE_AUDIO] >= 0 ?
2452                                  st_index[AVMEDIA_TYPE_AUDIO] :
2453                                  st_index[AVMEDIA_TYPE_VIDEO]),
2454                                 NULL, 0);
2455     if (show_status) {
2456         av_dump_format(ic, 0, is->filename, 0);
2457     }
2458
2459     is->show_mode = show_mode;
2460
2461     /* open the streams */
2462     if (st_index[AVMEDIA_TYPE_AUDIO] >= 0) {
2463         stream_component_open(is, st_index[AVMEDIA_TYPE_AUDIO]);
2464     }
2465
2466     ret=-1;
2467     if (st_index[AVMEDIA_TYPE_VIDEO] >= 0) {
2468         ret= stream_component_open(is, st_index[AVMEDIA_TYPE_VIDEO]);
2469     }
2470     is->refresh_tid = SDL_CreateThread(refresh_thread, is);
2471     if (is->show_mode == SHOW_MODE_NONE)
2472         is->show_mode = ret >= 0 ? SHOW_MODE_VIDEO : SHOW_MODE_RDFT;
2473
2474     if (st_index[AVMEDIA_TYPE_SUBTITLE] >= 0) {
2475         stream_component_open(is, st_index[AVMEDIA_TYPE_SUBTITLE]);
2476     }
2477
2478     if (is->video_stream < 0 && is->audio_stream < 0) {
2479         fprintf(stderr, "%s: could not open codecs\n", is->filename);
2480         ret = -1;
2481         goto fail;
2482     }
2483
2484     for(;;) {
2485         if (is->abort_request)
2486             break;
2487         if (is->paused != is->last_paused) {
2488             is->last_paused = is->paused;
2489             if (is->paused)
2490                 is->read_pause_return= av_read_pause(ic);
2491             else
2492                 av_read_play(ic);
2493         }
2494 #if CONFIG_RTSP_DEMUXER
2495         if (is->paused && !strcmp(ic->iformat->name, "rtsp")) {
2496             /* wait 10 ms to avoid trying to get another packet */
2497             /* XXX: horrible */
2498             SDL_Delay(10);
2499             continue;
2500         }
2501 #endif
2502         if (is->seek_req) {
2503             int64_t seek_target= is->seek_pos;
2504             int64_t seek_min= is->seek_rel > 0 ? seek_target - is->seek_rel + 2: INT64_MIN;
2505             int64_t seek_max= is->seek_rel < 0 ? seek_target - is->seek_rel - 2: INT64_MAX;
2506 //FIXME the +-2 is due to rounding not being done in the correct direction in the generation
2507 //      of the seek_pos/seek_rel variables
2508
2509             ret = avformat_seek_file(is->ic, -1, seek_min, seek_target, seek_max, is->seek_flags);
2510             if (ret < 0) {
2511                 fprintf(stderr, "%s: error while seeking\n", is->ic->filename);
2512             }else{
2513                 if (is->audio_stream >= 0) {
2514                     packet_queue_flush(&is->audioq);
2515                     packet_queue_put(&is->audioq, &flush_pkt);
2516                 }
2517                 if (is->subtitle_stream >= 0) {
2518                     packet_queue_flush(&is->subtitleq);
2519                     packet_queue_put(&is->subtitleq, &flush_pkt);
2520                 }
2521                 if (is->video_stream >= 0) {
2522                     packet_queue_flush(&is->videoq);
2523                     packet_queue_put(&is->videoq, &flush_pkt);
2524                 }
2525             }
2526             is->seek_req = 0;
2527             eof= 0;
2528         }
2529
2530         /* if the queues are full, no need to read more */
2531         if (   is->audioq.size + is->videoq.size + is->subtitleq.size > MAX_QUEUE_SIZE
2532             || (   (is->audioq   .size  > MIN_AUDIOQ_SIZE || is->audio_stream<0)
2533                 && (is->videoq   .nb_packets > MIN_FRAMES || is->video_stream<0)
2534                 && (is->subtitleq.nb_packets > MIN_FRAMES || is->subtitle_stream<0))) {
2535             /* wait 10 ms */
2536             SDL_Delay(10);
2537             continue;
2538         }
2539         if(eof) {
2540             if(is->video_stream >= 0){
2541                 av_init_packet(pkt);
2542                 pkt->data=NULL;
2543                 pkt->size=0;
2544                 pkt->stream_index= is->video_stream;
2545                 packet_queue_put(&is->videoq, pkt);
2546             }
2547             if (is->audio_stream >= 0 &&
2548                 is->audio_st->codec->codec->capabilities & CODEC_CAP_DELAY) {
2549                 av_init_packet(pkt);
2550                 pkt->data = NULL;
2551                 pkt->size = 0;
2552                 pkt->stream_index = is->audio_stream;
2553                 packet_queue_put(&is->audioq, pkt);
2554             }
2555             SDL_Delay(10);
2556             if(is->audioq.size + is->videoq.size + is->subtitleq.size ==0){
2557                 if(loop!=1 && (!loop || --loop)){
2558                     stream_seek(is, start_time != AV_NOPTS_VALUE ? start_time : 0, 0, 0);
2559                 }else if(autoexit){
2560                     ret=AVERROR_EOF;
2561                     goto fail;
2562                 }
2563             }
2564             eof=0;
2565             continue;
2566         }
2567         ret = av_read_frame(ic, pkt);
2568         if (ret < 0) {
2569             if (ret == AVERROR_EOF || url_feof(ic->pb))
2570                 eof=1;
2571             if (ic->pb && ic->pb->error)
2572                 break;
2573             SDL_Delay(100); /* wait for user event */
2574             continue;
2575         }
2576         /* check if the packet is in the play range specified by the user; queue it if so, otherwise discard it */
2577         pkt_in_play_range = duration == AV_NOPTS_VALUE ||
2578                 (pkt->pts - ic->streams[pkt->stream_index]->start_time) *
2579                 av_q2d(ic->streams[pkt->stream_index]->time_base) -
2580                 (double)(start_time != AV_NOPTS_VALUE ? start_time : 0)/1000000
2581                 <= ((double)duration/1000000);
2582         if (pkt->stream_index == is->audio_stream && pkt_in_play_range) {
2583             packet_queue_put(&is->audioq, pkt);
2584         } else if (pkt->stream_index == is->video_stream && pkt_in_play_range) {
2585             packet_queue_put(&is->videoq, pkt);
2586         } else if (pkt->stream_index == is->subtitle_stream && pkt_in_play_range) {
2587             packet_queue_put(&is->subtitleq, pkt);
2588         } else {
2589             av_free_packet(pkt);
2590         }
2591     }
2592     /* wait until the end */
2593     while (!is->abort_request) {
2594         SDL_Delay(100);
2595     }
2596
2597     ret = 0;
2598  fail:
2599     /* disable interrupting */
2600     global_video_state = NULL;
2601
2602     /* close each stream */
2603     if (is->audio_stream >= 0)
2604         stream_component_close(is, is->audio_stream);
2605     if (is->video_stream >= 0)
2606         stream_component_close(is, is->video_stream);
2607     if (is->subtitle_stream >= 0)
2608         stream_component_close(is, is->subtitle_stream);
2609     if (is->ic) {
2610         av_close_input_file(is->ic);
2611         is->ic = NULL; /* safety */
2612     }
2613     avio_set_interrupt_cb(NULL);
2614
2615     if (ret != 0) {
2616         SDL_Event event;
2617
2618         event.type = FF_QUIT_EVENT;
2619         event.user.data1 = is;
2620         SDL_PushEvent(&event);
2621     }
2622     return 0;
2623 }
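/*
 * Worked example (illustrative only, numbers are assumptions) for the
 * pkt_in_play_range test in read_thread(): with -ss 60 -t 10
 * (start_time = 60000000 and duration = 10000000, both in AV_TIME_BASE
 * units), a stream time base of 1/90000 and a stream start_time of 0:
 *
 *     pkt->pts = 6300000  ->  6300000 / 90000 = 70 s,  70 - 60 <= 10  -> queued
 *     pkt->pts = 6390000  ->  6390000 / 90000 = 71 s,  71 - 60 >  10  -> discarded
 */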
2624
2625 static VideoState *stream_open(const char *filename, AVInputFormat *iformat)
2626 {
2627     VideoState *is;
2628
2629     is = av_mallocz(sizeof(VideoState));
2630     if (!is)
2631         return NULL;
2632     av_strlcpy(is->filename, filename, sizeof(is->filename));
2633     is->iformat = iformat;
2634     is->ytop = 0;
2635     is->xleft = 0;
2636
2637     /* start video display */
2638     is->pictq_mutex = SDL_CreateMutex();
2639     is->pictq_cond = SDL_CreateCond();
2640
2641     is->subpq_mutex = SDL_CreateMutex();
2642     is->subpq_cond = SDL_CreateCond();
2643
2644     is->av_sync_type = av_sync_type;
2645     is->read_tid = SDL_CreateThread(read_thread, is);
2646     if (!is->read_tid) {
2647         av_free(is);
2648         return NULL;
2649     }
2650     return is;
2651 }
2652
2653 static void stream_cycle_channel(VideoState *is, int codec_type)
2654 {
2655     AVFormatContext *ic = is->ic;
2656     int start_index, stream_index;
2657     AVStream *st;
2658
2659     if (codec_type == AVMEDIA_TYPE_VIDEO)
2660         start_index = is->video_stream;
2661     else if (codec_type == AVMEDIA_TYPE_AUDIO)
2662         start_index = is->audio_stream;
2663     else
2664         start_index = is->subtitle_stream;
2665     if (start_index < (codec_type == AVMEDIA_TYPE_SUBTITLE ? -1 : 0))
2666         return;
2667     stream_index = start_index;
2668     for(;;) {
2669         if (++stream_index >= is->ic->nb_streams)
2670         {
2671             if (codec_type == AVMEDIA_TYPE_SUBTITLE)
2672             {
2673                 stream_index = -1;
2674                 goto the_end;
2675             } else
2676                 stream_index = 0;
2677         }
2678         if (stream_index == start_index)
2679             return;
2680         st = ic->streams[stream_index];
2681         if (st->codec->codec_type == codec_type) {
2682             /* check that parameters are OK */
2683             switch(codec_type) {
2684             case AVMEDIA_TYPE_AUDIO:
2685                 if (st->codec->sample_rate != 0 &&
2686                     st->codec->channels != 0)
2687                     goto the_end;
2688                 break;
2689             case AVMEDIA_TYPE_VIDEO:
2690             case AVMEDIA_TYPE_SUBTITLE:
2691                 goto the_end;
2692             default:
2693                 break;
2694             }
2695         }
2696     }
2697  the_end:
2698     stream_component_close(is, start_index);
2699     stream_component_open(is, stream_index);
2700 }
2701
2702
2703 static void toggle_full_screen(VideoState *is)
2704 {
2705     is_full_screen = !is_full_screen;
2706     video_open(is);
2707 }
2708
2709 static void toggle_pause(VideoState *is)
2710 {
2711     stream_toggle_pause(is);
2712     is->step = 0;
2713 }
2714
2715 static void step_to_next_frame(VideoState *is)
2716 {
2717     /* if the stream is paused, unpause it, then step */
2718     if (is->paused)
2719         stream_toggle_pause(is);
2720     is->step = 1;
2721 }
2722
2723 static void toggle_audio_display(VideoState *is)
2724 {
2725     int bgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0x00);
2726     is->show_mode = (is->show_mode + 1) % SHOW_MODE_NB;
2727     fill_rectangle(screen,
2728                 is->xleft, is->ytop, is->width, is->height,
2729                 bgcolor);
2730     SDL_UpdateRect(screen, is->xleft, is->ytop, is->width, is->height);
2731 }
2732
2733 /* handle an event sent by the GUI */
2734 static void event_loop(VideoState *cur_stream)
2735 {
2736     SDL_Event event;
2737     double incr, pos, frac;
2738
2739     for(;;) {
2740         double x;
2741         SDL_WaitEvent(&event);
2742         switch(event.type) {
2743         case SDL_KEYDOWN:
2744             if (exit_on_keydown) {
2745                 do_exit(cur_stream);
2746                 break;
2747             }
2748             switch(event.key.keysym.sym) {
2749             case SDLK_ESCAPE:
2750             case SDLK_q:
2751                 do_exit(cur_stream);
2752                 break;
2753             case SDLK_f:
2754                 toggle_full_screen(cur_stream);
2755                 break;
2756             case SDLK_p:
2757             case SDLK_SPACE:
2758                 toggle_pause(cur_stream);
2759                 break;
2760             case SDLK_s: //S: Step to next frame
2761                 step_to_next_frame(cur_stream);
2762                 break;
2763             case SDLK_a:
2764                 stream_cycle_channel(cur_stream, AVMEDIA_TYPE_AUDIO);
2765                 break;
2766             case SDLK_v:
2767                 stream_cycle_channel(cur_stream, AVMEDIA_TYPE_VIDEO);
2768                 break;
2769             case SDLK_t:
2770                 stream_cycle_channel(cur_stream, AVMEDIA_TYPE_SUBTITLE);
2771                 break;
2772             case SDLK_w:
2773                 toggle_audio_display(cur_stream);
2774                 break;
2775             case SDLK_LEFT:
2776                 incr = -10.0;
2777                 goto do_seek;
2778             case SDLK_RIGHT:
2779                 incr = 10.0;
2780                 goto do_seek;
2781             case SDLK_UP:
2782                 incr = 60.0;
2783                 goto do_seek;
2784             case SDLK_DOWN:
2785                 incr = -60.0;
2786             do_seek:
2787                 if (seek_by_bytes) {
2788                     if (cur_stream->video_stream >= 0 && cur_stream->video_current_pos>=0){
2789                         pos= cur_stream->video_current_pos;
2790                     }else if(cur_stream->audio_stream >= 0 && cur_stream->audio_pkt.pos>=0){
2791                         pos= cur_stream->audio_pkt.pos;
2792                     }else
2793                         pos = avio_tell(cur_stream->ic->pb);
2794                     if (cur_stream->ic->bit_rate)
2795                         incr *= cur_stream->ic->bit_rate / 8.0;
2796                     else
2797                         incr *= 180000.0;
2798                     pos += incr;
2799                     stream_seek(cur_stream, pos, incr, 1);
2800                 } else {
2801                     pos = get_master_clock(cur_stream);
2802                     pos += incr;
2803                     stream_seek(cur_stream, (int64_t)(pos * AV_TIME_BASE), (int64_t)(incr * AV_TIME_BASE), 0);
2804                 }
2805                 break;
2806             default:
2807                 break;
2808             }
2809             break;
2810         case SDL_MOUSEBUTTONDOWN:
2811             if (exit_on_mousedown) {
2812                 do_exit(cur_stream);
2813                 break;
2814             }
2815         case SDL_MOUSEMOTION:
2816             if(event.type ==SDL_MOUSEBUTTONDOWN){
2817                 x= event.button.x;
2818             }else{
2819                 if(event.motion.state != SDL_PRESSED)
2820                     break;
2821                 x= event.motion.x;
2822             }
2823             if(seek_by_bytes || cur_stream->ic->duration<=0){
2824                 uint64_t size=  avio_size(cur_stream->ic->pb);
2825                 stream_seek(cur_stream, size*x/cur_stream->width, 0, 1);
2826             }else{
2827                 int64_t ts;
2828                 int ns, hh, mm, ss;
2829                 int tns, thh, tmm, tss;
2830                 tns = cur_stream->ic->duration/1000000LL;
2831                 thh = tns/3600;
2832                 tmm = (tns%3600)/60;
2833                 tss = (tns%60);
2834                 frac = x/cur_stream->width;
2835                 ns = frac*tns;
2836                 hh = ns/3600;
2837                 mm = (ns%3600)/60;
2838                 ss = (ns%60);
2839                 fprintf(stderr, "Seek to %2.0f%% (%2d:%02d:%02d) of total duration (%2d:%02d:%02d)       \n", frac*100,
2840                         hh, mm, ss, thh, tmm, tss);
2841                 ts = frac*cur_stream->ic->duration;
2842                 if (cur_stream->ic->start_time != AV_NOPTS_VALUE)
2843                     ts += cur_stream->ic->start_time;
2844                 stream_seek(cur_stream, ts, 0, 0);
2845             }
2846             break;
2847         case SDL_VIDEORESIZE:
2848             screen = SDL_SetVideoMode(event.resize.w, event.resize.h, 0,
2849                                       SDL_HWSURFACE|SDL_RESIZABLE|SDL_ASYNCBLIT|SDL_HWACCEL);
2850             screen_width = cur_stream->width = event.resize.w;
2851             screen_height= cur_stream->height= event.resize.h;
2852             break;
2853         case SDL_QUIT:
2854         case FF_QUIT_EVENT:
2855             do_exit(cur_stream);
2856             break;
2857         case FF_ALLOC_EVENT:
2858             video_open(event.user.data1);
2859             alloc_picture(event.user.data1);
2860             break;
2861         case FF_REFRESH_EVENT:
2862             video_refresh(event.user.data1);
2863             cur_stream->refresh=0;
2864             break;
2865         default:
2866             break;
2867         }
2868     }
2869 }
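/*
 * Worked example (illustrative only, numbers are assumptions) for the
 * byte-based seek in event_loop(): with a container bit rate of 2 Mb/s,
 * pressing RIGHT (incr = 10.0 seconds) becomes
 *
 *     incr *= 2000000 / 8.0;          // 250000 bytes per second
 *     pos  += 10.0 * 250000;          // seek about 2.5 MB forward
 *
 * while the 180000.0 fallback corresponds to assuming roughly 1.44 Mb/s when
 * the container reports no bit rate.
 */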
2870
2871 static int opt_frame_size(const char *opt, const char *arg)
2872 {
2873     av_log(NULL, AV_LOG_WARNING, "Option -s is deprecated, use -video_size.\n");
2874     return opt_default("video_size", arg);
2875 }
2876
2877 static int opt_width(const char *opt, const char *arg)
2878 {
2879     screen_width = parse_number_or_die(opt, arg, OPT_INT64, 1, INT_MAX);
2880     return 0;
2881 }
2882
2883 static int opt_height(const char *opt, const char *arg)
2884 {
2885     screen_height = parse_number_or_die(opt, arg, OPT_INT64, 1, INT_MAX);
2886     return 0;
2887 }
2888
2889 static int opt_format(const char *opt, const char *arg)
2890 {
2891     file_iformat = av_find_input_format(arg);
2892     if (!file_iformat) {
2893         fprintf(stderr, "Unknown input format: %s\n", arg);
2894         return AVERROR(EINVAL);
2895     }
2896     return 0;
2897 }
2898
2899 static int opt_frame_pix_fmt(const char *opt, const char *arg)
2900 {
2901     av_log(NULL, AV_LOG_WARNING, "Option -pix_fmt is deprecated, use -pixel_format.\n");
2902     return opt_default("pixel_format", arg);
2903 }
2904
2905 static int opt_sync(const char *opt, const char *arg)
2906 {
2907     if (!strcmp(arg, "audio"))
2908         av_sync_type = AV_SYNC_AUDIO_MASTER;
2909     else if (!strcmp(arg, "video"))
2910         av_sync_type = AV_SYNC_VIDEO_MASTER;
2911     else if (!strcmp(arg, "ext"))
2912         av_sync_type = AV_SYNC_EXTERNAL_CLOCK;
2913     else {
2914         fprintf(stderr, "Unknown value for %s: %s\n", opt, arg);
2915         exit(1);
2916     }
2917     return 0;
2918 }
2919
2920 static int opt_seek(const char *opt, const char *arg)
2921 {
2922     start_time = parse_time_or_die(opt, arg, 1);
2923     return 0;
2924 }
2925
2926 static int opt_duration(const char *opt, const char *arg)
2927 {
2928     duration = parse_time_or_die(opt, arg, 1);
2929     return 0;
2930 }
2931
2932 static int opt_show_mode(const char *opt, const char *arg)
2933 {
2934     show_mode = !strcmp(arg, "video") ? SHOW_MODE_VIDEO :
2935                 !strcmp(arg, "waves") ? SHOW_MODE_WAVES :
2936                 !strcmp(arg, "rdft" ) ? SHOW_MODE_RDFT  :
2937                 parse_number_or_die(opt, arg, OPT_INT, 0, SHOW_MODE_NB-1);
2938     return 0;
2939 }
2940
2941 static void opt_input_file(void *optctx, const char *filename)
2942 {
2943     if (input_filename) {
2944         fprintf(stderr, "Argument '%s' provided as input filename, but '%s' was already specified.\n",
2945                 filename, input_filename);
2946         exit_program(1);
2947     }
2948     if (!strcmp(filename, "-"))
2949         filename = "pipe:";
2950     input_filename = filename;
2951 }
2952
2953 static int opt_codec(void *o, const char *opt, const char *arg)
2954 {
2955     switch(opt[strlen(opt)-1]){
2956     case 'a' :    audio_codec_name = arg; break;
2957     case 's' : subtitle_codec_name = arg; break;
2958     case 'v' :    video_codec_name = arg; break;
2959     }
2960     return 0;
2961 }
2962
2963 static int dummy;
2964
2965 static const OptionDef options[] = {
2966 #include "cmdutils_common_opts.h"
2967     { "x", HAS_ARG, {(void*)opt_width}, "force displayed width", "width" },
2968     { "y", HAS_ARG, {(void*)opt_height}, "force displayed height", "height" },
2969     { "s", HAS_ARG | OPT_VIDEO, {(void*)opt_frame_size}, "set frame size (WxH or abbreviation)", "size" },
2970     { "fs", OPT_BOOL, {(void*)&is_full_screen}, "force full screen" },
2971     { "an", OPT_BOOL, {(void*)&audio_disable}, "disable audio" },
2972     { "vn", OPT_BOOL, {(void*)&video_disable}, "disable video" },
2973     { "ast", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&wanted_stream[AVMEDIA_TYPE_AUDIO]}, "select desired audio stream", "stream_number" },
2974     { "vst", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&wanted_stream[AVMEDIA_TYPE_VIDEO]}, "select desired video stream", "stream_number" },
2975     { "sst", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&wanted_stream[AVMEDIA_TYPE_SUBTITLE]}, "select desired subtitle stream", "stream_number" },
2976     { "ss", HAS_ARG, {(void*)&opt_seek}, "seek to a given position in seconds", "pos" },
2977     { "t", HAS_ARG, {(void*)&opt_duration}, "play \"duration\" seconds of audio/video", "duration" },
2978     { "bytes", OPT_INT | HAS_ARG, {(void*)&seek_by_bytes}, "seek by bytes 0=off 1=on -1=auto", "val" },
2979     { "nodisp", OPT_BOOL, {(void*)&display_disable}, "disable graphical display" },
2980     { "f", HAS_ARG, {(void*)opt_format}, "force format", "fmt" },
2981     { "pix_fmt", HAS_ARG | OPT_EXPERT | OPT_VIDEO, {(void*)opt_frame_pix_fmt}, "set pixel format", "format" },
2982     { "stats", OPT_BOOL | OPT_EXPERT, {(void*)&show_status}, "show status", "" },
2983     { "bug", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&workaround_bugs}, "workaround bugs", "" },
2984     { "fast", OPT_BOOL | OPT_EXPERT, {(void*)&fast}, "non spec compliant optimizations", "" },
2985     { "genpts", OPT_BOOL | OPT_EXPERT, {(void*)&genpts}, "generate pts", "" },
2986     { "drp", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&decoder_reorder_pts}, "let decoder reorder pts 0=off 1=on -1=auto", ""},
2987     { "lowres", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&lowres}, "", "" },
2988     { "skiploop", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&skip_loop_filter}, "", "" },
2989     { "skipframe", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&skip_frame}, "", "" },
2990     { "skipidct", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&skip_idct}, "", "" },
2991     { "idct", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&idct}, "set idct algo",  "algo" },
2992     { "er", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&error_recognition}, "set error detection threshold (0-4)",  "threshold" },
2993     { "ec", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&error_concealment}, "set error concealment options",  "bit_mask" },
2994     { "sync", HAS_ARG | OPT_EXPERT, {(void*)opt_sync}, "set audio-video sync. type (type=audio/video/ext)", "type" },
2995     { "autoexit", OPT_BOOL | OPT_EXPERT, {(void*)&autoexit}, "exit at the end", "" },
2996     { "exitonkeydown", OPT_BOOL | OPT_EXPERT, {(void*)&exit_on_keydown}, "exit on key down", "" },
2997     { "exitonmousedown", OPT_BOOL | OPT_EXPERT, {(void*)&exit_on_mousedown}, "exit on mouse down", "" },
2998     { "loop", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&loop}, "set number of times the playback shall be looped", "loop count" },
2999     { "framedrop", OPT_BOOL | OPT_EXPERT, {(void*)&framedrop}, "drop frames when cpu is too slow", "" },
3000     { "window_title", OPT_STRING | HAS_ARG, {(void*)&window_title}, "set window title", "window title" },
3001 #if CONFIG_AVFILTER
3002     { "vf", OPT_STRING | HAS_ARG, {(void*)&vfilters}, "video filters", "filter list" },
3003 #endif
3004     { "rdftspeed", OPT_INT | HAS_ARG| OPT_AUDIO | OPT_EXPERT, {(void*)&rdftspeed}, "rdft speed", "msecs" },
3005     { "showmode", HAS_ARG, {(void*)opt_show_mode}, "select show mode (0 = video, 1 = waves, 2 = RDFT)", "mode" },
3006     { "default", HAS_ARG | OPT_AUDIO | OPT_VIDEO | OPT_EXPERT, {(void*)opt_default}, "generic catch all option", "" },
3007     { "i", OPT_BOOL, {(void *)&dummy}, "read specified file", "input_file"},
3008     { "codec", HAS_ARG | OPT_FUNC2, {(void*)opt_codec}, "force decoder", "decoder" },
3009     { NULL, },
3010 };
3011
3012 static void show_usage(void)
3013 {
3014     printf("Simple media player\n");
3015     printf("usage: %s [options] input_file\n", program_name);
3016     printf("\n");
3017 }
3018
3019 static int opt_help(const char *opt, const char *arg)
3020 {
3021     av_log_set_callback(log_callback_help);
3022     show_usage();
3023     show_help_options(options, "Main options:\n",
3024                       OPT_EXPERT, 0);
3025     show_help_options(options, "\nAdvanced options:\n",
3026                       OPT_EXPERT, OPT_EXPERT);
3027     printf("\n");
3028     show_help_children(avcodec_get_class(), AV_OPT_FLAG_DECODING_PARAM);
3029     show_help_children(avformat_get_class(), AV_OPT_FLAG_DECODING_PARAM);
3030 #if !CONFIG_AVFILTER
3031     show_help_children(sws_get_class(), AV_OPT_FLAG_ENCODING_PARAM);
3032 #endif
3033     printf("\nWhile playing:\n"
3034            "q, ESC              quit\n"
3035            "f                   toggle full screen\n"
3036            "p, SPC              pause\n"
3037            "a                   cycle audio channel\n"
3038            "v                   cycle video channel\n"
3039            "t                   cycle subtitle channel\n"
3040            "w                   show audio waves\n"
3041            "s                   activate frame-step mode\n"
3042            "left/right          seek backward/forward 10 seconds\n"
3043            "down/up             seek backward/forward 1 minute\n"
3044            "mouse click         seek to percentage in file corresponding to fraction of width\n"
3045            );
3046     return 0;
3047 }
3048
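/* FFmpeg lock manager callback backed by SDL mutexes; registered below in
 * main() via av_lockmgr_register() so the libav* libraries are thread safe. */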
3049 static int lockmgr(void **mtx, enum AVLockOp op)
3050 {
3051     switch (op) {
3052     case AV_LOCK_CREATE:
3053         *mtx = SDL_CreateMutex();
3054         if (!*mtx)
3055             return 1;
3056         return 0;
3057     case AV_LOCK_OBTAIN:
3058         return !!SDL_LockMutex(*mtx);
3059     case AV_LOCK_RELEASE:
3060         return !!SDL_UnlockMutex(*mtx);
3061     case AV_LOCK_DESTROY:
3062         SDL_DestroyMutex(*mtx);
3063         return 0;
3064     }
3065     return 1;
3066 }
3067
3068 /* program entry point */
3069 int main(int argc, char **argv)
3070 {
3071     int flags;
3072     VideoState *is;
3073
3074     av_log_set_flags(AV_LOG_SKIP_REPEATED);
3075     parse_loglevel(argc, argv, options);
3076
3077     /* register all codecs, demuxers and protocols */
3078     avcodec_register_all();
3079 #if CONFIG_AVDEVICE
3080     avdevice_register_all();
3081 #endif
3082 #if CONFIG_AVFILTER
3083     avfilter_register_all();
3084 #endif
3085     av_register_all();
3086
3087     init_opts();
3088
3089     show_banner();
3090
3091     parse_options(NULL, argc, argv, options, opt_input_file);
3092
3093     if (!input_filename) {
3094         show_usage();
3095         fprintf(stderr, "An input file must be specified\n");
3096         fprintf(stderr, "Use -h to get full help or, even better, run 'man %s'\n", program_name);
3097         exit(1);
3098     }
3099
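    /* disabling the display (-nodisp) implies disabling video output entirely */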
3100     if (display_disable) {
3101         video_disable = 1;
3102     }
3103     flags = SDL_INIT_VIDEO | SDL_INIT_AUDIO | SDL_INIT_TIMER;
3104     if (audio_disable)
3105         flags &= ~SDL_INIT_AUDIO;
3106 #if !defined(__MINGW32__) && !defined(__APPLE__)
3107     flags |= SDL_INIT_EVENTTHREAD; /* Not supported on Windows or Mac OS X */
3108 #endif
3109     if (SDL_Init(flags)) {
3110         fprintf(stderr, "Could not initialize SDL - %s\n", SDL_GetError());
3111         fprintf(stderr, "(Did you set the DISPLAY variable?)\n");
3112         exit(1);
3113     }
3114
3115     if (!display_disable) {
3116 #if HAVE_SDL_VIDEO_SIZE
3117         const SDL_VideoInfo *vi = SDL_GetVideoInfo();
3118         fs_screen_width = vi->current_w;
3119         fs_screen_height = vi->current_h;
3120 #endif
3121     }
3122
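    /* ignore events ffplay does not handle so they do not pile up in the SDL event queue */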
3123     SDL_EventState(SDL_ACTIVEEVENT, SDL_IGNORE);
3124     SDL_EventState(SDL_SYSWMEVENT, SDL_IGNORE);
3125     SDL_EventState(SDL_USEREVENT, SDL_IGNORE);
3126
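    /* hand the SDL-based lock manager above to the libav* libraries for thread safety */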
3127     if (av_lockmgr_register(lockmgr)) {
3128         fprintf(stderr, "Could not initialize lock manager!\n");
3129         do_exit(NULL);
3130     }
3131
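    /* flush_pkt is a sentinel packet: it is put into the packet queues on seek
     * so the decoding threads know to flush their codec buffers */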
3132     av_init_packet(&flush_pkt);
3133     flush_pkt.data = (uint8_t *)"FLUSH";
3134
3135     is = stream_open(input_filename, file_iformat);
3136     if (!is) {
3137         fprintf(stderr, "Failed to initialize VideoState!\n");
3138         do_exit(NULL);
3139     }
3140
3141     event_loop(is);
3142
3143     /* never returns */
3144
3145     return 0;
3146 }