1 /*
2  * ffplay : Simple Media Player based on the FFmpeg libraries
3  * Copyright (c) 2003 Fabrice Bellard
4  *
5  * This file is part of FFmpeg.
6  *
7  * FFmpeg is free software; you can redistribute it and/or
8  * modify it under the terms of the GNU Lesser General Public
9  * License as published by the Free Software Foundation; either
10  * version 2.1 of the License, or (at your option) any later version.
11  *
12  * FFmpeg is distributed in the hope that it will be useful,
13  * but WITHOUT ANY WARRANTY; without even the implied warranty of
14  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
15  * Lesser General Public License for more details.
16  *
17  * You should have received a copy of the GNU Lesser General Public
18  * License along with FFmpeg; if not, write to the Free Software
19  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
20  */
21
22 #include "config.h"
23 #include <inttypes.h>
24 #include <math.h>
25 #include <limits.h>
26 #include "libavutil/avstring.h"
27 #include "libavutil/colorspace.h"
28 #include "libavutil/mathematics.h"
29 #include "libavutil/pixdesc.h"
30 #include "libavutil/imgutils.h"
31 #include "libavutil/dict.h"
32 #include "libavutil/parseutils.h"
33 #include "libavutil/samplefmt.h"
34 #include "libavutil/avassert.h"
35 #include "libavformat/avformat.h"
36 #include "libavdevice/avdevice.h"
37 #include "libswscale/swscale.h"
38 #include "libavcodec/audioconvert.h"
39 #include "libavutil/opt.h"
40 #include "libavcodec/avfft.h"
41 #include "libswresample/swresample.h"
42
43 #if CONFIG_AVFILTER
44 # include "libavfilter/avcodec.h"
45 # include "libavfilter/avfilter.h"
46 # include "libavfilter/avfiltergraph.h"
47 # include "libavfilter/buffersink.h"
48 #endif
49
50 #include <SDL.h>
51 #include <SDL_thread.h>
52
53 #include "cmdutils.h"
54
55 #include <unistd.h>
56 #include <assert.h>
57
58 const char program_name[] = "ffplay";
59 const int program_birth_year = 2003;
60
61 #define MAX_QUEUE_SIZE (15 * 1024 * 1024)
62 #define MIN_AUDIOQ_SIZE (20 * 16 * 1024)
63 #define MIN_FRAMES 5
64
65 /* SDL audio buffer size, in samples. Should be small to have precise
66    A/V sync as SDL does not have hardware buffer fullness info. */
67 #define SDL_AUDIO_BUFFER_SIZE 1024
68
69 /* no AV sync correction is done if below the AV sync threshold */
70 #define AV_SYNC_THRESHOLD 0.01
71 /* no AV correction is done if the error is too big */
72 #define AV_NOSYNC_THRESHOLD 10.0
73
74 /* maximum audio speed change to get correct sync */
75 #define SAMPLE_CORRECTION_PERCENT_MAX 10
76
77 /* we use about AUDIO_DIFF_AVG_NB A-V differences to compute the average */
78 #define AUDIO_DIFF_AVG_NB   20
79
80 /* NOTE: the size must be big enough to compensate for the hardware audio buffer size */
81 #define SAMPLE_ARRAY_SIZE (2*65536)
82
83 static int sws_flags = SWS_BICUBIC;
84
85 typedef struct PacketQueue {
86     AVPacketList *first_pkt, *last_pkt;
87     int nb_packets;
88     int size;
89     int abort_request;
90     SDL_mutex *mutex;
91     SDL_cond *cond;
92 } PacketQueue;
93
94 #define VIDEO_PICTURE_QUEUE_SIZE 2
95 #define SUBPICTURE_QUEUE_SIZE 4
96
97 typedef struct VideoPicture {
98     double pts;                                  ///<presentation time stamp for this picture
99     double duration;                             ///<expected duration of the frame
100     int64_t pos;                                 ///<byte position in file
101     int skip;
102     SDL_Overlay *bmp;
103     int width, height; /* source height & width */
104     int allocated;
105     int reallocate;
106     enum PixelFormat pix_fmt;
107
108 #if CONFIG_AVFILTER
109     AVFilterBufferRef *picref;
110 #endif
111 } VideoPicture;
112
113 typedef struct SubPicture {
114     double pts; /* presentation time stamp for this picture */
115     AVSubtitle sub;
116 } SubPicture;
117
118 enum {
119     AV_SYNC_AUDIO_MASTER, /* default choice */
120     AV_SYNC_VIDEO_MASTER,
121     AV_SYNC_EXTERNAL_CLOCK, /* synchronize to an external clock */
122 };
123
124 typedef struct VideoState {
125     SDL_Thread *read_tid;
126     SDL_Thread *video_tid;
127     SDL_Thread *refresh_tid;
128     AVInputFormat *iformat;
129     int no_background;
130     int abort_request;
131     int paused;
132     int last_paused;
133     int seek_req;
134     int seek_flags;
135     int64_t seek_pos;
136     int64_t seek_rel;
137     int read_pause_return;
138     AVFormatContext *ic;
139
140     int audio_stream;
141
142     int av_sync_type;
143     double external_clock; /* external clock base */
144     int64_t external_clock_time;
145
146     double audio_clock;
147     double audio_diff_cum; /* used for AV difference average computation */
148     double audio_diff_avg_coef;
149     double audio_diff_threshold;
150     int audio_diff_avg_count;
151     AVStream *audio_st;
152     PacketQueue audioq;
153     int audio_hw_buf_size;
154     /* samples output by the codec. we reserve more space for avsync
155        compensation, resampling and format conversion */
156     DECLARE_ALIGNED(16,uint8_t,audio_buf1)[AVCODEC_MAX_AUDIO_FRAME_SIZE * 4];
157     DECLARE_ALIGNED(16,uint8_t,audio_buf2)[AVCODEC_MAX_AUDIO_FRAME_SIZE * 4];
158     uint8_t *audio_buf;
159     unsigned int audio_buf_size; /* in bytes */
160     int audio_buf_index; /* in bytes */
161     int audio_write_buf_size;
162     AVPacket audio_pkt_temp;
163     AVPacket audio_pkt;
164     enum AVSampleFormat audio_src_fmt;
165     enum AVSampleFormat audio_tgt_fmt;
166     int audio_src_channels;
167     int audio_tgt_channels;
168     int64_t audio_src_channel_layout;
169     int64_t audio_tgt_channel_layout;
170     int audio_src_freq;
171     int audio_tgt_freq;
172     struct SwrContext *swr_ctx;
173     double audio_current_pts;
174     double audio_current_pts_drift;
175
176     enum ShowMode {
177         SHOW_MODE_NONE = -1, SHOW_MODE_VIDEO = 0, SHOW_MODE_WAVES, SHOW_MODE_RDFT, SHOW_MODE_NB
178     } show_mode;
179     int16_t sample_array[SAMPLE_ARRAY_SIZE];
180     int sample_array_index;
181     int last_i_start;
182     RDFTContext *rdft;
183     int rdft_bits;
184     FFTSample *rdft_data;
185     int xpos;
186
187     SDL_Thread *subtitle_tid;
188     int subtitle_stream;
189     int subtitle_stream_changed;
190     AVStream *subtitle_st;
191     PacketQueue subtitleq;
192     SubPicture subpq[SUBPICTURE_QUEUE_SIZE];
193     int subpq_size, subpq_rindex, subpq_windex;
194     SDL_mutex *subpq_mutex;
195     SDL_cond *subpq_cond;
196
197     double frame_timer;
198     double frame_last_pts;
199     double frame_last_duration;
200     double frame_last_dropped_pts;
201     int64_t frame_last_dropped_pos;
202     double video_clock;                          ///<pts of last decoded frame / predicted pts of next decoded frame
203     int video_stream;
204     AVStream *video_st;
205     PacketQueue videoq;
206     double video_current_pts;                    ///<current displayed pts (different from video_clock if frame fifos are used)
207     double video_current_pts_drift;              ///<video_current_pts - time (av_gettime) at which we updated video_current_pts - used to have running video pts
208     int64_t video_current_pos;                   ///<current displayed file pos
209     VideoPicture pictq[VIDEO_PICTURE_QUEUE_SIZE];
210     int pictq_size, pictq_rindex, pictq_windex;
211     SDL_mutex *pictq_mutex;
212     SDL_cond *pictq_cond;
213 #if !CONFIG_AVFILTER
214     struct SwsContext *img_convert_ctx;
215 #endif
216
217     char filename[1024];
218     int width, height, xleft, ytop;
219     int step;
220
221 #if CONFIG_AVFILTER
222     AVFilterContext *out_video_filter;          ///<the last filter in the video chain
223 #endif
224
225     int refresh;
226 } VideoState;
227
228 static int opt_help(const char *opt, const char *arg);
229
230 /* options specified by the user */
231 static AVInputFormat *file_iformat;
232 static const char *input_filename;
233 static const char *window_title;
234 static int fs_screen_width;
235 static int fs_screen_height;
236 static int screen_width = 0;
237 static int screen_height = 0;
238 static int audio_disable;
239 static int video_disable;
240 static int wanted_stream[AVMEDIA_TYPE_NB]={
241     [AVMEDIA_TYPE_AUDIO]=-1,
242     [AVMEDIA_TYPE_VIDEO]=-1,
243     [AVMEDIA_TYPE_SUBTITLE]=-1,
244 };
245 static int seek_by_bytes=-1;
246 static int display_disable;
247 static int show_status = 1;
248 static int av_sync_type = AV_SYNC_AUDIO_MASTER;
249 static int64_t start_time = AV_NOPTS_VALUE;
250 static int64_t duration = AV_NOPTS_VALUE;
251 static int workaround_bugs = 1;
252 static int fast = 0;
253 static int genpts = 0;
254 static int lowres = 0;
255 static int idct = FF_IDCT_AUTO;
256 static enum AVDiscard skip_frame= AVDISCARD_DEFAULT;
257 static enum AVDiscard skip_idct= AVDISCARD_DEFAULT;
258 static enum AVDiscard skip_loop_filter= AVDISCARD_DEFAULT;
259 static int error_recognition = FF_ER_CAREFUL;
260 static int error_concealment = 3;
261 static int decoder_reorder_pts= -1;
262 static int autoexit;
263 static int exit_on_keydown;
264 static int exit_on_mousedown;
265 static int loop=1;
266 static int framedrop=-1;
267 static enum ShowMode show_mode = SHOW_MODE_NONE;
268 static const char *audio_codec_name;
269 static const char *subtitle_codec_name;
270 static const char *video_codec_name;
271
272 static int rdftspeed=20;
273 #if CONFIG_AVFILTER
274 static char *vfilters = NULL;
275 #endif
276
277 /* current context */
278 static int is_full_screen;
279 static int64_t audio_callback_time;
280
281 static AVPacket flush_pkt;
282
283 #define FF_ALLOC_EVENT   (SDL_USEREVENT)
284 #define FF_REFRESH_EVENT (SDL_USEREVENT + 1)
285 #define FF_QUIT_EVENT    (SDL_USEREVENT + 2)
286
287 static SDL_Surface *screen;
288
289 void exit_program(int ret)
290 {
291     exit(ret);
292 }
293
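/* append a packet to the queue: duplicate its data (except for the special flush packet), link it in and wake up a waiting reader */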
294 static int packet_queue_put(PacketQueue *q, AVPacket *pkt)
295 {
296     AVPacketList *pkt1;
297
298     /* duplicate the packet */
299     if (pkt!=&flush_pkt && av_dup_packet(pkt) < 0)
300         return -1;
301
302     pkt1 = av_malloc(sizeof(AVPacketList));
303     if (!pkt1)
304         return -1;
305     pkt1->pkt = *pkt;
306     pkt1->next = NULL;
307
308
309     SDL_LockMutex(q->mutex);
310
311     if (!q->last_pkt)
312
313         q->first_pkt = pkt1;
314     else
315         q->last_pkt->next = pkt1;
316     q->last_pkt = pkt1;
317     q->nb_packets++;
318     q->size += pkt1->pkt.size + sizeof(*pkt1);
319     /* XXX: should duplicate packet data in DV case */
320     SDL_CondSignal(q->cond);
321
322     SDL_UnlockMutex(q->mutex);
323     return 0;
324 }
325
326 /* packet queue handling */
327 static void packet_queue_init(PacketQueue *q)
328 {
329     memset(q, 0, sizeof(PacketQueue));
330     q->mutex = SDL_CreateMutex();
331     q->cond = SDL_CreateCond();
332     packet_queue_put(q, &flush_pkt);
333 }
334
335 static void packet_queue_flush(PacketQueue *q)
336 {
337     AVPacketList *pkt, *pkt1;
338
339     SDL_LockMutex(q->mutex);
340     for(pkt = q->first_pkt; pkt != NULL; pkt = pkt1) {
341         pkt1 = pkt->next;
342         av_free_packet(&pkt->pkt);
343         av_freep(&pkt);
344     }
345     q->last_pkt = NULL;
346     q->first_pkt = NULL;
347     q->nb_packets = 0;
348     q->size = 0;
349     SDL_UnlockMutex(q->mutex);
350 }
351
352 static void packet_queue_end(PacketQueue *q)
353 {
354     packet_queue_flush(q);
355     SDL_DestroyMutex(q->mutex);
356     SDL_DestroyCond(q->cond);
357 }
358
359 static void packet_queue_abort(PacketQueue *q)
360 {
361     SDL_LockMutex(q->mutex);
362
363     q->abort_request = 1;
364
365     SDL_CondSignal(q->cond);
366
367     SDL_UnlockMutex(q->mutex);
368 }
369
370 /* return < 0 if aborted, 0 if no packet and > 0 if packet.  */
371 static int packet_queue_get(PacketQueue *q, AVPacket *pkt, int block)
372 {
373     AVPacketList *pkt1;
374     int ret;
375
376     SDL_LockMutex(q->mutex);
377
378     for(;;) {
379         if (q->abort_request) {
380             ret = -1;
381             break;
382         }
383
384         pkt1 = q->first_pkt;
385         if (pkt1) {
386             q->first_pkt = pkt1->next;
387             if (!q->first_pkt)
388                 q->last_pkt = NULL;
389             q->nb_packets--;
390             q->size -= pkt1->pkt.size + sizeof(*pkt1);
391             *pkt = pkt1->pkt;
392             av_free(pkt1);
393             ret = 1;
394             break;
395         } else if (!block) {
396             ret = 0;
397             break;
398         } else {
399             SDL_CondWait(q->cond, q->mutex);
400         }
401     }
402     SDL_UnlockMutex(q->mutex);
403     return ret;
404 }
405
406 static inline void fill_rectangle(SDL_Surface *screen,
407                                   int x, int y, int w, int h, int color)
408 {
409     SDL_Rect rect;
410     rect.x = x;
411     rect.y = y;
412     rect.w = w;
413     rect.h = h;
414     SDL_FillRect(screen, &rect, color);
415 }
416
417 #define ALPHA_BLEND(a, oldp, newp, s)\
418 ((((oldp << s) * (255 - (a))) + (newp * (a))) / (255 << s))
419
420 #define RGBA_IN(r, g, b, a, s)\
421 {\
422     unsigned int v = ((const uint32_t *)(s))[0];\
423     a = (v >> 24) & 0xff;\
424     r = (v >> 16) & 0xff;\
425     g = (v >> 8) & 0xff;\
426     b = v & 0xff;\
427 }
428
429 #define YUVA_IN(y, u, v, a, s, pal)\
430 {\
431     unsigned int val = ((const uint32_t *)(pal))[*(const uint8_t*)(s)];\
432     a = (val >> 24) & 0xff;\
433     y = (val >> 16) & 0xff;\
434     u = (val >> 8) & 0xff;\
435     v = val & 0xff;\
436 }
437
438 #define YUVA_OUT(d, y, u, v, a)\
439 {\
440     ((uint32_t *)(d))[0] = (a << 24) | (y << 16) | (u << 8) | v;\
441 }
442
443
444 #define BPP 1
445
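/* alpha-blend a palettized subtitle rectangle onto a YUV420P destination picture, clipped to the image size */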
446 static void blend_subrect(AVPicture *dst, const AVSubtitleRect *rect, int imgw, int imgh)
447 {
448     int wrap, wrap3, width2, skip2;
449     int y, u, v, a, u1, v1, a1, w, h;
450     uint8_t *lum, *cb, *cr;
451     const uint8_t *p;
452     const uint32_t *pal;
453     int dstx, dsty, dstw, dsth;
454
455     dstw = av_clip(rect->w, 0, imgw);
456     dsth = av_clip(rect->h, 0, imgh);
457     dstx = av_clip(rect->x, 0, imgw - dstw);
458     dsty = av_clip(rect->y, 0, imgh - dsth);
459     lum = dst->data[0] + dsty * dst->linesize[0];
460     cb = dst->data[1] + (dsty >> 1) * dst->linesize[1];
461     cr = dst->data[2] + (dsty >> 1) * dst->linesize[2];
462
463     width2 = ((dstw + 1) >> 1) + (dstx & ~dstw & 1);
464     skip2 = dstx >> 1;
465     wrap = dst->linesize[0];
466     wrap3 = rect->pict.linesize[0];
467     p = rect->pict.data[0];
468     pal = (const uint32_t *)rect->pict.data[1];  /* Now in YCrCb! */
469
470     if (dsty & 1) {
471         lum += dstx;
472         cb += skip2;
473         cr += skip2;
474
475         if (dstx & 1) {
476             YUVA_IN(y, u, v, a, p, pal);
477             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
478             cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
479             cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
480             cb++;
481             cr++;
482             lum++;
483             p += BPP;
484         }
485         for(w = dstw - (dstx & 1); w >= 2; w -= 2) {
486             YUVA_IN(y, u, v, a, p, pal);
487             u1 = u;
488             v1 = v;
489             a1 = a;
490             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
491
492             YUVA_IN(y, u, v, a, p + BPP, pal);
493             u1 += u;
494             v1 += v;
495             a1 += a;
496             lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
497             cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
498             cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
499             cb++;
500             cr++;
501             p += 2 * BPP;
502             lum += 2;
503         }
504         if (w) {
505             YUVA_IN(y, u, v, a, p, pal);
506             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
507             cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
508             cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
509             p++;
510             lum++;
511         }
512         p += wrap3 - dstw * BPP;
513         lum += wrap - dstw - dstx;
514         cb += dst->linesize[1] - width2 - skip2;
515         cr += dst->linesize[2] - width2 - skip2;
516     }
517     for(h = dsth - (dsty & 1); h >= 2; h -= 2) {
518         lum += dstx;
519         cb += skip2;
520         cr += skip2;
521
522         if (dstx & 1) {
523             YUVA_IN(y, u, v, a, p, pal);
524             u1 = u;
525             v1 = v;
526             a1 = a;
527             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
528             p += wrap3;
529             lum += wrap;
530             YUVA_IN(y, u, v, a, p, pal);
531             u1 += u;
532             v1 += v;
533             a1 += a;
534             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
535             cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
536             cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
537             cb++;
538             cr++;
539             p += -wrap3 + BPP;
540             lum += -wrap + 1;
541         }
542         for(w = dstw - (dstx & 1); w >= 2; w -= 2) {
543             YUVA_IN(y, u, v, a, p, pal);
544             u1 = u;
545             v1 = v;
546             a1 = a;
547             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
548
549             YUVA_IN(y, u, v, a, p + BPP, pal);
550             u1 += u;
551             v1 += v;
552             a1 += a;
553             lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
554             p += wrap3;
555             lum += wrap;
556
557             YUVA_IN(y, u, v, a, p, pal);
558             u1 += u;
559             v1 += v;
560             a1 += a;
561             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
562
563             YUVA_IN(y, u, v, a, p + BPP, pal);
564             u1 += u;
565             v1 += v;
566             a1 += a;
567             lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
568
569             cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 2);
570             cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 2);
571
572             cb++;
573             cr++;
574             p += -wrap3 + 2 * BPP;
575             lum += -wrap + 2;
576         }
577         if (w) {
578             YUVA_IN(y, u, v, a, p, pal);
579             u1 = u;
580             v1 = v;
581             a1 = a;
582             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
583             p += wrap3;
584             lum += wrap;
585             YUVA_IN(y, u, v, a, p, pal);
586             u1 += u;
587             v1 += v;
588             a1 += a;
589             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
590             cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
591             cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
592             cb++;
593             cr++;
594             p += -wrap3 + BPP;
595             lum += -wrap + 1;
596         }
597         p += wrap3 + (wrap3 - dstw * BPP);
598         lum += wrap + (wrap - dstw - dstx);
599         cb += dst->linesize[1] - width2 - skip2;
600         cr += dst->linesize[2] - width2 - skip2;
601     }
602     /* handle odd height */
603     if (h) {
604         lum += dstx;
605         cb += skip2;
606         cr += skip2;
607
608         if (dstx & 1) {
609             YUVA_IN(y, u, v, a, p, pal);
610             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
611             cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
612             cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
613             cb++;
614             cr++;
615             lum++;
616             p += BPP;
617         }
618         for(w = dstw - (dstx & 1); w >= 2; w -= 2) {
619             YUVA_IN(y, u, v, a, p, pal);
620             u1 = u;
621             v1 = v;
622             a1 = a;
623             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
624
625             YUVA_IN(y, u, v, a, p + BPP, pal);
626             u1 += u;
627             v1 += v;
628             a1 += a;
629             lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
630             cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
631             cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
632             cb++;
633             cr++;
634             p += 2 * BPP;
635             lum += 2;
636         }
637         if (w) {
638             YUVA_IN(y, u, v, a, p, pal);
639             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
640             cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
641             cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
642         }
643     }
644 }
645
646 static void free_subpicture(SubPicture *sp)
647 {
648     avsubtitle_free(&sp->sub);
649 }
650
651 static void video_image_display(VideoState *is)
652 {
653     VideoPicture *vp;
654     SubPicture *sp;
655     AVPicture pict;
656     float aspect_ratio;
657     int width, height, x, y;
658     SDL_Rect rect;
659     int i;
660
661     vp = &is->pictq[is->pictq_rindex];
662     if (vp->bmp) {
663 #if CONFIG_AVFILTER
664          if (vp->picref->video->sample_aspect_ratio.num == 0)
665              aspect_ratio = 0;
666          else
667              aspect_ratio = av_q2d(vp->picref->video->sample_aspect_ratio);
668 #else
669
670         /* XXX: use the sample aspect ratio stored with the frame */
671         if (is->video_st->sample_aspect_ratio.num)
672             aspect_ratio = av_q2d(is->video_st->sample_aspect_ratio);
673         else if (is->video_st->codec->sample_aspect_ratio.num)
674             aspect_ratio = av_q2d(is->video_st->codec->sample_aspect_ratio);
675         else
676             aspect_ratio = 0;
677 #endif
678         if (aspect_ratio <= 0.0)
679             aspect_ratio = 1.0;
680         aspect_ratio *= (float)vp->width / (float)vp->height;
681
682         if (is->subtitle_st) {
683             if (is->subpq_size > 0) {
684                 sp = &is->subpq[is->subpq_rindex];
685
686                 if (vp->pts >= sp->pts + ((float) sp->sub.start_display_time / 1000)) {
687                     SDL_LockYUVOverlay (vp->bmp);
688
689                     pict.data[0] = vp->bmp->pixels[0];
690                     pict.data[1] = vp->bmp->pixels[2];
691                     pict.data[2] = vp->bmp->pixels[1];
692
693                     pict.linesize[0] = vp->bmp->pitches[0];
694                     pict.linesize[1] = vp->bmp->pitches[2];
695                     pict.linesize[2] = vp->bmp->pitches[1];
696
697                     for (i = 0; i < sp->sub.num_rects; i++)
698                         blend_subrect(&pict, sp->sub.rects[i],
699                                       vp->bmp->w, vp->bmp->h);
700
701                     SDL_UnlockYUVOverlay (vp->bmp);
702                 }
703             }
704         }
705
706
707         /* XXX: we assume the screen has a 1:1 pixel aspect ratio */
708         height = is->height;
709         width = ((int)rint(height * aspect_ratio)) & ~1;
710         if (width > is->width) {
711             width = is->width;
712             height = ((int)rint(width / aspect_ratio)) & ~1;
713         }
714         x = (is->width - width) / 2;
715         y = (is->height - height) / 2;
716         is->no_background = 0;
717         rect.x = is->xleft + x;
718         rect.y = is->ytop  + y;
719         rect.w = FFMAX(width,  1);
720         rect.h = FFMAX(height, 1);
721         SDL_DisplayYUVOverlay(vp->bmp, &rect);
722     }
723 }
724
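/* modulo that always returns a non-negative result, used to wrap indices into the circular sample array */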
725 static inline int compute_mod(int a, int b)
726 {
727     return a < 0 ? a%b + b : a%b;
728 }
729
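/* draw the audio waveform, or an RDFT-based spectrum, depending on the current show mode */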
730 static void video_audio_display(VideoState *s)
731 {
732     int i, i_start, x, y1, y, ys, delay, n, nb_display_channels;
733     int ch, channels, h, h2, bgcolor, fgcolor;
734     int64_t time_diff;
735     int rdft_bits, nb_freq;
736
737     for(rdft_bits=1; (1<<rdft_bits)<2*s->height; rdft_bits++)
738         ;
739     nb_freq= 1<<(rdft_bits-1);
740
741     /* compute display index : center on currently output samples */
742     channels = s->audio_tgt_channels;
743     nb_display_channels = channels;
744     if (!s->paused) {
745         int data_used= s->show_mode == SHOW_MODE_WAVES ? s->width : (2*nb_freq);
746         n = 2 * channels;
747         delay = s->audio_write_buf_size;
748         delay /= n;
749
750         /* to be more precise, we take into account the time spent since
751            the last buffer computation */
752         if (audio_callback_time) {
753             time_diff = av_gettime() - audio_callback_time;
754             delay -= (time_diff * s->audio_tgt_freq) / 1000000;
755         }
756
757         delay += 2*data_used;
758         if (delay < data_used)
759             delay = data_used;
760
761         i_start= x = compute_mod(s->sample_array_index - delay * channels, SAMPLE_ARRAY_SIZE);
762         if (s->show_mode == SHOW_MODE_WAVES) {
763             h= INT_MIN;
764             for(i=0; i<1000; i+=channels){
765                 int idx= (SAMPLE_ARRAY_SIZE + x - i) % SAMPLE_ARRAY_SIZE;
766                 int a= s->sample_array[idx];
767                 int b= s->sample_array[(idx + 4*channels)%SAMPLE_ARRAY_SIZE];
768                 int c= s->sample_array[(idx + 5*channels)%SAMPLE_ARRAY_SIZE];
769                 int d= s->sample_array[(idx + 9*channels)%SAMPLE_ARRAY_SIZE];
770                 int score= a-d;
771                 if(h<score && (b^c)<0){
772                     h= score;
773                     i_start= idx;
774                 }
775             }
776         }
777
778         s->last_i_start = i_start;
779     } else {
780         i_start = s->last_i_start;
781     }
782
783     bgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0x00);
784     if (s->show_mode == SHOW_MODE_WAVES) {
785         fill_rectangle(screen,
786                        s->xleft, s->ytop, s->width, s->height,
787                        bgcolor);
788
789         fgcolor = SDL_MapRGB(screen->format, 0xff, 0xff, 0xff);
790
791         /* total height for one channel */
792         h = s->height / nb_display_channels;
793         /* graph height / 2 */
794         h2 = (h * 9) / 20;
795         for(ch = 0;ch < nb_display_channels; ch++) {
796             i = i_start + ch;
797             y1 = s->ytop + ch * h + (h / 2); /* position of center line */
798             for(x = 0; x < s->width; x++) {
799                 y = (s->sample_array[i] * h2) >> 15;
800                 if (y < 0) {
801                     y = -y;
802                     ys = y1 - y;
803                 } else {
804                     ys = y1;
805                 }
806                 fill_rectangle(screen,
807                                s->xleft + x, ys, 1, y,
808                                fgcolor);
809                 i += channels;
810                 if (i >= SAMPLE_ARRAY_SIZE)
811                     i -= SAMPLE_ARRAY_SIZE;
812             }
813         }
814
815         fgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0xff);
816
817         for(ch = 1;ch < nb_display_channels; ch++) {
818             y = s->ytop + ch * h;
819             fill_rectangle(screen,
820                            s->xleft, y, s->width, 1,
821                            fgcolor);
822         }
823         SDL_UpdateRect(screen, s->xleft, s->ytop, s->width, s->height);
824     }else{
825         nb_display_channels= FFMIN(nb_display_channels, 2);
826         if(rdft_bits != s->rdft_bits){
827             av_rdft_end(s->rdft);
828             av_free(s->rdft_data);
829             s->rdft = av_rdft_init(rdft_bits, DFT_R2C);
830             s->rdft_bits= rdft_bits;
831             s->rdft_data= av_malloc(4*nb_freq*sizeof(*s->rdft_data));
832         }
833         {
834             FFTSample *data[2];
835             for(ch = 0;ch < nb_display_channels; ch++) {
836                 data[ch] = s->rdft_data + 2*nb_freq*ch;
837                 i = i_start + ch;
838                 for(x = 0; x < 2*nb_freq; x++) {
839                     double w= (x-nb_freq)*(1.0/nb_freq);
840                     data[ch][x]= s->sample_array[i]*(1.0-w*w);
841                     i += channels;
842                     if (i >= SAMPLE_ARRAY_SIZE)
843                         i -= SAMPLE_ARRAY_SIZE;
844                 }
845                 av_rdft_calc(s->rdft, data[ch]);
846             }
847             // least efficient way to do this, we should of course directly access it, but it's more than fast enough
848             for(y=0; y<s->height; y++){
849                 double w= 1/sqrt(nb_freq);
850                 int a= sqrt(w*sqrt(data[0][2*y+0]*data[0][2*y+0] + data[0][2*y+1]*data[0][2*y+1]));
851                 int b= (nb_display_channels == 2 ) ? sqrt(w*sqrt(data[1][2*y+0]*data[1][2*y+0]
852                        + data[1][2*y+1]*data[1][2*y+1])) : a;
853                 a= FFMIN(a,255);
854                 b= FFMIN(b,255);
855                 fgcolor = SDL_MapRGB(screen->format, a, b, (a+b)/2);
856
857                 fill_rectangle(screen,
858                             s->xpos, s->height-y, 1, 1,
859                             fgcolor);
860             }
861         }
862         SDL_UpdateRect(screen, s->xpos, s->ytop, 1, s->height);
863         s->xpos++;
864         if(s->xpos >= s->width)
865             s->xpos= s->xleft;
866     }
867 }
868
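/* signal abort, wait for the read and refresh threads and free everything owned by the VideoState */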
869 static void stream_close(VideoState *is)
870 {
871     VideoPicture *vp;
872     int i;
873     /* XXX: use a special url_shutdown call to abort parse cleanly */
874     is->abort_request = 1;
875     SDL_WaitThread(is->read_tid, NULL);
876     SDL_WaitThread(is->refresh_tid, NULL);
877
878     /* free all pictures */
879     for(i=0;i<VIDEO_PICTURE_QUEUE_SIZE; i++) {
880         vp = &is->pictq[i];
881 #if CONFIG_AVFILTER
882         if (vp->picref) {
883             avfilter_unref_buffer(vp->picref);
884             vp->picref = NULL;
885         }
886 #endif
887         if (vp->bmp) {
888             SDL_FreeYUVOverlay(vp->bmp);
889             vp->bmp = NULL;
890         }
891     }
892     SDL_DestroyMutex(is->pictq_mutex);
893     SDL_DestroyCond(is->pictq_cond);
894     SDL_DestroyMutex(is->subpq_mutex);
895     SDL_DestroyCond(is->subpq_cond);
896 #if !CONFIG_AVFILTER
897     if (is->img_convert_ctx)
898         sws_freeContext(is->img_convert_ctx);
899 #endif
900     av_free(is);
901 }
902
903 static void do_exit(VideoState *is)
904 {
905     if (is) {
906         stream_close(is);
907     }
908     av_lockmgr_register(NULL);
909     uninit_opts();
910 #if CONFIG_AVFILTER
911     avfilter_uninit();
912 #endif
913     if (show_status)
914         printf("\n");
915     SDL_Quit();
916     av_log(NULL, AV_LOG_QUIET, "%s", "");
917     exit(0);
918 }
919
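/* (re)open the SDL video surface, taking the size from the fullscreen mode, the user options, the filter output or the codec */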
920 static int video_open(VideoState *is){
921     int flags = SDL_HWSURFACE|SDL_ASYNCBLIT|SDL_HWACCEL;
922     int w,h;
923
924     if(is_full_screen) flags |= SDL_FULLSCREEN;
925     else               flags |= SDL_RESIZABLE;
926
927     if (is_full_screen && fs_screen_width) {
928         w = fs_screen_width;
929         h = fs_screen_height;
930     } else if(!is_full_screen && screen_width){
931         w = screen_width;
932         h = screen_height;
933 #if CONFIG_AVFILTER
934     }else if (is->out_video_filter && is->out_video_filter->inputs[0]){
935         w = is->out_video_filter->inputs[0]->w;
936         h = is->out_video_filter->inputs[0]->h;
937 #else
938     }else if (is->video_st && is->video_st->codec->width){
939         w = is->video_st->codec->width;
940         h = is->video_st->codec->height;
941 #endif
942     } else {
943         w = 640;
944         h = 480;
945     }
946     if(screen && is->width == screen->w && screen->w == w
947        && is->height== screen->h && screen->h == h)
948         return 0;
949     screen = SDL_SetVideoMode(w, h, 0, flags);
950     if (!screen) {
951         fprintf(stderr, "SDL: could not set video mode - exiting\n");
952         do_exit(is);
953     }
954     if (!window_title)
955         window_title = input_filename;
956     SDL_WM_SetCaption(window_title, window_title);
957
958     is->width = screen->w;
959     is->height = screen->h;
960
961     return 0;
962 }
963
964 /* display the current picture, if any */
965 static void video_display(VideoState *is)
966 {
967     if(!screen)
968         video_open(is);
969     if (is->audio_st && is->show_mode != SHOW_MODE_VIDEO)
970         video_audio_display(is);
971     else if (is->video_st)
972         video_image_display(is);
973 }
974
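/* background thread that periodically pushes FF_REFRESH_EVENT so the display is refreshed from the main event loop */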
975 static int refresh_thread(void *opaque)
976 {
977     VideoState *is= opaque;
978     while(!is->abort_request){
979         SDL_Event event;
980         event.type = FF_REFRESH_EVENT;
981         event.user.data1 = opaque;
982         if(!is->refresh){
983             is->refresh=1;
984             SDL_PushEvent(&event);
985         }
986         // FIXME: ideally we should wait for the correct time, but SDL's event passing is so slow it would be silly
987         usleep(is->audio_st && is->show_mode != SHOW_MODE_VIDEO ? rdftspeed*1000 : 5000);
988     }
989     return 0;
990 }
991
992 /* get the current audio clock value */
993 static double get_audio_clock(VideoState *is)
994 {
995     if (is->paused) {
996         return is->audio_current_pts;
997     } else {
998         return is->audio_current_pts_drift + av_gettime() / 1000000.0;
999     }
1000 }
1001
1002 /* get the current video clock value */
1003 static double get_video_clock(VideoState *is)
1004 {
1005     if (is->paused) {
1006         return is->video_current_pts;
1007     } else {
1008         return is->video_current_pts_drift + av_gettime() / 1000000.0;
1009     }
1010 }
1011
1012 /* get the current external clock value */
1013 static double get_external_clock(VideoState *is)
1014 {
1015     int64_t ti;
1016     ti = av_gettime();
1017     return is->external_clock + ((ti - is->external_clock_time) * 1e-6);
1018 }
1019
1020 /* get the current master clock value */
1021 static double get_master_clock(VideoState *is)
1022 {
1023     double val;
1024
1025     if (is->av_sync_type == AV_SYNC_VIDEO_MASTER) {
1026         if (is->video_st)
1027             val = get_video_clock(is);
1028         else
1029             val = get_audio_clock(is);
1030     } else if (is->av_sync_type == AV_SYNC_AUDIO_MASTER) {
1031         if (is->audio_st)
1032             val = get_audio_clock(is);
1033         else
1034             val = get_video_clock(is);
1035     } else {
1036         val = get_external_clock(is);
1037     }
1038     return val;
1039 }
1040
1041 /* seek in the stream */
1042 static void stream_seek(VideoState *is, int64_t pos, int64_t rel, int seek_by_bytes)
1043 {
1044     if (!is->seek_req) {
1045         is->seek_pos = pos;
1046         is->seek_rel = rel;
1047         is->seek_flags &= ~AVSEEK_FLAG_BYTE;
1048         if (seek_by_bytes)
1049             is->seek_flags |= AVSEEK_FLAG_BYTE;
1050         is->seek_req = 1;
1051     }
1052 }
1053
1054 /* pause or resume the video */
1055 static void stream_toggle_pause(VideoState *is)
1056 {
1057     if (is->paused) {
1058         is->frame_timer += av_gettime() / 1000000.0 + is->video_current_pts_drift - is->video_current_pts;
1059         if(is->read_pause_return != AVERROR(ENOSYS)){
1060             is->video_current_pts = is->video_current_pts_drift + av_gettime() / 1000000.0;
1061         }
1062         is->video_current_pts_drift = is->video_current_pts - av_gettime() / 1000000.0;
1063     }
1064     is->paused = !is->paused;
1065 }
1066
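/* adjust the nominal frame delay so that video follows the master clock (audio or external) */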
1067 static double compute_target_delay(double delay, VideoState *is)
1068 {
1069     double sync_threshold, diff;
1070
1071     /* update delay to follow master synchronisation source */
1072     if (((is->av_sync_type == AV_SYNC_AUDIO_MASTER && is->audio_st) ||
1073          is->av_sync_type == AV_SYNC_EXTERNAL_CLOCK)) {
1074         /* if video is slave, we try to correct big delays by
1075            duplicating or deleting a frame */
1076         diff = get_video_clock(is) - get_master_clock(is);
1077
1078         /* skip or repeat frame. We take into account the
1079            delay to compute the threshold. I still don't know
1080            if it is the best guess */
1081         sync_threshold = FFMAX(AV_SYNC_THRESHOLD, delay);
1082         if (fabs(diff) < AV_NOSYNC_THRESHOLD) {
1083             if (diff <= -sync_threshold)
1084                 delay = 0;
1085             else if (diff >= sync_threshold)
1086                 delay = 2 * delay;
1087         }
1088     }
1089
1090     av_dlog(NULL, "video: delay=%0.3f A-V=%f\n",
1091             delay, -diff);
1092
1093     return delay;
1094 }
1095
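/* advance the read index of the picture queue and signal the decoder thread that a slot is free */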
1096 static void pictq_next_picture(VideoState *is) {
1097     /* update queue size and signal for next picture */
1098     if (++is->pictq_rindex == VIDEO_PICTURE_QUEUE_SIZE)
1099         is->pictq_rindex = 0;
1100
1101     SDL_LockMutex(is->pictq_mutex);
1102     is->pictq_size--;
1103     SDL_CondSignal(is->pictq_cond);
1104     SDL_UnlockMutex(is->pictq_mutex);
1105 }
1106
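/* update the currently displayed video pts, its drift against the system clock, the file position and the last frame pts */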
1107 static void update_video_pts(VideoState *is, double pts, int64_t pos) {
1108     double time = av_gettime() / 1000000.0;
1109     /* update current video pts */
1110     is->video_current_pts = pts;
1111     is->video_current_pts_drift = is->video_current_pts - time;
1112     is->video_current_pos = pos;
1113     is->frame_last_pts = pts;
1114 }
1115
1116 /* called to display each frame */
1117 static void video_refresh(void *opaque)
1118 {
1119     VideoState *is = opaque;
1120     VideoPicture *vp;
1121     double time;
1122
1123     SubPicture *sp, *sp2;
1124
1125     if (is->video_st) {
1126 retry:
1127         if (is->pictq_size == 0) {
1128             SDL_LockMutex(is->pictq_mutex);
1129             if (is->frame_last_dropped_pts != AV_NOPTS_VALUE && is->frame_last_dropped_pts > is->frame_last_pts) {
1130                 update_video_pts(is, is->frame_last_dropped_pts, is->frame_last_dropped_pos);
1131                 is->frame_last_dropped_pts = AV_NOPTS_VALUE;
1132             }
1133             SDL_UnlockMutex(is->pictq_mutex);
1134             // nothing to do, no picture to display in the queue
1135         } else {
1136             double last_duration, duration, delay;
1137             /* dequeue the picture */
1138             vp = &is->pictq[is->pictq_rindex];
1139
1140             if (vp->skip) {
1141                 pictq_next_picture(is);
1142                 goto retry;
1143             }
1144
1145             /* compute nominal last_duration */
1146             last_duration = vp->pts - is->frame_last_pts;
1147             if (last_duration > 0 && last_duration < 10.0) {
1148                 /* if duration of the last frame was sane, update last_duration in video state */
1149                 is->frame_last_duration = last_duration;
1150             }
1151             delay = compute_target_delay(is->frame_last_duration, is);
1152
1153             time= av_gettime()/1000000.0;
1154             if(time < is->frame_timer + delay)
1155                 return;
1156
1157             if (delay > 0)
1158                 is->frame_timer += delay * FFMAX(1, floor((time-is->frame_timer) / delay));
1159
1160             SDL_LockMutex(is->pictq_mutex);
1161             update_video_pts(is, vp->pts, vp->pos);
1162             SDL_UnlockMutex(is->pictq_mutex);
1163
1164             if(is->pictq_size > 1) {
1165                  VideoPicture *nextvp= &is->pictq[(is->pictq_rindex+1)%VIDEO_PICTURE_QUEUE_SIZE];
1166                  duration = nextvp->pts - vp->pts; // more accurate this way, as 1/time_base often does not reflect the real frame rate
1167             } else {
1168                  duration = vp->duration;
1169             }
1170
1171             if((framedrop>0 || (framedrop && is->audio_st)) && time > is->frame_timer + duration){
1172                 if(is->pictq_size > 1){
1173                     pictq_next_picture(is);
1174                     goto retry;
1175                 }
1176             }
1177
1178             if(is->subtitle_st) {
1179                 if (is->subtitle_stream_changed) {
1180                     SDL_LockMutex(is->subpq_mutex);
1181
1182                     while (is->subpq_size) {
1183                         free_subpicture(&is->subpq[is->subpq_rindex]);
1184
1185                         /* update queue size and signal for next picture */
1186                         if (++is->subpq_rindex == SUBPICTURE_QUEUE_SIZE)
1187                             is->subpq_rindex = 0;
1188
1189                         is->subpq_size--;
1190                     }
1191                     is->subtitle_stream_changed = 0;
1192
1193                     SDL_CondSignal(is->subpq_cond);
1194                     SDL_UnlockMutex(is->subpq_mutex);
1195                 } else {
1196                     if (is->subpq_size > 0) {
1197                         sp = &is->subpq[is->subpq_rindex];
1198
1199                         if (is->subpq_size > 1)
1200                             sp2 = &is->subpq[(is->subpq_rindex + 1) % SUBPICTURE_QUEUE_SIZE];
1201                         else
1202                             sp2 = NULL;
1203
1204                         if ((is->video_current_pts > (sp->pts + ((float) sp->sub.end_display_time / 1000)))
1205                                 || (sp2 && is->video_current_pts > (sp2->pts + ((float) sp2->sub.start_display_time / 1000))))
1206                         {
1207                             free_subpicture(sp);
1208
1209                             /* update queue size and signal for next picture */
1210                             if (++is->subpq_rindex == SUBPICTURE_QUEUE_SIZE)
1211                                 is->subpq_rindex = 0;
1212
1213                             SDL_LockMutex(is->subpq_mutex);
1214                             is->subpq_size--;
1215                             SDL_CondSignal(is->subpq_cond);
1216                             SDL_UnlockMutex(is->subpq_mutex);
1217                         }
1218                     }
1219                 }
1220             }
1221
1222             /* display picture */
1223             if (!display_disable)
1224                 video_display(is);
1225
1226             pictq_next_picture(is);
1227         }
1228     } else if (is->audio_st) {
1229         /* draw the next audio frame */
1230
1231         /* if there is only an audio stream, then display the audio bars (better
1232            than nothing, just to test the implementation) */
1233
1234         /* display picture */
1235         if (!display_disable)
1236             video_display(is);
1237     }
1238     if (show_status) {
1239         static int64_t last_time;
1240         int64_t cur_time;
1241         int aqsize, vqsize, sqsize;
1242         double av_diff;
1243
1244         cur_time = av_gettime();
1245         if (!last_time || (cur_time - last_time) >= 30000) {
1246             aqsize = 0;
1247             vqsize = 0;
1248             sqsize = 0;
1249             if (is->audio_st)
1250                 aqsize = is->audioq.size;
1251             if (is->video_st)
1252                 vqsize = is->videoq.size;
1253             if (is->subtitle_st)
1254                 sqsize = is->subtitleq.size;
1255             av_diff = 0;
1256             if (is->audio_st && is->video_st)
1257                 av_diff = get_audio_clock(is) - get_video_clock(is);
1258             printf("%7.2f A-V:%7.3f aq=%5dKB vq=%5dKB sq=%5dB f=%"PRId64"/%"PRId64"   \r",
1259                    get_master_clock(is),
1260                    av_diff,
1261                    aqsize / 1024,
1262                    vqsize / 1024,
1263                    sqsize,
1264                    is->video_st ? is->video_st->codec->pts_correction_num_faulty_dts : 0,
1265                    is->video_st ? is->video_st->codec->pts_correction_num_faulty_pts : 0);
1266             fflush(stdout);
1267             last_time = cur_time;
1268         }
1269     }
1270 }
1271
1272 /* allocate a picture (this needs to be done in the main thread to avoid
1273    potential locking problems) */
1274 static void alloc_picture(void *opaque)
1275 {
1276     VideoState *is = opaque;
1277     VideoPicture *vp;
1278
1279     vp = &is->pictq[is->pictq_windex];
1280
1281     if (vp->bmp)
1282         SDL_FreeYUVOverlay(vp->bmp);
1283
1284 #if CONFIG_AVFILTER
1285     if (vp->picref)
1286         avfilter_unref_buffer(vp->picref);
1287     vp->picref = NULL;
1288
1289     vp->width   = is->out_video_filter->inputs[0]->w;
1290     vp->height  = is->out_video_filter->inputs[0]->h;
1291     vp->pix_fmt = is->out_video_filter->inputs[0]->format;
1292 #else
1293     vp->width   = is->video_st->codec->width;
1294     vp->height  = is->video_st->codec->height;
1295     vp->pix_fmt = is->video_st->codec->pix_fmt;
1296 #endif
1297
1298     vp->bmp = SDL_CreateYUVOverlay(vp->width, vp->height,
1299                                    SDL_YV12_OVERLAY,
1300                                    screen);
1301     if (!vp->bmp || vp->bmp->pitches[0] < vp->width) {
1302         /* SDL allocates a buffer smaller than requested if the video
1303          * overlay hardware is unable to support the requested size. */
1304         fprintf(stderr, "Error: the video system does not support an image\n"
1305                         "size of %dx%d pixels. Try using -lowres or -vf \"scale=w:h\"\n"
1306                         "to reduce the image size.\n", vp->width, vp->height );
1307         do_exit(is);
1308     }
1309
1310     SDL_LockMutex(is->pictq_mutex);
1311     vp->allocated = 1;
1312     SDL_CondSignal(is->pictq_cond);
1313     SDL_UnlockMutex(is->pictq_mutex);
1314 }
1315
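/* wait for a free slot in the picture queue, (re)allocate the SDL overlay if needed, then copy/convert src_frame into it */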
1316 static int queue_picture(VideoState *is, AVFrame *src_frame, double pts1, int64_t pos)
1317 {
1318     VideoPicture *vp;
1319     double frame_delay, pts = pts1;
1320
1321     /* compute the exact PTS for the picture if it is omitted in the stream
1322      * pts1 is the dts of the pkt / pts of the frame */
1323     if (pts != 0) {
1324         /* update video clock with pts, if present */
1325         is->video_clock = pts;
1326     } else {
1327         pts = is->video_clock;
1328     }
1329     /* update video clock for next frame */
1330     frame_delay = av_q2d(is->video_st->codec->time_base);
1331     /* for MPEG2, the frame can be repeated, so we update the
1332        clock accordingly */
1333     frame_delay += src_frame->repeat_pict * (frame_delay * 0.5);
1334     is->video_clock += frame_delay;
1335
1336 #if defined(DEBUG_SYNC) && 0
1337     printf("frame_type=%c clock=%0.3f pts=%0.3f\n",
1338            av_get_picture_type_char(src_frame->pict_type), pts, pts1);
1339 #endif
1340
1341     /* wait until we have space to put a new picture */
1342     SDL_LockMutex(is->pictq_mutex);
1343
1344     while (is->pictq_size >= VIDEO_PICTURE_QUEUE_SIZE &&
1345            !is->videoq.abort_request) {
1346         SDL_CondWait(is->pictq_cond, is->pictq_mutex);
1347     }
1348     SDL_UnlockMutex(is->pictq_mutex);
1349
1350     if (is->videoq.abort_request)
1351         return -1;
1352
1353     vp = &is->pictq[is->pictq_windex];
1354
1355     vp->duration = frame_delay;
1356
1357     /* alloc or resize hardware picture buffer */
1358     if (!vp->bmp || vp->reallocate ||
1359 #if CONFIG_AVFILTER
1360         vp->width  != is->out_video_filter->inputs[0]->w ||
1361         vp->height != is->out_video_filter->inputs[0]->h) {
1362 #else
1363         vp->width != is->video_st->codec->width ||
1364         vp->height != is->video_st->codec->height) {
1365 #endif
1366         SDL_Event event;
1367
1368         vp->allocated  = 0;
1369         vp->reallocate = 0;
1370
1371         /* the allocation must be done in the main thread to avoid
1372            locking problems */
1373         event.type = FF_ALLOC_EVENT;
1374         event.user.data1 = is;
1375         SDL_PushEvent(&event);
1376
1377         /* wait until the picture is allocated */
1378         SDL_LockMutex(is->pictq_mutex);
1379         while (!vp->allocated && !is->videoq.abort_request) {
1380             SDL_CondWait(is->pictq_cond, is->pictq_mutex);
1381         }
1382         /* if the queue is aborted, we have to pop the pending ALLOC event or wait for the allocation to complete */
1383         if (is->videoq.abort_request && SDL_PeepEvents(&event, 1, SDL_GETEVENT, SDL_EVENTMASK(FF_ALLOC_EVENT)) != 1) {
1384             while (!vp->allocated) {
1385                 SDL_CondWait(is->pictq_cond, is->pictq_mutex);
1386             }
1387         }
1388         SDL_UnlockMutex(is->pictq_mutex);
1389
1390         if (is->videoq.abort_request)
1391             return -1;
1392     }
1393
1394     /* if the frame is not skipped, then display it */
1395     if (vp->bmp) {
1396         AVPicture pict;
1397 #if CONFIG_AVFILTER
1398         if(vp->picref)
1399             avfilter_unref_buffer(vp->picref);
1400         vp->picref = src_frame->opaque;
1401 #endif
1402
1403         /* get a pointer on the bitmap */
1404         SDL_LockYUVOverlay (vp->bmp);
1405
1406         memset(&pict,0,sizeof(AVPicture));
1407         pict.data[0] = vp->bmp->pixels[0];
1408         pict.data[1] = vp->bmp->pixels[2];
1409         pict.data[2] = vp->bmp->pixels[1];
1410
1411         pict.linesize[0] = vp->bmp->pitches[0];
1412         pict.linesize[1] = vp->bmp->pitches[2];
1413         pict.linesize[2] = vp->bmp->pitches[1];
1414
1415 #if CONFIG_AVFILTER
1416         //FIXME use direct rendering
1417         av_picture_copy(&pict, (AVPicture *)src_frame,
1418                         vp->pix_fmt, vp->width, vp->height);
1419 #else
1420         sws_flags = av_get_int(sws_opts, "sws_flags", NULL);
1421         is->img_convert_ctx = sws_getCachedContext(is->img_convert_ctx,
1422             vp->width, vp->height, vp->pix_fmt, vp->width, vp->height,
1423             PIX_FMT_YUV420P, sws_flags, NULL, NULL, NULL);
1424         if (is->img_convert_ctx == NULL) {
1425             fprintf(stderr, "Cannot initialize the conversion context\n");
1426             exit(1);
1427         }
1428         sws_scale(is->img_convert_ctx, src_frame->data, src_frame->linesize,
1429                   0, vp->height, pict.data, pict.linesize);
1430 #endif
1431         /* update the bitmap content */
1432         SDL_UnlockYUVOverlay(vp->bmp);
1433
1434         vp->pts = pts;
1435         vp->pos = pos;
1436         vp->skip = 0;
1437
1438         /* now we can update the picture count */
1439         if (++is->pictq_windex == VIDEO_PICTURE_QUEUE_SIZE)
1440             is->pictq_windex = 0;
1441         SDL_LockMutex(is->pictq_mutex);
1442         is->pictq_size++;
1443         SDL_UnlockMutex(is->pictq_mutex);
1444     }
1445     return 0;
1446 }
1447
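/* decode the next video packet; returns < 0 on abort, > 0 when a frame should be queued, 0 otherwise (flush packet, no frame, or early drop) */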
1448 static int get_video_frame(VideoState *is, AVFrame *frame, int64_t *pts, AVPacket *pkt)
1449 {
1450     int got_picture, i;
1451
1452     if (packet_queue_get(&is->videoq, pkt, 1) < 0)
1453         return -1;
1454
1455     if (pkt->data == flush_pkt.data) {
1456         avcodec_flush_buffers(is->video_st->codec);
1457
1458         SDL_LockMutex(is->pictq_mutex);
1459         // Make sure there are no long-delay timers (ideally we should just flush the queue, but that's harder)
1460         for (i = 0; i < VIDEO_PICTURE_QUEUE_SIZE; i++) {
1461             is->pictq[i].skip = 1;
1462         }
1463         while (is->pictq_size && !is->videoq.abort_request) {
1464             SDL_CondWait(is->pictq_cond, is->pictq_mutex);
1465         }
1466         is->video_current_pos = -1;
1467         is->frame_last_pts = AV_NOPTS_VALUE;
1468         is->frame_last_duration = 0;
1469         is->frame_timer = (double)av_gettime() / 1000000.0;
1470         is->frame_last_dropped_pts = AV_NOPTS_VALUE;
1471         SDL_UnlockMutex(is->pictq_mutex);
1472
1473         return 0;
1474     }
1475
1476     avcodec_decode_video2(is->video_st->codec, frame, &got_picture, pkt);
1477
1478     if (got_picture) {
1479         int ret = 1;
1480
1481         if (decoder_reorder_pts == -1) {
1482             *pts = frame->best_effort_timestamp;
1483         } else if (decoder_reorder_pts) {
1484             *pts = frame->pkt_pts;
1485         } else {
1486             *pts = frame->pkt_dts;
1487         }
1488
1489         if (*pts == AV_NOPTS_VALUE) {
1490             *pts = 0;
1491         }
1492
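        /* early frame drop: when video is slaved to another clock and the decoded frame would still be late after the previous frame has been shown, drop it here before it is queued */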
1493         if (((is->av_sync_type == AV_SYNC_AUDIO_MASTER && is->audio_st) || is->av_sync_type == AV_SYNC_EXTERNAL_CLOCK) &&
1494              (framedrop>0 || (framedrop && is->audio_st))) {
1495             SDL_LockMutex(is->pictq_mutex);
1496             if (is->frame_last_pts != AV_NOPTS_VALUE && *pts) {
1497                 double clockdiff = get_video_clock(is) - get_master_clock(is);
1498                 double dpts = av_q2d(is->video_st->time_base) * *pts;
1499                 double ptsdiff = dpts - is->frame_last_pts;
1500                 if (fabs(clockdiff) < AV_NOSYNC_THRESHOLD &&
1501                      ptsdiff > 0 && ptsdiff < AV_NOSYNC_THRESHOLD &&
1502                      clockdiff + ptsdiff < 0) { // TODO: subtract the approximate time spent in the filter
1503                     is->frame_last_dropped_pos = pkt->pos;
1504                     is->frame_last_dropped_pts = dpts;
1505                     ret = 0;
1506                 }
1507             }
1508             SDL_UnlockMutex(is->pictq_mutex);
1509         }
1510
1511         return ret;
1512     }
1513     return 0;
1514 }
1515
1516 #if CONFIG_AVFILTER
1517 typedef struct {
1518     VideoState *is;
1519     AVFrame *frame;
1520     int use_dr1;
1521 } FilterPriv;
1522
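/* get_buffer() callback that makes the decoder write directly into a buffer obtained from the filter graph (direct rendering) */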
1523 static int input_get_buffer(AVCodecContext *codec, AVFrame *pic)
1524 {
1525     AVFilterContext *ctx = codec->opaque;
1526     AVFilterBufferRef  *ref;
1527     int perms = AV_PERM_WRITE;
1528     int i, w, h, stride[4];
1529     unsigned edge;
1530     int pixel_size;
1531
1532     av_assert0(codec->flags & CODEC_FLAG_EMU_EDGE);
1533
1534     if (codec->codec->capabilities & CODEC_CAP_NEG_LINESIZES)
1535         perms |= AV_PERM_NEG_LINESIZES;
1536
1537     if(pic->buffer_hints & FF_BUFFER_HINTS_VALID) {
1538         if(pic->buffer_hints & FF_BUFFER_HINTS_READABLE) perms |= AV_PERM_READ;
1539         if(pic->buffer_hints & FF_BUFFER_HINTS_PRESERVE) perms |= AV_PERM_PRESERVE;
1540         if(pic->buffer_hints & FF_BUFFER_HINTS_REUSABLE) perms |= AV_PERM_REUSE2;
1541     }
1542     if(pic->reference) perms |= AV_PERM_READ | AV_PERM_PRESERVE;
1543
1544     w = codec->width;
1545     h = codec->height;
1546
1547     if(av_image_check_size(w, h, 0, codec))
1548         return -1;
1549
1550     avcodec_align_dimensions2(codec, &w, &h, stride);
1551     edge = codec->flags & CODEC_FLAG_EMU_EDGE ? 0 : avcodec_get_edge_width();
1552     w += edge << 1;
1553     h += edge << 1;
1554
1555     if(!(ref = avfilter_get_video_buffer(ctx->outputs[0], perms, w, h)))
1556         return -1;
1557
1558     pixel_size = av_pix_fmt_descriptors[ref->format].comp[0].step_minus1+1;
1559     ref->video->w = codec->width;
1560     ref->video->h = codec->height;
1561     for(i = 0; i < 4; i ++) {
1562         unsigned hshift = (i == 1 || i == 2) ? av_pix_fmt_descriptors[ref->format].log2_chroma_w : 0;
1563         unsigned vshift = (i == 1 || i == 2) ? av_pix_fmt_descriptors[ref->format].log2_chroma_h : 0;
1564
1565         if (ref->data[i]) {
1566             ref->data[i]    += ((edge * pixel_size) >> hshift) + ((edge * ref->linesize[i]) >> vshift);
1567         }
1568         pic->data[i]     = ref->data[i];
1569         pic->linesize[i] = ref->linesize[i];
1570     }
1571     pic->opaque = ref;
1572     pic->age    = INT_MAX;
1573     pic->type   = FF_BUFFER_TYPE_USER;
1574     pic->reordered_opaque = codec->reordered_opaque;
1575     if(codec->pkt) pic->pkt_pts = codec->pkt->pts;
1576     else           pic->pkt_pts = AV_NOPTS_VALUE;
1577     return 0;
1578 }
1579
1580 static void input_release_buffer(AVCodecContext *codec, AVFrame *pic)
1581 {
1582     memset(pic->data, 0, sizeof(pic->data));
1583     avfilter_unref_buffer(pic->opaque);
1584 }
1585
1586 static int input_reget_buffer(AVCodecContext *codec, AVFrame *pic)
1587 {
1588     AVFilterBufferRef *ref = pic->opaque;
1589
1590     if (pic->data[0] == NULL) {
1591         pic->buffer_hints |= FF_BUFFER_HINTS_READABLE;
1592         return codec->get_buffer(codec, pic);
1593     }
1594
1595     if ((codec->width != ref->video->w) || (codec->height != ref->video->h) ||
1596         (codec->pix_fmt != ref->format)) {
1597         av_log(codec, AV_LOG_ERROR, "Picture properties changed.\n");
1598         return -1;
1599     }
1600
1601     pic->reordered_opaque = codec->reordered_opaque;
1602     if(codec->pkt) pic->pkt_pts = codec->pkt->pts;
1603     else           pic->pkt_pts = AV_NOPTS_VALUE;
1604     return 0;
1605 }
1606
1607 static int input_init(AVFilterContext *ctx, const char *args, void *opaque)
1608 {
1609     FilterPriv *priv = ctx->priv;
1610     AVCodecContext *codec;
1611     if(!opaque) return -1;
1612
1613     priv->is = opaque;
1614     codec    = priv->is->video_st->codec;
1615     codec->opaque = ctx;
1616     if((codec->codec->capabilities & CODEC_CAP_DR1)
1617     ) {
1618         av_assert0(codec->flags & CODEC_FLAG_EMU_EDGE);
1619         priv->use_dr1 = 1;
1620         codec->get_buffer     = input_get_buffer;
1621         codec->release_buffer = input_release_buffer;
1622         codec->reget_buffer   = input_reget_buffer;
1623         codec->thread_safe_callbacks = 1;
1624     }
1625
1626     priv->frame = avcodec_alloc_frame();
1627
1628     return 0;
1629 }
1630
1631 static void input_uninit(AVFilterContext *ctx)
1632 {
1633     FilterPriv *priv = ctx->priv;
1634     av_free(priv->frame);
1635 }
1636
1637 static int input_request_frame(AVFilterLink *link)
1638 {
1639     FilterPriv *priv = link->src->priv;
1640     AVFilterBufferRef *picref;
1641     int64_t pts = 0;
1642     AVPacket pkt;
1643     int ret;
1644
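         /* keep feeding packets from the video queue until a complete frame is
            decoded: get_video_frame() returns 0 while no picture is available
            (e.g. the packet only filled the decoder, or the frame was dropped
            early for A/V sync) and a negative value on abort. */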
1645     while (!(ret = get_video_frame(priv->is, priv->frame, &pts, &pkt)))
1646         av_free_packet(&pkt);
1647     if (ret < 0)
1648         return -1;
1649
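         /* with direct rendering (use_dr1) the decoded picture already lives in
            a filter buffer, stored in frame->opaque by input_get_buffer(), so we
            only take a new reference; otherwise the data has to be copied into a
            buffer allocated from the output link. */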
1650     if(priv->use_dr1 && priv->frame->opaque) {
1651         picref = avfilter_ref_buffer(priv->frame->opaque, ~0);
1652     } else {
1653         picref = avfilter_get_video_buffer(link, AV_PERM_WRITE, link->w, link->h);
1654         av_image_copy(picref->data, picref->linesize,
1655                       priv->frame->data, priv->frame->linesize,
1656                       picref->format, link->w, link->h);
1657     }
1658     av_free_packet(&pkt);
1659
1660     avfilter_copy_frame_props(picref, priv->frame);
1661     picref->pts = pts;
1662
1663     avfilter_start_frame(link, picref);
1664     avfilter_draw_slice(link, 0, link->h, 1);
1665     avfilter_end_frame(link);
1666
1667     return 0;
1668 }
1669
1670 static int input_query_formats(AVFilterContext *ctx)
1671 {
1672     FilterPriv *priv = ctx->priv;
1673     enum PixelFormat pix_fmts[] = {
1674         priv->is->video_st->codec->pix_fmt, PIX_FMT_NONE
1675     };
1676
1677     avfilter_set_common_pixel_formats(ctx, avfilter_make_format_list(pix_fmts));
1678     return 0;
1679 }
1680
1681 static int input_config_props(AVFilterLink *link)
1682 {
1683     FilterPriv *priv  = link->src->priv;
1684     AVStream *s = priv->is->video_st;
1685
1686     link->w = s->codec->width;
1687     link->h = s->codec->height;
1688     link->sample_aspect_ratio = s->sample_aspect_ratio.num ?
1689         s->sample_aspect_ratio : s->codec->sample_aspect_ratio;
1690     link->time_base = s->time_base;
1691
1692     return 0;
1693 }
1694
1695 static AVFilter input_filter =
1696 {
1697     .name      = "ffplay_input",
1698
1699     .priv_size = sizeof(FilterPriv),
1700
1701     .init      = input_init,
1702     .uninit    = input_uninit,
1703
1704     .query_formats = input_query_formats,
1705
1706     .inputs    = (AVFilterPad[]) {{ .name = NULL }},
1707     .outputs   = (AVFilterPad[]) {{ .name = "default",
1708                                     .type = AVMEDIA_TYPE_VIDEO,
1709                                     .request_frame = input_request_frame,
1710                                     .config_props  = input_config_props, },
1711                                   { .name = NULL }},
1712 };
1713
1714 static int configure_video_filters(AVFilterGraph *graph, VideoState *is, const char *vfilters)
1715 {
1716     char sws_flags_str[128];
1717     int ret;
1718     enum PixelFormat pix_fmts[] = { PIX_FMT_YUV420P, PIX_FMT_NONE };
1719     AVBufferSinkParams *buffersink_params = av_buffersink_params_alloc();
1720     AVFilterContext *filt_src = NULL, *filt_out = NULL;
1721     snprintf(sws_flags_str, sizeof(sws_flags_str), "flags=%d", sws_flags);
1722     graph->scale_sws_opts = av_strdup(sws_flags_str);
1723
1724     if ((ret = avfilter_graph_create_filter(&filt_src, &input_filter, "src",
1725                                             NULL, is, graph)) < 0)
1726         return ret;
1727 #if FF_API_OLD_VSINK_API
1728     ret = avfilter_graph_create_filter(&filt_out, avfilter_get_by_name("buffersink"), "out",
1729                                        NULL, pix_fmts, graph);
1730 #else
1731     buffersink_params->pixel_fmts = pix_fmts;
1732     ret = avfilter_graph_create_filter(&filt_out, avfilter_get_by_name("buffersink"), "out",
1733                                        NULL, buffersink_params, graph);
1734 #endif
1735     av_freep(&buffersink_params);
1736     if (ret < 0)
1737         return ret;
1738
1739     if(vfilters) {
1740         AVFilterInOut *outputs = avfilter_inout_alloc();
1741         AVFilterInOut *inputs  = avfilter_inout_alloc();
1742
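             /* splice the user-supplied filter chain between our source and sink:
                "in" labels the free output pad of filt_src and "out" the free
                input pad of filt_out, so a chain written as "[in] ... [out]" (or
                with the labels left implicit) is connected between them. */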
1743         outputs->name    = av_strdup("in");
1744         outputs->filter_ctx = filt_src;
1745         outputs->pad_idx = 0;
1746         outputs->next    = NULL;
1747
1748         inputs->name    = av_strdup("out");
1749         inputs->filter_ctx = filt_out;
1750         inputs->pad_idx = 0;
1751         inputs->next    = NULL;
1752
1753         if ((ret = avfilter_graph_parse(graph, vfilters, &inputs, &outputs, NULL)) < 0)
1754             return ret;
1755     } else {
1756         if ((ret = avfilter_link(filt_src, 0, filt_out, 0)) < 0)
1757             return ret;
1758     }
1759
1760     if ((ret = avfilter_graph_config(graph, NULL)) < 0)
1761         return ret;
1762
1763     is->out_video_filter = filt_out;
1764
1765     return ret;
1766 }
1767
1768 #endif  /* CONFIG_AVFILTER */
1769
1770 static int video_thread(void *arg)
1771 {
1772     VideoState *is = arg;
1773     AVFrame *frame= avcodec_alloc_frame();
1774     int64_t pts_int = AV_NOPTS_VALUE, pos = -1;
1775     double pts;
1776     int ret;
1777
1778 #if CONFIG_AVFILTER
1779     AVFilterGraph *graph = avfilter_graph_alloc();
1780     AVFilterContext *filt_out = NULL;
1781     int last_w = is->video_st->codec->width;
1782     int last_h = is->video_st->codec->height;
1783
1784     if ((ret = configure_video_filters(graph, is, vfilters)) < 0)
1785         goto the_end;
1786     filt_out = is->out_video_filter;
1787 #endif
1788
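         /* main decoding loop: obtain the next frame (from the filtergraph when
            avfilter is enabled, directly from get_video_frame() otherwise),
            rescale its pts into the stream time base and hand it to the picture
            queue for display. */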
1789     for(;;) {
1790 #if !CONFIG_AVFILTER
1791         AVPacket pkt;
1792 #else
1793         AVFilterBufferRef *picref;
1794         AVRational tb = filt_out->inputs[0]->time_base;
1795 #endif
1796         while (is->paused && !is->videoq.abort_request)
1797             SDL_Delay(10);
1798 #if CONFIG_AVFILTER
1799         if (   last_w != is->video_st->codec->width
1800             || last_h != is->video_st->codec->height) {
1801             av_log(NULL, AV_LOG_INFO, "Frame changed from size:%dx%d to size:%dx%d\n",
1802                    last_w, last_h, is->video_st->codec->width, is->video_st->codec->height);
1803             avfilter_graph_free(&graph);
1804             graph = avfilter_graph_alloc();
1805             if ((ret = configure_video_filters(graph, is, vfilters)) < 0)
1806                 goto the_end;
1807             filt_out = is->out_video_filter;
1808             last_w = is->video_st->codec->width;
1809             last_h = is->video_st->codec->height;
1810         }
1811         ret = av_buffersink_get_buffer_ref(filt_out, &picref, 0);
1812         if (picref) {
1813             avfilter_fill_frame_from_video_buffer_ref(frame, picref);
1814             pts_int = picref->pts;
1815             pos     = picref->pos;
1816             frame->opaque = picref;
1817         }
1818
1819         if (av_cmp_q(tb, is->video_st->time_base)) {
1820             av_unused int64_t pts1 = pts_int;
1821             pts_int = av_rescale_q(pts_int, tb, is->video_st->time_base);
1822             av_dlog(NULL, "video_thread(): "
1823                     "tb:%d/%d pts:%"PRId64" -> tb:%d/%d pts:%"PRId64"\n",
1824                     tb.num, tb.den, pts1,
1825                     is->video_st->time_base.num, is->video_st->time_base.den, pts_int);
1826         }
1827 #else
1828         ret = get_video_frame(is, frame, &pts_int, &pkt);
1829         pos = pkt.pos;
1830         av_free_packet(&pkt);
1831 #endif
1832
1833         if (ret < 0) goto the_end;
1834
1835 #if CONFIG_AVFILTER
1836         if (!picref)
1837             continue;
1838 #endif
1839
1840         pts = pts_int*av_q2d(is->video_st->time_base);
1841
1842         ret = queue_picture(is, frame, pts, pos);
1843
1844         if (ret < 0)
1845             goto the_end;
1846
1847         if (is->step)
1848             stream_toggle_pause(is);
1849     }
1850  the_end:
1851 #if CONFIG_AVFILTER
1852     avfilter_graph_free(&graph);
1853 #endif
1854     av_free(frame);
1855     return 0;
1856 }
1857
1858 static int subtitle_thread(void *arg)
1859 {
1860     VideoState *is = arg;
1861     SubPicture *sp;
1862     AVPacket pkt1, *pkt = &pkt1;
1863     int got_subtitle;
1864     double pts;
1865     int i, j;
1866     int r, g, b, y, u, v, a;
1867
1868     for(;;) {
1869         while (is->paused && !is->subtitleq.abort_request) {
1870             SDL_Delay(10);
1871         }
1872         if (packet_queue_get(&is->subtitleq, pkt, 1) < 0)
1873             break;
1874
1875         if(pkt->data == flush_pkt.data){
1876             avcodec_flush_buffers(is->subtitle_st->codec);
1877             continue;
1878         }
1879         SDL_LockMutex(is->subpq_mutex);
1880         while (is->subpq_size >= SUBPICTURE_QUEUE_SIZE &&
1881                !is->subtitleq.abort_request) {
1882             SDL_CondWait(is->subpq_cond, is->subpq_mutex);
1883         }
1884         SDL_UnlockMutex(is->subpq_mutex);
1885
1886         if (is->subtitleq.abort_request)
1887             return 0;
1888
1889         sp = &is->subpq[is->subpq_windex];
1890
1891         /* NOTE: pts is the PTS of the _first_ subtitle beginning in
1892            this packet, if any */
1893         pts = 0;
1894         if (pkt->pts != AV_NOPTS_VALUE)
1895             pts = av_q2d(is->subtitle_st->time_base)*pkt->pts;
1896
1897         avcodec_decode_subtitle2(is->subtitle_st->codec, &sp->sub,
1898                                  &got_subtitle, pkt);
1899
1900         if (got_subtitle && sp->sub.format == 0) {
1901             sp->pts = pts;
1902
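                 /* bitmap subtitles (format 0) carry an RGBA palette; convert
                    each palette entry to CCIR YUVA in place so the rectangles
                    can later be blended directly onto the YUV overlay. */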
1903             for (i = 0; i < sp->sub.num_rects; i++)
1904             {
1905                 for (j = 0; j < sp->sub.rects[i]->nb_colors; j++)
1906                 {
1907                     RGBA_IN(r, g, b, a, (uint32_t*)sp->sub.rects[i]->pict.data[1] + j);
1908                     y = RGB_TO_Y_CCIR(r, g, b);
1909                     u = RGB_TO_U_CCIR(r, g, b, 0);
1910                     v = RGB_TO_V_CCIR(r, g, b, 0);
1911                     YUVA_OUT((uint32_t*)sp->sub.rects[i]->pict.data[1] + j, y, u, v, a);
1912                 }
1913             }
1914
1915             /* now we can update the picture count */
1916             if (++is->subpq_windex == SUBPICTURE_QUEUE_SIZE)
1917                 is->subpq_windex = 0;
1918             SDL_LockMutex(is->subpq_mutex);
1919             is->subpq_size++;
1920             SDL_UnlockMutex(is->subpq_mutex);
1921         }
1922         av_free_packet(pkt);
1923     }
1924     return 0;
1925 }
1926
1927 /* copy samples for viewing in the audio waveform display */
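     /* sample_array is used as a circular buffer: the write index wraps at
        SAMPLE_ARRAY_SIZE, so the display code always sees the most recently
        played samples. */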
1928 static void update_sample_display(VideoState *is, short *samples, int samples_size)
1929 {
1930     int size, len;
1931
1932     size = samples_size / sizeof(short);
1933     while (size > 0) {
1934         len = SAMPLE_ARRAY_SIZE - is->sample_array_index;
1935         if (len > size)
1936             len = size;
1937         memcpy(is->sample_array + is->sample_array_index, samples, len * sizeof(short));
1938         samples += len;
1939         is->sample_array_index += len;
1940         if (is->sample_array_index >= SAMPLE_ARRAY_SIZE)
1941             is->sample_array_index = 0;
1942         size -= len;
1943     }
1944 }
1945
1946 /* return the new audio buffer size (samples can be added or deleted
1947    to get better sync if video or the external clock is the master) */
1948 static int synchronize_audio(VideoState *is, short *samples,
1949                              int samples_size1, double pts)
1950 {
1951     int n, samples_size;
1952     double ref_clock;
1953
1954     n = av_get_bytes_per_sample(is->audio_tgt_fmt) * is->audio_tgt_channels;
1955     samples_size = samples_size1;
1956
1957     /* if not master, then we try to remove or add samples to correct the clock */
1958     if (((is->av_sync_type == AV_SYNC_VIDEO_MASTER && is->video_st) ||
1959          is->av_sync_type == AV_SYNC_EXTERNAL_CLOCK)) {
1960         double diff, avg_diff;
1961         int wanted_size, min_size, max_size, nb_samples;
1962
1963         ref_clock = get_master_clock(is);
1964         diff = get_audio_clock(is) - ref_clock;
1965
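             /* audio_diff_cum is an exponentially weighted sum, cum = diff + coef * cum,
                with coef = exp(log(0.01) / AUDIO_DIFF_AVG_NB), so cum * (1 - coef)
                approximates the average of roughly the last AUDIO_DIFF_AVG_NB
                A-V differences. */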
1966         if (fabs(diff) < AV_NOSYNC_THRESHOLD) {
1967             is->audio_diff_cum = diff + is->audio_diff_avg_coef * is->audio_diff_cum;
1968             if (is->audio_diff_avg_count < AUDIO_DIFF_AVG_NB) {
1969                 /* not enough measurements yet for a reliable estimate */
1970                 is->audio_diff_avg_count++;
1971             } else {
1972                 /* estimate the A-V difference */
1973                 avg_diff = is->audio_diff_cum * (1.0 - is->audio_diff_avg_coef);
1974
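                     /* translate the clock error into bytes: diff seconds times
                        the target sample rate gives samples, times n bytes per
                        sample frame; the result is then clamped to within
                        SAMPLE_CORRECTION_PERCENT_MAX percent of the original
                        buffer size (and to the static buffer sizes). */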
1975                 if (fabs(avg_diff) >= is->audio_diff_threshold) {
1976                     wanted_size = samples_size + ((int)(diff * is->audio_tgt_freq) * n);
1977                     nb_samples = samples_size / n;
1978
1979                     min_size = ((nb_samples * (100 - SAMPLE_CORRECTION_PERCENT_MAX)) / 100) * n;
1980                     max_size = ((nb_samples * (100 + SAMPLE_CORRECTION_PERCENT_MAX)) / 100) * n;
1981                     if (wanted_size < min_size)
1982                         wanted_size = min_size;
1983                     else if (wanted_size > FFMIN3(max_size, sizeof(is->audio_buf1), sizeof(is->audio_buf2)))
1984                         wanted_size = FFMIN3(max_size, sizeof(is->audio_buf1), sizeof(is->audio_buf2));
1985
1986                     /* add or remove samples to correct the sync */
1987                     if (wanted_size < samples_size) {
1988                         /* remove samples */
1989                         samples_size = wanted_size;
1990                     } else if (wanted_size > samples_size) {
1991                         uint8_t *samples_end, *q;
1992                         int nb;
1993
1994                         /* add samples */
1995                         nb = wanted_size - samples_size;
1996                         samples_end = (uint8_t *)samples + samples_size - n;
1997                         q = samples_end + n;
1998                         while (nb > 0) {
1999                             memcpy(q, samples_end, n);
2000                             q += n;
2001                             nb -= n;
2002                         }
2003                         samples_size = wanted_size;
2004                     }
2005                 }
2006                 av_dlog(NULL, "diff=%f adiff=%f sample_diff=%d apts=%0.3f vpts=%0.3f %f\n",
2007                         diff, avg_diff, samples_size - samples_size1,
2008                         is->audio_clock, is->video_clock, is->audio_diff_threshold);
2009             }
2010         } else {
2011             /* difference is too large: probably caused by initial PTS errors,
2012                so reset the A-V filter */
2013             is->audio_diff_avg_count = 0;
2014             is->audio_diff_cum = 0;
2015         }
2016     }
2017
2018     return samples_size;
2019 }
2020
2021 /* decode one audio frame and return its uncompressed size */
2022 static int audio_decode_frame(VideoState *is, double *pts_ptr)
2023 {
2024     AVPacket *pkt_temp = &is->audio_pkt_temp;
2025     AVPacket *pkt = &is->audio_pkt;
2026     AVCodecContext *dec= is->audio_st->codec;
2027     int len1, len2, data_size, resampled_data_size;
2028     int64_t dec_channel_layout;
2029     double pts;
2030     int new_packet = 0;
2031     int flush_complete = 0;
2032
2033     for(;;) {
2034         /* NOTE: the audio packet can contain several frames */
2035         while (pkt_temp->size > 0 || (!pkt_temp->data && new_packet)) {
2036             if (flush_complete)
2037                 break;
2038             new_packet = 0;
2039             data_size = sizeof(is->audio_buf1);
2040             len1 = avcodec_decode_audio3(dec,
2041                                         (int16_t *)is->audio_buf1, &data_size,
2042                                         pkt_temp);
2043             if (len1 < 0) {
2044                 /* if error, we skip the frame */
2045                 pkt_temp->size = 0;
2046                 break;
2047             }
2048
2049             pkt_temp->data += len1;
2050             pkt_temp->size -= len1;
2051
2052             if (data_size <= 0) {
2053                 /* stop sending empty packets if the decoder is finished */
2054                 if (!pkt_temp->data && dec->codec->capabilities & CODEC_CAP_DELAY)
2055                     flush_complete = 1;
2056                 continue;
2057             }
2058
2059             dec_channel_layout = (dec->channel_layout && dec->channels == av_get_channel_layout_nb_channels(dec->channel_layout)) ? dec->channel_layout : av_get_default_channel_layout(dec->channels);
2060
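                 /* (re)create the resampler whenever the decoder output no longer
                    matches the last seen source format; conversion always targets
                    the format negotiated with SDL (audio_tgt_*). */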
2061             if (dec->sample_fmt != is->audio_src_fmt || dec_channel_layout != is->audio_src_channel_layout || dec->sample_rate != is->audio_src_freq) {
2062                 if (is->swr_ctx)
2063                     swr_free(&is->swr_ctx);
2064                 is->swr_ctx = swr_alloc2(NULL, is->audio_tgt_channel_layout, is->audio_tgt_fmt, is->audio_tgt_freq,
2065                                                dec_channel_layout,          dec->sample_fmt,   dec->sample_rate,
2066                                                0, NULL);
2067                 if (!is->swr_ctx || swr_init(is->swr_ctx) < 0) {
2068                     fprintf(stderr, "Cannot create sample rate converter for conversion of %d Hz %s %d channels to %d Hz %s %d channels!\n",
2069                         dec->sample_rate,
2070                         av_get_sample_fmt_name(dec->sample_fmt),
2071                         dec->channels,
2072                         is->audio_tgt_freq,
2073                         av_get_sample_fmt_name(is->audio_tgt_fmt),
2074                         is->audio_tgt_channels);
2075                     break;
2076                 }
2077                 is->audio_src_channel_layout = dec_channel_layout;
2078                 is->audio_src_channels = dec->channels;
2079                 is->audio_src_freq = dec->sample_rate;
2080                 is->audio_src_fmt = dec->sample_fmt;
2081             }
2082
2083             resampled_data_size = data_size;
2084             if (is->swr_ctx) {
2085                 const uint8_t *in[] = {is->audio_buf1};
2086                 uint8_t *out[] = {is->audio_buf2};
2087                 len2 = swr_convert(is->swr_ctx, out, sizeof(is->audio_buf2) / is->audio_tgt_channels / av_get_bytes_per_sample(is->audio_tgt_fmt),
2088                                                 in, data_size / dec->channels / av_get_bytes_per_sample(dec->sample_fmt));
2089                 if (len2 < 0) {
2090                     fprintf(stderr, "audio_resample() failed\n");
2091                     break;
2092                 }
2093                 if (len2 == sizeof(is->audio_buf2) / is->audio_tgt_channels / av_get_bytes_per_sample(is->audio_tgt_fmt)) {
2094                     fprintf(stderr, "warning: audio buffer is probably too small\n");
2095                     swr_init(is->swr_ctx);
2096                 }
2097                 is->audio_buf = is->audio_buf2;
2098                 resampled_data_size = len2 * is->audio_tgt_channels * av_get_bytes_per_sample(is->audio_tgt_fmt);
2099             } else {
2100                 is->audio_buf= is->audio_buf1;
2101             }
2102
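                 /* the audio clock tracks the pts at the *end* of the decoded
                    data: below it advances by
                    data_size / (channels * sample_rate * bytes_per_sample)
                    seconds for every decoded chunk. */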
2103             /* if no pts, then compute it */
2104             pts = is->audio_clock;
2105             *pts_ptr = pts;
2106             is->audio_clock += (double)data_size / (dec->channels * dec->sample_rate * av_get_bytes_per_sample(dec->sample_fmt));
2107 #ifdef DEBUG
2108             {
2109                 static double last_clock;
2110                 printf("audio: delay=%0.3f clock=%0.3f pts=%0.3f\n",
2111                        is->audio_clock - last_clock,
2112                        is->audio_clock, pts);
2113                 last_clock = is->audio_clock;
2114             }
2115 #endif
2116             return resampled_data_size;
2117         }
2118
2119         /* free the current packet */
2120         if (pkt->data)
2121             av_free_packet(pkt);
2122
2123         if (is->paused || is->audioq.abort_request) {
2124             return -1;
2125         }
2126
2127         /* read next packet */
2128         if ((new_packet = packet_queue_get(&is->audioq, pkt, 1)) < 0)
2129             return -1;
2130
2131         if (pkt->data == flush_pkt.data)
2132             avcodec_flush_buffers(dec);
2133
2134         pkt_temp->data = pkt->data;
2135         pkt_temp->size = pkt->size;
2136
2137         /* update the audio clock with the packet pts, if available */
2138         if (pkt->pts != AV_NOPTS_VALUE) {
2139             is->audio_clock = av_q2d(is->audio_st->time_base)*pkt->pts;
2140         }
2141     }
2142 }
2143
2144 /* prepare a new audio buffer */
2145 static void sdl_audio_callback(void *opaque, Uint8 *stream, int len)
2146 {
2147     VideoState *is = opaque;
2148     int audio_size, len1;
2149     int bytes_per_sec;
2150     double pts;
2151
2152     audio_callback_time = av_gettime();
2153
2154     while (len > 0) {
2155         if (is->audio_buf_index >= is->audio_buf_size) {
2156            audio_size = audio_decode_frame(is, &pts);
2157            if (audio_size < 0) {
2158                 /* if error, just output silence */
2159                is->audio_buf = is->audio_buf1;
2160                is->audio_buf_size = 256 * is->audio_tgt_channels * av_get_bytes_per_sample(is->audio_tgt_fmt);
2161                memset(is->audio_buf, 0, is->audio_buf_size);
2162            } else {
2163                if (is->show_mode != SHOW_MODE_VIDEO)
2164                    update_sample_display(is, (int16_t *)is->audio_buf, audio_size);
2165                audio_size = synchronize_audio(is, (int16_t *)is->audio_buf, audio_size,
2166                                               pts);
2167                is->audio_buf_size = audio_size;
2168            }
2169            is->audio_buf_index = 0;
2170         }
2171         len1 = is->audio_buf_size - is->audio_buf_index;
2172         if (len1 > len)
2173             len1 = len;
2174         memcpy(stream, (uint8_t *)is->audio_buf + is->audio_buf_index, len1);
2175         len -= len1;
2176         stream += len1;
2177         is->audio_buf_index += len1;
2178     }
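         /* audio_clock points at the end of the decoded data; subtract what is
            still buffered (assumed two hardware periods plus the bytes not yet
            handed to SDL) to estimate the pts of the sample currently being
            played, and record its drift against the callback time. */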
2179     bytes_per_sec = is->audio_tgt_freq * is->audio_tgt_channels * av_get_bytes_per_sample(is->audio_tgt_fmt);
2180     is->audio_write_buf_size = is->audio_buf_size - is->audio_buf_index;
2181     /* Let's assume the audio driver that is used by SDL has two periods. */
2182     is->audio_current_pts = is->audio_clock - (double)(2 * is->audio_hw_buf_size + is->audio_write_buf_size) / bytes_per_sec;
2183     is->audio_current_pts_drift = is->audio_current_pts - audio_callback_time / 1000000.0;
2184 }
2185
2186 /* open a given stream. Return 0 if OK */
2187 static int stream_component_open(VideoState *is, int stream_index)
2188 {
2189     AVFormatContext *ic = is->ic;
2190     AVCodecContext *avctx;
2191     AVCodec *codec;
2192     SDL_AudioSpec wanted_spec, spec;
2193     AVDictionary *opts;
2194     AVDictionaryEntry *t = NULL;
2195     int64_t wanted_channel_layout = 0;
2196
2197     if (stream_index < 0 || stream_index >= ic->nb_streams)
2198         return -1;
2199     avctx = ic->streams[stream_index]->codec;
2200
2201     opts = filter_codec_opts(codec_opts, avctx->codec_id, ic, ic->streams[stream_index]);
2202
2203     codec = avcodec_find_decoder(avctx->codec_id);
2204     switch(avctx->codec_type){
2205         case AVMEDIA_TYPE_AUDIO   : if(audio_codec_name   ) codec= avcodec_find_decoder_by_name(   audio_codec_name); break;
2206         case AVMEDIA_TYPE_SUBTITLE: if(subtitle_codec_name) codec= avcodec_find_decoder_by_name(subtitle_codec_name); break;
2207         case AVMEDIA_TYPE_VIDEO   : if(video_codec_name   ) codec= avcodec_find_decoder_by_name(   video_codec_name); break;
2208     }
2209     if (!codec)
2210         return -1;
2211
2212     avctx->workaround_bugs = workaround_bugs;
2213     avctx->lowres = lowres;
2214     if(lowres) avctx->flags |= CODEC_FLAG_EMU_EDGE;
2215     avctx->idct_algo= idct;
2216     if(fast) avctx->flags2 |= CODEC_FLAG2_FAST;
2217     avctx->skip_frame= skip_frame;
2218     avctx->skip_idct= skip_idct;
2219     avctx->skip_loop_filter= skip_loop_filter;
2220     avctx->error_recognition= error_recognition;
2221     avctx->error_concealment= error_concealment;
2222
2223     if(codec->capabilities & CODEC_CAP_DR1)
2224         avctx->flags |= CODEC_FLAG_EMU_EDGE;
2225
2226     if (avctx->codec_type == AVMEDIA_TYPE_AUDIO) {
2227         wanted_channel_layout = (avctx->channel_layout && avctx->channels == av_get_channel_layout_nb_channels(avctx->channel_layout)) ? avctx->channel_layout : av_get_default_channel_layout(avctx->channels);
2228         wanted_channel_layout &= ~AV_CH_LAYOUT_STEREO_DOWNMIX;
2229         wanted_spec.channels = av_get_channel_layout_nb_channels(wanted_channel_layout);
2230         wanted_spec.freq = avctx->sample_rate;
2231         if (wanted_spec.freq <= 0 || wanted_spec.channels <= 0) {
2232             fprintf(stderr, "Invalid sample rate or channel count!\n");
2233             return -1;
2234         }
2235     }
2236
2237     if (!codec ||
2238         avcodec_open2(avctx, codec, &opts) < 0)
2239         return -1;
2240     if ((t = av_dict_get(opts, "", NULL, AV_DICT_IGNORE_SUFFIX))) {
2241         av_log(NULL, AV_LOG_ERROR, "Option %s not found.\n", t->key);
2242         return AVERROR_OPTION_NOT_FOUND;
2243     }
2244
2245     /* prepare audio output */
2246     if (avctx->codec_type == AVMEDIA_TYPE_AUDIO) {
2247         wanted_spec.format = AUDIO_S16SYS;
2248         wanted_spec.silence = 0;
2249         wanted_spec.samples = SDL_AUDIO_BUFFER_SIZE;
2250         wanted_spec.callback = sdl_audio_callback;
2251         wanted_spec.userdata = is;
2252         if (SDL_OpenAudio(&wanted_spec, &spec) < 0) {
2253             fprintf(stderr, "SDL_OpenAudio: %s\n", SDL_GetError());
2254             return -1;
2255         }
2256         is->audio_hw_buf_size = spec.size;
2257         if (spec.format != AUDIO_S16SYS) {
2258             fprintf(stderr, "SDL advised audio format %d is not supported!\n", spec.format);
2259             return -1;
2260         }
2261         if (spec.channels != wanted_spec.channels) {
2262             wanted_channel_layout = av_get_default_channel_layout(spec.channels);
2263             if (!wanted_channel_layout) {
2264                 fprintf(stderr, "SDL advised channel count %d is not supported!\n", spec.channels);
2265                 return -1;
2266             }
2267         }
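             /* start with source format == target format (whatever SDL accepted);
                audio_decode_frame() creates a swresample context on the fly if the
                decoder output turns out to differ. */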
2268         is->audio_src_fmt = is->audio_tgt_fmt = AV_SAMPLE_FMT_S16;
2269         is->audio_src_freq = is->audio_tgt_freq = spec.freq;
2270         is->audio_src_channel_layout = is->audio_tgt_channel_layout = wanted_channel_layout;
2271         is->audio_src_channels = is->audio_tgt_channels = spec.channels;
2272     }
2273
2274     ic->streams[stream_index]->discard = AVDISCARD_DEFAULT;
2275     switch(avctx->codec_type) {
2276     case AVMEDIA_TYPE_AUDIO:
2277         is->audio_stream = stream_index;
2278         is->audio_st = ic->streams[stream_index];
2279         is->audio_buf_size = 0;
2280         is->audio_buf_index = 0;
2281
2282         /* init averaging filter */
2283         is->audio_diff_avg_coef = exp(log(0.01) / AUDIO_DIFF_AVG_NB);
2284         is->audio_diff_avg_count = 0;
2285         /* since we do not have a precise enough audio FIFO fullness measure,
2286            we correct audio sync only if the error is larger than this threshold */
2287         is->audio_diff_threshold = 2.0 * SDL_AUDIO_BUFFER_SIZE / wanted_spec.freq;
2288
2289         memset(&is->audio_pkt, 0, sizeof(is->audio_pkt));
2290         packet_queue_init(&is->audioq);
2291         SDL_PauseAudio(0);
2292         break;
2293     case AVMEDIA_TYPE_VIDEO:
2294         is->video_stream = stream_index;
2295         is->video_st = ic->streams[stream_index];
2296
2297         packet_queue_init(&is->videoq);
2298         is->video_tid = SDL_CreateThread(video_thread, is);
2299         break;
2300     case AVMEDIA_TYPE_SUBTITLE:
2301         is->subtitle_stream = stream_index;
2302         is->subtitle_st = ic->streams[stream_index];
2303         packet_queue_init(&is->subtitleq);
2304
2305         is->subtitle_tid = SDL_CreateThread(subtitle_thread, is);
2306         break;
2307     default:
2308         break;
2309     }
2310     return 0;
2311 }
2312
2313 static void stream_component_close(VideoState *is, int stream_index)
2314 {
2315     AVFormatContext *ic = is->ic;
2316     AVCodecContext *avctx;
2317
2318     if (stream_index < 0 || stream_index >= ic->nb_streams)
2319         return;
2320     avctx = ic->streams[stream_index]->codec;
2321
2322     switch(avctx->codec_type) {
2323     case AVMEDIA_TYPE_AUDIO:
2324         packet_queue_abort(&is->audioq);
2325
2326         SDL_CloseAudio();
2327
2328         packet_queue_end(&is->audioq);
2329         if (is->swr_ctx)
2330             swr_free(&is->swr_ctx);
2331         av_free_packet(&is->audio_pkt);
2332
2333         if (is->rdft) {
2334             av_rdft_end(is->rdft);
2335             av_freep(&is->rdft_data);
2336         }
2337         break;
2338     case AVMEDIA_TYPE_VIDEO:
2339         packet_queue_abort(&is->videoq);
2340
2341         /* note: we also signal this mutex to make sure we deblock the
2342            video thread in all cases */
2343         SDL_LockMutex(is->pictq_mutex);
2344         SDL_CondSignal(is->pictq_cond);
2345         SDL_UnlockMutex(is->pictq_mutex);
2346
2347         SDL_WaitThread(is->video_tid, NULL);
2348
2349         packet_queue_end(&is->videoq);
2350         break;
2351     case AVMEDIA_TYPE_SUBTITLE:
2352         packet_queue_abort(&is->subtitleq);
2353
2354         /* note: we also signal this mutex to make sure we deblock the
2355            subtitle thread in all cases */
2356         SDL_LockMutex(is->subpq_mutex);
2357         is->subtitle_stream_changed = 1;
2358
2359         SDL_CondSignal(is->subpq_cond);
2360         SDL_UnlockMutex(is->subpq_mutex);
2361
2362         SDL_WaitThread(is->subtitle_tid, NULL);
2363
2364         packet_queue_end(&is->subtitleq);
2365         break;
2366     default:
2367         break;
2368     }
2369
2370     ic->streams[stream_index]->discard = AVDISCARD_ALL;
2371     avcodec_close(avctx);
2372     switch(avctx->codec_type) {
2373     case AVMEDIA_TYPE_AUDIO:
2374         is->audio_st = NULL;
2375         is->audio_stream = -1;
2376         break;
2377     case AVMEDIA_TYPE_VIDEO:
2378         is->video_st = NULL;
2379         is->video_stream = -1;
2380         break;
2381     case AVMEDIA_TYPE_SUBTITLE:
2382         is->subtitle_st = NULL;
2383         is->subtitle_stream = -1;
2384         break;
2385     default:
2386         break;
2387     }
2388 }
2389
2390 /* since we have only one decoding thread, we can use a global
2391    variable instead of a thread local variable */
2392 static VideoState *global_video_state;
2393
2394 static int decode_interrupt_cb(void)
2395 {
2396     return (global_video_state && global_video_state->abort_request);
2397 }
2398
2399 /* this thread gets the stream from the disk or the network */
2400 static int read_thread(void *arg)
2401 {
2402     VideoState *is = arg;
2403     AVFormatContext *ic = NULL;
2404     int err, i, ret;
2405     int st_index[AVMEDIA_TYPE_NB];
2406     AVPacket pkt1, *pkt = &pkt1;
2407     int eof=0;
2408     int pkt_in_play_range = 0;
2409     AVDictionaryEntry *t;
2410     AVDictionary **opts;
2411     int orig_nb_streams;
2412
2413     memset(st_index, -1, sizeof(st_index));
2414     is->video_stream = -1;
2415     is->audio_stream = -1;
2416     is->subtitle_stream = -1;
2417
2418     global_video_state = is;
2419     avio_set_interrupt_cb(decode_interrupt_cb);
2420
2421     err = avformat_open_input(&ic, is->filename, is->iformat, &format_opts);
2422     if (err < 0) {
2423         print_error(is->filename, err);
2424         ret = -1;
2425         goto fail;
2426     }
2427     if ((t = av_dict_get(format_opts, "", NULL, AV_DICT_IGNORE_SUFFIX))) {
2428         av_log(NULL, AV_LOG_ERROR, "Option %s not found.\n", t->key);
2429         ret = AVERROR_OPTION_NOT_FOUND;
2430         goto fail;
2431     }
2432     is->ic = ic;
2433
2434     if(genpts)
2435         ic->flags |= AVFMT_FLAG_GENPTS;
2436
2437     av_dict_set(&codec_opts, "request_channels", "2", 0);
2438
2439     opts = setup_find_stream_info_opts(ic, codec_opts);
2440     orig_nb_streams = ic->nb_streams;
2441
2442     err = avformat_find_stream_info(ic, opts);
2443     if (err < 0) {
2444         fprintf(stderr, "%s: could not find codec parameters\n", is->filename);
2445         ret = -1;
2446         goto fail;
2447     }
2448     for (i = 0; i < orig_nb_streams; i++)
2449         av_dict_free(&opts[i]);
2450     av_freep(&opts);
2451
2452     if(ic->pb)
2453         ic->pb->eof_reached= 0; //FIXME hack, ffplay maybe should not use url_feof() to test for the end
2454
2455     if(seek_by_bytes<0)
2456         seek_by_bytes= !!(ic->iformat->flags & AVFMT_TS_DISCONT);
2457
2458     /* if seeking requested, we execute it */
2459     if (start_time != AV_NOPTS_VALUE) {
2460         int64_t timestamp;
2461
2462         timestamp = start_time;
2463         /* add the stream start time */
2464         if (ic->start_time != AV_NOPTS_VALUE)
2465             timestamp += ic->start_time;
2466         ret = avformat_seek_file(ic, -1, INT64_MIN, timestamp, INT64_MAX, 0);
2467         if (ret < 0) {
2468             fprintf(stderr, "%s: could not seek to position %0.3f\n",
2469                     is->filename, (double)timestamp / AV_TIME_BASE);
2470         }
2471     }
2472
2473     for (i = 0; i < ic->nb_streams; i++)
2474         ic->streams[i]->discard = AVDISCARD_ALL;
2475     if (!video_disable)
2476         st_index[AVMEDIA_TYPE_VIDEO] =
2477             av_find_best_stream(ic, AVMEDIA_TYPE_VIDEO,
2478                                 wanted_stream[AVMEDIA_TYPE_VIDEO], -1, NULL, 0);
2479     if (!audio_disable)
2480         st_index[AVMEDIA_TYPE_AUDIO] =
2481             av_find_best_stream(ic, AVMEDIA_TYPE_AUDIO,
2482                                 wanted_stream[AVMEDIA_TYPE_AUDIO],
2483                                 st_index[AVMEDIA_TYPE_VIDEO],
2484                                 NULL, 0);
2485     if (!video_disable)
2486         st_index[AVMEDIA_TYPE_SUBTITLE] =
2487             av_find_best_stream(ic, AVMEDIA_TYPE_SUBTITLE,
2488                                 wanted_stream[AVMEDIA_TYPE_SUBTITLE],
2489                                 (st_index[AVMEDIA_TYPE_AUDIO] >= 0 ?
2490                                  st_index[AVMEDIA_TYPE_AUDIO] :
2491                                  st_index[AVMEDIA_TYPE_VIDEO]),
2492                                 NULL, 0);
2493     if (show_status) {
2494         av_dump_format(ic, 0, is->filename, 0);
2495     }
2496
2497     is->show_mode = show_mode;
2498
2499     /* open the streams */
2500     if (st_index[AVMEDIA_TYPE_AUDIO] >= 0) {
2501         stream_component_open(is, st_index[AVMEDIA_TYPE_AUDIO]);
2502     }
2503
2504     ret=-1;
2505     if (st_index[AVMEDIA_TYPE_VIDEO] >= 0) {
2506         ret= stream_component_open(is, st_index[AVMEDIA_TYPE_VIDEO]);
2507     }
2508     is->refresh_tid = SDL_CreateThread(refresh_thread, is);
2509     if (is->show_mode == SHOW_MODE_NONE)
2510         is->show_mode = ret >= 0 ? SHOW_MODE_VIDEO : SHOW_MODE_RDFT;
2511
2512     if (st_index[AVMEDIA_TYPE_SUBTITLE] >= 0) {
2513         stream_component_open(is, st_index[AVMEDIA_TYPE_SUBTITLE]);
2514     }
2515
2516     if (is->video_stream < 0 && is->audio_stream < 0) {
2517         fprintf(stderr, "%s: could not open codecs\n", is->filename);
2518         ret = -1;
2519         goto fail;
2520     }
2521
2522     for(;;) {
2523         if (is->abort_request)
2524             break;
2525         if (is->paused != is->last_paused) {
2526             is->last_paused = is->paused;
2527             if (is->paused)
2528                 is->read_pause_return= av_read_pause(ic);
2529             else
2530                 av_read_play(ic);
2531         }
2532 #if CONFIG_RTSP_DEMUXER
2533         if (is->paused && !strcmp(ic->iformat->name, "rtsp")) {
2534             /* wait 10 ms to avoid trying to get another packet */
2535             /* XXX: horrible */
2536             SDL_Delay(10);
2537             continue;
2538         }
2539 #endif
2540         if (is->seek_req) {
2541             int64_t seek_target= is->seek_pos;
2542             int64_t seek_min= is->seek_rel > 0 ? seek_target - is->seek_rel + 2: INT64_MIN;
2543             int64_t seek_max= is->seek_rel < 0 ? seek_target - is->seek_rel - 2: INT64_MAX;
2544 //FIXME the +-2 compensates for rounding not being done in the correct direction
2545 //      when the seek_pos/seek_rel variables are generated
2546
2547             ret = avformat_seek_file(is->ic, -1, seek_min, seek_target, seek_max, is->seek_flags);
2548             if (ret < 0) {
2549                 fprintf(stderr, "%s: error while seeking\n", is->ic->filename);
2550             }else{
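                     /* after a successful seek, drop everything still queued and
                        push flush_pkt so each decoder thread calls
                        avcodec_flush_buffers() before decoding post-seek data */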
2551                 if (is->audio_stream >= 0) {
2552                     packet_queue_flush(&is->audioq);
2553                     packet_queue_put(&is->audioq, &flush_pkt);
2554                 }
2555                 if (is->subtitle_stream >= 0) {
2556                     packet_queue_flush(&is->subtitleq);
2557                     packet_queue_put(&is->subtitleq, &flush_pkt);
2558                 }
2559                 if (is->video_stream >= 0) {
2560                     packet_queue_flush(&is->videoq);
2561                     packet_queue_put(&is->videoq, &flush_pkt);
2562                 }
2563             }
2564             is->seek_req = 0;
2565             eof= 0;
2566         }
2567
2568         /* if the queues are full, no need to read more */
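             /* reading stops when the combined queues exceed MAX_QUEUE_SIZE bytes,
                or when every open stream already has enough buffered data (audio
                above MIN_AUDIOQ_SIZE bytes, video and subtitles above MIN_FRAMES
                packets). */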
2569         if (   is->audioq.size + is->videoq.size + is->subtitleq.size > MAX_QUEUE_SIZE
2570             || (   (is->audioq   .size  > MIN_AUDIOQ_SIZE || is->audio_stream<0)
2571                 && (is->videoq   .nb_packets > MIN_FRAMES || is->video_stream<0)
2572                 && (is->subtitleq.nb_packets > MIN_FRAMES || is->subtitle_stream<0))) {
2573             /* wait 10 ms */
2574             SDL_Delay(10);
2575             continue;
2576         }
2577         if(eof) {
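                 /* at end of file, queue empty (NULL data) packets so the decoders
                    can flush any frames they still buffer; once all queues drain,
                    restart playback if looping was requested or exit when
                    -autoexit is set. */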
2578             if(is->video_stream >= 0){
2579                 av_init_packet(pkt);
2580                 pkt->data=NULL;
2581                 pkt->size=0;
2582                 pkt->stream_index= is->video_stream;
2583                 packet_queue_put(&is->videoq, pkt);
2584             }
2585             if (is->audio_stream >= 0 &&
2586                 is->audio_st->codec->codec->capabilities & CODEC_CAP_DELAY) {
2587                 av_init_packet(pkt);
2588                 pkt->data = NULL;
2589                 pkt->size = 0;
2590                 pkt->stream_index = is->audio_stream;
2591                 packet_queue_put(&is->audioq, pkt);
2592             }
2593             SDL_Delay(10);
2594             if(is->audioq.size + is->videoq.size + is->subtitleq.size ==0){
2595                 if(loop!=1 && (!loop || --loop)){
2596                     stream_seek(is, start_time != AV_NOPTS_VALUE ? start_time : 0, 0, 0);
2597                 }else if(autoexit){
2598                     ret=AVERROR_EOF;
2599                     goto fail;
2600                 }
2601             }
2602             eof=0;
2603             continue;
2604         }
2605         ret = av_read_frame(ic, pkt);
2606         if (ret < 0) {
2607             if (ret == AVERROR_EOF || url_feof(ic->pb))
2608                 eof=1;
2609             if (ic->pb && ic->pb->error)
2610                 break;
2611             SDL_Delay(100); /* wait for user event */
2612             continue;
2613         }
2614         /* check if packet is in play range specified by user, then queue, otherwise discard */
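             /* i.e. the packet pts, taken relative to the stream start time and
                converted to seconds, minus the requested start position, must not
                exceed the requested -t duration */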
2615         pkt_in_play_range = duration == AV_NOPTS_VALUE ||
2616                 (pkt->pts - ic->streams[pkt->stream_index]->start_time) *
2617                 av_q2d(ic->streams[pkt->stream_index]->time_base) -
2618                 (double)(start_time != AV_NOPTS_VALUE ? start_time : 0)/1000000
2619                 <= ((double)duration/1000000);
2620         if (pkt->stream_index == is->audio_stream && pkt_in_play_range) {
2621             packet_queue_put(&is->audioq, pkt);
2622         } else if (pkt->stream_index == is->video_stream && pkt_in_play_range) {
2623             packet_queue_put(&is->videoq, pkt);
2624         } else if (pkt->stream_index == is->subtitle_stream && pkt_in_play_range) {
2625             packet_queue_put(&is->subtitleq, pkt);
2626         } else {
2627             av_free_packet(pkt);
2628         }
2629     }
2630     /* wait until the end */
2631     while (!is->abort_request) {
2632         SDL_Delay(100);
2633     }
2634
2635     ret = 0;
2636  fail:
2637     /* disable interrupting */
2638     global_video_state = NULL;
2639
2640     /* close each stream */
2641     if (is->audio_stream >= 0)
2642         stream_component_close(is, is->audio_stream);
2643     if (is->video_stream >= 0)
2644         stream_component_close(is, is->video_stream);
2645     if (is->subtitle_stream >= 0)
2646         stream_component_close(is, is->subtitle_stream);
2647     if (is->ic) {
2648         av_close_input_file(is->ic);
2649         is->ic = NULL; /* safety */
2650     }
2651     avio_set_interrupt_cb(NULL);
2652
2653     if (ret != 0) {
2654         SDL_Event event;
2655
2656         event.type = FF_QUIT_EVENT;
2657         event.user.data1 = is;
2658         SDL_PushEvent(&event);
2659     }
2660     return 0;
2661 }
2662
2663 static VideoState *stream_open(const char *filename, AVInputFormat *iformat)
2664 {
2665     VideoState *is;
2666
2667     is = av_mallocz(sizeof(VideoState));
2668     if (!is)
2669         return NULL;
2670     av_strlcpy(is->filename, filename, sizeof(is->filename));
2671     is->iformat = iformat;
2672     is->ytop = 0;
2673     is->xleft = 0;
2674
2675     /* start video display */
2676     is->pictq_mutex = SDL_CreateMutex();
2677     is->pictq_cond = SDL_CreateCond();
2678
2679     is->subpq_mutex = SDL_CreateMutex();
2680     is->subpq_cond = SDL_CreateCond();
2681
2682     is->av_sync_type = av_sync_type;
2683     is->read_tid = SDL_CreateThread(read_thread, is);
2684     if (!is->read_tid) {
2685         av_free(is);
2686         return NULL;
2687     }
2688     return is;
2689 }
2690
2691 static void stream_cycle_channel(VideoState *is, int codec_type)
2692 {
2693     AVFormatContext *ic = is->ic;
2694     int start_index, stream_index;
2695     AVStream *st;
2696
2697     if (codec_type == AVMEDIA_TYPE_VIDEO)
2698         start_index = is->video_stream;
2699     else if (codec_type == AVMEDIA_TYPE_AUDIO)
2700         start_index = is->audio_stream;
2701     else
2702         start_index = is->subtitle_stream;
2703     if (start_index < (codec_type == AVMEDIA_TYPE_SUBTITLE ? -1 : 0))
2704         return;
2705     stream_index = start_index;
2706     for(;;) {
2707         if (++stream_index >= is->ic->nb_streams)
2708         {
2709             if (codec_type == AVMEDIA_TYPE_SUBTITLE)
2710             {
2711                 stream_index = -1;
2712                 goto the_end;
2713             } else
2714                 stream_index = 0;
2715         }
2716         if (stream_index == start_index)
2717             return;
2718         st = ic->streams[stream_index];
2719         if (st->codec->codec_type == codec_type) {
2720             /* check that parameters are OK */
2721             switch(codec_type) {
2722             case AVMEDIA_TYPE_AUDIO:
2723                 if (st->codec->sample_rate != 0 &&
2724                     st->codec->channels != 0)
2725                     goto the_end;
2726                 break;
2727             case AVMEDIA_TYPE_VIDEO:
2728             case AVMEDIA_TYPE_SUBTITLE:
2729                 goto the_end;
2730             default:
2731                 break;
2732             }
2733         }
2734     }
2735  the_end:
2736     stream_component_close(is, start_index);
2737     stream_component_open(is, stream_index);
2738 }
2739
2740
2741 static void toggle_full_screen(VideoState *is)
2742 {
2743     is_full_screen = !is_full_screen;
2744 #if defined(__APPLE__) && SDL_VERSION_ATLEAST(1, 2, 14)
2745     /* OSX needs to reallocate the SDL overlays */
2746     for (int i = 0; i < VIDEO_PICTURE_QUEUE_SIZE; i++) {
2747         is->pictq[i].reallocate = 1;
2748     }
2749 #endif
2750     video_open(is);
2751 }
2752
2753 static void toggle_pause(VideoState *is)
2754 {
2755     stream_toggle_pause(is);
2756     is->step = 0;
2757 }
2758
2759 static void step_to_next_frame(VideoState *is)
2760 {
2761     /* if the stream is paused, unpause it, then step */
2762     if (is->paused)
2763         stream_toggle_pause(is);
2764     is->step = 1;
2765 }
2766
2767 static void toggle_audio_display(VideoState *is)
2768 {
2769     int bgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0x00);
2770     is->show_mode = (is->show_mode + 1) % SHOW_MODE_NB;
2771     fill_rectangle(screen,
2772                 is->xleft, is->ytop, is->width, is->height,
2773                 bgcolor);
2774     SDL_UpdateRect(screen, is->xleft, is->ytop, is->width, is->height);
2775 }
2776
2777 /* handle an event sent by the GUI */
2778 static void event_loop(VideoState *cur_stream)
2779 {
2780     SDL_Event event;
2781     double incr, pos, frac;
2782
2783     for(;;) {
2784         double x;
2785         SDL_WaitEvent(&event);
2786         switch(event.type) {
2787         case SDL_KEYDOWN:
2788             if (exit_on_keydown) {
2789                 do_exit(cur_stream);
2790                 break;
2791             }
2792             switch(event.key.keysym.sym) {
2793             case SDLK_ESCAPE:
2794             case SDLK_q:
2795                 do_exit(cur_stream);
2796                 break;
2797             case SDLK_f:
2798                 toggle_full_screen(cur_stream);
2799                 break;
2800             case SDLK_p:
2801             case SDLK_SPACE:
2802                 toggle_pause(cur_stream);
2803                 break;
2804             case SDLK_s: //S: Step to next frame
2805                 step_to_next_frame(cur_stream);
2806                 break;
2807             case SDLK_a:
2808                 stream_cycle_channel(cur_stream, AVMEDIA_TYPE_AUDIO);
2809                 break;
2810             case SDLK_v:
2811                 stream_cycle_channel(cur_stream, AVMEDIA_TYPE_VIDEO);
2812                 break;
2813             case SDLK_t:
2814                 stream_cycle_channel(cur_stream, AVMEDIA_TYPE_SUBTITLE);
2815                 break;
2816             case SDLK_w:
2817                 toggle_audio_display(cur_stream);
2818                 break;
2819             case SDLK_LEFT:
2820                 incr = -10.0;
2821                 goto do_seek;
2822             case SDLK_RIGHT:
2823                 incr = 10.0;
2824                 goto do_seek;
2825             case SDLK_UP:
2826                 incr = 60.0;
2827                 goto do_seek;
2828             case SDLK_DOWN:
2829                 incr = -60.0;
2830             do_seek:
2831                 if (seek_by_bytes) {
2832                     if (cur_stream->video_stream >= 0 && cur_stream->video_current_pos>=0){
2833                         pos= cur_stream->video_current_pos;
2834                     }else if(cur_stream->audio_stream >= 0 && cur_stream->audio_pkt.pos>=0){
2835                         pos= cur_stream->audio_pkt.pos;
2836                     }else
2837                         pos = avio_tell(cur_stream->ic->pb);
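                             /* convert the seek increment from seconds to bytes
                                using the container bit rate (bits/s -> bytes/s),
                                falling back to roughly 180 kB/s when the bit rate
                                is unknown */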
2838                     if (cur_stream->ic->bit_rate)
2839                         incr *= cur_stream->ic->bit_rate / 8.0;
2840                     else
2841                         incr *= 180000.0;
2842                     pos += incr;
2843                     stream_seek(cur_stream, pos, incr, 1);
2844                 } else {
2845                     pos = get_master_clock(cur_stream);
2846                     pos += incr;
2847                     stream_seek(cur_stream, (int64_t)(pos * AV_TIME_BASE), (int64_t)(incr * AV_TIME_BASE), 0);
2848                 }
2849                 break;
2850             default:
2851                 break;
2852             }
2853             break;
2854         case SDL_MOUSEBUTTONDOWN:
2855             if (exit_on_mousedown) {
2856                 do_exit(cur_stream);
2857                 break;
2858             }
2859         case SDL_MOUSEMOTION:
2860             if(event.type ==SDL_MOUSEBUTTONDOWN){
2861                 x= event.button.x;
2862             }else{
2863                 if(event.motion.state != SDL_PRESSED)
2864                     break;
2865                 x= event.motion.x;
2866             }
2867             if(seek_by_bytes || cur_stream->ic->duration<=0){
2868                 uint64_t size=  avio_size(cur_stream->ic->pb);
2869                 stream_seek(cur_stream, size*x/cur_stream->width, 0, 1);
2870             }else{
2871                 int64_t ts;
2872                 int ns, hh, mm, ss;
2873                 int tns, thh, tmm, tss;
2874                 tns = cur_stream->ic->duration/1000000LL;
2875                 thh = tns/3600;
2876                 tmm = (tns%3600)/60;
2877                 tss = (tns%60);
2878                 frac = x/cur_stream->width;
2879                 ns = frac*tns;
2880                 hh = ns/3600;
2881                 mm = (ns%3600)/60;
2882                 ss = (ns%60);
2883                 fprintf(stderr, "Seek to %2.0f%% (%2d:%02d:%02d) of total duration (%2d:%02d:%02d)       \n", frac*100,
2884                         hh, mm, ss, thh, tmm, tss);
2885                 ts = frac*cur_stream->ic->duration;
2886                 if (cur_stream->ic->start_time != AV_NOPTS_VALUE)
2887                     ts += cur_stream->ic->start_time;
2888                 stream_seek(cur_stream, ts, 0, 0);
2889             }
2890             break;
2891         case SDL_VIDEORESIZE:
2892             screen = SDL_SetVideoMode(event.resize.w, event.resize.h, 0,
2893                                       SDL_HWSURFACE|SDL_RESIZABLE|SDL_ASYNCBLIT|SDL_HWACCEL);
2894             screen_width = cur_stream->width = event.resize.w;
2895             screen_height= cur_stream->height= event.resize.h;
2896             break;
2897         case SDL_QUIT:
2898         case FF_QUIT_EVENT:
2899             do_exit(cur_stream);
2900             break;
2901         case FF_ALLOC_EVENT:
2902             video_open(event.user.data1);
2903             alloc_picture(event.user.data1);
2904             break;
2905         case FF_REFRESH_EVENT:
2906             video_refresh(event.user.data1);
2907             cur_stream->refresh=0;
2908             break;
2909         default:
2910             break;
2911         }
2912     }
2913 }
2914
2915 static int opt_frame_size(const char *opt, const char *arg)
2916 {
2917     av_log(NULL, AV_LOG_WARNING, "Option -s is deprecated, use -video_size.\n");
2918     return opt_default("video_size", arg);
2919 }
2920
2921 static int opt_width(const char *opt, const char *arg)
2922 {
2923     screen_width = parse_number_or_die(opt, arg, OPT_INT64, 1, INT_MAX);
2924     return 0;
2925 }
2926
2927 static int opt_height(const char *opt, const char *arg)
2928 {
2929     screen_height = parse_number_or_die(opt, arg, OPT_INT64, 1, INT_MAX);
2930     return 0;
2931 }
2932
2933 static int opt_format(const char *opt, const char *arg)
2934 {
2935     file_iformat = av_find_input_format(arg);
2936     if (!file_iformat) {
2937         fprintf(stderr, "Unknown input format: %s\n", arg);
2938         return AVERROR(EINVAL);
2939     }
2940     return 0;
2941 }
2942
2943 static int opt_frame_pix_fmt(const char *opt, const char *arg)
2944 {
2945     av_log(NULL, AV_LOG_WARNING, "Option -pix_fmt is deprecated, use -pixel_format.\n");
2946     return opt_default("pixel_format", arg);
2947 }
2948
2949 static int opt_sync(const char *opt, const char *arg)
2950 {
2951     if (!strcmp(arg, "audio"))
2952         av_sync_type = AV_SYNC_AUDIO_MASTER;
2953     else if (!strcmp(arg, "video"))
2954         av_sync_type = AV_SYNC_VIDEO_MASTER;
2955     else if (!strcmp(arg, "ext"))
2956         av_sync_type = AV_SYNC_EXTERNAL_CLOCK;
2957     else {
2958         fprintf(stderr, "Unknown value for %s: %s\n", opt, arg);
2959         exit(1);
2960     }
2961     return 0;
2962 }
2963
2964 static int opt_seek(const char *opt, const char *arg)
2965 {
2966     start_time = parse_time_or_die(opt, arg, 1);
2967     return 0;
2968 }
2969
2970 static int opt_duration(const char *opt, const char *arg)
2971 {
2972     duration = parse_time_or_die(opt, arg, 1);
2973     return 0;
2974 }
2975
2976 static int opt_show_mode(const char *opt, const char *arg)
2977 {
2978     show_mode = !strcmp(arg, "video") ? SHOW_MODE_VIDEO :
2979                 !strcmp(arg, "waves") ? SHOW_MODE_WAVES :
2980                 !strcmp(arg, "rdft" ) ? SHOW_MODE_RDFT  :
2981                 parse_number_or_die(opt, arg, OPT_INT, 0, SHOW_MODE_NB-1);
2982     return 0;
2983 }
2984
2985 static void opt_input_file(void *optctx, const char *filename)
2986 {
2987     if (input_filename) {
2988         fprintf(stderr, "Argument '%s' provided as input filename, but '%s' was already specified.\n",
2989                 filename, input_filename);
2990         exit_program(1);
2991     }
2992     if (!strcmp(filename, "-"))
2993         filename = "pipe:";
2994     input_filename = filename;
2995 }
2996
2997 static int opt_codec(void *o, const char *opt, const char *arg)
2998 {
2999     switch(opt[strlen(opt)-1]){
3000     case 'a' :    audio_codec_name = arg; break;
3001     case 's' : subtitle_codec_name = arg; break;
3002     case 'v' :    video_codec_name = arg; break;
3003     }
3004     return 0;
3005 }
3006
3007 static int dummy;
3008
3009 static const OptionDef options[] = {
3010 #include "cmdutils_common_opts.h"
3011     { "x", HAS_ARG, {(void*)opt_width}, "force displayed width", "width" },
3012     { "y", HAS_ARG, {(void*)opt_height}, "force displayed height", "height" },
3013     { "s", HAS_ARG | OPT_VIDEO, {(void*)opt_frame_size}, "set frame size (WxH or abbreviation)", "size" },
3014     { "fs", OPT_BOOL, {(void*)&is_full_screen}, "force full screen" },
3015     { "an", OPT_BOOL, {(void*)&audio_disable}, "disable audio" },
3016     { "vn", OPT_BOOL, {(void*)&video_disable}, "disable video" },
3017     { "ast", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&wanted_stream[AVMEDIA_TYPE_AUDIO]}, "select desired audio stream", "stream_number" },
3018     { "vst", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&wanted_stream[AVMEDIA_TYPE_VIDEO]}, "select desired video stream", "stream_number" },
3019     { "sst", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&wanted_stream[AVMEDIA_TYPE_SUBTITLE]}, "select desired subtitle stream", "stream_number" },
3020     { "ss", HAS_ARG, {(void*)&opt_seek}, "seek to a given position in seconds", "pos" },
3021     { "t", HAS_ARG, {(void*)&opt_duration}, "play \"duration\" seconds of audio/video", "duration" },
3022     { "bytes", OPT_INT | HAS_ARG, {(void*)&seek_by_bytes}, "seek by bytes 0=off 1=on -1=auto", "val" },
3023     { "nodisp", OPT_BOOL, {(void*)&display_disable}, "disable graphical display" },
3024     { "f", HAS_ARG, {(void*)opt_format}, "force format", "fmt" },
3025     { "pix_fmt", HAS_ARG | OPT_EXPERT | OPT_VIDEO, {(void*)opt_frame_pix_fmt}, "set pixel format", "format" },
3026     { "stats", OPT_BOOL | OPT_EXPERT, {(void*)&show_status}, "show status", "" },
3027     { "bug", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&workaround_bugs}, "workaround bugs", "" },
3028     { "fast", OPT_BOOL | OPT_EXPERT, {(void*)&fast}, "non spec compliant optimizations", "" },
3029     { "genpts", OPT_BOOL | OPT_EXPERT, {(void*)&genpts}, "generate pts", "" },
3030     { "drp", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&decoder_reorder_pts}, "let decoder reorder pts 0=off 1=on -1=auto", ""},
3031     { "lowres", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&lowres}, "decode at reduced resolution (1=1/2, 2=1/4, ...)", "n" },
3032     { "skiploop", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&skip_loop_filter}, "skip loop filtering for the selected frames", "skip_mode" },
3033     { "skipframe", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&skip_frame}, "skip decoding of the selected frames", "skip_mode" },
3034     { "skipidct", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&skip_idct}, "skip IDCT/dequantization for the selected frames", "skip_mode" },
3035     { "idct", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&idct}, "set idct algo",  "algo" },
3036     { "er", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&error_recognition}, "set error detection threshold (0-4)",  "threshold" },
3037     { "ec", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&error_concealment}, "set error concealment options",  "bit_mask" },
3038     { "sync", HAS_ARG | OPT_EXPERT, {(void*)opt_sync}, "set audio-video sync type (type=audio/video/ext)", "type" },
3039     { "autoexit", OPT_BOOL | OPT_EXPERT, {(void*)&autoexit}, "exit at the end", "" },
3040     { "exitonkeydown", OPT_BOOL | OPT_EXPERT, {(void*)&exit_on_keydown}, "exit on key down", "" },
3041     { "exitonmousedown", OPT_BOOL | OPT_EXPERT, {(void*)&exit_on_mousedown}, "exit on mouse down", "" },
3042     { "loop", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&loop}, "set number of times the playback shall be looped", "loop count" },
3043     { "framedrop", OPT_BOOL | OPT_EXPERT, {(void*)&framedrop}, "drop frames when CPU is too slow", "" },
3044     { "window_title", OPT_STRING | HAS_ARG, {(void*)&window_title}, "set window title", "window title" },
3045 #if CONFIG_AVFILTER
3046     { "vf", OPT_STRING | HAS_ARG, {(void*)&vfilters}, "video filters", "filter list" },
3047 #endif
3048     { "rdftspeed", OPT_INT | HAS_ARG| OPT_AUDIO | OPT_EXPERT, {(void*)&rdftspeed}, "rdft speed", "msecs" },
3049     { "showmode", HAS_ARG, {(void*)opt_show_mode}, "select show mode (0 = video, 1 = waves, 2 = RDFT)", "mode" },
3050     { "default", HAS_ARG | OPT_AUDIO | OPT_VIDEO | OPT_EXPERT, {(void*)opt_default}, "generic catch all option", "" },
3051     { "i", OPT_BOOL, {(void *)&dummy}, "read specified file", "input_file"},
3052     { "codec", HAS_ARG | OPT_FUNC2, {(void*)opt_codec}, "force decoder", "decoder" },
3053     { NULL, },
3054 };
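/* Illustrative invocation exercising a few of the options above:
 *     ffplay -ss 60 -t 10 -ast 1 -stats input.mkv
 * i.e. seek to one minute, play ten seconds of it, request audio stream 1 and
 * print status information while playing. */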
3055
3056 static void show_usage(void)
3057 {
3058     printf("Simple media player\n");
3059     printf("usage: %s [options] input_file\n", program_name);
3060     printf("\n");
3061 }
3062
3063 static int opt_help(const char *opt, const char *arg)
3064 {
3065     av_log_set_callback(log_callback_help);
3066     show_usage();
3067     show_help_options(options, "Main options:\n",
3068                       OPT_EXPERT, 0);
3069     show_help_options(options, "\nAdvanced options:\n",
3070                       OPT_EXPERT, OPT_EXPERT);
3071     printf("\n");
3072     show_help_children(avcodec_get_class(), AV_OPT_FLAG_DECODING_PARAM);
3073     show_help_children(avformat_get_class(), AV_OPT_FLAG_DECODING_PARAM);
3074 #if !CONFIG_AVFILTER
3075     show_help_children(sws_get_class(), AV_OPT_FLAG_ENCODING_PARAM);
3076 #endif
3077     printf("\nWhile playing:\n"
3078            "q, ESC              quit\n"
3079            "f                   toggle full screen\n"
3080            "p, SPC              pause\n"
3081            "a                   cycle audio channel\n"
3082            "v                   cycle video channel\n"
3083            "t                   cycle subtitle channel\n"
3084            "w                   show audio waves\n"
3085            "s                   activate frame-step mode\n"
3086            "left/right          seek backward/forward 10 seconds\n"
3087            "down/up             seek backward/forward 1 minute\n"
3088            "mouse click         seek to percentage in file corresponding to fraction of width\n"
3089            );
3090     return 0;
3091 }
3092
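/* Lock manager callback registered with av_lockmgr_register() in main().  The
 * libraries call it to create, obtain, release and destroy mutexes so that,
 * for example, several decoders can be opened from different threads.  It
 * must return 0 on success and nonzero on failure, hence the !! applied to
 * the SDL return values. */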
3093 static int lockmgr(void **mtx, enum AVLockOp op)
3094 {
3095    switch(op) {
3096       case AV_LOCK_CREATE:
3097           *mtx = SDL_CreateMutex();
3098           if(!*mtx)
3099               return 1;
3100           return 0;
3101       case AV_LOCK_OBTAIN:
3102           return !!SDL_LockMutex(*mtx);
3103       case AV_LOCK_RELEASE:
3104           return !!SDL_UnlockMutex(*mtx);
3105       case AV_LOCK_DESTROY:
3106           SDL_DestroyMutex(*mtx);
3107           return 0;
3108    }
3109    return 1;
3110 }
3111
3112 /* Entry point */
3113 int main(int argc, char **argv)
3114 {
3115     int flags;
3116     VideoState *is;
3117
3118     av_log_set_flags(AV_LOG_SKIP_REPEATED);
3119     parse_loglevel(argc, argv, options);
3120
3121     /* register all codecs, demuxers and protocols */
3122     avcodec_register_all();
3123 #if CONFIG_AVDEVICE
3124     avdevice_register_all();
3125 #endif
3126 #if CONFIG_AVFILTER
3127     avfilter_register_all();
3128 #endif
3129     av_register_all();
3130
3131     init_opts();
3132
3133     show_banner();
3134
3135     parse_options(NULL, argc, argv, options, opt_input_file);
3136
3137     if (!input_filename) {
3138         show_usage();
3139         fprintf(stderr, "An input file must be specified\n");
3140         fprintf(stderr, "Use -h to get full help or, even better, run 'man %s'\n", program_name);
3141         exit(1);
3142     }
3143
3144     if (display_disable) {
3145         video_disable = 1;
3146     }
3147     flags = SDL_INIT_VIDEO | SDL_INIT_AUDIO | SDL_INIT_TIMER;
3148     if (audio_disable)
3149         flags &= ~SDL_INIT_AUDIO;
3150 #if !defined(__MINGW32__) && !defined(__APPLE__)
3151     flags |= SDL_INIT_EVENTTHREAD; /* Not supported on Windows or Mac OS X */
3152 #endif
3153     if (SDL_Init (flags)) {
3154         fprintf(stderr, "Could not initialize SDL - %s\n", SDL_GetError());
3155         fprintf(stderr, "(Did you set the DISPLAY variable?)\n");
3156         exit(1);
3157     }
3158
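    /* Record the desktop resolution so that full screen mode can use the
       native display size (only when SDL provides SDL_GetVideoInfo()). */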
3159     if (!display_disable) {
3160 #if HAVE_SDL_VIDEO_SIZE
3161         const SDL_VideoInfo *vi = SDL_GetVideoInfo();
3162         fs_screen_width = vi->current_w;
3163         fs_screen_height = vi->current_h;
3164 #endif
3165     }
3166
3167     SDL_EventState(SDL_ACTIVEEVENT, SDL_IGNORE);
3168     SDL_EventState(SDL_SYSWMEVENT, SDL_IGNORE);
3169     SDL_EventState(SDL_USEREVENT, SDL_IGNORE);
3170
3171     if (av_lockmgr_register(lockmgr)) {
3172         fprintf(stderr, "Could not initialize lock manager!\n");
3173         do_exit(NULL);
3174     }
3175
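    /* flush_pkt is a sentinel packet: on seeks it is pushed into the packet
       queues, and the decoding threads compare pkt.data against
       flush_pkt.data to know when to flush their codec buffers.  Only the
       pointer identity matters, not the payload. */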
3176     av_init_packet(&flush_pkt);
3177     flush_pkt.data = (uint8_t *)"FLUSH";
3178
3179     is = stream_open(input_filename, file_iformat);
3180     if (!is) {
3181         fprintf(stderr, "Failed to initialize VideoState!\n");
3182         do_exit(NULL);
3183     }
3184
3185     event_loop(is);
3186
3187     /* never reached: event_loop() runs until do_exit() terminates the process */
3188
3189     return 0;
3190 }