1 /*
2  * avplay: Simple Media Player based on the Libav libraries
3  * Copyright (c) 2003 Fabrice Bellard
4  *
5  * This file is part of Libav.
6  *
7  * Libav is free software; you can redistribute it and/or
8  * modify it under the terms of the GNU Lesser General Public
9  * License as published by the Free Software Foundation; either
10  * version 2.1 of the License, or (at your option) any later version.
11  *
12  * Libav is distributed in the hope that it will be useful,
13  * but WITHOUT ANY WARRANTY; without even the implied warranty of
14  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
15  * Lesser General Public License for more details.
16  *
17  * You should have received a copy of the GNU Lesser General Public
18  * License along with Libav; if not, write to the Free Software
19  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
20  */
21
22 #include "config.h"
23 #include <inttypes.h>
24 #include <math.h>
25 #include <limits.h>
26 #include <stdint.h>
27
28 #include "libavutil/avstring.h"
29 #include "libavutil/colorspace.h"
30 #include "libavutil/display.h"
31 #include "libavutil/mathematics.h"
32 #include "libavutil/pixdesc.h"
33 #include "libavutil/imgutils.h"
34 #include "libavutil/dict.h"
35 #include "libavutil/parseutils.h"
36 #include "libavutil/samplefmt.h"
37 #include "libavutil/time.h"
38 #include "libavformat/avformat.h"
39 #include "libavdevice/avdevice.h"
40 #include "libswscale/swscale.h"
41 #include "libavresample/avresample.h"
42 #include "libavutil/opt.h"
43 #include "libavcodec/avfft.h"
44
45 #if CONFIG_AVFILTER
46 # include "libavfilter/avfilter.h"
47 # include "libavfilter/buffersink.h"
48 # include "libavfilter/buffersrc.h"
49 #endif
50
51 #include "cmdutils.h"
52
53 #include <SDL.h>
54 #include <SDL_thread.h>
55
56 #ifdef __MINGW32__
57 #undef main /* We don't want SDL to override our main() */
58 #endif
59
60 #include <assert.h>
61
62 const char program_name[] = "avplay";
63 const int program_birth_year = 2003;
64
65 #define MAX_QUEUE_SIZE (15 * 1024 * 1024)
66 #define MIN_AUDIOQ_SIZE (20 * 16 * 1024)
67 #define MIN_FRAMES 5
68
69 /* SDL audio buffer size, in samples. Should be small to have precise
70    A/V sync as SDL does not have hardware buffer fullness info. */
71 #define SDL_AUDIO_BUFFER_SIZE 1024
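/* For illustration (values assumed here; the real rate and layout come from the
 * stream): at 44100 Hz stereo S16, 1024 samples correspond to
 * 1024 / 44100 ~= 23 ms of audio and 1024 * 2 channels * 2 bytes = 4096 bytes
 * per SDL callback, so the A/V sync uncertainty stays in the ~20 ms range. */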
72
73 /* no AV sync correction is done if below the AV sync threshold */
74 #define AV_SYNC_THRESHOLD 0.01
75 /* no AV correction is done if the error is too big */
76 #define AV_NOSYNC_THRESHOLD 10.0
77
78 #define FRAME_SKIP_FACTOR 0.05
79
80 /* maximum audio speed change to get correct sync */
81 #define SAMPLE_CORRECTION_PERCENT_MAX 10
82
83 /* we use about AUDIO_DIFF_AVG_NB A-V differences to compute the average */
84 #define AUDIO_DIFF_AVG_NB   20
85
86 /* NOTE: the size must be big enough to compensate for the hardware audio buffer size */
87 #define SAMPLE_ARRAY_SIZE (2 * 65536)
88
89 static int64_t sws_flags = SWS_BICUBIC;
90
91 typedef struct PacketQueue {
92     AVPacketList *first_pkt, *last_pkt;
93     int nb_packets;
94     int size;
95     int abort_request;
96     SDL_mutex *mutex;
97     SDL_cond *cond;
98 } PacketQueue;
99
100 #define VIDEO_PICTURE_QUEUE_SIZE 2
101 #define SUBPICTURE_QUEUE_SIZE 4
102
103 typedef struct VideoPicture {
104     double pts;             // presentation timestamp for this picture
105     double target_clock;    // av_gettime_relative() time at which this should be displayed ideally
106     int64_t pos;            // byte position in file
107     SDL_Overlay *bmp;
108     int width, height; /* source height & width */
109     int allocated;
110     int reallocate;
111     enum AVPixelFormat pix_fmt;
112
113     AVRational sar;
114 } VideoPicture;
115
116 typedef struct SubPicture {
117     double pts; /* presentation time stamp for this picture */
118     AVSubtitle sub;
119 } SubPicture;
120
121 enum {
122     AV_SYNC_AUDIO_MASTER, /* default choice */
123     AV_SYNC_VIDEO_MASTER,
124     AV_SYNC_EXTERNAL_CLOCK, /* synchronize to an external clock */
125 };
126
127 typedef struct VideoState {
128     SDL_Thread *parse_tid;
129     SDL_Thread *video_tid;
130     SDL_Thread *refresh_tid;
131     AVInputFormat *iformat;
132     int no_background;
133     int abort_request;
134     int paused;
135     int last_paused;
136     int seek_req;
137     int seek_flags;
138     int64_t seek_pos;
139     int64_t seek_rel;
140     int read_pause_return;
141     AVFormatContext *ic;
142
143     int audio_stream;
144
145     int av_sync_type;
146     double external_clock; /* external clock base */
147     int64_t external_clock_time;
148
149     double audio_clock;
150     double audio_diff_cum; /* used for AV difference average computation */
151     double audio_diff_avg_coef;
152     double audio_diff_threshold;
153     int audio_diff_avg_count;
154     AVStream *audio_st;
155     PacketQueue audioq;
156     int audio_hw_buf_size;
157     uint8_t silence_buf[SDL_AUDIO_BUFFER_SIZE];
158     uint8_t *audio_buf;
159     uint8_t *audio_buf1;
160     unsigned int audio_buf_size; /* in bytes */
161     int audio_buf_index; /* in bytes */
162     AVPacket audio_pkt_temp;
163     AVPacket audio_pkt;
164     enum AVSampleFormat sdl_sample_fmt;
165     uint64_t sdl_channel_layout;
166     int sdl_channels;
167     int sdl_sample_rate;
168     enum AVSampleFormat resample_sample_fmt;
169     uint64_t resample_channel_layout;
170     int resample_sample_rate;
171     AVAudioResampleContext *avr;
172     AVFrame *frame;
173
174     int show_audio; /* if true, display audio samples */
175     int16_t sample_array[SAMPLE_ARRAY_SIZE];
176     int sample_array_index;
177     int last_i_start;
178     RDFTContext *rdft;
179     int rdft_bits;
180     FFTSample *rdft_data;
181     int xpos;
182
183     SDL_Thread *subtitle_tid;
184     int subtitle_stream;
185     int subtitle_stream_changed;
186     AVStream *subtitle_st;
187     PacketQueue subtitleq;
188     SubPicture subpq[SUBPICTURE_QUEUE_SIZE];
189     int subpq_size, subpq_rindex, subpq_windex;
190     SDL_mutex *subpq_mutex;
191     SDL_cond *subpq_cond;
192
193     double frame_timer;
194     double frame_last_pts;
195     double frame_last_delay;
196     double video_clock;             // pts of last decoded frame / predicted pts of next decoded frame
197     int video_stream;
198     AVStream *video_st;
199     PacketQueue videoq;
200     double video_current_pts;       // current displayed pts (different from video_clock if frame fifos are used)
201     double video_current_pts_drift; // video_current_pts - time (av_gettime_relative) at which we updated video_current_pts - used to have running video pts
202     int64_t video_current_pos;      // current displayed file pos
203     VideoPicture pictq[VIDEO_PICTURE_QUEUE_SIZE];
204     int pictq_size, pictq_rindex, pictq_windex;
205     SDL_mutex *pictq_mutex;
206     SDL_cond *pictq_cond;
207 #if !CONFIG_AVFILTER
208     struct SwsContext *img_convert_ctx;
209 #endif
210
211     //    QETimer *video_timer;
212     char filename[1024];
213     int width, height, xleft, ytop;
214
215     PtsCorrectionContext pts_ctx;
216
217 #if CONFIG_AVFILTER
218     AVFilterContext *in_video_filter;   // the first filter in the video chain
219     AVFilterContext *out_video_filter;  // the last filter in the video chain
220 #endif
221
222     float skip_frames;
223     float skip_frames_index;
224     int refresh;
225 } VideoState;
226
227 /* options specified by the user */
228 static AVInputFormat *file_iformat;
229 static const char *input_filename;
230 static const char *window_title;
231 static int fs_screen_width;
232 static int fs_screen_height;
233 static int screen_width  = 0;
234 static int screen_height = 0;
235 static int audio_disable;
236 static int video_disable;
237 static int wanted_stream[AVMEDIA_TYPE_NB] = {
238     [AVMEDIA_TYPE_AUDIO]    = -1,
239     [AVMEDIA_TYPE_VIDEO]    = -1,
240     [AVMEDIA_TYPE_SUBTITLE] = -1,
241 };
242 static int seek_by_bytes = -1;
243 static int display_disable;
244 static int show_status = 1;
245 static int av_sync_type = AV_SYNC_AUDIO_MASTER;
246 static int64_t start_time = AV_NOPTS_VALUE;
247 static int64_t duration = AV_NOPTS_VALUE;
248 static int step = 0;
249 static int workaround_bugs = 1;
250 static int fast = 0;
251 static int genpts = 0;
252 static int idct = FF_IDCT_AUTO;
253 static enum AVDiscard skip_frame       = AVDISCARD_DEFAULT;
254 static enum AVDiscard skip_idct        = AVDISCARD_DEFAULT;
255 static enum AVDiscard skip_loop_filter = AVDISCARD_DEFAULT;
256 static int error_concealment = 3;
257 static int decoder_reorder_pts = -1;
258 static int noautoexit;
259 static int exit_on_keydown;
260 static int exit_on_mousedown;
261 static int loop = 1;
262 static int framedrop = 1;
263 static int infinite_buffer = 0;
264
265 static int rdftspeed = 20;
266 #if CONFIG_AVFILTER
267 static char *vfilters = NULL;
268 #endif
269 static int autorotate = 1;
270
271 /* current context */
272 static int is_full_screen;
273 static VideoState *cur_stream;
274 static int64_t audio_callback_time;
275
276 static AVPacket flush_pkt;
277
278 #define FF_ALLOC_EVENT   (SDL_USEREVENT)
279 #define FF_REFRESH_EVENT (SDL_USEREVENT + 1)
280 #define FF_QUIT_EVENT    (SDL_USEREVENT + 2)
281
282 static SDL_Surface *screen;
283
284 static int packet_queue_put(PacketQueue *q, AVPacket *pkt);
285
286 /* packet queue handling */
287 static void packet_queue_init(PacketQueue *q)
288 {
289     memset(q, 0, sizeof(PacketQueue));
290     q->mutex = SDL_CreateMutex();
291     q->cond = SDL_CreateCond();
292     packet_queue_put(q, &flush_pkt);
293 }
294
295 static void packet_queue_flush(PacketQueue *q)
296 {
297     AVPacketList *pkt, *pkt1;
298
299     SDL_LockMutex(q->mutex);
300     for (pkt = q->first_pkt; pkt != NULL; pkt = pkt1) {
301         pkt1 = pkt->next;
302         av_packet_unref(&pkt->pkt);
303         av_freep(&pkt);
304     }
305     q->last_pkt = NULL;
306     q->first_pkt = NULL;
307     q->nb_packets = 0;
308     q->size = 0;
309     SDL_UnlockMutex(q->mutex);
310 }
311
312 static void packet_queue_end(PacketQueue *q)
313 {
314     packet_queue_flush(q);
315     SDL_DestroyMutex(q->mutex);
316     SDL_DestroyCond(q->cond);
317 }
318
319 static int packet_queue_put(PacketQueue *q, AVPacket *pkt)
320 {
321     AVPacketList *pkt1;
322
323     pkt1 = av_malloc(sizeof(AVPacketList));
324     if (!pkt1)
325         return -1;
326     pkt1->pkt = *pkt;
327     pkt1->next = NULL;
328
329
330     SDL_LockMutex(q->mutex);
331
332     if (!q->last_pkt)
333
334         q->first_pkt = pkt1;
335     else
336         q->last_pkt->next = pkt1;
337     q->last_pkt = pkt1;
338     q->nb_packets++;
339     q->size += pkt1->pkt.size + sizeof(*pkt1);
340     /* XXX: should duplicate packet data in DV case */
341     SDL_CondSignal(q->cond);
342
343     SDL_UnlockMutex(q->mutex);
344     return 0;
345 }
346
347 static void packet_queue_abort(PacketQueue *q)
348 {
349     SDL_LockMutex(q->mutex);
350
351     q->abort_request = 1;
352
353     SDL_CondSignal(q->cond);
354
355     SDL_UnlockMutex(q->mutex);
356 }
357
358 /* return < 0 if aborted, 0 if no packet and > 0 if packet.  */
359 static int packet_queue_get(PacketQueue *q, AVPacket *pkt, int block)
360 {
361     AVPacketList *pkt1;
362     int ret;
363
364     SDL_LockMutex(q->mutex);
365
366     for (;;) {
367         if (q->abort_request) {
368             ret = -1;
369             break;
370         }
371
372         pkt1 = q->first_pkt;
373         if (pkt1) {
374             q->first_pkt = pkt1->next;
375             if (!q->first_pkt)
376                 q->last_pkt = NULL;
377             q->nb_packets--;
378             q->size -= pkt1->pkt.size + sizeof(*pkt1);
379             *pkt = pkt1->pkt;
380             av_free(pkt1);
381             ret = 1;
382             break;
383         } else if (!block) {
384             ret = 0;
385             break;
386         } else {
387             SDL_CondWait(q->cond, q->mutex);
388         }
389     }
390     SDL_UnlockMutex(q->mutex);
391     return ret;
392 }
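/* Example (illustrative sketch, not part of avplay itself): typical
 * producer/consumer use of the PacketQueue helpers above. The demuxing thread
 * puts packets, a decoder thread gets them with block=1, and shutdown goes
 * through packet_queue_abort() + packet_queue_end(). The function name and the
 * single-threaded layout are hypothetical and only meant to show the API. */
#if 0
static void packet_queue_example(AVFormatContext *ic, int stream_index)
{
    PacketQueue q;
    AVPacket pkt;

    packet_queue_init(&q);                /* also queues flush_pkt */

    /* producer side: feed demuxed packets of one stream into the queue */
    while (av_read_frame(ic, &pkt) >= 0) {
        if (pkt.stream_index == stream_index)
            packet_queue_put(&q, &pkt);   /* the queue takes ownership */
        else
            av_packet_unref(&pkt);
    }

    /* consumer side (normally a separate thread): block until a packet is
     * available; returns < 0 once packet_queue_abort() has been called */
    while (packet_queue_get(&q, &pkt, 1) > 0) {
        /* ... decode pkt ... */
        av_packet_unref(&pkt);
    }

    packet_queue_abort(&q);
    packet_queue_end(&q);                 /* flushes packets, frees mutex/cond */
}
#endif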
393
394 static inline void fill_rectangle(SDL_Surface *screen,
395                                   int x, int y, int w, int h, int color)
396 {
397     SDL_Rect rect;
398     rect.x = x;
399     rect.y = y;
400     rect.w = w;
401     rect.h = h;
402     SDL_FillRect(screen, &rect, color);
403 }
404
405 #define ALPHA_BLEND(a, oldp, newp, s)\
406 ((((oldp << s) * (255 - (a))) + (newp * (a))) / (255 << s))
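/* Worked example (illustrative numbers): ALPHA_BLEND(a, oldp, newp, 0)
 * evaluates to (oldp*(255-a) + newp*a) / 255, i.e. a plain alpha blend;
 * with oldp=16, newp=235, a=128 it gives (16*127 + 235*128) / 255 = 125.
 * The shift s is used below when u1/v1 hold sums of 2 or 4 chroma samples,
 * so the averaged chroma can be blended without dividing the sums first. */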
407
408 #define RGBA_IN(r, g, b, a, s)\
409 {\
410     unsigned int v = ((const uint32_t *)(s))[0];\
411     a = (v >> 24) & 0xff;\
412     r = (v >> 16) & 0xff;\
413     g = (v >> 8) & 0xff;\
414     b = v & 0xff;\
415 }
416
417 #define YUVA_IN(y, u, v, a, s, pal)\
418 {\
419     unsigned int val = ((const uint32_t *)(pal))[*(const uint8_t*)(s)];\
420     a = (val >> 24) & 0xff;\
421     y = (val >> 16) & 0xff;\
422     u = (val >> 8) & 0xff;\
423     v = val & 0xff;\
424 }
425
426 #define YUVA_OUT(d, y, u, v, a)\
427 {\
428     ((uint32_t *)(d))[0] = (a << 24) | (y << 16) | (u << 8) | v;\
429 }
430
431
432 #define BPP 1
433
434 static void blend_subrect(uint8_t *dst[4], uint16_t dst_linesize[4],
435                           const AVSubtitleRect *rect, int imgw, int imgh)
436 {
437     int wrap, wrap3, width2, skip2;
438     int y, u, v, a, u1, v1, a1, w, h;
439     uint8_t *lum, *cb, *cr;
440     const uint8_t *p;
441     const uint32_t *pal;
442     int dstx, dsty, dstw, dsth;
443
444     dstw = av_clip(rect->w, 0, imgw);
445     dsth = av_clip(rect->h, 0, imgh);
446     dstx = av_clip(rect->x, 0, imgw - dstw);
447     dsty = av_clip(rect->y, 0, imgh - dsth);
448     /* SDL has U and V inverted */
449     lum = dst[0] +  dsty       * dst_linesize[0];
450     cb  = dst[2] + (dsty >> 1) * dst_linesize[2];
451     cr  = dst[1] + (dsty >> 1) * dst_linesize[1];
452
453     width2 = ((dstw + 1) >> 1) + (dstx & ~dstw & 1);
454     skip2 = dstx >> 1;
455     wrap = dst_linesize[0];
456     wrap3 = rect->linesize[0];
457     p = rect->data[0];
458     pal = (const uint32_t *)rect->data[1];  /* Now in YCrCb! */
459
460     if (dsty & 1) {
461         lum += dstx;
462         cb += skip2;
463         cr += skip2;
464
465         if (dstx & 1) {
466             YUVA_IN(y, u, v, a, p, pal);
467             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
468             cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
469             cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
470             cb++;
471             cr++;
472             lum++;
473             p += BPP;
474         }
475         for (w = dstw - (dstx & 1); w >= 2; w -= 2) {
476             YUVA_IN(y, u, v, a, p, pal);
477             u1 = u;
478             v1 = v;
479             a1 = a;
480             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
481
482             YUVA_IN(y, u, v, a, p + BPP, pal);
483             u1 += u;
484             v1 += v;
485             a1 += a;
486             lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
487             cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
488             cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
489             cb++;
490             cr++;
491             p += 2 * BPP;
492             lum += 2;
493         }
494         if (w) {
495             YUVA_IN(y, u, v, a, p, pal);
496             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
497             cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
498             cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
499             p++;
500             lum++;
501         }
502         p += wrap3 - dstw * BPP;
503         lum += wrap - dstw - dstx;
504         cb += dst_linesize[2] - width2 - skip2;
505         cr += dst_linesize[1] - width2 - skip2;
506     }
507     for (h = dsth - (dsty & 1); h >= 2; h -= 2) {
508         lum += dstx;
509         cb += skip2;
510         cr += skip2;
511
512         if (dstx & 1) {
513             YUVA_IN(y, u, v, a, p, pal);
514             u1 = u;
515             v1 = v;
516             a1 = a;
517             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
518             p += wrap3;
519             lum += wrap;
520             YUVA_IN(y, u, v, a, p, pal);
521             u1 += u;
522             v1 += v;
523             a1 += a;
524             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
525             cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
526             cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
527             cb++;
528             cr++;
529             p += -wrap3 + BPP;
530             lum += -wrap + 1;
531         }
532         for (w = dstw - (dstx & 1); w >= 2; w -= 2) {
533             YUVA_IN(y, u, v, a, p, pal);
534             u1 = u;
535             v1 = v;
536             a1 = a;
537             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
538
539             YUVA_IN(y, u, v, a, p + BPP, pal);
540             u1 += u;
541             v1 += v;
542             a1 += a;
543             lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
544             p += wrap3;
545             lum += wrap;
546
547             YUVA_IN(y, u, v, a, p, pal);
548             u1 += u;
549             v1 += v;
550             a1 += a;
551             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
552
553             YUVA_IN(y, u, v, a, p + BPP, pal);
554             u1 += u;
555             v1 += v;
556             a1 += a;
557             lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
558
559             cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 2);
560             cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 2);
561
562             cb++;
563             cr++;
564             p += -wrap3 + 2 * BPP;
565             lum += -wrap + 2;
566         }
567         if (w) {
568             YUVA_IN(y, u, v, a, p, pal);
569             u1 = u;
570             v1 = v;
571             a1 = a;
572             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
573             p += wrap3;
574             lum += wrap;
575             YUVA_IN(y, u, v, a, p, pal);
576             u1 += u;
577             v1 += v;
578             a1 += a;
579             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
580             cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
581             cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
582             cb++;
583             cr++;
584             p += -wrap3 + BPP;
585             lum += -wrap + 1;
586         }
587         p += wrap3 + (wrap3 - dstw * BPP);
588         lum += wrap + (wrap - dstw - dstx);
589         cb += dst_linesize[2] - width2 - skip2;
590         cr += dst_linesize[1] - width2 - skip2;
591     }
592     /* handle odd height */
593     if (h) {
594         lum += dstx;
595         cb += skip2;
596         cr += skip2;
597
598         if (dstx & 1) {
599             YUVA_IN(y, u, v, a, p, pal);
600             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
601             cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
602             cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
603             cb++;
604             cr++;
605             lum++;
606             p += BPP;
607         }
608         for (w = dstw - (dstx & 1); w >= 2; w -= 2) {
609             YUVA_IN(y, u, v, a, p, pal);
610             u1 = u;
611             v1 = v;
612             a1 = a;
613             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
614
615             YUVA_IN(y, u, v, a, p + BPP, pal);
616             u1 += u;
617             v1 += v;
618             a1 += a;
619             lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
620             cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u, 1);
621             cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v, 1);
622             cb++;
623             cr++;
624             p += 2 * BPP;
625             lum += 2;
626         }
627         if (w) {
628             YUVA_IN(y, u, v, a, p, pal);
629             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
630             cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
631             cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
632         }
633     }
634 }
635
636 static void free_subpicture(SubPicture *sp)
637 {
638     avsubtitle_free(&sp->sub);
639 }
640
641 static void video_image_display(VideoState *is)
642 {
643     VideoPicture *vp;
644     SubPicture *sp;
645     float aspect_ratio;
646     int width, height, x, y;
647     SDL_Rect rect;
648     int i;
649
650     vp = &is->pictq[is->pictq_rindex];
651     if (vp->bmp) {
652 #if CONFIG_AVFILTER
653          if (!vp->sar.num)
654              aspect_ratio = 0;
655          else
656              aspect_ratio = av_q2d(vp->sar);
657 #else
658
659         /* XXX: use variable in the frame */
660         if (is->video_st->sample_aspect_ratio.num)
661             aspect_ratio = av_q2d(is->video_st->sample_aspect_ratio);
662         else if (is->video_st->codec->sample_aspect_ratio.num)
663             aspect_ratio = av_q2d(is->video_st->codec->sample_aspect_ratio);
664         else
665             aspect_ratio = 0;
666 #endif
667         if (aspect_ratio <= 0.0)
668             aspect_ratio = 1.0;
669         aspect_ratio *= (float)vp->width / (float)vp->height;
670
671         if (is->subtitle_st)
672         {
673             if (is->subpq_size > 0)
674             {
675                 sp = &is->subpq[is->subpq_rindex];
676
677                 if (vp->pts >= sp->pts + ((float) sp->sub.start_display_time / 1000))
678                 {
679                     SDL_LockYUVOverlay (vp->bmp);
680
681                     for (i = 0; i < sp->sub.num_rects; i++)
682                         blend_subrect(vp->bmp->pixels, vp->bmp->pitches,
683                                       sp->sub.rects[i], vp->bmp->w, vp->bmp->h);
684
685                     SDL_UnlockYUVOverlay (vp->bmp);
686                 }
687             }
688         }
689
690
691         /* XXX: we assume the screen has a 1:1 pixel aspect ratio */
692         height = is->height;
693         width = ((int)rint(height * aspect_ratio)) & ~1;
694         if (width > is->width) {
695             width = is->width;
696             height = ((int)rint(width / aspect_ratio)) & ~1;
697         }
698         x = (is->width - width) / 2;
699         y = (is->height - height) / 2;
700         is->no_background = 0;
701         rect.x = is->xleft + x;
702         rect.y = is->ytop  + y;
703         rect.w = width;
704         rect.h = height;
705         SDL_DisplayYUVOverlay(vp->bmp, &rect);
706     }
707 }
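/* Worked example for the letterboxing above (illustrative numbers): for a
 * 1920x1080 picture with square pixels shown in a 640x480 window,
 * aspect_ratio = 1920/1080 ~= 1.78, so the first guess rint(480*1.78) & ~1 = 852
 * is wider than the window; the code then keeps width = 640, uses
 * height = rint(640/1.78) & ~1 = 360, and centers the overlay at x = 0, y = 60. */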
708
709 /* get the current audio output buffer size, in bytes. With SDL, we
710    cannot get precise information on the hardware buffer fullness */
711 static int audio_write_get_buf_size(VideoState *is)
712 {
713     return is->audio_buf_size - is->audio_buf_index;
714 }
715
716 static inline int compute_mod(int a, int b)
717 {
718     a = a % b;
719     if (a >= 0)
720         return a;
721     else
722         return a + b;
723 }
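/* Example: C's % operator keeps the sign of the dividend, so -3 % 10 == -3,
 * while compute_mod(-3, 10) == 7; this is what makes the backwards index
 * arithmetic on sample_array[] below wrap around correctly. */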
724
725 static void video_audio_display(VideoState *s)
726 {
727     int i, i_start, x, y1, y, ys, delay, n, nb_display_channels;
728     int ch, channels, h, h2, bgcolor, fgcolor;
729     int64_t time_diff;
730     int rdft_bits, nb_freq;
731
732     for (rdft_bits = 1; (1 << rdft_bits) < 2 * s->height; rdft_bits++)
733         ;
734     nb_freq = 1 << (rdft_bits - 1);
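    /* e.g. for a 480-pixel-high window this picks the smallest power of two
       >= 2*480: rdft_bits = 10, so nb_freq = 512 frequency bins (illustrative
       numbers; the actual height depends on the window size) */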
735
736     /* compute display index: center on currently output samples */
737     channels = s->sdl_channels;
738     nb_display_channels = channels;
739     if (!s->paused) {
740         int data_used = s->show_audio == 1 ? s->width : (2 * nb_freq);
741         n = 2 * channels;
742         delay = audio_write_get_buf_size(s);
743         delay /= n;
744
745         /* to be more precise, we take into account the time spent since
746            the last buffer computation */
747         if (audio_callback_time) {
748             time_diff = av_gettime_relative() - audio_callback_time;
749             delay -= (time_diff * s->sdl_sample_rate) / 1000000;
750         }
751
752         delay += 2 * data_used;
753         if (delay < data_used)
754             delay = data_used;
755
756         i_start= x = compute_mod(s->sample_array_index - delay * channels, SAMPLE_ARRAY_SIZE);
757         if (s->show_audio == 1) {
758             h = INT_MIN;
759             for (i = 0; i < 1000; i += channels) {
760                 int idx = (SAMPLE_ARRAY_SIZE + x - i) % SAMPLE_ARRAY_SIZE;
761                 int a = s->sample_array[idx];
762                 int b = s->sample_array[(idx + 4 * channels) % SAMPLE_ARRAY_SIZE];
763                 int c = s->sample_array[(idx + 5 * channels) % SAMPLE_ARRAY_SIZE];
764                 int d = s->sample_array[(idx + 9 * channels) % SAMPLE_ARRAY_SIZE];
765                 int score = a - d;
766                 if (h < score && (b ^ c) < 0) {
767                     h = score;
768                     i_start = idx;
769                 }
770             }
771         }
772
773         s->last_i_start = i_start;
774     } else {
775         i_start = s->last_i_start;
776     }
777
778     bgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0x00);
779     if (s->show_audio == 1) {
780         fill_rectangle(screen,
781                        s->xleft, s->ytop, s->width, s->height,
782                        bgcolor);
783
784         fgcolor = SDL_MapRGB(screen->format, 0xff, 0xff, 0xff);
785
786         /* total height for one channel */
787         h = s->height / nb_display_channels;
788         /* graph height / 2 */
789         h2 = (h * 9) / 20;
790         for (ch = 0; ch < nb_display_channels; ch++) {
791             i = i_start + ch;
792             y1 = s->ytop + ch * h + (h / 2); /* position of center line */
793             for (x = 0; x < s->width; x++) {
794                 y = (s->sample_array[i] * h2) >> 15;
795                 if (y < 0) {
796                     y = -y;
797                     ys = y1 - y;
798                 } else {
799                     ys = y1;
800                 }
801                 fill_rectangle(screen,
802                                s->xleft + x, ys, 1, y,
803                                fgcolor);
804                 i += channels;
805                 if (i >= SAMPLE_ARRAY_SIZE)
806                     i -= SAMPLE_ARRAY_SIZE;
807             }
808         }
809
810         fgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0xff);
811
812         for (ch = 1; ch < nb_display_channels; ch++) {
813             y = s->ytop + ch * h;
814             fill_rectangle(screen,
815                            s->xleft, y, s->width, 1,
816                            fgcolor);
817         }
818         SDL_UpdateRect(screen, s->xleft, s->ytop, s->width, s->height);
819     } else {
820         nb_display_channels= FFMIN(nb_display_channels, 2);
821         if (rdft_bits != s->rdft_bits) {
822             av_rdft_end(s->rdft);
823             av_free(s->rdft_data);
824             s->rdft = av_rdft_init(rdft_bits, DFT_R2C);
825             s->rdft_bits = rdft_bits;
826             s->rdft_data = av_malloc(4 * nb_freq * sizeof(*s->rdft_data));
827         }
828         {
829             FFTSample *data[2];
830             for (ch = 0; ch < nb_display_channels; ch++) {
831                 data[ch] = s->rdft_data + 2 * nb_freq * ch;
832                 i = i_start + ch;
833                 for (x = 0; x < 2 * nb_freq; x++) {
834                     double w = (x-nb_freq) * (1.0 / nb_freq);
835                     data[ch][x] = s->sample_array[i] * (1.0 - w * w);
836                     i += channels;
837                     if (i >= SAMPLE_ARRAY_SIZE)
838                         i -= SAMPLE_ARRAY_SIZE;
839                 }
840                 av_rdft_calc(s->rdft, data[ch]);
841             }
842             /* Least efficient way to do this; we should of course access
843              * the data directly, but it is more than fast enough. */
844             for (y = 0; y < s->height; y++) {
845                 double w = 1 / sqrt(nb_freq);
846                 int a = sqrt(w * sqrt(data[0][2 * y + 0] * data[0][2 * y + 0] + data[0][2 * y + 1] * data[0][2 * y + 1]));
847                 int b = (nb_display_channels == 2 ) ? sqrt(w * sqrt(data[1][2 * y + 0] * data[1][2 * y + 0]
848                        + data[1][2 * y + 1] * data[1][2 * y + 1])) : a;
849                 a = FFMIN(a, 255);
850                 b = FFMIN(b, 255);
851                 fgcolor = SDL_MapRGB(screen->format, a, b, (a + b) / 2);
852
853                 fill_rectangle(screen,
854                             s->xpos, s->height-y, 1, 1,
855                             fgcolor);
856             }
857         }
858         SDL_UpdateRect(screen, s->xpos, s->ytop, 1, s->height);
859         s->xpos++;
860         if (s->xpos >= s->width)
861             s->xpos= s->xleft;
862     }
863 }
864
865 static int video_open(VideoState *is)
866 {
867     int flags = SDL_HWSURFACE | SDL_ASYNCBLIT | SDL_HWACCEL;
868     int w,h;
869
870     if (is_full_screen) flags |= SDL_FULLSCREEN;
871     else                flags |= SDL_RESIZABLE;
872
873     if (is_full_screen && fs_screen_width) {
874         w = fs_screen_width;
875         h = fs_screen_height;
876     } else if (!is_full_screen && screen_width) {
877         w = screen_width;
878         h = screen_height;
879 #if CONFIG_AVFILTER
880     } else if (is->out_video_filter && is->out_video_filter->inputs[0]) {
881         w = is->out_video_filter->inputs[0]->w;
882         h = is->out_video_filter->inputs[0]->h;
883 #else
884     } else if (is->video_st && is->video_st->codec->width) {
885         w = is->video_st->codec->width;
886         h = is->video_st->codec->height;
887 #endif
888     } else {
889         w = 640;
890         h = 480;
891     }
892     if (screen && is->width == screen->w && screen->w == w
893        && is->height== screen->h && screen->h == h)
894         return 0;
895
896 #if defined(__APPLE__) && !SDL_VERSION_ATLEAST(1, 2, 14)
897     /* setting bits_per_pixel = 0 or 32 causes blank video on OS X and older SDL */
898     screen = SDL_SetVideoMode(w, h, 24, flags);
899 #else
900     screen = SDL_SetVideoMode(w, h, 0, flags);
901 #endif
902     if (!screen) {
903         fprintf(stderr, "SDL: could not set video mode - exiting\n");
904         return -1;
905     }
906     if (!window_title)
907         window_title = input_filename;
908     SDL_WM_SetCaption(window_title, window_title);
909
910     is->width  = screen->w;
911     is->height = screen->h;
912
913     return 0;
914 }
915
916 /* display the current picture, if any */
917 static void video_display(VideoState *is)
918 {
919     if (!screen)
920         video_open(cur_stream);
921     if (is->audio_st && is->show_audio)
922         video_audio_display(is);
923     else if (is->video_st)
924         video_image_display(is);
925 }
926
927 static int refresh_thread(void *opaque)
928 {
929     VideoState *is= opaque;
930     while (!is->abort_request) {
931         SDL_Event event;
932         event.type = FF_REFRESH_EVENT;
933         event.user.data1 = opaque;
934         if (!is->refresh) {
935             is->refresh = 1;
936             SDL_PushEvent(&event);
937         }
938         av_usleep(is->audio_st && is->show_audio ? rdftspeed * 1000 : 5000); // FIXME: ideally we should wait for the correct time, but SDL's event passing is so slow it would be silly
939     }
940     return 0;
941 }
942
943 /* get the current audio clock value */
944 static double get_audio_clock(VideoState *is)
945 {
946     double pts;
947     int hw_buf_size, bytes_per_sec;
948     pts = is->audio_clock;
949     hw_buf_size = audio_write_get_buf_size(is);
950     bytes_per_sec = 0;
951     if (is->audio_st) {
952         bytes_per_sec = is->sdl_sample_rate * is->sdl_channels *
953                         av_get_bytes_per_sample(is->sdl_sample_fmt);
954     }
955     if (bytes_per_sec)
956         pts -= (double)hw_buf_size / bytes_per_sec;
957     return pts;
958 }
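/* Worked example (assumed values): with 44100 Hz stereo S16 output,
 * bytes_per_sec = 44100 * 2 * 2 = 176400; if 8192 bytes are still waiting in
 * audio_buf, the returned clock is moved back by 8192 / 176400 ~= 0.046 s,
 * i.e. the time it will take before those samples are actually heard. */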
959
960 /* get the current video clock value */
961 static double get_video_clock(VideoState *is)
962 {
963     if (is->paused) {
964         return is->video_current_pts;
965     } else {
966         return is->video_current_pts_drift + av_gettime_relative() / 1000000.0;
967     }
968 }
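/* video_current_pts_drift is video_current_pts minus the wall-clock time at
 * which it was stored, so adding the current time back yields a clock that
 * keeps advancing between display updates; while paused, the stored pts is
 * returned unchanged. */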
969
970 /* get the current external clock value */
971 static double get_external_clock(VideoState *is)
972 {
973     int64_t ti;
974     ti = av_gettime_relative();
975     return is->external_clock + ((ti - is->external_clock_time) * 1e-6);
976 }
977
978 /* get the current master clock value */
979 static double get_master_clock(VideoState *is)
980 {
981     double val;
982
983     if (is->av_sync_type == AV_SYNC_VIDEO_MASTER) {
984         if (is->video_st)
985             val = get_video_clock(is);
986         else
987             val = get_audio_clock(is);
988     } else if (is->av_sync_type == AV_SYNC_AUDIO_MASTER) {
989         if (is->audio_st)
990             val = get_audio_clock(is);
991         else
992             val = get_video_clock(is);
993     } else {
994         val = get_external_clock(is);
995     }
996     return val;
997 }
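/* Example (illustrative sketch, not part of avplay itself): with the clock
 * getters above, the momentary A-V difference can be sampled at any time; a
 * positive value means audio is ahead of video. The status line printed in
 * video_refresh_timer() below uses the same expression. */
#if 0
static double current_av_diff(VideoState *is)
{
    if (!is->audio_st || !is->video_st)
        return 0.0;
    return get_audio_clock(is) - get_video_clock(is);
}
#endif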
998
999 /* seek in the stream */
1000 static void stream_seek(VideoState *is, int64_t pos, int64_t rel, int seek_by_bytes)
1001 {
1002     if (!is->seek_req) {
1003         is->seek_pos = pos;
1004         is->seek_rel = rel;
1005         is->seek_flags &= ~AVSEEK_FLAG_BYTE;
1006         if (seek_by_bytes)
1007             is->seek_flags |= AVSEEK_FLAG_BYTE;
1008         is->seek_req = 1;
1009     }
1010 }
1011
1012 /* pause or resume the video */
1013 static void stream_pause(VideoState *is)
1014 {
1015     if (is->paused) {
1016         is->frame_timer += av_gettime_relative() / 1000000.0 + is->video_current_pts_drift - is->video_current_pts;
1017         if (is->read_pause_return != AVERROR(ENOSYS)) {
1018             is->video_current_pts = is->video_current_pts_drift + av_gettime_relative() / 1000000.0;
1019         }
1020         is->video_current_pts_drift = is->video_current_pts - av_gettime_relative() / 1000000.0;
1021     }
1022     is->paused = !is->paused;
1023 }
1024
1025 static double compute_target_time(double frame_current_pts, VideoState *is)
1026 {
1027     double delay, sync_threshold, diff = 0;
1028
1029     /* compute nominal delay */
1030     delay = frame_current_pts - is->frame_last_pts;
1031     if (delay <= 0 || delay >= 10.0) {
1032         /* if incorrect delay, use previous one */
1033         delay = is->frame_last_delay;
1034     } else {
1035         is->frame_last_delay = delay;
1036     }
1037     is->frame_last_pts = frame_current_pts;
1038
1039     /* update delay to follow master synchronisation source */
1040     if (((is->av_sync_type == AV_SYNC_AUDIO_MASTER && is->audio_st) ||
1041          is->av_sync_type == AV_SYNC_EXTERNAL_CLOCK)) {
1042         /* if video is slave, we try to correct big delays by
1043            duplicating or deleting a frame */
1044         diff = get_video_clock(is) - get_master_clock(is);
1045
1046         /* skip or repeat frame. We take into account the
1047            delay to compute the threshold. I still don't know
1048            if it is the best guess */
1049         sync_threshold = FFMAX(AV_SYNC_THRESHOLD, delay);
1050         if (fabs(diff) < AV_NOSYNC_THRESHOLD) {
1051             if (diff <= -sync_threshold)
1052                 delay = 0;
1053             else if (diff >= sync_threshold)
1054                 delay = 2 * delay;
1055         }
1056     }
1057     is->frame_timer += delay;
1058
1059     av_log(NULL, AV_LOG_TRACE, "video: delay=%0.3f pts=%0.3f A-V=%f\n",
1060             delay, frame_current_pts, -diff);
1061
1062     return is->frame_timer;
1063 }
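/* Worked example (illustrative numbers): at 25 fps the nominal delay is
 * 0.040 s, so sync_threshold = FFMAX(0.01, 0.040) = 0.040. If the video clock
 * is 0.1 s behind the master clock (diff = -0.1 <= -0.040), the delay is forced
 * to 0 so the video catches up; if it is 0.1 s ahead (diff = 0.1 >= 0.040), the
 * delay is doubled to 0.080 s. Differences beyond AV_NOSYNC_THRESHOLD (10 s)
 * are left alone, since they usually indicate a timestamp discontinuity rather
 * than gradual drift. */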
1064
1065 /* called to display each frame */
1066 static void video_refresh_timer(void *opaque)
1067 {
1068     VideoState *is = opaque;
1069     VideoPicture *vp;
1070
1071     SubPicture *sp, *sp2;
1072
1073     if (is->video_st) {
1074 retry:
1075         if (is->pictq_size == 0) {
1076             // nothing to do, no picture to display in the queue
1077         } else {
1078             double time = av_gettime_relative() / 1000000.0;
1079             double next_target;
1080             /* dequeue the picture */
1081             vp = &is->pictq[is->pictq_rindex];
1082
1083             if (time < vp->target_clock)
1084                 return;
1085             /* update current video pts */
1086             is->video_current_pts = vp->pts;
1087             is->video_current_pts_drift = is->video_current_pts - time;
1088             is->video_current_pos = vp->pos;
1089             if (is->pictq_size > 1) {
1090                 VideoPicture *nextvp = &is->pictq[(is->pictq_rindex + 1) % VIDEO_PICTURE_QUEUE_SIZE];
1091                 assert(nextvp->target_clock >= vp->target_clock);
1092                 next_target= nextvp->target_clock;
1093             } else {
1094                 next_target = vp->target_clock + is->video_clock - vp->pts; // FIXME pass durations cleanly
1095             }
1096             if (framedrop && time > next_target) {
1097                 is->skip_frames *= 1.0 + FRAME_SKIP_FACTOR;
1098                 if (is->pictq_size > 1 || time > next_target + 0.5) {
1099                     /* update queue size and signal for next picture */
1100                     if (++is->pictq_rindex == VIDEO_PICTURE_QUEUE_SIZE)
1101                         is->pictq_rindex = 0;
1102
1103                     SDL_LockMutex(is->pictq_mutex);
1104                     is->pictq_size--;
1105                     SDL_CondSignal(is->pictq_cond);
1106                     SDL_UnlockMutex(is->pictq_mutex);
1107                     goto retry;
1108                 }
1109             }
1110
1111             if (is->subtitle_st) {
1112                 if (is->subtitle_stream_changed) {
1113                     SDL_LockMutex(is->subpq_mutex);
1114
1115                     while (is->subpq_size) {
1116                         free_subpicture(&is->subpq[is->subpq_rindex]);
1117
1118                         /* update queue size and signal for next picture */
1119                         if (++is->subpq_rindex == SUBPICTURE_QUEUE_SIZE)
1120                             is->subpq_rindex = 0;
1121
1122                         is->subpq_size--;
1123                     }
1124                     is->subtitle_stream_changed = 0;
1125
1126                     SDL_CondSignal(is->subpq_cond);
1127                     SDL_UnlockMutex(is->subpq_mutex);
1128                 } else {
1129                     if (is->subpq_size > 0) {
1130                         sp = &is->subpq[is->subpq_rindex];
1131
1132                         if (is->subpq_size > 1)
1133                             sp2 = &is->subpq[(is->subpq_rindex + 1) % SUBPICTURE_QUEUE_SIZE];
1134                         else
1135                             sp2 = NULL;
1136
1137                         if ((is->video_current_pts > (sp->pts + ((float) sp->sub.end_display_time / 1000)))
1138                                 || (sp2 && is->video_current_pts > (sp2->pts + ((float) sp2->sub.start_display_time / 1000))))
1139                         {
1140                             free_subpicture(sp);
1141
1142                             /* update queue size and signal for next picture */
1143                             if (++is->subpq_rindex == SUBPICTURE_QUEUE_SIZE)
1144                                 is->subpq_rindex = 0;
1145
1146                             SDL_LockMutex(is->subpq_mutex);
1147                             is->subpq_size--;
1148                             SDL_CondSignal(is->subpq_cond);
1149                             SDL_UnlockMutex(is->subpq_mutex);
1150                         }
1151                     }
1152                 }
1153             }
1154
1155             /* display picture */
1156             if (!display_disable)
1157                 video_display(is);
1158
1159             /* update queue size and signal for next picture */
1160             if (++is->pictq_rindex == VIDEO_PICTURE_QUEUE_SIZE)
1161                 is->pictq_rindex = 0;
1162
1163             SDL_LockMutex(is->pictq_mutex);
1164             is->pictq_size--;
1165             SDL_CondSignal(is->pictq_cond);
1166             SDL_UnlockMutex(is->pictq_mutex);
1167         }
1168     } else if (is->audio_st) {
1169         /* draw the next audio frame */
1170
1171         /* if there is only an audio stream, then display the audio bars
1172            (better than nothing, just to test the implementation) */
1173
1174         /* display picture */
1175         if (!display_disable)
1176             video_display(is);
1177     }
1178     if (show_status) {
1179         static int64_t last_time;
1180         int64_t cur_time;
1181         int aqsize, vqsize, sqsize;
1182         double av_diff;
1183
1184         cur_time = av_gettime_relative();
1185         if (!last_time || (cur_time - last_time) >= 30000) {
1186             aqsize = 0;
1187             vqsize = 0;
1188             sqsize = 0;
1189             if (is->audio_st)
1190                 aqsize = is->audioq.size;
1191             if (is->video_st)
1192                 vqsize = is->videoq.size;
1193             if (is->subtitle_st)
1194                 sqsize = is->subtitleq.size;
1195             av_diff = 0;
1196             if (is->audio_st && is->video_st)
1197                 av_diff = get_audio_clock(is) - get_video_clock(is);
1198             printf("%7.2f A-V:%7.3f s:%3.1f aq=%5dKB vq=%5dKB sq=%5dB f=%"PRId64"/%"PRId64"   \r",
1199                    get_master_clock(is), av_diff, FFMAX(is->skip_frames - 1, 0), aqsize / 1024,
1200                    vqsize / 1024, sqsize, is->pts_ctx.num_faulty_dts, is->pts_ctx.num_faulty_pts);
1201             fflush(stdout);
1202             last_time = cur_time;
1203         }
1204     }
1205 }
1206
1207 static void player_close(VideoState *is)
1208 {
1209     VideoPicture *vp;
1210     int i;
1211     /* XXX: use a special url_shutdown call to abort parse cleanly */
1212     is->abort_request = 1;
1213     SDL_WaitThread(is->parse_tid, NULL);
1214     SDL_WaitThread(is->refresh_tid, NULL);
1215
1216     /* free all pictures */
1217     for (i = 0; i < VIDEO_PICTURE_QUEUE_SIZE; i++) {
1218         vp = &is->pictq[i];
1219         if (vp->bmp) {
1220             SDL_FreeYUVOverlay(vp->bmp);
1221             vp->bmp = NULL;
1222         }
1223     }
1224     SDL_DestroyMutex(is->pictq_mutex);
1225     SDL_DestroyCond(is->pictq_cond);
1226     SDL_DestroyMutex(is->subpq_mutex);
1227     SDL_DestroyCond(is->subpq_cond);
1228 #if !CONFIG_AVFILTER
1229     if (is->img_convert_ctx)
1230         sws_freeContext(is->img_convert_ctx);
1231 #endif
1232     av_free(is);
1233 }
1234
1235 static void do_exit(void)
1236 {
1237     if (cur_stream) {
1238         player_close(cur_stream);
1239         cur_stream = NULL;
1240     }
1241     uninit_opts();
1242     avformat_network_deinit();
1243     if (show_status)
1244         printf("\n");
1245     SDL_Quit();
1246     av_log(NULL, AV_LOG_QUIET, "");
1247     exit(0);
1248 }
1249
1250 /* allocate a picture (this needs to be done in the main thread to avoid
1251    potential locking problems) */
1252 static void alloc_picture(void *opaque)
1253 {
1254     VideoState *is = opaque;
1255     VideoPicture *vp;
1256
1257     vp = &is->pictq[is->pictq_windex];
1258
1259     if (vp->bmp)
1260         SDL_FreeYUVOverlay(vp->bmp);
1261
1262 #if CONFIG_AVFILTER
1263     vp->width   = is->out_video_filter->inputs[0]->w;
1264     vp->height  = is->out_video_filter->inputs[0]->h;
1265     vp->pix_fmt = is->out_video_filter->inputs[0]->format;
1266 #else
1267     vp->width   = is->video_st->codec->width;
1268     vp->height  = is->video_st->codec->height;
1269     vp->pix_fmt = is->video_st->codec->pix_fmt;
1270 #endif
1271
1272     vp->bmp = SDL_CreateYUVOverlay(vp->width, vp->height,
1273                                    SDL_YV12_OVERLAY,
1274                                    screen);
1275     if (!vp->bmp || vp->bmp->pitches[0] < vp->width) {
1276         /* SDL allocates a buffer smaller than requested if the video
1277          * overlay hardware is unable to support the requested size. */
1278         fprintf(stderr, "Error: the video system does not support an image\n"
1279                         "size of %dx%d pixels. Try using -vf \"scale=w:h\"\n"
1280                         "to reduce the image size.\n", vp->width, vp->height );
1281         do_exit();
1282     }
1283
1284     SDL_LockMutex(is->pictq_mutex);
1285     vp->allocated = 1;
1286     SDL_CondSignal(is->pictq_cond);
1287     SDL_UnlockMutex(is->pictq_mutex);
1288 }
1289
1290 /* The 'pts' parameter is the dts of the packet / pts of the frame and
1291  * guessed if not known. */
1292 static int queue_picture(VideoState *is, AVFrame *src_frame, double pts, int64_t pos)
1293 {
1294     VideoPicture *vp;
1295 #if !CONFIG_AVFILTER
1296     int dst_pix_fmt = AV_PIX_FMT_YUV420P;
1297 #endif
1298     /* wait until we have space to put a new picture */
1299     SDL_LockMutex(is->pictq_mutex);
1300
1301     if (is->pictq_size >= VIDEO_PICTURE_QUEUE_SIZE && !is->refresh)
1302         is->skip_frames = FFMAX(1.0 - FRAME_SKIP_FACTOR, is->skip_frames * (1.0 - FRAME_SKIP_FACTOR));
1303
1304     while (is->pictq_size >= VIDEO_PICTURE_QUEUE_SIZE &&
1305            !is->videoq.abort_request) {
1306         SDL_CondWait(is->pictq_cond, is->pictq_mutex);
1307     }
1308     SDL_UnlockMutex(is->pictq_mutex);
1309
1310     if (is->videoq.abort_request)
1311         return -1;
1312
1313     vp = &is->pictq[is->pictq_windex];
1314
1315     vp->sar = src_frame->sample_aspect_ratio;
1316
1317     /* alloc or resize hardware picture buffer */
1318     if (!vp->bmp || vp->reallocate ||
1319 #if CONFIG_AVFILTER
1320         vp->width  != is->out_video_filter->inputs[0]->w ||
1321         vp->height != is->out_video_filter->inputs[0]->h) {
1322 #else
1323         vp->width != is->video_st->codec->width ||
1324         vp->height != is->video_st->codec->height) {
1325 #endif
1326         SDL_Event event;
1327
1328         vp->allocated  = 0;
1329         vp->reallocate = 0;
1330
1331         /* the allocation must be done in the main thread to avoid
1332            locking problems */
1333         event.type = FF_ALLOC_EVENT;
1334         event.user.data1 = is;
1335         SDL_PushEvent(&event);
1336
1337         /* wait until the picture is allocated */
1338         SDL_LockMutex(is->pictq_mutex);
1339         while (!vp->allocated && !is->videoq.abort_request) {
1340             SDL_CondWait(is->pictq_cond, is->pictq_mutex);
1341         }
1342         SDL_UnlockMutex(is->pictq_mutex);
1343
1344         if (is->videoq.abort_request)
1345             return -1;
1346     }
1347
1348     /* if the frame is not skipped, then display it */
1349     if (vp->bmp) {
1350         uint8_t *data[4];
1351         int linesize[4];
1352
1353         /* get a pointer to the bitmap */
1354         SDL_LockYUVOverlay (vp->bmp);
1355
1356         data[0] = vp->bmp->pixels[0];
1357         data[1] = vp->bmp->pixels[2];
1358         data[2] = vp->bmp->pixels[1];
1359
1360         linesize[0] = vp->bmp->pitches[0];
1361         linesize[1] = vp->bmp->pitches[2];
1362         linesize[2] = vp->bmp->pitches[1];
1363
1364 #if CONFIG_AVFILTER
1365         // FIXME use direct rendering
1366         av_image_copy(data, linesize, src_frame->data, src_frame->linesize,
1367                       vp->pix_fmt, vp->width, vp->height);
1368 #else
1369         av_opt_get_int(sws_opts, "sws_flags", 0, &sws_flags);
1370         is->img_convert_ctx = sws_getCachedContext(is->img_convert_ctx,
1371             vp->width, vp->height, vp->pix_fmt, vp->width, vp->height,
1372             dst_pix_fmt, sws_flags, NULL, NULL, NULL);
1373         if (!is->img_convert_ctx) {
1374             fprintf(stderr, "Cannot initialize the conversion context\n");
1375             exit(1);
1376         }
1377         sws_scale(is->img_convert_ctx, src_frame->data, src_frame->linesize,
1378                   0, vp->height, data, linesize);
1379 #endif
1380         /* update the bitmap content */
1381         SDL_UnlockYUVOverlay(vp->bmp);
1382
1383         vp->pts = pts;
1384         vp->pos = pos;
1385
1386         /* now we can update the picture count */
1387         if (++is->pictq_windex == VIDEO_PICTURE_QUEUE_SIZE)
1388             is->pictq_windex = 0;
1389         SDL_LockMutex(is->pictq_mutex);
1390         vp->target_clock = compute_target_time(vp->pts, is);
1391
1392         is->pictq_size++;
1393         SDL_UnlockMutex(is->pictq_mutex);
1394     }
1395     return 0;
1396 }
1397
1398 /* Compute the exact PTS for the picture if it is omitted in the stream.
1399  * The 'pts1' parameter is the dts of the packet / pts of the frame. */
1400 static int output_picture2(VideoState *is, AVFrame *src_frame, double pts1, int64_t pos)
1401 {
1402     double frame_delay, pts;
1403     int ret;
1404
1405     pts = pts1;
1406
1407     if (pts != 0) {
1408         /* update video clock with pts, if present */
1409         is->video_clock = pts;
1410     } else {
1411         pts = is->video_clock;
1412     }
1413     /* update video clock for next frame */
1414     frame_delay = av_q2d(is->video_st->codec->time_base);
1415     /* for MPEG2, the frame can be repeated, so we update the
1416        clock accordingly */
1417     frame_delay += src_frame->repeat_pict * (frame_delay * 0.5);
1418     is->video_clock += frame_delay;
1419
1420     ret = queue_picture(is, src_frame, pts, pos);
1421     av_frame_unref(src_frame);
1422     return ret;
1423 }
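/* Worked example (illustrative numbers): with a 25 fps stream the codec time
 * base gives frame_delay = 0.040 s; a frame with repeat_pict = 1 (e.g. a
 * repeated field from MPEG-2 pulldown) adds another 0.020 s, so video_clock
 * advances by 0.060 s for that frame. */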
1424
1425 static int get_video_frame(VideoState *is, AVFrame *frame, int64_t *pts, AVPacket *pkt)
1426 {
1427     int got_picture, i;
1428
1429     if (packet_queue_get(&is->videoq, pkt, 1) < 0)
1430         return -1;
1431
1432     if (pkt->data == flush_pkt.data) {
1433         avcodec_flush_buffers(is->video_st->codec);
1434
1435         SDL_LockMutex(is->pictq_mutex);
1436         // Make sure there are no long delay timers (ideally we should just flush the queue, but that's harder)
1437         for (i = 0; i < VIDEO_PICTURE_QUEUE_SIZE; i++) {
1438             is->pictq[i].target_clock= 0;
1439         }
1440         while (is->pictq_size && !is->videoq.abort_request) {
1441             SDL_CondWait(is->pictq_cond, is->pictq_mutex);
1442         }
1443         is->video_current_pos = -1;
1444         SDL_UnlockMutex(is->pictq_mutex);
1445
1446         init_pts_correction(&is->pts_ctx);
1447         is->frame_last_pts = AV_NOPTS_VALUE;
1448         is->frame_last_delay = 0;
1449         is->frame_timer = (double)av_gettime_relative() / 1000000.0;
1450         is->skip_frames = 1;
1451         is->skip_frames_index = 0;
1452         return 0;
1453     }
1454
1455     avcodec_decode_video2(is->video_st->codec, frame, &got_picture, pkt);
1456
1457     if (got_picture) {
1458         if (decoder_reorder_pts == -1) {
1459             *pts = guess_correct_pts(&is->pts_ctx, frame->pkt_pts, frame->pkt_dts);
1460         } else if (decoder_reorder_pts) {
1461             *pts = frame->pkt_pts;
1462         } else {
1463             *pts = frame->pkt_dts;
1464         }
1465
1466         if (*pts == AV_NOPTS_VALUE) {
1467             *pts = 0;
1468         }
1469         if (is->video_st->sample_aspect_ratio.num) {
1470             frame->sample_aspect_ratio = is->video_st->sample_aspect_ratio;
1471         }
1472
1473         is->skip_frames_index += 1;
1474         if (is->skip_frames_index >= is->skip_frames) {
1475             is->skip_frames_index -= FFMAX(is->skip_frames, 1.0);
1476             return 1;
1477         }
1478         av_frame_unref(frame);
1479     }
1480     return 0;
1481 }
1482
1483 #if CONFIG_AVFILTER
1484 static int configure_video_filters(AVFilterGraph *graph, VideoState *is, const char *vfilters)
1485 {
1486     char sws_flags_str[128];
1487     char buffersrc_args[256];
1488     int ret;
1489     AVFilterContext *filt_src = NULL, *filt_out = NULL, *last_filter;
1490     AVCodecContext *codec = is->video_st->codec;
1491
1492     snprintf(sws_flags_str, sizeof(sws_flags_str), "flags=%"PRId64, sws_flags);
1493     graph->scale_sws_opts = av_strdup(sws_flags_str);
1494
1495     snprintf(buffersrc_args, sizeof(buffersrc_args), "%d:%d:%d:%d:%d:%d:%d",
1496              codec->width, codec->height, codec->pix_fmt,
1497              is->video_st->time_base.num, is->video_st->time_base.den,
1498              codec->sample_aspect_ratio.num, codec->sample_aspect_ratio.den);
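    /* e.g. for a hypothetical 1280x720 yuv420p stream with a 1/25 time base and
       square pixels this produces "1280:720:0:1:25:1:1"; the pixel format is
       passed as its numeric enum value (AV_PIX_FMT_YUV420P == 0) */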
1499
1500
1501     if ((ret = avfilter_graph_create_filter(&filt_src,
1502                                             avfilter_get_by_name("buffer"),
1503                                             "src", buffersrc_args, NULL,
1504                                             graph)) < 0)
1505         return ret;
1506     if ((ret = avfilter_graph_create_filter(&filt_out,
1507                                             avfilter_get_by_name("buffersink"),
1508                                             "out", NULL, NULL, graph)) < 0)
1509         return ret;
1510
1511     last_filter = filt_out;
1512
1513 /* Note: this macro adds a filter before the last added filter, so the
1514  * processing order of the filters is reversed */
1515 #define INSERT_FILT(name, arg) do {                                          \
1516     AVFilterContext *filt_ctx;                                               \
1517                                                                              \
1518     ret = avfilter_graph_create_filter(&filt_ctx,                            \
1519                                        avfilter_get_by_name(name),           \
1520                                        "avplay_" name, arg, NULL, graph);    \
1521     if (ret < 0)                                                             \
1522         return ret;                                                          \
1523                                                                              \
1524     ret = avfilter_link(filt_ctx, 0, last_filter, 0);                        \
1525     if (ret < 0)                                                             \
1526         return ret;                                                          \
1527                                                                              \
1528     last_filter = filt_ctx;                                                  \
1529 } while (0)
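/* For example, the calls below first insert "format" and then possibly a
 * "transpose" (or flip) filter; because each new filter is linked in front of
 * the previously inserted one, the resulting chain is
 * buffer src -> transpose -> format -> buffersink, i.e. frames pass through
 * the filters in the reverse order of insertion. */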
1530
1531     INSERT_FILT("format", "yuv420p");
1532
1533     if (autorotate) {
1534         uint8_t* displaymatrix = av_stream_get_side_data(is->video_st,
1535                                                          AV_PKT_DATA_DISPLAYMATRIX, NULL);
1536         if (displaymatrix) {
1537             double rot = av_display_rotation_get((int32_t*) displaymatrix);
1538             if (rot < -135 || rot > 135) {
1539                 INSERT_FILT("vflip", NULL);
1540                 INSERT_FILT("hflip", NULL);
1541             } else if (rot < -45) {
1542                 INSERT_FILT("transpose", "dir=clock");
1543             } else if (rot > 45) {
1544                 INSERT_FILT("transpose", "dir=cclock");
1545             }
1546         }
1547     }
1548
1549     if (vfilters) {
1550         AVFilterInOut *outputs = avfilter_inout_alloc();
1551         AVFilterInOut *inputs  = avfilter_inout_alloc();
1552
1553         outputs->name    = av_strdup("in");
1554         outputs->filter_ctx = filt_src;
1555         outputs->pad_idx = 0;
1556         outputs->next    = NULL;
1557
1558         inputs->name    = av_strdup("out");
1559         inputs->filter_ctx = last_filter;
1560         inputs->pad_idx = 0;
1561         inputs->next    = NULL;
1562
1563         if ((ret = avfilter_graph_parse(graph, vfilters, inputs, outputs, NULL)) < 0)
1564             return ret;
1565     } else {
1566         if ((ret = avfilter_link(filt_src, 0, last_filter, 0)) < 0)
1567             return ret;
1568     }
1569
1570     if ((ret = avfilter_graph_config(graph, NULL)) < 0)
1571         return ret;
1572
1573     is->in_video_filter  = filt_src;
1574     is->out_video_filter = filt_out;
1575
1576     return ret;
1577 }
1578
1579 #endif  /* CONFIG_AVFILTER */
1580
1581 static int video_thread(void *arg)
1582 {
1583     AVPacket pkt = { 0 };
1584     VideoState *is = arg;
1585     AVFrame *frame = av_frame_alloc();
1586     int64_t pts_int;
1587     double pts;
1588     int ret;
1589
1590 #if CONFIG_AVFILTER
1591     AVFilterGraph *graph = avfilter_graph_alloc();
1592     AVFilterContext *filt_out = NULL, *filt_in = NULL;
1593     int last_w = is->video_st->codec->width;
1594     int last_h = is->video_st->codec->height;
1595     if (!graph) {
1596         av_frame_free(&frame);
1597         return AVERROR(ENOMEM);
1598     }
1599
1600     if ((ret = configure_video_filters(graph, is, vfilters)) < 0)
1601         goto the_end;
1602     filt_in  = is->in_video_filter;
1603     filt_out = is->out_video_filter;
1604 #endif
1605
1606     if (!frame) {
1607 #if CONFIG_AVFILTER
1608         avfilter_graph_free(&graph);
1609 #endif
1610         return AVERROR(ENOMEM);
1611     }
1612
1613     for (;;) {
1614 #if CONFIG_AVFILTER
1615         AVRational tb;
1616 #endif
1617         while (is->paused && !is->videoq.abort_request)
1618             SDL_Delay(10);
1619
1620         av_packet_unref(&pkt);
1621
1622         ret = get_video_frame(is, frame, &pts_int, &pkt);
1623         if (ret < 0)
1624             goto the_end;
1625
1626         if (!ret)
1627             continue;
1628
1629 #if CONFIG_AVFILTER
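             /* the filter graph was built for a fixed frame size, so tear it
                down and reconfigure it whenever the decoder reports new
                dimensions (e.g. a mid-stream resolution change) */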
1630         if (   last_w != is->video_st->codec->width
1631             || last_h != is->video_st->codec->height) {
1632             av_log(NULL, AV_LOG_TRACE, "Changing size %dx%d -> %dx%d\n", last_w, last_h,
1633                     is->video_st->codec->width, is->video_st->codec->height);
1634             avfilter_graph_free(&graph);
1635             graph = avfilter_graph_alloc();
1636             if ((ret = configure_video_filters(graph, is, vfilters)) < 0)
1637                 goto the_end;
1638             filt_in  = is->in_video_filter;
1639             filt_out = is->out_video_filter;
1640             last_w = is->video_st->codec->width;
1641             last_h = is->video_st->codec->height;
1642         }
1643
1644         frame->pts = pts_int;
1645         ret = av_buffersrc_add_frame(filt_in, frame);
1646         if (ret < 0)
1647             goto the_end;
1648
1649         while (ret >= 0) {
1650             ret = av_buffersink_get_frame(filt_out, frame);
1651             if (ret < 0) {
1652                 ret = 0;
1653                 break;
1654             }
1655
1656             pts_int = frame->pts;
1657             tb      = filt_out->inputs[0]->time_base;
1658             if (av_cmp_q(tb, is->video_st->time_base)) {
1659                 av_unused int64_t pts1 = pts_int;
1660                 pts_int = av_rescale_q(pts_int, tb, is->video_st->time_base);
1661                 av_log(NULL, AV_LOG_TRACE, "video_thread(): "
1662                         "tb:%d/%d pts:%"PRId64" -> tb:%d/%d pts:%"PRId64"\n",
1663                         tb.num, tb.den, pts1,
1664                         is->video_st->time_base.num, is->video_st->time_base.den, pts_int);
1665             }
1666             pts = pts_int * av_q2d(is->video_st->time_base);
1667             ret = output_picture2(is, frame, pts, 0);
1668         }
1669 #else
1670         pts = pts_int * av_q2d(is->video_st->time_base);
1671         ret = output_picture2(is, frame, pts,  pkt.pos);
1672 #endif
1673
1674         if (ret < 0)
1675             goto the_end;
1676
1677
1678         if (step)
1679             if (cur_stream)
1680                 stream_pause(cur_stream);
1681     }
1682  the_end:
1683 #if CONFIG_AVFILTER
1684     av_freep(&vfilters);
1685     avfilter_graph_free(&graph);
1686 #endif
1687     av_packet_unref(&pkt);
1688     av_frame_free(&frame);
1689     return 0;
1690 }
1691
1692 static int subtitle_thread(void *arg)
1693 {
1694     VideoState *is = arg;
1695     SubPicture *sp;
1696     AVPacket pkt1, *pkt = &pkt1;
1697     int got_subtitle;
1698     double pts;
1699     int i, j;
1700     int r, g, b, y, u, v, a;
1701
1702     for (;;) {
1703         while (is->paused && !is->subtitleq.abort_request) {
1704             SDL_Delay(10);
1705         }
1706         if (packet_queue_get(&is->subtitleq, pkt, 1) < 0)
1707             break;
1708
1709         if (pkt->data == flush_pkt.data) {
1710             avcodec_flush_buffers(is->subtitle_st->codec);
1711             continue;
1712         }
1713         SDL_LockMutex(is->subpq_mutex);
1714         while (is->subpq_size >= SUBPICTURE_QUEUE_SIZE &&
1715                !is->subtitleq.abort_request) {
1716             SDL_CondWait(is->subpq_cond, is->subpq_mutex);
1717         }
1718         SDL_UnlockMutex(is->subpq_mutex);
1719
1720         if (is->subtitleq.abort_request)
1721             return 0;
1722
1723         sp = &is->subpq[is->subpq_windex];
1724
1725         /* NOTE: pts is the PTS of the _first_ subtitle beginning in
1726            this packet, if any */
1727         pts = 0;
1728         if (pkt->pts != AV_NOPTS_VALUE)
1729             pts = av_q2d(is->subtitle_st->time_base) * pkt->pts;
1730
1731         avcodec_decode_subtitle2(is->subtitle_st->codec, &sp->sub,
1732                                  &got_subtitle, pkt);
1733
1734         if (got_subtitle && sp->sub.format == 0) {
1735             sp->pts = pts;
1736
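                 /* bitmap subtitles carry an RGBA palette; convert it in place
                    to CCIR-range YUVA so the rects can later be blended
                    directly onto the YUV video picture */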
1737             for (i = 0; i < sp->sub.num_rects; i++)
1738             {
1739                 for (j = 0; j < sp->sub.rects[i]->nb_colors; j++)
1740                 {
1741                     RGBA_IN(r, g, b, a, (uint32_t *)sp->sub.rects[i]->data[1] + j);
1742                     y = RGB_TO_Y_CCIR(r, g, b);
1743                     u = RGB_TO_U_CCIR(r, g, b, 0);
1744                     v = RGB_TO_V_CCIR(r, g, b, 0);
1745                     YUVA_OUT((uint32_t *)sp->sub.rects[i]->data[1] + j, y, u, v, a);
1746                 }
1747             }
1748
1749             /* now we can update the subpicture count */
1750             if (++is->subpq_windex == SUBPICTURE_QUEUE_SIZE)
1751                 is->subpq_windex = 0;
1752             SDL_LockMutex(is->subpq_mutex);
1753             is->subpq_size++;
1754             SDL_UnlockMutex(is->subpq_mutex);
1755         }
1756         av_packet_unref(pkt);
1757     }
1758     return 0;
1759 }
1760
1761 /* copy samples for viewing in editor window */
1762 static void update_sample_display(VideoState *is, short *samples, int samples_size)
1763 {
1764     int size, len;
1765
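         /* sample_array is a fixed-size circular buffer feeding the audio
            visualization; copy the new samples in and wrap the write index
            around at SAMPLE_ARRAY_SIZE */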
1766     size = samples_size / sizeof(short);
1767     while (size > 0) {
1768         len = SAMPLE_ARRAY_SIZE - is->sample_array_index;
1769         if (len > size)
1770             len = size;
1771         memcpy(is->sample_array + is->sample_array_index, samples, len * sizeof(short));
1772         samples += len;
1773         is->sample_array_index += len;
1774         if (is->sample_array_index >= SAMPLE_ARRAY_SIZE)
1775             is->sample_array_index = 0;
1776         size -= len;
1777     }
1778 }
1779
1780 /* return the new audio buffer size (samples can be added or removed
1781    to get better sync when video or the external clock is the master) */
1782 static int synchronize_audio(VideoState *is, short *samples,
1783                              int samples_size1, double pts)
1784 {
1785     int n, samples_size;
1786     double ref_clock;
1787
1788     n = is->sdl_channels * av_get_bytes_per_sample(is->sdl_sample_fmt);
1789     samples_size = samples_size1;
1790
1791     /* if not master, then we try to remove or add samples to correct the clock */
1792     if (((is->av_sync_type == AV_SYNC_VIDEO_MASTER && is->video_st) ||
1793          is->av_sync_type == AV_SYNC_EXTERNAL_CLOCK)) {
1794         double diff, avg_diff;
1795         int wanted_size, min_size, max_size, nb_samples;
1796
1797         ref_clock = get_master_clock(is);
1798         diff = get_audio_clock(is) - ref_clock;
1799
1800         if (fabs(diff) < AV_NOSYNC_THRESHOLD) {
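             /* audio_diff_cum is an exponentially weighted sum of the measured
                A-V differences, so the effective average is cum * (1 - coef);
                with coef = exp(log(0.01) / AUDIO_DIFF_AVG_NB) a measurement
                decays to 1% of its weight after AUDIO_DIFF_AVG_NB updates */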
1801             is->audio_diff_cum = diff + is->audio_diff_avg_coef * is->audio_diff_cum;
1802             if (is->audio_diff_avg_count < AUDIO_DIFF_AVG_NB) {
1803                 /* not enough measures to have a correct estimate */
1804                 is->audio_diff_avg_count++;
1805             } else {
1806                 /* estimate the A-V difference */
1807                 avg_diff = is->audio_diff_cum * (1.0 - is->audio_diff_avg_coef);
1808
1809                 if (fabs(avg_diff) >= is->audio_diff_threshold) {
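                     /* illustrative numbers: for stereo S16 (n = 4) at 48 kHz
                        and diff = +0.020 s, wanted_size grows by
                        (int)(0.020 * 48000) * 4 = 3840 bytes, then is clamped
                        to within SAMPLE_CORRECTION_PERCENT_MAX percent of the
                        original buffer size */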
1810                     wanted_size = samples_size + ((int)(diff * is->sdl_sample_rate) * n);
1811                     nb_samples = samples_size / n;
1812
1813                     min_size = ((nb_samples * (100 - SAMPLE_CORRECTION_PERCENT_MAX)) / 100) * n;
1814                     max_size = ((nb_samples * (100 + SAMPLE_CORRECTION_PERCENT_MAX)) / 100) * n;
1815                     if (wanted_size < min_size)
1816                         wanted_size = min_size;
1817                     else if (wanted_size > max_size)
1818                         wanted_size = max_size;
1819
1820                     /* add or remove samples to correct the sync */
1821                     if (wanted_size < samples_size) {
1822                         /* remove samples */
1823                         samples_size = wanted_size;
1824                     } else if (wanted_size > samples_size) {
1825                         uint8_t *samples_end, *q;
1826                         int nb;
1827
1828                         /* add samples by repeating the last sample frame */
1829                         nb = wanted_size - samples_size;
1830                         samples_end = (uint8_t *)samples + samples_size - n;
1831                         q = samples_end + n;
1832                         while (nb > 0) {
1833                             memcpy(q, samples_end, n);
1834                             q += n;
1835                             nb -= n;
1836                         }
1837                         samples_size = wanted_size;
1838                     }
1839                 }
1840                 av_log(NULL, AV_LOG_TRACE, "diff=%f adiff=%f sample_diff=%d apts=%0.3f vpts=%0.3f %f\n",
1841                         diff, avg_diff, samples_size - samples_size1,
1842                         is->audio_clock, is->video_clock, is->audio_diff_threshold);
1843             }
1844         } else {
1845             /* the difference is too large: probably initial PTS errors, so
1846                reset the A-V filter */
1847             is->audio_diff_avg_count = 0;
1848             is->audio_diff_cum       = 0;
1849         }
1850     }
1851
1852     return samples_size;
1853 }
1854
1855 /* decode one audio frame and return its uncompressed size in bytes */
1856 static int audio_decode_frame(VideoState *is, double *pts_ptr)
1857 {
1858     AVPacket *pkt_temp = &is->audio_pkt_temp;
1859     AVPacket *pkt = &is->audio_pkt;
1860     AVCodecContext *dec = is->audio_st->codec;
1861     int n, len1, data_size, got_frame;
1862     double pts;
1863     int new_packet = 0;
1864     int flush_complete = 0;
1865
1866     for (;;) {
1867         /* NOTE: the audio packet can contain several frames */
1868         while (pkt_temp->size > 0 || (!pkt_temp->data && new_packet)) {
1869             int resample_changed, audio_resample;
1870
1871             if (!is->frame) {
1872                 if (!(is->frame = av_frame_alloc()))
1873                     return AVERROR(ENOMEM);
1874             }
1875
1876             if (flush_complete)
1877                 break;
1878             new_packet = 0;
1879             len1 = avcodec_decode_audio4(dec, is->frame, &got_frame, pkt_temp);
1880             if (len1 < 0) {
1881                 /* if error, we skip the frame */
1882                 pkt_temp->size = 0;
1883                 break;
1884             }
1885
1886             pkt_temp->data += len1;
1887             pkt_temp->size -= len1;
1888
1889             if (!got_frame) {
1890                 /* stop sending empty packets if the decoder is finished */
1891                 if (!pkt_temp->data && (dec->codec->capabilities & AV_CODEC_CAP_DELAY))
1892                     flush_complete = 1;
1893                 continue;
1894             }
1895             data_size = av_samples_get_buffer_size(NULL, dec->channels,
1896                                                    is->frame->nb_samples,
1897                                                    is->frame->format, 1);
1898
1899             audio_resample = is->frame->format         != is->sdl_sample_fmt     ||
1900                              is->frame->channel_layout != is->sdl_channel_layout ||
1901                              is->frame->sample_rate    != is->sdl_sample_rate;
1902
1903             resample_changed = is->frame->format         != is->resample_sample_fmt     ||
1904                                is->frame->channel_layout != is->resample_channel_layout ||
1905                                is->frame->sample_rate    != is->resample_sample_rate;
1906
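             /* (re)configure libavresample lazily: only when conversion to the
                SDL output format is actually needed, or when the decoded
                format, channel layout or sample rate changed since the
                previous frame */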
1907             if ((!is->avr && audio_resample) || resample_changed) {
1908                 int ret;
1909                 if (is->avr)
1910                     avresample_close(is->avr);
1911                 else if (audio_resample) {
1912                     is->avr = avresample_alloc_context();
1913                     if (!is->avr) {
1914                         fprintf(stderr, "error allocating AVAudioResampleContext\n");
1915                         break;
1916                     }
1917                 }
1918                 if (audio_resample) {
1919                     av_opt_set_int(is->avr, "in_channel_layout",  is->frame->channel_layout, 0);
1920                     av_opt_set_int(is->avr, "in_sample_fmt",      is->frame->format,         0);
1921                     av_opt_set_int(is->avr, "in_sample_rate",     is->frame->sample_rate,    0);
1922                     av_opt_set_int(is->avr, "out_channel_layout", is->sdl_channel_layout,    0);
1923                     av_opt_set_int(is->avr, "out_sample_fmt",     is->sdl_sample_fmt,        0);
1924                     av_opt_set_int(is->avr, "out_sample_rate",    is->sdl_sample_rate,       0);
1925
1926                     if ((ret = avresample_open(is->avr)) < 0) {
1927                         fprintf(stderr, "error initializing libavresample\n");
1928                         break;
1929                     }
1930                 }
1931                 is->resample_sample_fmt     = is->frame->format;
1932                 is->resample_channel_layout = is->frame->channel_layout;
1933                 is->resample_sample_rate    = is->frame->sample_rate;
1934             }
1935
1936             if (audio_resample) {
1937                 void *tmp_out;
1938                 int out_samples, out_size, out_linesize;
1939                 int osize      = av_get_bytes_per_sample(is->sdl_sample_fmt);
1940                 int nb_samples = is->frame->nb_samples;
1941
1942                 out_size = av_samples_get_buffer_size(&out_linesize,
1943                                                       is->sdl_channels,
1944                                                       nb_samples,
1945                                                       is->sdl_sample_fmt, 0);
1946                 tmp_out = av_realloc(is->audio_buf1, out_size);
1947                 if (!tmp_out)
1948                     return AVERROR(ENOMEM);
1949                 is->audio_buf1 = tmp_out;
1950
1951                 out_samples = avresample_convert(is->avr,
1952                                                  &is->audio_buf1,
1953                                                  out_linesize, nb_samples,
1954                                                  is->frame->data,
1955                                                  is->frame->linesize[0],
1956                                                  is->frame->nb_samples);
1957                 if (out_samples < 0) {
1958                     fprintf(stderr, "avresample_convert() failed\n");
1959                     break;
1960                 }
1961                 is->audio_buf = is->audio_buf1;
1962                 data_size = out_samples * osize * is->sdl_channels;
1963             } else {
1964                 is->audio_buf = is->frame->data[0];
1965             }
1966
1967             /* take the pts from the running audio clock, then advance the clock */
1968             pts = is->audio_clock;
1969             *pts_ptr = pts;
1970             n = is->sdl_channels * av_get_bytes_per_sample(is->sdl_sample_fmt);
1971             is->audio_clock += (double)data_size /
1972                 (double)(n * is->sdl_sample_rate);
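             /* e.g. 4096 bytes of stereo S16 at 44100 Hz advance the clock by
                4096 / (4 * 44100) ~= 0.023 seconds */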
1973 #ifdef DEBUG
1974             {
1975                 static double last_clock;
1976                 printf("audio: delay=%0.3f clock=%0.3f pts=%0.3f\n",
1977                        is->audio_clock - last_clock,
1978                        is->audio_clock, pts);
1979                 last_clock = is->audio_clock;
1980             }
1981 #endif
1982             return data_size;
1983         }
1984
1985         /* free the current packet */
1986         if (pkt->data)
1987             av_packet_unref(pkt);
1988         memset(pkt_temp, 0, sizeof(*pkt_temp));
1989
1990         if (is->paused || is->audioq.abort_request) {
1991             return -1;
1992         }
1993
1994         /* read next packet */
1995         if ((new_packet = packet_queue_get(&is->audioq, pkt, 1)) < 0)
1996             return -1;
1997
1998         if (pkt->data == flush_pkt.data) {
1999             avcodec_flush_buffers(dec);
2000             flush_complete = 0;
2001         }
2002
2003         *pkt_temp = *pkt;
2004
2005         /* update the audio clock with the packet pts, if available */
2006         if (pkt->pts != AV_NOPTS_VALUE) {
2007             is->audio_clock = av_q2d(is->audio_st->time_base)*pkt->pts;
2008         }
2009     }
2010 }
2011
2012 /* prepare a new audio buffer */
2013 static void sdl_audio_callback(void *opaque, Uint8 *stream, int len)
2014 {
2015     VideoState *is = opaque;
2016     int audio_size, len1;
2017     double pts;
2018
2019     audio_callback_time = av_gettime_relative();
2020
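         /* SDL asks for exactly len bytes: keep decoding into audio_buf until
            the request is satisfied, substituting silence when decoding fails
            so SDL always receives a full buffer */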
2021     while (len > 0) {
2022         if (is->audio_buf_index >= is->audio_buf_size) {
2023             audio_size = audio_decode_frame(is, &pts);
2024             if (audio_size < 0) {
2025                 /* if error, just output silence */
2026                 is->audio_buf      = is->silence_buf;
2027                 is->audio_buf_size = sizeof(is->silence_buf);
2028             } else {
2029                 if (is->show_audio)
2030                     update_sample_display(is, (int16_t *)is->audio_buf, audio_size);
2031                 audio_size = synchronize_audio(is, (int16_t *)is->audio_buf, audio_size,
2032                                                pts);
2033                 is->audio_buf_size = audio_size;
2034             }
2035             is->audio_buf_index = 0;
2036         }
2037         len1 = is->audio_buf_size - is->audio_buf_index;
2038         if (len1 > len)
2039             len1 = len;
2040         memcpy(stream, (uint8_t *)is->audio_buf + is->audio_buf_index, len1);
2041         len -= len1;
2042         stream += len1;
2043         is->audio_buf_index += len1;
2044     }
2045 }
2046
2047 /* open a given stream. Return 0 if OK */
2048 static int stream_component_open(VideoState *is, int stream_index)
2049 {
2050     AVFormatContext *ic = is->ic;
2051     AVCodecContext *avctx;
2052     AVCodec *codec;
2053     SDL_AudioSpec wanted_spec, spec;
2054     AVDictionary *opts;
2055     AVDictionaryEntry *t = NULL;
2056     int ret = 0;
2057
2058     if (stream_index < 0 || stream_index >= ic->nb_streams)
2059         return -1;
2060     avctx = ic->streams[stream_index]->codec;
2061
2062     opts = filter_codec_opts(codec_opts, avctx->codec_id, ic, ic->streams[stream_index], NULL);
2063
2064     codec = avcodec_find_decoder(avctx->codec_id);
2065     avctx->workaround_bugs   = workaround_bugs;
2066     avctx->idct_algo         = idct;
2067     avctx->skip_frame        = skip_frame;
2068     avctx->skip_idct         = skip_idct;
2069     avctx->skip_loop_filter  = skip_loop_filter;
2070     avctx->error_concealment = error_concealment;
2071
2072     if (fast)
2073         avctx->flags2 |= AV_CODEC_FLAG2_FAST;
2074
2075     if (!av_dict_get(opts, "threads", NULL, 0))
2076         av_dict_set(&opts, "threads", "auto", 0);
2077     if (avctx->codec_type == AVMEDIA_TYPE_VIDEO)
2078         av_dict_set(&opts, "refcounted_frames", "1", 0);
2079     if (!codec ||
2080         (ret = avcodec_open2(avctx, codec, &opts)) < 0) {
2081         goto fail;
2082     }
2083     if ((t = av_dict_get(opts, "", NULL, AV_DICT_IGNORE_SUFFIX))) {
2084         av_log(NULL, AV_LOG_ERROR, "Option %s not found.\n", t->key);
2085         ret = AVERROR_OPTION_NOT_FOUND;
2086         goto fail;
2087     }
2088
2089     /* prepare audio output */
2090     if (avctx->codec_type == AVMEDIA_TYPE_AUDIO) {
2091         is->sdl_sample_rate = avctx->sample_rate;
2092
2093         if (!avctx->channel_layout)
2094             avctx->channel_layout = av_get_default_channel_layout(avctx->channels);
2095         if (!avctx->channel_layout) {
2096             fprintf(stderr, "unable to guess channel layout\n");
2097             ret = AVERROR_INVALIDDATA;
2098             goto fail;
2099         }
2100         if (avctx->channels == 1)
2101             is->sdl_channel_layout = AV_CH_LAYOUT_MONO;
2102         else
2103             is->sdl_channel_layout = AV_CH_LAYOUT_STEREO;
2104         is->sdl_channels = av_get_channel_layout_nb_channels(is->sdl_channel_layout);
2105
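             /* SDL output is fixed to interleaved signed 16-bit at the codec
                sample rate, downmixed to mono or stereo; anything else is
                converted with libavresample in audio_decode_frame() */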
2106         wanted_spec.format = AUDIO_S16SYS;
2107         wanted_spec.freq = is->sdl_sample_rate;
2108         wanted_spec.channels = is->sdl_channels;
2109         wanted_spec.silence = 0;
2110         wanted_spec.samples = SDL_AUDIO_BUFFER_SIZE;
2111         wanted_spec.callback = sdl_audio_callback;
2112         wanted_spec.userdata = is;
2113         if (SDL_OpenAudio(&wanted_spec, &spec) < 0) {
2114             fprintf(stderr, "SDL_OpenAudio: %s\n", SDL_GetError());
2115             ret = AVERROR_UNKNOWN;
2116             goto fail;
2117         }
2118         is->audio_hw_buf_size = spec.size;
2119         is->sdl_sample_fmt          = AV_SAMPLE_FMT_S16;
2120         is->resample_sample_fmt     = is->sdl_sample_fmt;
2121         is->resample_channel_layout = avctx->channel_layout;
2122         is->resample_sample_rate    = avctx->sample_rate;
2123     }
2124
2125     ic->streams[stream_index]->discard = AVDISCARD_DEFAULT;
2126     switch (avctx->codec_type) {
2127     case AVMEDIA_TYPE_AUDIO:
2128         is->audio_stream = stream_index;
2129         is->audio_st = ic->streams[stream_index];
2130         is->audio_buf_size  = 0;
2131         is->audio_buf_index = 0;
2132
2133         /* init averaging filter */
2134         is->audio_diff_avg_coef  = exp(log(0.01) / AUDIO_DIFF_AVG_NB);
2135         is->audio_diff_avg_count = 0;
2136         /* since we do not have a precise enough measure of the audio FIFO
2137            fullness, only correct audio sync when the error exceeds this threshold */
2138         is->audio_diff_threshold = 2.0 * SDL_AUDIO_BUFFER_SIZE / avctx->sample_rate;
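             /* e.g. with a 1024-sample SDL buffer at 44100 Hz this threshold is
                2 * 1024 / 44100 ~= 46 ms, i.e. two callback buffers of audio */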
2139
2140         memset(&is->audio_pkt, 0, sizeof(is->audio_pkt));
2141         packet_queue_init(&is->audioq);
2142         SDL_PauseAudio(0);
2143         break;
2144     case AVMEDIA_TYPE_VIDEO:
2145         is->video_stream = stream_index;
2146         is->video_st = ic->streams[stream_index];
2147
2148         packet_queue_init(&is->videoq);
2149         is->video_tid = SDL_CreateThread(video_thread, is);
2150         break;
2151     case AVMEDIA_TYPE_SUBTITLE:
2152         is->subtitle_stream = stream_index;
2153         is->subtitle_st = ic->streams[stream_index];
2154         packet_queue_init(&is->subtitleq);
2155
2156         is->subtitle_tid = SDL_CreateThread(subtitle_thread, is);
2157         break;
2158     default:
2159         break;
2160     }
2161
2162 fail:
2163     av_dict_free(&opts);
2164
2165     return ret;
2166 }
2167
2168 static void stream_component_close(VideoState *is, int stream_index)
2169 {
2170     AVFormatContext *ic = is->ic;
2171     AVCodecContext *avctx;
2172
2173     if (stream_index < 0 || stream_index >= ic->nb_streams)
2174         return;
2175     avctx = ic->streams[stream_index]->codec;
2176
2177     switch (avctx->codec_type) {
2178     case AVMEDIA_TYPE_AUDIO:
2179         packet_queue_abort(&is->audioq);
2180
2181         SDL_CloseAudio();
2182
2183         packet_queue_end(&is->audioq);
2184         av_packet_unref(&is->audio_pkt);
2185         if (is->avr)
2186             avresample_free(&is->avr);
2187         av_freep(&is->audio_buf1);
2188         is->audio_buf = NULL;
2189         av_frame_free(&is->frame);
2190
2191         if (is->rdft) {
2192             av_rdft_end(is->rdft);
2193             av_freep(&is->rdft_data);
2194             is->rdft = NULL;
2195             is->rdft_bits = 0;
2196         }
2197         break;
2198     case AVMEDIA_TYPE_VIDEO:
2199         packet_queue_abort(&is->videoq);
2200
2201         /* note: we also signal the condition variable to make sure we
2202            unblock the video thread in all cases */
2203         SDL_LockMutex(is->pictq_mutex);
2204         SDL_CondSignal(is->pictq_cond);
2205         SDL_UnlockMutex(is->pictq_mutex);
2206
2207         SDL_WaitThread(is->video_tid, NULL);
2208
2209         packet_queue_end(&is->videoq);
2210         break;
2211     case AVMEDIA_TYPE_SUBTITLE:
2212         packet_queue_abort(&is->subtitleq);
2213
2214         /* note: we also signal the condition variable to make sure we
2215            unblock the subtitle thread in all cases */
2216         SDL_LockMutex(is->subpq_mutex);
2217         is->subtitle_stream_changed = 1;
2218
2219         SDL_CondSignal(is->subpq_cond);
2220         SDL_UnlockMutex(is->subpq_mutex);
2221
2222         SDL_WaitThread(is->subtitle_tid, NULL);
2223
2224         packet_queue_end(&is->subtitleq);
2225         break;
2226     default:
2227         break;
2228     }
2229
2230     ic->streams[stream_index]->discard = AVDISCARD_ALL;
2231     avcodec_close(avctx);
2232     switch (avctx->codec_type) {
2233     case AVMEDIA_TYPE_AUDIO:
2234         is->audio_st = NULL;
2235         is->audio_stream = -1;
2236         break;
2237     case AVMEDIA_TYPE_VIDEO:
2238         is->video_st = NULL;
2239         is->video_stream = -1;
2240         break;
2241     case AVMEDIA_TYPE_SUBTITLE:
2242         is->subtitle_st = NULL;
2243         is->subtitle_stream = -1;
2244         break;
2245     default:
2246         break;
2247     }
2248 }
2249
2250 /* since we have only one decoding thread, we can use a global
2251    variable instead of a thread local variable */
2252 static VideoState *global_video_state;
2253
2254 static int decode_interrupt_cb(void *ctx)
2255 {
2256     return global_video_state && global_video_state->abort_request;
2257 }
2258
2259 static void stream_close(VideoState *is)
2260 {
2261     /* disable interrupting */
2262     global_video_state = NULL;
2263
2264     /* close each stream */
2265     if (is->audio_stream >= 0)
2266         stream_component_close(is, is->audio_stream);
2267     if (is->video_stream >= 0)
2268         stream_component_close(is, is->video_stream);
2269     if (is->subtitle_stream >= 0)
2270         stream_component_close(is, is->subtitle_stream);
2271     if (is->ic) {
2272         avformat_close_input(&is->ic);
2273     }
2274 }
2275
2276 static int stream_setup(VideoState *is)
2277 {
2278     AVFormatContext *ic = NULL;
2279     int err, i, ret;
2280     int st_index[AVMEDIA_TYPE_NB];
2281     AVDictionaryEntry *t;
2282     AVDictionary **opts;
2283     int orig_nb_streams;
2284
2285     memset(st_index, -1, sizeof(st_index));
2286     is->video_stream = -1;
2287     is->audio_stream = -1;
2288     is->subtitle_stream = -1;
2289
2290     global_video_state = is;
2291
2292     ic = avformat_alloc_context();
2293     if (!ic) {
2294         av_log(NULL, AV_LOG_FATAL, "Could not allocate context.\n");
2295         ret = AVERROR(ENOMEM);
2296         goto fail;
2297     }
2298     ic->interrupt_callback.callback = decode_interrupt_cb;
2299     err = avformat_open_input(&ic, is->filename, is->iformat, &format_opts);
2300     if (err < 0) {
2301         print_error(is->filename, err);
2302         ret = -1;
2303         goto fail;
2304     }
2305     if ((t = av_dict_get(format_opts, "", NULL, AV_DICT_IGNORE_SUFFIX))) {
2306         av_log(NULL, AV_LOG_ERROR, "Option %s not found.\n", t->key);
2307         ret = AVERROR_OPTION_NOT_FOUND;
2308         goto fail;
2309     }
2310     is->ic = ic;
2311
2312     if (genpts)
2313         ic->flags |= AVFMT_FLAG_GENPTS;
2314
2315     opts = setup_find_stream_info_opts(ic, codec_opts);
2316     orig_nb_streams = ic->nb_streams;
2317
2318     err = avformat_find_stream_info(ic, opts);
2319
2320     for (i = 0; i < orig_nb_streams; i++)
2321         av_dict_free(&opts[i]);
2322     av_freep(&opts);
2323
2324     if (err < 0) {
2325         fprintf(stderr, "%s: could not find codec parameters\n", is->filename);
2326         ret = -1;
2327         goto fail;
2328     }
2329
2330     if (ic->pb)
2331         ic->pb->eof_reached = 0; // FIXME hack, avplay maybe should not use url_feof() to test for the end
2332
2333     if (seek_by_bytes < 0)
2334         seek_by_bytes = !!(ic->iformat->flags & AVFMT_TS_DISCONT);
2335
2336     /* if seeking requested, we execute it */
2337     if (start_time != AV_NOPTS_VALUE) {
2338         int64_t timestamp;
2339
2340         timestamp = start_time;
2341         /* add the stream start time */
2342         if (ic->start_time != AV_NOPTS_VALUE)
2343             timestamp += ic->start_time;
2344         ret = avformat_seek_file(ic, -1, INT64_MIN, timestamp, INT64_MAX, 0);
2345         if (ret < 0) {
2346             fprintf(stderr, "%s: could not seek to position %0.3f\n",
2347                     is->filename, (double)timestamp / AV_TIME_BASE);
2348         }
2349     }
2350
2351     for (i = 0; i < ic->nb_streams; i++)
2352         ic->streams[i]->discard = AVDISCARD_ALL;
2353     if (!video_disable)
2354         st_index[AVMEDIA_TYPE_VIDEO] =
2355             av_find_best_stream(ic, AVMEDIA_TYPE_VIDEO,
2356                                 wanted_stream[AVMEDIA_TYPE_VIDEO], -1, NULL, 0);
2357     if (!audio_disable)
2358         st_index[AVMEDIA_TYPE_AUDIO] =
2359             av_find_best_stream(ic, AVMEDIA_TYPE_AUDIO,
2360                                 wanted_stream[AVMEDIA_TYPE_AUDIO],
2361                                 st_index[AVMEDIA_TYPE_VIDEO],
2362                                 NULL, 0);
2363     if (!video_disable)
2364         st_index[AVMEDIA_TYPE_SUBTITLE] =
2365             av_find_best_stream(ic, AVMEDIA_TYPE_SUBTITLE,
2366                                 wanted_stream[AVMEDIA_TYPE_SUBTITLE],
2367                                 (st_index[AVMEDIA_TYPE_AUDIO] >= 0 ?
2368                                  st_index[AVMEDIA_TYPE_AUDIO] :
2369                                  st_index[AVMEDIA_TYPE_VIDEO]),
2370                                 NULL, 0);
2371     if (show_status) {
2372         av_dump_format(ic, 0, is->filename, 0);
2373     }
2374
2375     /* open the streams */
2376     if (st_index[AVMEDIA_TYPE_AUDIO] >= 0) {
2377         stream_component_open(is, st_index[AVMEDIA_TYPE_AUDIO]);
2378     }
2379
2380     ret = -1;
2381     if (st_index[AVMEDIA_TYPE_VIDEO] >= 0) {
2382         ret = stream_component_open(is, st_index[AVMEDIA_TYPE_VIDEO]);
2383     }
2384     is->refresh_tid = SDL_CreateThread(refresh_thread, is);
2385     if (ret < 0) {
2386         if (!display_disable)
2387             is->show_audio = 2;
2388     }
2389
2390     if (st_index[AVMEDIA_TYPE_SUBTITLE] >= 0) {
2391         stream_component_open(is, st_index[AVMEDIA_TYPE_SUBTITLE]);
2392     }
2393
2394     if (is->video_stream < 0 && is->audio_stream < 0) {
2395         fprintf(stderr, "%s: could not open codecs\n", is->filename);
2396         ret = -1;
2397         goto fail;
2398     }
2399
2400     return 0;
2401
2402 fail:
2403     stream_close(is);
2404
2405     return ret;
2406 }
2407
2408 /* this thread gets the stream from the disk or the network */
2409 static int decode_thread(void *arg)
2410 {
2411     VideoState *is        = arg;
2412     AVPacket pkt1, *pkt   = &pkt1;
2413     AVFormatContext *ic   = is->ic;
2414     int pkt_in_play_range = 0;
2415     int ret, eof          = 0;
2416
2417     for (;;) {
2418         if (is->abort_request)
2419             break;
2420         if (is->paused != is->last_paused) {
2421             is->last_paused = is->paused;
2422             if (is->paused)
2423                 is->read_pause_return = av_read_pause(ic);
2424             else
2425                 av_read_play(ic);
2426         }
2427 #if CONFIG_RTSP_DEMUXER
2428         if (is->paused && !strcmp(ic->iformat->name, "rtsp")) {
2429             /* wait 10 ms to avoid trying to get another packet */
2430             /* XXX: horrible */
2431             SDL_Delay(10);
2432             continue;
2433         }
2434 #endif
2435         if (is->seek_req) {
2436             int64_t seek_target = is->seek_pos;
2437             int64_t seek_min    = is->seek_rel > 0 ? seek_target - is->seek_rel + 2: INT64_MIN;
2438             int64_t seek_max    = is->seek_rel < 0 ? seek_target - is->seek_rel - 2: INT64_MAX;
2439 // FIXME the +-2 is due to rounding being not done in the correct direction in generation
2440 //      of the seek_pos/seek_rel variables
2441
2442             ret = avformat_seek_file(is->ic, -1, seek_min, seek_target, seek_max, is->seek_flags);
2443             if (ret < 0) {
2444                 fprintf(stderr, "%s: error while seeking\n", is->ic->filename);
2445             } else {
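                     /* the seek succeeded: drop everything queued before the
                        seek point and push a flush packet so each decoder
                        flushes its internal buffers before the new data */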
2446                 if (is->audio_stream >= 0) {
2447                     packet_queue_flush(&is->audioq);
2448                     packet_queue_put(&is->audioq, &flush_pkt);
2449                 }
2450                 if (is->subtitle_stream >= 0) {
2451                     packet_queue_flush(&is->subtitleq);
2452                     packet_queue_put(&is->subtitleq, &flush_pkt);
2453                 }
2454                 if (is->video_stream >= 0) {
2455                     packet_queue_flush(&is->videoq);
2456                     packet_queue_put(&is->videoq, &flush_pkt);
2457                 }
2458             }
2459             is->seek_req = 0;
2460             eof = 0;
2461         }
2462
2463         /* if the queues are full, no need to read more */
2464         if (!infinite_buffer &&
2465               (is->audioq.size + is->videoq.size + is->subtitleq.size > MAX_QUEUE_SIZE
2466             || (   (is->audioq   .size  > MIN_AUDIOQ_SIZE || is->audio_stream < 0)
2467                 && (is->videoq   .nb_packets > MIN_FRAMES || is->video_stream < 0)
2468                 && (is->subtitleq.nb_packets > MIN_FRAMES || is->subtitle_stream < 0)))) {
2469             /* wait 10 ms */
2470             SDL_Delay(10);
2471             continue;
2472         }
2473         if (eof) {
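             /* at end of file, queue empty packets to drain decoders that
                buffer frames internally (AV_CODEC_CAP_DELAY); once all queues
                are empty, either loop from the start or quit unless
                -noautoexit was given */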
2474             if (is->video_stream >= 0) {
2475                 av_init_packet(pkt);
2476                 pkt->data = NULL;
2477                 pkt->size = 0;
2478                 pkt->stream_index = is->video_stream;
2479                 packet_queue_put(&is->videoq, pkt);
2480             }
2481             if (is->audio_stream >= 0 &&
2482                 (is->audio_st->codec->codec->capabilities & AV_CODEC_CAP_DELAY)) {
2483                 av_init_packet(pkt);
2484                 pkt->data = NULL;
2485                 pkt->size = 0;
2486                 pkt->stream_index = is->audio_stream;
2487                 packet_queue_put(&is->audioq, pkt);
2488             }
2489             SDL_Delay(10);
2490             if (is->audioq.size + is->videoq.size + is->subtitleq.size == 0) {
2491                 if (loop != 1 && (!loop || --loop)) {
2492                     stream_seek(cur_stream, start_time != AV_NOPTS_VALUE ? start_time : 0, 0, 0);
2493                 } else if (!noautoexit) {
2494                     ret = AVERROR_EOF;
2495                     goto fail;
2496                 }
2497             }
2498             continue;
2499         }
2500         ret = av_read_frame(ic, pkt);
2501         if (ret < 0) {
2502             if (ret == AVERROR_EOF || (ic->pb && ic->pb->eof_reached))
2503                 eof = 1;
2504             if (ic->pb && ic->pb->error)
2505                 break;
2506             SDL_Delay(100); /* wait for user event */
2507             continue;
2508         }
2509         /* check if packet is in play range specified by user, then queue, otherwise discard */
2510         pkt_in_play_range = duration == AV_NOPTS_VALUE ||
2511                 (pkt->pts - ic->streams[pkt->stream_index]->start_time) *
2512                 av_q2d(ic->streams[pkt->stream_index]->time_base) -
2513                 (double)(start_time != AV_NOPTS_VALUE ? start_time : 0) / 1000000
2514                 <= ((double)duration / 1000000);
2515         if (pkt->stream_index == is->audio_stream && pkt_in_play_range) {
2516             packet_queue_put(&is->audioq, pkt);
2517         } else if (pkt->stream_index == is->video_stream && pkt_in_play_range) {
2518             packet_queue_put(&is->videoq, pkt);
2519         } else if (pkt->stream_index == is->subtitle_stream && pkt_in_play_range) {
2520             packet_queue_put(&is->subtitleq, pkt);
2521         } else {
2522             av_packet_unref(pkt);
2523         }
2524     }
2525     /* wait until the end */
2526     while (!is->abort_request) {
2527         SDL_Delay(100);
2528     }
2529
2530     ret = 0;
2531
2532 fail:
2533     stream_close(is);
2534
2535     if (ret != 0) {
2536         SDL_Event event;
2537
2538         event.type = FF_QUIT_EVENT;
2539         event.user.data1 = is;
2540         SDL_PushEvent(&event);
2541     }
2542     return 0;
2543 }
2544
2545 static VideoState *stream_open(const char *filename, AVInputFormat *iformat)
2546 {
2547     VideoState *is;
2548
2549     is = av_mallocz(sizeof(VideoState));
2550     if (!is)
2551         return NULL;
2552     av_strlcpy(is->filename, filename, sizeof(is->filename));
2553     is->iformat = iformat;
2554     is->ytop    = 0;
2555     is->xleft   = 0;
2556
2557     if (stream_setup(is) < 0) {
2558         av_free(is);
2559         return NULL;
2560     }
2561
2562     /* start video display */
2563     is->pictq_mutex = SDL_CreateMutex();
2564     is->pictq_cond  = SDL_CreateCond();
2565
2566     is->subpq_mutex = SDL_CreateMutex();
2567     is->subpq_cond  = SDL_CreateCond();
2568
2569     is->av_sync_type = av_sync_type;
2570     is->parse_tid    = SDL_CreateThread(decode_thread, is);
2571     if (!is->parse_tid) {
2572         av_free(is);
2573         return NULL;
2574     }
2575     return is;
2576 }
2577
2578 static void stream_cycle_channel(VideoState *is, int codec_type)
2579 {
2580     AVFormatContext *ic = is->ic;
2581     int start_index, stream_index;
2582     AVStream *st;
2583
2584     if (codec_type == AVMEDIA_TYPE_VIDEO)
2585         start_index = is->video_stream;
2586     else if (codec_type == AVMEDIA_TYPE_AUDIO)
2587         start_index = is->audio_stream;
2588     else
2589         start_index = is->subtitle_stream;
2590     if (start_index < (codec_type == AVMEDIA_TYPE_SUBTITLE ? -1 : 0))
2591         return;
2592     stream_index = start_index;
2593     for (;;) {
2594         if (++stream_index >= is->ic->nb_streams)
2595         {
2596             if (codec_type == AVMEDIA_TYPE_SUBTITLE)
2597             {
2598                 stream_index = -1;
2599                 goto the_end;
2600             } else
2601                 stream_index = 0;
2602         }
2603         if (stream_index == start_index)
2604             return;
2605         st = ic->streams[stream_index];
2606         if (st->codec->codec_type == codec_type) {
2607             /* check that parameters are OK */
2608             switch (codec_type) {
2609             case AVMEDIA_TYPE_AUDIO:
2610                 if (st->codec->sample_rate != 0 &&
2611                     st->codec->channels != 0)
2612                     goto the_end;
2613                 break;
2614             case AVMEDIA_TYPE_VIDEO:
2615             case AVMEDIA_TYPE_SUBTITLE:
2616                 goto the_end;
2617             default:
2618                 break;
2619             }
2620         }
2621     }
2622  the_end:
2623     stream_component_close(is, start_index);
2624     stream_component_open(is, stream_index);
2625 }
2626
2627
2628 static void toggle_full_screen(void)
2629 {
2630 #if defined(__APPLE__) && SDL_VERSION_ATLEAST(1, 2, 14)
2631     /* OS X needs to empty the picture_queue */
2632     int i;
2633     for (i = 0; i < VIDEO_PICTURE_QUEUE_SIZE; i++)
2634         cur_stream->pictq[i].reallocate = 1;
2635 #endif
2636     is_full_screen = !is_full_screen;
2637     video_open(cur_stream);
2638 }
2639
2640 static void toggle_pause(void)
2641 {
2642     if (cur_stream)
2643         stream_pause(cur_stream);
2644     step = 0;
2645 }
2646
2647 static void step_to_next_frame(void)
2648 {
2649     if (cur_stream) {
2650         /* if the stream is paused, unpause it, then step */
2651         if (cur_stream->paused)
2652             stream_pause(cur_stream);
2653     }
2654     step = 1;
2655 }
2656
2657 static void toggle_audio_display(void)
2658 {
2659     if (cur_stream) {
2660         int bgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0x00);
2661         cur_stream->show_audio = (cur_stream->show_audio + 1) % 3;
2662         fill_rectangle(screen,
2663                        cur_stream->xleft, cur_stream->ytop, cur_stream->width, cur_stream->height,
2664                        bgcolor);
2665         SDL_UpdateRect(screen, cur_stream->xleft, cur_stream->ytop, cur_stream->width, cur_stream->height);
2666     }
2667 }
2668
2669 static void seek_chapter(VideoState *is, int incr)
2670 {
2671     int64_t pos = get_master_clock(is) * AV_TIME_BASE;
2672     int i;
2673
2674     if (!is->ic->nb_chapters)
2675         return;
2676
2677     /* find the current chapter */
2678     for (i = 0; i < is->ic->nb_chapters; i++) {
2679         AVChapter *ch = is->ic->chapters[i];
2680         if (av_compare_ts(pos, AV_TIME_BASE_Q, ch->start, ch->time_base) < 0) {
2681             i--;
2682             break;
2683         }
2684     }
2685
2686     i += incr;
2687     i = FFMAX(i, 0);
2688     if (i >= is->ic->nb_chapters)
2689         return;
2690
2691     av_log(NULL, AV_LOG_VERBOSE, "Seeking to chapter %d.\n", i);
2692     stream_seek(is, av_rescale_q(is->ic->chapters[i]->start, is->ic->chapters[i]->time_base,
2693                                  AV_TIME_BASE_Q), 0, 0);
2694 }
2695
2696 /* handle an event sent by the GUI */
2697 static void event_loop(void)
2698 {
2699     SDL_Event event;
2700     double incr, pos, frac;
2701
2702     for (;;) {
2703         double x;
2704         SDL_WaitEvent(&event);
2705         switch (event.type) {
2706         case SDL_KEYDOWN:
2707             if (exit_on_keydown) {
2708                 do_exit();
2709                 break;
2710             }
2711             switch (event.key.keysym.sym) {
2712             case SDLK_ESCAPE:
2713             case SDLK_q:
2714                 do_exit();
2715                 break;
2716             case SDLK_f:
2717                 toggle_full_screen();
2718                 break;
2719             case SDLK_p:
2720             case SDLK_SPACE:
2721                 toggle_pause();
2722                 break;
2723             case SDLK_s: // S: Step to next frame
2724                 step_to_next_frame();
2725                 break;
2726             case SDLK_a:
2727                 if (cur_stream)
2728                     stream_cycle_channel(cur_stream, AVMEDIA_TYPE_AUDIO);
2729                 break;
2730             case SDLK_v:
2731                 if (cur_stream)
2732                     stream_cycle_channel(cur_stream, AVMEDIA_TYPE_VIDEO);
2733                 break;
2734             case SDLK_t:
2735                 if (cur_stream)
2736                     stream_cycle_channel(cur_stream, AVMEDIA_TYPE_SUBTITLE);
2737                 break;
2738             case SDLK_w:
2739                 toggle_audio_display();
2740                 break;
2741             case SDLK_PAGEUP:
2742                 seek_chapter(cur_stream, 1);
2743                 break;
2744             case SDLK_PAGEDOWN:
2745                 seek_chapter(cur_stream, -1);
2746                 break;
2747             case SDLK_LEFT:
2748                 incr = -10.0;
2749                 goto do_seek;
2750             case SDLK_RIGHT:
2751                 incr = 10.0;
2752                 goto do_seek;
2753             case SDLK_UP:
2754                 incr = 60.0;
2755                 goto do_seek;
2756             case SDLK_DOWN:
2757                 incr = -60.0;
2758             do_seek:
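                     /* relative seek: either by byte offset (estimated from the
                        container bit rate, with a 180 kB/s fallback) or by time
                        on the master clock, depending on seek_by_bytes */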
2759                 if (cur_stream) {
2760                     if (seek_by_bytes) {
2761                         if (cur_stream->video_stream >= 0 && cur_stream->video_current_pos >= 0) {
2762                             pos = cur_stream->video_current_pos;
2763                         } else if (cur_stream->audio_stream >= 0 && cur_stream->audio_pkt.pos >= 0) {
2764                             pos = cur_stream->audio_pkt.pos;
2765                         } else
2766                             pos = avio_tell(cur_stream->ic->pb);
2767                         if (cur_stream->ic->bit_rate)
2768                             incr *= cur_stream->ic->bit_rate / 8.0;
2769                         else
2770                             incr *= 180000.0;
2771                         pos += incr;
2772                         stream_seek(cur_stream, pos, incr, 1);
2773                     } else {
2774                         pos = get_master_clock(cur_stream);
2775                         pos += incr;
2776                         stream_seek(cur_stream, (int64_t)(pos * AV_TIME_BASE), (int64_t)(incr * AV_TIME_BASE), 0);
2777                     }
2778                 }
2779                 break;
2780             default:
2781                 break;
2782             }
2783             break;
2784         case SDL_MOUSEBUTTONDOWN:
2785             if (exit_on_mousedown) {
2786                 do_exit();
2787                 break;
2788             }
2789         case SDL_MOUSEMOTION:
2790             if (event.type == SDL_MOUSEBUTTONDOWN) {
2791                 x = event.button.x;
2792             } else {
2793                 if (event.motion.state != SDL_PRESSED)
2794                     break;
2795                 x = event.motion.x;
2796             }
2797             if (cur_stream) {
2798                 if (seek_by_bytes || cur_stream->ic->duration <= 0) {
2799                     uint64_t size =  avio_size(cur_stream->ic->pb);
2800                     stream_seek(cur_stream, size*x/cur_stream->width, 0, 1);
2801                 } else {
2802                     int64_t ts;
2803                     int ns, hh, mm, ss;
2804                     int tns, thh, tmm, tss;
2805                     tns  = cur_stream->ic->duration / 1000000LL;
2806                     thh  = tns / 3600;
2807                     tmm  = (tns % 3600) / 60;
2808                     tss  = (tns % 60);
2809                     frac = x / cur_stream->width;
2810                     ns   = frac * tns;
2811                     hh   = ns / 3600;
2812                     mm   = (ns % 3600) / 60;
2813                     ss   = (ns % 60);
2814                     fprintf(stderr, "Seek to %2.0f%% (%2d:%02d:%02d) of total duration (%2d:%02d:%02d)       \n", frac*100,
2815                             hh, mm, ss, thh, tmm, tss);
2816                     ts = frac * cur_stream->ic->duration;
2817                     if (cur_stream->ic->start_time != AV_NOPTS_VALUE)
2818                         ts += cur_stream->ic->start_time;
2819                     stream_seek(cur_stream, ts, 0, 0);
2820                 }
2821             }
2822             break;
2823         case SDL_VIDEORESIZE:
2824             if (cur_stream) {
2825                 screen = SDL_SetVideoMode(event.resize.w, event.resize.h, 0,
2826                                           SDL_HWSURFACE|SDL_RESIZABLE|SDL_ASYNCBLIT|SDL_HWACCEL);
2827                 screen_width  = cur_stream->width  = event.resize.w;
2828                 screen_height = cur_stream->height = event.resize.h;
2829             }
2830             break;
2831         case SDL_QUIT:
2832         case FF_QUIT_EVENT:
2833             do_exit();
2834             break;
2835         case FF_ALLOC_EVENT:
2836             video_open(event.user.data1);
2837             alloc_picture(event.user.data1);
2838             break;
2839         case FF_REFRESH_EVENT:
2840             video_refresh_timer(event.user.data1);
2841             cur_stream->refresh = 0;
2842             break;
2843         default:
2844             break;
2845         }
2846     }
2847 }
2848
2849 static int opt_frame_size(void *optctx, const char *opt, const char *arg)
2850 {
2851     av_log(NULL, AV_LOG_ERROR,
2852            "Option '%s' has been removed, use private format options instead\n", opt);
2853     return AVERROR(EINVAL);
2854 }
2855
2856 static int opt_width(void *optctx, const char *opt, const char *arg)
2857 {
2858     screen_width = parse_number_or_die(opt, arg, OPT_INT64, 1, INT_MAX);
2859     return 0;
2860 }
2861
2862 static int opt_height(void *optctx, const char *opt, const char *arg)
2863 {
2864     screen_height = parse_number_or_die(opt, arg, OPT_INT64, 1, INT_MAX);
2865     return 0;
2866 }
2867
2868 static int opt_format(void *optctx, const char *opt, const char *arg)
2869 {
2870     file_iformat = av_find_input_format(arg);
2871     if (!file_iformat) {
2872         fprintf(stderr, "Unknown input format: %s\n", arg);
2873         return AVERROR(EINVAL);
2874     }
2875     return 0;
2876 }
2877
2878 static int opt_frame_pix_fmt(void *optctx, const char *opt, const char *arg)
2879 {
2880     av_log(NULL, AV_LOG_ERROR,
2881            "Option '%s' has been removed, use private format options instead\n", opt);
2882     return AVERROR(EINVAL);
2883 }
2884
2885 static int opt_sync(void *optctx, const char *opt, const char *arg)
2886 {
2887     if (!strcmp(arg, "audio"))
2888         av_sync_type = AV_SYNC_AUDIO_MASTER;
2889     else if (!strcmp(arg, "video"))
2890         av_sync_type = AV_SYNC_VIDEO_MASTER;
2891     else if (!strcmp(arg, "ext"))
2892         av_sync_type = AV_SYNC_EXTERNAL_CLOCK;
2893     else {
2894         fprintf(stderr, "Unknown value for %s: %s\n", opt, arg);
2895         exit(1);
2896     }
2897     return 0;
2898 }
2899
2900 static int opt_seek(void *optctx, const char *opt, const char *arg)
2901 {
2902     start_time = parse_time_or_die(opt, arg, 1);
2903     return 0;
2904 }
2905
2906 static int opt_duration(void *optctx, const char *opt, const char *arg)
2907 {
2908     duration = parse_time_or_die(opt, arg, 1);
2909     return 0;
2910 }
2911
2912 static const OptionDef options[] = {
2913 #include "cmdutils_common_opts.h"
2914     { "x", HAS_ARG, { .func_arg = opt_width }, "force displayed width", "width" },
2915     { "y", HAS_ARG, { .func_arg = opt_height }, "force displayed height", "height" },
2916     { "s", HAS_ARG | OPT_VIDEO, { .func_arg = opt_frame_size }, "set frame size (WxH or abbreviation)", "size" },
2917     { "fs", OPT_BOOL, { &is_full_screen }, "force full screen" },
2918     { "an", OPT_BOOL, { &audio_disable }, "disable audio" },
2919     { "vn", OPT_BOOL, { &video_disable }, "disable video" },
2920     { "ast", OPT_INT | HAS_ARG | OPT_EXPERT, { &wanted_stream[AVMEDIA_TYPE_AUDIO] }, "select desired audio stream", "stream_number" },
2921     { "vst", OPT_INT | HAS_ARG | OPT_EXPERT, { &wanted_stream[AVMEDIA_TYPE_VIDEO] }, "select desired video stream", "stream_number" },
2922     { "sst", OPT_INT | HAS_ARG | OPT_EXPERT, { &wanted_stream[AVMEDIA_TYPE_SUBTITLE] }, "select desired subtitle stream", "stream_number" },
2923     { "ss", HAS_ARG, { .func_arg = opt_seek }, "seek to a given position in seconds", "pos" },
2924     { "t", HAS_ARG, { .func_arg = opt_duration }, "play \"duration\" seconds of audio/video", "duration" },
2925     { "bytes", OPT_INT | HAS_ARG, { &seek_by_bytes }, "seek by bytes 0=off 1=on -1=auto", "val" },
2926     { "nodisp", OPT_BOOL, { &display_disable }, "disable graphical display" },
2927     { "f", HAS_ARG, { .func_arg = opt_format }, "force format", "fmt" },
2928     { "pix_fmt", HAS_ARG | OPT_EXPERT | OPT_VIDEO, { .func_arg = opt_frame_pix_fmt }, "set pixel format", "format" },
2929     { "stats", OPT_BOOL | OPT_EXPERT, { &show_status }, "show status", "" },
2930     { "bug", OPT_INT | HAS_ARG | OPT_EXPERT, { &workaround_bugs }, "workaround bugs", "" },
2931     { "fast", OPT_BOOL | OPT_EXPERT, { &fast }, "non spec compliant optimizations", "" },
2932     { "genpts", OPT_BOOL | OPT_EXPERT, { &genpts }, "generate pts", "" },
2933     { "drp", OPT_INT | HAS_ARG | OPT_EXPERT, { &decoder_reorder_pts }, "let decoder reorder pts 0=off 1=on -1=auto", ""},
2934     { "skiploop", OPT_INT | HAS_ARG | OPT_EXPERT, { &skip_loop_filter }, "", "" },
2935     { "skipframe", OPT_INT | HAS_ARG | OPT_EXPERT, { &skip_frame }, "", "" },
2936     { "skipidct", OPT_INT | HAS_ARG | OPT_EXPERT, { &skip_idct }, "", "" },
2937     { "idct", OPT_INT | HAS_ARG | OPT_EXPERT, { &idct }, "set idct algo",  "algo" },
2938     { "ec", OPT_INT | HAS_ARG | OPT_EXPERT, { &error_concealment }, "set error concealment options",  "bit_mask" },
2939     { "sync", HAS_ARG | OPT_EXPERT, { .func_arg = opt_sync }, "set audio-video sync. type (type=audio/video/ext)", "type" },
2940     { "noautoexit", OPT_BOOL | OPT_EXPERT, { &noautoexit }, "Do not exit at the end of playback", "" },
2941     { "exitonkeydown", OPT_BOOL | OPT_EXPERT, { &exit_on_keydown }, "exit on key down", "" },
2942     { "exitonmousedown", OPT_BOOL | OPT_EXPERT, { &exit_on_mousedown }, "exit on mouse down", "" },
2943     { "loop", OPT_INT | HAS_ARG | OPT_EXPERT, { &loop }, "set number of times the playback shall be looped", "loop count" },
2944     { "framedrop", OPT_BOOL | OPT_EXPERT, { &framedrop }, "drop frames when cpu is too slow", "" },
2945     { "infbuf", OPT_BOOL | OPT_EXPERT, { &infinite_buffer }, "don't limit the input buffer size (useful with realtime streams)", "" },
2946     { "window_title", OPT_STRING | HAS_ARG, { &window_title }, "set window title", "window title" },
2947 #if CONFIG_AVFILTER
2948     { "vf", OPT_STRING | HAS_ARG, { &vfilters }, "video filters", "filter list" },
2949 #endif
2950     { "rdftspeed", OPT_INT | HAS_ARG| OPT_AUDIO | OPT_EXPERT, { &rdftspeed }, "rdft speed", "msecs" },
2951     { "default", HAS_ARG | OPT_AUDIO | OPT_VIDEO | OPT_EXPERT, { opt_default }, "generic catch all option", "" },
2952     { "i", 0, { NULL }, "avconv compatibility dummy option", ""},
2953     { "autorotate", OPT_BOOL, { &autorotate }, "automatically rotate video", "" },
2954     { NULL, },
2955 };
2956
2957 static void show_usage(void)
2958 {
2959     printf("Simple media player\n");
2960     printf("usage: %s [options] input_file\n", program_name);
2961     printf("\n");
2962 }
2963
2964 void show_help_default(const char *opt, const char *arg)
2965 {
2966     av_log_set_callback(log_callback_help);
2967     show_usage();
2968     show_help_options(options, "Main options:", 0, OPT_EXPERT, 0);
2969     show_help_options(options, "Advanced options:", OPT_EXPERT, 0, 0);
2970     printf("\n");
2971     show_help_children(avcodec_get_class(), AV_OPT_FLAG_DECODING_PARAM);
2972     show_help_children(avformat_get_class(), AV_OPT_FLAG_DECODING_PARAM);
2973 #if !CONFIG_AVFILTER
2974     show_help_children(sws_get_class(), AV_OPT_FLAG_ENCODING_PARAM);
2975 #endif
2976     printf("\nWhile playing:\n"
2977            "q, ESC              quit\n"
2978            "f                   toggle full screen\n"
2979            "p, SPC              pause\n"
2980            "a                   cycle audio channel\n"
2981            "v                   cycle video channel\n"
2982            "t                   cycle subtitle channel\n"
2983            "w                   show audio waves\n"
2984            "s                   activate frame-step mode\n"
2985            "left/right          seek backward/forward 10 seconds\n"
2986            "down/up             seek backward/forward 1 minute\n"
2987            "mouse click         seek to percentage in file corresponding to fraction of width\n"
2988            );
2989 }
2990
2991 static void opt_input_file(void *optctx, const char *filename)
2992 {
2993     if (input_filename) {
2994         fprintf(stderr, "Argument '%s' provided as input filename, but '%s' was already specified.\n",
2995                 filename, input_filename);
2996         exit(1);
2997     }
2998     if (!strcmp(filename, "-"))
2999         filename = "pipe:";
3000     input_filename = filename;
3001 }
3002
3003 /* program entry point */
3004 int main(int argc, char **argv)
3005 {
3006     int flags;
3007
3008     av_log_set_flags(AV_LOG_SKIP_REPEATED);
3009     parse_loglevel(argc, argv, options);
3010
3011     /* register all codecs, demuxers and protocols */
3012     avcodec_register_all();
3013 #if CONFIG_AVDEVICE
3014     avdevice_register_all();
3015 #endif
3016 #if CONFIG_AVFILTER
3017     avfilter_register_all();
3018 #endif
3019     av_register_all();
3020     avformat_network_init();
3021
3022     init_opts();
3023
3024     show_banner();
3025
3026     parse_options(NULL, argc, argv, options, opt_input_file);
3027
3028     if (!input_filename) {
3029         show_usage();
3030         fprintf(stderr, "An input file must be specified\n");
3031         fprintf(stderr, "Use -h to get full help or, even better, run 'man %s'\n", program_name);
3032         exit(1);
3033     }
3034
3035     if (display_disable) {
3036         video_disable = 1;
3037     }
3038     flags = SDL_INIT_VIDEO | SDL_INIT_AUDIO | SDL_INIT_TIMER;
3039 #if !defined(__MINGW32__) && !defined(__APPLE__)
3040     flags |= SDL_INIT_EVENTTHREAD; /* Not supported on Windows or Mac OS X */
3041 #endif
3042     if (SDL_Init (flags)) {
3043         fprintf(stderr, "Could not initialize SDL - %s\n", SDL_GetError());
3044         exit(1);
3045     }
3046
3047     if (!display_disable) {
3048         const SDL_VideoInfo *vi = SDL_GetVideoInfo();
3049         fs_screen_width = vi->current_w;
3050         fs_screen_height = vi->current_h;
3051     }
3052
3053     SDL_EventState(SDL_ACTIVEEVENT, SDL_IGNORE);
3054     SDL_EventState(SDL_SYSWMEVENT, SDL_IGNORE);
3055     SDL_EventState(SDL_USEREVENT, SDL_IGNORE);
3056
3057     av_init_packet(&flush_pkt);
3058     flush_pkt.data = (uint8_t *)&flush_pkt;
3059
3060     cur_stream = stream_open(input_filename, file_iformat);
3061
3062     event_loop();
3063
3064     /* never returns */
3065
3066     return 0;
3067 }