1 /*
2  * avplay : Simple Media Player based on the Libav libraries
3  * Copyright (c) 2003 Fabrice Bellard
4  *
5  * This file is part of Libav.
6  *
7  * Libav is free software; you can redistribute it and/or
8  * modify it under the terms of the GNU Lesser General Public
9  * License as published by the Free Software Foundation; either
10  * version 2.1 of the License, or (at your option) any later version.
11  *
12  * Libav is distributed in the hope that it will be useful,
13  * but WITHOUT ANY WARRANTY; without even the implied warranty of
14  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
15  * Lesser General Public License for more details.
16  *
17  * You should have received a copy of the GNU Lesser General Public
18  * License along with Libav; if not, write to the Free Software
19  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
20  */
21
22 #include "config.h"
23 #include <inttypes.h>
24 #include <math.h>
25 #include <limits.h>
26 #include "libavutil/avstring.h"
27 #include "libavutil/colorspace.h"
28 #include "libavutil/mathematics.h"
29 #include "libavutil/pixdesc.h"
30 #include "libavutil/imgutils.h"
31 #include "libavutil/dict.h"
32 #include "libavutil/parseutils.h"
33 #include "libavutil/samplefmt.h"
34 #include "libavutil/time.h"
35 #include "libavformat/avformat.h"
36 #include "libavdevice/avdevice.h"
37 #include "libswscale/swscale.h"
38 #include "libavresample/avresample.h"
39 #include "libavutil/opt.h"
40 #include "libavcodec/avfft.h"
41
42 #if CONFIG_AVFILTER
43 # include "libavfilter/avfilter.h"
44 # include "libavfilter/avfiltergraph.h"
45 # include "libavfilter/buffersink.h"
46 # include "libavfilter/buffersrc.h"
47 #endif
48
49 #include "cmdutils.h"
50
51 #include <SDL.h>
52 #include <SDL_thread.h>
53
54 #ifdef __MINGW32__
55 #undef main /* We don't want SDL to override our main() */
56 #endif
57
58 #include <assert.h>
59
60 const char program_name[] = "avplay";
61 const int program_birth_year = 2003;
62
63 #define MAX_QUEUE_SIZE (15 * 1024 * 1024)
64 #define MIN_AUDIOQ_SIZE (20 * 16 * 1024)
65 #define MIN_FRAMES 5
66
67 /* SDL audio buffer size, in samples. It should be small to allow precise
68    A/V sync, as SDL does not report hardware buffer fullness. */
69 #define SDL_AUDIO_BUFFER_SIZE 1024
70
71 /* no AV sync correction is done if below the AV sync threshold */
72 #define AV_SYNC_THRESHOLD 0.01
73 /* no AV correction is done if the error is too big */
74 #define AV_NOSYNC_THRESHOLD 10.0
75
76 #define FRAME_SKIP_FACTOR 0.05
77
78 /* maximum audio speed change to get correct sync */
79 #define SAMPLE_CORRECTION_PERCENT_MAX 10
80
81 /* we use about AUDIO_DIFF_AVG_NB A-V differences to make the average */
82 #define AUDIO_DIFF_AVG_NB   20
83
84 /* NOTE: the size must be big enough to compensate for the hardware audio buffer size */
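/* The sample array is a circular buffer of the most recently played 16-bit
   samples; it is used only by the waveform/spectrum display in
   video_audio_display(). */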
85 #define SAMPLE_ARRAY_SIZE (2 * 65536)
86
87 static int sws_flags = SWS_BICUBIC;
88
89 typedef struct PacketQueue {
90     AVPacketList *first_pkt, *last_pkt;
91     int nb_packets;
92     int size;
93     int abort_request;
94     SDL_mutex *mutex;
95     SDL_cond *cond;
96 } PacketQueue;
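/* PacketQueue is a simple producer/consumer FIFO of demuxed packets: the read
   thread appends with packet_queue_put(), and the decoder threads block in
   packet_queue_get() on the condition variable until a packet arrives or
   abort_request is set. 'size' tracks the number of queued bytes so the
   amount of buffered data can be bounded. */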
97
98 #define VIDEO_PICTURE_QUEUE_SIZE 2
99 #define SUBPICTURE_QUEUE_SIZE 4
100
101 typedef struct VideoPicture {
102     double pts;                                  ///< presentation time stamp for this picture
103     double target_clock;                         ///< av_gettime() time at which this should be displayed ideally
104     int64_t pos;                                 ///< byte position in file
105     SDL_Overlay *bmp;
106     int width, height; /* source height & width */
107     int allocated;
108     int reallocate;
109     enum PixelFormat pix_fmt;
110
111 #if CONFIG_AVFILTER
112     AVFilterBufferRef *picref;
113 #endif
114 } VideoPicture;
115
116 typedef struct SubPicture {
117     double pts; /* presentation time stamp for this picture */
118     AVSubtitle sub;
119 } SubPicture;
120
121 enum {
122     AV_SYNC_AUDIO_MASTER, /* default choice */
123     AV_SYNC_VIDEO_MASTER,
124     AV_SYNC_EXTERNAL_CLOCK, /* synchronize to an external clock */
125 };
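/* Selects which clock get_master_clock() follows: by default the audio clock
   drives synchronization, falling back to the video clock when no audio
   stream is present; an external clock can be selected explicitly. */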
126
127 typedef struct VideoState {
128     SDL_Thread *parse_tid;
129     SDL_Thread *video_tid;
130     SDL_Thread *refresh_tid;
131     AVInputFormat *iformat;
132     int no_background;
133     int abort_request;
134     int paused;
135     int last_paused;
136     int seek_req;
137     int seek_flags;
138     int64_t seek_pos;
139     int64_t seek_rel;
140     int read_pause_return;
141     AVFormatContext *ic;
142
143     int audio_stream;
144
145     int av_sync_type;
146     double external_clock; /* external clock base */
147     int64_t external_clock_time;
148
149     double audio_clock;
150     double audio_diff_cum; /* used for AV difference average computation */
151     double audio_diff_avg_coef;
152     double audio_diff_threshold;
153     int audio_diff_avg_count;
154     AVStream *audio_st;
155     PacketQueue audioq;
156     int audio_hw_buf_size;
157     uint8_t silence_buf[SDL_AUDIO_BUFFER_SIZE];
158     uint8_t *audio_buf;
159     uint8_t *audio_buf1;
160     unsigned int audio_buf_size; /* in bytes */
161     int audio_buf_index; /* in bytes */
162     AVPacket audio_pkt_temp;
163     AVPacket audio_pkt;
164     enum AVSampleFormat sdl_sample_fmt;
165     uint64_t sdl_channel_layout;
166     int sdl_channels;
167     enum AVSampleFormat resample_sample_fmt;
168     uint64_t resample_channel_layout;
169     AVAudioResampleContext *avr;
170     AVFrame *frame;
171
172     int show_audio; /* if true, display audio samples */
173     int16_t sample_array[SAMPLE_ARRAY_SIZE];
174     int sample_array_index;
175     int last_i_start;
176     RDFTContext *rdft;
177     int rdft_bits;
178     FFTSample *rdft_data;
179     int xpos;
180
181     SDL_Thread *subtitle_tid;
182     int subtitle_stream;
183     int subtitle_stream_changed;
184     AVStream *subtitle_st;
185     PacketQueue subtitleq;
186     SubPicture subpq[SUBPICTURE_QUEUE_SIZE];
187     int subpq_size, subpq_rindex, subpq_windex;
188     SDL_mutex *subpq_mutex;
189     SDL_cond *subpq_cond;
190
191     double frame_timer;
192     double frame_last_pts;
193     double frame_last_delay;
194     double video_clock;                          ///< pts of last decoded frame / predicted pts of next decoded frame
195     int video_stream;
196     AVStream *video_st;
197     PacketQueue videoq;
198     double video_current_pts;                    ///< current displayed pts (different from video_clock if frame fifos are used)
199     double video_current_pts_drift;              ///< video_current_pts - time (av_gettime) at which we updated video_current_pts - used to have running video pts
200     int64_t video_current_pos;                   ///< current displayed file pos
201     VideoPicture pictq[VIDEO_PICTURE_QUEUE_SIZE];
202     int pictq_size, pictq_rindex, pictq_windex;
203     SDL_mutex *pictq_mutex;
204     SDL_cond *pictq_cond;
205 #if !CONFIG_AVFILTER
206     struct SwsContext *img_convert_ctx;
207 #endif
208
209     //    QETimer *video_timer;
210     char filename[1024];
211     int width, height, xleft, ytop;
212
213     PtsCorrectionContext pts_ctx;
214
215 #if CONFIG_AVFILTER
216     AVFilterContext *in_video_filter;           ///< the first filter in the video chain
217     AVFilterContext *out_video_filter;          ///< the last filter in the video chain
218     int use_dr1;
219     FrameBuffer *buffer_pool;
220 #endif
221
222     float skip_frames;
223     float skip_frames_index;
224     int refresh;
225 } VideoState;
226
227 /* options specified by the user */
228 static AVInputFormat *file_iformat;
229 static const char *input_filename;
230 static const char *window_title;
231 static int fs_screen_width;
232 static int fs_screen_height;
233 static int screen_width  = 0;
234 static int screen_height = 0;
235 static int audio_disable;
236 static int video_disable;
237 static int wanted_stream[AVMEDIA_TYPE_NB] = {
238     [AVMEDIA_TYPE_AUDIO]    = -1,
239     [AVMEDIA_TYPE_VIDEO]    = -1,
240     [AVMEDIA_TYPE_SUBTITLE] = -1,
241 };
242 static int seek_by_bytes = -1;
243 static int display_disable;
244 static int show_status = 1;
245 static int av_sync_type = AV_SYNC_AUDIO_MASTER;
246 static int64_t start_time = AV_NOPTS_VALUE;
247 static int64_t duration = AV_NOPTS_VALUE;
248 static int debug = 0;
249 static int debug_mv = 0;
250 static int step = 0;
251 static int workaround_bugs = 1;
252 static int fast = 0;
253 static int genpts = 0;
254 static int idct = FF_IDCT_AUTO;
255 static enum AVDiscard skip_frame       = AVDISCARD_DEFAULT;
256 static enum AVDiscard skip_idct        = AVDISCARD_DEFAULT;
257 static enum AVDiscard skip_loop_filter = AVDISCARD_DEFAULT;
258 static int error_concealment = 3;
259 static int decoder_reorder_pts = -1;
260 static int autoexit;
261 static int exit_on_keydown;
262 static int exit_on_mousedown;
263 static int loop = 1;
264 static int framedrop = 1;
265 static int infinite_buffer = 0;
266
267 static int rdftspeed = 20;
268 #if CONFIG_AVFILTER
269 static char *vfilters = NULL;
270 #endif
271
272 /* current context */
273 static int is_full_screen;
274 static VideoState *cur_stream;
275 static int64_t audio_callback_time;
276
277 static AVPacket flush_pkt;
278
279 #define FF_ALLOC_EVENT   (SDL_USEREVENT)
280 #define FF_REFRESH_EVENT (SDL_USEREVENT + 1)
281 #define FF_QUIT_EVENT    (SDL_USEREVENT + 2)
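/* Custom SDL user events: FF_ALLOC_EVENT asks the main thread to (re)allocate
   the YUV overlay, FF_REFRESH_EVENT drives the display refresh, and
   FF_QUIT_EVENT signals the event loop to quit. */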
282
283 static SDL_Surface *screen;
284
285 void exit_program(int ret)
286 {
287     exit(ret);
288 }
289
290 static int packet_queue_put(PacketQueue *q, AVPacket *pkt);
291
292 /* packet queue handling */
293 static void packet_queue_init(PacketQueue *q)
294 {
295     memset(q, 0, sizeof(PacketQueue));
296     q->mutex = SDL_CreateMutex();
297     q->cond = SDL_CreateCond();
298     packet_queue_put(q, &flush_pkt);
299 }
300
301 static void packet_queue_flush(PacketQueue *q)
302 {
303     AVPacketList *pkt, *pkt1;
304
305     SDL_LockMutex(q->mutex);
306     for (pkt = q->first_pkt; pkt != NULL; pkt = pkt1) {
307         pkt1 = pkt->next;
308         av_free_packet(&pkt->pkt);
309         av_freep(&pkt);
310     }
311     q->last_pkt = NULL;
312     q->first_pkt = NULL;
313     q->nb_packets = 0;
314     q->size = 0;
315     SDL_UnlockMutex(q->mutex);
316 }
317
318 static void packet_queue_end(PacketQueue *q)
319 {
320     packet_queue_flush(q);
321     SDL_DestroyMutex(q->mutex);
322     SDL_DestroyCond(q->cond);
323 }
324
325 static int packet_queue_put(PacketQueue *q, AVPacket *pkt)
326 {
327     AVPacketList *pkt1;
328
329     /* duplicate the packet */
330     if (pkt != &flush_pkt && av_dup_packet(pkt) < 0)
331         return -1;
332
333     pkt1 = av_malloc(sizeof(AVPacketList));
334     if (!pkt1)
335         return -1;
336     pkt1->pkt = *pkt;
337     pkt1->next = NULL;
338
339
340     SDL_LockMutex(q->mutex);
341
342     if (!q->last_pkt)
343
344         q->first_pkt = pkt1;
345     else
346         q->last_pkt->next = pkt1;
347     q->last_pkt = pkt1;
348     q->nb_packets++;
349     q->size += pkt1->pkt.size + sizeof(*pkt1);
350     /* XXX: should duplicate packet data in DV case */
351     SDL_CondSignal(q->cond);
352
353     SDL_UnlockMutex(q->mutex);
354     return 0;
355 }
356
357 static void packet_queue_abort(PacketQueue *q)
358 {
359     SDL_LockMutex(q->mutex);
360
361     q->abort_request = 1;
362
363     SDL_CondSignal(q->cond);
364
365     SDL_UnlockMutex(q->mutex);
366 }
367
368 /* return < 0 if aborted, 0 if no packet and > 0 if packet.  */
369 static int packet_queue_get(PacketQueue *q, AVPacket *pkt, int block)
370 {
371     AVPacketList *pkt1;
372     int ret;
373
374     SDL_LockMutex(q->mutex);
375
376     for (;;) {
377         if (q->abort_request) {
378             ret = -1;
379             break;
380         }
381
382         pkt1 = q->first_pkt;
383         if (pkt1) {
384             q->first_pkt = pkt1->next;
385             if (!q->first_pkt)
386                 q->last_pkt = NULL;
387             q->nb_packets--;
388             q->size -= pkt1->pkt.size + sizeof(*pkt1);
389             *pkt = pkt1->pkt;
390             av_free(pkt1);
391             ret = 1;
392             break;
393         } else if (!block) {
394             ret = 0;
395             break;
396         } else {
397             SDL_CondWait(q->cond, q->mutex);
398         }
399     }
400     SDL_UnlockMutex(q->mutex);
401     return ret;
402 }
403
404 static inline void fill_rectangle(SDL_Surface *screen,
405                                   int x, int y, int w, int h, int color)
406 {
407     SDL_Rect rect;
408     rect.x = x;
409     rect.y = y;
410     rect.w = w;
411     rect.h = h;
412     SDL_FillRect(screen, &rect, color);
413 }
414
415 #define ALPHA_BLEND(a, oldp, newp, s)\
416 ((((oldp << s) * (255 - (a))) + (newp * (a))) / (255 << s))
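/* ALPHA_BLEND mixes newp into oldp with alpha a (0..255). newp may be the sum
   of 2^s samples: oldp is pre-shifted by s bits so that averaging the summed
   chroma values and dividing by 255 are folded into a single division. */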
417
418 #define RGBA_IN(r, g, b, a, s)\
419 {\
420     unsigned int v = ((const uint32_t *)(s))[0];\
421     a = (v >> 24) & 0xff;\
422     r = (v >> 16) & 0xff;\
423     g = (v >> 8) & 0xff;\
424     b = v & 0xff;\
425 }
426
427 #define YUVA_IN(y, u, v, a, s, pal)\
428 {\
429     unsigned int val = ((const uint32_t *)(pal))[*(const uint8_t*)(s)];\
430     a = (val >> 24) & 0xff;\
431     y = (val >> 16) & 0xff;\
432     u = (val >> 8) & 0xff;\
433     v = val & 0xff;\
434 }
435
436 #define YUVA_OUT(d, y, u, v, a)\
437 {\
438     ((uint32_t *)(d))[0] = (a << 24) | (y << 16) | (u << 8) | v;\
439 }
440
441
442 #define BPP 1
443
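/* Blend one PAL8 subtitle rectangle into the YV12 picture. The palette holds
   32-bit AYUV entries (see YUVA_IN), so each pixel is alpha-blended directly
   in YUV space; luma is processed per pixel, while 2x2 blocks are accumulated
   for the 4:2:0 chroma planes, with the leading and trailing branches
   handling odd offsets and sizes. */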
444 static void blend_subrect(AVPicture *dst, const AVSubtitleRect *rect, int imgw, int imgh)
445 {
446     int wrap, wrap3, width2, skip2;
447     int y, u, v, a, u1, v1, a1, w, h;
448     uint8_t *lum, *cb, *cr;
449     const uint8_t *p;
450     const uint32_t *pal;
451     int dstx, dsty, dstw, dsth;
452
453     dstw = av_clip(rect->w, 0, imgw);
454     dsth = av_clip(rect->h, 0, imgh);
455     dstx = av_clip(rect->x, 0, imgw - dstw);
456     dsty = av_clip(rect->y, 0, imgh - dsth);
457     lum = dst->data[0] + dsty * dst->linesize[0];
458     cb  = dst->data[1] + (dsty >> 1) * dst->linesize[1];
459     cr  = dst->data[2] + (dsty >> 1) * dst->linesize[2];
460
461     width2 = ((dstw + 1) >> 1) + (dstx & ~dstw & 1);
462     skip2 = dstx >> 1;
463     wrap = dst->linesize[0];
464     wrap3 = rect->pict.linesize[0];
465     p = rect->pict.data[0];
466     pal = (const uint32_t *)rect->pict.data[1];  /* Now in YCrCb! */
467
468     if (dsty & 1) {
469         lum += dstx;
470         cb += skip2;
471         cr += skip2;
472
473         if (dstx & 1) {
474             YUVA_IN(y, u, v, a, p, pal);
475             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
476             cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
477             cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
478             cb++;
479             cr++;
480             lum++;
481             p += BPP;
482         }
483         for (w = dstw - (dstx & 1); w >= 2; w -= 2) {
484             YUVA_IN(y, u, v, a, p, pal);
485             u1 = u;
486             v1 = v;
487             a1 = a;
488             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
489
490             YUVA_IN(y, u, v, a, p + BPP, pal);
491             u1 += u;
492             v1 += v;
493             a1 += a;
494             lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
495             cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
496             cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
497             cb++;
498             cr++;
499             p += 2 * BPP;
500             lum += 2;
501         }
502         if (w) {
503             YUVA_IN(y, u, v, a, p, pal);
504             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
505             cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
506             cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
507             p++;
508             lum++;
509         }
510         p += wrap3 - dstw * BPP;
511         lum += wrap - dstw - dstx;
512         cb += dst->linesize[1] - width2 - skip2;
513         cr += dst->linesize[2] - width2 - skip2;
514     }
515     for (h = dsth - (dsty & 1); h >= 2; h -= 2) {
516         lum += dstx;
517         cb += skip2;
518         cr += skip2;
519
520         if (dstx & 1) {
521             YUVA_IN(y, u, v, a, p, pal);
522             u1 = u;
523             v1 = v;
524             a1 = a;
525             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
526             p += wrap3;
527             lum += wrap;
528             YUVA_IN(y, u, v, a, p, pal);
529             u1 += u;
530             v1 += v;
531             a1 += a;
532             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
533             cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
534             cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
535             cb++;
536             cr++;
537             p += -wrap3 + BPP;
538             lum += -wrap + 1;
539         }
540         for (w = dstw - (dstx & 1); w >= 2; w -= 2) {
541             YUVA_IN(y, u, v, a, p, pal);
542             u1 = u;
543             v1 = v;
544             a1 = a;
545             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
546
547             YUVA_IN(y, u, v, a, p + BPP, pal);
548             u1 += u;
549             v1 += v;
550             a1 += a;
551             lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
552             p += wrap3;
553             lum += wrap;
554
555             YUVA_IN(y, u, v, a, p, pal);
556             u1 += u;
557             v1 += v;
558             a1 += a;
559             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
560
561             YUVA_IN(y, u, v, a, p + BPP, pal);
562             u1 += u;
563             v1 += v;
564             a1 += a;
565             lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
566
567             cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 2);
568             cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 2);
569
570             cb++;
571             cr++;
572             p += -wrap3 + 2 * BPP;
573             lum += -wrap + 2;
574         }
575         if (w) {
576             YUVA_IN(y, u, v, a, p, pal);
577             u1 = u;
578             v1 = v;
579             a1 = a;
580             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
581             p += wrap3;
582             lum += wrap;
583             YUVA_IN(y, u, v, a, p, pal);
584             u1 += u;
585             v1 += v;
586             a1 += a;
587             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
588             cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
589             cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
590             cb++;
591             cr++;
592             p += -wrap3 + BPP;
593             lum += -wrap + 1;
594         }
595         p += wrap3 + (wrap3 - dstw * BPP);
596         lum += wrap + (wrap - dstw - dstx);
597         cb += dst->linesize[1] - width2 - skip2;
598         cr += dst->linesize[2] - width2 - skip2;
599     }
600     /* handle odd height */
601     if (h) {
602         lum += dstx;
603         cb += skip2;
604         cr += skip2;
605
606         if (dstx & 1) {
607             YUVA_IN(y, u, v, a, p, pal);
608             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
609             cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
610             cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
611             cb++;
612             cr++;
613             lum++;
614             p += BPP;
615         }
616         for (w = dstw - (dstx & 1); w >= 2; w -= 2) {
617             YUVA_IN(y, u, v, a, p, pal);
618             u1 = u;
619             v1 = v;
620             a1 = a;
621             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
622
623             YUVA_IN(y, u, v, a, p + BPP, pal);
624             u1 += u;
625             v1 += v;
626             a1 += a;
627             lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
628             cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
629             cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
630             cb++;
631             cr++;
632             p += 2 * BPP;
633             lum += 2;
634         }
635         if (w) {
636             YUVA_IN(y, u, v, a, p, pal);
637             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
638             cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
639             cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
640         }
641     }
642 }
643
644 static void free_subpicture(SubPicture *sp)
645 {
646     avsubtitle_free(&sp->sub);
647 }
648
649 static void video_image_display(VideoState *is)
650 {
651     VideoPicture *vp;
652     SubPicture *sp;
653     AVPicture pict;
654     float aspect_ratio;
655     int width, height, x, y;
656     SDL_Rect rect;
657     int i;
658
659     vp = &is->pictq[is->pictq_rindex];
660     if (vp->bmp) {
661 #if CONFIG_AVFILTER
662          if (vp->picref->video->pixel_aspect.num == 0)
663              aspect_ratio = 0;
664          else
665              aspect_ratio = av_q2d(vp->picref->video->pixel_aspect);
666 #else
667
668         /* XXX: use variable in the frame */
669         if (is->video_st->sample_aspect_ratio.num)
670             aspect_ratio = av_q2d(is->video_st->sample_aspect_ratio);
671         else if (is->video_st->codec->sample_aspect_ratio.num)
672             aspect_ratio = av_q2d(is->video_st->codec->sample_aspect_ratio);
673         else
674             aspect_ratio = 0;
675 #endif
676         if (aspect_ratio <= 0.0)
677             aspect_ratio = 1.0;
678         aspect_ratio *= (float)vp->width / (float)vp->height;
679
680         if (is->subtitle_st)
681         {
682             if (is->subpq_size > 0)
683             {
684                 sp = &is->subpq[is->subpq_rindex];
685
686                 if (vp->pts >= sp->pts + ((float) sp->sub.start_display_time / 1000))
687                 {
688                     SDL_LockYUVOverlay (vp->bmp);
689
690                     pict.data[0] = vp->bmp->pixels[0];
691                     pict.data[1] = vp->bmp->pixels[2];
692                     pict.data[2] = vp->bmp->pixels[1];
693
694                     pict.linesize[0] = vp->bmp->pitches[0];
695                     pict.linesize[1] = vp->bmp->pitches[2];
696                     pict.linesize[2] = vp->bmp->pitches[1];
697
698                     for (i = 0; i < sp->sub.num_rects; i++)
699                         blend_subrect(&pict, sp->sub.rects[i],
700                                       vp->bmp->w, vp->bmp->h);
701
702                     SDL_UnlockYUVOverlay (vp->bmp);
703                 }
704             }
705         }
706
707
708         /* XXX: we assume the screen has a 1.0 pixel aspect ratio */
709         height = is->height;
710         width = ((int)rint(height * aspect_ratio)) & ~1;
711         if (width > is->width) {
712             width = is->width;
713             height = ((int)rint(width / aspect_ratio)) & ~1;
714         }
715         x = (is->width - width) / 2;
716         y = (is->height - height) / 2;
717         is->no_background = 0;
718         rect.x = is->xleft + x;
719         rect.y = is->ytop  + y;
720         rect.w = width;
721         rect.h = height;
722         SDL_DisplayYUVOverlay(vp->bmp, &rect);
723     }
724 }
725
726 /* get the current audio output buffer size, in bytes. With SDL, we
727    cannot have precise information. */
728 static int audio_write_get_buf_size(VideoState *is)
729 {
730     return is->audio_buf_size - is->audio_buf_index;
731 }
732
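/* positive modulo, used for wrapping indices into the circular sample array */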
733 static inline int compute_mod(int a, int b)
734 {
735     a = a % b;
736     if (a >= 0)
737         return a;
738     else
739         return a + b;
740 }
741
742 static void video_audio_display(VideoState *s)
743 {
744     int i, i_start, x, y1, y, ys, delay, n, nb_display_channels;
745     int ch, channels, h, h2, bgcolor, fgcolor;
746     int64_t time_diff;
747     int rdft_bits, nb_freq;
748
749     for (rdft_bits = 1; (1 << rdft_bits) < 2 * s->height; rdft_bits++)
750         ;
751     nb_freq = 1 << (rdft_bits - 1);
752
753     /* compute display index : center on currently output samples */
754     channels = s->sdl_channels;
755     nb_display_channels = channels;
756     if (!s->paused) {
757         int data_used = s->show_audio == 1 ? s->width : (2 * nb_freq);
758         n = 2 * channels;
759         delay = audio_write_get_buf_size(s);
760         delay /= n;
761
762         /* to be more precise, we take into account the time spent since
763            the last buffer computation */
764         if (audio_callback_time) {
765             time_diff = av_gettime() - audio_callback_time;
766             delay -= (time_diff * s->audio_st->codec->sample_rate) / 1000000;
767         }
768
769         delay += 2 * data_used;
770         if (delay < data_used)
771             delay = data_used;
772
773         i_start= x = compute_mod(s->sample_array_index - delay * channels, SAMPLE_ARRAY_SIZE);
774         if (s->show_audio == 1) {
775             h = INT_MIN;
776             for (i = 0; i < 1000; i += channels) {
777                 int idx = (SAMPLE_ARRAY_SIZE + x - i) % SAMPLE_ARRAY_SIZE;
778                 int a = s->sample_array[idx];
779                 int b = s->sample_array[(idx + 4 * channels) % SAMPLE_ARRAY_SIZE];
780                 int c = s->sample_array[(idx + 5 * channels) % SAMPLE_ARRAY_SIZE];
781                 int d = s->sample_array[(idx + 9 * channels) % SAMPLE_ARRAY_SIZE];
782                 int score = a - d;
783                 if (h < score && (b ^ c) < 0) {
784                     h = score;
785                     i_start = idx;
786                 }
787             }
788         }
789
790         s->last_i_start = i_start;
791     } else {
792         i_start = s->last_i_start;
793     }
794
795     bgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0x00);
796     if (s->show_audio == 1) {
797         fill_rectangle(screen,
798                        s->xleft, s->ytop, s->width, s->height,
799                        bgcolor);
800
801         fgcolor = SDL_MapRGB(screen->format, 0xff, 0xff, 0xff);
802
803         /* total height for one channel */
804         h = s->height / nb_display_channels;
805         /* graph height / 2 */
806         h2 = (h * 9) / 20;
807         for (ch = 0; ch < nb_display_channels; ch++) {
808             i = i_start + ch;
809             y1 = s->ytop + ch * h + (h / 2); /* position of center line */
810             for (x = 0; x < s->width; x++) {
811                 y = (s->sample_array[i] * h2) >> 15;
812                 if (y < 0) {
813                     y = -y;
814                     ys = y1 - y;
815                 } else {
816                     ys = y1;
817                 }
818                 fill_rectangle(screen,
819                                s->xleft + x, ys, 1, y,
820                                fgcolor);
821                 i += channels;
822                 if (i >= SAMPLE_ARRAY_SIZE)
823                     i -= SAMPLE_ARRAY_SIZE;
824             }
825         }
826
827         fgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0xff);
828
829         for (ch = 1; ch < nb_display_channels; ch++) {
830             y = s->ytop + ch * h;
831             fill_rectangle(screen,
832                            s->xleft, y, s->width, 1,
833                            fgcolor);
834         }
835         SDL_UpdateRect(screen, s->xleft, s->ytop, s->width, s->height);
836     } else {
837         nb_display_channels= FFMIN(nb_display_channels, 2);
838         if (rdft_bits != s->rdft_bits) {
839             av_rdft_end(s->rdft);
840             av_free(s->rdft_data);
841             s->rdft = av_rdft_init(rdft_bits, DFT_R2C);
842             s->rdft_bits = rdft_bits;
843             s->rdft_data = av_malloc(4 * nb_freq * sizeof(*s->rdft_data));
844         }
845         {
846             FFTSample *data[2];
847             for (ch = 0; ch < nb_display_channels; ch++) {
848                 data[ch] = s->rdft_data + 2 * nb_freq * ch;
849                 i = i_start + ch;
850                 for (x = 0; x < 2 * nb_freq; x++) {
851                     double w = (x-nb_freq) * (1.0 / nb_freq);
852                     data[ch][x] = s->sample_array[i] * (1.0 - w * w);
853                     i += channels;
854                     if (i >= SAMPLE_ARRAY_SIZE)
855                         i -= SAMPLE_ARRAY_SIZE;
856                 }
857                 av_rdft_calc(s->rdft, data[ch]);
858             }
859             // Least efficient way to do this; we should of course access it directly, but it's more than fast enough.
860             for (y = 0; y < s->height; y++) {
861                 double w = 1 / sqrt(nb_freq);
862                 int a = sqrt(w * sqrt(data[0][2 * y + 0] * data[0][2 * y + 0] + data[0][2 * y + 1] * data[0][2 * y + 1]));
863                 int b = (nb_display_channels == 2 ) ? sqrt(w * sqrt(data[1][2 * y + 0] * data[1][2 * y + 0]
864                        + data[1][2 * y + 1] * data[1][2 * y + 1])) : a;
865                 a = FFMIN(a, 255);
866                 b = FFMIN(b, 255);
867                 fgcolor = SDL_MapRGB(screen->format, a, b, (a + b) / 2);
868
869                 fill_rectangle(screen,
870                             s->xpos, s->height-y, 1, 1,
871                             fgcolor);
872             }
873         }
874         SDL_UpdateRect(screen, s->xpos, s->ytop, 1, s->height);
875         s->xpos++;
876         if (s->xpos >= s->width)
877             s->xpos= s->xleft;
878     }
879 }
880
881 static int video_open(VideoState *is)
882 {
883     int flags = SDL_HWSURFACE | SDL_ASYNCBLIT | SDL_HWACCEL;
884     int w,h;
885
886     if (is_full_screen) flags |= SDL_FULLSCREEN;
887     else                flags |= SDL_RESIZABLE;
888
889     if (is_full_screen && fs_screen_width) {
890         w = fs_screen_width;
891         h = fs_screen_height;
892     } else if (!is_full_screen && screen_width) {
893         w = screen_width;
894         h = screen_height;
895 #if CONFIG_AVFILTER
896     } else if (is->out_video_filter && is->out_video_filter->inputs[0]) {
897         w = is->out_video_filter->inputs[0]->w;
898         h = is->out_video_filter->inputs[0]->h;
899 #else
900     } else if (is->video_st && is->video_st->codec->width) {
901         w = is->video_st->codec->width;
902         h = is->video_st->codec->height;
903 #endif
904     } else {
905         w = 640;
906         h = 480;
907     }
908     if (screen && is->width == screen->w && screen->w == w
909        && is->height== screen->h && screen->h == h)
910         return 0;
911
912 #if defined(__APPLE__) && !SDL_VERSION_ATLEAST(1, 2, 14)
913     /* setting bits_per_pixel = 0 or 32 causes blank video on OS X and older SDL */
914     screen = SDL_SetVideoMode(w, h, 24, flags);
915 #else
916     screen = SDL_SetVideoMode(w, h, 0, flags);
917 #endif
918     if (!screen) {
919         fprintf(stderr, "SDL: could not set video mode - exiting\n");
920         return -1;
921     }
922     if (!window_title)
923         window_title = input_filename;
924     SDL_WM_SetCaption(window_title, window_title);
925
926     is->width  = screen->w;
927     is->height = screen->h;
928
929     return 0;
930 }
931
932 /* display the current picture, if any */
933 static void video_display(VideoState *is)
934 {
935     if (!screen)
936         video_open(cur_stream);
937     if (is->audio_st && is->show_audio)
938         video_audio_display(is);
939     else if (is->video_st)
940         video_image_display(is);
941 }
942
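/* Periodically post FF_REFRESH_EVENT to the main event loop (every ~5 ms, or
   every rdftspeed ms while the audio visualization is shown). The 'refresh'
   flag avoids queueing more than one pending refresh event at a time. */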
943 static int refresh_thread(void *opaque)
944 {
945     VideoState *is= opaque;
946     while (!is->abort_request) {
947         SDL_Event event;
948         event.type = FF_REFRESH_EVENT;
949         event.user.data1 = opaque;
950         if (!is->refresh) {
951             is->refresh = 1;
952             SDL_PushEvent(&event);
953         }
954         av_usleep(is->audio_st && is->show_audio ? rdftspeed * 1000 : 5000); // FIXME: ideally we should wait for the correct time, but SDL's event passing is so slow it would be pointless
955     }
956     return 0;
957 }
958
959 /* get the current audio clock value */
960 static double get_audio_clock(VideoState *is)
961 {
962     double pts;
963     int hw_buf_size, bytes_per_sec;
964     pts = is->audio_clock;
965     hw_buf_size = audio_write_get_buf_size(is);
966     bytes_per_sec = 0;
967     if (is->audio_st) {
968         bytes_per_sec = is->audio_st->codec->sample_rate * is->sdl_channels *
969                         av_get_bytes_per_sample(is->sdl_sample_fmt);
970     }
971     if (bytes_per_sec)
972         pts -= (double)hw_buf_size / bytes_per_sec;
973     return pts;
974 }
975
976 /* get the current video clock value */
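/* While playing, the clock is extrapolated from the last displayed frame:
   video_current_pts_drift stores pts minus the wall-clock time of its update,
   so adding the current time yields a continuously running video clock. */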
977 static double get_video_clock(VideoState *is)
978 {
979     if (is->paused) {
980         return is->video_current_pts;
981     } else {
982         return is->video_current_pts_drift + av_gettime() / 1000000.0;
983     }
984 }
985
986 /* get the current external clock value */
987 static double get_external_clock(VideoState *is)
988 {
989     int64_t ti;
990     ti = av_gettime();
991     return is->external_clock + ((ti - is->external_clock_time) * 1e-6);
992 }
993
994 /* get the current master clock value */
995 static double get_master_clock(VideoState *is)
996 {
997     double val;
998
999     if (is->av_sync_type == AV_SYNC_VIDEO_MASTER) {
1000         if (is->video_st)
1001             val = get_video_clock(is);
1002         else
1003             val = get_audio_clock(is);
1004     } else if (is->av_sync_type == AV_SYNC_AUDIO_MASTER) {
1005         if (is->audio_st)
1006             val = get_audio_clock(is);
1007         else
1008             val = get_video_clock(is);
1009     } else {
1010         val = get_external_clock(is);
1011     }
1012     return val;
1013 }
1014
1015 /* seek in the stream */
1016 static void stream_seek(VideoState *is, int64_t pos, int64_t rel, int seek_by_bytes)
1017 {
1018     if (!is->seek_req) {
1019         is->seek_pos = pos;
1020         is->seek_rel = rel;
1021         is->seek_flags &= ~AVSEEK_FLAG_BYTE;
1022         if (seek_by_bytes)
1023             is->seek_flags |= AVSEEK_FLAG_BYTE;
1024         is->seek_req = 1;
1025     }
1026 }
1027
1028 /* pause or resume the video */
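/* On resume, frame_timer is advanced by (approximately) the time spent
   paused, recovered from the frozen pts/drift pair, so the next frame is not
   treated as late. */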
1029 static void stream_pause(VideoState *is)
1030 {
1031     if (is->paused) {
1032         is->frame_timer += av_gettime() / 1000000.0 + is->video_current_pts_drift - is->video_current_pts;
1033         if (is->read_pause_return != AVERROR(ENOSYS)) {
1034             is->video_current_pts = is->video_current_pts_drift + av_gettime() / 1000000.0;
1035         }
1036         is->video_current_pts_drift = is->video_current_pts - av_gettime() / 1000000.0;
1037     }
1038     is->paused = !is->paused;
1039 }
1040
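/* Compute the wall-clock time at which the given frame should be displayed.
   The nominal delay is the pts difference to the previous frame; when video
   is not the master clock, the delay is dropped to 0 (video is late) or
   doubled (video is ahead) whenever the A-V difference exceeds the sync
   threshold but is still below AV_NOSYNC_THRESHOLD. frame_timer accumulates
   the result. */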
1041 static double compute_target_time(double frame_current_pts, VideoState *is)
1042 {
1043     double delay, sync_threshold, diff;
1044
1045     /* compute nominal delay */
1046     delay = frame_current_pts - is->frame_last_pts;
1047     if (delay <= 0 || delay >= 10.0) {
1048         /* if incorrect delay, use previous one */
1049         delay = is->frame_last_delay;
1050     } else {
1051         is->frame_last_delay = delay;
1052     }
1053     is->frame_last_pts = frame_current_pts;
1054
1055     /* update delay to follow master synchronisation source */
1056     if (((is->av_sync_type == AV_SYNC_AUDIO_MASTER && is->audio_st) ||
1057          is->av_sync_type == AV_SYNC_EXTERNAL_CLOCK)) {
1058         /* if video is slave, we try to correct big delays by
1059            duplicating or deleting a frame */
1060         diff = get_video_clock(is) - get_master_clock(is);
1061
1062         /* skip or repeat frame. We take into account the
1063            delay to compute the threshold. I still don't know
1064            if it is the best guess */
1065         sync_threshold = FFMAX(AV_SYNC_THRESHOLD, delay);
1066         if (fabs(diff) < AV_NOSYNC_THRESHOLD) {
1067             if (diff <= -sync_threshold)
1068                 delay = 0;
1069             else if (diff >= sync_threshold)
1070                 delay = 2 * delay;
1071         }
1072     }
1073     is->frame_timer += delay;
1074
1075     av_dlog(NULL, "video: delay=%0.3f pts=%0.3f A-V=%f\n",
1076             delay, frame_current_pts, -diff);
1077
1078     return is->frame_timer;
1079 }
1080
1081 /* called to display each frame */
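/* Pops pictures from pictq once their target_clock has been reached, drops
   frames when running behind (framedrop), retires expired subtitles, then
   draws the picture and the periodic status line. */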
1082 static void video_refresh_timer(void *opaque)
1083 {
1084     VideoState *is = opaque;
1085     VideoPicture *vp;
1086
1087     SubPicture *sp, *sp2;
1088
1089     if (is->video_st) {
1090 retry:
1091         if (is->pictq_size == 0) {
1092             // nothing to do, no picture to display in the queue
1093         } else {
1094             double time = av_gettime() / 1000000.0;
1095             double next_target;
1096             /* dequeue the picture */
1097             vp = &is->pictq[is->pictq_rindex];
1098
1099             if (time < vp->target_clock)
1100                 return;
1101             /* update current video pts */
1102             is->video_current_pts = vp->pts;
1103             is->video_current_pts_drift = is->video_current_pts - time;
1104             is->video_current_pos = vp->pos;
1105             if (is->pictq_size > 1) {
1106                 VideoPicture *nextvp = &is->pictq[(is->pictq_rindex + 1) % VIDEO_PICTURE_QUEUE_SIZE];
1107                 assert(nextvp->target_clock >= vp->target_clock);
1108                 next_target= nextvp->target_clock;
1109             } else {
1110                 next_target = vp->target_clock + is->video_clock - vp->pts; // FIXME pass durations cleanly
1111             }
1112             if (framedrop && time > next_target) {
1113                 is->skip_frames *= 1.0 + FRAME_SKIP_FACTOR;
1114                 if (is->pictq_size > 1 || time > next_target + 0.5) {
1115                     /* update queue size and signal for next picture */
1116                     if (++is->pictq_rindex == VIDEO_PICTURE_QUEUE_SIZE)
1117                         is->pictq_rindex = 0;
1118
1119                     SDL_LockMutex(is->pictq_mutex);
1120                     is->pictq_size--;
1121                     SDL_CondSignal(is->pictq_cond);
1122                     SDL_UnlockMutex(is->pictq_mutex);
1123                     goto retry;
1124                 }
1125             }
1126
1127             if (is->subtitle_st) {
1128                 if (is->subtitle_stream_changed) {
1129                     SDL_LockMutex(is->subpq_mutex);
1130
1131                     while (is->subpq_size) {
1132                         free_subpicture(&is->subpq[is->subpq_rindex]);
1133
1134                         /* update queue size and signal for next picture */
1135                         if (++is->subpq_rindex == SUBPICTURE_QUEUE_SIZE)
1136                             is->subpq_rindex = 0;
1137
1138                         is->subpq_size--;
1139                     }
1140                     is->subtitle_stream_changed = 0;
1141
1142                     SDL_CondSignal(is->subpq_cond);
1143                     SDL_UnlockMutex(is->subpq_mutex);
1144                 } else {
1145                     if (is->subpq_size > 0) {
1146                         sp = &is->subpq[is->subpq_rindex];
1147
1148                         if (is->subpq_size > 1)
1149                             sp2 = &is->subpq[(is->subpq_rindex + 1) % SUBPICTURE_QUEUE_SIZE];
1150                         else
1151                             sp2 = NULL;
1152
1153                         if ((is->video_current_pts > (sp->pts + ((float) sp->sub.end_display_time / 1000)))
1154                                 || (sp2 && is->video_current_pts > (sp2->pts + ((float) sp2->sub.start_display_time / 1000))))
1155                         {
1156                             free_subpicture(sp);
1157
1158                             /* update queue size and signal for next picture */
1159                             if (++is->subpq_rindex == SUBPICTURE_QUEUE_SIZE)
1160                                 is->subpq_rindex = 0;
1161
1162                             SDL_LockMutex(is->subpq_mutex);
1163                             is->subpq_size--;
1164                             SDL_CondSignal(is->subpq_cond);
1165                             SDL_UnlockMutex(is->subpq_mutex);
1166                         }
1167                     }
1168                 }
1169             }
1170
1171             /* display picture */
1172             if (!display_disable)
1173                 video_display(is);
1174
1175             /* update queue size and signal for next picture */
1176             if (++is->pictq_rindex == VIDEO_PICTURE_QUEUE_SIZE)
1177                 is->pictq_rindex = 0;
1178
1179             SDL_LockMutex(is->pictq_mutex);
1180             is->pictq_size--;
1181             SDL_CondSignal(is->pictq_cond);
1182             SDL_UnlockMutex(is->pictq_mutex);
1183         }
1184     } else if (is->audio_st) {
1185         /* draw the next audio frame */
1186
1187         /* if there is only an audio stream, then display the audio bars (better
1188            than nothing, just to test the implementation) */
1189
1190         /* display picture */
1191         if (!display_disable)
1192             video_display(is);
1193     }
1194     if (show_status) {
1195         static int64_t last_time;
1196         int64_t cur_time;
1197         int aqsize, vqsize, sqsize;
1198         double av_diff;
1199
1200         cur_time = av_gettime();
1201         if (!last_time || (cur_time - last_time) >= 30000) {
1202             aqsize = 0;
1203             vqsize = 0;
1204             sqsize = 0;
1205             if (is->audio_st)
1206                 aqsize = is->audioq.size;
1207             if (is->video_st)
1208                 vqsize = is->videoq.size;
1209             if (is->subtitle_st)
1210                 sqsize = is->subtitleq.size;
1211             av_diff = 0;
1212             if (is->audio_st && is->video_st)
1213                 av_diff = get_audio_clock(is) - get_video_clock(is);
1214             printf("%7.2f A-V:%7.3f s:%3.1f aq=%5dKB vq=%5dKB sq=%5dB f=%"PRId64"/%"PRId64"   \r",
1215                    get_master_clock(is), av_diff, FFMAX(is->skip_frames - 1, 0), aqsize / 1024,
1216                    vqsize / 1024, sqsize, is->pts_ctx.num_faulty_dts, is->pts_ctx.num_faulty_pts);
1217             fflush(stdout);
1218             last_time = cur_time;
1219         }
1220     }
1221 }
1222
1223 static void stream_close(VideoState *is)
1224 {
1225     VideoPicture *vp;
1226     int i;
1227     /* XXX: use a special url_shutdown call to abort parse cleanly */
1228     is->abort_request = 1;
1229     SDL_WaitThread(is->parse_tid, NULL);
1230     SDL_WaitThread(is->refresh_tid, NULL);
1231
1232     /* free all pictures */
1233     for (i = 0; i < VIDEO_PICTURE_QUEUE_SIZE; i++) {
1234         vp = &is->pictq[i];
1235 #if CONFIG_AVFILTER
1236         avfilter_unref_bufferp(&vp->picref);
1237 #endif
1238         if (vp->bmp) {
1239             SDL_FreeYUVOverlay(vp->bmp);
1240             vp->bmp = NULL;
1241         }
1242     }
1243     SDL_DestroyMutex(is->pictq_mutex);
1244     SDL_DestroyCond(is->pictq_cond);
1245     SDL_DestroyMutex(is->subpq_mutex);
1246     SDL_DestroyCond(is->subpq_cond);
1247 #if !CONFIG_AVFILTER
1248     if (is->img_convert_ctx)
1249         sws_freeContext(is->img_convert_ctx);
1250 #endif
1251     av_free(is);
1252 }
1253
1254 static void do_exit(void)
1255 {
1256     if (cur_stream) {
1257         stream_close(cur_stream);
1258         cur_stream = NULL;
1259     }
1260     uninit_opts();
1261 #if CONFIG_AVFILTER
1262     avfilter_uninit();
1263 #endif
1264     avformat_network_deinit();
1265     if (show_status)
1266         printf("\n");
1267     SDL_Quit();
1268     av_log(NULL, AV_LOG_QUIET, "");
1269     exit(0);
1270 }
1271
1272 /* allocate a picture (this needs to be done in the main thread to avoid
1273    potential locking problems) */
1274 static void alloc_picture(void *opaque)
1275 {
1276     VideoState *is = opaque;
1277     VideoPicture *vp;
1278
1279     vp = &is->pictq[is->pictq_windex];
1280
1281     if (vp->bmp)
1282         SDL_FreeYUVOverlay(vp->bmp);
1283
1284 #if CONFIG_AVFILTER
1285     avfilter_unref_bufferp(&vp->picref);
1286
1287     vp->width   = is->out_video_filter->inputs[0]->w;
1288     vp->height  = is->out_video_filter->inputs[0]->h;
1289     vp->pix_fmt = is->out_video_filter->inputs[0]->format;
1290 #else
1291     vp->width   = is->video_st->codec->width;
1292     vp->height  = is->video_st->codec->height;
1293     vp->pix_fmt = is->video_st->codec->pix_fmt;
1294 #endif
1295
1296     vp->bmp = SDL_CreateYUVOverlay(vp->width, vp->height,
1297                                    SDL_YV12_OVERLAY,
1298                                    screen);
1299     if (!vp->bmp || vp->bmp->pitches[0] < vp->width) {
1300         /* SDL allocates a buffer smaller than requested if the video
1301          * overlay hardware is unable to support the requested size. */
1302         fprintf(stderr, "Error: the video system does not support an image\n"
1303                         "size of %dx%d pixels. Try using -vf \"scale=w:h\"\n"
1304                         "to reduce the image size.\n", vp->width, vp->height );
1305         do_exit();
1306     }
1307
1308     SDL_LockMutex(is->pictq_mutex);
1309     vp->allocated = 1;
1310     SDL_CondSignal(is->pictq_cond);
1311     SDL_UnlockMutex(is->pictq_mutex);
1312 }
1313
1314 /**
1315  *
1316  * @param pts the dts of the pkt / pts of the frame and guessed if not known
1317  */
1318 static int queue_picture(VideoState *is, AVFrame *src_frame, double pts, int64_t pos)
1319 {
1320     VideoPicture *vp;
1321 #if CONFIG_AVFILTER
1322     AVPicture pict_src;
1323 #else
1324     int dst_pix_fmt = PIX_FMT_YUV420P;
1325 #endif
1326     /* wait until we have space to put a new picture */
1327     SDL_LockMutex(is->pictq_mutex);
1328
1329     if (is->pictq_size >= VIDEO_PICTURE_QUEUE_SIZE && !is->refresh)
1330         is->skip_frames = FFMAX(1.0 - FRAME_SKIP_FACTOR, is->skip_frames * (1.0 - FRAME_SKIP_FACTOR));
1331
1332     while (is->pictq_size >= VIDEO_PICTURE_QUEUE_SIZE &&
1333            !is->videoq.abort_request) {
1334         SDL_CondWait(is->pictq_cond, is->pictq_mutex);
1335     }
1336     SDL_UnlockMutex(is->pictq_mutex);
1337
1338     if (is->videoq.abort_request)
1339         return -1;
1340
1341     vp = &is->pictq[is->pictq_windex];
1342
1343     /* alloc or resize hardware picture buffer */
1344     if (!vp->bmp || vp->reallocate ||
1345 #if CONFIG_AVFILTER
1346         vp->width  != is->out_video_filter->inputs[0]->w ||
1347         vp->height != is->out_video_filter->inputs[0]->h) {
1348 #else
1349         vp->width != is->video_st->codec->width ||
1350         vp->height != is->video_st->codec->height) {
1351 #endif
1352         SDL_Event event;
1353
1354         vp->allocated  = 0;
1355         vp->reallocate = 0;
1356
1357         /* the allocation must be done in the main thread to avoid
1358            locking problems */
1359         event.type = FF_ALLOC_EVENT;
1360         event.user.data1 = is;
1361         SDL_PushEvent(&event);
1362
1363         /* wait until the picture is allocated */
1364         SDL_LockMutex(is->pictq_mutex);
1365         while (!vp->allocated && !is->videoq.abort_request) {
1366             SDL_CondWait(is->pictq_cond, is->pictq_mutex);
1367         }
1368         SDL_UnlockMutex(is->pictq_mutex);
1369
1370         if (is->videoq.abort_request)
1371             return -1;
1372     }
1373
1374     /* if the frame is not skipped, then display it */
1375     if (vp->bmp) {
1376         AVPicture pict = { { 0 } };
1377 #if CONFIG_AVFILTER
1378         avfilter_unref_bufferp(&vp->picref);
1379         vp->picref = src_frame->opaque;
1380 #endif
1381
1382         /* get a pointer on the bitmap */
1383         SDL_LockYUVOverlay (vp->bmp);
1384
1385         pict.data[0] = vp->bmp->pixels[0];
1386         pict.data[1] = vp->bmp->pixels[2];
1387         pict.data[2] = vp->bmp->pixels[1];
1388
1389         pict.linesize[0] = vp->bmp->pitches[0];
1390         pict.linesize[1] = vp->bmp->pitches[2];
1391         pict.linesize[2] = vp->bmp->pitches[1];
1392
1393 #if CONFIG_AVFILTER
1394         pict_src.data[0] = src_frame->data[0];
1395         pict_src.data[1] = src_frame->data[1];
1396         pict_src.data[2] = src_frame->data[2];
1397
1398         pict_src.linesize[0] = src_frame->linesize[0];
1399         pict_src.linesize[1] = src_frame->linesize[1];
1400         pict_src.linesize[2] = src_frame->linesize[2];
1401
1402         // FIXME use direct rendering
1403         av_picture_copy(&pict, &pict_src,
1404                         vp->pix_fmt, vp->width, vp->height);
1405 #else
1406         sws_flags = av_get_int(sws_opts, "sws_flags", NULL);
1407         is->img_convert_ctx = sws_getCachedContext(is->img_convert_ctx,
1408             vp->width, vp->height, vp->pix_fmt, vp->width, vp->height,
1409             dst_pix_fmt, sws_flags, NULL, NULL, NULL);
1410         if (is->img_convert_ctx == NULL) {
1411             fprintf(stderr, "Cannot initialize the conversion context\n");
1412             exit(1);
1413         }
1414         sws_scale(is->img_convert_ctx, src_frame->data, src_frame->linesize,
1415                   0, vp->height, pict.data, pict.linesize);
1416 #endif
1417         /* update the bitmap content */
1418         SDL_UnlockYUVOverlay(vp->bmp);
1419
1420         vp->pts = pts;
1421         vp->pos = pos;
1422
1423         /* now we can update the picture count */
1424         if (++is->pictq_windex == VIDEO_PICTURE_QUEUE_SIZE)
1425             is->pictq_windex = 0;
1426         SDL_LockMutex(is->pictq_mutex);
1427         vp->target_clock = compute_target_time(vp->pts, is);
1428
1429         is->pictq_size++;
1430         SDL_UnlockMutex(is->pictq_mutex);
1431     }
1432     return 0;
1433 }
1434
1435 /**
1436  * compute the exact PTS for the picture if it is omitted in the stream
1437  * @param pts1 the dts of the pkt / pts of the frame
1438  */
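/* If the frame carries a non-zero pts, the video clock is resynchronized to
   it; otherwise the predicted clock is used. The prediction is then advanced
   by one frame duration plus repeat_pict half-frames, which accounts for
   repeated fields (e.g. MPEG-2 soft telecine). */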
1439 static int output_picture2(VideoState *is, AVFrame *src_frame, double pts1, int64_t pos)
1440 {
1441     double frame_delay, pts;
1442
1443     pts = pts1;
1444
1445     if (pts != 0) {
1446         /* update video clock with pts, if present */
1447         is->video_clock = pts;
1448     } else {
1449         pts = is->video_clock;
1450     }
1451     /* update video clock for next frame */
1452     frame_delay = av_q2d(is->video_st->codec->time_base);
1453     /* for MPEG2, the frame can be repeated, so we update the
1454        clock accordingly */
1455     frame_delay += src_frame->repeat_pict * (frame_delay * 0.5);
1456     is->video_clock += frame_delay;
1457
1458     return queue_picture(is, src_frame, pts, pos);
1459 }
1460
1461 static int get_video_frame(VideoState *is, AVFrame *frame, int64_t *pts, AVPacket *pkt)
1462 {
1463     int got_picture, i;
1464
1465     if (packet_queue_get(&is->videoq, pkt, 1) < 0)
1466         return -1;
1467
1468     if (pkt->data == flush_pkt.data) {
1469         avcodec_flush_buffers(is->video_st->codec);
1470
1471         SDL_LockMutex(is->pictq_mutex);
1472         // Make sure there are no long-delay timers (ideally we should just flush the queue, but that's harder)
1473         for (i = 0; i < VIDEO_PICTURE_QUEUE_SIZE; i++) {
1474             is->pictq[i].target_clock= 0;
1475         }
1476         while (is->pictq_size && !is->videoq.abort_request) {
1477             SDL_CondWait(is->pictq_cond, is->pictq_mutex);
1478         }
1479         is->video_current_pos = -1;
1480         SDL_UnlockMutex(is->pictq_mutex);
1481
1482         init_pts_correction(&is->pts_ctx);
1483         is->frame_last_pts = AV_NOPTS_VALUE;
1484         is->frame_last_delay = 0;
1485         is->frame_timer = (double)av_gettime() / 1000000.0;
1486         is->skip_frames = 1;
1487         is->skip_frames_index = 0;
1488         return 0;
1489     }
1490
1491     avcodec_decode_video2(is->video_st->codec, frame, &got_picture, pkt);
1492
1493     if (got_picture) {
1494         if (decoder_reorder_pts == -1) {
1495             *pts = guess_correct_pts(&is->pts_ctx, frame->pkt_pts, frame->pkt_dts);
1496         } else if (decoder_reorder_pts) {
1497             *pts = frame->pkt_pts;
1498         } else {
1499             *pts = frame->pkt_dts;
1500         }
1501
1502         if (*pts == AV_NOPTS_VALUE) {
1503             *pts = 0;
1504         }
1505
1506         is->skip_frames_index += 1;
1507         if (is->skip_frames_index >= is->skip_frames) {
1508             is->skip_frames_index -= FFMAX(is->skip_frames, 1.0);
1509             return 1;
1510         }
1511
1512     }
1513     return 0;
1514 }
1515
1516 #if CONFIG_AVFILTER
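/* Build the video filter graph: a "buffer" source configured with the
   stream's geometry, pixel format, time base and sample aspect ratio,
   followed by the optional user-supplied chain (vfilters), then a "format"
   filter forcing yuv420p (what the SDL YV12 overlay expects) feeding the
   "buffersink" output. */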
1517 static int configure_video_filters(AVFilterGraph *graph, VideoState *is, const char *vfilters)
1518 {
1519     char sws_flags_str[128];
1520     char buffersrc_args[256];
1521     int ret;
1522     AVFilterContext *filt_src = NULL, *filt_out = NULL, *filt_format;
1523     AVCodecContext *codec = is->video_st->codec;
1524
1525     snprintf(sws_flags_str, sizeof(sws_flags_str), "flags=%d", sws_flags);
1526     graph->scale_sws_opts = av_strdup(sws_flags_str);
1527
1528     snprintf(buffersrc_args, sizeof(buffersrc_args), "%d:%d:%d:%d:%d:%d:%d",
1529              codec->width, codec->height, codec->pix_fmt,
1530              is->video_st->time_base.num, is->video_st->time_base.den,
1531              codec->sample_aspect_ratio.num, codec->sample_aspect_ratio.den);
1532
1533
1534     if ((ret = avfilter_graph_create_filter(&filt_src,
1535                                             avfilter_get_by_name("buffer"),
1536                                             "src", buffersrc_args, NULL,
1537                                             graph)) < 0)
1538         return ret;
1539     if ((ret = avfilter_graph_create_filter(&filt_out,
1540                                             avfilter_get_by_name("buffersink"),
1541                                             "out", NULL, NULL, graph)) < 0)
1542         return ret;
1543
1544     if ((ret = avfilter_graph_create_filter(&filt_format,
1545                                             avfilter_get_by_name("format"),
1546                                             "format", "yuv420p", NULL, graph)) < 0)
1547         return ret;
1548     if ((ret = avfilter_link(filt_format, 0, filt_out, 0)) < 0)
1549         return ret;
1550
1551
1552     if (vfilters) {
1553         AVFilterInOut *outputs = avfilter_inout_alloc();
1554         AVFilterInOut *inputs  = avfilter_inout_alloc();
1555
1556         outputs->name    = av_strdup("in");
1557         outputs->filter_ctx = filt_src;
1558         outputs->pad_idx = 0;
1559         outputs->next    = NULL;
1560
1561         inputs->name    = av_strdup("out");
1562         inputs->filter_ctx = filt_format;
1563         inputs->pad_idx = 0;
1564         inputs->next    = NULL;
1565
1566         if ((ret = avfilter_graph_parse(graph, vfilters, inputs, outputs, NULL)) < 0)
1567             return ret;
1568     } else {
1569         if ((ret = avfilter_link(filt_src, 0, filt_format, 0)) < 0)
1570             return ret;
1571     }
1572
1573     if ((ret = avfilter_graph_config(graph, NULL)) < 0)
1574         return ret;
1575
1576     is->in_video_filter  = filt_src;
1577     is->out_video_filter = filt_out;
1578
1579     if (codec->codec->capabilities & CODEC_CAP_DR1) {
1580         is->use_dr1 = 1;
1581         codec->get_buffer     = codec_get_buffer;
1582         codec->release_buffer = codec_release_buffer;
1583         codec->opaque         = &is->buffer_pool;
1584     }
1585
1586     return ret;
1587 }
1588
1589 #endif  /* CONFIG_AVFILTER */
1590
1591 static int video_thread(void *arg)
1592 {
1593     AVPacket pkt = { 0 };
1594     VideoState *is = arg;
1595     AVFrame *frame = avcodec_alloc_frame();
1596     int64_t pts_int;
1597     double pts;
1598     int ret;
1599
1600 #if CONFIG_AVFILTER
1601     AVFilterGraph *graph = avfilter_graph_alloc();
1602     AVFilterContext *filt_out = NULL, *filt_in = NULL;
1603     int64_t pos;
1604     int last_w = is->video_st->codec->width;
1605     int last_h = is->video_st->codec->height;
1606
1607     if ((ret = configure_video_filters(graph, is, vfilters)) < 0)
1608         goto the_end;
1609     filt_in  = is->in_video_filter;
1610     filt_out = is->out_video_filter;
1611 #endif
1612
1613     for (;;) {
1614 #if CONFIG_AVFILTER
1615         AVFilterBufferRef *picref;
1616         AVRational tb;
1617 #endif
1618         while (is->paused && !is->videoq.abort_request)
1619             SDL_Delay(10);
1620
1621         av_free_packet(&pkt);
1622
1623         ret = get_video_frame(is, frame, &pts_int, &pkt);
1624         if (ret < 0)
1625             goto the_end;
1626
1627         if (!ret)
1628             continue;
1629
1630 #if CONFIG_AVFILTER
1631         if (   last_w != is->video_st->codec->width
1632             || last_h != is->video_st->codec->height) {
1633             av_dlog(NULL, "Changing size %dx%d -> %dx%d\n", last_w, last_h,
1634                     is->video_st->codec->width, is->video_st->codec->height);
1635             avfilter_graph_free(&graph);
1636             graph = avfilter_graph_alloc();
1637             if ((ret = configure_video_filters(graph, is, vfilters)) < 0)
1638                 goto the_end;
1639             filt_in  = is->in_video_filter;
1640             filt_out = is->out_video_filter;
1641             last_w = is->video_st->codec->width;
1642             last_h = is->video_st->codec->height;
1643         }
1644
1645         frame->pts = pts_int;
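             /* with direct rendering the decoder wrote into our own FrameBuffer, so
                wrap that memory in an AVFilterBufferRef and pass it to the buffer
                source without copying; otherwise the frame data is copied */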
1646         if (is->use_dr1) {
1647             FrameBuffer      *buf = frame->opaque;
1648             AVFilterBufferRef *fb = avfilter_get_video_buffer_ref_from_arrays(
1649                                         frame->data, frame->linesize,
1650                                         AV_PERM_READ | AV_PERM_PRESERVE,
1651                                         frame->width, frame->height,
1652                                         frame->format);
1653
1654             avfilter_copy_frame_props(fb, frame);
1655             fb->buf->priv           = buf;
1656             fb->buf->free           = filter_release_buffer;
1657
1658             buf->refcount++;
1659             av_buffersrc_buffer(filt_in, fb);
1660
1661         } else
1662             av_buffersrc_write_frame(filt_in, frame);
1663
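             /* drain every frame the filter graph has ready; timestamps are rescaled
                from the filter output time base to the stream time base below */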
1664         while (ret >= 0) {
1665             ret = av_buffersink_read(filt_out, &picref);
1666             if (ret < 0) {
1667                 ret = 0;
1668                 break;
1669             }
1670
1671             avfilter_copy_buf_props(frame, picref);
1672
1673             pts_int = picref->pts;
1674             tb      = filt_out->inputs[0]->time_base;
1675             pos     = picref->pos;
1676             frame->opaque = picref;
1677
1678             if (av_cmp_q(tb, is->video_st->time_base)) {
1679                 av_unused int64_t pts1 = pts_int;
1680                 pts_int = av_rescale_q(pts_int, tb, is->video_st->time_base);
1681                 av_dlog(NULL, "video_thread(): "
1682                         "tb:%d/%d pts:%"PRId64" -> tb:%d/%d pts:%"PRId64"\n",
1683                         tb.num, tb.den, pts1,
1684                         is->video_st->time_base.num, is->video_st->time_base.den, pts_int);
1685             }
1686             pts = pts_int * av_q2d(is->video_st->time_base);
1687             ret = output_picture2(is, frame, pts, pos);
1688         }
1689 #else
1690         pts = pts_int * av_q2d(is->video_st->time_base);
1691         ret = output_picture2(is, frame, pts, pkt.pos);
1692 #endif
1693
1694         if (ret < 0)
1695             goto the_end;
1696
1697         if (step)
1698             if (cur_stream)
1699                 stream_pause(cur_stream);
1700     }
1701  the_end:
1702 #if CONFIG_AVFILTER
1703     av_freep(&vfilters);
1704     avfilter_graph_free(&graph);
1705 #endif
1706     av_free_packet(&pkt);
1707     av_free(frame);
1708     return 0;
1709 }
1710
1711 static int subtitle_thread(void *arg)
1712 {
1713     VideoState *is = arg;
1714     SubPicture *sp;
1715     AVPacket pkt1, *pkt = &pkt1;
1716     int got_subtitle;
1717     double pts;
1718     int i, j;
1719     int r, g, b, y, u, v, a;
1720
1721     for (;;) {
1722         while (is->paused && !is->subtitleq.abort_request) {
1723             SDL_Delay(10);
1724         }
1725         if (packet_queue_get(&is->subtitleq, pkt, 1) < 0)
1726             break;
1727
1728         if (pkt->data == flush_pkt.data) {
1729             avcodec_flush_buffers(is->subtitle_st->codec);
1730             continue;
1731         }
1732         SDL_LockMutex(is->subpq_mutex);
1733         while (is->subpq_size >= SUBPICTURE_QUEUE_SIZE &&
1734                !is->subtitleq.abort_request) {
1735             SDL_CondWait(is->subpq_cond, is->subpq_mutex);
1736         }
1737         SDL_UnlockMutex(is->subpq_mutex);
1738
1739         if (is->subtitleq.abort_request)
1740             return 0;
1741
1742         sp = &is->subpq[is->subpq_windex];
1743
1744         /* NOTE: pts is the PTS of the _first_ subtitle picture beginning in
1745            this packet, if any */
1746         pts = 0;
1747         if (pkt->pts != AV_NOPTS_VALUE)
1748             pts = av_q2d(is->subtitle_st->time_base) * pkt->pts;
1749
1750         avcodec_decode_subtitle2(is->subtitle_st->codec, &sp->sub,
1751                                  &got_subtitle, pkt);
1752
1753         if (got_subtitle && sp->sub.format == 0) {
1754             sp->pts = pts;
1755
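                 /* convert the subtitle palette from RGBA to YUVA (CCIR 601) so the
                    rectangles can be blended directly onto the YUV video frames */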
1756             for (i = 0; i < sp->sub.num_rects; i++)
1757             {
1758                 for (j = 0; j < sp->sub.rects[i]->nb_colors; j++)
1759                 {
1760                     RGBA_IN(r, g, b, a, (uint32_t*)sp->sub.rects[i]->pict.data[1] + j);
1761                     y = RGB_TO_Y_CCIR(r, g, b);
1762                     u = RGB_TO_U_CCIR(r, g, b, 0);
1763                     v = RGB_TO_V_CCIR(r, g, b, 0);
1764                     YUVA_OUT((uint32_t*)sp->sub.rects[i]->pict.data[1] + j, y, u, v, a);
1765                 }
1766             }
1767
1768             /* now we can update the picture count */
1769             if (++is->subpq_windex == SUBPICTURE_QUEUE_SIZE)
1770                 is->subpq_windex = 0;
1771             SDL_LockMutex(is->subpq_mutex);
1772             is->subpq_size++;
1773             SDL_UnlockMutex(is->subpq_mutex);
1774         }
1775         av_free_packet(pkt);
1776     }
1777     return 0;
1778 }
1779
1780 /* copy samples for viewing in the audio waveform/spectrum display */
1781 static void update_sample_display(VideoState *is, short *samples, int samples_size)
1782 {
1783     int size, len;
1784
1785     size = samples_size / sizeof(short);
1786     while (size > 0) {
1787         len = SAMPLE_ARRAY_SIZE - is->sample_array_index;
1788         if (len > size)
1789             len = size;
1790         memcpy(is->sample_array + is->sample_array_index, samples, len * sizeof(short));
1791         samples += len;
1792         is->sample_array_index += len;
1793         if (is->sample_array_index >= SAMPLE_ARRAY_SIZE)
1794             is->sample_array_index = 0;
1795         size -= len;
1796     }
1797 }
1798
1799 /* return the new audio buffer size (samples can be added or deleted
1800    to get better sync if the video or an external clock is the master) */
1801 static int synchronize_audio(VideoState *is, short *samples,
1802                              int samples_size1, double pts)
1803 {
1804     int n, samples_size;
1805     double ref_clock;
1806
1807     n = is->sdl_channels * av_get_bytes_per_sample(is->sdl_sample_fmt);
1808     samples_size = samples_size1;
1809
1810     /* if not master, then we try to remove or add samples to correct the clock */
1811     if (((is->av_sync_type == AV_SYNC_VIDEO_MASTER && is->video_st) ||
1812          is->av_sync_type == AV_SYNC_EXTERNAL_CLOCK)) {
1813         double diff, avg_diff;
1814         int wanted_size, min_size, max_size, nb_samples;
1815
1816         ref_clock = get_master_clock(is);
1817         diff = get_audio_clock(is) - ref_clock;
1818
1819         if (diff < AV_NOSYNC_THRESHOLD) {
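                 /* audio_diff_cum is a running geometric series: the newest diff has
                    weight 1 and older ones decay by audio_diff_avg_coef, so
                    multiplying by (1 - coef) below yields the weighted average */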
1820             is->audio_diff_cum = diff + is->audio_diff_avg_coef * is->audio_diff_cum;
1821             if (is->audio_diff_avg_count < AUDIO_DIFF_AVG_NB) {
1822                 /* not enough measures to have a correct estimate */
1823                 is->audio_diff_avg_count++;
1824             } else {
1825                 /* estimate the A-V difference */
1826                 avg_diff = is->audio_diff_cum * (1.0 - is->audio_diff_avg_coef);
1827
1828                 if (fabs(avg_diff) >= is->audio_diff_threshold) {
1829                     wanted_size = samples_size + ((int)(diff * is->audio_st->codec->sample_rate) * n);
1830                     nb_samples = samples_size / n;
1831
1832                     min_size = ((nb_samples * (100 - SAMPLE_CORRECTION_PERCENT_MAX)) / 100) * n;
1833                     max_size = ((nb_samples * (100 + SAMPLE_CORRECTION_PERCENT_MAX)) / 100) * n;
1834                     if (wanted_size < min_size)
1835                         wanted_size = min_size;
1836                     else if (wanted_size > max_size)
1837                         wanted_size = max_size;
1838
1839                     /* add or remove samples to correct the sync */
1840                     if (wanted_size < samples_size) {
1841                         /* remove samples */
1842                         samples_size = wanted_size;
1843                     } else if (wanted_size > samples_size) {
1844                         uint8_t *samples_end, *q;
1845                         int nb;
1846
1847                         /* add samples */
1848                         nb = (wanted_size - samples_size);
1849                         samples_end = (uint8_t *)samples + samples_size - n;
1850                         q = samples_end + n;
1851                         while (nb > 0) {
1852                             memcpy(q, samples_end, n);
1853                             q += n;
1854                             nb -= n;
1855                         }
1856                         samples_size = wanted_size;
1857                     }
1858                 }
1859                 av_dlog(NULL, "diff=%f adiff=%f sample_diff=%d apts=%0.3f vpts=%0.3f %f\n",
1860                         diff, avg_diff, samples_size - samples_size1,
1861                         is->audio_clock, is->video_clock, is->audio_diff_threshold);
1862             }
1863         } else {
1864             /* difference is too big: it may be due to initial PTS errors, so
1865                reset the A-V filter */
1866             is->audio_diff_avg_count = 0;
1867             is->audio_diff_cum       = 0;
1868         }
1869     }
1870
1871     return samples_size;
1872 }
1873
1874 /* decode one audio frame and return its uncompressed size */
1875 static int audio_decode_frame(VideoState *is, double *pts_ptr)
1876 {
1877     AVPacket *pkt_temp = &is->audio_pkt_temp;
1878     AVPacket *pkt = &is->audio_pkt;
1879     AVCodecContext *dec = is->audio_st->codec;
1880     int n, len1, data_size, got_frame;
1881     double pts;
1882     int new_packet = 0;
1883     int flush_complete = 0;
1884
1885     for (;;) {
1886         /* NOTE: the audio packet can contain several frames */
1887         while (pkt_temp->size > 0 || (!pkt_temp->data && new_packet)) {
1888             int resample_changed, audio_resample;
1889
1890             if (!is->frame) {
1891                 if (!(is->frame = avcodec_alloc_frame()))
1892                     return AVERROR(ENOMEM);
1893             } else
1894                 avcodec_get_frame_defaults(is->frame);
1895
1896             if (flush_complete)
1897                 break;
1898             new_packet = 0;
1899             len1 = avcodec_decode_audio4(dec, is->frame, &got_frame, pkt_temp);
1900             if (len1 < 0) {
1901                 /* if error, we skip the frame */
1902                 pkt_temp->size = 0;
1903                 break;
1904             }
1905
1906             pkt_temp->data += len1;
1907             pkt_temp->size -= len1;
1908
1909             if (!got_frame) {
1910                 /* stop sending empty packets if the decoder is finished */
1911                 if (!pkt_temp->data && dec->codec->capabilities & CODEC_CAP_DELAY)
1912                     flush_complete = 1;
1913                 continue;
1914             }
1915             data_size = av_samples_get_buffer_size(NULL, dec->channels,
1916                                                    is->frame->nb_samples,
1917                                                    dec->sample_fmt, 1);
1918
1919             audio_resample = dec->sample_fmt     != is->sdl_sample_fmt ||
1920                              dec->channel_layout != is->sdl_channel_layout;
1921
1922             resample_changed = dec->sample_fmt     != is->resample_sample_fmt ||
1923                                dec->channel_layout != is->resample_channel_layout;
1924
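                 /* create or reconfigure the libavresample context when conversion to
                    the SDL output format is first needed, or when the decoder's sample
                    format or channel layout changes mid-stream */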
1925             if ((!is->avr && audio_resample) || resample_changed) {
1926                 int ret;
1927                 if (is->avr)
1928                     avresample_close(is->avr);
1929                 else if (audio_resample) {
1930                     is->avr = avresample_alloc_context();
1931                     if (!is->avr) {
1932                         fprintf(stderr, "error allocating AVAudioResampleContext\n");
1933                         break;
1934                     }
1935                 }
1936                 if (audio_resample) {
1937                     av_opt_set_int(is->avr, "in_channel_layout",  dec->channel_layout,    0);
1938                     av_opt_set_int(is->avr, "in_sample_fmt",      dec->sample_fmt,        0);
1939                     av_opt_set_int(is->avr, "in_sample_rate",     dec->sample_rate,       0);
1940                     av_opt_set_int(is->avr, "out_channel_layout", is->sdl_channel_layout, 0);
1941                     av_opt_set_int(is->avr, "out_sample_fmt",     is->sdl_sample_fmt,     0);
1942                     av_opt_set_int(is->avr, "out_sample_rate",    dec->sample_rate,       0);
1943
1944                     if ((ret = avresample_open(is->avr)) < 0) {
1945                         fprintf(stderr, "error initializing libavresample\n");
1946                         break;
1947                     }
1948                 }
1949                 is->resample_sample_fmt     = dec->sample_fmt;
1950                 is->resample_channel_layout = dec->channel_layout;
1951             }
1952
1953             if (audio_resample) {
1954                 void *tmp_out;
1955                 int out_samples, out_size, out_linesize;
1956                 int osize      = av_get_bytes_per_sample(is->sdl_sample_fmt);
1957                 int nb_samples = is->frame->nb_samples;
1958
1959                 out_size = av_samples_get_buffer_size(&out_linesize,
1960                                                       is->sdl_channels,
1961                                                       nb_samples,
1962                                                       is->sdl_sample_fmt, 0);
1963                 tmp_out = av_realloc(is->audio_buf1, out_size);
1964                 if (!tmp_out)
1965                     return AVERROR(ENOMEM);
1966                 is->audio_buf1 = tmp_out;
1967
1968                 out_samples = avresample_convert(is->avr,
1969                                                  (void **)&is->audio_buf1,
1970                                                  out_linesize, nb_samples,
1971                                                  (void **)is->frame->data,
1972                                                  is->frame->linesize[0],
1973                                                  is->frame->nb_samples);
1974                 if (out_samples < 0) {
1975                     fprintf(stderr, "avresample_convert() failed\n");
1976                     break;
1977                 }
1978                 is->audio_buf = is->audio_buf1;
1979                 data_size = out_samples * osize * is->sdl_channels;
1980             } else {
1981                 is->audio_buf = is->frame->data[0];
1982             }
1983
1984             /* if no pts, then compute it */
1985             pts = is->audio_clock;
1986             *pts_ptr = pts;
1987             n = is->sdl_channels * av_get_bytes_per_sample(is->sdl_sample_fmt);
1988             is->audio_clock += (double)data_size /
1989                 (double)(n * dec->sample_rate);
1990 #ifdef DEBUG
1991             {
1992                 static double last_clock;
1993                 printf("audio: delay=%0.3f clock=%0.3f pts=%0.3f\n",
1994                        is->audio_clock - last_clock,
1995                        is->audio_clock, pts);
1996                 last_clock = is->audio_clock;
1997             }
1998 #endif
1999             return data_size;
2000         }
2001
2002         /* free the current packet */
2003         if (pkt->data)
2004             av_free_packet(pkt);
2005         memset(pkt_temp, 0, sizeof(*pkt_temp));
2006
2007         if (is->paused || is->audioq.abort_request) {
2008             return -1;
2009         }
2010
2011         /* read next packet */
2012         if ((new_packet = packet_queue_get(&is->audioq, pkt, 1)) < 0)
2013             return -1;
2014
2015         if (pkt->data == flush_pkt.data) {
2016             avcodec_flush_buffers(dec);
2017             flush_complete = 0;
2018         }
2019
2020         *pkt_temp = *pkt;
2021
2022         /* update the audio clock with the packet pts, if available */
2023         if (pkt->pts != AV_NOPTS_VALUE) {
2024             is->audio_clock = av_q2d(is->audio_st->time_base)*pkt->pts;
2025         }
2026     }
2027 }
2028
2029 /* prepare a new audio buffer */
2030 static void sdl_audio_callback(void *opaque, Uint8 *stream, int len)
2031 {
2032     VideoState *is = opaque;
2033     int audio_size, len1;
2034     double pts;
2035
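         /* remember when SDL asked for data; get_audio_clock() uses this to
            extrapolate the audio clock between callbacks */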
2036     audio_callback_time = av_gettime();
2037
2038     while (len > 0) {
2039         if (is->audio_buf_index >= is->audio_buf_size) {
2040             audio_size = audio_decode_frame(is, &pts);
2041             if (audio_size < 0) {
2042                 /* if error, just output silence */
2043                 is->audio_buf      = is->silence_buf;
2044                 is->audio_buf_size = sizeof(is->silence_buf);
2045             } else {
2046                 if (is->show_audio)
2047                     update_sample_display(is, (int16_t *)is->audio_buf, audio_size);
2048                 audio_size = synchronize_audio(is, (int16_t *)is->audio_buf, audio_size,
2049                                                pts);
2050                 is->audio_buf_size = audio_size;
2051             }
2052             is->audio_buf_index = 0;
2053         }
2054         len1 = is->audio_buf_size - is->audio_buf_index;
2055         if (len1 > len)
2056             len1 = len;
2057         memcpy(stream, (uint8_t *)is->audio_buf + is->audio_buf_index, len1);
2058         len -= len1;
2059         stream += len1;
2060         is->audio_buf_index += len1;
2061     }
2062 }
2063
2064 /* open a given stream. Return 0 if OK */
2065 static int stream_component_open(VideoState *is, int stream_index)
2066 {
2067     AVFormatContext *ic = is->ic;
2068     AVCodecContext *avctx;
2069     AVCodec *codec;
2070     SDL_AudioSpec wanted_spec, spec;
2071     AVDictionary *opts;
2072     AVDictionaryEntry *t = NULL;
2073
2074     if (stream_index < 0 || stream_index >= ic->nb_streams)
2075         return -1;
2076     avctx = ic->streams[stream_index]->codec;
2077
2078     opts = filter_codec_opts(codec_opts, avctx->codec_id, ic, ic->streams[stream_index], NULL);
2079
2080     codec = avcodec_find_decoder(avctx->codec_id);
2081     avctx->debug_mv          = debug_mv;
2082     avctx->debug             = debug;
2083     avctx->workaround_bugs   = workaround_bugs;
2084     avctx->idct_algo         = idct;
2085     avctx->skip_frame        = skip_frame;
2086     avctx->skip_idct         = skip_idct;
2087     avctx->skip_loop_filter  = skip_loop_filter;
2088     avctx->error_concealment = error_concealment;
2089
2090     if (fast)   avctx->flags2 |= CODEC_FLAG2_FAST;
2091
2092     if (!av_dict_get(opts, "threads", NULL, 0))
2093         av_dict_set(&opts, "threads", "auto", 0);
2094     if (!codec ||
2095         avcodec_open2(avctx, codec, &opts) < 0)
2096         return -1;
2097     if ((t = av_dict_get(opts, "", NULL, AV_DICT_IGNORE_SUFFIX))) {
2098         av_log(NULL, AV_LOG_ERROR, "Option %s not found.\n", t->key);
2099         return AVERROR_OPTION_NOT_FOUND;
2100     }
2101
2102     /* prepare audio output */
2103     if (avctx->codec_type == AVMEDIA_TYPE_AUDIO) {
2104         wanted_spec.freq = avctx->sample_rate;
2105         wanted_spec.format = AUDIO_S16SYS;
2106
2107         if (!avctx->channel_layout)
2108             avctx->channel_layout = av_get_default_channel_layout(avctx->channels);
2109         if (!avctx->channel_layout) {
2110             fprintf(stderr, "unable to guess channel layout\n");
2111             return -1;
2112         }
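             /* SDL output is limited to mono or stereo here; any other source layout
                is downmixed by libavresample before playback */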
2113         if (avctx->channels == 1)
2114             is->sdl_channel_layout = AV_CH_LAYOUT_MONO;
2115         else
2116             is->sdl_channel_layout = AV_CH_LAYOUT_STEREO;
2117         is->sdl_channels = av_get_channel_layout_nb_channels(is->sdl_channel_layout);
2118
2119         wanted_spec.channels = is->sdl_channels;
2120         wanted_spec.silence = 0;
2121         wanted_spec.samples = SDL_AUDIO_BUFFER_SIZE;
2122         wanted_spec.callback = sdl_audio_callback;
2123         wanted_spec.userdata = is;
2124         if (SDL_OpenAudio(&wanted_spec, &spec) < 0) {
2125             fprintf(stderr, "SDL_OpenAudio: %s\n", SDL_GetError());
2126             return -1;
2127         }
2128         is->audio_hw_buf_size = spec.size;
2129         is->sdl_sample_fmt          = AV_SAMPLE_FMT_S16;
2130         is->resample_sample_fmt     = is->sdl_sample_fmt;
2131         is->resample_channel_layout = is->sdl_channel_layout;
2132     }
2133
2134     ic->streams[stream_index]->discard = AVDISCARD_DEFAULT;
2135     switch (avctx->codec_type) {
2136     case AVMEDIA_TYPE_AUDIO:
2137         is->audio_stream = stream_index;
2138         is->audio_st = ic->streams[stream_index];
2139         is->audio_buf_size  = 0;
2140         is->audio_buf_index = 0;
2141
2142         /* init averaging filter */
2143         is->audio_diff_avg_coef  = exp(log(0.01) / AUDIO_DIFF_AVG_NB);
2144         is->audio_diff_avg_count = 0;
2145         /* since we do not have a precise enough audio FIFO fullness measure,
2146            we correct audio sync only if the error exceeds this threshold */
2147         is->audio_diff_threshold = 2.0 * SDL_AUDIO_BUFFER_SIZE / avctx->sample_rate;
2148
2149         memset(&is->audio_pkt, 0, sizeof(is->audio_pkt));
2150         packet_queue_init(&is->audioq);
2151         SDL_PauseAudio(0);
2152         break;
2153     case AVMEDIA_TYPE_VIDEO:
2154         is->video_stream = stream_index;
2155         is->video_st = ic->streams[stream_index];
2156
2157         packet_queue_init(&is->videoq);
2158         is->video_tid = SDL_CreateThread(video_thread, is);
2159         break;
2160     case AVMEDIA_TYPE_SUBTITLE:
2161         is->subtitle_stream = stream_index;
2162         is->subtitle_st = ic->streams[stream_index];
2163         packet_queue_init(&is->subtitleq);
2164
2165         is->subtitle_tid = SDL_CreateThread(subtitle_thread, is);
2166         break;
2167     default:
2168         break;
2169     }
2170     return 0;
2171 }
2172
2173 static void stream_component_close(VideoState *is, int stream_index)
2174 {
2175     AVFormatContext *ic = is->ic;
2176     AVCodecContext *avctx;
2177
2178     if (stream_index < 0 || stream_index >= ic->nb_streams)
2179         return;
2180     avctx = ic->streams[stream_index]->codec;
2181
2182     switch (avctx->codec_type) {
2183     case AVMEDIA_TYPE_AUDIO:
2184         packet_queue_abort(&is->audioq);
2185
2186         SDL_CloseAudio();
2187
2188         packet_queue_end(&is->audioq);
2189         av_free_packet(&is->audio_pkt);
2190         if (is->avr)
2191             avresample_free(&is->avr);
2192         av_freep(&is->audio_buf1);
2193         is->audio_buf = NULL;
2194         av_freep(&is->frame);
2195
2196         if (is->rdft) {
2197             av_rdft_end(is->rdft);
2198             av_freep(&is->rdft_data);
2199             is->rdft = NULL;
2200             is->rdft_bits = 0;
2201         }
2202         break;
2203     case AVMEDIA_TYPE_VIDEO:
2204         packet_queue_abort(&is->videoq);
2205
2206         /* note: we also signal this mutex to make sure we deblock the
2207            video thread in all cases */
2208         SDL_LockMutex(is->pictq_mutex);
2209         SDL_CondSignal(is->pictq_cond);
2210         SDL_UnlockMutex(is->pictq_mutex);
2211
2212         SDL_WaitThread(is->video_tid, NULL);
2213
2214         packet_queue_end(&is->videoq);
2215         break;
2216     case AVMEDIA_TYPE_SUBTITLE:
2217         packet_queue_abort(&is->subtitleq);
2218
2219         /* note: we also signal this mutex to make sure we deblock the
2220            subtitle thread in all cases */
2221         SDL_LockMutex(is->subpq_mutex);
2222         is->subtitle_stream_changed = 1;
2223
2224         SDL_CondSignal(is->subpq_cond);
2225         SDL_UnlockMutex(is->subpq_mutex);
2226
2227         SDL_WaitThread(is->subtitle_tid, NULL);
2228
2229         packet_queue_end(&is->subtitleq);
2230         break;
2231     default:
2232         break;
2233     }
2234
2235     ic->streams[stream_index]->discard = AVDISCARD_ALL;
2236     avcodec_close(avctx);
2237 #if CONFIG_AVFILTER
2238     free_buffer_pool(&is->buffer_pool);
2239 #endif
2240     switch (avctx->codec_type) {
2241     case AVMEDIA_TYPE_AUDIO:
2242         is->audio_st = NULL;
2243         is->audio_stream = -1;
2244         break;
2245     case AVMEDIA_TYPE_VIDEO:
2246         is->video_st = NULL;
2247         is->video_stream = -1;
2248         break;
2249     case AVMEDIA_TYPE_SUBTITLE:
2250         is->subtitle_st = NULL;
2251         is->subtitle_stream = -1;
2252         break;
2253     default:
2254         break;
2255     }
2256 }
2257
2258 /* since we have only one decoding thread, we can use a global
2259    variable instead of a thread local variable */
2260 static VideoState *global_video_state;
2261
2262 static int decode_interrupt_cb(void *ctx)
2263 {
2264     return global_video_state && global_video_state->abort_request;
2265 }
2266
2267 /* this thread gets the stream from the disk or the network */
2268 static int decode_thread(void *arg)
2269 {
2270     VideoState *is = arg;
2271     AVFormatContext *ic = NULL;
2272     int err, i, ret;
2273     int st_index[AVMEDIA_TYPE_NB];
2274     AVPacket pkt1, *pkt = &pkt1;
2275     int eof = 0;
2276     int pkt_in_play_range = 0;
2277     AVDictionaryEntry *t;
2278     AVDictionary **opts;
2279     int orig_nb_streams;
2280
2281     memset(st_index, -1, sizeof(st_index));
2282     is->video_stream = -1;
2283     is->audio_stream = -1;
2284     is->subtitle_stream = -1;
2285
2286     global_video_state = is;
2287
2288     ic = avformat_alloc_context();
2289     ic->interrupt_callback.callback = decode_interrupt_cb;
2290     err = avformat_open_input(&ic, is->filename, is->iformat, &format_opts);
2291     if (err < 0) {
2292         print_error(is->filename, err);
2293         ret = -1;
2294         goto fail;
2295     }
2296     if ((t = av_dict_get(format_opts, "", NULL, AV_DICT_IGNORE_SUFFIX))) {
2297         av_log(NULL, AV_LOG_ERROR, "Option %s not found.\n", t->key);
2298         ret = AVERROR_OPTION_NOT_FOUND;
2299         goto fail;
2300     }
2301     is->ic = ic;
2302
2303     if (genpts)
2304         ic->flags |= AVFMT_FLAG_GENPTS;
2305
2306     opts = setup_find_stream_info_opts(ic, codec_opts);
2307     orig_nb_streams = ic->nb_streams;
2308
2309     err = avformat_find_stream_info(ic, opts);
2310     if (err < 0) {
2311         fprintf(stderr, "%s: could not find codec parameters\n", is->filename);
2312         ret = -1;
2313         goto fail;
2314     }
2315     for (i = 0; i < orig_nb_streams; i++)
2316         av_dict_free(&opts[i]);
2317     av_freep(&opts);
2318
2319     if (ic->pb)
2320         ic->pb->eof_reached = 0; // FIXME hack, avplay maybe should not use url_feof() to test for the end
2321
2322     if (seek_by_bytes < 0)
2323         seek_by_bytes = !!(ic->iformat->flags & AVFMT_TS_DISCONT);
2324
2325     /* if a seek was requested, execute it */
2326     if (start_time != AV_NOPTS_VALUE) {
2327         int64_t timestamp;
2328
2329         timestamp = start_time;
2330         /* add the stream start time */
2331         if (ic->start_time != AV_NOPTS_VALUE)
2332             timestamp += ic->start_time;
2333         ret = avformat_seek_file(ic, -1, INT64_MIN, timestamp, INT64_MAX, 0);
2334         if (ret < 0) {
2335             fprintf(stderr, "%s: could not seek to position %0.3f\n",
2336                     is->filename, (double)timestamp / AV_TIME_BASE);
2337         }
2338     }
2339
2340     for (i = 0; i < ic->nb_streams; i++)
2341         ic->streams[i]->discard = AVDISCARD_ALL;
2342     if (!video_disable)
2343         st_index[AVMEDIA_TYPE_VIDEO] =
2344             av_find_best_stream(ic, AVMEDIA_TYPE_VIDEO,
2345                                 wanted_stream[AVMEDIA_TYPE_VIDEO], -1, NULL, 0);
2346     if (!audio_disable)
2347         st_index[AVMEDIA_TYPE_AUDIO] =
2348             av_find_best_stream(ic, AVMEDIA_TYPE_AUDIO,
2349                                 wanted_stream[AVMEDIA_TYPE_AUDIO],
2350                                 st_index[AVMEDIA_TYPE_VIDEO],
2351                                 NULL, 0);
2352     if (!video_disable)
2353         st_index[AVMEDIA_TYPE_SUBTITLE] =
2354             av_find_best_stream(ic, AVMEDIA_TYPE_SUBTITLE,
2355                                 wanted_stream[AVMEDIA_TYPE_SUBTITLE],
2356                                 (st_index[AVMEDIA_TYPE_AUDIO] >= 0 ?
2357                                  st_index[AVMEDIA_TYPE_AUDIO] :
2358                                  st_index[AVMEDIA_TYPE_VIDEO]),
2359                                 NULL, 0);
2360     if (show_status) {
2361         av_dump_format(ic, 0, is->filename, 0);
2362     }
2363
2364     /* open the streams */
2365     if (st_index[AVMEDIA_TYPE_AUDIO] >= 0) {
2366         stream_component_open(is, st_index[AVMEDIA_TYPE_AUDIO]);
2367     }
2368
2369     ret = -1;
2370     if (st_index[AVMEDIA_TYPE_VIDEO] >= 0) {
2371         ret = stream_component_open(is, st_index[AVMEDIA_TYPE_VIDEO]);
2372     }
2373     is->refresh_tid = SDL_CreateThread(refresh_thread, is);
2374     if (ret < 0) {
2375         if (!display_disable)
2376             is->show_audio = 2;
2377     }
2378
2379     if (st_index[AVMEDIA_TYPE_SUBTITLE] >= 0) {
2380         stream_component_open(is, st_index[AVMEDIA_TYPE_SUBTITLE]);
2381     }
2382
2383     if (is->video_stream < 0 && is->audio_stream < 0) {
2384         fprintf(stderr, "%s: could not open codecs\n", is->filename);
2385         ret = -1;
2386         goto fail;
2387     }
2388
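         /* main demux loop: handle pause and seek requests, throttle reading while
            the packet queues are full, flush the decoders at end of file, then read
            the next packet and dispatch it to the matching queue */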
2389     for (;;) {
2390         if (is->abort_request)
2391             break;
2392         if (is->paused != is->last_paused) {
2393             is->last_paused = is->paused;
2394             if (is->paused)
2395                 is->read_pause_return = av_read_pause(ic);
2396             else
2397                 av_read_play(ic);
2398         }
2399 #if CONFIG_RTSP_DEMUXER
2400         if (is->paused && !strcmp(ic->iformat->name, "rtsp")) {
2401             /* wait 10 ms to avoid trying to get another packet */
2402             /* XXX: horrible */
2403             SDL_Delay(10);
2404             continue;
2405         }
2406 #endif
2407         if (is->seek_req) {
2408             int64_t seek_target = is->seek_pos;
2409             int64_t seek_min    = is->seek_rel > 0 ? seek_target - is->seek_rel + 2: INT64_MIN;
2410             int64_t seek_max    = is->seek_rel < 0 ? seek_target - is->seek_rel - 2: INT64_MAX;
2411 // FIXME the +-2 is due to rounding not being done in the correct direction when
2412 //       generating the seek_pos/seek_rel variables
2413
2414             ret = avformat_seek_file(is->ic, -1, seek_min, seek_target, seek_max, is->seek_flags);
2415             if (ret < 0) {
2416                 fprintf(stderr, "%s: error while seeking\n", is->ic->filename);
2417             } else {
2418                 if (is->audio_stream >= 0) {
2419                     packet_queue_flush(&is->audioq);
2420                     packet_queue_put(&is->audioq, &flush_pkt);
2421                 }
2422                 if (is->subtitle_stream >= 0) {
2423                     packet_queue_flush(&is->subtitleq);
2424                     packet_queue_put(&is->subtitleq, &flush_pkt);
2425                 }
2426                 if (is->video_stream >= 0) {
2427                     packet_queue_flush(&is->videoq);
2428                     packet_queue_put(&is->videoq, &flush_pkt);
2429                 }
2430             }
2431             is->seek_req = 0;
2432             eof = 0;
2433         }
2434
2435         /* if the queues are full, no need to read more */
2436         if (!infinite_buffer &&
2437               (is->audioq.size + is->videoq.size + is->subtitleq.size > MAX_QUEUE_SIZE
2438             || (   (is->audioq   .size  > MIN_AUDIOQ_SIZE || is->audio_stream < 0)
2439                 && (is->videoq   .nb_packets > MIN_FRAMES || is->video_stream < 0)
2440                 && (is->subtitleq.nb_packets > MIN_FRAMES || is->subtitle_stream < 0)))) {
2441             /* wait 10 ms */
2442             SDL_Delay(10);
2443             continue;
2444         }
2445         if (eof) {
2446             if (is->video_stream >= 0) {
2447                 av_init_packet(pkt);
2448                 pkt->data = NULL;
2449                 pkt->size = 0;
2450                 pkt->stream_index = is->video_stream;
2451                 packet_queue_put(&is->videoq, pkt);
2452             }
2453             if (is->audio_stream >= 0 &&
2454                 is->audio_st->codec->codec->capabilities & CODEC_CAP_DELAY) {
2455                 av_init_packet(pkt);
2456                 pkt->data = NULL;
2457                 pkt->size = 0;
2458                 pkt->stream_index = is->audio_stream;
2459                 packet_queue_put(&is->audioq, pkt);
2460             }
2461             SDL_Delay(10);
2462             if (is->audioq.size + is->videoq.size + is->subtitleq.size == 0) {
2463                 if (loop != 1 && (!loop || --loop)) {
2464                     stream_seek(cur_stream, start_time != AV_NOPTS_VALUE ? start_time : 0, 0, 0);
2465                 } else if (autoexit) {
2466                     ret = AVERROR_EOF;
2467                     goto fail;
2468                 }
2469             }
2470             continue;
2471         }
2472         ret = av_read_frame(ic, pkt);
2473         if (ret < 0) {
2474             if (ret == AVERROR_EOF || (ic->pb && ic->pb->eof_reached))
2475                 eof = 1;
2476             if (ic->pb && ic->pb->error)
2477                 break;
2478             SDL_Delay(100); /* wait for user event */
2479             continue;
2480         }
2481         /* check if packet is in play range specified by user, then queue, otherwise discard */
2482         pkt_in_play_range = duration == AV_NOPTS_VALUE ||
2483                 (pkt->pts - ic->streams[pkt->stream_index]->start_time) *
2484                 av_q2d(ic->streams[pkt->stream_index]->time_base) -
2485                 (double)(start_time != AV_NOPTS_VALUE ? start_time : 0) / 1000000
2486                 <= ((double)duration / 1000000);
2487         if (pkt->stream_index == is->audio_stream && pkt_in_play_range) {
2488             packet_queue_put(&is->audioq, pkt);
2489         } else if (pkt->stream_index == is->video_stream && pkt_in_play_range) {
2490             packet_queue_put(&is->videoq, pkt);
2491         } else if (pkt->stream_index == is->subtitle_stream && pkt_in_play_range) {
2492             packet_queue_put(&is->subtitleq, pkt);
2493         } else {
2494             av_free_packet(pkt);
2495         }
2496     }
2497     /* wait until the end */
2498     while (!is->abort_request) {
2499         SDL_Delay(100);
2500     }
2501
2502     ret = 0;
2503  fail:
2504     /* disable interrupting */
2505     global_video_state = NULL;
2506
2507     /* close each stream */
2508     if (is->audio_stream >= 0)
2509         stream_component_close(is, is->audio_stream);
2510     if (is->video_stream >= 0)
2511         stream_component_close(is, is->video_stream);
2512     if (is->subtitle_stream >= 0)
2513         stream_component_close(is, is->subtitle_stream);
2514     if (is->ic) {
2515         avformat_close_input(&is->ic);
2516     }
2517
2518     if (ret != 0) {
2519         SDL_Event event;
2520
2521         event.type = FF_QUIT_EVENT;
2522         event.user.data1 = is;
2523         SDL_PushEvent(&event);
2524     }
2525     return 0;
2526 }
2527
2528 static VideoState *stream_open(const char *filename, AVInputFormat *iformat)
2529 {
2530     VideoState *is;
2531
2532     is = av_mallocz(sizeof(VideoState));
2533     if (!is)
2534         return NULL;
2535     av_strlcpy(is->filename, filename, sizeof(is->filename));
2536     is->iformat = iformat;
2537     is->ytop    = 0;
2538     is->xleft   = 0;
2539
2540     /* start video display */
2541     is->pictq_mutex = SDL_CreateMutex();
2542     is->pictq_cond  = SDL_CreateCond();
2543
2544     is->subpq_mutex = SDL_CreateMutex();
2545     is->subpq_cond  = SDL_CreateCond();
2546
2547     is->av_sync_type = av_sync_type;
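         /* all demuxing and decoding is driven from decode_thread; stream_open only
            sets up the state and the synchronization primitives */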
2548     is->parse_tid    = SDL_CreateThread(decode_thread, is);
2549     if (!is->parse_tid) {
2550         av_free(is);
2551         return NULL;
2552     }
2553     return is;
2554 }
2555
2556 static void stream_cycle_channel(VideoState *is, int codec_type)
2557 {
2558     AVFormatContext *ic = is->ic;
2559     int start_index, stream_index;
2560     AVStream *st;
2561
2562     if (codec_type == AVMEDIA_TYPE_VIDEO)
2563         start_index = is->video_stream;
2564     else if (codec_type == AVMEDIA_TYPE_AUDIO)
2565         start_index = is->audio_stream;
2566     else
2567         start_index = is->subtitle_stream;
2568     if (start_index < (codec_type == AVMEDIA_TYPE_SUBTITLE ? -1 : 0))
2569         return;
2570     stream_index = start_index;
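         /* walk the streams circularly starting after the current one; for subtitles,
            wrapping around to -1 means the subtitle stream gets disabled */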
2571     for (;;) {
2572         if (++stream_index >= is->ic->nb_streams)
2573         {
2574             if (codec_type == AVMEDIA_TYPE_SUBTITLE)
2575             {
2576                 stream_index = -1;
2577                 goto the_end;
2578             } else
2579                 stream_index = 0;
2580         }
2581         if (stream_index == start_index)
2582             return;
2583         st = ic->streams[stream_index];
2584         if (st->codec->codec_type == codec_type) {
2585             /* check that parameters are OK */
2586             switch (codec_type) {
2587             case AVMEDIA_TYPE_AUDIO:
2588                 if (st->codec->sample_rate != 0 &&
2589                     st->codec->channels != 0)
2590                     goto the_end;
2591                 break;
2592             case AVMEDIA_TYPE_VIDEO:
2593             case AVMEDIA_TYPE_SUBTITLE:
2594                 goto the_end;
2595             default:
2596                 break;
2597             }
2598         }
2599     }
2600  the_end:
2601     stream_component_close(is, start_index);
2602     stream_component_open(is, stream_index);
2603 }
2604
2605
2606 static void toggle_full_screen(void)
2607 {
2608 #if defined(__APPLE__) && SDL_VERSION_ATLEAST(1, 2, 14)
2609     /* on OS X the picture queue must be reallocated after toggling full screen */
2610     int i;
2611     for (i = 0; i < VIDEO_PICTURE_QUEUE_SIZE; i++)
2612         cur_stream->pictq[i].reallocate = 1;
2613 #endif
2614     is_full_screen = !is_full_screen;
2615     video_open(cur_stream);
2616 }
2617
2618 static void toggle_pause(void)
2619 {
2620     if (cur_stream)
2621         stream_pause(cur_stream);
2622     step = 0;
2623 }
2624
2625 static void step_to_next_frame(void)
2626 {
2627     if (cur_stream) {
2628         /* if the stream is paused, unpause it, then step */
2629         if (cur_stream->paused)
2630             stream_pause(cur_stream);
2631     }
2632     step = 1;
2633 }
2634
2635 static void toggle_audio_display(void)
2636 {
2637     if (cur_stream) {
2638         int bgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0x00);
2639         cur_stream->show_audio = (cur_stream->show_audio + 1) % 3;
2640         fill_rectangle(screen,
2641                        cur_stream->xleft, cur_stream->ytop, cur_stream->width, cur_stream->height,
2642                        bgcolor);
2643         SDL_UpdateRect(screen, cur_stream->xleft, cur_stream->ytop, cur_stream->width, cur_stream->height);
2644     }
2645 }
2646
2647 /* handle an event sent by the GUI */
2648 static void event_loop(void)
2649 {
2650     SDL_Event event;
2651     double incr, pos, frac;
2652
2653     for (;;) {
2654         double x;
2655         SDL_WaitEvent(&event);
2656         switch (event.type) {
2657         case SDL_KEYDOWN:
2658             if (exit_on_keydown) {
2659                 do_exit();
2660                 break;
2661             }
2662             switch (event.key.keysym.sym) {
2663             case SDLK_ESCAPE:
2664             case SDLK_q:
2665                 do_exit();
2666                 break;
2667             case SDLK_f:
2668                 toggle_full_screen();
2669                 break;
2670             case SDLK_p:
2671             case SDLK_SPACE:
2672                 toggle_pause();
2673                 break;
2674             case SDLK_s: // S: Step to next frame
2675                 step_to_next_frame();
2676                 break;
2677             case SDLK_a:
2678                 if (cur_stream)
2679                     stream_cycle_channel(cur_stream, AVMEDIA_TYPE_AUDIO);
2680                 break;
2681             case SDLK_v:
2682                 if (cur_stream)
2683                     stream_cycle_channel(cur_stream, AVMEDIA_TYPE_VIDEO);
2684                 break;
2685             case SDLK_t:
2686                 if (cur_stream)
2687                     stream_cycle_channel(cur_stream, AVMEDIA_TYPE_SUBTITLE);
2688                 break;
2689             case SDLK_w:
2690                 toggle_audio_display();
2691                 break;
2692             case SDLK_LEFT:
2693                 incr = -10.0;
2694                 goto do_seek;
2695             case SDLK_RIGHT:
2696                 incr = 10.0;
2697                 goto do_seek;
2698             case SDLK_UP:
2699                 incr = 60.0;
2700                 goto do_seek;
2701             case SDLK_DOWN:
2702                 incr = -60.0;
2703             do_seek:
2704                 if (cur_stream) {
2705                     if (seek_by_bytes) {
2706                         if (cur_stream->video_stream >= 0 && cur_stream->video_current_pos >= 0) {
2707                             pos = cur_stream->video_current_pos;
2708                         } else if (cur_stream->audio_stream >= 0 && cur_stream->audio_pkt.pos >= 0) {
2709                             pos = cur_stream->audio_pkt.pos;
2710                         } else
2711                             pos = avio_tell(cur_stream->ic->pb);
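                             /* convert the seek increment from seconds to bytes using
                                the stream bit rate, or guess ~180000 bytes/s if unknown */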
2712                         if (cur_stream->ic->bit_rate)
2713                             incr *= cur_stream->ic->bit_rate / 8.0;
2714                         else
2715                             incr *= 180000.0;
2716                         pos += incr;
2717                         stream_seek(cur_stream, pos, incr, 1);
2718                     } else {
2719                         pos = get_master_clock(cur_stream);
2720                         pos += incr;
2721                         stream_seek(cur_stream, (int64_t)(pos * AV_TIME_BASE), (int64_t)(incr * AV_TIME_BASE), 0);
2722                     }
2723                 }
2724                 break;
2725             default:
2726                 break;
2727             }
2728             break;
2729         case SDL_MOUSEBUTTONDOWN:
2730             if (exit_on_mousedown) {
2731                 do_exit();
2732                 break;
2733             }
2734         case SDL_MOUSEMOTION:
2735             if (event.type == SDL_MOUSEBUTTONDOWN) {
2736                 x = event.button.x;
2737             } else {
2738                 if (event.motion.state != SDL_PRESSED)
2739                     break;
2740                 x = event.motion.x;
2741             }
2742             if (cur_stream) {
2743                 if (seek_by_bytes || cur_stream->ic->duration <= 0) {
2744                     uint64_t size =  avio_size(cur_stream->ic->pb);
2745                     stream_seek(cur_stream, size*x/cur_stream->width, 0, 1);
2746                 } else {
2747                     int64_t ts;
2748                     int ns, hh, mm, ss;
2749                     int tns, thh, tmm, tss;
2750                     tns  = cur_stream->ic->duration / 1000000LL;
2751                     thh  = tns / 3600;
2752                     tmm  = (tns % 3600) / 60;
2753                     tss  = (tns % 60);
2754                     frac = x / cur_stream->width;
2755                     ns   = frac * tns;
2756                     hh   = ns / 3600;
2757                     mm   = (ns % 3600) / 60;
2758                     ss   = (ns % 60);
2759                     fprintf(stderr, "Seek to %2.0f%% (%2d:%02d:%02d) of total duration (%2d:%02d:%02d)       \n", frac*100,
2760                             hh, mm, ss, thh, tmm, tss);
2761                     ts = frac * cur_stream->ic->duration;
2762                     if (cur_stream->ic->start_time != AV_NOPTS_VALUE)
2763                         ts += cur_stream->ic->start_time;
2764                     stream_seek(cur_stream, ts, 0, 0);
2765                 }
2766             }
2767             break;
2768         case SDL_VIDEORESIZE:
2769             if (cur_stream) {
2770                 screen = SDL_SetVideoMode(event.resize.w, event.resize.h, 0,
2771                                           SDL_HWSURFACE|SDL_RESIZABLE|SDL_ASYNCBLIT|SDL_HWACCEL);
2772                 screen_width  = cur_stream->width  = event.resize.w;
2773                 screen_height = cur_stream->height = event.resize.h;
2774             }
2775             break;
2776         case SDL_QUIT:
2777         case FF_QUIT_EVENT:
2778             do_exit();
2779             break;
2780         case FF_ALLOC_EVENT:
2781             video_open(event.user.data1);
2782             alloc_picture(event.user.data1);
2783             break;
2784         case FF_REFRESH_EVENT:
2785             video_refresh_timer(event.user.data1);
2786             cur_stream->refresh = 0;
2787             break;
2788         default:
2789             break;
2790         }
2791     }
2792 }
2793
2794 static int opt_frame_size(void *optctx, const char *opt, const char *arg)
2795 {
2796     av_log(NULL, AV_LOG_ERROR,
2797            "Option '%s' has been removed, use private format options instead\n", opt);
2798     return AVERROR(EINVAL);
2799 }
2800
2801 static int opt_width(void *optctx, const char *opt, const char *arg)
2802 {
2803     screen_width = parse_number_or_die(opt, arg, OPT_INT64, 1, INT_MAX);
2804     return 0;
2805 }
2806
2807 static int opt_height(void *optctx, const char *opt, const char *arg)
2808 {
2809     screen_height = parse_number_or_die(opt, arg, OPT_INT64, 1, INT_MAX);
2810     return 0;
2811 }
2812
2813 static int opt_format(void *optctx, const char *opt, const char *arg)
2814 {
2815     file_iformat = av_find_input_format(arg);
2816     if (!file_iformat) {
2817         fprintf(stderr, "Unknown input format: %s\n", arg);
2818         return AVERROR(EINVAL);
2819     }
2820     return 0;
2821 }
2822
2823 static int opt_frame_pix_fmt(void *optctx, const char *opt, const char *arg)
2824 {
2825     av_log(NULL, AV_LOG_ERROR,
2826            "Option '%s' has been removed, use private format options instead\n", opt);
2827     return AVERROR(EINVAL);
2828 }
2829
2830 static int opt_sync(void *optctx, const char *opt, const char *arg)
2831 {
2832     if (!strcmp(arg, "audio"))
2833         av_sync_type = AV_SYNC_AUDIO_MASTER;
2834     else if (!strcmp(arg, "video"))
2835         av_sync_type = AV_SYNC_VIDEO_MASTER;
2836     else if (!strcmp(arg, "ext"))
2837         av_sync_type = AV_SYNC_EXTERNAL_CLOCK;
2838     else {
2839         fprintf(stderr, "Unknown value for %s: %s\n", opt, arg);
2840         exit(1);
2841     }
2842     return 0;
2843 }
2844
2845 static int opt_seek(void *optctx, const char *opt, const char *arg)
2846 {
2847     start_time = parse_time_or_die(opt, arg, 1);
2848     return 0;
2849 }
2850
2851 static int opt_duration(void *optctx, const char *opt, const char *arg)
2852 {
2853     duration = parse_time_or_die(opt, arg, 1);
2854     return 0;
2855 }
2856
2857 static int opt_debug(void *optctx, const char *opt, const char *arg)
2858 {
2859     av_log_set_level(99);
2860     debug = parse_number_or_die(opt, arg, OPT_INT64, 0, INT_MAX);
2861     return 0;
2862 }
2863
2864 static int opt_vismv(void *optctx, const char *opt, const char *arg)
2865 {
2866     debug_mv = parse_number_or_die(opt, arg, OPT_INT64, INT_MIN, INT_MAX);
2867     return 0;
2868 }
2869
2870 static const OptionDef options[] = {
2871 #include "cmdutils_common_opts.h"
2872     { "x", HAS_ARG, { .func_arg = opt_width }, "force displayed width", "width" },
2873     { "y", HAS_ARG, { .func_arg = opt_height }, "force displayed height", "height" },
2874     { "s", HAS_ARG | OPT_VIDEO, { .func_arg = opt_frame_size }, "set frame size (WxH or abbreviation)", "size" },
2875     { "fs", OPT_BOOL, { &is_full_screen }, "force full screen" },
2876     { "an", OPT_BOOL, { &audio_disable }, "disable audio" },
2877     { "vn", OPT_BOOL, { &video_disable }, "disable video" },
2878     { "ast", OPT_INT | HAS_ARG | OPT_EXPERT, { &wanted_stream[AVMEDIA_TYPE_AUDIO] }, "select desired audio stream", "stream_number" },
2879     { "vst", OPT_INT | HAS_ARG | OPT_EXPERT, { &wanted_stream[AVMEDIA_TYPE_VIDEO] }, "select desired video stream", "stream_number" },
2880     { "sst", OPT_INT | HAS_ARG | OPT_EXPERT, { &wanted_stream[AVMEDIA_TYPE_SUBTITLE] }, "select desired subtitle stream", "stream_number" },
2881     { "ss", HAS_ARG, { .func_arg = opt_seek }, "seek to a given position in seconds", "pos" },
2882     { "t", HAS_ARG, { .func_arg = opt_duration }, "play \"duration\" seconds of audio/video", "duration" },
2883     { "bytes", OPT_INT | HAS_ARG, { &seek_by_bytes }, "seek by bytes 0=off 1=on -1=auto", "val" },
2884     { "nodisp", OPT_BOOL, { &display_disable }, "disable graphical display" },
2885     { "f", HAS_ARG, { .func_arg = opt_format }, "force format", "fmt" },
2886     { "pix_fmt", HAS_ARG | OPT_EXPERT | OPT_VIDEO, { .func_arg = opt_frame_pix_fmt }, "set pixel format", "format" },
2887     { "stats", OPT_BOOL | OPT_EXPERT, { &show_status }, "show status", "" },
2888     { "debug", HAS_ARG | OPT_EXPERT, { .func_arg = opt_debug }, "print specific debug info", "" },
2889     { "bug", OPT_INT | HAS_ARG | OPT_EXPERT, { &workaround_bugs }, "workaround bugs", "" },
2890     { "vismv", HAS_ARG | OPT_EXPERT, { .func_arg = opt_vismv }, "visualize motion vectors", "" },
2891     { "fast", OPT_BOOL | OPT_EXPERT, { &fast }, "non spec compliant optimizations", "" },
2892     { "genpts", OPT_BOOL | OPT_EXPERT, { &genpts }, "generate pts", "" },
2893     { "drp", OPT_INT | HAS_ARG | OPT_EXPERT, { &decoder_reorder_pts }, "let decoder reorder pts 0=off 1=on -1=auto", ""},
2894     { "skiploop", OPT_INT | HAS_ARG | OPT_EXPERT, { &skip_loop_filter }, "", "" },
2895     { "skipframe", OPT_INT | HAS_ARG | OPT_EXPERT, { &skip_frame }, "", "" },
2896     { "skipidct", OPT_INT | HAS_ARG | OPT_EXPERT, { &skip_idct }, "", "" },
2897     { "idct", OPT_INT | HAS_ARG | OPT_EXPERT, { &idct }, "set idct algo",  "algo" },
2898     { "ec", OPT_INT | HAS_ARG | OPT_EXPERT, { &error_concealment }, "set error concealment options",  "bit_mask" },
2899     { "sync", HAS_ARG | OPT_EXPERT, { .func_arg = opt_sync }, "set audio-video sync. type (type=audio/video/ext)", "type" },
2900     { "autoexit", OPT_BOOL | OPT_EXPERT, { &autoexit }, "exit at the end", "" },
2901     { "exitonkeydown", OPT_BOOL | OPT_EXPERT, { &exit_on_keydown }, "exit on key down", "" },
2902     { "exitonmousedown", OPT_BOOL | OPT_EXPERT, { &exit_on_mousedown }, "exit on mouse down", "" },
2903     { "loop", OPT_INT | HAS_ARG | OPT_EXPERT, { &loop }, "set number of times the playback shall be looped", "loop count" },
2904     { "framedrop", OPT_BOOL | OPT_EXPERT, { &framedrop }, "drop frames when cpu is too slow", "" },
2905     { "infbuf", OPT_BOOL | OPT_EXPERT, { &infinite_buffer }, "don't limit the input buffer size (useful with realtime streams)", "" },
2906     { "window_title", OPT_STRING | HAS_ARG, { &window_title }, "set window title", "window title" },
2907 #if CONFIG_AVFILTER
2908     { "vf", OPT_STRING | HAS_ARG, { &vfilters }, "video filters", "filter list" },
2909 #endif
2910     { "rdftspeed", OPT_INT | HAS_ARG| OPT_AUDIO | OPT_EXPERT, { &rdftspeed }, "rdft speed", "msecs" },
2911     { "default", HAS_ARG | OPT_AUDIO | OPT_VIDEO | OPT_EXPERT, { opt_default }, "generic catch all option", "" },
2912     { "i", 0, { NULL }, "avconv compatibility dummy option", ""},
2913     { NULL, },
2914 };
2915
2916 static void show_usage(void)
2917 {
2918     printf("Simple media player\n");
2919     printf("usage: %s [options] input_file\n", program_name);
2920     printf("\n");
2921 }
2922
2923 void show_help_default(const char *opt, const char *arg)
2924 {
2925     av_log_set_callback(log_callback_help);
2926     show_usage();
2927     show_help_options(options, "Main options:", 0, OPT_EXPERT, 0);
2928     show_help_options(options, "Advanced options:", OPT_EXPERT, 0, 0);
2929     printf("\n");
2930     show_help_children(avcodec_get_class(), AV_OPT_FLAG_DECODING_PARAM);
2931     show_help_children(avformat_get_class(), AV_OPT_FLAG_DECODING_PARAM);
2932 #if !CONFIG_AVFILTER
2933     show_help_children(sws_get_class(), AV_OPT_FLAG_ENCODING_PARAM);
2934 #endif
2935     printf("\nWhile playing:\n"
2936            "q, ESC              quit\n"
2937            "f                   toggle full screen\n"
2938            "p, SPC              pause\n"
2939            "a                   cycle audio channel\n"
2940            "v                   cycle video channel\n"
2941            "t                   cycle subtitle channel\n"
2942            "w                   show audio waves\n"
2943            "s                   activate frame-step mode\n"
2944            "left/right          seek backward/forward 10 seconds\n"
2945            "down/up             seek backward/forward 1 minute\n"
2946            "mouse click         seek to percentage in file corresponding to fraction of width\n"
2947            );
2948 }
2949
2950 static void opt_input_file(void *optctx, const char *filename)
2951 {
2952     if (input_filename) {
2953         fprintf(stderr, "Argument '%s' provided as input filename, but '%s' was already specified.\n",
2954                 filename, input_filename);
2955         exit(1);
2956     }
2957     if (!strcmp(filename, "-"))
2958         filename = "pipe:";
2959     input_filename = filename;
2960 }
2961
2962 /* program entry point */
2963 int main(int argc, char **argv)
2964 {
2965     int flags;
2966
2967     av_log_set_flags(AV_LOG_SKIP_REPEATED);
2968     parse_loglevel(argc, argv, options);
2969
2970     /* register all codecs, demuxers and protocols */
2971     avcodec_register_all();
2972 #if CONFIG_AVDEVICE
2973     avdevice_register_all();
2974 #endif
2975 #if CONFIG_AVFILTER
2976     avfilter_register_all();
2977 #endif
2978     av_register_all();
2979     avformat_network_init();
2980
2981     init_opts();
2982
2983     show_banner();
2984
2985     parse_options(NULL, argc, argv, options, opt_input_file);
2986
2987     if (!input_filename) {
2988         show_usage();
2989         fprintf(stderr, "An input file must be specified\n");
2990         fprintf(stderr, "Use -h to get full help or, even better, run 'man %s'\n", program_name);
2991         exit(1);
2992     }
2993
2994     if (display_disable) {
2995         video_disable = 1;
2996     }
2997     flags = SDL_INIT_VIDEO | SDL_INIT_AUDIO | SDL_INIT_TIMER;
2998 #if !defined(__MINGW32__) && !defined(__APPLE__)
2999     flags |= SDL_INIT_EVENTTHREAD; /* Not supported on Windows or Mac OS X */
3000 #endif
3001     if (SDL_Init (flags)) {
3002         fprintf(stderr, "Could not initialize SDL - %s\n", SDL_GetError());
3003         exit(1);
3004     }
3005
3006     if (!display_disable) {
3007 #if HAVE_SDL_VIDEO_SIZE
3008         const SDL_VideoInfo *vi = SDL_GetVideoInfo();
3009         fs_screen_width = vi->current_w;
3010         fs_screen_height = vi->current_h;
3011 #endif
3012     }
3013
3014     SDL_EventState(SDL_ACTIVEEVENT, SDL_IGNORE);
3015     SDL_EventState(SDL_SYSWMEVENT, SDL_IGNORE);
3016     SDL_EventState(SDL_USEREVENT, SDL_IGNORE);
3017
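         /* flush_pkt is a sentinel: it is queued after every seek so the decoding
            threads know to flush their codec buffers */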
3018     av_init_packet(&flush_pkt);
3019     flush_pkt.data = (uint8_t *)"FLUSH";
3020
3021     cur_stream = stream_open(input_filename, file_iformat);
3022
3023     event_loop();
3024
3025     /* never returns */
3026
3027     return 0;
3028 }