1 /*
2  * avplay : Simple Media Player based on the Libav libraries
3  * Copyright (c) 2003 Fabrice Bellard
4  *
5  * This file is part of Libav.
6  *
7  * Libav is free software; you can redistribute it and/or
8  * modify it under the terms of the GNU Lesser General Public
9  * License as published by the Free Software Foundation; either
10  * version 2.1 of the License, or (at your option) any later version.
11  *
12  * Libav is distributed in the hope that it will be useful,
13  * but WITHOUT ANY WARRANTY; without even the implied warranty of
14  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
15  * Lesser General Public License for more details.
16  *
17  * You should have received a copy of the GNU Lesser General Public
18  * License along with Libav; if not, write to the Free Software
19  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
20  */
21
22 #include "config.h"
23 #include <inttypes.h>
24 #include <math.h>
25 #include <limits.h>
26 #include "libavutil/avstring.h"
27 #include "libavutil/colorspace.h"
28 #include "libavutil/mathematics.h"
29 #include "libavutil/pixdesc.h"
30 #include "libavutil/imgutils.h"
31 #include "libavutil/dict.h"
32 #include "libavutil/parseutils.h"
33 #include "libavutil/samplefmt.h"
34 #include "libavutil/time.h"
35 #include "libavformat/avformat.h"
36 #include "libavdevice/avdevice.h"
37 #include "libswscale/swscale.h"
38 #include "libavresample/avresample.h"
39 #include "libavutil/opt.h"
40 #include "libavcodec/avfft.h"
41
42 #if CONFIG_AVFILTER
43 # include "libavfilter/avfilter.h"
44 # include "libavfilter/avfiltergraph.h"
45 # include "libavfilter/buffersink.h"
46 # include "libavfilter/buffersrc.h"
47 #endif
48
49 #include "cmdutils.h"
50
51 #include <SDL.h>
52 #include <SDL_thread.h>
53
54 #ifdef __MINGW32__
55 #undef main /* We don't want SDL to override our main() */
56 #endif
57
58 #include <assert.h>
59
60 const char program_name[] = "avplay";
61 const int program_birth_year = 2003;
62
63 #define MAX_QUEUE_SIZE (15 * 1024 * 1024)
64 #define MIN_AUDIOQ_SIZE (20 * 16 * 1024)
65 #define MIN_FRAMES 5
66
67 /* SDL audio buffer size, in samples. Should be small to have precise
68    A/V sync as SDL does not have hardware buffer fullness info. */
69 #define SDL_AUDIO_BUFFER_SIZE 1024
70
71 /* no AV sync correction is done if below the AV sync threshold */
72 #define AV_SYNC_THRESHOLD 0.01
73 /* no AV correction is done if too big error */
74 #define AV_NOSYNC_THRESHOLD 10.0
75
76 #define FRAME_SKIP_FACTOR 0.05
77
78 /* maximum audio speed change to get correct sync */
79 #define SAMPLE_CORRECTION_PERCENT_MAX 10
80
81 /* we use about AUDIO_DIFF_AVG_NB A-V differences to make the average */
82 #define AUDIO_DIFF_AVG_NB   20
83
84 /* NOTE: the size must be big enough to compensate for the hardware audio buffer size */
85 #define SAMPLE_ARRAY_SIZE (2 * 65536)
86
87 static int64_t sws_flags = SWS_BICUBIC;
88
89 typedef struct PacketQueue {
90     AVPacketList *first_pkt, *last_pkt;
91     int nb_packets;
92     int size;
93     int abort_request;
94     SDL_mutex *mutex;
95     SDL_cond *cond;
96 } PacketQueue;
97
98 #define VIDEO_PICTURE_QUEUE_SIZE 2
99 #define SUBPICTURE_QUEUE_SIZE 4
100
101 typedef struct VideoPicture {
102     double pts;             // presentation timestamp for this picture
103     double target_clock;    // av_gettime() time at which this should be displayed ideally
104     int64_t pos;            // byte position in file
105     SDL_Overlay *bmp;
106     int width, height; /* source height & width */
107     int allocated;
108     int reallocate;
109     enum AVPixelFormat pix_fmt;
110
111 #if CONFIG_AVFILTER
112     AVFilterBufferRef *picref;
113 #endif
114 } VideoPicture;
115
116 typedef struct SubPicture {
117     double pts; /* presentation time stamp for this picture */
118     AVSubtitle sub;
119 } SubPicture;
120
121 enum {
122     AV_SYNC_AUDIO_MASTER, /* default choice */
123     AV_SYNC_VIDEO_MASTER,
124     AV_SYNC_EXTERNAL_CLOCK, /* synchronize to an external clock */
125 };
126
127 typedef struct VideoState {
128     SDL_Thread *parse_tid;
129     SDL_Thread *video_tid;
130     SDL_Thread *refresh_tid;
131     AVInputFormat *iformat;
132     int no_background;
133     int abort_request;
134     int paused;
135     int last_paused;
136     int seek_req;
137     int seek_flags;
138     int64_t seek_pos;
139     int64_t seek_rel;
140     int read_pause_return;
141     AVFormatContext *ic;
142
143     int audio_stream;
144
145     int av_sync_type;
146     double external_clock; /* external clock base */
147     int64_t external_clock_time;
148
149     double audio_clock;
150     double audio_diff_cum; /* used for AV difference average computation */
151     double audio_diff_avg_coef;
152     double audio_diff_threshold;
153     int audio_diff_avg_count;
154     AVStream *audio_st;
155     PacketQueue audioq;
156     int audio_hw_buf_size;
157     uint8_t silence_buf[SDL_AUDIO_BUFFER_SIZE];
158     uint8_t *audio_buf;
159     uint8_t *audio_buf1;
160     unsigned int audio_buf_size; /* in bytes */
161     int audio_buf_index; /* in bytes */
162     AVPacket audio_pkt_temp;
163     AVPacket audio_pkt;
164     enum AVSampleFormat sdl_sample_fmt;
165     uint64_t sdl_channel_layout;
166     int sdl_channels;
167     int sdl_sample_rate;
168     enum AVSampleFormat resample_sample_fmt;
169     uint64_t resample_channel_layout;
170     int resample_sample_rate;
171     AVAudioResampleContext *avr;
172     AVFrame *frame;
173
174     int show_audio; /* if true, display audio samples */
175     int16_t sample_array[SAMPLE_ARRAY_SIZE];
176     int sample_array_index;
177     int last_i_start;
178     RDFTContext *rdft;
179     int rdft_bits;
180     FFTSample *rdft_data;
181     int xpos;
182
183     SDL_Thread *subtitle_tid;
184     int subtitle_stream;
185     int subtitle_stream_changed;
186     AVStream *subtitle_st;
187     PacketQueue subtitleq;
188     SubPicture subpq[SUBPICTURE_QUEUE_SIZE];
189     int subpq_size, subpq_rindex, subpq_windex;
190     SDL_mutex *subpq_mutex;
191     SDL_cond *subpq_cond;
192
193     double frame_timer;
194     double frame_last_pts;
195     double frame_last_delay;
196     double video_clock;             // pts of last decoded frame / predicted pts of next decoded frame
197     int video_stream;
198     AVStream *video_st;
199     PacketQueue videoq;
200     double video_current_pts;       // current displayed pts (different from video_clock if frame fifos are used)
201     double video_current_pts_drift; // video_current_pts - time (av_gettime) at which we updated video_current_pts - used to have running video pts
202     int64_t video_current_pos;      // current displayed file pos
203     VideoPicture pictq[VIDEO_PICTURE_QUEUE_SIZE];
204     int pictq_size, pictq_rindex, pictq_windex;
205     SDL_mutex *pictq_mutex;
206     SDL_cond *pictq_cond;
207 #if !CONFIG_AVFILTER
208     struct SwsContext *img_convert_ctx;
209 #endif
210
211     //    QETimer *video_timer;
212     char filename[1024];
213     int width, height, xleft, ytop;
214
215     PtsCorrectionContext pts_ctx;
216
217 #if CONFIG_AVFILTER
218     AVFilterContext *in_video_filter;   // the first filter in the video chain
219     AVFilterContext *out_video_filter;  // the last filter in the video chain
220     int use_dr1;
221     FrameBuffer *buffer_pool;
222 #endif
223
224     float skip_frames;
225     float skip_frames_index;
226     int refresh;
227 } VideoState;
228
229 /* options specified by the user */
230 static AVInputFormat *file_iformat;
231 static const char *input_filename;
232 static const char *window_title;
233 static int fs_screen_width;
234 static int fs_screen_height;
235 static int screen_width  = 0;
236 static int screen_height = 0;
237 static int audio_disable;
238 static int video_disable;
239 static int wanted_stream[AVMEDIA_TYPE_NB] = {
240     [AVMEDIA_TYPE_AUDIO]    = -1,
241     [AVMEDIA_TYPE_VIDEO]    = -1,
242     [AVMEDIA_TYPE_SUBTITLE] = -1,
243 };
244 static int seek_by_bytes = -1;
245 static int display_disable;
246 static int show_status = 1;
247 static int av_sync_type = AV_SYNC_AUDIO_MASTER;
248 static int64_t start_time = AV_NOPTS_VALUE;
249 static int64_t duration = AV_NOPTS_VALUE;
250 static int debug = 0;
251 static int debug_mv = 0;
252 static int step = 0;
253 static int workaround_bugs = 1;
254 static int fast = 0;
255 static int genpts = 0;
256 static int idct = FF_IDCT_AUTO;
257 static enum AVDiscard skip_frame       = AVDISCARD_DEFAULT;
258 static enum AVDiscard skip_idct        = AVDISCARD_DEFAULT;
259 static enum AVDiscard skip_loop_filter = AVDISCARD_DEFAULT;
260 static int error_concealment = 3;
261 static int decoder_reorder_pts = -1;
262 static int autoexit;
263 static int exit_on_keydown;
264 static int exit_on_mousedown;
265 static int loop = 1;
266 static int framedrop = 1;
267 static int infinite_buffer = 0;
268
269 static int rdftspeed = 20;
270 #if CONFIG_AVFILTER
271 static char *vfilters = NULL;
272 #endif
273
274 /* current context */
275 static int is_full_screen;
276 static VideoState *cur_stream;
277 static int64_t audio_callback_time;
278
279 static AVPacket flush_pkt;
280
281 #define FF_ALLOC_EVENT   (SDL_USEREVENT)
282 #define FF_REFRESH_EVENT (SDL_USEREVENT + 1)
283 #define FF_QUIT_EVENT    (SDL_USEREVENT + 2)
284
285 static SDL_Surface *screen;
286
287 static int packet_queue_put(PacketQueue *q, AVPacket *pkt);
288
289 /* packet queue handling */
290 static void packet_queue_init(PacketQueue *q)
291 {
292     memset(q, 0, sizeof(PacketQueue));
293     q->mutex = SDL_CreateMutex();
294     q->cond = SDL_CreateCond();
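    /* start the queue with the global flush packet so consumers reset their
       decoder state before the first real packet */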
295     packet_queue_put(q, &flush_pkt);
296 }
297
298 static void packet_queue_flush(PacketQueue *q)
299 {
300     AVPacketList *pkt, *pkt1;
301
302     SDL_LockMutex(q->mutex);
303     for (pkt = q->first_pkt; pkt != NULL; pkt = pkt1) {
304         pkt1 = pkt->next;
305         av_free_packet(&pkt->pkt);
306         av_freep(&pkt);
307     }
308     q->last_pkt = NULL;
309     q->first_pkt = NULL;
310     q->nb_packets = 0;
311     q->size = 0;
312     SDL_UnlockMutex(q->mutex);
313 }
314
315 static void packet_queue_end(PacketQueue *q)
316 {
317     packet_queue_flush(q);
318     SDL_DestroyMutex(q->mutex);
319     SDL_DestroyCond(q->cond);
320 }
321
322 static int packet_queue_put(PacketQueue *q, AVPacket *pkt)
323 {
324     AVPacketList *pkt1;
325
326     /* duplicate the packet */
327     if (pkt != &flush_pkt && av_dup_packet(pkt) < 0)
328         return -1;
329
330     pkt1 = av_malloc(sizeof(AVPacketList));
331     if (!pkt1)
332         return -1;
333     pkt1->pkt = *pkt;
334     pkt1->next = NULL;
335
336
337     SDL_LockMutex(q->mutex);
338
339     if (!q->last_pkt)
340
341         q->first_pkt = pkt1;
342     else
343         q->last_pkt->next = pkt1;
344     q->last_pkt = pkt1;
345     q->nb_packets++;
346     q->size += pkt1->pkt.size + sizeof(*pkt1);
347     /* XXX: should duplicate packet data in DV case */
348     SDL_CondSignal(q->cond);
349
350     SDL_UnlockMutex(q->mutex);
351     return 0;
352 }
353
354 static void packet_queue_abort(PacketQueue *q)
355 {
356     SDL_LockMutex(q->mutex);
357
358     q->abort_request = 1;
359
360     SDL_CondSignal(q->cond);
361
362     SDL_UnlockMutex(q->mutex);
363 }
364
365 /* return < 0 if aborted, 0 if no packet and > 0 if packet.  */
366 static int packet_queue_get(PacketQueue *q, AVPacket *pkt, int block)
367 {
368     AVPacketList *pkt1;
369     int ret;
370
371     SDL_LockMutex(q->mutex);
372
373     for (;;) {
374         if (q->abort_request) {
375             ret = -1;
376             break;
377         }
378
379         pkt1 = q->first_pkt;
380         if (pkt1) {
381             q->first_pkt = pkt1->next;
382             if (!q->first_pkt)
383                 q->last_pkt = NULL;
384             q->nb_packets--;
385             q->size -= pkt1->pkt.size + sizeof(*pkt1);
386             *pkt = pkt1->pkt;
387             av_free(pkt1);
388             ret = 1;
389             break;
390         } else if (!block) {
391             ret = 0;
392             break;
393         } else {
394             SDL_CondWait(q->cond, q->mutex);
395         }
396     }
397     SDL_UnlockMutex(q->mutex);
398     return ret;
399 }
400
401 static inline void fill_rectangle(SDL_Surface *screen,
402                                   int x, int y, int w, int h, int color)
403 {
404     SDL_Rect rect;
405     rect.x = x;
406     rect.y = y;
407     rect.w = w;
408     rect.h = h;
409     SDL_FillRect(screen, &rect, color);
410 }
411
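/* alpha-blend 'newp' into 'oldp'; 'newp' may be a sum of 2^s samples, which
   the 's' shift compensates for */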
412 #define ALPHA_BLEND(a, oldp, newp, s)\
413 ((((oldp << s) * (255 - (a))) + (newp * (a))) / (255 << s))
414
415 #define RGBA_IN(r, g, b, a, s)\
416 {\
417     unsigned int v = ((const uint32_t *)(s))[0];\
418     a = (v >> 24) & 0xff;\
419     r = (v >> 16) & 0xff;\
420     g = (v >> 8) & 0xff;\
421     b = v & 0xff;\
422 }
423
424 #define YUVA_IN(y, u, v, a, s, pal)\
425 {\
426     unsigned int val = ((const uint32_t *)(pal))[*(const uint8_t*)(s)];\
427     a = (val >> 24) & 0xff;\
428     y = (val >> 16) & 0xff;\
429     u = (val >> 8) & 0xff;\
430     v = val & 0xff;\
431 }
432
433 #define YUVA_OUT(d, y, u, v, a)\
434 {\
435     ((uint32_t *)(d))[0] = (a << 24) | (y << 16) | (u << 8) | v;\
436 }
437
438
439 #define BPP 1
440
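/* blend a palettized subtitle rectangle onto a YUV 4:2:0 picture; the palette
   holds YUVA entries and chroma is averaged over up to 2x2 luma samples */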
441 static void blend_subrect(AVPicture *dst, const AVSubtitleRect *rect, int imgw, int imgh)
442 {
443     int wrap, wrap3, width2, skip2;
444     int y, u, v, a, u1, v1, a1, w, h;
445     uint8_t *lum, *cb, *cr;
446     const uint8_t *p;
447     const uint32_t *pal;
448     int dstx, dsty, dstw, dsth;
449
450     dstw = av_clip(rect->w, 0, imgw);
451     dsth = av_clip(rect->h, 0, imgh);
452     dstx = av_clip(rect->x, 0, imgw - dstw);
453     dsty = av_clip(rect->y, 0, imgh - dsth);
454     lum = dst->data[0] + dsty * dst->linesize[0];
455     cb  = dst->data[1] + (dsty >> 1) * dst->linesize[1];
456     cr  = dst->data[2] + (dsty >> 1) * dst->linesize[2];
457
458     width2 = ((dstw + 1) >> 1) + (dstx & ~dstw & 1);
459     skip2 = dstx >> 1;
460     wrap = dst->linesize[0];
461     wrap3 = rect->pict.linesize[0];
462     p = rect->pict.data[0];
463     pal = (const uint32_t *)rect->pict.data[1];  /* Now in YCrCb! */
464
465     if (dsty & 1) {
466         lum += dstx;
467         cb += skip2;
468         cr += skip2;
469
470         if (dstx & 1) {
471             YUVA_IN(y, u, v, a, p, pal);
472             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
473             cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
474             cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
475             cb++;
476             cr++;
477             lum++;
478             p += BPP;
479         }
480         for (w = dstw - (dstx & 1); w >= 2; w -= 2) {
481             YUVA_IN(y, u, v, a, p, pal);
482             u1 = u;
483             v1 = v;
484             a1 = a;
485             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
486
487             YUVA_IN(y, u, v, a, p + BPP, pal);
488             u1 += u;
489             v1 += v;
490             a1 += a;
491             lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
492             cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
493             cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
494             cb++;
495             cr++;
496             p += 2 * BPP;
497             lum += 2;
498         }
499         if (w) {
500             YUVA_IN(y, u, v, a, p, pal);
501             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
502             cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
503             cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
504             p++;
505             lum++;
506         }
507         p += wrap3 - dstw * BPP;
508         lum += wrap - dstw - dstx;
509         cb += dst->linesize[1] - width2 - skip2;
510         cr += dst->linesize[2] - width2 - skip2;
511     }
512     for (h = dsth - (dsty & 1); h >= 2; h -= 2) {
513         lum += dstx;
514         cb += skip2;
515         cr += skip2;
516
517         if (dstx & 1) {
518             YUVA_IN(y, u, v, a, p, pal);
519             u1 = u;
520             v1 = v;
521             a1 = a;
522             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
523             p += wrap3;
524             lum += wrap;
525             YUVA_IN(y, u, v, a, p, pal);
526             u1 += u;
527             v1 += v;
528             a1 += a;
529             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
530             cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
531             cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
532             cb++;
533             cr++;
534             p += -wrap3 + BPP;
535             lum += -wrap + 1;
536         }
537         for (w = dstw - (dstx & 1); w >= 2; w -= 2) {
538             YUVA_IN(y, u, v, a, p, pal);
539             u1 = u;
540             v1 = v;
541             a1 = a;
542             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
543
544             YUVA_IN(y, u, v, a, p + BPP, pal);
545             u1 += u;
546             v1 += v;
547             a1 += a;
548             lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
549             p += wrap3;
550             lum += wrap;
551
552             YUVA_IN(y, u, v, a, p, pal);
553             u1 += u;
554             v1 += v;
555             a1 += a;
556             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
557
558             YUVA_IN(y, u, v, a, p + BPP, pal);
559             u1 += u;
560             v1 += v;
561             a1 += a;
562             lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
563
564             cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 2);
565             cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 2);
566
567             cb++;
568             cr++;
569             p += -wrap3 + 2 * BPP;
570             lum += -wrap + 2;
571         }
572         if (w) {
573             YUVA_IN(y, u, v, a, p, pal);
574             u1 = u;
575             v1 = v;
576             a1 = a;
577             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
578             p += wrap3;
579             lum += wrap;
580             YUVA_IN(y, u, v, a, p, pal);
581             u1 += u;
582             v1 += v;
583             a1 += a;
584             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
585             cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
586             cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
587             cb++;
588             cr++;
589             p += -wrap3 + BPP;
590             lum += -wrap + 1;
591         }
592         p += wrap3 + (wrap3 - dstw * BPP);
593         lum += wrap + (wrap - dstw - dstx);
594         cb += dst->linesize[1] - width2 - skip2;
595         cr += dst->linesize[2] - width2 - skip2;
596     }
597     /* handle odd height */
598     if (h) {
599         lum += dstx;
600         cb += skip2;
601         cr += skip2;
602
603         if (dstx & 1) {
604             YUVA_IN(y, u, v, a, p, pal);
605             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
606             cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
607             cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
608             cb++;
609             cr++;
610             lum++;
611             p += BPP;
612         }
613         for (w = dstw - (dstx & 1); w >= 2; w -= 2) {
614             YUVA_IN(y, u, v, a, p, pal);
615             u1 = u;
616             v1 = v;
617             a1 = a;
618             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
619
620             YUVA_IN(y, u, v, a, p + BPP, pal);
621             u1 += u;
622             v1 += v;
623             a1 += a;
624             lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
625             cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u, 1);
626             cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v, 1);
627             cb++;
628             cr++;
629             p += 2 * BPP;
630             lum += 2;
631         }
632         if (w) {
633             YUVA_IN(y, u, v, a, p, pal);
634             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
635             cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
636             cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
637         }
638     }
639 }
640
641 static void free_subpicture(SubPicture *sp)
642 {
643     avsubtitle_free(&sp->sub);
644 }
645
646 static void video_image_display(VideoState *is)
647 {
648     VideoPicture *vp;
649     SubPicture *sp;
650     AVPicture pict;
651     float aspect_ratio;
652     int width, height, x, y;
653     SDL_Rect rect;
654     int i;
655
656     vp = &is->pictq[is->pictq_rindex];
657     if (vp->bmp) {
658 #if CONFIG_AVFILTER
659          if (vp->picref->video->pixel_aspect.num == 0)
660              aspect_ratio = 0;
661          else
662              aspect_ratio = av_q2d(vp->picref->video->pixel_aspect);
663 #else
664
665         /* XXX: use variable in the frame */
666         if (is->video_st->sample_aspect_ratio.num)
667             aspect_ratio = av_q2d(is->video_st->sample_aspect_ratio);
668         else if (is->video_st->codec->sample_aspect_ratio.num)
669             aspect_ratio = av_q2d(is->video_st->codec->sample_aspect_ratio);
670         else
671             aspect_ratio = 0;
672 #endif
673         if (aspect_ratio <= 0.0)
674             aspect_ratio = 1.0;
675         aspect_ratio *= (float)vp->width / (float)vp->height;
676
677         if (is->subtitle_st)
678         {
679             if (is->subpq_size > 0)
680             {
681                 sp = &is->subpq[is->subpq_rindex];
682
683                 if (vp->pts >= sp->pts + ((float) sp->sub.start_display_time / 1000))
684                 {
685                     SDL_LockYUVOverlay (vp->bmp);
686
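                    /* SDL YV12 overlays store the V plane before the U plane,
                       hence the swapped plane indices */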
687                     pict.data[0] = vp->bmp->pixels[0];
688                     pict.data[1] = vp->bmp->pixels[2];
689                     pict.data[2] = vp->bmp->pixels[1];
690
691                     pict.linesize[0] = vp->bmp->pitches[0];
692                     pict.linesize[1] = vp->bmp->pitches[2];
693                     pict.linesize[2] = vp->bmp->pitches[1];
694
695                     for (i = 0; i < sp->sub.num_rects; i++)
696                         blend_subrect(&pict, sp->sub.rects[i],
697                                       vp->bmp->w, vp->bmp->h);
698
699                     SDL_UnlockYUVOverlay (vp->bmp);
700                 }
701             }
702         }
703
704
705         /* XXX: we assume the screen has a 1:1 pixel aspect ratio */
706         height = is->height;
707         width = ((int)rint(height * aspect_ratio)) & ~1;
708         if (width > is->width) {
709             width = is->width;
710             height = ((int)rint(width / aspect_ratio)) & ~1;
711         }
712         x = (is->width - width) / 2;
713         y = (is->height - height) / 2;
714         is->no_background = 0;
715         rect.x = is->xleft + x;
716         rect.y = is->ytop  + y;
717         rect.w = width;
718         rect.h = height;
719         SDL_DisplayYUVOverlay(vp->bmp, &rect);
720     }
721 }
722
723 /* get the current audio output buffer size, in bytes. With SDL, we
724    cannot get precise information */
725 static int audio_write_get_buf_size(VideoState *is)
726 {
727     return is->audio_buf_size - is->audio_buf_index;
728 }
729
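/* positive modulo: unlike C's %, the result is always in [0, b) */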
730 static inline int compute_mod(int a, int b)
731 {
732     a = a % b;
733     if (a >= 0)
734         return a;
735     else
736         return a + b;
737 }
738
739 static void video_audio_display(VideoState *s)
740 {
741     int i, i_start, x, y1, y, ys, delay, n, nb_display_channels;
742     int ch, channels, h, h2, bgcolor, fgcolor;
743     int64_t time_diff;
744     int rdft_bits, nb_freq;
745
746     for (rdft_bits = 1; (1 << rdft_bits) < 2 * s->height; rdft_bits++)
747         ;
748     nb_freq = 1 << (rdft_bits - 1);
749
750     /* compute display index: center on the samples currently being output */
751     channels = s->sdl_channels;
752     nb_display_channels = channels;
753     if (!s->paused) {
754         int data_used = s->show_audio == 1 ? s->width : (2 * nb_freq);
755         n = 2 * channels;
756         delay = audio_write_get_buf_size(s);
757         delay /= n;
758
759         /* to be more precise, we take into account the time spent since
760            the last buffer computation */
761         if (audio_callback_time) {
762             time_diff = av_gettime() - audio_callback_time;
763             delay -= (time_diff * s->sdl_sample_rate) / 1000000;
764         }
765
766         delay += 2 * data_used;
767         if (delay < data_used)
768             delay = data_used;
769
770         i_start= x = compute_mod(s->sample_array_index - delay * channels, SAMPLE_ARRAY_SIZE);
771         if (s->show_audio == 1) {
772             h = INT_MIN;
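            /* pick a start index with a nearby zero crossing and the largest
               downward slope, so the waveform display stays stable between
               frames */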
773             for (i = 0; i < 1000; i += channels) {
774                 int idx = (SAMPLE_ARRAY_SIZE + x - i) % SAMPLE_ARRAY_SIZE;
775                 int a = s->sample_array[idx];
776                 int b = s->sample_array[(idx + 4 * channels) % SAMPLE_ARRAY_SIZE];
777                 int c = s->sample_array[(idx + 5 * channels) % SAMPLE_ARRAY_SIZE];
778                 int d = s->sample_array[(idx + 9 * channels) % SAMPLE_ARRAY_SIZE];
779                 int score = a - d;
780                 if (h < score && (b ^ c) < 0) {
781                     h = score;
782                     i_start = idx;
783                 }
784             }
785         }
786
787         s->last_i_start = i_start;
788     } else {
789         i_start = s->last_i_start;
790     }
791
792     bgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0x00);
793     if (s->show_audio == 1) {
794         fill_rectangle(screen,
795                        s->xleft, s->ytop, s->width, s->height,
796                        bgcolor);
797
798         fgcolor = SDL_MapRGB(screen->format, 0xff, 0xff, 0xff);
799
800         /* total height for one channel */
801         h = s->height / nb_display_channels;
802         /* graph height / 2 */
803         h2 = (h * 9) / 20;
804         for (ch = 0; ch < nb_display_channels; ch++) {
805             i = i_start + ch;
806             y1 = s->ytop + ch * h + (h / 2); /* position of center line */
807             for (x = 0; x < s->width; x++) {
808                 y = (s->sample_array[i] * h2) >> 15;
809                 if (y < 0) {
810                     y = -y;
811                     ys = y1 - y;
812                 } else {
813                     ys = y1;
814                 }
815                 fill_rectangle(screen,
816                                s->xleft + x, ys, 1, y,
817                                fgcolor);
818                 i += channels;
819                 if (i >= SAMPLE_ARRAY_SIZE)
820                     i -= SAMPLE_ARRAY_SIZE;
821             }
822         }
823
824         fgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0xff);
825
826         for (ch = 1; ch < nb_display_channels; ch++) {
827             y = s->ytop + ch * h;
828             fill_rectangle(screen,
829                            s->xleft, y, s->width, 1,
830                            fgcolor);
831         }
832         SDL_UpdateRect(screen, s->xleft, s->ytop, s->width, s->height);
833     } else {
834         nb_display_channels= FFMIN(nb_display_channels, 2);
835         if (rdft_bits != s->rdft_bits) {
836             av_rdft_end(s->rdft);
837             av_free(s->rdft_data);
838             s->rdft = av_rdft_init(rdft_bits, DFT_R2C);
839             s->rdft_bits = rdft_bits;
840             s->rdft_data = av_malloc(4 * nb_freq * sizeof(*s->rdft_data));
841         }
842         {
843             FFTSample *data[2];
844             for (ch = 0; ch < nb_display_channels; ch++) {
845                 data[ch] = s->rdft_data + 2 * nb_freq * ch;
846                 i = i_start + ch;
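                /* apply a parabolic (Welch) window while gathering samples
                   for the real FFT */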
847                 for (x = 0; x < 2 * nb_freq; x++) {
848                     double w = (x-nb_freq) * (1.0 / nb_freq);
849                     data[ch][x] = s->sample_array[i] * (1.0 - w * w);
850                     i += channels;
851                     if (i >= SAMPLE_ARRAY_SIZE)
852                         i -= SAMPLE_ARRAY_SIZE;
853                 }
854                 av_rdft_calc(s->rdft, data[ch]);
855             }
856             // least efficient way to do this; we should of course access it directly, but it's more than fast enough
857             for (y = 0; y < s->height; y++) {
858                 double w = 1 / sqrt(nb_freq);
859                 int a = sqrt(w * sqrt(data[0][2 * y + 0] * data[0][2 * y + 0] + data[0][2 * y + 1] * data[0][2 * y + 1]));
860                 int b = (nb_display_channels == 2 ) ? sqrt(w * sqrt(data[1][2 * y + 0] * data[1][2 * y + 0]
861                        + data[1][2 * y + 1] * data[1][2 * y + 1])) : a;
862                 a = FFMIN(a, 255);
863                 b = FFMIN(b, 255);
864                 fgcolor = SDL_MapRGB(screen->format, a, b, (a + b) / 2);
865
866                 fill_rectangle(screen,
867                             s->xpos, s->height-y, 1, 1,
868                             fgcolor);
869             }
870         }
871         SDL_UpdateRect(screen, s->xpos, s->ytop, 1, s->height);
872         s->xpos++;
873         if (s->xpos >= s->width)
874             s->xpos= s->xleft;
875     }
876 }
877
878 static int video_open(VideoState *is)
879 {
880     int flags = SDL_HWSURFACE | SDL_ASYNCBLIT | SDL_HWACCEL;
881     int w,h;
882
883     if (is_full_screen) flags |= SDL_FULLSCREEN;
884     else                flags |= SDL_RESIZABLE;
885
886     if (is_full_screen && fs_screen_width) {
887         w = fs_screen_width;
888         h = fs_screen_height;
889     } else if (!is_full_screen && screen_width) {
890         w = screen_width;
891         h = screen_height;
892 #if CONFIG_AVFILTER
893     } else if (is->out_video_filter && is->out_video_filter->inputs[0]) {
894         w = is->out_video_filter->inputs[0]->w;
895         h = is->out_video_filter->inputs[0]->h;
896 #else
897     } else if (is->video_st && is->video_st->codec->width) {
898         w = is->video_st->codec->width;
899         h = is->video_st->codec->height;
900 #endif
901     } else {
902         w = 640;
903         h = 480;
904     }
905     if (screen && is->width == screen->w && screen->w == w
906        && is->height== screen->h && screen->h == h)
907         return 0;
908
909 #if defined(__APPLE__) && !SDL_VERSION_ATLEAST(1, 2, 14)
910     /* setting bits_per_pixel = 0 or 32 causes blank video on OS X and older SDL */
911     screen = SDL_SetVideoMode(w, h, 24, flags);
912 #else
913     screen = SDL_SetVideoMode(w, h, 0, flags);
914 #endif
915     if (!screen) {
916         fprintf(stderr, "SDL: could not set video mode - exiting\n");
917         return -1;
918     }
919     if (!window_title)
920         window_title = input_filename;
921     SDL_WM_SetCaption(window_title, window_title);
922
923     is->width  = screen->w;
924     is->height = screen->h;
925
926     return 0;
927 }
928
929 /* display the current picture, if any */
930 static void video_display(VideoState *is)
931 {
932     if (!screen)
933         video_open(cur_stream);
934     if (is->audio_st && is->show_audio)
935         video_audio_display(is);
936     else if (is->video_st)
937         video_image_display(is);
938 }
939
940 static int refresh_thread(void *opaque)
941 {
942     VideoState *is= opaque;
943     while (!is->abort_request) {
944         SDL_Event event;
945         event.type = FF_REFRESH_EVENT;
946         event.user.data1 = opaque;
947         if (!is->refresh) {
948             is->refresh = 1;
949             SDL_PushEvent(&event);
950         }
951         av_usleep(is->audio_st && is->show_audio ? rdftspeed * 1000 : 5000); // FIXME: ideally we should wait for the correct time, but SDL's event passing is so slow it would be silly
952     }
953     return 0;
954 }
955
956 /* get the current audio clock value */
957 static double get_audio_clock(VideoState *is)
958 {
959     double pts;
960     int hw_buf_size, bytes_per_sec;
961     pts = is->audio_clock;
962     hw_buf_size = audio_write_get_buf_size(is);
963     bytes_per_sec = 0;
964     if (is->audio_st) {
965         bytes_per_sec = is->sdl_sample_rate * is->sdl_channels *
966                         av_get_bytes_per_sample(is->sdl_sample_fmt);
967     }
968     if (bytes_per_sec)
969         pts -= (double)hw_buf_size / bytes_per_sec;
970     return pts;
971 }
972
973 /* get the current video clock value */
974 static double get_video_clock(VideoState *is)
975 {
976     if (is->paused) {
977         return is->video_current_pts;
978     } else {
979         return is->video_current_pts_drift + av_gettime() / 1000000.0;
980     }
981 }
982
983 /* get the current external clock value */
984 static double get_external_clock(VideoState *is)
985 {
986     int64_t ti;
987     ti = av_gettime();
988     return is->external_clock + ((ti - is->external_clock_time) * 1e-6);
989 }
990
991 /* get the current master clock value */
992 static double get_master_clock(VideoState *is)
993 {
994     double val;
995
996     if (is->av_sync_type == AV_SYNC_VIDEO_MASTER) {
997         if (is->video_st)
998             val = get_video_clock(is);
999         else
1000             val = get_audio_clock(is);
1001     } else if (is->av_sync_type == AV_SYNC_AUDIO_MASTER) {
1002         if (is->audio_st)
1003             val = get_audio_clock(is);
1004         else
1005             val = get_video_clock(is);
1006     } else {
1007         val = get_external_clock(is);
1008     }
1009     return val;
1010 }
1011
1012 /* seek in the stream */
1013 static void stream_seek(VideoState *is, int64_t pos, int64_t rel, int seek_by_bytes)
1014 {
1015     if (!is->seek_req) {
1016         is->seek_pos = pos;
1017         is->seek_rel = rel;
1018         is->seek_flags &= ~AVSEEK_FLAG_BYTE;
1019         if (seek_by_bytes)
1020             is->seek_flags |= AVSEEK_FLAG_BYTE;
1021         is->seek_req = 1;
1022     }
1023 }
1024
1025 /* pause or resume the video */
1026 static void stream_pause(VideoState *is)
1027 {
1028     if (is->paused) {
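        /* resuming: advance frame_timer by the wall-clock time spent paused so
           the next frame is not considered late */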
1029         is->frame_timer += av_gettime() / 1000000.0 + is->video_current_pts_drift - is->video_current_pts;
1030         if (is->read_pause_return != AVERROR(ENOSYS)) {
1031             is->video_current_pts = is->video_current_pts_drift + av_gettime() / 1000000.0;
1032         }
1033         is->video_current_pts_drift = is->video_current_pts - av_gettime() / 1000000.0;
1034     }
1035     is->paused = !is->paused;
1036 }
1037
1038 static double compute_target_time(double frame_current_pts, VideoState *is)
1039 {
1040     double delay, sync_threshold, diff;
1041
1042     /* compute nominal delay */
1043     delay = frame_current_pts - is->frame_last_pts;
1044     if (delay <= 0 || delay >= 10.0) {
1045         /* if incorrect delay, use previous one */
1046         delay = is->frame_last_delay;
1047     } else {
1048         is->frame_last_delay = delay;
1049     }
1050     is->frame_last_pts = frame_current_pts;
1051
1052     /* update delay to follow master synchronisation source */
1053     if (((is->av_sync_type == AV_SYNC_AUDIO_MASTER && is->audio_st) ||
1054          is->av_sync_type == AV_SYNC_EXTERNAL_CLOCK)) {
1055         /* if video is slave, we try to correct big delays by
1056            duplicating or deleting a frame */
1057         diff = get_video_clock(is) - get_master_clock(is);
1058
1059         /* skip or repeat frame. We take into account the
1060            delay to compute the threshold. I still don't know
1061            if it is the best guess */
1062         sync_threshold = FFMAX(AV_SYNC_THRESHOLD, delay);
1063         if (fabs(diff) < AV_NOSYNC_THRESHOLD) {
1064             if (diff <= -sync_threshold)
1065                 delay = 0;
1066             else if (diff >= sync_threshold)
1067                 delay = 2 * delay;
1068         }
1069     }
1070     is->frame_timer += delay;
1071
1072     av_dlog(NULL, "video: delay=%0.3f pts=%0.3f A-V=%f\n",
1073             delay, frame_current_pts, -diff);
1074
1075     return is->frame_timer;
1076 }
1077
1078 /* called to display each frame */
1079 static void video_refresh_timer(void *opaque)
1080 {
1081     VideoState *is = opaque;
1082     VideoPicture *vp;
1083
1084     SubPicture *sp, *sp2;
1085
1086     if (is->video_st) {
1087 retry:
1088         if (is->pictq_size == 0) {
1089             // nothing to do, no picture to display in the queue
1090         } else {
1091             double time = av_gettime() / 1000000.0;
1092             double next_target;
1093             /* dequeue the picture */
1094             vp = &is->pictq[is->pictq_rindex];
1095
1096             if (time < vp->target_clock)
1097                 return;
1098             /* update current video pts */
1099             is->video_current_pts = vp->pts;
1100             is->video_current_pts_drift = is->video_current_pts - time;
1101             is->video_current_pos = vp->pos;
1102             if (is->pictq_size > 1) {
1103                 VideoPicture *nextvp = &is->pictq[(is->pictq_rindex + 1) % VIDEO_PICTURE_QUEUE_SIZE];
1104                 assert(nextvp->target_clock >= vp->target_clock);
1105                 next_target= nextvp->target_clock;
1106             } else {
1107                 next_target = vp->target_clock + is->video_clock - vp->pts; // FIXME pass durations cleanly
1108             }
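            /* the picture is late: raise the decode-side frame-skip ratio and,
               if another picture is already queued or we are very late, drop it */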
1109             if (framedrop && time > next_target) {
1110                 is->skip_frames *= 1.0 + FRAME_SKIP_FACTOR;
1111                 if (is->pictq_size > 1 || time > next_target + 0.5) {
1112                     /* update queue size and signal for next picture */
1113                     if (++is->pictq_rindex == VIDEO_PICTURE_QUEUE_SIZE)
1114                         is->pictq_rindex = 0;
1115
1116                     SDL_LockMutex(is->pictq_mutex);
1117                     is->pictq_size--;
1118                     SDL_CondSignal(is->pictq_cond);
1119                     SDL_UnlockMutex(is->pictq_mutex);
1120                     goto retry;
1121                 }
1122             }
1123
1124             if (is->subtitle_st) {
1125                 if (is->subtitle_stream_changed) {
1126                     SDL_LockMutex(is->subpq_mutex);
1127
1128                     while (is->subpq_size) {
1129                         free_subpicture(&is->subpq[is->subpq_rindex]);
1130
1131                         /* update queue size and signal for next picture */
1132                         if (++is->subpq_rindex == SUBPICTURE_QUEUE_SIZE)
1133                             is->subpq_rindex = 0;
1134
1135                         is->subpq_size--;
1136                     }
1137                     is->subtitle_stream_changed = 0;
1138
1139                     SDL_CondSignal(is->subpq_cond);
1140                     SDL_UnlockMutex(is->subpq_mutex);
1141                 } else {
1142                     if (is->subpq_size > 0) {
1143                         sp = &is->subpq[is->subpq_rindex];
1144
1145                         if (is->subpq_size > 1)
1146                             sp2 = &is->subpq[(is->subpq_rindex + 1) % SUBPICTURE_QUEUE_SIZE];
1147                         else
1148                             sp2 = NULL;
1149
1150                         if ((is->video_current_pts > (sp->pts + ((float) sp->sub.end_display_time / 1000)))
1151                                 || (sp2 && is->video_current_pts > (sp2->pts + ((float) sp2->sub.start_display_time / 1000))))
1152                         {
1153                             free_subpicture(sp);
1154
1155                             /* update queue size and signal for next picture */
1156                             if (++is->subpq_rindex == SUBPICTURE_QUEUE_SIZE)
1157                                 is->subpq_rindex = 0;
1158
1159                             SDL_LockMutex(is->subpq_mutex);
1160                             is->subpq_size--;
1161                             SDL_CondSignal(is->subpq_cond);
1162                             SDL_UnlockMutex(is->subpq_mutex);
1163                         }
1164                     }
1165                 }
1166             }
1167
1168             /* display picture */
1169             if (!display_disable)
1170                 video_display(is);
1171
1172             /* update queue size and signal for next picture */
1173             if (++is->pictq_rindex == VIDEO_PICTURE_QUEUE_SIZE)
1174                 is->pictq_rindex = 0;
1175
1176             SDL_LockMutex(is->pictq_mutex);
1177             is->pictq_size--;
1178             SDL_CondSignal(is->pictq_cond);
1179             SDL_UnlockMutex(is->pictq_mutex);
1180         }
1181     } else if (is->audio_st) {
1182         /* draw the next audio frame */
1183
1184         /* if there is only an audio stream, display the audio bars (better
1185            than nothing, just to test the implementation) */
1186
1187         /* display picture */
1188         if (!display_disable)
1189             video_display(is);
1190     }
1191     if (show_status) {
1192         static int64_t last_time;
1193         int64_t cur_time;
1194         int aqsize, vqsize, sqsize;
1195         double av_diff;
1196
1197         cur_time = av_gettime();
1198         if (!last_time || (cur_time - last_time) >= 30000) {
1199             aqsize = 0;
1200             vqsize = 0;
1201             sqsize = 0;
1202             if (is->audio_st)
1203                 aqsize = is->audioq.size;
1204             if (is->video_st)
1205                 vqsize = is->videoq.size;
1206             if (is->subtitle_st)
1207                 sqsize = is->subtitleq.size;
1208             av_diff = 0;
1209             if (is->audio_st && is->video_st)
1210                 av_diff = get_audio_clock(is) - get_video_clock(is);
1211             printf("%7.2f A-V:%7.3f s:%3.1f aq=%5dKB vq=%5dKB sq=%5dB f=%"PRId64"/%"PRId64"   \r",
1212                    get_master_clock(is), av_diff, FFMAX(is->skip_frames - 1, 0), aqsize / 1024,
1213                    vqsize / 1024, sqsize, is->pts_ctx.num_faulty_dts, is->pts_ctx.num_faulty_pts);
1214             fflush(stdout);
1215             last_time = cur_time;
1216         }
1217     }
1218 }
1219
1220 static void stream_close(VideoState *is)
1221 {
1222     VideoPicture *vp;
1223     int i;
1224     /* XXX: use a special url_shutdown call to abort parse cleanly */
1225     is->abort_request = 1;
1226     SDL_WaitThread(is->parse_tid, NULL);
1227     SDL_WaitThread(is->refresh_tid, NULL);
1228
1229     /* free all pictures */
1230     for (i = 0; i < VIDEO_PICTURE_QUEUE_SIZE; i++) {
1231         vp = &is->pictq[i];
1232 #if CONFIG_AVFILTER
1233         avfilter_unref_bufferp(&vp->picref);
1234 #endif
1235         if (vp->bmp) {
1236             SDL_FreeYUVOverlay(vp->bmp);
1237             vp->bmp = NULL;
1238         }
1239     }
1240     SDL_DestroyMutex(is->pictq_mutex);
1241     SDL_DestroyCond(is->pictq_cond);
1242     SDL_DestroyMutex(is->subpq_mutex);
1243     SDL_DestroyCond(is->subpq_cond);
1244 #if !CONFIG_AVFILTER
1245     if (is->img_convert_ctx)
1246         sws_freeContext(is->img_convert_ctx);
1247 #endif
1248     av_free(is);
1249 }
1250
1251 static void do_exit(void)
1252 {
1253     if (cur_stream) {
1254         stream_close(cur_stream);
1255         cur_stream = NULL;
1256     }
1257     uninit_opts();
1258 #if CONFIG_AVFILTER
1259     avfilter_uninit();
1260 #endif
1261     avformat_network_deinit();
1262     if (show_status)
1263         printf("\n");
1264     SDL_Quit();
1265     av_log(NULL, AV_LOG_QUIET, "");
1266     exit(0);
1267 }
1268
1269 /* allocate a picture (this needs to be done in the main thread to avoid
1270    potential locking problems) */
1271 static void alloc_picture(void *opaque)
1272 {
1273     VideoState *is = opaque;
1274     VideoPicture *vp;
1275
1276     vp = &is->pictq[is->pictq_windex];
1277
1278     if (vp->bmp)
1279         SDL_FreeYUVOverlay(vp->bmp);
1280
1281 #if CONFIG_AVFILTER
1282     avfilter_unref_bufferp(&vp->picref);
1283
1284     vp->width   = is->out_video_filter->inputs[0]->w;
1285     vp->height  = is->out_video_filter->inputs[0]->h;
1286     vp->pix_fmt = is->out_video_filter->inputs[0]->format;
1287 #else
1288     vp->width   = is->video_st->codec->width;
1289     vp->height  = is->video_st->codec->height;
1290     vp->pix_fmt = is->video_st->codec->pix_fmt;
1291 #endif
1292
1293     vp->bmp = SDL_CreateYUVOverlay(vp->width, vp->height,
1294                                    SDL_YV12_OVERLAY,
1295                                    screen);
1296     if (!vp->bmp || vp->bmp->pitches[0] < vp->width) {
1297         /* SDL allocates a buffer smaller than requested if the video
1298          * overlay hardware is unable to support the requested size. */
1299         fprintf(stderr, "Error: the video system does not support an image\n"
1300                         "size of %dx%d pixels. Try using -vf \"scale=w:h\"\n"
1301                         "to reduce the image size.\n", vp->width, vp->height );
1302         do_exit();
1303     }
1304
1305     SDL_LockMutex(is->pictq_mutex);
1306     vp->allocated = 1;
1307     SDL_CondSignal(is->pictq_cond);
1308     SDL_UnlockMutex(is->pictq_mutex);
1309 }
1310
1311 /* The 'pts' parameter is the dts of the packet / pts of the frame and
1312  * guessed if not known. */
1313 static int queue_picture(VideoState *is, AVFrame *src_frame, double pts, int64_t pos)
1314 {
1315     VideoPicture *vp;
1316 #if CONFIG_AVFILTER
1317     AVPicture pict_src;
1318 #else
1319     int dst_pix_fmt = AV_PIX_FMT_YUV420P;
1320 #endif
1321     /* wait until we have space to put a new picture */
1322     SDL_LockMutex(is->pictq_mutex);
1323
1324     if (is->pictq_size >= VIDEO_PICTURE_QUEUE_SIZE && !is->refresh)
1325         is->skip_frames = FFMAX(1.0 - FRAME_SKIP_FACTOR, is->skip_frames * (1.0 - FRAME_SKIP_FACTOR));
1326
1327     while (is->pictq_size >= VIDEO_PICTURE_QUEUE_SIZE &&
1328            !is->videoq.abort_request) {
1329         SDL_CondWait(is->pictq_cond, is->pictq_mutex);
1330     }
1331     SDL_UnlockMutex(is->pictq_mutex);
1332
1333     if (is->videoq.abort_request)
1334         return -1;
1335
1336     vp = &is->pictq[is->pictq_windex];
1337
1338     /* alloc or resize hardware picture buffer */
1339     if (!vp->bmp || vp->reallocate ||
1340 #if CONFIG_AVFILTER
1341         vp->width  != is->out_video_filter->inputs[0]->w ||
1342         vp->height != is->out_video_filter->inputs[0]->h) {
1343 #else
1344         vp->width != is->video_st->codec->width ||
1345         vp->height != is->video_st->codec->height) {
1346 #endif
1347         SDL_Event event;
1348
1349         vp->allocated  = 0;
1350         vp->reallocate = 0;
1351
1352         /* the allocation must be done in the main thread to avoid
1353            locking problems */
1354         event.type = FF_ALLOC_EVENT;
1355         event.user.data1 = is;
1356         SDL_PushEvent(&event);
1357
1358         /* wait until the picture is allocated */
1359         SDL_LockMutex(is->pictq_mutex);
1360         while (!vp->allocated && !is->videoq.abort_request) {
1361             SDL_CondWait(is->pictq_cond, is->pictq_mutex);
1362         }
1363         SDL_UnlockMutex(is->pictq_mutex);
1364
1365         if (is->videoq.abort_request)
1366             return -1;
1367     }
1368
1369     /* if the frame is not skipped, then display it */
1370     if (vp->bmp) {
1371         AVPicture pict = { { 0 } };
1372 #if CONFIG_AVFILTER
1373         avfilter_unref_bufferp(&vp->picref);
1374         vp->picref = src_frame->opaque;
1375 #endif
1376
1377         /* get a pointer on the bitmap */
1378         SDL_LockYUVOverlay (vp->bmp);
1379
1380         pict.data[0] = vp->bmp->pixels[0];
1381         pict.data[1] = vp->bmp->pixels[2];
1382         pict.data[2] = vp->bmp->pixels[1];
1383
1384         pict.linesize[0] = vp->bmp->pitches[0];
1385         pict.linesize[1] = vp->bmp->pitches[2];
1386         pict.linesize[2] = vp->bmp->pitches[1];
1387
1388 #if CONFIG_AVFILTER
1389         pict_src.data[0] = src_frame->data[0];
1390         pict_src.data[1] = src_frame->data[1];
1391         pict_src.data[2] = src_frame->data[2];
1392
1393         pict_src.linesize[0] = src_frame->linesize[0];
1394         pict_src.linesize[1] = src_frame->linesize[1];
1395         pict_src.linesize[2] = src_frame->linesize[2];
1396
1397         // FIXME use direct rendering
1398         av_picture_copy(&pict, &pict_src,
1399                         vp->pix_fmt, vp->width, vp->height);
1400 #else
1401         av_opt_get_int(sws_opts, "sws_flags", 0, &sws_flags);
1402         is->img_convert_ctx = sws_getCachedContext(is->img_convert_ctx,
1403             vp->width, vp->height, vp->pix_fmt, vp->width, vp->height,
1404             dst_pix_fmt, sws_flags, NULL, NULL, NULL);
1405         if (is->img_convert_ctx == NULL) {
1406             fprintf(stderr, "Cannot initialize the conversion context\n");
1407             exit(1);
1408         }
1409         sws_scale(is->img_convert_ctx, src_frame->data, src_frame->linesize,
1410                   0, vp->height, pict.data, pict.linesize);
1411 #endif
1412         /* update the bitmap content */
1413         SDL_UnlockYUVOverlay(vp->bmp);
1414
1415         vp->pts = pts;
1416         vp->pos = pos;
1417
1418         /* now we can update the picture count */
1419         if (++is->pictq_windex == VIDEO_PICTURE_QUEUE_SIZE)
1420             is->pictq_windex = 0;
1421         SDL_LockMutex(is->pictq_mutex);
1422         vp->target_clock = compute_target_time(vp->pts, is);
1423
1424         is->pictq_size++;
1425         SDL_UnlockMutex(is->pictq_mutex);
1426     }
1427     return 0;
1428 }
1429
1430 /* Compute the exact PTS for the picture if it is omitted in the stream.
1431  * The 'pts1' parameter is the dts of the packet / pts of the frame. */
1432 static int output_picture2(VideoState *is, AVFrame *src_frame, double pts1, int64_t pos)
1433 {
1434     double frame_delay, pts;
1435
1436     pts = pts1;
1437
1438     if (pts != 0) {
1439         /* update video clock with pts, if present */
1440         is->video_clock = pts;
1441     } else {
1442         pts = is->video_clock;
1443     }
1444     /* update video clock for next frame */
1445     frame_delay = av_q2d(is->video_st->codec->time_base);
1446     /* for MPEG2, the frame can be repeated, so we update the
1447        clock accordingly */
1448     frame_delay += src_frame->repeat_pict * (frame_delay * 0.5);
1449     is->video_clock += frame_delay;
1450
1451     return queue_picture(is, src_frame, pts, pos);
1452 }
1453
1454 static int get_video_frame(VideoState *is, AVFrame *frame, int64_t *pts, AVPacket *pkt)
1455 {
1456     int got_picture, i;
1457
1458     if (packet_queue_get(&is->videoq, pkt, 1) < 0)
1459         return -1;
1460
1461     if (pkt->data == flush_pkt.data) {
1462         avcodec_flush_buffers(is->video_st->codec);
1463
1464         SDL_LockMutex(is->pictq_mutex);
1465         // Make sure there are no long-delay timers (ideally we should just flush the queue, but that's harder)
1466         for (i = 0; i < VIDEO_PICTURE_QUEUE_SIZE; i++) {
1467             is->pictq[i].target_clock= 0;
1468         }
1469         while (is->pictq_size && !is->videoq.abort_request) {
1470             SDL_CondWait(is->pictq_cond, is->pictq_mutex);
1471         }
1472         is->video_current_pos = -1;
1473         SDL_UnlockMutex(is->pictq_mutex);
1474
1475         init_pts_correction(&is->pts_ctx);
1476         is->frame_last_pts = AV_NOPTS_VALUE;
1477         is->frame_last_delay = 0;
1478         is->frame_timer = (double)av_gettime() / 1000000.0;
1479         is->skip_frames = 1;
1480         is->skip_frames_index = 0;
1481         return 0;
1482     }
1483
1484     avcodec_decode_video2(is->video_st->codec, frame, &got_picture, pkt);
1485
1486     if (got_picture) {
1487         if (decoder_reorder_pts == -1) {
1488             *pts = guess_correct_pts(&is->pts_ctx, frame->pkt_pts, frame->pkt_dts);
1489         } else if (decoder_reorder_pts) {
1490             *pts = frame->pkt_pts;
1491         } else {
1492             *pts = frame->pkt_dts;
1493         }
1494
1495         if (*pts == AV_NOPTS_VALUE) {
1496             *pts = 0;
1497         }
1498         if (is->video_st->sample_aspect_ratio.num) {
1499             frame->sample_aspect_ratio = is->video_st->sample_aspect_ratio;
1500         }
1501
1502         is->skip_frames_index += 1;
1503         if (is->skip_frames_index >= is->skip_frames) {
1504             is->skip_frames_index -= FFMAX(is->skip_frames, 1.0);
1505             return 1;
1506         }
1507
1508     }
1509     return 0;
1510 }
1511
1512 #if CONFIG_AVFILTER
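/* build the video filter graph: buffer source -> (optional user filters) ->
   format=yuv420p -> buffersink */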
1513 static int configure_video_filters(AVFilterGraph *graph, VideoState *is, const char *vfilters)
1514 {
1515     char sws_flags_str[128];
1516     char buffersrc_args[256];
1517     int ret;
1518     AVFilterContext *filt_src = NULL, *filt_out = NULL, *filt_format;
1519     AVCodecContext *codec = is->video_st->codec;
1520
1521     snprintf(sws_flags_str, sizeof(sws_flags_str), "flags=%"PRId64, sws_flags);
1522     graph->scale_sws_opts = av_strdup(sws_flags_str);
1523
1524     snprintf(buffersrc_args, sizeof(buffersrc_args), "%d:%d:%d:%d:%d:%d:%d",
1525              codec->width, codec->height, codec->pix_fmt,
1526              is->video_st->time_base.num, is->video_st->time_base.den,
1527              codec->sample_aspect_ratio.num, codec->sample_aspect_ratio.den);
1528
1529
1530     if ((ret = avfilter_graph_create_filter(&filt_src,
1531                                             avfilter_get_by_name("buffer"),
1532                                             "src", buffersrc_args, NULL,
1533                                             graph)) < 0)
1534         return ret;
1535     if ((ret = avfilter_graph_create_filter(&filt_out,
1536                                             avfilter_get_by_name("buffersink"),
1537                                             "out", NULL, NULL, graph)) < 0)
1538         return ret;
1539
1540     if ((ret = avfilter_graph_create_filter(&filt_format,
1541                                             avfilter_get_by_name("format"),
1542                                             "format", "yuv420p", NULL, graph)) < 0)
1543         return ret;
1544     if ((ret = avfilter_link(filt_format, 0, filt_out, 0)) < 0)
1545         return ret;
1546
1547
1548     if (vfilters) {
1549         AVFilterInOut *outputs = avfilter_inout_alloc();
1550         AVFilterInOut *inputs  = avfilter_inout_alloc();
1551
1552         outputs->name    = av_strdup("in");
1553         outputs->filter_ctx = filt_src;
1554         outputs->pad_idx = 0;
1555         outputs->next    = NULL;
1556
1557         inputs->name    = av_strdup("out");
1558         inputs->filter_ctx = filt_format;
1559         inputs->pad_idx = 0;
1560         inputs->next    = NULL;
1561
1562         if ((ret = avfilter_graph_parse(graph, vfilters, inputs, outputs, NULL)) < 0)
1563             return ret;
1564     } else {
1565         if ((ret = avfilter_link(filt_src, 0, filt_format, 0)) < 0)
1566             return ret;
1567     }
1568
1569     if ((ret = avfilter_graph_config(graph, NULL)) < 0)
1570         return ret;
1571
1572     is->in_video_filter  = filt_src;
1573     is->out_video_filter = filt_out;
1574
1575     if (codec->codec->capabilities & CODEC_CAP_DR1) {
1576         is->use_dr1 = 1;
1577         codec->get_buffer     = codec_get_buffer;
1578         codec->release_buffer = codec_release_buffer;
1579         codec->opaque         = &is->buffer_pool;
1580     }
1581
1582     return ret;
1583 }
1584
1585 #endif  /* CONFIG_AVFILTER */
1586
1587 static int video_thread(void *arg)
1588 {
1589     AVPacket pkt = { 0 };
1590     VideoState *is = arg;
1591     AVFrame *frame = avcodec_alloc_frame();
1592     int64_t pts_int;
1593     double pts;
1594     int ret;
1595
1596 #if CONFIG_AVFILTER
1597     AVFilterGraph *graph = avfilter_graph_alloc();
1598     AVFilterContext *filt_out = NULL, *filt_in = NULL;
1599     int64_t pos;
1600     int last_w = is->video_st->codec->width;
1601     int last_h = is->video_st->codec->height;
1602
1603     if ((ret = configure_video_filters(graph, is, vfilters)) < 0)
1604         goto the_end;
1605     filt_in  = is->in_video_filter;
1606     filt_out = is->out_video_filter;
1607 #endif
1608
1609     for (;;) {
1610 #if CONFIG_AVFILTER
1611         AVFilterBufferRef *picref;
1612         AVRational tb;
1613 #endif
1614         while (is->paused && !is->videoq.abort_request)
1615             SDL_Delay(10);
1616
1617         av_free_packet(&pkt);
1618
1619         ret = get_video_frame(is, frame, &pts_int, &pkt);
1620         if (ret < 0)
1621             goto the_end;
1622
1623         if (!ret)
1624             continue;
1625
1626 #if CONFIG_AVFILTER
1627         if (   last_w != is->video_st->codec->width
1628             || last_h != is->video_st->codec->height) {
1629             av_dlog(NULL, "Changing size %dx%d -> %dx%d\n", last_w, last_h,
1630                     is->video_st->codec->width, is->video_st->codec->height);
1631             avfilter_graph_free(&graph);
1632             graph = avfilter_graph_alloc();
1633             if ((ret = configure_video_filters(graph, is, vfilters)) < 0)
1634                 goto the_end;
1635             filt_in  = is->in_video_filter;
1636             filt_out = is->out_video_filter;
1637             last_w = is->video_st->codec->width;
1638             last_h = is->video_st->codec->height;
1639         }
1640
1641         frame->pts = pts_int;
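             /* Direct-rendering path: the decoder wrote into one of our own
              * buffers, so wrap that memory in an AVFilterBufferRef and hand it
              * to the buffer source without copying; the extra refcount keeps
              * the buffer alive until filter_release_buffer() runs. */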
1642         if (is->use_dr1) {
1643             FrameBuffer      *buf = frame->opaque;
1644             AVFilterBufferRef *fb = avfilter_get_video_buffer_ref_from_arrays(
1645                                         frame->data, frame->linesize,
1646                                         AV_PERM_READ | AV_PERM_PRESERVE,
1647                                         frame->width, frame->height,
1648                                         frame->format);
1649
1650             avfilter_copy_frame_props(fb, frame);
1651             fb->buf->priv           = buf;
1652             fb->buf->free           = filter_release_buffer;
1653
1654             buf->refcount++;
1655             av_buffersrc_buffer(filt_in, fb);
1656
1657         } else
1658             av_buffersrc_write_frame(filt_in, frame);
1659
1660         while (ret >= 0) {
1661             ret = av_buffersink_read(filt_out, &picref);
1662             if (ret < 0) {
1663                 ret = 0;
1664                 break;
1665             }
1666
1667             avfilter_copy_buf_props(frame, picref);
1668
1669             pts_int = picref->pts;
1670             tb      = filt_out->inputs[0]->time_base;
1671             pos     = picref->pos;
1672             frame->opaque = picref;
1673
1674             if (av_cmp_q(tb, is->video_st->time_base)) {
1675                 av_unused int64_t pts1 = pts_int;
1676                 pts_int = av_rescale_q(pts_int, tb, is->video_st->time_base);
1677                 av_dlog(NULL, "video_thread(): "
1678                         "tb:%d/%d pts:%"PRId64" -> tb:%d/%d pts:%"PRId64"\n",
1679                         tb.num, tb.den, pts1,
1680                         is->video_st->time_base.num, is->video_st->time_base.den, pts_int);
1681             }
1682             pts = pts_int * av_q2d(is->video_st->time_base);
1683             ret = output_picture2(is, frame, pts, pos);
1684         }
1685 #else
1686         pts = pts_int * av_q2d(is->video_st->time_base);
1687         ret = output_picture2(is, frame, pts,  pkt.pos);
1688 #endif
1689
1690         if (ret < 0)
1691             goto the_end;
1692
1693         if (step)
1694             if (cur_stream)
1695                 stream_pause(cur_stream);
1696     }
1697  the_end:
1698 #if CONFIG_AVFILTER
1699     av_freep(&vfilters);
1700     avfilter_graph_free(&graph);
1701 #endif
1702     av_free_packet(&pkt);
1703     avcodec_free_frame(&frame);
1704     return 0;
1705 }
1706
1707 static int subtitle_thread(void *arg)
1708 {
1709     VideoState *is = arg;
1710     SubPicture *sp;
1711     AVPacket pkt1, *pkt = &pkt1;
1712     int got_subtitle;
1713     double pts;
1714     int i, j;
1715     int r, g, b, y, u, v, a;
1716
1717     for (;;) {
1718         while (is->paused && !is->subtitleq.abort_request) {
1719             SDL_Delay(10);
1720         }
1721         if (packet_queue_get(&is->subtitleq, pkt, 1) < 0)
1722             break;
1723
1724         if (pkt->data == flush_pkt.data) {
1725             avcodec_flush_buffers(is->subtitle_st->codec);
1726             continue;
1727         }
1728         SDL_LockMutex(is->subpq_mutex);
1729         while (is->subpq_size >= SUBPICTURE_QUEUE_SIZE &&
1730                !is->subtitleq.abort_request) {
1731             SDL_CondWait(is->subpq_cond, is->subpq_mutex);
1732         }
1733         SDL_UnlockMutex(is->subpq_mutex);
1734
1735         if (is->subtitleq.abort_request)
1736             return 0;
1737
1738         sp = &is->subpq[is->subpq_windex];
1739
1740         /* NOTE: pts is the PTS of the _first_ subtitle beginning in
1741            this packet, if any */
1742         pts = 0;
1743         if (pkt->pts != AV_NOPTS_VALUE)
1744             pts = av_q2d(is->subtitle_st->time_base) * pkt->pts;
1745
1746         avcodec_decode_subtitle2(is->subtitle_st->codec, &sp->sub,
1747                                  &got_subtitle, pkt);
1748
1749         if (got_subtitle && sp->sub.format == 0) {
1750             sp->pts = pts;
1751
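                 /* The bitmap subtitle palette arrives as RGBA; convert it in
                  * place to YUVA with CCIR coefficients so it can later be
                  * blended onto the YUV overlay. */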
1752             for (i = 0; i < sp->sub.num_rects; i++)
1753             {
1754                 for (j = 0; j < sp->sub.rects[i]->nb_colors; j++)
1755                 {
1756                     RGBA_IN(r, g, b, a, (uint32_t*)sp->sub.rects[i]->pict.data[1] + j);
1757                     y = RGB_TO_Y_CCIR(r, g, b);
1758                     u = RGB_TO_U_CCIR(r, g, b, 0);
1759                     v = RGB_TO_V_CCIR(r, g, b, 0);
1760                     YUVA_OUT((uint32_t*)sp->sub.rects[i]->pict.data[1] + j, y, u, v, a);
1761                 }
1762             }
1763
1764             /* now we can update the subpicture count */
1765             if (++is->subpq_windex == SUBPICTURE_QUEUE_SIZE)
1766                 is->subpq_windex = 0;
1767             SDL_LockMutex(is->subpq_mutex);
1768             is->subpq_size++;
1769             SDL_UnlockMutex(is->subpq_mutex);
1770         }
1771         av_free_packet(pkt);
1772     }
1773     return 0;
1774 }
1775
1776 /* copy decoded samples into the ring buffer used by the audio display (waveform/spectrum) */
1777 static void update_sample_display(VideoState *is, short *samples, int samples_size)
1778 {
1779     int size, len;
1780
1781     size = samples_size / sizeof(short);
1782     while (size > 0) {
1783         len = SAMPLE_ARRAY_SIZE - is->sample_array_index;
1784         if (len > size)
1785             len = size;
1786         memcpy(is->sample_array + is->sample_array_index, samples, len * sizeof(short));
1787         samples += len;
1788         is->sample_array_index += len;
1789         if (is->sample_array_index >= SAMPLE_ARRAY_SIZE)
1790             is->sample_array_index = 0;
1791         size -= len;
1792     }
1793 }
1794
1795 /* return the new audio buffer size (samples can be added or removed
1796    to get better sync when video or an external clock is the master) */
1797 static int synchronize_audio(VideoState *is, short *samples,
1798                              int samples_size1, double pts)
1799 {
1800     int n, samples_size;
1801     double ref_clock;
1802
1803     n = is->sdl_channels * av_get_bytes_per_sample(is->sdl_sample_fmt);
1804     samples_size = samples_size1;
1805
1806     /* if not master, then we try to remove or add samples to correct the clock */
1807     if (((is->av_sync_type == AV_SYNC_VIDEO_MASTER && is->video_st) ||
1808          is->av_sync_type == AV_SYNC_EXTERNAL_CLOCK)) {
1809         double diff, avg_diff;
1810         int wanted_size, min_size, max_size, nb_samples;
1811
1812         ref_clock = get_master_clock(is);
1813         diff = get_audio_clock(is) - ref_clock;
1814
1815         if (fabs(diff) < AV_NOSYNC_THRESHOLD) {
1816             is->audio_diff_cum = diff + is->audio_diff_avg_coef * is->audio_diff_cum;
1817             if (is->audio_diff_avg_count < AUDIO_DIFF_AVG_NB) {
1818                 /* not enough measures to have a correct estimate */
1819                 is->audio_diff_avg_count++;
1820             } else {
1821                 /* estimate the A-V difference */
1822                 avg_diff = is->audio_diff_cum * (1.0 - is->audio_diff_avg_coef);
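                     /* audio_diff_cum is a geometric series of past diffs
                      * (sum of coef^k * diff_k), so multiplying by
                      * (1 - coef) yields a normalized weighted average. */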
1823
1824                 if (fabs(avg_diff) >= is->audio_diff_threshold) {
1825                     wanted_size = samples_size + ((int)(diff * is->sdl_sample_rate) * n);
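                         /* diff is in seconds; diff * sample_rate gives the
                          * number of sample frames to add or drop, and * n
                          * converts that to bytes. */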
1826                     nb_samples = samples_size / n;
1827
1828                     min_size = ((nb_samples * (100 - SAMPLE_CORRECTION_PERCENT_MAX)) / 100) * n;
1829                     max_size = ((nb_samples * (100 + SAMPLE_CORRECTION_PERCENT_MAX)) / 100) * n;
1830                     if (wanted_size < min_size)
1831                         wanted_size = min_size;
1832                     else if (wanted_size > max_size)
1833                         wanted_size = max_size;
1834
1835                     /* add or remove samples to correct the sync */
1836                     if (wanted_size < samples_size) {
1837                         /* remove samples */
1838                         samples_size = wanted_size;
1839                     } else if (wanted_size > samples_size) {
1840                         uint8_t *samples_end, *q;
1841                         int nb;
1842
1843                         /* add samples by repeating the final sample */
1844                         nb = (wanted_size - samples_size);
1845                         samples_end = (uint8_t *)samples + samples_size - n;
1846                         q = samples_end + n;
1847                         while (nb > 0) {
1848                             memcpy(q, samples_end, n);
1849                             q += n;
1850                             nb -= n;
1851                         }
1852                         samples_size = wanted_size;
1853                     }
1854                 }
1855                 av_dlog(NULL, "diff=%f adiff=%f sample_diff=%d apts=%0.3f vpts=%0.3f %f\n",
1856                         diff, avg_diff, samples_size - samples_size1,
1857                         is->audio_clock, is->video_clock, is->audio_diff_threshold);
1858             }
1859         } else {
1860             /* the difference is too large: probably initial PTS errors,
1861                so reset the A-V filter */
1862             is->audio_diff_avg_count = 0;
1863             is->audio_diff_cum       = 0;
1864         }
1865     }
1866
1867     return samples_size;
1868 }
1869
1870 /* decode one audio frame and return its uncompressed size */
1871 static int audio_decode_frame(VideoState *is, double *pts_ptr)
1872 {
1873     AVPacket *pkt_temp = &is->audio_pkt_temp;
1874     AVPacket *pkt = &is->audio_pkt;
1875     AVCodecContext *dec = is->audio_st->codec;
1876     int n, len1, data_size, got_frame;
1877     double pts;
1878     int new_packet = 0;
1879     int flush_complete = 0;
1880
1881     for (;;) {
1882         /* NOTE: the audio packet can contain several frames */
1883         while (pkt_temp->size > 0 || (!pkt_temp->data && new_packet)) {
1884             int resample_changed, audio_resample;
1885
1886             if (!is->frame) {
1887                 if (!(is->frame = avcodec_alloc_frame()))
1888                     return AVERROR(ENOMEM);
1889             } else
1890                 avcodec_get_frame_defaults(is->frame);
1891
1892             if (flush_complete)
1893                 break;
1894             new_packet = 0;
1895             len1 = avcodec_decode_audio4(dec, is->frame, &got_frame, pkt_temp);
1896             if (len1 < 0) {
1897                 /* if error, we skip the frame */
1898                 pkt_temp->size = 0;
1899                 break;
1900             }
1901
1902             pkt_temp->data += len1;
1903             pkt_temp->size -= len1;
1904
1905             if (!got_frame) {
1906                 /* stop sending empty packets if the decoder is finished */
1907                 if (!pkt_temp->data && dec->codec->capabilities & CODEC_CAP_DELAY)
1908                     flush_complete = 1;
1909                 continue;
1910             }
1911             data_size = av_samples_get_buffer_size(NULL, dec->channels,
1912                                                    is->frame->nb_samples,
1913                                                    is->frame->format, 1);
1914
1915             audio_resample = is->frame->format         != is->sdl_sample_fmt     ||
1916                              is->frame->channel_layout != is->sdl_channel_layout ||
1917                              is->frame->sample_rate    != is->sdl_sample_rate;
1918
1919             resample_changed = is->frame->format         != is->resample_sample_fmt     ||
1920                                is->frame->channel_layout != is->resample_channel_layout ||
1921                                is->frame->sample_rate    != is->resample_sample_rate;
1922
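                 /* (Re)configure libavresample whenever conversion is needed
                  * but no context exists yet, or when the incoming frame's
                  * format/layout/rate no longer matches the previous setup. */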
1923             if ((!is->avr && audio_resample) || resample_changed) {
1924                 int ret;
1925                 if (is->avr)
1926                     avresample_close(is->avr);
1927                 else if (audio_resample) {
1928                     is->avr = avresample_alloc_context();
1929                     if (!is->avr) {
1930                         fprintf(stderr, "error allocating AVAudioResampleContext\n");
1931                         break;
1932                     }
1933                 }
1934                 if (audio_resample) {
1935                     av_opt_set_int(is->avr, "in_channel_layout",  is->frame->channel_layout, 0);
1936                     av_opt_set_int(is->avr, "in_sample_fmt",      is->frame->format,         0);
1937                     av_opt_set_int(is->avr, "in_sample_rate",     is->frame->sample_rate,    0);
1938                     av_opt_set_int(is->avr, "out_channel_layout", is->sdl_channel_layout,    0);
1939                     av_opt_set_int(is->avr, "out_sample_fmt",     is->sdl_sample_fmt,        0);
1940                     av_opt_set_int(is->avr, "out_sample_rate",    is->sdl_sample_rate,       0);
1941
1942                     if ((ret = avresample_open(is->avr)) < 0) {
1943                         fprintf(stderr, "error initializing libavresample\n");
1944                         break;
1945                     }
1946                 }
1947                 is->resample_sample_fmt     = is->frame->format;
1948                 is->resample_channel_layout = is->frame->channel_layout;
1949                 is->resample_sample_rate    = is->frame->sample_rate;
1950             }
1951
1952             if (audio_resample) {
1953                 void *tmp_out;
1954                 int out_samples, out_size, out_linesize;
1955                 int osize      = av_get_bytes_per_sample(is->sdl_sample_fmt);
1956                 int nb_samples = is->frame->nb_samples;
1957
1958                 out_size = av_samples_get_buffer_size(&out_linesize,
1959                                                       is->sdl_channels,
1960                                                       nb_samples,
1961                                                       is->sdl_sample_fmt, 0);
1962                 tmp_out = av_realloc(is->audio_buf1, out_size);
1963                 if (!tmp_out)
1964                     return AVERROR(ENOMEM);
1965                 is->audio_buf1 = tmp_out;
1966
1967                 out_samples = avresample_convert(is->avr,
1968                                                  &is->audio_buf1,
1969                                                  out_linesize, nb_samples,
1970                                                  is->frame->data,
1971                                                  is->frame->linesize[0],
1972                                                  is->frame->nb_samples);
1973                 if (out_samples < 0) {
1974                     fprintf(stderr, "avresample_convert() failed\n");
1975                     break;
1976                 }
1977                 is->audio_buf = is->audio_buf1;
1978                 data_size = out_samples * osize * is->sdl_channels;
1979             } else {
1980                 is->audio_buf = is->frame->data[0];
1981             }
1982
1983             /* the audio clock (updated from packet pts when available) provides the pts for this data */
1984             pts = is->audio_clock;
1985             *pts_ptr = pts;
1986             n = is->sdl_channels * av_get_bytes_per_sample(is->sdl_sample_fmt);
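                 /* advance the clock by the duration of the decoded data:
                  * data_size bytes / (bytes per sample frame * sample rate) */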
1987             is->audio_clock += (double)data_size /
1988                 (double)(n * is->sdl_sample_rate);
1989 #ifdef DEBUG
1990             {
1991                 static double last_clock;
1992                 printf("audio: delay=%0.3f clock=%0.3f pts=%0.3f\n",
1993                        is->audio_clock - last_clock,
1994                        is->audio_clock, pts);
1995                 last_clock = is->audio_clock;
1996             }
1997 #endif
1998             return data_size;
1999         }
2000
2001         /* free the current packet */
2002         if (pkt->data)
2003             av_free_packet(pkt);
2004         memset(pkt_temp, 0, sizeof(*pkt_temp));
2005
2006         if (is->paused || is->audioq.abort_request) {
2007             return -1;
2008         }
2009
2010         /* read next packet */
2011         if ((new_packet = packet_queue_get(&is->audioq, pkt, 1)) < 0)
2012             return -1;
2013
2014         if (pkt->data == flush_pkt.data) {
2015             avcodec_flush_buffers(dec);
2016             flush_complete = 0;
2017         }
2018
2019         *pkt_temp = *pkt;
2020
2021         /* update the audio clock with the packet pts, if available */
2022         if (pkt->pts != AV_NOPTS_VALUE) {
2023             is->audio_clock = av_q2d(is->audio_st->time_base)*pkt->pts;
2024         }
2025     }
2026 }
2027
2028 /* prepare a new audio buffer */
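      /* Roughly: SDL calls this from its audio thread whenever it needs len
       * more bytes; the buffer is refilled from decoded frames (or silence on
       * error), and the callback time is recorded so the audio display can
       * estimate how much of the previous buffer has already been played. */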
2029 static void sdl_audio_callback(void *opaque, Uint8 *stream, int len)
2030 {
2031     VideoState *is = opaque;
2032     int audio_size, len1;
2033     double pts;
2034
2035     audio_callback_time = av_gettime();
2036
2037     while (len > 0) {
2038         if (is->audio_buf_index >= is->audio_buf_size) {
2039             audio_size = audio_decode_frame(is, &pts);
2040             if (audio_size < 0) {
2041                 /* if error, just output silence */
2042                 is->audio_buf      = is->silence_buf;
2043                 is->audio_buf_size = sizeof(is->silence_buf);
2044             } else {
2045                 if (is->show_audio)
2046                     update_sample_display(is, (int16_t *)is->audio_buf, audio_size);
2047                 audio_size = synchronize_audio(is, (int16_t *)is->audio_buf, audio_size,
2048                                                pts);
2049                 is->audio_buf_size = audio_size;
2050             }
2051             is->audio_buf_index = 0;
2052         }
2053         len1 = is->audio_buf_size - is->audio_buf_index;
2054         if (len1 > len)
2055             len1 = len;
2056         memcpy(stream, (uint8_t *)is->audio_buf + is->audio_buf_index, len1);
2057         len -= len1;
2058         stream += len1;
2059         is->audio_buf_index += len1;
2060     }
2061 }
2062
2063 /* open a given stream. Return 0 if OK */
2064 static int stream_component_open(VideoState *is, int stream_index)
2065 {
2066     AVFormatContext *ic = is->ic;
2067     AVCodecContext *avctx;
2068     AVCodec *codec;
2069     SDL_AudioSpec wanted_spec, spec;
2070     AVDictionary *opts;
2071     AVDictionaryEntry *t = NULL;
2072
2073     if (stream_index < 0 || stream_index >= ic->nb_streams)
2074         return -1;
2075     avctx = ic->streams[stream_index]->codec;
2076
2077     opts = filter_codec_opts(codec_opts, avctx->codec_id, ic, ic->streams[stream_index], NULL);
2078
2079     codec = avcodec_find_decoder(avctx->codec_id);
2080     avctx->debug_mv          = debug_mv;
2081     avctx->debug             = debug;
2082     avctx->workaround_bugs   = workaround_bugs;
2083     avctx->idct_algo         = idct;
2084     avctx->skip_frame        = skip_frame;
2085     avctx->skip_idct         = skip_idct;
2086     avctx->skip_loop_filter  = skip_loop_filter;
2087     avctx->error_concealment = error_concealment;
2088
2089     if (fast)   avctx->flags2 |= CODEC_FLAG2_FAST;
2090
2091     if (!av_dict_get(opts, "threads", NULL, 0))
2092         av_dict_set(&opts, "threads", "auto", 0);
2093     if (!codec ||
2094         avcodec_open2(avctx, codec, &opts) < 0)
2095         return -1;
2096     if ((t = av_dict_get(opts, "", NULL, AV_DICT_IGNORE_SUFFIX))) {
2097         av_log(NULL, AV_LOG_ERROR, "Option %s not found.\n", t->key);
2098         return AVERROR_OPTION_NOT_FOUND;
2099     }
2100
2101     /* prepare audio output */
2102     if (avctx->codec_type == AVMEDIA_TYPE_AUDIO) {
2103         is->sdl_sample_rate = avctx->sample_rate;
2104
2105         if (!avctx->channel_layout)
2106             avctx->channel_layout = av_get_default_channel_layout(avctx->channels);
2107         if (!avctx->channel_layout) {
2108             fprintf(stderr, "unable to guess channel layout\n");
2109             return -1;
2110         }
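             /* the SDL output opened below is limited to mono or stereo here;
              * any other source layout gets downmixed to stereo by
              * libavresample before playback */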
2111         if (avctx->channels == 1)
2112             is->sdl_channel_layout = AV_CH_LAYOUT_MONO;
2113         else
2114             is->sdl_channel_layout = AV_CH_LAYOUT_STEREO;
2115         is->sdl_channels = av_get_channel_layout_nb_channels(is->sdl_channel_layout);
2116
2117         wanted_spec.format = AUDIO_S16SYS;
2118         wanted_spec.freq = is->sdl_sample_rate;
2119         wanted_spec.channels = is->sdl_channels;
2120         wanted_spec.silence = 0;
2121         wanted_spec.samples = SDL_AUDIO_BUFFER_SIZE;
2122         wanted_spec.callback = sdl_audio_callback;
2123         wanted_spec.userdata = is;
2124         if (SDL_OpenAudio(&wanted_spec, &spec) < 0) {
2125             fprintf(stderr, "SDL_OpenAudio: %s\n", SDL_GetError());
2126             return -1;
2127         }
2128         is->audio_hw_buf_size = spec.size;
2129         is->sdl_sample_fmt          = AV_SAMPLE_FMT_S16;
2130         is->resample_sample_fmt     = is->sdl_sample_fmt;
2131         is->resample_channel_layout = avctx->channel_layout;
2132         is->resample_sample_rate    = avctx->sample_rate;
2133     }
2134
2135     ic->streams[stream_index]->discard = AVDISCARD_DEFAULT;
2136     switch (avctx->codec_type) {
2137     case AVMEDIA_TYPE_AUDIO:
2138         is->audio_stream = stream_index;
2139         is->audio_st = ic->streams[stream_index];
2140         is->audio_buf_size  = 0;
2141         is->audio_buf_index = 0;
2142
2143         /* init averaging filter */
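         /* first-order IIR: the coefficient is chosen so that a diff sample's
          * weight decays to about 1% after AUDIO_DIFF_AVG_NB frames */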
2144         is->audio_diff_avg_coef  = exp(log(0.01) / AUDIO_DIFF_AVG_NB);
2145         is->audio_diff_avg_count = 0;
2146         /* since we do not have a precise enough measure of the audio FIFO
2147            fullness, correct audio sync only if the error is larger than this threshold */
2148         is->audio_diff_threshold = 2.0 * SDL_AUDIO_BUFFER_SIZE / avctx->sample_rate;
2149
2150         memset(&is->audio_pkt, 0, sizeof(is->audio_pkt));
2151         packet_queue_init(&is->audioq);
2152         SDL_PauseAudio(0);
2153         break;
2154     case AVMEDIA_TYPE_VIDEO:
2155         is->video_stream = stream_index;
2156         is->video_st = ic->streams[stream_index];
2157
2158         packet_queue_init(&is->videoq);
2159         is->video_tid = SDL_CreateThread(video_thread, is);
2160         break;
2161     case AVMEDIA_TYPE_SUBTITLE:
2162         is->subtitle_stream = stream_index;
2163         is->subtitle_st = ic->streams[stream_index];
2164         packet_queue_init(&is->subtitleq);
2165
2166         is->subtitle_tid = SDL_CreateThread(subtitle_thread, is);
2167         break;
2168     default:
2169         break;
2170     }
2171     return 0;
2172 }
2173
2174 static void stream_component_close(VideoState *is, int stream_index)
2175 {
2176     AVFormatContext *ic = is->ic;
2177     AVCodecContext *avctx;
2178
2179     if (stream_index < 0 || stream_index >= ic->nb_streams)
2180         return;
2181     avctx = ic->streams[stream_index]->codec;
2182
2183     switch (avctx->codec_type) {
2184     case AVMEDIA_TYPE_AUDIO:
2185         packet_queue_abort(&is->audioq);
2186
2187         SDL_CloseAudio();
2188
2189         packet_queue_end(&is->audioq);
2190         av_free_packet(&is->audio_pkt);
2191         if (is->avr)
2192             avresample_free(&is->avr);
2193         av_freep(&is->audio_buf1);
2194         is->audio_buf = NULL;
2195         avcodec_free_frame(&is->frame);
2196
2197         if (is->rdft) {
2198             av_rdft_end(is->rdft);
2199             av_freep(&is->rdft_data);
2200             is->rdft = NULL;
2201             is->rdft_bits = 0;
2202         }
2203         break;
2204     case AVMEDIA_TYPE_VIDEO:
2205         packet_queue_abort(&is->videoq);
2206
2207         /* note: we also signal this mutex to make sure we deblock the
2208            video thread in all cases */
2209         SDL_LockMutex(is->pictq_mutex);
2210         SDL_CondSignal(is->pictq_cond);
2211         SDL_UnlockMutex(is->pictq_mutex);
2212
2213         SDL_WaitThread(is->video_tid, NULL);
2214
2215         packet_queue_end(&is->videoq);
2216         break;
2217     case AVMEDIA_TYPE_SUBTITLE:
2218         packet_queue_abort(&is->subtitleq);
2219
2220         /* note: we also signal this mutex to make sure we deblock the
2221            subtitle thread in all cases */
2222         SDL_LockMutex(is->subpq_mutex);
2223         is->subtitle_stream_changed = 1;
2224
2225         SDL_CondSignal(is->subpq_cond);
2226         SDL_UnlockMutex(is->subpq_mutex);
2227
2228         SDL_WaitThread(is->subtitle_tid, NULL);
2229
2230         packet_queue_end(&is->subtitleq);
2231         break;
2232     default:
2233         break;
2234     }
2235
2236     ic->streams[stream_index]->discard = AVDISCARD_ALL;
2237     avcodec_close(avctx);
2238 #if CONFIG_AVFILTER
2239     free_buffer_pool(&is->buffer_pool);
2240 #endif
2241     switch (avctx->codec_type) {
2242     case AVMEDIA_TYPE_AUDIO:
2243         is->audio_st = NULL;
2244         is->audio_stream = -1;
2245         break;
2246     case AVMEDIA_TYPE_VIDEO:
2247         is->video_st = NULL;
2248         is->video_stream = -1;
2249         break;
2250     case AVMEDIA_TYPE_SUBTITLE:
2251         is->subtitle_st = NULL;
2252         is->subtitle_stream = -1;
2253         break;
2254     default:
2255         break;
2256     }
2257 }
2258
2259 /* since we have only one decoding thread, we can use a global
2260    variable instead of a thread local variable */
2261 static VideoState *global_video_state;
2262
2263 static int decode_interrupt_cb(void *ctx)
2264 {
2265     return global_video_state && global_video_state->abort_request;
2266 }
2267
2268 /* this thread gets the stream from the disk or the network */
2269 static int decode_thread(void *arg)
2270 {
2271     VideoState *is = arg;
2272     AVFormatContext *ic = NULL;
2273     int err, i, ret;
2274     int st_index[AVMEDIA_TYPE_NB];
2275     AVPacket pkt1, *pkt = &pkt1;
2276     int eof = 0;
2277     int pkt_in_play_range = 0;
2278     AVDictionaryEntry *t;
2279     AVDictionary **opts;
2280     int orig_nb_streams;
2281
2282     memset(st_index, -1, sizeof(st_index));
2283     is->video_stream = -1;
2284     is->audio_stream = -1;
2285     is->subtitle_stream = -1;
2286
2287     global_video_state = is;
2288
2289     ic = avformat_alloc_context();
2290     ic->interrupt_callback.callback = decode_interrupt_cb;
2291     err = avformat_open_input(&ic, is->filename, is->iformat, &format_opts);
2292     if (err < 0) {
2293         print_error(is->filename, err);
2294         ret = -1;
2295         goto fail;
2296     }
2297     if ((t = av_dict_get(format_opts, "", NULL, AV_DICT_IGNORE_SUFFIX))) {
2298         av_log(NULL, AV_LOG_ERROR, "Option %s not found.\n", t->key);
2299         ret = AVERROR_OPTION_NOT_FOUND;
2300         goto fail;
2301     }
2302     is->ic = ic;
2303
2304     if (genpts)
2305         ic->flags |= AVFMT_FLAG_GENPTS;
2306
2307     opts = setup_find_stream_info_opts(ic, codec_opts);
2308     orig_nb_streams = ic->nb_streams;
2309
2310     err = avformat_find_stream_info(ic, opts);
2311     if (err < 0) {
2312         fprintf(stderr, "%s: could not find codec parameters\n", is->filename);
2313         ret = -1;
2314         goto fail;
2315     }
2316     for (i = 0; i < orig_nb_streams; i++)
2317         av_dict_free(&opts[i]);
2318     av_freep(&opts);
2319
2320     if (ic->pb)
2321         ic->pb->eof_reached = 0; // FIXME hack, avplay maybe should not use url_feof() to test for the end
2322
2323     if (seek_by_bytes < 0)
2324         seek_by_bytes = !!(ic->iformat->flags & AVFMT_TS_DISCONT);
2325
2326     /* if a start position was requested, seek to it */
2327     if (start_time != AV_NOPTS_VALUE) {
2328         int64_t timestamp;
2329
2330         timestamp = start_time;
2331         /* add the stream start time */
2332         if (ic->start_time != AV_NOPTS_VALUE)
2333             timestamp += ic->start_time;
2334         ret = avformat_seek_file(ic, -1, INT64_MIN, timestamp, INT64_MAX, 0);
2335         if (ret < 0) {
2336             fprintf(stderr, "%s: could not seek to position %0.3f\n",
2337                     is->filename, (double)timestamp / AV_TIME_BASE);
2338         }
2339     }
2340
2341     for (i = 0; i < ic->nb_streams; i++)
2342         ic->streams[i]->discard = AVDISCARD_ALL;
2343     if (!video_disable)
2344         st_index[AVMEDIA_TYPE_VIDEO] =
2345             av_find_best_stream(ic, AVMEDIA_TYPE_VIDEO,
2346                                 wanted_stream[AVMEDIA_TYPE_VIDEO], -1, NULL, 0);
2347     if (!audio_disable)
2348         st_index[AVMEDIA_TYPE_AUDIO] =
2349             av_find_best_stream(ic, AVMEDIA_TYPE_AUDIO,
2350                                 wanted_stream[AVMEDIA_TYPE_AUDIO],
2351                                 st_index[AVMEDIA_TYPE_VIDEO],
2352                                 NULL, 0);
2353     if (!video_disable)
2354         st_index[AVMEDIA_TYPE_SUBTITLE] =
2355             av_find_best_stream(ic, AVMEDIA_TYPE_SUBTITLE,
2356                                 wanted_stream[AVMEDIA_TYPE_SUBTITLE],
2357                                 (st_index[AVMEDIA_TYPE_AUDIO] >= 0 ?
2358                                  st_index[AVMEDIA_TYPE_AUDIO] :
2359                                  st_index[AVMEDIA_TYPE_VIDEO]),
2360                                 NULL, 0);
2361     if (show_status) {
2362         av_dump_format(ic, 0, is->filename, 0);
2363     }
2364
2365     /* open the streams */
2366     if (st_index[AVMEDIA_TYPE_AUDIO] >= 0) {
2367         stream_component_open(is, st_index[AVMEDIA_TYPE_AUDIO]);
2368     }
2369
2370     ret = -1;
2371     if (st_index[AVMEDIA_TYPE_VIDEO] >= 0) {
2372         ret = stream_component_open(is, st_index[AVMEDIA_TYPE_VIDEO]);
2373     }
2374     is->refresh_tid = SDL_CreateThread(refresh_thread, is);
2375     if (ret < 0) {
2376         if (!display_disable)
2377             is->show_audio = 2;
2378     }
2379
2380     if (st_index[AVMEDIA_TYPE_SUBTITLE] >= 0) {
2381         stream_component_open(is, st_index[AVMEDIA_TYPE_SUBTITLE]);
2382     }
2383
2384     if (is->video_stream < 0 && is->audio_stream < 0) {
2385         fprintf(stderr, "%s: could not open codecs\n", is->filename);
2386         ret = -1;
2387         goto fail;
2388     }
2389
2390     for (;;) {
2391         if (is->abort_request)
2392             break;
2393         if (is->paused != is->last_paused) {
2394             is->last_paused = is->paused;
2395             if (is->paused)
2396                 is->read_pause_return = av_read_pause(ic);
2397             else
2398                 av_read_play(ic);
2399         }
2400 #if CONFIG_RTSP_DEMUXER
2401         if (is->paused && !strcmp(ic->iformat->name, "rtsp")) {
2402             /* wait 10 ms to avoid trying to get another packet */
2403             /* XXX: horrible */
2404             SDL_Delay(10);
2405             continue;
2406         }
2407 #endif
2408         if (is->seek_req) {
2409             int64_t seek_target = is->seek_pos;
2410             int64_t seek_min    = is->seek_rel > 0 ? seek_target - is->seek_rel + 2: INT64_MIN;
2411             int64_t seek_max    = is->seek_rel < 0 ? seek_target - is->seek_rel - 2: INT64_MAX;
2412 // FIXME the +-2 is due to rounding being not done in the correct direction in generation
2413 //      of the seek_pos/seek_rel variables
2414
2415             ret = avformat_seek_file(is->ic, -1, seek_min, seek_target, seek_max, is->seek_flags);
2416             if (ret < 0) {
2417                 fprintf(stderr, "%s: error while seeking\n", is->ic->filename);
2418             } else {
2419                 if (is->audio_stream >= 0) {
2420                     packet_queue_flush(&is->audioq);
2421                     packet_queue_put(&is->audioq, &flush_pkt);
2422                 }
2423                 if (is->subtitle_stream >= 0) {
2424                     packet_queue_flush(&is->subtitleq);
2425                     packet_queue_put(&is->subtitleq, &flush_pkt);
2426                 }
2427                 if (is->video_stream >= 0) {
2428                     packet_queue_flush(&is->videoq);
2429                     packet_queue_put(&is->videoq, &flush_pkt);
2430                 }
2431             }
2432             is->seek_req = 0;
2433             eof = 0;
2434         }
2435
2436         /* if the queues are full, no need to read more */
2437         if (!infinite_buffer &&
2438               (is->audioq.size + is->videoq.size + is->subtitleq.size > MAX_QUEUE_SIZE
2439             || (   (is->audioq   .size  > MIN_AUDIOQ_SIZE || is->audio_stream < 0)
2440                 && (is->videoq   .nb_packets > MIN_FRAMES || is->video_stream < 0)
2441                 && (is->subtitleq.nb_packets > MIN_FRAMES || is->subtitle_stream < 0)))) {
2442             /* wait 10 ms */
2443             SDL_Delay(10);
2444             continue;
2445         }
2446         if (eof) {
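                 /* at EOF, queue empty packets so decoders that buffer frames
                  * (CODEC_CAP_DELAY) can drain their remaining output */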
2447             if (is->video_stream >= 0) {
2448                 av_init_packet(pkt);
2449                 pkt->data = NULL;
2450                 pkt->size = 0;
2451                 pkt->stream_index = is->video_stream;
2452                 packet_queue_put(&is->videoq, pkt);
2453             }
2454             if (is->audio_stream >= 0 &&
2455                 is->audio_st->codec->codec->capabilities & CODEC_CAP_DELAY) {
2456                 av_init_packet(pkt);
2457                 pkt->data = NULL;
2458                 pkt->size = 0;
2459                 pkt->stream_index = is->audio_stream;
2460                 packet_queue_put(&is->audioq, pkt);
2461             }
2462             SDL_Delay(10);
2463             if (is->audioq.size + is->videoq.size + is->subtitleq.size == 0) {
2464                 if (loop != 1 && (!loop || --loop)) {
2465                     stream_seek(cur_stream, start_time != AV_NOPTS_VALUE ? start_time : 0, 0, 0);
2466                 } else if (autoexit) {
2467                     ret = AVERROR_EOF;
2468                     goto fail;
2469                 }
2470             }
2471             continue;
2472         }
2473         ret = av_read_frame(ic, pkt);
2474         if (ret < 0) {
2475             if (ret == AVERROR_EOF || (ic->pb && ic->pb->eof_reached))
2476                 eof = 1;
2477             if (ic->pb && ic->pb->error)
2478                 break;
2479             SDL_Delay(100); /* wait for user event */
2480             continue;
2481         }
2482         /* check if packet is in play range specified by user, then queue, otherwise discard */
2483         pkt_in_play_range = duration == AV_NOPTS_VALUE ||
2484                 (pkt->pts - ic->streams[pkt->stream_index]->start_time) *
2485                 av_q2d(ic->streams[pkt->stream_index]->time_base) -
2486                 (double)(start_time != AV_NOPTS_VALUE ? start_time : 0) / 1000000
2487                 <= ((double)duration / 1000000);
2488         if (pkt->stream_index == is->audio_stream && pkt_in_play_range) {
2489             packet_queue_put(&is->audioq, pkt);
2490         } else if (pkt->stream_index == is->video_stream && pkt_in_play_range) {
2491             packet_queue_put(&is->videoq, pkt);
2492         } else if (pkt->stream_index == is->subtitle_stream && pkt_in_play_range) {
2493             packet_queue_put(&is->subtitleq, pkt);
2494         } else {
2495             av_free_packet(pkt);
2496         }
2497     }
2498     /* wait until the end */
2499     while (!is->abort_request) {
2500         SDL_Delay(100);
2501     }
2502
2503     ret = 0;
2504  fail:
2505     /* disable interrupting */
2506     global_video_state = NULL;
2507
2508     /* close each stream */
2509     if (is->audio_stream >= 0)
2510         stream_component_close(is, is->audio_stream);
2511     if (is->video_stream >= 0)
2512         stream_component_close(is, is->video_stream);
2513     if (is->subtitle_stream >= 0)
2514         stream_component_close(is, is->subtitle_stream);
2515     if (is->ic) {
2516         avformat_close_input(&is->ic);
2517     }
2518
2519     if (ret != 0) {
2520         SDL_Event event;
2521
2522         event.type = FF_QUIT_EVENT;
2523         event.user.data1 = is;
2524         SDL_PushEvent(&event);
2525     }
2526     return 0;
2527 }
2528
2529 static VideoState *stream_open(const char *filename, AVInputFormat *iformat)
2530 {
2531     VideoState *is;
2532
2533     is = av_mallocz(sizeof(VideoState));
2534     if (!is)
2535         return NULL;
2536     av_strlcpy(is->filename, filename, sizeof(is->filename));
2537     is->iformat = iformat;
2538     is->ytop    = 0;
2539     is->xleft   = 0;
2540
2541     /* synchronization primitives for the picture and subtitle queues */
2542     is->pictq_mutex = SDL_CreateMutex();
2543     is->pictq_cond  = SDL_CreateCond();
2544
2545     is->subpq_mutex = SDL_CreateMutex();
2546     is->subpq_cond  = SDL_CreateCond();
2547
2548     is->av_sync_type = av_sync_type;
2549     is->parse_tid    = SDL_CreateThread(decode_thread, is);
2550     if (!is->parse_tid) {
2551         av_free(is);
2552         return NULL;
2553     }
2554     return is;
2555 }
2556
2557 static void stream_cycle_channel(VideoState *is, int codec_type)
2558 {
2559     AVFormatContext *ic = is->ic;
2560     int start_index, stream_index;
2561     AVStream *st;
2562
2563     if (codec_type == AVMEDIA_TYPE_VIDEO)
2564         start_index = is->video_stream;
2565     else if (codec_type == AVMEDIA_TYPE_AUDIO)
2566         start_index = is->audio_stream;
2567     else
2568         start_index = is->subtitle_stream;
2569     if (start_index < (codec_type == AVMEDIA_TYPE_SUBTITLE ? -1 : 0))
2570         return;
2571     stream_index = start_index;
2572     for (;;) {
2573         if (++stream_index >= is->ic->nb_streams)
2574         {
2575             if (codec_type == AVMEDIA_TYPE_SUBTITLE)
2576             {
2577                 stream_index = -1;
2578                 goto the_end;
2579             } else
2580                 stream_index = 0;
2581         }
2582         if (stream_index == start_index)
2583             return;
2584         st = ic->streams[stream_index];
2585         if (st->codec->codec_type == codec_type) {
2586             /* check that parameters are OK */
2587             switch (codec_type) {
2588             case AVMEDIA_TYPE_AUDIO:
2589                 if (st->codec->sample_rate != 0 &&
2590                     st->codec->channels != 0)
2591                     goto the_end;
2592                 break;
2593             case AVMEDIA_TYPE_VIDEO:
2594             case AVMEDIA_TYPE_SUBTITLE:
2595                 goto the_end;
2596             default:
2597                 break;
2598             }
2599         }
2600     }
2601  the_end:
2602     stream_component_close(is, start_index);
2603     stream_component_open(is, stream_index);
2604 }
2605
2606
2607 static void toggle_full_screen(void)
2608 {
2609 #if defined(__APPLE__) && SDL_VERSION_ATLEAST(1, 2, 14)
2610     /* OS X needs to empty the picture_queue */
2611     int i;
2612     for (i = 0; i < VIDEO_PICTURE_QUEUE_SIZE; i++)
2613         cur_stream->pictq[i].reallocate = 1;
2614 #endif
2615     is_full_screen = !is_full_screen;
2616     video_open(cur_stream);
2617 }
2618
2619 static void toggle_pause(void)
2620 {
2621     if (cur_stream)
2622         stream_pause(cur_stream);
2623     step = 0;
2624 }
2625
2626 static void step_to_next_frame(void)
2627 {
2628     if (cur_stream) {
2629         /* if the stream is paused, unpause it, then step */
2630         if (cur_stream->paused)
2631             stream_pause(cur_stream);
2632     }
2633     step = 1;
2634 }
2635
2636 static void toggle_audio_display(void)
2637 {
2638     if (cur_stream) {
2639         int bgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0x00);
2640         cur_stream->show_audio = (cur_stream->show_audio + 1) % 3;
2641         fill_rectangle(screen,
2642                        cur_stream->xleft, cur_stream->ytop, cur_stream->width, cur_stream->height,
2643                        bgcolor);
2644         SDL_UpdateRect(screen, cur_stream->xleft, cur_stream->ytop, cur_stream->width, cur_stream->height);
2645     }
2646 }
2647
2648 /* handle an event sent by the GUI */
2649 static void event_loop(void)
2650 {
2651     SDL_Event event;
2652     double incr, pos, frac;
2653
2654     for (;;) {
2655         double x;
2656         SDL_WaitEvent(&event);
2657         switch (event.type) {
2658         case SDL_KEYDOWN:
2659             if (exit_on_keydown) {
2660                 do_exit();
2661                 break;
2662             }
2663             switch (event.key.keysym.sym) {
2664             case SDLK_ESCAPE:
2665             case SDLK_q:
2666                 do_exit();
2667                 break;
2668             case SDLK_f:
2669                 toggle_full_screen();
2670                 break;
2671             case SDLK_p:
2672             case SDLK_SPACE:
2673                 toggle_pause();
2674                 break;
2675             case SDLK_s: // S: Step to next frame
2676                 step_to_next_frame();
2677                 break;
2678             case SDLK_a:
2679                 if (cur_stream)
2680                     stream_cycle_channel(cur_stream, AVMEDIA_TYPE_AUDIO);
2681                 break;
2682             case SDLK_v:
2683                 if (cur_stream)
2684                     stream_cycle_channel(cur_stream, AVMEDIA_TYPE_VIDEO);
2685                 break;
2686             case SDLK_t:
2687                 if (cur_stream)
2688                     stream_cycle_channel(cur_stream, AVMEDIA_TYPE_SUBTITLE);
2689                 break;
2690             case SDLK_w:
2691                 toggle_audio_display();
2692                 break;
2693             case SDLK_LEFT:
2694                 incr = -10.0;
2695                 goto do_seek;
2696             case SDLK_RIGHT:
2697                 incr = 10.0;
2698                 goto do_seek;
2699             case SDLK_UP:
2700                 incr = 60.0;
2701                 goto do_seek;
2702             case SDLK_DOWN:
2703                 incr = -60.0;
2704             do_seek:
2705                 if (cur_stream) {
2706                     if (seek_by_bytes) {
2707                         if (cur_stream->video_stream >= 0 && cur_stream->video_current_pos >= 0) {
2708                             pos = cur_stream->video_current_pos;
2709                         } else if (cur_stream->audio_stream >= 0 && cur_stream->audio_pkt.pos >= 0) {
2710                             pos = cur_stream->audio_pkt.pos;
2711                         } else
2712                             pos = avio_tell(cur_stream->ic->pb);
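                             /* convert the seek increment from seconds to
                              * bytes via the container bit rate, or guess
                              * roughly 1.4 Mbit/s if it is unknown */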
2713                         if (cur_stream->ic->bit_rate)
2714                             incr *= cur_stream->ic->bit_rate / 8.0;
2715                         else
2716                             incr *= 180000.0;
2717                         pos += incr;
2718                         stream_seek(cur_stream, pos, incr, 1);
2719                     } else {
2720                         pos = get_master_clock(cur_stream);
2721                         pos += incr;
2722                         stream_seek(cur_stream, (int64_t)(pos * AV_TIME_BASE), (int64_t)(incr * AV_TIME_BASE), 0);
2723                     }
2724                 }
2725                 break;
2726             default:
2727                 break;
2728             }
2729             break;
2730         case SDL_MOUSEBUTTONDOWN:
2731             if (exit_on_mousedown) {
2732                 do_exit();
2733                 break;
2734             }
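             /* fall through: a button press is then handled like mouse
              * motion with a known x coordinate */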
2735         case SDL_MOUSEMOTION:
2736             if (event.type == SDL_MOUSEBUTTONDOWN) {
2737                 x = event.button.x;
2738             } else {
2739                 if (event.motion.state != SDL_PRESSED)
2740                     break;
2741                 x = event.motion.x;
2742             }
2743             if (cur_stream) {
2744                 if (seek_by_bytes || cur_stream->ic->duration <= 0) {
2745                     uint64_t size =  avio_size(cur_stream->ic->pb);
2746                     stream_seek(cur_stream, size*x/cur_stream->width, 0, 1);
2747                 } else {
2748                     int64_t ts;
2749                     int ns, hh, mm, ss;
2750                     int tns, thh, tmm, tss;
2751                     tns  = cur_stream->ic->duration / 1000000LL;
2752                     thh  = tns / 3600;
2753                     tmm  = (tns % 3600) / 60;
2754                     tss  = (tns % 60);
2755                     frac = x / cur_stream->width;
2756                     ns   = frac * tns;
2757                     hh   = ns / 3600;
2758                     mm   = (ns % 3600) / 60;
2759                     ss   = (ns % 60);
2760                     fprintf(stderr, "Seek to %2.0f%% (%2d:%02d:%02d) of total duration (%2d:%02d:%02d)       \n", frac*100,
2761                             hh, mm, ss, thh, tmm, tss);
2762                     ts = frac * cur_stream->ic->duration;
2763                     if (cur_stream->ic->start_time != AV_NOPTS_VALUE)
2764                         ts += cur_stream->ic->start_time;
2765                     stream_seek(cur_stream, ts, 0, 0);
2766                 }
2767             }
2768             break;
2769         case SDL_VIDEORESIZE:
2770             if (cur_stream) {
2771                 screen = SDL_SetVideoMode(event.resize.w, event.resize.h, 0,
2772                                           SDL_HWSURFACE|SDL_RESIZABLE|SDL_ASYNCBLIT|SDL_HWACCEL);
2773                 screen_width  = cur_stream->width  = event.resize.w;
2774                 screen_height = cur_stream->height = event.resize.h;
2775             }
2776             break;
2777         case SDL_QUIT:
2778         case FF_QUIT_EVENT:
2779             do_exit();
2780             break;
2781         case FF_ALLOC_EVENT:
2782             video_open(event.user.data1);
2783             alloc_picture(event.user.data1);
2784             break;
2785         case FF_REFRESH_EVENT:
2786             video_refresh_timer(event.user.data1);
2787             cur_stream->refresh = 0;
2788             break;
2789         default:
2790             break;
2791         }
2792     }
2793 }
2794
2795 static int opt_frame_size(void *optctx, const char *opt, const char *arg)
2796 {
2797     av_log(NULL, AV_LOG_ERROR,
2798            "Option '%s' has been removed, use private format options instead\n", opt);
2799     return AVERROR(EINVAL);
2800 }
2801
2802 static int opt_width(void *optctx, const char *opt, const char *arg)
2803 {
2804     screen_width = parse_number_or_die(opt, arg, OPT_INT64, 1, INT_MAX);
2805     return 0;
2806 }
2807
2808 static int opt_height(void *optctx, const char *opt, const char *arg)
2809 {
2810     screen_height = parse_number_or_die(opt, arg, OPT_INT64, 1, INT_MAX);
2811     return 0;
2812 }
2813
2814 static int opt_format(void *optctx, const char *opt, const char *arg)
2815 {
2816     file_iformat = av_find_input_format(arg);
2817     if (!file_iformat) {
2818         fprintf(stderr, "Unknown input format: %s\n", arg);
2819         return AVERROR(EINVAL);
2820     }
2821     return 0;
2822 }
2823
2824 static int opt_frame_pix_fmt(void *optctx, const char *opt, const char *arg)
2825 {
2826     av_log(NULL, AV_LOG_ERROR,
2827            "Option '%s' has been removed, use private format options instead\n", opt);
2828     return AVERROR(EINVAL);
2829 }
2830
2831 static int opt_sync(void *optctx, const char *opt, const char *arg)
2832 {
2833     if (!strcmp(arg, "audio"))
2834         av_sync_type = AV_SYNC_AUDIO_MASTER;
2835     else if (!strcmp(arg, "video"))
2836         av_sync_type = AV_SYNC_VIDEO_MASTER;
2837     else if (!strcmp(arg, "ext"))
2838         av_sync_type = AV_SYNC_EXTERNAL_CLOCK;
2839     else {
2840         fprintf(stderr, "Unknown value for %s: %s\n", opt, arg);
2841         exit(1);
2842     }
2843     return 0;
2844 }
2845
2846 static int opt_seek(void *optctx, const char *opt, const char *arg)
2847 {
2848     start_time = parse_time_or_die(opt, arg, 1);
2849     return 0;
2850 }
2851
2852 static int opt_duration(void *optctx, const char *opt, const char *arg)
2853 {
2854     duration = parse_time_or_die(opt, arg, 1);
2855     return 0;
2856 }
2857
2858 static int opt_debug(void *optctx, const char *opt, const char *arg)
2859 {
2860     av_log_set_level(99);
2861     debug = parse_number_or_die(opt, arg, OPT_INT64, 0, INT_MAX);
2862     return 0;
2863 }
2864
2865 static int opt_vismv(void *optctx, const char *opt, const char *arg)
2866 {
2867     debug_mv = parse_number_or_die(opt, arg, OPT_INT64, INT_MIN, INT_MAX);
2868     return 0;
2869 }
2870
2871 static const OptionDef options[] = {
2872 #include "cmdutils_common_opts.h"
2873     { "x", HAS_ARG, { .func_arg = opt_width }, "force displayed width", "width" },
2874     { "y", HAS_ARG, { .func_arg = opt_height }, "force displayed height", "height" },
2875     { "s", HAS_ARG | OPT_VIDEO, { .func_arg = opt_frame_size }, "set frame size (WxH or abbreviation)", "size" },
2876     { "fs", OPT_BOOL, { &is_full_screen }, "force full screen" },
2877     { "an", OPT_BOOL, { &audio_disable }, "disable audio" },
2878     { "vn", OPT_BOOL, { &video_disable }, "disable video" },
2879     { "ast", OPT_INT | HAS_ARG | OPT_EXPERT, { &wanted_stream[AVMEDIA_TYPE_AUDIO] }, "select desired audio stream", "stream_number" },
2880     { "vst", OPT_INT | HAS_ARG | OPT_EXPERT, { &wanted_stream[AVMEDIA_TYPE_VIDEO] }, "select desired video stream", "stream_number" },
2881     { "sst", OPT_INT | HAS_ARG | OPT_EXPERT, { &wanted_stream[AVMEDIA_TYPE_SUBTITLE] }, "select desired subtitle stream", "stream_number" },
2882     { "ss", HAS_ARG, { .func_arg = opt_seek }, "seek to a given position in seconds", "pos" },
2883     { "t", HAS_ARG, { .func_arg = opt_duration }, "play \"duration\" seconds of audio/video", "duration" },
2884     { "bytes", OPT_INT | HAS_ARG, { &seek_by_bytes }, "seek by bytes 0=off 1=on -1=auto", "val" },
2885     { "nodisp", OPT_BOOL, { &display_disable }, "disable graphical display" },
2886     { "f", HAS_ARG, { .func_arg = opt_format }, "force format", "fmt" },
2887     { "pix_fmt", HAS_ARG | OPT_EXPERT | OPT_VIDEO, { .func_arg = opt_frame_pix_fmt }, "set pixel format", "format" },
2888     { "stats", OPT_BOOL | OPT_EXPERT, { &show_status }, "show status", "" },
2889     { "debug", HAS_ARG | OPT_EXPERT, { .func_arg = opt_debug }, "print specific debug info", "" },
2890     { "bug", OPT_INT | HAS_ARG | OPT_EXPERT, { &workaround_bugs }, "workaround bugs", "" },
2891     { "vismv", HAS_ARG | OPT_EXPERT, { .func_arg = opt_vismv }, "visualize motion vectors", "" },
2892     { "fast", OPT_BOOL | OPT_EXPERT, { &fast }, "non spec compliant optimizations", "" },
2893     { "genpts", OPT_BOOL | OPT_EXPERT, { &genpts }, "generate pts", "" },
2894     { "drp", OPT_INT | HAS_ARG | OPT_EXPERT, { &decoder_reorder_pts }, "let decoder reorder pts 0=off 1=on -1=auto", ""},
2895     { "skiploop", OPT_INT | HAS_ARG | OPT_EXPERT, { &skip_loop_filter }, "", "" },
2896     { "skipframe", OPT_INT | HAS_ARG | OPT_EXPERT, { &skip_frame }, "", "" },
2897     { "skipidct", OPT_INT | HAS_ARG | OPT_EXPERT, { &skip_idct }, "", "" },
2898     { "idct", OPT_INT | HAS_ARG | OPT_EXPERT, { &idct }, "set idct algo",  "algo" },
2899     { "ec", OPT_INT | HAS_ARG | OPT_EXPERT, { &error_concealment }, "set error concealment options",  "bit_mask" },
2900     { "sync", HAS_ARG | OPT_EXPERT, { .func_arg = opt_sync }, "set audio-video sync. type (type=audio/video/ext)", "type" },
2901     { "autoexit", OPT_BOOL | OPT_EXPERT, { &autoexit }, "exit at the end", "" },
2902     { "exitonkeydown", OPT_BOOL | OPT_EXPERT, { &exit_on_keydown }, "exit on key down", "" },
2903     { "exitonmousedown", OPT_BOOL | OPT_EXPERT, { &exit_on_mousedown }, "exit on mouse down", "" },
2904     { "loop", OPT_INT | HAS_ARG | OPT_EXPERT, { &loop }, "set number of times the playback shall be looped", "loop count" },
2905     { "framedrop", OPT_BOOL | OPT_EXPERT, { &framedrop }, "drop frames when cpu is too slow", "" },
2906     { "infbuf", OPT_BOOL | OPT_EXPERT, { &infinite_buffer }, "don't limit the input buffer size (useful with realtime streams)", "" },
2907     { "window_title", OPT_STRING | HAS_ARG, { &window_title }, "set window title", "window title" },
2908 #if CONFIG_AVFILTER
2909     { "vf", OPT_STRING | HAS_ARG, { &vfilters }, "video filters", "filter list" },
2910 #endif
2911     { "rdftspeed", OPT_INT | HAS_ARG| OPT_AUDIO | OPT_EXPERT, { &rdftspeed }, "rdft speed", "msecs" },
2912     { "default", HAS_ARG | OPT_AUDIO | OPT_VIDEO | OPT_EXPERT, { opt_default }, "generic catch all option", "" },
2913     { "i", 0, { NULL }, "avconv compatibility dummy option", ""},
2914     { NULL, },
2915 };
2916
2917 static void show_usage(void)
2918 {
2919     printf("Simple media player\n");
2920     printf("usage: %s [options] input_file\n", program_name);
2921     printf("\n");
2922 }
2923
2924 void show_help_default(const char *opt, const char *arg)
2925 {
2926     av_log_set_callback(log_callback_help);
2927     show_usage();
2928     show_help_options(options, "Main options:", 0, OPT_EXPERT, 0);
2929     show_help_options(options, "Advanced options:", OPT_EXPERT, 0, 0);
2930     printf("\n");
2931     show_help_children(avcodec_get_class(), AV_OPT_FLAG_DECODING_PARAM);
2932     show_help_children(avformat_get_class(), AV_OPT_FLAG_DECODING_PARAM);
2933 #if !CONFIG_AVFILTER
2934     show_help_children(sws_get_class(), AV_OPT_FLAG_ENCODING_PARAM);
2935 #endif
2936     printf("\nWhile playing:\n"
2937            "q, ESC              quit\n"
2938            "f                   toggle full screen\n"
2939            "p, SPC              pause\n"
2940            "a                   cycle audio channel\n"
2941            "v                   cycle video channel\n"
2942            "t                   cycle subtitle channel\n"
2943            "w                   show audio waves\n"
2944            "s                   activate frame-step mode\n"
2945            "left/right          seek backward/forward 10 seconds\n"
2946            "down/up             seek backward/forward 1 minute\n"
2947            "mouse click         seek to percentage in file corresponding to fraction of width\n"
2948            );
2949 }
2950
2951 static void opt_input_file(void *optctx, const char *filename)
2952 {
2953     if (input_filename) {
2954         fprintf(stderr, "Argument '%s' provided as input filename, but '%s' was already specified.\n",
2955                 filename, input_filename);
2956         exit(1);
2957     }
2958     if (!strcmp(filename, "-"))
2959         filename = "pipe:";
2960     input_filename = filename;
2961 }
2962
2963 /* the program entry point */
2964 int main(int argc, char **argv)
2965 {
2966     int flags;
2967
2968     av_log_set_flags(AV_LOG_SKIP_REPEATED);
2969     parse_loglevel(argc, argv, options);
2970
2971     /* register all codecs, demuxers and protocols */
2972     avcodec_register_all();
2973 #if CONFIG_AVDEVICE
2974     avdevice_register_all();
2975 #endif
2976 #if CONFIG_AVFILTER
2977     avfilter_register_all();
2978 #endif
2979     av_register_all();
2980     avformat_network_init();
2981
2982     init_opts();
2983
2984     show_banner();
2985
2986     parse_options(NULL, argc, argv, options, opt_input_file);
2987
2988     if (!input_filename) {
2989         show_usage();
2990         fprintf(stderr, "An input file must be specified\n");
2991         fprintf(stderr, "Use -h to get full help or, even better, run 'man %s'\n", program_name);
2992         exit(1);
2993     }
2994
2995     if (display_disable) {
2996         video_disable = 1;
2997     }
2998     flags = SDL_INIT_VIDEO | SDL_INIT_AUDIO | SDL_INIT_TIMER;
2999 #if !defined(__MINGW32__) && !defined(__APPLE__)
3000     flags |= SDL_INIT_EVENTTHREAD; /* Not supported on Windows or Mac OS X */
3001 #endif
3002     if (SDL_Init (flags)) {
3003         fprintf(stderr, "Could not initialize SDL - %s\n", SDL_GetError());
3004         exit(1);
3005     }
3006
3007     if (!display_disable) {
3008 #if HAVE_SDL_VIDEO_SIZE
3009         const SDL_VideoInfo *vi = SDL_GetVideoInfo();
3010         fs_screen_width = vi->current_w;
3011         fs_screen_height = vi->current_h;
3012 #endif
3013     }
3014
3015     SDL_EventState(SDL_ACTIVEEVENT, SDL_IGNORE);
3016     SDL_EventState(SDL_SYSWMEVENT, SDL_IGNORE);
3017     SDL_EventState(SDL_USEREVENT, SDL_IGNORE);
3018
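     /* flush_pkt is a sentinel: the decode threads compare a queued packet's
      * data pointer against flush_pkt.data and, on a match (queued after a
      * seek), call avcodec_flush_buffers() instead of decoding it. */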
3019     av_init_packet(&flush_pkt);
3020     flush_pkt.data = (uint8_t *)"FLUSH";
3021
3022     cur_stream = stream_open(input_filename, file_iformat);
3023
3024     event_loop();
3025
3026     /* never returns */
3027
3028     return 0;
3029 }