1 /*
2  * avplay : Simple Media Player based on the Libav libraries
3  * Copyright (c) 2003 Fabrice Bellard
4  *
5  * This file is part of Libav.
6  *
7  * Libav is free software; you can redistribute it and/or
8  * modify it under the terms of the GNU Lesser General Public
9  * License as published by the Free Software Foundation; either
10  * version 2.1 of the License, or (at your option) any later version.
11  *
12  * Libav is distributed in the hope that it will be useful,
13  * but WITHOUT ANY WARRANTY; without even the implied warranty of
14  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
15  * Lesser General Public License for more details.
16  *
17  * You should have received a copy of the GNU Lesser General Public
18  * License along with Libav; if not, write to the Free Software
19  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
20  */
21
22 #include "config.h"
23 #include <inttypes.h>
24 #include <math.h>
25 #include <limits.h>
26 #include "libavutil/avstring.h"
27 #include "libavutil/colorspace.h"
28 #include "libavutil/mathematics.h"
29 #include "libavutil/pixdesc.h"
30 #include "libavutil/imgutils.h"
31 #include "libavutil/dict.h"
32 #include "libavutil/parseutils.h"
33 #include "libavutil/samplefmt.h"
34 #include "libavutil/time.h"
35 #include "libavformat/avformat.h"
36 #include "libavdevice/avdevice.h"
37 #include "libswscale/swscale.h"
38 #include "libavresample/avresample.h"
39 #include "libavutil/opt.h"
40 #include "libavcodec/avfft.h"
41
42 #if CONFIG_AVFILTER
43 # include "libavfilter/avfilter.h"
44 # include "libavfilter/avfiltergraph.h"
45 # include "libavfilter/buffersink.h"
46 # include "libavfilter/buffersrc.h"
47 #endif
48
49 #include "cmdutils.h"
50
51 #include <SDL.h>
52 #include <SDL_thread.h>
53
54 #ifdef __MINGW32__
55 #undef main /* We don't want SDL to override our main() */
56 #endif
57
58 #include <assert.h>
59
60 const char program_name[] = "avplay";
61 const int program_birth_year = 2003;
62
63 #define MAX_QUEUE_SIZE (15 * 1024 * 1024)
64 #define MIN_AUDIOQ_SIZE (20 * 16 * 1024)
65 #define MIN_FRAMES 5
66
67 /* SDL audio buffer size, in samples. Should be small to have precise
68    A/V sync as SDL does not have hardware buffer fullness info. */
69 #define SDL_AUDIO_BUFFER_SIZE 1024
70
71 /* no AV sync correction is done if below the AV sync threshold */
72 #define AV_SYNC_THRESHOLD 0.01
73 /* no AV correction is done if the error is too big */
74 #define AV_NOSYNC_THRESHOLD 10.0
75
76 #define FRAME_SKIP_FACTOR 0.05
77
78 /* maximum audio speed change to get correct sync */
79 #define SAMPLE_CORRECTION_PERCENT_MAX 10
80
81 /* we use about AUDIO_DIFF_AVG_NB A-V differences to make the average */
82 #define AUDIO_DIFF_AVG_NB   20
83
84 /* NOTE: the size must be big enough to compensate for the hardware audio buffer size */
85 #define SAMPLE_ARRAY_SIZE (2 * 65536)
86
87 static int sws_flags = SWS_BICUBIC;
88
89 typedef struct PacketQueue {
90     AVPacketList *first_pkt, *last_pkt;
91     int nb_packets;
92     int size;
93     int abort_request;
94     SDL_mutex *mutex;
95     SDL_cond *cond;
96 } PacketQueue;
97
98 #define VIDEO_PICTURE_QUEUE_SIZE 2
99 #define SUBPICTURE_QUEUE_SIZE 4
100
101 typedef struct VideoPicture {
102     double pts;                                  ///< presentation time stamp for this picture
103     double target_clock;                         ///< av_gettime() time at which this should be displayed ideally
104     int64_t pos;                                 ///< byte position in file
105     SDL_Overlay *bmp;
106     int width, height; /* source height & width */
107     int allocated;
108     int reallocate;
109     enum PixelFormat pix_fmt;
110
111 #if CONFIG_AVFILTER
112     AVFilterBufferRef *picref;
113 #endif
114 } VideoPicture;
115
116 typedef struct SubPicture {
117     double pts; /* presentation time stamp for this picture */
118     AVSubtitle sub;
119 } SubPicture;
120
121 enum {
122     AV_SYNC_AUDIO_MASTER, /* default choice */
123     AV_SYNC_VIDEO_MASTER,
124     AV_SYNC_EXTERNAL_CLOCK, /* synchronize to an external clock */
125 };
126
127 typedef struct VideoState {
128     SDL_Thread *parse_tid;
129     SDL_Thread *video_tid;
130     SDL_Thread *refresh_tid;
131     AVInputFormat *iformat;
132     int no_background;
133     int abort_request;
134     int paused;
135     int last_paused;
136     int seek_req;
137     int seek_flags;
138     int64_t seek_pos;
139     int64_t seek_rel;
140     int read_pause_return;
141     AVFormatContext *ic;
142
143     int audio_stream;
144
145     int av_sync_type;
146     double external_clock; /* external clock base */
147     int64_t external_clock_time;
148
149     double audio_clock;
150     double audio_diff_cum; /* used for AV difference average computation */
151     double audio_diff_avg_coef;
152     double audio_diff_threshold;
153     int audio_diff_avg_count;
154     AVStream *audio_st;
155     PacketQueue audioq;
156     int audio_hw_buf_size;
157     uint8_t silence_buf[SDL_AUDIO_BUFFER_SIZE];
158     uint8_t *audio_buf;
159     uint8_t *audio_buf1;
160     unsigned int audio_buf_size; /* in bytes */
161     int audio_buf_index; /* in bytes */
162     AVPacket audio_pkt_temp;
163     AVPacket audio_pkt;
164     enum AVSampleFormat sdl_sample_fmt;
165     uint64_t sdl_channel_layout;
166     int sdl_channels;
167     enum AVSampleFormat resample_sample_fmt;
168     uint64_t resample_channel_layout;
169     AVAudioResampleContext *avr;
170     AVFrame *frame;
171
172     int show_audio; /* if true, display audio samples */
173     int16_t sample_array[SAMPLE_ARRAY_SIZE];
174     int sample_array_index;
175     int last_i_start;
176     RDFTContext *rdft;
177     int rdft_bits;
178     FFTSample *rdft_data;
179     int xpos;
180
181     SDL_Thread *subtitle_tid;
182     int subtitle_stream;
183     int subtitle_stream_changed;
184     AVStream *subtitle_st;
185     PacketQueue subtitleq;
186     SubPicture subpq[SUBPICTURE_QUEUE_SIZE];
187     int subpq_size, subpq_rindex, subpq_windex;
188     SDL_mutex *subpq_mutex;
189     SDL_cond *subpq_cond;
190
191     double frame_timer;
192     double frame_last_pts;
193     double frame_last_delay;
194     double video_clock;                          ///< pts of last decoded frame / predicted pts of next decoded frame
195     int video_stream;
196     AVStream *video_st;
197     PacketQueue videoq;
198     double video_current_pts;                    ///< current displayed pts (different from video_clock if frame fifos are used)
199     double video_current_pts_drift;              ///< video_current_pts - time (av_gettime) at which we updated video_current_pts - used to have running video pts
200     int64_t video_current_pos;                   ///< current displayed file pos
201     VideoPicture pictq[VIDEO_PICTURE_QUEUE_SIZE];
202     int pictq_size, pictq_rindex, pictq_windex;
203     SDL_mutex *pictq_mutex;
204     SDL_cond *pictq_cond;
205 #if !CONFIG_AVFILTER
206     struct SwsContext *img_convert_ctx;
207 #endif
208
209     //    QETimer *video_timer;
210     char filename[1024];
211     int width, height, xleft, ytop;
212
213     PtsCorrectionContext pts_ctx;
214
215 #if CONFIG_AVFILTER
216     AVFilterContext *in_video_filter;           ///< the first filter in the video chain
217     AVFilterContext *out_video_filter;          ///< the last filter in the video chain
218     int use_dr1;
219     FrameBuffer *buffer_pool;
220 #endif
221
222     float skip_frames;
223     float skip_frames_index;
224     int refresh;
225 } VideoState;
226
227 static void show_help(void);
228
229 /* options specified by the user */
230 static AVInputFormat *file_iformat;
231 static const char *input_filename;
232 static const char *window_title;
233 static int fs_screen_width;
234 static int fs_screen_height;
235 static int screen_width  = 0;
236 static int screen_height = 0;
237 static int audio_disable;
238 static int video_disable;
239 static int wanted_stream[AVMEDIA_TYPE_NB] = {
240     [AVMEDIA_TYPE_AUDIO]    = -1,
241     [AVMEDIA_TYPE_VIDEO]    = -1,
242     [AVMEDIA_TYPE_SUBTITLE] = -1,
243 };
244 static int seek_by_bytes = -1;
245 static int display_disable;
246 static int show_status = 1;
247 static int av_sync_type = AV_SYNC_AUDIO_MASTER;
248 static int64_t start_time = AV_NOPTS_VALUE;
249 static int64_t duration = AV_NOPTS_VALUE;
250 static int debug = 0;
251 static int debug_mv = 0;
252 static int step = 0;
253 static int workaround_bugs = 1;
254 static int fast = 0;
255 static int genpts = 0;
256 static int idct = FF_IDCT_AUTO;
257 static enum AVDiscard skip_frame       = AVDISCARD_DEFAULT;
258 static enum AVDiscard skip_idct        = AVDISCARD_DEFAULT;
259 static enum AVDiscard skip_loop_filter = AVDISCARD_DEFAULT;
260 static int error_concealment = 3;
261 static int decoder_reorder_pts = -1;
262 static int autoexit;
263 static int exit_on_keydown;
264 static int exit_on_mousedown;
265 static int loop = 1;
266 static int framedrop = 1;
267 static int infinite_buffer = 0;
268
269 static int rdftspeed = 20;
270 #if CONFIG_AVFILTER
271 static char *vfilters = NULL;
272 #endif
273
274 /* current context */
275 static int is_full_screen;
276 static VideoState *cur_stream;
277 static int64_t audio_callback_time;
278
279 static AVPacket flush_pkt;
280
281 #define FF_ALLOC_EVENT   (SDL_USEREVENT)
282 #define FF_REFRESH_EVENT (SDL_USEREVENT + 1)
283 #define FF_QUIT_EVENT    (SDL_USEREVENT + 2)
284
285 static SDL_Surface *screen;
286
287 void exit_program(int ret)
288 {
289     exit(ret);
290 }
291
292 static int packet_queue_put(PacketQueue *q, AVPacket *pkt);
293
294 /* packet queue handling */
295 static void packet_queue_init(PacketQueue *q)
296 {
297     memset(q, 0, sizeof(PacketQueue));
298     q->mutex = SDL_CreateMutex();
299     q->cond = SDL_CreateCond();
300     packet_queue_put(q, &flush_pkt);
301 }
302
303 static void packet_queue_flush(PacketQueue *q)
304 {
305     AVPacketList *pkt, *pkt1;
306
307     SDL_LockMutex(q->mutex);
308     for (pkt = q->first_pkt; pkt != NULL; pkt = pkt1) {
309         pkt1 = pkt->next;
310         av_free_packet(&pkt->pkt);
311         av_freep(&pkt);
312     }
313     q->last_pkt = NULL;
314     q->first_pkt = NULL;
315     q->nb_packets = 0;
316     q->size = 0;
317     SDL_UnlockMutex(q->mutex);
318 }
319
320 static void packet_queue_end(PacketQueue *q)
321 {
322     packet_queue_flush(q);
323     SDL_DestroyMutex(q->mutex);
324     SDL_DestroyCond(q->cond);
325 }
326
327 static int packet_queue_put(PacketQueue *q, AVPacket *pkt)
328 {
329     AVPacketList *pkt1;
330
331     /* duplicate the packet */
332     if (pkt != &flush_pkt && av_dup_packet(pkt) < 0)
333         return -1;
334
335     pkt1 = av_malloc(sizeof(AVPacketList));
336     if (!pkt1)
337         return -1;
338     pkt1->pkt = *pkt;
339     pkt1->next = NULL;
340
341
342     SDL_LockMutex(q->mutex);
343
344     if (!q->last_pkt)
345
346         q->first_pkt = pkt1;
347     else
348         q->last_pkt->next = pkt1;
349     q->last_pkt = pkt1;
350     q->nb_packets++;
351     q->size += pkt1->pkt.size + sizeof(*pkt1);
352     /* XXX: should duplicate packet data in DV case */
353     SDL_CondSignal(q->cond);
354
355     SDL_UnlockMutex(q->mutex);
356     return 0;
357 }
358
359 static void packet_queue_abort(PacketQueue *q)
360 {
361     SDL_LockMutex(q->mutex);
362
363     q->abort_request = 1;
364
365     SDL_CondSignal(q->cond);
366
367     SDL_UnlockMutex(q->mutex);
368 }
369
370 /* return < 0 if aborted, 0 if no packet and > 0 if packet.  */
371 static int packet_queue_get(PacketQueue *q, AVPacket *pkt, int block)
372 {
373     AVPacketList *pkt1;
374     int ret;
375
376     SDL_LockMutex(q->mutex);
377
378     for (;;) {
379         if (q->abort_request) {
380             ret = -1;
381             break;
382         }
383
384         pkt1 = q->first_pkt;
385         if (pkt1) {
386             q->first_pkt = pkt1->next;
387             if (!q->first_pkt)
388                 q->last_pkt = NULL;
389             q->nb_packets--;
390             q->size -= pkt1->pkt.size + sizeof(*pkt1);
391             *pkt = pkt1->pkt;
392             av_free(pkt1);
393             ret = 1;
394             break;
395         } else if (!block) {
396             ret = 0;
397             break;
398         } else {
399             SDL_CondWait(q->cond, q->mutex);
400         }
401     }
402     SDL_UnlockMutex(q->mutex);
403     return ret;
404 }
405
406 static inline void fill_rectangle(SDL_Surface *screen,
407                                   int x, int y, int w, int h, int color)
408 {
409     SDL_Rect rect;
410     rect.x = x;
411     rect.y = y;
412     rect.w = w;
413     rect.h = h;
414     SDL_FillRect(screen, &rect, color);
415 }
416
417 #define ALPHA_BLEND(a, oldp, newp, s)\
418 ((((oldp << s) * (255 - (a))) + (newp * (a))) / (255 << s))
419
420 #define RGBA_IN(r, g, b, a, s)\
421 {\
422     unsigned int v = ((const uint32_t *)(s))[0];\
423     a = (v >> 24) & 0xff;\
424     r = (v >> 16) & 0xff;\
425     g = (v >> 8) & 0xff;\
426     b = v & 0xff;\
427 }
428
429 #define YUVA_IN(y, u, v, a, s, pal)\
430 {\
431     unsigned int val = ((const uint32_t *)(pal))[*(const uint8_t*)(s)];\
432     a = (val >> 24) & 0xff;\
433     y = (val >> 16) & 0xff;\
434     u = (val >> 8) & 0xff;\
435     v = val & 0xff;\
436 }
437
438 #define YUVA_OUT(d, y, u, v, a)\
439 {\
440     ((uint32_t *)(d))[0] = (a << 24) | (y << 16) | (u << 8) | v;\
441 }
442
443
444 #define BPP 1
445
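/* Alpha-blend a palettized subtitle rectangle (YCrCb palette) onto a YUV420P
   destination picture, averaging the subtitle chroma to match the subsampled
   chroma planes. */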
446 static void blend_subrect(AVPicture *dst, const AVSubtitleRect *rect, int imgw, int imgh)
447 {
448     int wrap, wrap3, width2, skip2;
449     int y, u, v, a, u1, v1, a1, w, h;
450     uint8_t *lum, *cb, *cr;
451     const uint8_t *p;
452     const uint32_t *pal;
453     int dstx, dsty, dstw, dsth;
454
455     dstw = av_clip(rect->w, 0, imgw);
456     dsth = av_clip(rect->h, 0, imgh);
457     dstx = av_clip(rect->x, 0, imgw - dstw);
458     dsty = av_clip(rect->y, 0, imgh - dsth);
459     lum = dst->data[0] + dsty * dst->linesize[0];
460     cb  = dst->data[1] + (dsty >> 1) * dst->linesize[1];
461     cr  = dst->data[2] + (dsty >> 1) * dst->linesize[2];
462
463     width2 = ((dstw + 1) >> 1) + (dstx & ~dstw & 1);
464     skip2 = dstx >> 1;
465     wrap = dst->linesize[0];
466     wrap3 = rect->pict.linesize[0];
467     p = rect->pict.data[0];
468     pal = (const uint32_t *)rect->pict.data[1];  /* Now in YCrCb! */
469
470     if (dsty & 1) {
471         lum += dstx;
472         cb += skip2;
473         cr += skip2;
474
475         if (dstx & 1) {
476             YUVA_IN(y, u, v, a, p, pal);
477             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
478             cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
479             cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
480             cb++;
481             cr++;
482             lum++;
483             p += BPP;
484         }
485         for (w = dstw - (dstx & 1); w >= 2; w -= 2) {
486             YUVA_IN(y, u, v, a, p, pal);
487             u1 = u;
488             v1 = v;
489             a1 = a;
490             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
491
492             YUVA_IN(y, u, v, a, p + BPP, pal);
493             u1 += u;
494             v1 += v;
495             a1 += a;
496             lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
497             cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
498             cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
499             cb++;
500             cr++;
501             p += 2 * BPP;
502             lum += 2;
503         }
504         if (w) {
505             YUVA_IN(y, u, v, a, p, pal);
506             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
507             cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
508             cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
509             p++;
510             lum++;
511         }
512         p += wrap3 - dstw * BPP;
513         lum += wrap - dstw - dstx;
514         cb += dst->linesize[1] - width2 - skip2;
515         cr += dst->linesize[2] - width2 - skip2;
516     }
517     for (h = dsth - (dsty & 1); h >= 2; h -= 2) {
518         lum += dstx;
519         cb += skip2;
520         cr += skip2;
521
522         if (dstx & 1) {
523             YUVA_IN(y, u, v, a, p, pal);
524             u1 = u;
525             v1 = v;
526             a1 = a;
527             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
528             p += wrap3;
529             lum += wrap;
530             YUVA_IN(y, u, v, a, p, pal);
531             u1 += u;
532             v1 += v;
533             a1 += a;
534             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
535             cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
536             cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
537             cb++;
538             cr++;
539             p += -wrap3 + BPP;
540             lum += -wrap + 1;
541         }
542         for (w = dstw - (dstx & 1); w >= 2; w -= 2) {
543             YUVA_IN(y, u, v, a, p, pal);
544             u1 = u;
545             v1 = v;
546             a1 = a;
547             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
548
549             YUVA_IN(y, u, v, a, p + BPP, pal);
550             u1 += u;
551             v1 += v;
552             a1 += a;
553             lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
554             p += wrap3;
555             lum += wrap;
556
557             YUVA_IN(y, u, v, a, p, pal);
558             u1 += u;
559             v1 += v;
560             a1 += a;
561             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
562
563             YUVA_IN(y, u, v, a, p + BPP, pal);
564             u1 += u;
565             v1 += v;
566             a1 += a;
567             lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
568
569             cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 2);
570             cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 2);
571
572             cb++;
573             cr++;
574             p += -wrap3 + 2 * BPP;
575             lum += -wrap + 2;
576         }
577         if (w) {
578             YUVA_IN(y, u, v, a, p, pal);
579             u1 = u;
580             v1 = v;
581             a1 = a;
582             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
583             p += wrap3;
584             lum += wrap;
585             YUVA_IN(y, u, v, a, p, pal);
586             u1 += u;
587             v1 += v;
588             a1 += a;
589             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
590             cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
591             cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
592             cb++;
593             cr++;
594             p += -wrap3 + BPP;
595             lum += -wrap + 1;
596         }
597         p += wrap3 + (wrap3 - dstw * BPP);
598         lum += wrap + (wrap - dstw - dstx);
599         cb += dst->linesize[1] - width2 - skip2;
600         cr += dst->linesize[2] - width2 - skip2;
601     }
602     /* handle odd height */
603     if (h) {
604         lum += dstx;
605         cb += skip2;
606         cr += skip2;
607
608         if (dstx & 1) {
609             YUVA_IN(y, u, v, a, p, pal);
610             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
611             cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
612             cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
613             cb++;
614             cr++;
615             lum++;
616             p += BPP;
617         }
618         for (w = dstw - (dstx & 1); w >= 2; w -= 2) {
619             YUVA_IN(y, u, v, a, p, pal);
620             u1 = u;
621             v1 = v;
622             a1 = a;
623             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
624
625             YUVA_IN(y, u, v, a, p + BPP, pal);
626             u1 += u;
627             v1 += v;
628             a1 += a;
629             lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
630             cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
631             cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
632             cb++;
633             cr++;
634             p += 2 * BPP;
635             lum += 2;
636         }
637         if (w) {
638             YUVA_IN(y, u, v, a, p, pal);
639             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
640             cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
641             cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
642         }
643     }
644 }
645
646 static void free_subpicture(SubPicture *sp)
647 {
648     avsubtitle_free(&sp->sub);
649 }
650
651 static void video_image_display(VideoState *is)
652 {
653     VideoPicture *vp;
654     SubPicture *sp;
655     AVPicture pict;
656     float aspect_ratio;
657     int width, height, x, y;
658     SDL_Rect rect;
659     int i;
660
661     vp = &is->pictq[is->pictq_rindex];
662     if (vp->bmp) {
663 #if CONFIG_AVFILTER
664          if (vp->picref->video->pixel_aspect.num == 0)
665              aspect_ratio = 0;
666          else
667              aspect_ratio = av_q2d(vp->picref->video->pixel_aspect);
668 #else
669
670         /* XXX: use variable in the frame */
671         if (is->video_st->sample_aspect_ratio.num)
672             aspect_ratio = av_q2d(is->video_st->sample_aspect_ratio);
673         else if (is->video_st->codec->sample_aspect_ratio.num)
674             aspect_ratio = av_q2d(is->video_st->codec->sample_aspect_ratio);
675         else
676             aspect_ratio = 0;
677 #endif
678         if (aspect_ratio <= 0.0)
679             aspect_ratio = 1.0;
680         aspect_ratio *= (float)vp->width / (float)vp->height;
681
682         if (is->subtitle_st)
683         {
684             if (is->subpq_size > 0)
685             {
686                 sp = &is->subpq[is->subpq_rindex];
687
688                 if (vp->pts >= sp->pts + ((float) sp->sub.start_display_time / 1000))
689                 {
690                     SDL_LockYUVOverlay (vp->bmp);
691
692                     pict.data[0] = vp->bmp->pixels[0];
693                     pict.data[1] = vp->bmp->pixels[2];
694                     pict.data[2] = vp->bmp->pixels[1];
695
696                     pict.linesize[0] = vp->bmp->pitches[0];
697                     pict.linesize[1] = vp->bmp->pitches[2];
698                     pict.linesize[2] = vp->bmp->pitches[1];
699
700                     for (i = 0; i < sp->sub.num_rects; i++)
701                         blend_subrect(&pict, sp->sub.rects[i],
702                                       vp->bmp->w, vp->bmp->h);
703
704                     SDL_UnlockYUVOverlay (vp->bmp);
705                 }
706             }
707         }
708
709
710         /* XXX: we assume the screen has a 1:1 pixel aspect ratio */
711         height = is->height;
712         width = ((int)rint(height * aspect_ratio)) & ~1;
713         if (width > is->width) {
714             width = is->width;
715             height = ((int)rint(width / aspect_ratio)) & ~1;
716         }
717         x = (is->width - width) / 2;
718         y = (is->height - height) / 2;
719         is->no_background = 0;
720         rect.x = is->xleft + x;
721         rect.y = is->ytop  + y;
722         rect.w = width;
723         rect.h = height;
724         SDL_DisplayYUVOverlay(vp->bmp, &rect);
725     }
726 }
727
728 /* get the current audio output buffer size, in bytes. With SDL, we
729    cannot have precise information */
730 static int audio_write_get_buf_size(VideoState *is)
731 {
732     return is->audio_buf_size - is->audio_buf_index;
733 }
734
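/* Modulo that always returns a non-negative result; used to wrap indices
   into the circular sample array. */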
735 static inline int compute_mod(int a, int b)
736 {
737     a = a % b;
738     if (a >= 0)
739         return a;
740     else
741         return a + b;
742 }
743
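/* Draw the audio visualization: an oscilloscope-style waveform per channel
   when show_audio == 1, otherwise a scrolling RDFT spectrum. */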
744 static void video_audio_display(VideoState *s)
745 {
746     int i, i_start, x, y1, y, ys, delay, n, nb_display_channels;
747     int ch, channels, h, h2, bgcolor, fgcolor;
748     int64_t time_diff;
749     int rdft_bits, nb_freq;
750
751     for (rdft_bits = 1; (1 << rdft_bits) < 2 * s->height; rdft_bits++)
752         ;
753     nb_freq = 1 << (rdft_bits - 1);
754
755     /* compute display index : center on currently output samples */
756     channels = s->sdl_channels;
757     nb_display_channels = channels;
758     if (!s->paused) {
759         int data_used = s->show_audio == 1 ? s->width : (2 * nb_freq);
760         n = 2 * channels;
761         delay = audio_write_get_buf_size(s);
762         delay /= n;
763
764         /* to be more precise, we take into account the time spent since
765            the last buffer computation */
766         if (audio_callback_time) {
767             time_diff = av_gettime() - audio_callback_time;
768             delay -= (time_diff * s->audio_st->codec->sample_rate) / 1000000;
769         }
770
771         delay += 2 * data_used;
772         if (delay < data_used)
773             delay = data_used;
774
775         i_start= x = compute_mod(s->sample_array_index - delay * channels, SAMPLE_ARRAY_SIZE);
776         if (s->show_audio == 1) {
777             h = INT_MIN;
778             for (i = 0; i < 1000; i += channels) {
779                 int idx = (SAMPLE_ARRAY_SIZE + x - i) % SAMPLE_ARRAY_SIZE;
780                 int a = s->sample_array[idx];
781                 int b = s->sample_array[(idx + 4 * channels) % SAMPLE_ARRAY_SIZE];
782                 int c = s->sample_array[(idx + 5 * channels) % SAMPLE_ARRAY_SIZE];
783                 int d = s->sample_array[(idx + 9 * channels) % SAMPLE_ARRAY_SIZE];
784                 int score = a - d;
785                 if (h < score && (b ^ c) < 0) {
786                     h = score;
787                     i_start = idx;
788                 }
789             }
790         }
791
792         s->last_i_start = i_start;
793     } else {
794         i_start = s->last_i_start;
795     }
796
797     bgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0x00);
798     if (s->show_audio == 1) {
799         fill_rectangle(screen,
800                        s->xleft, s->ytop, s->width, s->height,
801                        bgcolor);
802
803         fgcolor = SDL_MapRGB(screen->format, 0xff, 0xff, 0xff);
804
805         /* total height for one channel */
806         h = s->height / nb_display_channels;
807         /* graph height / 2 */
808         h2 = (h * 9) / 20;
809         for (ch = 0; ch < nb_display_channels; ch++) {
810             i = i_start + ch;
811             y1 = s->ytop + ch * h + (h / 2); /* position of center line */
812             for (x = 0; x < s->width; x++) {
813                 y = (s->sample_array[i] * h2) >> 15;
814                 if (y < 0) {
815                     y = -y;
816                     ys = y1 - y;
817                 } else {
818                     ys = y1;
819                 }
820                 fill_rectangle(screen,
821                                s->xleft + x, ys, 1, y,
822                                fgcolor);
823                 i += channels;
824                 if (i >= SAMPLE_ARRAY_SIZE)
825                     i -= SAMPLE_ARRAY_SIZE;
826             }
827         }
828
829         fgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0xff);
830
831         for (ch = 1; ch < nb_display_channels; ch++) {
832             y = s->ytop + ch * h;
833             fill_rectangle(screen,
834                            s->xleft, y, s->width, 1,
835                            fgcolor);
836         }
837         SDL_UpdateRect(screen, s->xleft, s->ytop, s->width, s->height);
838     } else {
839         nb_display_channels= FFMIN(nb_display_channels, 2);
840         if (rdft_bits != s->rdft_bits) {
841             av_rdft_end(s->rdft);
842             av_free(s->rdft_data);
843             s->rdft = av_rdft_init(rdft_bits, DFT_R2C);
844             s->rdft_bits = rdft_bits;
845             s->rdft_data = av_malloc(4 * nb_freq * sizeof(*s->rdft_data));
846         }
847         {
848             FFTSample *data[2];
849             for (ch = 0; ch < nb_display_channels; ch++) {
850                 data[ch] = s->rdft_data + 2 * nb_freq * ch;
851                 i = i_start + ch;
852                 for (x = 0; x < 2 * nb_freq; x++) {
853                     double w = (x-nb_freq) * (1.0 / nb_freq);
854                     data[ch][x] = s->sample_array[i] * (1.0 - w * w);
855                     i += channels;
856                     if (i >= SAMPLE_ARRAY_SIZE)
857                         i -= SAMPLE_ARRAY_SIZE;
858                 }
859                 av_rdft_calc(s->rdft, data[ch]);
860             }
861             // Least efficient way to do this; we should of course access the data directly, but it's more than fast enough.
862             for (y = 0; y < s->height; y++) {
863                 double w = 1 / sqrt(nb_freq);
864                 int a = sqrt(w * sqrt(data[0][2 * y + 0] * data[0][2 * y + 0] + data[0][2 * y + 1] * data[0][2 * y + 1]));
865                 int b = (nb_display_channels == 2 ) ? sqrt(w * sqrt(data[1][2 * y + 0] * data[1][2 * y + 0]
866                        + data[1][2 * y + 1] * data[1][2 * y + 1])) : a;
867                 a = FFMIN(a, 255);
868                 b = FFMIN(b, 255);
869                 fgcolor = SDL_MapRGB(screen->format, a, b, (a + b) / 2);
870
871                 fill_rectangle(screen,
872                             s->xpos, s->height-y, 1, 1,
873                             fgcolor);
874             }
875         }
876         SDL_UpdateRect(screen, s->xpos, s->ytop, 1, s->height);
877         s->xpos++;
878         if (s->xpos >= s->width)
879             s->xpos= s->xleft;
880     }
881 }
882
883 static int video_open(VideoState *is)
884 {
885     int flags = SDL_HWSURFACE | SDL_ASYNCBLIT | SDL_HWACCEL;
886     int w,h;
887
888     if (is_full_screen) flags |= SDL_FULLSCREEN;
889     else                flags |= SDL_RESIZABLE;
890
891     if (is_full_screen && fs_screen_width) {
892         w = fs_screen_width;
893         h = fs_screen_height;
894     } else if (!is_full_screen && screen_width) {
895         w = screen_width;
896         h = screen_height;
897 #if CONFIG_AVFILTER
898     } else if (is->out_video_filter && is->out_video_filter->inputs[0]) {
899         w = is->out_video_filter->inputs[0]->w;
900         h = is->out_video_filter->inputs[0]->h;
901 #else
902     } else if (is->video_st && is->video_st->codec->width) {
903         w = is->video_st->codec->width;
904         h = is->video_st->codec->height;
905 #endif
906     } else {
907         w = 640;
908         h = 480;
909     }
910     if (screen && is->width == screen->w && screen->w == w
911        && is->height== screen->h && screen->h == h)
912         return 0;
913
914 #if defined(__APPLE__) && !SDL_VERSION_ATLEAST(1, 2, 14)
915     /* setting bits_per_pixel = 0 or 32 causes blank video on OS X and older SDL */
916     screen = SDL_SetVideoMode(w, h, 24, flags);
917 #else
918     screen = SDL_SetVideoMode(w, h, 0, flags);
919 #endif
920     if (!screen) {
921         fprintf(stderr, "SDL: could not set video mode - exiting\n");
922         return -1;
923     }
924     if (!window_title)
925         window_title = input_filename;
926     SDL_WM_SetCaption(window_title, window_title);
927
928     is->width  = screen->w;
929     is->height = screen->h;
930
931     return 0;
932 }
933
934 /* display the current picture, if any */
935 static void video_display(VideoState *is)
936 {
937     if (!screen)
938         video_open(cur_stream);
939     if (is->audio_st && is->show_audio)
940         video_audio_display(is);
941     else if (is->video_st)
942         video_image_display(is);
943 }
944
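/* Periodically push an FF_REFRESH_EVENT so the event loop redraws the display;
   the sleep interval is rdftspeed ms while the audio visualization is shown,
   5 ms otherwise. */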
945 static int refresh_thread(void *opaque)
946 {
947     VideoState *is= opaque;
948     while (!is->abort_request) {
949         SDL_Event event;
950         event.type = FF_REFRESH_EVENT;
951         event.user.data1 = opaque;
952         if (!is->refresh) {
953             is->refresh = 1;
954             SDL_PushEvent(&event);
955         }
956         av_usleep(is->audio_st && is->show_audio ? rdftspeed * 1000 : 5000); // FIXME ideally we should wait the correct time, but SDL's event passing is so slow it would be silly
957     }
958     return 0;
959 }
960
961 /* get the current audio clock value */
962 static double get_audio_clock(VideoState *is)
963 {
964     double pts;
965     int hw_buf_size, bytes_per_sec;
966     pts = is->audio_clock;
967     hw_buf_size = audio_write_get_buf_size(is);
968     bytes_per_sec = 0;
969     if (is->audio_st) {
970         bytes_per_sec = is->audio_st->codec->sample_rate * is->sdl_channels *
971                         av_get_bytes_per_sample(is->sdl_sample_fmt);
972     }
973     if (bytes_per_sec)
974         pts -= (double)hw_buf_size / bytes_per_sec;
975     return pts;
976 }
977
978 /* get the current video clock value */
979 static double get_video_clock(VideoState *is)
980 {
981     if (is->paused) {
982         return is->video_current_pts;
983     } else {
984         return is->video_current_pts_drift + av_gettime() / 1000000.0;
985     }
986 }
987
988 /* get the current external clock value */
989 static double get_external_clock(VideoState *is)
990 {
991     int64_t ti;
992     ti = av_gettime();
993     return is->external_clock + ((ti - is->external_clock_time) * 1e-6);
994 }
995
996 /* get the current master clock value */
997 static double get_master_clock(VideoState *is)
998 {
999     double val;
1000
1001     if (is->av_sync_type == AV_SYNC_VIDEO_MASTER) {
1002         if (is->video_st)
1003             val = get_video_clock(is);
1004         else
1005             val = get_audio_clock(is);
1006     } else if (is->av_sync_type == AV_SYNC_AUDIO_MASTER) {
1007         if (is->audio_st)
1008             val = get_audio_clock(is);
1009         else
1010             val = get_video_clock(is);
1011     } else {
1012         val = get_external_clock(is);
1013     }
1014     return val;
1015 }
1016
1017 /* seek in the stream */
1018 static void stream_seek(VideoState *is, int64_t pos, int64_t rel, int seek_by_bytes)
1019 {
1020     if (!is->seek_req) {
1021         is->seek_pos = pos;
1022         is->seek_rel = rel;
1023         is->seek_flags &= ~AVSEEK_FLAG_BYTE;
1024         if (seek_by_bytes)
1025             is->seek_flags |= AVSEEK_FLAG_BYTE;
1026         is->seek_req = 1;
1027     }
1028 }
1029
1030 /* pause or resume the video */
1031 static void stream_pause(VideoState *is)
1032 {
1033     if (is->paused) {
1034         is->frame_timer += av_gettime() / 1000000.0 + is->video_current_pts_drift - is->video_current_pts;
1035         if (is->read_pause_return != AVERROR(ENOSYS)) {
1036             is->video_current_pts = is->video_current_pts_drift + av_gettime() / 1000000.0;
1037         }
1038         is->video_current_pts_drift = is->video_current_pts - av_gettime() / 1000000.0;
1039     }
1040     is->paused = !is->paused;
1041 }
1042
1043 static double compute_target_time(double frame_current_pts, VideoState *is)
1044 {
1045     double delay, sync_threshold, diff;
1046
1047     /* compute nominal delay */
1048     delay = frame_current_pts - is->frame_last_pts;
1049     if (delay <= 0 || delay >= 10.0) {
1050         /* if incorrect delay, use previous one */
1051         delay = is->frame_last_delay;
1052     } else {
1053         is->frame_last_delay = delay;
1054     }
1055     is->frame_last_pts = frame_current_pts;
1056
1057     /* update delay to follow master synchronisation source */
1058     if (((is->av_sync_type == AV_SYNC_AUDIO_MASTER && is->audio_st) ||
1059          is->av_sync_type == AV_SYNC_EXTERNAL_CLOCK)) {
1060         /* if video is slave, we try to correct big delays by
1061            duplicating or deleting a frame */
1062         diff = get_video_clock(is) - get_master_clock(is);
1063
1064         /* skip or repeat frame. We take into account the
1065            delay to compute the threshold. I still don't know
1066            if it is the best guess */
1067         sync_threshold = FFMAX(AV_SYNC_THRESHOLD, delay);
1068         if (fabs(diff) < AV_NOSYNC_THRESHOLD) {
1069             if (diff <= -sync_threshold)
1070                 delay = 0;
1071             else if (diff >= sync_threshold)
1072                 delay = 2 * delay;
1073         }
1074     }
1075     is->frame_timer += delay;
1076
1077     av_dlog(NULL, "video: delay=%0.3f pts=%0.3f A-V=%f\n",
1078             delay, frame_current_pts, -diff);
1079
1080     return is->frame_timer;
1081 }
1082
1083 /* called to display each frame */
1084 static void video_refresh_timer(void *opaque)
1085 {
1086     VideoState *is = opaque;
1087     VideoPicture *vp;
1088
1089     SubPicture *sp, *sp2;
1090
1091     if (is->video_st) {
1092 retry:
1093         if (is->pictq_size == 0) {
1094             // nothing to do, no picture to display in the queue
1095         } else {
1096             double time = av_gettime() / 1000000.0;
1097             double next_target;
1098             /* dequeue the picture */
1099             vp = &is->pictq[is->pictq_rindex];
1100
1101             if (time < vp->target_clock)
1102                 return;
1103             /* update current video pts */
1104             is->video_current_pts = vp->pts;
1105             is->video_current_pts_drift = is->video_current_pts - time;
1106             is->video_current_pos = vp->pos;
1107             if (is->pictq_size > 1) {
1108                 VideoPicture *nextvp = &is->pictq[(is->pictq_rindex + 1) % VIDEO_PICTURE_QUEUE_SIZE];
1109                 assert(nextvp->target_clock >= vp->target_clock);
1110                 next_target= nextvp->target_clock;
1111             } else {
1112                 next_target = vp->target_clock + is->video_clock - vp->pts; // FIXME pass durations cleanly
1113             }
1114             if (framedrop && time > next_target) {
1115                 is->skip_frames *= 1.0 + FRAME_SKIP_FACTOR;
1116                 if (is->pictq_size > 1 || time > next_target + 0.5) {
1117                     /* update queue size and signal for next picture */
1118                     if (++is->pictq_rindex == VIDEO_PICTURE_QUEUE_SIZE)
1119                         is->pictq_rindex = 0;
1120
1121                     SDL_LockMutex(is->pictq_mutex);
1122                     is->pictq_size--;
1123                     SDL_CondSignal(is->pictq_cond);
1124                     SDL_UnlockMutex(is->pictq_mutex);
1125                     goto retry;
1126                 }
1127             }
1128
1129             if (is->subtitle_st) {
1130                 if (is->subtitle_stream_changed) {
1131                     SDL_LockMutex(is->subpq_mutex);
1132
1133                     while (is->subpq_size) {
1134                         free_subpicture(&is->subpq[is->subpq_rindex]);
1135
1136                         /* update queue size and signal for next picture */
1137                         if (++is->subpq_rindex == SUBPICTURE_QUEUE_SIZE)
1138                             is->subpq_rindex = 0;
1139
1140                         is->subpq_size--;
1141                     }
1142                     is->subtitle_stream_changed = 0;
1143
1144                     SDL_CondSignal(is->subpq_cond);
1145                     SDL_UnlockMutex(is->subpq_mutex);
1146                 } else {
1147                     if (is->subpq_size > 0) {
1148                         sp = &is->subpq[is->subpq_rindex];
1149
1150                         if (is->subpq_size > 1)
1151                             sp2 = &is->subpq[(is->subpq_rindex + 1) % SUBPICTURE_QUEUE_SIZE];
1152                         else
1153                             sp2 = NULL;
1154
1155                         if ((is->video_current_pts > (sp->pts + ((float) sp->sub.end_display_time / 1000)))
1156                                 || (sp2 && is->video_current_pts > (sp2->pts + ((float) sp2->sub.start_display_time / 1000))))
1157                         {
1158                             free_subpicture(sp);
1159
1160                             /* update queue size and signal for next picture */
1161                             if (++is->subpq_rindex == SUBPICTURE_QUEUE_SIZE)
1162                                 is->subpq_rindex = 0;
1163
1164                             SDL_LockMutex(is->subpq_mutex);
1165                             is->subpq_size--;
1166                             SDL_CondSignal(is->subpq_cond);
1167                             SDL_UnlockMutex(is->subpq_mutex);
1168                         }
1169                     }
1170                 }
1171             }
1172
1173             /* display picture */
1174             if (!display_disable)
1175                 video_display(is);
1176
1177             /* update queue size and signal for next picture */
1178             if (++is->pictq_rindex == VIDEO_PICTURE_QUEUE_SIZE)
1179                 is->pictq_rindex = 0;
1180
1181             SDL_LockMutex(is->pictq_mutex);
1182             is->pictq_size--;
1183             SDL_CondSignal(is->pictq_cond);
1184             SDL_UnlockMutex(is->pictq_mutex);
1185         }
1186     } else if (is->audio_st) {
1187         /* draw the next audio frame */
1188
1189         /* if only audio stream, then display the audio bars (better
1190            than nothing, just to test the implementation) */
1191
1192         /* display picture */
1193         if (!display_disable)
1194             video_display(is);
1195     }
1196     if (show_status) {
1197         static int64_t last_time;
1198         int64_t cur_time;
1199         int aqsize, vqsize, sqsize;
1200         double av_diff;
1201
1202         cur_time = av_gettime();
1203         if (!last_time || (cur_time - last_time) >= 30000) {
1204             aqsize = 0;
1205             vqsize = 0;
1206             sqsize = 0;
1207             if (is->audio_st)
1208                 aqsize = is->audioq.size;
1209             if (is->video_st)
1210                 vqsize = is->videoq.size;
1211             if (is->subtitle_st)
1212                 sqsize = is->subtitleq.size;
1213             av_diff = 0;
1214             if (is->audio_st && is->video_st)
1215                 av_diff = get_audio_clock(is) - get_video_clock(is);
1216             printf("%7.2f A-V:%7.3f s:%3.1f aq=%5dKB vq=%5dKB sq=%5dB f=%"PRId64"/%"PRId64"   \r",
1217                    get_master_clock(is), av_diff, FFMAX(is->skip_frames - 1, 0), aqsize / 1024,
1218                    vqsize / 1024, sqsize, is->pts_ctx.num_faulty_dts, is->pts_ctx.num_faulty_pts);
1219             fflush(stdout);
1220             last_time = cur_time;
1221         }
1222     }
1223 }
1224
1225 static void stream_close(VideoState *is)
1226 {
1227     VideoPicture *vp;
1228     int i;
1229     /* XXX: use a special url_shutdown call to abort parse cleanly */
1230     is->abort_request = 1;
1231     SDL_WaitThread(is->parse_tid, NULL);
1232     SDL_WaitThread(is->refresh_tid, NULL);
1233
1234     /* free all pictures */
1235     for (i = 0; i < VIDEO_PICTURE_QUEUE_SIZE; i++) {
1236         vp = &is->pictq[i];
1237 #if CONFIG_AVFILTER
1238         avfilter_unref_bufferp(&vp->picref);
1239 #endif
1240         if (vp->bmp) {
1241             SDL_FreeYUVOverlay(vp->bmp);
1242             vp->bmp = NULL;
1243         }
1244     }
1245     SDL_DestroyMutex(is->pictq_mutex);
1246     SDL_DestroyCond(is->pictq_cond);
1247     SDL_DestroyMutex(is->subpq_mutex);
1248     SDL_DestroyCond(is->subpq_cond);
1249 #if !CONFIG_AVFILTER
1250     if (is->img_convert_ctx)
1251         sws_freeContext(is->img_convert_ctx);
1252 #endif
1253     av_free(is);
1254 }
1255
1256 static void do_exit(void)
1257 {
1258     if (cur_stream) {
1259         stream_close(cur_stream);
1260         cur_stream = NULL;
1261     }
1262     uninit_opts();
1263 #if CONFIG_AVFILTER
1264     avfilter_uninit();
1265 #endif
1266     avformat_network_deinit();
1267     if (show_status)
1268         printf("\n");
1269     SDL_Quit();
1270     av_log(NULL, AV_LOG_QUIET, "");
1271     exit(0);
1272 }
1273
1274 /* allocate a picture (this needs to be done in the main thread to avoid
1275    potential locking problems) */
1276 static void alloc_picture(void *opaque)
1277 {
1278     VideoState *is = opaque;
1279     VideoPicture *vp;
1280
1281     vp = &is->pictq[is->pictq_windex];
1282
1283     if (vp->bmp)
1284         SDL_FreeYUVOverlay(vp->bmp);
1285
1286 #if CONFIG_AVFILTER
1287     avfilter_unref_bufferp(&vp->picref);
1288
1289     vp->width   = is->out_video_filter->inputs[0]->w;
1290     vp->height  = is->out_video_filter->inputs[0]->h;
1291     vp->pix_fmt = is->out_video_filter->inputs[0]->format;
1292 #else
1293     vp->width   = is->video_st->codec->width;
1294     vp->height  = is->video_st->codec->height;
1295     vp->pix_fmt = is->video_st->codec->pix_fmt;
1296 #endif
1297
1298     vp->bmp = SDL_CreateYUVOverlay(vp->width, vp->height,
1299                                    SDL_YV12_OVERLAY,
1300                                    screen);
1301     if (!vp->bmp || vp->bmp->pitches[0] < vp->width) {
1302         /* SDL allocates a buffer smaller than requested if the video
1303          * overlay hardware is unable to support the requested size. */
1304         fprintf(stderr, "Error: the video system does not support an image\n"
1305                         "size of %dx%d pixels. Try using -vf \"scale=w:h\"\n"
1306                         "to reduce the image size.\n", vp->width, vp->height );
1307         do_exit();
1308     }
1309
1310     SDL_LockMutex(is->pictq_mutex);
1311     vp->allocated = 1;
1312     SDL_CondSignal(is->pictq_cond);
1313     SDL_UnlockMutex(is->pictq_mutex);
1314 }
1315
1316 /**
1317  *
1318  * @param pts the pts of the frame (or the dts of the packet), guessed if not known
1319  */
1320 static int queue_picture(VideoState *is, AVFrame *src_frame, double pts, int64_t pos)
1321 {
1322     VideoPicture *vp;
1323 #if CONFIG_AVFILTER
1324     AVPicture pict_src;
1325 #else
1326     int dst_pix_fmt = PIX_FMT_YUV420P;
1327 #endif
1328     /* wait until we have space to put a new picture */
1329     SDL_LockMutex(is->pictq_mutex);
1330
1331     if (is->pictq_size >= VIDEO_PICTURE_QUEUE_SIZE && !is->refresh)
1332         is->skip_frames = FFMAX(1.0 - FRAME_SKIP_FACTOR, is->skip_frames * (1.0 - FRAME_SKIP_FACTOR));
1333
1334     while (is->pictq_size >= VIDEO_PICTURE_QUEUE_SIZE &&
1335            !is->videoq.abort_request) {
1336         SDL_CondWait(is->pictq_cond, is->pictq_mutex);
1337     }
1338     SDL_UnlockMutex(is->pictq_mutex);
1339
1340     if (is->videoq.abort_request)
1341         return -1;
1342
1343     vp = &is->pictq[is->pictq_windex];
1344
1345     /* alloc or resize hardware picture buffer */
1346     if (!vp->bmp || vp->reallocate ||
1347 #if CONFIG_AVFILTER
1348         vp->width  != is->out_video_filter->inputs[0]->w ||
1349         vp->height != is->out_video_filter->inputs[0]->h) {
1350 #else
1351         vp->width != is->video_st->codec->width ||
1352         vp->height != is->video_st->codec->height) {
1353 #endif
1354         SDL_Event event;
1355
1356         vp->allocated  = 0;
1357         vp->reallocate = 0;
1358
1359         /* the allocation must be done in the main thread to avoid
1360            locking problems */
1361         event.type = FF_ALLOC_EVENT;
1362         event.user.data1 = is;
1363         SDL_PushEvent(&event);
1364
1365         /* wait until the picture is allocated */
1366         SDL_LockMutex(is->pictq_mutex);
1367         while (!vp->allocated && !is->videoq.abort_request) {
1368             SDL_CondWait(is->pictq_cond, is->pictq_mutex);
1369         }
1370         SDL_UnlockMutex(is->pictq_mutex);
1371
1372         if (is->videoq.abort_request)
1373             return -1;
1374     }
1375
1376     /* if the frame is not skipped, then display it */
1377     if (vp->bmp) {
1378         AVPicture pict = { { 0 } };
1379 #if CONFIG_AVFILTER
1380         avfilter_unref_bufferp(&vp->picref);
1381         vp->picref = src_frame->opaque;
1382 #endif
1383
1384         /* get a pointer on the bitmap */
1385         SDL_LockYUVOverlay (vp->bmp);
1386
1387         pict.data[0] = vp->bmp->pixels[0];
1388         pict.data[1] = vp->bmp->pixels[2];
1389         pict.data[2] = vp->bmp->pixels[1];
1390
1391         pict.linesize[0] = vp->bmp->pitches[0];
1392         pict.linesize[1] = vp->bmp->pitches[2];
1393         pict.linesize[2] = vp->bmp->pitches[1];
1394
1395 #if CONFIG_AVFILTER
1396         pict_src.data[0] = src_frame->data[0];
1397         pict_src.data[1] = src_frame->data[1];
1398         pict_src.data[2] = src_frame->data[2];
1399
1400         pict_src.linesize[0] = src_frame->linesize[0];
1401         pict_src.linesize[1] = src_frame->linesize[1];
1402         pict_src.linesize[2] = src_frame->linesize[2];
1403
1404         // FIXME use direct rendering
1405         av_picture_copy(&pict, &pict_src,
1406                         vp->pix_fmt, vp->width, vp->height);
1407 #else
1408         sws_flags = av_get_int(sws_opts, "sws_flags", NULL);
1409         is->img_convert_ctx = sws_getCachedContext(is->img_convert_ctx,
1410             vp->width, vp->height, vp->pix_fmt, vp->width, vp->height,
1411             dst_pix_fmt, sws_flags, NULL, NULL, NULL);
1412         if (is->img_convert_ctx == NULL) {
1413             fprintf(stderr, "Cannot initialize the conversion context\n");
1414             exit(1);
1415         }
1416         sws_scale(is->img_convert_ctx, src_frame->data, src_frame->linesize,
1417                   0, vp->height, pict.data, pict.linesize);
1418 #endif
1419         /* update the bitmap content */
1420         SDL_UnlockYUVOverlay(vp->bmp);
1421
1422         vp->pts = pts;
1423         vp->pos = pos;
1424
1425         /* now we can update the picture count */
1426         if (++is->pictq_windex == VIDEO_PICTURE_QUEUE_SIZE)
1427             is->pictq_windex = 0;
1428         SDL_LockMutex(is->pictq_mutex);
1429         vp->target_clock = compute_target_time(vp->pts, is);
1430
1431         is->pictq_size++;
1432         SDL_UnlockMutex(is->pictq_mutex);
1433     }
1434     return 0;
1435 }
1436
1437 /**
1438  * compute the exact PTS for the picture if it is omitted in the stream
1439  * @param pts1 the dts of the pkt / pts of the frame
1440  */
1441 static int output_picture2(VideoState *is, AVFrame *src_frame, double pts1, int64_t pos)
1442 {
1443     double frame_delay, pts;
1444
1445     pts = pts1;
1446
1447     if (pts != 0) {
1448         /* update video clock with pts, if present */
1449         is->video_clock = pts;
1450     } else {
1451         pts = is->video_clock;
1452     }
1453     /* update video clock for next frame */
1454     frame_delay = av_q2d(is->video_st->codec->time_base);
1455     /* for MPEG2, the frame can be repeated, so we update the
1456        clock accordingly */
1457     frame_delay += src_frame->repeat_pict * (frame_delay * 0.5);
1458     is->video_clock += frame_delay;
1459
1460     return queue_picture(is, src_frame, pts, pos);
1461 }
1462
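/* Pull the next packet from the video queue and decode it. Returns 1 when a
   frame is ready for display, 0 when there is nothing to display (flush
   packet, no frame, or frame dropped for sync), and < 0 if the queue was
   aborted. */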
1463 static int get_video_frame(VideoState *is, AVFrame *frame, int64_t *pts, AVPacket *pkt)
1464 {
1465     int got_picture, i;
1466
1467     if (packet_queue_get(&is->videoq, pkt, 1) < 0)
1468         return -1;
1469
1470     if (pkt->data == flush_pkt.data) {
1471         avcodec_flush_buffers(is->video_st->codec);
1472
1473         SDL_LockMutex(is->pictq_mutex);
1474         // Make sure there are no long delay timers (ideally we should just flush the queue, but that's harder)
1475         for (i = 0; i < VIDEO_PICTURE_QUEUE_SIZE; i++) {
1476             is->pictq[i].target_clock= 0;
1477         }
1478         while (is->pictq_size && !is->videoq.abort_request) {
1479             SDL_CondWait(is->pictq_cond, is->pictq_mutex);
1480         }
1481         is->video_current_pos = -1;
1482         SDL_UnlockMutex(is->pictq_mutex);
1483
1484         init_pts_correction(&is->pts_ctx);
1485         is->frame_last_pts = AV_NOPTS_VALUE;
1486         is->frame_last_delay = 0;
1487         is->frame_timer = (double)av_gettime() / 1000000.0;
1488         is->skip_frames = 1;
1489         is->skip_frames_index = 0;
1490         return 0;
1491     }
1492
1493     avcodec_decode_video2(is->video_st->codec, frame, &got_picture, pkt);
1494
1495     if (got_picture) {
1496         if (decoder_reorder_pts == -1) {
1497             *pts = guess_correct_pts(&is->pts_ctx, frame->pkt_pts, frame->pkt_dts);
1498         } else if (decoder_reorder_pts) {
1499             *pts = frame->pkt_pts;
1500         } else {
1501             *pts = frame->pkt_dts;
1502         }
1503
1504         if (*pts == AV_NOPTS_VALUE) {
1505             *pts = 0;
1506         }
1507
1508         is->skip_frames_index += 1;
1509         if (is->skip_frames_index >= is->skip_frames) {
1510             is->skip_frames_index -= FFMAX(is->skip_frames, 1.0);
1511             return 1;
1512         }
1513
1514     }
1515     return 0;
1516 }
1517
1518 #if CONFIG_AVFILTER
1519 static int configure_video_filters(AVFilterGraph *graph, VideoState *is, const char *vfilters)
1520 {
1521     char sws_flags_str[128];
1522     char buffersrc_args[256];
1523     int ret;
1524     AVFilterContext *filt_src = NULL, *filt_out = NULL, *filt_format;
1525     AVCodecContext *codec = is->video_st->codec;
1526
1527     snprintf(sws_flags_str, sizeof(sws_flags_str), "flags=%d", sws_flags);
1528     graph->scale_sws_opts = av_strdup(sws_flags_str);
1529
1530     snprintf(buffersrc_args, sizeof(buffersrc_args), "%d:%d:%d:%d:%d:%d:%d",
1531              codec->width, codec->height, codec->pix_fmt,
1532              is->video_st->time_base.num, is->video_st->time_base.den,
1533              codec->sample_aspect_ratio.num, codec->sample_aspect_ratio.den);
1534
1535
1536     if ((ret = avfilter_graph_create_filter(&filt_src,
1537                                             avfilter_get_by_name("buffer"),
1538                                             "src", buffersrc_args, NULL,
1539                                             graph)) < 0)
1540         return ret;
1541     if ((ret = avfilter_graph_create_filter(&filt_out,
1542                                             avfilter_get_by_name("buffersink"),
1543                                             "out", NULL, NULL, graph)) < 0)
1544         return ret;
1545
1546     if ((ret = avfilter_graph_create_filter(&filt_format,
1547                                             avfilter_get_by_name("format"),
1548                                             "format", "yuv420p", NULL, graph)) < 0)
1549         return ret;
1550     if ((ret = avfilter_link(filt_format, 0, filt_out, 0)) < 0)
1551         return ret;
1552
1553
1554     if (vfilters) {
1555         AVFilterInOut *outputs = avfilter_inout_alloc();
1556         AVFilterInOut *inputs  = avfilter_inout_alloc();
1557
1558         outputs->name    = av_strdup("in");
1559         outputs->filter_ctx = filt_src;
1560         outputs->pad_idx = 0;
1561         outputs->next    = NULL;
1562
1563         inputs->name    = av_strdup("out");
1564         inputs->filter_ctx = filt_format;
1565         inputs->pad_idx = 0;
1566         inputs->next    = NULL;
1567
1568         if ((ret = avfilter_graph_parse(graph, vfilters, inputs, outputs, NULL)) < 0)
1569             return ret;
1570     } else {
1571         if ((ret = avfilter_link(filt_src, 0, filt_format, 0)) < 0)
1572             return ret;
1573     }
1574
1575     if ((ret = avfilter_graph_config(graph, NULL)) < 0)
1576         return ret;
1577
1578     is->in_video_filter  = filt_src;
1579     is->out_video_filter = filt_out;
1580
1581     if (codec->codec->capabilities & CODEC_CAP_DR1) {
1582         is->use_dr1 = 1;
1583         codec->get_buffer     = codec_get_buffer;
1584         codec->release_buffer = codec_release_buffer;
1585         codec->opaque         = &is->buffer_pool;
1586     }
1587
1588     return ret;
1589 }
1590
1591 #endif  /* CONFIG_AVFILTER */
1592
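/* Video decoding thread: reads packets from the video queue, decodes frames
   and, when avfilter is enabled, runs them through the filter graph before
   queuing the resulting pictures for display. */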
1593 static int video_thread(void *arg)
1594 {
1595     AVPacket pkt = { 0 };
1596     VideoState *is = arg;
1597     AVFrame *frame = avcodec_alloc_frame();
1598     int64_t pts_int;
1599     double pts;
1600     int ret;
1601
1602 #if CONFIG_AVFILTER
1603     AVFilterGraph *graph = avfilter_graph_alloc();
1604     AVFilterContext *filt_out = NULL, *filt_in = NULL;
1605     int64_t pos;
1606     int last_w = is->video_st->codec->width;
1607     int last_h = is->video_st->codec->height;
1608
1609     if ((ret = configure_video_filters(graph, is, vfilters)) < 0)
1610         goto the_end;
1611     filt_in  = is->in_video_filter;
1612     filt_out = is->out_video_filter;
1613 #endif
1614
1615     for (;;) {
1616 #if CONFIG_AVFILTER
1617         AVFilterBufferRef *picref;
1618         AVRational tb;
1619 #endif
1620         while (is->paused && !is->videoq.abort_request)
1621             SDL_Delay(10);
1622
1623         av_free_packet(&pkt);
1624
1625         ret = get_video_frame(is, frame, &pts_int, &pkt);
1626         if (ret < 0)
1627             goto the_end;
1628
1629         if (!ret)
1630             continue;
1631
1632 #if CONFIG_AVFILTER
1633         if (   last_w != is->video_st->codec->width
1634             || last_h != is->video_st->codec->height) {
1635             av_dlog(NULL, "Changing size %dx%d -> %dx%d\n", last_w, last_h,
1636                     is->video_st->codec->width, is->video_st->codec->height);
1637             avfilter_graph_free(&graph);
1638             graph = avfilter_graph_alloc();
1639             if ((ret = configure_video_filters(graph, is, vfilters)) < 0)
1640                 goto the_end;
1641             filt_in  = is->in_video_filter;
1642             filt_out = is->out_video_filter;
1643             last_w = is->video_st->codec->width;
1644             last_h = is->video_st->codec->height;
1645         }
1646
1647         frame->pts = pts_int;
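             /* DR1 path: wrap the decoded frame's existing buffer in an
              * AVFilterBufferRef and push it into the source filter without a
              * copy; otherwise av_buffersrc_write_frame() copies the frame. */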
1648         if (is->use_dr1) {
1649             FrameBuffer      *buf = frame->opaque;
1650             AVFilterBufferRef *fb = avfilter_get_video_buffer_ref_from_arrays(
1651                                         frame->data, frame->linesize,
1652                                         AV_PERM_READ | AV_PERM_PRESERVE,
1653                                         frame->width, frame->height,
1654                                         frame->format);
1655
1656             avfilter_copy_frame_props(fb, frame);
1657             fb->buf->priv           = buf;
1658             fb->buf->free           = filter_release_buffer;
1659
1660             buf->refcount++;
1661             av_buffersrc_buffer(filt_in, fb);
1662
1663         } else
1664             av_buffersrc_write_frame(filt_in, frame);
1665
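             /* drain every filtered picture the buffersink has ready and
              * queue it for display */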
1666         while (ret >= 0) {
1667             ret = av_buffersink_read(filt_out, &picref);
1668             if (ret < 0) {
1669                 ret = 0;
1670                 break;
1671             }
1672
1673             avfilter_copy_buf_props(frame, picref);
1674
1675             pts_int = picref->pts;
1676             tb      = filt_out->inputs[0]->time_base;
1677             pos     = picref->pos;
1678             frame->opaque = picref;
1679
1680             if (av_cmp_q(tb, is->video_st->time_base)) {
1681                 av_unused int64_t pts1 = pts_int;
1682                 pts_int = av_rescale_q(pts_int, tb, is->video_st->time_base);
1683                 av_dlog(NULL, "video_thread(): "
1684                         "tb:%d/%d pts:%"PRId64" -> tb:%d/%d pts:%"PRId64"\n",
1685                         tb.num, tb.den, pts1,
1686                         is->video_st->time_base.num, is->video_st->time_base.den, pts_int);
1687             }
1688             pts = pts_int * av_q2d(is->video_st->time_base);
1689             ret = output_picture2(is, frame, pts, pos);
1690         }
1691 #else
1692         pts = pts_int * av_q2d(is->video_st->time_base);
1693         ret = output_picture2(is, frame, pts, pkt.pos);
1694 #endif
1695
1696         if (ret < 0)
1697             goto the_end;
1698
1699         if (step)
1700             if (cur_stream)
1701                 stream_pause(cur_stream);
1702     }
1703  the_end:
1704 #if CONFIG_AVFILTER
1705     av_freep(&vfilters);
1706     avfilter_graph_free(&graph);
1707 #endif
1708     av_free_packet(&pkt);
1709     av_free(frame);
1710     return 0;
1711 }
1712
1713 static int subtitle_thread(void *arg)
1714 {
1715     VideoState *is = arg;
1716     SubPicture *sp;
1717     AVPacket pkt1, *pkt = &pkt1;
1718     int got_subtitle;
1719     double pts;
1720     int i, j;
1721     int r, g, b, y, u, v, a;
1722
1723     for (;;) {
1724         while (is->paused && !is->subtitleq.abort_request) {
1725             SDL_Delay(10);
1726         }
1727         if (packet_queue_get(&is->subtitleq, pkt, 1) < 0)
1728             break;
1729
1730         if (pkt->data == flush_pkt.data) {
1731             avcodec_flush_buffers(is->subtitle_st->codec);
1732             continue;
1733         }
1734         SDL_LockMutex(is->subpq_mutex);
1735         while (is->subpq_size >= SUBPICTURE_QUEUE_SIZE &&
1736                !is->subtitleq.abort_request) {
1737             SDL_CondWait(is->subpq_cond, is->subpq_mutex);
1738         }
1739         SDL_UnlockMutex(is->subpq_mutex);
1740
1741         if (is->subtitleq.abort_request)
1742             return 0;
1743
1744         sp = &is->subpq[is->subpq_windex];
1745
1746         /* NOTE: pts is the PTS of the _first_ subtitle beginning in
1747            this packet, if any */
1748         pts = 0;
1749         if (pkt->pts != AV_NOPTS_VALUE)
1750             pts = av_q2d(is->subtitle_st->time_base) * pkt->pts;
1751
1752         avcodec_decode_subtitle2(is->subtitle_st->codec, &sp->sub,
1753                                  &got_subtitle, pkt);
1754
1755         if (got_subtitle && sp->sub.format == 0) {
1756             sp->pts = pts;
1757
1758             for (i = 0; i < sp->sub.num_rects; i++)
1759             {
1760                 for (j = 0; j < sp->sub.rects[i]->nb_colors; j++)
1761                 {
1762                     RGBA_IN(r, g, b, a, (uint32_t*)sp->sub.rects[i]->pict.data[1] + j);
1763                     y = RGB_TO_Y_CCIR(r, g, b);
1764                     u = RGB_TO_U_CCIR(r, g, b, 0);
1765                     v = RGB_TO_V_CCIR(r, g, b, 0);
1766                     YUVA_OUT((uint32_t*)sp->sub.rects[i]->pict.data[1] + j, y, u, v, a);
1767                 }
1768             }
1769
1770             /* now we can update the picture count */
1771             if (++is->subpq_windex == SUBPICTURE_QUEUE_SIZE)
1772                 is->subpq_windex = 0;
1773             SDL_LockMutex(is->subpq_mutex);
1774             is->subpq_size++;
1775             SDL_UnlockMutex(is->subpq_mutex);
1776         }
1777         av_free_packet(pkt);
1778     }
1779     return 0;
1780 }
1781
1782 /* copy samples into the circular array used by the waveform/spectrum display */
1783 static void update_sample_display(VideoState *is, short *samples, int samples_size)
1784 {
1785     int size, len;
1786
1787     size = samples_size / sizeof(short);
1788     while (size > 0) {
1789         len = SAMPLE_ARRAY_SIZE - is->sample_array_index;
1790         if (len > size)
1791             len = size;
1792         memcpy(is->sample_array + is->sample_array_index, samples, len * sizeof(short));
1793         samples += len;
1794         is->sample_array_index += len;
1795         if (is->sample_array_index >= SAMPLE_ARRAY_SIZE)
1796             is->sample_array_index = 0;
1797         size -= len;
1798     }
1799 }
1800
1801 /* Return the new audio buffer size. Samples may be added or removed to
1802    improve A/V sync when video or an external clock is the master. */
1803 static int synchronize_audio(VideoState *is, short *samples,
1804                              int samples_size1, double pts)
1805 {
1806     int n, samples_size;
1807     double ref_clock;
1808
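     /* n = size in bytes of one output audio frame (one sample per channel) */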
1809     n = is->sdl_channels * av_get_bytes_per_sample(is->sdl_sample_fmt);
1810     samples_size = samples_size1;
1811
1812     /* if not master, then we try to remove or add samples to correct the clock */
1813     if (((is->av_sync_type == AV_SYNC_VIDEO_MASTER && is->video_st) ||
1814          is->av_sync_type == AV_SYNC_EXTERNAL_CLOCK)) {
1815         double diff, avg_diff;
1816         int wanted_size, min_size, max_size, nb_samples;
1817
1818         ref_clock = get_master_clock(is);
1819         diff = get_audio_clock(is) - ref_clock;
1820
1821         if (fabs(diff) < AV_NOSYNC_THRESHOLD) {
1822             is->audio_diff_cum = diff + is->audio_diff_avg_coef * is->audio_diff_cum;
1823             if (is->audio_diff_avg_count < AUDIO_DIFF_AVG_NB) {
1824                 /* not enough measurements yet for a reliable estimate */
1825                 is->audio_diff_avg_count++;
1826             } else {
1827                 /* estimate the A-V difference */
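                     /* audio_diff_cum accumulates as cum = diff + coef * cum, so
                      * scaling by (1 - coef) gives an exponentially weighted
                      * average of roughly the last AUDIO_DIFF_AVG_NB differences */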
1828                 avg_diff = is->audio_diff_cum * (1.0 - is->audio_diff_avg_coef);
1829
1830                 if (fabs(avg_diff) >= is->audio_diff_threshold) {
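                         /* desired size = current size plus diff seconds worth of
                          * audio (diff * sample_rate frames of n bytes each),
                          * clamped below to +/- SAMPLE_CORRECTION_PERCENT_MAX */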
1831                     wanted_size = samples_size + ((int)(diff * is->audio_st->codec->sample_rate) * n);
1832                     nb_samples = samples_size / n;
1833
1834                     min_size = ((nb_samples * (100 - SAMPLE_CORRECTION_PERCENT_MAX)) / 100) * n;
1835                     max_size = ((nb_samples * (100 + SAMPLE_CORRECTION_PERCENT_MAX)) / 100) * n;
1836                     if (wanted_size < min_size)
1837                         wanted_size = min_size;
1838                     else if (wanted_size > max_size)
1839                         wanted_size = max_size;
1840
1841                     /* add or remove samples to correct the sync */
1842                     if (wanted_size < samples_size) {
1843                         /* remove samples */
1844                         samples_size = wanted_size;
1845                     } else if (wanted_size > samples_size) {
1846                         uint8_t *samples_end, *q;
1847                         int nb;
1848
1849                         /* add samples */
1850                         nb = wanted_size - samples_size; /* bytes to add */
1851                         samples_end = (uint8_t *)samples + samples_size - n;
1852                         q = samples_end + n;
1853                         while (nb > 0) {
1854                             memcpy(q, samples_end, n);
1855                             q += n;
1856                             nb -= n;
1857                         }
1858                         samples_size = wanted_size;
1859                     }
1860                 }
1861                 av_dlog(NULL, "diff=%f adiff=%f sample_diff=%d apts=%0.3f vpts=%0.3f %f\n",
1862                         diff, avg_diff, samples_size - samples_size1,
1863                         is->audio_clock, is->video_clock, is->audio_diff_threshold);
1864             }
1865         } else {
1866             /* the difference is too large: probably initial PTS errors,
1867                so reset the A-V filter */
1868             is->audio_diff_avg_count = 0;
1869             is->audio_diff_cum       = 0;
1870         }
1871     }
1872
1873     return samples_size;
1874 }
1875
1876 /* decode one audio frame and return its uncompressed size in bytes */
1877 static int audio_decode_frame(VideoState *is, double *pts_ptr)
1878 {
1879     AVPacket *pkt_temp = &is->audio_pkt_temp;
1880     AVPacket *pkt = &is->audio_pkt;
1881     AVCodecContext *dec = is->audio_st->codec;
1882     int n, len1, data_size, got_frame;
1883     double pts;
1884     int new_packet = 0;
1885     int flush_complete = 0;
1886
1887     for (;;) {
1888         /* NOTE: the audio packet can contain several frames */
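             /* the second condition keeps feeding the empty (NULL data) packet
              * so that codecs with CODEC_CAP_DELAY can drain buffered frames */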
1889         while (pkt_temp->size > 0 || (!pkt_temp->data && new_packet)) {
1890             int resample_changed, audio_resample;
1891
1892             if (!is->frame) {
1893                 if (!(is->frame = avcodec_alloc_frame()))
1894                     return AVERROR(ENOMEM);
1895             } else
1896                 avcodec_get_frame_defaults(is->frame);
1897
1898             if (flush_complete)
1899                 break;
1900             new_packet = 0;
1901             len1 = avcodec_decode_audio4(dec, is->frame, &got_frame, pkt_temp);
1902             if (len1 < 0) {
1903                 /* on decode error, skip the frame */
1904                 pkt_temp->size = 0;
1905                 break;
1906             }
1907
1908             pkt_temp->data += len1;
1909             pkt_temp->size -= len1;
1910
1911             if (!got_frame) {
1912                 /* stop sending empty packets if the decoder is finished */
1913                 if (!pkt_temp->data && dec->codec->capabilities & CODEC_CAP_DELAY)
1914                     flush_complete = 1;
1915                 continue;
1916             }
1917             data_size = av_samples_get_buffer_size(NULL, dec->channels,
1918                                                    is->frame->nb_samples,
1919                                                    dec->sample_fmt, 1);
1920
1921             audio_resample = dec->sample_fmt     != is->sdl_sample_fmt ||
1922                              dec->channel_layout != is->sdl_channel_layout;
1923
1924             resample_changed = dec->sample_fmt     != is->resample_sample_fmt ||
1925                                dec->channel_layout != is->resample_channel_layout;
1926
1927             if ((!is->avr && audio_resample) || resample_changed) {
1928                 int ret;
1929                 if (is->avr)
1930                     avresample_close(is->avr);
1931                 else if (audio_resample) {
1932                     is->avr = avresample_alloc_context();
1933                     if (!is->avr) {
1934                         fprintf(stderr, "error allocating AVAudioResampleContext\n");
1935                         break;
1936                     }
1937                 }
1938                 if (audio_resample) {
1939                     av_opt_set_int(is->avr, "in_channel_layout",  dec->channel_layout,    0);
1940                     av_opt_set_int(is->avr, "in_sample_fmt",      dec->sample_fmt,        0);
1941                     av_opt_set_int(is->avr, "in_sample_rate",     dec->sample_rate,       0);
1942                     av_opt_set_int(is->avr, "out_channel_layout", is->sdl_channel_layout, 0);
1943                     av_opt_set_int(is->avr, "out_sample_fmt",     is->sdl_sample_fmt,     0);
1944                     av_opt_set_int(is->avr, "out_sample_rate",    dec->sample_rate,       0);
1945
1946                     if ((ret = avresample_open(is->avr)) < 0) {
1947                         fprintf(stderr, "error initializing libavresample\n");
1948                         break;
1949                     }
1950                 }
1951                 is->resample_sample_fmt     = dec->sample_fmt;
1952                 is->resample_channel_layout = dec->channel_layout;
1953             }
1954
1955             if (audio_resample) {
1956                 void *tmp_out;
1957                 int out_samples, out_size, out_linesize;
1958                 int osize      = av_get_bytes_per_sample(is->sdl_sample_fmt);
1959                 int nb_samples = is->frame->nb_samples;
1960
1961                 out_size = av_samples_get_buffer_size(&out_linesize,
1962                                                       is->sdl_channels,
1963                                                       nb_samples,
1964                                                       is->sdl_sample_fmt, 0);
1965                 tmp_out = av_realloc(is->audio_buf1, out_size);
1966                 if (!tmp_out)
1967                     return AVERROR(ENOMEM);
1968                 is->audio_buf1 = tmp_out;
1969
1970                 out_samples = avresample_convert(is->avr,
1971                                                  (void **)&is->audio_buf1,
1972                                                  out_linesize, nb_samples,
1973                                                  (void **)is->frame->data,
1974                                                  is->frame->linesize[0],
1975                                                  is->frame->nb_samples);
1976                 if (out_samples < 0) {
1977                     fprintf(stderr, "avresample_convert() failed\n");
1978                     break;
1979                 }
1980                 is->audio_buf = is->audio_buf1;
1981                 data_size = out_samples * osize * is->sdl_channels;
1982             } else {
1983                 is->audio_buf = is->frame->data[0];
1984             }
1985
1986             /* derive the pts from the running audio clock */
1987             pts = is->audio_clock;
1988             *pts_ptr = pts;
1989             n = is->sdl_channels * av_get_bytes_per_sample(is->sdl_sample_fmt);
1990             is->audio_clock += (double)data_size /
1991                 (double)(n * dec->sample_rate);
1992 #ifdef DEBUG
1993             {
1994                 static double last_clock;
1995                 printf("audio: delay=%0.3f clock=%0.3f pts=%0.3f\n",
1996                        is->audio_clock - last_clock,
1997                        is->audio_clock, pts);
1998                 last_clock = is->audio_clock;
1999             }
2000 #endif
2001             return data_size;
2002         }
2003
2004         /* free the current packet */
2005         if (pkt->data)
2006             av_free_packet(pkt);
2007         memset(pkt_temp, 0, sizeof(*pkt_temp));
2008
2009         if (is->paused || is->audioq.abort_request) {
2010             return -1;
2011         }
2012
2013         /* read next packet */
2014         if ((new_packet = packet_queue_get(&is->audioq, pkt, 1)) < 0)
2015             return -1;
2016
2017         if (pkt->data == flush_pkt.data) {
2018             avcodec_flush_buffers(dec);
2019             flush_complete = 0;
2020         }
2021
2022         *pkt_temp = *pkt;
2023
2024         /* update the audio clock with the packet pts, if available */
2025         if (pkt->pts != AV_NOPTS_VALUE) {
2026             is->audio_clock = av_q2d(is->audio_st->time_base)*pkt->pts;
2027         }
2028     }
2029 }
2030
2031 /* prepare a new audio buffer */
2032 static void sdl_audio_callback(void *opaque, Uint8 *stream, int len)
2033 {
2034     VideoState *is = opaque;
2035     int audio_size, len1;
2036     double pts;
2037
2038     audio_callback_time = av_gettime();
2039
2040     while (len > 0) {
2041         if (is->audio_buf_index >= is->audio_buf_size) {
2042             audio_size = audio_decode_frame(is, &pts);
2043             if (audio_size < 0) {
2044                 /* if error, just output silence */
2045                 is->audio_buf      = is->silence_buf;
2046                 is->audio_buf_size = sizeof(is->silence_buf);
2047             } else {
2048                 if (is->show_audio)
2049                     update_sample_display(is, (int16_t *)is->audio_buf, audio_size);
2050                 audio_size = synchronize_audio(is, (int16_t *)is->audio_buf, audio_size,
2051                                                pts);
2052                 is->audio_buf_size = audio_size;
2053             }
2054             is->audio_buf_index = 0;
2055         }
2056         len1 = is->audio_buf_size - is->audio_buf_index;
2057         if (len1 > len)
2058             len1 = len;
2059         memcpy(stream, (uint8_t *)is->audio_buf + is->audio_buf_index, len1);
2060         len -= len1;
2061         stream += len1;
2062         is->audio_buf_index += len1;
2063     }
2064 }
2065
2066 /* open a given stream. Return 0 if OK */
2067 static int stream_component_open(VideoState *is, int stream_index)
2068 {
2069     AVFormatContext *ic = is->ic;
2070     AVCodecContext *avctx;
2071     AVCodec *codec;
2072     SDL_AudioSpec wanted_spec, spec;
2073     AVDictionary *opts;
2074     AVDictionaryEntry *t = NULL;
2075
2076     if (stream_index < 0 || stream_index >= ic->nb_streams)
2077         return -1;
2078     avctx = ic->streams[stream_index]->codec;
2079
2080     opts = filter_codec_opts(codec_opts, avctx->codec_id, ic, ic->streams[stream_index], NULL);
2081
2082     codec = avcodec_find_decoder(avctx->codec_id);
2083     avctx->debug_mv          = debug_mv;
2084     avctx->debug             = debug;
2085     avctx->workaround_bugs   = workaround_bugs;
2086     avctx->idct_algo         = idct;
2087     avctx->skip_frame        = skip_frame;
2088     avctx->skip_idct         = skip_idct;
2089     avctx->skip_loop_filter  = skip_loop_filter;
2090     avctx->error_concealment = error_concealment;
2091
2092     if (fast)   avctx->flags2 |= CODEC_FLAG2_FAST;
2093
2094     if (!av_dict_get(opts, "threads", NULL, 0))
2095         av_dict_set(&opts, "threads", "auto", 0);
2096     if (!codec ||
2097         avcodec_open2(avctx, codec, &opts) < 0)
2098         return -1;
2099     if ((t = av_dict_get(opts, "", NULL, AV_DICT_IGNORE_SUFFIX))) {
2100         av_log(NULL, AV_LOG_ERROR, "Option %s not found.\n", t->key);
2101         return AVERROR_OPTION_NOT_FOUND;
2102     }
2103
2104     /* prepare audio output */
2105     if (avctx->codec_type == AVMEDIA_TYPE_AUDIO) {
2106         wanted_spec.freq = avctx->sample_rate;
2107         wanted_spec.format = AUDIO_S16SYS;
2108
2109         if (!avctx->channel_layout)
2110             avctx->channel_layout = av_get_default_channel_layout(avctx->channels);
2111         if (!avctx->channel_layout) {
2112             fprintf(stderr, "unable to guess channel layout\n");
2113             return -1;
2114         }
2115         if (avctx->channels == 1)
2116             is->sdl_channel_layout = AV_CH_LAYOUT_MONO;
2117         else
2118             is->sdl_channel_layout = AV_CH_LAYOUT_STEREO;
2119         is->sdl_channels = av_get_channel_layout_nb_channels(is->sdl_channel_layout);
2120
2121         wanted_spec.channels = is->sdl_channels;
2122         wanted_spec.silence = 0;
2123         wanted_spec.samples = SDL_AUDIO_BUFFER_SIZE;
2124         wanted_spec.callback = sdl_audio_callback;
2125         wanted_spec.userdata = is;
2126         if (SDL_OpenAudio(&wanted_spec, &spec) < 0) {
2127             fprintf(stderr, "SDL_OpenAudio: %s\n", SDL_GetError());
2128             return -1;
2129         }
2130         is->audio_hw_buf_size = spec.size;
2131         is->sdl_sample_fmt          = AV_SAMPLE_FMT_S16;
2132         is->resample_sample_fmt     = is->sdl_sample_fmt;
2133         is->resample_channel_layout = is->sdl_channel_layout;
2134     }
2135
2136     ic->streams[stream_index]->discard = AVDISCARD_DEFAULT;
2137     switch (avctx->codec_type) {
2138     case AVMEDIA_TYPE_AUDIO:
2139         is->audio_stream = stream_index;
2140         is->audio_st = ic->streams[stream_index];
2141         is->audio_buf_size  = 0;
2142         is->audio_buf_index = 0;
2143
2144         /* init averaging filter */
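             /* pick the coefficient so that a difference's weight decays to 1%
              * after AUDIO_DIFF_AVG_NB samples: coef^AUDIO_DIFF_AVG_NB == 0.01 */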
2145         is->audio_diff_avg_coef  = exp(log(0.01) / AUDIO_DIFF_AVG_NB);
2146         is->audio_diff_avg_count = 0;
2147         /* since we do not have precise enough audio FIFO fullness information,
2148            we correct audio sync only when the error exceeds this threshold */
2149         is->audio_diff_threshold = 2.0 * SDL_AUDIO_BUFFER_SIZE / avctx->sample_rate;
2150
2151         memset(&is->audio_pkt, 0, sizeof(is->audio_pkt));
2152         packet_queue_init(&is->audioq);
2153         SDL_PauseAudio(0);
2154         break;
2155     case AVMEDIA_TYPE_VIDEO:
2156         is->video_stream = stream_index;
2157         is->video_st = ic->streams[stream_index];
2158
2159         packet_queue_init(&is->videoq);
2160         is->video_tid = SDL_CreateThread(video_thread, is);
2161         break;
2162     case AVMEDIA_TYPE_SUBTITLE:
2163         is->subtitle_stream = stream_index;
2164         is->subtitle_st = ic->streams[stream_index];
2165         packet_queue_init(&is->subtitleq);
2166
2167         is->subtitle_tid = SDL_CreateThread(subtitle_thread, is);
2168         break;
2169     default:
2170         break;
2171     }
2172     return 0;
2173 }
2174
2175 static void stream_component_close(VideoState *is, int stream_index)
2176 {
2177     AVFormatContext *ic = is->ic;
2178     AVCodecContext *avctx;
2179
2180     if (stream_index < 0 || stream_index >= ic->nb_streams)
2181         return;
2182     avctx = ic->streams[stream_index]->codec;
2183
2184     switch (avctx->codec_type) {
2185     case AVMEDIA_TYPE_AUDIO:
2186         packet_queue_abort(&is->audioq);
2187
2188         SDL_CloseAudio();
2189
2190         packet_queue_end(&is->audioq);
2191         av_free_packet(&is->audio_pkt);
2192         if (is->avr)
2193             avresample_free(&is->avr);
2194         av_freep(&is->audio_buf1);
2195         is->audio_buf = NULL;
2196         av_freep(&is->frame);
2197
2198         if (is->rdft) {
2199             av_rdft_end(is->rdft);
2200             av_freep(&is->rdft_data);
2201             is->rdft = NULL;
2202             is->rdft_bits = 0;
2203         }
2204         break;
2205     case AVMEDIA_TYPE_VIDEO:
2206         packet_queue_abort(&is->videoq);
2207
2208         /* note: we also signal this mutex to make sure we deblock the
2209            video thread in all cases */
2210         SDL_LockMutex(is->pictq_mutex);
2211         SDL_CondSignal(is->pictq_cond);
2212         SDL_UnlockMutex(is->pictq_mutex);
2213
2214         SDL_WaitThread(is->video_tid, NULL);
2215
2216         packet_queue_end(&is->videoq);
2217         break;
2218     case AVMEDIA_TYPE_SUBTITLE:
2219         packet_queue_abort(&is->subtitleq);
2220
2221         /* note: we also signal this mutex to make sure we deblock the
2222            subtitle thread in all cases */
2223         SDL_LockMutex(is->subpq_mutex);
2224         is->subtitle_stream_changed = 1;
2225
2226         SDL_CondSignal(is->subpq_cond);
2227         SDL_UnlockMutex(is->subpq_mutex);
2228
2229         SDL_WaitThread(is->subtitle_tid, NULL);
2230
2231         packet_queue_end(&is->subtitleq);
2232         break;
2233     default:
2234         break;
2235     }
2236
2237     ic->streams[stream_index]->discard = AVDISCARD_ALL;
2238     avcodec_close(avctx);
2239 #if CONFIG_AVFILTER
2240     free_buffer_pool(&is->buffer_pool);
2241 #endif
2242     switch (avctx->codec_type) {
2243     case AVMEDIA_TYPE_AUDIO:
2244         is->audio_st = NULL;
2245         is->audio_stream = -1;
2246         break;
2247     case AVMEDIA_TYPE_VIDEO:
2248         is->video_st = NULL;
2249         is->video_stream = -1;
2250         break;
2251     case AVMEDIA_TYPE_SUBTITLE:
2252         is->subtitle_st = NULL;
2253         is->subtitle_stream = -1;
2254         break;
2255     default:
2256         break;
2257     }
2258 }
2259
2260 /* since we have only one decoding thread, we can use a global
2261    variable instead of a thread local variable */
2262 static VideoState *global_video_state;
2263
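     /* called by libavformat during blocking I/O; returning non-zero aborts
      * the pending operation, which keeps quitting responsive */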
2264 static int decode_interrupt_cb(void *ctx)
2265 {
2266     return global_video_state && global_video_state->abort_request;
2267 }
2268
2269 /* this thread gets the stream from the disk or the network */
2270 static int decode_thread(void *arg)
2271 {
2272     VideoState *is = arg;
2273     AVFormatContext *ic = NULL;
2274     int err, i, ret;
2275     int st_index[AVMEDIA_TYPE_NB];
2276     AVPacket pkt1, *pkt = &pkt1;
2277     int eof = 0;
2278     int pkt_in_play_range = 0;
2279     AVDictionaryEntry *t;
2280     AVDictionary **opts;
2281     int orig_nb_streams;
2282
2283     memset(st_index, -1, sizeof(st_index));
2284     is->video_stream = -1;
2285     is->audio_stream = -1;
2286     is->subtitle_stream = -1;
2287
2288     global_video_state = is;
2289
2290     ic = avformat_alloc_context();
2291     ic->interrupt_callback.callback = decode_interrupt_cb;
2292     err = avformat_open_input(&ic, is->filename, is->iformat, &format_opts);
2293     if (err < 0) {
2294         print_error(is->filename, err);
2295         ret = -1;
2296         goto fail;
2297     }
2298     if ((t = av_dict_get(format_opts, "", NULL, AV_DICT_IGNORE_SUFFIX))) {
2299         av_log(NULL, AV_LOG_ERROR, "Option %s not found.\n", t->key);
2300         ret = AVERROR_OPTION_NOT_FOUND;
2301         goto fail;
2302     }
2303     is->ic = ic;
2304
2305     if (genpts)
2306         ic->flags |= AVFMT_FLAG_GENPTS;
2307
2308     opts = setup_find_stream_info_opts(ic, codec_opts);
2309     orig_nb_streams = ic->nb_streams;
2310
2311     err = avformat_find_stream_info(ic, opts);
2312     if (err < 0) {
2313         fprintf(stderr, "%s: could not find codec parameters\n", is->filename);
2314         ret = -1;
2315         goto fail;
2316     }
2317     for (i = 0; i < orig_nb_streams; i++)
2318         av_dict_free(&opts[i]);
2319     av_freep(&opts);
2320
2321     if (ic->pb)
2322         ic->pb->eof_reached = 0; // FIXME hack, avplay maybe should not use url_feof() to test for the end
2323
2324     if (seek_by_bytes < 0)
2325         seek_by_bytes = !!(ic->iformat->flags & AVFMT_TS_DISCONT);
2326
2327     /* if a seek was requested, execute it now */
2328     if (start_time != AV_NOPTS_VALUE) {
2329         int64_t timestamp;
2330
2331         timestamp = start_time;
2332         /* add the stream start time */
2333         if (ic->start_time != AV_NOPTS_VALUE)
2334             timestamp += ic->start_time;
2335         ret = avformat_seek_file(ic, -1, INT64_MIN, timestamp, INT64_MAX, 0);
2336         if (ret < 0) {
2337             fprintf(stderr, "%s: could not seek to position %0.3f\n",
2338                     is->filename, (double)timestamp / AV_TIME_BASE);
2339         }
2340     }
2341
2342     for (i = 0; i < ic->nb_streams; i++)
2343         ic->streams[i]->discard = AVDISCARD_ALL;
2344     if (!video_disable)
2345         st_index[AVMEDIA_TYPE_VIDEO] =
2346             av_find_best_stream(ic, AVMEDIA_TYPE_VIDEO,
2347                                 wanted_stream[AVMEDIA_TYPE_VIDEO], -1, NULL, 0);
2348     if (!audio_disable)
2349         st_index[AVMEDIA_TYPE_AUDIO] =
2350             av_find_best_stream(ic, AVMEDIA_TYPE_AUDIO,
2351                                 wanted_stream[AVMEDIA_TYPE_AUDIO],
2352                                 st_index[AVMEDIA_TYPE_VIDEO],
2353                                 NULL, 0);
2354     if (!video_disable)
2355         st_index[AVMEDIA_TYPE_SUBTITLE] =
2356             av_find_best_stream(ic, AVMEDIA_TYPE_SUBTITLE,
2357                                 wanted_stream[AVMEDIA_TYPE_SUBTITLE],
2358                                 (st_index[AVMEDIA_TYPE_AUDIO] >= 0 ?
2359                                  st_index[AVMEDIA_TYPE_AUDIO] :
2360                                  st_index[AVMEDIA_TYPE_VIDEO]),
2361                                 NULL, 0);
2362     if (show_status) {
2363         av_dump_format(ic, 0, is->filename, 0);
2364     }
2365
2366     /* open the streams */
2367     if (st_index[AVMEDIA_TYPE_AUDIO] >= 0) {
2368         stream_component_open(is, st_index[AVMEDIA_TYPE_AUDIO]);
2369     }
2370
2371     ret = -1;
2372     if (st_index[AVMEDIA_TYPE_VIDEO] >= 0) {
2373         ret = stream_component_open(is, st_index[AVMEDIA_TYPE_VIDEO]);
2374     }
2375     is->refresh_tid = SDL_CreateThread(refresh_thread, is);
2376     if (ret < 0) {
2377         if (!display_disable)
2378             is->show_audio = 2;
2379     }
2380
2381     if (st_index[AVMEDIA_TYPE_SUBTITLE] >= 0) {
2382         stream_component_open(is, st_index[AVMEDIA_TYPE_SUBTITLE]);
2383     }
2384
2385     if (is->video_stream < 0 && is->audio_stream < 0) {
2386         fprintf(stderr, "%s: could not open codecs\n", is->filename);
2387         ret = -1;
2388         goto fail;
2389     }
2390
2391     for (;;) {
2392         if (is->abort_request)
2393             break;
2394         if (is->paused != is->last_paused) {
2395             is->last_paused = is->paused;
2396             if (is->paused)
2397                 is->read_pause_return = av_read_pause(ic);
2398             else
2399                 av_read_play(ic);
2400         }
2401 #if CONFIG_RTSP_DEMUXER
2402         if (is->paused && !strcmp(ic->iformat->name, "rtsp")) {
2403             /* wait 10 ms to avoid trying to get another packet */
2404             /* XXX: horrible */
2405             SDL_Delay(10);
2406             continue;
2407         }
2408 #endif
2409         if (is->seek_req) {
2410             int64_t seek_target = is->seek_pos;
2411             int64_t seek_min    = is->seek_rel > 0 ? seek_target - is->seek_rel + 2: INT64_MIN;
2412             int64_t seek_max    = is->seek_rel < 0 ? seek_target - is->seek_rel - 2: INT64_MAX;
2413 // FIXME the +-2 is due to rounding being not done in the correct direction in generation
2414 //      of the seek_pos/seek_rel variables
2415
2416             ret = avformat_seek_file(is->ic, -1, seek_min, seek_target, seek_max, is->seek_flags);
2417             if (ret < 0) {
2418                 fprintf(stderr, "%s: error while seeking\n", is->ic->filename);
2419             } else {
2420                 if (is->audio_stream >= 0) {
2421                     packet_queue_flush(&is->audioq);
2422                     packet_queue_put(&is->audioq, &flush_pkt);
2423                 }
2424                 if (is->subtitle_stream >= 0) {
2425                     packet_queue_flush(&is->subtitleq);
2426                     packet_queue_put(&is->subtitleq, &flush_pkt);
2427                 }
2428                 if (is->video_stream >= 0) {
2429                     packet_queue_flush(&is->videoq);
2430                     packet_queue_put(&is->videoq, &flush_pkt);
2431                 }
2432             }
2433             is->seek_req = 0;
2434             eof = 0;
2435         }
2436
2437         /* if the queues are full, no need to read more */
2438         if (!infinite_buffer &&
2439               (is->audioq.size + is->videoq.size + is->subtitleq.size > MAX_QUEUE_SIZE
2440             || (   (is->audioq   .size  > MIN_AUDIOQ_SIZE || is->audio_stream < 0)
2441                 && (is->videoq   .nb_packets > MIN_FRAMES || is->video_stream < 0)
2442                 && (is->subtitleq.nb_packets > MIN_FRAMES || is->subtitle_stream < 0)))) {
2443             /* wait 10 ms */
2444             SDL_Delay(10);
2445             continue;
2446         }
2447         if (eof) {
2448             if (is->video_stream >= 0) {
2449                 av_init_packet(pkt);
2450                 pkt->data = NULL;
2451                 pkt->size = 0;
2452                 pkt->stream_index = is->video_stream;
2453                 packet_queue_put(&is->videoq, pkt);
2454             }
2455             if (is->audio_stream >= 0 &&
2456                 is->audio_st->codec->codec->capabilities & CODEC_CAP_DELAY) {
2457                 av_init_packet(pkt);
2458                 pkt->data = NULL;
2459                 pkt->size = 0;
2460                 pkt->stream_index = is->audio_stream;
2461                 packet_queue_put(&is->audioq, pkt);
2462             }
2463             SDL_Delay(10);
2464             if (is->audioq.size + is->videoq.size + is->subtitleq.size == 0) {
2465                 if (loop != 1 && (!loop || --loop)) {
2466                     stream_seek(cur_stream, start_time != AV_NOPTS_VALUE ? start_time : 0, 0, 0);
2467                 } else if (autoexit) {
2468                     ret = AVERROR_EOF;
2469                     goto fail;
2470                 }
2471             }
2472             continue;
2473         }
2474         ret = av_read_frame(ic, pkt);
2475         if (ret < 0) {
2476             if (ret == AVERROR_EOF || (ic->pb && ic->pb->eof_reached))
2477                 eof = 1;
2478             if (ic->pb && ic->pb->error)
2479                 break;
2480             SDL_Delay(100); /* wait for user event */
2481             continue;
2482         }
2483         /* check if packet is in play range specified by user, then queue, otherwise discard */
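             /* i.e. convert pkt->pts to seconds relative to the stream start and
              * compare it against the requested duration (offset by start_time) */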
2484         pkt_in_play_range = duration == AV_NOPTS_VALUE ||
2485                 (pkt->pts - ic->streams[pkt->stream_index]->start_time) *
2486                 av_q2d(ic->streams[pkt->stream_index]->time_base) -
2487                 (double)(start_time != AV_NOPTS_VALUE ? start_time : 0) / 1000000
2488                 <= ((double)duration / 1000000);
2489         if (pkt->stream_index == is->audio_stream && pkt_in_play_range) {
2490             packet_queue_put(&is->audioq, pkt);
2491         } else if (pkt->stream_index == is->video_stream && pkt_in_play_range) {
2492             packet_queue_put(&is->videoq, pkt);
2493         } else if (pkt->stream_index == is->subtitle_stream && pkt_in_play_range) {
2494             packet_queue_put(&is->subtitleq, pkt);
2495         } else {
2496             av_free_packet(pkt);
2497         }
2498     }
2499     /* wait until the end */
2500     while (!is->abort_request) {
2501         SDL_Delay(100);
2502     }
2503
2504     ret = 0;
2505  fail:
2506     /* disable interrupting */
2507     global_video_state = NULL;
2508
2509     /* close each stream */
2510     if (is->audio_stream >= 0)
2511         stream_component_close(is, is->audio_stream);
2512     if (is->video_stream >= 0)
2513         stream_component_close(is, is->video_stream);
2514     if (is->subtitle_stream >= 0)
2515         stream_component_close(is, is->subtitle_stream);
2516     if (is->ic) {
2517         avformat_close_input(&is->ic);
2518     }
2519
2520     if (ret != 0) {
2521         SDL_Event event;
2522
2523         event.type = FF_QUIT_EVENT;
2524         event.user.data1 = is;
2525         SDL_PushEvent(&event);
2526     }
2527     return 0;
2528 }
2529
2530 static VideoState *stream_open(const char *filename, AVInputFormat *iformat)
2531 {
2532     VideoState *is;
2533
2534     is = av_mallocz(sizeof(VideoState));
2535     if (!is)
2536         return NULL;
2537     av_strlcpy(is->filename, filename, sizeof(is->filename));
2538     is->iformat = iformat;
2539     is->ytop    = 0;
2540     is->xleft   = 0;
2541
2542     /* start video display */
2543     is->pictq_mutex = SDL_CreateMutex();
2544     is->pictq_cond  = SDL_CreateCond();
2545
2546     is->subpq_mutex = SDL_CreateMutex();
2547     is->subpq_cond  = SDL_CreateCond();
2548
2549     is->av_sync_type = av_sync_type;
2550     is->parse_tid    = SDL_CreateThread(decode_thread, is);
2551     if (!is->parse_tid) {
2552         av_free(is);
2553         return NULL;
2554     }
2555     return is;
2556 }
2557
2558 static void stream_cycle_channel(VideoState *is, int codec_type)
2559 {
2560     AVFormatContext *ic = is->ic;
2561     int start_index, stream_index;
2562     AVStream *st;
2563
2564     if (codec_type == AVMEDIA_TYPE_VIDEO)
2565         start_index = is->video_stream;
2566     else if (codec_type == AVMEDIA_TYPE_AUDIO)
2567         start_index = is->audio_stream;
2568     else
2569         start_index = is->subtitle_stream;
2570     if (start_index < (codec_type == AVMEDIA_TYPE_SUBTITLE ? -1 : 0))
2571         return;
2572     stream_index = start_index;
2573     for (;;) {
2574         if (++stream_index >= is->ic->nb_streams)
2575         {
2576             if (codec_type == AVMEDIA_TYPE_SUBTITLE)
2577             {
2578                 stream_index = -1;
2579                 goto the_end;
2580             } else
2581                 stream_index = 0;
2582         }
2583         if (stream_index == start_index)
2584             return;
2585         st = ic->streams[stream_index];
2586         if (st->codec->codec_type == codec_type) {
2587             /* check that parameters are OK */
2588             switch (codec_type) {
2589             case AVMEDIA_TYPE_AUDIO:
2590                 if (st->codec->sample_rate != 0 &&
2591                     st->codec->channels != 0)
2592                     goto the_end;
2593                 break;
2594             case AVMEDIA_TYPE_VIDEO:
2595             case AVMEDIA_TYPE_SUBTITLE:
2596                 goto the_end;
2597             default:
2598                 break;
2599             }
2600         }
2601     }
2602  the_end:
2603     stream_component_close(is, start_index);
2604     stream_component_open(is, stream_index);
2605 }
2606
2607
2608 static void toggle_full_screen(void)
2609 {
2610 #if defined(__APPLE__) && SDL_VERSION_ATLEAST(1, 2, 14)
2611     /* OS X needs to empty the picture_queue */
2612     int i;
2613     for (i = 0; i < VIDEO_PICTURE_QUEUE_SIZE; i++)
2614         cur_stream->pictq[i].reallocate = 1;
2615 #endif
2616     is_full_screen = !is_full_screen;
2617     video_open(cur_stream);
2618 }
2619
2620 static void toggle_pause(void)
2621 {
2622     if (cur_stream)
2623         stream_pause(cur_stream);
2624     step = 0;
2625 }
2626
2627 static void step_to_next_frame(void)
2628 {
2629     if (cur_stream) {
2630         /* if the stream is paused, unpause it, then step */
2631         if (cur_stream->paused)
2632             stream_pause(cur_stream);
2633     }
2634     step = 1;
2635 }
2636
2637 static void toggle_audio_display(void)
2638 {
2639     if (cur_stream) {
2640         int bgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0x00);
2641         cur_stream->show_audio = (cur_stream->show_audio + 1) % 3;
2642         fill_rectangle(screen,
2643                        cur_stream->xleft, cur_stream->ytop, cur_stream->width, cur_stream->height,
2644                        bgcolor);
2645         SDL_UpdateRect(screen, cur_stream->xleft, cur_stream->ytop, cur_stream->width, cur_stream->height);
2646     }
2647 }
2648
2649 /* handle an event sent by the GUI */
2650 static void event_loop(void)
2651 {
2652     SDL_Event event;
2653     double incr, pos, frac;
2654
2655     for (;;) {
2656         double x;
2657         SDL_WaitEvent(&event);
2658         switch (event.type) {
2659         case SDL_KEYDOWN:
2660             if (exit_on_keydown) {
2661                 do_exit();
2662                 break;
2663             }
2664             switch (event.key.keysym.sym) {
2665             case SDLK_ESCAPE:
2666             case SDLK_q:
2667                 do_exit();
2668                 break;
2669             case SDLK_f:
2670                 toggle_full_screen();
2671                 break;
2672             case SDLK_p:
2673             case SDLK_SPACE:
2674                 toggle_pause();
2675                 break;
2676             case SDLK_s: // S: Step to next frame
2677                 step_to_next_frame();
2678                 break;
2679             case SDLK_a:
2680                 if (cur_stream)
2681                     stream_cycle_channel(cur_stream, AVMEDIA_TYPE_AUDIO);
2682                 break;
2683             case SDLK_v:
2684                 if (cur_stream)
2685                     stream_cycle_channel(cur_stream, AVMEDIA_TYPE_VIDEO);
2686                 break;
2687             case SDLK_t:
2688                 if (cur_stream)
2689                     stream_cycle_channel(cur_stream, AVMEDIA_TYPE_SUBTITLE);
2690                 break;
2691             case SDLK_w:
2692                 toggle_audio_display();
2693                 break;
2694             case SDLK_LEFT:
2695                 incr = -10.0;
2696                 goto do_seek;
2697             case SDLK_RIGHT:
2698                 incr = 10.0;
2699                 goto do_seek;
2700             case SDLK_UP:
2701                 incr = 60.0;
2702                 goto do_seek;
2703             case SDLK_DOWN:
2704                 incr = -60.0;
2705             do_seek:
2706                 if (cur_stream) {
2707                     if (seek_by_bytes) {
2708                         if (cur_stream->video_stream >= 0 && cur_stream->video_current_pos >= 0) {
2709                             pos = cur_stream->video_current_pos;
2710                         } else if (cur_stream->audio_stream >= 0 && cur_stream->audio_pkt.pos >= 0) {
2711                             pos = cur_stream->audio_pkt.pos;
2712                         } else
2713                             pos = avio_tell(cur_stream->ic->pb);
2714                         if (cur_stream->ic->bit_rate)
2715                             incr *= cur_stream->ic->bit_rate / 8.0;
2716                         else
2717                             incr *= 180000.0;
2718                         pos += incr;
2719                         stream_seek(cur_stream, pos, incr, 1);
2720                     } else {
2721                         pos = get_master_clock(cur_stream);
2722                         pos += incr;
2723                         stream_seek(cur_stream, (int64_t)(pos * AV_TIME_BASE), (int64_t)(incr * AV_TIME_BASE), 0);
2724                     }
2725                 }
2726                 break;
2727             default:
2728                 break;
2729             }
2730             break;
2731         case SDL_MOUSEBUTTONDOWN:
2732             if (exit_on_mousedown) {
2733                 do_exit();
2734                 break;
2735             }
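                 /* fall through: a button press is handled like mouse motion */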
2736         case SDL_MOUSEMOTION:
2737             if (event.type == SDL_MOUSEBUTTONDOWN) {
2738                 x = event.button.x;
2739             } else {
2740                 if (event.motion.state != SDL_PRESSED)
2741                     break;
2742                 x = event.motion.x;
2743             }
2744             if (cur_stream) {
2745                 if (seek_by_bytes || cur_stream->ic->duration <= 0) {
2746                     uint64_t size = avio_size(cur_stream->ic->pb);
2747                     stream_seek(cur_stream, size*x/cur_stream->width, 0, 1);
2748                 } else {
2749                     int64_t ts;
2750                     int ns, hh, mm, ss;
2751                     int tns, thh, tmm, tss;
2752                     tns  = cur_stream->ic->duration / 1000000LL;
2753                     thh  = tns / 3600;
2754                     tmm  = (tns % 3600) / 60;
2755                     tss  = (tns % 60);
2756                     frac = x / cur_stream->width;
2757                     ns   = frac * tns;
2758                     hh   = ns / 3600;
2759                     mm   = (ns % 3600) / 60;
2760                     ss   = (ns % 60);
2761                     fprintf(stderr, "Seek to %2.0f%% (%2d:%02d:%02d) of total duration (%2d:%02d:%02d)       \n", frac*100,
2762                             hh, mm, ss, thh, tmm, tss);
2763                     ts = frac * cur_stream->ic->duration;
2764                     if (cur_stream->ic->start_time != AV_NOPTS_VALUE)
2765                         ts += cur_stream->ic->start_time;
2766                     stream_seek(cur_stream, ts, 0, 0);
2767                 }
2768             }
2769             break;
2770         case SDL_VIDEORESIZE:
2771             if (cur_stream) {
2772                 screen = SDL_SetVideoMode(event.resize.w, event.resize.h, 0,
2773                                           SDL_HWSURFACE|SDL_RESIZABLE|SDL_ASYNCBLIT|SDL_HWACCEL);
2774                 screen_width  = cur_stream->width  = event.resize.w;
2775                 screen_height = cur_stream->height = event.resize.h;
2776             }
2777             break;
2778         case SDL_QUIT:
2779         case FF_QUIT_EVENT:
2780             do_exit();
2781             break;
2782         case FF_ALLOC_EVENT:
2783             video_open(event.user.data1);
2784             alloc_picture(event.user.data1);
2785             break;
2786         case FF_REFRESH_EVENT:
2787             video_refresh_timer(event.user.data1);
2788             cur_stream->refresh = 0;
2789             break;
2790         default:
2791             break;
2792         }
2793     }
2794 }
2795
2796 static int opt_frame_size(const char *opt, const char *arg)
2797 {
2798     av_log(NULL, AV_LOG_ERROR,
2799            "Option '%s' has been removed, use private format options instead\n", opt);
2800     return AVERROR(EINVAL);
2801 }
2802
2803 static int opt_width(const char *opt, const char *arg)
2804 {
2805     screen_width = parse_number_or_die(opt, arg, OPT_INT64, 1, INT_MAX);
2806     return 0;
2807 }
2808
2809 static int opt_height(const char *opt, const char *arg)
2810 {
2811     screen_height = parse_number_or_die(opt, arg, OPT_INT64, 1, INT_MAX);
2812     return 0;
2813 }
2814
2815 static int opt_format(const char *opt, const char *arg)
2816 {
2817     file_iformat = av_find_input_format(arg);
2818     if (!file_iformat) {
2819         fprintf(stderr, "Unknown input format: %s\n", arg);
2820         return AVERROR(EINVAL);
2821     }
2822     return 0;
2823 }
2824
2825 static int opt_frame_pix_fmt(const char *opt, const char *arg)
2826 {
2827     av_log(NULL, AV_LOG_ERROR,
2828            "Option '%s' has been removed, use private format options instead\n", opt);
2829     return AVERROR(EINVAL);
2830 }
2831
2832 static int opt_sync(const char *opt, const char *arg)
2833 {
2834     if (!strcmp(arg, "audio"))
2835         av_sync_type = AV_SYNC_AUDIO_MASTER;
2836     else if (!strcmp(arg, "video"))
2837         av_sync_type = AV_SYNC_VIDEO_MASTER;
2838     else if (!strcmp(arg, "ext"))
2839         av_sync_type = AV_SYNC_EXTERNAL_CLOCK;
2840     else {
2841         fprintf(stderr, "Unknown value for %s: %s\n", opt, arg);
2842         exit(1);
2843     }
2844     return 0;
2845 }
2846
2847 static int opt_seek(const char *opt, const char *arg)
2848 {
2849     start_time = parse_time_or_die(opt, arg, 1);
2850     return 0;
2851 }
2852
2853 static int opt_duration(const char *opt, const char *arg)
2854 {
2855     duration = parse_time_or_die(opt, arg, 1);
2856     return 0;
2857 }
2858
2859 static int opt_debug(const char *opt, const char *arg)
2860 {
2861     av_log_set_level(99);
2862     debug = parse_number_or_die(opt, arg, OPT_INT64, 0, INT_MAX);
2863     return 0;
2864 }
2865
2866 static int opt_vismv(const char *opt, const char *arg)
2867 {
2868     debug_mv = parse_number_or_die(opt, arg, OPT_INT64, INT_MIN, INT_MAX);
2869     return 0;
2870 }
2871
2872 static const OptionDef options[] = {
2873 #include "cmdutils_common_opts.h"
2874     { "x", HAS_ARG, { (void*)opt_width }, "force displayed width", "width" },
2875     { "y", HAS_ARG, { (void*)opt_height }, "force displayed height", "height" },
2876     { "s", HAS_ARG | OPT_VIDEO, { (void*)opt_frame_size }, "set frame size (WxH or abbreviation)", "size" },
2877     { "fs", OPT_BOOL, { (void*)&is_full_screen }, "force full screen" },
2878     { "an", OPT_BOOL, { (void*)&audio_disable }, "disable audio" },
2879     { "vn", OPT_BOOL, { (void*)&video_disable }, "disable video" },
2880     { "ast", OPT_INT | HAS_ARG | OPT_EXPERT, { (void*)&wanted_stream[AVMEDIA_TYPE_AUDIO] }, "select desired audio stream", "stream_number" },
2881     { "vst", OPT_INT | HAS_ARG | OPT_EXPERT, { (void*)&wanted_stream[AVMEDIA_TYPE_VIDEO] }, "select desired video stream", "stream_number" },
2882     { "sst", OPT_INT | HAS_ARG | OPT_EXPERT, { (void*)&wanted_stream[AVMEDIA_TYPE_SUBTITLE] }, "select desired subtitle stream", "stream_number" },
2883     { "ss", HAS_ARG, { (void*)&opt_seek }, "seek to a given position in seconds", "pos" },
2884     { "t", HAS_ARG, { (void*)&opt_duration }, "play  \"duration\" seconds of audio/video", "duration" },
2885     { "bytes", OPT_INT | HAS_ARG, { (void*)&seek_by_bytes }, "seek by bytes 0=off 1=on -1=auto", "val" },
2886     { "nodisp", OPT_BOOL, { (void*)&display_disable }, "disable graphical display" },
2887     { "f", HAS_ARG, { (void*)opt_format }, "force format", "fmt" },
2888     { "pix_fmt", HAS_ARG | OPT_EXPERT | OPT_VIDEO, { (void*)opt_frame_pix_fmt }, "set pixel format", "format" },
2889     { "stats", OPT_BOOL | OPT_EXPERT, { (void*)&show_status }, "show status", "" },
2890     { "debug", HAS_ARG | OPT_EXPERT, { (void*)opt_debug }, "print specific debug info", "" },
2891     { "bug", OPT_INT | HAS_ARG | OPT_EXPERT, { (void*)&workaround_bugs }, "workaround bugs", "" },
2892     { "vismv", HAS_ARG | OPT_EXPERT, { (void*)opt_vismv }, "visualize motion vectors", "" },
2893     { "fast", OPT_BOOL | OPT_EXPERT, { (void*)&fast }, "non spec compliant optimizations", "" },
2894     { "genpts", OPT_BOOL | OPT_EXPERT, { (void*)&genpts }, "generate pts", "" },
2895     { "drp", OPT_INT | HAS_ARG | OPT_EXPERT, { (void*)&decoder_reorder_pts }, "let decoder reorder pts 0=off 1=on -1=auto", ""},
2896     { "skiploop", OPT_INT | HAS_ARG | OPT_EXPERT, { (void*)&skip_loop_filter }, "", "" },
2897     { "skipframe", OPT_INT | HAS_ARG | OPT_EXPERT, { (void*)&skip_frame }, "", "" },
2898     { "skipidct", OPT_INT | HAS_ARG | OPT_EXPERT, { (void*)&skip_idct }, "", "" },
2899     { "idct", OPT_INT | HAS_ARG | OPT_EXPERT, { (void*)&idct }, "set idct algo",  "algo" },
2900     { "ec", OPT_INT | HAS_ARG | OPT_EXPERT, { (void*)&error_concealment }, "set error concealment options",  "bit_mask" },
2901     { "sync", HAS_ARG | OPT_EXPERT, { (void*)opt_sync }, "set audio-video sync. type (type=audio/video/ext)", "type" },
2902     { "autoexit", OPT_BOOL | OPT_EXPERT, { (void*)&autoexit }, "exit at the end", "" },
2903     { "exitonkeydown", OPT_BOOL | OPT_EXPERT, { (void*)&exit_on_keydown }, "exit on key down", "" },
2904     { "exitonmousedown", OPT_BOOL | OPT_EXPERT, { (void*)&exit_on_mousedown }, "exit on mouse down", "" },
2905     { "loop", OPT_INT | HAS_ARG | OPT_EXPERT, { (void*)&loop }, "set number of times the playback shall be looped", "loop count" },
2906     { "framedrop", OPT_BOOL | OPT_EXPERT, { (void*)&framedrop }, "drop frames when cpu is too slow", "" },
2907     { "infbuf", OPT_BOOL | OPT_EXPERT, { (void*)&infinite_buffer }, "don't limit the input buffer size (useful with realtime streams)", "" },
2908     { "window_title", OPT_STRING | HAS_ARG, { (void*)&window_title }, "set window title", "window title" },
2909 #if CONFIG_AVFILTER
2910     { "vf", OPT_STRING | HAS_ARG, { (void*)&vfilters }, "video filters", "filter list" },
2911 #endif
2912     { "rdftspeed", OPT_INT | HAS_ARG| OPT_AUDIO | OPT_EXPERT, { (void*)&rdftspeed }, "rdft speed", "msecs" },
2913     { "default", HAS_ARG | OPT_AUDIO | OPT_VIDEO | OPT_EXPERT, { (void*)opt_default }, "generic catch all option", "" },
2914     { "i", 0, { NULL }, "avconv compatibility dummy option", ""},
2915     { NULL, },
2916 };
2917
2918 static void show_usage(void)
2919 {
2920     printf("Simple media player\n");
2921     printf("usage: %s [options] input_file\n", program_name);
2922     printf("\n");
2923 }
2924
2925 static void show_help(void)
2926 {
2927     av_log_set_callback(log_callback_help);
2928     show_usage();
2929     show_help_options(options, "Main options:\n",
2930                       OPT_EXPERT, 0);
2931     show_help_options(options, "\nAdvanced options:\n",
2932                       OPT_EXPERT, OPT_EXPERT);
2933     printf("\n");
2934     show_help_children(avcodec_get_class(), AV_OPT_FLAG_DECODING_PARAM);
2935     show_help_children(avformat_get_class(), AV_OPT_FLAG_DECODING_PARAM);
2936 #if !CONFIG_AVFILTER
2937     show_help_children(sws_get_class(), AV_OPT_FLAG_ENCODING_PARAM);
2938 #endif
2939     printf("\nWhile playing:\n"
2940            "q, ESC              quit\n"
2941            "f                   toggle full screen\n"
2942            "p, SPC              pause\n"
2943            "a                   cycle audio channel\n"
2944            "v                   cycle video channel\n"
2945            "t                   cycle subtitle channel\n"
2946            "w                   show audio waves\n"
2947            "s                   activate frame-step mode\n"
2948            "left/right          seek backward/forward 10 seconds\n"
2949            "down/up             seek backward/forward 1 minute\n"
2950            "mouse click         seek to percentage in file corresponding to fraction of width\n"
2951            );
2952 }
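
     /* Illustrative invocations (a sketch based on the option table above;
      * "input.mkv" and the stream URL are placeholder inputs):
      *   avplay input.mkv                      play with default A/V sync
      *   avplay -ss 30 -t 60 input.mkv         play 60 seconds starting at 0:30
      *   avplay -an -stats input.mkv           video only, with a status line
      *   avplay -sync ext rtsp://host/stream   slave A/V to the external clock
      */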
2953
2954 static void opt_input_file(void *optctx, const char *filename)
2955 {
2956     if (input_filename) {
2957         fprintf(stderr, "Argument '%s' provided as input filename, but '%s' was already specified.\n",
2958                 filename, input_filename);
2959         exit(1);
2960     }
2961     if (!strcmp(filename, "-"))
2962         filename = "pipe:";
2963     input_filename = filename;
2964 }
2965
2966 /* program entry point */
2967 int main(int argc, char **argv)
2968 {
2969     int flags;
2970
2971     av_log_set_flags(AV_LOG_SKIP_REPEATED);
2972     parse_loglevel(argc, argv, options);
2973
2974     /* register all codecs, demux and protocols */
2975     avcodec_register_all();
2976 #if CONFIG_AVDEVICE
2977     avdevice_register_all();
2978 #endif
2979 #if CONFIG_AVFILTER
2980     avfilter_register_all();
2981 #endif
2982     av_register_all();
2983     avformat_network_init();
2984
2985     init_opts();
2986
2987     show_banner();
2988
2989     parse_options(NULL, argc, argv, options, opt_input_file);
2990
2991     if (!input_filename) {
2992         show_usage();
2993         fprintf(stderr, "An input file must be specified\n");
2994         fprintf(stderr, "Use -h to get full help or, even better, run 'man %s'\n", program_name);
2995         exit(1);
2996     }
2997
2998     if (display_disable) {
2999         video_disable = 1;
3000     }
3001     flags = SDL_INIT_VIDEO | SDL_INIT_AUDIO | SDL_INIT_TIMER;
3002 #if !defined(__MINGW32__) && !defined(__APPLE__)
3003     flags |= SDL_INIT_EVENTTHREAD; /* Not supported on Windows or Mac OS X */
3004 #endif
3005     if (SDL_Init (flags)) {
3006         fprintf(stderr, "Could not initialize SDL - %s\n", SDL_GetError());
3007         exit(1);
3008     }
3009
3010     if (!display_disable) {
3011 #if HAVE_SDL_VIDEO_SIZE
3012         const SDL_VideoInfo *vi = SDL_GetVideoInfo();
3013         fs_screen_width = vi->current_w;
3014         fs_screen_height = vi->current_h;
3015 #endif
3016     }
3017
3018     SDL_EventState(SDL_ACTIVEEVENT, SDL_IGNORE);
3019     SDL_EventState(SDL_SYSWMEVENT, SDL_IGNORE);
3020     SDL_EventState(SDL_USEREVENT, SDL_IGNORE);
3021
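     /* flush_pkt is a sentinel queued after seeks; the decoder threads compare
      * pkt->data against it and call avcodec_flush_buffers() when they see it */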
3022     av_init_packet(&flush_pkt);
3023     flush_pkt.data = "FLUSH";
3024
3025     cur_stream = stream_open(input_filename, file_iformat);
3026
3027     event_loop();
3028
3029     /* never returns */
3030
3031     return 0;
3032 }