1 /*
2  * avplay : Simple Media Player based on the Libav libraries
3  * Copyright (c) 2003 Fabrice Bellard
4  *
5  * This file is part of Libav.
6  *
7  * Libav is free software; you can redistribute it and/or
8  * modify it under the terms of the GNU Lesser General Public
9  * License as published by the Free Software Foundation; either
10  * version 2.1 of the License, or (at your option) any later version.
11  *
12  * Libav is distributed in the hope that it will be useful,
13  * but WITHOUT ANY WARRANTY; without even the implied warranty of
14  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
15  * Lesser General Public License for more details.
16  *
17  * You should have received a copy of the GNU Lesser General Public
18  * License along with Libav; if not, write to the Free Software
19  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
20  */
21
22 #include "config.h"
23 #include <inttypes.h>
24 #include <math.h>
25 #include <limits.h>
26 #include "libavutil/avstring.h"
27 #include "libavutil/colorspace.h"
28 #include "libavutil/mathematics.h"
29 #include "libavutil/pixdesc.h"
30 #include "libavutil/imgutils.h"
31 #include "libavutil/dict.h"
32 #include "libavutil/parseutils.h"
33 #include "libavutil/samplefmt.h"
34 #include "libavutil/time.h"
35 #include "libavformat/avformat.h"
36 #include "libavdevice/avdevice.h"
37 #include "libswscale/swscale.h"
38 #include "libavresample/avresample.h"
39 #include "libavutil/opt.h"
40 #include "libavcodec/avfft.h"
41
42 #if CONFIG_AVFILTER
43 # include "libavfilter/avfilter.h"
44 # include "libavfilter/avfiltergraph.h"
45 # include "libavfilter/buffersink.h"
46 # include "libavfilter/buffersrc.h"
47 #endif
48
49 #include "cmdutils.h"
50
51 #include <SDL.h>
52 #include <SDL_thread.h>
53
54 #ifdef __MINGW32__
55 #undef main /* We don't want SDL to override our main() */
56 #endif
57
58 #include <assert.h>
59
60 const char program_name[] = "avplay";
61 const int program_birth_year = 2003;
62
63 #define MAX_QUEUE_SIZE (15 * 1024 * 1024)
64 #define MIN_AUDIOQ_SIZE (20 * 16 * 1024)
65 #define MIN_FRAMES 5
66
67 /* SDL audio buffer size, in samples. Should be small to have precise
68    A/V sync as SDL does not have hardware buffer fullness info. */
69 #define SDL_AUDIO_BUFFER_SIZE 1024
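
/* Rough latency arithmetic (illustrative only; the 44100 Hz rate is an
 * assumption, nothing in this file fixes it):
 *   callback interval = SDL_AUDIO_BUFFER_SIZE / sample_rate
 *                     = 1024 / 44100 ~= 23 ms
 * so a clock read between callbacks can be off by up to about one buffer,
 * which is why get_audio_clock() below subtracts the data still sitting in
 * the output buffer from the last known PTS. */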
70
71 /* no AV sync correction is done if below the AV sync threshold */
72 #define AV_SYNC_THRESHOLD 0.01
74 /* no AV correction is done if the error is too big */
74 #define AV_NOSYNC_THRESHOLD 10.0
75
76 #define FRAME_SKIP_FACTOR 0.05
77
78 /* maximum audio speed change to get correct sync */
79 #define SAMPLE_CORRECTION_PERCENT_MAX 10
80
81 /* we use about AUDIO_DIFF_AVG_NB A-V differences to make the average */
82 #define AUDIO_DIFF_AVG_NB   20
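
/* Sketch of how this average is used (the coefficient formula is the one
 * used elsewhere in this player, outside this excerpt; treat it as an
 * assumption here):
 *   audio_diff_avg_coef = exp(log(0.01) / AUDIO_DIFF_AVG_NB) ~= 0.794
 *   audio_diff_cum      = diff + audio_diff_avg_coef * audio_diff_cum
 *   avg_diff            = audio_diff_cum * (1.0 - audio_diff_avg_coef)
 * i.e. a geometric average where the newest A-V difference has weight ~0.21
 * and a difference AUDIO_DIFF_AVG_NB frames old has decayed to about 1%. */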
83
84 /* NOTE: the size must be big enough to compensate for the hardware audio buffer size */
85 #define SAMPLE_ARRAY_SIZE (2 * 65536)
86
87 static int sws_flags = SWS_BICUBIC;
88
89 typedef struct PacketQueue {
90     AVPacketList *first_pkt, *last_pkt;
91     int nb_packets;
92     int size;
93     int abort_request;
94     SDL_mutex *mutex;
95     SDL_cond *cond;
96 } PacketQueue;
97
98 #define VIDEO_PICTURE_QUEUE_SIZE 2
99 #define SUBPICTURE_QUEUE_SIZE 4
100
101 typedef struct VideoPicture {
102     double pts;                                  ///< presentation time stamp for this picture
103     double target_clock;                         ///< av_gettime() time at which this should be displayed ideally
104     int64_t pos;                                 ///< byte position in file
105     SDL_Overlay *bmp;
106     int width, height; /* source height & width */
107     int allocated;
108     int reallocate;
109     enum PixelFormat pix_fmt;
110
111 #if CONFIG_AVFILTER
112     AVFilterBufferRef *picref;
113 #endif
114 } VideoPicture;
115
116 typedef struct SubPicture {
117     double pts; /* presentation time stamp for this picture */
118     AVSubtitle sub;
119 } SubPicture;
120
121 enum {
122     AV_SYNC_AUDIO_MASTER, /* default choice */
123     AV_SYNC_VIDEO_MASTER,
124     AV_SYNC_EXTERNAL_CLOCK, /* synchronize to an external clock */
125 };
126
127 typedef struct VideoState {
128     SDL_Thread *parse_tid;
129     SDL_Thread *video_tid;
130     SDL_Thread *refresh_tid;
131     AVInputFormat *iformat;
132     int no_background;
133     int abort_request;
134     int paused;
135     int last_paused;
136     int seek_req;
137     int seek_flags;
138     int64_t seek_pos;
139     int64_t seek_rel;
140     int read_pause_return;
141     AVFormatContext *ic;
142
143     int audio_stream;
144
145     int av_sync_type;
146     double external_clock; /* external clock base */
147     int64_t external_clock_time;
148
149     double audio_clock;
150     double audio_diff_cum; /* used for AV difference average computation */
151     double audio_diff_avg_coef;
152     double audio_diff_threshold;
153     int audio_diff_avg_count;
154     AVStream *audio_st;
155     PacketQueue audioq;
156     int audio_hw_buf_size;
157     uint8_t silence_buf[SDL_AUDIO_BUFFER_SIZE];
158     uint8_t *audio_buf;
159     uint8_t *audio_buf1;
160     unsigned int audio_buf_size; /* in bytes */
161     int audio_buf_index; /* in bytes */
162     AVPacket audio_pkt_temp;
163     AVPacket audio_pkt;
164     enum AVSampleFormat sdl_sample_fmt;
165     uint64_t sdl_channel_layout;
166     int sdl_channels;
167     enum AVSampleFormat resample_sample_fmt;
168     uint64_t resample_channel_layout;
169     AVAudioResampleContext *avr;
170     AVFrame *frame;
171
172     int show_audio; /* if true, display audio samples */
173     int16_t sample_array[SAMPLE_ARRAY_SIZE];
174     int sample_array_index;
175     int last_i_start;
176     RDFTContext *rdft;
177     int rdft_bits;
178     FFTSample *rdft_data;
179     int xpos;
180
181     SDL_Thread *subtitle_tid;
182     int subtitle_stream;
183     int subtitle_stream_changed;
184     AVStream *subtitle_st;
185     PacketQueue subtitleq;
186     SubPicture subpq[SUBPICTURE_QUEUE_SIZE];
187     int subpq_size, subpq_rindex, subpq_windex;
188     SDL_mutex *subpq_mutex;
189     SDL_cond *subpq_cond;
190
191     double frame_timer;
192     double frame_last_pts;
193     double frame_last_delay;
194     double video_clock;                          ///< pts of last decoded frame / predicted pts of next decoded frame
195     int video_stream;
196     AVStream *video_st;
197     PacketQueue videoq;
198     double video_current_pts;                    ///< current displayed pts (different from video_clock if frame fifos are used)
199     double video_current_pts_drift;              ///< video_current_pts - time (av_gettime) at which we updated video_current_pts - used to have running video pts
200     int64_t video_current_pos;                   ///< current displayed file pos
201     VideoPicture pictq[VIDEO_PICTURE_QUEUE_SIZE];
202     int pictq_size, pictq_rindex, pictq_windex;
203     SDL_mutex *pictq_mutex;
204     SDL_cond *pictq_cond;
205 #if !CONFIG_AVFILTER
206     struct SwsContext *img_convert_ctx;
207 #endif
208
209     //    QETimer *video_timer;
210     char filename[1024];
211     int width, height, xleft, ytop;
212
213     PtsCorrectionContext pts_ctx;
214
215 #if CONFIG_AVFILTER
216     AVFilterContext *in_video_filter;           ///< the first filter in the video chain
217     AVFilterContext *out_video_filter;          ///< the last filter in the video chain
218     int use_dr1;
219     FrameBuffer *buffer_pool;
220 #endif
221
222     float skip_frames;
223     float skip_frames_index;
224     int refresh;
225 } VideoState;
226
227 static void show_help(void);
228
229 /* options specified by the user */
230 static AVInputFormat *file_iformat;
231 static const char *input_filename;
232 static const char *window_title;
233 static int fs_screen_width;
234 static int fs_screen_height;
235 static int screen_width  = 0;
236 static int screen_height = 0;
237 static int audio_disable;
238 static int video_disable;
239 static int wanted_stream[AVMEDIA_TYPE_NB] = {
240     [AVMEDIA_TYPE_AUDIO]    = -1,
241     [AVMEDIA_TYPE_VIDEO]    = -1,
242     [AVMEDIA_TYPE_SUBTITLE] = -1,
243 };
244 static int seek_by_bytes = -1;
245 static int display_disable;
246 static int show_status = 1;
247 static int av_sync_type = AV_SYNC_AUDIO_MASTER;
248 static int64_t start_time = AV_NOPTS_VALUE;
249 static int64_t duration = AV_NOPTS_VALUE;
250 static int debug = 0;
251 static int debug_mv = 0;
252 static int step = 0;
253 static int workaround_bugs = 1;
254 static int fast = 0;
255 static int genpts = 0;
256 static int idct = FF_IDCT_AUTO;
257 static enum AVDiscard skip_frame       = AVDISCARD_DEFAULT;
258 static enum AVDiscard skip_idct        = AVDISCARD_DEFAULT;
259 static enum AVDiscard skip_loop_filter = AVDISCARD_DEFAULT;
260 static int error_concealment = 3;
261 static int decoder_reorder_pts = -1;
262 static int autoexit;
263 static int exit_on_keydown;
264 static int exit_on_mousedown;
265 static int loop = 1;
266 static int framedrop = 1;
267
268 static int rdftspeed = 20;
269 #if CONFIG_AVFILTER
270 static char *vfilters = NULL;
271 #endif
272
273 /* current context */
274 static int is_full_screen;
275 static VideoState *cur_stream;
276 static int64_t audio_callback_time;
277
278 static AVPacket flush_pkt;
279
280 #define FF_ALLOC_EVENT   (SDL_USEREVENT)
281 #define FF_REFRESH_EVENT (SDL_USEREVENT + 1)
282 #define FF_QUIT_EVENT    (SDL_USEREVENT + 2)
283
284 static SDL_Surface *screen;
285
286 void exit_program(int ret)
287 {
288     exit(ret);
289 }
290
291 static int packet_queue_put(PacketQueue *q, AVPacket *pkt);
292
293 /* packet queue handling */
294 static void packet_queue_init(PacketQueue *q)
295 {
296     memset(q, 0, sizeof(PacketQueue));
297     q->mutex = SDL_CreateMutex();
298     q->cond = SDL_CreateCond();
299     packet_queue_put(q, &flush_pkt);
300 }
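
/* Note on the flush packet: packet_queue_init() immediately queues the global
 * flush_pkt sentinel, so the first thing each decoder thread dequeues is a
 * flush request and it starts from a clean avcodec_flush_buffers() state (see
 * get_video_frame() below).  Seeking pushes the same sentinel again to reset
 * decoder state mid-stream; that path is outside this excerpt. */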
301
302 static void packet_queue_flush(PacketQueue *q)
303 {
304     AVPacketList *pkt, *pkt1;
305
306     SDL_LockMutex(q->mutex);
307     for (pkt = q->first_pkt; pkt != NULL; pkt = pkt1) {
308         pkt1 = pkt->next;
309         av_free_packet(&pkt->pkt);
310         av_freep(&pkt);
311     }
312     q->last_pkt = NULL;
313     q->first_pkt = NULL;
314     q->nb_packets = 0;
315     q->size = 0;
316     SDL_UnlockMutex(q->mutex);
317 }
318
319 static void packet_queue_end(PacketQueue *q)
320 {
321     packet_queue_flush(q);
322     SDL_DestroyMutex(q->mutex);
323     SDL_DestroyCond(q->cond);
324 }
325
326 static int packet_queue_put(PacketQueue *q, AVPacket *pkt)
327 {
328     AVPacketList *pkt1;
329
330     /* duplicate the packet */
331     if (pkt != &flush_pkt && av_dup_packet(pkt) < 0)
332         return -1;
333
334     pkt1 = av_malloc(sizeof(AVPacketList));
335     if (!pkt1)
336         return -1;
337     pkt1->pkt = *pkt;
338     pkt1->next = NULL;
339
340
341     SDL_LockMutex(q->mutex);
342
343     if (!q->last_pkt)
344
345         q->first_pkt = pkt1;
346     else
347         q->last_pkt->next = pkt1;
348     q->last_pkt = pkt1;
349     q->nb_packets++;
350     q->size += pkt1->pkt.size + sizeof(*pkt1);
351     /* XXX: should duplicate packet data in DV case */
352     SDL_CondSignal(q->cond);
353
354     SDL_UnlockMutex(q->mutex);
355     return 0;
356 }
357
358 static void packet_queue_abort(PacketQueue *q)
359 {
360     SDL_LockMutex(q->mutex);
361
362     q->abort_request = 1;
363
364     SDL_CondSignal(q->cond);
365
366     SDL_UnlockMutex(q->mutex);
367 }
368
369 /* return < 0 if aborted, 0 if no packet and > 0 if packet.  */
370 static int packet_queue_get(PacketQueue *q, AVPacket *pkt, int block)
371 {
372     AVPacketList *pkt1;
373     int ret;
374
375     SDL_LockMutex(q->mutex);
376
377     for (;;) {
378         if (q->abort_request) {
379             ret = -1;
380             break;
381         }
382
383         pkt1 = q->first_pkt;
384         if (pkt1) {
385             q->first_pkt = pkt1->next;
386             if (!q->first_pkt)
387                 q->last_pkt = NULL;
388             q->nb_packets--;
389             q->size -= pkt1->pkt.size + sizeof(*pkt1);
390             *pkt = pkt1->pkt;
391             av_free(pkt1);
392             ret = 1;
393             break;
394         } else if (!block) {
395             ret = 0;
396             break;
397         } else {
398             SDL_CondWait(q->cond, q->mutex);
399         }
400     }
401     SDL_UnlockMutex(q->mutex);
402     return ret;
403 }
404
405 static inline void fill_rectangle(SDL_Surface *screen,
406                                   int x, int y, int w, int h, int color)
407 {
408     SDL_Rect rect;
409     rect.x = x;
410     rect.y = y;
411     rect.w = w;
412     rect.h = h;
413     SDL_FillRect(screen, &rect, color);
414 }
415
416 #define ALPHA_BLEND(a, oldp, newp, s)\
417 ((((oldp << s) * (255 - (a))) + (newp * (a))) / (255 << s))
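
/* Worked example of the macro above (numbers are illustrative, not from the
 * source): with s = 0, a = 128, oldp = 200, newp = 16:
 *   (200 * (255 - 128) + 16 * 128) / 255 = (25400 + 2048) / 255 ~= 107
 * a == 0 leaves oldp untouched and a == 255 replaces it with newp; the shift
 * s lets the chroma code below feed in sums of 2 or 4 samples without
 * dividing them down first. */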
418
419 #define RGBA_IN(r, g, b, a, s)\
420 {\
421     unsigned int v = ((const uint32_t *)(s))[0];\
422     a = (v >> 24) & 0xff;\
423     r = (v >> 16) & 0xff;\
424     g = (v >> 8) & 0xff;\
425     b = v & 0xff;\
426 }
427
428 #define YUVA_IN(y, u, v, a, s, pal)\
429 {\
430     unsigned int val = ((const uint32_t *)(pal))[*(const uint8_t*)(s)];\
431     a = (val >> 24) & 0xff;\
432     y = (val >> 16) & 0xff;\
433     u = (val >> 8) & 0xff;\
434     v = val & 0xff;\
435 }
436
437 #define YUVA_OUT(d, y, u, v, a)\
438 {\
439     ((uint32_t *)(d))[0] = (a << 24) | (y << 16) | (u << 8) | v;\
440 }
441
442
443 #define BPP 1
444
445 static void blend_subrect(AVPicture *dst, const AVSubtitleRect *rect, int imgw, int imgh)
446 {
447     int wrap, wrap3, width2, skip2;
448     int y, u, v, a, u1, v1, a1, w, h;
449     uint8_t *lum, *cb, *cr;
450     const uint8_t *p;
451     const uint32_t *pal;
452     int dstx, dsty, dstw, dsth;
453
454     dstw = av_clip(rect->w, 0, imgw);
455     dsth = av_clip(rect->h, 0, imgh);
456     dstx = av_clip(rect->x, 0, imgw - dstw);
457     dsty = av_clip(rect->y, 0, imgh - dsth);
458     lum = dst->data[0] + dsty * dst->linesize[0];
459     cb  = dst->data[1] + (dsty >> 1) * dst->linesize[1];
460     cr  = dst->data[2] + (dsty >> 1) * dst->linesize[2];
461
462     width2 = ((dstw + 1) >> 1) + (dstx & ~dstw & 1);
463     skip2 = dstx >> 1;
464     wrap = dst->linesize[0];
465     wrap3 = rect->pict.linesize[0];
466     p = rect->pict.data[0];
467     pal = (const uint32_t *)rect->pict.data[1];  /* Now in YCrCb! */
468
469     if (dsty & 1) {
470         lum += dstx;
471         cb += skip2;
472         cr += skip2;
473
474         if (dstx & 1) {
475             YUVA_IN(y, u, v, a, p, pal);
476             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
477             cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
478             cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
479             cb++;
480             cr++;
481             lum++;
482             p += BPP;
483         }
484         for (w = dstw - (dstx & 1); w >= 2; w -= 2) {
485             YUVA_IN(y, u, v, a, p, pal);
486             u1 = u;
487             v1 = v;
488             a1 = a;
489             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
490
491             YUVA_IN(y, u, v, a, p + BPP, pal);
492             u1 += u;
493             v1 += v;
494             a1 += a;
495             lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
496             cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
497             cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
498             cb++;
499             cr++;
500             p += 2 * BPP;
501             lum += 2;
502         }
503         if (w) {
504             YUVA_IN(y, u, v, a, p, pal);
505             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
506             cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
507             cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
508             p++;
509             lum++;
510         }
511         p += wrap3 - dstw * BPP;
512         lum += wrap - dstw - dstx;
513         cb += dst->linesize[1] - width2 - skip2;
514         cr += dst->linesize[2] - width2 - skip2;
515     }
516     for (h = dsth - (dsty & 1); h >= 2; h -= 2) {
517         lum += dstx;
518         cb += skip2;
519         cr += skip2;
520
521         if (dstx & 1) {
522             YUVA_IN(y, u, v, a, p, pal);
523             u1 = u;
524             v1 = v;
525             a1 = a;
526             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
527             p += wrap3;
528             lum += wrap;
529             YUVA_IN(y, u, v, a, p, pal);
530             u1 += u;
531             v1 += v;
532             a1 += a;
533             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
534             cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
535             cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
536             cb++;
537             cr++;
538             p += -wrap3 + BPP;
539             lum += -wrap + 1;
540         }
541         for (w = dstw - (dstx & 1); w >= 2; w -= 2) {
542             YUVA_IN(y, u, v, a, p, pal);
543             u1 = u;
544             v1 = v;
545             a1 = a;
546             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
547
548             YUVA_IN(y, u, v, a, p + BPP, pal);
549             u1 += u;
550             v1 += v;
551             a1 += a;
552             lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
553             p += wrap3;
554             lum += wrap;
555
556             YUVA_IN(y, u, v, a, p, pal);
557             u1 += u;
558             v1 += v;
559             a1 += a;
560             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
561
562             YUVA_IN(y, u, v, a, p + BPP, pal);
563             u1 += u;
564             v1 += v;
565             a1 += a;
566             lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
567
568             cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 2);
569             cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 2);
570
571             cb++;
572             cr++;
573             p += -wrap3 + 2 * BPP;
574             lum += -wrap + 2;
575         }
576         if (w) {
577             YUVA_IN(y, u, v, a, p, pal);
578             u1 = u;
579             v1 = v;
580             a1 = a;
581             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
582             p += wrap3;
583             lum += wrap;
584             YUVA_IN(y, u, v, a, p, pal);
585             u1 += u;
586             v1 += v;
587             a1 += a;
588             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
589             cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
590             cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
591             cb++;
592             cr++;
593             p += -wrap3 + BPP;
594             lum += -wrap + 1;
595         }
596         p += wrap3 + (wrap3 - dstw * BPP);
597         lum += wrap + (wrap - dstw - dstx);
598         cb += dst->linesize[1] - width2 - skip2;
599         cr += dst->linesize[2] - width2 - skip2;
600     }
601     /* handle odd height */
602     if (h) {
603         lum += dstx;
604         cb += skip2;
605         cr += skip2;
606
607         if (dstx & 1) {
608             YUVA_IN(y, u, v, a, p, pal);
609             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
610             cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
611             cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
612             cb++;
613             cr++;
614             lum++;
615             p += BPP;
616         }
617         for (w = dstw - (dstx & 1); w >= 2; w -= 2) {
618             YUVA_IN(y, u, v, a, p, pal);
619             u1 = u;
620             v1 = v;
621             a1 = a;
622             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
623
624             YUVA_IN(y, u, v, a, p + BPP, pal);
625             u1 += u;
626             v1 += v;
627             a1 += a;
628             lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
629             cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u, 1);
630             cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v, 1);
631             cb++;
632             cr++;
633             p += 2 * BPP;
634             lum += 2;
635         }
636         if (w) {
637             YUVA_IN(y, u, v, a, p, pal);
638             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
639             cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
640             cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
641         }
642     }
643 }
644
645 static void free_subpicture(SubPicture *sp)
646 {
647     avsubtitle_free(&sp->sub);
648 }
649
650 static void video_image_display(VideoState *is)
651 {
652     VideoPicture *vp;
653     SubPicture *sp;
654     AVPicture pict;
655     float aspect_ratio;
656     int width, height, x, y;
657     SDL_Rect rect;
658     int i;
659
660     vp = &is->pictq[is->pictq_rindex];
661     if (vp->bmp) {
662 #if CONFIG_AVFILTER
663          if (vp->picref->video->pixel_aspect.num == 0)
664              aspect_ratio = 0;
665          else
666              aspect_ratio = av_q2d(vp->picref->video->pixel_aspect);
667 #else
668
669         /* XXX: use variable in the frame */
670         if (is->video_st->sample_aspect_ratio.num)
671             aspect_ratio = av_q2d(is->video_st->sample_aspect_ratio);
672         else if (is->video_st->codec->sample_aspect_ratio.num)
673             aspect_ratio = av_q2d(is->video_st->codec->sample_aspect_ratio);
674         else
675             aspect_ratio = 0;
676 #endif
677         if (aspect_ratio <= 0.0)
678             aspect_ratio = 1.0;
679         aspect_ratio *= (float)vp->width / (float)vp->height;
680
681         if (is->subtitle_st)
682         {
683             if (is->subpq_size > 0)
684             {
685                 sp = &is->subpq[is->subpq_rindex];
686
687                 if (vp->pts >= sp->pts + ((float) sp->sub.start_display_time / 1000))
688                 {
689                     SDL_LockYUVOverlay (vp->bmp);
690
691                     pict.data[0] = vp->bmp->pixels[0];
692                     pict.data[1] = vp->bmp->pixels[2];
693                     pict.data[2] = vp->bmp->pixels[1];
694
695                     pict.linesize[0] = vp->bmp->pitches[0];
696                     pict.linesize[1] = vp->bmp->pitches[2];
697                     pict.linesize[2] = vp->bmp->pitches[1];
698
699                     for (i = 0; i < sp->sub.num_rects; i++)
700                         blend_subrect(&pict, sp->sub.rects[i],
701                                       vp->bmp->w, vp->bmp->h);
702
703                     SDL_UnlockYUVOverlay (vp->bmp);
704                 }
705             }
706         }
707
708
709         /* XXX: we assume the screen has a 1.0 pixel aspect ratio */
710         height = is->height;
711         width = ((int)rint(height * aspect_ratio)) & ~1;
712         if (width > is->width) {
713             width = is->width;
714             height = ((int)rint(width / aspect_ratio)) & ~1;
715         }
716         x = (is->width - width) / 2;
717         y = (is->height - height) / 2;
718         is->no_background = 0;
719         rect.x = is->xleft + x;
720         rect.y = is->ytop  + y;
721         rect.w = width;
722         rect.h = height;
723         SDL_DisplayYUVOverlay(vp->bmp, &rect);
724     }
725 }
726
727 /* get the current audio output buffer size, in bytes. With SDL, we
728    cannot have precise information */
729 static int audio_write_get_buf_size(VideoState *is)
730 {
731     return is->audio_buf_size - is->audio_buf_index;
732 }
733
734 static inline int compute_mod(int a, int b)
735 {
736     a = a % b;
737     if (a >= 0)
738         return a;
739     else
740         return a + b;
741 }
742
743 static void video_audio_display(VideoState *s)
744 {
745     int i, i_start, x, y1, y, ys, delay, n, nb_display_channels;
746     int ch, channels, h, h2, bgcolor, fgcolor;
747     int64_t time_diff;
748     int rdft_bits, nb_freq;
749
750     for (rdft_bits = 1; (1 << rdft_bits) < 2 * s->height; rdft_bits++)
751         ;
752     nb_freq = 1 << (rdft_bits - 1);
753
754     /* compute display index: center on currently output samples */
755     channels = s->sdl_channels;
756     nb_display_channels = channels;
757     if (!s->paused) {
758         int data_used = s->show_audio == 1 ? s->width : (2 * nb_freq);
759         n = 2 * channels;
760         delay = audio_write_get_buf_size(s);
761         delay /= n;
762
763         /* to be more precise, we take into account the time spent since
764            the last buffer computation */
765         if (audio_callback_time) {
766             time_diff = av_gettime() - audio_callback_time;
767             delay -= (time_diff * s->audio_st->codec->sample_rate) / 1000000;
768         }
769
770         delay += 2 * data_used;
771         if (delay < data_used)
772             delay = data_used;
773
774         i_start= x = compute_mod(s->sample_array_index - delay * channels, SAMPLE_ARRAY_SIZE);
775         if (s->show_audio == 1) {
776             h = INT_MIN;
777             for (i = 0; i < 1000; i += channels) {
778                 int idx = (SAMPLE_ARRAY_SIZE + x - i) % SAMPLE_ARRAY_SIZE;
779                 int a = s->sample_array[idx];
780                 int b = s->sample_array[(idx + 4 * channels) % SAMPLE_ARRAY_SIZE];
781                 int c = s->sample_array[(idx + 5 * channels) % SAMPLE_ARRAY_SIZE];
782                 int d = s->sample_array[(idx + 9 * channels) % SAMPLE_ARRAY_SIZE];
783                 int score = a - d;
784                 if (h < score && (b ^ c) < 0) {
785                     h = score;
786                     i_start = idx;
787                 }
788             }
789         }
790
791         s->last_i_start = i_start;
792     } else {
793         i_start = s->last_i_start;
794     }
795
796     bgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0x00);
797     if (s->show_audio == 1) {
798         fill_rectangle(screen,
799                        s->xleft, s->ytop, s->width, s->height,
800                        bgcolor);
801
802         fgcolor = SDL_MapRGB(screen->format, 0xff, 0xff, 0xff);
803
804         /* total height for one channel */
805         h = s->height / nb_display_channels;
806         /* graph height / 2 */
807         h2 = (h * 9) / 20;
808         for (ch = 0; ch < nb_display_channels; ch++) {
809             i = i_start + ch;
810             y1 = s->ytop + ch * h + (h / 2); /* position of center line */
811             for (x = 0; x < s->width; x++) {
812                 y = (s->sample_array[i] * h2) >> 15;
813                 if (y < 0) {
814                     y = -y;
815                     ys = y1 - y;
816                 } else {
817                     ys = y1;
818                 }
819                 fill_rectangle(screen,
820                                s->xleft + x, ys, 1, y,
821                                fgcolor);
822                 i += channels;
823                 if (i >= SAMPLE_ARRAY_SIZE)
824                     i -= SAMPLE_ARRAY_SIZE;
825             }
826         }
827
828         fgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0xff);
829
830         for (ch = 1; ch < nb_display_channels; ch++) {
831             y = s->ytop + ch * h;
832             fill_rectangle(screen,
833                            s->xleft, y, s->width, 1,
834                            fgcolor);
835         }
836         SDL_UpdateRect(screen, s->xleft, s->ytop, s->width, s->height);
837     } else {
838         nb_display_channels= FFMIN(nb_display_channels, 2);
839         if (rdft_bits != s->rdft_bits) {
840             av_rdft_end(s->rdft);
841             av_free(s->rdft_data);
842             s->rdft = av_rdft_init(rdft_bits, DFT_R2C);
843             s->rdft_bits = rdft_bits;
844             s->rdft_data = av_malloc(4 * nb_freq * sizeof(*s->rdft_data));
845         }
846         {
847             FFTSample *data[2];
848             for (ch = 0; ch < nb_display_channels; ch++) {
849                 data[ch] = s->rdft_data + 2 * nb_freq * ch;
850                 i = i_start + ch;
851                 for (x = 0; x < 2 * nb_freq; x++) {
852                     double w = (x-nb_freq) * (1.0 / nb_freq);
853                     data[ch][x] = s->sample_array[i] * (1.0 - w * w);
854                     i += channels;
855                     if (i >= SAMPLE_ARRAY_SIZE)
856                         i -= SAMPLE_ARRAY_SIZE;
857                 }
858                 av_rdft_calc(s->rdft, data[ch]);
859             }
860             // least efficient way to do this; we should of course access it directly, but it's more than fast enough
861             for (y = 0; y < s->height; y++) {
862                 double w = 1 / sqrt(nb_freq);
863                 int a = sqrt(w * sqrt(data[0][2 * y + 0] * data[0][2 * y + 0] + data[0][2 * y + 1] * data[0][2 * y + 1]));
864                 int b = (nb_display_channels == 2 ) ? sqrt(w * sqrt(data[1][2 * y + 0] * data[1][2 * y + 0]
865                        + data[1][2 * y + 1] * data[1][2 * y + 1])) : a;
866                 a = FFMIN(a, 255);
867                 b = FFMIN(b, 255);
868                 fgcolor = SDL_MapRGB(screen->format, a, b, (a + b) / 2);
869
870                 fill_rectangle(screen,
871                             s->xpos, s->height-y, 1, 1,
872                             fgcolor);
873             }
874         }
875         SDL_UpdateRect(screen, s->xpos, s->ytop, 1, s->height);
876         s->xpos++;
877         if (s->xpos >= s->width)
878             s->xpos= s->xleft;
879     }
880 }
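
/* The spectrum branch above weights each block of 2*nb_freq samples with
 * 1 - w*w (a Welch window, w running from -1 to 1) before the real FFT, and
 * the nested sqrt() when turning bins into pixel brightness is simply an ad
 * hoc compression curve so quiet content stays visible; neither choice is
 * mandated by anything, they are display cosmetics. */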
881
882 static int video_open(VideoState *is)
883 {
884     int flags = SDL_HWSURFACE | SDL_ASYNCBLIT | SDL_HWACCEL;
885     int w,h;
886
887     if (is_full_screen) flags |= SDL_FULLSCREEN;
888     else                flags |= SDL_RESIZABLE;
889
890     if (is_full_screen && fs_screen_width) {
891         w = fs_screen_width;
892         h = fs_screen_height;
893     } else if (!is_full_screen && screen_width) {
894         w = screen_width;
895         h = screen_height;
896 #if CONFIG_AVFILTER
897     } else if (is->out_video_filter && is->out_video_filter->inputs[0]) {
898         w = is->out_video_filter->inputs[0]->w;
899         h = is->out_video_filter->inputs[0]->h;
900 #else
901     } else if (is->video_st && is->video_st->codec->width) {
902         w = is->video_st->codec->width;
903         h = is->video_st->codec->height;
904 #endif
905     } else {
906         w = 640;
907         h = 480;
908     }
909     if (screen && is->width == screen->w && screen->w == w
910        && is->height== screen->h && screen->h == h)
911         return 0;
912
913 #if defined(__APPLE__) && !SDL_VERSION_ATLEAST(1, 2, 14)
914     /* setting bits_per_pixel = 0 or 32 causes blank video on OS X and older SDL */
915     screen = SDL_SetVideoMode(w, h, 24, flags);
916 #else
917     screen = SDL_SetVideoMode(w, h, 0, flags);
918 #endif
919     if (!screen) {
920         fprintf(stderr, "SDL: could not set video mode - exiting\n");
921         return -1;
922     }
923     if (!window_title)
924         window_title = input_filename;
925     SDL_WM_SetCaption(window_title, window_title);
926
927     is->width  = screen->w;
928     is->height = screen->h;
929
930     return 0;
931 }
932
933 /* display the current picture, if any */
934 static void video_display(VideoState *is)
935 {
936     if (!screen)
937         video_open(cur_stream);
938     if (is->audio_st && is->show_audio)
939         video_audio_display(is);
940     else if (is->video_st)
941         video_image_display(is);
942 }
943
944 static int refresh_thread(void *opaque)
945 {
946     VideoState *is= opaque;
947     while (!is->abort_request) {
948         SDL_Event event;
949         event.type = FF_REFRESH_EVENT;
950         event.user.data1 = opaque;
951         if (!is->refresh) {
952             is->refresh = 1;
953             SDL_PushEvent(&event);
954         }
955         av_usleep(is->audio_st && is->show_audio ? rdftspeed * 1000 : 5000); // FIXME: ideally we should wait for the correct time, but SDL's event passing is so slow it would be silly
956     }
957     return 0;
958 }
959
960 /* get the current audio clock value */
961 static double get_audio_clock(VideoState *is)
962 {
963     double pts;
964     int hw_buf_size, bytes_per_sec;
965     pts = is->audio_clock;
966     hw_buf_size = audio_write_get_buf_size(is);
967     bytes_per_sec = 0;
968     if (is->audio_st) {
969         bytes_per_sec = is->audio_st->codec->sample_rate * is->sdl_channels *
970                         av_get_bytes_per_sample(is->sdl_sample_fmt);
971     }
972     if (bytes_per_sec)
973         pts -= (double)hw_buf_size / bytes_per_sec;
974     return pts;
975 }
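
/* Illustrative numbers for the correction above (not from a real run): with
 * 2 channels of 16-bit samples at 48000 Hz, bytes_per_sec = 48000 * 2 * 2 =
 * 192000.  If audio_clock says 10.000 s but 19200 bytes still sit in our
 * output buffer, the sample currently audible is
 *   10.000 - 19200 / 192000 = 9.900 s
 * which is what the A/V sync code gets back. */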
976
977 /* get the current video clock value */
978 static double get_video_clock(VideoState *is)
979 {
980     if (is->paused) {
981         return is->video_current_pts;
982     } else {
983         return is->video_current_pts_drift + av_gettime() / 1000000.0;
984     }
985 }
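
/* video_current_pts_drift is kept as (pts - wallclock at the moment of the
 * last update), so adding the current wallclock back yields a clock that
 * keeps running between frame updates.  Hypothetical numbers: a frame with
 * pts = 5.00 s shown at wallclock 100.00 s stores drift = -95.00; queried
 * again at 100.04 s this returns -95.00 + 100.04 = 5.04 s. */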
986
987 /* get the current external clock value */
988 static double get_external_clock(VideoState *is)
989 {
990     int64_t ti;
991     ti = av_gettime();
992     return is->external_clock + ((ti - is->external_clock_time) * 1e-6);
993 }
994
995 /* get the current master clock value */
996 static double get_master_clock(VideoState *is)
997 {
998     double val;
999
1000     if (is->av_sync_type == AV_SYNC_VIDEO_MASTER) {
1001         if (is->video_st)
1002             val = get_video_clock(is);
1003         else
1004             val = get_audio_clock(is);
1005     } else if (is->av_sync_type == AV_SYNC_AUDIO_MASTER) {
1006         if (is->audio_st)
1007             val = get_audio_clock(is);
1008         else
1009             val = get_video_clock(is);
1010     } else {
1011         val = get_external_clock(is);
1012     }
1013     return val;
1014 }
1015
1016 /* seek in the stream */
1017 static void stream_seek(VideoState *is, int64_t pos, int64_t rel, int seek_by_bytes)
1018 {
1019     if (!is->seek_req) {
1020         is->seek_pos = pos;
1021         is->seek_rel = rel;
1022         is->seek_flags &= ~AVSEEK_FLAG_BYTE;
1023         if (seek_by_bytes)
1024             is->seek_flags |= AVSEEK_FLAG_BYTE;
1025         is->seek_req = 1;
1026     }
1027 }
1028
1029 /* pause or resume the video */
1030 static void stream_pause(VideoState *is)
1031 {
1032     if (is->paused) {
1033         is->frame_timer += av_gettime() / 1000000.0 + is->video_current_pts_drift - is->video_current_pts;
1034         if (is->read_pause_return != AVERROR(ENOSYS)) {
1035             is->video_current_pts = is->video_current_pts_drift + av_gettime() / 1000000.0;
1036         }
1037         is->video_current_pts_drift = is->video_current_pts - av_gettime() / 1000000.0;
1038     }
1039     is->paused = !is->paused;
1040 }
1041
1042 static double compute_target_time(double frame_current_pts, VideoState *is)
1043 {
1044     double delay, sync_threshold, diff;
1045
1046     /* compute nominal delay */
1047     delay = frame_current_pts - is->frame_last_pts;
1048     if (delay <= 0 || delay >= 10.0) {
1049         /* if incorrect delay, use previous one */
1050         delay = is->frame_last_delay;
1051     } else {
1052         is->frame_last_delay = delay;
1053     }
1054     is->frame_last_pts = frame_current_pts;
1055
1056     /* update delay to follow master synchronisation source */
1057     if (((is->av_sync_type == AV_SYNC_AUDIO_MASTER && is->audio_st) ||
1058          is->av_sync_type == AV_SYNC_EXTERNAL_CLOCK)) {
1059         /* if video is slave, we try to correct big delays by
1060            duplicating or deleting a frame */
1061         diff = get_video_clock(is) - get_master_clock(is);
1062
1063         /* skip or repeat frame. We take into account the
1064            delay to compute the threshold. I still don't know
1065            if it is the best guess */
1066         sync_threshold = FFMAX(AV_SYNC_THRESHOLD, delay);
1067         if (fabs(diff) < AV_NOSYNC_THRESHOLD) {
1068             if (diff <= -sync_threshold)
1069                 delay = 0;
1070             else if (diff >= sync_threshold)
1071                 delay = 2 * delay;
1072         }
1073     }
1074     is->frame_timer += delay;
1075
1076     av_dlog(NULL, "video: delay=%0.3f pts=%0.3f A-V=%f\n",
1077             delay, frame_current_pts, -diff);
1078
1079     return is->frame_timer;
1080 }
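
/* A hypothetical pass through the logic above: frames nominally 40 ms apart
 * (25 fps) and video lagging the audio master by 100 ms gives diff = -0.100
 * and sync_threshold = FFMAX(0.01, 0.040) = 0.040; diff <= -sync_threshold,
 * so delay collapses to 0 and the next frame is shown immediately.  Were
 * video 100 ms ahead, delay would double to 80 ms so audio can catch up.
 * Differences beyond AV_NOSYNC_THRESHOLD (10 s) are left uncorrected. */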
1081
1082 /* called to display each frame */
1083 static void video_refresh_timer(void *opaque)
1084 {
1085     VideoState *is = opaque;
1086     VideoPicture *vp;
1087
1088     SubPicture *sp, *sp2;
1089
1090     if (is->video_st) {
1091 retry:
1092         if (is->pictq_size == 0) {
1093             // nothing to do, no picture to display in the queue
1094         } else {
1095             double time = av_gettime() / 1000000.0;
1096             double next_target;
1097             /* dequeue the picture */
1098             vp = &is->pictq[is->pictq_rindex];
1099
1100             if (time < vp->target_clock)
1101                 return;
1102             /* update current video pts */
1103             is->video_current_pts = vp->pts;
1104             is->video_current_pts_drift = is->video_current_pts - time;
1105             is->video_current_pos = vp->pos;
1106             if (is->pictq_size > 1) {
1107                 VideoPicture *nextvp = &is->pictq[(is->pictq_rindex + 1) % VIDEO_PICTURE_QUEUE_SIZE];
1108                 assert(nextvp->target_clock >= vp->target_clock);
1109                 next_target= nextvp->target_clock;
1110             } else {
1111                 next_target = vp->target_clock + is->video_clock - vp->pts; // FIXME pass durations cleanly
1112             }
1113             if (framedrop && time > next_target) {
1114                 is->skip_frames *= 1.0 + FRAME_SKIP_FACTOR;
1115                 if (is->pictq_size > 1 || time > next_target + 0.5) {
1116                     /* update queue size and signal for next picture */
1117                     if (++is->pictq_rindex == VIDEO_PICTURE_QUEUE_SIZE)
1118                         is->pictq_rindex = 0;
1119
1120                     SDL_LockMutex(is->pictq_mutex);
1121                     is->pictq_size--;
1122                     SDL_CondSignal(is->pictq_cond);
1123                     SDL_UnlockMutex(is->pictq_mutex);
1124                     goto retry;
1125                 }
1126             }
1127
1128             if (is->subtitle_st) {
1129                 if (is->subtitle_stream_changed) {
1130                     SDL_LockMutex(is->subpq_mutex);
1131
1132                     while (is->subpq_size) {
1133                         free_subpicture(&is->subpq[is->subpq_rindex]);
1134
1135                         /* update queue size and signal for next picture */
1136                         if (++is->subpq_rindex == SUBPICTURE_QUEUE_SIZE)
1137                             is->subpq_rindex = 0;
1138
1139                         is->subpq_size--;
1140                     }
1141                     is->subtitle_stream_changed = 0;
1142
1143                     SDL_CondSignal(is->subpq_cond);
1144                     SDL_UnlockMutex(is->subpq_mutex);
1145                 } else {
1146                     if (is->subpq_size > 0) {
1147                         sp = &is->subpq[is->subpq_rindex];
1148
1149                         if (is->subpq_size > 1)
1150                             sp2 = &is->subpq[(is->subpq_rindex + 1) % SUBPICTURE_QUEUE_SIZE];
1151                         else
1152                             sp2 = NULL;
1153
1154                         if ((is->video_current_pts > (sp->pts + ((float) sp->sub.end_display_time / 1000)))
1155                                 || (sp2 && is->video_current_pts > (sp2->pts + ((float) sp2->sub.start_display_time / 1000))))
1156                         {
1157                             free_subpicture(sp);
1158
1159                             /* update queue size and signal for next picture */
1160                             if (++is->subpq_rindex == SUBPICTURE_QUEUE_SIZE)
1161                                 is->subpq_rindex = 0;
1162
1163                             SDL_LockMutex(is->subpq_mutex);
1164                             is->subpq_size--;
1165                             SDL_CondSignal(is->subpq_cond);
1166                             SDL_UnlockMutex(is->subpq_mutex);
1167                         }
1168                     }
1169                 }
1170             }
1171
1172             /* display picture */
1173             if (!display_disable)
1174                 video_display(is);
1175
1176             /* update queue size and signal for next picture */
1177             if (++is->pictq_rindex == VIDEO_PICTURE_QUEUE_SIZE)
1178                 is->pictq_rindex = 0;
1179
1180             SDL_LockMutex(is->pictq_mutex);
1181             is->pictq_size--;
1182             SDL_CondSignal(is->pictq_cond);
1183             SDL_UnlockMutex(is->pictq_mutex);
1184         }
1185     } else if (is->audio_st) {
1186         /* draw the next audio frame */
1187
1188         /* if there is only an audio stream, then display the audio bars (better
1189            than nothing, just to test the implementation) */
1190
1191         /* display picture */
1192         if (!display_disable)
1193             video_display(is);
1194     }
1195     if (show_status) {
1196         static int64_t last_time;
1197         int64_t cur_time;
1198         int aqsize, vqsize, sqsize;
1199         double av_diff;
1200
1201         cur_time = av_gettime();
1202         if (!last_time || (cur_time - last_time) >= 30000) {
1203             aqsize = 0;
1204             vqsize = 0;
1205             sqsize = 0;
1206             if (is->audio_st)
1207                 aqsize = is->audioq.size;
1208             if (is->video_st)
1209                 vqsize = is->videoq.size;
1210             if (is->subtitle_st)
1211                 sqsize = is->subtitleq.size;
1212             av_diff = 0;
1213             if (is->audio_st && is->video_st)
1214                 av_diff = get_audio_clock(is) - get_video_clock(is);
1215             printf("%7.2f A-V:%7.3f s:%3.1f aq=%5dKB vq=%5dKB sq=%5dB f=%"PRId64"/%"PRId64"   \r",
1216                    get_master_clock(is), av_diff, FFMAX(is->skip_frames - 1, 0), aqsize / 1024,
1217                    vqsize / 1024, sqsize, is->pts_ctx.num_faulty_dts, is->pts_ctx.num_faulty_pts);
1218             fflush(stdout);
1219             last_time = cur_time;
1220         }
1221     }
1222 }
1223
1224 static void stream_close(VideoState *is)
1225 {
1226     VideoPicture *vp;
1227     int i;
1228     /* XXX: use a special url_shutdown call to abort parse cleanly */
1229     is->abort_request = 1;
1230     SDL_WaitThread(is->parse_tid, NULL);
1231     SDL_WaitThread(is->refresh_tid, NULL);
1232
1233     /* free all pictures */
1234     for (i = 0; i < VIDEO_PICTURE_QUEUE_SIZE; i++) {
1235         vp = &is->pictq[i];
1236 #if CONFIG_AVFILTER
1237         if (vp->picref) {
1238             avfilter_unref_buffer(vp->picref);
1239             vp->picref = NULL;
1240         }
1241 #endif
1242         if (vp->bmp) {
1243             SDL_FreeYUVOverlay(vp->bmp);
1244             vp->bmp = NULL;
1245         }
1246     }
1247     SDL_DestroyMutex(is->pictq_mutex);
1248     SDL_DestroyCond(is->pictq_cond);
1249     SDL_DestroyMutex(is->subpq_mutex);
1250     SDL_DestroyCond(is->subpq_cond);
1251 #if !CONFIG_AVFILTER
1252     if (is->img_convert_ctx)
1253         sws_freeContext(is->img_convert_ctx);
1254 #endif
1255     av_free(is);
1256 }
1257
1258 static void do_exit(void)
1259 {
1260     if (cur_stream) {
1261         stream_close(cur_stream);
1262         cur_stream = NULL;
1263     }
1264     uninit_opts();
1265 #if CONFIG_AVFILTER
1266     avfilter_uninit();
1267 #endif
1268     avformat_network_deinit();
1269     if (show_status)
1270         printf("\n");
1271     SDL_Quit();
1272     av_log(NULL, AV_LOG_QUIET, "");
1273     exit(0);
1274 }
1275
1276 /* allocate a picture (needs to be done in the main thread to avoid
1277    potential locking problems) */
1278 static void alloc_picture(void *opaque)
1279 {
1280     VideoState *is = opaque;
1281     VideoPicture *vp;
1282
1283     vp = &is->pictq[is->pictq_windex];
1284
1285     if (vp->bmp)
1286         SDL_FreeYUVOverlay(vp->bmp);
1287
1288 #if CONFIG_AVFILTER
1289     if (vp->picref)
1290         avfilter_unref_buffer(vp->picref);
1291     vp->picref = NULL;
1292
1293     vp->width   = is->out_video_filter->inputs[0]->w;
1294     vp->height  = is->out_video_filter->inputs[0]->h;
1295     vp->pix_fmt = is->out_video_filter->inputs[0]->format;
1296 #else
1297     vp->width   = is->video_st->codec->width;
1298     vp->height  = is->video_st->codec->height;
1299     vp->pix_fmt = is->video_st->codec->pix_fmt;
1300 #endif
1301
1302     vp->bmp = SDL_CreateYUVOverlay(vp->width, vp->height,
1303                                    SDL_YV12_OVERLAY,
1304                                    screen);
1305     if (!vp->bmp || vp->bmp->pitches[0] < vp->width) {
1306         /* SDL allocates a buffer smaller than requested if the video
1307          * overlay hardware is unable to support the requested size. */
1308         fprintf(stderr, "Error: the video system does not support an image\n"
1309                         "size of %dx%d pixels. Try using -vf \"scale=w:h\"\n"
1310                         "to reduce the image size.\n", vp->width, vp->height );
1311         do_exit();
1312     }
1313
1314     SDL_LockMutex(is->pictq_mutex);
1315     vp->allocated = 1;
1316     SDL_CondSignal(is->pictq_cond);
1317     SDL_UnlockMutex(is->pictq_mutex);
1318 }
1319
1320 /**
1321  *
1322  * @param pts the dts of the pkt / pts of the frame and guessed if not known
1323  */
1324 static int queue_picture(VideoState *is, AVFrame *src_frame, double pts, int64_t pos)
1325 {
1326     VideoPicture *vp;
1327 #if CONFIG_AVFILTER
1328     AVPicture pict_src;
1329 #else
1330     int dst_pix_fmt = PIX_FMT_YUV420P;
1331 #endif
1332     /* wait until we have space to put a new picture */
1333     SDL_LockMutex(is->pictq_mutex);
1334
1335     if (is->pictq_size >= VIDEO_PICTURE_QUEUE_SIZE && !is->refresh)
1336         is->skip_frames = FFMAX(1.0 - FRAME_SKIP_FACTOR, is->skip_frames * (1.0 - FRAME_SKIP_FACTOR));
1337
1338     while (is->pictq_size >= VIDEO_PICTURE_QUEUE_SIZE &&
1339            !is->videoq.abort_request) {
1340         SDL_CondWait(is->pictq_cond, is->pictq_mutex);
1341     }
1342     SDL_UnlockMutex(is->pictq_mutex);
1343
1344     if (is->videoq.abort_request)
1345         return -1;
1346
1347     vp = &is->pictq[is->pictq_windex];
1348
1349     /* alloc or resize hardware picture buffer */
1350     if (!vp->bmp || vp->reallocate ||
1351 #if CONFIG_AVFILTER
1352         vp->width  != is->out_video_filter->inputs[0]->w ||
1353         vp->height != is->out_video_filter->inputs[0]->h) {
1354 #else
1355         vp->width != is->video_st->codec->width ||
1356         vp->height != is->video_st->codec->height) {
1357 #endif
1358         SDL_Event event;
1359
1360         vp->allocated  = 0;
1361         vp->reallocate = 0;
1362
1363         /* the allocation must be done in the main thread to avoid
1364            locking problems */
1365         event.type = FF_ALLOC_EVENT;
1366         event.user.data1 = is;
1367         SDL_PushEvent(&event);
1368
1369         /* wait until the picture is allocated */
1370         SDL_LockMutex(is->pictq_mutex);
1371         while (!vp->allocated && !is->videoq.abort_request) {
1372             SDL_CondWait(is->pictq_cond, is->pictq_mutex);
1373         }
1374         SDL_UnlockMutex(is->pictq_mutex);
1375
1376         if (is->videoq.abort_request)
1377             return -1;
1378     }
1379
1380     /* if the frame is not skipped, then display it */
1381     if (vp->bmp) {
1382         AVPicture pict = { { 0 } };
1383 #if CONFIG_AVFILTER
1384         if (vp->picref)
1385             avfilter_unref_buffer(vp->picref);
1386         vp->picref = src_frame->opaque;
1387 #endif
1388
1389         /* get a pointer on the bitmap */
1390         SDL_LockYUVOverlay (vp->bmp);
1391
1392         pict.data[0] = vp->bmp->pixels[0];
1393         pict.data[1] = vp->bmp->pixels[2];
1394         pict.data[2] = vp->bmp->pixels[1];
1395
1396         pict.linesize[0] = vp->bmp->pitches[0];
1397         pict.linesize[1] = vp->bmp->pitches[2];
1398         pict.linesize[2] = vp->bmp->pitches[1];
1399
1400 #if CONFIG_AVFILTER
1401         pict_src.data[0] = src_frame->data[0];
1402         pict_src.data[1] = src_frame->data[1];
1403         pict_src.data[2] = src_frame->data[2];
1404
1405         pict_src.linesize[0] = src_frame->linesize[0];
1406         pict_src.linesize[1] = src_frame->linesize[1];
1407         pict_src.linesize[2] = src_frame->linesize[2];
1408
1409         // FIXME use direct rendering
1410         av_picture_copy(&pict, &pict_src,
1411                         vp->pix_fmt, vp->width, vp->height);
1412 #else
1413         sws_flags = av_get_int(sws_opts, "sws_flags", NULL);
1414         is->img_convert_ctx = sws_getCachedContext(is->img_convert_ctx,
1415             vp->width, vp->height, vp->pix_fmt, vp->width, vp->height,
1416             dst_pix_fmt, sws_flags, NULL, NULL, NULL);
1417         if (is->img_convert_ctx == NULL) {
1418             fprintf(stderr, "Cannot initialize the conversion context\n");
1419             exit(1);
1420         }
1421         sws_scale(is->img_convert_ctx, src_frame->data, src_frame->linesize,
1422                   0, vp->height, pict.data, pict.linesize);
1423 #endif
1424         /* update the bitmap content */
1425         SDL_UnlockYUVOverlay(vp->bmp);
1426
1427         vp->pts = pts;
1428         vp->pos = pos;
1429
1430         /* now we can update the picture count */
1431         if (++is->pictq_windex == VIDEO_PICTURE_QUEUE_SIZE)
1432             is->pictq_windex = 0;
1433         SDL_LockMutex(is->pictq_mutex);
1434         vp->target_clock = compute_target_time(vp->pts, is);
1435
1436         is->pictq_size++;
1437         SDL_UnlockMutex(is->pictq_mutex);
1438     }
1439     return 0;
1440 }
1441
1442 /**
1443  * compute the exact PTS for the picture if it is omitted in the stream
1444  * @param pts1 the dts of the pkt / pts of the frame
1445  */
1446 static int output_picture2(VideoState *is, AVFrame *src_frame, double pts1, int64_t pos)
1447 {
1448     double frame_delay, pts;
1449
1450     pts = pts1;
1451
1452     if (pts != 0) {
1453         /* update video clock with pts, if present */
1454         is->video_clock = pts;
1455     } else {
1456         pts = is->video_clock;
1457     }
1458     /* update video clock for next frame */
1459     frame_delay = av_q2d(is->video_st->codec->time_base);
1460     /* for MPEG2, the frame can be repeated, so we update the
1461        clock accordingly */
1462     frame_delay += src_frame->repeat_pict * (frame_delay * 0.5);
1463     is->video_clock += frame_delay;
1464
1465     return queue_picture(is, src_frame, pts, pos);
1466 }
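
/* Example of the repeat_pict handling above (illustrative values): with a
 * 1/25 s codec time_base, frame_delay starts at 0.040 s.  A frame flagged
 * repeat_pict = 1 (e.g. MPEG-2 pulldown) advances video_clock by
 * 0.040 * (1 + 0.5) = 0.060 s, keeping the predicted PTS of the next frame
 * in step with what the decoder will actually emit. */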
1467
1468 static int get_video_frame(VideoState *is, AVFrame *frame, int64_t *pts, AVPacket *pkt)
1469 {
1470     int got_picture, i;
1471
1472     if (packet_queue_get(&is->videoq, pkt, 1) < 0)
1473         return -1;
1474
1475     if (pkt->data == flush_pkt.data) {
1476         avcodec_flush_buffers(is->video_st->codec);
1477
1478         SDL_LockMutex(is->pictq_mutex);
1479         // Make sure there are no long delay timers (ideally we should just flush the queue, but that's harder)
1480         for (i = 0; i < VIDEO_PICTURE_QUEUE_SIZE; i++) {
1481             is->pictq[i].target_clock= 0;
1482         }
1483         while (is->pictq_size && !is->videoq.abort_request) {
1484             SDL_CondWait(is->pictq_cond, is->pictq_mutex);
1485         }
1486         is->video_current_pos = -1;
1487         SDL_UnlockMutex(is->pictq_mutex);
1488
1489         init_pts_correction(&is->pts_ctx);
1490         is->frame_last_pts = AV_NOPTS_VALUE;
1491         is->frame_last_delay = 0;
1492         is->frame_timer = (double)av_gettime() / 1000000.0;
1493         is->skip_frames = 1;
1494         is->skip_frames_index = 0;
1495         return 0;
1496     }
1497
1498     avcodec_decode_video2(is->video_st->codec, frame, &got_picture, pkt);
1499
1500     if (got_picture) {
1501         if (decoder_reorder_pts == -1) {
1502             *pts = guess_correct_pts(&is->pts_ctx, frame->pkt_pts, frame->pkt_dts);
1503         } else if (decoder_reorder_pts) {
1504             *pts = frame->pkt_pts;
1505         } else {
1506             *pts = frame->pkt_dts;
1507         }
1508
1509         if (*pts == AV_NOPTS_VALUE) {
1510             *pts = 0;
1511         }
1512
1513         is->skip_frames_index += 1;
1514         if (is->skip_frames_index >= is->skip_frames) {
1515             is->skip_frames_index -= FFMAX(is->skip_frames, 1.0);
1516             return 1;
1517         }
1518
1519     }
1520     return 0;
1521 }
1522
1523 #if CONFIG_AVFILTER
1524 static int configure_video_filters(AVFilterGraph *graph, VideoState *is, const char *vfilters)
1525 {
1526     char sws_flags_str[128];
1527     char buffersrc_args[256];
1528     int ret;
1529     AVFilterContext *filt_src = NULL, *filt_out = NULL, *filt_format;
1530     AVCodecContext *codec = is->video_st->codec;
1531
1532     snprintf(sws_flags_str, sizeof(sws_flags_str), "flags=%d", sws_flags);
1533     graph->scale_sws_opts = av_strdup(sws_flags_str);
1534
1535     snprintf(buffersrc_args, sizeof(buffersrc_args), "%d:%d:%d:%d:%d:%d:%d",
1536              codec->width, codec->height, codec->pix_fmt,
1537              is->video_st->time_base.num, is->video_st->time_base.den,
1538              codec->sample_aspect_ratio.num, codec->sample_aspect_ratio.den);
1539
1540
1541     if ((ret = avfilter_graph_create_filter(&filt_src,
1542                                             avfilter_get_by_name("buffer"),
1543                                             "src", buffersrc_args, NULL,
1544                                             graph)) < 0)
1545         return ret;
1546     if ((ret = avfilter_graph_create_filter(&filt_out,
1547                                             avfilter_get_by_name("buffersink"),
1548                                             "out", NULL, NULL, graph)) < 0)
1549         return ret;
1550
1551     if ((ret = avfilter_graph_create_filter(&filt_format,
1552                                             avfilter_get_by_name("format"),
1553                                             "format", "yuv420p", NULL, graph)) < 0)
1554         return ret;
1555     if ((ret = avfilter_link(filt_format, 0, filt_out, 0)) < 0)
1556         return ret;
1557
1558
1559     if (vfilters) {
1560         AVFilterInOut *outputs = avfilter_inout_alloc();
1561         AVFilterInOut *inputs  = avfilter_inout_alloc();
1562
1563         outputs->name    = av_strdup("in");
1564         outputs->filter_ctx = filt_src;
1565         outputs->pad_idx = 0;
1566         outputs->next    = NULL;
1567
1568         inputs->name    = av_strdup("out");
1569         inputs->filter_ctx = filt_format;
1570         inputs->pad_idx = 0;
1571         inputs->next    = NULL;
1572
1573         if ((ret = avfilter_graph_parse(graph, vfilters, inputs, outputs, NULL)) < 0)
1574             return ret;
1575     } else {
1576         if ((ret = avfilter_link(filt_src, 0, filt_format, 0)) < 0)
1577             return ret;
1578     }
1579
1580     if ((ret = avfilter_graph_config(graph, NULL)) < 0)
1581         return ret;
1582
1583     is->in_video_filter  = filt_src;
1584     is->out_video_filter = filt_out;
1585
1586     if (codec->codec->capabilities & CODEC_CAP_DR1) {
1587         is->use_dr1 = 1;
1588         codec->get_buffer     = codec_get_buffer;
1589         codec->release_buffer = codec_release_buffer;
1590         codec->opaque         = &is->buffer_pool;
1591     }
1592
1593     return ret;
1594 }
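
/* The graph built above, schematically ("in"/"out" are just the pad labels
 * handed to avfilter_graph_parse(), not extra filters):
 *
 *   buffer (src) -> [optional user chain from -vf] -> format=yuv420p -> buffersink (out)
 *
 * so whatever filters the user supplies, the sink always delivers YUV420P,
 * which is what the SDL_YV12_OVERLAY upload code in this file expects. */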
1595
1596 #endif  /* CONFIG_AVFILTER */
1597
1598 static int video_thread(void *arg)
1599 {
1600     VideoState *is = arg;
1601     AVFrame *frame = avcodec_alloc_frame();
1602     int64_t pts_int;
1603     double pts;
1604     int ret;
1605
1606 #if CONFIG_AVFILTER
1607     AVFilterGraph *graph = avfilter_graph_alloc();
1608     AVFilterContext *filt_out = NULL, *filt_in = NULL;
1609     int64_t pos;
1610     int last_w = is->video_st->codec->width;
1611     int last_h = is->video_st->codec->height;
1612
1613     if ((ret = configure_video_filters(graph, is, vfilters)) < 0)
1614         goto the_end;
1615     filt_in  = is->in_video_filter;
1616     filt_out = is->out_video_filter;
1617 #endif
1618
1619     for (;;) {
1620         AVPacket pkt;
1621 #if CONFIG_AVFILTER
1622         AVFilterBufferRef *picref;
1623         AVRational tb;
1624 #endif
1625         while (is->paused && !is->videoq.abort_request)
1626             SDL_Delay(10);
1627
1628         ret = get_video_frame(is, frame, &pts_int, &pkt);
1629         if (ret < 0)
1630             goto the_end;
1631         av_free_packet(&pkt);
1632
1633         if (!ret)
1634             continue;
1635
1636 #if CONFIG_AVFILTER
1637         if (   last_w != is->video_st->codec->width
1638             || last_h != is->video_st->codec->height) {
1639             av_dlog(NULL, "Changing size %dx%d -> %dx%d\n", last_w, last_h,
1640                     is->video_st->codec->width, is->video_st->codec->height);
1641             avfilter_graph_free(&graph);
1642             graph = avfilter_graph_alloc();
1643             if ((ret = configure_video_filters(graph, is, vfilters)) < 0)
1644                 goto the_end;
1645             filt_out = is->out_video_filter;
1646             last_w = is->video_st->codec->width;
1647             last_h = is->video_st->codec->height;
1648         }
1649
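        /* With direct rendering (use_dr1) the decoded planes live in our own
         * buffer pool, so they can be handed to the filter graph without a
         * copy: wrap them in an AVFilterBufferRef, bump the pool buffer's
         * refcount so it is not recycled while the graph still references it,
         * and let filter_release_buffer() drop that reference later.
         * Without DR1, av_buffersrc_write_frame() is used instead. */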
1650         frame->pts = pts_int;
1651         if (is->use_dr1) {
1652             FrameBuffer      *buf = frame->opaque;
1653             AVFilterBufferRef *fb = avfilter_get_video_buffer_ref_from_arrays(
1654                                         frame->data, frame->linesize,
1655                                         AV_PERM_READ | AV_PERM_PRESERVE,
1656                                         frame->width, frame->height,
1657                                         frame->format);
1658
1659             avfilter_copy_frame_props(fb, frame);
1660             fb->buf->priv           = buf;
1661             fb->buf->free           = filter_release_buffer;
1662
1663             buf->refcount++;
1664             av_buffersrc_buffer(filt_in, fb);
1665
1666         } else
1667             av_buffersrc_write_frame(filt_in, frame);
1668
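        /* Drain every frame the sink has ready. picref->pts is expressed in
         * the filter output's time base, which may differ from the stream
         * time base, so it is rescaled below before computing the display
         * pts. */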
1669         while (ret >= 0) {
1670             ret = av_buffersink_read(filt_out, &picref);
1671             if (ret < 0) {
1672                 ret = 0;
1673                 break;
1674             }
1675
1676             avfilter_copy_buf_props(frame, picref);
1677
1678             pts_int = picref->pts;
1679             tb      = filt_out->inputs[0]->time_base;
1680             pos     = picref->pos;
1681             frame->opaque = picref;
1682
1683             if (av_cmp_q(tb, is->video_st->time_base)) {
1684                 av_unused int64_t pts1 = pts_int;
1685                 pts_int = av_rescale_q(pts_int, tb, is->video_st->time_base);
1686                 av_dlog(NULL, "video_thread(): "
1687                         "tb:%d/%d pts:%"PRId64" -> tb:%d/%d pts:%"PRId64"\n",
1688                         tb.num, tb.den, pts1,
1689                         is->video_st->time_base.num, is->video_st->time_base.den, pts_int);
1690             }
1691             pts = pts_int * av_q2d(is->video_st->time_base);
1692             ret = output_picture2(is, frame, pts, pos);
1693         }
1694 #else
1695         pts = pts_int * av_q2d(is->video_st->time_base);
1696         ret = output_picture2(is, frame, pts,  pkt.pos);
1697 #endif
1698
1699         if (ret < 0)
1700             goto the_end;
1701
1702         if (step)
1703             if (cur_stream)
1704                 stream_pause(cur_stream);
1705     }
1706  the_end:
1707 #if CONFIG_AVFILTER
1708     av_freep(&vfilters);
1709     avfilter_graph_free(&graph);
1710 #endif
1711     av_free(frame);
1712     return 0;
1713 }
1714
1715 static int subtitle_thread(void *arg)
1716 {
1717     VideoState *is = arg;
1718     SubPicture *sp;
1719     AVPacket pkt1, *pkt = &pkt1;
1720     int got_subtitle;
1721     double pts;
1722     int i, j;
1723     int r, g, b, y, u, v, a;
1724
1725     for (;;) {
1726         while (is->paused && !is->subtitleq.abort_request) {
1727             SDL_Delay(10);
1728         }
1729         if (packet_queue_get(&is->subtitleq, pkt, 1) < 0)
1730             break;
1731
1732         if (pkt->data == flush_pkt.data) {
1733             avcodec_flush_buffers(is->subtitle_st->codec);
1734             continue;
1735         }
1736         SDL_LockMutex(is->subpq_mutex);
1737         while (is->subpq_size >= SUBPICTURE_QUEUE_SIZE &&
1738                !is->subtitleq.abort_request) {
1739             SDL_CondWait(is->subpq_cond, is->subpq_mutex);
1740         }
1741         SDL_UnlockMutex(is->subpq_mutex);
1742
1743         if (is->subtitleq.abort_request)
1744             return 0;
1745
1746         sp = &is->subpq[is->subpq_windex];
1747
1748         /* NOTE: pts is the PTS of the _first_ subtitle beginning in
1749            this packet, if any */
1750         pts = 0;
1751         if (pkt->pts != AV_NOPTS_VALUE)
1752             pts = av_q2d(is->subtitle_st->time_base) * pkt->pts;
1753
1754         avcodec_decode_subtitle2(is->subtitle_st->codec, &sp->sub,
1755                                  &got_subtitle, pkt);
1756
1757         if (got_subtitle && sp->sub.format == 0) {
1758             sp->pts = pts;
1759
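            /* Bitmap subtitles (format 0) carry an RGBA palette in
             * pict.data[1] of each rect; convert every palette entry to YUVA
             * using the CCIR/BT.601 coefficients (the RGB_TO_*_CCIR macros)
             * so it can later be blended onto the YUV overlay. */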
1760             for (i = 0; i < sp->sub.num_rects; i++)
1761             {
1762                 for (j = 0; j < sp->sub.rects[i]->nb_colors; j++)
1763                 {
1764                     RGBA_IN(r, g, b, a, (uint32_t*)sp->sub.rects[i]->pict.data[1] + j);
1765                     y = RGB_TO_Y_CCIR(r, g, b);
1766                     u = RGB_TO_U_CCIR(r, g, b, 0);
1767                     v = RGB_TO_V_CCIR(r, g, b, 0);
1768                     YUVA_OUT((uint32_t*)sp->sub.rects[i]->pict.data[1] + j, y, u, v, a);
1769                 }
1770             }
1771
1772             /* now we can update the picture count */
1773             if (++is->subpq_windex == SUBPICTURE_QUEUE_SIZE)
1774                 is->subpq_windex = 0;
1775             SDL_LockMutex(is->subpq_mutex);
1776             is->subpq_size++;
1777             SDL_UnlockMutex(is->subpq_mutex);
1778         }
1779         av_free_packet(pkt);
1780     }
1781     return 0;
1782 }
1783
1784 /* copy samples into the circular buffer used by the audio waveform/spectrum display */
1785 static void update_sample_display(VideoState *is, short *samples, int samples_size)
1786 {
1787     int size, len;
1788
1789     size = samples_size / sizeof(short);
1790     while (size > 0) {
1791         len = SAMPLE_ARRAY_SIZE - is->sample_array_index;
1792         if (len > size)
1793             len = size;
1794         memcpy(is->sample_array + is->sample_array_index, samples, len * sizeof(short));
1795         samples += len;
1796         is->sample_array_index += len;
1797         if (is->sample_array_index >= SAMPLE_ARRAY_SIZE)
1798             is->sample_array_index = 0;
1799         size -= len;
1800     }
1801 }
1802
1803 /* return the new audio buffer size (samples can be added or removed
1804    to get better sync when the video or an external clock is the master) */
1805 static int synchronize_audio(VideoState *is, short *samples,
1806                              int samples_size1, double pts)
1807 {
1808     int n, samples_size;
1809     double ref_clock;
1810
1811     n = is->sdl_channels * av_get_bytes_per_sample(is->sdl_sample_fmt);
1812     samples_size = samples_size1;
1813
1814     /* if not master, then we try to remove or add samples to correct the clock */
1815     if (((is->av_sync_type == AV_SYNC_VIDEO_MASTER && is->video_st) ||
1816          is->av_sync_type == AV_SYNC_EXTERNAL_CLOCK)) {
1817         double diff, avg_diff;
1818         int wanted_size, min_size, max_size, nb_samples;
1819
1820         ref_clock = get_master_clock(is);
1821         diff = get_audio_clock(is) - ref_clock;
1822
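        /* audio_diff_cum is an exponentially weighted sum of recent A-V
         * differences: with coef = exp(log(0.01) / AUDIO_DIFF_AVG_NB), a
         * measure AUDIO_DIFF_AVG_NB samples old only contributes about 1%,
         * and cum * (1 - coef) recovers the weighted average. */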
1823         if (diff < AV_NOSYNC_THRESHOLD) {
1824             is->audio_diff_cum = diff + is->audio_diff_avg_coef * is->audio_diff_cum;
1825             if (is->audio_diff_avg_count < AUDIO_DIFF_AVG_NB) {
1826                 /* not enough measures to have a correct estimate */
1827                 is->audio_diff_avg_count++;
1828             } else {
1829                 /* estimate the A-V difference */
1830                 avg_diff = is->audio_diff_cum * (1.0 - is->audio_diff_avg_coef);
1831
1832                 if (fabs(avg_diff) >= is->audio_diff_threshold) {
1833                     wanted_size = samples_size + ((int)(diff * is->audio_st->codec->sample_rate) * n);
1834                     nb_samples = samples_size / n;
1835
1836                     min_size = ((nb_samples * (100 - SAMPLE_CORRECTION_PERCENT_MAX)) / 100) * n;
1837                     max_size = ((nb_samples * (100 + SAMPLE_CORRECTION_PERCENT_MAX)) / 100) * n;
1838                     if (wanted_size < min_size)
1839                         wanted_size = min_size;
1840                     else if (wanted_size > max_size)
1841                         wanted_size = max_size;
1842
1843                     /* add or remove samples to correct the sync */
1844                     if (wanted_size < samples_size) {
1845                         /* remove samples */
1846                         samples_size = wanted_size;
1847                     } else if (wanted_size > samples_size) {
1848                         uint8_t *samples_end, *q;
1849                         int nb;
1850
1851                         /* add samples by repeating the last sample period */
1852                         nb = (wanted_size - samples_size);
1853                         samples_end = (uint8_t *)samples + samples_size - n;
1854                         q = samples_end + n;
1855                         while (nb > 0) {
1856                             memcpy(q, samples_end, n);
1857                             q += n;
1858                             nb -= n;
1859                         }
1860                         samples_size = wanted_size;
1861                     }
1862                 }
1863                 av_dlog(NULL, "diff=%f adiff=%f sample_diff=%d apts=%0.3f vpts=%0.3f %f\n",
1864                         diff, avg_diff, samples_size - samples_size1,
1865                         is->audio_clock, is->video_clock, is->audio_diff_threshold);
1866             }
1867         } else {
1868             /* the difference is too big: it may be caused by initial
1869                PTS errors, so reset the A-V filter */
1870             is->audio_diff_avg_count = 0;
1871             is->audio_diff_cum       = 0;
1872         }
1873     }
1874
1875     return samples_size;
1876 }
1877
1878 /* decode one audio frame and return its uncompressed size */
1879 static int audio_decode_frame(VideoState *is, double *pts_ptr)
1880 {
1881     AVPacket *pkt_temp = &is->audio_pkt_temp;
1882     AVPacket *pkt = &is->audio_pkt;
1883     AVCodecContext *dec = is->audio_st->codec;
1884     int n, len1, data_size, got_frame;
1885     double pts;
1886     int new_packet = 0;
1887     int flush_complete = 0;
1888
1889     for (;;) {
1890         /* NOTE: the audio packet can contain several frames */
1891         while (pkt_temp->size > 0 || (!pkt_temp->data && new_packet)) {
1892             int resample_changed, audio_resample;
1893
1894             if (!is->frame) {
1895                 if (!(is->frame = avcodec_alloc_frame()))
1896                     return AVERROR(ENOMEM);
1897             } else
1898                 avcodec_get_frame_defaults(is->frame);
1899
1900             if (flush_complete)
1901                 break;
1902             new_packet = 0;
1903             len1 = avcodec_decode_audio4(dec, is->frame, &got_frame, pkt_temp);
1904             if (len1 < 0) {
1905                 /* if error, we skip the frame */
1906                 pkt_temp->size = 0;
1907                 break;
1908             }
1909
1910             pkt_temp->data += len1;
1911             pkt_temp->size -= len1;
1912
1913             if (!got_frame) {
1914                 /* stop sending empty packets if the decoder is finished */
1915                 if (!pkt_temp->data && dec->codec->capabilities & CODEC_CAP_DELAY)
1916                     flush_complete = 1;
1917                 continue;
1918             }
1919             data_size = av_samples_get_buffer_size(NULL, dec->channels,
1920                                                    is->frame->nb_samples,
1921                                                    dec->sample_fmt, 1);
1922
1923             audio_resample = dec->sample_fmt     != is->sdl_sample_fmt ||
1924                              dec->channel_layout != is->sdl_channel_layout;
1925
1926             resample_changed = dec->sample_fmt     != is->resample_sample_fmt ||
1927                                dec->channel_layout != is->resample_channel_layout;
1928
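            /* (Re)configure the resampler when conversion is needed but no
             * context exists yet, or when the decoder's sample format or
             * channel layout changed since the last configuration; it
             * converts decoded frames to the S16 format and channel layout
             * the SDL audio device was opened with. */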
1929             if ((!is->avr && audio_resample) || resample_changed) {
1930                 int ret;
1931                 if (is->avr)
1932                     avresample_close(is->avr);
1933                 else if (audio_resample) {
1934                     is->avr = avresample_alloc_context();
1935                     if (!is->avr) {
1936                         fprintf(stderr, "error allocating AVAudioResampleContext\n");
1937                         break;
1938                     }
1939                 }
1940                 if (audio_resample) {
1941                     av_opt_set_int(is->avr, "in_channel_layout",  dec->channel_layout,    0);
1942                     av_opt_set_int(is->avr, "in_sample_fmt",      dec->sample_fmt,        0);
1943                     av_opt_set_int(is->avr, "in_sample_rate",     dec->sample_rate,       0);
1944                     av_opt_set_int(is->avr, "out_channel_layout", is->sdl_channel_layout, 0);
1945                     av_opt_set_int(is->avr, "out_sample_fmt",     is->sdl_sample_fmt,     0);
1946                     av_opt_set_int(is->avr, "out_sample_rate",    dec->sample_rate,       0);
1947                     if (av_get_bytes_per_sample(dec->sample_fmt) <= 2)
1948                         av_opt_set_int(is->avr, "internal_sample_fmt", AV_SAMPLE_FMT_S16P, 0);
1949
1950                     if ((ret = avresample_open(is->avr)) < 0) {
1951                         fprintf(stderr, "error initializing libavresample\n");
1952                         break;
1953                     }
1954                 }
1955                 is->resample_sample_fmt     = dec->sample_fmt;
1956                 is->resample_channel_layout = dec->channel_layout;
1957             }
1958
1959             if (audio_resample) {
1960                 void *tmp_out;
1961                 int out_samples, out_size, out_linesize;
1962                 int osize      = av_get_bytes_per_sample(is->sdl_sample_fmt);
1963                 int nb_samples = is->frame->nb_samples;
1964
1965                 out_size = av_samples_get_buffer_size(&out_linesize,
1966                                                       is->sdl_channels,
1967                                                       nb_samples,
1968                                                       is->sdl_sample_fmt, 0);
1969                 tmp_out = av_realloc(is->audio_buf1, out_size);
1970                 if (!tmp_out)
1971                     return AVERROR(ENOMEM);
1972                 is->audio_buf1 = tmp_out;
1973
1974                 out_samples = avresample_convert(is->avr,
1975                                                  (void **)&is->audio_buf1,
1976                                                  out_linesize, nb_samples,
1977                                                  (void **)is->frame->data,
1978                                                  is->frame->linesize[0],
1979                                                  is->frame->nb_samples);
1980                 if (out_samples < 0) {
1981                     fprintf(stderr, "avresample_convert() failed\n");
1982                     break;
1983                 }
1984                 is->audio_buf = is->audio_buf1;
1985                 data_size = out_samples * osize * is->sdl_channels;
1986             } else {
1987                 is->audio_buf = is->frame->data[0];
1988             }
1989
1990             /* if no pts, then compute it */
1991             pts = is->audio_clock;
1992             *pts_ptr = pts;
1993             n = is->sdl_channels * av_get_bytes_per_sample(is->sdl_sample_fmt);
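            /* the audio clock advances by the duration of the decoded data:
             * data_size / (channels * bytes_per_sample * sample_rate)
             * seconds, so it always points just past the last sample
             * returned to the caller */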
1994             is->audio_clock += (double)data_size /
1995                 (double)(n * dec->sample_rate);
1996 #ifdef DEBUG
1997             {
1998                 static double last_clock;
1999                 printf("audio: delay=%0.3f clock=%0.3f pts=%0.3f\n",
2000                        is->audio_clock - last_clock,
2001                        is->audio_clock, pts);
2002                 last_clock = is->audio_clock;
2003             }
2004 #endif
2005             return data_size;
2006         }
2007
2008         /* free the current packet */
2009         if (pkt->data)
2010             av_free_packet(pkt);
2011         memset(pkt_temp, 0, sizeof(*pkt_temp));
2012
2013         if (is->paused || is->audioq.abort_request) {
2014             return -1;
2015         }
2016
2017         /* read next packet */
2018         if ((new_packet = packet_queue_get(&is->audioq, pkt, 1)) < 0)
2019             return -1;
2020
2021         if (pkt->data == flush_pkt.data) {
2022             avcodec_flush_buffers(dec);
2023             flush_complete = 0;
2024         }
2025
2026         *pkt_temp = *pkt;
2027
2028         /* update the audio clock with the packet pts, if available */
2029         if (pkt->pts != AV_NOPTS_VALUE) {
2030             is->audio_clock = av_q2d(is->audio_st->time_base)*pkt->pts;
2031         }
2032     }
2033 }
2034
2035 /* prepare a new audio buffer */
2036 static void sdl_audio_callback(void *opaque, Uint8 *stream, int len)
2037 {
2038     VideoState *is = opaque;
2039     int audio_size, len1;
2040     double pts;
2041
2042     audio_callback_time = av_gettime();
2043
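    /* SDL calls this from its own audio thread whenever the hardware buffer
     * needs 'len' more bytes; the loop below must always produce exactly
     * that much, falling back to silence_buf when decoding fails so the
     * device is never fed stale data. */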
2044     while (len > 0) {
2045         if (is->audio_buf_index >= is->audio_buf_size) {
2046            audio_size = audio_decode_frame(is, &pts);
2047            if (audio_size < 0) {
2048                 /* if error, just output silence */
2049                is->audio_buf      = is->silence_buf;
2050                is->audio_buf_size = sizeof(is->silence_buf);
2051            } else {
2052                if (is->show_audio)
2053                    update_sample_display(is, (int16_t *)is->audio_buf, audio_size);
2054                audio_size = synchronize_audio(is, (int16_t *)is->audio_buf, audio_size,
2055                                               pts);
2056                is->audio_buf_size = audio_size;
2057            }
2058            is->audio_buf_index = 0;
2059         }
2060         len1 = is->audio_buf_size - is->audio_buf_index;
2061         if (len1 > len)
2062             len1 = len;
2063         memcpy(stream, (uint8_t *)is->audio_buf + is->audio_buf_index, len1);
2064         len -= len1;
2065         stream += len1;
2066         is->audio_buf_index += len1;
2067     }
2068 }
2069
2070 /* open a given stream. Return 0 if OK */
2071 static int stream_component_open(VideoState *is, int stream_index)
2072 {
2073     AVFormatContext *ic = is->ic;
2074     AVCodecContext *avctx;
2075     AVCodec *codec;
2076     SDL_AudioSpec wanted_spec, spec;
2077     AVDictionary *opts;
2078     AVDictionaryEntry *t = NULL;
2079
2080     if (stream_index < 0 || stream_index >= ic->nb_streams)
2081         return -1;
2082     avctx = ic->streams[stream_index]->codec;
2083
2084     opts = filter_codec_opts(codec_opts, avctx->codec_id, ic, ic->streams[stream_index]);
2085
2086     codec = avcodec_find_decoder(avctx->codec_id);
2087     avctx->debug_mv          = debug_mv;
2088     avctx->debug             = debug;
2089     avctx->workaround_bugs   = workaround_bugs;
2090     avctx->idct_algo         = idct;
2091     avctx->skip_frame        = skip_frame;
2092     avctx->skip_idct         = skip_idct;
2093     avctx->skip_loop_filter  = skip_loop_filter;
2094     avctx->error_concealment = error_concealment;
2095
2096     if (fast)   avctx->flags2 |= CODEC_FLAG2_FAST;
2097
2098     if (!av_dict_get(opts, "threads", NULL, 0))
2099         av_dict_set(&opts, "threads", "auto", 0);
2100     if (!codec ||
2101         avcodec_open2(avctx, codec, &opts) < 0)
2102         return -1;
2103     if ((t = av_dict_get(opts, "", NULL, AV_DICT_IGNORE_SUFFIX))) {
2104         av_log(NULL, AV_LOG_ERROR, "Option %s not found.\n", t->key);
2105         return AVERROR_OPTION_NOT_FOUND;
2106     }
2107
2108     /* prepare audio output */
2109     if (avctx->codec_type == AVMEDIA_TYPE_AUDIO) {
2110         wanted_spec.freq = avctx->sample_rate;
2111         wanted_spec.format = AUDIO_S16SYS;
2112
2113         if (!avctx->channel_layout)
2114             avctx->channel_layout = av_get_default_channel_layout(avctx->channels);
2115         if (!avctx->channel_layout) {
2116             fprintf(stderr, "unable to guess channel layout\n");
2117             return -1;
2118         }
2119         if (avctx->channels == 1)
2120             is->sdl_channel_layout = AV_CH_LAYOUT_MONO;
2121         else
2122             is->sdl_channel_layout = AV_CH_LAYOUT_STEREO;
2123         is->sdl_channels = av_get_channel_layout_nb_channels(is->sdl_channel_layout);
2124
2125         wanted_spec.channels = is->sdl_channels;
2126         wanted_spec.silence = 0;
2127         wanted_spec.samples = SDL_AUDIO_BUFFER_SIZE;
2128         wanted_spec.callback = sdl_audio_callback;
2129         wanted_spec.userdata = is;
2130         if (SDL_OpenAudio(&wanted_spec, &spec) < 0) {
2131             fprintf(stderr, "SDL_OpenAudio: %s\n", SDL_GetError());
2132             return -1;
2133         }
2134         is->audio_hw_buf_size = spec.size;
2135         is->sdl_sample_fmt          = AV_SAMPLE_FMT_S16;
2136         is->resample_sample_fmt     = is->sdl_sample_fmt;
2137         is->resample_channel_layout = is->sdl_channel_layout;
2138     }
2139
2140     ic->streams[stream_index]->discard = AVDISCARD_DEFAULT;
2141     switch (avctx->codec_type) {
2142     case AVMEDIA_TYPE_AUDIO:
2143         is->audio_stream = stream_index;
2144         is->audio_st = ic->streams[stream_index];
2145         is->audio_buf_size  = 0;
2146         is->audio_buf_index = 0;
2147
2148         /* init averaging filter */
2149         is->audio_diff_avg_coef  = exp(log(0.01) / AUDIO_DIFF_AVG_NB);
2150         is->audio_diff_avg_count = 0;
2151         /* since we do not have a precise enough audio FIFO fullness measure,
2152            we only correct audio sync if the error exceeds this threshold */
2153         is->audio_diff_threshold = 2.0 * SDL_AUDIO_BUFFER_SIZE / avctx->sample_rate;
2154
2155         memset(&is->audio_pkt, 0, sizeof(is->audio_pkt));
2156         packet_queue_init(&is->audioq);
2157         SDL_PauseAudio(0);
2158         break;
2159     case AVMEDIA_TYPE_VIDEO:
2160         is->video_stream = stream_index;
2161         is->video_st = ic->streams[stream_index];
2162
2163         packet_queue_init(&is->videoq);
2164         is->video_tid = SDL_CreateThread(video_thread, is);
2165         break;
2166     case AVMEDIA_TYPE_SUBTITLE:
2167         is->subtitle_stream = stream_index;
2168         is->subtitle_st = ic->streams[stream_index];
2169         packet_queue_init(&is->subtitleq);
2170
2171         is->subtitle_tid = SDL_CreateThread(subtitle_thread, is);
2172         break;
2173     default:
2174         break;
2175     }
2176     return 0;
2177 }
2178
2179 static void stream_component_close(VideoState *is, int stream_index)
2180 {
2181     AVFormatContext *ic = is->ic;
2182     AVCodecContext *avctx;
2183
2184     if (stream_index < 0 || stream_index >= ic->nb_streams)
2185         return;
2186     avctx = ic->streams[stream_index]->codec;
2187
2188     switch (avctx->codec_type) {
2189     case AVMEDIA_TYPE_AUDIO:
2190         packet_queue_abort(&is->audioq);
2191
2192         SDL_CloseAudio();
2193
2194         packet_queue_end(&is->audioq);
2195         av_free_packet(&is->audio_pkt);
2196         if (is->avr)
2197             avresample_free(&is->avr);
2198         av_freep(&is->audio_buf1);
2199         is->audio_buf = NULL;
2200         av_freep(&is->frame);
2201
2202         if (is->rdft) {
2203             av_rdft_end(is->rdft);
2204             av_freep(&is->rdft_data);
2205             is->rdft = NULL;
2206             is->rdft_bits = 0;
2207         }
2208         break;
2209     case AVMEDIA_TYPE_VIDEO:
2210         packet_queue_abort(&is->videoq);
2211
2212         /* note: we also signal this mutex to make sure we deblock the
2213            video thread in all cases */
2214         SDL_LockMutex(is->pictq_mutex);
2215         SDL_CondSignal(is->pictq_cond);
2216         SDL_UnlockMutex(is->pictq_mutex);
2217
2218         SDL_WaitThread(is->video_tid, NULL);
2219
2220         packet_queue_end(&is->videoq);
2221         break;
2222     case AVMEDIA_TYPE_SUBTITLE:
2223         packet_queue_abort(&is->subtitleq);
2224
2225         /* note: we also signal this mutex to make sure we deblock the
2226            subtitle thread in all cases */
2227         SDL_LockMutex(is->subpq_mutex);
2228         is->subtitle_stream_changed = 1;
2229
2230         SDL_CondSignal(is->subpq_cond);
2231         SDL_UnlockMutex(is->subpq_mutex);
2232
2233         SDL_WaitThread(is->subtitle_tid, NULL);
2234
2235         packet_queue_end(&is->subtitleq);
2236         break;
2237     default:
2238         break;
2239     }
2240
2241     ic->streams[stream_index]->discard = AVDISCARD_ALL;
2242     avcodec_close(avctx);
2243     free_buffer_pool(&is->buffer_pool);
2244     switch (avctx->codec_type) {
2245     case AVMEDIA_TYPE_AUDIO:
2246         is->audio_st = NULL;
2247         is->audio_stream = -1;
2248         break;
2249     case AVMEDIA_TYPE_VIDEO:
2250         is->video_st = NULL;
2251         is->video_stream = -1;
2252         break;
2253     case AVMEDIA_TYPE_SUBTITLE:
2254         is->subtitle_st = NULL;
2255         is->subtitle_stream = -1;
2256         break;
2257     default:
2258         break;
2259     }
2260 }
2261
2262 /* since we have only one decoding thread, we can use a global
2263    variable instead of a thread local variable */
2264 static VideoState *global_video_state;
2265
2266 static int decode_interrupt_cb(void *ctx)
2267 {
2268     return global_video_state && global_video_state->abort_request;
2269 }
2270
2271 /* this thread gets the stream from the disk or the network */
2272 static int decode_thread(void *arg)
2273 {
2274     VideoState *is = arg;
2275     AVFormatContext *ic = NULL;
2276     int err, i, ret;
2277     int st_index[AVMEDIA_TYPE_NB];
2278     AVPacket pkt1, *pkt = &pkt1;
2279     int eof = 0;
2280     int pkt_in_play_range = 0;
2281     AVDictionaryEntry *t;
2282     AVDictionary **opts;
2283     int orig_nb_streams;
2284
2285     memset(st_index, -1, sizeof(st_index));
2286     is->video_stream = -1;
2287     is->audio_stream = -1;
2288     is->subtitle_stream = -1;
2289
2290     global_video_state = is;
2291
2292     ic = avformat_alloc_context();
2293     ic->interrupt_callback.callback = decode_interrupt_cb;
2294     err = avformat_open_input(&ic, is->filename, is->iformat, &format_opts);
2295     if (err < 0) {
2296         print_error(is->filename, err);
2297         ret = -1;
2298         goto fail;
2299     }
2300     if ((t = av_dict_get(format_opts, "", NULL, AV_DICT_IGNORE_SUFFIX))) {
2301         av_log(NULL, AV_LOG_ERROR, "Option %s not found.\n", t->key);
2302         ret = AVERROR_OPTION_NOT_FOUND;
2303         goto fail;
2304     }
2305     is->ic = ic;
2306
2307     if (genpts)
2308         ic->flags |= AVFMT_FLAG_GENPTS;
2309
2310     opts = setup_find_stream_info_opts(ic, codec_opts);
2311     orig_nb_streams = ic->nb_streams;
2312
2313     err = avformat_find_stream_info(ic, opts);
2314     if (err < 0) {
2315         fprintf(stderr, "%s: could not find codec parameters\n", is->filename);
2316         ret = -1;
2317         goto fail;
2318     }
2319     for (i = 0; i < orig_nb_streams; i++)
2320         av_dict_free(&opts[i]);
2321     av_freep(&opts);
2322
2323     if (ic->pb)
2324         ic->pb->eof_reached = 0; // FIXME hack, avplay should probably not use url_feof() to test for the end
2325
2326     if (seek_by_bytes < 0)
2327         seek_by_bytes = !!(ic->iformat->flags & AVFMT_TS_DISCONT);
2328
2329     /* if seeking requested, we execute it */
2330     if (start_time != AV_NOPTS_VALUE) {
2331         int64_t timestamp;
2332
2333         timestamp = start_time;
2334         /* add the stream start time */
2335         if (ic->start_time != AV_NOPTS_VALUE)
2336             timestamp += ic->start_time;
2337         ret = avformat_seek_file(ic, -1, INT64_MIN, timestamp, INT64_MAX, 0);
2338         if (ret < 0) {
2339             fprintf(stderr, "%s: could not seek to position %0.3f\n",
2340                     is->filename, (double)timestamp / AV_TIME_BASE);
2341         }
2342     }
2343
2344     for (i = 0; i < ic->nb_streams; i++)
2345         ic->streams[i]->discard = AVDISCARD_ALL;
2346     if (!video_disable)
2347         st_index[AVMEDIA_TYPE_VIDEO] =
2348             av_find_best_stream(ic, AVMEDIA_TYPE_VIDEO,
2349                                 wanted_stream[AVMEDIA_TYPE_VIDEO], -1, NULL, 0);
2350     if (!audio_disable)
2351         st_index[AVMEDIA_TYPE_AUDIO] =
2352             av_find_best_stream(ic, AVMEDIA_TYPE_AUDIO,
2353                                 wanted_stream[AVMEDIA_TYPE_AUDIO],
2354                                 st_index[AVMEDIA_TYPE_VIDEO],
2355                                 NULL, 0);
2356     if (!video_disable)
2357         st_index[AVMEDIA_TYPE_SUBTITLE] =
2358             av_find_best_stream(ic, AVMEDIA_TYPE_SUBTITLE,
2359                                 wanted_stream[AVMEDIA_TYPE_SUBTITLE],
2360                                 (st_index[AVMEDIA_TYPE_AUDIO] >= 0 ?
2361                                  st_index[AVMEDIA_TYPE_AUDIO] :
2362                                  st_index[AVMEDIA_TYPE_VIDEO]),
2363                                 NULL, 0);
2364     if (show_status) {
2365         av_dump_format(ic, 0, is->filename, 0);
2366     }
2367
2368     /* open the streams */
2369     if (st_index[AVMEDIA_TYPE_AUDIO] >= 0) {
2370         stream_component_open(is, st_index[AVMEDIA_TYPE_AUDIO]);
2371     }
2372
2373     ret = -1;
2374     if (st_index[AVMEDIA_TYPE_VIDEO] >= 0) {
2375         ret = stream_component_open(is, st_index[AVMEDIA_TYPE_VIDEO]);
2376     }
2377     is->refresh_tid = SDL_CreateThread(refresh_thread, is);
2378     if (ret < 0) {
2379         if (!display_disable)
2380             is->show_audio = 2;
2381     }
2382
2383     if (st_index[AVMEDIA_TYPE_SUBTITLE] >= 0) {
2384         stream_component_open(is, st_index[AVMEDIA_TYPE_SUBTITLE]);
2385     }
2386
2387     if (is->video_stream < 0 && is->audio_stream < 0) {
2388         fprintf(stderr, "%s: could not open codecs\n", is->filename);
2389         ret = -1;
2390         goto fail;
2391     }
2392
2393     for (;;) {
2394         if (is->abort_request)
2395             break;
2396         if (is->paused != is->last_paused) {
2397             is->last_paused = is->paused;
2398             if (is->paused)
2399                 is->read_pause_return = av_read_pause(ic);
2400             else
2401                 av_read_play(ic);
2402         }
2403 #if CONFIG_RTSP_DEMUXER
2404         if (is->paused && !strcmp(ic->iformat->name, "rtsp")) {
2405             /* wait 10 ms to avoid trying to get another packet */
2406             /* XXX: horrible */
2407             SDL_Delay(10);
2408             continue;
2409         }
2410 #endif
2411         if (is->seek_req) {
2412             int64_t seek_target = is->seek_pos;
2413             int64_t seek_min    = is->seek_rel > 0 ? seek_target - is->seek_rel + 2: INT64_MIN;
2414             int64_t seek_max    = is->seek_rel < 0 ? seek_target - is->seek_rel - 2: INT64_MAX;
2415 // FIXME the +-2 is due to rounding being not done in the correct direction in generation
2416 //      of the seek_pos/seek_rel variables
2417
2418             ret = avformat_seek_file(is->ic, -1, seek_min, seek_target, seek_max, is->seek_flags);
2419             if (ret < 0) {
2420                 fprintf(stderr, "%s: error while seeking\n", is->ic->filename);
2421             } else {
2422                 if (is->audio_stream >= 0) {
2423                     packet_queue_flush(&is->audioq);
2424                     packet_queue_put(&is->audioq, &flush_pkt);
2425                 }
2426                 if (is->subtitle_stream >= 0) {
2427                     packet_queue_flush(&is->subtitleq);
2428                     packet_queue_put(&is->subtitleq, &flush_pkt);
2429                 }
2430                 if (is->video_stream >= 0) {
2431                     packet_queue_flush(&is->videoq);
2432                     packet_queue_put(&is->videoq, &flush_pkt);
2433                 }
2434             }
2435             is->seek_req = 0;
2436             eof = 0;
2437         }
2438
2439         /* if the queues are full, no need to read more */
2440         if (   is->audioq.size + is->videoq.size + is->subtitleq.size > MAX_QUEUE_SIZE
2441             || (   (is->audioq   .size  > MIN_AUDIOQ_SIZE || is->audio_stream < 0)
2442                 && (is->videoq   .nb_packets > MIN_FRAMES || is->video_stream < 0)
2443                 && (is->subtitleq.nb_packets > MIN_FRAMES || is->subtitle_stream < 0))) {
2444             /* wait 10 ms */
2445             SDL_Delay(10);
2446             continue;
2447         }
2448         if (eof) {
2449             if (is->video_stream >= 0) {
2450                 av_init_packet(pkt);
2451                 pkt->data = NULL;
2452                 pkt->size = 0;
2453                 pkt->stream_index = is->video_stream;
2454                 packet_queue_put(&is->videoq, pkt);
2455             }
2456             if (is->audio_stream >= 0 &&
2457                 is->audio_st->codec->codec->capabilities & CODEC_CAP_DELAY) {
2458                 av_init_packet(pkt);
2459                 pkt->data = NULL;
2460                 pkt->size = 0;
2461                 pkt->stream_index = is->audio_stream;
2462                 packet_queue_put(&is->audioq, pkt);
2463             }
2464             SDL_Delay(10);
2465             if (is->audioq.size + is->videoq.size + is->subtitleq.size == 0) {
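                /* loop == 0 means loop forever, loop == 1 means play once;
                 * any larger value is decremented and playback restarts from
                 * the requested start position until the count reaches 1 */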
2466                 if (loop != 1 && (!loop || --loop)) {
2467                     stream_seek(cur_stream, start_time != AV_NOPTS_VALUE ? start_time : 0, 0, 0);
2468                 } else if (autoexit) {
2469                     ret = AVERROR_EOF;
2470                     goto fail;
2471                 }
2472             }
2473             continue;
2474         }
2475         ret = av_read_frame(ic, pkt);
2476         if (ret < 0) {
2477             if (ret == AVERROR_EOF || (ic->pb && ic->pb->eof_reached))
2478                 eof = 1;
2479             if (ic->pb && ic->pb->error)
2480                 break;
2481             SDL_Delay(100); /* wait for user event */
2482             continue;
2483         }
2484         /* check if packet is in play range specified by user, then queue, otherwise discard */
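        /* i.e. queue the packet when no -t duration was given, or when its
         * pts, converted to seconds and made relative to the stream's
         * start_time, has not yet passed the requested start position plus
         * the -t duration */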
2485         pkt_in_play_range = duration == AV_NOPTS_VALUE ||
2486                 (pkt->pts - ic->streams[pkt->stream_index]->start_time) *
2487                 av_q2d(ic->streams[pkt->stream_index]->time_base) -
2488                 (double)(start_time != AV_NOPTS_VALUE ? start_time : 0) / 1000000
2489                 <= ((double)duration / 1000000);
2490         if (pkt->stream_index == is->audio_stream && pkt_in_play_range) {
2491             packet_queue_put(&is->audioq, pkt);
2492         } else if (pkt->stream_index == is->video_stream && pkt_in_play_range) {
2493             packet_queue_put(&is->videoq, pkt);
2494         } else if (pkt->stream_index == is->subtitle_stream && pkt_in_play_range) {
2495             packet_queue_put(&is->subtitleq, pkt);
2496         } else {
2497             av_free_packet(pkt);
2498         }
2499     }
2500     /* wait until the end */
2501     while (!is->abort_request) {
2502         SDL_Delay(100);
2503     }
2504
2505     ret = 0;
2506  fail:
2507     /* disable interrupting */
2508     global_video_state = NULL;
2509
2510     /* close each stream */
2511     if (is->audio_stream >= 0)
2512         stream_component_close(is, is->audio_stream);
2513     if (is->video_stream >= 0)
2514         stream_component_close(is, is->video_stream);
2515     if (is->subtitle_stream >= 0)
2516         stream_component_close(is, is->subtitle_stream);
2517     if (is->ic) {
2518         avformat_close_input(&is->ic);
2519     }
2520
2521     if (ret != 0) {
2522         SDL_Event event;
2523
2524         event.type = FF_QUIT_EVENT;
2525         event.user.data1 = is;
2526         SDL_PushEvent(&event);
2527     }
2528     return 0;
2529 }
2530
2531 static VideoState *stream_open(const char *filename, AVInputFormat *iformat)
2532 {
2533     VideoState *is;
2534
2535     is = av_mallocz(sizeof(VideoState));
2536     if (!is)
2537         return NULL;
2538     av_strlcpy(is->filename, filename, sizeof(is->filename));
2539     is->iformat = iformat;
2540     is->ytop    = 0;
2541     is->xleft   = 0;
2542
2543     /* start video display */
2544     is->pictq_mutex = SDL_CreateMutex();
2545     is->pictq_cond  = SDL_CreateCond();
2546
2547     is->subpq_mutex = SDL_CreateMutex();
2548     is->subpq_cond  = SDL_CreateCond();
2549
2550     is->av_sync_type = av_sync_type;
2551     is->parse_tid    = SDL_CreateThread(decode_thread, is);
2552     if (!is->parse_tid) {
2553         av_free(is);
2554         return NULL;
2555     }
2556     return is;
2557 }
2558
2559 static void stream_cycle_channel(VideoState *is, int codec_type)
2560 {
2561     AVFormatContext *ic = is->ic;
2562     int start_index, stream_index;
2563     AVStream *st;
2564
2565     if (codec_type == AVMEDIA_TYPE_VIDEO)
2566         start_index = is->video_stream;
2567     else if (codec_type == AVMEDIA_TYPE_AUDIO)
2568         start_index = is->audio_stream;
2569     else
2570         start_index = is->subtitle_stream;
2571     if (start_index < (codec_type == AVMEDIA_TYPE_SUBTITLE ? -1 : 0))
2572         return;
2573     stream_index = start_index;
2574     for (;;) {
2575         if (++stream_index >= is->ic->nb_streams)
2576         {
2577             if (codec_type == AVMEDIA_TYPE_SUBTITLE)
2578             {
2579                 stream_index = -1;
2580                 goto the_end;
2581             } else
2582                 stream_index = 0;
2583         }
2584         if (stream_index == start_index)
2585             return;
2586         st = ic->streams[stream_index];
2587         if (st->codec->codec_type == codec_type) {
2588             /* check that parameters are OK */
2589             switch (codec_type) {
2590             case AVMEDIA_TYPE_AUDIO:
2591                 if (st->codec->sample_rate != 0 &&
2592                     st->codec->channels != 0)
2593                     goto the_end;
2594                 break;
2595             case AVMEDIA_TYPE_VIDEO:
2596             case AVMEDIA_TYPE_SUBTITLE:
2597                 goto the_end;
2598             default:
2599                 break;
2600             }
2601         }
2602     }
2603  the_end:
2604     stream_component_close(is, start_index);
2605     stream_component_open(is, stream_index);
2606 }
2607
2608
2609 static void toggle_full_screen(void)
2610 {
2611 #if defined(__APPLE__) && SDL_VERSION_ATLEAST(1, 2, 14)
2612     /* OS X needs to empty the picture_queue */
2613     int i;
2614     for (i = 0; i < VIDEO_PICTURE_QUEUE_SIZE; i++)
2615         cur_stream->pictq[i].reallocate = 1;
2616 #endif
2617     is_full_screen = !is_full_screen;
2618     video_open(cur_stream);
2619 }
2620
2621 static void toggle_pause(void)
2622 {
2623     if (cur_stream)
2624         stream_pause(cur_stream);
2625     step = 0;
2626 }
2627
2628 static void step_to_next_frame(void)
2629 {
2630     if (cur_stream) {
2631         /* if the stream is paused unpause it, then step */
2632         if (cur_stream->paused)
2633             stream_pause(cur_stream);
2634     }
2635     step = 1;
2636 }
2637
2638 static void toggle_audio_display(void)
2639 {
2640     if (cur_stream) {
2641         int bgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0x00);
2642         cur_stream->show_audio = (cur_stream->show_audio + 1) % 3;
2643         fill_rectangle(screen,
2644                        cur_stream->xleft, cur_stream->ytop, cur_stream->width, cur_stream->height,
2645                        bgcolor);
2646         SDL_UpdateRect(screen, cur_stream->xleft, cur_stream->ytop, cur_stream->width, cur_stream->height);
2647     }
2648 }
2649
2650 /* handle an event sent by the GUI */
2651 static void event_loop(void)
2652 {
2653     SDL_Event event;
2654     double incr, pos, frac;
2655
2656     for (;;) {
2657         double x;
2658         SDL_WaitEvent(&event);
2659         switch (event.type) {
2660         case SDL_KEYDOWN:
2661             if (exit_on_keydown) {
2662                 do_exit();
2663                 break;
2664             }
2665             switch (event.key.keysym.sym) {
2666             case SDLK_ESCAPE:
2667             case SDLK_q:
2668                 do_exit();
2669                 break;
2670             case SDLK_f:
2671                 toggle_full_screen();
2672                 break;
2673             case SDLK_p:
2674             case SDLK_SPACE:
2675                 toggle_pause();
2676                 break;
2677             case SDLK_s: // S: Step to next frame
2678                 step_to_next_frame();
2679                 break;
2680             case SDLK_a:
2681                 if (cur_stream)
2682                     stream_cycle_channel(cur_stream, AVMEDIA_TYPE_AUDIO);
2683                 break;
2684             case SDLK_v:
2685                 if (cur_stream)
2686                     stream_cycle_channel(cur_stream, AVMEDIA_TYPE_VIDEO);
2687                 break;
2688             case SDLK_t:
2689                 if (cur_stream)
2690                     stream_cycle_channel(cur_stream, AVMEDIA_TYPE_SUBTITLE);
2691                 break;
2692             case SDLK_w:
2693                 toggle_audio_display();
2694                 break;
2695             case SDLK_LEFT:
2696                 incr = -10.0;
2697                 goto do_seek;
2698             case SDLK_RIGHT:
2699                 incr = 10.0;
2700                 goto do_seek;
2701             case SDLK_UP:
2702                 incr = 60.0;
2703                 goto do_seek;
2704             case SDLK_DOWN:
2705                 incr = -60.0;
2706             do_seek:
2707                 if (cur_stream) {
2708                     if (seek_by_bytes) {
2709                         if (cur_stream->video_stream >= 0 && cur_stream->video_current_pos >= 0) {
2710                             pos = cur_stream->video_current_pos;
2711                         } else if (cur_stream->audio_stream >= 0 && cur_stream->audio_pkt.pos >= 0) {
2712                             pos = cur_stream->audio_pkt.pos;
2713                         } else
2714                             pos = avio_tell(cur_stream->ic->pb);
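                        /* convert the +/- seconds increment into a byte
                         * offset using the container bit rate when known,
                         * otherwise assume roughly 180000 bytes/s
                         * (about 1.44 Mbit/s) */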
2715                         if (cur_stream->ic->bit_rate)
2716                             incr *= cur_stream->ic->bit_rate / 8.0;
2717                         else
2718                             incr *= 180000.0;
2719                         pos += incr;
2720                         stream_seek(cur_stream, pos, incr, 1);
2721                     } else {
2722                         pos = get_master_clock(cur_stream);
2723                         pos += incr;
2724                         stream_seek(cur_stream, (int64_t)(pos * AV_TIME_BASE), (int64_t)(incr * AV_TIME_BASE), 0);
2725                     }
2726                 }
2727                 break;
2728             default:
2729                 break;
2730             }
2731             break;
2732         case SDL_MOUSEBUTTONDOWN:
2733             if (exit_on_mousedown) {
2734                 do_exit();
2735                 break;
2736             }
2737         case SDL_MOUSEMOTION:
2738             if (event.type == SDL_MOUSEBUTTONDOWN) {
2739                 x = event.button.x;
2740             } else {
2741                 if (event.motion.state != SDL_PRESSED)
2742                     break;
2743                 x = event.motion.x;
2744             }
2745             if (cur_stream) {
2746                 if (seek_by_bytes || cur_stream->ic->duration <= 0) {
2747                     uint64_t size =  avio_size(cur_stream->ic->pb);
2748                     stream_seek(cur_stream, size*x/cur_stream->width, 0, 1);
2749                 } else {
2750                     int64_t ts;
2751                     int ns, hh, mm, ss;
2752                     int tns, thh, tmm, tss;
2753                     tns  = cur_stream->ic->duration / 1000000LL;
2754                     thh  = tns / 3600;
2755                     tmm  = (tns % 3600) / 60;
2756                     tss  = (tns % 60);
2757                     frac = x / cur_stream->width;
2758                     ns   = frac * tns;
2759                     hh   = ns / 3600;
2760                     mm   = (ns % 3600) / 60;
2761                     ss   = (ns % 60);
2762                     fprintf(stderr, "Seek to %2.0f%% (%2d:%02d:%02d) of total duration (%2d:%02d:%02d)       \n", frac*100,
2763                             hh, mm, ss, thh, tmm, tss);
2764                     ts = frac * cur_stream->ic->duration;
2765                     if (cur_stream->ic->start_time != AV_NOPTS_VALUE)
2766                         ts += cur_stream->ic->start_time;
2767                     stream_seek(cur_stream, ts, 0, 0);
2768                 }
2769             }
2770             break;
2771         case SDL_VIDEORESIZE:
2772             if (cur_stream) {
2773                 screen = SDL_SetVideoMode(event.resize.w, event.resize.h, 0,
2774                                           SDL_HWSURFACE|SDL_RESIZABLE|SDL_ASYNCBLIT|SDL_HWACCEL);
2775                 screen_width  = cur_stream->width  = event.resize.w;
2776                 screen_height = cur_stream->height = event.resize.h;
2777             }
2778             break;
2779         case SDL_QUIT:
2780         case FF_QUIT_EVENT:
2781             do_exit();
2782             break;
2783         case FF_ALLOC_EVENT:
2784             video_open(event.user.data1);
2785             alloc_picture(event.user.data1);
2786             break;
2787         case FF_REFRESH_EVENT:
2788             video_refresh_timer(event.user.data1);
2789             cur_stream->refresh = 0;
2790             break;
2791         default:
2792             break;
2793         }
2794     }
2795 }
2796
2797 static int opt_frame_size(const char *opt, const char *arg)
2798 {
2799     av_log(NULL, AV_LOG_ERROR,
2800            "Option '%s' has been removed, use private format options instead\n", opt);
2801     return AVERROR(EINVAL);
2802 }
2803
2804 static int opt_width(const char *opt, const char *arg)
2805 {
2806     screen_width = parse_number_or_die(opt, arg, OPT_INT64, 1, INT_MAX);
2807     return 0;
2808 }
2809
2810 static int opt_height(const char *opt, const char *arg)
2811 {
2812     screen_height = parse_number_or_die(opt, arg, OPT_INT64, 1, INT_MAX);
2813     return 0;
2814 }
2815
2816 static int opt_format(const char *opt, const char *arg)
2817 {
2818     file_iformat = av_find_input_format(arg);
2819     if (!file_iformat) {
2820         fprintf(stderr, "Unknown input format: %s\n", arg);
2821         return AVERROR(EINVAL);
2822     }
2823     return 0;
2824 }
2825
2826 static int opt_frame_pix_fmt(const char *opt, const char *arg)
2827 {
2828     av_log(NULL, AV_LOG_ERROR,
2829            "Option '%s' has been removed, use private format options instead\n", opt);
2830     return AVERROR(EINVAL);
2831 }
2832
2833 static int opt_sync(const char *opt, const char *arg)
2834 {
2835     if (!strcmp(arg, "audio"))
2836         av_sync_type = AV_SYNC_AUDIO_MASTER;
2837     else if (!strcmp(arg, "video"))
2838         av_sync_type = AV_SYNC_VIDEO_MASTER;
2839     else if (!strcmp(arg, "ext"))
2840         av_sync_type = AV_SYNC_EXTERNAL_CLOCK;
2841     else {
2842         fprintf(stderr, "Unknown value for %s: %s\n", opt, arg);
2843         exit(1);
2844     }
2845     return 0;
2846 }
2847
2848 static int opt_seek(const char *opt, const char *arg)
2849 {
2850     start_time = parse_time_or_die(opt, arg, 1);
2851     return 0;
2852 }
2853
2854 static int opt_duration(const char *opt, const char *arg)
2855 {
2856     duration = parse_time_or_die(opt, arg, 1);
2857     return 0;
2858 }
2859
2860 static int opt_debug(const char *opt, const char *arg)
2861 {
2862     av_log_set_level(99);
2863     debug = parse_number_or_die(opt, arg, OPT_INT64, 0, INT_MAX);
2864     return 0;
2865 }
2866
2867 static int opt_vismv(const char *opt, const char *arg)
2868 {
2869     debug_mv = parse_number_or_die(opt, arg, OPT_INT64, INT_MIN, INT_MAX);
2870     return 0;
2871 }
2872
2873 static const OptionDef options[] = {
2874 #include "cmdutils_common_opts.h"
2875     { "x", HAS_ARG, { (void*)opt_width }, "force displayed width", "width" },
2876     { "y", HAS_ARG, { (void*)opt_height }, "force displayed height", "height" },
2877     { "s", HAS_ARG | OPT_VIDEO, { (void*)opt_frame_size }, "set frame size (WxH or abbreviation)", "size" },
2878     { "fs", OPT_BOOL, { (void*)&is_full_screen }, "force full screen" },
2879     { "an", OPT_BOOL, { (void*)&audio_disable }, "disable audio" },
2880     { "vn", OPT_BOOL, { (void*)&video_disable }, "disable video" },
2881     { "ast", OPT_INT | HAS_ARG | OPT_EXPERT, { (void*)&wanted_stream[AVMEDIA_TYPE_AUDIO] }, "select desired audio stream", "stream_number" },
2882     { "vst", OPT_INT | HAS_ARG | OPT_EXPERT, { (void*)&wanted_stream[AVMEDIA_TYPE_VIDEO] }, "select desired video stream", "stream_number" },
2883     { "sst", OPT_INT | HAS_ARG | OPT_EXPERT, { (void*)&wanted_stream[AVMEDIA_TYPE_SUBTITLE] }, "select desired subtitle stream", "stream_number" },
2884     { "ss", HAS_ARG, { (void*)&opt_seek }, "seek to a given position in seconds", "pos" },
2885     { "t", HAS_ARG, { (void*)&opt_duration }, "play \"duration\" seconds of audio/video", "duration" },
2886     { "bytes", OPT_INT | HAS_ARG, { (void*)&seek_by_bytes }, "seek by bytes 0=off 1=on -1=auto", "val" },
2887     { "nodisp", OPT_BOOL, { (void*)&display_disable }, "disable graphical display" },
2888     { "f", HAS_ARG, { (void*)opt_format }, "force format", "fmt" },
2889     { "pix_fmt", HAS_ARG | OPT_EXPERT | OPT_VIDEO, { (void*)opt_frame_pix_fmt }, "set pixel format", "format" },
2890     { "stats", OPT_BOOL | OPT_EXPERT, { (void*)&show_status }, "show status", "" },
2891     { "debug", HAS_ARG | OPT_EXPERT, { (void*)opt_debug }, "print specific debug info", "" },
2892     { "bug", OPT_INT | HAS_ARG | OPT_EXPERT, { (void*)&workaround_bugs }, "workaround bugs", "" },
2893     { "vismv", HAS_ARG | OPT_EXPERT, { (void*)opt_vismv }, "visualize motion vectors", "" },
2894     { "fast", OPT_BOOL | OPT_EXPERT, { (void*)&fast }, "non spec compliant optimizations", "" },
2895     { "genpts", OPT_BOOL | OPT_EXPERT, { (void*)&genpts }, "generate pts", "" },
2896     { "drp", OPT_INT | HAS_ARG | OPT_EXPERT, { (void*)&decoder_reorder_pts }, "let decoder reorder pts 0=off 1=on -1=auto", ""},
2897     { "skiploop", OPT_INT | HAS_ARG | OPT_EXPERT, { (void*)&skip_loop_filter }, "", "" },
2898     { "skipframe", OPT_INT | HAS_ARG | OPT_EXPERT, { (void*)&skip_frame }, "", "" },
2899     { "skipidct", OPT_INT | HAS_ARG | OPT_EXPERT, { (void*)&skip_idct }, "", "" },
2900     { "idct", OPT_INT | HAS_ARG | OPT_EXPERT, { (void*)&idct }, "set idct algo",  "algo" },
2901     { "ec", OPT_INT | HAS_ARG | OPT_EXPERT, { (void*)&error_concealment }, "set error concealment options",  "bit_mask" },
2902     { "sync", HAS_ARG | OPT_EXPERT, { (void*)opt_sync }, "set audio-video sync. type (type=audio/video/ext)", "type" },
2903     { "autoexit", OPT_BOOL | OPT_EXPERT, { (void*)&autoexit }, "exit at the end", "" },
2904     { "exitonkeydown", OPT_BOOL | OPT_EXPERT, { (void*)&exit_on_keydown }, "exit on key down", "" },
2905     { "exitonmousedown", OPT_BOOL | OPT_EXPERT, { (void*)&exit_on_mousedown }, "exit on mouse down", "" },
2906     { "loop", OPT_INT | HAS_ARG | OPT_EXPERT, { (void*)&loop }, "set number of times the playback shall be looped", "loop count" },
2907     { "framedrop", OPT_BOOL | OPT_EXPERT, { (void*)&framedrop }, "drop frames when cpu is too slow", "" },
2908     { "window_title", OPT_STRING | HAS_ARG, { (void*)&window_title }, "set window title", "window title" },
2909 #if CONFIG_AVFILTER
2910     { "vf", OPT_STRING | HAS_ARG, { (void*)&vfilters }, "video filters", "filter list" },
2911 #endif
2912     { "rdftspeed", OPT_INT | HAS_ARG| OPT_AUDIO | OPT_EXPERT, { (void*)&rdftspeed }, "rdft speed", "msecs" },
2913     { "default", HAS_ARG | OPT_AUDIO | OPT_VIDEO | OPT_EXPERT, { (void*)opt_default }, "generic catch all option", "" },
2914     { "i", 0, { NULL }, "avconv compatibility dummy option", ""},
2915     { NULL, },
2916 };
2917
2918 static void show_usage(void)
2919 {
2920     printf("Simple media player\n");
2921     printf("usage: %s [options] input_file\n", program_name);
2922     printf("\n");
2923 }
2924
2925 static void show_help(void)
2926 {
2927     av_log_set_callback(log_callback_help);
2928     show_usage();
2929     show_help_options(options, "Main options:\n",
2930                       OPT_EXPERT, 0);
2931     show_help_options(options, "\nAdvanced options:\n",
2932                       OPT_EXPERT, OPT_EXPERT);
2933     printf("\n");
2934     show_help_children(avcodec_get_class(), AV_OPT_FLAG_DECODING_PARAM);
2935     show_help_children(avformat_get_class(), AV_OPT_FLAG_DECODING_PARAM);
2936 #if !CONFIG_AVFILTER
2937     show_help_children(sws_get_class(), AV_OPT_FLAG_ENCODING_PARAM);
2938 #endif
2939     printf("\nWhile playing:\n"
2940            "q, ESC              quit\n"
2941            "f                   toggle full screen\n"
2942            "p, SPC              pause\n"
2943            "a                   cycle audio channel\n"
2944            "v                   cycle video channel\n"
2945            "t                   cycle subtitle channel\n"
2946            "w                   show audio waves\n"
2947            "s                   activate frame-step mode\n"
2948            "left/right          seek backward/forward 10 seconds\n"
2949            "down/up             seek backward/forward 1 minute\n"
2950            "mouse click         seek to percentage in file corresponding to fraction of width\n"
2951            );
2952 }
2953
2954 static void opt_input_file(void *optctx, const char *filename)
2955 {
2956     if (input_filename) {
2957         fprintf(stderr, "Argument '%s' provided as input filename, but '%s' was already specified.\n",
2958                 filename, input_filename);
2959         exit(1);
2960     }
2961     if (!strcmp(filename, "-"))
2962         filename = "pipe:";
2963     input_filename = filename;
2964 }
2965
2966 /* program entry point */
2967 int main(int argc, char **argv)
2968 {
2969     int flags;
2970
2971     av_log_set_flags(AV_LOG_SKIP_REPEATED);
2972     parse_loglevel(argc, argv, options);
2973
2974     /* register all codecs, demux and protocols */
2975     avcodec_register_all();
2976 #if CONFIG_AVDEVICE
2977     avdevice_register_all();
2978 #endif
2979 #if CONFIG_AVFILTER
2980     avfilter_register_all();
2981 #endif
2982     av_register_all();
2983     avformat_network_init();
2984
2985     init_opts();
2986
2987     show_banner();
2988
2989     parse_options(NULL, argc, argv, options, opt_input_file);
2990
2991     if (!input_filename) {
2992         show_usage();
2993         fprintf(stderr, "An input file must be specified\n");
2994         fprintf(stderr, "Use -h to get full help or, even better, run 'man %s'\n", program_name);
2995         exit(1);
2996     }
2997
2998     if (display_disable) {
2999         video_disable = 1;
3000     }
3001     flags = SDL_INIT_VIDEO | SDL_INIT_AUDIO | SDL_INIT_TIMER;
3002 #if !defined(__MINGW32__) && !defined(__APPLE__)
3003     flags |= SDL_INIT_EVENTTHREAD; /* Not supported on Windows or Mac OS X */
3004 #endif
3005     if (SDL_Init (flags)) {
3006         fprintf(stderr, "Could not initialize SDL - %s\n", SDL_GetError());
3007         exit(1);
3008     }
3009
3010     if (!display_disable) {
3011 #if HAVE_SDL_VIDEO_SIZE
3012         const SDL_VideoInfo *vi = SDL_GetVideoInfo();
3013         fs_screen_width = vi->current_w;
3014         fs_screen_height = vi->current_h;
3015 #endif
3016     }
3017
3018     SDL_EventState(SDL_ACTIVEEVENT, SDL_IGNORE);
3019     SDL_EventState(SDL_SYSWMEVENT, SDL_IGNORE);
3020     SDL_EventState(SDL_USEREVENT, SDL_IGNORE);
3021
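    /* flush_pkt is a sentinel: the decoder threads compare packet data
     * pointers against flush_pkt.data to detect that a seek happened, and
     * then call avcodec_flush_buffers() before continuing. */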
3022     av_init_packet(&flush_pkt);
3023     flush_pkt.data = "FLUSH";
3024
3025     cur_stream = stream_open(input_filename, file_iformat);
3026
3027     event_loop();
3028
3029     /* never returns */
3030
3031     return 0;
3032 }