1 /*
2  * avplay : Simple Media Player based on the Libav libraries
3  * Copyright (c) 2003 Fabrice Bellard
4  *
5  * This file is part of Libav.
6  *
7  * Libav is free software; you can redistribute it and/or
8  * modify it under the terms of the GNU Lesser General Public
9  * License as published by the Free Software Foundation; either
10  * version 2.1 of the License, or (at your option) any later version.
11  *
12  * Libav is distributed in the hope that it will be useful,
13  * but WITHOUT ANY WARRANTY; without even the implied warranty of
14  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
15  * Lesser General Public License for more details.
16  *
17  * You should have received a copy of the GNU Lesser General Public
18  * License along with Libav; if not, write to the Free Software
19  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
20  */
21
22 #include "config.h"
23 #include <inttypes.h>
24 #include <math.h>
25 #include <limits.h>
26 #include <stdint.h>
27
28 #include "libavutil/avstring.h"
29 #include "libavutil/colorspace.h"
30 #include "libavutil/mathematics.h"
31 #include "libavutil/pixdesc.h"
32 #include "libavutil/imgutils.h"
33 #include "libavutil/dict.h"
34 #include "libavutil/parseutils.h"
35 #include "libavutil/samplefmt.h"
36 #include "libavutil/time.h"
37 #include "libavformat/avformat.h"
38 #include "libavdevice/avdevice.h"
39 #include "libswscale/swscale.h"
40 #include "libavresample/avresample.h"
41 #include "libavutil/opt.h"
42 #include "libavcodec/avfft.h"
43
44 #if CONFIG_AVFILTER
45 # include "libavfilter/avfilter.h"
46 # include "libavfilter/buffersink.h"
47 # include "libavfilter/buffersrc.h"
48 #endif
49
50 #include "cmdutils.h"
51
52 #include <SDL.h>
53 #include <SDL_thread.h>
54
55 #ifdef __MINGW32__
56 #undef main /* We don't want SDL to override our main() */
57 #endif
58
59 #include <assert.h>
60
61 const char program_name[] = "avplay";
62 const int program_birth_year = 2003;
63
64 #define MAX_QUEUE_SIZE (15 * 1024 * 1024)
65 #define MIN_AUDIOQ_SIZE (20 * 16 * 1024)
66 #define MIN_FRAMES 5
67
68 /* SDL audio buffer size, in samples. Should be small to have precise
69    A/V sync, as SDL does not have hardware buffer fullness info. */
70 #define SDL_AUDIO_BUFFER_SIZE 1024
71
72 /* no AV sync correction is done if below the AV sync threshold */
73 #define AV_SYNC_THRESHOLD 0.01
74 /* no AV correction is done if the error is too big */
75 #define AV_NOSYNC_THRESHOLD 10.0
76
77 #define FRAME_SKIP_FACTOR 0.05
78
79 /* maximum audio speed change to get correct sync */
80 #define SAMPLE_CORRECTION_PERCENT_MAX 10
81
82 /* we use about AUDIO_DIFF_AVG_NB A-V differences to make the average */
83 #define AUDIO_DIFF_AVG_NB   20
84
85 /* NOTE: the size must be big enough to compensate for the hardware audio buffer size */
86 #define SAMPLE_ARRAY_SIZE (2 * 65536)
87
88 static int64_t sws_flags = SWS_BICUBIC;
89
90 typedef struct PacketQueue {
91     AVPacketList *first_pkt, *last_pkt;
92     int nb_packets;
93     int size;
94     int abort_request;
95     SDL_mutex *mutex;
96     SDL_cond *cond;
97 } PacketQueue;
98
99 #define VIDEO_PICTURE_QUEUE_SIZE 2
100 #define SUBPICTURE_QUEUE_SIZE 4
101
102 typedef struct VideoPicture {
103     double pts;             // presentation timestamp for this picture
104     double target_clock;    // av_gettime() time at which this should be displayed ideally
105     int64_t pos;            // byte position in file
106     SDL_Overlay *bmp;
107     int width, height; /* source height & width */
108     int allocated;
109     int reallocate;
110     enum AVPixelFormat pix_fmt;
111
112     AVRational sar;
113 } VideoPicture;
114
115 typedef struct SubPicture {
116     double pts; /* presentation time stamp for this picture */
117     AVSubtitle sub;
118 } SubPicture;
119
120 enum {
121     AV_SYNC_AUDIO_MASTER, /* default choice */
122     AV_SYNC_VIDEO_MASTER,
123     AV_SYNC_EXTERNAL_CLOCK, /* synchronize to an external clock */
124 };
125
126 typedef struct VideoState {
127     SDL_Thread *parse_tid;
128     SDL_Thread *video_tid;
129     SDL_Thread *refresh_tid;
130     AVInputFormat *iformat;
131     int no_background;
132     int abort_request;
133     int paused;
134     int last_paused;
135     int seek_req;
136     int seek_flags;
137     int64_t seek_pos;
138     int64_t seek_rel;
139     int read_pause_return;
140     AVFormatContext *ic;
141
142     int audio_stream;
143
144     int av_sync_type;
145     double external_clock; /* external clock base */
146     int64_t external_clock_time;
147
148     double audio_clock;
149     double audio_diff_cum; /* used for AV difference average computation */
150     double audio_diff_avg_coef;
151     double audio_diff_threshold;
152     int audio_diff_avg_count;
153     AVStream *audio_st;
154     PacketQueue audioq;
155     int audio_hw_buf_size;
156     uint8_t silence_buf[SDL_AUDIO_BUFFER_SIZE];
157     uint8_t *audio_buf;
158     uint8_t *audio_buf1;
159     unsigned int audio_buf_size; /* in bytes */
160     int audio_buf_index; /* in bytes */
161     AVPacket audio_pkt_temp;
162     AVPacket audio_pkt;
163     enum AVSampleFormat sdl_sample_fmt;
164     uint64_t sdl_channel_layout;
165     int sdl_channels;
166     int sdl_sample_rate;
167     enum AVSampleFormat resample_sample_fmt;
168     uint64_t resample_channel_layout;
169     int resample_sample_rate;
170     AVAudioResampleContext *avr;
171     AVFrame *frame;
172
173     int show_audio; /* if true, display audio samples */
174     int16_t sample_array[SAMPLE_ARRAY_SIZE];
175     int sample_array_index;
176     int last_i_start;
177     RDFTContext *rdft;
178     int rdft_bits;
179     FFTSample *rdft_data;
180     int xpos;
181
182     SDL_Thread *subtitle_tid;
183     int subtitle_stream;
184     int subtitle_stream_changed;
185     AVStream *subtitle_st;
186     PacketQueue subtitleq;
187     SubPicture subpq[SUBPICTURE_QUEUE_SIZE];
188     int subpq_size, subpq_rindex, subpq_windex;
189     SDL_mutex *subpq_mutex;
190     SDL_cond *subpq_cond;
191
192     double frame_timer;
193     double frame_last_pts;
194     double frame_last_delay;
195     double video_clock;             // pts of last decoded frame / predicted pts of next decoded frame
196     int video_stream;
197     AVStream *video_st;
198     PacketQueue videoq;
199     double video_current_pts;       // current displayed pts (different from video_clock if frame fifos are used)
200     double video_current_pts_drift; // video_current_pts - time (av_gettime) at which we updated video_current_pts - used to have running video pts
201     int64_t video_current_pos;      // current displayed file pos
202     VideoPicture pictq[VIDEO_PICTURE_QUEUE_SIZE];
203     int pictq_size, pictq_rindex, pictq_windex;
204     SDL_mutex *pictq_mutex;
205     SDL_cond *pictq_cond;
206 #if !CONFIG_AVFILTER
207     struct SwsContext *img_convert_ctx;
208 #endif
209
210     //    QETimer *video_timer;
211     char filename[1024];
212     int width, height, xleft, ytop;
213
214     PtsCorrectionContext pts_ctx;
215
216 #if CONFIG_AVFILTER
217     AVFilterContext *in_video_filter;   // the first filter in the video chain
218     AVFilterContext *out_video_filter;  // the last filter in the video chain
219 #endif
220
221     float skip_frames;
222     float skip_frames_index;
223     int refresh;
224 } VideoState;
225
226 /* options specified by the user */
227 static AVInputFormat *file_iformat;
228 static const char *input_filename;
229 static const char *window_title;
230 static int fs_screen_width;
231 static int fs_screen_height;
232 static int screen_width  = 0;
233 static int screen_height = 0;
234 static int audio_disable;
235 static int video_disable;
236 static int wanted_stream[AVMEDIA_TYPE_NB] = {
237     [AVMEDIA_TYPE_AUDIO]    = -1,
238     [AVMEDIA_TYPE_VIDEO]    = -1,
239     [AVMEDIA_TYPE_SUBTITLE] = -1,
240 };
241 static int seek_by_bytes = -1;
242 static int display_disable;
243 static int show_status = 1;
244 static int av_sync_type = AV_SYNC_AUDIO_MASTER;
245 static int64_t start_time = AV_NOPTS_VALUE;
246 static int64_t duration = AV_NOPTS_VALUE;
247 static int step = 0;
248 static int workaround_bugs = 1;
249 static int fast = 0;
250 static int genpts = 0;
251 static int idct = FF_IDCT_AUTO;
252 static enum AVDiscard skip_frame       = AVDISCARD_DEFAULT;
253 static enum AVDiscard skip_idct        = AVDISCARD_DEFAULT;
254 static enum AVDiscard skip_loop_filter = AVDISCARD_DEFAULT;
255 static int error_concealment = 3;
256 static int decoder_reorder_pts = -1;
257 static int autoexit;
258 static int exit_on_keydown;
259 static int exit_on_mousedown;
260 static int loop = 1;
261 static int framedrop = 1;
262 static int infinite_buffer = 0;
263
264 static int rdftspeed = 20;
265 #if CONFIG_AVFILTER
266 static char *vfilters = NULL;
267 #endif
268
269 /* current context */
270 static int is_full_screen;
271 static VideoState *cur_stream;
272 static int64_t audio_callback_time;
273
274 static AVPacket flush_pkt;
275
276 #define FF_ALLOC_EVENT   (SDL_USEREVENT)
277 #define FF_REFRESH_EVENT (SDL_USEREVENT + 1)
278 #define FF_QUIT_EVENT    (SDL_USEREVENT + 2)
279
280 static SDL_Surface *screen;
281
282 static int packet_queue_put(PacketQueue *q, AVPacket *pkt);
283
284 /* packet queue handling */
285 static void packet_queue_init(PacketQueue *q)
286 {
287     memset(q, 0, sizeof(PacketQueue));
288     q->mutex = SDL_CreateMutex();
289     q->cond = SDL_CreateCond();
290     packet_queue_put(q, &flush_pkt);
291 }
292
293 static void packet_queue_flush(PacketQueue *q)
294 {
295     AVPacketList *pkt, *pkt1;
296
297     SDL_LockMutex(q->mutex);
298     for (pkt = q->first_pkt; pkt != NULL; pkt = pkt1) {
299         pkt1 = pkt->next;
300         av_free_packet(&pkt->pkt);
301         av_freep(&pkt);
302     }
303     q->last_pkt = NULL;
304     q->first_pkt = NULL;
305     q->nb_packets = 0;
306     q->size = 0;
307     SDL_UnlockMutex(q->mutex);
308 }
309
310 static void packet_queue_end(PacketQueue *q)
311 {
312     packet_queue_flush(q);
313     SDL_DestroyMutex(q->mutex);
314     SDL_DestroyCond(q->cond);
315 }
316
317 static int packet_queue_put(PacketQueue *q, AVPacket *pkt)
318 {
319     AVPacketList *pkt1;
320
321     /* duplicate the packet */
322     if (pkt != &flush_pkt && av_dup_packet(pkt) < 0)
323         return -1;
324
325     pkt1 = av_malloc(sizeof(AVPacketList));
326     if (!pkt1)
327         return -1;
328     pkt1->pkt = *pkt;
329     pkt1->next = NULL;
330
331
332     SDL_LockMutex(q->mutex);
333
334     if (!q->last_pkt)
335
336         q->first_pkt = pkt1;
337     else
338         q->last_pkt->next = pkt1;
339     q->last_pkt = pkt1;
340     q->nb_packets++;
341     q->size += pkt1->pkt.size + sizeof(*pkt1);
342     /* XXX: should duplicate packet data in DV case */
343     SDL_CondSignal(q->cond);
344
345     SDL_UnlockMutex(q->mutex);
346     return 0;
347 }
348
349 static void packet_queue_abort(PacketQueue *q)
350 {
351     SDL_LockMutex(q->mutex);
352
353     q->abort_request = 1;
354
355     SDL_CondSignal(q->cond);
356
357     SDL_UnlockMutex(q->mutex);
358 }
359
360 /* return < 0 if aborted, 0 if no packet and > 0 if packet.  */
361 static int packet_queue_get(PacketQueue *q, AVPacket *pkt, int block)
362 {
363     AVPacketList *pkt1;
364     int ret;
365
366     SDL_LockMutex(q->mutex);
367
368     for (;;) {
369         if (q->abort_request) {
370             ret = -1;
371             break;
372         }
373
374         pkt1 = q->first_pkt;
375         if (pkt1) {
376             q->first_pkt = pkt1->next;
377             if (!q->first_pkt)
378                 q->last_pkt = NULL;
379             q->nb_packets--;
380             q->size -= pkt1->pkt.size + sizeof(*pkt1);
381             *pkt = pkt1->pkt;
382             av_free(pkt1);
383             ret = 1;
384             break;
385         } else if (!block) {
386             ret = 0;
387             break;
388         } else {
389             SDL_CondWait(q->cond, q->mutex);
390         }
391     }
392     SDL_UnlockMutex(q->mutex);
393     return ret;
394 }
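/* Usage sketch (illustrative only, kept disabled): how a demuxer thread and
 * a decoder thread are expected to drive this queue.  The producer hands
 * ownership of each packet to the queue; the consumer blocks until a packet
 * arrives or packet_queue_abort() wakes it up. */
#if 0
static void producer_sketch(PacketQueue *q, AVFormatContext *ic)
{
    AVPacket pkt;

    while (av_read_frame(ic, &pkt) >= 0)
        packet_queue_put(q, &pkt);              /* the queue now owns pkt */
}

static void consumer_sketch(PacketQueue *q)
{
    AVPacket pkt;

    while (packet_queue_get(q, &pkt, 1) > 0) {  /* blocking read */
        /* ... decode pkt here ... */
        av_free_packet(&pkt);
    }
}
#endif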
395
396 static inline void fill_rectangle(SDL_Surface *screen,
397                                   int x, int y, int w, int h, int color)
398 {
399     SDL_Rect rect;
400     rect.x = x;
401     rect.y = y;
402     rect.w = w;
403     rect.h = h;
404     SDL_FillRect(screen, &rect, color);
405 }
406
407 #define ALPHA_BLEND(a, oldp, newp, s)\
408 ((((oldp << s) * (255 - (a))) + (newp * (a))) / (255 << s))
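/* Worked example (illustrative): with s = 0, a = 128, oldp = 16 and newp = 240,
   ALPHA_BLEND yields (16 * 127 + 240 * 128) / 255 = 128, i.e. roughly the
   midpoint, as expected for ~50% alpha.  The shift 's' compensates for chroma
   values that blend_subrect() accumulates over 2^s source samples. */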
409
410 #define RGBA_IN(r, g, b, a, s)\
411 {\
412     unsigned int v = ((const uint32_t *)(s))[0];\
413     a = (v >> 24) & 0xff;\
414     r = (v >> 16) & 0xff;\
415     g = (v >> 8) & 0xff;\
416     b = v & 0xff;\
417 }
418
419 #define YUVA_IN(y, u, v, a, s, pal)\
420 {\
421     unsigned int val = ((const uint32_t *)(pal))[*(const uint8_t*)(s)];\
422     a = (val >> 24) & 0xff;\
423     y = (val >> 16) & 0xff;\
424     u = (val >> 8) & 0xff;\
425     v = val & 0xff;\
426 }
427
428 #define YUVA_OUT(d, y, u, v, a)\
429 {\
430     ((uint32_t *)(d))[0] = (a << 24) | (y << 16) | (u << 8) | v;\
431 }
432
433
434 #define BPP 1
435
436 static void blend_subrect(AVPicture *dst, const AVSubtitleRect *rect, int imgw, int imgh)
437 {
438     int wrap, wrap3, width2, skip2;
439     int y, u, v, a, u1, v1, a1, w, h;
440     uint8_t *lum, *cb, *cr;
441     const uint8_t *p;
442     const uint32_t *pal;
443     int dstx, dsty, dstw, dsth;
444
445     dstw = av_clip(rect->w, 0, imgw);
446     dsth = av_clip(rect->h, 0, imgh);
447     dstx = av_clip(rect->x, 0, imgw - dstw);
448     dsty = av_clip(rect->y, 0, imgh - dsth);
449     lum = dst->data[0] + dsty * dst->linesize[0];
450     cb  = dst->data[1] + (dsty >> 1) * dst->linesize[1];
451     cr  = dst->data[2] + (dsty >> 1) * dst->linesize[2];
452
453     width2 = ((dstw + 1) >> 1) + (dstx & ~dstw & 1);
454     skip2 = dstx >> 1;
455     wrap = dst->linesize[0];
456     wrap3 = rect->pict.linesize[0];
457     p = rect->pict.data[0];
458     pal = (const uint32_t *)rect->pict.data[1];  /* Now in YCrCb! */
459
460     if (dsty & 1) {
461         lum += dstx;
462         cb += skip2;
463         cr += skip2;
464
465         if (dstx & 1) {
466             YUVA_IN(y, u, v, a, p, pal);
467             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
468             cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
469             cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
470             cb++;
471             cr++;
472             lum++;
473             p += BPP;
474         }
475         for (w = dstw - (dstx & 1); w >= 2; w -= 2) {
476             YUVA_IN(y, u, v, a, p, pal);
477             u1 = u;
478             v1 = v;
479             a1 = a;
480             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
481
482             YUVA_IN(y, u, v, a, p + BPP, pal);
483             u1 += u;
484             v1 += v;
485             a1 += a;
486             lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
487             cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
488             cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
489             cb++;
490             cr++;
491             p += 2 * BPP;
492             lum += 2;
493         }
494         if (w) {
495             YUVA_IN(y, u, v, a, p, pal);
496             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
497             cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
498             cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
499             p++;
500             lum++;
501         }
502         p += wrap3 - dstw * BPP;
503         lum += wrap - dstw - dstx;
504         cb += dst->linesize[1] - width2 - skip2;
505         cr += dst->linesize[2] - width2 - skip2;
506     }
507     for (h = dsth - (dsty & 1); h >= 2; h -= 2) {
508         lum += dstx;
509         cb += skip2;
510         cr += skip2;
511
512         if (dstx & 1) {
513             YUVA_IN(y, u, v, a, p, pal);
514             u1 = u;
515             v1 = v;
516             a1 = a;
517             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
518             p += wrap3;
519             lum += wrap;
520             YUVA_IN(y, u, v, a, p, pal);
521             u1 += u;
522             v1 += v;
523             a1 += a;
524             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
525             cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
526             cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
527             cb++;
528             cr++;
529             p += -wrap3 + BPP;
530             lum += -wrap + 1;
531         }
532         for (w = dstw - (dstx & 1); w >= 2; w -= 2) {
533             YUVA_IN(y, u, v, a, p, pal);
534             u1 = u;
535             v1 = v;
536             a1 = a;
537             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
538
539             YUVA_IN(y, u, v, a, p + BPP, pal);
540             u1 += u;
541             v1 += v;
542             a1 += a;
543             lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
544             p += wrap3;
545             lum += wrap;
546
547             YUVA_IN(y, u, v, a, p, pal);
548             u1 += u;
549             v1 += v;
550             a1 += a;
551             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
552
553             YUVA_IN(y, u, v, a, p + BPP, pal);
554             u1 += u;
555             v1 += v;
556             a1 += a;
557             lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
558
559             cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 2);
560             cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 2);
561
562             cb++;
563             cr++;
564             p += -wrap3 + 2 * BPP;
565             lum += -wrap + 2;
566         }
567         if (w) {
568             YUVA_IN(y, u, v, a, p, pal);
569             u1 = u;
570             v1 = v;
571             a1 = a;
572             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
573             p += wrap3;
574             lum += wrap;
575             YUVA_IN(y, u, v, a, p, pal);
576             u1 += u;
577             v1 += v;
578             a1 += a;
579             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
580             cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
581             cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
582             cb++;
583             cr++;
584             p += -wrap3 + BPP;
585             lum += -wrap + 1;
586         }
587         p += wrap3 + (wrap3 - dstw * BPP);
588         lum += wrap + (wrap - dstw - dstx);
589         cb += dst->linesize[1] - width2 - skip2;
590         cr += dst->linesize[2] - width2 - skip2;
591     }
592     /* handle odd height */
593     if (h) {
594         lum += dstx;
595         cb += skip2;
596         cr += skip2;
597
598         if (dstx & 1) {
599             YUVA_IN(y, u, v, a, p, pal);
600             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
601             cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
602             cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
603             cb++;
604             cr++;
605             lum++;
606             p += BPP;
607         }
608         for (w = dstw - (dstx & 1); w >= 2; w -= 2) {
609             YUVA_IN(y, u, v, a, p, pal);
610             u1 = u;
611             v1 = v;
612             a1 = a;
613             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
614
615             YUVA_IN(y, u, v, a, p + BPP, pal);
616             u1 += u;
617             v1 += v;
618             a1 += a;
619             lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
620             cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
621             cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
622             cb++;
623             cr++;
624             p += 2 * BPP;
625             lum += 2;
626         }
627         if (w) {
628             YUVA_IN(y, u, v, a, p, pal);
629             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
630             cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
631             cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
632         }
633     }
634 }
635
636 static void free_subpicture(SubPicture *sp)
637 {
638     avsubtitle_free(&sp->sub);
639 }
640
641 static void video_image_display(VideoState *is)
642 {
643     VideoPicture *vp;
644     SubPicture *sp;
645     AVPicture pict;
646     float aspect_ratio;
647     int width, height, x, y;
648     SDL_Rect rect;
649     int i;
650
651     vp = &is->pictq[is->pictq_rindex];
652     if (vp->bmp) {
653 #if CONFIG_AVFILTER
654          if (!vp->sar.num)
655              aspect_ratio = 0;
656          else
657              aspect_ratio = av_q2d(vp->sar);
658 #else
659
660         /* XXX: use variable in the frame */
661         if (is->video_st->sample_aspect_ratio.num)
662             aspect_ratio = av_q2d(is->video_st->sample_aspect_ratio);
663         else if (is->video_st->codec->sample_aspect_ratio.num)
664             aspect_ratio = av_q2d(is->video_st->codec->sample_aspect_ratio);
665         else
666             aspect_ratio = 0;
667 #endif
668         if (aspect_ratio <= 0.0)
669             aspect_ratio = 1.0;
670         aspect_ratio *= (float)vp->width / (float)vp->height;
671
672         if (is->subtitle_st)
673         {
674             if (is->subpq_size > 0)
675             {
676                 sp = &is->subpq[is->subpq_rindex];
677
678                 if (vp->pts >= sp->pts + ((float) sp->sub.start_display_time / 1000))
679                 {
680                     SDL_LockYUVOverlay (vp->bmp);
681
682                     pict.data[0] = vp->bmp->pixels[0];
683                     pict.data[1] = vp->bmp->pixels[2];
684                     pict.data[2] = vp->bmp->pixels[1];
685
686                     pict.linesize[0] = vp->bmp->pitches[0];
687                     pict.linesize[1] = vp->bmp->pitches[2];
688                     pict.linesize[2] = vp->bmp->pitches[1];
689
690                     for (i = 0; i < sp->sub.num_rects; i++)
691                         blend_subrect(&pict, sp->sub.rects[i],
692                                       vp->bmp->w, vp->bmp->h);
693
694                     SDL_UnlockYUVOverlay (vp->bmp);
695                 }
696             }
697         }
698
699
700         /* XXX: we suppose the screen has a 1.0 pixel ratio */
701         height = is->height;
702         width = ((int)rint(height * aspect_ratio)) & ~1;
703         if (width > is->width) {
704             width = is->width;
705             height = ((int)rint(width / aspect_ratio)) & ~1;
706         }
707         x = (is->width - width) / 2;
708         y = (is->height - height) / 2;
709         is->no_background = 0;
710         rect.x = is->xleft + x;
711         rect.y = is->ytop  + y;
712         rect.w = width;
713         rect.h = height;
714         SDL_DisplayYUVOverlay(vp->bmp, &rect);
715     }
716 }
717
718 /* get the current audio output buffer size, in bytes. With SDL, we
719    cannot get precise buffer fullness information */
720 static int audio_write_get_buf_size(VideoState *is)
721 {
722     return is->audio_buf_size - is->audio_buf_index;
723 }
724
725 static inline int compute_mod(int a, int b)
726 {
727     a = a % b;
728     if (a >= 0)
729         return a;
730     else
731         return a + b;
732 }
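/* For example (illustrative), compute_mod(-3, SAMPLE_ARRAY_SIZE) returns
   SAMPLE_ARRAY_SIZE - 3: unlike the plain '%' operator, the result is never
   negative, which is what the circular sample_array indexing relies on. */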
733
734 static void video_audio_display(VideoState *s)
735 {
736     int i, i_start, x, y1, y, ys, delay, n, nb_display_channels;
737     int ch, channels, h, h2, bgcolor, fgcolor;
738     int64_t time_diff;
739     int rdft_bits, nb_freq;
740
741     for (rdft_bits = 1; (1 << rdft_bits) < 2 * s->height; rdft_bits++)
742         ;
743     nb_freq = 1 << (rdft_bits - 1);
744
745     /* compute display index : center on currently output samples */
746     channels = s->sdl_channels;
747     nb_display_channels = channels;
748     if (!s->paused) {
749         int data_used = s->show_audio == 1 ? s->width : (2 * nb_freq);
750         n = 2 * channels;
751         delay = audio_write_get_buf_size(s);
752         delay /= n;
753
754         /* to be more precise, we take into account the time spent since
755            the last buffer computation */
756         if (audio_callback_time) {
757             time_diff = av_gettime() - audio_callback_time;
758             delay -= (time_diff * s->sdl_sample_rate) / 1000000;
759         }
760
761         delay += 2 * data_used;
762         if (delay < data_used)
763             delay = data_used;
764
765         i_start= x = compute_mod(s->sample_array_index - delay * channels, SAMPLE_ARRAY_SIZE);
766         if (s->show_audio == 1) {
767             h = INT_MIN;
768             for (i = 0; i < 1000; i += channels) {
769                 int idx = (SAMPLE_ARRAY_SIZE + x - i) % SAMPLE_ARRAY_SIZE;
770                 int a = s->sample_array[idx];
771                 int b = s->sample_array[(idx + 4 * channels) % SAMPLE_ARRAY_SIZE];
772                 int c = s->sample_array[(idx + 5 * channels) % SAMPLE_ARRAY_SIZE];
773                 int d = s->sample_array[(idx + 9 * channels) % SAMPLE_ARRAY_SIZE];
774                 int score = a - d;
775                 if (h < score && (b ^ c) < 0) {
776                     h = score;
777                     i_start = idx;
778                 }
779             }
780         }
781
782         s->last_i_start = i_start;
783     } else {
784         i_start = s->last_i_start;
785     }
786
787     bgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0x00);
788     if (s->show_audio == 1) {
789         fill_rectangle(screen,
790                        s->xleft, s->ytop, s->width, s->height,
791                        bgcolor);
792
793         fgcolor = SDL_MapRGB(screen->format, 0xff, 0xff, 0xff);
794
795         /* total height for one channel */
796         h = s->height / nb_display_channels;
797         /* graph height / 2 */
798         h2 = (h * 9) / 20;
799         for (ch = 0; ch < nb_display_channels; ch++) {
800             i = i_start + ch;
801             y1 = s->ytop + ch * h + (h / 2); /* position of center line */
802             for (x = 0; x < s->width; x++) {
803                 y = (s->sample_array[i] * h2) >> 15;
804                 if (y < 0) {
805                     y = -y;
806                     ys = y1 - y;
807                 } else {
808                     ys = y1;
809                 }
810                 fill_rectangle(screen,
811                                s->xleft + x, ys, 1, y,
812                                fgcolor);
813                 i += channels;
814                 if (i >= SAMPLE_ARRAY_SIZE)
815                     i -= SAMPLE_ARRAY_SIZE;
816             }
817         }
818
819         fgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0xff);
820
821         for (ch = 1; ch < nb_display_channels; ch++) {
822             y = s->ytop + ch * h;
823             fill_rectangle(screen,
824                            s->xleft, y, s->width, 1,
825                            fgcolor);
826         }
827         SDL_UpdateRect(screen, s->xleft, s->ytop, s->width, s->height);
828     } else {
829         nb_display_channels= FFMIN(nb_display_channels, 2);
830         if (rdft_bits != s->rdft_bits) {
831             av_rdft_end(s->rdft);
832             av_free(s->rdft_data);
833             s->rdft = av_rdft_init(rdft_bits, DFT_R2C);
834             s->rdft_bits = rdft_bits;
835             s->rdft_data = av_malloc(4 * nb_freq * sizeof(*s->rdft_data));
836         }
837         {
838             FFTSample *data[2];
839             for (ch = 0; ch < nb_display_channels; ch++) {
840                 data[ch] = s->rdft_data + 2 * nb_freq * ch;
841                 i = i_start + ch;
842                 for (x = 0; x < 2 * nb_freq; x++) {
843                     double w = (x-nb_freq) * (1.0 / nb_freq);
844                     data[ch][x] = s->sample_array[i] * (1.0 - w * w);
845                     i += channels;
846                     if (i >= SAMPLE_ARRAY_SIZE)
847                         i -= SAMPLE_ARRAY_SIZE;
848                 }
849                 av_rdft_calc(s->rdft, data[ch]);
850             }
851             /* This is the least efficient way to do this; we should of course
852              * access the data directly, but it is more than fast enough. */
853             for (y = 0; y < s->height; y++) {
854                 double w = 1 / sqrt(nb_freq);
855                 int a = sqrt(w * sqrt(data[0][2 * y + 0] * data[0][2 * y + 0] + data[0][2 * y + 1] * data[0][2 * y + 1]));
856                 int b = (nb_display_channels == 2 ) ? sqrt(w * sqrt(data[1][2 * y + 0] * data[1][2 * y + 0]
857                        + data[1][2 * y + 1] * data[1][2 * y + 1])) : a;
858                 a = FFMIN(a, 255);
859                 b = FFMIN(b, 255);
860                 fgcolor = SDL_MapRGB(screen->format, a, b, (a + b) / 2);
861
862                 fill_rectangle(screen,
863                             s->xpos, s->height-y, 1, 1,
864                             fgcolor);
865             }
866         }
867         SDL_UpdateRect(screen, s->xpos, s->ytop, 1, s->height);
868         s->xpos++;
869         if (s->xpos >= s->width)
870             s->xpos= s->xleft;
871     }
872 }
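/* Sizing note for video_audio_display() (illustrative): for a window 480
   pixels high, the rdft_bits loop above selects rdft_bits = 10 (the smallest
   value with 2^rdft_bits >= 960), so nb_freq = 512 and the spectrum view has
   at least one frequency bin per display row. */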
873
874 static int video_open(VideoState *is)
875 {
876     int flags = SDL_HWSURFACE | SDL_ASYNCBLIT | SDL_HWACCEL;
877     int w,h;
878
879     if (is_full_screen) flags |= SDL_FULLSCREEN;
880     else                flags |= SDL_RESIZABLE;
881
882     if (is_full_screen && fs_screen_width) {
883         w = fs_screen_width;
884         h = fs_screen_height;
885     } else if (!is_full_screen && screen_width) {
886         w = screen_width;
887         h = screen_height;
888 #if CONFIG_AVFILTER
889     } else if (is->out_video_filter && is->out_video_filter->inputs[0]) {
890         w = is->out_video_filter->inputs[0]->w;
891         h = is->out_video_filter->inputs[0]->h;
892 #else
893     } else if (is->video_st && is->video_st->codec->width) {
894         w = is->video_st->codec->width;
895         h = is->video_st->codec->height;
896 #endif
897     } else {
898         w = 640;
899         h = 480;
900     }
901     if (screen && is->width == screen->w && screen->w == w
902        && is->height== screen->h && screen->h == h)
903         return 0;
904
905 #if defined(__APPLE__) && !SDL_VERSION_ATLEAST(1, 2, 14)
906     /* setting bits_per_pixel = 0 or 32 causes blank video on OS X and older SDL */
907     screen = SDL_SetVideoMode(w, h, 24, flags);
908 #else
909     screen = SDL_SetVideoMode(w, h, 0, flags);
910 #endif
911     if (!screen) {
912         fprintf(stderr, "SDL: could not set video mode - exiting\n");
913         return -1;
914     }
915     if (!window_title)
916         window_title = input_filename;
917     SDL_WM_SetCaption(window_title, window_title);
918
919     is->width  = screen->w;
920     is->height = screen->h;
921
922     return 0;
923 }
924
925 /* display the current picture, if any */
926 static void video_display(VideoState *is)
927 {
928     if (!screen)
929         video_open(cur_stream);
930     if (is->audio_st && is->show_audio)
931         video_audio_display(is);
932     else if (is->video_st)
933         video_image_display(is);
934 }
935
936 static int refresh_thread(void *opaque)
937 {
938     VideoState *is= opaque;
939     while (!is->abort_request) {
940         SDL_Event event;
941         event.type = FF_REFRESH_EVENT;
942         event.user.data1 = opaque;
943         if (!is->refresh) {
944             is->refresh = 1;
945             SDL_PushEvent(&event);
946         }
947         av_usleep(is->audio_st && is->show_audio ? rdftspeed * 1000 : 5000); // FIXME: ideally we should wait for the correct time, but SDL's event passing is so slow it would be silly
948     }
949     return 0;
950 }
951
952 /* get the current audio clock value */
953 static double get_audio_clock(VideoState *is)
954 {
955     double pts;
956     int hw_buf_size, bytes_per_sec;
957     pts = is->audio_clock;
958     hw_buf_size = audio_write_get_buf_size(is);
959     bytes_per_sec = 0;
960     if (is->audio_st) {
961         bytes_per_sec = is->sdl_sample_rate * is->sdl_channels *
962                         av_get_bytes_per_sample(is->sdl_sample_fmt);
963     }
964     if (bytes_per_sec)
965         pts -= (double)hw_buf_size / bytes_per_sec;
966     return pts;
967 }
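/* Worked example (illustrative numbers): with 44100 Hz stereo S16 output,
   bytes_per_sec = 44100 * 2 * 2 = 176400.  If 8192 bytes are still queued for
   the audio device, the returned clock is pulled back by 8192 / 176400, about
   46 ms, since those samples have not been played yet. */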
968
969 /* get the current video clock value */
970 static double get_video_clock(VideoState *is)
971 {
972     if (is->paused) {
973         return is->video_current_pts;
974     } else {
975         return is->video_current_pts_drift + av_gettime() / 1000000.0;
976     }
977 }
978
979 /* get the current external clock value */
980 static double get_external_clock(VideoState *is)
981 {
982     int64_t ti;
983     ti = av_gettime();
984     return is->external_clock + ((ti - is->external_clock_time) * 1e-6);
985 }
986
987 /* get the current master clock value */
988 static double get_master_clock(VideoState *is)
989 {
990     double val;
991
992     if (is->av_sync_type == AV_SYNC_VIDEO_MASTER) {
993         if (is->video_st)
994             val = get_video_clock(is);
995         else
996             val = get_audio_clock(is);
997     } else if (is->av_sync_type == AV_SYNC_AUDIO_MASTER) {
998         if (is->audio_st)
999             val = get_audio_clock(is);
1000         else
1001             val = get_video_clock(is);
1002     } else {
1003         val = get_external_clock(is);
1004     }
1005     return val;
1006 }
1007
1008 /* seek in the stream */
1009 static void stream_seek(VideoState *is, int64_t pos, int64_t rel, int seek_by_bytes)
1010 {
1011     if (!is->seek_req) {
1012         is->seek_pos = pos;
1013         is->seek_rel = rel;
1014         is->seek_flags &= ~AVSEEK_FLAG_BYTE;
1015         if (seek_by_bytes)
1016             is->seek_flags |= AVSEEK_FLAG_BYTE;
1017         is->seek_req = 1;
1018     }
1019 }
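/* Usage sketch (illustrative only, kept disabled): a key handler asking for a
 * 10 second forward seek in time units might look like this; 'incr' and 'pos'
 * are hypothetical locals of the caller. */
#if 0
{
    double incr = 10.0;
    double pos  = get_master_clock(cur_stream) + incr;
    stream_seek(cur_stream, (int64_t)(pos * AV_TIME_BASE),
                (int64_t)(incr * AV_TIME_BASE), 0);
}
#endif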
1020
1021 /* pause or resume the video */
1022 static void stream_pause(VideoState *is)
1023 {
1024     if (is->paused) {
1025         is->frame_timer += av_gettime() / 1000000.0 + is->video_current_pts_drift - is->video_current_pts;
1026         if (is->read_pause_return != AVERROR(ENOSYS)) {
1027             is->video_current_pts = is->video_current_pts_drift + av_gettime() / 1000000.0;
1028         }
1029         is->video_current_pts_drift = is->video_current_pts - av_gettime() / 1000000.0;
1030     }
1031     is->paused = !is->paused;
1032 }
1033
1034 static double compute_target_time(double frame_current_pts, VideoState *is)
1035 {
1036     double delay, sync_threshold, diff;
1037
1038     /* compute nominal delay */
1039     delay = frame_current_pts - is->frame_last_pts;
1040     if (delay <= 0 || delay >= 10.0) {
1041         /* if incorrect delay, use previous one */
1042         delay = is->frame_last_delay;
1043     } else {
1044         is->frame_last_delay = delay;
1045     }
1046     is->frame_last_pts = frame_current_pts;
1047
1048     /* update delay to follow master synchronisation source */
1049     if (((is->av_sync_type == AV_SYNC_AUDIO_MASTER && is->audio_st) ||
1050          is->av_sync_type == AV_SYNC_EXTERNAL_CLOCK)) {
1051         /* if video is slave, we try to correct big delays by
1052            duplicating or deleting a frame */
1053         diff = get_video_clock(is) - get_master_clock(is);
1054
1055         /* skip or repeat frame. We take into account the
1056            delay to compute the threshold. I still don't know
1057            if it is the best guess */
1058         sync_threshold = FFMAX(AV_SYNC_THRESHOLD, delay);
1059         if (fabs(diff) < AV_NOSYNC_THRESHOLD) {
1060             if (diff <= -sync_threshold)
1061                 delay = 0;
1062             else if (diff >= sync_threshold)
1063                 delay = 2 * delay;
1064         }
1065     }
1066     is->frame_timer += delay;
1067
1068     av_dlog(NULL, "video: delay=%0.3f pts=%0.3f A-V=%f\n",
1069             delay, frame_current_pts, -diff);
1070
1071     return is->frame_timer;
1072 }
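/* Worked example (illustrative): at 25 fps the nominal delay is 0.04 s, so
   sync_threshold = FFMAX(0.01, 0.04) = 0.04.  If the video clock is 0.1 s
   behind the master clock (diff = -0.1), the delay collapses to 0 and the
   frame is shown immediately; if it is 0.1 s ahead, the delay doubles to
   0.08 s.  Differences beyond AV_NOSYNC_THRESHOLD (10 s) are left uncorrected. */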
1073
1074 /* called to display each frame */
1075 static void video_refresh_timer(void *opaque)
1076 {
1077     VideoState *is = opaque;
1078     VideoPicture *vp;
1079
1080     SubPicture *sp, *sp2;
1081
1082     if (is->video_st) {
1083 retry:
1084         if (is->pictq_size == 0) {
1085             // nothing to do, no picture to display in the queue
1086         } else {
1087             double time = av_gettime() / 1000000.0;
1088             double next_target;
1089             /* dequeue the picture */
1090             vp = &is->pictq[is->pictq_rindex];
1091
1092             if (time < vp->target_clock)
1093                 return;
1094             /* update current video pts */
1095             is->video_current_pts = vp->pts;
1096             is->video_current_pts_drift = is->video_current_pts - time;
1097             is->video_current_pos = vp->pos;
1098             if (is->pictq_size > 1) {
1099                 VideoPicture *nextvp = &is->pictq[(is->pictq_rindex + 1) % VIDEO_PICTURE_QUEUE_SIZE];
1100                 assert(nextvp->target_clock >= vp->target_clock);
1101                 next_target= nextvp->target_clock;
1102             } else {
1103                 next_target = vp->target_clock + is->video_clock - vp->pts; // FIXME pass durations cleanly
1104             }
1105             if (framedrop && time > next_target) {
1106                 is->skip_frames *= 1.0 + FRAME_SKIP_FACTOR;
1107                 if (is->pictq_size > 1 || time > next_target + 0.5) {
1108                     /* update queue size and signal for next picture */
1109                     if (++is->pictq_rindex == VIDEO_PICTURE_QUEUE_SIZE)
1110                         is->pictq_rindex = 0;
1111
1112                     SDL_LockMutex(is->pictq_mutex);
1113                     is->pictq_size--;
1114                     SDL_CondSignal(is->pictq_cond);
1115                     SDL_UnlockMutex(is->pictq_mutex);
1116                     goto retry;
1117                 }
1118             }
1119
1120             if (is->subtitle_st) {
1121                 if (is->subtitle_stream_changed) {
1122                     SDL_LockMutex(is->subpq_mutex);
1123
1124                     while (is->subpq_size) {
1125                         free_subpicture(&is->subpq[is->subpq_rindex]);
1126
1127                         /* update queue size and signal for next picture */
1128                         if (++is->subpq_rindex == SUBPICTURE_QUEUE_SIZE)
1129                             is->subpq_rindex = 0;
1130
1131                         is->subpq_size--;
1132                     }
1133                     is->subtitle_stream_changed = 0;
1134
1135                     SDL_CondSignal(is->subpq_cond);
1136                     SDL_UnlockMutex(is->subpq_mutex);
1137                 } else {
1138                     if (is->subpq_size > 0) {
1139                         sp = &is->subpq[is->subpq_rindex];
1140
1141                         if (is->subpq_size > 1)
1142                             sp2 = &is->subpq[(is->subpq_rindex + 1) % SUBPICTURE_QUEUE_SIZE];
1143                         else
1144                             sp2 = NULL;
1145
1146                         if ((is->video_current_pts > (sp->pts + ((float) sp->sub.end_display_time / 1000)))
1147                                 || (sp2 && is->video_current_pts > (sp2->pts + ((float) sp2->sub.start_display_time / 1000))))
1148                         {
1149                             free_subpicture(sp);
1150
1151                             /* update queue size and signal for next picture */
1152                             if (++is->subpq_rindex == SUBPICTURE_QUEUE_SIZE)
1153                                 is->subpq_rindex = 0;
1154
1155                             SDL_LockMutex(is->subpq_mutex);
1156                             is->subpq_size--;
1157                             SDL_CondSignal(is->subpq_cond);
1158                             SDL_UnlockMutex(is->subpq_mutex);
1159                         }
1160                     }
1161                 }
1162             }
1163
1164             /* display picture */
1165             if (!display_disable)
1166                 video_display(is);
1167
1168             /* update queue size and signal for next picture */
1169             if (++is->pictq_rindex == VIDEO_PICTURE_QUEUE_SIZE)
1170                 is->pictq_rindex = 0;
1171
1172             SDL_LockMutex(is->pictq_mutex);
1173             is->pictq_size--;
1174             SDL_CondSignal(is->pictq_cond);
1175             SDL_UnlockMutex(is->pictq_mutex);
1176         }
1177     } else if (is->audio_st) {
1178         /* draw the next audio frame */
1179
1180         /* if only audio stream, then display the audio bars (better
1181            than nothing, just to test the implementation) */
1182
1183         /* display picture */
1184         if (!display_disable)
1185             video_display(is);
1186     }
1187     if (show_status) {
1188         static int64_t last_time;
1189         int64_t cur_time;
1190         int aqsize, vqsize, sqsize;
1191         double av_diff;
1192
1193         cur_time = av_gettime();
1194         if (!last_time || (cur_time - last_time) >= 30000) {
1195             aqsize = 0;
1196             vqsize = 0;
1197             sqsize = 0;
1198             if (is->audio_st)
1199                 aqsize = is->audioq.size;
1200             if (is->video_st)
1201                 vqsize = is->videoq.size;
1202             if (is->subtitle_st)
1203                 sqsize = is->subtitleq.size;
1204             av_diff = 0;
1205             if (is->audio_st && is->video_st)
1206                 av_diff = get_audio_clock(is) - get_video_clock(is);
1207             printf("%7.2f A-V:%7.3f s:%3.1f aq=%5dKB vq=%5dKB sq=%5dB f=%"PRId64"/%"PRId64"   \r",
1208                    get_master_clock(is), av_diff, FFMAX(is->skip_frames - 1, 0), aqsize / 1024,
1209                    vqsize / 1024, sqsize, is->pts_ctx.num_faulty_dts, is->pts_ctx.num_faulty_pts);
1210             fflush(stdout);
1211             last_time = cur_time;
1212         }
1213     }
1214 }
1215
1216 static void stream_close(VideoState *is)
1217 {
1218     VideoPicture *vp;
1219     int i;
1220     /* XXX: use a special url_shutdown call to abort parse cleanly */
1221     is->abort_request = 1;
1222     SDL_WaitThread(is->parse_tid, NULL);
1223     SDL_WaitThread(is->refresh_tid, NULL);
1224
1225     /* free all pictures */
1226     for (i = 0; i < VIDEO_PICTURE_QUEUE_SIZE; i++) {
1227         vp = &is->pictq[i];
1228         if (vp->bmp) {
1229             SDL_FreeYUVOverlay(vp->bmp);
1230             vp->bmp = NULL;
1231         }
1232     }
1233     SDL_DestroyMutex(is->pictq_mutex);
1234     SDL_DestroyCond(is->pictq_cond);
1235     SDL_DestroyMutex(is->subpq_mutex);
1236     SDL_DestroyCond(is->subpq_cond);
1237 #if !CONFIG_AVFILTER
1238     if (is->img_convert_ctx)
1239         sws_freeContext(is->img_convert_ctx);
1240 #endif
1241     av_free(is);
1242 }
1243
1244 static void do_exit(void)
1245 {
1246     if (cur_stream) {
1247         stream_close(cur_stream);
1248         cur_stream = NULL;
1249     }
1250     uninit_opts();
1251     avformat_network_deinit();
1252     if (show_status)
1253         printf("\n");
1254     SDL_Quit();
1255     av_log(NULL, AV_LOG_QUIET, "");
1256     exit(0);
1257 }
1258
1259 /* allocate a picture (this needs to be done in the main thread to avoid
1260    potential locking problems) */
1261 static void alloc_picture(void *opaque)
1262 {
1263     VideoState *is = opaque;
1264     VideoPicture *vp;
1265
1266     vp = &is->pictq[is->pictq_windex];
1267
1268     if (vp->bmp)
1269         SDL_FreeYUVOverlay(vp->bmp);
1270
1271 #if CONFIG_AVFILTER
1272     vp->width   = is->out_video_filter->inputs[0]->w;
1273     vp->height  = is->out_video_filter->inputs[0]->h;
1274     vp->pix_fmt = is->out_video_filter->inputs[0]->format;
1275 #else
1276     vp->width   = is->video_st->codec->width;
1277     vp->height  = is->video_st->codec->height;
1278     vp->pix_fmt = is->video_st->codec->pix_fmt;
1279 #endif
1280
1281     vp->bmp = SDL_CreateYUVOverlay(vp->width, vp->height,
1282                                    SDL_YV12_OVERLAY,
1283                                    screen);
1284     if (!vp->bmp || vp->bmp->pitches[0] < vp->width) {
1285         /* SDL allocates a buffer smaller than requested if the video
1286          * overlay hardware is unable to support the requested size. */
1287         fprintf(stderr, "Error: the video system does not support an image\n"
1288                         "size of %dx%d pixels. Try using -vf \"scale=w:h\"\n"
1289                         "to reduce the image size.\n", vp->width, vp->height );
1290         do_exit();
1291     }
1292
1293     SDL_LockMutex(is->pictq_mutex);
1294     vp->allocated = 1;
1295     SDL_CondSignal(is->pictq_cond);
1296     SDL_UnlockMutex(is->pictq_mutex);
1297 }
1298
1299 /* The 'pts' parameter is the dts of the packet / pts of the frame and
1300  * guessed if not known. */
1301 static int queue_picture(VideoState *is, AVFrame *src_frame, double pts, int64_t pos)
1302 {
1303     VideoPicture *vp;
1304 #if CONFIG_AVFILTER
1305     AVPicture pict_src;
1306 #else
1307     int dst_pix_fmt = AV_PIX_FMT_YUV420P;
1308 #endif
1309     /* wait until we have space to put a new picture */
1310     SDL_LockMutex(is->pictq_mutex);
1311
1312     if (is->pictq_size >= VIDEO_PICTURE_QUEUE_SIZE && !is->refresh)
1313         is->skip_frames = FFMAX(1.0 - FRAME_SKIP_FACTOR, is->skip_frames * (1.0 - FRAME_SKIP_FACTOR));
1314
1315     while (is->pictq_size >= VIDEO_PICTURE_QUEUE_SIZE &&
1316            !is->videoq.abort_request) {
1317         SDL_CondWait(is->pictq_cond, is->pictq_mutex);
1318     }
1319     SDL_UnlockMutex(is->pictq_mutex);
1320
1321     if (is->videoq.abort_request)
1322         return -1;
1323
1324     vp = &is->pictq[is->pictq_windex];
1325
1326     /* alloc or resize hardware picture buffer */
1327     if (!vp->bmp || vp->reallocate ||
1328 #if CONFIG_AVFILTER
1329         vp->width  != is->out_video_filter->inputs[0]->w ||
1330         vp->height != is->out_video_filter->inputs[0]->h) {
1331 #else
1332         vp->width != is->video_st->codec->width ||
1333         vp->height != is->video_st->codec->height) {
1334 #endif
1335         SDL_Event event;
1336
1337         vp->allocated  = 0;
1338         vp->reallocate = 0;
1339
1340         /* the allocation must be done in the main thread to avoid
1341            locking problems */
1342         event.type = FF_ALLOC_EVENT;
1343         event.user.data1 = is;
1344         SDL_PushEvent(&event);
1345
1346         /* wait until the picture is allocated */
1347         SDL_LockMutex(is->pictq_mutex);
1348         while (!vp->allocated && !is->videoq.abort_request) {
1349             SDL_CondWait(is->pictq_cond, is->pictq_mutex);
1350         }
1351         SDL_UnlockMutex(is->pictq_mutex);
1352
1353         if (is->videoq.abort_request)
1354             return -1;
1355     }
1356
1357     /* if the frame is not skipped, then display it */
1358     if (vp->bmp) {
1359         AVPicture pict = { { 0 } };
1360
1361         /* get a pointer on the bitmap */
1362         SDL_LockYUVOverlay (vp->bmp);
1363
1364         pict.data[0] = vp->bmp->pixels[0];
1365         pict.data[1] = vp->bmp->pixels[2];
1366         pict.data[2] = vp->bmp->pixels[1];
1367
1368         pict.linesize[0] = vp->bmp->pitches[0];
1369         pict.linesize[1] = vp->bmp->pitches[2];
1370         pict.linesize[2] = vp->bmp->pitches[1];
1371
1372 #if CONFIG_AVFILTER
1373         pict_src.data[0] = src_frame->data[0];
1374         pict_src.data[1] = src_frame->data[1];
1375         pict_src.data[2] = src_frame->data[2];
1376
1377         pict_src.linesize[0] = src_frame->linesize[0];
1378         pict_src.linesize[1] = src_frame->linesize[1];
1379         pict_src.linesize[2] = src_frame->linesize[2];
1380
1381         // FIXME use direct rendering
1382         av_picture_copy(&pict, &pict_src,
1383                         vp->pix_fmt, vp->width, vp->height);
1384 #else
1385         av_opt_get_int(sws_opts, "sws_flags", 0, &sws_flags);
1386         is->img_convert_ctx = sws_getCachedContext(is->img_convert_ctx,
1387             vp->width, vp->height, vp->pix_fmt, vp->width, vp->height,
1388             dst_pix_fmt, sws_flags, NULL, NULL, NULL);
1389         if (is->img_convert_ctx == NULL) {
1390             fprintf(stderr, "Cannot initialize the conversion context\n");
1391             exit(1);
1392         }
1393         sws_scale(is->img_convert_ctx, src_frame->data, src_frame->linesize,
1394                   0, vp->height, pict.data, pict.linesize);
1395 #endif
1396         /* update the bitmap content */
1397         SDL_UnlockYUVOverlay(vp->bmp);
1398
1399         vp->pts = pts;
1400         vp->pos = pos;
1401
1402         /* now we can update the picture count */
1403         if (++is->pictq_windex == VIDEO_PICTURE_QUEUE_SIZE)
1404             is->pictq_windex = 0;
1405         SDL_LockMutex(is->pictq_mutex);
1406         vp->target_clock = compute_target_time(vp->pts, is);
1407
1408         is->pictq_size++;
1409         SDL_UnlockMutex(is->pictq_mutex);
1410     }
1411     return 0;
1412 }
1413
1414 /* Compute the exact PTS for the picture if it is omitted in the stream.
1415  * The 'pts1' parameter is the dts of the packet / pts of the frame. */
1416 static int output_picture2(VideoState *is, AVFrame *src_frame, double pts1, int64_t pos)
1417 {
1418     double frame_delay, pts;
1419     int ret;
1420
1421     pts = pts1;
1422
1423     if (pts != 0) {
1424         /* update video clock with pts, if present */
1425         is->video_clock = pts;
1426     } else {
1427         pts = is->video_clock;
1428     }
1429     /* update video clock for next frame */
1430     frame_delay = av_q2d(is->video_st->codec->time_base);
1431     /* for MPEG2, the frame can be repeated, so we update the
1432        clock accordingly */
1433     frame_delay += src_frame->repeat_pict * (frame_delay * 0.5);
1434     is->video_clock += frame_delay;
1435
1436     ret = queue_picture(is, src_frame, pts, pos);
1437     av_frame_unref(src_frame);
1438     return ret;
1439 }
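/* Worked example (illustrative): with frame_delay = 0.04 s, a picture that the
   decoder marks with repeat_pict = 1 advances the video clock by
   0.04 + 1 * 0.02 = 0.06 s instead of 0.04 s. */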
1440
1441 static int get_video_frame(VideoState *is, AVFrame *frame, int64_t *pts, AVPacket *pkt)
1442 {
1443     int got_picture, i;
1444
1445     if (packet_queue_get(&is->videoq, pkt, 1) < 0)
1446         return -1;
1447
1448     if (pkt->data == flush_pkt.data) {
1449         avcodec_flush_buffers(is->video_st->codec);
1450
1451         SDL_LockMutex(is->pictq_mutex);
1452         // Make sure there are no long delay timers (ideally we should just flush the queue, but that's harder)
1453         for (i = 0; i < VIDEO_PICTURE_QUEUE_SIZE; i++) {
1454             is->pictq[i].target_clock= 0;
1455         }
1456         while (is->pictq_size && !is->videoq.abort_request) {
1457             SDL_CondWait(is->pictq_cond, is->pictq_mutex);
1458         }
1459         is->video_current_pos = -1;
1460         SDL_UnlockMutex(is->pictq_mutex);
1461
1462         init_pts_correction(&is->pts_ctx);
1463         is->frame_last_pts = AV_NOPTS_VALUE;
1464         is->frame_last_delay = 0;
1465         is->frame_timer = (double)av_gettime() / 1000000.0;
1466         is->skip_frames = 1;
1467         is->skip_frames_index = 0;
1468         return 0;
1469     }
1470
1471     avcodec_decode_video2(is->video_st->codec, frame, &got_picture, pkt);
1472
1473     if (got_picture) {
1474         if (decoder_reorder_pts == -1) {
1475             *pts = guess_correct_pts(&is->pts_ctx, frame->pkt_pts, frame->pkt_dts);
1476         } else if (decoder_reorder_pts) {
1477             *pts = frame->pkt_pts;
1478         } else {
1479             *pts = frame->pkt_dts;
1480         }
1481
1482         if (*pts == AV_NOPTS_VALUE) {
1483             *pts = 0;
1484         }
1485         if (is->video_st->sample_aspect_ratio.num) {
1486             frame->sample_aspect_ratio = is->video_st->sample_aspect_ratio;
1487         }
1488
1489         is->skip_frames_index += 1;
1490         if (is->skip_frames_index >= is->skip_frames) {
1491             is->skip_frames_index -= FFMAX(is->skip_frames, 1.0);
1492             return 1;
1493         }
1494         av_frame_unref(frame);
1495     }
1496     return 0;
1497 }
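/* Frame-dropping example (illustrative): with is->skip_frames = 2.0 the
   counter above returns 1 for every second decoded picture and unrefs the
   rest, halving the display load.  skip_frames itself grows by
   FRAME_SKIP_FACTOR in video_refresh_timer() whenever playback falls behind,
   and is scaled back down in queue_picture() when decoding runs ahead of
   display. */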
1498
1499 #if CONFIG_AVFILTER
1500 static int configure_video_filters(AVFilterGraph *graph, VideoState *is, const char *vfilters)
1501 {
1502     char sws_flags_str[128];
1503     char buffersrc_args[256];
1504     int ret;
1505     AVFilterContext *filt_src = NULL, *filt_out = NULL, *filt_format;
1506     AVCodecContext *codec = is->video_st->codec;
1507
1508     snprintf(sws_flags_str, sizeof(sws_flags_str), "flags=%"PRId64, sws_flags);
1509     graph->scale_sws_opts = av_strdup(sws_flags_str);
1510
1511     snprintf(buffersrc_args, sizeof(buffersrc_args), "%d:%d:%d:%d:%d:%d:%d",
1512              codec->width, codec->height, codec->pix_fmt,
1513              is->video_st->time_base.num, is->video_st->time_base.den,
1514              codec->sample_aspect_ratio.num, codec->sample_aspect_ratio.den);
1515
1516
1517     if ((ret = avfilter_graph_create_filter(&filt_src,
1518                                             avfilter_get_by_name("buffer"),
1519                                             "src", buffersrc_args, NULL,
1520                                             graph)) < 0)
1521         return ret;
1522     if ((ret = avfilter_graph_create_filter(&filt_out,
1523                                             avfilter_get_by_name("buffersink"),
1524                                             "out", NULL, NULL, graph)) < 0)
1525         return ret;
1526
1527     if ((ret = avfilter_graph_create_filter(&filt_format,
1528                                             avfilter_get_by_name("format"),
1529                                             "format", "yuv420p", NULL, graph)) < 0)
1530         return ret;
1531     if ((ret = avfilter_link(filt_format, 0, filt_out, 0)) < 0)
1532         return ret;
1533
1534
1535     if (vfilters) {
1536         AVFilterInOut *outputs = avfilter_inout_alloc();
1537         AVFilterInOut *inputs  = avfilter_inout_alloc();
1538
1539         outputs->name    = av_strdup("in");
1540         outputs->filter_ctx = filt_src;
1541         outputs->pad_idx = 0;
1542         outputs->next    = NULL;
1543
1544         inputs->name    = av_strdup("out");
1545         inputs->filter_ctx = filt_format;
1546         inputs->pad_idx = 0;
1547         inputs->next    = NULL;
1548
1549         if ((ret = avfilter_graph_parse(graph, vfilters, inputs, outputs, NULL)) < 0)
1550             return ret;
1551     } else {
1552         if ((ret = avfilter_link(filt_src, 0, filt_format, 0)) < 0)
1553             return ret;
1554     }
1555
1556     if ((ret = avfilter_graph_config(graph, NULL)) < 0)
1557         return ret;
1558
1559     is->in_video_filter  = filt_src;
1560     is->out_video_filter = filt_out;
1561
1562     return ret;
1563 }
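/* Resulting topology (illustrative): with a user filter string such as
 * "hflip" supplied via the -vf option, the chain built above is
 *
 *     buffer ("src") -> hflip -> format=yuv420p -> buffersink ("out")
 *
 * and with no user filters the source is linked directly to the format
 * filter:
 *
 *     buffer ("src") -> format=yuv420p -> buffersink ("out")
 */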
1564
1565 #endif  /* CONFIG_AVFILTER */
1566
1567 static int video_thread(void *arg)
1568 {
1569     AVPacket pkt = { 0 };
1570     VideoState *is = arg;
1571     AVFrame *frame = av_frame_alloc();
1572     int64_t pts_int;
1573     double pts;
1574     int ret;
1575
1576 #if CONFIG_AVFILTER
1577     AVFilterGraph *graph = avfilter_graph_alloc();
1578     AVFilterContext *filt_out = NULL, *filt_in = NULL;
1579     int last_w = is->video_st->codec->width;
1580     int last_h = is->video_st->codec->height;
1581
1582     if ((ret = configure_video_filters(graph, is, vfilters)) < 0)
1583         goto the_end;
1584     filt_in  = is->in_video_filter;
1585     filt_out = is->out_video_filter;
1586 #endif
1587
1588     for (;;) {
1589 #if CONFIG_AVFILTER
1590         AVRational tb;
1591 #endif
1592         while (is->paused && !is->videoq.abort_request)
1593             SDL_Delay(10);
1594
1595         av_free_packet(&pkt);
1596
1597         ret = get_video_frame(is, frame, &pts_int, &pkt);
1598         if (ret < 0)
1599             goto the_end;
1600
1601         if (!ret)
1602             continue;
1603
1604 #if CONFIG_AVFILTER
1605         if (   last_w != is->video_st->codec->width
1606             || last_h != is->video_st->codec->height) {
1607             av_dlog(NULL, "Changing size %dx%d -> %dx%d\n", last_w, last_h,
1608                     is->video_st->codec->width, is->video_st->codec->height);
1609             avfilter_graph_free(&graph);
1610             graph = avfilter_graph_alloc();
1611             if ((ret = configure_video_filters(graph, is, vfilters)) < 0)
1612                 goto the_end;
1613             filt_in  = is->in_video_filter;
1614             filt_out = is->out_video_filter;
1615             last_w = is->video_st->codec->width;
1616             last_h = is->video_st->codec->height;
1617         }
1618
1619         frame->pts = pts_int;
1620         ret = av_buffersrc_add_frame(filt_in, frame);
1621         if (ret < 0)
1622             goto the_end;
1623
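             /* drain every frame the graph has ready; av_buffersink_get_frame()
                returns a negative value (AVERROR(EAGAIN) or AVERROR_EOF) once
                nothing more is available */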
1624         while (ret >= 0) {
1625             ret = av_buffersink_get_frame(filt_out, frame);
1626             if (ret < 0) {
1627                 ret = 0;
1628                 break;
1629             }
1630
1631             pts_int = frame->pts;
1632             tb      = filt_out->inputs[0]->time_base;
1633             if (av_cmp_q(tb, is->video_st->time_base)) {
1634                 av_unused int64_t pts1 = pts_int;
1635                 pts_int = av_rescale_q(pts_int, tb, is->video_st->time_base);
1636                 av_dlog(NULL, "video_thread(): "
1637                         "tb:%d/%d pts:%"PRId64" -> tb:%d/%d pts:%"PRId64"\n",
1638                         tb.num, tb.den, pts1,
1639                         is->video_st->time_base.num, is->video_st->time_base.den, pts_int);
1640             }
1641             pts = pts_int * av_q2d(is->video_st->time_base);
1642             ret = output_picture2(is, frame, pts, 0);
1643         }
1644 #else
1645         pts = pts_int * av_q2d(is->video_st->time_base);
1646         ret = output_picture2(is, frame, pts,  pkt.pos);
1647 #endif
1648
1649         if (ret < 0)
1650             goto the_end;
1651
1652
1653         if (step)
1654             if (cur_stream)
1655                 stream_pause(cur_stream);
1656     }
1657  the_end:
1658 #if CONFIG_AVFILTER
1659     av_freep(&vfilters);
1660     avfilter_graph_free(&graph);
1661 #endif
1662     av_free_packet(&pkt);
1663     av_frame_free(&frame);
1664     return 0;
1665 }
1666
1667 static int subtitle_thread(void *arg)
1668 {
1669     VideoState *is = arg;
1670     SubPicture *sp;
1671     AVPacket pkt1, *pkt = &pkt1;
1672     int got_subtitle;
1673     double pts;
1674     int i, j;
1675     int r, g, b, y, u, v, a;
1676
1677     for (;;) {
1678         while (is->paused && !is->subtitleq.abort_request) {
1679             SDL_Delay(10);
1680         }
1681         if (packet_queue_get(&is->subtitleq, pkt, 1) < 0)
1682             break;
1683
1684         if (pkt->data == flush_pkt.data) {
1685             avcodec_flush_buffers(is->subtitle_st->codec);
1686             continue;
1687         }
1688         SDL_LockMutex(is->subpq_mutex);
1689         while (is->subpq_size >= SUBPICTURE_QUEUE_SIZE &&
1690                !is->subtitleq.abort_request) {
1691             SDL_CondWait(is->subpq_cond, is->subpq_mutex);
1692         }
1693         SDL_UnlockMutex(is->subpq_mutex);
1694
1695         if (is->subtitleq.abort_request)
1696             return 0;
1697
1698         sp = &is->subpq[is->subpq_windex];
1699
1700         /* NOTE: pts is the PTS of the _first_ subtitle beginning in
1701            this packet, if any */
1702         pts = 0;
1703         if (pkt->pts != AV_NOPTS_VALUE)
1704             pts = av_q2d(is->subtitle_st->time_base) * pkt->pts;
1705
1706         avcodec_decode_subtitle2(is->subtitle_st->codec, &sp->sub,
1707                                  &got_subtitle, pkt);
1708
1709         if (got_subtitle && sp->sub.format == 0) {
1710             sp->pts = pts;
1711
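                 /* convert the palette of each bitmap rectangle from RGBA to CCIR
                    YUVA so it can be blended directly onto the YUV video overlay */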
1712             for (i = 0; i < sp->sub.num_rects; i++)
1713             {
1714                 for (j = 0; j < sp->sub.rects[i]->nb_colors; j++)
1715                 {
1716                     RGBA_IN(r, g, b, a, (uint32_t*)sp->sub.rects[i]->pict.data[1] + j);
1717                     y = RGB_TO_Y_CCIR(r, g, b);
1718                     u = RGB_TO_U_CCIR(r, g, b, 0);
1719                     v = RGB_TO_V_CCIR(r, g, b, 0);
1720                     YUVA_OUT((uint32_t*)sp->sub.rects[i]->pict.data[1] + j, y, u, v, a);
1721                 }
1722             }
1723
1724             /* now we can update the picture count */
1725             if (++is->subpq_windex == SUBPICTURE_QUEUE_SIZE)
1726                 is->subpq_windex = 0;
1727             SDL_LockMutex(is->subpq_mutex);
1728             is->subpq_size++;
1729             SDL_UnlockMutex(is->subpq_mutex);
1730         }
1731         av_free_packet(pkt);
1732     }
1733     return 0;
1734 }
1735
1736 /* copy decoded samples into the ring buffer used by the waveform / spectrum display */
1737 static void update_sample_display(VideoState *is, short *samples, int samples_size)
1738 {
1739     int size, len;
1740
1741     size = samples_size / sizeof(short);
1742     while (size > 0) {
1743         len = SAMPLE_ARRAY_SIZE - is->sample_array_index;
1744         if (len > size)
1745             len = size;
1746         memcpy(is->sample_array + is->sample_array_index, samples, len * sizeof(short));
1747         samples += len;
1748         is->sample_array_index += len;
1749         if (is->sample_array_index >= SAMPLE_ARRAY_SIZE)
1750             is->sample_array_index = 0;
1751         size -= len;
1752     }
1753 }
1754
1755 /* return the new audio buffer size (samples can be added or removed
1756    to improve sync when video or an external clock is the master) */
1757 static int synchronize_audio(VideoState *is, short *samples,
1758                              int samples_size1, double pts)
1759 {
1760     int n, samples_size;
1761     double ref_clock;
1762
1763     n = is->sdl_channels * av_get_bytes_per_sample(is->sdl_sample_fmt);
1764     samples_size = samples_size1;
1765
1766     /* if not master, then we try to remove or add samples to correct the clock */
1767     if (((is->av_sync_type == AV_SYNC_VIDEO_MASTER && is->video_st) ||
1768          is->av_sync_type == AV_SYNC_EXTERNAL_CLOCK)) {
1769         double diff, avg_diff;
1770         int wanted_size, min_size, max_size, nb_samples;
1771
1772         ref_clock = get_master_clock(is);
1773         diff = get_audio_clock(is) - ref_clock;
1774
1775         if (fabs(diff) < AV_NOSYNC_THRESHOLD) {
1776             is->audio_diff_cum = diff + is->audio_diff_avg_coef * is->audio_diff_cum;
1777             if (is->audio_diff_avg_count < AUDIO_DIFF_AVG_NB) {
1778                 /* not enough measurements yet for a reliable estimate */
1779                 is->audio_diff_avg_count++;
1780             } else {
1781                 /* estimate the A-V difference */
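                     /* audio_diff_cum is a geometric sum of past differences with
                        ratio audio_diff_avg_coef; scaling it by (1 - coef) yields an
                        exponentially weighted average over roughly the last
                        AUDIO_DIFF_AVG_NB measurements */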
1782                 avg_diff = is->audio_diff_cum * (1.0 - is->audio_diff_avg_coef);
1783
1784                 if (fabs(avg_diff) >= is->audio_diff_threshold) {
1785                     wanted_size = samples_size + ((int)(diff * is->sdl_sample_rate) * n);
1786                     nb_samples = samples_size / n;
1787
1788                     min_size = ((nb_samples * (100 - SAMPLE_CORRECTION_PERCENT_MAX)) / 100) * n;
1789                     max_size = ((nb_samples * (100 + SAMPLE_CORRECTION_PERCENT_MAX)) / 100) * n;
1790                     if (wanted_size < min_size)
1791                         wanted_size = min_size;
1792                     else if (wanted_size > max_size)
1793                         wanted_size = max_size;
1794
1795                     /* add or remove samples to correct the sync */
1796                     if (wanted_size < samples_size) {
1797                         /* remove samples */
1798                         samples_size = wanted_size;
1799                     } else if (wanted_size > samples_size) {
1800                         uint8_t *samples_end, *q;
1801                         int nb;
1802
1803                         /* add samples */
1804                         nb = (wanted_size - samples_size);
1805                         samples_end = (uint8_t *)samples + samples_size - n;
1806                         q = samples_end + n;
1807                         while (nb > 0) {
1808                             memcpy(q, samples_end, n);
1809                             q += n;
1810                             nb -= n;
1811                         }
1812                         samples_size = wanted_size;
1813                     }
1814                 }
1815                 av_dlog(NULL, "diff=%f adiff=%f sample_diff=%d apts=%0.3f vpts=%0.3f %f\n",
1816                         diff, avg_diff, samples_size - samples_size1,
1817                         is->audio_clock, is->video_clock, is->audio_diff_threshold);
1818             }
1819         } else {
1820             /* the difference is too large: probably initial PTS errors, so
1821                reset the A-V filter */
1822             is->audio_diff_avg_count = 0;
1823             is->audio_diff_cum       = 0;
1824         }
1825     }
1826
1827     return samples_size;
1828 }
1829
1830 /* decode one audio frame and return its uncompressed size in bytes */
1831 static int audio_decode_frame(VideoState *is, double *pts_ptr)
1832 {
1833     AVPacket *pkt_temp = &is->audio_pkt_temp;
1834     AVPacket *pkt = &is->audio_pkt;
1835     AVCodecContext *dec = is->audio_st->codec;
1836     int n, len1, data_size, got_frame;
1837     double pts;
1838     int new_packet = 0;
1839     int flush_complete = 0;
1840
1841     for (;;) {
1842         /* NOTE: the audio packet can contain several frames */
1843         while (pkt_temp->size > 0 || (!pkt_temp->data && new_packet)) {
1844             int resample_changed, audio_resample;
1845
1846             if (!is->frame) {
1847                 if (!(is->frame = av_frame_alloc()))
1848                     return AVERROR(ENOMEM);
1849             }
1850
1851             if (flush_complete)
1852                 break;
1853             new_packet = 0;
1854             len1 = avcodec_decode_audio4(dec, is->frame, &got_frame, pkt_temp);
1855             if (len1 < 0) {
1856                 /* if error, we skip the frame */
1857                 pkt_temp->size = 0;
1858                 break;
1859             }
1860
1861             pkt_temp->data += len1;
1862             pkt_temp->size -= len1;
1863
1864             if (!got_frame) {
1865                 /* stop sending empty packets if the decoder is finished */
1866                 if (!pkt_temp->data && dec->codec->capabilities & CODEC_CAP_DELAY)
1867                     flush_complete = 1;
1868                 continue;
1869             }
1870             data_size = av_samples_get_buffer_size(NULL, dec->channels,
1871                                                    is->frame->nb_samples,
1872                                                    is->frame->format, 1);
1873
1874             audio_resample = is->frame->format         != is->sdl_sample_fmt     ||
1875                              is->frame->channel_layout != is->sdl_channel_layout ||
1876                              is->frame->sample_rate    != is->sdl_sample_rate;
1877
1878             resample_changed = is->frame->format         != is->resample_sample_fmt     ||
1879                                is->frame->channel_layout != is->resample_channel_layout ||
1880                                is->frame->sample_rate    != is->resample_sample_rate;
1881
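                 /* audio_resample: the decoded format differs from the SDL output
                    format, so conversion is needed at all; resample_changed: the
                    incoming format differs from what the resampler was configured
                    for, so it has to be reopened */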
1882             if ((!is->avr && audio_resample) || resample_changed) {
1883                 int ret;
1884                 if (is->avr)
1885                     avresample_close(is->avr);
1886                 else if (audio_resample) {
1887                     is->avr = avresample_alloc_context();
1888                     if (!is->avr) {
1889                         fprintf(stderr, "error allocating AVAudioResampleContext\n");
1890                         break;
1891                     }
1892                 }
1893                 if (audio_resample) {
1894                     av_opt_set_int(is->avr, "in_channel_layout",  is->frame->channel_layout, 0);
1895                     av_opt_set_int(is->avr, "in_sample_fmt",      is->frame->format,         0);
1896                     av_opt_set_int(is->avr, "in_sample_rate",     is->frame->sample_rate,    0);
1897                     av_opt_set_int(is->avr, "out_channel_layout", is->sdl_channel_layout,    0);
1898                     av_opt_set_int(is->avr, "out_sample_fmt",     is->sdl_sample_fmt,        0);
1899                     av_opt_set_int(is->avr, "out_sample_rate",    is->sdl_sample_rate,       0);
1900
1901                     if ((ret = avresample_open(is->avr)) < 0) {
1902                         fprintf(stderr, "error initializing libavresample\n");
1903                         break;
1904                     }
1905                 }
1906                 is->resample_sample_fmt     = is->frame->format;
1907                 is->resample_channel_layout = is->frame->channel_layout;
1908                 is->resample_sample_rate    = is->frame->sample_rate;
1909             }
1910
1911             if (audio_resample) {
1912                 void *tmp_out;
1913                 int out_samples, out_size, out_linesize;
1914                 int osize      = av_get_bytes_per_sample(is->sdl_sample_fmt);
1915                 int nb_samples = is->frame->nb_samples;
1916
1917                 out_size = av_samples_get_buffer_size(&out_linesize,
1918                                                       is->sdl_channels,
1919                                                       nb_samples,
1920                                                       is->sdl_sample_fmt, 0);
1921                 tmp_out = av_realloc(is->audio_buf1, out_size);
1922                 if (!tmp_out)
1923                     return AVERROR(ENOMEM);
1924                 is->audio_buf1 = tmp_out;
1925
1926                 out_samples = avresample_convert(is->avr,
1927                                                  &is->audio_buf1,
1928                                                  out_linesize, nb_samples,
1929                                                  is->frame->data,
1930                                                  is->frame->linesize[0],
1931                                                  is->frame->nb_samples);
1932                 if (out_samples < 0) {
1933                     fprintf(stderr, "avresample_convert() failed\n");
1934                     break;
1935                 }
1936                 is->audio_buf = is->audio_buf1;
1937                 data_size = out_samples * osize * is->sdl_channels;
1938             } else {
1939                 is->audio_buf = is->frame->data[0];
1940             }
1941
1942             /* derive the pts of this chunk from the running audio clock */
1943             pts = is->audio_clock;
1944             *pts_ptr = pts;
1945             n = is->sdl_channels * av_get_bytes_per_sample(is->sdl_sample_fmt);
1946             is->audio_clock += (double)data_size /
1947                 (double)(n * is->sdl_sample_rate);
1948 #ifdef DEBUG
1949             {
1950                 static double last_clock;
1951                 printf("audio: delay=%0.3f clock=%0.3f pts=%0.3f\n",
1952                        is->audio_clock - last_clock,
1953                        is->audio_clock, pts);
1954                 last_clock = is->audio_clock;
1955             }
1956 #endif
1957             return data_size;
1958         }
1959
1960         /* free the current packet */
1961         if (pkt->data)
1962             av_free_packet(pkt);
1963         memset(pkt_temp, 0, sizeof(*pkt_temp));
1964
1965         if (is->paused || is->audioq.abort_request) {
1966             return -1;
1967         }
1968
1969         /* read next packet */
1970         if ((new_packet = packet_queue_get(&is->audioq, pkt, 1)) < 0)
1971             return -1;
1972
1973         if (pkt->data == flush_pkt.data) {
1974             avcodec_flush_buffers(dec);
1975             flush_complete = 0;
1976         }
1977
1978         *pkt_temp = *pkt;
1979
1980         /* update the audio clock with the packet pts, if available */
1981         if (pkt->pts != AV_NOPTS_VALUE) {
1982             is->audio_clock = av_q2d(is->audio_st->time_base)*pkt->pts;
1983         }
1984     }
1985 }
1986
1987 /* prepare a new audio buffer */
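     /* Called by SDL from its own audio thread; it must always fill exactly 'len'
        bytes of 'stream', so decode errors fall back to the silence buffer. */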
1988 static void sdl_audio_callback(void *opaque, Uint8 *stream, int len)
1989 {
1990     VideoState *is = opaque;
1991     int audio_size, len1;
1992     double pts;
1993
1994     audio_callback_time = av_gettime();
1995
1996     while (len > 0) {
1997         if (is->audio_buf_index >= is->audio_buf_size) {
1998             audio_size = audio_decode_frame(is, &pts);
1999             if (audio_size < 0) {
2000                 /* if error, just output silence */
2001                 is->audio_buf      = is->silence_buf;
2002                 is->audio_buf_size = sizeof(is->silence_buf);
2003             } else {
2004                 if (is->show_audio)
2005                     update_sample_display(is, (int16_t *)is->audio_buf, audio_size);
2006                 audio_size = synchronize_audio(is, (int16_t *)is->audio_buf, audio_size,
2007                                                pts);
2008                 is->audio_buf_size = audio_size;
2009             }
2010             is->audio_buf_index = 0;
2011         }
2012         len1 = is->audio_buf_size - is->audio_buf_index;
2013         if (len1 > len)
2014             len1 = len;
2015         memcpy(stream, (uint8_t *)is->audio_buf + is->audio_buf_index, len1);
2016         len -= len1;
2017         stream += len1;
2018         is->audio_buf_index += len1;
2019     }
2020 }
2021
2022 /* open a given stream. Return 0 if OK */
2023 static int stream_component_open(VideoState *is, int stream_index)
2024 {
2025     AVFormatContext *ic = is->ic;
2026     AVCodecContext *avctx;
2027     AVCodec *codec;
2028     SDL_AudioSpec wanted_spec, spec;
2029     AVDictionary *opts;
2030     AVDictionaryEntry *t = NULL;
2031
2032     if (stream_index < 0 || stream_index >= ic->nb_streams)
2033         return -1;
2034     avctx = ic->streams[stream_index]->codec;
2035
2036     opts = filter_codec_opts(codec_opts, avctx->codec_id, ic, ic->streams[stream_index], NULL);
2037
2038     codec = avcodec_find_decoder(avctx->codec_id);
2039     avctx->workaround_bugs   = workaround_bugs;
2040     avctx->idct_algo         = idct;
2041     avctx->skip_frame        = skip_frame;
2042     avctx->skip_idct         = skip_idct;
2043     avctx->skip_loop_filter  = skip_loop_filter;
2044     avctx->error_concealment = error_concealment;
2045
2046     if (fast)   avctx->flags2 |= CODEC_FLAG2_FAST;
2047
2048     if (!av_dict_get(opts, "threads", NULL, 0))
2049         av_dict_set(&opts, "threads", "auto", 0);
2050     if (avctx->codec_type == AVMEDIA_TYPE_VIDEO)
2051         av_dict_set(&opts, "refcounted_frames", "1", 0);
2052     if (!codec ||
2053         avcodec_open2(avctx, codec, &opts) < 0)
2054         return -1;
2055     if ((t = av_dict_get(opts, "", NULL, AV_DICT_IGNORE_SUFFIX))) {
2056         av_log(NULL, AV_LOG_ERROR, "Option %s not found.\n", t->key);
2057         return AVERROR_OPTION_NOT_FOUND;
2058     }
2059
2060     /* prepare audio output */
2061     if (avctx->codec_type == AVMEDIA_TYPE_AUDIO) {
2062         is->sdl_sample_rate = avctx->sample_rate;
2063
2064         if (!avctx->channel_layout)
2065             avctx->channel_layout = av_get_default_channel_layout(avctx->channels);
2066         if (!avctx->channel_layout) {
2067             fprintf(stderr, "unable to guess channel layout\n");
2068             return -1;
2069         }
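             /* SDL output is opened with at most two channels: mono input stays
                mono, anything else is played as stereo and converted to that
                layout by libavresample */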
2070         if (avctx->channels == 1)
2071             is->sdl_channel_layout = AV_CH_LAYOUT_MONO;
2072         else
2073             is->sdl_channel_layout = AV_CH_LAYOUT_STEREO;
2074         is->sdl_channels = av_get_channel_layout_nb_channels(is->sdl_channel_layout);
2075
2076         wanted_spec.format = AUDIO_S16SYS;
2077         wanted_spec.freq = is->sdl_sample_rate;
2078         wanted_spec.channels = is->sdl_channels;
2079         wanted_spec.silence = 0;
2080         wanted_spec.samples = SDL_AUDIO_BUFFER_SIZE;
2081         wanted_spec.callback = sdl_audio_callback;
2082         wanted_spec.userdata = is;
2083         if (SDL_OpenAudio(&wanted_spec, &spec) < 0) {
2084             fprintf(stderr, "SDL_OpenAudio: %s\n", SDL_GetError());
2085             return -1;
2086         }
2087         is->audio_hw_buf_size = spec.size;
2088         is->sdl_sample_fmt          = AV_SAMPLE_FMT_S16;
2089         is->resample_sample_fmt     = is->sdl_sample_fmt;
2090         is->resample_channel_layout = avctx->channel_layout;
2091         is->resample_sample_rate    = avctx->sample_rate;
2092     }
2093
2094     ic->streams[stream_index]->discard = AVDISCARD_DEFAULT;
2095     switch (avctx->codec_type) {
2096     case AVMEDIA_TYPE_AUDIO:
2097         is->audio_stream = stream_index;
2098         is->audio_st = ic->streams[stream_index];
2099         is->audio_buf_size  = 0;
2100         is->audio_buf_index = 0;
2101
2102         /* init averaging filter */
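             /* exp(log(0.01) / N) makes the contribution of a measurement decay to
                about 1% after AUDIO_DIFF_AVG_NB samples of the running average */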
2103         is->audio_diff_avg_coef  = exp(log(0.01) / AUDIO_DIFF_AVG_NB);
2104         is->audio_diff_avg_count = 0;
2105         /* since we do not have a precise enough audio FIFO fullness measure,
2106            we correct audio sync only if the error exceeds this threshold */
2107         is->audio_diff_threshold = 2.0 * SDL_AUDIO_BUFFER_SIZE / avctx->sample_rate;
2108
2109         memset(&is->audio_pkt, 0, sizeof(is->audio_pkt));
2110         packet_queue_init(&is->audioq);
2111         SDL_PauseAudio(0);
2112         break;
2113     case AVMEDIA_TYPE_VIDEO:
2114         is->video_stream = stream_index;
2115         is->video_st = ic->streams[stream_index];
2116
2117         packet_queue_init(&is->videoq);
2118         is->video_tid = SDL_CreateThread(video_thread, is);
2119         break;
2120     case AVMEDIA_TYPE_SUBTITLE:
2121         is->subtitle_stream = stream_index;
2122         is->subtitle_st = ic->streams[stream_index];
2123         packet_queue_init(&is->subtitleq);
2124
2125         is->subtitle_tid = SDL_CreateThread(subtitle_thread, is);
2126         break;
2127     default:
2128         break;
2129     }
2130     return 0;
2131 }
2132
2133 static void stream_component_close(VideoState *is, int stream_index)
2134 {
2135     AVFormatContext *ic = is->ic;
2136     AVCodecContext *avctx;
2137
2138     if (stream_index < 0 || stream_index >= ic->nb_streams)
2139         return;
2140     avctx = ic->streams[stream_index]->codec;
2141
2142     switch (avctx->codec_type) {
2143     case AVMEDIA_TYPE_AUDIO:
2144         packet_queue_abort(&is->audioq);
2145
2146         SDL_CloseAudio();
2147
2148         packet_queue_end(&is->audioq);
2149         av_free_packet(&is->audio_pkt);
2150         if (is->avr)
2151             avresample_free(&is->avr);
2152         av_freep(&is->audio_buf1);
2153         is->audio_buf = NULL;
2154         av_frame_free(&is->frame);
2155
2156         if (is->rdft) {
2157             av_rdft_end(is->rdft);
2158             av_freep(&is->rdft_data);
2159             is->rdft = NULL;
2160             is->rdft_bits = 0;
2161         }
2162         break;
2163     case AVMEDIA_TYPE_VIDEO:
2164         packet_queue_abort(&is->videoq);
2165
2166         /* note: we also signal this mutex to make sure we unblock the
2167            video thread in all cases */
2168         SDL_LockMutex(is->pictq_mutex);
2169         SDL_CondSignal(is->pictq_cond);
2170         SDL_UnlockMutex(is->pictq_mutex);
2171
2172         SDL_WaitThread(is->video_tid, NULL);
2173
2174         packet_queue_end(&is->videoq);
2175         break;
2176     case AVMEDIA_TYPE_SUBTITLE:
2177         packet_queue_abort(&is->subtitleq);
2178
2179         /* note: we also signal this mutex to make sure we unblock the
2180            subtitle thread in all cases */
2181         SDL_LockMutex(is->subpq_mutex);
2182         is->subtitle_stream_changed = 1;
2183
2184         SDL_CondSignal(is->subpq_cond);
2185         SDL_UnlockMutex(is->subpq_mutex);
2186
2187         SDL_WaitThread(is->subtitle_tid, NULL);
2188
2189         packet_queue_end(&is->subtitleq);
2190         break;
2191     default:
2192         break;
2193     }
2194
2195     ic->streams[stream_index]->discard = AVDISCARD_ALL;
2196     avcodec_close(avctx);
2197     switch (avctx->codec_type) {
2198     case AVMEDIA_TYPE_AUDIO:
2199         is->audio_st = NULL;
2200         is->audio_stream = -1;
2201         break;
2202     case AVMEDIA_TYPE_VIDEO:
2203         is->video_st = NULL;
2204         is->video_stream = -1;
2205         break;
2206     case AVMEDIA_TYPE_SUBTITLE:
2207         is->subtitle_st = NULL;
2208         is->subtitle_stream = -1;
2209         break;
2210     default:
2211         break;
2212     }
2213 }
2214
2215 /* since we have only one decoding thread, we can use a global
2216    variable instead of a thread local variable */
2217 static VideoState *global_video_state;
2218
2219 static int decode_interrupt_cb(void *ctx)
2220 {
2221     return global_video_state && global_video_state->abort_request;
2222 }
2223
2224 /* this thread gets the stream from the disk or the network */
2225 static int decode_thread(void *arg)
2226 {
2227     VideoState *is = arg;
2228     AVFormatContext *ic = NULL;
2229     int err, i, ret;
2230     int st_index[AVMEDIA_TYPE_NB];
2231     AVPacket pkt1, *pkt = &pkt1;
2232     int eof = 0;
2233     int pkt_in_play_range = 0;
2234     AVDictionaryEntry *t;
2235     AVDictionary **opts;
2236     int orig_nb_streams;
2237
2238     memset(st_index, -1, sizeof(st_index));
2239     is->video_stream = -1;
2240     is->audio_stream = -1;
2241     is->subtitle_stream = -1;
2242
2243     global_video_state = is;
2244
2245     ic = avformat_alloc_context();
2246     ic->interrupt_callback.callback = decode_interrupt_cb;
2247     err = avformat_open_input(&ic, is->filename, is->iformat, &format_opts);
2248     if (err < 0) {
2249         print_error(is->filename, err);
2250         ret = -1;
2251         goto fail;
2252     }
2253     if ((t = av_dict_get(format_opts, "", NULL, AV_DICT_IGNORE_SUFFIX))) {
2254         av_log(NULL, AV_LOG_ERROR, "Option %s not found.\n", t->key);
2255         ret = AVERROR_OPTION_NOT_FOUND;
2256         goto fail;
2257     }
2258     is->ic = ic;
2259
2260     if (genpts)
2261         ic->flags |= AVFMT_FLAG_GENPTS;
2262
2263     opts = setup_find_stream_info_opts(ic, codec_opts);
2264     orig_nb_streams = ic->nb_streams;
2265
2266     err = avformat_find_stream_info(ic, opts);
2267     if (err < 0) {
2268         fprintf(stderr, "%s: could not find codec parameters\n", is->filename);
2269         ret = -1;
2270         goto fail;
2271     }
2272     for (i = 0; i < orig_nb_streams; i++)
2273         av_dict_free(&opts[i]);
2274     av_freep(&opts);
2275
2276     if (ic->pb)
2277         ic->pb->eof_reached = 0; // FIXME: hack, avplay should probably not use url_feof() to test for the end
2278
2279     if (seek_by_bytes < 0)
2280         seek_by_bytes = !!(ic->iformat->flags & AVFMT_TS_DISCONT);
2281
2282     /* if seeking was requested, execute it */
2283     if (start_time != AV_NOPTS_VALUE) {
2284         int64_t timestamp;
2285
2286         timestamp = start_time;
2287         /* add the stream start time */
2288         if (ic->start_time != AV_NOPTS_VALUE)
2289             timestamp += ic->start_time;
2290         ret = avformat_seek_file(ic, -1, INT64_MIN, timestamp, INT64_MAX, 0);
2291         if (ret < 0) {
2292             fprintf(stderr, "%s: could not seek to position %0.3f\n",
2293                     is->filename, (double)timestamp / AV_TIME_BASE);
2294         }
2295     }
2296
2297     for (i = 0; i < ic->nb_streams; i++)
2298         ic->streams[i]->discard = AVDISCARD_ALL;
2299     if (!video_disable)
2300         st_index[AVMEDIA_TYPE_VIDEO] =
2301             av_find_best_stream(ic, AVMEDIA_TYPE_VIDEO,
2302                                 wanted_stream[AVMEDIA_TYPE_VIDEO], -1, NULL, 0);
2303     if (!audio_disable)
2304         st_index[AVMEDIA_TYPE_AUDIO] =
2305             av_find_best_stream(ic, AVMEDIA_TYPE_AUDIO,
2306                                 wanted_stream[AVMEDIA_TYPE_AUDIO],
2307                                 st_index[AVMEDIA_TYPE_VIDEO],
2308                                 NULL, 0);
2309     if (!video_disable)
2310         st_index[AVMEDIA_TYPE_SUBTITLE] =
2311             av_find_best_stream(ic, AVMEDIA_TYPE_SUBTITLE,
2312                                 wanted_stream[AVMEDIA_TYPE_SUBTITLE],
2313                                 (st_index[AVMEDIA_TYPE_AUDIO] >= 0 ?
2314                                  st_index[AVMEDIA_TYPE_AUDIO] :
2315                                  st_index[AVMEDIA_TYPE_VIDEO]),
2316                                 NULL, 0);
2317     if (show_status) {
2318         av_dump_format(ic, 0, is->filename, 0);
2319     }
2320
2321     /* open the streams */
2322     if (st_index[AVMEDIA_TYPE_AUDIO] >= 0) {
2323         stream_component_open(is, st_index[AVMEDIA_TYPE_AUDIO]);
2324     }
2325
2326     ret = -1;
2327     if (st_index[AVMEDIA_TYPE_VIDEO] >= 0) {
2328         ret = stream_component_open(is, st_index[AVMEDIA_TYPE_VIDEO]);
2329     }
2330     is->refresh_tid = SDL_CreateThread(refresh_thread, is);
2331     if (ret < 0) {
2332         if (!display_disable)
2333             is->show_audio = 2;
2334     }
2335
2336     if (st_index[AVMEDIA_TYPE_SUBTITLE] >= 0) {
2337         stream_component_open(is, st_index[AVMEDIA_TYPE_SUBTITLE]);
2338     }
2339
2340     if (is->video_stream < 0 && is->audio_stream < 0) {
2341         fprintf(stderr, "%s: could not open codecs\n", is->filename);
2342         ret = -1;
2343         goto fail;
2344     }
2345
2346     for (;;) {
2347         if (is->abort_request)
2348             break;
2349         if (is->paused != is->last_paused) {
2350             is->last_paused = is->paused;
2351             if (is->paused)
2352                 is->read_pause_return = av_read_pause(ic);
2353             else
2354                 av_read_play(ic);
2355         }
2356 #if CONFIG_RTSP_DEMUXER
2357         if (is->paused && !strcmp(ic->iformat->name, "rtsp")) {
2358             /* wait 10 ms to avoid trying to get another packet */
2359             /* XXX: horrible */
2360             SDL_Delay(10);
2361             continue;
2362         }
2363 #endif
2364         if (is->seek_req) {
2365             int64_t seek_target = is->seek_pos;
2366             int64_t seek_min    = is->seek_rel > 0 ? seek_target - is->seek_rel + 2: INT64_MIN;
2367             int64_t seek_max    = is->seek_rel < 0 ? seek_target - is->seek_rel - 2: INT64_MAX;
2368 // FIXME: the +-2 compensates for rounding not being done in the correct direction
2369 //        when the seek_pos/seek_rel variables are generated
2370
2371             ret = avformat_seek_file(is->ic, -1, seek_min, seek_target, seek_max, is->seek_flags);
2372             if (ret < 0) {
2373                 fprintf(stderr, "%s: error while seeking\n", is->ic->filename);
2374             } else {
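                     /* the seek succeeded: flush everything still queued and push
                        the special flush packet so the consumers (see the flush_pkt
                        checks in the decoding threads) reset their codecs before
                        handling the new data */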
2375                 if (is->audio_stream >= 0) {
2376                     packet_queue_flush(&is->audioq);
2377                     packet_queue_put(&is->audioq, &flush_pkt);
2378                 }
2379                 if (is->subtitle_stream >= 0) {
2380                     packet_queue_flush(&is->subtitleq);
2381                     packet_queue_put(&is->subtitleq, &flush_pkt);
2382                 }
2383                 if (is->video_stream >= 0) {
2384                     packet_queue_flush(&is->videoq);
2385                     packet_queue_put(&is->videoq, &flush_pkt);
2386                 }
2387             }
2388             is->seek_req = 0;
2389             eof = 0;
2390         }
2391
2392         /* if the queues are full, no need to read more */
2393         if (!infinite_buffer &&
2394               (is->audioq.size + is->videoq.size + is->subtitleq.size > MAX_QUEUE_SIZE
2395             || (   (is->audioq   .size  > MIN_AUDIOQ_SIZE || is->audio_stream < 0)
2396                 && (is->videoq   .nb_packets > MIN_FRAMES || is->video_stream < 0)
2397                 && (is->subtitleq.nb_packets > MIN_FRAMES || is->subtitle_stream < 0)))) {
2398             /* wait 10 ms */
2399             SDL_Delay(10);
2400             continue;
2401         }
2402         if (eof) {
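                 /* at end of file, queue empty packets to drain decoders that
                    buffer frames internally (CODEC_CAP_DELAY), then loop or exit
                    once all packet queues are empty */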
2403             if (is->video_stream >= 0) {
2404                 av_init_packet(pkt);
2405                 pkt->data = NULL;
2406                 pkt->size = 0;
2407                 pkt->stream_index = is->video_stream;
2408                 packet_queue_put(&is->videoq, pkt);
2409             }
2410             if (is->audio_stream >= 0 &&
2411                 is->audio_st->codec->codec->capabilities & CODEC_CAP_DELAY) {
2412                 av_init_packet(pkt);
2413                 pkt->data = NULL;
2414                 pkt->size = 0;
2415                 pkt->stream_index = is->audio_stream;
2416                 packet_queue_put(&is->audioq, pkt);
2417             }
2418             SDL_Delay(10);
2419             if (is->audioq.size + is->videoq.size + is->subtitleq.size == 0) {
2420                 if (loop != 1 && (!loop || --loop)) {
2421                     stream_seek(cur_stream, start_time != AV_NOPTS_VALUE ? start_time : 0, 0, 0);
2422                 } else if (autoexit) {
2423                     ret = AVERROR_EOF;
2424                     goto fail;
2425                 }
2426             }
2427             continue;
2428         }
2429         ret = av_read_frame(ic, pkt);
2430         if (ret < 0) {
2431             if (ret == AVERROR_EOF || (ic->pb && ic->pb->eof_reached))
2432                 eof = 1;
2433             if (ic->pb && ic->pb->error)
2434                 break;
2435             SDL_Delay(100); /* wait for user event */
2436             continue;
2437         }
2438         /* check if packet is in play range specified by user, then queue, otherwise discard */
2439         pkt_in_play_range = duration == AV_NOPTS_VALUE ||
2440                 (pkt->pts - ic->streams[pkt->stream_index]->start_time) *
2441                 av_q2d(ic->streams[pkt->stream_index]->time_base) -
2442                 (double)(start_time != AV_NOPTS_VALUE ? start_time : 0) / 1000000
2443                 <= ((double)duration / 1000000);
2444         if (pkt->stream_index == is->audio_stream && pkt_in_play_range) {
2445             packet_queue_put(&is->audioq, pkt);
2446         } else if (pkt->stream_index == is->video_stream && pkt_in_play_range) {
2447             packet_queue_put(&is->videoq, pkt);
2448         } else if (pkt->stream_index == is->subtitle_stream && pkt_in_play_range) {
2449             packet_queue_put(&is->subtitleq, pkt);
2450         } else {
2451             av_free_packet(pkt);
2452         }
2453     }
2454     /* wait until the end */
2455     while (!is->abort_request) {
2456         SDL_Delay(100);
2457     }
2458
2459     ret = 0;
2460  fail:
2461     /* disable interrupting */
2462     global_video_state = NULL;
2463
2464     /* close each stream */
2465     if (is->audio_stream >= 0)
2466         stream_component_close(is, is->audio_stream);
2467     if (is->video_stream >= 0)
2468         stream_component_close(is, is->video_stream);
2469     if (is->subtitle_stream >= 0)
2470         stream_component_close(is, is->subtitle_stream);
2471     if (is->ic) {
2472         avformat_close_input(&is->ic);
2473     }
2474
2475     if (ret != 0) {
2476         SDL_Event event;
2477
2478         event.type = FF_QUIT_EVENT;
2479         event.user.data1 = is;
2480         SDL_PushEvent(&event);
2481     }
2482     return 0;
2483 }
2484
2485 static VideoState *stream_open(const char *filename, AVInputFormat *iformat)
2486 {
2487     VideoState *is;
2488
2489     is = av_mallocz(sizeof(VideoState));
2490     if (!is)
2491         return NULL;
2492     av_strlcpy(is->filename, filename, sizeof(is->filename));
2493     is->iformat = iformat;
2494     is->ytop    = 0;
2495     is->xleft   = 0;
2496
2497     /* start video display */
2498     is->pictq_mutex = SDL_CreateMutex();
2499     is->pictq_cond  = SDL_CreateCond();
2500
2501     is->subpq_mutex = SDL_CreateMutex();
2502     is->subpq_cond  = SDL_CreateCond();
2503
2504     is->av_sync_type = av_sync_type;
2505     is->parse_tid    = SDL_CreateThread(decode_thread, is);
2506     if (!is->parse_tid) {
2507         av_free(is);
2508         return NULL;
2509     }
2510     return is;
2511 }
2512
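     /* switch to the next stream of the given type, wrapping around and skipping
        streams whose parameters look unusable */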
2513 static void stream_cycle_channel(VideoState *is, int codec_type)
2514 {
2515     AVFormatContext *ic = is->ic;
2516     int start_index, stream_index;
2517     AVStream *st;
2518
2519     if (codec_type == AVMEDIA_TYPE_VIDEO)
2520         start_index = is->video_stream;
2521     else if (codec_type == AVMEDIA_TYPE_AUDIO)
2522         start_index = is->audio_stream;
2523     else
2524         start_index = is->subtitle_stream;
2525     if (start_index < (codec_type == AVMEDIA_TYPE_SUBTITLE ? -1 : 0))
2526         return;
2527     stream_index = start_index;
2528     for (;;) {
2529         if (++stream_index >= is->ic->nb_streams)
2530         {
2531             if (codec_type == AVMEDIA_TYPE_SUBTITLE)
2532             {
2533                 stream_index = -1;
2534                 goto the_end;
2535             } else
2536                 stream_index = 0;
2537         }
2538         if (stream_index == start_index)
2539             return;
2540         st = ic->streams[stream_index];
2541         if (st->codec->codec_type == codec_type) {
2542             /* check that parameters are OK */
2543             switch (codec_type) {
2544             case AVMEDIA_TYPE_AUDIO:
2545                 if (st->codec->sample_rate != 0 &&
2546                     st->codec->channels != 0)
2547                     goto the_end;
2548                 break;
2549             case AVMEDIA_TYPE_VIDEO:
2550             case AVMEDIA_TYPE_SUBTITLE:
2551                 goto the_end;
2552             default:
2553                 break;
2554             }
2555         }
2556     }
2557  the_end:
2558     stream_component_close(is, start_index);
2559     stream_component_open(is, stream_index);
2560 }
2561
2562
2563 static void toggle_full_screen(void)
2564 {
2565 #if defined(__APPLE__) && SDL_VERSION_ATLEAST(1, 2, 14)
2566     /* OS X needs to empty the picture_queue */
2567     int i;
2568     for (i = 0; i < VIDEO_PICTURE_QUEUE_SIZE; i++)
2569         cur_stream->pictq[i].reallocate = 1;
2570 #endif
2571     is_full_screen = !is_full_screen;
2572     video_open(cur_stream);
2573 }
2574
2575 static void toggle_pause(void)
2576 {
2577     if (cur_stream)
2578         stream_pause(cur_stream);
2579     step = 0;
2580 }
2581
2582 static void step_to_next_frame(void)
2583 {
2584     if (cur_stream) {
2585         /* if the stream is paused unpause it, then step */
2586         if (cur_stream->paused)
2587             stream_pause(cur_stream);
2588     }
2589     step = 1;
2590 }
2591
2592 static void toggle_audio_display(void)
2593 {
2594     if (cur_stream) {
2595         int bgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0x00);
2596         cur_stream->show_audio = (cur_stream->show_audio + 1) % 3;
2597         fill_rectangle(screen,
2598                        cur_stream->xleft, cur_stream->ytop, cur_stream->width, cur_stream->height,
2599                        bgcolor);
2600         SDL_UpdateRect(screen, cur_stream->xleft, cur_stream->ytop, cur_stream->width, cur_stream->height);
2601     }
2602 }
2603
2604 static void seek_chapter(VideoState *is, int incr)
2605 {
2606     int64_t pos = get_master_clock(is) * AV_TIME_BASE;
2607     int i;
2608
2609     if (!is->ic->nb_chapters)
2610         return;
2611
2612     /* find the current chapter */
2613     for (i = 0; i < is->ic->nb_chapters; i++) {
2614         AVChapter *ch = is->ic->chapters[i];
2615         if (av_compare_ts(pos, AV_TIME_BASE_Q, ch->start, ch->time_base) < 0) {
2616             i--;
2617             break;
2618         }
2619     }
2620
2621     i += incr;
2622     i = FFMAX(i, 0);
2623     if (i >= is->ic->nb_chapters)
2624         return;
2625
2626     av_log(NULL, AV_LOG_VERBOSE, "Seeking to chapter %d.\n", i);
2627     stream_seek(is, av_rescale_q(is->ic->chapters[i]->start, is->ic->chapters[i]->time_base,
2628                                  AV_TIME_BASE_Q), 0, 0);
2629 }
2630
2631 /* handle an event sent by the GUI */
2632 static void event_loop(void)
2633 {
2634     SDL_Event event;
2635     double incr, pos, frac;
2636
2637     for (;;) {
2638         double x;
2639         SDL_WaitEvent(&event);
2640         switch (event.type) {
2641         case SDL_KEYDOWN:
2642             if (exit_on_keydown) {
2643                 do_exit();
2644                 break;
2645             }
2646             switch (event.key.keysym.sym) {
2647             case SDLK_ESCAPE:
2648             case SDLK_q:
2649                 do_exit();
2650                 break;
2651             case SDLK_f:
2652                 toggle_full_screen();
2653                 break;
2654             case SDLK_p:
2655             case SDLK_SPACE:
2656                 toggle_pause();
2657                 break;
2658             case SDLK_s: // S: Step to next frame
2659                 step_to_next_frame();
2660                 break;
2661             case SDLK_a:
2662                 if (cur_stream)
2663                     stream_cycle_channel(cur_stream, AVMEDIA_TYPE_AUDIO);
2664                 break;
2665             case SDLK_v:
2666                 if (cur_stream)
2667                     stream_cycle_channel(cur_stream, AVMEDIA_TYPE_VIDEO);
2668                 break;
2669             case SDLK_t:
2670                 if (cur_stream)
2671                     stream_cycle_channel(cur_stream, AVMEDIA_TYPE_SUBTITLE);
2672                 break;
2673             case SDLK_w:
2674                 toggle_audio_display();
2675                 break;
2676             case SDLK_PAGEUP:
2677                 seek_chapter(cur_stream, 1);
2678                 break;
2679             case SDLK_PAGEDOWN:
2680                 seek_chapter(cur_stream, -1);
2681                 break;
2682             case SDLK_LEFT:
2683                 incr = -10.0;
2684                 goto do_seek;
2685             case SDLK_RIGHT:
2686                 incr = 10.0;
2687                 goto do_seek;
2688             case SDLK_UP:
2689                 incr = 60.0;
2690                 goto do_seek;
2691             case SDLK_DOWN:
2692                 incr = -60.0;
2693             do_seek:
2694                 if (cur_stream) {
2695                     if (seek_by_bytes) {
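                             /* convert the seek increment from seconds to bytes
                                using the container bit rate, falling back to
                                180000 bytes per second when it is unknown */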
2696                         if (cur_stream->video_stream >= 0 && cur_stream->video_current_pos >= 0) {
2697                             pos = cur_stream->video_current_pos;
2698                         } else if (cur_stream->audio_stream >= 0 && cur_stream->audio_pkt.pos >= 0) {
2699                             pos = cur_stream->audio_pkt.pos;
2700                         } else
2701                             pos = avio_tell(cur_stream->ic->pb);
2702                         if (cur_stream->ic->bit_rate)
2703                             incr *= cur_stream->ic->bit_rate / 8.0;
2704                         else
2705                             incr *= 180000.0;
2706                         pos += incr;
2707                         stream_seek(cur_stream, pos, incr, 1);
2708                     } else {
2709                         pos = get_master_clock(cur_stream);
2710                         pos += incr;
2711                         stream_seek(cur_stream, (int64_t)(pos * AV_TIME_BASE), (int64_t)(incr * AV_TIME_BASE), 0);
2712                     }
2713                 }
2714                 break;
2715             default:
2716                 break;
2717             }
2718             break;
2719         case SDL_MOUSEBUTTONDOWN:
2720             if (exit_on_mousedown) {
2721                 do_exit();
2722                 break;
2723             }
2724         case SDL_MOUSEMOTION:
2725             if (event.type == SDL_MOUSEBUTTONDOWN) {
2726                 x = event.button.x;
2727             } else {
2728                 if (event.motion.state != SDL_PRESSED)
2729                     break;
2730                 x = event.motion.x;
2731             }
2732             if (cur_stream) {
2733                 if (seek_by_bytes || cur_stream->ic->duration <= 0) {
2734                     uint64_t size =  avio_size(cur_stream->ic->pb);
2735                     stream_seek(cur_stream, size*x/cur_stream->width, 0, 1);
2736                 } else {
2737                     int64_t ts;
2738                     int ns, hh, mm, ss;
2739                     int tns, thh, tmm, tss;
2740                     tns  = cur_stream->ic->duration / 1000000LL;
2741                     thh  = tns / 3600;
2742                     tmm  = (tns % 3600) / 60;
2743                     tss  = (tns % 60);
2744                     frac = x / cur_stream->width;
2745                     ns   = frac * tns;
2746                     hh   = ns / 3600;
2747                     mm   = (ns % 3600) / 60;
2748                     ss   = (ns % 60);
2749                     fprintf(stderr, "Seek to %2.0f%% (%2d:%02d:%02d) of total duration (%2d:%02d:%02d)       \n", frac*100,
2750                             hh, mm, ss, thh, tmm, tss);
2751                     ts = frac * cur_stream->ic->duration;
2752                     if (cur_stream->ic->start_time != AV_NOPTS_VALUE)
2753                         ts += cur_stream->ic->start_time;
2754                     stream_seek(cur_stream, ts, 0, 0);
2755                 }
2756             }
2757             break;
2758         case SDL_VIDEORESIZE:
2759             if (cur_stream) {
2760                 screen = SDL_SetVideoMode(event.resize.w, event.resize.h, 0,
2761                                           SDL_HWSURFACE|SDL_RESIZABLE|SDL_ASYNCBLIT|SDL_HWACCEL);
2762                 screen_width  = cur_stream->width  = event.resize.w;
2763                 screen_height = cur_stream->height = event.resize.h;
2764             }
2765             break;
2766         case SDL_QUIT:
2767         case FF_QUIT_EVENT:
2768             do_exit();
2769             break;
2770         case FF_ALLOC_EVENT:
2771             video_open(event.user.data1);
2772             alloc_picture(event.user.data1);
2773             break;
2774         case FF_REFRESH_EVENT:
2775             video_refresh_timer(event.user.data1);
2776             cur_stream->refresh = 0;
2777             break;
2778         default:
2779             break;
2780         }
2781     }
2782 }
2783
2784 static int opt_frame_size(void *optctx, const char *opt, const char *arg)
2785 {
2786     av_log(NULL, AV_LOG_ERROR,
2787            "Option '%s' has been removed, use private format options instead\n", opt);
2788     return AVERROR(EINVAL);
2789 }
2790
2791 static int opt_width(void *optctx, const char *opt, const char *arg)
2792 {
2793     screen_width = parse_number_or_die(opt, arg, OPT_INT64, 1, INT_MAX);
2794     return 0;
2795 }
2796
2797 static int opt_height(void *optctx, const char *opt, const char *arg)
2798 {
2799     screen_height = parse_number_or_die(opt, arg, OPT_INT64, 1, INT_MAX);
2800     return 0;
2801 }
2802
2803 static int opt_format(void *optctx, const char *opt, const char *arg)
2804 {
2805     file_iformat = av_find_input_format(arg);
2806     if (!file_iformat) {
2807         fprintf(stderr, "Unknown input format: %s\n", arg);
2808         return AVERROR(EINVAL);
2809     }
2810     return 0;
2811 }
2812
2813 static int opt_frame_pix_fmt(void *optctx, const char *opt, const char *arg)
2814 {
2815     av_log(NULL, AV_LOG_ERROR,
2816            "Option '%s' has been removed, use private format options instead\n", opt);
2817     return AVERROR(EINVAL);
2818 }
2819
2820 static int opt_sync(void *optctx, const char *opt, const char *arg)
2821 {
2822     if (!strcmp(arg, "audio"))
2823         av_sync_type = AV_SYNC_AUDIO_MASTER;
2824     else if (!strcmp(arg, "video"))
2825         av_sync_type = AV_SYNC_VIDEO_MASTER;
2826     else if (!strcmp(arg, "ext"))
2827         av_sync_type = AV_SYNC_EXTERNAL_CLOCK;
2828     else {
2829         fprintf(stderr, "Unknown value for %s: %s\n", opt, arg);
2830         exit(1);
2831     }
2832     return 0;
2833 }
2834
2835 static int opt_seek(void *optctx, const char *opt, const char *arg)
2836 {
2837     start_time = parse_time_or_die(opt, arg, 1);
2838     return 0;
2839 }
2840
2841 static int opt_duration(void *optctx, const char *opt, const char *arg)
2842 {
2843     duration = parse_time_or_die(opt, arg, 1);
2844     return 0;
2845 }
2846
2847 static const OptionDef options[] = {
2848 #include "cmdutils_common_opts.h"
2849     { "x", HAS_ARG, { .func_arg = opt_width }, "force displayed width", "width" },
2850     { "y", HAS_ARG, { .func_arg = opt_height }, "force displayed height", "height" },
2851     { "s", HAS_ARG | OPT_VIDEO, { .func_arg = opt_frame_size }, "set frame size (WxH or abbreviation)", "size" },
2852     { "fs", OPT_BOOL, { &is_full_screen }, "force full screen" },
2853     { "an", OPT_BOOL, { &audio_disable }, "disable audio" },
2854     { "vn", OPT_BOOL, { &video_disable }, "disable video" },
2855     { "ast", OPT_INT | HAS_ARG | OPT_EXPERT, { &wanted_stream[AVMEDIA_TYPE_AUDIO] }, "select desired audio stream", "stream_number" },
2856     { "vst", OPT_INT | HAS_ARG | OPT_EXPERT, { &wanted_stream[AVMEDIA_TYPE_VIDEO] }, "select desired video stream", "stream_number" },
2857     { "sst", OPT_INT | HAS_ARG | OPT_EXPERT, { &wanted_stream[AVMEDIA_TYPE_SUBTITLE] }, "select desired subtitle stream", "stream_number" },
2858     { "ss", HAS_ARG, { .func_arg = opt_seek }, "seek to a given position in seconds", "pos" },
2859     { "t", HAS_ARG, { .func_arg = opt_duration }, "play \"duration\" seconds of audio/video", "duration" },
2860     { "bytes", OPT_INT | HAS_ARG, { &seek_by_bytes }, "seek by bytes 0=off 1=on -1=auto", "val" },
2861     { "nodisp", OPT_BOOL, { &display_disable }, "disable graphical display" },
2862     { "f", HAS_ARG, { .func_arg = opt_format }, "force format", "fmt" },
2863     { "pix_fmt", HAS_ARG | OPT_EXPERT | OPT_VIDEO, { .func_arg = opt_frame_pix_fmt }, "set pixel format", "format" },
2864     { "stats", OPT_BOOL | OPT_EXPERT, { &show_status }, "show status", "" },
2865     { "bug", OPT_INT | HAS_ARG | OPT_EXPERT, { &workaround_bugs }, "workaround bugs", "" },
2866     { "fast", OPT_BOOL | OPT_EXPERT, { &fast }, "non spec compliant optimizations", "" },
2867     { "genpts", OPT_BOOL | OPT_EXPERT, { &genpts }, "generate pts", "" },
2868     { "drp", OPT_INT | HAS_ARG | OPT_EXPERT, { &decoder_reorder_pts }, "let decoder reorder pts 0=off 1=on -1=auto", ""},
2869     { "skiploop", OPT_INT | HAS_ARG | OPT_EXPERT, { &skip_loop_filter }, "", "" },
2870     { "skipframe", OPT_INT | HAS_ARG | OPT_EXPERT, { &skip_frame }, "", "" },
2871     { "skipidct", OPT_INT | HAS_ARG | OPT_EXPERT, { &skip_idct }, "", "" },
2872     { "idct", OPT_INT | HAS_ARG | OPT_EXPERT, { &idct }, "set idct algo",  "algo" },
2873     { "ec", OPT_INT | HAS_ARG | OPT_EXPERT, { &error_concealment }, "set error concealment options",  "bit_mask" },
2874     { "sync", HAS_ARG | OPT_EXPERT, { .func_arg = opt_sync }, "set audio-video sync. type (type=audio/video/ext)", "type" },
2875     { "autoexit", OPT_BOOL | OPT_EXPERT, { &autoexit }, "exit at the end", "" },
2876     { "exitonkeydown", OPT_BOOL | OPT_EXPERT, { &exit_on_keydown }, "exit on key down", "" },
2877     { "exitonmousedown", OPT_BOOL | OPT_EXPERT, { &exit_on_mousedown }, "exit on mouse down", "" },
2878     { "loop", OPT_INT | HAS_ARG | OPT_EXPERT, { &loop }, "set number of times the playback shall be looped", "loop count" },
2879     { "framedrop", OPT_BOOL | OPT_EXPERT, { &framedrop }, "drop frames when cpu is too slow", "" },
2880     { "infbuf", OPT_BOOL | OPT_EXPERT, { &infinite_buffer }, "don't limit the input buffer size (useful with realtime streams)", "" },
2881     { "window_title", OPT_STRING | HAS_ARG, { &window_title }, "set window title", "window title" },
2882 #if CONFIG_AVFILTER
2883     { "vf", OPT_STRING | HAS_ARG, { &vfilters }, "video filters", "filter list" },
2884 #endif
2885     { "rdftspeed", OPT_INT | HAS_ARG| OPT_AUDIO | OPT_EXPERT, { &rdftspeed }, "rdft speed", "msecs" },
2886     { "default", HAS_ARG | OPT_AUDIO | OPT_VIDEO | OPT_EXPERT, { opt_default }, "generic catch all option", "" },
2887     { "i", 0, { NULL }, "avconv compatibility dummy option", ""},
2888     { NULL, },
2889 };
2890
2891 static void show_usage(void)
2892 {
2893     printf("Simple media player\n");
2894     printf("usage: %s [options] input_file\n", program_name);
2895     printf("\n");
2896 }
2897
2898 void show_help_default(const char *opt, const char *arg)
2899 {
2900     av_log_set_callback(log_callback_help);
2901     show_usage();
2902     show_help_options(options, "Main options:", 0, OPT_EXPERT, 0);
2903     show_help_options(options, "Advanced options:", OPT_EXPERT, 0, 0);
2904     printf("\n");
2905     show_help_children(avcodec_get_class(), AV_OPT_FLAG_DECODING_PARAM);
2906     show_help_children(avformat_get_class(), AV_OPT_FLAG_DECODING_PARAM);
2907 #if !CONFIG_AVFILTER
2908     show_help_children(sws_get_class(), AV_OPT_FLAG_ENCODING_PARAM);
2909 #endif
2910     printf("\nWhile playing:\n"
2911            "q, ESC              quit\n"
2912            "f                   toggle full screen\n"
2913            "p, SPC              pause\n"
2914            "a                   cycle audio channel\n"
2915            "v                   cycle video channel\n"
2916            "t                   cycle subtitle channel\n"
2917            "w                   show audio waves\n"
2918            "s                   activate frame-step mode\n"
2919            "left/right          seek backward/forward 10 seconds\n"
2920            "down/up             seek backward/forward 1 minute\n"
2921            "mouse click         seek to percentage in file corresponding to fraction of width\n"
2922            );
2923 }
2924
2925 static void opt_input_file(void *optctx, const char *filename)
2926 {
2927     if (input_filename) {
2928         fprintf(stderr, "Argument '%s' provided as input filename, but '%s' was already specified.\n",
2929                 filename, input_filename);
2930         exit(1);
2931     }
2932     if (!strcmp(filename, "-"))
2933         filename = "pipe:";
2934     input_filename = filename;
2935 }
2936
2937 /* program entry point */
2938 int main(int argc, char **argv)
2939 {
2940     int flags;
2941
2942     av_log_set_flags(AV_LOG_SKIP_REPEATED);
2943     parse_loglevel(argc, argv, options);
2944
2945     /* register all codecs, demuxers and protocols */
2946     avcodec_register_all();
2947 #if CONFIG_AVDEVICE
2948     avdevice_register_all();
2949 #endif
2950 #if CONFIG_AVFILTER
2951     avfilter_register_all();
2952 #endif
2953     av_register_all();
2954     avformat_network_init();
2955
2956     init_opts();
2957
2958     show_banner();
2959
2960     parse_options(NULL, argc, argv, options, opt_input_file);
2961
2962     if (!input_filename) {
2963         show_usage();
2964         fprintf(stderr, "An input file must be specified\n");
2965         fprintf(stderr, "Use -h to get full help or, even better, run 'man %s'\n", program_name);
2966         exit(1);
2967     }
2968
2969     if (display_disable) {
2970         video_disable = 1;
2971     }
2972     flags = SDL_INIT_VIDEO | SDL_INIT_AUDIO | SDL_INIT_TIMER;
2973 #if !defined(__MINGW32__) && !defined(__APPLE__)
2974     flags |= SDL_INIT_EVENTTHREAD; /* Not supported on Windows or Mac OS X */
2975 #endif
2976     if (SDL_Init (flags)) {
2977         fprintf(stderr, "Could not initialize SDL - %s\n", SDL_GetError());
2978         exit(1);
2979     }
2980
2981     if (!display_disable) {
2982         const SDL_VideoInfo *vi = SDL_GetVideoInfo();
2983         fs_screen_width = vi->current_w;
2984         fs_screen_height = vi->current_h;
2985     }
2986
2987     SDL_EventState(SDL_ACTIVEEVENT, SDL_IGNORE);
2988     SDL_EventState(SDL_SYSWMEVENT, SDL_IGNORE);
2989     SDL_EventState(SDL_USEREVENT, SDL_IGNORE);
2990
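         /* flush_pkt is a sentinel packet: its data pointer points at the packet
            itself, so queue consumers can recognize it by comparing pointers and
            flush their decoders */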
2991     av_init_packet(&flush_pkt);
2992     flush_pkt.data = (uint8_t *)&flush_pkt;
2993
2994     cur_stream = stream_open(input_filename, file_iformat);
2995
2996     event_loop();
2997
2998     /* never returns */
2999
3000     return 0;
3001 }