1 /*
2  * avplay : Simple Media Player based on the Libav libraries
3  * Copyright (c) 2003 Fabrice Bellard
4  *
5  * This file is part of Libav.
6  *
7  * Libav is free software; you can redistribute it and/or
8  * modify it under the terms of the GNU Lesser General Public
9  * License as published by the Free Software Foundation; either
10  * version 2.1 of the License, or (at your option) any later version.
11  *
12  * Libav is distributed in the hope that it will be useful,
13  * but WITHOUT ANY WARRANTY; without even the implied warranty of
14  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
15  * Lesser General Public License for more details.
16  *
17  * You should have received a copy of the GNU Lesser General Public
18  * License along with Libav; if not, write to the Free Software
19  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
20  */
21
22 #include "config.h"
23 #include <inttypes.h>
24 #include <math.h>
25 #include <limits.h>
26 #include "libavutil/avstring.h"
27 #include "libavutil/colorspace.h"
28 #include "libavutil/mathematics.h"
29 #include "libavutil/pixdesc.h"
30 #include "libavutil/imgutils.h"
31 #include "libavutil/dict.h"
32 #include "libavutil/parseutils.h"
33 #include "libavutil/samplefmt.h"
34 #include "libavutil/time.h"
35 #include "libavformat/avformat.h"
36 #include "libavdevice/avdevice.h"
37 #include "libswscale/swscale.h"
38 #include "libavresample/avresample.h"
39 #include "libavutil/opt.h"
40 #include "libavcodec/avfft.h"
41
42 #if CONFIG_AVFILTER
43 # include "libavfilter/avfilter.h"
44 # include "libavfilter/buffersink.h"
45 # include "libavfilter/buffersrc.h"
46 #endif
47
48 #include "cmdutils.h"
49
50 #include <SDL.h>
51 #include <SDL_thread.h>
52
53 #ifdef __MINGW32__
54 #undef main /* We don't want SDL to override our main() */
55 #endif
56
57 #include <assert.h>
58
59 const char program_name[] = "avplay";
60 const int program_birth_year = 2003;
61
62 #define MAX_QUEUE_SIZE (15 * 1024 * 1024)
63 #define MIN_AUDIOQ_SIZE (20 * 16 * 1024)
64 #define MIN_FRAMES 5
65
66 /* SDL audio buffer size, in samples. Should be small to have precise
67    A/V sync as SDL does not have hardware buffer fullness info. */
68 #define SDL_AUDIO_BUFFER_SIZE 1024
69
70 /* no AV sync correction is done if below the AV sync threshold */
71 #define AV_SYNC_THRESHOLD 0.01
72 /* no AV correction is done if the error is too big */
73 #define AV_NOSYNC_THRESHOLD 10.0
74
75 #define FRAME_SKIP_FACTOR 0.05
76
77 /* maximum audio speed change to get correct sync */
78 #define SAMPLE_CORRECTION_PERCENT_MAX 10
79
80 /* we use about AUDIO_DIFF_AVG_NB A-V differences to compute the average */
81 #define AUDIO_DIFF_AVG_NB   20
82
83 /* NOTE: the size must be big enough to compensate for the hardware audio buffer size */
84 #define SAMPLE_ARRAY_SIZE (2 * 65536)
85
86 static int64_t sws_flags = SWS_BICUBIC;
87
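/* A simple FIFO of demuxed packets, protected by a mutex/condition pair;
 * it is filled by the demuxing thread and drained by the audio, video and
 * subtitle decoding threads. */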
88 typedef struct PacketQueue {
89     AVPacketList *first_pkt, *last_pkt;
90     int nb_packets;
91     int size;
92     int abort_request;
93     SDL_mutex *mutex;
94     SDL_cond *cond;
95 } PacketQueue;
96
97 #define VIDEO_PICTURE_QUEUE_SIZE 2
98 #define SUBPICTURE_QUEUE_SIZE 4
99
100 typedef struct VideoPicture {
101     double pts;             // presentation timestamp for this picture
102     double target_clock;    // av_gettime() time at which this should be displayed ideally
103     int64_t pos;            // byte position in file
104     SDL_Overlay *bmp;
105     int width, height; /* source height & width */
106     int allocated;
107     int reallocate;
108     enum AVPixelFormat pix_fmt;
109
110     AVRational sar;
111 } VideoPicture;
112
113 typedef struct SubPicture {
114     double pts; /* presentation time stamp for this picture */
115     AVSubtitle sub;
116 } SubPicture;
117
118 enum {
119     AV_SYNC_AUDIO_MASTER, /* default choice */
120     AV_SYNC_VIDEO_MASTER,
121     AV_SYNC_EXTERNAL_CLOCK, /* synchronize to an external clock */
122 };
123
124 typedef struct VideoState {
125     SDL_Thread *parse_tid;
126     SDL_Thread *video_tid;
127     SDL_Thread *refresh_tid;
128     AVInputFormat *iformat;
129     int no_background;
130     int abort_request;
131     int paused;
132     int last_paused;
133     int seek_req;
134     int seek_flags;
135     int64_t seek_pos;
136     int64_t seek_rel;
137     int read_pause_return;
138     AVFormatContext *ic;
139
140     int audio_stream;
141
142     int av_sync_type;
143     double external_clock; /* external clock base */
144     int64_t external_clock_time;
145
146     double audio_clock;
147     double audio_diff_cum; /* used for AV difference average computation */
148     double audio_diff_avg_coef;
149     double audio_diff_threshold;
150     int audio_diff_avg_count;
151     AVStream *audio_st;
152     PacketQueue audioq;
153     int audio_hw_buf_size;
154     uint8_t silence_buf[SDL_AUDIO_BUFFER_SIZE];
155     uint8_t *audio_buf;
156     uint8_t *audio_buf1;
157     unsigned int audio_buf_size; /* in bytes */
158     int audio_buf_index; /* in bytes */
159     AVPacket audio_pkt_temp;
160     AVPacket audio_pkt;
161     enum AVSampleFormat sdl_sample_fmt;
162     uint64_t sdl_channel_layout;
163     int sdl_channels;
164     int sdl_sample_rate;
165     enum AVSampleFormat resample_sample_fmt;
166     uint64_t resample_channel_layout;
167     int resample_sample_rate;
168     AVAudioResampleContext *avr;
169     AVFrame *frame;
170
171     int show_audio; /* if true, display audio samples */
172     int16_t sample_array[SAMPLE_ARRAY_SIZE];
173     int sample_array_index;
174     int last_i_start;
175     RDFTContext *rdft;
176     int rdft_bits;
177     FFTSample *rdft_data;
178     int xpos;
179
180     SDL_Thread *subtitle_tid;
181     int subtitle_stream;
182     int subtitle_stream_changed;
183     AVStream *subtitle_st;
184     PacketQueue subtitleq;
185     SubPicture subpq[SUBPICTURE_QUEUE_SIZE];
186     int subpq_size, subpq_rindex, subpq_windex;
187     SDL_mutex *subpq_mutex;
188     SDL_cond *subpq_cond;
189
190     double frame_timer;
191     double frame_last_pts;
192     double frame_last_delay;
193     double video_clock;             // pts of last decoded frame / predicted pts of next decoded frame
194     int video_stream;
195     AVStream *video_st;
196     PacketQueue videoq;
197     double video_current_pts;       // current displayed pts (different from video_clock if frame fifos are used)
198     double video_current_pts_drift; // video_current_pts - time (av_gettime) at which we updated video_current_pts - used to have running video pts
199     int64_t video_current_pos;      // current displayed file pos
200     VideoPicture pictq[VIDEO_PICTURE_QUEUE_SIZE];
201     int pictq_size, pictq_rindex, pictq_windex;
202     SDL_mutex *pictq_mutex;
203     SDL_cond *pictq_cond;
204 #if !CONFIG_AVFILTER
205     struct SwsContext *img_convert_ctx;
206 #endif
207
208     //    QETimer *video_timer;
209     char filename[1024];
210     int width, height, xleft, ytop;
211
212     PtsCorrectionContext pts_ctx;
213
214 #if CONFIG_AVFILTER
215     AVFilterContext *in_video_filter;   // the first filter in the video chain
216     AVFilterContext *out_video_filter;  // the last filter in the video chain
217 #endif
218
219     float skip_frames;
220     float skip_frames_index;
221     int refresh;
222 } VideoState;
223
224 /* options specified by the user */
225 static AVInputFormat *file_iformat;
226 static const char *input_filename;
227 static const char *window_title;
228 static int fs_screen_width;
229 static int fs_screen_height;
230 static int screen_width  = 0;
231 static int screen_height = 0;
232 static int audio_disable;
233 static int video_disable;
234 static int wanted_stream[AVMEDIA_TYPE_NB] = {
235     [AVMEDIA_TYPE_AUDIO]    = -1,
236     [AVMEDIA_TYPE_VIDEO]    = -1,
237     [AVMEDIA_TYPE_SUBTITLE] = -1,
238 };
239 static int seek_by_bytes = -1;
240 static int display_disable;
241 static int show_status = 1;
242 static int av_sync_type = AV_SYNC_AUDIO_MASTER;
243 static int64_t start_time = AV_NOPTS_VALUE;
244 static int64_t duration = AV_NOPTS_VALUE;
245 static int debug_mv = 0;
246 static int step = 0;
247 static int workaround_bugs = 1;
248 static int fast = 0;
249 static int genpts = 0;
250 static int idct = FF_IDCT_AUTO;
251 static enum AVDiscard skip_frame       = AVDISCARD_DEFAULT;
252 static enum AVDiscard skip_idct        = AVDISCARD_DEFAULT;
253 static enum AVDiscard skip_loop_filter = AVDISCARD_DEFAULT;
254 static int error_concealment = 3;
255 static int decoder_reorder_pts = -1;
256 static int autoexit;
257 static int exit_on_keydown;
258 static int exit_on_mousedown;
259 static int loop = 1;
260 static int framedrop = 1;
261 static int infinite_buffer = 0;
262
263 static int rdftspeed = 20;
264 #if CONFIG_AVFILTER
265 static char *vfilters = NULL;
266 #endif
267
268 /* current context */
269 static int is_full_screen;
270 static VideoState *cur_stream;
271 static int64_t audio_callback_time;
272
273 static AVPacket flush_pkt;
274
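/* custom SDL user events posted by the worker threads to the main event
 * loop: picture (re)allocation, display refresh and program quit */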
275 #define FF_ALLOC_EVENT   (SDL_USEREVENT)
276 #define FF_REFRESH_EVENT (SDL_USEREVENT + 1)
277 #define FF_QUIT_EVENT    (SDL_USEREVENT + 2)
278
279 static SDL_Surface *screen;
280
281 static int packet_queue_put(PacketQueue *q, AVPacket *pkt);
282
283 /* packet queue handling */
284 static void packet_queue_init(PacketQueue *q)
285 {
286     memset(q, 0, sizeof(PacketQueue));
287     q->mutex = SDL_CreateMutex();
288     q->cond = SDL_CreateCond();
289     packet_queue_put(q, &flush_pkt);
290 }
291
292 static void packet_queue_flush(PacketQueue *q)
293 {
294     AVPacketList *pkt, *pkt1;
295
296     SDL_LockMutex(q->mutex);
297     for (pkt = q->first_pkt; pkt != NULL; pkt = pkt1) {
298         pkt1 = pkt->next;
299         av_free_packet(&pkt->pkt);
300         av_freep(&pkt);
301     }
302     q->last_pkt = NULL;
303     q->first_pkt = NULL;
304     q->nb_packets = 0;
305     q->size = 0;
306     SDL_UnlockMutex(q->mutex);
307 }
308
309 static void packet_queue_end(PacketQueue *q)
310 {
311     packet_queue_flush(q);
312     SDL_DestroyMutex(q->mutex);
313     SDL_DestroyCond(q->cond);
314 }
315
316 static int packet_queue_put(PacketQueue *q, AVPacket *pkt)
317 {
318     AVPacketList *pkt1;
319
320     /* duplicate the packet */
321     if (pkt != &flush_pkt && av_dup_packet(pkt) < 0)
322         return -1;
323
324     pkt1 = av_malloc(sizeof(AVPacketList));
325     if (!pkt1)
326         return -1;
327     pkt1->pkt = *pkt;
328     pkt1->next = NULL;
329
330
331     SDL_LockMutex(q->mutex);
332
333     if (!q->last_pkt)
334
335         q->first_pkt = pkt1;
336     else
337         q->last_pkt->next = pkt1;
338     q->last_pkt = pkt1;
339     q->nb_packets++;
340     q->size += pkt1->pkt.size + sizeof(*pkt1);
341     /* XXX: should duplicate packet data in DV case */
342     SDL_CondSignal(q->cond);
343
344     SDL_UnlockMutex(q->mutex);
345     return 0;
346 }
347
348 static void packet_queue_abort(PacketQueue *q)
349 {
350     SDL_LockMutex(q->mutex);
351
352     q->abort_request = 1;
353
354     SDL_CondSignal(q->cond);
355
356     SDL_UnlockMutex(q->mutex);
357 }
358
359 /* return < 0 if aborted, 0 if no packet and > 0 if packet.  */
360 static int packet_queue_get(PacketQueue *q, AVPacket *pkt, int block)
361 {
362     AVPacketList *pkt1;
363     int ret;
364
365     SDL_LockMutex(q->mutex);
366
367     for (;;) {
368         if (q->abort_request) {
369             ret = -1;
370             break;
371         }
372
373         pkt1 = q->first_pkt;
374         if (pkt1) {
375             q->first_pkt = pkt1->next;
376             if (!q->first_pkt)
377                 q->last_pkt = NULL;
378             q->nb_packets--;
379             q->size -= pkt1->pkt.size + sizeof(*pkt1);
380             *pkt = pkt1->pkt;
381             av_free(pkt1);
382             ret = 1;
383             break;
384         } else if (!block) {
385             ret = 0;
386             break;
387         } else {
388             SDL_CondWait(q->cond, q->mutex);
389         }
390     }
391     SDL_UnlockMutex(q->mutex);
392     return ret;
393 }
394
395 static inline void fill_rectangle(SDL_Surface *screen,
396                                   int x, int y, int w, int h, int color)
397 {
398     SDL_Rect rect;
399     rect.x = x;
400     rect.y = y;
401     rect.w = w;
402     rect.h = h;
403     SDL_FillRect(screen, &rect, color);
404 }
405
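/* Blend newp into oldp with alpha a. newp is expected to be pre-scaled by
 * 2^s (e.g. the sum of 2^s chroma samples); the (255 << s) divisor
 * normalizes the result back to 8 bits. */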
406 #define ALPHA_BLEND(a, oldp, newp, s)\
407 ((((oldp << s) * (255 - (a))) + (newp * (a))) / (255 << s))
408
409 #define RGBA_IN(r, g, b, a, s)\
410 {\
411     unsigned int v = ((const uint32_t *)(s))[0];\
412     a = (v >> 24) & 0xff;\
413     r = (v >> 16) & 0xff;\
414     g = (v >> 8) & 0xff;\
415     b = v & 0xff;\
416 }
417
418 #define YUVA_IN(y, u, v, a, s, pal)\
419 {\
420     unsigned int val = ((const uint32_t *)(pal))[*(const uint8_t*)(s)];\
421     a = (val >> 24) & 0xff;\
422     y = (val >> 16) & 0xff;\
423     u = (val >> 8) & 0xff;\
424     v = val & 0xff;\
425 }
426
427 #define YUVA_OUT(d, y, u, v, a)\
428 {\
429     ((uint32_t *)(d))[0] = (a << 24) | (y << 16) | (u << 8) | v;\
430 }
431
432
433 #define BPP 1
434
435 static void blend_subrect(AVPicture *dst, const AVSubtitleRect *rect, int imgw, int imgh)
436 {
437     int wrap, wrap3, width2, skip2;
438     int y, u, v, a, u1, v1, a1, w, h;
439     uint8_t *lum, *cb, *cr;
440     const uint8_t *p;
441     const uint32_t *pal;
442     int dstx, dsty, dstw, dsth;
443
444     dstw = av_clip(rect->w, 0, imgw);
445     dsth = av_clip(rect->h, 0, imgh);
446     dstx = av_clip(rect->x, 0, imgw - dstw);
447     dsty = av_clip(rect->y, 0, imgh - dsth);
448     lum = dst->data[0] + dsty * dst->linesize[0];
449     cb  = dst->data[1] + (dsty >> 1) * dst->linesize[1];
450     cr  = dst->data[2] + (dsty >> 1) * dst->linesize[2];
451
452     width2 = ((dstw + 1) >> 1) + (dstx & ~dstw & 1);
453     skip2 = dstx >> 1;
454     wrap = dst->linesize[0];
455     wrap3 = rect->pict.linesize[0];
456     p = rect->pict.data[0];
457     pal = (const uint32_t *)rect->pict.data[1];  /* Now in YCrCb! */
458
459     if (dsty & 1) {
460         lum += dstx;
461         cb += skip2;
462         cr += skip2;
463
464         if (dstx & 1) {
465             YUVA_IN(y, u, v, a, p, pal);
466             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
467             cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
468             cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
469             cb++;
470             cr++;
471             lum++;
472             p += BPP;
473         }
474         for (w = dstw - (dstx & 1); w >= 2; w -= 2) {
475             YUVA_IN(y, u, v, a, p, pal);
476             u1 = u;
477             v1 = v;
478             a1 = a;
479             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
480
481             YUVA_IN(y, u, v, a, p + BPP, pal);
482             u1 += u;
483             v1 += v;
484             a1 += a;
485             lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
486             cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
487             cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
488             cb++;
489             cr++;
490             p += 2 * BPP;
491             lum += 2;
492         }
493         if (w) {
494             YUVA_IN(y, u, v, a, p, pal);
495             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
496             cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
497             cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
498             p++;
499             lum++;
500         }
501         p += wrap3 - dstw * BPP;
502         lum += wrap - dstw - dstx;
503         cb += dst->linesize[1] - width2 - skip2;
504         cr += dst->linesize[2] - width2 - skip2;
505     }
506     for (h = dsth - (dsty & 1); h >= 2; h -= 2) {
507         lum += dstx;
508         cb += skip2;
509         cr += skip2;
510
511         if (dstx & 1) {
512             YUVA_IN(y, u, v, a, p, pal);
513             u1 = u;
514             v1 = v;
515             a1 = a;
516             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
517             p += wrap3;
518             lum += wrap;
519             YUVA_IN(y, u, v, a, p, pal);
520             u1 += u;
521             v1 += v;
522             a1 += a;
523             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
524             cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
525             cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
526             cb++;
527             cr++;
528             p += -wrap3 + BPP;
529             lum += -wrap + 1;
530         }
531         for (w = dstw - (dstx & 1); w >= 2; w -= 2) {
532             YUVA_IN(y, u, v, a, p, pal);
533             u1 = u;
534             v1 = v;
535             a1 = a;
536             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
537
538             YUVA_IN(y, u, v, a, p + BPP, pal);
539             u1 += u;
540             v1 += v;
541             a1 += a;
542             lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
543             p += wrap3;
544             lum += wrap;
545
546             YUVA_IN(y, u, v, a, p, pal);
547             u1 += u;
548             v1 += v;
549             a1 += a;
550             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
551
552             YUVA_IN(y, u, v, a, p + BPP, pal);
553             u1 += u;
554             v1 += v;
555             a1 += a;
556             lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
557
558             cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 2);
559             cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 2);
560
561             cb++;
562             cr++;
563             p += -wrap3 + 2 * BPP;
564             lum += -wrap + 2;
565         }
566         if (w) {
567             YUVA_IN(y, u, v, a, p, pal);
568             u1 = u;
569             v1 = v;
570             a1 = a;
571             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
572             p += wrap3;
573             lum += wrap;
574             YUVA_IN(y, u, v, a, p, pal);
575             u1 += u;
576             v1 += v;
577             a1 += a;
578             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
579             cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
580             cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
581             cb++;
582             cr++;
583             p += -wrap3 + BPP;
584             lum += -wrap + 1;
585         }
586         p += wrap3 + (wrap3 - dstw * BPP);
587         lum += wrap + (wrap - dstw - dstx);
588         cb += dst->linesize[1] - width2 - skip2;
589         cr += dst->linesize[2] - width2 - skip2;
590     }
591     /* handle odd height */
592     if (h) {
593         lum += dstx;
594         cb += skip2;
595         cr += skip2;
596
597         if (dstx & 1) {
598             YUVA_IN(y, u, v, a, p, pal);
599             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
600             cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
601             cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
602             cb++;
603             cr++;
604             lum++;
605             p += BPP;
606         }
607         for (w = dstw - (dstx & 1); w >= 2; w -= 2) {
608             YUVA_IN(y, u, v, a, p, pal);
609             u1 = u;
610             v1 = v;
611             a1 = a;
612             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
613
614             YUVA_IN(y, u, v, a, p + BPP, pal);
615             u1 += u;
616             v1 += v;
617             a1 += a;
618             lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
619             cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u, 1);
620             cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v, 1);
621             cb++;
622             cr++;
623             p += 2 * BPP;
624             lum += 2;
625         }
626         if (w) {
627             YUVA_IN(y, u, v, a, p, pal);
628             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
629             cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
630             cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
631         }
632     }
633 }
634
635 static void free_subpicture(SubPicture *sp)
636 {
637     avsubtitle_free(&sp->sub);
638 }
639
640 static void video_image_display(VideoState *is)
641 {
642     VideoPicture *vp;
643     SubPicture *sp;
644     AVPicture pict;
645     float aspect_ratio;
646     int width, height, x, y;
647     SDL_Rect rect;
648     int i;
649
650     vp = &is->pictq[is->pictq_rindex];
651     if (vp->bmp) {
652 #if CONFIG_AVFILTER
653          if (!vp->sar.num)
654              aspect_ratio = 0;
655          else
656              aspect_ratio = av_q2d(vp->sar);
657 #else
658
659         /* XXX: use variable in the frame */
660         if (is->video_st->sample_aspect_ratio.num)
661             aspect_ratio = av_q2d(is->video_st->sample_aspect_ratio);
662         else if (is->video_st->codec->sample_aspect_ratio.num)
663             aspect_ratio = av_q2d(is->video_st->codec->sample_aspect_ratio);
664         else
665             aspect_ratio = 0;
666 #endif
667         if (aspect_ratio <= 0.0)
668             aspect_ratio = 1.0;
669         aspect_ratio *= (float)vp->width / (float)vp->height;
670
671         if (is->subtitle_st)
672         {
673             if (is->subpq_size > 0)
674             {
675                 sp = &is->subpq[is->subpq_rindex];
676
677                 if (vp->pts >= sp->pts + ((float) sp->sub.start_display_time / 1000))
678                 {
679                     SDL_LockYUVOverlay (vp->bmp);
680
681                     pict.data[0] = vp->bmp->pixels[0];
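                    /* SDL's YV12 overlay stores the chroma planes in V, U
                     * order while the AVPicture layout is U, V, hence the
                     * swapped indices for planes 1 and 2 */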
682                     pict.data[1] = vp->bmp->pixels[2];
683                     pict.data[2] = vp->bmp->pixels[1];
684
685                     pict.linesize[0] = vp->bmp->pitches[0];
686                     pict.linesize[1] = vp->bmp->pitches[2];
687                     pict.linesize[2] = vp->bmp->pitches[1];
688
689                     for (i = 0; i < sp->sub.num_rects; i++)
690                         blend_subrect(&pict, sp->sub.rects[i],
691                                       vp->bmp->w, vp->bmp->h);
692
693                     SDL_UnlockYUVOverlay (vp->bmp);
694                 }
695             }
696         }
697
698
699         /* XXX: we assume the screen has a 1.0 pixel aspect ratio */
700         height = is->height;
701         width = ((int)rint(height * aspect_ratio)) & ~1;
702         if (width > is->width) {
703             width = is->width;
704             height = ((int)rint(width / aspect_ratio)) & ~1;
705         }
706         x = (is->width - width) / 2;
707         y = (is->height - height) / 2;
708         is->no_background = 0;
709         rect.x = is->xleft + x;
710         rect.y = is->ytop  + y;
711         rect.w = width;
712         rect.h = height;
713         SDL_DisplayYUVOverlay(vp->bmp, &rect);
714     }
715 }
716
717 /* get the current audio output buffer size, in bytes. With SDL, we
718    cannot have precise information */
719 static int audio_write_get_buf_size(VideoState *is)
720 {
721     return is->audio_buf_size - is->audio_buf_index;
722 }
723
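/* mathematical modulo: the result is always in [0, b), even when a is
 * negative (b is assumed to be positive) */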
724 static inline int compute_mod(int a, int b)
725 {
726     a = a % b;
727     if (a >= 0)
728         return a;
729     else
730         return a + b;
731 }
732
733 static void video_audio_display(VideoState *s)
734 {
735     int i, i_start, x, y1, y, ys, delay, n, nb_display_channels;
736     int ch, channels, h, h2, bgcolor, fgcolor;
737     int16_t time_diff;
738     int rdft_bits, nb_freq;
739
740     for (rdft_bits = 1; (1 << rdft_bits) < 2 * s->height; rdft_bits++)
741         ;
742     nb_freq = 1 << (rdft_bits - 1);
743
744     /* compute the display index: center on the currently output samples */
745     channels = s->sdl_channels;
746     nb_display_channels = channels;
747     if (!s->paused) {
748         int data_used = s->show_audio == 1 ? s->width : (2 * nb_freq);
749         n = 2 * channels;
750         delay = audio_write_get_buf_size(s);
751         delay /= n;
752
753         /* to be more precise, we take into account the time spent since
754            the last buffer computation */
755         if (audio_callback_time) {
756             time_diff = av_gettime() - audio_callback_time;
757             delay -= (time_diff * s->sdl_sample_rate) / 1000000;
758         }
759
760         delay += 2 * data_used;
761         if (delay < data_used)
762             delay = data_used;
763
764         i_start= x = compute_mod(s->sample_array_index - delay * channels, SAMPLE_ARRAY_SIZE);
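        /* This scan appears to act like an oscilloscope trigger: it looks
         * backwards for a falling zero crossing (b and c with opposite signs)
         * with the steepest slope (largest a - d), so the waveform is drawn
         * from a similar point on every refresh instead of jittering. */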
765         if (s->show_audio == 1) {
766             h = INT_MIN;
767             for (i = 0; i < 1000; i += channels) {
768                 int idx = (SAMPLE_ARRAY_SIZE + x - i) % SAMPLE_ARRAY_SIZE;
769                 int a = s->sample_array[idx];
770                 int b = s->sample_array[(idx + 4 * channels) % SAMPLE_ARRAY_SIZE];
771                 int c = s->sample_array[(idx + 5 * channels) % SAMPLE_ARRAY_SIZE];
772                 int d = s->sample_array[(idx + 9 * channels) % SAMPLE_ARRAY_SIZE];
773                 int score = a - d;
774                 if (h < score && (b ^ c) < 0) {
775                     h = score;
776                     i_start = idx;
777                 }
778             }
779         }
780
781         s->last_i_start = i_start;
782     } else {
783         i_start = s->last_i_start;
784     }
785
786     bgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0x00);
787     if (s->show_audio == 1) {
788         fill_rectangle(screen,
789                        s->xleft, s->ytop, s->width, s->height,
790                        bgcolor);
791
792         fgcolor = SDL_MapRGB(screen->format, 0xff, 0xff, 0xff);
793
794         /* total height for one channel */
795         h = s->height / nb_display_channels;
796         /* graph height / 2 */
797         h2 = (h * 9) / 20;
798         for (ch = 0; ch < nb_display_channels; ch++) {
799             i = i_start + ch;
800             y1 = s->ytop + ch * h + (h / 2); /* position of center line */
801             for (x = 0; x < s->width; x++) {
802                 y = (s->sample_array[i] * h2) >> 15;
803                 if (y < 0) {
804                     y = -y;
805                     ys = y1 - y;
806                 } else {
807                     ys = y1;
808                 }
809                 fill_rectangle(screen,
810                                s->xleft + x, ys, 1, y,
811                                fgcolor);
812                 i += channels;
813                 if (i >= SAMPLE_ARRAY_SIZE)
814                     i -= SAMPLE_ARRAY_SIZE;
815             }
816         }
817
818         fgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0xff);
819
820         for (ch = 1; ch < nb_display_channels; ch++) {
821             y = s->ytop + ch * h;
822             fill_rectangle(screen,
823                            s->xleft, y, s->width, 1,
824                            fgcolor);
825         }
826         SDL_UpdateRect(screen, s->xleft, s->ytop, s->width, s->height);
827     } else {
828         nb_display_channels= FFMIN(nb_display_channels, 2);
829         if (rdft_bits != s->rdft_bits) {
830             av_rdft_end(s->rdft);
831             av_free(s->rdft_data);
832             s->rdft = av_rdft_init(rdft_bits, DFT_R2C);
833             s->rdft_bits = rdft_bits;
834             s->rdft_data = av_malloc(4 * nb_freq * sizeof(*s->rdft_data));
835         }
836         {
837             FFTSample *data[2];
838             for (ch = 0; ch < nb_display_channels; ch++) {
839                 data[ch] = s->rdft_data + 2 * nb_freq * ch;
840                 i = i_start + ch;
841                 for (x = 0; x < 2 * nb_freq; x++) {
842                     double w = (x-nb_freq) * (1.0 / nb_freq);
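                    /* weight the samples with a parabolic (Welch) window,
                     * 1 - w^2, before the real FFT */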
843                     data[ch][x] = s->sample_array[i] * (1.0 - w * w);
844                     i += channels;
845                     if (i >= SAMPLE_ARRAY_SIZE)
846                         i -= SAMPLE_ARRAY_SIZE;
847                 }
848                 av_rdft_calc(s->rdft, data[ch]);
849             }
850             // Least efficient way to do this; we should of course access the data directly, but it is more than fast enough
851             for (y = 0; y < s->height; y++) {
852                 double w = 1 / sqrt(nb_freq);
853                 int a = sqrt(w * sqrt(data[0][2 * y + 0] * data[0][2 * y + 0] + data[0][2 * y + 1] * data[0][2 * y + 1]));
854                 int b = (nb_display_channels == 2 ) ? sqrt(w * sqrt(data[1][2 * y + 0] * data[1][2 * y + 0]
855                        + data[1][2 * y + 1] * data[1][2 * y + 1])) : a;
856                 a = FFMIN(a, 255);
857                 b = FFMIN(b, 255);
858                 fgcolor = SDL_MapRGB(screen->format, a, b, (a + b) / 2);
859
860                 fill_rectangle(screen,
861                             s->xpos, s->height-y, 1, 1,
862                             fgcolor);
863             }
864         }
865         SDL_UpdateRect(screen, s->xpos, s->ytop, 1, s->height);
866         s->xpos++;
867         if (s->xpos >= s->width)
868             s->xpos= s->xleft;
869     }
870 }
871
872 static int video_open(VideoState *is)
873 {
874     int flags = SDL_HWSURFACE | SDL_ASYNCBLIT | SDL_HWACCEL;
875     int w,h;
876
877     if (is_full_screen) flags |= SDL_FULLSCREEN;
878     else                flags |= SDL_RESIZABLE;
879
880     if (is_full_screen && fs_screen_width) {
881         w = fs_screen_width;
882         h = fs_screen_height;
883     } else if (!is_full_screen && screen_width) {
884         w = screen_width;
885         h = screen_height;
886 #if CONFIG_AVFILTER
887     } else if (is->out_video_filter && is->out_video_filter->inputs[0]) {
888         w = is->out_video_filter->inputs[0]->w;
889         h = is->out_video_filter->inputs[0]->h;
890 #else
891     } else if (is->video_st && is->video_st->codec->width) {
892         w = is->video_st->codec->width;
893         h = is->video_st->codec->height;
894 #endif
895     } else {
896         w = 640;
897         h = 480;
898     }
899     if (screen && is->width == screen->w && screen->w == w
900        && is->height== screen->h && screen->h == h)
901         return 0;
902
903 #if defined(__APPLE__) && !SDL_VERSION_ATLEAST(1, 2, 14)
904     /* setting bits_per_pixel = 0 or 32 causes blank video on OS X and older SDL */
905     screen = SDL_SetVideoMode(w, h, 24, flags);
906 #else
907     screen = SDL_SetVideoMode(w, h, 0, flags);
908 #endif
909     if (!screen) {
910         fprintf(stderr, "SDL: could not set video mode - exiting\n");
911         return -1;
912     }
913     if (!window_title)
914         window_title = input_filename;
915     SDL_WM_SetCaption(window_title, window_title);
916
917     is->width  = screen->w;
918     is->height = screen->h;
919
920     return 0;
921 }
922
923 /* display the current picture, if any */
924 static void video_display(VideoState *is)
925 {
926     if (!screen)
927         video_open(cur_stream);
928     if (is->audio_st && is->show_audio)
929         video_audio_display(is);
930     else if (is->video_st)
931         video_image_display(is);
932 }
933
934 static int refresh_thread(void *opaque)
935 {
936     VideoState *is= opaque;
937     while (!is->abort_request) {
938         SDL_Event event;
939         event.type = FF_REFRESH_EVENT;
940         event.user.data1 = opaque;
941         if (!is->refresh) {
942             is->refresh = 1;
943             SDL_PushEvent(&event);
944         }
945         av_usleep(is->audio_st && is->show_audio ? rdftspeed * 1000 : 5000); // FIXME: ideally we should wait for the correct time, but SDL's event passing is so slow it would be silly
946     }
947     return 0;
948 }
949
950 /* get the current audio clock value */
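/* (the base audio clock corrected by the playout time of the data that is
 * still waiting in the output buffer) */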
951 static double get_audio_clock(VideoState *is)
952 {
953     double pts;
954     int hw_buf_size, bytes_per_sec;
955     pts = is->audio_clock;
956     hw_buf_size = audio_write_get_buf_size(is);
957     bytes_per_sec = 0;
958     if (is->audio_st) {
959         bytes_per_sec = is->sdl_sample_rate * is->sdl_channels *
960                         av_get_bytes_per_sample(is->sdl_sample_fmt);
961     }
962     if (bytes_per_sec)
963         pts -= (double)hw_buf_size / bytes_per_sec;
964     return pts;
965 }
966
967 /* get the current video clock value */
968 static double get_video_clock(VideoState *is)
969 {
970     if (is->paused) {
971         return is->video_current_pts;
972     } else {
973         return is->video_current_pts_drift + av_gettime() / 1000000.0;
974     }
975 }
976
977 /* get the current external clock value */
978 static double get_external_clock(VideoState *is)
979 {
980     int64_t ti;
981     ti = av_gettime();
982     return is->external_clock + ((ti - is->external_clock_time) * 1e-6);
983 }
984
985 /* get the current master clock value */
986 static double get_master_clock(VideoState *is)
987 {
988     double val;
989
990     if (is->av_sync_type == AV_SYNC_VIDEO_MASTER) {
991         if (is->video_st)
992             val = get_video_clock(is);
993         else
994             val = get_audio_clock(is);
995     } else if (is->av_sync_type == AV_SYNC_AUDIO_MASTER) {
996         if (is->audio_st)
997             val = get_audio_clock(is);
998         else
999             val = get_video_clock(is);
1000     } else {
1001         val = get_external_clock(is);
1002     }
1003     return val;
1004 }
1005
1006 /* seek in the stream */
1007 static void stream_seek(VideoState *is, int64_t pos, int64_t rel, int seek_by_bytes)
1008 {
1009     if (!is->seek_req) {
1010         is->seek_pos = pos;
1011         is->seek_rel = rel;
1012         is->seek_flags &= ~AVSEEK_FLAG_BYTE;
1013         if (seek_by_bytes)
1014             is->seek_flags |= AVSEEK_FLAG_BYTE;
1015         is->seek_req = 1;
1016     }
1017 }
1018
1019 /* pause or resume the video */
1020 static void stream_pause(VideoState *is)
1021 {
1022     if (is->paused) {
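        /* on resume, advance frame_timer by (roughly) the time spent paused
         * so that the video does not try to catch up */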
1023         is->frame_timer += av_gettime() / 1000000.0 + is->video_current_pts_drift - is->video_current_pts;
1024         if (is->read_pause_return != AVERROR(ENOSYS)) {
1025             is->video_current_pts = is->video_current_pts_drift + av_gettime() / 1000000.0;
1026         }
1027         is->video_current_pts_drift = is->video_current_pts - av_gettime() / 1000000.0;
1028     }
1029     is->paused = !is->paused;
1030 }
1031
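/* Compute the time at which the frame with pts frame_current_pts should be
 * displayed: the nominal inter-frame delay is set to zero or doubled as
 * needed to follow the master clock whenever video is not the master. */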
1032 static double compute_target_time(double frame_current_pts, VideoState *is)
1033 {
1034     double delay, sync_threshold, diff;
1035
1036     /* compute nominal delay */
1037     delay = frame_current_pts - is->frame_last_pts;
1038     if (delay <= 0 || delay >= 10.0) {
1039         /* if incorrect delay, use previous one */
1040         delay = is->frame_last_delay;
1041     } else {
1042         is->frame_last_delay = delay;
1043     }
1044     is->frame_last_pts = frame_current_pts;
1045
1046     /* update delay to follow master synchronisation source */
1047     if (((is->av_sync_type == AV_SYNC_AUDIO_MASTER && is->audio_st) ||
1048          is->av_sync_type == AV_SYNC_EXTERNAL_CLOCK)) {
1049         /* if video is slave, we try to correct big delays by
1050            duplicating or deleting a frame */
1051         diff = get_video_clock(is) - get_master_clock(is);
1052
1053         /* skip or repeat frame. We take into account the
1054            delay to compute the threshold. I still don't know
1055            if it is the best guess */
1056         sync_threshold = FFMAX(AV_SYNC_THRESHOLD, delay);
1057         if (fabs(diff) < AV_NOSYNC_THRESHOLD) {
1058             if (diff <= -sync_threshold)
1059                 delay = 0;
1060             else if (diff >= sync_threshold)
1061                 delay = 2 * delay;
1062         }
1063     }
1064     is->frame_timer += delay;
1065
1066     av_dlog(NULL, "video: delay=%0.3f pts=%0.3f A-V=%f\n",
1067             delay, frame_current_pts, -diff);
1068
1069     return is->frame_timer;
1070 }
1071
1072 /* called to display each frame */
1073 static void video_refresh_timer(void *opaque)
1074 {
1075     VideoState *is = opaque;
1076     VideoPicture *vp;
1077
1078     SubPicture *sp, *sp2;
1079
1080     if (is->video_st) {
1081 retry:
1082         if (is->pictq_size == 0) {
1083             // nothing to do, no picture to display in the queue
1084         } else {
1085             double time = av_gettime() / 1000000.0;
1086             double next_target;
1087             /* dequeue the picture */
1088             vp = &is->pictq[is->pictq_rindex];
1089
1090             if (time < vp->target_clock)
1091                 return;
1092             /* update current video pts */
1093             is->video_current_pts = vp->pts;
1094             is->video_current_pts_drift = is->video_current_pts - time;
1095             is->video_current_pos = vp->pos;
1096             if (is->pictq_size > 1) {
1097                 VideoPicture *nextvp = &is->pictq[(is->pictq_rindex + 1) % VIDEO_PICTURE_QUEUE_SIZE];
1098                 assert(nextvp->target_clock >= vp->target_clock);
1099                 next_target= nextvp->target_clock;
1100             } else {
1101                 next_target = vp->target_clock + is->video_clock - vp->pts; // FIXME pass durations cleanly
1102             }
1103             if (framedrop && time > next_target) {
1104                 is->skip_frames *= 1.0 + FRAME_SKIP_FACTOR;
1105                 if (is->pictq_size > 1 || time > next_target + 0.5) {
1106                     /* update queue size and signal for next picture */
1107                     if (++is->pictq_rindex == VIDEO_PICTURE_QUEUE_SIZE)
1108                         is->pictq_rindex = 0;
1109
1110                     SDL_LockMutex(is->pictq_mutex);
1111                     is->pictq_size--;
1112                     SDL_CondSignal(is->pictq_cond);
1113                     SDL_UnlockMutex(is->pictq_mutex);
1114                     goto retry;
1115                 }
1116             }
1117
1118             if (is->subtitle_st) {
1119                 if (is->subtitle_stream_changed) {
1120                     SDL_LockMutex(is->subpq_mutex);
1121
1122                     while (is->subpq_size) {
1123                         free_subpicture(&is->subpq[is->subpq_rindex]);
1124
1125                         /* update queue size and signal for next picture */
1126                         if (++is->subpq_rindex == SUBPICTURE_QUEUE_SIZE)
1127                             is->subpq_rindex = 0;
1128
1129                         is->subpq_size--;
1130                     }
1131                     is->subtitle_stream_changed = 0;
1132
1133                     SDL_CondSignal(is->subpq_cond);
1134                     SDL_UnlockMutex(is->subpq_mutex);
1135                 } else {
1136                     if (is->subpq_size > 0) {
1137                         sp = &is->subpq[is->subpq_rindex];
1138
1139                         if (is->subpq_size > 1)
1140                             sp2 = &is->subpq[(is->subpq_rindex + 1) % SUBPICTURE_QUEUE_SIZE];
1141                         else
1142                             sp2 = NULL;
1143
1144                         if ((is->video_current_pts > (sp->pts + ((float) sp->sub.end_display_time / 1000)))
1145                                 || (sp2 && is->video_current_pts > (sp2->pts + ((float) sp2->sub.start_display_time / 1000))))
1146                         {
1147                             free_subpicture(sp);
1148
1149                             /* update queue size and signal for next picture */
1150                             if (++is->subpq_rindex == SUBPICTURE_QUEUE_SIZE)
1151                                 is->subpq_rindex = 0;
1152
1153                             SDL_LockMutex(is->subpq_mutex);
1154                             is->subpq_size--;
1155                             SDL_CondSignal(is->subpq_cond);
1156                             SDL_UnlockMutex(is->subpq_mutex);
1157                         }
1158                     }
1159                 }
1160             }
1161
1162             /* display picture */
1163             if (!display_disable)
1164                 video_display(is);
1165
1166             /* update queue size and signal for next picture */
1167             if (++is->pictq_rindex == VIDEO_PICTURE_QUEUE_SIZE)
1168                 is->pictq_rindex = 0;
1169
1170             SDL_LockMutex(is->pictq_mutex);
1171             is->pictq_size--;
1172             SDL_CondSignal(is->pictq_cond);
1173             SDL_UnlockMutex(is->pictq_mutex);
1174         }
1175     } else if (is->audio_st) {
1176         /* draw the next audio frame */
1177
1178         /* if there is only an audio stream, then display the audio bars (better
1179            than nothing, just to test the implementation) */
1180
1181         /* display picture */
1182         if (!display_disable)
1183             video_display(is);
1184     }
1185     if (show_status) {
1186         static int64_t last_time;
1187         int64_t cur_time;
1188         int aqsize, vqsize, sqsize;
1189         double av_diff;
1190
1191         cur_time = av_gettime();
1192         if (!last_time || (cur_time - last_time) >= 30000) {
1193             aqsize = 0;
1194             vqsize = 0;
1195             sqsize = 0;
1196             if (is->audio_st)
1197                 aqsize = is->audioq.size;
1198             if (is->video_st)
1199                 vqsize = is->videoq.size;
1200             if (is->subtitle_st)
1201                 sqsize = is->subtitleq.size;
1202             av_diff = 0;
1203             if (is->audio_st && is->video_st)
1204                 av_diff = get_audio_clock(is) - get_video_clock(is);
1205             printf("%7.2f A-V:%7.3f s:%3.1f aq=%5dKB vq=%5dKB sq=%5dB f=%"PRId64"/%"PRId64"   \r",
1206                    get_master_clock(is), av_diff, FFMAX(is->skip_frames - 1, 0), aqsize / 1024,
1207                    vqsize / 1024, sqsize, is->pts_ctx.num_faulty_dts, is->pts_ctx.num_faulty_pts);
1208             fflush(stdout);
1209             last_time = cur_time;
1210         }
1211     }
1212 }
1213
1214 static void stream_close(VideoState *is)
1215 {
1216     VideoPicture *vp;
1217     int i;
1218     /* XXX: use a special url_shutdown call to abort parse cleanly */
1219     is->abort_request = 1;
1220     SDL_WaitThread(is->parse_tid, NULL);
1221     SDL_WaitThread(is->refresh_tid, NULL);
1222
1223     /* free all pictures */
1224     for (i = 0; i < VIDEO_PICTURE_QUEUE_SIZE; i++) {
1225         vp = &is->pictq[i];
1226         if (vp->bmp) {
1227             SDL_FreeYUVOverlay(vp->bmp);
1228             vp->bmp = NULL;
1229         }
1230     }
1231     SDL_DestroyMutex(is->pictq_mutex);
1232     SDL_DestroyCond(is->pictq_cond);
1233     SDL_DestroyMutex(is->subpq_mutex);
1234     SDL_DestroyCond(is->subpq_cond);
1235 #if !CONFIG_AVFILTER
1236     if (is->img_convert_ctx)
1237         sws_freeContext(is->img_convert_ctx);
1238 #endif
1239     av_free(is);
1240 }
1241
1242 static void do_exit(void)
1243 {
1244     if (cur_stream) {
1245         stream_close(cur_stream);
1246         cur_stream = NULL;
1247     }
1248     uninit_opts();
1249     avformat_network_deinit();
1250     if (show_status)
1251         printf("\n");
1252     SDL_Quit();
1253     av_log(NULL, AV_LOG_QUIET, "");
1254     exit(0);
1255 }
1256
1257 /* allocate a picture (this needs to be done in the main thread to avoid
1258    potential locking problems) */
1259 static void alloc_picture(void *opaque)
1260 {
1261     VideoState *is = opaque;
1262     VideoPicture *vp;
1263
1264     vp = &is->pictq[is->pictq_windex];
1265
1266     if (vp->bmp)
1267         SDL_FreeYUVOverlay(vp->bmp);
1268
1269 #if CONFIG_AVFILTER
1270     vp->width   = is->out_video_filter->inputs[0]->w;
1271     vp->height  = is->out_video_filter->inputs[0]->h;
1272     vp->pix_fmt = is->out_video_filter->inputs[0]->format;
1273 #else
1274     vp->width   = is->video_st->codec->width;
1275     vp->height  = is->video_st->codec->height;
1276     vp->pix_fmt = is->video_st->codec->pix_fmt;
1277 #endif
1278
1279     vp->bmp = SDL_CreateYUVOverlay(vp->width, vp->height,
1280                                    SDL_YV12_OVERLAY,
1281                                    screen);
1282     if (!vp->bmp || vp->bmp->pitches[0] < vp->width) {
1283         /* SDL allocates a buffer smaller than requested if the video
1284          * overlay hardware is unable to support the requested size. */
1285         fprintf(stderr, "Error: the video system does not support an image\n"
1286                         "size of %dx%d pixels. Try using -vf \"scale=w:h\"\n"
1287                         "to reduce the image size.\n", vp->width, vp->height );
1288         do_exit();
1289     }
1290
1291     SDL_LockMutex(is->pictq_mutex);
1292     vp->allocated = 1;
1293     SDL_CondSignal(is->pictq_cond);
1294     SDL_UnlockMutex(is->pictq_mutex);
1295 }
1296
1297 /* The 'pts' parameter is the dts of the packet / pts of the frame and
1298  * guessed if not known. */
1299 static int queue_picture(VideoState *is, AVFrame *src_frame, double pts, int64_t pos)
1300 {
1301     VideoPicture *vp;
1302 #if CONFIG_AVFILTER
1303     AVPicture pict_src;
1304 #else
1305     int dst_pix_fmt = AV_PIX_FMT_YUV420P;
1306 #endif
1307     /* wait until we have space to put a new picture */
1308     SDL_LockMutex(is->pictq_mutex);
1309
1310     if (is->pictq_size >= VIDEO_PICTURE_QUEUE_SIZE && !is->refresh)
1311         is->skip_frames = FFMAX(1.0 - FRAME_SKIP_FACTOR, is->skip_frames * (1.0 - FRAME_SKIP_FACTOR));
1312
1313     while (is->pictq_size >= VIDEO_PICTURE_QUEUE_SIZE &&
1314            !is->videoq.abort_request) {
1315         SDL_CondWait(is->pictq_cond, is->pictq_mutex);
1316     }
1317     SDL_UnlockMutex(is->pictq_mutex);
1318
1319     if (is->videoq.abort_request)
1320         return -1;
1321
1322     vp = &is->pictq[is->pictq_windex];
1323
1324     /* alloc or resize hardware picture buffer */
1325     if (!vp->bmp || vp->reallocate ||
1326 #if CONFIG_AVFILTER
1327         vp->width  != is->out_video_filter->inputs[0]->w ||
1328         vp->height != is->out_video_filter->inputs[0]->h) {
1329 #else
1330         vp->width != is->video_st->codec->width ||
1331         vp->height != is->video_st->codec->height) {
1332 #endif
1333         SDL_Event event;
1334
1335         vp->allocated  = 0;
1336         vp->reallocate = 0;
1337
1338         /* the allocation must be done in the main thread to avoid
1339            locking problems */
1340         event.type = FF_ALLOC_EVENT;
1341         event.user.data1 = is;
1342         SDL_PushEvent(&event);
1343
1344         /* wait until the picture is allocated */
1345         SDL_LockMutex(is->pictq_mutex);
1346         while (!vp->allocated && !is->videoq.abort_request) {
1347             SDL_CondWait(is->pictq_cond, is->pictq_mutex);
1348         }
1349         SDL_UnlockMutex(is->pictq_mutex);
1350
1351         if (is->videoq.abort_request)
1352             return -1;
1353     }
1354
1355     /* if the frame is not skipped, then display it */
1356     if (vp->bmp) {
1357         AVPicture pict = { { 0 } };
1358
1359         /* get a pointer on the bitmap */
1360         SDL_LockYUVOverlay (vp->bmp);
1361
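        /* as in video_image_display(): the YV12 overlay keeps V before U,
         * so chroma planes 1 and 2 are swapped */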
1362         pict.data[0] = vp->bmp->pixels[0];
1363         pict.data[1] = vp->bmp->pixels[2];
1364         pict.data[2] = vp->bmp->pixels[1];
1365
1366         pict.linesize[0] = vp->bmp->pitches[0];
1367         pict.linesize[1] = vp->bmp->pitches[2];
1368         pict.linesize[2] = vp->bmp->pitches[1];
1369
1370 #if CONFIG_AVFILTER
1371         pict_src.data[0] = src_frame->data[0];
1372         pict_src.data[1] = src_frame->data[1];
1373         pict_src.data[2] = src_frame->data[2];
1374
1375         pict_src.linesize[0] = src_frame->linesize[0];
1376         pict_src.linesize[1] = src_frame->linesize[1];
1377         pict_src.linesize[2] = src_frame->linesize[2];
1378
1379         // FIXME use direct rendering
1380         av_picture_copy(&pict, &pict_src,
1381                         vp->pix_fmt, vp->width, vp->height);
1382 #else
1383         av_opt_get_int(sws_opts, "sws_flags", 0, &sws_flags);
1384         is->img_convert_ctx = sws_getCachedContext(is->img_convert_ctx,
1385             vp->width, vp->height, vp->pix_fmt, vp->width, vp->height,
1386             dst_pix_fmt, sws_flags, NULL, NULL, NULL);
1387         if (is->img_convert_ctx == NULL) {
1388             fprintf(stderr, "Cannot initialize the conversion context\n");
1389             exit(1);
1390         }
1391         sws_scale(is->img_convert_ctx, src_frame->data, src_frame->linesize,
1392                   0, vp->height, pict.data, pict.linesize);
1393 #endif
1394         /* update the bitmap content */
1395         SDL_UnlockYUVOverlay(vp->bmp);
1396
1397         vp->pts = pts;
1398         vp->pos = pos;
1399
1400         /* now we can update the picture count */
1401         if (++is->pictq_windex == VIDEO_PICTURE_QUEUE_SIZE)
1402             is->pictq_windex = 0;
1403         SDL_LockMutex(is->pictq_mutex);
1404         vp->target_clock = compute_target_time(vp->pts, is);
1405
1406         is->pictq_size++;
1407         SDL_UnlockMutex(is->pictq_mutex);
1408     }
1409     return 0;
1410 }
1411
1412 /* Compute the exact PTS for the picture if it is omitted in the stream.
1413  * The 'pts1' parameter is the dts of the packet / pts of the frame. */
1414 static int output_picture2(VideoState *is, AVFrame *src_frame, double pts1, int64_t pos)
1415 {
1416     double frame_delay, pts;
1417     int ret;
1418
1419     pts = pts1;
1420
1421     if (pts != 0) {
1422         /* update video clock with pts, if present */
1423         is->video_clock = pts;
1424     } else {
1425         pts = is->video_clock;
1426     }
1427     /* update video clock for next frame */
1428     frame_delay = av_q2d(is->video_st->codec->time_base);
1429     /* for MPEG2, the frame can be repeated, so we update the
1430        clock accordingly */
1431     frame_delay += src_frame->repeat_pict * (frame_delay * 0.5);
1432     is->video_clock += frame_delay;
1433
1434     ret = queue_picture(is, src_frame, pts, pos);
1435     av_frame_unref(src_frame);
1436     return ret;
1437 }
1438
1439 static int get_video_frame(VideoState *is, AVFrame *frame, int64_t *pts, AVPacket *pkt)
1440 {
1441     int got_picture, i;
1442
1443     if (packet_queue_get(&is->videoq, pkt, 1) < 0)
1444         return -1;
1445
1446     if (pkt->data == flush_pkt.data) {
1447         avcodec_flush_buffers(is->video_st->codec);
1448
1449         SDL_LockMutex(is->pictq_mutex);
1450         // Make sure there are no long-delay timers (ideally we should just flush the queue, but that's harder)
1451         for (i = 0; i < VIDEO_PICTURE_QUEUE_SIZE; i++) {
1452             is->pictq[i].target_clock= 0;
1453         }
1454         while (is->pictq_size && !is->videoq.abort_request) {
1455             SDL_CondWait(is->pictq_cond, is->pictq_mutex);
1456         }
1457         is->video_current_pos = -1;
1458         SDL_UnlockMutex(is->pictq_mutex);
1459
1460         init_pts_correction(&is->pts_ctx);
1461         is->frame_last_pts = AV_NOPTS_VALUE;
1462         is->frame_last_delay = 0;
1463         is->frame_timer = (double)av_gettime() / 1000000.0;
1464         is->skip_frames = 1;
1465         is->skip_frames_index = 0;
1466         return 0;
1467     }
1468
1469     avcodec_decode_video2(is->video_st->codec, frame, &got_picture, pkt);
1470
1471     if (got_picture) {
1472         if (decoder_reorder_pts == -1) {
1473             *pts = guess_correct_pts(&is->pts_ctx, frame->pkt_pts, frame->pkt_dts);
1474         } else if (decoder_reorder_pts) {
1475             *pts = frame->pkt_pts;
1476         } else {
1477             *pts = frame->pkt_dts;
1478         }
1479
1480         if (*pts == AV_NOPTS_VALUE) {
1481             *pts = 0;
1482         }
1483         if (is->video_st->sample_aspect_ratio.num) {
1484             frame->sample_aspect_ratio = is->video_st->sample_aspect_ratio;
1485         }
1486
1487         is->skip_frames_index += 1;
1488         if (is->skip_frames_index >= is->skip_frames) {
1489             is->skip_frames_index -= FFMAX(is->skip_frames, 1.0);
1490             return 1;
1491         }
1492         av_frame_unref(frame);
1493     }
1494     return 0;
1495 }
1496
1497 #if CONFIG_AVFILTER
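/* Build the video filter graph: a 'buffer' source fed with decoded frames,
 * optionally the user-supplied filter chain from -vf, a 'format' filter that
 * forces yuv420p, and a 'buffersink' output that the video thread pulls from. */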
1498 static int configure_video_filters(AVFilterGraph *graph, VideoState *is, const char *vfilters)
1499 {
1500     char sws_flags_str[128];
1501     char buffersrc_args[256];
1502     int ret;
1503     AVFilterContext *filt_src = NULL, *filt_out = NULL, *filt_format;
1504     AVCodecContext *codec = is->video_st->codec;
1505
1506     snprintf(sws_flags_str, sizeof(sws_flags_str), "flags=%"PRId64, sws_flags);
1507     graph->scale_sws_opts = av_strdup(sws_flags_str);
1508
1509     snprintf(buffersrc_args, sizeof(buffersrc_args), "%d:%d:%d:%d:%d:%d:%d",
1510              codec->width, codec->height, codec->pix_fmt,
1511              is->video_st->time_base.num, is->video_st->time_base.den,
1512              codec->sample_aspect_ratio.num, codec->sample_aspect_ratio.den);
1513
1514
1515     if ((ret = avfilter_graph_create_filter(&filt_src,
1516                                             avfilter_get_by_name("buffer"),
1517                                             "src", buffersrc_args, NULL,
1518                                             graph)) < 0)
1519         return ret;
1520     if ((ret = avfilter_graph_create_filter(&filt_out,
1521                                             avfilter_get_by_name("buffersink"),
1522                                             "out", NULL, NULL, graph)) < 0)
1523         return ret;
1524
1525     if ((ret = avfilter_graph_create_filter(&filt_format,
1526                                             avfilter_get_by_name("format"),
1527                                             "format", "yuv420p", NULL, graph)) < 0)
1528         return ret;
1529     if ((ret = avfilter_link(filt_format, 0, filt_out, 0)) < 0)
1530         return ret;
1531
1532
1533     if (vfilters) {
1534         AVFilterInOut *outputs = avfilter_inout_alloc();
1535         AVFilterInOut *inputs  = avfilter_inout_alloc();
1536
1537         outputs->name    = av_strdup("in");
1538         outputs->filter_ctx = filt_src;
1539         outputs->pad_idx = 0;
1540         outputs->next    = NULL;
1541
1542         inputs->name    = av_strdup("out");
1543         inputs->filter_ctx = filt_format;
1544         inputs->pad_idx = 0;
1545         inputs->next    = NULL;
1546
1547         if ((ret = avfilter_graph_parse(graph, vfilters, inputs, outputs, NULL)) < 0)
1548             return ret;
1549     } else {
1550         if ((ret = avfilter_link(filt_src, 0, filt_format, 0)) < 0)
1551             return ret;
1552     }
1553
1554     if ((ret = avfilter_graph_config(graph, NULL)) < 0)
1555         return ret;
1556
1557     is->in_video_filter  = filt_src;
1558     is->out_video_filter = filt_out;
1559
1560     return ret;
1561 }
1562
1563 #endif  /* CONFIG_AVFILTER */
1564
1565 static int video_thread(void *arg)
1566 {
1567     AVPacket pkt = { 0 };
1568     VideoState *is = arg;
1569     AVFrame *frame = av_frame_alloc();
1570     int64_t pts_int;
1571     double pts;
1572     int ret;
1573
1574 #if CONFIG_AVFILTER
1575     AVFilterGraph *graph = avfilter_graph_alloc();
1576     AVFilterContext *filt_out = NULL, *filt_in = NULL;
1577     int last_w = is->video_st->codec->width;
1578     int last_h = is->video_st->codec->height;
1579
1580     if ((ret = configure_video_filters(graph, is, vfilters)) < 0)
1581         goto the_end;
1582     filt_in  = is->in_video_filter;
1583     filt_out = is->out_video_filter;
1584 #endif
1585
1586     for (;;) {
1587 #if CONFIG_AVFILTER
1588         AVRational tb;
1589 #endif
1590         while (is->paused && !is->videoq.abort_request)
1591             SDL_Delay(10);
1592
1593         av_free_packet(&pkt);
1594
1595         ret = get_video_frame(is, frame, &pts_int, &pkt);
1596         if (ret < 0)
1597             goto the_end;
1598
1599         if (!ret)
1600             continue;
1601
1602 #if CONFIG_AVFILTER
1603         if (   last_w != is->video_st->codec->width
1604             || last_h != is->video_st->codec->height) {
1605             av_dlog(NULL, "Changing size %dx%d -> %dx%d\n", last_w, last_h,
1606                     is->video_st->codec->width, is->video_st->codec->height);
1607             avfilter_graph_free(&graph);
1608             graph = avfilter_graph_alloc();
1609             if ((ret = configure_video_filters(graph, is, vfilters)) < 0)
1610                 goto the_end;
1611             filt_in  = is->in_video_filter;
1612             filt_out = is->out_video_filter;
1613             last_w = is->video_st->codec->width;
1614             last_h = is->video_st->codec->height;
1615         }
1616
1617         frame->pts = pts_int;
1618         ret = av_buffersrc_add_frame(filt_in, frame);
1619         if (ret < 0)
1620             goto the_end;
1621
1622         while (ret >= 0) {
1623             ret = av_buffersink_get_frame(filt_out, frame);
1624             if (ret < 0) {
1625                 ret = 0;
1626                 break;
1627             }
1628
1629             pts_int = frame->pts;
1630             tb      = filt_out->inputs[0]->time_base;
1631             if (av_cmp_q(tb, is->video_st->time_base)) {
1632                 av_unused int64_t pts1 = pts_int;
1633                 pts_int = av_rescale_q(pts_int, tb, is->video_st->time_base);
1634                 av_dlog(NULL, "video_thread(): "
1635                         "tb:%d/%d pts:%"PRId64" -> tb:%d/%d pts:%"PRId64"\n",
1636                         tb.num, tb.den, pts1,
1637                         is->video_st->time_base.num, is->video_st->time_base.den, pts_int);
1638             }
1639             pts = pts_int * av_q2d(is->video_st->time_base);
1640             ret = output_picture2(is, frame, pts, 0);
1641         }
1642 #else
1643         pts = pts_int * av_q2d(is->video_st->time_base);
1644         ret = output_picture2(is, frame, pts,  pkt.pos);
1645 #endif
1646
1647         if (ret < 0)
1648             goto the_end;
1649
1650
1651         if (step)
1652             if (cur_stream)
1653                 stream_pause(cur_stream);
1654     }
1655  the_end:
1656 #if CONFIG_AVFILTER
1657     av_freep(&vfilters);
1658     avfilter_graph_free(&graph);
1659 #endif
1660     av_free_packet(&pkt);
1661     av_frame_free(&frame);
1662     return 0;
1663 }
1664
1665 static int subtitle_thread(void *arg)
1666 {
1667     VideoState *is = arg;
1668     SubPicture *sp;
1669     AVPacket pkt1, *pkt = &pkt1;
1670     int got_subtitle;
1671     double pts;
1672     int i, j;
1673     int r, g, b, y, u, v, a;
1674
1675     for (;;) {
1676         while (is->paused && !is->subtitleq.abort_request) {
1677             SDL_Delay(10);
1678         }
1679         if (packet_queue_get(&is->subtitleq, pkt, 1) < 0)
1680             break;
1681
1682         if (pkt->data == flush_pkt.data) {
1683             avcodec_flush_buffers(is->subtitle_st->codec);
1684             continue;
1685         }
1686         SDL_LockMutex(is->subpq_mutex);
1687         while (is->subpq_size >= SUBPICTURE_QUEUE_SIZE &&
1688                !is->subtitleq.abort_request) {
1689             SDL_CondWait(is->subpq_cond, is->subpq_mutex);
1690         }
1691         SDL_UnlockMutex(is->subpq_mutex);
1692
1693         if (is->subtitleq.abort_request)
1694             return 0;
1695
1696         sp = &is->subpq[is->subpq_windex];
1697
1698         /* NOTE: pts is the PTS of the _first_ picture beginning in
1699            this packet, if any */
1700         pts = 0;
1701         if (pkt->pts != AV_NOPTS_VALUE)
1702             pts = av_q2d(is->subtitle_st->time_base) * pkt->pts;
1703
1704         avcodec_decode_subtitle2(is->subtitle_st->codec, &sp->sub,
1705                                  &got_subtitle, pkt);
1706
1707         if (got_subtitle && sp->sub.format == 0) {
1708             sp->pts = pts;
1709
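                 /* convert each palette entry of the bitmap subtitle from RGBA
                  * to YUVA (CCIR 601) in place, so the blending code can work
                  * directly in YUV */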
1710             for (i = 0; i < sp->sub.num_rects; i++)
1711             {
1712                 for (j = 0; j < sp->sub.rects[i]->nb_colors; j++)
1713                 {
1714                     RGBA_IN(r, g, b, a, (uint32_t*)sp->sub.rects[i]->pict.data[1] + j);
1715                     y = RGB_TO_Y_CCIR(r, g, b);
1716                     u = RGB_TO_U_CCIR(r, g, b, 0);
1717                     v = RGB_TO_V_CCIR(r, g, b, 0);
1718                     YUVA_OUT((uint32_t*)sp->sub.rects[i]->pict.data[1] + j, y, u, v, a);
1719                 }
1720             }
1721
1722             /* now we can update the picture count */
1723             if (++is->subpq_windex == SUBPICTURE_QUEUE_SIZE)
1724                 is->subpq_windex = 0;
1725             SDL_LockMutex(is->subpq_mutex);
1726             is->subpq_size++;
1727             SDL_UnlockMutex(is->subpq_mutex);
1728         }
1729         av_free_packet(pkt);
1730     }
1731     return 0;
1732 }
1733
1734 /* copy samples into the ring buffer used by the audio waveform/spectrum display */
1735 static void update_sample_display(VideoState *is, short *samples, int samples_size)
1736 {
1737     int size, len;
1738
1739     size = samples_size / sizeof(short);
1740     while (size > 0) {
1741         len = SAMPLE_ARRAY_SIZE - is->sample_array_index;
1742         if (len > size)
1743             len = size;
1744         memcpy(is->sample_array + is->sample_array_index, samples, len * sizeof(short));
1745         samples += len;
1746         is->sample_array_index += len;
1747         if (is->sample_array_index >= SAMPLE_ARRAY_SIZE)
1748             is->sample_array_index = 0;
1749         size -= len;
1750     }
1751 }
1752
1753 /* return the new audio buffer size (samples can be added or removed
1754    to improve sync when video or an external clock is the master) */
1755 static int synchronize_audio(VideoState *is, short *samples,
1756                              int samples_size1, double pts)
1757 {
1758     int n, samples_size;
1759     double ref_clock;
1760
1761     n = is->sdl_channels * av_get_bytes_per_sample(is->sdl_sample_fmt);
1762     samples_size = samples_size1;
1763
1764     /* if audio is not the master clock, try to remove or add samples to correct the clock */
1765     if (((is->av_sync_type == AV_SYNC_VIDEO_MASTER && is->video_st) ||
1766          is->av_sync_type == AV_SYNC_EXTERNAL_CLOCK)) {
1767         double diff, avg_diff;
1768         int wanted_size, min_size, max_size, nb_samples;
1769
1770         ref_clock = get_master_clock(is);
1771         diff = get_audio_clock(is) - ref_clock;
1772
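         /* the A-V difference is smoothed with an exponential moving average:
          *     audio_diff_cum = diff + coef * audio_diff_cum
          * with coef = exp(log(0.01) / AUDIO_DIFF_AVG_NB) (see
          * stream_component_open() below), so the last AUDIO_DIFF_AVG_NB
          * measurements carry about 99% of the weight and
          * avg_diff = audio_diff_cum * (1 - coef) is the weighted mean */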
1773         if (diff < AV_NOSYNC_THRESHOLD) {
1774             is->audio_diff_cum = diff + is->audio_diff_avg_coef * is->audio_diff_cum;
1775             if (is->audio_diff_avg_count < AUDIO_DIFF_AVG_NB) {
1776                 /* not enough measures to have a correct estimate */
1777                 is->audio_diff_avg_count++;
1778             } else {
1779                 /* estimate the A-V difference */
1780                 avg_diff = is->audio_diff_cum * (1.0 - is->audio_diff_avg_coef);
1781
1782                 if (fabs(avg_diff) >= is->audio_diff_threshold) {
1783                     wanted_size = samples_size + ((int)(diff * is->sdl_sample_rate) * n);
1784                     nb_samples = samples_size / n;
1785
1786                     min_size = ((nb_samples * (100 - SAMPLE_CORRECTION_PERCENT_MAX)) / 100) * n;
1787                     max_size = ((nb_samples * (100 + SAMPLE_CORRECTION_PERCENT_MAX)) / 100) * n;
1788                     if (wanted_size < min_size)
1789                         wanted_size = min_size;
1790                     else if (wanted_size > max_size)
1791                         wanted_size = max_size;
1792
1793                     /* add or remove samples to correct the synchronization */
1794                     if (wanted_size < samples_size) {
1795                         /* remove samples */
1796                         samples_size = wanted_size;
1797                     } else if (wanted_size > samples_size) {
1798                         uint8_t *samples_end, *q;
1799                         int nb;
1800
1801                         /* add samples by duplicating the final decoded sample */
1802                         nb = wanted_size - samples_size;
1803                         samples_end = (uint8_t *)samples + samples_size - n;
1804                         q = samples_end + n;
1805                         while (nb > 0) {
1806                             memcpy(q, samples_end, n);
1807                             q += n;
1808                             nb -= n;
1809                         }
1810                         samples_size = wanted_size;
1811                     }
1812                 }
1813                 av_dlog(NULL, "diff=%f adiff=%f sample_diff=%d apts=%0.3f vpts=%0.3f %f\n",
1814                         diff, avg_diff, samples_size - samples_size1,
1815                         is->audio_clock, is->video_clock, is->audio_diff_threshold);
1816             }
1817         } else {
1818             /* difference is too big: may be caused by initial PTS errors, so
1819                reset the A-V filter */
1820             is->audio_diff_avg_count = 0;
1821             is->audio_diff_cum       = 0;
1822         }
1823     }
1824
1825     return samples_size;
1826 }
1827
1828 /* decode one audio frame and return its uncompressed size */
1829 static int audio_decode_frame(VideoState *is, double *pts_ptr)
1830 {
1831     AVPacket *pkt_temp = &is->audio_pkt_temp;
1832     AVPacket *pkt = &is->audio_pkt;
1833     AVCodecContext *dec = is->audio_st->codec;
1834     int n, len1, data_size, got_frame;
1835     double pts;
1836     int new_packet = 0;
1837     int flush_complete = 0;
1838
1839     for (;;) {
1840         /* NOTE: the audio packet can contain several frames */
1841         while (pkt_temp->size > 0 || (!pkt_temp->data && new_packet)) {
1842             int resample_changed, audio_resample;
1843
1844             if (!is->frame) {
1845                 if (!(is->frame = avcodec_alloc_frame()))
1846                     return AVERROR(ENOMEM);
1847             } else
1848                 avcodec_get_frame_defaults(is->frame);
1849
1850             if (flush_complete)
1851                 break;
1852             new_packet = 0;
1853             len1 = avcodec_decode_audio4(dec, is->frame, &got_frame, pkt_temp);
1854             if (len1 < 0) {
1855                 /* if error, we skip the frame */
1856                 pkt_temp->size = 0;
1857                 break;
1858             }
1859
1860             pkt_temp->data += len1;
1861             pkt_temp->size -= len1;
1862
1863             if (!got_frame) {
1864                 /* stop sending empty packets if the decoder is finished */
1865                 if (!pkt_temp->data && dec->codec->capabilities & CODEC_CAP_DELAY)
1866                     flush_complete = 1;
1867                 continue;
1868             }
1869             data_size = av_samples_get_buffer_size(NULL, dec->channels,
1870                                                    is->frame->nb_samples,
1871                                                    is->frame->format, 1);
1872
1873             audio_resample = is->frame->format         != is->sdl_sample_fmt     ||
1874                              is->frame->channel_layout != is->sdl_channel_layout ||
1875                              is->frame->sample_rate    != is->sdl_sample_rate;
1876
1877             resample_changed = is->frame->format         != is->resample_sample_fmt     ||
1878                                is->frame->channel_layout != is->resample_channel_layout ||
1879                                is->frame->sample_rate    != is->resample_sample_rate;
1880
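             /* (re)create the resampler whenever the decoded format, channel
              * layout or sample rate no longer matches what SDL was opened
              * with, or has changed since the previous frame */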
1881             if ((!is->avr && audio_resample) || resample_changed) {
1882                 int ret;
1883                 if (is->avr)
1884                     avresample_close(is->avr);
1885                 else if (audio_resample) {
1886                     is->avr = avresample_alloc_context();
1887                     if (!is->avr) {
1888                         fprintf(stderr, "error allocating AVAudioResampleContext\n");
1889                         break;
1890                     }
1891                 }
1892                 if (audio_resample) {
1893                     av_opt_set_int(is->avr, "in_channel_layout",  is->frame->channel_layout, 0);
1894                     av_opt_set_int(is->avr, "in_sample_fmt",      is->frame->format,         0);
1895                     av_opt_set_int(is->avr, "in_sample_rate",     is->frame->sample_rate,    0);
1896                     av_opt_set_int(is->avr, "out_channel_layout", is->sdl_channel_layout,    0);
1897                     av_opt_set_int(is->avr, "out_sample_fmt",     is->sdl_sample_fmt,        0);
1898                     av_opt_set_int(is->avr, "out_sample_rate",    is->sdl_sample_rate,       0);
1899
1900                     if ((ret = avresample_open(is->avr)) < 0) {
1901                         fprintf(stderr, "error initializing libavresample\n");
1902                         break;
1903                     }
1904                 }
1905                 is->resample_sample_fmt     = is->frame->format;
1906                 is->resample_channel_layout = is->frame->channel_layout;
1907                 is->resample_sample_rate    = is->frame->sample_rate;
1908             }
1909
1910             if (audio_resample) {
1911                 void *tmp_out;
1912                 int out_samples, out_size, out_linesize;
1913                 int osize      = av_get_bytes_per_sample(is->sdl_sample_fmt);
1914                 int nb_samples = is->frame->nb_samples;
1915
1916                 out_size = av_samples_get_buffer_size(&out_linesize,
1917                                                       is->sdl_channels,
1918                                                       nb_samples,
1919                                                       is->sdl_sample_fmt, 0);
1920                 tmp_out = av_realloc(is->audio_buf1, out_size);
1921                 if (!tmp_out)
1922                     return AVERROR(ENOMEM);
1923                 is->audio_buf1 = tmp_out;
1924
1925                 out_samples = avresample_convert(is->avr,
1926                                                  &is->audio_buf1,
1927                                                  out_linesize, nb_samples,
1928                                                  is->frame->data,
1929                                                  is->frame->linesize[0],
1930                                                  is->frame->nb_samples);
1931                 if (out_samples < 0) {
1932                     fprintf(stderr, "avresample_convert() failed\n");
1933                     break;
1934                 }
1935                 is->audio_buf = is->audio_buf1;
1936                 data_size = out_samples * osize * is->sdl_channels;
1937             } else {
1938                 is->audio_buf = is->frame->data[0];
1939             }
1940
1941             /* if the packet had no pts, extrapolate it from the running audio clock */
1942             pts = is->audio_clock;
1943             *pts_ptr = pts;
1944             n = is->sdl_channels * av_get_bytes_per_sample(is->sdl_sample_fmt);
1945             is->audio_clock += (double)data_size /
1946                 (double)(n * is->sdl_sample_rate);
1947 #ifdef DEBUG
1948             {
1949                 static double last_clock;
1950                 printf("audio: delay=%0.3f clock=%0.3f pts=%0.3f\n",
1951                        is->audio_clock - last_clock,
1952                        is->audio_clock, pts);
1953                 last_clock = is->audio_clock;
1954             }
1955 #endif
1956             return data_size;
1957         }
1958
1959         /* free the current packet */
1960         if (pkt->data)
1961             av_free_packet(pkt);
1962         memset(pkt_temp, 0, sizeof(*pkt_temp));
1963
1964         if (is->paused || is->audioq.abort_request) {
1965             return -1;
1966         }
1967
1968         /* read next packet */
1969         if ((new_packet = packet_queue_get(&is->audioq, pkt, 1)) < 0)
1970             return -1;
1971
1972         if (pkt->data == flush_pkt.data) {
1973             avcodec_flush_buffers(dec);
1974             flush_complete = 0;
1975         }
1976
1977         *pkt_temp = *pkt;
1978
1979         /* update the audio clock with the packet pts, if available */
1980         if (pkt->pts != AV_NOPTS_VALUE) {
1981             is->audio_clock = av_q2d(is->audio_st->time_base)*pkt->pts;
1982         }
1983     }
1984 }
1985
1986 /* prepare a new audio buffer */
1987 static void sdl_audio_callback(void *opaque, Uint8 *stream, int len)
1988 {
1989     VideoState *is = opaque;
1990     int audio_size, len1;
1991     double pts;
1992
1993     audio_callback_time = av_gettime();
1994
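     /* SDL calls this from its own audio thread and expects exactly 'len'
      * bytes to be written into 'stream'; on decode errors the silence buffer
      * is used so playback timing stays continuous */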
1995     while (len > 0) {
1996         if (is->audio_buf_index >= is->audio_buf_size) {
1997             audio_size = audio_decode_frame(is, &pts);
1998             if (audio_size < 0) {
1999                 /* if error, just output silence */
2000                 is->audio_buf      = is->silence_buf;
2001                 is->audio_buf_size = sizeof(is->silence_buf);
2002             } else {
2003                 if (is->show_audio)
2004                     update_sample_display(is, (int16_t *)is->audio_buf, audio_size);
2005                 audio_size = synchronize_audio(is, (int16_t *)is->audio_buf, audio_size,
2006                                                pts);
2007                 is->audio_buf_size = audio_size;
2008             }
2009             is->audio_buf_index = 0;
2010         }
2011         len1 = is->audio_buf_size - is->audio_buf_index;
2012         if (len1 > len)
2013             len1 = len;
2014         memcpy(stream, (uint8_t *)is->audio_buf + is->audio_buf_index, len1);
2015         len -= len1;
2016         stream += len1;
2017         is->audio_buf_index += len1;
2018     }
2019 }
2020
2021 /* open a given stream. Return 0 if OK */
2022 static int stream_component_open(VideoState *is, int stream_index)
2023 {
2024     AVFormatContext *ic = is->ic;
2025     AVCodecContext *avctx;
2026     AVCodec *codec;
2027     SDL_AudioSpec wanted_spec, spec;
2028     AVDictionary *opts;
2029     AVDictionaryEntry *t = NULL;
2030
2031     if (stream_index < 0 || stream_index >= ic->nb_streams)
2032         return -1;
2033     avctx = ic->streams[stream_index]->codec;
2034
2035     opts = filter_codec_opts(codec_opts, avctx->codec_id, ic, ic->streams[stream_index], NULL);
2036
2037     codec = avcodec_find_decoder(avctx->codec_id);
2038     avctx->debug_mv          = debug_mv;
2039     avctx->workaround_bugs   = workaround_bugs;
2040     avctx->idct_algo         = idct;
2041     avctx->skip_frame        = skip_frame;
2042     avctx->skip_idct         = skip_idct;
2043     avctx->skip_loop_filter  = skip_loop_filter;
2044     avctx->error_concealment = error_concealment;
2045
2046     if (fast)   avctx->flags2 |= CODEC_FLAG2_FAST;
2047
2048     if (!av_dict_get(opts, "threads", NULL, 0))
2049         av_dict_set(&opts, "threads", "auto", 0);
2050     if (avctx->codec_type == AVMEDIA_TYPE_VIDEO)
2051         av_dict_set(&opts, "refcounted_frames", "1", 0);
2052     if (!codec ||
2053         avcodec_open2(avctx, codec, &opts) < 0)
2054         return -1;
2055     if ((t = av_dict_get(opts, "", NULL, AV_DICT_IGNORE_SUFFIX))) {
2056         av_log(NULL, AV_LOG_ERROR, "Option %s not found.\n", t->key);
2057         return AVERROR_OPTION_NOT_FOUND;
2058     }
2059
2060     /* prepare audio output */
2061     if (avctx->codec_type == AVMEDIA_TYPE_AUDIO) {
2062         is->sdl_sample_rate = avctx->sample_rate;
2063
2064         if (!avctx->channel_layout)
2065             avctx->channel_layout = av_get_default_channel_layout(avctx->channels);
2066         if (!avctx->channel_layout) {
2067             fprintf(stderr, "unable to guess channel layout\n");
2068             return -1;
2069         }
2070         if (avctx->channels == 1)
2071             is->sdl_channel_layout = AV_CH_LAYOUT_MONO;
2072         else
2073             is->sdl_channel_layout = AV_CH_LAYOUT_STEREO;
2074         is->sdl_channels = av_get_channel_layout_nb_channels(is->sdl_channel_layout);
2075
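         /* ask SDL for signed 16-bit output at the stream's sample rate with
          * the mono/stereo layout chosen above; any other decoded format is
          * converted by libavresample in audio_decode_frame() */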
2076         wanted_spec.format = AUDIO_S16SYS;
2077         wanted_spec.freq = is->sdl_sample_rate;
2078         wanted_spec.channels = is->sdl_channels;
2079         wanted_spec.silence = 0;
2080         wanted_spec.samples = SDL_AUDIO_BUFFER_SIZE;
2081         wanted_spec.callback = sdl_audio_callback;
2082         wanted_spec.userdata = is;
2083         if (SDL_OpenAudio(&wanted_spec, &spec) < 0) {
2084             fprintf(stderr, "SDL_OpenAudio: %s\n", SDL_GetError());
2085             return -1;
2086         }
2087         is->audio_hw_buf_size = spec.size;
2088         is->sdl_sample_fmt          = AV_SAMPLE_FMT_S16;
2089         is->resample_sample_fmt     = is->sdl_sample_fmt;
2090         is->resample_channel_layout = avctx->channel_layout;
2091         is->resample_sample_rate    = avctx->sample_rate;
2092     }
2093
2094     ic->streams[stream_index]->discard = AVDISCARD_DEFAULT;
2095     switch (avctx->codec_type) {
2096     case AVMEDIA_TYPE_AUDIO:
2097         is->audio_stream = stream_index;
2098         is->audio_st = ic->streams[stream_index];
2099         is->audio_buf_size  = 0;
2100         is->audio_buf_index = 0;
2101
2102         /* init averaging filter */
2103         is->audio_diff_avg_coef  = exp(log(0.01) / AUDIO_DIFF_AVG_NB);
2104         is->audio_diff_avg_count = 0;
2105         /* since we do not have a precise enough measure of the audio FIFO fullness,
2106            we only correct audio sync when the error is larger than this threshold */
2107         is->audio_diff_threshold = 2.0 * SDL_AUDIO_BUFFER_SIZE / avctx->sample_rate;
2108
2109         memset(&is->audio_pkt, 0, sizeof(is->audio_pkt));
2110         packet_queue_init(&is->audioq);
2111         SDL_PauseAudio(0);
2112         break;
2113     case AVMEDIA_TYPE_VIDEO:
2114         is->video_stream = stream_index;
2115         is->video_st = ic->streams[stream_index];
2116
2117         packet_queue_init(&is->videoq);
2118         is->video_tid = SDL_CreateThread(video_thread, is);
2119         break;
2120     case AVMEDIA_TYPE_SUBTITLE:
2121         is->subtitle_stream = stream_index;
2122         is->subtitle_st = ic->streams[stream_index];
2123         packet_queue_init(&is->subtitleq);
2124
2125         is->subtitle_tid = SDL_CreateThread(subtitle_thread, is);
2126         break;
2127     default:
2128         break;
2129     }
2130     return 0;
2131 }
2132
2133 static void stream_component_close(VideoState *is, int stream_index)
2134 {
2135     AVFormatContext *ic = is->ic;
2136     AVCodecContext *avctx;
2137
2138     if (stream_index < 0 || stream_index >= ic->nb_streams)
2139         return;
2140     avctx = ic->streams[stream_index]->codec;
2141
2142     switch (avctx->codec_type) {
2143     case AVMEDIA_TYPE_AUDIO:
2144         packet_queue_abort(&is->audioq);
2145
2146         SDL_CloseAudio();
2147
2148         packet_queue_end(&is->audioq);
2149         av_free_packet(&is->audio_pkt);
2150         if (is->avr)
2151             avresample_free(&is->avr);
2152         av_freep(&is->audio_buf1);
2153         is->audio_buf = NULL;
2154         avcodec_free_frame(&is->frame);
2155
2156         if (is->rdft) {
2157             av_rdft_end(is->rdft);
2158             av_freep(&is->rdft_data);
2159             is->rdft = NULL;
2160             is->rdft_bits = 0;
2161         }
2162         break;
2163     case AVMEDIA_TYPE_VIDEO:
2164         packet_queue_abort(&is->videoq);
2165
2166         /* note: we also signal the condition to make sure the video
2167            thread is unblocked in all cases */
2168         SDL_LockMutex(is->pictq_mutex);
2169         SDL_CondSignal(is->pictq_cond);
2170         SDL_UnlockMutex(is->pictq_mutex);
2171
2172         SDL_WaitThread(is->video_tid, NULL);
2173
2174         packet_queue_end(&is->videoq);
2175         break;
2176     case AVMEDIA_TYPE_SUBTITLE:
2177         packet_queue_abort(&is->subtitleq);
2178
2179         /* note: we also signal the condition to make sure the subtitle
2180            thread is unblocked in all cases */
2181         SDL_LockMutex(is->subpq_mutex);
2182         is->subtitle_stream_changed = 1;
2183
2184         SDL_CondSignal(is->subpq_cond);
2185         SDL_UnlockMutex(is->subpq_mutex);
2186
2187         SDL_WaitThread(is->subtitle_tid, NULL);
2188
2189         packet_queue_end(&is->subtitleq);
2190         break;
2191     default:
2192         break;
2193     }
2194
2195     ic->streams[stream_index]->discard = AVDISCARD_ALL;
2196     avcodec_close(avctx);
2197     switch (avctx->codec_type) {
2198     case AVMEDIA_TYPE_AUDIO:
2199         is->audio_st = NULL;
2200         is->audio_stream = -1;
2201         break;
2202     case AVMEDIA_TYPE_VIDEO:
2203         is->video_st = NULL;
2204         is->video_stream = -1;
2205         break;
2206     case AVMEDIA_TYPE_SUBTITLE:
2207         is->subtitle_st = NULL;
2208         is->subtitle_stream = -1;
2209         break;
2210     default:
2211         break;
2212     }
2213 }
2214
2215 /* since we have only one decoding thread, we can use a global
2216    variable instead of a thread local variable */
2217 static VideoState *global_video_state;
2218
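     /* installed as ic->interrupt_callback below, so that blocking demuxer
      * I/O is aborted as soon as abort_request is set */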
2219 static int decode_interrupt_cb(void *ctx)
2220 {
2221     return global_video_state && global_video_state->abort_request;
2222 }
2223
2224 /* this thread reads the stream from disk or the network and feeds the packet queues */
2225 static int decode_thread(void *arg)
2226 {
2227     VideoState *is = arg;
2228     AVFormatContext *ic = NULL;
2229     int err, i, ret;
2230     int st_index[AVMEDIA_TYPE_NB];
2231     AVPacket pkt1, *pkt = &pkt1;
2232     int eof = 0;
2233     int pkt_in_play_range = 0;
2234     AVDictionaryEntry *t;
2235     AVDictionary **opts;
2236     int orig_nb_streams;
2237
2238     memset(st_index, -1, sizeof(st_index));
2239     is->video_stream = -1;
2240     is->audio_stream = -1;
2241     is->subtitle_stream = -1;
2242
2243     global_video_state = is;
2244
2245     ic = avformat_alloc_context();
2246     ic->interrupt_callback.callback = decode_interrupt_cb;
2247     err = avformat_open_input(&ic, is->filename, is->iformat, &format_opts);
2248     if (err < 0) {
2249         print_error(is->filename, err);
2250         ret = -1;
2251         goto fail;
2252     }
2253     if ((t = av_dict_get(format_opts, "", NULL, AV_DICT_IGNORE_SUFFIX))) {
2254         av_log(NULL, AV_LOG_ERROR, "Option %s not found.\n", t->key);
2255         ret = AVERROR_OPTION_NOT_FOUND;
2256         goto fail;
2257     }
2258     is->ic = ic;
2259
2260     if (genpts)
2261         ic->flags |= AVFMT_FLAG_GENPTS;
2262
2263     opts = setup_find_stream_info_opts(ic, codec_opts);
2264     orig_nb_streams = ic->nb_streams;
2265
2266     err = avformat_find_stream_info(ic, opts);
2267     if (err < 0) {
2268         fprintf(stderr, "%s: could not find codec parameters\n", is->filename);
2269         ret = -1;
2270         goto fail;
2271     }
2272     for (i = 0; i < orig_nb_streams; i++)
2273         av_dict_free(&opts[i]);
2274     av_freep(&opts);
2275
2276     if (ic->pb)
2277         ic->pb->eof_reached = 0; // FIXME hack: avplay should probably not use url_feof() to test for the end
2278
2279     if (seek_by_bytes < 0)
2280         seek_by_bytes = !!(ic->iformat->flags & AVFMT_TS_DISCONT);
2281
2282     /* if seeking was requested, execute it */
2283     if (start_time != AV_NOPTS_VALUE) {
2284         int64_t timestamp;
2285
2286         timestamp = start_time;
2287         /* add the stream start time */
2288         if (ic->start_time != AV_NOPTS_VALUE)
2289             timestamp += ic->start_time;
2290         ret = avformat_seek_file(ic, -1, INT64_MIN, timestamp, INT64_MAX, 0);
2291         if (ret < 0) {
2292             fprintf(stderr, "%s: could not seek to position %0.3f\n",
2293                     is->filename, (double)timestamp / AV_TIME_BASE);
2294         }
2295     }
2296
2297     for (i = 0; i < ic->nb_streams; i++)
2298         ic->streams[i]->discard = AVDISCARD_ALL;
2299     if (!video_disable)
2300         st_index[AVMEDIA_TYPE_VIDEO] =
2301             av_find_best_stream(ic, AVMEDIA_TYPE_VIDEO,
2302                                 wanted_stream[AVMEDIA_TYPE_VIDEO], -1, NULL, 0);
2303     if (!audio_disable)
2304         st_index[AVMEDIA_TYPE_AUDIO] =
2305             av_find_best_stream(ic, AVMEDIA_TYPE_AUDIO,
2306                                 wanted_stream[AVMEDIA_TYPE_AUDIO],
2307                                 st_index[AVMEDIA_TYPE_VIDEO],
2308                                 NULL, 0);
2309     if (!video_disable)
2310         st_index[AVMEDIA_TYPE_SUBTITLE] =
2311             av_find_best_stream(ic, AVMEDIA_TYPE_SUBTITLE,
2312                                 wanted_stream[AVMEDIA_TYPE_SUBTITLE],
2313                                 (st_index[AVMEDIA_TYPE_AUDIO] >= 0 ?
2314                                  st_index[AVMEDIA_TYPE_AUDIO] :
2315                                  st_index[AVMEDIA_TYPE_VIDEO]),
2316                                 NULL, 0);
2317     if (show_status) {
2318         av_dump_format(ic, 0, is->filename, 0);
2319     }
2320
2321     /* open the streams */
2322     if (st_index[AVMEDIA_TYPE_AUDIO] >= 0) {
2323         stream_component_open(is, st_index[AVMEDIA_TYPE_AUDIO]);
2324     }
2325
2326     ret = -1;
2327     if (st_index[AVMEDIA_TYPE_VIDEO] >= 0) {
2328         ret = stream_component_open(is, st_index[AVMEDIA_TYPE_VIDEO]);
2329     }
2330     is->refresh_tid = SDL_CreateThread(refresh_thread, is);
2331     if (ret < 0) {
2332         if (!display_disable)
2333             is->show_audio = 2;
2334     }
2335
2336     if (st_index[AVMEDIA_TYPE_SUBTITLE] >= 0) {
2337         stream_component_open(is, st_index[AVMEDIA_TYPE_SUBTITLE]);
2338     }
2339
2340     if (is->video_stream < 0 && is->audio_stream < 0) {
2341         fprintf(stderr, "%s: could not open codecs\n", is->filename);
2342         ret = -1;
2343         goto fail;
2344     }
2345
2346     for (;;) {
2347         if (is->abort_request)
2348             break;
2349         if (is->paused != is->last_paused) {
2350             is->last_paused = is->paused;
2351             if (is->paused)
2352                 is->read_pause_return = av_read_pause(ic);
2353             else
2354                 av_read_play(ic);
2355         }
2356 #if CONFIG_RTSP_DEMUXER
2357         if (is->paused && !strcmp(ic->iformat->name, "rtsp")) {
2358             /* wait 10 ms to avoid trying to get another packet */
2359             /* XXX: horrible */
2360             SDL_Delay(10);
2361             continue;
2362         }
2363 #endif
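         /* service seek requests posted by the event loop: seek the demuxer
          * within [seek_min, seek_max], then flush the packet queues and push
          * flush_pkt so the decoder threads reset their codec state */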
2364         if (is->seek_req) {
2365             int64_t seek_target = is->seek_pos;
2366             int64_t seek_min    = is->seek_rel > 0 ? seek_target - is->seek_rel + 2: INT64_MIN;
2367             int64_t seek_max    = is->seek_rel < 0 ? seek_target - is->seek_rel - 2: INT64_MAX;
2368 // FIXME the +-2 is needed because rounding is not done in the correct direction when
2369 //       generating the seek_pos/seek_rel variables
2370
2371             ret = avformat_seek_file(is->ic, -1, seek_min, seek_target, seek_max, is->seek_flags);
2372             if (ret < 0) {
2373                 fprintf(stderr, "%s: error while seeking\n", is->ic->filename);
2374             } else {
2375                 if (is->audio_stream >= 0) {
2376                     packet_queue_flush(&is->audioq);
2377                     packet_queue_put(&is->audioq, &flush_pkt);
2378                 }
2379                 if (is->subtitle_stream >= 0) {
2380                     packet_queue_flush(&is->subtitleq);
2381                     packet_queue_put(&is->subtitleq, &flush_pkt);
2382                 }
2383                 if (is->video_stream >= 0) {
2384                     packet_queue_flush(&is->videoq);
2385                     packet_queue_put(&is->videoq, &flush_pkt);
2386                 }
2387             }
2388             is->seek_req = 0;
2389             eof = 0;
2390         }
2391
2392         /* if the queues are full, no need to read more */
2393         if (!infinite_buffer &&
2394               (is->audioq.size + is->videoq.size + is->subtitleq.size > MAX_QUEUE_SIZE
2395             || (   (is->audioq   .size  > MIN_AUDIOQ_SIZE || is->audio_stream < 0)
2396                 && (is->videoq   .nb_packets > MIN_FRAMES || is->video_stream < 0)
2397                 && (is->subtitleq.nb_packets > MIN_FRAMES || is->subtitle_stream < 0)))) {
2398             /* wait 10 ms */
2399             SDL_Delay(10);
2400             continue;
2401         }
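         /* at end of file, queue null packets to drain decoders that have
          * CODEC_CAP_DELAY; once every queue is empty, either loop back to the
          * start or exit if -autoexit was given */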
2402         if (eof) {
2403             if (is->video_stream >= 0) {
2404                 av_init_packet(pkt);
2405                 pkt->data = NULL;
2406                 pkt->size = 0;
2407                 pkt->stream_index = is->video_stream;
2408                 packet_queue_put(&is->videoq, pkt);
2409             }
2410             if (is->audio_stream >= 0 &&
2411                 is->audio_st->codec->codec->capabilities & CODEC_CAP_DELAY) {
2412                 av_init_packet(pkt);
2413                 pkt->data = NULL;
2414                 pkt->size = 0;
2415                 pkt->stream_index = is->audio_stream;
2416                 packet_queue_put(&is->audioq, pkt);
2417             }
2418             SDL_Delay(10);
2419             if (is->audioq.size + is->videoq.size + is->subtitleq.size == 0) {
2420                 if (loop != 1 && (!loop || --loop)) {
2421                     stream_seek(cur_stream, start_time != AV_NOPTS_VALUE ? start_time : 0, 0, 0);
2422                 } else if (autoexit) {
2423                     ret = AVERROR_EOF;
2424                     goto fail;
2425                 }
2426             }
2427             continue;
2428         }
2429         ret = av_read_frame(ic, pkt);
2430         if (ret < 0) {
2431             if (ret == AVERROR_EOF || (ic->pb && ic->pb->eof_reached))
2432                 eof = 1;
2433             if (ic->pb && ic->pb->error)
2434                 break;
2435             SDL_Delay(100); /* wait for user event */
2436             continue;
2437         }
2438         /* check if the packet is within the play range specified by the user, then queue it, otherwise discard it */
2439         pkt_in_play_range = duration == AV_NOPTS_VALUE ||
2440                 (pkt->pts - ic->streams[pkt->stream_index]->start_time) *
2441                 av_q2d(ic->streams[pkt->stream_index]->time_base) -
2442                 (double)(start_time != AV_NOPTS_VALUE ? start_time : 0) / 1000000
2443                 <= ((double)duration / 1000000);
2444         if (pkt->stream_index == is->audio_stream && pkt_in_play_range) {
2445             packet_queue_put(&is->audioq, pkt);
2446         } else if (pkt->stream_index == is->video_stream && pkt_in_play_range) {
2447             packet_queue_put(&is->videoq, pkt);
2448         } else if (pkt->stream_index == is->subtitle_stream && pkt_in_play_range) {
2449             packet_queue_put(&is->subtitleq, pkt);
2450         } else {
2451             av_free_packet(pkt);
2452         }
2453     }
2454     /* wait until the end */
2455     while (!is->abort_request) {
2456         SDL_Delay(100);
2457     }
2458
2459     ret = 0;
2460  fail:
2461     /* disable interrupting */
2462     global_video_state = NULL;
2463
2464     /* close each stream */
2465     if (is->audio_stream >= 0)
2466         stream_component_close(is, is->audio_stream);
2467     if (is->video_stream >= 0)
2468         stream_component_close(is, is->video_stream);
2469     if (is->subtitle_stream >= 0)
2470         stream_component_close(is, is->subtitle_stream);
2471     if (is->ic) {
2472         avformat_close_input(&is->ic);
2473     }
2474
2475     if (ret != 0) {
2476         SDL_Event event;
2477
2478         event.type = FF_QUIT_EVENT;
2479         event.user.data1 = is;
2480         SDL_PushEvent(&event);
2481     }
2482     return 0;
2483 }
2484
2485 static VideoState *stream_open(const char *filename, AVInputFormat *iformat)
2486 {
2487     VideoState *is;
2488
2489     is = av_mallocz(sizeof(VideoState));
2490     if (!is)
2491         return NULL;
2492     av_strlcpy(is->filename, filename, sizeof(is->filename));
2493     is->iformat = iformat;
2494     is->ytop    = 0;
2495     is->xleft   = 0;
2496
2497     /* start video display */
2498     is->pictq_mutex = SDL_CreateMutex();
2499     is->pictq_cond  = SDL_CreateCond();
2500
2501     is->subpq_mutex = SDL_CreateMutex();
2502     is->subpq_cond  = SDL_CreateCond();
2503
2504     is->av_sync_type = av_sync_type;
2505     is->parse_tid    = SDL_CreateThread(decode_thread, is);
2506     if (!is->parse_tid) {
2507         av_free(is);
2508         return NULL;
2509     }
2510     return is;
2511 }
2512
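     /* switch to the next stream of the given type, wrapping around; for
      * subtitles, wrapping past the last stream selects no stream at all
      * (stream_index == -1), which disables subtitles */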
2513 static void stream_cycle_channel(VideoState *is, int codec_type)
2514 {
2515     AVFormatContext *ic = is->ic;
2516     int start_index, stream_index;
2517     AVStream *st;
2518
2519     if (codec_type == AVMEDIA_TYPE_VIDEO)
2520         start_index = is->video_stream;
2521     else if (codec_type == AVMEDIA_TYPE_AUDIO)
2522         start_index = is->audio_stream;
2523     else
2524         start_index = is->subtitle_stream;
2525     if (start_index < (codec_type == AVMEDIA_TYPE_SUBTITLE ? -1 : 0))
2526         return;
2527     stream_index = start_index;
2528     for (;;) {
2529         if (++stream_index >= is->ic->nb_streams)
2530         {
2531             if (codec_type == AVMEDIA_TYPE_SUBTITLE)
2532             {
2533                 stream_index = -1;
2534                 goto the_end;
2535             } else
2536                 stream_index = 0;
2537         }
2538         if (stream_index == start_index)
2539             return;
2540         st = ic->streams[stream_index];
2541         if (st->codec->codec_type == codec_type) {
2542             /* check that parameters are OK */
2543             switch (codec_type) {
2544             case AVMEDIA_TYPE_AUDIO:
2545                 if (st->codec->sample_rate != 0 &&
2546                     st->codec->channels != 0)
2547                     goto the_end;
2548                 break;
2549             case AVMEDIA_TYPE_VIDEO:
2550             case AVMEDIA_TYPE_SUBTITLE:
2551                 goto the_end;
2552             default:
2553                 break;
2554             }
2555         }
2556     }
2557  the_end:
2558     stream_component_close(is, start_index);
2559     stream_component_open(is, stream_index);
2560 }
2561
2562
2563 static void toggle_full_screen(void)
2564 {
2565 #if defined(__APPLE__) && SDL_VERSION_ATLEAST(1, 2, 14)
2566     /* OS X needs to empty the picture_queue */
2567     int i;
2568     for (i = 0; i < VIDEO_PICTURE_QUEUE_SIZE; i++)
2569         cur_stream->pictq[i].reallocate = 1;
2570 #endif
2571     is_full_screen = !is_full_screen;
2572     video_open(cur_stream);
2573 }
2574
2575 static void toggle_pause(void)
2576 {
2577     if (cur_stream)
2578         stream_pause(cur_stream);
2579     step = 0;
2580 }
2581
2582 static void step_to_next_frame(void)
2583 {
2584     if (cur_stream) {
2585         /* if the stream is paused, unpause it, then step */
2586         if (cur_stream->paused)
2587             stream_pause(cur_stream);
2588     }
2589     step = 1;
2590 }
2591
2592 static void toggle_audio_display(void)
2593 {
2594     if (cur_stream) {
2595         int bgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0x00);
2596         cur_stream->show_audio = (cur_stream->show_audio + 1) % 3;
2597         fill_rectangle(screen,
2598                        cur_stream->xleft, cur_stream->ytop, cur_stream->width, cur_stream->height,
2599                        bgcolor);
2600         SDL_UpdateRect(screen, cur_stream->xleft, cur_stream->ytop, cur_stream->width, cur_stream->height);
2601     }
2602 }
2603
2604 /* handle events sent by the GUI */
2605 static void event_loop(void)
2606 {
2607     SDL_Event event;
2608     double incr, pos, frac;
2609
2610     for (;;) {
2611         double x;
2612         SDL_WaitEvent(&event);
2613         switch (event.type) {
2614         case SDL_KEYDOWN:
2615             if (exit_on_keydown) {
2616                 do_exit();
2617                 break;
2618             }
2619             switch (event.key.keysym.sym) {
2620             case SDLK_ESCAPE:
2621             case SDLK_q:
2622                 do_exit();
2623                 break;
2624             case SDLK_f:
2625                 toggle_full_screen();
2626                 break;
2627             case SDLK_p:
2628             case SDLK_SPACE:
2629                 toggle_pause();
2630                 break;
2631             case SDLK_s: // S: Step to next frame
2632                 step_to_next_frame();
2633                 break;
2634             case SDLK_a:
2635                 if (cur_stream)
2636                     stream_cycle_channel(cur_stream, AVMEDIA_TYPE_AUDIO);
2637                 break;
2638             case SDLK_v:
2639                 if (cur_stream)
2640                     stream_cycle_channel(cur_stream, AVMEDIA_TYPE_VIDEO);
2641                 break;
2642             case SDLK_t:
2643                 if (cur_stream)
2644                     stream_cycle_channel(cur_stream, AVMEDIA_TYPE_SUBTITLE);
2645                 break;
2646             case SDLK_w:
2647                 toggle_audio_display();
2648                 break;
2649             case SDLK_LEFT:
2650                 incr = -10.0;
2651                 goto do_seek;
2652             case SDLK_RIGHT:
2653                 incr = 10.0;
2654                 goto do_seek;
2655             case SDLK_UP:
2656                 incr = 60.0;
2657                 goto do_seek;
2658             case SDLK_DOWN:
2659                 incr = -60.0;
2660             do_seek:
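                 /* in byte-seek mode the 10s/60s increments are scaled by the
                  * stream bitrate (bytes per second), falling back to 180000
                  * bytes/s when the bitrate is unknown; otherwise they are
                  * added to the master clock and converted to AV_TIME_BASE */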
2661                 if (cur_stream) {
2662                     if (seek_by_bytes) {
2663                         if (cur_stream->video_stream >= 0 && cur_stream->video_current_pos >= 0) {
2664                             pos = cur_stream->video_current_pos;
2665                         } else if (cur_stream->audio_stream >= 0 && cur_stream->audio_pkt.pos >= 0) {
2666                             pos = cur_stream->audio_pkt.pos;
2667                         } else
2668                             pos = avio_tell(cur_stream->ic->pb);
2669                         if (cur_stream->ic->bit_rate)
2670                             incr *= cur_stream->ic->bit_rate / 8.0;
2671                         else
2672                             incr *= 180000.0;
2673                         pos += incr;
2674                         stream_seek(cur_stream, pos, incr, 1);
2675                     } else {
2676                         pos = get_master_clock(cur_stream);
2677                         pos += incr;
2678                         stream_seek(cur_stream, (int64_t)(pos * AV_TIME_BASE), (int64_t)(incr * AV_TIME_BASE), 0);
2679                     }
2680                 }
2681                 break;
2682             default:
2683                 break;
2684             }
2685             break;
2686         case SDL_MOUSEBUTTONDOWN:
2687             if (exit_on_mousedown) {
2688                 do_exit();
2689                 break;
2690             }
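             /* fall through: a mouse click is also handled as motion below */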
2691         case SDL_MOUSEMOTION:
2692             if (event.type == SDL_MOUSEBUTTONDOWN) {
2693                 x = event.button.x;
2694             } else {
2695                 if (event.motion.state != SDL_PRESSED)
2696                     break;
2697                 x = event.motion.x;
2698             }
2699             if (cur_stream) {
2700                 if (seek_by_bytes || cur_stream->ic->duration <= 0) {
2701                     uint64_t size =  avio_size(cur_stream->ic->pb);
2702                     stream_seek(cur_stream, size*x/cur_stream->width, 0, 1);
2703                 } else {
2704                     int64_t ts;
2705                     int ns, hh, mm, ss;
2706                     int tns, thh, tmm, tss;
2707                     tns  = cur_stream->ic->duration / 1000000LL;
2708                     thh  = tns / 3600;
2709                     tmm  = (tns % 3600) / 60;
2710                     tss  = (tns % 60);
2711                     frac = x / cur_stream->width;
2712                     ns   = frac * tns;
2713                     hh   = ns / 3600;
2714                     mm   = (ns % 3600) / 60;
2715                     ss   = (ns % 60);
2716                     fprintf(stderr, "Seek to %2.0f%% (%2d:%02d:%02d) of total duration (%2d:%02d:%02d)       \n", frac*100,
2717                             hh, mm, ss, thh, tmm, tss);
2718                     ts = frac * cur_stream->ic->duration;
2719                     if (cur_stream->ic->start_time != AV_NOPTS_VALUE)
2720                         ts += cur_stream->ic->start_time;
2721                     stream_seek(cur_stream, ts, 0, 0);
2722                 }
2723             }
2724             break;
2725         case SDL_VIDEORESIZE:
2726             if (cur_stream) {
2727                 screen = SDL_SetVideoMode(event.resize.w, event.resize.h, 0,
2728                                           SDL_HWSURFACE|SDL_RESIZABLE|SDL_ASYNCBLIT|SDL_HWACCEL);
2729                 screen_width  = cur_stream->width  = event.resize.w;
2730                 screen_height = cur_stream->height = event.resize.h;
2731             }
2732             break;
2733         case SDL_QUIT:
2734         case FF_QUIT_EVENT:
2735             do_exit();
2736             break;
2737         case FF_ALLOC_EVENT:
2738             video_open(event.user.data1);
2739             alloc_picture(event.user.data1);
2740             break;
2741         case FF_REFRESH_EVENT:
2742             video_refresh_timer(event.user.data1);
2743             cur_stream->refresh = 0;
2744             break;
2745         default:
2746             break;
2747         }
2748     }
2749 }
2750
2751 static int opt_frame_size(void *optctx, const char *opt, const char *arg)
2752 {
2753     av_log(NULL, AV_LOG_ERROR,
2754            "Option '%s' has been removed, use private format options instead\n", opt);
2755     return AVERROR(EINVAL);
2756 }
2757
2758 static int opt_width(void *optctx, const char *opt, const char *arg)
2759 {
2760     screen_width = parse_number_or_die(opt, arg, OPT_INT64, 1, INT_MAX);
2761     return 0;
2762 }
2763
2764 static int opt_height(void *optctx, const char *opt, const char *arg)
2765 {
2766     screen_height = parse_number_or_die(opt, arg, OPT_INT64, 1, INT_MAX);
2767     return 0;
2768 }
2769
2770 static int opt_format(void *optctx, const char *opt, const char *arg)
2771 {
2772     file_iformat = av_find_input_format(arg);
2773     if (!file_iformat) {
2774         fprintf(stderr, "Unknown input format: %s\n", arg);
2775         return AVERROR(EINVAL);
2776     }
2777     return 0;
2778 }
2779
2780 static int opt_frame_pix_fmt(void *optctx, const char *opt, const char *arg)
2781 {
2782     av_log(NULL, AV_LOG_ERROR,
2783            "Option '%s' has been removed, use private format options instead\n", opt);
2784     return AVERROR(EINVAL);
2785 }
2786
2787 static int opt_sync(void *optctx, const char *opt, const char *arg)
2788 {
2789     if (!strcmp(arg, "audio"))
2790         av_sync_type = AV_SYNC_AUDIO_MASTER;
2791     else if (!strcmp(arg, "video"))
2792         av_sync_type = AV_SYNC_VIDEO_MASTER;
2793     else if (!strcmp(arg, "ext"))
2794         av_sync_type = AV_SYNC_EXTERNAL_CLOCK;
2795     else {
2796         fprintf(stderr, "Unknown value for %s: %s\n", opt, arg);
2797         exit(1);
2798     }
2799     return 0;
2800 }
2801
2802 static int opt_seek(void *optctx, const char *opt, const char *arg)
2803 {
2804     start_time = parse_time_or_die(opt, arg, 1);
2805     return 0;
2806 }
2807
2808 static int opt_duration(void *optctx, const char *opt, const char *arg)
2809 {
2810     duration = parse_time_or_die(opt, arg, 1);
2811     return 0;
2812 }
2813
2814 static int opt_vismv(void *optctx, const char *opt, const char *arg)
2815 {
2816     debug_mv = parse_number_or_die(opt, arg, OPT_INT64, INT_MIN, INT_MAX);
2817     return 0;
2818 }
2819
2820 static const OptionDef options[] = {
2821 #include "cmdutils_common_opts.h"
2822     { "x", HAS_ARG, { .func_arg = opt_width }, "force displayed width", "width" },
2823     { "y", HAS_ARG, { .func_arg = opt_height }, "force displayed height", "height" },
2824     { "s", HAS_ARG | OPT_VIDEO, { .func_arg = opt_frame_size }, "set frame size (WxH or abbreviation)", "size" },
2825     { "fs", OPT_BOOL, { &is_full_screen }, "force full screen" },
2826     { "an", OPT_BOOL, { &audio_disable }, "disable audio" },
2827     { "vn", OPT_BOOL, { &video_disable }, "disable video" },
2828     { "ast", OPT_INT | HAS_ARG | OPT_EXPERT, { &wanted_stream[AVMEDIA_TYPE_AUDIO] }, "select desired audio stream", "stream_number" },
2829     { "vst", OPT_INT | HAS_ARG | OPT_EXPERT, { &wanted_stream[AVMEDIA_TYPE_VIDEO] }, "select desired video stream", "stream_number" },
2830     { "sst", OPT_INT | HAS_ARG | OPT_EXPERT, { &wanted_stream[AVMEDIA_TYPE_SUBTITLE] }, "select desired subtitle stream", "stream_number" },
2831     { "ss", HAS_ARG, { .func_arg = opt_seek }, "seek to a given position in seconds", "pos" },
2832     { "t", HAS_ARG, { .func_arg = opt_duration }, "play \"duration\" seconds of audio/video", "duration" },
2833     { "bytes", OPT_INT | HAS_ARG, { &seek_by_bytes }, "seek by bytes 0=off 1=on -1=auto", "val" },
2834     { "nodisp", OPT_BOOL, { &display_disable }, "disable graphical display" },
2835     { "f", HAS_ARG, { .func_arg = opt_format }, "force format", "fmt" },
2836     { "pix_fmt", HAS_ARG | OPT_EXPERT | OPT_VIDEO, { .func_arg = opt_frame_pix_fmt }, "set pixel format", "format" },
2837     { "stats", OPT_BOOL | OPT_EXPERT, { &show_status }, "show status", "" },
2838     { "bug", OPT_INT | HAS_ARG | OPT_EXPERT, { &workaround_bugs }, "workaround bugs", "" },
2839     { "vismv", HAS_ARG | OPT_EXPERT, { .func_arg = opt_vismv }, "visualize motion vectors", "" },
2840     { "fast", OPT_BOOL | OPT_EXPERT, { &fast }, "non spec compliant optimizations", "" },
2841     { "genpts", OPT_BOOL | OPT_EXPERT, { &genpts }, "generate pts", "" },
2842     { "drp", OPT_INT | HAS_ARG | OPT_EXPERT, { &decoder_reorder_pts }, "let decoder reorder pts 0=off 1=on -1=auto", ""},
2843     { "skiploop", OPT_INT | HAS_ARG | OPT_EXPERT, { &skip_loop_filter }, "", "" },
2844     { "skipframe", OPT_INT | HAS_ARG | OPT_EXPERT, { &skip_frame }, "", "" },
2845     { "skipidct", OPT_INT | HAS_ARG | OPT_EXPERT, { &skip_idct }, "", "" },
2846     { "idct", OPT_INT | HAS_ARG | OPT_EXPERT, { &idct }, "set idct algo",  "algo" },
2847     { "ec", OPT_INT | HAS_ARG | OPT_EXPERT, { &error_concealment }, "set error concealment options",  "bit_mask" },
2848     { "sync", HAS_ARG | OPT_EXPERT, { .func_arg = opt_sync }, "set audio-video sync. type (type=audio/video/ext)", "type" },
2849     { "autoexit", OPT_BOOL | OPT_EXPERT, { &autoexit }, "exit at the end", "" },
2850     { "exitonkeydown", OPT_BOOL | OPT_EXPERT, { &exit_on_keydown }, "exit on key down", "" },
2851     { "exitonmousedown", OPT_BOOL | OPT_EXPERT, { &exit_on_mousedown }, "exit on mouse down", "" },
2852     { "loop", OPT_INT | HAS_ARG | OPT_EXPERT, { &loop }, "set number of times the playback shall be looped", "loop count" },
2853     { "framedrop", OPT_BOOL | OPT_EXPERT, { &framedrop }, "drop frames when cpu is too slow", "" },
2854     { "infbuf", OPT_BOOL | OPT_EXPERT, { &infinite_buffer }, "don't limit the input buffer size (useful with realtime streams)", "" },
2855     { "window_title", OPT_STRING | HAS_ARG, { &window_title }, "set window title", "window title" },
2856 #if CONFIG_AVFILTER
2857     { "vf", OPT_STRING | HAS_ARG, { &vfilters }, "video filters", "filter list" },
2858 #endif
2859     { "rdftspeed", OPT_INT | HAS_ARG| OPT_AUDIO | OPT_EXPERT, { &rdftspeed }, "rdft speed", "msecs" },
2860     { "default", HAS_ARG | OPT_AUDIO | OPT_VIDEO | OPT_EXPERT, { opt_default }, "generic catch all option", "" },
2861     { "i", 0, { NULL }, "avconv compatibility dummy option", ""},
2862     { NULL, },
2863 };
2864
2865 static void show_usage(void)
2866 {
2867     printf("Simple media player\n");
2868     printf("usage: %s [options] input_file\n", program_name);
2869     printf("\n");
2870 }
2871
2872 void show_help_default(const char *opt, const char *arg)
2873 {
2874     av_log_set_callback(log_callback_help);
2875     show_usage();
2876     show_help_options(options, "Main options:", 0, OPT_EXPERT, 0);
2877     show_help_options(options, "Advanced options:", OPT_EXPERT, 0, 0);
2878     printf("\n");
2879     show_help_children(avcodec_get_class(), AV_OPT_FLAG_DECODING_PARAM);
2880     show_help_children(avformat_get_class(), AV_OPT_FLAG_DECODING_PARAM);
2881 #if !CONFIG_AVFILTER
2882     show_help_children(sws_get_class(), AV_OPT_FLAG_ENCODING_PARAM);
2883 #endif
2884     printf("\nWhile playing:\n"
2885            "q, ESC              quit\n"
2886            "f                   toggle full screen\n"
2887            "p, SPC              pause\n"
2888            "a                   cycle audio channel\n"
2889            "v                   cycle video channel\n"
2890            "t                   cycle subtitle channel\n"
2891            "w                   show audio waves\n"
2892            "s                   activate frame-step mode\n"
2893            "left/right          seek backward/forward 10 seconds\n"
2894            "down/up             seek backward/forward 1 minute\n"
2895            "mouse click         seek to percentage in file corresponding to fraction of width\n"
2896            );
2897 }
2898
2899 static void opt_input_file(void *optctx, const char *filename)
2900 {
2901     if (input_filename) {
2902         fprintf(stderr, "Argument '%s' provided as input filename, but '%s' was already specified.\n",
2903                 filename, input_filename);
2904         exit(1);
2905     }
2906     if (!strcmp(filename, "-"))
2907         filename = "pipe:";
2908     input_filename = filename;
2909 }
2910
2911 /* program entry point */
2912 int main(int argc, char **argv)
2913 {
2914     int flags;
2915
2916     av_log_set_flags(AV_LOG_SKIP_REPEATED);
2917     parse_loglevel(argc, argv, options);
2918
2919     /* register all codecs, demux and protocols */
2920     avcodec_register_all();
2921 #if CONFIG_AVDEVICE
2922     avdevice_register_all();
2923 #endif
2924 #if CONFIG_AVFILTER
2925     avfilter_register_all();
2926 #endif
2927     av_register_all();
2928     avformat_network_init();
2929
2930     init_opts();
2931
2932     show_banner();
2933
2934     parse_options(NULL, argc, argv, options, opt_input_file);
2935
2936     if (!input_filename) {
2937         show_usage();
2938         fprintf(stderr, "An input file must be specified\n");
2939         fprintf(stderr, "Use -h to get full help or, even better, run 'man %s'\n", program_name);
2940         exit(1);
2941     }
2942
2943     if (display_disable) {
2944         video_disable = 1;
2945     }
2946     flags = SDL_INIT_VIDEO | SDL_INIT_AUDIO | SDL_INIT_TIMER;
2947 #if !defined(__MINGW32__) && !defined(__APPLE__)
2948     flags |= SDL_INIT_EVENTTHREAD; /* Not supported on Windows or Mac OS X */
2949 #endif
2950     if (SDL_Init (flags)) {
2951         fprintf(stderr, "Could not initialize SDL - %s\n", SDL_GetError());
2952         exit(1);
2953     }
2954
2955     if (!display_disable) {
2956         const SDL_VideoInfo *vi = SDL_GetVideoInfo();
2957         fs_screen_width = vi->current_w;
2958         fs_screen_height = vi->current_h;
2959     }
2960
2961     SDL_EventState(SDL_ACTIVEEVENT, SDL_IGNORE);
2962     SDL_EventState(SDL_SYSWMEVENT, SDL_IGNORE);
2963     SDL_EventState(SDL_USEREVENT, SDL_IGNORE);
2964
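     /* flush_pkt is a sentinel: the queues compare pkt->data against
      * flush_pkt.data, and on a match the decoder threads call
      * avcodec_flush_buffers() instead of decoding */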
2965     av_init_packet(&flush_pkt);
2966     flush_pkt.data = (uint8_t *)"FLUSH";
2967
2968     cur_stream = stream_open(input_filename, file_iformat);
2969
2970     event_loop();
2971
2972     /* never returns */
2973
2974     return 0;
2975 }