1 /*
2  * avplay: Simple Media Player based on the Libav libraries
3  * Copyright (c) 2003 Fabrice Bellard
4  *
5  * This file is part of Libav.
6  *
7  * Libav is free software; you can redistribute it and/or
8  * modify it under the terms of the GNU Lesser General Public
9  * License as published by the Free Software Foundation; either
10  * version 2.1 of the License, or (at your option) any later version.
11  *
12  * Libav is distributed in the hope that it will be useful,
13  * but WITHOUT ANY WARRANTY; without even the implied warranty of
14  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
15  * Lesser General Public License for more details.
16  *
17  * You should have received a copy of the GNU Lesser General Public
18  * License along with Libav; if not, write to the Free Software
19  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
20  */
21
22 #include "config.h"
23 #include <inttypes.h>
24 #include <math.h>
25 #include <limits.h>
26 #include "libavutil/avstring.h"
27 #include "libavutil/colorspace.h"
28 #include "libavutil/mathematics.h"
29 #include "libavutil/pixdesc.h"
30 #include "libavutil/imgutils.h"
31 #include "libavutil/dict.h"
32 #include "libavutil/parseutils.h"
33 #include "libavutil/samplefmt.h"
34 #include "libavutil/time.h"
35 #include "libavformat/avformat.h"
36 #include "libavdevice/avdevice.h"
37 #include "libswscale/swscale.h"
38 #include "libavresample/avresample.h"
39 #include "libavutil/opt.h"
40 #include "libavcodec/avfft.h"
41
42 #if CONFIG_AVFILTER
43 # include "libavfilter/avfilter.h"
44 # include "libavfilter/avfiltergraph.h"
45 # include "libavfilter/buffersink.h"
46 # include "libavfilter/buffersrc.h"
47 #endif
48
49 #include "cmdutils.h"
50
51 #include <SDL.h>
52 #include <SDL_thread.h>
53
54 #ifdef __MINGW32__
55 #undef main /* We don't want SDL to override our main() */
56 #endif
57
58 #include <assert.h>
59
60 const char program_name[] = "avplay";
61 const int program_birth_year = 2003;
62
63 #define MAX_QUEUE_SIZE (15 * 1024 * 1024)
64 #define MIN_AUDIOQ_SIZE (20 * 16 * 1024)
65 #define MIN_FRAMES 5
66
67 /* SDL audio buffer size, in samples. Should be small to have precise
68    A/V sync as SDL does not have hardware buffer fullness info. */
69 #define SDL_AUDIO_BUFFER_SIZE 1024
70
71 /* no AV sync correction is done if below the AV sync threshold */
72 #define AV_SYNC_THRESHOLD 0.01
73 /* no AV sync correction is done if the error is too big */
74 #define AV_NOSYNC_THRESHOLD 10.0
75
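/* relative step by which skip_frames is raised when video falls behind
   (video_refresh_timer) and lowered again once the picture queue has room
   (queue_picture) */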
76 #define FRAME_SKIP_FACTOR 0.05
77
78 /* maximum audio speed change to get correct sync */
79 #define SAMPLE_CORRECTION_PERCENT_MAX 10
80
81 /* we use about AUDIO_DIFF_AVG_NB A-V differences to compute the average */
82 #define AUDIO_DIFF_AVG_NB   20
83
84 /* NOTE: the size must be big enough to compensate for the hardware audio buffer size */
85 #define SAMPLE_ARRAY_SIZE (2 * 65536)
86
87 static int64_t sws_flags = SWS_BICUBIC;
88
89 typedef struct PacketQueue {
90     AVPacketList *first_pkt, *last_pkt;
91     int nb_packets;
92     int size;
93     int abort_request;
94     SDL_mutex *mutex;
95     SDL_cond *cond;
96 } PacketQueue;
97
98 #define VIDEO_PICTURE_QUEUE_SIZE 2
99 #define SUBPICTURE_QUEUE_SIZE 4
100
101 typedef struct VideoPicture {
102     double pts;             // presentation timestamp for this picture
103     double target_clock;    // av_gettime() time at which this should be displayed ideally
104     int64_t pos;            // byte position in file
105     SDL_Overlay *bmp;
106     int width, height; /* source height & width */
107     int allocated;
108     int reallocate;
109     enum AVPixelFormat pix_fmt;
110
111     AVRational sar;
112 } VideoPicture;
113
114 typedef struct SubPicture {
115     double pts; /* presentation time stamp for this picture */
116     AVSubtitle sub;
117 } SubPicture;
118
119 enum {
120     AV_SYNC_AUDIO_MASTER, /* default choice */
121     AV_SYNC_VIDEO_MASTER,
122     AV_SYNC_EXTERNAL_CLOCK, /* synchronize to an external clock */
123 };
124
125 typedef struct VideoState {
126     SDL_Thread *parse_tid;
127     SDL_Thread *video_tid;
128     SDL_Thread *refresh_tid;
129     AVInputFormat *iformat;
130     int no_background;
131     int abort_request;
132     int paused;
133     int last_paused;
134     int seek_req;
135     int seek_flags;
136     int64_t seek_pos;
137     int64_t seek_rel;
138     int read_pause_return;
139     AVFormatContext *ic;
140
141     int audio_stream;
142
143     int av_sync_type;
144     double external_clock; /* external clock base */
145     int64_t external_clock_time;
146
147     double audio_clock;
148     double audio_diff_cum; /* used for AV difference average computation */
149     double audio_diff_avg_coef;
150     double audio_diff_threshold;
151     int audio_diff_avg_count;
152     AVStream *audio_st;
153     PacketQueue audioq;
154     int audio_hw_buf_size;
155     uint8_t silence_buf[SDL_AUDIO_BUFFER_SIZE];
156     uint8_t *audio_buf;
157     uint8_t *audio_buf1;
158     unsigned int audio_buf_size; /* in bytes */
159     int audio_buf_index; /* in bytes */
160     AVPacket audio_pkt_temp;
161     AVPacket audio_pkt;
162     enum AVSampleFormat sdl_sample_fmt;
163     uint64_t sdl_channel_layout;
164     int sdl_channels;
165     int sdl_sample_rate;
166     enum AVSampleFormat resample_sample_fmt;
167     uint64_t resample_channel_layout;
168     int resample_sample_rate;
169     AVAudioResampleContext *avr;
170     AVFrame *frame;
171
172     int show_audio; /* if true, display audio samples */
173     int16_t sample_array[SAMPLE_ARRAY_SIZE];
174     int sample_array_index;
175     int last_i_start;
176     RDFTContext *rdft;
177     int rdft_bits;
178     FFTSample *rdft_data;
179     int xpos;
180
181     SDL_Thread *subtitle_tid;
182     int subtitle_stream;
183     int subtitle_stream_changed;
184     AVStream *subtitle_st;
185     PacketQueue subtitleq;
186     SubPicture subpq[SUBPICTURE_QUEUE_SIZE];
187     int subpq_size, subpq_rindex, subpq_windex;
188     SDL_mutex *subpq_mutex;
189     SDL_cond *subpq_cond;
190
191     double frame_timer;
192     double frame_last_pts;
193     double frame_last_delay;
194     double video_clock;             // pts of last decoded frame / predicted pts of next decoded frame
195     int video_stream;
196     AVStream *video_st;
197     PacketQueue videoq;
198     double video_current_pts;       // current displayed pts (different from video_clock if frame fifos are used)
199     double video_current_pts_drift; // video_current_pts - time (av_gettime) at which we updated video_current_pts - used to have running video pts
200     int64_t video_current_pos;      // current displayed file pos
201     VideoPicture pictq[VIDEO_PICTURE_QUEUE_SIZE];
202     int pictq_size, pictq_rindex, pictq_windex;
203     SDL_mutex *pictq_mutex;
204     SDL_cond *pictq_cond;
205 #if !CONFIG_AVFILTER
206     struct SwsContext *img_convert_ctx;
207 #endif
208
209     //    QETimer *video_timer;
210     char filename[1024];
211     int width, height, xleft, ytop;
212
213     PtsCorrectionContext pts_ctx;
214
215 #if CONFIG_AVFILTER
216     AVFilterContext *in_video_filter;   // the first filter in the video chain
217     AVFilterContext *out_video_filter;  // the last filter in the video chain
218 #endif
219
220     float skip_frames;
221     float skip_frames_index;
222     int refresh;
223 } VideoState;
224
225 /* options specified by the user */
226 static AVInputFormat *file_iformat;
227 static const char *input_filename;
228 static const char *window_title;
229 static int fs_screen_width;
230 static int fs_screen_height;
231 static int screen_width  = 0;
232 static int screen_height = 0;
233 static int audio_disable;
234 static int video_disable;
235 static int wanted_stream[AVMEDIA_TYPE_NB] = {
236     [AVMEDIA_TYPE_AUDIO]    = -1,
237     [AVMEDIA_TYPE_VIDEO]    = -1,
238     [AVMEDIA_TYPE_SUBTITLE] = -1,
239 };
240 static int seek_by_bytes = -1;
241 static int display_disable;
242 static int show_status = 1;
243 static int av_sync_type = AV_SYNC_AUDIO_MASTER;
244 static int64_t start_time = AV_NOPTS_VALUE;
245 static int64_t duration = AV_NOPTS_VALUE;
246 static int debug = 0;
247 static int debug_mv = 0;
248 static int step = 0;
249 static int workaround_bugs = 1;
250 static int fast = 0;
251 static int genpts = 0;
252 static int idct = FF_IDCT_AUTO;
253 static enum AVDiscard skip_frame       = AVDISCARD_DEFAULT;
254 static enum AVDiscard skip_idct        = AVDISCARD_DEFAULT;
255 static enum AVDiscard skip_loop_filter = AVDISCARD_DEFAULT;
256 static int error_concealment = 3;
257 static int decoder_reorder_pts = -1;
258 static int autoexit;
259 static int exit_on_keydown;
260 static int exit_on_mousedown;
261 static int loop = 1;
262 static int framedrop = 1;
263 static int infinite_buffer = 0;
264
265 static int rdftspeed = 20;
266 #if CONFIG_AVFILTER
267 static char *vfilters = NULL;
268 #endif
269
270 /* current context */
271 static int is_full_screen;
272 static VideoState *cur_stream;
273 static int64_t audio_callback_time;
274
275 static AVPacket flush_pkt;
276
277 #define FF_ALLOC_EVENT   (SDL_USEREVENT)
278 #define FF_REFRESH_EVENT (SDL_USEREVENT + 1)
279 #define FF_QUIT_EVENT    (SDL_USEREVENT + 2)
280
281 static SDL_Surface *screen;
282
283 static int packet_queue_put(PacketQueue *q, AVPacket *pkt);
284
285 /* packet queue handling */
286 static void packet_queue_init(PacketQueue *q)
287 {
288     memset(q, 0, sizeof(PacketQueue));
289     q->mutex = SDL_CreateMutex();
290     q->cond = SDL_CreateCond();
291     packet_queue_put(q, &flush_pkt);
292 }
293
294 static void packet_queue_flush(PacketQueue *q)
295 {
296     AVPacketList *pkt, *pkt1;
297
298     SDL_LockMutex(q->mutex);
299     for (pkt = q->first_pkt; pkt != NULL; pkt = pkt1) {
300         pkt1 = pkt->next;
301         av_free_packet(&pkt->pkt);
302         av_freep(&pkt);
303     }
304     q->last_pkt = NULL;
305     q->first_pkt = NULL;
306     q->nb_packets = 0;
307     q->size = 0;
308     SDL_UnlockMutex(q->mutex);
309 }
310
311 static void packet_queue_end(PacketQueue *q)
312 {
313     packet_queue_flush(q);
314     SDL_DestroyMutex(q->mutex);
315     SDL_DestroyCond(q->cond);
316 }
317
318 static int packet_queue_put(PacketQueue *q, AVPacket *pkt)
319 {
320     AVPacketList *pkt1;
321
322     /* duplicate the packet */
323     if (pkt != &flush_pkt && av_dup_packet(pkt) < 0)
324         return -1;
325
326     pkt1 = av_malloc(sizeof(AVPacketList));
327     if (!pkt1)
328         return -1;
329     pkt1->pkt = *pkt;
330     pkt1->next = NULL;
331
332
333     SDL_LockMutex(q->mutex);
334
335     if (!q->last_pkt)
336
337         q->first_pkt = pkt1;
338     else
339         q->last_pkt->next = pkt1;
340     q->last_pkt = pkt1;
341     q->nb_packets++;
342     q->size += pkt1->pkt.size + sizeof(*pkt1);
343     /* XXX: should duplicate packet data in DV case */
344     SDL_CondSignal(q->cond);
345
346     SDL_UnlockMutex(q->mutex);
347     return 0;
348 }
349
350 static void packet_queue_abort(PacketQueue *q)
351 {
352     SDL_LockMutex(q->mutex);
353
354     q->abort_request = 1;
355
356     SDL_CondSignal(q->cond);
357
358     SDL_UnlockMutex(q->mutex);
359 }
360
361 /* return < 0 if aborted, 0 if no packet and > 0 if packet.  */
362 static int packet_queue_get(PacketQueue *q, AVPacket *pkt, int block)
363 {
364     AVPacketList *pkt1;
365     int ret;
366
367     SDL_LockMutex(q->mutex);
368
369     for (;;) {
370         if (q->abort_request) {
371             ret = -1;
372             break;
373         }
374
375         pkt1 = q->first_pkt;
376         if (pkt1) {
377             q->first_pkt = pkt1->next;
378             if (!q->first_pkt)
379                 q->last_pkt = NULL;
380             q->nb_packets--;
381             q->size -= pkt1->pkt.size + sizeof(*pkt1);
382             *pkt = pkt1->pkt;
383             av_free(pkt1);
384             ret = 1;
385             break;
386         } else if (!block) {
387             ret = 0;
388             break;
389         } else {
390             SDL_CondWait(q->cond, q->mutex);
391         }
392     }
393     SDL_UnlockMutex(q->mutex);
394     return ret;
395 }
396
397 static inline void fill_rectangle(SDL_Surface *screen,
398                                   int x, int y, int w, int h, int color)
399 {
400     SDL_Rect rect;
401     rect.x = x;
402     rect.y = y;
403     rect.w = w;
404     rect.h = h;
405     SDL_FillRect(screen, &rect, color);
406 }
407
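/* blend oldp towards newp with alpha a in [0, 255]; the shift s compensates
   for newp (and the summed alpha) covering 2^s neighbouring samples, as used
   for the subsampled chroma planes in blend_subrect() below */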
408 #define ALPHA_BLEND(a, oldp, newp, s)\
409 ((((oldp << s) * (255 - (a))) + (newp * (a))) / (255 << s))
410
411 #define RGBA_IN(r, g, b, a, s)\
412 {\
413     unsigned int v = ((const uint32_t *)(s))[0];\
414     a = (v >> 24) & 0xff;\
415     r = (v >> 16) & 0xff;\
416     g = (v >> 8) & 0xff;\
417     b = v & 0xff;\
418 }
419
420 #define YUVA_IN(y, u, v, a, s, pal)\
421 {\
422     unsigned int val = ((const uint32_t *)(pal))[*(const uint8_t*)(s)];\
423     a = (val >> 24) & 0xff;\
424     y = (val >> 16) & 0xff;\
425     u = (val >> 8) & 0xff;\
426     v = val & 0xff;\
427 }
428
429 #define YUVA_OUT(d, y, u, v, a)\
430 {\
431     ((uint32_t *)(d))[0] = (a << 24) | (y << 16) | (u << 8) | v;\
432 }
433
434
435 #define BPP 1
436
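/* Alpha-blend one palettized subtitle rectangle (8-bit indices into a 32-bit
   AYUV palette) onto a YUV420P destination picture.  Luma is blended per
   pixel; the subsampled chroma planes are blended from sums over the
   corresponding 2x1 or 2x2 pixel groups. */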
437 static void blend_subrect(AVPicture *dst, const AVSubtitleRect *rect, int imgw, int imgh)
438 {
439     int wrap, wrap3, width2, skip2;
440     int y, u, v, a, u1, v1, a1, w, h;
441     uint8_t *lum, *cb, *cr;
442     const uint8_t *p;
443     const uint32_t *pal;
444     int dstx, dsty, dstw, dsth;
445
446     dstw = av_clip(rect->w, 0, imgw);
447     dsth = av_clip(rect->h, 0, imgh);
448     dstx = av_clip(rect->x, 0, imgw - dstw);
449     dsty = av_clip(rect->y, 0, imgh - dsth);
450     lum = dst->data[0] + dsty * dst->linesize[0];
451     cb  = dst->data[1] + (dsty >> 1) * dst->linesize[1];
452     cr  = dst->data[2] + (dsty >> 1) * dst->linesize[2];
453
454     width2 = ((dstw + 1) >> 1) + (dstx & ~dstw & 1);
455     skip2 = dstx >> 1;
456     wrap = dst->linesize[0];
457     wrap3 = rect->pict.linesize[0];
458     p = rect->pict.data[0];
459     pal = (const uint32_t *)rect->pict.data[1];  /* Now in YCrCb! */
460
461     if (dsty & 1) {
462         lum += dstx;
463         cb += skip2;
464         cr += skip2;
465
466         if (dstx & 1) {
467             YUVA_IN(y, u, v, a, p, pal);
468             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
469             cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
470             cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
471             cb++;
472             cr++;
473             lum++;
474             p += BPP;
475         }
476         for (w = dstw - (dstx & 1); w >= 2; w -= 2) {
477             YUVA_IN(y, u, v, a, p, pal);
478             u1 = u;
479             v1 = v;
480             a1 = a;
481             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
482
483             YUVA_IN(y, u, v, a, p + BPP, pal);
484             u1 += u;
485             v1 += v;
486             a1 += a;
487             lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
488             cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
489             cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
490             cb++;
491             cr++;
492             p += 2 * BPP;
493             lum += 2;
494         }
495         if (w) {
496             YUVA_IN(y, u, v, a, p, pal);
497             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
498             cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
499             cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
500             p++;
501             lum++;
502         }
503         p += wrap3 - dstw * BPP;
504         lum += wrap - dstw - dstx;
505         cb += dst->linesize[1] - width2 - skip2;
506         cr += dst->linesize[2] - width2 - skip2;
507     }
508     for (h = dsth - (dsty & 1); h >= 2; h -= 2) {
509         lum += dstx;
510         cb += skip2;
511         cr += skip2;
512
513         if (dstx & 1) {
514             YUVA_IN(y, u, v, a, p, pal);
515             u1 = u;
516             v1 = v;
517             a1 = a;
518             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
519             p += wrap3;
520             lum += wrap;
521             YUVA_IN(y, u, v, a, p, pal);
522             u1 += u;
523             v1 += v;
524             a1 += a;
525             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
526             cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
527             cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
528             cb++;
529             cr++;
530             p += -wrap3 + BPP;
531             lum += -wrap + 1;
532         }
533         for (w = dstw - (dstx & 1); w >= 2; w -= 2) {
534             YUVA_IN(y, u, v, a, p, pal);
535             u1 = u;
536             v1 = v;
537             a1 = a;
538             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
539
540             YUVA_IN(y, u, v, a, p + BPP, pal);
541             u1 += u;
542             v1 += v;
543             a1 += a;
544             lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
545             p += wrap3;
546             lum += wrap;
547
548             YUVA_IN(y, u, v, a, p, pal);
549             u1 += u;
550             v1 += v;
551             a1 += a;
552             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
553
554             YUVA_IN(y, u, v, a, p + BPP, pal);
555             u1 += u;
556             v1 += v;
557             a1 += a;
558             lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
559
560             cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 2);
561             cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 2);
562
563             cb++;
564             cr++;
565             p += -wrap3 + 2 * BPP;
566             lum += -wrap + 2;
567         }
568         if (w) {
569             YUVA_IN(y, u, v, a, p, pal);
570             u1 = u;
571             v1 = v;
572             a1 = a;
573             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
574             p += wrap3;
575             lum += wrap;
576             YUVA_IN(y, u, v, a, p, pal);
577             u1 += u;
578             v1 += v;
579             a1 += a;
580             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
581             cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
582             cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
583             cb++;
584             cr++;
585             p += -wrap3 + BPP;
586             lum += -wrap + 1;
587         }
588         p += wrap3 + (wrap3 - dstw * BPP);
589         lum += wrap + (wrap - dstw - dstx);
590         cb += dst->linesize[1] - width2 - skip2;
591         cr += dst->linesize[2] - width2 - skip2;
592     }
593     /* handle odd height */
594     if (h) {
595         lum += dstx;
596         cb += skip2;
597         cr += skip2;
598
599         if (dstx & 1) {
600             YUVA_IN(y, u, v, a, p, pal);
601             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
602             cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
603             cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
604             cb++;
605             cr++;
606             lum++;
607             p += BPP;
608         }
609         for (w = dstw - (dstx & 1); w >= 2; w -= 2) {
610             YUVA_IN(y, u, v, a, p, pal);
611             u1 = u;
612             v1 = v;
613             a1 = a;
614             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
615
616             YUVA_IN(y, u, v, a, p + BPP, pal);
617             u1 += u;
618             v1 += v;
619             a1 += a;
620             lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
621             cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
622             cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
623             cb++;
624             cr++;
625             p += 2 * BPP;
626             lum += 2;
627         }
628         if (w) {
629             YUVA_IN(y, u, v, a, p, pal);
630             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
631             cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
632             cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
633         }
634     }
635 }
636
637 static void free_subpicture(SubPicture *sp)
638 {
639     avsubtitle_free(&sp->sub);
640 }
641
642 static void video_image_display(VideoState *is)
643 {
644     VideoPicture *vp;
645     SubPicture *sp;
646     AVPicture pict;
647     float aspect_ratio;
648     int width, height, x, y;
649     SDL_Rect rect;
650     int i;
651
652     vp = &is->pictq[is->pictq_rindex];
653     if (vp->bmp) {
654 #if CONFIG_AVFILTER
655          if (!vp->sar.num)
656              aspect_ratio = 0;
657          else
658              aspect_ratio = av_q2d(vp->sar);
659 #else
660
661         /* XXX: use variable in the frame */
662         if (is->video_st->sample_aspect_ratio.num)
663             aspect_ratio = av_q2d(is->video_st->sample_aspect_ratio);
664         else if (is->video_st->codec->sample_aspect_ratio.num)
665             aspect_ratio = av_q2d(is->video_st->codec->sample_aspect_ratio);
666         else
667             aspect_ratio = 0;
668 #endif
669         if (aspect_ratio <= 0.0)
670             aspect_ratio = 1.0;
671         aspect_ratio *= (float)vp->width / (float)vp->height;
672
673         if (is->subtitle_st)
674         {
675             if (is->subpq_size > 0)
676             {
677                 sp = &is->subpq[is->subpq_rindex];
678
679                 if (vp->pts >= sp->pts + ((float) sp->sub.start_display_time / 1000))
680                 {
681                     SDL_LockYUVOverlay (vp->bmp);
682
683                     pict.data[0] = vp->bmp->pixels[0];
684                     pict.data[1] = vp->bmp->pixels[2];
685                     pict.data[2] = vp->bmp->pixels[1];
686
687                     pict.linesize[0] = vp->bmp->pitches[0];
688                     pict.linesize[1] = vp->bmp->pitches[2];
689                     pict.linesize[2] = vp->bmp->pitches[1];
690
691                     for (i = 0; i < sp->sub.num_rects; i++)
692                         blend_subrect(&pict, sp->sub.rects[i],
693                                       vp->bmp->w, vp->bmp->h);
694
695                     SDL_UnlockYUVOverlay (vp->bmp);
696                 }
697             }
698         }
699
700
701         /* XXX: we assume the screen has a 1:1 pixel aspect ratio */
702         height = is->height;
703         width = ((int)rint(height * aspect_ratio)) & ~1;
704         if (width > is->width) {
705             width = is->width;
706             height = ((int)rint(width / aspect_ratio)) & ~1;
707         }
708         x = (is->width - width) / 2;
709         y = (is->height - height) / 2;
710         is->no_background = 0;
711         rect.x = is->xleft + x;
712         rect.y = is->ytop  + y;
713         rect.w = width;
714         rect.h = height;
715         SDL_DisplayYUVOverlay(vp->bmp, &rect);
716     }
717 }
718
719 /* get the current audio output buffer size, in bytes. With SDL, we
720    cannot get precise information */
721 static int audio_write_get_buf_size(VideoState *is)
722 {
723     return is->audio_buf_size - is->audio_buf_index;
724 }
725
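/* mathematical modulo: always returns a value in [0, b) even for negative a,
   used to wrap indices into the circular sample_array */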
726 static inline int compute_mod(int a, int b)
727 {
728     a = a % b;
729     if (a >= 0)
730         return a;
731     else
732         return a + b;
733 }
734
735 static void video_audio_display(VideoState *s)
736 {
737     int i, i_start, x, y1, y, ys, delay, n, nb_display_channels;
738     int ch, channels, h, h2, bgcolor, fgcolor;
739     int64_t time_diff;
740     int rdft_bits, nb_freq;
741
742     for (rdft_bits = 1; (1 << rdft_bits) < 2 * s->height; rdft_bits++)
743         ;
744     nb_freq = 1 << (rdft_bits - 1);
745
746     /* compute display index : center on currently output samples */
747     channels = s->sdl_channels;
748     nb_display_channels = channels;
749     if (!s->paused) {
750         int data_used = s->show_audio == 1 ? s->width : (2 * nb_freq);
751         n = 2 * channels;
752         delay = audio_write_get_buf_size(s);
753         delay /= n;
754
755         /* to be more precise, we take into account the time spent since
756            the last buffer computation */
757         if (audio_callback_time) {
758             time_diff = av_gettime() - audio_callback_time;
759             delay -= (time_diff * s->sdl_sample_rate) / 1000000;
760         }
761
762         delay += 2 * data_used;
763         if (delay < data_used)
764             delay = data_used;
765
766         i_start= x = compute_mod(s->sample_array_index - delay * channels, SAMPLE_ARRAY_SIZE);
767         if (s->show_audio == 1) {
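            /* oscilloscope-style trigger: look back over the last 1000
               samples for the largest level drop around a sign change, so the
               displayed waveform stays roughly aligned between refreshes */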
768             h = INT_MIN;
769             for (i = 0; i < 1000; i += channels) {
770                 int idx = (SAMPLE_ARRAY_SIZE + x - i) % SAMPLE_ARRAY_SIZE;
771                 int a = s->sample_array[idx];
772                 int b = s->sample_array[(idx + 4 * channels) % SAMPLE_ARRAY_SIZE];
773                 int c = s->sample_array[(idx + 5 * channels) % SAMPLE_ARRAY_SIZE];
774                 int d = s->sample_array[(idx + 9 * channels) % SAMPLE_ARRAY_SIZE];
775                 int score = a - d;
776                 if (h < score && (b ^ c) < 0) {
777                     h = score;
778                     i_start = idx;
779                 }
780             }
781         }
782
783         s->last_i_start = i_start;
784     } else {
785         i_start = s->last_i_start;
786     }
787
788     bgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0x00);
789     if (s->show_audio == 1) {
790         fill_rectangle(screen,
791                        s->xleft, s->ytop, s->width, s->height,
792                        bgcolor);
793
794         fgcolor = SDL_MapRGB(screen->format, 0xff, 0xff, 0xff);
795
796         /* total height for one channel */
797         h = s->height / nb_display_channels;
798         /* graph height / 2 */
799         h2 = (h * 9) / 20;
800         for (ch = 0; ch < nb_display_channels; ch++) {
801             i = i_start + ch;
802             y1 = s->ytop + ch * h + (h / 2); /* position of center line */
803             for (x = 0; x < s->width; x++) {
804                 y = (s->sample_array[i] * h2) >> 15;
805                 if (y < 0) {
806                     y = -y;
807                     ys = y1 - y;
808                 } else {
809                     ys = y1;
810                 }
811                 fill_rectangle(screen,
812                                s->xleft + x, ys, 1, y,
813                                fgcolor);
814                 i += channels;
815                 if (i >= SAMPLE_ARRAY_SIZE)
816                     i -= SAMPLE_ARRAY_SIZE;
817             }
818         }
819
820         fgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0xff);
821
822         for (ch = 1; ch < nb_display_channels; ch++) {
823             y = s->ytop + ch * h;
824             fill_rectangle(screen,
825                            s->xleft, y, s->width, 1,
826                            fgcolor);
827         }
828         SDL_UpdateRect(screen, s->xleft, s->ytop, s->width, s->height);
829     } else {
830         nb_display_channels= FFMIN(nb_display_channels, 2);
831         if (rdft_bits != s->rdft_bits) {
832             av_rdft_end(s->rdft);
833             av_free(s->rdft_data);
834             s->rdft = av_rdft_init(rdft_bits, DFT_R2C);
835             s->rdft_bits = rdft_bits;
836             s->rdft_data = av_malloc(4 * nb_freq * sizeof(*s->rdft_data));
837         }
838         {
839             FFTSample *data[2];
840             for (ch = 0; ch < nb_display_channels; ch++) {
841                 data[ch] = s->rdft_data + 2 * nb_freq * ch;
842                 i = i_start + ch;
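                /* fill the FFT input with a parabolic (Welch) window:
                   1 - w^2, with w running over [-1, 1) across 2*nb_freq samples */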
843                 for (x = 0; x < 2 * nb_freq; x++) {
844                     double w = (x-nb_freq) * (1.0 / nb_freq);
845                     data[ch][x] = s->sample_array[i] * (1.0 - w * w);
846                     i += channels;
847                     if (i >= SAMPLE_ARRAY_SIZE)
848                         i -= SAMPLE_ARRAY_SIZE;
849                 }
850                 av_rdft_calc(s->rdft, data[ch]);
851             }
852             // Least efficient way to do this; ideally we would access the data directly, but it is more than fast enough.
853             for (y = 0; y < s->height; y++) {
854                 double w = 1 / sqrt(nb_freq);
855                 int a = sqrt(w * sqrt(data[0][2 * y + 0] * data[0][2 * y + 0] + data[0][2 * y + 1] * data[0][2 * y + 1]));
856                 int b = (nb_display_channels == 2 ) ? sqrt(w * sqrt(data[1][2 * y + 0] * data[1][2 * y + 0]
857                        + data[1][2 * y + 1] * data[1][2 * y + 1])) : a;
858                 a = FFMIN(a, 255);
859                 b = FFMIN(b, 255);
860                 fgcolor = SDL_MapRGB(screen->format, a, b, (a + b) / 2);
861
862                 fill_rectangle(screen,
863                             s->xpos, s->height-y, 1, 1,
864                             fgcolor);
865             }
866         }
867         SDL_UpdateRect(screen, s->xpos, s->ytop, 1, s->height);
868         s->xpos++;
869         if (s->xpos >= s->width)
870             s->xpos= s->xleft;
871     }
872 }
873
874 static int video_open(VideoState *is)
875 {
876     int flags = SDL_HWSURFACE | SDL_ASYNCBLIT | SDL_HWACCEL;
877     int w,h;
878
879     if (is_full_screen) flags |= SDL_FULLSCREEN;
880     else                flags |= SDL_RESIZABLE;
881
882     if (is_full_screen && fs_screen_width) {
883         w = fs_screen_width;
884         h = fs_screen_height;
885     } else if (!is_full_screen && screen_width) {
886         w = screen_width;
887         h = screen_height;
888 #if CONFIG_AVFILTER
889     } else if (is->out_video_filter && is->out_video_filter->inputs[0]) {
890         w = is->out_video_filter->inputs[0]->w;
891         h = is->out_video_filter->inputs[0]->h;
892 #else
893     } else if (is->video_st && is->video_st->codec->width) {
894         w = is->video_st->codec->width;
895         h = is->video_st->codec->height;
896 #endif
897     } else {
898         w = 640;
899         h = 480;
900     }
901     if (screen && is->width == screen->w && screen->w == w
902        && is->height== screen->h && screen->h == h)
903         return 0;
904
905 #if defined(__APPLE__) && !SDL_VERSION_ATLEAST(1, 2, 14)
906     /* setting bits_per_pixel = 0 or 32 causes blank video on OS X and older SDL */
907     screen = SDL_SetVideoMode(w, h, 24, flags);
908 #else
909     screen = SDL_SetVideoMode(w, h, 0, flags);
910 #endif
911     if (!screen) {
912         fprintf(stderr, "SDL: could not set video mode - exiting\n");
913         return -1;
914     }
915     if (!window_title)
916         window_title = input_filename;
917     SDL_WM_SetCaption(window_title, window_title);
918
919     is->width  = screen->w;
920     is->height = screen->h;
921
922     return 0;
923 }
924
925 /* display the current picture, if any */
926 static void video_display(VideoState *is)
927 {
928     if (!screen)
929         video_open(cur_stream);
930     if (is->audio_st && is->show_audio)
931         video_audio_display(is);
932     else if (is->video_st)
933         video_image_display(is);
934 }
935
936 static int refresh_thread(void *opaque)
937 {
938     VideoState *is= opaque;
939     while (!is->abort_request) {
940         SDL_Event event;
941         event.type = FF_REFRESH_EVENT;
942         event.user.data1 = opaque;
943         if (!is->refresh) {
944             is->refresh = 1;
945             SDL_PushEvent(&event);
946         }
947         av_usleep(is->audio_st && is->show_audio ? rdftspeed * 1000 : 5000); // FIXME ideally we should wait for the correct time, but SDL's event passing is so slow it would be silly
948     }
949     return 0;
950 }
951
952 /* get the current audio clock value */
953 static double get_audio_clock(VideoState *is)
954 {
955     double pts;
956     int hw_buf_size, bytes_per_sec;
957     pts = is->audio_clock;
958     hw_buf_size = audio_write_get_buf_size(is);
959     bytes_per_sec = 0;
960     if (is->audio_st) {
961         bytes_per_sec = is->sdl_sample_rate * is->sdl_channels *
962                         av_get_bytes_per_sample(is->sdl_sample_fmt);
963     }
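    /* the clock is ahead by the data not yet played out: subtract the
       playback time of the bytes still waiting in the audio buffer */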
964     if (bytes_per_sec)
965         pts -= (double)hw_buf_size / bytes_per_sec;
966     return pts;
967 }
968
969 /* get the current video clock value */
970 static double get_video_clock(VideoState *is)
971 {
972     if (is->paused) {
973         return is->video_current_pts;
974     } else {
975         return is->video_current_pts_drift + av_gettime() / 1000000.0;
976     }
977 }
978
979 /* get the current external clock value */
980 static double get_external_clock(VideoState *is)
981 {
982     int64_t ti;
983     ti = av_gettime();
984     return is->external_clock + ((ti - is->external_clock_time) * 1e-6);
985 }
986
987 /* get the current master clock value */
988 static double get_master_clock(VideoState *is)
989 {
990     double val;
991
992     if (is->av_sync_type == AV_SYNC_VIDEO_MASTER) {
993         if (is->video_st)
994             val = get_video_clock(is);
995         else
996             val = get_audio_clock(is);
997     } else if (is->av_sync_type == AV_SYNC_AUDIO_MASTER) {
998         if (is->audio_st)
999             val = get_audio_clock(is);
1000         else
1001             val = get_video_clock(is);
1002     } else {
1003         val = get_external_clock(is);
1004     }
1005     return val;
1006 }
1007
1008 /* seek in the stream */
1009 static void stream_seek(VideoState *is, int64_t pos, int64_t rel, int seek_by_bytes)
1010 {
1011     if (!is->seek_req) {
1012         is->seek_pos = pos;
1013         is->seek_rel = rel;
1014         is->seek_flags &= ~AVSEEK_FLAG_BYTE;
1015         if (seek_by_bytes)
1016             is->seek_flags |= AVSEEK_FLAG_BYTE;
1017         is->seek_req = 1;
1018     }
1019 }
1020
1021 /* pause or resume the video */
1022 static void stream_pause(VideoState *is)
1023 {
1024     if (is->paused) {
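        /* resuming: advance frame_timer by the wall-clock time spent paused
           (pts drift does not advance while paused) so queued frames are not
           considered late */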
1025         is->frame_timer += av_gettime() / 1000000.0 + is->video_current_pts_drift - is->video_current_pts;
1026         if (is->read_pause_return != AVERROR(ENOSYS)) {
1027             is->video_current_pts = is->video_current_pts_drift + av_gettime() / 1000000.0;
1028         }
1029         is->video_current_pts_drift = is->video_current_pts - av_gettime() / 1000000.0;
1030     }
1031     is->paused = !is->paused;
1032 }
1033
1034 static double compute_target_time(double frame_current_pts, VideoState *is)
1035 {
1036     double delay, sync_threshold, diff;
1037
1038     /* compute nominal delay */
1039     delay = frame_current_pts - is->frame_last_pts;
1040     if (delay <= 0 || delay >= 10.0) {
1041         /* if incorrect delay, use previous one */
1042         delay = is->frame_last_delay;
1043     } else {
1044         is->frame_last_delay = delay;
1045     }
1046     is->frame_last_pts = frame_current_pts;
1047
1048     /* update delay to follow master synchronisation source */
1049     if (((is->av_sync_type == AV_SYNC_AUDIO_MASTER && is->audio_st) ||
1050          is->av_sync_type == AV_SYNC_EXTERNAL_CLOCK)) {
1051         /* if video is slave, we try to correct big delays by
1052            duplicating or deleting a frame */
1053         diff = get_video_clock(is) - get_master_clock(is);
1054
1055         /* skip or repeat frame. We take into account the
1056            delay to compute the threshold. I still don't know
1057            if it is the best guess */
1058         sync_threshold = FFMAX(AV_SYNC_THRESHOLD, delay);
1059         if (fabs(diff) < AV_NOSYNC_THRESHOLD) {
1060             if (diff <= -sync_threshold)
1061                 delay = 0;
1062             else if (diff >= sync_threshold)
1063                 delay = 2 * delay;
1064         }
1065     }
1066     is->frame_timer += delay;
1067
1068     av_dlog(NULL, "video: delay=%0.3f pts=%0.3f A-V=%f\n",
1069             delay, frame_current_pts, -diff);
1070
1071     return is->frame_timer;
1072 }
1073
1074 /* called to display each frame */
1075 static void video_refresh_timer(void *opaque)
1076 {
1077     VideoState *is = opaque;
1078     VideoPicture *vp;
1079
1080     SubPicture *sp, *sp2;
1081
1082     if (is->video_st) {
1083 retry:
1084         if (is->pictq_size == 0) {
1085             // nothing to do, no picture to display in the queue
1086         } else {
1087             double time = av_gettime() / 1000000.0;
1088             double next_target;
1089             /* dequeue the picture */
1090             vp = &is->pictq[is->pictq_rindex];
1091
1092             if (time < vp->target_clock)
1093                 return;
1094             /* update current video pts */
1095             is->video_current_pts = vp->pts;
1096             is->video_current_pts_drift = is->video_current_pts - time;
1097             is->video_current_pos = vp->pos;
1098             if (is->pictq_size > 1) {
1099                 VideoPicture *nextvp = &is->pictq[(is->pictq_rindex + 1) % VIDEO_PICTURE_QUEUE_SIZE];
1100                 assert(nextvp->target_clock >= vp->target_clock);
1101                 next_target= nextvp->target_clock;
1102             } else {
1103                 next_target = vp->target_clock + is->video_clock - vp->pts; // FIXME pass durations cleanly
1104             }
1105             if (framedrop && time > next_target) {
1106                 is->skip_frames *= 1.0 + FRAME_SKIP_FACTOR;
1107                 if (is->pictq_size > 1 || time > next_target + 0.5) {
1108                     /* update queue size and signal for next picture */
1109                     if (++is->pictq_rindex == VIDEO_PICTURE_QUEUE_SIZE)
1110                         is->pictq_rindex = 0;
1111
1112                     SDL_LockMutex(is->pictq_mutex);
1113                     is->pictq_size--;
1114                     SDL_CondSignal(is->pictq_cond);
1115                     SDL_UnlockMutex(is->pictq_mutex);
1116                     goto retry;
1117                 }
1118             }
1119
1120             if (is->subtitle_st) {
1121                 if (is->subtitle_stream_changed) {
1122                     SDL_LockMutex(is->subpq_mutex);
1123
1124                     while (is->subpq_size) {
1125                         free_subpicture(&is->subpq[is->subpq_rindex]);
1126
1127                         /* update queue size and signal for next picture */
1128                         if (++is->subpq_rindex == SUBPICTURE_QUEUE_SIZE)
1129                             is->subpq_rindex = 0;
1130
1131                         is->subpq_size--;
1132                     }
1133                     is->subtitle_stream_changed = 0;
1134
1135                     SDL_CondSignal(is->subpq_cond);
1136                     SDL_UnlockMutex(is->subpq_mutex);
1137                 } else {
1138                     if (is->subpq_size > 0) {
1139                         sp = &is->subpq[is->subpq_rindex];
1140
1141                         if (is->subpq_size > 1)
1142                             sp2 = &is->subpq[(is->subpq_rindex + 1) % SUBPICTURE_QUEUE_SIZE];
1143                         else
1144                             sp2 = NULL;
1145
1146                         if ((is->video_current_pts > (sp->pts + ((float) sp->sub.end_display_time / 1000)))
1147                                 || (sp2 && is->video_current_pts > (sp2->pts + ((float) sp2->sub.start_display_time / 1000))))
1148                         {
1149                             free_subpicture(sp);
1150
1151                             /* update queue size and signal for next picture */
1152                             if (++is->subpq_rindex == SUBPICTURE_QUEUE_SIZE)
1153                                 is->subpq_rindex = 0;
1154
1155                             SDL_LockMutex(is->subpq_mutex);
1156                             is->subpq_size--;
1157                             SDL_CondSignal(is->subpq_cond);
1158                             SDL_UnlockMutex(is->subpq_mutex);
1159                         }
1160                     }
1161                 }
1162             }
1163
1164             /* display picture */
1165             if (!display_disable)
1166                 video_display(is);
1167
1168             /* update queue size and signal for next picture */
1169             if (++is->pictq_rindex == VIDEO_PICTURE_QUEUE_SIZE)
1170                 is->pictq_rindex = 0;
1171
1172             SDL_LockMutex(is->pictq_mutex);
1173             is->pictq_size--;
1174             SDL_CondSignal(is->pictq_cond);
1175             SDL_UnlockMutex(is->pictq_mutex);
1176         }
1177     } else if (is->audio_st) {
1178         /* draw the next audio frame */
1179
1180         /* if there is only an audio stream, then display the audio bars (better
1181            than nothing, just to test the implementation) */
1182
1183         /* display picture */
1184         if (!display_disable)
1185             video_display(is);
1186     }
1187     if (show_status) {
1188         static int64_t last_time;
1189         int64_t cur_time;
1190         int aqsize, vqsize, sqsize;
1191         double av_diff;
1192
1193         cur_time = av_gettime();
1194         if (!last_time || (cur_time - last_time) >= 30000) {
1195             aqsize = 0;
1196             vqsize = 0;
1197             sqsize = 0;
1198             if (is->audio_st)
1199                 aqsize = is->audioq.size;
1200             if (is->video_st)
1201                 vqsize = is->videoq.size;
1202             if (is->subtitle_st)
1203                 sqsize = is->subtitleq.size;
1204             av_diff = 0;
1205             if (is->audio_st && is->video_st)
1206                 av_diff = get_audio_clock(is) - get_video_clock(is);
1207             printf("%7.2f A-V:%7.3f s:%3.1f aq=%5dKB vq=%5dKB sq=%5dB f=%"PRId64"/%"PRId64"   \r",
1208                    get_master_clock(is), av_diff, FFMAX(is->skip_frames - 1, 0), aqsize / 1024,
1209                    vqsize / 1024, sqsize, is->pts_ctx.num_faulty_dts, is->pts_ctx.num_faulty_pts);
1210             fflush(stdout);
1211             last_time = cur_time;
1212         }
1213     }
1214 }
1215
1216 static void stream_close(VideoState *is)
1217 {
1218     VideoPicture *vp;
1219     int i;
1220     /* XXX: use a special url_shutdown call to abort parse cleanly */
1221     is->abort_request = 1;
1222     SDL_WaitThread(is->parse_tid, NULL);
1223     SDL_WaitThread(is->refresh_tid, NULL);
1224
1225     /* free all pictures */
1226     for (i = 0; i < VIDEO_PICTURE_QUEUE_SIZE; i++) {
1227         vp = &is->pictq[i];
1228         if (vp->bmp) {
1229             SDL_FreeYUVOverlay(vp->bmp);
1230             vp->bmp = NULL;
1231         }
1232     }
1233     SDL_DestroyMutex(is->pictq_mutex);
1234     SDL_DestroyCond(is->pictq_cond);
1235     SDL_DestroyMutex(is->subpq_mutex);
1236     SDL_DestroyCond(is->subpq_cond);
1237 #if !CONFIG_AVFILTER
1238     if (is->img_convert_ctx)
1239         sws_freeContext(is->img_convert_ctx);
1240 #endif
1241     av_free(is);
1242 }
1243
1244 static void do_exit(void)
1245 {
1246     if (cur_stream) {
1247         stream_close(cur_stream);
1248         cur_stream = NULL;
1249     }
1250     uninit_opts();
1251 #if CONFIG_AVFILTER
1252     avfilter_uninit();
1253 #endif
1254     avformat_network_deinit();
1255     if (show_status)
1256         printf("\n");
1257     SDL_Quit();
1258     av_log(NULL, AV_LOG_QUIET, "");
1259     exit(0);
1260 }
1261
1262 /* allocate a picture (this must be done in the main thread to avoid
1263    potential locking problems) */
1264 static void alloc_picture(void *opaque)
1265 {
1266     VideoState *is = opaque;
1267     VideoPicture *vp;
1268
1269     vp = &is->pictq[is->pictq_windex];
1270
1271     if (vp->bmp)
1272         SDL_FreeYUVOverlay(vp->bmp);
1273
1274 #if CONFIG_AVFILTER
1275     vp->width   = is->out_video_filter->inputs[0]->w;
1276     vp->height  = is->out_video_filter->inputs[0]->h;
1277     vp->pix_fmt = is->out_video_filter->inputs[0]->format;
1278 #else
1279     vp->width   = is->video_st->codec->width;
1280     vp->height  = is->video_st->codec->height;
1281     vp->pix_fmt = is->video_st->codec->pix_fmt;
1282 #endif
1283
1284     vp->bmp = SDL_CreateYUVOverlay(vp->width, vp->height,
1285                                    SDL_YV12_OVERLAY,
1286                                    screen);
1287     if (!vp->bmp || vp->bmp->pitches[0] < vp->width) {
1288         /* SDL allocates a buffer smaller than requested if the video
1289          * overlay hardware is unable to support the requested size. */
1290         fprintf(stderr, "Error: the video system does not support an image\n"
1291                         "size of %dx%d pixels. Try using -vf \"scale=w:h\"\n"
1292                         "to reduce the image size.\n", vp->width, vp->height );
1293         do_exit();
1294     }
1295
1296     SDL_LockMutex(is->pictq_mutex);
1297     vp->allocated = 1;
1298     SDL_CondSignal(is->pictq_cond);
1299     SDL_UnlockMutex(is->pictq_mutex);
1300 }
1301
1302 /* The 'pts' parameter is the dts of the packet / pts of the frame and
1303  * guessed if not known. */
1304 static int queue_picture(VideoState *is, AVFrame *src_frame, double pts, int64_t pos)
1305 {
1306     VideoPicture *vp;
1307 #if CONFIG_AVFILTER
1308     AVPicture pict_src;
1309 #else
1310     int dst_pix_fmt = AV_PIX_FMT_YUV420P;
1311 #endif
1312     /* wait until we have space to put a new picture */
1313     SDL_LockMutex(is->pictq_mutex);
1314
1315     if (is->pictq_size >= VIDEO_PICTURE_QUEUE_SIZE && !is->refresh)
1316         is->skip_frames = FFMAX(1.0 - FRAME_SKIP_FACTOR, is->skip_frames * (1.0 - FRAME_SKIP_FACTOR));
1317
1318     while (is->pictq_size >= VIDEO_PICTURE_QUEUE_SIZE &&
1319            !is->videoq.abort_request) {
1320         SDL_CondWait(is->pictq_cond, is->pictq_mutex);
1321     }
1322     SDL_UnlockMutex(is->pictq_mutex);
1323
1324     if (is->videoq.abort_request)
1325         return -1;
1326
1327     vp = &is->pictq[is->pictq_windex];
1328
1329     /* alloc or resize hardware picture buffer */
1330     if (!vp->bmp || vp->reallocate ||
1331 #if CONFIG_AVFILTER
1332         vp->width  != is->out_video_filter->inputs[0]->w ||
1333         vp->height != is->out_video_filter->inputs[0]->h) {
1334 #else
1335         vp->width != is->video_st->codec->width ||
1336         vp->height != is->video_st->codec->height) {
1337 #endif
1338         SDL_Event event;
1339
1340         vp->allocated  = 0;
1341         vp->reallocate = 0;
1342
1343         /* the allocation must be done in the main thread to avoid
1344            locking problems */
1345         event.type = FF_ALLOC_EVENT;
1346         event.user.data1 = is;
1347         SDL_PushEvent(&event);
1348
1349         /* wait until the picture is allocated */
1350         SDL_LockMutex(is->pictq_mutex);
1351         while (!vp->allocated && !is->videoq.abort_request) {
1352             SDL_CondWait(is->pictq_cond, is->pictq_mutex);
1353         }
1354         SDL_UnlockMutex(is->pictq_mutex);
1355
1356         if (is->videoq.abort_request)
1357             return -1;
1358     }
1359
1360     /* if the frame is not skipped, then display it */
1361     if (vp->bmp) {
1362         AVPicture pict = { { 0 } };
1363
1364         /* get a pointer on the bitmap */
1365         SDL_LockYUVOverlay (vp->bmp);
1366
1367         pict.data[0] = vp->bmp->pixels[0];
1368         pict.data[1] = vp->bmp->pixels[2];
1369         pict.data[2] = vp->bmp->pixels[1];
1370
1371         pict.linesize[0] = vp->bmp->pitches[0];
1372         pict.linesize[1] = vp->bmp->pitches[2];
1373         pict.linesize[2] = vp->bmp->pitches[1];
1374
1375 #if CONFIG_AVFILTER
1376         pict_src.data[0] = src_frame->data[0];
1377         pict_src.data[1] = src_frame->data[1];
1378         pict_src.data[2] = src_frame->data[2];
1379
1380         pict_src.linesize[0] = src_frame->linesize[0];
1381         pict_src.linesize[1] = src_frame->linesize[1];
1382         pict_src.linesize[2] = src_frame->linesize[2];
1383
1384         // FIXME use direct rendering
1385         av_picture_copy(&pict, &pict_src,
1386                         vp->pix_fmt, vp->width, vp->height);
1387 #else
1388         av_opt_get_int(sws_opts, "sws_flags", 0, &sws_flags);
1389         is->img_convert_ctx = sws_getCachedContext(is->img_convert_ctx,
1390             vp->width, vp->height, vp->pix_fmt, vp->width, vp->height,
1391             dst_pix_fmt, sws_flags, NULL, NULL, NULL);
1392         if (is->img_convert_ctx == NULL) {
1393             fprintf(stderr, "Cannot initialize the conversion context\n");
1394             exit(1);
1395         }
1396         sws_scale(is->img_convert_ctx, src_frame->data, src_frame->linesize,
1397                   0, vp->height, pict.data, pict.linesize);
1398 #endif
1399         /* update the bitmap content */
1400         SDL_UnlockYUVOverlay(vp->bmp);
1401
1402         vp->pts = pts;
1403         vp->pos = pos;
1404
1405         /* now we can update the picture count */
1406         if (++is->pictq_windex == VIDEO_PICTURE_QUEUE_SIZE)
1407             is->pictq_windex = 0;
1408         SDL_LockMutex(is->pictq_mutex);
1409         vp->target_clock = compute_target_time(vp->pts, is);
1410
1411         is->pictq_size++;
1412         SDL_UnlockMutex(is->pictq_mutex);
1413     }
1414     return 0;
1415 }
1416
1417 /* Compute the exact PTS for the picture if it is omitted in the stream.
1418  * The 'pts1' parameter is the dts of the packet / pts of the frame. */
1419 static int output_picture2(VideoState *is, AVFrame *src_frame, double pts1, int64_t pos)
1420 {
1421     double frame_delay, pts;
1422     int ret;
1423
1424     pts = pts1;
1425
1426     if (pts != 0) {
1427         /* update video clock with pts, if present */
1428         is->video_clock = pts;
1429     } else {
1430         pts = is->video_clock;
1431     }
1432     /* update video clock for next frame */
1433     frame_delay = av_q2d(is->video_st->codec->time_base);
1434     /* for MPEG2, the frame can be repeated, so we update the
1435        clock accordingly */
1436     frame_delay += src_frame->repeat_pict * (frame_delay * 0.5);
1437     is->video_clock += frame_delay;
1438
1439     ret = queue_picture(is, src_frame, pts, pos);
1440     av_frame_unref(src_frame);
1441     return ret;
1442 }
1443
1444 static int get_video_frame(VideoState *is, AVFrame *frame, int64_t *pts, AVPacket *pkt)
1445 {
1446     int got_picture, i;
1447
1448     if (packet_queue_get(&is->videoq, pkt, 1) < 0)
1449         return -1;
1450
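    /* a flush packet marks a discontinuity (e.g. after a seek): reset the
       decoder, drain the picture queue and restart the timing state */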
1451     if (pkt->data == flush_pkt.data) {
1452         avcodec_flush_buffers(is->video_st->codec);
1453
1454         SDL_LockMutex(is->pictq_mutex);
1455         // Make sure there are no long-delay timers (ideally we should just flush the queue, but that's harder)
1456         for (i = 0; i < VIDEO_PICTURE_QUEUE_SIZE; i++) {
1457             is->pictq[i].target_clock= 0;
1458         }
1459         while (is->pictq_size && !is->videoq.abort_request) {
1460             SDL_CondWait(is->pictq_cond, is->pictq_mutex);
1461         }
1462         is->video_current_pos = -1;
1463         SDL_UnlockMutex(is->pictq_mutex);
1464
1465         init_pts_correction(&is->pts_ctx);
1466         is->frame_last_pts = AV_NOPTS_VALUE;
1467         is->frame_last_delay = 0;
1468         is->frame_timer = (double)av_gettime() / 1000000.0;
1469         is->skip_frames = 1;
1470         is->skip_frames_index = 0;
1471         return 0;
1472     }
1473
1474     avcodec_decode_video2(is->video_st->codec, frame, &got_picture, pkt);
1475
1476     if (got_picture) {
1477         if (decoder_reorder_pts == -1) {
1478             *pts = guess_correct_pts(&is->pts_ctx, frame->pkt_pts, frame->pkt_dts);
1479         } else if (decoder_reorder_pts) {
1480             *pts = frame->pkt_pts;
1481         } else {
1482             *pts = frame->pkt_dts;
1483         }
1484
1485         if (*pts == AV_NOPTS_VALUE) {
1486             *pts = 0;
1487         }
1488         if (is->video_st->sample_aspect_ratio.num) {
1489             frame->sample_aspect_ratio = is->video_st->sample_aspect_ratio;
1490         }
1491
1492         is->skip_frames_index += 1;
1493         if (is->skip_frames_index >= is->skip_frames) {
1494             is->skip_frames_index -= FFMAX(is->skip_frames, 1.0);
1495             return 1;
1496         }
1497         av_frame_unref(frame);
1498     }
1499     return 0;
1500 }
1501
1502 #if CONFIG_AVFILTER
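/* Build the video filter graph: a "buffer" source fed with decoded frames,
   optionally the user-supplied -vf filter chain, a "format" filter forcing
   yuv420p, and a "buffersink" from which video_thread() pulls the filtered
   frames. */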
1503 static int configure_video_filters(AVFilterGraph *graph, VideoState *is, const char *vfilters)
1504 {
1505     char sws_flags_str[128];
1506     char buffersrc_args[256];
1507     int ret;
1508     AVFilterContext *filt_src = NULL, *filt_out = NULL, *filt_format;
1509     AVCodecContext *codec = is->video_st->codec;
1510
1511     snprintf(sws_flags_str, sizeof(sws_flags_str), "flags=%"PRId64, sws_flags);
1512     graph->scale_sws_opts = av_strdup(sws_flags_str);
1513
1514     snprintf(buffersrc_args, sizeof(buffersrc_args), "%d:%d:%d:%d:%d:%d:%d",
1515              codec->width, codec->height, codec->pix_fmt,
1516              is->video_st->time_base.num, is->video_st->time_base.den,
1517              codec->sample_aspect_ratio.num, codec->sample_aspect_ratio.den);
1518
1519
1520     if ((ret = avfilter_graph_create_filter(&filt_src,
1521                                             avfilter_get_by_name("buffer"),
1522                                             "src", buffersrc_args, NULL,
1523                                             graph)) < 0)
1524         return ret;
1525     if ((ret = avfilter_graph_create_filter(&filt_out,
1526                                             avfilter_get_by_name("buffersink"),
1527                                             "out", NULL, NULL, graph)) < 0)
1528         return ret;
1529
1530     if ((ret = avfilter_graph_create_filter(&filt_format,
1531                                             avfilter_get_by_name("format"),
1532                                             "format", "yuv420p", NULL, graph)) < 0)
1533         return ret;
1534     if ((ret = avfilter_link(filt_format, 0, filt_out, 0)) < 0)
1535         return ret;
1536
1537
1538     if (vfilters) {
1539         AVFilterInOut *outputs = avfilter_inout_alloc();
1540         AVFilterInOut *inputs  = avfilter_inout_alloc();
1541
1542         outputs->name    = av_strdup("in");
1543         outputs->filter_ctx = filt_src;
1544         outputs->pad_idx = 0;
1545         outputs->next    = NULL;
1546
1547         inputs->name    = av_strdup("out");
1548         inputs->filter_ctx = filt_format;
1549         inputs->pad_idx = 0;
1550         inputs->next    = NULL;
1551
1552         if ((ret = avfilter_graph_parse(graph, vfilters, inputs, outputs, NULL)) < 0)
1553             return ret;
1554     } else {
1555         if ((ret = avfilter_link(filt_src, 0, filt_format, 0)) < 0)
1556             return ret;
1557     }
1558
1559     if ((ret = avfilter_graph_config(graph, NULL)) < 0)
1560         return ret;
1561
1562     is->in_video_filter  = filt_src;
1563     is->out_video_filter = filt_out;
1564
1565     return ret;
1566 }
1567
1568 #endif  /* CONFIG_AVFILTER */
1569
1570 static int video_thread(void *arg)
1571 {
1572     AVPacket pkt = { 0 };
1573     VideoState *is = arg;
1574     AVFrame *frame = av_frame_alloc();
1575     int64_t pts_int;
1576     double pts;
1577     int ret;
1578
1579 #if CONFIG_AVFILTER
1580     AVFilterGraph *graph = avfilter_graph_alloc();
1581     AVFilterContext *filt_out = NULL, *filt_in = NULL;
1582     int last_w = is->video_st->codec->width;
1583     int last_h = is->video_st->codec->height;
1584
1585     if ((ret = configure_video_filters(graph, is, vfilters)) < 0)
1586         goto the_end;
1587     filt_in  = is->in_video_filter;
1588     filt_out = is->out_video_filter;
1589 #endif
1590
1591     for (;;) {
1592 #if CONFIG_AVFILTER
1593         AVRational tb;
1594 #endif
1595         while (is->paused && !is->videoq.abort_request)
1596             SDL_Delay(10);
1597
1598         av_free_packet(&pkt);
1599
1600         ret = get_video_frame(is, frame, &pts_int, &pkt);
1601         if (ret < 0)
1602             goto the_end;
1603
1604         if (!ret)
1605             continue;
1606
1607 #if CONFIG_AVFILTER
1608         if (   last_w != is->video_st->codec->width
1609             || last_h != is->video_st->codec->height) {
1610             av_dlog(NULL, "Changing size %dx%d -> %dx%d\n", last_w, last_h,
1611                     is->video_st->codec->width, is->video_st->codec->height);
1612             avfilter_graph_free(&graph);
1613             graph = avfilter_graph_alloc();
1614             if ((ret = configure_video_filters(graph, is, vfilters)) < 0)
1615                 goto the_end;
1616             filt_in  = is->in_video_filter;
1617             filt_out = is->out_video_filter;
1618             last_w = is->video_st->codec->width;
1619             last_h = is->video_st->codec->height;
1620         }
1621
1622         frame->pts = pts_int;
1623         ret = av_buffersrc_add_frame(filt_in, frame);
1624         if (ret < 0)
1625             goto the_end;
1626
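             /* one pushed frame can produce zero or more filtered frames, so
                keep pulling from the sink until it has nothing left to give */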
1627         while (ret >= 0) {
1628             ret = av_buffersink_get_frame(filt_out, frame);
1629             if (ret < 0) {
1630                 ret = 0;
1631                 break;
1632             }
1633
1634             pts_int = frame->pts;
1635             tb      = filt_out->inputs[0]->time_base;
1636             if (av_cmp_q(tb, is->video_st->time_base)) {
1637                 av_unused int64_t pts1 = pts_int;
1638                 pts_int = av_rescale_q(pts_int, tb, is->video_st->time_base);
1639                 av_dlog(NULL, "video_thread(): "
1640                         "tb:%d/%d pts:%"PRId64" -> tb:%d/%d pts:%"PRId64"\n",
1641                         tb.num, tb.den, pts1,
1642                         is->video_st->time_base.num, is->video_st->time_base.den, pts_int);
1643             }
1644             pts = pts_int * av_q2d(is->video_st->time_base);
1645             ret = output_picture2(is, frame, pts, 0);
1646         }
1647 #else
1648         pts = pts_int * av_q2d(is->video_st->time_base);
1649         ret = output_picture2(is, frame, pts, pkt.pos);
1650 #endif
1651
1652         if (ret < 0)
1653             goto the_end;
1654
1655
1656         if (step)
1657             if (cur_stream)
1658                 stream_pause(cur_stream);
1659     }
1660  the_end:
1661 #if CONFIG_AVFILTER
1662     av_freep(&vfilters);
1663     avfilter_graph_free(&graph);
1664 #endif
1665     av_free_packet(&pkt);
1666     av_frame_free(&frame);
1667     return 0;
1668 }
1669
1670 static int subtitle_thread(void *arg)
1671 {
1672     VideoState *is = arg;
1673     SubPicture *sp;
1674     AVPacket pkt1, *pkt = &pkt1;
1675     int got_subtitle;
1676     double pts;
1677     int i, j;
1678     int r, g, b, y, u, v, a;
1679
1680     for (;;) {
1681         while (is->paused && !is->subtitleq.abort_request) {
1682             SDL_Delay(10);
1683         }
1684         if (packet_queue_get(&is->subtitleq, pkt, 1) < 0)
1685             break;
1686
1687         if (pkt->data == flush_pkt.data) {
1688             avcodec_flush_buffers(is->subtitle_st->codec);
1689             continue;
1690         }
1691         SDL_LockMutex(is->subpq_mutex);
1692         while (is->subpq_size >= SUBPICTURE_QUEUE_SIZE &&
1693                !is->subtitleq.abort_request) {
1694             SDL_CondWait(is->subpq_cond, is->subpq_mutex);
1695         }
1696         SDL_UnlockMutex(is->subpq_mutex);
1697
1698         if (is->subtitleq.abort_request)
1699             return 0;
1700
1701         sp = &is->subpq[is->subpq_windex];
1702
1703         /* NOTE: pts is the PTS of the _first_ subtitle picture beginning in
1704            this packet, if any */
1705         pts = 0;
1706         if (pkt->pts != AV_NOPTS_VALUE)
1707             pts = av_q2d(is->subtitle_st->time_base) * pkt->pts;
1708
1709         avcodec_decode_subtitle2(is->subtitle_st->codec, &sp->sub,
1710                                  &got_subtitle, pkt);
1711
1712         if (got_subtitle && sp->sub.format == 0) {
1713             sp->pts = pts;
1714
1715             for (i = 0; i < sp->sub.num_rects; i++) {
1717                 for (j = 0; j < sp->sub.rects[i]->nb_colors; j++) {
1719                     RGBA_IN(r, g, b, a, (uint32_t*)sp->sub.rects[i]->pict.data[1] + j);
1720                     y = RGB_TO_Y_CCIR(r, g, b);
1721                     u = RGB_TO_U_CCIR(r, g, b, 0);
1722                     v = RGB_TO_V_CCIR(r, g, b, 0);
1723                     YUVA_OUT((uint32_t*)sp->sub.rects[i]->pict.data[1] + j, y, u, v, a);
1724                 }
1725             }
1726
1727             /* now we can update the picture count */
1728             if (++is->subpq_windex == SUBPICTURE_QUEUE_SIZE)
1729                 is->subpq_windex = 0;
1730             SDL_LockMutex(is->subpq_mutex);
1731             is->subpq_size++;
1732             SDL_UnlockMutex(is->subpq_mutex);
1733         }
1734         av_free_packet(pkt);
1735     }
1736     return 0;
1737 }
1738
1739 /* copy samples into the circular array used by the audio waveform display */
1740 static void update_sample_display(VideoState *is, short *samples, int samples_size)
1741 {
1742     int size, len;
1743
1744     size = samples_size / sizeof(short);
1745     while (size > 0) {
1746         len = SAMPLE_ARRAY_SIZE - is->sample_array_index;
1747         if (len > size)
1748             len = size;
1749         memcpy(is->sample_array + is->sample_array_index, samples, len * sizeof(short));
1750         samples += len;
1751         is->sample_array_index += len;
1752         if (is->sample_array_index >= SAMPLE_ARRAY_SIZE)
1753             is->sample_array_index = 0;
1754         size -= len;
1755     }
1756 }
1757
1758 /* return the new audio buffer size (samples can be added or removed
1759    to improve sync when video or the external clock is the master) */
1760 static int synchronize_audio(VideoState *is, short *samples,
1761                              int samples_size1, double pts)
1762 {
1763     int n, samples_size;
1764     double ref_clock;
1765
1766     n = is->sdl_channels * av_get_bytes_per_sample(is->sdl_sample_fmt);
1767     samples_size = samples_size1;
1768
1769     /* if not master, then we try to remove or add samples to correct the clock */
1770     if (((is->av_sync_type == AV_SYNC_VIDEO_MASTER && is->video_st) ||
1771          is->av_sync_type == AV_SYNC_EXTERNAL_CLOCK)) {
1772         double diff, avg_diff;
1773         int wanted_size, min_size, max_size, nb_samples;
1774
1775         ref_clock = get_master_clock(is);
1776         diff = get_audio_clock(is) - ref_clock;
1777
1778         if (diff < AV_NOSYNC_THRESHOLD) {
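                 /* the error is accumulated as cum = diff + coef * cum, so that
                    avg_diff = cum * (1 - coef) is an exponentially weighted
                    average of the recent A-V differences */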
1779             is->audio_diff_cum = diff + is->audio_diff_avg_coef * is->audio_diff_cum;
1780             if (is->audio_diff_avg_count < AUDIO_DIFF_AVG_NB) {
1781                 /* not enough measurements yet for a reliable estimate */
1782                 is->audio_diff_avg_count++;
1783             } else {
1784                 /* estimate the A-V difference */
1785                 avg_diff = is->audio_diff_cum * (1.0 - is->audio_diff_avg_coef);
1786
1787                 if (fabs(avg_diff) >= is->audio_diff_threshold) {
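                         /* diff > 0 means the audio clock runs ahead of the
                            master, so the buffer is padded with duplicated
                            samples to slow audio down; diff < 0 means it lags,
                            so samples are dropped to catch up */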
1788                     wanted_size = samples_size + ((int)(diff * is->sdl_sample_rate) * n);
1789                     nb_samples = samples_size / n;
1790
1791                     min_size = ((nb_samples * (100 - SAMPLE_CORRECTION_PERCENT_MAX)) / 100) * n;
1792                     max_size = ((nb_samples * (100 + SAMPLE_CORRECTION_PERCENT_MAX)) / 100) * n;
1793                     if (wanted_size < min_size)
1794                         wanted_size = min_size;
1795                     else if (wanted_size > max_size)
1796                         wanted_size = max_size;
1797
1798                     /* add or remove samples to correct the synchronization */
1799                     if (wanted_size < samples_size) {
1800                         /* remove samples */
1801                         samples_size = wanted_size;
1802                     } else if (wanted_size > samples_size) {
1803                         uint8_t *samples_end, *q;
1804                         int nb;
1805
1806                         /* add samples */
1807                         nb = wanted_size - samples_size;
1808                         samples_end = (uint8_t *)samples + samples_size - n;
1809                         q = samples_end + n;
1810                         while (nb > 0) {
1811                             memcpy(q, samples_end, n);
1812                             q += n;
1813                             nb -= n;
1814                         }
1815                         samples_size = wanted_size;
1816                     }
1817                 }
1818                 av_dlog(NULL, "diff=%f adiff=%f sample_diff=%d apts=%0.3f vpts=%0.3f %f\n",
1819                         diff, avg_diff, samples_size - samples_size1,
1820                         is->audio_clock, is->video_clock, is->audio_diff_threshold);
1821             }
1822         } else {
1823             /* the difference is too large: it is probably caused by initial
1824                PTS errors, so reset the A-V filter */
1825             is->audio_diff_avg_count = 0;
1826             is->audio_diff_cum       = 0;
1827         }
1828     }
1829
1830     return samples_size;
1831 }
1832
1833 /* decode one audio frame and return its uncompressed size in bytes */
1834 static int audio_decode_frame(VideoState *is, double *pts_ptr)
1835 {
1836     AVPacket *pkt_temp = &is->audio_pkt_temp;
1837     AVPacket *pkt = &is->audio_pkt;
1838     AVCodecContext *dec = is->audio_st->codec;
1839     int n, len1, data_size, got_frame;
1840     double pts;
1841     int new_packet = 0;
1842     int flush_complete = 0;
1843
1844     for (;;) {
1845         /* NOTE: the audio packet can contain several frames */
1846         while (pkt_temp->size > 0 || (!pkt_temp->data && new_packet)) {
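                 /* keep decoding while data remains in the packet; an empty
                    (NULL data) packet is fed to the decoder once so codecs with
                    CODEC_CAP_DELAY can return the frames they still buffer */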
1847             int resample_changed, audio_resample;
1848
1849             if (!is->frame) {
1850                 if (!(is->frame = avcodec_alloc_frame()))
1851                     return AVERROR(ENOMEM);
1852             } else
1853                 avcodec_get_frame_defaults(is->frame);
1854
1855             if (flush_complete)
1856                 break;
1857             new_packet = 0;
1858             len1 = avcodec_decode_audio4(dec, is->frame, &got_frame, pkt_temp);
1859             if (len1 < 0) {
1860                 /* on decode error, discard the rest of the packet */
1861                 pkt_temp->size = 0;
1862                 break;
1863             }
1864
1865             pkt_temp->data += len1;
1866             pkt_temp->size -= len1;
1867
1868             if (!got_frame) {
1869                 /* stop sending empty packets if the decoder is finished */
1870                 if (!pkt_temp->data && dec->codec->capabilities & CODEC_CAP_DELAY)
1871                     flush_complete = 1;
1872                 continue;
1873             }
1874             data_size = av_samples_get_buffer_size(NULL, dec->channels,
1875                                                    is->frame->nb_samples,
1876                                                    is->frame->format, 1);
1877
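                 /* resample only when the decoded format differs from what SDL
                    was opened with, and reopen the resampler whenever the
                    source format changes mid-stream */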
1878             audio_resample = is->frame->format         != is->sdl_sample_fmt     ||
1879                              is->frame->channel_layout != is->sdl_channel_layout ||
1880                              is->frame->sample_rate    != is->sdl_sample_rate;
1881
1882             resample_changed = is->frame->format         != is->resample_sample_fmt     ||
1883                                is->frame->channel_layout != is->resample_channel_layout ||
1884                                is->frame->sample_rate    != is->resample_sample_rate;
1885
1886             if ((!is->avr && audio_resample) || resample_changed) {
1887                 int ret;
1888                 if (is->avr)
1889                     avresample_close(is->avr);
1890                 else if (audio_resample) {
1891                     is->avr = avresample_alloc_context();
1892                     if (!is->avr) {
1893                         fprintf(stderr, "error allocating AVAudioResampleContext\n");
1894                         break;
1895                     }
1896                 }
1897                 if (audio_resample) {
1898                     av_opt_set_int(is->avr, "in_channel_layout",  is->frame->channel_layout, 0);
1899                     av_opt_set_int(is->avr, "in_sample_fmt",      is->frame->format,         0);
1900                     av_opt_set_int(is->avr, "in_sample_rate",     is->frame->sample_rate,    0);
1901                     av_opt_set_int(is->avr, "out_channel_layout", is->sdl_channel_layout,    0);
1902                     av_opt_set_int(is->avr, "out_sample_fmt",     is->sdl_sample_fmt,        0);
1903                     av_opt_set_int(is->avr, "out_sample_rate",    is->sdl_sample_rate,       0);
1904
1905                     if ((ret = avresample_open(is->avr)) < 0) {
1906                         fprintf(stderr, "error initializing libavresample\n");
1907                         break;
1908                     }
1909                 }
1910                 is->resample_sample_fmt     = is->frame->format;
1911                 is->resample_channel_layout = is->frame->channel_layout;
1912                 is->resample_sample_rate    = is->frame->sample_rate;
1913             }
1914
1915             if (audio_resample) {
1916                 void *tmp_out;
1917                 int out_samples, out_size, out_linesize;
1918                 int osize      = av_get_bytes_per_sample(is->sdl_sample_fmt);
1919                 int nb_samples = is->frame->nb_samples;
1920
1921                 out_size = av_samples_get_buffer_size(&out_linesize,
1922                                                       is->sdl_channels,
1923                                                       nb_samples,
1924                                                       is->sdl_sample_fmt, 0);
1925                 tmp_out = av_realloc(is->audio_buf1, out_size);
1926                 if (!tmp_out)
1927                     return AVERROR(ENOMEM);
1928                 is->audio_buf1 = tmp_out;
1929
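                     /* convert into audio_buf1; avresample_convert() returns
                        the number of samples written per channel, or a
                        negative error code */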
1930                 out_samples = avresample_convert(is->avr,
1931                                                  &is->audio_buf1,
1932                                                  out_linesize, nb_samples,
1933                                                  is->frame->data,
1934                                                  is->frame->linesize[0],
1935                                                  is->frame->nb_samples);
1936                 if (out_samples < 0) {
1937                     fprintf(stderr, "avresample_convert() failed\n");
1938                     break;
1939                 }
1940                 is->audio_buf = is->audio_buf1;
1941                 data_size = out_samples * osize * is->sdl_channels;
1942             } else {
1943                 is->audio_buf = is->frame->data[0];
1944             }
1945
1946             /* if no pts, then compute it */
1947             pts = is->audio_clock;
1948             *pts_ptr = pts;
1949             n = is->sdl_channels * av_get_bytes_per_sample(is->sdl_sample_fmt);
1950             is->audio_clock += (double)data_size /
1951                 (double)(n * is->sdl_sample_rate);
1952 #ifdef DEBUG
1953             {
1954                 static double last_clock;
1955                 printf("audio: delay=%0.3f clock=%0.3f pts=%0.3f\n",
1956                        is->audio_clock - last_clock,
1957                        is->audio_clock, pts);
1958                 last_clock = is->audio_clock;
1959             }
1960 #endif
1961             return data_size;
1962         }
1963
1964         /* free the current packet */
1965         if (pkt->data)
1966             av_free_packet(pkt);
1967         memset(pkt_temp, 0, sizeof(*pkt_temp));
1968
1969         if (is->paused || is->audioq.abort_request) {
1970             return -1;
1971         }
1972
1973         /* read next packet */
1974         if ((new_packet = packet_queue_get(&is->audioq, pkt, 1)) < 0)
1975             return -1;
1976
1977         if (pkt->data == flush_pkt.data) {
1978             avcodec_flush_buffers(dec);
1979             flush_complete = 0;
1980         }
1981
1982         *pkt_temp = *pkt;
1983
1984         /* update the audio clock with the packet pts, if available */
1985         if (pkt->pts != AV_NOPTS_VALUE) {
1986             is->audio_clock = av_q2d(is->audio_st->time_base)*pkt->pts;
1987         }
1988     }
1989 }
1990
1991 /* prepare a new audio buffer */
1992 static void sdl_audio_callback(void *opaque, Uint8 *stream, int len)
1993 {
1994     VideoState *is = opaque;
1995     int audio_size, len1;
1996     double pts;
1997
1998     audio_callback_time = av_gettime();
1999
2000     while (len > 0) {
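             /* SDL asks for exactly 'len' bytes: refill the decoded buffer
                whenever it runs dry and copy it out in chunks until the
                request is satisfied */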
2001         if (is->audio_buf_index >= is->audio_buf_size) {
2002             audio_size = audio_decode_frame(is, &pts);
2003             if (audio_size < 0) {
2004                 /* if error, just output silence */
2005                 is->audio_buf      = is->silence_buf;
2006                 is->audio_buf_size = sizeof(is->silence_buf);
2007             } else {
2008                 if (is->show_audio)
2009                     update_sample_display(is, (int16_t *)is->audio_buf, audio_size);
2010                 audio_size = synchronize_audio(is, (int16_t *)is->audio_buf, audio_size,
2011                                                pts);
2012                 is->audio_buf_size = audio_size;
2013             }
2014             is->audio_buf_index = 0;
2015         }
2016         len1 = is->audio_buf_size - is->audio_buf_index;
2017         if (len1 > len)
2018             len1 = len;
2019         memcpy(stream, (uint8_t *)is->audio_buf + is->audio_buf_index, len1);
2020         len -= len1;
2021         stream += len1;
2022         is->audio_buf_index += len1;
2023     }
2024 }
2025
2026 /* open a given stream. Return 0 if OK */
2027 static int stream_component_open(VideoState *is, int stream_index)
2028 {
2029     AVFormatContext *ic = is->ic;
2030     AVCodecContext *avctx;
2031     AVCodec *codec;
2032     SDL_AudioSpec wanted_spec, spec;
2033     AVDictionary *opts;
2034     AVDictionaryEntry *t = NULL;
2035
2036     if (stream_index < 0 || stream_index >= ic->nb_streams)
2037         return -1;
2038     avctx = ic->streams[stream_index]->codec;
2039
2040     opts = filter_codec_opts(codec_opts, avctx->codec_id, ic, ic->streams[stream_index], NULL);
2041
2042     codec = avcodec_find_decoder(avctx->codec_id);
2043     avctx->debug_mv          = debug_mv;
2044     avctx->debug             = debug;
2045     avctx->workaround_bugs   = workaround_bugs;
2046     avctx->idct_algo         = idct;
2047     avctx->skip_frame        = skip_frame;
2048     avctx->skip_idct         = skip_idct;
2049     avctx->skip_loop_filter  = skip_loop_filter;
2050     avctx->error_concealment = error_concealment;
2051
2052     if (fast)   avctx->flags2 |= CODEC_FLAG2_FAST;
2053
2054     if (!av_dict_get(opts, "threads", NULL, 0))
2055         av_dict_set(&opts, "threads", "auto", 0);
2056     if (avctx->codec_type == AVMEDIA_TYPE_VIDEO)
2057         av_dict_set(&opts, "refcounted_frames", "1", 0);
2058     if (!codec ||
2059         avcodec_open2(avctx, codec, &opts) < 0)
2060         return -1;
2061     if ((t = av_dict_get(opts, "", NULL, AV_DICT_IGNORE_SUFFIX))) {
2062         av_log(NULL, AV_LOG_ERROR, "Option %s not found.\n", t->key);
2063         return AVERROR_OPTION_NOT_FOUND;
2064     }
2065
2066     /* prepare audio output */
2067     if (avctx->codec_type == AVMEDIA_TYPE_AUDIO) {
2068         is->sdl_sample_rate = avctx->sample_rate;
2069
2070         if (!avctx->channel_layout)
2071             avctx->channel_layout = av_get_default_channel_layout(avctx->channels);
2072         if (!avctx->channel_layout) {
2073             fprintf(stderr, "unable to guess channel layout\n");
2074             return -1;
2075         }
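             /* only mono or stereo is requested from SDL here; any other source
                layout is downmixed by the libavresample context set up in
                audio_decode_frame() */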
2076         if (avctx->channels == 1)
2077             is->sdl_channel_layout = AV_CH_LAYOUT_MONO;
2078         else
2079             is->sdl_channel_layout = AV_CH_LAYOUT_STEREO;
2080         is->sdl_channels = av_get_channel_layout_nb_channels(is->sdl_channel_layout);
2081
2082         wanted_spec.format = AUDIO_S16SYS;
2083         wanted_spec.freq = is->sdl_sample_rate;
2084         wanted_spec.channels = is->sdl_channels;
2085         wanted_spec.silence = 0;
2086         wanted_spec.samples = SDL_AUDIO_BUFFER_SIZE;
2087         wanted_spec.callback = sdl_audio_callback;
2088         wanted_spec.userdata = is;
2089         if (SDL_OpenAudio(&wanted_spec, &spec) < 0) {
2090             fprintf(stderr, "SDL_OpenAudio: %s\n", SDL_GetError());
2091             return -1;
2092         }
2093         is->audio_hw_buf_size = spec.size;
2094         is->sdl_sample_fmt          = AV_SAMPLE_FMT_S16;
2095         is->resample_sample_fmt     = is->sdl_sample_fmt;
2096         is->resample_channel_layout = avctx->channel_layout;
2097         is->resample_sample_rate    = avctx->sample_rate;
2098     }
2099
2100     ic->streams[stream_index]->discard = AVDISCARD_DEFAULT;
2101     switch (avctx->codec_type) {
2102     case AVMEDIA_TYPE_AUDIO:
2103         is->audio_stream = stream_index;
2104         is->audio_st = ic->streams[stream_index];
2105         is->audio_buf_size  = 0;
2106         is->audio_buf_index = 0;
2107
2108         /* init averaging filter */
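             /* the coefficient is chosen so that a difference sample's weight
                decays to 1% after AUDIO_DIFF_AVG_NB iterations of the running
                average */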
2109         is->audio_diff_avg_coef  = exp(log(0.01) / AUDIO_DIFF_AVG_NB);
2110         is->audio_diff_avg_count = 0;
2111         /* since we do not have a precise enough measure of the audio fifo
2112            fullness, we only correct audio sync above this threshold */
2113         is->audio_diff_threshold = 2.0 * SDL_AUDIO_BUFFER_SIZE / avctx->sample_rate;
2114
2115         memset(&is->audio_pkt, 0, sizeof(is->audio_pkt));
2116         packet_queue_init(&is->audioq);
2117         SDL_PauseAudio(0);
2118         break;
2119     case AVMEDIA_TYPE_VIDEO:
2120         is->video_stream = stream_index;
2121         is->video_st = ic->streams[stream_index];
2122
2123         packet_queue_init(&is->videoq);
2124         is->video_tid = SDL_CreateThread(video_thread, is);
2125         break;
2126     case AVMEDIA_TYPE_SUBTITLE:
2127         is->subtitle_stream = stream_index;
2128         is->subtitle_st = ic->streams[stream_index];
2129         packet_queue_init(&is->subtitleq);
2130
2131         is->subtitle_tid = SDL_CreateThread(subtitle_thread, is);
2132         break;
2133     default:
2134         break;
2135     }
2136     return 0;
2137 }
2138
2139 static void stream_component_close(VideoState *is, int stream_index)
2140 {
2141     AVFormatContext *ic = is->ic;
2142     AVCodecContext *avctx;
2143
2144     if (stream_index < 0 || stream_index >= ic->nb_streams)
2145         return;
2146     avctx = ic->streams[stream_index]->codec;
2147
2148     switch (avctx->codec_type) {
2149     case AVMEDIA_TYPE_AUDIO:
2150         packet_queue_abort(&is->audioq);
2151
2152         SDL_CloseAudio();
2153
2154         packet_queue_end(&is->audioq);
2155         av_free_packet(&is->audio_pkt);
2156         if (is->avr)
2157             avresample_free(&is->avr);
2158         av_freep(&is->audio_buf1);
2159         is->audio_buf = NULL;
2160         avcodec_free_frame(&is->frame);
2161
2162         if (is->rdft) {
2163             av_rdft_end(is->rdft);
2164             av_freep(&is->rdft_data);
2165             is->rdft = NULL;
2166             is->rdft_bits = 0;
2167         }
2168         break;
2169     case AVMEDIA_TYPE_VIDEO:
2170         packet_queue_abort(&is->videoq);
2171
2172         /* note: we also signal this condition variable to make sure we
2173            unblock the video thread in all cases */
2174         SDL_LockMutex(is->pictq_mutex);
2175         SDL_CondSignal(is->pictq_cond);
2176         SDL_UnlockMutex(is->pictq_mutex);
2177
2178         SDL_WaitThread(is->video_tid, NULL);
2179
2180         packet_queue_end(&is->videoq);
2181         break;
2182     case AVMEDIA_TYPE_SUBTITLE:
2183         packet_queue_abort(&is->subtitleq);
2184
2185         /* note: we also signal this condition variable to make sure we
2186            unblock the subtitle thread in all cases */
2187         SDL_LockMutex(is->subpq_mutex);
2188         is->subtitle_stream_changed = 1;
2189
2190         SDL_CondSignal(is->subpq_cond);
2191         SDL_UnlockMutex(is->subpq_mutex);
2192
2193         SDL_WaitThread(is->subtitle_tid, NULL);
2194
2195         packet_queue_end(&is->subtitleq);
2196         break;
2197     default:
2198         break;
2199     }
2200
2201     ic->streams[stream_index]->discard = AVDISCARD_ALL;
2202     avcodec_close(avctx);
2203     switch (avctx->codec_type) {
2204     case AVMEDIA_TYPE_AUDIO:
2205         is->audio_st = NULL;
2206         is->audio_stream = -1;
2207         break;
2208     case AVMEDIA_TYPE_VIDEO:
2209         is->video_st = NULL;
2210         is->video_stream = -1;
2211         break;
2212     case AVMEDIA_TYPE_SUBTITLE:
2213         is->subtitle_st = NULL;
2214         is->subtitle_stream = -1;
2215         break;
2216     default:
2217         break;
2218     }
2219 }
2220
2221 /* since we have only one decoding thread, we can use a global
2222    variable instead of a thread local variable */
2223 static VideoState *global_video_state;
2224
2225 static int decode_interrupt_cb(void *ctx)
2226 {
2227     return global_video_state && global_video_state->abort_request;
2228 }
2229
2230 /* this thread gets the stream from the disk or the network */
2231 static int decode_thread(void *arg)
2232 {
2233     VideoState *is = arg;
2234     AVFormatContext *ic = NULL;
2235     int err, i, ret;
2236     int st_index[AVMEDIA_TYPE_NB];
2237     AVPacket pkt1, *pkt = &pkt1;
2238     int eof = 0;
2239     int pkt_in_play_range = 0;
2240     AVDictionaryEntry *t;
2241     AVDictionary **opts;
2242     int orig_nb_streams;
2243
2244     memset(st_index, -1, sizeof(st_index));
2245     is->video_stream = -1;
2246     is->audio_stream = -1;
2247     is->subtitle_stream = -1;
2248
2249     global_video_state = is;
2250
2251     ic = avformat_alloc_context();
2252     ic->interrupt_callback.callback = decode_interrupt_cb;
2253     err = avformat_open_input(&ic, is->filename, is->iformat, &format_opts);
2254     if (err < 0) {
2255         print_error(is->filename, err);
2256         ret = -1;
2257         goto fail;
2258     }
2259     if ((t = av_dict_get(format_opts, "", NULL, AV_DICT_IGNORE_SUFFIX))) {
2260         av_log(NULL, AV_LOG_ERROR, "Option %s not found.\n", t->key);
2261         ret = AVERROR_OPTION_NOT_FOUND;
2262         goto fail;
2263     }
2264     is->ic = ic;
2265
2266     if (genpts)
2267         ic->flags |= AVFMT_FLAG_GENPTS;
2268
2269     opts = setup_find_stream_info_opts(ic, codec_opts);
2270     orig_nb_streams = ic->nb_streams;
2271
2272     err = avformat_find_stream_info(ic, opts);
2273     if (err < 0) {
2274         fprintf(stderr, "%s: could not find codec parameters\n", is->filename);
2275         ret = -1;
2276         goto fail;
2277     }
2278     for (i = 0; i < orig_nb_streams; i++)
2279         av_dict_free(&opts[i]);
2280     av_freep(&opts);
2281
2282     if (ic->pb)
2283         ic->pb->eof_reached = 0; // FIXME hack, avplay should probably not use url_feof() to test for the end
2284
2285     if (seek_by_bytes < 0)
2286         seek_by_bytes = !!(ic->iformat->flags & AVFMT_TS_DISCONT);
2287
2288     /* if seeking was requested, execute it now */
2289     if (start_time != AV_NOPTS_VALUE) {
2290         int64_t timestamp;
2291
2292         timestamp = start_time;
2293         /* add the stream start time */
2294         if (ic->start_time != AV_NOPTS_VALUE)
2295             timestamp += ic->start_time;
2296         ret = avformat_seek_file(ic, -1, INT64_MIN, timestamp, INT64_MAX, 0);
2297         if (ret < 0) {
2298             fprintf(stderr, "%s: could not seek to position %0.3f\n",
2299                     is->filename, (double)timestamp / AV_TIME_BASE);
2300         }
2301     }
2302
2303     for (i = 0; i < ic->nb_streams; i++)
2304         ic->streams[i]->discard = AVDISCARD_ALL;
2305     if (!video_disable)
2306         st_index[AVMEDIA_TYPE_VIDEO] =
2307             av_find_best_stream(ic, AVMEDIA_TYPE_VIDEO,
2308                                 wanted_stream[AVMEDIA_TYPE_VIDEO], -1, NULL, 0);
2309     if (!audio_disable)
2310         st_index[AVMEDIA_TYPE_AUDIO] =
2311             av_find_best_stream(ic, AVMEDIA_TYPE_AUDIO,
2312                                 wanted_stream[AVMEDIA_TYPE_AUDIO],
2313                                 st_index[AVMEDIA_TYPE_VIDEO],
2314                                 NULL, 0);
2315     if (!video_disable)
2316         st_index[AVMEDIA_TYPE_SUBTITLE] =
2317             av_find_best_stream(ic, AVMEDIA_TYPE_SUBTITLE,
2318                                 wanted_stream[AVMEDIA_TYPE_SUBTITLE],
2319                                 (st_index[AVMEDIA_TYPE_AUDIO] >= 0 ?
2320                                  st_index[AVMEDIA_TYPE_AUDIO] :
2321                                  st_index[AVMEDIA_TYPE_VIDEO]),
2322                                 NULL, 0);
2323     if (show_status) {
2324         av_dump_format(ic, 0, is->filename, 0);
2325     }
2326
2327     /* open the streams */
2328     if (st_index[AVMEDIA_TYPE_AUDIO] >= 0) {
2329         stream_component_open(is, st_index[AVMEDIA_TYPE_AUDIO]);
2330     }
2331
2332     ret = -1;
2333     if (st_index[AVMEDIA_TYPE_VIDEO] >= 0) {
2334         ret = stream_component_open(is, st_index[AVMEDIA_TYPE_VIDEO]);
2335     }
2336     is->refresh_tid = SDL_CreateThread(refresh_thread, is);
2337     if (ret < 0) {
2338         if (!display_disable)
2339             is->show_audio = 2;
2340     }
2341
2342     if (st_index[AVMEDIA_TYPE_SUBTITLE] >= 0) {
2343         stream_component_open(is, st_index[AVMEDIA_TYPE_SUBTITLE]);
2344     }
2345
2346     if (is->video_stream < 0 && is->audio_stream < 0) {
2347         fprintf(stderr, "%s: could not open codecs\n", is->filename);
2348         ret = -1;
2349         goto fail;
2350     }
2351
2352     for (;;) {
2353         if (is->abort_request)
2354             break;
2355         if (is->paused != is->last_paused) {
2356             is->last_paused = is->paused;
2357             if (is->paused)
2358                 is->read_pause_return = av_read_pause(ic);
2359             else
2360                 av_read_play(ic);
2361         }
2362 #if CONFIG_RTSP_DEMUXER
2363         if (is->paused && !strcmp(ic->iformat->name, "rtsp")) {
2364             /* wait 10 ms to avoid trying to get another packet */
2365             /* XXX: horrible */
2366             SDL_Delay(10);
2367             continue;
2368         }
2369 #endif
2370         if (is->seek_req) {
2371             int64_t seek_target = is->seek_pos;
2372             int64_t seek_min    = is->seek_rel > 0 ? seek_target - is->seek_rel + 2: INT64_MIN;
2373             int64_t seek_max    = is->seek_rel < 0 ? seek_target - is->seek_rel - 2: INT64_MAX;
2374 // FIXME the +-2 compensates for rounding not being done in the correct
2375 //       direction when the seek_pos/seek_rel variables are generated
2376
2377             ret = avformat_seek_file(is->ic, -1, seek_min, seek_target, seek_max, is->seek_flags);
2378             if (ret < 0) {
2379                 fprintf(stderr, "%s: error while seeking\n", is->ic->filename);
2380             } else {
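                     /* the seek succeeded: drop everything still queued and push
                        the flush packet so each decoder thread flushes its codec */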
2381                 if (is->audio_stream >= 0) {
2382                     packet_queue_flush(&is->audioq);
2383                     packet_queue_put(&is->audioq, &flush_pkt);
2384                 }
2385                 if (is->subtitle_stream >= 0) {
2386                     packet_queue_flush(&is->subtitleq);
2387                     packet_queue_put(&is->subtitleq, &flush_pkt);
2388                 }
2389                 if (is->video_stream >= 0) {
2390                     packet_queue_flush(&is->videoq);
2391                     packet_queue_put(&is->videoq, &flush_pkt);
2392                 }
2393             }
2394             is->seek_req = 0;
2395             eof = 0;
2396         }
2397
2398         /* if the queues are full, no need to read more */
2399         if (!infinite_buffer &&
2400               (is->audioq.size + is->videoq.size + is->subtitleq.size > MAX_QUEUE_SIZE
2401             || (   (is->audioq   .size  > MIN_AUDIOQ_SIZE || is->audio_stream < 0)
2402                 && (is->videoq   .nb_packets > MIN_FRAMES || is->video_stream < 0)
2403                 && (is->subtitleq.nb_packets > MIN_FRAMES || is->subtitle_stream < 0)))) {
2404             /* wait 10 ms */
2405             SDL_Delay(10);
2406             continue;
2407         }
2408         if (eof) {
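             /* at EOF, queue empty packets so decoders with delayed frames
                (CODEC_CAP_DELAY) get a chance to output what they still hold */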
2409             if (is->video_stream >= 0) {
2410                 av_init_packet(pkt);
2411                 pkt->data = NULL;
2412                 pkt->size = 0;
2413                 pkt->stream_index = is->video_stream;
2414                 packet_queue_put(&is->videoq, pkt);
2415             }
2416             if (is->audio_stream >= 0 &&
2417                 is->audio_st->codec->codec->capabilities & CODEC_CAP_DELAY) {
2418                 av_init_packet(pkt);
2419                 pkt->data = NULL;
2420                 pkt->size = 0;
2421                 pkt->stream_index = is->audio_stream;
2422                 packet_queue_put(&is->audioq, pkt);
2423             }
2424             SDL_Delay(10);
2425             if (is->audioq.size + is->videoq.size + is->subtitleq.size == 0) {
2426                 if (loop != 1 && (!loop || --loop)) {
2427                     stream_seek(cur_stream, start_time != AV_NOPTS_VALUE ? start_time : 0, 0, 0);
2428                 } else if (autoexit) {
2429                     ret = AVERROR_EOF;
2430                     goto fail;
2431                 }
2432             }
2433             continue;
2434         }
2435         ret = av_read_frame(ic, pkt);
2436         if (ret < 0) {
2437             if (ret == AVERROR_EOF || (ic->pb && ic->pb->eof_reached))
2438                 eof = 1;
2439             if (ic->pb && ic->pb->error)
2440                 break;
2441             SDL_Delay(100); /* wait for user event */
2442             continue;
2443         }
2444         /* check if the packet is in the play range specified by the user, then queue it, otherwise discard it */
2445         pkt_in_play_range = duration == AV_NOPTS_VALUE ||
2446                 (pkt->pts - ic->streams[pkt->stream_index]->start_time) *
2447                 av_q2d(ic->streams[pkt->stream_index]->time_base) -
2448                 (double)(start_time != AV_NOPTS_VALUE ? start_time : 0) / 1000000
2449                 <= ((double)duration / 1000000);
2450         if (pkt->stream_index == is->audio_stream && pkt_in_play_range) {
2451             packet_queue_put(&is->audioq, pkt);
2452         } else if (pkt->stream_index == is->video_stream && pkt_in_play_range) {
2453             packet_queue_put(&is->videoq, pkt);
2454         } else if (pkt->stream_index == is->subtitle_stream && pkt_in_play_range) {
2455             packet_queue_put(&is->subtitleq, pkt);
2456         } else {
2457             av_free_packet(pkt);
2458         }
2459     }
2460     /* wait until the end */
2461     while (!is->abort_request) {
2462         SDL_Delay(100);
2463     }
2464
2465     ret = 0;
2466  fail:
2467     /* disable interrupting */
2468     global_video_state = NULL;
2469
2470     /* close each stream */
2471     if (is->audio_stream >= 0)
2472         stream_component_close(is, is->audio_stream);
2473     if (is->video_stream >= 0)
2474         stream_component_close(is, is->video_stream);
2475     if (is->subtitle_stream >= 0)
2476         stream_component_close(is, is->subtitle_stream);
2477     if (is->ic) {
2478         avformat_close_input(&is->ic);
2479     }
2480
2481     if (ret != 0) {
2482         SDL_Event event;
2483
2484         event.type = FF_QUIT_EVENT;
2485         event.user.data1 = is;
2486         SDL_PushEvent(&event);
2487     }
2488     return 0;
2489 }
2490
2491 static VideoState *stream_open(const char *filename, AVInputFormat *iformat)
2492 {
2493     VideoState *is;
2494
2495     is = av_mallocz(sizeof(VideoState));
2496     if (!is)
2497         return NULL;
2498     av_strlcpy(is->filename, filename, sizeof(is->filename));
2499     is->iformat = iformat;
2500     is->ytop    = 0;
2501     is->xleft   = 0;
2502
2503     /* start video display */
2504     is->pictq_mutex = SDL_CreateMutex();
2505     is->pictq_cond  = SDL_CreateCond();
2506
2507     is->subpq_mutex = SDL_CreateMutex();
2508     is->subpq_cond  = SDL_CreateCond();
2509
2510     is->av_sync_type = av_sync_type;
2511     is->parse_tid    = SDL_CreateThread(decode_thread, is);
2512     if (!is->parse_tid) {
2513         av_free(is);
2514         return NULL;
2515     }
2516     return is;
2517 }
2518
2519 static void stream_cycle_channel(VideoState *is, int codec_type)
2520 {
2521     AVFormatContext *ic = is->ic;
2522     int start_index, stream_index;
2523     AVStream *st;
2524
2525     if (codec_type == AVMEDIA_TYPE_VIDEO)
2526         start_index = is->video_stream;
2527     else if (codec_type == AVMEDIA_TYPE_AUDIO)
2528         start_index = is->audio_stream;
2529     else
2530         start_index = is->subtitle_stream;
2531     if (start_index < (codec_type == AVMEDIA_TYPE_SUBTITLE ? -1 : 0))
2532         return;
2533     stream_index = start_index;
2534     for (;;) {
2535         if (++stream_index >= is->ic->nb_streams) {
2537             if (codec_type == AVMEDIA_TYPE_SUBTITLE) {
2539                 stream_index = -1;
2540                 goto the_end;
2541             } else
2542                 stream_index = 0;
2543         }
2544         if (stream_index == start_index)
2545             return;
2546         st = ic->streams[stream_index];
2547         if (st->codec->codec_type == codec_type) {
2548             /* check that parameters are OK */
2549             switch (codec_type) {
2550             case AVMEDIA_TYPE_AUDIO:
2551                 if (st->codec->sample_rate != 0 &&
2552                     st->codec->channels != 0)
2553                     goto the_end;
2554                 break;
2555             case AVMEDIA_TYPE_VIDEO:
2556             case AVMEDIA_TYPE_SUBTITLE:
2557                 goto the_end;
2558             default:
2559                 break;
2560             }
2561         }
2562     }
2563  the_end:
2564     stream_component_close(is, start_index);
2565     stream_component_open(is, stream_index);
2566 }
2567
2568
2569 static void toggle_full_screen(void)
2570 {
2571 #if defined(__APPLE__) && SDL_VERSION_ATLEAST(1, 2, 14)
2572     /* OS X needs to empty the picture_queue */
2573     int i;
2574     for (i = 0; i < VIDEO_PICTURE_QUEUE_SIZE; i++)
2575         cur_stream->pictq[i].reallocate = 1;
2576 #endif
2577     is_full_screen = !is_full_screen;
2578     video_open(cur_stream);
2579 }
2580
2581 static void toggle_pause(void)
2582 {
2583     if (cur_stream)
2584         stream_pause(cur_stream);
2585     step = 0;
2586 }
2587
2588 static void step_to_next_frame(void)
2589 {
2590     if (cur_stream) {
2591         /* if the stream is paused, unpause it, then step */
2592         if (cur_stream->paused)
2593             stream_pause(cur_stream);
2594     }
2595     step = 1;
2596 }
2597
2598 static void toggle_audio_display(void)
2599 {
2600     if (cur_stream) {
2601         int bgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0x00);
2602         cur_stream->show_audio = (cur_stream->show_audio + 1) % 3;
2603         fill_rectangle(screen,
2604                        cur_stream->xleft, cur_stream->ytop, cur_stream->width, cur_stream->height,
2605                        bgcolor);
2606         SDL_UpdateRect(screen, cur_stream->xleft, cur_stream->ytop, cur_stream->width, cur_stream->height);
2607     }
2608 }
2609
2610 /* handle an event sent by the GUI */
2611 static void event_loop(void)
2612 {
2613     SDL_Event event;
2614     double incr, pos, frac;
2615
2616     for (;;) {
2617         double x;
2618         SDL_WaitEvent(&event);
2619         switch (event.type) {
2620         case SDL_KEYDOWN:
2621             if (exit_on_keydown) {
2622                 do_exit();
2623                 break;
2624             }
2625             switch (event.key.keysym.sym) {
2626             case SDLK_ESCAPE:
2627             case SDLK_q:
2628                 do_exit();
2629                 break;
2630             case SDLK_f:
2631                 toggle_full_screen();
2632                 break;
2633             case SDLK_p:
2634             case SDLK_SPACE:
2635                 toggle_pause();
2636                 break;
2637             case SDLK_s: // S: Step to next frame
2638                 step_to_next_frame();
2639                 break;
2640             case SDLK_a:
2641                 if (cur_stream)
2642                     stream_cycle_channel(cur_stream, AVMEDIA_TYPE_AUDIO);
2643                 break;
2644             case SDLK_v:
2645                 if (cur_stream)
2646                     stream_cycle_channel(cur_stream, AVMEDIA_TYPE_VIDEO);
2647                 break;
2648             case SDLK_t:
2649                 if (cur_stream)
2650                     stream_cycle_channel(cur_stream, AVMEDIA_TYPE_SUBTITLE);
2651                 break;
2652             case SDLK_w:
2653                 toggle_audio_display();
2654                 break;
2655             case SDLK_LEFT:
2656                 incr = -10.0;
2657                 goto do_seek;
2658             case SDLK_RIGHT:
2659                 incr = 10.0;
2660                 goto do_seek;
2661             case SDLK_UP:
2662                 incr = 60.0;
2663                 goto do_seek;
2664             case SDLK_DOWN:
2665                 incr = -60.0;
2666             do_seek:
2667                 if (cur_stream) {
2668                     if (seek_by_bytes) {
2669                         if (cur_stream->video_stream >= 0 && cur_stream->video_current_pos >= 0) {
2670                             pos = cur_stream->video_current_pos;
2671                         } else if (cur_stream->audio_stream >= 0 && cur_stream->audio_pkt.pos >= 0) {
2672                             pos = cur_stream->audio_pkt.pos;
2673                         } else
2674                             pos = avio_tell(cur_stream->ic->pb);
2675                         if (cur_stream->ic->bit_rate)
2676                             incr *= cur_stream->ic->bit_rate / 8.0;
2677                         else
2678                             incr *= 180000.0;
2679                         pos += incr;
2680                         stream_seek(cur_stream, pos, incr, 1);
2681                     } else {
2682                         pos = get_master_clock(cur_stream);
2683                         pos += incr;
2684                         stream_seek(cur_stream, (int64_t)(pos * AV_TIME_BASE), (int64_t)(incr * AV_TIME_BASE), 0);
2685                     }
2686                 }
2687                 break;
2688             default:
2689                 break;
2690             }
2691             break;
2692         case SDL_MOUSEBUTTONDOWN:
2693             if (exit_on_mousedown) {
2694                 do_exit();
2695                 break;
2696             }
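                 /* fall through: a mouse click (when not exiting) is handled
                    together with mouse motion below */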
2697         case SDL_MOUSEMOTION:
2698             if (event.type == SDL_MOUSEBUTTONDOWN) {
2699                 x = event.button.x;
2700             } else {
2701                 if (event.motion.state != SDL_PRESSED)
2702                     break;
2703                 x = event.motion.x;
2704             }
2705             if (cur_stream) {
2706                 if (seek_by_bytes || cur_stream->ic->duration <= 0) {
2707                     uint64_t size = avio_size(cur_stream->ic->pb);
2708                     stream_seek(cur_stream, size*x/cur_stream->width, 0, 1);
2709                 } else {
2710                     int64_t ts;
2711                     int ns, hh, mm, ss;
2712                     int tns, thh, tmm, tss;
2713                     tns  = cur_stream->ic->duration / 1000000LL;
2714                     thh  = tns / 3600;
2715                     tmm  = (tns % 3600) / 60;
2716                     tss  = (tns % 60);
2717                     frac = x / cur_stream->width;
2718                     ns   = frac * tns;
2719                     hh   = ns / 3600;
2720                     mm   = (ns % 3600) / 60;
2721                     ss   = (ns % 60);
2722                     fprintf(stderr, "Seek to %2.0f%% (%2d:%02d:%02d) of total duration (%2d:%02d:%02d)       \n", frac*100,
2723                             hh, mm, ss, thh, tmm, tss);
2724                     ts = frac * cur_stream->ic->duration;
2725                     if (cur_stream->ic->start_time != AV_NOPTS_VALUE)
2726                         ts += cur_stream->ic->start_time;
2727                     stream_seek(cur_stream, ts, 0, 0);
2728                 }
2729             }
2730             break;
2731         case SDL_VIDEORESIZE:
2732             if (cur_stream) {
2733                 screen = SDL_SetVideoMode(event.resize.w, event.resize.h, 0,
2734                                           SDL_HWSURFACE|SDL_RESIZABLE|SDL_ASYNCBLIT|SDL_HWACCEL);
2735                 screen_width  = cur_stream->width  = event.resize.w;
2736                 screen_height = cur_stream->height = event.resize.h;
2737             }
2738             break;
2739         case SDL_QUIT:
2740         case FF_QUIT_EVENT:
2741             do_exit();
2742             break;
2743         case FF_ALLOC_EVENT:
2744             video_open(event.user.data1);
2745             alloc_picture(event.user.data1);
2746             break;
2747         case FF_REFRESH_EVENT:
2748             video_refresh_timer(event.user.data1);
2749             cur_stream->refresh = 0;
2750             break;
2751         default:
2752             break;
2753         }
2754     }
2755 }
2756
2757 static int opt_frame_size(void *optctx, const char *opt, const char *arg)
2758 {
2759     av_log(NULL, AV_LOG_ERROR,
2760            "Option '%s' has been removed, use private format options instead\n", opt);
2761     return AVERROR(EINVAL);
2762 }
2763
2764 static int opt_width(void *optctx, const char *opt, const char *arg)
2765 {
2766     screen_width = parse_number_or_die(opt, arg, OPT_INT64, 1, INT_MAX);
2767     return 0;
2768 }
2769
2770 static int opt_height(void *optctx, const char *opt, const char *arg)
2771 {
2772     screen_height = parse_number_or_die(opt, arg, OPT_INT64, 1, INT_MAX);
2773     return 0;
2774 }
2775
2776 static int opt_format(void *optctx, const char *opt, const char *arg)
2777 {
2778     file_iformat = av_find_input_format(arg);
2779     if (!file_iformat) {
2780         fprintf(stderr, "Unknown input format: %s\n", arg);
2781         return AVERROR(EINVAL);
2782     }
2783     return 0;
2784 }
2785
2786 static int opt_frame_pix_fmt(void *optctx, const char *opt, const char *arg)
2787 {
2788     av_log(NULL, AV_LOG_ERROR,
2789            "Option '%s' has been removed, use private format options instead\n", opt);
2790     return AVERROR(EINVAL);
2791 }
2792
2793 static int opt_sync(void *optctx, const char *opt, const char *arg)
2794 {
2795     if (!strcmp(arg, "audio"))
2796         av_sync_type = AV_SYNC_AUDIO_MASTER;
2797     else if (!strcmp(arg, "video"))
2798         av_sync_type = AV_SYNC_VIDEO_MASTER;
2799     else if (!strcmp(arg, "ext"))
2800         av_sync_type = AV_SYNC_EXTERNAL_CLOCK;
2801     else {
2802         fprintf(stderr, "Unknown value for %s: %s\n", opt, arg);
2803         exit(1);
2804     }
2805     return 0;
2806 }
2807
2808 static int opt_seek(void *optctx, const char *opt, const char *arg)
2809 {
2810     start_time = parse_time_or_die(opt, arg, 1);
2811     return 0;
2812 }
2813
2814 static int opt_duration(void *optctx, const char *opt, const char *arg)
2815 {
2816     duration = parse_time_or_die(opt, arg, 1);
2817     return 0;
2818 }
2819
2820 static int opt_debug(void *optctx, const char *opt, const char *arg)
2821 {
2822     av_log_set_level(99);
2823     debug = parse_number_or_die(opt, arg, OPT_INT64, 0, INT_MAX);
2824     return 0;
2825 }
2826
2827 static int opt_vismv(void *optctx, const char *opt, const char *arg)
2828 {
2829     debug_mv = parse_number_or_die(opt, arg, OPT_INT64, INT_MIN, INT_MAX);
2830     return 0;
2831 }
2832
2833 static const OptionDef options[] = {
2834 #include "cmdutils_common_opts.h"
2835     { "x", HAS_ARG, { .func_arg = opt_width }, "force displayed width", "width" },
2836     { "y", HAS_ARG, { .func_arg = opt_height }, "force displayed height", "height" },
2837     { "s", HAS_ARG | OPT_VIDEO, { .func_arg = opt_frame_size }, "set frame size (WxH or abbreviation)", "size" },
2838     { "fs", OPT_BOOL, { &is_full_screen }, "force full screen" },
2839     { "an", OPT_BOOL, { &audio_disable }, "disable audio" },
2840     { "vn", OPT_BOOL, { &video_disable }, "disable video" },
2841     { "ast", OPT_INT | HAS_ARG | OPT_EXPERT, { &wanted_stream[AVMEDIA_TYPE_AUDIO] }, "select desired audio stream", "stream_number" },
2842     { "vst", OPT_INT | HAS_ARG | OPT_EXPERT, { &wanted_stream[AVMEDIA_TYPE_VIDEO] }, "select desired video stream", "stream_number" },
2843     { "sst", OPT_INT | HAS_ARG | OPT_EXPERT, { &wanted_stream[AVMEDIA_TYPE_SUBTITLE] }, "select desired subtitle stream", "stream_number" },
2844     { "ss", HAS_ARG, { .func_arg = opt_seek }, "seek to a given position in seconds", "pos" },
2845     { "t", HAS_ARG, { .func_arg = opt_duration }, "play \"duration\" seconds of audio/video", "duration" },
2846     { "bytes", OPT_INT | HAS_ARG, { &seek_by_bytes }, "seek by bytes 0=off 1=on -1=auto", "val" },
2847     { "nodisp", OPT_BOOL, { &display_disable }, "disable graphical display" },
2848     { "f", HAS_ARG, { .func_arg = opt_format }, "force format", "fmt" },
2849     { "pix_fmt", HAS_ARG | OPT_EXPERT | OPT_VIDEO, { .func_arg = opt_frame_pix_fmt }, "set pixel format", "format" },
2850     { "stats", OPT_BOOL | OPT_EXPERT, { &show_status }, "show status", "" },
2851     { "debug", HAS_ARG | OPT_EXPERT, { .func_arg = opt_debug }, "print specific debug info", "" },
2852     { "bug", OPT_INT | HAS_ARG | OPT_EXPERT, { &workaround_bugs }, "workaround bugs", "" },
2853     { "vismv", HAS_ARG | OPT_EXPERT, { .func_arg = opt_vismv }, "visualize motion vectors", "" },
2854     { "fast", OPT_BOOL | OPT_EXPERT, { &fast }, "non spec compliant optimizations", "" },
2855     { "genpts", OPT_BOOL | OPT_EXPERT, { &genpts }, "generate pts", "" },
2856     { "drp", OPT_INT | HAS_ARG | OPT_EXPERT, { &decoder_reorder_pts }, "let decoder reorder pts 0=off 1=on -1=auto", ""},
2857     { "skiploop", OPT_INT | HAS_ARG | OPT_EXPERT, { &skip_loop_filter }, "", "" },
2858     { "skipframe", OPT_INT | HAS_ARG | OPT_EXPERT, { &skip_frame }, "", "" },
2859     { "skipidct", OPT_INT | HAS_ARG | OPT_EXPERT, { &skip_idct }, "", "" },
2860     { "idct", OPT_INT | HAS_ARG | OPT_EXPERT, { &idct }, "set idct algo",  "algo" },
2861     { "ec", OPT_INT | HAS_ARG | OPT_EXPERT, { &error_concealment }, "set error concealment options",  "bit_mask" },
2862     { "sync", HAS_ARG | OPT_EXPERT, { .func_arg = opt_sync }, "set audio-video sync. type (type=audio/video/ext)", "type" },
2863     { "autoexit", OPT_BOOL | OPT_EXPERT, { &autoexit }, "exit at the end", "" },
2864     { "exitonkeydown", OPT_BOOL | OPT_EXPERT, { &exit_on_keydown }, "exit on key down", "" },
2865     { "exitonmousedown", OPT_BOOL | OPT_EXPERT, { &exit_on_mousedown }, "exit on mouse down", "" },
2866     { "loop", OPT_INT | HAS_ARG | OPT_EXPERT, { &loop }, "set number of times the playback shall be looped", "loop count" },
2867     { "framedrop", OPT_BOOL | OPT_EXPERT, { &framedrop }, "drop frames when cpu is too slow", "" },
2868     { "infbuf", OPT_BOOL | OPT_EXPERT, { &infinite_buffer }, "don't limit the input buffer size (useful with realtime streams)", "" },
2869     { "window_title", OPT_STRING | HAS_ARG, { &window_title }, "set window title", "window title" },
2870 #if CONFIG_AVFILTER
2871     { "vf", OPT_STRING | HAS_ARG, { &vfilters }, "video filters", "filter list" },
2872 #endif
2873     { "rdftspeed", OPT_INT | HAS_ARG| OPT_AUDIO | OPT_EXPERT, { &rdftspeed }, "rdft speed", "msecs" },
2874     { "default", HAS_ARG | OPT_AUDIO | OPT_VIDEO | OPT_EXPERT, { opt_default }, "generic catch all option", "" },
2875     { "i", 0, { NULL }, "avconv compatibility dummy option", ""},
2876     { NULL, },
2877 };
2878
2879 static void show_usage(void)
2880 {
2881     printf("Simple media player\n");
2882     printf("usage: %s [options] input_file\n", program_name);
2883     printf("\n");
2884 }
2885
2886 void show_help_default(const char *opt, const char *arg)
2887 {
2888     av_log_set_callback(log_callback_help);
2889     show_usage();
2890     show_help_options(options, "Main options:", 0, OPT_EXPERT, 0);
2891     show_help_options(options, "Advanced options:", OPT_EXPERT, 0, 0);
2892     printf("\n");
2893     show_help_children(avcodec_get_class(), AV_OPT_FLAG_DECODING_PARAM);
2894     show_help_children(avformat_get_class(), AV_OPT_FLAG_DECODING_PARAM);
2895 #if !CONFIG_AVFILTER
2896     show_help_children(sws_get_class(), AV_OPT_FLAG_ENCODING_PARAM);
2897 #endif
2898     printf("\nWhile playing:\n"
2899            "q, ESC              quit\n"
2900            "f                   toggle full screen\n"
2901            "p, SPC              pause\n"
2902            "a                   cycle audio channel\n"
2903            "v                   cycle video channel\n"
2904            "t                   cycle subtitle channel\n"
2905            "w                   show audio waves\n"
2906            "s                   activate frame-step mode\n"
2907            "left/right          seek backward/forward 10 seconds\n"
2908            "down/up             seek backward/forward 1 minute\n"
2909            "mouse click         seek to percentage in file corresponding to fraction of width\n"
2910            );
2911 }
2912
2913 static void opt_input_file(void *optctx, const char *filename)
2914 {
2915     if (input_filename) {
2916         fprintf(stderr, "Argument '%s' provided as input filename, but '%s' was already specified.\n",
2917                 filename, input_filename);
2918         exit(1);
2919     }
2920     if (!strcmp(filename, "-"))
2921         filename = "pipe:";
2922     input_filename = filename;
2923 }
2924
2925 /* program entry point */
2926 int main(int argc, char **argv)
2927 {
2928     int flags;
2929
2930     av_log_set_flags(AV_LOG_SKIP_REPEATED);
2931     parse_loglevel(argc, argv, options);
2932
2933     /* register all codecs, demuxers and protocols */
2934     avcodec_register_all();
2935 #if CONFIG_AVDEVICE
2936     avdevice_register_all();
2937 #endif
2938 #if CONFIG_AVFILTER
2939     avfilter_register_all();
2940 #endif
2941     av_register_all();
2942     avformat_network_init();
2943
2944     init_opts();
2945
2946     show_banner();
2947
2948     parse_options(NULL, argc, argv, options, opt_input_file);
2949
2950     if (!input_filename) {
2951         show_usage();
2952         fprintf(stderr, "An input file must be specified\n");
2953         fprintf(stderr, "Use -h to get full help or, even better, run 'man %s'\n", program_name);
2954         exit(1);
2955     }
2956
2957     if (display_disable) {
2958         video_disable = 1;
2959     }
2960     flags = SDL_INIT_VIDEO | SDL_INIT_AUDIO | SDL_INIT_TIMER;
2961 #if !defined(__MINGW32__) && !defined(__APPLE__)
2962     flags |= SDL_INIT_EVENTTHREAD; /* Not supported on Windows or Mac OS X */
2963 #endif
2964     if (SDL_Init (flags)) {
2965         fprintf(stderr, "Could not initialize SDL - %s\n", SDL_GetError());
2966         exit(1);
2967     }
2968
2969     if (!display_disable) {
2970 #if HAVE_SDL_VIDEO_SIZE
2971         const SDL_VideoInfo *vi = SDL_GetVideoInfo();
2972         fs_screen_width = vi->current_w;
2973         fs_screen_height = vi->current_h;
2974 #endif
2975     }
2976
2977     SDL_EventState(SDL_ACTIVEEVENT, SDL_IGNORE);
2978     SDL_EventState(SDL_SYSWMEVENT, SDL_IGNORE);
2979     SDL_EventState(SDL_USEREVENT, SDL_IGNORE);
2980
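         /* flush_pkt is a sentinel: the decoder threads compare a queued
            packet's data pointer against flush_pkt.data to know when to call
            avcodec_flush_buffers() after a seek */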
2981     av_init_packet(&flush_pkt);
2982     flush_pkt.data = (uint8_t *)"FLUSH";
2983
2984     cur_stream = stream_open(input_filename, file_iformat);
2985
2986     event_loop();
2987
2988     /* never reached: event_loop() exits the process via do_exit() */
2989
2990     return 0;
2991 }