1 /*
2  * avplay : Simple Media Player based on the Libav libraries
3  * Copyright (c) 2003 Fabrice Bellard
4  *
5  * This file is part of Libav.
6  *
7  * Libav is free software; you can redistribute it and/or
8  * modify it under the terms of the GNU Lesser General Public
9  * License as published by the Free Software Foundation; either
10  * version 2.1 of the License, or (at your option) any later version.
11  *
12  * Libav is distributed in the hope that it will be useful,
13  * but WITHOUT ANY WARRANTY; without even the implied warranty of
14  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
15  * Lesser General Public License for more details.
16  *
17  * You should have received a copy of the GNU Lesser General Public
18  * License along with Libav; if not, write to the Free Software
19  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
20  */
21
22 #include "config.h"
23 #include <inttypes.h>
24 #include <math.h>
25 #include <limits.h>
26 #include "libavutil/avstring.h"
27 #include "libavutil/colorspace.h"
28 #include "libavutil/mathematics.h"
29 #include "libavutil/pixdesc.h"
30 #include "libavutil/imgutils.h"
31 #include "libavutil/dict.h"
32 #include "libavutil/parseutils.h"
33 #include "libavutil/samplefmt.h"
34 #include "libavutil/time.h"
35 #include "libavformat/avformat.h"
36 #include "libavdevice/avdevice.h"
37 #include "libswscale/swscale.h"
38 #include "libavresample/avresample.h"
39 #include "libavutil/opt.h"
40 #include "libavcodec/avfft.h"
41
42 #if CONFIG_AVFILTER
43 # include "libavfilter/avfilter.h"
44 # include "libavfilter/avfiltergraph.h"
45 # include "libavfilter/buffersink.h"
46 # include "libavfilter/buffersrc.h"
47 #endif
48
49 #include "cmdutils.h"
50
51 #include <SDL.h>
52 #include <SDL_thread.h>
53
54 #ifdef __MINGW32__
55 #undef main /* We don't want SDL to override our main() */
56 #endif
57
58 #include <assert.h>
59
60 const char program_name[] = "avplay";
61 const int program_birth_year = 2003;
62
63 #define MAX_QUEUE_SIZE (15 * 1024 * 1024)
64 #define MIN_AUDIOQ_SIZE (20 * 16 * 1024)
65 #define MIN_FRAMES 5
66
67 /* SDL audio buffer size, in samples. Should be small to have precise
68    A/V sync as SDL does not have hardware buffer fullness info. */
69 #define SDL_AUDIO_BUFFER_SIZE 1024
70
71 /* no AV sync correction is done if below the AV sync threshold */
72 #define AV_SYNC_THRESHOLD 0.01
73 /* no AV correction is done if the error is too big */
74 #define AV_NOSYNC_THRESHOLD 10.0
75
76 #define FRAME_SKIP_FACTOR 0.05
77
78 /* maximum audio speed change to get correct sync */
79 #define SAMPLE_CORRECTION_PERCENT_MAX 10
80
81 /* we use about AUDIO_DIFF_AVG_NB A-V differences to compute the average */
82 #define AUDIO_DIFF_AVG_NB   20
83
84 /* NOTE: the size must be big enough to compensate for the hardware audio buffer size */
85 #define SAMPLE_ARRAY_SIZE (2 * 65536)
86
87 static int64_t sws_flags = SWS_BICUBIC;
88
89 typedef struct PacketQueue {
90     AVPacketList *first_pkt, *last_pkt;
91     int nb_packets;
92     int size;
93     int abort_request;
94     SDL_mutex *mutex;
95     SDL_cond *cond;
96 } PacketQueue;
97
98 #define VIDEO_PICTURE_QUEUE_SIZE 2
99 #define SUBPICTURE_QUEUE_SIZE 4
100
101 typedef struct VideoPicture {
102     double pts;             // presentation timestamp for this picture
103     double target_clock;    // av_gettime() time at which this should be displayed ideally
104     int64_t pos;            // byte position in file
105     SDL_Overlay *bmp;
106     int width, height; /* source height & width */
107     int allocated;
108     int reallocate;
109     enum AVPixelFormat pix_fmt;
110
111     AVRational sar;
112 } VideoPicture;
113
114 typedef struct SubPicture {
115     double pts; /* presentation time stamp for this picture */
116     AVSubtitle sub;
117 } SubPicture;
118
119 enum {
120     AV_SYNC_AUDIO_MASTER, /* default choice */
121     AV_SYNC_VIDEO_MASTER,
122     AV_SYNC_EXTERNAL_CLOCK, /* synchronize to an external clock */
123 };
124
125 typedef struct VideoState {
126     SDL_Thread *parse_tid;
127     SDL_Thread *video_tid;
128     SDL_Thread *refresh_tid;
129     AVInputFormat *iformat;
130     int no_background;
131     int abort_request;
132     int paused;
133     int last_paused;
134     int seek_req;
135     int seek_flags;
136     int64_t seek_pos;
137     int64_t seek_rel;
138     int read_pause_return;
139     AVFormatContext *ic;
140
141     int audio_stream;
142
143     int av_sync_type;
144     double external_clock; /* external clock base */
145     int64_t external_clock_time;
146
147     double audio_clock;
148     double audio_diff_cum; /* used for AV difference average computation */
149     double audio_diff_avg_coef;
150     double audio_diff_threshold;
151     int audio_diff_avg_count;
152     AVStream *audio_st;
153     PacketQueue audioq;
154     int audio_hw_buf_size;
155     uint8_t silence_buf[SDL_AUDIO_BUFFER_SIZE];
156     uint8_t *audio_buf;
157     uint8_t *audio_buf1;
158     unsigned int audio_buf_size; /* in bytes */
159     int audio_buf_index; /* in bytes */
160     AVPacket audio_pkt_temp;
161     AVPacket audio_pkt;
162     enum AVSampleFormat sdl_sample_fmt;
163     uint64_t sdl_channel_layout;
164     int sdl_channels;
165     int sdl_sample_rate;
166     enum AVSampleFormat resample_sample_fmt;
167     uint64_t resample_channel_layout;
168     int resample_sample_rate;
169     AVAudioResampleContext *avr;
170     AVFrame *frame;
171
172     int show_audio; /* if true, display audio samples */
173     int16_t sample_array[SAMPLE_ARRAY_SIZE];
174     int sample_array_index;
175     int last_i_start;
176     RDFTContext *rdft;
177     int rdft_bits;
178     FFTSample *rdft_data;
179     int xpos;
180
181     SDL_Thread *subtitle_tid;
182     int subtitle_stream;
183     int subtitle_stream_changed;
184     AVStream *subtitle_st;
185     PacketQueue subtitleq;
186     SubPicture subpq[SUBPICTURE_QUEUE_SIZE];
187     int subpq_size, subpq_rindex, subpq_windex;
188     SDL_mutex *subpq_mutex;
189     SDL_cond *subpq_cond;
190
191     double frame_timer;
192     double frame_last_pts;
193     double frame_last_delay;
194     double video_clock;             // pts of last decoded frame / predicted pts of next decoded frame
195     int video_stream;
196     AVStream *video_st;
197     PacketQueue videoq;
198     double video_current_pts;       // current displayed pts (different from video_clock if frame fifos are used)
199     double video_current_pts_drift; // video_current_pts - time (av_gettime) at which we updated video_current_pts - used to have running video pts
200     int64_t video_current_pos;      // current displayed file pos
201     VideoPicture pictq[VIDEO_PICTURE_QUEUE_SIZE];
202     int pictq_size, pictq_rindex, pictq_windex;
203     SDL_mutex *pictq_mutex;
204     SDL_cond *pictq_cond;
205 #if !CONFIG_AVFILTER
206     struct SwsContext *img_convert_ctx;
207 #endif
208
209     //    QETimer *video_timer;
210     char filename[1024];
211     int width, height, xleft, ytop;
212
213     PtsCorrectionContext pts_ctx;
214
215 #if CONFIG_AVFILTER
216     AVFilterContext *in_video_filter;   // the first filter in the video chain
217     AVFilterContext *out_video_filter;  // the last filter in the video chain
218 #endif
219
220     float skip_frames;
221     float skip_frames_index;
222     int refresh;
223 } VideoState;
224
225 /* options specified by the user */
226 static AVInputFormat *file_iformat;
227 static const char *input_filename;
228 static const char *window_title;
229 static int fs_screen_width;
230 static int fs_screen_height;
231 static int screen_width  = 0;
232 static int screen_height = 0;
233 static int audio_disable;
234 static int video_disable;
235 static int wanted_stream[AVMEDIA_TYPE_NB] = {
236     [AVMEDIA_TYPE_AUDIO]    = -1,
237     [AVMEDIA_TYPE_VIDEO]    = -1,
238     [AVMEDIA_TYPE_SUBTITLE] = -1,
239 };
240 static int seek_by_bytes = -1;
241 static int display_disable;
242 static int show_status = 1;
243 static int av_sync_type = AV_SYNC_AUDIO_MASTER;
244 static int64_t start_time = AV_NOPTS_VALUE;
245 static int64_t duration = AV_NOPTS_VALUE;
246 static int debug_mv = 0;
247 static int step = 0;
248 static int workaround_bugs = 1;
249 static int fast = 0;
250 static int genpts = 0;
251 static int idct = FF_IDCT_AUTO;
252 static enum AVDiscard skip_frame       = AVDISCARD_DEFAULT;
253 static enum AVDiscard skip_idct        = AVDISCARD_DEFAULT;
254 static enum AVDiscard skip_loop_filter = AVDISCARD_DEFAULT;
255 static int error_concealment = 3;
256 static int decoder_reorder_pts = -1;
257 static int autoexit;
258 static int exit_on_keydown;
259 static int exit_on_mousedown;
260 static int loop = 1;
261 static int framedrop = 1;
262 static int infinite_buffer = 0;
263
264 static int rdftspeed = 20;
265 #if CONFIG_AVFILTER
266 static char *vfilters = NULL;
267 #endif
268
269 /* current context */
270 static int is_full_screen;
271 static VideoState *cur_stream;
272 static int64_t audio_callback_time;
273
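/* flush_pkt is a sentinel packet: packet_queue_init() queues it, and when a
   decoder thread dequeues it, the thread flushes its codec buffers and resets
   its timing state instead of decoding it (see get_video_frame() and
   subtitle_thread()). */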
274 static AVPacket flush_pkt;
275
276 #define FF_ALLOC_EVENT   (SDL_USEREVENT)
277 #define FF_REFRESH_EVENT (SDL_USEREVENT + 1)
278 #define FF_QUIT_EVENT    (SDL_USEREVENT + 2)
279
280 static SDL_Surface *screen;
281
282 static int packet_queue_put(PacketQueue *q, AVPacket *pkt);
283
284 /* packet queue handling */
285 static void packet_queue_init(PacketQueue *q)
286 {
287     memset(q, 0, sizeof(PacketQueue));
288     q->mutex = SDL_CreateMutex();
289     q->cond = SDL_CreateCond();
290     packet_queue_put(q, &flush_pkt);
291 }
292
293 static void packet_queue_flush(PacketQueue *q)
294 {
295     AVPacketList *pkt, *pkt1;
296
297     SDL_LockMutex(q->mutex);
298     for (pkt = q->first_pkt; pkt != NULL; pkt = pkt1) {
299         pkt1 = pkt->next;
300         av_free_packet(&pkt->pkt);
301         av_freep(&pkt);
302     }
303     q->last_pkt = NULL;
304     q->first_pkt = NULL;
305     q->nb_packets = 0;
306     q->size = 0;
307     SDL_UnlockMutex(q->mutex);
308 }
309
310 static void packet_queue_end(PacketQueue *q)
311 {
312     packet_queue_flush(q);
313     SDL_DestroyMutex(q->mutex);
314     SDL_DestroyCond(q->cond);
315 }
316
317 static int packet_queue_put(PacketQueue *q, AVPacket *pkt)
318 {
319     AVPacketList *pkt1;
320
321     /* duplicate the packet */
322     if (pkt != &flush_pkt && av_dup_packet(pkt) < 0)
323         return -1;
324
325     pkt1 = av_malloc(sizeof(AVPacketList));
326     if (!pkt1)
327         return -1;
328     pkt1->pkt = *pkt;
329     pkt1->next = NULL;
330
331
332     SDL_LockMutex(q->mutex);
333
334     if (!q->last_pkt)
335
336         q->first_pkt = pkt1;
337     else
338         q->last_pkt->next = pkt1;
339     q->last_pkt = pkt1;
340     q->nb_packets++;
341     q->size += pkt1->pkt.size + sizeof(*pkt1);
342     /* XXX: should duplicate packet data in DV case */
343     SDL_CondSignal(q->cond);
344
345     SDL_UnlockMutex(q->mutex);
346     return 0;
347 }
348
349 static void packet_queue_abort(PacketQueue *q)
350 {
351     SDL_LockMutex(q->mutex);
352
353     q->abort_request = 1;
354
355     SDL_CondSignal(q->cond);
356
357     SDL_UnlockMutex(q->mutex);
358 }
359
360 /* return < 0 if aborted, 0 if no packet and > 0 if packet.  */
361 static int packet_queue_get(PacketQueue *q, AVPacket *pkt, int block)
362 {
363     AVPacketList *pkt1;
364     int ret;
365
366     SDL_LockMutex(q->mutex);
367
368     for (;;) {
369         if (q->abort_request) {
370             ret = -1;
371             break;
372         }
373
374         pkt1 = q->first_pkt;
375         if (pkt1) {
376             q->first_pkt = pkt1->next;
377             if (!q->first_pkt)
378                 q->last_pkt = NULL;
379             q->nb_packets--;
380             q->size -= pkt1->pkt.size + sizeof(*pkt1);
381             *pkt = pkt1->pkt;
382             av_free(pkt1);
383             ret = 1;
384             break;
385         } else if (!block) {
386             ret = 0;
387             break;
388         } else {
389             SDL_CondWait(q->cond, q->mutex);
390         }
391     }
392     SDL_UnlockMutex(q->mutex);
393     return ret;
394 }
395
396 static inline void fill_rectangle(SDL_Surface *screen,
397                                   int x, int y, int w, int h, int color)
398 {
399     SDL_Rect rect;
400     rect.x = x;
401     rect.y = y;
402     rect.w = w;
403     rect.h = h;
404     SDL_FillRect(screen, &rect, color);
405 }
406
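/* Blend a new component value into an old one with alpha a (0..255):
 *   result = (oldp * (255 - a) + (newp >> s) * a) / 255
 * The shift s lets callers pass newp as a sum of 2^s neighbouring samples
 * (s = 1 for two chroma samples, s = 2 for four), so the sum is averaged
 * as part of the blend. */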
407 #define ALPHA_BLEND(a, oldp, newp, s)\
408 ((((oldp << s) * (255 - (a))) + (newp * (a))) / (255 << s))
409
410 #define RGBA_IN(r, g, b, a, s)\
411 {\
412     unsigned int v = ((const uint32_t *)(s))[0];\
413     a = (v >> 24) & 0xff;\
414     r = (v >> 16) & 0xff;\
415     g = (v >> 8) & 0xff;\
416     b = v & 0xff;\
417 }
418
419 #define YUVA_IN(y, u, v, a, s, pal)\
420 {\
421     unsigned int val = ((const uint32_t *)(pal))[*(const uint8_t*)(s)];\
422     a = (val >> 24) & 0xff;\
423     y = (val >> 16) & 0xff;\
424     u = (val >> 8) & 0xff;\
425     v = val & 0xff;\
426 }
427
428 #define YUVA_OUT(d, y, u, v, a)\
429 {\
430     ((uint32_t *)(d))[0] = (a << 24) | (y << 16) | (u << 8) | v;\
431 }
432
433
434 #define BPP 1
435
436 static void blend_subrect(AVPicture *dst, const AVSubtitleRect *rect, int imgw, int imgh)
437 {
438     int wrap, wrap3, width2, skip2;
439     int y, u, v, a, u1, v1, a1, w, h;
440     uint8_t *lum, *cb, *cr;
441     const uint8_t *p;
442     const uint32_t *pal;
443     int dstx, dsty, dstw, dsth;
444
445     dstw = av_clip(rect->w, 0, imgw);
446     dsth = av_clip(rect->h, 0, imgh);
447     dstx = av_clip(rect->x, 0, imgw - dstw);
448     dsty = av_clip(rect->y, 0, imgh - dsth);
449     lum = dst->data[0] + dsty * dst->linesize[0];
450     cb  = dst->data[1] + (dsty >> 1) * dst->linesize[1];
451     cr  = dst->data[2] + (dsty >> 1) * dst->linesize[2];
452
453     width2 = ((dstw + 1) >> 1) + (dstx & ~dstw & 1);
454     skip2 = dstx >> 1;
455     wrap = dst->linesize[0];
456     wrap3 = rect->pict.linesize[0];
457     p = rect->pict.data[0];
458     pal = (const uint32_t *)rect->pict.data[1];  /* Now in YCrCb! */
459
460     if (dsty & 1) {
461         lum += dstx;
462         cb += skip2;
463         cr += skip2;
464
465         if (dstx & 1) {
466             YUVA_IN(y, u, v, a, p, pal);
467             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
468             cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
469             cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
470             cb++;
471             cr++;
472             lum++;
473             p += BPP;
474         }
475         for (w = dstw - (dstx & 1); w >= 2; w -= 2) {
476             YUVA_IN(y, u, v, a, p, pal);
477             u1 = u;
478             v1 = v;
479             a1 = a;
480             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
481
482             YUVA_IN(y, u, v, a, p + BPP, pal);
483             u1 += u;
484             v1 += v;
485             a1 += a;
486             lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
487             cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
488             cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
489             cb++;
490             cr++;
491             p += 2 * BPP;
492             lum += 2;
493         }
494         if (w) {
495             YUVA_IN(y, u, v, a, p, pal);
496             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
497             cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
498             cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
499             p++;
500             lum++;
501         }
502         p += wrap3 - dstw * BPP;
503         lum += wrap - dstw - dstx;
504         cb += dst->linesize[1] - width2 - skip2;
505         cr += dst->linesize[2] - width2 - skip2;
506     }
507     for (h = dsth - (dsty & 1); h >= 2; h -= 2) {
508         lum += dstx;
509         cb += skip2;
510         cr += skip2;
511
512         if (dstx & 1) {
513             YUVA_IN(y, u, v, a, p, pal);
514             u1 = u;
515             v1 = v;
516             a1 = a;
517             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
518             p += wrap3;
519             lum += wrap;
520             YUVA_IN(y, u, v, a, p, pal);
521             u1 += u;
522             v1 += v;
523             a1 += a;
524             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
525             cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
526             cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
527             cb++;
528             cr++;
529             p += -wrap3 + BPP;
530             lum += -wrap + 1;
531         }
532         for (w = dstw - (dstx & 1); w >= 2; w -= 2) {
533             YUVA_IN(y, u, v, a, p, pal);
534             u1 = u;
535             v1 = v;
536             a1 = a;
537             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
538
539             YUVA_IN(y, u, v, a, p + BPP, pal);
540             u1 += u;
541             v1 += v;
542             a1 += a;
543             lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
544             p += wrap3;
545             lum += wrap;
546
547             YUVA_IN(y, u, v, a, p, pal);
548             u1 += u;
549             v1 += v;
550             a1 += a;
551             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
552
553             YUVA_IN(y, u, v, a, p + BPP, pal);
554             u1 += u;
555             v1 += v;
556             a1 += a;
557             lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
558
559             cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 2);
560             cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 2);
561
562             cb++;
563             cr++;
564             p += -wrap3 + 2 * BPP;
565             lum += -wrap + 2;
566         }
567         if (w) {
568             YUVA_IN(y, u, v, a, p, pal);
569             u1 = u;
570             v1 = v;
571             a1 = a;
572             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
573             p += wrap3;
574             lum += wrap;
575             YUVA_IN(y, u, v, a, p, pal);
576             u1 += u;
577             v1 += v;
578             a1 += a;
579             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
580             cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
581             cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
582             cb++;
583             cr++;
584             p += -wrap3 + BPP;
585             lum += -wrap + 1;
586         }
587         p += wrap3 + (wrap3 - dstw * BPP);
588         lum += wrap + (wrap - dstw - dstx);
589         cb += dst->linesize[1] - width2 - skip2;
590         cr += dst->linesize[2] - width2 - skip2;
591     }
592     /* handle odd height */
593     if (h) {
594         lum += dstx;
595         cb += skip2;
596         cr += skip2;
597
598         if (dstx & 1) {
599             YUVA_IN(y, u, v, a, p, pal);
600             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
601             cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
602             cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
603             cb++;
604             cr++;
605             lum++;
606             p += BPP;
607         }
608         for (w = dstw - (dstx & 1); w >= 2; w -= 2) {
609             YUVA_IN(y, u, v, a, p, pal);
610             u1 = u;
611             v1 = v;
612             a1 = a;
613             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
614
615             YUVA_IN(y, u, v, a, p + BPP, pal);
616             u1 += u;
617             v1 += v;
618             a1 += a;
619             lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
620             cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u, 1);
621             cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v, 1);
622             cb++;
623             cr++;
624             p += 2 * BPP;
625             lum += 2;
626         }
627         if (w) {
628             YUVA_IN(y, u, v, a, p, pal);
629             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
630             cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
631             cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
632         }
633     }
634 }
635
636 static void free_subpicture(SubPicture *sp)
637 {
638     avsubtitle_free(&sp->sub);
639 }
640
641 static void video_image_display(VideoState *is)
642 {
643     VideoPicture *vp;
644     SubPicture *sp;
645     AVPicture pict;
646     float aspect_ratio;
647     int width, height, x, y;
648     SDL_Rect rect;
649     int i;
650
651     vp = &is->pictq[is->pictq_rindex];
652     if (vp->bmp) {
653 #if CONFIG_AVFILTER
654          if (!vp->sar.num)
655              aspect_ratio = 0;
656          else
657              aspect_ratio = av_q2d(vp->sar);
658 #else
659
660         /* XXX: use the sample aspect ratio stored in the frame instead */
661         if (is->video_st->sample_aspect_ratio.num)
662             aspect_ratio = av_q2d(is->video_st->sample_aspect_ratio);
663         else if (is->video_st->codec->sample_aspect_ratio.num)
664             aspect_ratio = av_q2d(is->video_st->codec->sample_aspect_ratio);
665         else
666             aspect_ratio = 0;
667 #endif
668         if (aspect_ratio <= 0.0)
669             aspect_ratio = 1.0;
670         aspect_ratio *= (float)vp->width / (float)vp->height;
671
672         if (is->subtitle_st)
673         {
674             if (is->subpq_size > 0)
675             {
676                 sp = &is->subpq[is->subpq_rindex];
677
678                 if (vp->pts >= sp->pts + ((float) sp->sub.start_display_time / 1000))
679                 {
680                     SDL_LockYUVOverlay (vp->bmp);
681
682                     pict.data[0] = vp->bmp->pixels[0];
683                     pict.data[1] = vp->bmp->pixels[2];
684                     pict.data[2] = vp->bmp->pixels[1];
685
686                     pict.linesize[0] = vp->bmp->pitches[0];
687                     pict.linesize[1] = vp->bmp->pitches[2];
688                     pict.linesize[2] = vp->bmp->pitches[1];
689
690                     for (i = 0; i < sp->sub.num_rects; i++)
691                         blend_subrect(&pict, sp->sub.rects[i],
692                                       vp->bmp->w, vp->bmp->h);
693
694                     SDL_UnlockYUVOverlay (vp->bmp);
695                 }
696             }
697         }
698
699
700         /* XXX: we assume the screen has a 1.0 pixel aspect ratio */
701         height = is->height;
702         width = ((int)rint(height * aspect_ratio)) & ~1;
703         if (width > is->width) {
704             width = is->width;
705             height = ((int)rint(width / aspect_ratio)) & ~1;
706         }
707         x = (is->width - width) / 2;
708         y = (is->height - height) / 2;
709         is->no_background = 0;
710         rect.x = is->xleft + x;
711         rect.y = is->ytop  + y;
712         rect.w = width;
713         rect.h = height;
714         SDL_DisplayYUVOverlay(vp->bmp, &rect);
715     }
716 }
717
718 /* get the current audio output buffer size, in bytes. With SDL, we
719    cannot get precise buffer fullness information */
720 static int audio_write_get_buf_size(VideoState *is)
721 {
722     return is->audio_buf_size - is->audio_buf_index;
723 }
724
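/* mathematical modulo: unlike C's %, the result is always in [0, b) even
   for negative a (used to wrap indices into the circular sample array) */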
725 static inline int compute_mod(int a, int b)
726 {
727     a = a % b;
728     if (a >= 0)
729         return a;
730     else
731         return a + b;
732 }
733
734 static void video_audio_display(VideoState *s)
735 {
736     int i, i_start, x, y1, y, ys, delay, n, nb_display_channels;
737     int ch, channels, h, h2, bgcolor, fgcolor;
738     int16_t time_diff;
739     int rdft_bits, nb_freq;
740
741     for (rdft_bits = 1; (1 << rdft_bits) < 2 * s->height; rdft_bits++)
742         ;
743     nb_freq = 1 << (rdft_bits - 1);
744
745     /* compute display index : center on currently output samples */
746     channels = s->sdl_channels;
747     nb_display_channels = channels;
748     if (!s->paused) {
749         int data_used = s->show_audio == 1 ? s->width : (2 * nb_freq);
750         n = 2 * channels;
751         delay = audio_write_get_buf_size(s);
752         delay /= n;
753
754         /* to be more precise, we take into account the time spent since
755            the last buffer computation */
756         if (audio_callback_time) {
757             time_diff = av_gettime() - audio_callback_time;
758             delay -= (time_diff * s->sdl_sample_rate) / 1000000;
759         }
760
761         delay += 2 * data_used;
762         if (delay < data_used)
763             delay = data_used;
764
765         i_start= x = compute_mod(s->sample_array_index - delay * channels, SAMPLE_ARRAY_SIZE);
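        /* in waveform mode, scan backwards over up to 1000 array entries for a
           point that looks like a zero crossing with a strong local slope
           (a - d large, b and c of opposite sign) and start the display there,
           so successive frames stay roughly aligned on the waveform */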
766         if (s->show_audio == 1) {
767             h = INT_MIN;
768             for (i = 0; i < 1000; i += channels) {
769                 int idx = (SAMPLE_ARRAY_SIZE + x - i) % SAMPLE_ARRAY_SIZE;
770                 int a = s->sample_array[idx];
771                 int b = s->sample_array[(idx + 4 * channels) % SAMPLE_ARRAY_SIZE];
772                 int c = s->sample_array[(idx + 5 * channels) % SAMPLE_ARRAY_SIZE];
773                 int d = s->sample_array[(idx + 9 * channels) % SAMPLE_ARRAY_SIZE];
774                 int score = a - d;
775                 if (h < score && (b ^ c) < 0) {
776                     h = score;
777                     i_start = idx;
778                 }
779             }
780         }
781
782         s->last_i_start = i_start;
783     } else {
784         i_start = s->last_i_start;
785     }
786
787     bgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0x00);
788     if (s->show_audio == 1) {
789         fill_rectangle(screen,
790                        s->xleft, s->ytop, s->width, s->height,
791                        bgcolor);
792
793         fgcolor = SDL_MapRGB(screen->format, 0xff, 0xff, 0xff);
794
795         /* total height for one channel */
796         h = s->height / nb_display_channels;
797         /* graph height / 2 */
798         h2 = (h * 9) / 20;
799         for (ch = 0; ch < nb_display_channels; ch++) {
800             i = i_start + ch;
801             y1 = s->ytop + ch * h + (h / 2); /* position of center line */
802             for (x = 0; x < s->width; x++) {
803                 y = (s->sample_array[i] * h2) >> 15;
804                 if (y < 0) {
805                     y = -y;
806                     ys = y1 - y;
807                 } else {
808                     ys = y1;
809                 }
810                 fill_rectangle(screen,
811                                s->xleft + x, ys, 1, y,
812                                fgcolor);
813                 i += channels;
814                 if (i >= SAMPLE_ARRAY_SIZE)
815                     i -= SAMPLE_ARRAY_SIZE;
816             }
817         }
818
819         fgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0xff);
820
821         for (ch = 1; ch < nb_display_channels; ch++) {
822             y = s->ytop + ch * h;
823             fill_rectangle(screen,
824                            s->xleft, y, s->width, 1,
825                            fgcolor);
826         }
827         SDL_UpdateRect(screen, s->xleft, s->ytop, s->width, s->height);
828     } else {
829         nb_display_channels= FFMIN(nb_display_channels, 2);
830         if (rdft_bits != s->rdft_bits) {
831             av_rdft_end(s->rdft);
832             av_free(s->rdft_data);
833             s->rdft = av_rdft_init(rdft_bits, DFT_R2C);
834             s->rdft_bits = rdft_bits;
835             s->rdft_data = av_malloc(4 * nb_freq * sizeof(*s->rdft_data));
836         }
837         {
838             FFTSample *data[2];
839             for (ch = 0; ch < nb_display_channels; ch++) {
840                 data[ch] = s->rdft_data + 2 * nb_freq * ch;
841                 i = i_start + ch;
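                /* copy one channel into the FFT input, applying a Welch
                   (parabolic) window: w runs from -1 to 1 across the buffer,
                   so the weight 1 - w*w fades the ends to zero */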
842                 for (x = 0; x < 2 * nb_freq; x++) {
843                     double w = (x-nb_freq) * (1.0 / nb_freq);
844                     data[ch][x] = s->sample_array[i] * (1.0 - w * w);
845                     i += channels;
846                     if (i >= SAMPLE_ARRAY_SIZE)
847                         i -= SAMPLE_ARRAY_SIZE;
848                 }
849                 av_rdft_calc(s->rdft, data[ch]);
850             }
851             // not the most efficient way to do this; we could access the data directly, but it's more than fast enough
852             for (y = 0; y < s->height; y++) {
853                 double w = 1 / sqrt(nb_freq);
854                 int a = sqrt(w * sqrt(data[0][2 * y + 0] * data[0][2 * y + 0] + data[0][2 * y + 1] * data[0][2 * y + 1]));
855                 int b = (nb_display_channels == 2 ) ? sqrt(w * sqrt(data[1][2 * y + 0] * data[1][2 * y + 0]
856                        + data[1][2 * y + 1] * data[1][2 * y + 1])) : a;
857                 a = FFMIN(a, 255);
858                 b = FFMIN(b, 255);
859                 fgcolor = SDL_MapRGB(screen->format, a, b, (a + b) / 2);
860
861                 fill_rectangle(screen,
862                             s->xpos, s->height-y, 1, 1,
863                             fgcolor);
864             }
865         }
866         SDL_UpdateRect(screen, s->xpos, s->ytop, 1, s->height);
867         s->xpos++;
868         if (s->xpos >= s->width)
869             s->xpos= s->xleft;
870     }
871 }
872
873 static int video_open(VideoState *is)
874 {
875     int flags = SDL_HWSURFACE | SDL_ASYNCBLIT | SDL_HWACCEL;
876     int w,h;
877
878     if (is_full_screen) flags |= SDL_FULLSCREEN;
879     else                flags |= SDL_RESIZABLE;
880
881     if (is_full_screen && fs_screen_width) {
882         w = fs_screen_width;
883         h = fs_screen_height;
884     } else if (!is_full_screen && screen_width) {
885         w = screen_width;
886         h = screen_height;
887 #if CONFIG_AVFILTER
888     } else if (is->out_video_filter && is->out_video_filter->inputs[0]) {
889         w = is->out_video_filter->inputs[0]->w;
890         h = is->out_video_filter->inputs[0]->h;
891 #else
892     } else if (is->video_st && is->video_st->codec->width) {
893         w = is->video_st->codec->width;
894         h = is->video_st->codec->height;
895 #endif
896     } else {
897         w = 640;
898         h = 480;
899     }
900     if (screen && is->width == screen->w && screen->w == w
901        && is->height== screen->h && screen->h == h)
902         return 0;
903
904 #if defined(__APPLE__) && !SDL_VERSION_ATLEAST(1, 2, 14)
905     /* setting bits_per_pixel = 0 or 32 causes blank video on OS X and older SDL */
906     screen = SDL_SetVideoMode(w, h, 24, flags);
907 #else
908     screen = SDL_SetVideoMode(w, h, 0, flags);
909 #endif
910     if (!screen) {
911         fprintf(stderr, "SDL: could not set video mode - exiting\n");
912         return -1;
913     }
914     if (!window_title)
915         window_title = input_filename;
916     SDL_WM_SetCaption(window_title, window_title);
917
918     is->width  = screen->w;
919     is->height = screen->h;
920
921     return 0;
922 }
923
924 /* display the current picture, if any */
925 static void video_display(VideoState *is)
926 {
927     if (!screen)
928         video_open(cur_stream);
929     if (is->audio_st && is->show_audio)
930         video_audio_display(is);
931     else if (is->video_st)
932         video_image_display(is);
933 }
934
935 static int refresh_thread(void *opaque)
936 {
937     VideoState *is= opaque;
938     while (!is->abort_request) {
939         SDL_Event event;
940         event.type = FF_REFRESH_EVENT;
941         event.user.data1 = opaque;
942         if (!is->refresh) {
943             is->refresh = 1;
944             SDL_PushEvent(&event);
945         }
946         av_usleep(is->audio_st && is->show_audio ? rdftspeed * 1000 : 5000); // FIXME: ideally we should wait for the correct time, but SDL's event passing is too slow for that
947     }
948     return 0;
949 }
950
951 /* get the current audio clock value */
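/* The clock is derived from is->audio_clock minus the playback time of the
   data still sitting in the output buffer (hw_buf_size / bytes_per_sec).
   For example, 8192 buffered bytes of 44.1 kHz stereo s16 (176400 bytes/s)
   put the returned clock about 46 ms behind is->audio_clock. */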
952 static double get_audio_clock(VideoState *is)
953 {
954     double pts;
955     int hw_buf_size, bytes_per_sec;
956     pts = is->audio_clock;
957     hw_buf_size = audio_write_get_buf_size(is);
958     bytes_per_sec = 0;
959     if (is->audio_st) {
960         bytes_per_sec = is->sdl_sample_rate * is->sdl_channels *
961                         av_get_bytes_per_sample(is->sdl_sample_fmt);
962     }
963     if (bytes_per_sec)
964         pts -= (double)hw_buf_size / bytes_per_sec;
965     return pts;
966 }
967
968 /* get the current video clock value */
969 static double get_video_clock(VideoState *is)
970 {
971     if (is->paused) {
972         return is->video_current_pts;
973     } else {
974         return is->video_current_pts_drift + av_gettime() / 1000000.0;
975     }
976 }
977
978 /* get the current external clock value */
979 static double get_external_clock(VideoState *is)
980 {
981     int64_t ti;
982     ti = av_gettime();
983     return is->external_clock + ((ti - is->external_clock_time) * 1e-6);
984 }
985
986 /* get the current master clock value */
987 static double get_master_clock(VideoState *is)
988 {
989     double val;
990
991     if (is->av_sync_type == AV_SYNC_VIDEO_MASTER) {
992         if (is->video_st)
993             val = get_video_clock(is);
994         else
995             val = get_audio_clock(is);
996     } else if (is->av_sync_type == AV_SYNC_AUDIO_MASTER) {
997         if (is->audio_st)
998             val = get_audio_clock(is);
999         else
1000             val = get_video_clock(is);
1001     } else {
1002         val = get_external_clock(is);
1003     }
1004     return val;
1005 }
1006
1007 /* seek in the stream */
1008 static void stream_seek(VideoState *is, int64_t pos, int64_t rel, int seek_by_bytes)
1009 {
1010     if (!is->seek_req) {
1011         is->seek_pos = pos;
1012         is->seek_rel = rel;
1013         is->seek_flags &= ~AVSEEK_FLAG_BYTE;
1014         if (seek_by_bytes)
1015             is->seek_flags |= AVSEEK_FLAG_BYTE;
1016         is->seek_req = 1;
1017     }
1018 }
1019
1020 /* pause or resume the video */
1021 static void stream_pause(VideoState *is)
1022 {
1023     if (is->paused) {
1024         is->frame_timer += av_gettime() / 1000000.0 + is->video_current_pts_drift - is->video_current_pts;
1025         if (is->read_pause_return != AVERROR(ENOSYS)) {
1026             is->video_current_pts = is->video_current_pts_drift + av_gettime() / 1000000.0;
1027         }
1028         is->video_current_pts_drift = is->video_current_pts - av_gettime() / 1000000.0;
1029     }
1030     is->paused = !is->paused;
1031 }
1032
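/* Compute the absolute time (is->frame_timer) at which the frame with pts
 * frame_current_pts should be shown.  The nominal delay is the pts difference
 * to the previous frame; when video is not the master clock, the delay is
 * doubled if video runs ahead of the master by more than the sync threshold,
 * or forced to 0 if it lags behind, so display catches up or slows down
 * smoothly. */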
1033 static double compute_target_time(double frame_current_pts, VideoState *is)
1034 {
1035     double delay, sync_threshold, diff;
1036
1037     /* compute nominal delay */
1038     delay = frame_current_pts - is->frame_last_pts;
1039     if (delay <= 0 || delay >= 10.0) {
1040         /* if incorrect delay, use previous one */
1041         delay = is->frame_last_delay;
1042     } else {
1043         is->frame_last_delay = delay;
1044     }
1045     is->frame_last_pts = frame_current_pts;
1046
1047     /* update delay to follow master synchronisation source */
1048     if (((is->av_sync_type == AV_SYNC_AUDIO_MASTER && is->audio_st) ||
1049          is->av_sync_type == AV_SYNC_EXTERNAL_CLOCK)) {
1050         /* if video is slave, we try to correct big delays by
1051            duplicating or deleting a frame */
1052         diff = get_video_clock(is) - get_master_clock(is);
1053
1054         /* skip or repeat frame. We take into account the
1055            delay to compute the threshold. I still don't know
1056            if it is the best guess */
1057         sync_threshold = FFMAX(AV_SYNC_THRESHOLD, delay);
1058         if (fabs(diff) < AV_NOSYNC_THRESHOLD) {
1059             if (diff <= -sync_threshold)
1060                 delay = 0;
1061             else if (diff >= sync_threshold)
1062                 delay = 2 * delay;
1063         }
1064     }
1065     is->frame_timer += delay;
1066
1067     av_dlog(NULL, "video: delay=%0.3f pts=%0.3f A-V=%f\n",
1068             delay, frame_current_pts, -diff);
1069
1070     return is->frame_timer;
1071 }
1072
1073 /* called to display each frame */
1074 static void video_refresh_timer(void *opaque)
1075 {
1076     VideoState *is = opaque;
1077     VideoPicture *vp;
1078
1079     SubPicture *sp, *sp2;
1080
1081     if (is->video_st) {
1082 retry:
1083         if (is->pictq_size == 0) {
1084             // nothing to do, no picture to display in the queue
1085         } else {
1086             double time = av_gettime() / 1000000.0;
1087             double next_target;
1088             /* dequeue the picture */
1089             vp = &is->pictq[is->pictq_rindex];
1090
1091             if (time < vp->target_clock)
1092                 return;
1093             /* update current video pts */
1094             is->video_current_pts = vp->pts;
1095             is->video_current_pts_drift = is->video_current_pts - time;
1096             is->video_current_pos = vp->pos;
1097             if (is->pictq_size > 1) {
1098                 VideoPicture *nextvp = &is->pictq[(is->pictq_rindex + 1) % VIDEO_PICTURE_QUEUE_SIZE];
1099                 assert(nextvp->target_clock >= vp->target_clock);
1100                 next_target= nextvp->target_clock;
1101             } else {
1102                 next_target = vp->target_clock + is->video_clock - vp->pts; // FIXME pass durations cleanly
1103             }
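            /* frame dropping feedback loop: every late frame grows
               is->skip_frames by FRAME_SKIP_FACTOR (5%), which makes
               get_video_frame() discard more decoded frames, while
               queue_picture() shrinks it again when the picture queue is
               already full, i.e. decoding is keeping ahead of display */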
1104             if (framedrop && time > next_target) {
1105                 is->skip_frames *= 1.0 + FRAME_SKIP_FACTOR;
1106                 if (is->pictq_size > 1 || time > next_target + 0.5) {
1107                     /* update queue size and signal for next picture */
1108                     if (++is->pictq_rindex == VIDEO_PICTURE_QUEUE_SIZE)
1109                         is->pictq_rindex = 0;
1110
1111                     SDL_LockMutex(is->pictq_mutex);
1112                     is->pictq_size--;
1113                     SDL_CondSignal(is->pictq_cond);
1114                     SDL_UnlockMutex(is->pictq_mutex);
1115                     goto retry;
1116                 }
1117             }
1118
1119             if (is->subtitle_st) {
1120                 if (is->subtitle_stream_changed) {
1121                     SDL_LockMutex(is->subpq_mutex);
1122
1123                     while (is->subpq_size) {
1124                         free_subpicture(&is->subpq[is->subpq_rindex]);
1125
1126                         /* update queue size and signal for next picture */
1127                         if (++is->subpq_rindex == SUBPICTURE_QUEUE_SIZE)
1128                             is->subpq_rindex = 0;
1129
1130                         is->subpq_size--;
1131                     }
1132                     is->subtitle_stream_changed = 0;
1133
1134                     SDL_CondSignal(is->subpq_cond);
1135                     SDL_UnlockMutex(is->subpq_mutex);
1136                 } else {
1137                     if (is->subpq_size > 0) {
1138                         sp = &is->subpq[is->subpq_rindex];
1139
1140                         if (is->subpq_size > 1)
1141                             sp2 = &is->subpq[(is->subpq_rindex + 1) % SUBPICTURE_QUEUE_SIZE];
1142                         else
1143                             sp2 = NULL;
1144
1145                         if ((is->video_current_pts > (sp->pts + ((float) sp->sub.end_display_time / 1000)))
1146                                 || (sp2 && is->video_current_pts > (sp2->pts + ((float) sp2->sub.start_display_time / 1000))))
1147                         {
1148                             free_subpicture(sp);
1149
1150                             /* update queue size and signal for next picture */
1151                             if (++is->subpq_rindex == SUBPICTURE_QUEUE_SIZE)
1152                                 is->subpq_rindex = 0;
1153
1154                             SDL_LockMutex(is->subpq_mutex);
1155                             is->subpq_size--;
1156                             SDL_CondSignal(is->subpq_cond);
1157                             SDL_UnlockMutex(is->subpq_mutex);
1158                         }
1159                     }
1160                 }
1161             }
1162
1163             /* display picture */
1164             if (!display_disable)
1165                 video_display(is);
1166
1167             /* update queue size and signal for next picture */
1168             if (++is->pictq_rindex == VIDEO_PICTURE_QUEUE_SIZE)
1169                 is->pictq_rindex = 0;
1170
1171             SDL_LockMutex(is->pictq_mutex);
1172             is->pictq_size--;
1173             SDL_CondSignal(is->pictq_cond);
1174             SDL_UnlockMutex(is->pictq_mutex);
1175         }
1176     } else if (is->audio_st) {
1177         /* draw the next audio frame */
1178
1179         /* if there is only an audio stream, then display the audio bars
1180            (better than nothing, just to test the implementation) */
1181
1182         /* display picture */
1183         if (!display_disable)
1184             video_display(is);
1185     }
1186     if (show_status) {
1187         static int64_t last_time;
1188         int64_t cur_time;
1189         int aqsize, vqsize, sqsize;
1190         double av_diff;
1191
1192         cur_time = av_gettime();
1193         if (!last_time || (cur_time - last_time) >= 30000) {
1194             aqsize = 0;
1195             vqsize = 0;
1196             sqsize = 0;
1197             if (is->audio_st)
1198                 aqsize = is->audioq.size;
1199             if (is->video_st)
1200                 vqsize = is->videoq.size;
1201             if (is->subtitle_st)
1202                 sqsize = is->subtitleq.size;
1203             av_diff = 0;
1204             if (is->audio_st && is->video_st)
1205                 av_diff = get_audio_clock(is) - get_video_clock(is);
1206             printf("%7.2f A-V:%7.3f s:%3.1f aq=%5dKB vq=%5dKB sq=%5dB f=%"PRId64"/%"PRId64"   \r",
1207                    get_master_clock(is), av_diff, FFMAX(is->skip_frames - 1, 0), aqsize / 1024,
1208                    vqsize / 1024, sqsize, is->pts_ctx.num_faulty_dts, is->pts_ctx.num_faulty_pts);
1209             fflush(stdout);
1210             last_time = cur_time;
1211         }
1212     }
1213 }
1214
1215 static void stream_close(VideoState *is)
1216 {
1217     VideoPicture *vp;
1218     int i;
1219     /* XXX: use a special url_shutdown call to abort parse cleanly */
1220     is->abort_request = 1;
1221     SDL_WaitThread(is->parse_tid, NULL);
1222     SDL_WaitThread(is->refresh_tid, NULL);
1223
1224     /* free all pictures */
1225     for (i = 0; i < VIDEO_PICTURE_QUEUE_SIZE; i++) {
1226         vp = &is->pictq[i];
1227         if (vp->bmp) {
1228             SDL_FreeYUVOverlay(vp->bmp);
1229             vp->bmp = NULL;
1230         }
1231     }
1232     SDL_DestroyMutex(is->pictq_mutex);
1233     SDL_DestroyCond(is->pictq_cond);
1234     SDL_DestroyMutex(is->subpq_mutex);
1235     SDL_DestroyCond(is->subpq_cond);
1236 #if !CONFIG_AVFILTER
1237     if (is->img_convert_ctx)
1238         sws_freeContext(is->img_convert_ctx);
1239 #endif
1240     av_free(is);
1241 }
1242
1243 static void do_exit(void)
1244 {
1245     if (cur_stream) {
1246         stream_close(cur_stream);
1247         cur_stream = NULL;
1248     }
1249     uninit_opts();
1250 #if CONFIG_AVFILTER
1251     avfilter_uninit();
1252 #endif
1253     avformat_network_deinit();
1254     if (show_status)
1255         printf("\n");
1256     SDL_Quit();
1257     av_log(NULL, AV_LOG_QUIET, "");
1258     exit(0);
1259 }
1260
1261 /* allocate a picture (this must be done in the main thread to avoid
1262    potential locking problems) */
1263 static void alloc_picture(void *opaque)
1264 {
1265     VideoState *is = opaque;
1266     VideoPicture *vp;
1267
1268     vp = &is->pictq[is->pictq_windex];
1269
1270     if (vp->bmp)
1271         SDL_FreeYUVOverlay(vp->bmp);
1272
1273 #if CONFIG_AVFILTER
1274     vp->width   = is->out_video_filter->inputs[0]->w;
1275     vp->height  = is->out_video_filter->inputs[0]->h;
1276     vp->pix_fmt = is->out_video_filter->inputs[0]->format;
1277 #else
1278     vp->width   = is->video_st->codec->width;
1279     vp->height  = is->video_st->codec->height;
1280     vp->pix_fmt = is->video_st->codec->pix_fmt;
1281 #endif
1282
1283     vp->bmp = SDL_CreateYUVOverlay(vp->width, vp->height,
1284                                    SDL_YV12_OVERLAY,
1285                                    screen);
1286     if (!vp->bmp || vp->bmp->pitches[0] < vp->width) {
1287         /* SDL allocates a buffer smaller than requested if the video
1288          * overlay hardware is unable to support the requested size. */
1289         fprintf(stderr, "Error: the video system does not support an image\n"
1290                         "size of %dx%d pixels. Try using -vf \"scale=w:h\"\n"
1291                         "to reduce the image size.\n", vp->width, vp->height );
1292         do_exit();
1293     }
1294
1295     SDL_LockMutex(is->pictq_mutex);
1296     vp->allocated = 1;
1297     SDL_CondSignal(is->pictq_cond);
1298     SDL_UnlockMutex(is->pictq_mutex);
1299 }
1300
1301 /* The 'pts' parameter is the dts of the packet / pts of the frame and
1302  * guessed if not known. */
1303 static int queue_picture(VideoState *is, AVFrame *src_frame, double pts, int64_t pos)
1304 {
1305     VideoPicture *vp;
1306 #if CONFIG_AVFILTER
1307     AVPicture pict_src;
1308 #else
1309     int dst_pix_fmt = AV_PIX_FMT_YUV420P;
1310 #endif
1311     /* wait until we have space to put a new picture */
1312     SDL_LockMutex(is->pictq_mutex);
1313
1314     if (is->pictq_size >= VIDEO_PICTURE_QUEUE_SIZE && !is->refresh)
1315         is->skip_frames = FFMAX(1.0 - FRAME_SKIP_FACTOR, is->skip_frames * (1.0 - FRAME_SKIP_FACTOR));
1316
1317     while (is->pictq_size >= VIDEO_PICTURE_QUEUE_SIZE &&
1318            !is->videoq.abort_request) {
1319         SDL_CondWait(is->pictq_cond, is->pictq_mutex);
1320     }
1321     SDL_UnlockMutex(is->pictq_mutex);
1322
1323     if (is->videoq.abort_request)
1324         return -1;
1325
1326     vp = &is->pictq[is->pictq_windex];
1327
1328     /* alloc or resize hardware picture buffer */
1329     if (!vp->bmp || vp->reallocate ||
1330 #if CONFIG_AVFILTER
1331         vp->width  != is->out_video_filter->inputs[0]->w ||
1332         vp->height != is->out_video_filter->inputs[0]->h) {
1333 #else
1334         vp->width != is->video_st->codec->width ||
1335         vp->height != is->video_st->codec->height) {
1336 #endif
1337         SDL_Event event;
1338
1339         vp->allocated  = 0;
1340         vp->reallocate = 0;
1341
1342         /* the allocation must be done in the main thread to avoid
1343            locking problems */
1344         event.type = FF_ALLOC_EVENT;
1345         event.user.data1 = is;
1346         SDL_PushEvent(&event);
1347
1348         /* wait until the picture is allocated */
1349         SDL_LockMutex(is->pictq_mutex);
1350         while (!vp->allocated && !is->videoq.abort_request) {
1351             SDL_CondWait(is->pictq_cond, is->pictq_mutex);
1352         }
1353         SDL_UnlockMutex(is->pictq_mutex);
1354
1355         if (is->videoq.abort_request)
1356             return -1;
1357     }
1358
1359     /* if the frame is not skipped, then display it */
1360     if (vp->bmp) {
1361         AVPicture pict = { { 0 } };
1362
1363         /* get a pointer on the bitmap */
1364         SDL_LockYUVOverlay (vp->bmp);
1365
1366         pict.data[0] = vp->bmp->pixels[0];
1367         pict.data[1] = vp->bmp->pixels[2];
1368         pict.data[2] = vp->bmp->pixels[1];
1369
1370         pict.linesize[0] = vp->bmp->pitches[0];
1371         pict.linesize[1] = vp->bmp->pitches[2];
1372         pict.linesize[2] = vp->bmp->pitches[1];
1373
1374 #if CONFIG_AVFILTER
1375         pict_src.data[0] = src_frame->data[0];
1376         pict_src.data[1] = src_frame->data[1];
1377         pict_src.data[2] = src_frame->data[2];
1378
1379         pict_src.linesize[0] = src_frame->linesize[0];
1380         pict_src.linesize[1] = src_frame->linesize[1];
1381         pict_src.linesize[2] = src_frame->linesize[2];
1382
1383         // FIXME use direct rendering
1384         av_picture_copy(&pict, &pict_src,
1385                         vp->pix_fmt, vp->width, vp->height);
1386 #else
1387         av_opt_get_int(sws_opts, "sws_flags", 0, &sws_flags);
1388         is->img_convert_ctx = sws_getCachedContext(is->img_convert_ctx,
1389             vp->width, vp->height, vp->pix_fmt, vp->width, vp->height,
1390             dst_pix_fmt, sws_flags, NULL, NULL, NULL);
1391         if (is->img_convert_ctx == NULL) {
1392             fprintf(stderr, "Cannot initialize the conversion context\n");
1393             exit(1);
1394         }
1395         sws_scale(is->img_convert_ctx, src_frame->data, src_frame->linesize,
1396                   0, vp->height, pict.data, pict.linesize);
1397 #endif
1398         /* update the bitmap content */
1399         SDL_UnlockYUVOverlay(vp->bmp);
1400
1401         vp->pts = pts;
1402         vp->pos = pos;
1403
1404         /* now we can update the picture count */
1405         if (++is->pictq_windex == VIDEO_PICTURE_QUEUE_SIZE)
1406             is->pictq_windex = 0;
1407         SDL_LockMutex(is->pictq_mutex);
1408         vp->target_clock = compute_target_time(vp->pts, is);
1409
1410         is->pictq_size++;
1411         SDL_UnlockMutex(is->pictq_mutex);
1412     }
1413     return 0;
1414 }
1415
1416 /* Compute the exact PTS for the picture if it is omitted in the stream.
1417  * The 'pts1' parameter is the dts of the packet / pts of the frame. */
1418 static int output_picture2(VideoState *is, AVFrame *src_frame, double pts1, int64_t pos)
1419 {
1420     double frame_delay, pts;
1421     int ret;
1422
1423     pts = pts1;
1424
1425     if (pts != 0) {
1426         /* update video clock with pts, if present */
1427         is->video_clock = pts;
1428     } else {
1429         pts = is->video_clock;
1430     }
1431     /* update video clock for next frame */
1432     frame_delay = av_q2d(is->video_st->codec->time_base);
1433     /* for MPEG2, the frame can be repeated, so we update the
1434        clock accordingly */
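    /* e.g. repeat_pict == 1 (one repeated field, typical 3:2 pulldown)
       stretches this frame to 1.5 nominal frame durations */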
1435     frame_delay += src_frame->repeat_pict * (frame_delay * 0.5);
1436     is->video_clock += frame_delay;
1437
1438     ret = queue_picture(is, src_frame, pts, pos);
1439     av_frame_unref(src_frame);
1440     return ret;
1441 }
1442
1443 static int get_video_frame(VideoState *is, AVFrame *frame, int64_t *pts, AVPacket *pkt)
1444 {
1445     int got_picture, i;
1446
1447     if (packet_queue_get(&is->videoq, pkt, 1) < 0)
1448         return -1;
1449
1450     if (pkt->data == flush_pkt.data) {
1451         avcodec_flush_buffers(is->video_st->codec);
1452
1453         SDL_LockMutex(is->pictq_mutex);
1454         // Make sure there are no long-delay timers (ideally we should just flush the queue, but that's harder)
1455         for (i = 0; i < VIDEO_PICTURE_QUEUE_SIZE; i++) {
1456             is->pictq[i].target_clock= 0;
1457         }
1458         while (is->pictq_size && !is->videoq.abort_request) {
1459             SDL_CondWait(is->pictq_cond, is->pictq_mutex);
1460         }
1461         is->video_current_pos = -1;
1462         SDL_UnlockMutex(is->pictq_mutex);
1463
1464         init_pts_correction(&is->pts_ctx);
1465         is->frame_last_pts = AV_NOPTS_VALUE;
1466         is->frame_last_delay = 0;
1467         is->frame_timer = (double)av_gettime() / 1000000.0;
1468         is->skip_frames = 1;
1469         is->skip_frames_index = 0;
1470         return 0;
1471     }
1472
1473     avcodec_decode_video2(is->video_st->codec, frame, &got_picture, pkt);
1474
1475     if (got_picture) {
1476         if (decoder_reorder_pts == -1) {
1477             *pts = guess_correct_pts(&is->pts_ctx, frame->pkt_pts, frame->pkt_dts);
1478         } else if (decoder_reorder_pts) {
1479             *pts = frame->pkt_pts;
1480         } else {
1481             *pts = frame->pkt_dts;
1482         }
1483
1484         if (*pts == AV_NOPTS_VALUE) {
1485             *pts = 0;
1486         }
1487         if (is->video_st->sample_aspect_ratio.num) {
1488             frame->sample_aspect_ratio = is->video_st->sample_aspect_ratio;
1489         }
1490
1491         is->skip_frames_index += 1;
1492         if (is->skip_frames_index >= is->skip_frames) {
1493             is->skip_frames_index -= FFMAX(is->skip_frames, 1.0);
1494             return 1;
1495         }
1496         av_frame_unref(frame);
1497     }
1498     return 0;
1499 }
1500
1501 #if CONFIG_AVFILTER
1502 static int configure_video_filters(AVFilterGraph *graph, VideoState *is, const char *vfilters)
1503 {
1504     char sws_flags_str[128];
1505     char buffersrc_args[256];
1506     int ret;
1507     AVFilterContext *filt_src = NULL, *filt_out = NULL, *filt_format;
1508     AVCodecContext *codec = is->video_st->codec;
1509
1510     snprintf(sws_flags_str, sizeof(sws_flags_str), "flags=%"PRId64, sws_flags);
1511     graph->scale_sws_opts = av_strdup(sws_flags_str);
1512
1513     snprintf(buffersrc_args, sizeof(buffersrc_args), "%d:%d:%d:%d:%d:%d:%d",
1514              codec->width, codec->height, codec->pix_fmt,
1515              is->video_st->time_base.num, is->video_st->time_base.den,
1516              codec->sample_aspect_ratio.num, codec->sample_aspect_ratio.den);
1517
1518
1519     if ((ret = avfilter_graph_create_filter(&filt_src,
1520                                             avfilter_get_by_name("buffer"),
1521                                             "src", buffersrc_args, NULL,
1522                                             graph)) < 0)
1523         return ret;
1524     if ((ret = avfilter_graph_create_filter(&filt_out,
1525                                             avfilter_get_by_name("buffersink"),
1526                                             "out", NULL, NULL, graph)) < 0)
1527         return ret;
1528
1529     if ((ret = avfilter_graph_create_filter(&filt_format,
1530                                             avfilter_get_by_name("format"),
1531                                             "format", "yuv420p", NULL, graph)) < 0)
1532         return ret;
1533     if ((ret = avfilter_link(filt_format, 0, filt_out, 0)) < 0)
1534         return ret;
1535
1536
1537     if (vfilters) {
1538         AVFilterInOut *outputs = avfilter_inout_alloc();
1539         AVFilterInOut *inputs  = avfilter_inout_alloc();
1540
1541         outputs->name    = av_strdup("in");
1542         outputs->filter_ctx = filt_src;
1543         outputs->pad_idx = 0;
1544         outputs->next    = NULL;
1545
1546         inputs->name    = av_strdup("out");
1547         inputs->filter_ctx = filt_format;
1548         inputs->pad_idx = 0;
1549         inputs->next    = NULL;
1550
1551         if ((ret = avfilter_graph_parse(graph, vfilters, inputs, outputs, NULL)) < 0)
1552             return ret;
1553     } else {
1554         if ((ret = avfilter_link(filt_src, 0, filt_format, 0)) < 0)
1555             return ret;
1556     }
1557
1558     if ((ret = avfilter_graph_config(graph, NULL)) < 0)
1559         return ret;
1560
1561     is->in_video_filter  = filt_src;
1562     is->out_video_filter = filt_out;
1563
1564     return ret;
1565 }
1566
1567 #endif  /* CONFIG_AVFILTER */
1568
1569 static int video_thread(void *arg)
1570 {
1571     AVPacket pkt = { 0 };
1572     VideoState *is = arg;
1573     AVFrame *frame = av_frame_alloc();
1574     int64_t pts_int;
1575     double pts;
1576     int ret;
1577
1578 #if CONFIG_AVFILTER
1579     AVFilterGraph *graph = avfilter_graph_alloc();
1580     AVFilterContext *filt_out = NULL, *filt_in = NULL;
1581     int last_w = is->video_st->codec->width;
1582     int last_h = is->video_st->codec->height;
1583
1584     if ((ret = configure_video_filters(graph, is, vfilters)) < 0)
1585         goto the_end;
1586     filt_in  = is->in_video_filter;
1587     filt_out = is->out_video_filter;
1588 #endif
1589
1590     for (;;) {
1591 #if CONFIG_AVFILTER
1592         AVRational tb;
1593 #endif
1594         while (is->paused && !is->videoq.abort_request)
1595             SDL_Delay(10);
1596
1597         av_free_packet(&pkt);
1598
1599         ret = get_video_frame(is, frame, &pts_int, &pkt);
1600         if (ret < 0)
1601             goto the_end;
1602
1603         if (!ret)
1604             continue;
1605
1606 #if CONFIG_AVFILTER
1607         if (   last_w != is->video_st->codec->width
1608             || last_h != is->video_st->codec->height) {
1609             av_dlog(NULL, "Changing size %dx%d -> %dx%d\n", last_w, last_h,
1610                     is->video_st->codec->width, is->video_st->codec->height);
1611             avfilter_graph_free(&graph);
1612             graph = avfilter_graph_alloc();
1613             if ((ret = configure_video_filters(graph, is, vfilters)) < 0)
1614                 goto the_end;
1615             filt_in  = is->in_video_filter;
1616             filt_out = is->out_video_filter;
1617             last_w = is->video_st->codec->width;
1618             last_h = is->video_st->codec->height;
1619         }
1620
1621         frame->pts = pts_int;
1622         ret = av_buffersrc_add_frame(filt_in, frame);
1623         if (ret < 0)
1624             goto the_end;
1625
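             /* pull every frame the filter graph has ready; av_buffersink_get_frame()
                fails (e.g. with AVERROR(EAGAIN)) once more input is needed */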
1626         while (ret >= 0) {
1627             ret = av_buffersink_get_frame(filt_out, frame);
1628             if (ret < 0) {
1629                 ret = 0;
1630                 break;
1631             }
1632
1633             pts_int = frame->pts;
1634             tb      = filt_out->inputs[0]->time_base;
1635             if (av_cmp_q(tb, is->video_st->time_base)) {
1636                 av_unused int64_t pts1 = pts_int;
1637                 pts_int = av_rescale_q(pts_int, tb, is->video_st->time_base);
1638                 av_dlog(NULL, "video_thread(): "
1639                         "tb:%d/%d pts:%"PRId64" -> tb:%d/%d pts:%"PRId64"\n",
1640                         tb.num, tb.den, pts1,
1641                         is->video_st->time_base.num, is->video_st->time_base.den, pts_int);
1642             }
1643             pts = pts_int * av_q2d(is->video_st->time_base);
1644             ret = output_picture2(is, frame, pts, 0);
1645         }
1646 #else
1647         pts = pts_int * av_q2d(is->video_st->time_base);
1648         ret = output_picture2(is, frame, pts,  pkt.pos);
1649 #endif
1650
1651         if (ret < 0)
1652             goto the_end;
1653
1654
1655         if (step)
1656             if (cur_stream)
1657                 stream_pause(cur_stream);
1658     }
1659  the_end:
1660 #if CONFIG_AVFILTER
1661     av_freep(&vfilters);
1662     avfilter_graph_free(&graph);
1663 #endif
1664     av_free_packet(&pkt);
1665     av_frame_free(&frame);
1666     return 0;
1667 }
1668
1669 static int subtitle_thread(void *arg)
1670 {
1671     VideoState *is = arg;
1672     SubPicture *sp;
1673     AVPacket pkt1, *pkt = &pkt1;
1674     int got_subtitle;
1675     double pts;
1676     int i, j;
1677     int r, g, b, y, u, v, a;
1678
1679     for (;;) {
1680         while (is->paused && !is->subtitleq.abort_request) {
1681             SDL_Delay(10);
1682         }
1683         if (packet_queue_get(&is->subtitleq, pkt, 1) < 0)
1684             break;
1685
1686         if (pkt->data == flush_pkt.data) {
1687             avcodec_flush_buffers(is->subtitle_st->codec);
1688             continue;
1689         }
1690         SDL_LockMutex(is->subpq_mutex);
1691         while (is->subpq_size >= SUBPICTURE_QUEUE_SIZE &&
1692                !is->subtitleq.abort_request) {
1693             SDL_CondWait(is->subpq_cond, is->subpq_mutex);
1694         }
1695         SDL_UnlockMutex(is->subpq_mutex);
1696
1697         if (is->subtitleq.abort_request)
1698             return 0;
1699
1700         sp = &is->subpq[is->subpq_windex];
1701
1702         /* NOTE: pts is the PTS of the _first_ subtitle beginning in
1703            this packet, if any */
1704         pts = 0;
1705         if (pkt->pts != AV_NOPTS_VALUE)
1706             pts = av_q2d(is->subtitle_st->time_base) * pkt->pts;
1707
1708         avcodec_decode_subtitle2(is->subtitle_st->codec, &sp->sub,
1709                                  &got_subtitle, pkt);
1710
1711         if (got_subtitle && sp->sub.format == 0) {
1712             sp->pts = pts;
1713
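                 /* convert the subtitle palette from RGBA to YUVA in place so it
                    can later be blended onto the YUV video picture */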
1714             for (i = 0; i < sp->sub.num_rects; i++)
1715             {
1716                 for (j = 0; j < sp->sub.rects[i]->nb_colors; j++)
1717                 {
1718                     RGBA_IN(r, g, b, a, (uint32_t*)sp->sub.rects[i]->pict.data[1] + j);
1719                     y = RGB_TO_Y_CCIR(r, g, b);
1720                     u = RGB_TO_U_CCIR(r, g, b, 0);
1721                     v = RGB_TO_V_CCIR(r, g, b, 0);
1722                     YUVA_OUT((uint32_t*)sp->sub.rects[i]->pict.data[1] + j, y, u, v, a);
1723                 }
1724             }
1725
1726             /* now we can update the picture count */
1727             if (++is->subpq_windex == SUBPICTURE_QUEUE_SIZE)
1728                 is->subpq_windex = 0;
1729             SDL_LockMutex(is->subpq_mutex);
1730             is->subpq_size++;
1731             SDL_UnlockMutex(is->subpq_mutex);
1732         }
1733         av_free_packet(pkt);
1734     }
1735     return 0;
1736 }
1737
1738 /* copy samples for viewing in the audio visualization display */
1739 static void update_sample_display(VideoState *is, short *samples, int samples_size)
1740 {
1741     int size, len;
1742
1743     size = samples_size / sizeof(short);
1744     while (size > 0) {
1745         len = SAMPLE_ARRAY_SIZE - is->sample_array_index;
1746         if (len > size)
1747             len = size;
1748         memcpy(is->sample_array + is->sample_array_index, samples, len * sizeof(short));
1749         samples += len;
1750         is->sample_array_index += len;
1751         if (is->sample_array_index >= SAMPLE_ARRAY_SIZE)
1752             is->sample_array_index = 0;
1753         size -= len;
1754     }
1755 }
1756
1757 /* return the new audio buffer size (samples can be added or deleted
1758    to get better sync if the video or external clock is the master) */
1759 static int synchronize_audio(VideoState *is, short *samples,
1760                              int samples_size1, double pts)
1761 {
1762     int n, samples_size;
1763     double ref_clock;
1764
1765     n = is->sdl_channels * av_get_bytes_per_sample(is->sdl_sample_fmt);
1766     samples_size = samples_size1;
1767
1768     /* if not master, then we try to remove or add samples to correct the clock */
1769     if (((is->av_sync_type == AV_SYNC_VIDEO_MASTER && is->video_st) ||
1770          is->av_sync_type == AV_SYNC_EXTERNAL_CLOCK)) {
1771         double diff, avg_diff;
1772         int wanted_size, min_size, max_size, nb_samples;
1773
1774         ref_clock = get_master_clock(is);
1775         diff = get_audio_clock(is) - ref_clock;
1776
1777         if (diff < AV_NOSYNC_THRESHOLD) {
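                 /* keep an exponentially weighted running sum of the A-V difference;
                    audio_diff_avg_coef gives older measurements less weight */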
1778             is->audio_diff_cum = diff + is->audio_diff_avg_coef * is->audio_diff_cum;
1779             if (is->audio_diff_avg_count < AUDIO_DIFF_AVG_NB) {
1780                 /* not enough measures to have a correct estimate */
1781                 is->audio_diff_avg_count++;
1782             } else {
1783                 /* estimate the A-V difference */
1784                 avg_diff = is->audio_diff_cum * (1.0 - is->audio_diff_avg_coef);
1785
1786                 if (fabs(avg_diff) >= is->audio_diff_threshold) {
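                         /* translate the clock error (diff, in seconds) into bytes:
                            diff * sample_rate gives samples, times n gives bytes */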
1787                     wanted_size = samples_size + ((int)(diff * is->sdl_sample_rate) * n);
1788                     nb_samples = samples_size / n;
1789
1790                     min_size = ((nb_samples * (100 - SAMPLE_CORRECTION_PERCENT_MAX)) / 100) * n;
1791                     max_size = ((nb_samples * (100 + SAMPLE_CORRECTION_PERCENT_MAX)) / 100) * n;
1792                     if (wanted_size < min_size)
1793                         wanted_size = min_size;
1794                     else if (wanted_size > max_size)
1795                         wanted_size = max_size;
1796
1797                     /* add or remove samples to correct the sync */
1798                     if (wanted_size < samples_size) {
1799                         /* remove samples */
1800                         samples_size = wanted_size;
1801                     } else if (wanted_size > samples_size) {
1802                         uint8_t *samples_end, *q;
1803                         int nb;
1804
1805                         /* add samples */
1806                         nb = (wanted_size - samples_size);
1807                         samples_end = (uint8_t *)samples + samples_size - n;
1808                         q = samples_end + n;
1809                         while (nb > 0) {
1810                             memcpy(q, samples_end, n);
1811                             q += n;
1812                             nb -= n;
1813                         }
1814                         samples_size = wanted_size;
1815                     }
1816                 }
1817                 av_dlog(NULL, "diff=%f adiff=%f sample_diff=%d apts=%0.3f vpts=%0.3f %f\n",
1818                         diff, avg_diff, samples_size - samples_size1,
1819                         is->audio_clock, is->video_clock, is->audio_diff_threshold);
1820             }
1821         } else {
1822             /* the difference is too big: probably initial PTS errors, so
1823                reset the A-V filter */
1824             is->audio_diff_avg_count = 0;
1825             is->audio_diff_cum       = 0;
1826         }
1827     }
1828
1829     return samples_size;
1830 }
1831
1832 /* decode one audio frame and return its uncompressed size */
1833 static int audio_decode_frame(VideoState *is, double *pts_ptr)
1834 {
1835     AVPacket *pkt_temp = &is->audio_pkt_temp;
1836     AVPacket *pkt = &is->audio_pkt;
1837     AVCodecContext *dec = is->audio_st->codec;
1838     int n, len1, data_size, got_frame;
1839     double pts;
1840     int new_packet = 0;
1841     int flush_complete = 0;
1842
1843     for (;;) {
1844         /* NOTE: the audio packet can contain several frames */
1845         while (pkt_temp->size > 0 || (!pkt_temp->data && new_packet)) {
1846             int resample_changed, audio_resample;
1847
1848             if (!is->frame) {
1849                 if (!(is->frame = avcodec_alloc_frame()))
1850                     return AVERROR(ENOMEM);
1851             } else
1852                 avcodec_get_frame_defaults(is->frame);
1853
1854             if (flush_complete)
1855                 break;
1856             new_packet = 0;
1857             len1 = avcodec_decode_audio4(dec, is->frame, &got_frame, pkt_temp);
1858             if (len1 < 0) {
1859                 /* if error, we skip the frame */
1860                 pkt_temp->size = 0;
1861                 break;
1862             }
1863
1864             pkt_temp->data += len1;
1865             pkt_temp->size -= len1;
1866
1867             if (!got_frame) {
1868                 /* stop sending empty packets if the decoder is finished */
1869                 if (!pkt_temp->data && dec->codec->capabilities & CODEC_CAP_DELAY)
1870                     flush_complete = 1;
1871                 continue;
1872             }
1873             data_size = av_samples_get_buffer_size(NULL, dec->channels,
1874                                                    is->frame->nb_samples,
1875                                                    is->frame->format, 1);
1876
1877             audio_resample = is->frame->format         != is->sdl_sample_fmt     ||
1878                              is->frame->channel_layout != is->sdl_channel_layout ||
1879                              is->frame->sample_rate    != is->sdl_sample_rate;
1880
1881             resample_changed = is->frame->format         != is->resample_sample_fmt     ||
1882                                is->frame->channel_layout != is->resample_channel_layout ||
1883                                is->frame->sample_rate    != is->resample_sample_rate;
1884
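                 /* (re)open the resampler whenever the decoded format no longer
                    matches the format the SDL audio device was opened with */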
1885             if ((!is->avr && audio_resample) || resample_changed) {
1886                 int ret;
1887                 if (is->avr)
1888                     avresample_close(is->avr);
1889                 else if (audio_resample) {
1890                     is->avr = avresample_alloc_context();
1891                     if (!is->avr) {
1892                         fprintf(stderr, "error allocating AVAudioResampleContext\n");
1893                         break;
1894                     }
1895                 }
1896                 if (audio_resample) {
1897                     av_opt_set_int(is->avr, "in_channel_layout",  is->frame->channel_layout, 0);
1898                     av_opt_set_int(is->avr, "in_sample_fmt",      is->frame->format,         0);
1899                     av_opt_set_int(is->avr, "in_sample_rate",     is->frame->sample_rate,    0);
1900                     av_opt_set_int(is->avr, "out_channel_layout", is->sdl_channel_layout,    0);
1901                     av_opt_set_int(is->avr, "out_sample_fmt",     is->sdl_sample_fmt,        0);
1902                     av_opt_set_int(is->avr, "out_sample_rate",    is->sdl_sample_rate,       0);
1903
1904                     if ((ret = avresample_open(is->avr)) < 0) {
1905                         fprintf(stderr, "error initializing libavresample\n");
1906                         break;
1907                     }
1908                 }
1909                 is->resample_sample_fmt     = is->frame->format;
1910                 is->resample_channel_layout = is->frame->channel_layout;
1911                 is->resample_sample_rate    = is->frame->sample_rate;
1912             }
1913
1914             if (audio_resample) {
1915                 void *tmp_out;
1916                 int out_samples, out_size, out_linesize;
1917                 int osize      = av_get_bytes_per_sample(is->sdl_sample_fmt);
1918                 int nb_samples = is->frame->nb_samples;
1919
1920                 out_size = av_samples_get_buffer_size(&out_linesize,
1921                                                       is->sdl_channels,
1922                                                       nb_samples,
1923                                                       is->sdl_sample_fmt, 0);
1924                 tmp_out = av_realloc(is->audio_buf1, out_size);
1925                 if (!tmp_out)
1926                     return AVERROR(ENOMEM);
1927                 is->audio_buf1 = tmp_out;
1928
1929                 out_samples = avresample_convert(is->avr,
1930                                                  &is->audio_buf1,
1931                                                  out_linesize, nb_samples,
1932                                                  is->frame->data,
1933                                                  is->frame->linesize[0],
1934                                                  is->frame->nb_samples);
1935                 if (out_samples < 0) {
1936                     fprintf(stderr, "avresample_convert() failed\n");
1937                     break;
1938                 }
1939                 is->audio_buf = is->audio_buf1;
1940                 data_size = out_samples * osize * is->sdl_channels;
1941             } else {
1942                 is->audio_buf = is->frame->data[0];
1943             }
1944
1945             /* use the running audio clock as this frame's pts and advance it */
1946             pts = is->audio_clock;
1947             *pts_ptr = pts;
1948             n = is->sdl_channels * av_get_bytes_per_sample(is->sdl_sample_fmt);
1949             is->audio_clock += (double)data_size /
1950                 (double)(n * is->sdl_sample_rate);
1951 #ifdef DEBUG
1952             {
1953                 static double last_clock;
1954                 printf("audio: delay=%0.3f clock=%0.3f pts=%0.3f\n",
1955                        is->audio_clock - last_clock,
1956                        is->audio_clock, pts);
1957                 last_clock = is->audio_clock;
1958             }
1959 #endif
1960             return data_size;
1961         }
1962
1963         /* free the current packet */
1964         if (pkt->data)
1965             av_free_packet(pkt);
1966         memset(pkt_temp, 0, sizeof(*pkt_temp));
1967
1968         if (is->paused || is->audioq.abort_request) {
1969             return -1;
1970         }
1971
1972         /* read next packet */
1973         if ((new_packet = packet_queue_get(&is->audioq, pkt, 1)) < 0)
1974             return -1;
1975
1976         if (pkt->data == flush_pkt.data) {
1977             avcodec_flush_buffers(dec);
1978             flush_complete = 0;
1979         }
1980
1981         *pkt_temp = *pkt;
1982
1983         /* update the audio clock with the packet pts, if available */
1984         if (pkt->pts != AV_NOPTS_VALUE) {
1985             is->audio_clock = av_q2d(is->audio_st->time_base)*pkt->pts;
1986         }
1987     }
1988 }
1989
1990 /* prepare a new audio buffer */
1991 static void sdl_audio_callback(void *opaque, Uint8 *stream, int len)
1992 {
1993     VideoState *is = opaque;
1994     int audio_size, len1;
1995     double pts;
1996
1997     audio_callback_time = av_gettime();
1998
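         /* keep refilling SDL's buffer: decode a new audio frame whenever our
            own buffer has been fully consumed */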
1999     while (len > 0) {
2000         if (is->audio_buf_index >= is->audio_buf_size) {
2001             audio_size = audio_decode_frame(is, &pts);
2002             if (audio_size < 0) {
2003                 /* if error, just output silence */
2004                 is->audio_buf      = is->silence_buf;
2005                 is->audio_buf_size = sizeof(is->silence_buf);
2006             } else {
2007                 if (is->show_audio)
2008                     update_sample_display(is, (int16_t *)is->audio_buf, audio_size);
2009                 audio_size = synchronize_audio(is, (int16_t *)is->audio_buf, audio_size,
2010                                                pts);
2011                 is->audio_buf_size = audio_size;
2012             }
2013             is->audio_buf_index = 0;
2014         }
2015         len1 = is->audio_buf_size - is->audio_buf_index;
2016         if (len1 > len)
2017             len1 = len;
2018         memcpy(stream, (uint8_t *)is->audio_buf + is->audio_buf_index, len1);
2019         len -= len1;
2020         stream += len1;
2021         is->audio_buf_index += len1;
2022     }
2023 }
2024
2025 /* open a given stream. Return 0 if OK */
2026 static int stream_component_open(VideoState *is, int stream_index)
2027 {
2028     AVFormatContext *ic = is->ic;
2029     AVCodecContext *avctx;
2030     AVCodec *codec;
2031     SDL_AudioSpec wanted_spec, spec;
2032     AVDictionary *opts;
2033     AVDictionaryEntry *t = NULL;
2034
2035     if (stream_index < 0 || stream_index >= ic->nb_streams)
2036         return -1;
2037     avctx = ic->streams[stream_index]->codec;
2038
2039     opts = filter_codec_opts(codec_opts, avctx->codec_id, ic, ic->streams[stream_index], NULL);
2040
2041     codec = avcodec_find_decoder(avctx->codec_id);
2042     avctx->debug_mv          = debug_mv;
2043     avctx->workaround_bugs   = workaround_bugs;
2044     avctx->idct_algo         = idct;
2045     avctx->skip_frame        = skip_frame;
2046     avctx->skip_idct         = skip_idct;
2047     avctx->skip_loop_filter  = skip_loop_filter;
2048     avctx->error_concealment = error_concealment;
2049
2050     if (fast)   avctx->flags2 |= CODEC_FLAG2_FAST;
2051
2052     if (!av_dict_get(opts, "threads", NULL, 0))
2053         av_dict_set(&opts, "threads", "auto", 0);
2054     if (avctx->codec_type == AVMEDIA_TYPE_VIDEO)
2055         av_dict_set(&opts, "refcounted_frames", "1", 0);
2056     if (!codec ||
2057         avcodec_open2(avctx, codec, &opts) < 0)
2058         return -1;
2059     if ((t = av_dict_get(opts, "", NULL, AV_DICT_IGNORE_SUFFIX))) {
2060         av_log(NULL, AV_LOG_ERROR, "Option %s not found.\n", t->key);
2061         return AVERROR_OPTION_NOT_FOUND;
2062     }
2063
2064     /* prepare audio output */
2065     if (avctx->codec_type == AVMEDIA_TYPE_AUDIO) {
2066         is->sdl_sample_rate = avctx->sample_rate;
2067
2068         if (!avctx->channel_layout)
2069             avctx->channel_layout = av_get_default_channel_layout(avctx->channels);
2070         if (!avctx->channel_layout) {
2071             fprintf(stderr, "unable to guess channel layout\n");
2072             return -1;
2073         }
2074         if (avctx->channels == 1)
2075             is->sdl_channel_layout = AV_CH_LAYOUT_MONO;
2076         else
2077             is->sdl_channel_layout = AV_CH_LAYOUT_STEREO;
2078         is->sdl_channels = av_get_channel_layout_nb_channels(is->sdl_channel_layout);
2079
2080         wanted_spec.format = AUDIO_S16SYS;
2081         wanted_spec.freq = is->sdl_sample_rate;
2082         wanted_spec.channels = is->sdl_channels;
2083         wanted_spec.silence = 0;
2084         wanted_spec.samples = SDL_AUDIO_BUFFER_SIZE;
2085         wanted_spec.callback = sdl_audio_callback;
2086         wanted_spec.userdata = is;
2087         if (SDL_OpenAudio(&wanted_spec, &spec) < 0) {
2088             fprintf(stderr, "SDL_OpenAudio: %s\n", SDL_GetError());
2089             return -1;
2090         }
2091         is->audio_hw_buf_size = spec.size;
2092         is->sdl_sample_fmt          = AV_SAMPLE_FMT_S16;
2093         is->resample_sample_fmt     = is->sdl_sample_fmt;
2094         is->resample_channel_layout = avctx->channel_layout;
2095         is->resample_sample_rate    = avctx->sample_rate;
2096     }
2097
2098     ic->streams[stream_index]->discard = AVDISCARD_DEFAULT;
2099     switch (avctx->codec_type) {
2100     case AVMEDIA_TYPE_AUDIO:
2101         is->audio_stream = stream_index;
2102         is->audio_st = ic->streams[stream_index];
2103         is->audio_buf_size  = 0;
2104         is->audio_buf_index = 0;
2105
2106         /* init averaging filter */
2107         is->audio_diff_avg_coef  = exp(log(0.01) / AUDIO_DIFF_AVG_NB);
2108         is->audio_diff_avg_count = 0;
2109         /* since we do not have a precise enough measure of audio FIFO fullness,
2110            we correct audio sync only if the error is larger than this threshold */
2111         is->audio_diff_threshold = 2.0 * SDL_AUDIO_BUFFER_SIZE / avctx->sample_rate;
2112
2113         memset(&is->audio_pkt, 0, sizeof(is->audio_pkt));
2114         packet_queue_init(&is->audioq);
2115         SDL_PauseAudio(0);
2116         break;
2117     case AVMEDIA_TYPE_VIDEO:
2118         is->video_stream = stream_index;
2119         is->video_st = ic->streams[stream_index];
2120
2121         packet_queue_init(&is->videoq);
2122         is->video_tid = SDL_CreateThread(video_thread, is);
2123         break;
2124     case AVMEDIA_TYPE_SUBTITLE:
2125         is->subtitle_stream = stream_index;
2126         is->subtitle_st = ic->streams[stream_index];
2127         packet_queue_init(&is->subtitleq);
2128
2129         is->subtitle_tid = SDL_CreateThread(subtitle_thread, is);
2130         break;
2131     default:
2132         break;
2133     }
2134     return 0;
2135 }
2136
2137 static void stream_component_close(VideoState *is, int stream_index)
2138 {
2139     AVFormatContext *ic = is->ic;
2140     AVCodecContext *avctx;
2141
2142     if (stream_index < 0 || stream_index >= ic->nb_streams)
2143         return;
2144     avctx = ic->streams[stream_index]->codec;
2145
2146     switch (avctx->codec_type) {
2147     case AVMEDIA_TYPE_AUDIO:
2148         packet_queue_abort(&is->audioq);
2149
2150         SDL_CloseAudio();
2151
2152         packet_queue_end(&is->audioq);
2153         av_free_packet(&is->audio_pkt);
2154         if (is->avr)
2155             avresample_free(&is->avr);
2156         av_freep(&is->audio_buf1);
2157         is->audio_buf = NULL;
2158         avcodec_free_frame(&is->frame);
2159
2160         if (is->rdft) {
2161             av_rdft_end(is->rdft);
2162             av_freep(&is->rdft_data);
2163             is->rdft = NULL;
2164             is->rdft_bits = 0;
2165         }
2166         break;
2167     case AVMEDIA_TYPE_VIDEO:
2168         packet_queue_abort(&is->videoq);
2169
2170         /* note: we also signal this mutex to make sure we unblock the
2171            video thread in all cases */
2172         SDL_LockMutex(is->pictq_mutex);
2173         SDL_CondSignal(is->pictq_cond);
2174         SDL_UnlockMutex(is->pictq_mutex);
2175
2176         SDL_WaitThread(is->video_tid, NULL);
2177
2178         packet_queue_end(&is->videoq);
2179         break;
2180     case AVMEDIA_TYPE_SUBTITLE:
2181         packet_queue_abort(&is->subtitleq);
2182
2183         /* note: we also signal this mutex to make sure we unblock the
2184            subtitle thread in all cases */
2185         SDL_LockMutex(is->subpq_mutex);
2186         is->subtitle_stream_changed = 1;
2187
2188         SDL_CondSignal(is->subpq_cond);
2189         SDL_UnlockMutex(is->subpq_mutex);
2190
2191         SDL_WaitThread(is->subtitle_tid, NULL);
2192
2193         packet_queue_end(&is->subtitleq);
2194         break;
2195     default:
2196         break;
2197     }
2198
2199     ic->streams[stream_index]->discard = AVDISCARD_ALL;
2200     avcodec_close(avctx);
2201     switch (avctx->codec_type) {
2202     case AVMEDIA_TYPE_AUDIO:
2203         is->audio_st = NULL;
2204         is->audio_stream = -1;
2205         break;
2206     case AVMEDIA_TYPE_VIDEO:
2207         is->video_st = NULL;
2208         is->video_stream = -1;
2209         break;
2210     case AVMEDIA_TYPE_SUBTITLE:
2211         is->subtitle_st = NULL;
2212         is->subtitle_stream = -1;
2213         break;
2214     default:
2215         break;
2216     }
2217 }
2218
2219 /* since we have only one decoding thread, we can use a global
2220    variable instead of a thread local variable */
2221 static VideoState *global_video_state;
2222
2223 static int decode_interrupt_cb(void *ctx)
2224 {
2225     return global_video_state && global_video_state->abort_request;
2226 }
2227
2228 /* this thread gets the stream from the disk or the network */
2229 static int decode_thread(void *arg)
2230 {
2231     VideoState *is = arg;
2232     AVFormatContext *ic = NULL;
2233     int err, i, ret;
2234     int st_index[AVMEDIA_TYPE_NB];
2235     AVPacket pkt1, *pkt = &pkt1;
2236     int eof = 0;
2237     int pkt_in_play_range = 0;
2238     AVDictionaryEntry *t;
2239     AVDictionary **opts;
2240     int orig_nb_streams;
2241
2242     memset(st_index, -1, sizeof(st_index));
2243     is->video_stream = -1;
2244     is->audio_stream = -1;
2245     is->subtitle_stream = -1;
2246
2247     global_video_state = is;
2248
2249     ic = avformat_alloc_context();
2250     ic->interrupt_callback.callback = decode_interrupt_cb;
2251     err = avformat_open_input(&ic, is->filename, is->iformat, &format_opts);
2252     if (err < 0) {
2253         print_error(is->filename, err);
2254         ret = -1;
2255         goto fail;
2256     }
2257     if ((t = av_dict_get(format_opts, "", NULL, AV_DICT_IGNORE_SUFFIX))) {
2258         av_log(NULL, AV_LOG_ERROR, "Option %s not found.\n", t->key);
2259         ret = AVERROR_OPTION_NOT_FOUND;
2260         goto fail;
2261     }
2262     is->ic = ic;
2263
2264     if (genpts)
2265         ic->flags |= AVFMT_FLAG_GENPTS;
2266
2267     opts = setup_find_stream_info_opts(ic, codec_opts);
2268     orig_nb_streams = ic->nb_streams;
2269
2270     err = avformat_find_stream_info(ic, opts);
2271     if (err < 0) {
2272         fprintf(stderr, "%s: could not find codec parameters\n", is->filename);
2273         ret = -1;
2274         goto fail;
2275     }
2276     for (i = 0; i < orig_nb_streams; i++)
2277         av_dict_free(&opts[i]);
2278     av_freep(&opts);
2279
2280     if (ic->pb)
2281         ic->pb->eof_reached = 0; // FIXME hack, avplay maybe should not use url_feof() to test for the end
2282
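         /* with automatic mode (-1), seek by bytes only for formats whose
            timestamps may be discontinuous */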
2283     if (seek_by_bytes < 0)
2284         seek_by_bytes = !!(ic->iformat->flags & AVFMT_TS_DISCONT);
2285
2286     /* if a start time was requested, seek to it */
2287     if (start_time != AV_NOPTS_VALUE) {
2288         int64_t timestamp;
2289
2290         timestamp = start_time;
2291         /* add the stream start time */
2292         if (ic->start_time != AV_NOPTS_VALUE)
2293             timestamp += ic->start_time;
2294         ret = avformat_seek_file(ic, -1, INT64_MIN, timestamp, INT64_MAX, 0);
2295         if (ret < 0) {
2296             fprintf(stderr, "%s: could not seek to position %0.3f\n",
2297                     is->filename, (double)timestamp / AV_TIME_BASE);
2298         }
2299     }
2300
2301     for (i = 0; i < ic->nb_streams; i++)
2302         ic->streams[i]->discard = AVDISCARD_ALL;
2303     if (!video_disable)
2304         st_index[AVMEDIA_TYPE_VIDEO] =
2305             av_find_best_stream(ic, AVMEDIA_TYPE_VIDEO,
2306                                 wanted_stream[AVMEDIA_TYPE_VIDEO], -1, NULL, 0);
2307     if (!audio_disable)
2308         st_index[AVMEDIA_TYPE_AUDIO] =
2309             av_find_best_stream(ic, AVMEDIA_TYPE_AUDIO,
2310                                 wanted_stream[AVMEDIA_TYPE_AUDIO],
2311                                 st_index[AVMEDIA_TYPE_VIDEO],
2312                                 NULL, 0);
2313     if (!video_disable)
2314         st_index[AVMEDIA_TYPE_SUBTITLE] =
2315             av_find_best_stream(ic, AVMEDIA_TYPE_SUBTITLE,
2316                                 wanted_stream[AVMEDIA_TYPE_SUBTITLE],
2317                                 (st_index[AVMEDIA_TYPE_AUDIO] >= 0 ?
2318                                  st_index[AVMEDIA_TYPE_AUDIO] :
2319                                  st_index[AVMEDIA_TYPE_VIDEO]),
2320                                 NULL, 0);
2321     if (show_status) {
2322         av_dump_format(ic, 0, is->filename, 0);
2323     }
2324
2325     /* open the streams */
2326     if (st_index[AVMEDIA_TYPE_AUDIO] >= 0) {
2327         stream_component_open(is, st_index[AVMEDIA_TYPE_AUDIO]);
2328     }
2329
2330     ret = -1;
2331     if (st_index[AVMEDIA_TYPE_VIDEO] >= 0) {
2332         ret = stream_component_open(is, st_index[AVMEDIA_TYPE_VIDEO]);
2333     }
2334     is->refresh_tid = SDL_CreateThread(refresh_thread, is);
2335     if (ret < 0) {
2336         if (!display_disable)
2337             is->show_audio = 2;
2338     }
2339
2340     if (st_index[AVMEDIA_TYPE_SUBTITLE] >= 0) {
2341         stream_component_open(is, st_index[AVMEDIA_TYPE_SUBTITLE]);
2342     }
2343
2344     if (is->video_stream < 0 && is->audio_stream < 0) {
2345         fprintf(stderr, "%s: could not open codecs\n", is->filename);
2346         ret = -1;
2347         goto fail;
2348     }
2349
2350     for (;;) {
2351         if (is->abort_request)
2352             break;
2353         if (is->paused != is->last_paused) {
2354             is->last_paused = is->paused;
2355             if (is->paused)
2356                 is->read_pause_return = av_read_pause(ic);
2357             else
2358                 av_read_play(ic);
2359         }
2360 #if CONFIG_RTSP_DEMUXER
2361         if (is->paused && !strcmp(ic->iformat->name, "rtsp")) {
2362             /* wait 10 ms to avoid trying to get another packet */
2363             /* XXX: horrible */
2364             SDL_Delay(10);
2365             continue;
2366         }
2367 #endif
2368         if (is->seek_req) {
2369             int64_t seek_target = is->seek_pos;
2370             int64_t seek_min    = is->seek_rel > 0 ? seek_target - is->seek_rel + 2: INT64_MIN;
2371             int64_t seek_max    = is->seek_rel < 0 ? seek_target - is->seek_rel - 2: INT64_MAX;
2372 // FIXME the +-2 is due to rounding being not done in the correct direction in generation
2373 //      of the seek_pos/seek_rel variables
2374
2375             ret = avformat_seek_file(is->ic, -1, seek_min, seek_target, seek_max, is->seek_flags);
2376             if (ret < 0) {
2377                 fprintf(stderr, "%s: error while seeking\n", is->ic->filename);
2378             } else {
2379                 if (is->audio_stream >= 0) {
2380                     packet_queue_flush(&is->audioq);
2381                     packet_queue_put(&is->audioq, &flush_pkt);
2382                 }
2383                 if (is->subtitle_stream >= 0) {
2384                     packet_queue_flush(&is->subtitleq);
2385                     packet_queue_put(&is->subtitleq, &flush_pkt);
2386                 }
2387                 if (is->video_stream >= 0) {
2388                     packet_queue_flush(&is->videoq);
2389                     packet_queue_put(&is->videoq, &flush_pkt);
2390                 }
2391             }
2392             is->seek_req = 0;
2393             eof = 0;
2394         }
2395
2396         /* if the queues are full, no need to read more */
2397         if (!infinite_buffer &&
2398               (is->audioq.size + is->videoq.size + is->subtitleq.size > MAX_QUEUE_SIZE
2399             || (   (is->audioq   .size  > MIN_AUDIOQ_SIZE || is->audio_stream < 0)
2400                 && (is->videoq   .nb_packets > MIN_FRAMES || is->video_stream < 0)
2401                 && (is->subtitleq.nb_packets > MIN_FRAMES || is->subtitle_stream < 0)))) {
2402             /* wait 10 ms */
2403             SDL_Delay(10);
2404             continue;
2405         }
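             /* at EOF, queue empty packets to drain decoders that buffer frames,
                then either loop the playback or exit */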
2406         if (eof) {
2407             if (is->video_stream >= 0) {
2408                 av_init_packet(pkt);
2409                 pkt->data = NULL;
2410                 pkt->size = 0;
2411                 pkt->stream_index = is->video_stream;
2412                 packet_queue_put(&is->videoq, pkt);
2413             }
2414             if (is->audio_stream >= 0 &&
2415                 is->audio_st->codec->codec->capabilities & CODEC_CAP_DELAY) {
2416                 av_init_packet(pkt);
2417                 pkt->data = NULL;
2418                 pkt->size = 0;
2419                 pkt->stream_index = is->audio_stream;
2420                 packet_queue_put(&is->audioq, pkt);
2421             }
2422             SDL_Delay(10);
2423             if (is->audioq.size + is->videoq.size + is->subtitleq.size == 0) {
2424                 if (loop != 1 && (!loop || --loop)) {
2425                     stream_seek(cur_stream, start_time != AV_NOPTS_VALUE ? start_time : 0, 0, 0);
2426                 } else if (autoexit) {
2427                     ret = AVERROR_EOF;
2428                     goto fail;
2429                 }
2430             }
2431             continue;
2432         }
2433         ret = av_read_frame(ic, pkt);
2434         if (ret < 0) {
2435             if (ret == AVERROR_EOF || (ic->pb && ic->pb->eof_reached))
2436                 eof = 1;
2437             if (ic->pb && ic->pb->error)
2438                 break;
2439             SDL_Delay(100); /* wait for user event */
2440             continue;
2441         }
2442         /* check if packet is in play range specified by user, then queue, otherwise discard */
2443         pkt_in_play_range = duration == AV_NOPTS_VALUE ||
2444                 (pkt->pts - ic->streams[pkt->stream_index]->start_time) *
2445                 av_q2d(ic->streams[pkt->stream_index]->time_base) -
2446                 (double)(start_time != AV_NOPTS_VALUE ? start_time : 0) / 1000000
2447                 <= ((double)duration / 1000000);
2448         if (pkt->stream_index == is->audio_stream && pkt_in_play_range) {
2449             packet_queue_put(&is->audioq, pkt);
2450         } else if (pkt->stream_index == is->video_stream && pkt_in_play_range) {
2451             packet_queue_put(&is->videoq, pkt);
2452         } else if (pkt->stream_index == is->subtitle_stream && pkt_in_play_range) {
2453             packet_queue_put(&is->subtitleq, pkt);
2454         } else {
2455             av_free_packet(pkt);
2456         }
2457     }
2458     /* wait until the end */
2459     while (!is->abort_request) {
2460         SDL_Delay(100);
2461     }
2462
2463     ret = 0;
2464  fail:
2465     /* disable interrupting */
2466     global_video_state = NULL;
2467
2468     /* close each stream */
2469     if (is->audio_stream >= 0)
2470         stream_component_close(is, is->audio_stream);
2471     if (is->video_stream >= 0)
2472         stream_component_close(is, is->video_stream);
2473     if (is->subtitle_stream >= 0)
2474         stream_component_close(is, is->subtitle_stream);
2475     if (is->ic) {
2476         avformat_close_input(&is->ic);
2477     }
2478
2479     if (ret != 0) {
2480         SDL_Event event;
2481
2482         event.type = FF_QUIT_EVENT;
2483         event.user.data1 = is;
2484         SDL_PushEvent(&event);
2485     }
2486     return 0;
2487 }
2488
2489 static VideoState *stream_open(const char *filename, AVInputFormat *iformat)
2490 {
2491     VideoState *is;
2492
2493     is = av_mallocz(sizeof(VideoState));
2494     if (!is)
2495         return NULL;
2496     av_strlcpy(is->filename, filename, sizeof(is->filename));
2497     is->iformat = iformat;
2498     is->ytop    = 0;
2499     is->xleft   = 0;
2500
2501     /* start video display */
2502     is->pictq_mutex = SDL_CreateMutex();
2503     is->pictq_cond  = SDL_CreateCond();
2504
2505     is->subpq_mutex = SDL_CreateMutex();
2506     is->subpq_cond  = SDL_CreateCond();
2507
2508     is->av_sync_type = av_sync_type;
2509     is->parse_tid    = SDL_CreateThread(decode_thread, is);
2510     if (!is->parse_tid) {
2511         av_free(is);
2512         return NULL;
2513     }
2514     return is;
2515 }
2516
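     /* switch to the next stream of the requested type, wrapping around;
        for subtitles, "no stream" is also a valid stop of the cycle */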
2517 static void stream_cycle_channel(VideoState *is, int codec_type)
2518 {
2519     AVFormatContext *ic = is->ic;
2520     int start_index, stream_index;
2521     AVStream *st;
2522
2523     if (codec_type == AVMEDIA_TYPE_VIDEO)
2524         start_index = is->video_stream;
2525     else if (codec_type == AVMEDIA_TYPE_AUDIO)
2526         start_index = is->audio_stream;
2527     else
2528         start_index = is->subtitle_stream;
2529     if (start_index < (codec_type == AVMEDIA_TYPE_SUBTITLE ? -1 : 0))
2530         return;
2531     stream_index = start_index;
2532     for (;;) {
2533         if (++stream_index >= is->ic->nb_streams)
2534         {
2535             if (codec_type == AVMEDIA_TYPE_SUBTITLE)
2536             {
2537                 stream_index = -1;
2538                 goto the_end;
2539             } else
2540                 stream_index = 0;
2541         }
2542         if (stream_index == start_index)
2543             return;
2544         st = ic->streams[stream_index];
2545         if (st->codec->codec_type == codec_type) {
2546             /* check that parameters are OK */
2547             switch (codec_type) {
2548             case AVMEDIA_TYPE_AUDIO:
2549                 if (st->codec->sample_rate != 0 &&
2550                     st->codec->channels != 0)
2551                     goto the_end;
2552                 break;
2553             case AVMEDIA_TYPE_VIDEO:
2554             case AVMEDIA_TYPE_SUBTITLE:
2555                 goto the_end;
2556             default:
2557                 break;
2558             }
2559         }
2560     }
2561  the_end:
2562     stream_component_close(is, start_index);
2563     stream_component_open(is, stream_index);
2564 }
2565
2566
2567 static void toggle_full_screen(void)
2568 {
2569 #if defined(__APPLE__) && SDL_VERSION_ATLEAST(1, 2, 14)
2570     /* OS X needs the picture_queue to be emptied */
2571     int i;
2572     for (i = 0; i < VIDEO_PICTURE_QUEUE_SIZE; i++)
2573         cur_stream->pictq[i].reallocate = 1;
2574 #endif
2575     is_full_screen = !is_full_screen;
2576     video_open(cur_stream);
2577 }
2578
2579 static void toggle_pause(void)
2580 {
2581     if (cur_stream)
2582         stream_pause(cur_stream);
2583     step = 0;
2584 }
2585
2586 static void step_to_next_frame(void)
2587 {
2588     if (cur_stream) {
2589         /* if the stream is paused, unpause it, then step */
2590         if (cur_stream->paused)
2591             stream_pause(cur_stream);
2592     }
2593     step = 1;
2594 }
2595
2596 static void toggle_audio_display(void)
2597 {
2598     if (cur_stream) {
2599         int bgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0x00);
2600         cur_stream->show_audio = (cur_stream->show_audio + 1) % 3;
2601         fill_rectangle(screen,
2602                        cur_stream->xleft, cur_stream->ytop, cur_stream->width, cur_stream->height,
2603                        bgcolor);
2604         SDL_UpdateRect(screen, cur_stream->xleft, cur_stream->ytop, cur_stream->width, cur_stream->height);
2605     }
2606 }
2607
2608 /* handle an event sent by the GUI */
2609 static void event_loop(void)
2610 {
2611     SDL_Event event;
2612     double incr, pos, frac;
2613
2614     for (;;) {
2615         double x;
2616         SDL_WaitEvent(&event);
2617         switch (event.type) {
2618         case SDL_KEYDOWN:
2619             if (exit_on_keydown) {
2620                 do_exit();
2621                 break;
2622             }
2623             switch (event.key.keysym.sym) {
2624             case SDLK_ESCAPE:
2625             case SDLK_q:
2626                 do_exit();
2627                 break;
2628             case SDLK_f:
2629                 toggle_full_screen();
2630                 break;
2631             case SDLK_p:
2632             case SDLK_SPACE:
2633                 toggle_pause();
2634                 break;
2635             case SDLK_s: // S: Step to next frame
2636                 step_to_next_frame();
2637                 break;
2638             case SDLK_a:
2639                 if (cur_stream)
2640                     stream_cycle_channel(cur_stream, AVMEDIA_TYPE_AUDIO);
2641                 break;
2642             case SDLK_v:
2643                 if (cur_stream)
2644                     stream_cycle_channel(cur_stream, AVMEDIA_TYPE_VIDEO);
2645                 break;
2646             case SDLK_t:
2647                 if (cur_stream)
2648                     stream_cycle_channel(cur_stream, AVMEDIA_TYPE_SUBTITLE);
2649                 break;
2650             case SDLK_w:
2651                 toggle_audio_display();
2652                 break;
2653             case SDLK_LEFT:
2654                 incr = -10.0;
2655                 goto do_seek;
2656             case SDLK_RIGHT:
2657                 incr = 10.0;
2658                 goto do_seek;
2659             case SDLK_UP:
2660                 incr = 60.0;
2661                 goto do_seek;
2662             case SDLK_DOWN:
2663                 incr = -60.0;
2664             do_seek:
2665                 if (cur_stream) {
2666                     if (seek_by_bytes) {
2667                         if (cur_stream->video_stream >= 0 && cur_stream->video_current_pos >= 0) {
2668                             pos = cur_stream->video_current_pos;
2669                         } else if (cur_stream->audio_stream >= 0 && cur_stream->audio_pkt.pos >= 0) {
2670                             pos = cur_stream->audio_pkt.pos;
2671                         } else
2672                             pos = avio_tell(cur_stream->ic->pb);
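                             /* convert the increment from seconds to bytes using
                                the stream bit rate, or a rough default otherwise */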
2673                         if (cur_stream->ic->bit_rate)
2674                             incr *= cur_stream->ic->bit_rate / 8.0;
2675                         else
2676                             incr *= 180000.0;
2677                         pos += incr;
2678                         stream_seek(cur_stream, pos, incr, 1);
2679                     } else {
2680                         pos = get_master_clock(cur_stream);
2681                         pos += incr;
2682                         stream_seek(cur_stream, (int64_t)(pos * AV_TIME_BASE), (int64_t)(incr * AV_TIME_BASE), 0);
2683                     }
2684                 }
2685                 break;
2686             default:
2687                 break;
2688             }
2689             break;
2690         case SDL_MOUSEBUTTONDOWN:
2691             if (exit_on_mousedown) {
2692                 do_exit();
2693                 break;
2694             }
2695         case SDL_MOUSEMOTION:
2696             if (event.type == SDL_MOUSEBUTTONDOWN) {
2697                 x = event.button.x;
2698             } else {
2699                 if (event.motion.state != SDL_PRESSED)
2700                     break;
2701                 x = event.motion.x;
2702             }
2703             if (cur_stream) {
2704                 if (seek_by_bytes || cur_stream->ic->duration <= 0) {
2705                     uint64_t size =  avio_size(cur_stream->ic->pb);
2706                     stream_seek(cur_stream, size*x/cur_stream->width, 0, 1);
2707                 } else {
2708                     int64_t ts;
2709                     int ns, hh, mm, ss;
2710                     int tns, thh, tmm, tss;
2711                     tns  = cur_stream->ic->duration / 1000000LL;
2712                     thh  = tns / 3600;
2713                     tmm  = (tns % 3600) / 60;
2714                     tss  = (tns % 60);
2715                     frac = x / cur_stream->width;
2716                     ns   = frac * tns;
2717                     hh   = ns / 3600;
2718                     mm   = (ns % 3600) / 60;
2719                     ss   = (ns % 60);
2720                     fprintf(stderr, "Seek to %2.0f%% (%2d:%02d:%02d) of total duration (%2d:%02d:%02d)       \n", frac*100,
2721                             hh, mm, ss, thh, tmm, tss);
2722                     ts = frac * cur_stream->ic->duration;
2723                     if (cur_stream->ic->start_time != AV_NOPTS_VALUE)
2724                         ts += cur_stream->ic->start_time;
2725                     stream_seek(cur_stream, ts, 0, 0);
2726                 }
2727             }
2728             break;
2729         case SDL_VIDEORESIZE:
2730             if (cur_stream) {
2731                 screen = SDL_SetVideoMode(event.resize.w, event.resize.h, 0,
2732                                           SDL_HWSURFACE|SDL_RESIZABLE|SDL_ASYNCBLIT|SDL_HWACCEL);
2733                 screen_width  = cur_stream->width  = event.resize.w;
2734                 screen_height = cur_stream->height = event.resize.h;
2735             }
2736             break;
2737         case SDL_QUIT:
2738         case FF_QUIT_EVENT:
2739             do_exit();
2740             break;
2741         case FF_ALLOC_EVENT:
2742             video_open(event.user.data1);
2743             alloc_picture(event.user.data1);
2744             break;
2745         case FF_REFRESH_EVENT:
2746             video_refresh_timer(event.user.data1);
2747             cur_stream->refresh = 0;
2748             break;
2749         default:
2750             break;
2751         }
2752     }
2753 }
2754
2755 static int opt_frame_size(void *optctx, const char *opt, const char *arg)
2756 {
2757     av_log(NULL, AV_LOG_ERROR,
2758            "Option '%s' has been removed, use private format options instead\n", opt);
2759     return AVERROR(EINVAL);
2760 }
2761
2762 static int opt_width(void *optctx, const char *opt, const char *arg)
2763 {
2764     screen_width = parse_number_or_die(opt, arg, OPT_INT64, 1, INT_MAX);
2765     return 0;
2766 }
2767
2768 static int opt_height(void *optctx, const char *opt, const char *arg)
2769 {
2770     screen_height = parse_number_or_die(opt, arg, OPT_INT64, 1, INT_MAX);
2771     return 0;
2772 }
2773
2774 static int opt_format(void *optctx, const char *opt, const char *arg)
2775 {
2776     file_iformat = av_find_input_format(arg);
2777     if (!file_iformat) {
2778         fprintf(stderr, "Unknown input format: %s\n", arg);
2779         return AVERROR(EINVAL);
2780     }
2781     return 0;
2782 }
2783
2784 static int opt_frame_pix_fmt(void *optctx, const char *opt, const char *arg)
2785 {
2786     av_log(NULL, AV_LOG_ERROR,
2787            "Option '%s' has been removed, use private format options instead\n", opt);
2788     return AVERROR(EINVAL);
2789 }
2790
2791 static int opt_sync(void *optctx, const char *opt, const char *arg)
2792 {
2793     if (!strcmp(arg, "audio"))
2794         av_sync_type = AV_SYNC_AUDIO_MASTER;
2795     else if (!strcmp(arg, "video"))
2796         av_sync_type = AV_SYNC_VIDEO_MASTER;
2797     else if (!strcmp(arg, "ext"))
2798         av_sync_type = AV_SYNC_EXTERNAL_CLOCK;
2799     else {
2800         fprintf(stderr, "Unknown value for %s: %s\n", opt, arg);
2801         exit(1);
2802     }
2803     return 0;
2804 }
2805
2806 static int opt_seek(void *optctx, const char *opt, const char *arg)
2807 {
2808     start_time = parse_time_or_die(opt, arg, 1);
2809     return 0;
2810 }
2811
2812 static int opt_duration(void *optctx, const char *opt, const char *arg)
2813 {
2814     duration = parse_time_or_die(opt, arg, 1);
2815     return 0;
2816 }
2817
2818 static int opt_vismv(void *optctx, const char *opt, const char *arg)
2819 {
2820     debug_mv = parse_number_or_die(opt, arg, OPT_INT64, INT_MIN, INT_MAX);
2821     return 0;
2822 }
2823
2824 static const OptionDef options[] = {
2825 #include "cmdutils_common_opts.h"
2826     { "x", HAS_ARG, { .func_arg = opt_width }, "force displayed width", "width" },
2827     { "y", HAS_ARG, { .func_arg = opt_height }, "force displayed height", "height" },
2828     { "s", HAS_ARG | OPT_VIDEO, { .func_arg = opt_frame_size }, "set frame size (WxH or abbreviation)", "size" },
2829     { "fs", OPT_BOOL, { &is_full_screen }, "force full screen" },
2830     { "an", OPT_BOOL, { &audio_disable }, "disable audio" },
2831     { "vn", OPT_BOOL, { &video_disable }, "disable video" },
2832     { "ast", OPT_INT | HAS_ARG | OPT_EXPERT, { &wanted_stream[AVMEDIA_TYPE_AUDIO] }, "select desired audio stream", "stream_number" },
2833     { "vst", OPT_INT | HAS_ARG | OPT_EXPERT, { &wanted_stream[AVMEDIA_TYPE_VIDEO] }, "select desired video stream", "stream_number" },
2834     { "sst", OPT_INT | HAS_ARG | OPT_EXPERT, { &wanted_stream[AVMEDIA_TYPE_SUBTITLE] }, "select desired subtitle stream", "stream_number" },
2835     { "ss", HAS_ARG, { .func_arg = opt_seek }, "seek to a given position in seconds", "pos" },
2836     { "t", HAS_ARG, { .func_arg = opt_duration }, "play \"duration\" seconds of audio/video", "duration" },
2837     { "bytes", OPT_INT | HAS_ARG, { &seek_by_bytes }, "seek by bytes 0=off 1=on -1=auto", "val" },
2838     { "nodisp", OPT_BOOL, { &display_disable }, "disable graphical display" },
2839     { "f", HAS_ARG, { .func_arg = opt_format }, "force format", "fmt" },
2840     { "pix_fmt", HAS_ARG | OPT_EXPERT | OPT_VIDEO, { .func_arg = opt_frame_pix_fmt }, "set pixel format", "format" },
2841     { "stats", OPT_BOOL | OPT_EXPERT, { &show_status }, "show status", "" },
2842     { "bug", OPT_INT | HAS_ARG | OPT_EXPERT, { &workaround_bugs }, "workaround bugs", "" },
2843     { "vismv", HAS_ARG | OPT_EXPERT, { .func_arg = opt_vismv }, "visualize motion vectors", "" },
2844     { "fast", OPT_BOOL | OPT_EXPERT, { &fast }, "non spec compliant optimizations", "" },
2845     { "genpts", OPT_BOOL | OPT_EXPERT, { &genpts }, "generate pts", "" },
2846     { "drp", OPT_INT | HAS_ARG | OPT_EXPERT, { &decoder_reorder_pts }, "let decoder reorder pts 0=off 1=on -1=auto", ""},
2847     { "skiploop", OPT_INT | HAS_ARG | OPT_EXPERT, { &skip_loop_filter }, "", "" },
2848     { "skipframe", OPT_INT | HAS_ARG | OPT_EXPERT, { &skip_frame }, "", "" },
2849     { "skipidct", OPT_INT | HAS_ARG | OPT_EXPERT, { &skip_idct }, "", "" },
2850     { "idct", OPT_INT | HAS_ARG | OPT_EXPERT, { &idct }, "set idct algo",  "algo" },
2851     { "ec", OPT_INT | HAS_ARG | OPT_EXPERT, { &error_concealment }, "set error concealment options",  "bit_mask" },
2852     { "sync", HAS_ARG | OPT_EXPERT, { .func_arg = opt_sync }, "set audio-video sync. type (type=audio/video/ext)", "type" },
2853     { "autoexit", OPT_BOOL | OPT_EXPERT, { &autoexit }, "exit at the end", "" },
2854     { "exitonkeydown", OPT_BOOL | OPT_EXPERT, { &exit_on_keydown }, "exit on key down", "" },
2855     { "exitonmousedown", OPT_BOOL | OPT_EXPERT, { &exit_on_mousedown }, "exit on mouse down", "" },
2856     { "loop", OPT_INT | HAS_ARG | OPT_EXPERT, { &loop }, "set number of times the playback shall be looped", "loop count" },
2857     { "framedrop", OPT_BOOL | OPT_EXPERT, { &framedrop }, "drop frames when cpu is too slow", "" },
2858     { "infbuf", OPT_BOOL | OPT_EXPERT, { &infinite_buffer }, "don't limit the input buffer size (useful with realtime streams)", "" },
2859     { "window_title", OPT_STRING | HAS_ARG, { &window_title }, "set window title", "window title" },
2860 #if CONFIG_AVFILTER
2861     { "vf", OPT_STRING | HAS_ARG, { &vfilters }, "video filters", "filter list" },
2862 #endif
2863     { "rdftspeed", OPT_INT | HAS_ARG | OPT_AUDIO | OPT_EXPERT, { &rdftspeed }, "rdft speed", "msecs" },
2864     { "default", HAS_ARG | OPT_AUDIO | OPT_VIDEO | OPT_EXPERT, { opt_default }, "generic catch all option", "" },
2865     { "i", 0, { NULL }, "avconv compatibility dummy option", ""},
2866     { NULL, },
2867 };
2868
2869 static void show_usage(void)
2870 {
2871     printf("Simple media player\n");
2872     printf("usage: %s [options] input_file\n", program_name);
2873     printf("\n");
2874 }
2875
2876 void show_help_default(const char *opt, const char *arg)
2877 {
2878     av_log_set_callback(log_callback_help);
2879     show_usage();
2880     show_help_options(options, "Main options:", 0, OPT_EXPERT, 0);
2881     show_help_options(options, "Advanced options:", OPT_EXPERT, 0, 0);
2882     printf("\n");
2883     show_help_children(avcodec_get_class(), AV_OPT_FLAG_DECODING_PARAM);
2884     show_help_children(avformat_get_class(), AV_OPT_FLAG_DECODING_PARAM);
2885 #if !CONFIG_AVFILTER
2886     show_help_children(sws_get_class(), AV_OPT_FLAG_ENCODING_PARAM);
2887 #endif
2888     printf("\nWhile playing:\n"
2889            "q, ESC              quit\n"
2890            "f                   toggle full screen\n"
2891            "p, SPC              pause\n"
2892            "a                   cycle audio channel\n"
2893            "v                   cycle video channel\n"
2894            "t                   cycle subtitle channel\n"
2895            "w                   show audio waves\n"
2896            "s                   activate frame-step mode\n"
2897            "left/right          seek backward/forward 10 seconds\n"
2898            "down/up             seek backward/forward 1 minute\n"
2899            "mouse click         seek to percentage in file corresponding to fraction of width\n"
2900            );
2901 }
2902
2903 static void opt_input_file(void *optctx, const char *filename)
2904 {
2905     if (input_filename) {
2906         fprintf(stderr, "Argument '%s' provided as input filename, but '%s' was already specified.\n",
2907                 filename, input_filename);
2908         exit(1);
2909     }
2910     if (!strcmp(filename, "-"))
2911         filename = "pipe:";
2912     input_filename = filename;
2913 }
2914
2915 /* program entry point */
2916 int main(int argc, char **argv)
2917 {
2918     int flags;
2919
2920     av_log_set_flags(AV_LOG_SKIP_REPEATED);
2921     parse_loglevel(argc, argv, options);
2922
2923     /* register all codecs, demux and protocols */
2924     avcodec_register_all();
2925 #if CONFIG_AVDEVICE
2926     avdevice_register_all();
2927 #endif
2928 #if CONFIG_AVFILTER
2929     avfilter_register_all();
2930 #endif
2931     av_register_all();
2932     avformat_network_init();
2933
2934     init_opts();
2935
2936     show_banner();
2937
2938     parse_options(NULL, argc, argv, options, opt_input_file);
2939
2940     if (!input_filename) {
2941         show_usage();
2942         fprintf(stderr, "An input file must be specified\n");
2943         fprintf(stderr, "Use -h to get full help or, even better, run 'man %s'\n", program_name);
2944         exit(1);
2945     }
2946
2947     if (display_disable) {
2948         video_disable = 1;
2949     }
2950     flags = SDL_INIT_VIDEO | SDL_INIT_AUDIO | SDL_INIT_TIMER;
2951 #if !defined(__MINGW32__) && !defined(__APPLE__)
2952     flags |= SDL_INIT_EVENTTHREAD; /* Not supported on Windows or Mac OS X */
2953 #endif
2954     if (SDL_Init (flags)) {
2955         fprintf(stderr, "Could not initialize SDL - %s\n", SDL_GetError());
2956         exit(1);
2957     }
2958
2959     if (!display_disable) {
2960         const SDL_VideoInfo *vi = SDL_GetVideoInfo();
2961         fs_screen_width = vi->current_w;
2962         fs_screen_height = vi->current_h;
2963     }
2964
2965     SDL_EventState(SDL_ACTIVEEVENT, SDL_IGNORE);
2966     SDL_EventState(SDL_SYSWMEVENT, SDL_IGNORE);
2967     SDL_EventState(SDL_USEREVENT, SDL_IGNORE);
2968
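         /* the flush packet's data pointer is used as a sentinel: the decoding
            threads compare queued packets against it to know when to flush */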
2969     av_init_packet(&flush_pkt);
2970     flush_pkt.data = (uint8_t *)"FLUSH";
2971
2972     cur_stream = stream_open(input_filename, file_iformat);
2973
2974     event_loop();
2975
2976     /* never returns */
2977
2978     return 0;
2979 }