1 /*
2  * avplay : Simple Media Player based on the Libav libraries
3  * Copyright (c) 2003 Fabrice Bellard
4  *
5  * This file is part of Libav.
6  *
7  * Libav is free software; you can redistribute it and/or
8  * modify it under the terms of the GNU Lesser General Public
9  * License as published by the Free Software Foundation; either
10  * version 2.1 of the License, or (at your option) any later version.
11  *
12  * Libav is distributed in the hope that it will be useful,
13  * but WITHOUT ANY WARRANTY; without even the implied warranty of
14  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
15  * Lesser General Public License for more details.
16  *
17  * You should have received a copy of the GNU Lesser General Public
18  * License along with Libav; if not, write to the Free Software
19  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
20  */
21
22 #include "config.h"
23 #include <inttypes.h>
24 #include <math.h>
25 #include <limits.h>
26 #include <stdint.h>
27
28 #include "libavutil/avstring.h"
29 #include "libavutil/colorspace.h"
30 #include "libavutil/display.h"
31 #include "libavutil/mathematics.h"
32 #include "libavutil/pixdesc.h"
33 #include "libavutil/imgutils.h"
34 #include "libavutil/dict.h"
35 #include "libavutil/parseutils.h"
36 #include "libavutil/samplefmt.h"
37 #include "libavutil/time.h"
38 #include "libavformat/avformat.h"
39 #include "libavdevice/avdevice.h"
40 #include "libswscale/swscale.h"
41 #include "libavresample/avresample.h"
42 #include "libavutil/opt.h"
43 #include "libavcodec/avfft.h"
44
45 #if CONFIG_AVFILTER
46 # include "libavfilter/avfilter.h"
47 # include "libavfilter/buffersink.h"
48 # include "libavfilter/buffersrc.h"
49 #endif
50
51 #include "cmdutils.h"
52
53 #include <SDL.h>
54 #include <SDL_thread.h>
55
56 #ifdef __MINGW32__
57 #undef main /* We don't want SDL to override our main() */
58 #endif
59
60 #include <assert.h>
61
62 const char program_name[] = "avplay";
63 const int program_birth_year = 2003;
64
65 #define MAX_QUEUE_SIZE (15 * 1024 * 1024)
66 #define MIN_AUDIOQ_SIZE (20 * 16 * 1024)
67 #define MIN_FRAMES 5
68
69 /* SDL audio buffer size, in samples. It should be small to allow precise
70    A/V sync, as SDL does not report hardware buffer fullness. */
71 #define SDL_AUDIO_BUFFER_SIZE 1024
72
73 /* no AV sync correction is done if below the AV sync threshold */
74 #define AV_SYNC_THRESHOLD 0.01
75 /* no AV correction is done if too big error */
76 #define AV_NOSYNC_THRESHOLD 10.0
77
78 #define FRAME_SKIP_FACTOR 0.05
79
80 /* maximum audio speed change to get correct sync */
81 #define SAMPLE_CORRECTION_PERCENT_MAX 10
82
83 /* we use about AUDIO_DIFF_AVG_NB A-V differences to make the average */
84 #define AUDIO_DIFF_AVG_NB   20
85
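/* Added illustration (not part of the original file): a minimal sketch of how a
   weighted running average over roughly AUDIO_DIFF_AVG_NB A-V differences can
   be maintained. The exp(log(0.01) / N) coefficient and the helper name are
   assumptions made for this sketch, not code copied from avplay itself. */
#if 0
static double example_update_av_diff_avg(double *diff_cum, double new_diff)
{
    /* weight chosen so the oldest of ~AUDIO_DIFF_AVG_NB samples contributes
       about 1% to the result (for N == 20 the coefficient is about 0.794) */
    const double coef = exp(log(0.01) / AUDIO_DIFF_AVG_NB);

    *diff_cum = new_diff + coef * *diff_cum;  /* geometric accumulation */
    return *diff_cum * (1.0 - coef);          /* normalized running average */
}
#endif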
86 /* NOTE: the size must be big enough to compensate for the hardware audio buffer size */
87 #define SAMPLE_ARRAY_SIZE (2 * 65536)
88
89 static int64_t sws_flags = SWS_BICUBIC;
90
91 typedef struct PacketQueue {
92     AVPacketList *first_pkt, *last_pkt;
93     int nb_packets;
94     int size;
95     int abort_request;
96     SDL_mutex *mutex;
97     SDL_cond *cond;
98 } PacketQueue;
99
100 #define VIDEO_PICTURE_QUEUE_SIZE 2
101 #define SUBPICTURE_QUEUE_SIZE 4
102
103 typedef struct VideoPicture {
104     double pts;             // presentation timestamp for this picture
105     double target_clock;    // av_gettime_relative() time at which this should be displayed ideally
106     int64_t pos;            // byte position in file
107     SDL_Overlay *bmp;
108     int width, height; /* source height & width */
109     int allocated;
110     int reallocate;
111     enum AVPixelFormat pix_fmt;
112
113     AVRational sar;
114 } VideoPicture;
115
116 typedef struct SubPicture {
117     double pts; /* presentation time stamp for this picture */
118     AVSubtitle sub;
119 } SubPicture;
120
121 enum {
122     AV_SYNC_AUDIO_MASTER, /* default choice */
123     AV_SYNC_VIDEO_MASTER,
124     AV_SYNC_EXTERNAL_CLOCK, /* synchronize to an external clock */
125 };
126
127 typedef struct VideoState {
128     SDL_Thread *parse_tid;
129     SDL_Thread *video_tid;
130     SDL_Thread *refresh_tid;
131     AVInputFormat *iformat;
132     int no_background;
133     int abort_request;
134     int paused;
135     int last_paused;
136     int seek_req;
137     int seek_flags;
138     int64_t seek_pos;
139     int64_t seek_rel;
140     int read_pause_return;
141     AVFormatContext *ic;
142
143     int audio_stream;
144
145     int av_sync_type;
146     double external_clock; /* external clock base */
147     int64_t external_clock_time;
148
149     double audio_clock;
150     double audio_diff_cum; /* used for AV difference average computation */
151     double audio_diff_avg_coef;
152     double audio_diff_threshold;
153     int audio_diff_avg_count;
154     AVStream *audio_st;
155     PacketQueue audioq;
156     int audio_hw_buf_size;
157     uint8_t silence_buf[SDL_AUDIO_BUFFER_SIZE];
158     uint8_t *audio_buf;
159     uint8_t *audio_buf1;
160     unsigned int audio_buf_size; /* in bytes */
161     int audio_buf_index; /* in bytes */
162     AVPacket audio_pkt_temp;
163     AVPacket audio_pkt;
164     enum AVSampleFormat sdl_sample_fmt;
165     uint64_t sdl_channel_layout;
166     int sdl_channels;
167     int sdl_sample_rate;
168     enum AVSampleFormat resample_sample_fmt;
169     uint64_t resample_channel_layout;
170     int resample_sample_rate;
171     AVAudioResampleContext *avr;
172     AVFrame *frame;
173
174     int show_audio; /* if true, display audio samples */
175     int16_t sample_array[SAMPLE_ARRAY_SIZE];
176     int sample_array_index;
177     int last_i_start;
178     RDFTContext *rdft;
179     int rdft_bits;
180     FFTSample *rdft_data;
181     int xpos;
182
183     SDL_Thread *subtitle_tid;
184     int subtitle_stream;
185     int subtitle_stream_changed;
186     AVStream *subtitle_st;
187     PacketQueue subtitleq;
188     SubPicture subpq[SUBPICTURE_QUEUE_SIZE];
189     int subpq_size, subpq_rindex, subpq_windex;
190     SDL_mutex *subpq_mutex;
191     SDL_cond *subpq_cond;
192
193     double frame_timer;
194     double frame_last_pts;
195     double frame_last_delay;
196     double video_clock;             // pts of last decoded frame / predicted pts of next decoded frame
197     int video_stream;
198     AVStream *video_st;
199     PacketQueue videoq;
200     double video_current_pts;       // current displayed pts (different from video_clock if frame fifos are used)
201     double video_current_pts_drift; // video_current_pts - time (av_gettime_relative) at which we updated video_current_pts - used to have running video pts
202     int64_t video_current_pos;      // current displayed file pos
203     VideoPicture pictq[VIDEO_PICTURE_QUEUE_SIZE];
204     int pictq_size, pictq_rindex, pictq_windex;
205     SDL_mutex *pictq_mutex;
206     SDL_cond *pictq_cond;
207 #if !CONFIG_AVFILTER
208     struct SwsContext *img_convert_ctx;
209 #endif
210
211     //    QETimer *video_timer;
212     char filename[1024];
213     int width, height, xleft, ytop;
214
215     PtsCorrectionContext pts_ctx;
216
217 #if CONFIG_AVFILTER
218     AVFilterContext *in_video_filter;   // the first filter in the video chain
219     AVFilterContext *out_video_filter;  // the last filter in the video chain
220 #endif
221
222     float skip_frames;
223     float skip_frames_index;
224     int refresh;
225 } VideoState;
226
227 /* options specified by the user */
228 static AVInputFormat *file_iformat;
229 static const char *input_filename;
230 static const char *window_title;
231 static int fs_screen_width;
232 static int fs_screen_height;
233 static int screen_width  = 0;
234 static int screen_height = 0;
235 static int audio_disable;
236 static int video_disable;
237 static int wanted_stream[AVMEDIA_TYPE_NB] = {
238     [AVMEDIA_TYPE_AUDIO]    = -1,
239     [AVMEDIA_TYPE_VIDEO]    = -1,
240     [AVMEDIA_TYPE_SUBTITLE] = -1,
241 };
242 static int seek_by_bytes = -1;
243 static int display_disable;
244 static int show_status = 1;
245 static int av_sync_type = AV_SYNC_AUDIO_MASTER;
246 static int64_t start_time = AV_NOPTS_VALUE;
247 static int64_t duration = AV_NOPTS_VALUE;
248 static int step = 0;
249 static int workaround_bugs = 1;
250 static int fast = 0;
251 static int genpts = 0;
252 static int idct = FF_IDCT_AUTO;
253 static enum AVDiscard skip_frame       = AVDISCARD_DEFAULT;
254 static enum AVDiscard skip_idct        = AVDISCARD_DEFAULT;
255 static enum AVDiscard skip_loop_filter = AVDISCARD_DEFAULT;
256 static int error_concealment = 3;
257 static int decoder_reorder_pts = -1;
258 static int noautoexit;
259 static int exit_on_keydown;
260 static int exit_on_mousedown;
261 static int loop = 1;
262 static int framedrop = 1;
263 static int infinite_buffer = 0;
264
265 static int rdftspeed = 20;
266 #if CONFIG_AVFILTER
267 static char *vfilters = NULL;
268 #endif
269 static int autorotate = 1;
270
271 /* current context */
272 static int is_full_screen;
273 static VideoState *cur_stream;
274 static int64_t audio_callback_time;
275
276 static AVPacket flush_pkt;
277
278 #define FF_ALLOC_EVENT   (SDL_USEREVENT)
279 #define FF_REFRESH_EVENT (SDL_USEREVENT + 1)
280 #define FF_QUIT_EVENT    (SDL_USEREVENT + 2)
281
282 static SDL_Surface *screen;
283
284 static int packet_queue_put(PacketQueue *q, AVPacket *pkt);
285
286 /* packet queue handling */
287 static void packet_queue_init(PacketQueue *q)
288 {
289     memset(q, 0, sizeof(PacketQueue));
290     q->mutex = SDL_CreateMutex();
291     q->cond = SDL_CreateCond();
292     packet_queue_put(q, &flush_pkt);
293 }
294
295 static void packet_queue_flush(PacketQueue *q)
296 {
297     AVPacketList *pkt, *pkt1;
298
299     SDL_LockMutex(q->mutex);
300     for (pkt = q->first_pkt; pkt != NULL; pkt = pkt1) {
301         pkt1 = pkt->next;
302         av_free_packet(&pkt->pkt);
303         av_freep(&pkt);
304     }
305     q->last_pkt = NULL;
306     q->first_pkt = NULL;
307     q->nb_packets = 0;
308     q->size = 0;
309     SDL_UnlockMutex(q->mutex);
310 }
311
312 static void packet_queue_end(PacketQueue *q)
313 {
314     packet_queue_flush(q);
315     SDL_DestroyMutex(q->mutex);
316     SDL_DestroyCond(q->cond);
317 }
318
319 static int packet_queue_put(PacketQueue *q, AVPacket *pkt)
320 {
321     AVPacketList *pkt1;
322
323     /* duplicate the packet */
324     if (pkt != &flush_pkt && av_dup_packet(pkt) < 0)
325         return -1;
326
327     pkt1 = av_malloc(sizeof(AVPacketList));
328     if (!pkt1)
329         return -1;
330     pkt1->pkt = *pkt;
331     pkt1->next = NULL;
332
333
334     SDL_LockMutex(q->mutex);
335
336     if (!q->last_pkt)
337
338         q->first_pkt = pkt1;
339     else
340         q->last_pkt->next = pkt1;
341     q->last_pkt = pkt1;
342     q->nb_packets++;
343     q->size += pkt1->pkt.size + sizeof(*pkt1);
344     /* XXX: should duplicate packet data in DV case */
345     SDL_CondSignal(q->cond);
346
347     SDL_UnlockMutex(q->mutex);
348     return 0;
349 }
350
351 static void packet_queue_abort(PacketQueue *q)
352 {
353     SDL_LockMutex(q->mutex);
354
355     q->abort_request = 1;
356
357     SDL_CondSignal(q->cond);
358
359     SDL_UnlockMutex(q->mutex);
360 }
361
362 /* return < 0 if aborted, 0 if no packet and > 0 if packet.  */
363 static int packet_queue_get(PacketQueue *q, AVPacket *pkt, int block)
364 {
365     AVPacketList *pkt1;
366     int ret;
367
368     SDL_LockMutex(q->mutex);
369
370     for (;;) {
371         if (q->abort_request) {
372             ret = -1;
373             break;
374         }
375
376         pkt1 = q->first_pkt;
377         if (pkt1) {
378             q->first_pkt = pkt1->next;
379             if (!q->first_pkt)
380                 q->last_pkt = NULL;
381             q->nb_packets--;
382             q->size -= pkt1->pkt.size + sizeof(*pkt1);
383             *pkt = pkt1->pkt;
384             av_free(pkt1);
385             ret = 1;
386             break;
387         } else if (!block) {
388             ret = 0;
389             break;
390         } else {
391             SDL_CondWait(q->cond, q->mutex);
392         }
393     }
394     SDL_UnlockMutex(q->mutex);
395     return ret;
396 }
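/* Added usage sketch (hypothetical, not part of the original file): how a
   decoder thread is expected to drain a PacketQueue with packet_queue_get().
   The helper name and the "decode" placeholder are illustrative only. */
#if 0
static void example_drain_queue(PacketQueue *q)
{
    AVPacket pkt;

    for (;;) {
        int ret = packet_queue_get(q, &pkt, 1);   /* block until data or abort */
        if (ret < 0)
            break;                                /* queue was aborted */
        if (pkt.data == flush_pkt.data) {
            /* flush marker: reset decoder state here */
            continue;
        }
        /* ... decode pkt here ... */
        av_free_packet(&pkt);                     /* we now own the packet */
    }
}
#endif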
397
398 static inline void fill_rectangle(SDL_Surface *screen,
399                                   int x, int y, int w, int h, int color)
400 {
401     SDL_Rect rect;
402     rect.x = x;
403     rect.y = y;
404     rect.w = w;
405     rect.h = h;
406     SDL_FillRect(screen, &rect, color);
407 }
408
409 #define ALPHA_BLEND(a, oldp, newp, s)\
410 ((((oldp << s) * (255 - (a))) + (newp * (a))) / (255 << s))
411
412 #define RGBA_IN(r, g, b, a, s)\
413 {\
414     unsigned int v = ((const uint32_t *)(s))[0];\
415     a = (v >> 24) & 0xff;\
416     r = (v >> 16) & 0xff;\
417     g = (v >> 8) & 0xff;\
418     b = v & 0xff;\
419 }
420
421 #define YUVA_IN(y, u, v, a, s, pal)\
422 {\
423     unsigned int val = ((const uint32_t *)(pal))[*(const uint8_t*)(s)];\
424     a = (val >> 24) & 0xff;\
425     y = (val >> 16) & 0xff;\
426     u = (val >> 8) & 0xff;\
427     v = val & 0xff;\
428 }
429
430 #define YUVA_OUT(d, y, u, v, a)\
431 {\
432     ((uint32_t *)(d))[0] = (a << 24) | (y << 16) | (u << 8) | v;\
433 }
434
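/* Added worked example (not part of the original file): ALPHA_BLEND mixes an
   existing sample with a new one in fixed point. With alpha a == 128 (~50%),
   oldp == 200, newp == 16 and shift s == 0:
       (200 * (255 - 128) + 16 * 128) / 255 = 27448 / 255 = 107
   i.e. roughly the midpoint of the two values, rounded down by the integer
   division. The shift s lets the same macro operate on pre-accumulated sums. */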
435
436 #define BPP 1
437
438 static void blend_subrect(AVPicture *dst, const AVSubtitleRect *rect, int imgw, int imgh)
439 {
440     int wrap, wrap3, width2, skip2;
441     int y, u, v, a, u1, v1, a1, w, h;
442     uint8_t *lum, *cb, *cr;
443     const uint8_t *p;
444     const uint32_t *pal;
445     int dstx, dsty, dstw, dsth;
446
447     dstw = av_clip(rect->w, 0, imgw);
448     dsth = av_clip(rect->h, 0, imgh);
449     dstx = av_clip(rect->x, 0, imgw - dstw);
450     dsty = av_clip(rect->y, 0, imgh - dsth);
451     lum = dst->data[0] + dsty * dst->linesize[0];
452     cb  = dst->data[1] + (dsty >> 1) * dst->linesize[1];
453     cr  = dst->data[2] + (dsty >> 1) * dst->linesize[2];
454
455     width2 = ((dstw + 1) >> 1) + (dstx & ~dstw & 1);
456     skip2 = dstx >> 1;
457     wrap = dst->linesize[0];
458     wrap3 = rect->linesize[0];
459     p = rect->data[0];
460     pal = (const uint32_t *)rect->data[1];  /* Now in YCrCb! */
461
462     if (dsty & 1) {
463         lum += dstx;
464         cb += skip2;
465         cr += skip2;
466
467         if (dstx & 1) {
468             YUVA_IN(y, u, v, a, p, pal);
469             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
470             cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
471             cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
472             cb++;
473             cr++;
474             lum++;
475             p += BPP;
476         }
477         for (w = dstw - (dstx & 1); w >= 2; w -= 2) {
478             YUVA_IN(y, u, v, a, p, pal);
479             u1 = u;
480             v1 = v;
481             a1 = a;
482             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
483
484             YUVA_IN(y, u, v, a, p + BPP, pal);
485             u1 += u;
486             v1 += v;
487             a1 += a;
488             lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
489             cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
490             cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
491             cb++;
492             cr++;
493             p += 2 * BPP;
494             lum += 2;
495         }
496         if (w) {
497             YUVA_IN(y, u, v, a, p, pal);
498             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
499             cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
500             cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
501             p++;
502             lum++;
503         }
504         p += wrap3 - dstw * BPP;
505         lum += wrap - dstw - dstx;
506         cb += dst->linesize[1] - width2 - skip2;
507         cr += dst->linesize[2] - width2 - skip2;
508     }
509     for (h = dsth - (dsty & 1); h >= 2; h -= 2) {
510         lum += dstx;
511         cb += skip2;
512         cr += skip2;
513
514         if (dstx & 1) {
515             YUVA_IN(y, u, v, a, p, pal);
516             u1 = u;
517             v1 = v;
518             a1 = a;
519             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
520             p += wrap3;
521             lum += wrap;
522             YUVA_IN(y, u, v, a, p, pal);
523             u1 += u;
524             v1 += v;
525             a1 += a;
526             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
527             cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
528             cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
529             cb++;
530             cr++;
531             p += -wrap3 + BPP;
532             lum += -wrap + 1;
533         }
534         for (w = dstw - (dstx & 1); w >= 2; w -= 2) {
535             YUVA_IN(y, u, v, a, p, pal);
536             u1 = u;
537             v1 = v;
538             a1 = a;
539             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
540
541             YUVA_IN(y, u, v, a, p + BPP, pal);
542             u1 += u;
543             v1 += v;
544             a1 += a;
545             lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
546             p += wrap3;
547             lum += wrap;
548
549             YUVA_IN(y, u, v, a, p, pal);
550             u1 += u;
551             v1 += v;
552             a1 += a;
553             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
554
555             YUVA_IN(y, u, v, a, p + BPP, pal);
556             u1 += u;
557             v1 += v;
558             a1 += a;
559             lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
560
561             cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 2);
562             cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 2);
563
564             cb++;
565             cr++;
566             p += -wrap3 + 2 * BPP;
567             lum += -wrap + 2;
568         }
569         if (w) {
570             YUVA_IN(y, u, v, a, p, pal);
571             u1 = u;
572             v1 = v;
573             a1 = a;
574             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
575             p += wrap3;
576             lum += wrap;
577             YUVA_IN(y, u, v, a, p, pal);
578             u1 += u;
579             v1 += v;
580             a1 += a;
581             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
582             cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
583             cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
584             cb++;
585             cr++;
586             p += -wrap3 + BPP;
587             lum += -wrap + 1;
588         }
589         p += wrap3 + (wrap3 - dstw * BPP);
590         lum += wrap + (wrap - dstw - dstx);
591         cb += dst->linesize[1] - width2 - skip2;
592         cr += dst->linesize[2] - width2 - skip2;
593     }
594     /* handle odd height */
595     if (h) {
596         lum += dstx;
597         cb += skip2;
598         cr += skip2;
599
600         if (dstx & 1) {
601             YUVA_IN(y, u, v, a, p, pal);
602             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
603             cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
604             cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
605             cb++;
606             cr++;
607             lum++;
608             p += BPP;
609         }
610         for (w = dstw - (dstx & 1); w >= 2; w -= 2) {
611             YUVA_IN(y, u, v, a, p, pal);
612             u1 = u;
613             v1 = v;
614             a1 = a;
615             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
616
617             YUVA_IN(y, u, v, a, p + BPP, pal);
618             u1 += u;
619             v1 += v;
620             a1 += a;
621             lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
622             cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
623             cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
624             cb++;
625             cr++;
626             p += 2 * BPP;
627             lum += 2;
628         }
629         if (w) {
630             YUVA_IN(y, u, v, a, p, pal);
631             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
632             cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
633             cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
634         }
635     }
636 }
637
638 static void free_subpicture(SubPicture *sp)
639 {
640     avsubtitle_free(&sp->sub);
641 }
642
643 static void video_image_display(VideoState *is)
644 {
645     VideoPicture *vp;
646     SubPicture *sp;
647     AVPicture pict;
648     float aspect_ratio;
649     int width, height, x, y;
650     SDL_Rect rect;
651     int i;
652
653     vp = &is->pictq[is->pictq_rindex];
654     if (vp->bmp) {
655 #if CONFIG_AVFILTER
656          if (!vp->sar.num)
657              aspect_ratio = 0;
658          else
659              aspect_ratio = av_q2d(vp->sar);
660 #else
661
662         /* XXX: use variable in the frame */
663         if (is->video_st->sample_aspect_ratio.num)
664             aspect_ratio = av_q2d(is->video_st->sample_aspect_ratio);
665         else if (is->video_st->codec->sample_aspect_ratio.num)
666             aspect_ratio = av_q2d(is->video_st->codec->sample_aspect_ratio);
667         else
668             aspect_ratio = 0;
669 #endif
670         if (aspect_ratio <= 0.0)
671             aspect_ratio = 1.0;
672         aspect_ratio *= (float)vp->width / (float)vp->height;
673
674         if (is->subtitle_st)
675         {
676             if (is->subpq_size > 0)
677             {
678                 sp = &is->subpq[is->subpq_rindex];
679
680                 if (vp->pts >= sp->pts + ((float) sp->sub.start_display_time / 1000))
681                 {
682                     SDL_LockYUVOverlay (vp->bmp);
683
684                     pict.data[0] = vp->bmp->pixels[0];
685                     pict.data[1] = vp->bmp->pixels[2];
686                     pict.data[2] = vp->bmp->pixels[1];
687
688                     pict.linesize[0] = vp->bmp->pitches[0];
689                     pict.linesize[1] = vp->bmp->pitches[2];
690                     pict.linesize[2] = vp->bmp->pitches[1];
691
692                     for (i = 0; i < sp->sub.num_rects; i++)
693                         blend_subrect(&pict, sp->sub.rects[i],
694                                       vp->bmp->w, vp->bmp->h);
695
696                     SDL_UnlockYUVOverlay (vp->bmp);
697                 }
698             }
699         }
700
701
702         /* XXX: we assume the screen has a 1:1 pixel aspect ratio */
703         height = is->height;
704         width = ((int)rint(height * aspect_ratio)) & ~1;
705         if (width > is->width) {
706             width = is->width;
707             height = ((int)rint(width / aspect_ratio)) & ~1;
708         }
709         x = (is->width - width) / 2;
710         y = (is->height - height) / 2;
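        /* Added worked example (illustrative): a 1920x1080 frame (aspect ~1.78)
         * shown in a 1024x768 window gives width = rint(768 * 1.78) & ~1 = 1364,
         * which exceeds 1024, so width becomes 1024 and height becomes
         * rint(1024 / 1.78) & ~1 = 576; the picture is then centered at
         * x = 0, y = (768 - 576) / 2 = 96. */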
711         is->no_background = 0;
712         rect.x = is->xleft + x;
713         rect.y = is->ytop  + y;
714         rect.w = width;
715         rect.h = height;
716         SDL_DisplayYUVOverlay(vp->bmp, &rect);
717     }
718 }
719
720 /* get the current audio output buffer size, in bytes. With SDL, we
721    cannot get precise information about how much of it has been consumed. */
722 static int audio_write_get_buf_size(VideoState *is)
723 {
724     return is->audio_buf_size - is->audio_buf_index;
725 }
726
727 static inline int compute_mod(int a, int b)
728 {
729     a = a % b;
730     if (a >= 0)
731         return a;
732     else
733         return a + b;
734 }
735
736 static void video_audio_display(VideoState *s)
737 {
738     int i, i_start, x, y1, y, ys, delay, n, nb_display_channels;
739     int ch, channels, h, h2, bgcolor, fgcolor;
740     int64_t time_diff;
741     int rdft_bits, nb_freq;
742
743     for (rdft_bits = 1; (1 << rdft_bits) < 2 * s->height; rdft_bits++)
744         ;
745     nb_freq = 1 << (rdft_bits - 1);
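    /* Added note (illustrative): for a 512-pixel-high window the loop above
     * stops at rdft_bits = 10 (1 << 10 = 1024 >= 2 * 512), so nb_freq = 512
     * and the spectrum display below gets one frequency bin per output row. */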
746
747     /* compute display index : center on currently output samples */
748     channels = s->sdl_channels;
749     nb_display_channels = channels;
750     if (!s->paused) {
751         int data_used = s->show_audio == 1 ? s->width : (2 * nb_freq);
752         n = 2 * channels;
753         delay = audio_write_get_buf_size(s);
754         delay /= n;
755
756         /* to be more precise, we take into account the time spent since
757            the last buffer computation */
758         if (audio_callback_time) {
759             time_diff = av_gettime_relative() - audio_callback_time;
760             delay -= (time_diff * s->sdl_sample_rate) / 1000000;
761         }
762
763         delay += 2 * data_used;
764         if (delay < data_used)
765             delay = data_used;
766
767         i_start= x = compute_mod(s->sample_array_index - delay * channels, SAMPLE_ARRAY_SIZE);
768         if (s->show_audio == 1) {
769             h = INT_MIN;
770             for (i = 0; i < 1000; i += channels) {
771                 int idx = (SAMPLE_ARRAY_SIZE + x - i) % SAMPLE_ARRAY_SIZE;
772                 int a = s->sample_array[idx];
773                 int b = s->sample_array[(idx + 4 * channels) % SAMPLE_ARRAY_SIZE];
774                 int c = s->sample_array[(idx + 5 * channels) % SAMPLE_ARRAY_SIZE];
775                 int d = s->sample_array[(idx + 9 * channels) % SAMPLE_ARRAY_SIZE];
776                 int score = a - d;
777                 if (h < score && (b ^ c) < 0) {
778                     h = score;
779                     i_start = idx;
780                 }
781             }
782         }
783
784         s->last_i_start = i_start;
785     } else {
786         i_start = s->last_i_start;
787     }
788
789     bgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0x00);
790     if (s->show_audio == 1) {
791         fill_rectangle(screen,
792                        s->xleft, s->ytop, s->width, s->height,
793                        bgcolor);
794
795         fgcolor = SDL_MapRGB(screen->format, 0xff, 0xff, 0xff);
796
797         /* total height for one channel */
798         h = s->height / nb_display_channels;
799         /* graph height / 2 */
800         h2 = (h * 9) / 20;
801         for (ch = 0; ch < nb_display_channels; ch++) {
802             i = i_start + ch;
803             y1 = s->ytop + ch * h + (h / 2); /* position of center line */
804             for (x = 0; x < s->width; x++) {
805                 y = (s->sample_array[i] * h2) >> 15;
806                 if (y < 0) {
807                     y = -y;
808                     ys = y1 - y;
809                 } else {
810                     ys = y1;
811                 }
812                 fill_rectangle(screen,
813                                s->xleft + x, ys, 1, y,
814                                fgcolor);
815                 i += channels;
816                 if (i >= SAMPLE_ARRAY_SIZE)
817                     i -= SAMPLE_ARRAY_SIZE;
818             }
819         }
820
821         fgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0xff);
822
823         for (ch = 1; ch < nb_display_channels; ch++) {
824             y = s->ytop + ch * h;
825             fill_rectangle(screen,
826                            s->xleft, y, s->width, 1,
827                            fgcolor);
828         }
829         SDL_UpdateRect(screen, s->xleft, s->ytop, s->width, s->height);
830     } else {
831         nb_display_channels= FFMIN(nb_display_channels, 2);
832         if (rdft_bits != s->rdft_bits) {
833             av_rdft_end(s->rdft);
834             av_free(s->rdft_data);
835             s->rdft = av_rdft_init(rdft_bits, DFT_R2C);
836             s->rdft_bits = rdft_bits;
837             s->rdft_data = av_malloc(4 * nb_freq * sizeof(*s->rdft_data));
838         }
839         {
840             FFTSample *data[2];
841             for (ch = 0; ch < nb_display_channels; ch++) {
842                 data[ch] = s->rdft_data + 2 * nb_freq * ch;
843                 i = i_start + ch;
844                 for (x = 0; x < 2 * nb_freq; x++) {
845                     double w = (x-nb_freq) * (1.0 / nb_freq);
846                     data[ch][x] = s->sample_array[i] * (1.0 - w * w);
847                     i += channels;
848                     if (i >= SAMPLE_ARRAY_SIZE)
849                         i -= SAMPLE_ARRAY_SIZE;
850                 }
851                 av_rdft_calc(s->rdft, data[ch]);
852             }
853             /* This is the least efficient way to do this; we should of course
854              * access the data directly, but it is more than fast enough. */
855             for (y = 0; y < s->height; y++) {
856                 double w = 1 / sqrt(nb_freq);
857                 int a = sqrt(w * sqrt(data[0][2 * y + 0] * data[0][2 * y + 0] + data[0][2 * y + 1] * data[0][2 * y + 1]));
858                 int b = (nb_display_channels == 2 ) ? sqrt(w * sqrt(data[1][2 * y + 0] * data[1][2 * y + 0]
859                        + data[1][2 * y + 1] * data[1][2 * y + 1])) : a;
860                 a = FFMIN(a, 255);
861                 b = FFMIN(b, 255);
862                 fgcolor = SDL_MapRGB(screen->format, a, b, (a + b) / 2);
863
864                 fill_rectangle(screen,
865                             s->xpos, s->height-y, 1, 1,
866                             fgcolor);
867             }
868         }
869         SDL_UpdateRect(screen, s->xpos, s->ytop, 1, s->height);
870         s->xpos++;
871         if (s->xpos >= s->width)
872             s->xpos= s->xleft;
873     }
874 }
875
876 static int video_open(VideoState *is)
877 {
878     int flags = SDL_HWSURFACE | SDL_ASYNCBLIT | SDL_HWACCEL;
879     int w,h;
880
881     if (is_full_screen) flags |= SDL_FULLSCREEN;
882     else                flags |= SDL_RESIZABLE;
883
884     if (is_full_screen && fs_screen_width) {
885         w = fs_screen_width;
886         h = fs_screen_height;
887     } else if (!is_full_screen && screen_width) {
888         w = screen_width;
889         h = screen_height;
890 #if CONFIG_AVFILTER
891     } else if (is->out_video_filter && is->out_video_filter->inputs[0]) {
892         w = is->out_video_filter->inputs[0]->w;
893         h = is->out_video_filter->inputs[0]->h;
894 #else
895     } else if (is->video_st && is->video_st->codec->width) {
896         w = is->video_st->codec->width;
897         h = is->video_st->codec->height;
898 #endif
899     } else {
900         w = 640;
901         h = 480;
902     }
903     if (screen && is->width == screen->w && screen->w == w
904        && is->height== screen->h && screen->h == h)
905         return 0;
906
907 #if defined(__APPLE__) && !SDL_VERSION_ATLEAST(1, 2, 14)
908     /* setting bits_per_pixel = 0 or 32 causes blank video on OS X and older SDL */
909     screen = SDL_SetVideoMode(w, h, 24, flags);
910 #else
911     screen = SDL_SetVideoMode(w, h, 0, flags);
912 #endif
913     if (!screen) {
914         fprintf(stderr, "SDL: could not set video mode - exiting\n");
915         return -1;
916     }
917     if (!window_title)
918         window_title = input_filename;
919     SDL_WM_SetCaption(window_title, window_title);
920
921     is->width  = screen->w;
922     is->height = screen->h;
923
924     return 0;
925 }
926
927 /* display the current picture, if any */
928 static void video_display(VideoState *is)
929 {
930     if (!screen)
931         video_open(cur_stream);
932     if (is->audio_st && is->show_audio)
933         video_audio_display(is);
934     else if (is->video_st)
935         video_image_display(is);
936 }
937
938 static int refresh_thread(void *opaque)
939 {
940     VideoState *is= opaque;
941     while (!is->abort_request) {
942         SDL_Event event;
943         event.type = FF_REFRESH_EVENT;
944         event.user.data1 = opaque;
945         if (!is->refresh) {
946             is->refresh = 1;
947             SDL_PushEvent(&event);
948         }
949         av_usleep(is->audio_st && is->show_audio ? rdftspeed * 1000 : 5000); // FIXME: ideally we should wait for the correct time, but SDL's event passing is so slow it would be silly
950     }
951     return 0;
952 }
953
954 /* get the current audio clock value */
955 static double get_audio_clock(VideoState *is)
956 {
957     double pts;
958     int hw_buf_size, bytes_per_sec;
959     pts = is->audio_clock;
960     hw_buf_size = audio_write_get_buf_size(is);
961     bytes_per_sec = 0;
962     if (is->audio_st) {
963         bytes_per_sec = is->sdl_sample_rate * is->sdl_channels *
964                         av_get_bytes_per_sample(is->sdl_sample_fmt);
965     }
966     if (bytes_per_sec)
967         pts -= (double)hw_buf_size / bytes_per_sec;
968     return pts;
969 }
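/* Added worked example (illustrative, not from the original file): with 48 kHz
   stereo S16 output, bytes_per_sec = 48000 * 2 * 2 = 192000. If 8192 bytes are
   still queued for the sound card, the samples currently audible lag the
   decoder by 8192 / 192000 ~= 0.043 s, so get_audio_clock() returns
   audio_clock - 0.043. */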
970
971 /* get the current video clock value */
972 static double get_video_clock(VideoState *is)
973 {
974     if (is->paused) {
975         return is->video_current_pts;
976     } else {
977         return is->video_current_pts_drift + av_gettime_relative() / 1000000.0;
978     }
979 }
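/* Added note (illustrative): video_current_pts_drift is stored at display time
   as pts - now, so while playing, drift + now equals the pts of the frame on
   screen advanced by the wall-clock time elapsed since it was shown; e.g. a
   frame displayed at pts 10.00 s, queried 0.040 s later, yields 10.04 s. */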
980
981 /* get the current external clock value */
982 static double get_external_clock(VideoState *is)
983 {
984     int64_t ti;
985     ti = av_gettime_relative();
986     return is->external_clock + ((ti - is->external_clock_time) * 1e-6);
987 }
988
989 /* get the current master clock value */
990 static double get_master_clock(VideoState *is)
991 {
992     double val;
993
994     if (is->av_sync_type == AV_SYNC_VIDEO_MASTER) {
995         if (is->video_st)
996             val = get_video_clock(is);
997         else
998             val = get_audio_clock(is);
999     } else if (is->av_sync_type == AV_SYNC_AUDIO_MASTER) {
1000         if (is->audio_st)
1001             val = get_audio_clock(is);
1002         else
1003             val = get_video_clock(is);
1004     } else {
1005         val = get_external_clock(is);
1006     }
1007     return val;
1008 }
1009
1010 /* seek in the stream */
1011 static void stream_seek(VideoState *is, int64_t pos, int64_t rel, int seek_by_bytes)
1012 {
1013     if (!is->seek_req) {
1014         is->seek_pos = pos;
1015         is->seek_rel = rel;
1016         is->seek_flags &= ~AVSEEK_FLAG_BYTE;
1017         if (seek_by_bytes)
1018             is->seek_flags |= AVSEEK_FLAG_BYTE;
1019         is->seek_req = 1;
1020     }
1021 }
1022
1023 /* pause or resume the video */
1024 static void stream_pause(VideoState *is)
1025 {
1026     if (is->paused) {
1027         is->frame_timer += av_gettime_relative() / 1000000.0 + is->video_current_pts_drift - is->video_current_pts;
1028         if (is->read_pause_return != AVERROR(ENOSYS)) {
1029             is->video_current_pts = is->video_current_pts_drift + av_gettime_relative() / 1000000.0;
1030         }
1031         is->video_current_pts_drift = is->video_current_pts - av_gettime_relative() / 1000000.0;
1032     }
1033     is->paused = !is->paused;
1034 }
1035
1036 static double compute_target_time(double frame_current_pts, VideoState *is)
1037 {
1038     double delay, sync_threshold, diff = 0;
1039
1040     /* compute nominal delay */
1041     delay = frame_current_pts - is->frame_last_pts;
1042     if (delay <= 0 || delay >= 10.0) {
1043         /* if incorrect delay, use previous one */
1044         delay = is->frame_last_delay;
1045     } else {
1046         is->frame_last_delay = delay;
1047     }
1048     is->frame_last_pts = frame_current_pts;
1049
1050     /* update delay to follow master synchronisation source */
1051     if (((is->av_sync_type == AV_SYNC_AUDIO_MASTER && is->audio_st) ||
1052          is->av_sync_type == AV_SYNC_EXTERNAL_CLOCK)) {
1053         /* if video is slave, we try to correct big delays by
1054            duplicating or deleting a frame */
1055         diff = get_video_clock(is) - get_master_clock(is);
1056
1057         /* skip or repeat frame. We take into account the
1058            delay to compute the threshold. I still don't know
1059            if it is the best guess */
1060         sync_threshold = FFMAX(AV_SYNC_THRESHOLD, delay);
1061         if (fabs(diff) < AV_NOSYNC_THRESHOLD) {
1062             if (diff <= -sync_threshold)
1063                 delay = 0;
1064             else if (diff >= sync_threshold)
1065                 delay = 2 * delay;
1066         }
1067     }
1068     is->frame_timer += delay;
1069
1070     av_log(NULL, AV_LOG_TRACE, "video: delay=%0.3f pts=%0.3f A-V=%f\n",
1071             delay, frame_current_pts, -diff);
1072
1073     return is->frame_timer;
1074 }
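/* Added worked example (illustrative): at 25 fps the nominal delay is 0.04 s,
   so sync_threshold = FFMAX(0.01, 0.04) = 0.04. If the video clock trails the
   master clock by 0.12 s (diff = -0.12), the delay collapses to 0 and the next
   frame is shown immediately; if video leads by 0.12 s instead, the delay
   doubles to 0.08 s so the master clock can catch up. */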
1075
1076 /* called to display each frame */
1077 static void video_refresh_timer(void *opaque)
1078 {
1079     VideoState *is = opaque;
1080     VideoPicture *vp;
1081
1082     SubPicture *sp, *sp2;
1083
1084     if (is->video_st) {
1085 retry:
1086         if (is->pictq_size == 0) {
1087             // nothing to do, no picture to display in the queue
1088         } else {
1089             double time = av_gettime_relative() / 1000000.0;
1090             double next_target;
1091             /* dequeue the picture */
1092             vp = &is->pictq[is->pictq_rindex];
1093
1094             if (time < vp->target_clock)
1095                 return;
1096             /* update current video pts */
1097             is->video_current_pts = vp->pts;
1098             is->video_current_pts_drift = is->video_current_pts - time;
1099             is->video_current_pos = vp->pos;
1100             if (is->pictq_size > 1) {
1101                 VideoPicture *nextvp = &is->pictq[(is->pictq_rindex + 1) % VIDEO_PICTURE_QUEUE_SIZE];
1102                 assert(nextvp->target_clock >= vp->target_clock);
1103                 next_target= nextvp->target_clock;
1104             } else {
1105                 next_target = vp->target_clock + is->video_clock - vp->pts; // FIXME pass durations cleanly
1106             }
1107             if (framedrop && time > next_target) {
1108                 is->skip_frames *= 1.0 + FRAME_SKIP_FACTOR;
1109                 if (is->pictq_size > 1 || time > next_target + 0.5) {
1110                     /* update queue size and signal for next picture */
1111                     if (++is->pictq_rindex == VIDEO_PICTURE_QUEUE_SIZE)
1112                         is->pictq_rindex = 0;
1113
1114                     SDL_LockMutex(is->pictq_mutex);
1115                     is->pictq_size--;
1116                     SDL_CondSignal(is->pictq_cond);
1117                     SDL_UnlockMutex(is->pictq_mutex);
1118                     goto retry;
1119                 }
1120             }
1121
1122             if (is->subtitle_st) {
1123                 if (is->subtitle_stream_changed) {
1124                     SDL_LockMutex(is->subpq_mutex);
1125
1126                     while (is->subpq_size) {
1127                         free_subpicture(&is->subpq[is->subpq_rindex]);
1128
1129                         /* update queue size and signal for next picture */
1130                         if (++is->subpq_rindex == SUBPICTURE_QUEUE_SIZE)
1131                             is->subpq_rindex = 0;
1132
1133                         is->subpq_size--;
1134                     }
1135                     is->subtitle_stream_changed = 0;
1136
1137                     SDL_CondSignal(is->subpq_cond);
1138                     SDL_UnlockMutex(is->subpq_mutex);
1139                 } else {
1140                     if (is->subpq_size > 0) {
1141                         sp = &is->subpq[is->subpq_rindex];
1142
1143                         if (is->subpq_size > 1)
1144                             sp2 = &is->subpq[(is->subpq_rindex + 1) % SUBPICTURE_QUEUE_SIZE];
1145                         else
1146                             sp2 = NULL;
1147
1148                         if ((is->video_current_pts > (sp->pts + ((float) sp->sub.end_display_time / 1000)))
1149                                 || (sp2 && is->video_current_pts > (sp2->pts + ((float) sp2->sub.start_display_time / 1000))))
1150                         {
1151                             free_subpicture(sp);
1152
1153                             /* update queue size and signal for next picture */
1154                             if (++is->subpq_rindex == SUBPICTURE_QUEUE_SIZE)
1155                                 is->subpq_rindex = 0;
1156
1157                             SDL_LockMutex(is->subpq_mutex);
1158                             is->subpq_size--;
1159                             SDL_CondSignal(is->subpq_cond);
1160                             SDL_UnlockMutex(is->subpq_mutex);
1161                         }
1162                     }
1163                 }
1164             }
1165
1166             /* display picture */
1167             if (!display_disable)
1168                 video_display(is);
1169
1170             /* update queue size and signal for next picture */
1171             if (++is->pictq_rindex == VIDEO_PICTURE_QUEUE_SIZE)
1172                 is->pictq_rindex = 0;
1173
1174             SDL_LockMutex(is->pictq_mutex);
1175             is->pictq_size--;
1176             SDL_CondSignal(is->pictq_cond);
1177             SDL_UnlockMutex(is->pictq_mutex);
1178         }
1179     } else if (is->audio_st) {
1180         /* draw the next audio frame */
1181
1182         /* if there is only an audio stream, display the audio bars (better
1183            than nothing, just to test the implementation) */
1184
1185         /* display picture */
1186         if (!display_disable)
1187             video_display(is);
1188     }
1189     if (show_status) {
1190         static int64_t last_time;
1191         int64_t cur_time;
1192         int aqsize, vqsize, sqsize;
1193         double av_diff;
1194
1195         cur_time = av_gettime_relative();
1196         if (!last_time || (cur_time - last_time) >= 30000) {
1197             aqsize = 0;
1198             vqsize = 0;
1199             sqsize = 0;
1200             if (is->audio_st)
1201                 aqsize = is->audioq.size;
1202             if (is->video_st)
1203                 vqsize = is->videoq.size;
1204             if (is->subtitle_st)
1205                 sqsize = is->subtitleq.size;
1206             av_diff = 0;
1207             if (is->audio_st && is->video_st)
1208                 av_diff = get_audio_clock(is) - get_video_clock(is);
1209             printf("%7.2f A-V:%7.3f s:%3.1f aq=%5dKB vq=%5dKB sq=%5dB f=%"PRId64"/%"PRId64"   \r",
1210                    get_master_clock(is), av_diff, FFMAX(is->skip_frames - 1, 0), aqsize / 1024,
1211                    vqsize / 1024, sqsize, is->pts_ctx.num_faulty_dts, is->pts_ctx.num_faulty_pts);
1212             fflush(stdout);
1213             last_time = cur_time;
1214         }
1215     }
1216 }
1217
1218 static void stream_close(VideoState *is)
1219 {
1220     VideoPicture *vp;
1221     int i;
1222     /* XXX: use a special url_shutdown call to abort parse cleanly */
1223     is->abort_request = 1;
1224     SDL_WaitThread(is->parse_tid, NULL);
1225     SDL_WaitThread(is->refresh_tid, NULL);
1226
1227     /* free all pictures */
1228     for (i = 0; i < VIDEO_PICTURE_QUEUE_SIZE; i++) {
1229         vp = &is->pictq[i];
1230         if (vp->bmp) {
1231             SDL_FreeYUVOverlay(vp->bmp);
1232             vp->bmp = NULL;
1233         }
1234     }
1235     SDL_DestroyMutex(is->pictq_mutex);
1236     SDL_DestroyCond(is->pictq_cond);
1237     SDL_DestroyMutex(is->subpq_mutex);
1238     SDL_DestroyCond(is->subpq_cond);
1239 #if !CONFIG_AVFILTER
1240     if (is->img_convert_ctx)
1241         sws_freeContext(is->img_convert_ctx);
1242 #endif
1243     av_free(is);
1244 }
1245
1246 static void do_exit(void)
1247 {
1248     if (cur_stream) {
1249         stream_close(cur_stream);
1250         cur_stream = NULL;
1251     }
1252     uninit_opts();
1253     avformat_network_deinit();
1254     if (show_status)
1255         printf("\n");
1256     SDL_Quit();
1257     av_log(NULL, AV_LOG_QUIET, "");
1258     exit(0);
1259 }
1260
1261 /* allocate a picture (this needs to be done in the main thread to avoid
1262    potential locking problems) */
1263 static void alloc_picture(void *opaque)
1264 {
1265     VideoState *is = opaque;
1266     VideoPicture *vp;
1267
1268     vp = &is->pictq[is->pictq_windex];
1269
1270     if (vp->bmp)
1271         SDL_FreeYUVOverlay(vp->bmp);
1272
1273 #if CONFIG_AVFILTER
1274     vp->width   = is->out_video_filter->inputs[0]->w;
1275     vp->height  = is->out_video_filter->inputs[0]->h;
1276     vp->pix_fmt = is->out_video_filter->inputs[0]->format;
1277 #else
1278     vp->width   = is->video_st->codec->width;
1279     vp->height  = is->video_st->codec->height;
1280     vp->pix_fmt = is->video_st->codec->pix_fmt;
1281 #endif
1282
1283     vp->bmp = SDL_CreateYUVOverlay(vp->width, vp->height,
1284                                    SDL_YV12_OVERLAY,
1285                                    screen);
1286     if (!vp->bmp || vp->bmp->pitches[0] < vp->width) {
1287         /* SDL allocates a buffer smaller than requested if the video
1288          * overlay hardware is unable to support the requested size. */
1289         fprintf(stderr, "Error: the video system does not support an image\n"
1290                         "size of %dx%d pixels. Try using -vf \"scale=w:h\"\n"
1291                         "to reduce the image size.\n", vp->width, vp->height );
1292         do_exit();
1293     }
1294
1295     SDL_LockMutex(is->pictq_mutex);
1296     vp->allocated = 1;
1297     SDL_CondSignal(is->pictq_cond);
1298     SDL_UnlockMutex(is->pictq_mutex);
1299 }
1300
1301 /* The 'pts' parameter is the dts of the packet / pts of the frame and
1302  * guessed if not known. */
1303 static int queue_picture(VideoState *is, AVFrame *src_frame, double pts, int64_t pos)
1304 {
1305     VideoPicture *vp;
1306 #if CONFIG_AVFILTER
1307     AVPicture pict_src;
1308 #else
1309     int dst_pix_fmt = AV_PIX_FMT_YUV420P;
1310 #endif
1311     /* wait until we have space to put a new picture */
1312     SDL_LockMutex(is->pictq_mutex);
1313
1314     if (is->pictq_size >= VIDEO_PICTURE_QUEUE_SIZE && !is->refresh)
1315         is->skip_frames = FFMAX(1.0 - FRAME_SKIP_FACTOR, is->skip_frames * (1.0 - FRAME_SKIP_FACTOR));
1316
1317     while (is->pictq_size >= VIDEO_PICTURE_QUEUE_SIZE &&
1318            !is->videoq.abort_request) {
1319         SDL_CondWait(is->pictq_cond, is->pictq_mutex);
1320     }
1321     SDL_UnlockMutex(is->pictq_mutex);
1322
1323     if (is->videoq.abort_request)
1324         return -1;
1325
1326     vp = &is->pictq[is->pictq_windex];
1327
1328     vp->sar = src_frame->sample_aspect_ratio;
1329
1330     /* alloc or resize hardware picture buffer */
1331     if (!vp->bmp || vp->reallocate ||
1332 #if CONFIG_AVFILTER
1333         vp->width  != is->out_video_filter->inputs[0]->w ||
1334         vp->height != is->out_video_filter->inputs[0]->h) {
1335 #else
1336         vp->width != is->video_st->codec->width ||
1337         vp->height != is->video_st->codec->height) {
1338 #endif
1339         SDL_Event event;
1340
1341         vp->allocated  = 0;
1342         vp->reallocate = 0;
1343
1344         /* the allocation must be done in the main thread to avoid
1345            locking problems */
1346         event.type = FF_ALLOC_EVENT;
1347         event.user.data1 = is;
1348         SDL_PushEvent(&event);
1349
1350         /* wait until the picture is allocated */
1351         SDL_LockMutex(is->pictq_mutex);
1352         while (!vp->allocated && !is->videoq.abort_request) {
1353             SDL_CondWait(is->pictq_cond, is->pictq_mutex);
1354         }
1355         SDL_UnlockMutex(is->pictq_mutex);
1356
1357         if (is->videoq.abort_request)
1358             return -1;
1359     }
1360
1361     /* if the frame is not skipped, then display it */
1362     if (vp->bmp) {
1363         AVPicture pict = { { 0 } };
1364
1365         /* get a pointer on the bitmap */
1366         SDL_LockYUVOverlay (vp->bmp);
1367
1368         pict.data[0] = vp->bmp->pixels[0];
1369         pict.data[1] = vp->bmp->pixels[2];
1370         pict.data[2] = vp->bmp->pixels[1];
1371
1372         pict.linesize[0] = vp->bmp->pitches[0];
1373         pict.linesize[1] = vp->bmp->pitches[2];
1374         pict.linesize[2] = vp->bmp->pitches[1];
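        /* Note (added for clarity): SDL's YV12 overlay stores its planes in
         * Y, V, U order while the converted picture is YUV420P (Y, U, V),
         * hence the swapped [1]/[2] indices above. */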
1375
1376 #if CONFIG_AVFILTER
1377         pict_src.data[0] = src_frame->data[0];
1378         pict_src.data[1] = src_frame->data[1];
1379         pict_src.data[2] = src_frame->data[2];
1380
1381         pict_src.linesize[0] = src_frame->linesize[0];
1382         pict_src.linesize[1] = src_frame->linesize[1];
1383         pict_src.linesize[2] = src_frame->linesize[2];
1384
1385         // FIXME use direct rendering
1386         av_picture_copy(&pict, &pict_src,
1387                         vp->pix_fmt, vp->width, vp->height);
1388 #else
1389         av_opt_get_int(sws_opts, "sws_flags", 0, &sws_flags);
1390         is->img_convert_ctx = sws_getCachedContext(is->img_convert_ctx,
1391             vp->width, vp->height, vp->pix_fmt, vp->width, vp->height,
1392             dst_pix_fmt, sws_flags, NULL, NULL, NULL);
1393         if (!is->img_convert_ctx) {
1394             fprintf(stderr, "Cannot initialize the conversion context\n");
1395             exit(1);
1396         }
1397         sws_scale(is->img_convert_ctx, src_frame->data, src_frame->linesize,
1398                   0, vp->height, pict.data, pict.linesize);
1399 #endif
1400         /* update the bitmap content */
1401         SDL_UnlockYUVOverlay(vp->bmp);
1402
1403         vp->pts = pts;
1404         vp->pos = pos;
1405
1406         /* now we can update the picture count */
1407         if (++is->pictq_windex == VIDEO_PICTURE_QUEUE_SIZE)
1408             is->pictq_windex = 0;
1409         SDL_LockMutex(is->pictq_mutex);
1410         vp->target_clock = compute_target_time(vp->pts, is);
1411
1412         is->pictq_size++;
1413         SDL_UnlockMutex(is->pictq_mutex);
1414     }
1415     return 0;
1416 }
1417
1418 /* Compute the exact PTS for the picture if it is omitted in the stream.
1419  * The 'pts1' parameter is the dts of the packet / pts of the frame. */
1420 static int output_picture2(VideoState *is, AVFrame *src_frame, double pts1, int64_t pos)
1421 {
1422     double frame_delay, pts;
1423     int ret;
1424
1425     pts = pts1;
1426
1427     if (pts != 0) {
1428         /* update video clock with pts, if present */
1429         is->video_clock = pts;
1430     } else {
1431         pts = is->video_clock;
1432     }
1433     /* update video clock for next frame */
1434     frame_delay = av_q2d(is->video_st->codec->time_base);
1435     /* for MPEG2, the frame can be repeated, so we update the
1436        clock accordingly */
1437     frame_delay += src_frame->repeat_pict * (frame_delay * 0.5);
1438     is->video_clock += frame_delay;
1439
1440     ret = queue_picture(is, src_frame, pts, pos);
1441     av_frame_unref(src_frame);
1442     return ret;
1443 }
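/* Added worked example (illustrative): for 25 fps material the codec time base
   gives frame_delay = 0.04 s. A frame with repeat_pict == 1 (e.g. MPEG-2 with
   repeat_first_field) contributes an extra 0.04 * 0.5 = 0.02 s, so the video
   clock advances by 0.06 s for that frame. */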
1444
1445 static int get_video_frame(VideoState *is, AVFrame *frame, int64_t *pts, AVPacket *pkt)
1446 {
1447     int got_picture, i;
1448
1449     if (packet_queue_get(&is->videoq, pkt, 1) < 0)
1450         return -1;
1451
1452     if (pkt->data == flush_pkt.data) {
1453         avcodec_flush_buffers(is->video_st->codec);
1454
1455         SDL_LockMutex(is->pictq_mutex);
1456         // Make sure there are no long-delay timers (ideally we should just flush the queue, but that's harder)
1457         for (i = 0; i < VIDEO_PICTURE_QUEUE_SIZE; i++) {
1458             is->pictq[i].target_clock= 0;
1459         }
1460         while (is->pictq_size && !is->videoq.abort_request) {
1461             SDL_CondWait(is->pictq_cond, is->pictq_mutex);
1462         }
1463         is->video_current_pos = -1;
1464         SDL_UnlockMutex(is->pictq_mutex);
1465
1466         init_pts_correction(&is->pts_ctx);
1467         is->frame_last_pts = AV_NOPTS_VALUE;
1468         is->frame_last_delay = 0;
1469         is->frame_timer = (double)av_gettime_relative() / 1000000.0;
1470         is->skip_frames = 1;
1471         is->skip_frames_index = 0;
1472         return 0;
1473     }
1474
1475     avcodec_decode_video2(is->video_st->codec, frame, &got_picture, pkt);
1476
1477     if (got_picture) {
1478         if (decoder_reorder_pts == -1) {
1479             *pts = guess_correct_pts(&is->pts_ctx, frame->pkt_pts, frame->pkt_dts);
1480         } else if (decoder_reorder_pts) {
1481             *pts = frame->pkt_pts;
1482         } else {
1483             *pts = frame->pkt_dts;
1484         }
1485
1486         if (*pts == AV_NOPTS_VALUE) {
1487             *pts = 0;
1488         }
1489         if (is->video_st->sample_aspect_ratio.num) {
1490             frame->sample_aspect_ratio = is->video_st->sample_aspect_ratio;
1491         }
1492
1493         is->skip_frames_index += 1;
1494         if (is->skip_frames_index >= is->skip_frames) {
1495             is->skip_frames_index -= FFMAX(is->skip_frames, 1.0);
1496             return 1;
1497         }
1498         av_frame_unref(frame);
1499     }
1500     return 0;
1501 }
1502
1503 #if CONFIG_AVFILTER
1504 static int configure_video_filters(AVFilterGraph *graph, VideoState *is, const char *vfilters)
1505 {
1506     char sws_flags_str[128];
1507     char buffersrc_args[256];
1508     int ret;
1509     AVFilterContext *filt_src = NULL, *filt_out = NULL, *last_filter;
1510     AVCodecContext *codec = is->video_st->codec;
1511
1512     snprintf(sws_flags_str, sizeof(sws_flags_str), "flags=%"PRId64, sws_flags);
1513     graph->scale_sws_opts = av_strdup(sws_flags_str);
1514
1515     snprintf(buffersrc_args, sizeof(buffersrc_args), "%d:%d:%d:%d:%d:%d:%d",
1516              codec->width, codec->height, codec->pix_fmt,
1517              is->video_st->time_base.num, is->video_st->time_base.den,
1518              codec->sample_aspect_ratio.num, codec->sample_aspect_ratio.den);
1519
1520
1521     if ((ret = avfilter_graph_create_filter(&filt_src,
1522                                             avfilter_get_by_name("buffer"),
1523                                             "src", buffersrc_args, NULL,
1524                                             graph)) < 0)
1525         return ret;
1526     if ((ret = avfilter_graph_create_filter(&filt_out,
1527                                             avfilter_get_by_name("buffersink"),
1528                                             "out", NULL, NULL, graph)) < 0)
1529         return ret;
1530
1531     last_filter = filt_out;
1532
1533 /* Note: this macro adds a filter before the most recently added filter, so the
1534  * processing order of the filters is reversed */
1535 #define INSERT_FILT(name, arg) do {                                          \
1536     AVFilterContext *filt_ctx;                                               \
1537                                                                              \
1538     ret = avfilter_graph_create_filter(&filt_ctx,                            \
1539                                        avfilter_get_by_name(name),           \
1540                                        "avplay_" name, arg, NULL, graph);    \
1541     if (ret < 0)                                                             \
1542         return ret;                                                          \
1543                                                                              \
1544     ret = avfilter_link(filt_ctx, 0, last_filter, 0);                        \
1545     if (ret < 0)                                                             \
1546         return ret;                                                          \
1547                                                                              \
1548     last_filter = filt_ctx;                                                  \
1549 } while (0)
1550
1551     INSERT_FILT("format", "yuv420p");
1552
1553     if (autorotate) {
1554         uint8_t* displaymatrix = av_stream_get_side_data(is->video_st,
1555                                                          AV_PKT_DATA_DISPLAYMATRIX, NULL);
1556         if (displaymatrix) {
1557             double rot = av_display_rotation_get((int32_t*) displaymatrix);
1558             if (rot < -135 || rot > 135) {
1559                 INSERT_FILT("vflip", NULL);
1560                 INSERT_FILT("hflip", NULL);
1561             } else if (rot < -45) {
1562                 INSERT_FILT("transpose", "dir=clock");
1563             } else if (rot > 45) {
1564                 INSERT_FILT("transpose", "dir=cclock");
1565             }
1566         }
1567     }
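    /* Added note (descriptive of the checks above): |rot| > 135 degrees is
     * treated as a 180 degree rotation (hflip + vflip), rot < -45 inserts a
     * transpose filter with "dir=clock", rot > 45 inserts one with
     * "dir=cclock", and anything within +/-45 degrees leaves the chain
     * untouched. */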
1568
1569     if (vfilters) {
1570         AVFilterInOut *outputs = avfilter_inout_alloc();
1571         AVFilterInOut *inputs  = avfilter_inout_alloc();
1572
1573         outputs->name    = av_strdup("in");
1574         outputs->filter_ctx = filt_src;
1575         outputs->pad_idx = 0;
1576         outputs->next    = NULL;
1577
1578         inputs->name    = av_strdup("out");
1579         inputs->filter_ctx = last_filter;
1580         inputs->pad_idx = 0;
1581         inputs->next    = NULL;
1582
1583         if ((ret = avfilter_graph_parse(graph, vfilters, inputs, outputs, NULL)) < 0)
1584             return ret;
1585     } else {
1586         if ((ret = avfilter_link(filt_src, 0, last_filter, 0)) < 0)
1587             return ret;
1588     }
1589
1590     if ((ret = avfilter_graph_config(graph, NULL)) < 0)
1591         return ret;
1592
1593     is->in_video_filter  = filt_src;
1594     is->out_video_filter = filt_out;
1595
1596     return ret;
1597 }
1598
1599 #endif  /* CONFIG_AVFILTER */
1600
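     /* Video decoding thread: pulls packets from the video queue, decodes
      * them and, when avfilter is enabled, runs each frame through the
      * configured filter graph before handing it to the picture queue. */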
1601 static int video_thread(void *arg)
1602 {
1603     AVPacket pkt = { 0 };
1604     VideoState *is = arg;
1605     AVFrame *frame = av_frame_alloc();
1606     int64_t pts_int;
1607     double pts;
1608     int ret;
1609
1610 #if CONFIG_AVFILTER
1611     AVFilterGraph *graph = avfilter_graph_alloc();
1612     AVFilterContext *filt_out = NULL, *filt_in = NULL;
1613     int last_w = is->video_st->codec->width;
1614     int last_h = is->video_st->codec->height;
1615     if (!graph) {
1616         av_frame_free(&frame);
1617         return AVERROR(ENOMEM);
1618     }
1619
1620     if ((ret = configure_video_filters(graph, is, vfilters)) < 0)
1621         goto the_end;
1622     filt_in  = is->in_video_filter;
1623     filt_out = is->out_video_filter;
1624 #endif
1625
1626     if (!frame) {
1627 #if CONFIG_AVFILTER
1628         avfilter_graph_free(&graph);
1629 #endif
1630         return AVERROR(ENOMEM);
1631     }
1632
1633     for (;;) {
1634 #if CONFIG_AVFILTER
1635         AVRational tb;
1636 #endif
1637         while (is->paused && !is->videoq.abort_request)
1638             SDL_Delay(10);
1639
1640         av_free_packet(&pkt);
1641
1642         ret = get_video_frame(is, frame, &pts_int, &pkt);
1643         if (ret < 0)
1644             goto the_end;
1645
1646         if (!ret)
1647             continue;
1648
1649 #if CONFIG_AVFILTER
1650         if (   last_w != is->video_st->codec->width
1651             || last_h != is->video_st->codec->height) {
1652             av_log(NULL, AV_LOG_TRACE, "Changing size %dx%d -> %dx%d\n", last_w, last_h,
1653                     is->video_st->codec->width, is->video_st->codec->height);
1654             avfilter_graph_free(&graph);
1655             graph = avfilter_graph_alloc();
1656             if ((ret = configure_video_filters(graph, is, vfilters)) < 0)
1657                 goto the_end;
1658             filt_in  = is->in_video_filter;
1659             filt_out = is->out_video_filter;
1660             last_w = is->video_st->codec->width;
1661             last_h = is->video_st->codec->height;
1662         }
1663
1664         frame->pts = pts_int;
1665         ret = av_buffersrc_add_frame(filt_in, frame);
1666         if (ret < 0)
1667             goto the_end;
1668
1669         while (ret >= 0) {
1670             ret = av_buffersink_get_frame(filt_out, frame);
1671             if (ret < 0) {
1672                 ret = 0;
1673                 break;
1674             }
1675
1676             pts_int = frame->pts;
1677             tb      = filt_out->inputs[0]->time_base;
1678             if (av_cmp_q(tb, is->video_st->time_base)) {
1679                 av_unused int64_t pts1 = pts_int;
1680                 pts_int = av_rescale_q(pts_int, tb, is->video_st->time_base);
1681                 av_log(NULL, AV_LOG_TRACE, "video_thread(): "
1682                         "tb:%d/%d pts:%"PRId64" -> tb:%d/%d pts:%"PRId64"\n",
1683                         tb.num, tb.den, pts1,
1684                         is->video_st->time_base.num, is->video_st->time_base.den, pts_int);
1685             }
1686             pts = pts_int * av_q2d(is->video_st->time_base);
1687             ret = output_picture2(is, frame, pts, 0);
1688         }
1689 #else
1690         pts = pts_int * av_q2d(is->video_st->time_base);
1691         ret = output_picture2(is, frame, pts,  pkt.pos);
1692 #endif
1693
1694         if (ret < 0)
1695             goto the_end;
1696
1697
1698         if (step)
1699             if (cur_stream)
1700                 stream_pause(cur_stream);
1701     }
1702  the_end:
1703 #if CONFIG_AVFILTER
1704     av_freep(&vfilters);
1705     avfilter_graph_free(&graph);
1706 #endif
1707     av_free_packet(&pkt);
1708     av_frame_free(&frame);
1709     return 0;
1710 }
1711
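     /* Subtitle decoding thread: decodes packets from the subtitle queue and,
      * for bitmap subtitles (format == 0), converts the palette of each
      * rectangle from RGBA to YUVA before queuing it for display. */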
1712 static int subtitle_thread(void *arg)
1713 {
1714     VideoState *is = arg;
1715     SubPicture *sp;
1716     AVPacket pkt1, *pkt = &pkt1;
1717     int got_subtitle;
1718     double pts;
1719     int i, j;
1720     int r, g, b, y, u, v, a;
1721
1722     for (;;) {
1723         while (is->paused && !is->subtitleq.abort_request) {
1724             SDL_Delay(10);
1725         }
1726         if (packet_queue_get(&is->subtitleq, pkt, 1) < 0)
1727             break;
1728
1729         if (pkt->data == flush_pkt.data) {
1730             avcodec_flush_buffers(is->subtitle_st->codec);
1731             continue;
1732         }
1733         SDL_LockMutex(is->subpq_mutex);
1734         while (is->subpq_size >= SUBPICTURE_QUEUE_SIZE &&
1735                !is->subtitleq.abort_request) {
1736             SDL_CondWait(is->subpq_cond, is->subpq_mutex);
1737         }
1738         SDL_UnlockMutex(is->subpq_mutex);
1739
1740         if (is->subtitleq.abort_request)
1741             return 0;
1742
1743         sp = &is->subpq[is->subpq_windex];
1744
1745         /* NOTE: pts is the PTS of the _first_ picture beginning in
1746            this packet, if any */
1747         pts = 0;
1748         if (pkt->pts != AV_NOPTS_VALUE)
1749             pts = av_q2d(is->subtitle_st->time_base) * pkt->pts;
1750
1751         avcodec_decode_subtitle2(is->subtitle_st->codec, &sp->sub,
1752                                  &got_subtitle, pkt);
1753
1754         if (got_subtitle && sp->sub.format == 0) {
1755             sp->pts = pts;
1756
1757             for (i = 0; i < sp->sub.num_rects; i++)
1758             {
1759                 for (j = 0; j < sp->sub.rects[i]->nb_colors; j++)
1760                 {
1761                     RGBA_IN(r, g, b, a, (uint32_t *)sp->sub.rects[i]->data[1] + j);
1762                     y = RGB_TO_Y_CCIR(r, g, b);
1763                     u = RGB_TO_U_CCIR(r, g, b, 0);
1764                     v = RGB_TO_V_CCIR(r, g, b, 0);
1765                     YUVA_OUT((uint32_t *)sp->sub.rects[i]->data[1] + j, y, u, v, a);
1766                 }
1767             }
1768
1769             /* now we can update the picture count */
1770             if (++is->subpq_windex == SUBPICTURE_QUEUE_SIZE)
1771                 is->subpq_windex = 0;
1772             SDL_LockMutex(is->subpq_mutex);
1773             is->subpq_size++;
1774             SDL_UnlockMutex(is->subpq_mutex);
1775         }
1776         av_free_packet(pkt);
1777     }
1778     return 0;
1779 }
1780
1781 /* copy samples into the circular sample_array used by the audio waveform/spectrum display */
1782 static void update_sample_display(VideoState *is, short *samples, int samples_size)
1783 {
1784     int size, len;
1785
1786     size = samples_size / sizeof(short);
1787     while (size > 0) {
1788         len = SAMPLE_ARRAY_SIZE - is->sample_array_index;
1789         if (len > size)
1790             len = size;
1791         memcpy(is->sample_array + is->sample_array_index, samples, len * sizeof(short));
1792         samples += len;
1793         is->sample_array_index += len;
1794         if (is->sample_array_index >= SAMPLE_ARRAY_SIZE)
1795             is->sample_array_index = 0;
1796         size -= len;
1797     }
1798 }
1799
1800 /* return the new audio buffer size (samples can be added or removed
1801    to improve sync when video or an external clock is the master) */
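     /* The amount of correction is driven by an exponentially weighted average
      * of the audio/master clock difference and is clamped to at most
      * SAMPLE_CORRECTION_PERCENT_MAX percent of the buffer size. */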
1802 static int synchronize_audio(VideoState *is, short *samples,
1803                              int samples_size1, double pts)
1804 {
1805     int n, samples_size;
1806     double ref_clock;
1807
1808     n = is->sdl_channels * av_get_bytes_per_sample(is->sdl_sample_fmt);
1809     samples_size = samples_size1;
1810
1811     /* if not master, then we try to remove or add samples to correct the clock */
1812     if (((is->av_sync_type == AV_SYNC_VIDEO_MASTER && is->video_st) ||
1813          is->av_sync_type == AV_SYNC_EXTERNAL_CLOCK)) {
1814         double diff, avg_diff;
1815         int wanted_size, min_size, max_size, nb_samples;
1816
1817         ref_clock = get_master_clock(is);
1818         diff = get_audio_clock(is) - ref_clock;
1819
1820         if (diff < AV_NOSYNC_THRESHOLD) {
1821             is->audio_diff_cum = diff + is->audio_diff_avg_coef * is->audio_diff_cum;
1822             if (is->audio_diff_avg_count < AUDIO_DIFF_AVG_NB) {
1823                 /* not enough measures to have a correct estimate */
1824                 is->audio_diff_avg_count++;
1825             } else {
1826                 /* estimate the A-V difference */
1827                 avg_diff = is->audio_diff_cum * (1.0 - is->audio_diff_avg_coef);
1828
1829                 if (fabs(avg_diff) >= is->audio_diff_threshold) {
1830                     wanted_size = samples_size + ((int)(diff * is->sdl_sample_rate) * n);
1831                     nb_samples = samples_size / n;
1832
1833                     min_size = ((nb_samples * (100 - SAMPLE_CORRECTION_PERCENT_MAX)) / 100) * n;
1834                     max_size = ((nb_samples * (100 + SAMPLE_CORRECTION_PERCENT_MAX)) / 100) * n;
1835                     if (wanted_size < min_size)
1836                         wanted_size = min_size;
1837                     else if (wanted_size > max_size)
1838                         wanted_size = max_size;
1839
1840                     /* add or remove samples to correct the synchronization */
1841                     if (wanted_size < samples_size) {
1842                         /* remove samples */
1843                         samples_size = wanted_size;
1844                     } else if (wanted_size > samples_size) {
1845                         uint8_t *samples_end, *q;
1846                         int nb;
1847
1848                         /* add samples by repeating the last decoded sample */
1849                         nb = (wanted_size - samples_size);
1850                         samples_end = (uint8_t *)samples + samples_size - n;
1851                         q = samples_end + n;
1852                         while (nb > 0) {
1853                             memcpy(q, samples_end, n);
1854                             q += n;
1855                             nb -= n;
1856                         }
1857                         samples_size = wanted_size;
1858                     }
1859                 }
1860                 av_log(NULL, AV_LOG_TRACE, "diff=%f adiff=%f sample_diff=%d apts=%0.3f vpts=%0.3f %f\n",
1861                         diff, avg_diff, samples_size - samples_size1,
1862                         is->audio_clock, is->video_clock, is->audio_diff_threshold);
1863             }
1864         } else {
1865             /* the difference is too big: it may be caused by initial
1866                PTS errors, so reset the A-V filter */
1867             is->audio_diff_avg_count = 0;
1868             is->audio_diff_cum       = 0;
1869         }
1870     }
1871
1872     return samples_size;
1873 }
1874
1875 /* decode one audio frame and return its uncompressed size */
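     /* On success the (possibly resampled) data is left in is->audio_buf and
      * its size in bytes is returned; a negative value is returned when the
      * stream is paused or the audio queue has been aborted. */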
1876 static int audio_decode_frame(VideoState *is, double *pts_ptr)
1877 {
1878     AVPacket *pkt_temp = &is->audio_pkt_temp;
1879     AVPacket *pkt = &is->audio_pkt;
1880     AVCodecContext *dec = is->audio_st->codec;
1881     int n, len1, data_size, got_frame;
1882     double pts;
1883     int new_packet = 0;
1884     int flush_complete = 0;
1885
1886     for (;;) {
1887         /* NOTE: the audio packet can contain several frames */
1888         while (pkt_temp->size > 0 || (!pkt_temp->data && new_packet)) {
1889             int resample_changed, audio_resample;
1890
1891             if (!is->frame) {
1892                 if (!(is->frame = av_frame_alloc()))
1893                     return AVERROR(ENOMEM);
1894             }
1895
1896             if (flush_complete)
1897                 break;
1898             new_packet = 0;
1899             len1 = avcodec_decode_audio4(dec, is->frame, &got_frame, pkt_temp);
1900             if (len1 < 0) {
1901                 /* if error, we skip the frame */
1902                 pkt_temp->size = 0;
1903                 break;
1904             }
1905
1906             pkt_temp->data += len1;
1907             pkt_temp->size -= len1;
1908
1909             if (!got_frame) {
1910                 /* stop sending empty packets if the decoder is finished */
1911                 if (!pkt_temp->data && (dec->codec->capabilities & AV_CODEC_CAP_DELAY))
1912                     flush_complete = 1;
1913                 continue;
1914             }
1915             data_size = av_samples_get_buffer_size(NULL, dec->channels,
1916                                                    is->frame->nb_samples,
1917                                                    is->frame->format, 1);
1918
1919             audio_resample = is->frame->format         != is->sdl_sample_fmt     ||
1920                              is->frame->channel_layout != is->sdl_channel_layout ||
1921                              is->frame->sample_rate    != is->sdl_sample_rate;
1922
1923             resample_changed = is->frame->format         != is->resample_sample_fmt     ||
1924                                is->frame->channel_layout != is->resample_channel_layout ||
1925                                is->frame->sample_rate    != is->resample_sample_rate;
1926
1927             if ((!is->avr && audio_resample) || resample_changed) {
1928                 int ret;
1929                 if (is->avr)
1930                     avresample_close(is->avr);
1931                 else if (audio_resample) {
1932                     is->avr = avresample_alloc_context();
1933                     if (!is->avr) {
1934                         fprintf(stderr, "error allocating AVAudioResampleContext\n");
1935                         break;
1936                     }
1937                 }
1938                 if (audio_resample) {
1939                     av_opt_set_int(is->avr, "in_channel_layout",  is->frame->channel_layout, 0);
1940                     av_opt_set_int(is->avr, "in_sample_fmt",      is->frame->format,         0);
1941                     av_opt_set_int(is->avr, "in_sample_rate",     is->frame->sample_rate,    0);
1942                     av_opt_set_int(is->avr, "out_channel_layout", is->sdl_channel_layout,    0);
1943                     av_opt_set_int(is->avr, "out_sample_fmt",     is->sdl_sample_fmt,        0);
1944                     av_opt_set_int(is->avr, "out_sample_rate",    is->sdl_sample_rate,       0);
1945
1946                     if ((ret = avresample_open(is->avr)) < 0) {
1947                         fprintf(stderr, "error initializing libavresample\n");
1948                         break;
1949                     }
1950                 }
1951                 is->resample_sample_fmt     = is->frame->format;
1952                 is->resample_channel_layout = is->frame->channel_layout;
1953                 is->resample_sample_rate    = is->frame->sample_rate;
1954             }
1955
1956             if (audio_resample) {
1957                 void *tmp_out;
1958                 int out_samples, out_size, out_linesize;
1959                 int osize      = av_get_bytes_per_sample(is->sdl_sample_fmt);
1960                 int nb_samples = is->frame->nb_samples;
1961
1962                 out_size = av_samples_get_buffer_size(&out_linesize,
1963                                                       is->sdl_channels,
1964                                                       nb_samples,
1965                                                       is->sdl_sample_fmt, 0);
1966                 tmp_out = av_realloc(is->audio_buf1, out_size);
1967                 if (!tmp_out)
1968                     return AVERROR(ENOMEM);
1969                 is->audio_buf1 = tmp_out;
1970
1971                 out_samples = avresample_convert(is->avr,
1972                                                  &is->audio_buf1,
1973                                                  out_linesize, nb_samples,
1974                                                  is->frame->data,
1975                                                  is->frame->linesize[0],
1976                                                  is->frame->nb_samples);
1977                 if (out_samples < 0) {
1978                     fprintf(stderr, "avresample_convert() failed\n");
1979                     break;
1980                 }
1981                 is->audio_buf = is->audio_buf1;
1982                 data_size = out_samples * osize * is->sdl_channels;
1983             } else {
1984                 is->audio_buf = is->frame->data[0];
1985             }
1986
1987             /* the frame pts is derived from the running audio clock */
1988             pts = is->audio_clock;
1989             *pts_ptr = pts;
1990             n = is->sdl_channels * av_get_bytes_per_sample(is->sdl_sample_fmt);
1991             is->audio_clock += (double)data_size /
1992                 (double)(n * is->sdl_sample_rate);
1993 #ifdef DEBUG
1994             {
1995                 static double last_clock;
1996                 printf("audio: delay=%0.3f clock=%0.3f pts=%0.3f\n",
1997                        is->audio_clock - last_clock,
1998                        is->audio_clock, pts);
1999                 last_clock = is->audio_clock;
2000             }
2001 #endif
2002             return data_size;
2003         }
2004
2005         /* free the current packet */
2006         if (pkt->data)
2007             av_free_packet(pkt);
2008         memset(pkt_temp, 0, sizeof(*pkt_temp));
2009
2010         if (is->paused || is->audioq.abort_request) {
2011             return -1;
2012         }
2013
2014         /* read next packet */
2015         if ((new_packet = packet_queue_get(&is->audioq, pkt, 1)) < 0)
2016             return -1;
2017
2018         if (pkt->data == flush_pkt.data) {
2019             avcodec_flush_buffers(dec);
2020             flush_complete = 0;
2021         }
2022
2023         *pkt_temp = *pkt;
2024
2025         /* update the audio clock with the packet pts, if available */
2026         if (pkt->pts != AV_NOPTS_VALUE) {
2027             is->audio_clock = av_q2d(is->audio_st->time_base)*pkt->pts;
2028         }
2029     }
2030 }
2031
2032 /* prepare a new audio buffer */
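     /* SDL invokes this callback from its audio thread whenever the device
      * needs 'len' more bytes; is->audio_buf is refilled via
      * audio_decode_frame() as needed and copied out slice by slice. */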
2033 static void sdl_audio_callback(void *opaque, Uint8 *stream, int len)
2034 {
2035     VideoState *is = opaque;
2036     int audio_size, len1;
2037     double pts;
2038
2039     audio_callback_time = av_gettime_relative();
2040
2041     while (len > 0) {
2042         if (is->audio_buf_index >= is->audio_buf_size) {
2043            audio_size = audio_decode_frame(is, &pts);
2044            if (audio_size < 0) {
2045                 /* if error, just output silence */
2046                is->audio_buf      = is->silence_buf;
2047                is->audio_buf_size = sizeof(is->silence_buf);
2048            } else {
2049                if (is->show_audio)
2050                    update_sample_display(is, (int16_t *)is->audio_buf, audio_size);
2051                audio_size = synchronize_audio(is, (int16_t *)is->audio_buf, audio_size,
2052                                               pts);
2053                is->audio_buf_size = audio_size;
2054            }
2055            is->audio_buf_index = 0;
2056         }
2057         len1 = is->audio_buf_size - is->audio_buf_index;
2058         if (len1 > len)
2059             len1 = len;
2060         memcpy(stream, (uint8_t *)is->audio_buf + is->audio_buf_index, len1);
2061         len -= len1;
2062         stream += len1;
2063         is->audio_buf_index += len1;
2064     }
2065 }
2066
2067 /* open a given stream. Return 0 if OK */
2068 static int stream_component_open(VideoState *is, int stream_index)
2069 {
2070     AVFormatContext *ic = is->ic;
2071     AVCodecContext *avctx;
2072     AVCodec *codec;
2073     SDL_AudioSpec wanted_spec, spec;
2074     AVDictionary *opts;
2075     AVDictionaryEntry *t = NULL;
2076     int ret = 0;
2077
2078     if (stream_index < 0 || stream_index >= ic->nb_streams)
2079         return -1;
2080     avctx = ic->streams[stream_index]->codec;
2081
2082     opts = filter_codec_opts(codec_opts, avctx->codec_id, ic, ic->streams[stream_index], NULL);
2083
2084     codec = avcodec_find_decoder(avctx->codec_id);
2085     avctx->workaround_bugs   = workaround_bugs;
2086     avctx->idct_algo         = idct;
2087     avctx->skip_frame        = skip_frame;
2088     avctx->skip_idct         = skip_idct;
2089     avctx->skip_loop_filter  = skip_loop_filter;
2090     avctx->error_concealment = error_concealment;
2091
2092     if (fast)
2093         avctx->flags2 |= AV_CODEC_FLAG2_FAST;
2094
2095     if (!av_dict_get(opts, "threads", NULL, 0))
2096         av_dict_set(&opts, "threads", "auto", 0);
2097     if (avctx->codec_type == AVMEDIA_TYPE_VIDEO)
2098         av_dict_set(&opts, "refcounted_frames", "1", 0);
2099     if (!codec ||
2100         (ret = avcodec_open2(avctx, codec, &opts)) < 0) {
2101         goto fail;
2102     }
2103     if ((t = av_dict_get(opts, "", NULL, AV_DICT_IGNORE_SUFFIX))) {
2104         av_log(NULL, AV_LOG_ERROR, "Option %s not found.\n", t->key);
2105         ret =  AVERROR_OPTION_NOT_FOUND;
2106         goto fail;
2107     }
2108
2109     /* prepare audio output */
2110     if (avctx->codec_type == AVMEDIA_TYPE_AUDIO) {
2111         is->sdl_sample_rate = avctx->sample_rate;
2112
2113         if (!avctx->channel_layout)
2114             avctx->channel_layout = av_get_default_channel_layout(avctx->channels);
2115         if (!avctx->channel_layout) {
2116             fprintf(stderr, "unable to guess channel layout\n");
2117             ret = AVERROR_INVALIDDATA;
2118             goto fail;
2119         }
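             /* the SDL audio device is opened as mono or stereo only; other
              * channel layouts are converted by libavresample */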
2120         if (avctx->channels == 1)
2121             is->sdl_channel_layout = AV_CH_LAYOUT_MONO;
2122         else
2123             is->sdl_channel_layout = AV_CH_LAYOUT_STEREO;
2124         is->sdl_channels = av_get_channel_layout_nb_channels(is->sdl_channel_layout);
2125
2126         wanted_spec.format = AUDIO_S16SYS;
2127         wanted_spec.freq = is->sdl_sample_rate;
2128         wanted_spec.channels = is->sdl_channels;
2129         wanted_spec.silence = 0;
2130         wanted_spec.samples = SDL_AUDIO_BUFFER_SIZE;
2131         wanted_spec.callback = sdl_audio_callback;
2132         wanted_spec.userdata = is;
2133         if (SDL_OpenAudio(&wanted_spec, &spec) < 0) {
2134             fprintf(stderr, "SDL_OpenAudio: %s\n", SDL_GetError());
2135             ret = AVERROR_UNKNOWN;
2136             goto fail;
2137         }
2138         is->audio_hw_buf_size = spec.size;
2139         is->sdl_sample_fmt          = AV_SAMPLE_FMT_S16;
2140         is->resample_sample_fmt     = is->sdl_sample_fmt;
2141         is->resample_channel_layout = avctx->channel_layout;
2142         is->resample_sample_rate    = avctx->sample_rate;
2143     }
2144
2145     ic->streams[stream_index]->discard = AVDISCARD_DEFAULT;
2146     switch (avctx->codec_type) {
2147     case AVMEDIA_TYPE_AUDIO:
2148         is->audio_stream = stream_index;
2149         is->audio_st = ic->streams[stream_index];
2150         is->audio_buf_size  = 0;
2151         is->audio_buf_index = 0;
2152
2153         /* init averaging filter */
2154         is->audio_diff_avg_coef  = exp(log(0.01) / AUDIO_DIFF_AVG_NB);
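             /* the coefficient is chosen so that a difference's weight decays
              * to 1% after AUDIO_DIFF_AVG_NB iterations of the running average */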
2155         is->audio_diff_avg_count = 0;
2156         /* since we do not have a precise enough audio fifo fullness measure,
2157            we only correct audio sync when the error exceeds this threshold */
2158         is->audio_diff_threshold = 2.0 * SDL_AUDIO_BUFFER_SIZE / avctx->sample_rate;
2159
2160         memset(&is->audio_pkt, 0, sizeof(is->audio_pkt));
2161         packet_queue_init(&is->audioq);
2162         SDL_PauseAudio(0);
2163         break;
2164     case AVMEDIA_TYPE_VIDEO:
2165         is->video_stream = stream_index;
2166         is->video_st = ic->streams[stream_index];
2167
2168         packet_queue_init(&is->videoq);
2169         is->video_tid = SDL_CreateThread(video_thread, is);
2170         break;
2171     case AVMEDIA_TYPE_SUBTITLE:
2172         is->subtitle_stream = stream_index;
2173         is->subtitle_st = ic->streams[stream_index];
2174         packet_queue_init(&is->subtitleq);
2175
2176         is->subtitle_tid = SDL_CreateThread(subtitle_thread, is);
2177         break;
2178     default:
2179         break;
2180     }
2181
2182 fail:
2183     av_dict_free(&opts);
2184
2185     return ret;
2186 }
2187
2188 static void stream_component_close(VideoState *is, int stream_index)
2189 {
2190     AVFormatContext *ic = is->ic;
2191     AVCodecContext *avctx;
2192
2193     if (stream_index < 0 || stream_index >= ic->nb_streams)
2194         return;
2195     avctx = ic->streams[stream_index]->codec;
2196
2197     switch (avctx->codec_type) {
2198     case AVMEDIA_TYPE_AUDIO:
2199         packet_queue_abort(&is->audioq);
2200
2201         SDL_CloseAudio();
2202
2203         packet_queue_end(&is->audioq);
2204         av_free_packet(&is->audio_pkt);
2205         if (is->avr)
2206             avresample_free(&is->avr);
2207         av_freep(&is->audio_buf1);
2208         is->audio_buf = NULL;
2209         av_frame_free(&is->frame);
2210
2211         if (is->rdft) {
2212             av_rdft_end(is->rdft);
2213             av_freep(&is->rdft_data);
2214             is->rdft = NULL;
2215             is->rdft_bits = 0;
2216         }
2217         break;
2218     case AVMEDIA_TYPE_VIDEO:
2219         packet_queue_abort(&is->videoq);
2220
2221         /* note: we also signal the condition variable to make sure we
2222            unblock the video thread in all cases */
2223         SDL_LockMutex(is->pictq_mutex);
2224         SDL_CondSignal(is->pictq_cond);
2225         SDL_UnlockMutex(is->pictq_mutex);
2226
2227         SDL_WaitThread(is->video_tid, NULL);
2228
2229         packet_queue_end(&is->videoq);
2230         break;
2231     case AVMEDIA_TYPE_SUBTITLE:
2232         packet_queue_abort(&is->subtitleq);
2233
2234         /* note: we also signal the condition variable to make sure we
2235            unblock the subtitle thread in all cases */
2236         SDL_LockMutex(is->subpq_mutex);
2237         is->subtitle_stream_changed = 1;
2238
2239         SDL_CondSignal(is->subpq_cond);
2240         SDL_UnlockMutex(is->subpq_mutex);
2241
2242         SDL_WaitThread(is->subtitle_tid, NULL);
2243
2244         packet_queue_end(&is->subtitleq);
2245         break;
2246     default:
2247         break;
2248     }
2249
2250     ic->streams[stream_index]->discard = AVDISCARD_ALL;
2251     avcodec_close(avctx);
2252     switch (avctx->codec_type) {
2253     case AVMEDIA_TYPE_AUDIO:
2254         is->audio_st = NULL;
2255         is->audio_stream = -1;
2256         break;
2257     case AVMEDIA_TYPE_VIDEO:
2258         is->video_st = NULL;
2259         is->video_stream = -1;
2260         break;
2261     case AVMEDIA_TYPE_SUBTITLE:
2262         is->subtitle_st = NULL;
2263         is->subtitle_stream = -1;
2264         break;
2265     default:
2266         break;
2267     }
2268 }
2269
2270 /* since we have only one decoding thread, we can use a global
2271    variable instead of a thread local variable */
2272 static VideoState *global_video_state;
2273
2274 static int decode_interrupt_cb(void *ctx)
2275 {
2276     return global_video_state && global_video_state->abort_request;
2277 }
2278
2279 /* this thread gets the stream from the disk or the network */
2280 static int decode_thread(void *arg)
2281 {
2282     VideoState *is = arg;
2283     AVFormatContext *ic = NULL;
2284     int err, i, ret;
2285     int st_index[AVMEDIA_TYPE_NB];
2286     AVPacket pkt1, *pkt = &pkt1;
2287     int eof = 0;
2288     int pkt_in_play_range = 0;
2289     AVDictionaryEntry *t;
2290     AVDictionary **opts;
2291     int orig_nb_streams;
2292
2293     memset(st_index, -1, sizeof(st_index));
2294     is->video_stream = -1;
2295     is->audio_stream = -1;
2296     is->subtitle_stream = -1;
2297
2298     global_video_state = is;
2299
2300     ic = avformat_alloc_context();
2301     if (!ic) {
2302         av_log(NULL, AV_LOG_FATAL, "Could not allocate context.\n");
2303         ret = AVERROR(ENOMEM);
2304         goto fail;
2305     }
2306     ic->interrupt_callback.callback = decode_interrupt_cb;
2307     err = avformat_open_input(&ic, is->filename, is->iformat, &format_opts);
2308     if (err < 0) {
2309         print_error(is->filename, err);
2310         ret = -1;
2311         goto fail;
2312     }
2313     if ((t = av_dict_get(format_opts, "", NULL, AV_DICT_IGNORE_SUFFIX))) {
2314         av_log(NULL, AV_LOG_ERROR, "Option %s not found.\n", t->key);
2315         ret = AVERROR_OPTION_NOT_FOUND;
2316         goto fail;
2317     }
2318     is->ic = ic;
2319
2320     if (genpts)
2321         ic->flags |= AVFMT_FLAG_GENPTS;
2322
2323     opts = setup_find_stream_info_opts(ic, codec_opts);
2324     orig_nb_streams = ic->nb_streams;
2325
2326     err = avformat_find_stream_info(ic, opts);
2327
2328     for (i = 0; i < orig_nb_streams; i++)
2329         av_dict_free(&opts[i]);
2330     av_freep(&opts);
2331
2332     if (err < 0) {
2333         fprintf(stderr, "%s: could not find codec parameters\n", is->filename);
2334         ret = -1;
2335         goto fail;
2336     }
2337
2338     if (ic->pb)
2339         ic->pb->eof_reached = 0; // FIXME hack, avplay maybe should not use url_feof() to test for the end
2340
2341     if (seek_by_bytes < 0)
2342         seek_by_bytes = !!(ic->iformat->flags & AVFMT_TS_DISCONT);
2343
2344     /* if seeking requested, we execute it */
2345     if (start_time != AV_NOPTS_VALUE) {
2346         int64_t timestamp;
2347
2348         timestamp = start_time;
2349         /* add the stream start time */
2350         if (ic->start_time != AV_NOPTS_VALUE)
2351             timestamp += ic->start_time;
2352         ret = avformat_seek_file(ic, -1, INT64_MIN, timestamp, INT64_MAX, 0);
2353         if (ret < 0) {
2354             fprintf(stderr, "%s: could not seek to position %0.3f\n",
2355                     is->filename, (double)timestamp / AV_TIME_BASE);
2356         }
2357     }
2358
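         /* start with every stream discarded, then let av_find_best_stream()
          * pick the (possibly user-requested) audio, video and subtitle
          * streams that will actually be opened */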
2359     for (i = 0; i < ic->nb_streams; i++)
2360         ic->streams[i]->discard = AVDISCARD_ALL;
2361     if (!video_disable)
2362         st_index[AVMEDIA_TYPE_VIDEO] =
2363             av_find_best_stream(ic, AVMEDIA_TYPE_VIDEO,
2364                                 wanted_stream[AVMEDIA_TYPE_VIDEO], -1, NULL, 0);
2365     if (!audio_disable)
2366         st_index[AVMEDIA_TYPE_AUDIO] =
2367             av_find_best_stream(ic, AVMEDIA_TYPE_AUDIO,
2368                                 wanted_stream[AVMEDIA_TYPE_AUDIO],
2369                                 st_index[AVMEDIA_TYPE_VIDEO],
2370                                 NULL, 0);
2371     if (!video_disable)
2372         st_index[AVMEDIA_TYPE_SUBTITLE] =
2373             av_find_best_stream(ic, AVMEDIA_TYPE_SUBTITLE,
2374                                 wanted_stream[AVMEDIA_TYPE_SUBTITLE],
2375                                 (st_index[AVMEDIA_TYPE_AUDIO] >= 0 ?
2376                                  st_index[AVMEDIA_TYPE_AUDIO] :
2377                                  st_index[AVMEDIA_TYPE_VIDEO]),
2378                                 NULL, 0);
2379     if (show_status) {
2380         av_dump_format(ic, 0, is->filename, 0);
2381     }
2382
2383     /* open the streams */
2384     if (st_index[AVMEDIA_TYPE_AUDIO] >= 0) {
2385         stream_component_open(is, st_index[AVMEDIA_TYPE_AUDIO]);
2386     }
2387
2388     ret = -1;
2389     if (st_index[AVMEDIA_TYPE_VIDEO] >= 0) {
2390         ret = stream_component_open(is, st_index[AVMEDIA_TYPE_VIDEO]);
2391     }
2392     is->refresh_tid = SDL_CreateThread(refresh_thread, is);
2393     if (ret < 0) {
2394         if (!display_disable)
2395             is->show_audio = 2;
2396     }
2397
2398     if (st_index[AVMEDIA_TYPE_SUBTITLE] >= 0) {
2399         stream_component_open(is, st_index[AVMEDIA_TYPE_SUBTITLE]);
2400     }
2401
2402     if (is->video_stream < 0 && is->audio_stream < 0) {
2403         fprintf(stderr, "%s: could not open codecs\n", is->filename);
2404         ret = -1;
2405         goto fail;
2406     }
2407
2408     for (;;) {
2409         if (is->abort_request)
2410             break;
2411         if (is->paused != is->last_paused) {
2412             is->last_paused = is->paused;
2413             if (is->paused)
2414                 is->read_pause_return = av_read_pause(ic);
2415             else
2416                 av_read_play(ic);
2417         }
2418 #if CONFIG_RTSP_DEMUXER
2419         if (is->paused && !strcmp(ic->iformat->name, "rtsp")) {
2420             /* wait 10 ms to avoid trying to get another packet */
2421             /* XXX: horrible */
2422             SDL_Delay(10);
2423             continue;
2424         }
2425 #endif
2426         if (is->seek_req) {
2427             int64_t seek_target = is->seek_pos;
2428             int64_t seek_min    = is->seek_rel > 0 ? seek_target - is->seek_rel + 2: INT64_MIN;
2429             int64_t seek_max    = is->seek_rel < 0 ? seek_target - is->seek_rel - 2: INT64_MAX;
2430 // FIXME the +-2 is because rounding is not done in the correct direction
2431 //      when the seek_pos/seek_rel variables are generated
2432
2433             ret = avformat_seek_file(is->ic, -1, seek_min, seek_target, seek_max, is->seek_flags);
2434             if (ret < 0) {
2435                 fprintf(stderr, "%s: error while seeking\n", is->ic->filename);
2436             } else {
2437                 if (is->audio_stream >= 0) {
2438                     packet_queue_flush(&is->audioq);
2439                     packet_queue_put(&is->audioq, &flush_pkt);
2440                 }
2441                 if (is->subtitle_stream >= 0) {
2442                     packet_queue_flush(&is->subtitleq);
2443                     packet_queue_put(&is->subtitleq, &flush_pkt);
2444                 }
2445                 if (is->video_stream >= 0) {
2446                     packet_queue_flush(&is->videoq);
2447                     packet_queue_put(&is->videoq, &flush_pkt);
2448                 }
2449             }
2450             is->seek_req = 0;
2451             eof = 0;
2452         }
2453
2454         /* if the queues are full, no need to read more */
2455         if (!infinite_buffer &&
2456               (is->audioq.size + is->videoq.size + is->subtitleq.size > MAX_QUEUE_SIZE
2457             || (   (is->audioq   .size  > MIN_AUDIOQ_SIZE || is->audio_stream < 0)
2458                 && (is->videoq   .nb_packets > MIN_FRAMES || is->video_stream < 0)
2459                 && (is->subtitleq.nb_packets > MIN_FRAMES || is->subtitle_stream < 0)))) {
2460             /* wait 10 ms */
2461             SDL_Delay(10);
2462             continue;
2463         }
2464         if (eof) {
2465             if (is->video_stream >= 0) {
2466                 av_init_packet(pkt);
2467                 pkt->data = NULL;
2468                 pkt->size = 0;
2469                 pkt->stream_index = is->video_stream;
2470                 packet_queue_put(&is->videoq, pkt);
2471             }
2472             if (is->audio_stream >= 0 &&
2473                 (is->audio_st->codec->codec->capabilities & AV_CODEC_CAP_DELAY)) {
2474                 av_init_packet(pkt);
2475                 pkt->data = NULL;
2476                 pkt->size = 0;
2477                 pkt->stream_index = is->audio_stream;
2478                 packet_queue_put(&is->audioq, pkt);
2479             }
2480             SDL_Delay(10);
2481             if (is->audioq.size + is->videoq.size + is->subtitleq.size == 0) {
2482                 if (loop != 1 && (!loop || --loop)) {
2483                     stream_seek(cur_stream, start_time != AV_NOPTS_VALUE ? start_time : 0, 0, 0);
2484                 } else if (!noautoexit) {
2485                     ret = AVERROR_EOF;
2486                     goto fail;
2487                 }
2488             }
2489             continue;
2490         }
2491         ret = av_read_frame(ic, pkt);
2492         if (ret < 0) {
2493             if (ret == AVERROR_EOF || (ic->pb && ic->pb->eof_reached))
2494                 eof = 1;
2495             if (ic->pb && ic->pb->error)
2496                 break;
2497             SDL_Delay(100); /* wait for user event */
2498             continue;
2499         }
2500         /* check if the packet is within the user-specified play range; queue it if so, otherwise discard it */
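             /* i.e. convert pkt->pts to seconds, subtract the requested start
              * position and compare the result against the requested duration */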
2501         pkt_in_play_range = duration == AV_NOPTS_VALUE ||
2502                 (pkt->pts - ic->streams[pkt->stream_index]->start_time) *
2503                 av_q2d(ic->streams[pkt->stream_index]->time_base) -
2504                 (double)(start_time != AV_NOPTS_VALUE ? start_time : 0) / 1000000
2505                 <= ((double)duration / 1000000);
2506         if (pkt->stream_index == is->audio_stream && pkt_in_play_range) {
2507             packet_queue_put(&is->audioq, pkt);
2508         } else if (pkt->stream_index == is->video_stream && pkt_in_play_range) {
2509             packet_queue_put(&is->videoq, pkt);
2510         } else if (pkt->stream_index == is->subtitle_stream && pkt_in_play_range) {
2511             packet_queue_put(&is->subtitleq, pkt);
2512         } else {
2513             av_free_packet(pkt);
2514         }
2515     }
2516     /* wait until the end */
2517     while (!is->abort_request) {
2518         SDL_Delay(100);
2519     }
2520
2521     ret = 0;
2522  fail:
2523     /* disable interrupting */
2524     global_video_state = NULL;
2525
2526     /* close each stream */
2527     if (is->audio_stream >= 0)
2528         stream_component_close(is, is->audio_stream);
2529     if (is->video_stream >= 0)
2530         stream_component_close(is, is->video_stream);
2531     if (is->subtitle_stream >= 0)
2532         stream_component_close(is, is->subtitle_stream);
2533     if (is->ic) {
2534         avformat_close_input(&is->ic);
2535     }
2536
2537     if (ret != 0) {
2538         SDL_Event event;
2539
2540         event.type = FF_QUIT_EVENT;
2541         event.user.data1 = is;
2542         SDL_PushEvent(&event);
2543     }
2544     return 0;
2545 }
2546
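     /* allocate a VideoState for the given input, create the picture and
      * subtitle queue synchronization primitives and spawn the demuxing
      * (decode) thread */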
2547 static VideoState *stream_open(const char *filename, AVInputFormat *iformat)
2548 {
2549     VideoState *is;
2550
2551     is = av_mallocz(sizeof(VideoState));
2552     if (!is)
2553         return NULL;
2554     av_strlcpy(is->filename, filename, sizeof(is->filename));
2555     is->iformat = iformat;
2556     is->ytop    = 0;
2557     is->xleft   = 0;
2558
2559     /* start video display */
2560     is->pictq_mutex = SDL_CreateMutex();
2561     is->pictq_cond  = SDL_CreateCond();
2562
2563     is->subpq_mutex = SDL_CreateMutex();
2564     is->subpq_cond  = SDL_CreateCond();
2565
2566     is->av_sync_type = av_sync_type;
2567     is->parse_tid    = SDL_CreateThread(decode_thread, is);
2568     if (!is->parse_tid) {
2569         av_free(is);
2570         return NULL;
2571     }
2572     return is;
2573 }
2574
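     /* switch to the next stream of the given type, wrapping around; for
      * subtitles, "no subtitle stream" (index -1) is part of the cycle */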
2575 static void stream_cycle_channel(VideoState *is, int codec_type)
2576 {
2577     AVFormatContext *ic = is->ic;
2578     int start_index, stream_index;
2579     AVStream *st;
2580
2581     if (codec_type == AVMEDIA_TYPE_VIDEO)
2582         start_index = is->video_stream;
2583     else if (codec_type == AVMEDIA_TYPE_AUDIO)
2584         start_index = is->audio_stream;
2585     else
2586         start_index = is->subtitle_stream;
2587     if (start_index < (codec_type == AVMEDIA_TYPE_SUBTITLE ? -1 : 0))
2588         return;
2589     stream_index = start_index;
2590     for (;;) {
2591         if (++stream_index >= is->ic->nb_streams)
2592         {
2593             if (codec_type == AVMEDIA_TYPE_SUBTITLE)
2594             {
2595                 stream_index = -1;
2596                 goto the_end;
2597             } else
2598                 stream_index = 0;
2599         }
2600         if (stream_index == start_index)
2601             return;
2602         st = ic->streams[stream_index];
2603         if (st->codec->codec_type == codec_type) {
2604             /* check that parameters are OK */
2605             switch (codec_type) {
2606             case AVMEDIA_TYPE_AUDIO:
2607                 if (st->codec->sample_rate != 0 &&
2608                     st->codec->channels != 0)
2609                     goto the_end;
2610                 break;
2611             case AVMEDIA_TYPE_VIDEO:
2612             case AVMEDIA_TYPE_SUBTITLE:
2613                 goto the_end;
2614             default:
2615                 break;
2616             }
2617         }
2618     }
2619  the_end:
2620     stream_component_close(is, start_index);
2621     stream_component_open(is, stream_index);
2622 }
2623
2624
2625 static void toggle_full_screen(void)
2626 {
2627 #if defined(__APPLE__) && SDL_VERSION_ATLEAST(1, 2, 14)
2628     /* OS X needs to empty the picture_queue */
2629     int i;
2630     for (i = 0; i < VIDEO_PICTURE_QUEUE_SIZE; i++)
2631         cur_stream->pictq[i].reallocate = 1;
2632 #endif
2633     is_full_screen = !is_full_screen;
2634     video_open(cur_stream);
2635 }
2636
2637 static void toggle_pause(void)
2638 {
2639     if (cur_stream)
2640         stream_pause(cur_stream);
2641     step = 0;
2642 }
2643
2644 static void step_to_next_frame(void)
2645 {
2646     if (cur_stream) {
2647         /* if the stream is paused, unpause it, then step */
2648         if (cur_stream->paused)
2649             stream_pause(cur_stream);
2650     }
2651     step = 1;
2652 }
2653
2654 static void toggle_audio_display(void)
2655 {
2656     if (cur_stream) {
2657         int bgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0x00);
2658         cur_stream->show_audio = (cur_stream->show_audio + 1) % 3;
2659         fill_rectangle(screen,
2660                        cur_stream->xleft, cur_stream->ytop, cur_stream->width, cur_stream->height,
2661                        bgcolor);
2662         SDL_UpdateRect(screen, cur_stream->xleft, cur_stream->ytop, cur_stream->width, cur_stream->height);
2663     }
2664 }
2665
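     /* seek 'incr' chapters forward or backward relative to the chapter
      * containing the current master clock position */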
2666 static void seek_chapter(VideoState *is, int incr)
2667 {
2668     int64_t pos = get_master_clock(is) * AV_TIME_BASE;
2669     int i;
2670
2671     if (!is->ic->nb_chapters)
2672         return;
2673
2674     /* find the current chapter */
2675     for (i = 0; i < is->ic->nb_chapters; i++) {
2676         AVChapter *ch = is->ic->chapters[i];
2677         if (av_compare_ts(pos, AV_TIME_BASE_Q, ch->start, ch->time_base) < 0) {
2678             i--;
2679             break;
2680         }
2681     }
2682
2683     i += incr;
2684     i = FFMAX(i, 0);
2685     if (i >= is->ic->nb_chapters)
2686         return;
2687
2688     av_log(NULL, AV_LOG_VERBOSE, "Seeking to chapter %d.\n", i);
2689     stream_seek(is, av_rescale_q(is->ic->chapters[i]->start, is->ic->chapters[i]->time_base,
2690                                  AV_TIME_BASE_Q), 0, 0);
2691 }
2692
2693 /* handle an event sent by the GUI */
2694 static void event_loop(void)
2695 {
2696     SDL_Event event;
2697     double incr, pos, frac;
2698
2699     for (;;) {
2700         double x;
2701         SDL_WaitEvent(&event);
2702         switch (event.type) {
2703         case SDL_KEYDOWN:
2704             if (exit_on_keydown) {
2705                 do_exit();
2706                 break;
2707             }
2708             switch (event.key.keysym.sym) {
2709             case SDLK_ESCAPE:
2710             case SDLK_q:
2711                 do_exit();
2712                 break;
2713             case SDLK_f:
2714                 toggle_full_screen();
2715                 break;
2716             case SDLK_p:
2717             case SDLK_SPACE:
2718                 toggle_pause();
2719                 break;
2720             case SDLK_s: // S: Step to next frame
2721                 step_to_next_frame();
2722                 break;
2723             case SDLK_a:
2724                 if (cur_stream)
2725                     stream_cycle_channel(cur_stream, AVMEDIA_TYPE_AUDIO);
2726                 break;
2727             case SDLK_v:
2728                 if (cur_stream)
2729                     stream_cycle_channel(cur_stream, AVMEDIA_TYPE_VIDEO);
2730                 break;
2731             case SDLK_t:
2732                 if (cur_stream)
2733                     stream_cycle_channel(cur_stream, AVMEDIA_TYPE_SUBTITLE);
2734                 break;
2735             case SDLK_w:
2736                 toggle_audio_display();
2737                 break;
2738             case SDLK_PAGEUP:
2739                 seek_chapter(cur_stream, 1);
2740                 break;
2741             case SDLK_PAGEDOWN:
2742                 seek_chapter(cur_stream, -1);
2743                 break;
2744             case SDLK_LEFT:
2745                 incr = -10.0;
2746                 goto do_seek;
2747             case SDLK_RIGHT:
2748                 incr = 10.0;
2749                 goto do_seek;
2750             case SDLK_UP:
2751                 incr = 60.0;
2752                 goto do_seek;
2753             case SDLK_DOWN:
2754                 incr = -60.0;
2755             do_seek:
2756                 if (cur_stream) {
2757                     if (seek_by_bytes) {
2758                         if (cur_stream->video_stream >= 0 && cur_stream->video_current_pos >= 0) {
2759                             pos = cur_stream->video_current_pos;
2760                         } else if (cur_stream->audio_stream >= 0 && cur_stream->audio_pkt.pos >= 0) {
2761                             pos = cur_stream->audio_pkt.pos;
2762                         } else
2763                             pos = avio_tell(cur_stream->ic->pb);
2764                         if (cur_stream->ic->bit_rate)
2765                             incr *= cur_stream->ic->bit_rate / 8.0;
2766                         else
2767                             incr *= 180000.0;
2768                         pos += incr;
2769                         stream_seek(cur_stream, pos, incr, 1);
2770                     } else {
2771                         pos = get_master_clock(cur_stream);
2772                         pos += incr;
2773                         stream_seek(cur_stream, (int64_t)(pos * AV_TIME_BASE), (int64_t)(incr * AV_TIME_BASE), 0);
2774                     }
2775                 }
2776                 break;
2777             default:
2778                 break;
2779             }
2780             break;
2781         case SDL_MOUSEBUTTONDOWN:
2782             if (exit_on_mousedown) {
2783                 do_exit();
2784                 break;
2785             }
2786         case SDL_MOUSEMOTION:
2787             if (event.type == SDL_MOUSEBUTTONDOWN) {
2788                 x = event.button.x;
2789             } else {
2790                 if (event.motion.state != SDL_PRESSED)
2791                     break;
2792                 x = event.motion.x;
2793             }
2794             if (cur_stream) {
2795                 if (seek_by_bytes || cur_stream->ic->duration <= 0) {
2796                     uint64_t size =  avio_size(cur_stream->ic->pb);
2797                     stream_seek(cur_stream, size*x/cur_stream->width, 0, 1);
2798                 } else {
2799                     int64_t ts;
2800                     int ns, hh, mm, ss;
2801                     int tns, thh, tmm, tss;
2802                     tns  = cur_stream->ic->duration / 1000000LL;
2803                     thh  = tns / 3600;
2804                     tmm  = (tns % 3600) / 60;
2805                     tss  = (tns % 60);
2806                     frac = x / cur_stream->width;
2807                     ns   = frac * tns;
2808                     hh   = ns / 3600;
2809                     mm   = (ns % 3600) / 60;
2810                     ss   = (ns % 60);
2811                     fprintf(stderr, "Seek to %2.0f%% (%2d:%02d:%02d) of total duration (%2d:%02d:%02d)       \n", frac*100,
2812                             hh, mm, ss, thh, tmm, tss);
2813                     ts = frac * cur_stream->ic->duration;
2814                     if (cur_stream->ic->start_time != AV_NOPTS_VALUE)
2815                         ts += cur_stream->ic->start_time;
2816                     stream_seek(cur_stream, ts, 0, 0);
2817                 }
2818             }
2819             break;
2820         case SDL_VIDEORESIZE:
2821             if (cur_stream) {
2822                 screen = SDL_SetVideoMode(event.resize.w, event.resize.h, 0,
2823                                           SDL_HWSURFACE|SDL_RESIZABLE|SDL_ASYNCBLIT|SDL_HWACCEL);
2824                 screen_width  = cur_stream->width  = event.resize.w;
2825                 screen_height = cur_stream->height = event.resize.h;
2826             }
2827             break;
2828         case SDL_QUIT:
2829         case FF_QUIT_EVENT:
2830             do_exit();
2831             break;
2832         case FF_ALLOC_EVENT:
2833             video_open(event.user.data1);
2834             alloc_picture(event.user.data1);
2835             break;
2836         case FF_REFRESH_EVENT:
2837             video_refresh_timer(event.user.data1);
2838             cur_stream->refresh = 0;
2839             break;
2840         default:
2841             break;
2842         }
2843     }
2844 }
2845
2846 static int opt_frame_size(void *optctx, const char *opt, const char *arg)
2847 {
2848     av_log(NULL, AV_LOG_ERROR,
2849            "Option '%s' has been removed, use private format options instead\n", opt);
2850     return AVERROR(EINVAL);
2851 }
2852
2853 static int opt_width(void *optctx, const char *opt, const char *arg)
2854 {
2855     screen_width = parse_number_or_die(opt, arg, OPT_INT64, 1, INT_MAX);
2856     return 0;
2857 }
2858
2859 static int opt_height(void *optctx, const char *opt, const char *arg)
2860 {
2861     screen_height = parse_number_or_die(opt, arg, OPT_INT64, 1, INT_MAX);
2862     return 0;
2863 }
2864
2865 static int opt_format(void *optctx, const char *opt, const char *arg)
2866 {
2867     file_iformat = av_find_input_format(arg);
2868     if (!file_iformat) {
2869         fprintf(stderr, "Unknown input format: %s\n", arg);
2870         return AVERROR(EINVAL);
2871     }
2872     return 0;
2873 }
2874
2875 static int opt_frame_pix_fmt(void *optctx, const char *opt, const char *arg)
2876 {
2877     av_log(NULL, AV_LOG_ERROR,
2878            "Option '%s' has been removed, use private format options instead\n", opt);
2879     return AVERROR(EINVAL);
2880 }
2881
2882 static int opt_sync(void *optctx, const char *opt, const char *arg)
2883 {
2884     if (!strcmp(arg, "audio"))
2885         av_sync_type = AV_SYNC_AUDIO_MASTER;
2886     else if (!strcmp(arg, "video"))
2887         av_sync_type = AV_SYNC_VIDEO_MASTER;
2888     else if (!strcmp(arg, "ext"))
2889         av_sync_type = AV_SYNC_EXTERNAL_CLOCK;
2890     else {
2891         fprintf(stderr, "Unknown value for %s: %s\n", opt, arg);
2892         exit(1);
2893     }
2894     return 0;
2895 }
2896
2897 static int opt_seek(void *optctx, const char *opt, const char *arg)
2898 {
2899     start_time = parse_time_or_die(opt, arg, 1);
2900     return 0;
2901 }
2902
2903 static int opt_duration(void *optctx, const char *opt, const char *arg)
2904 {
2905     duration = parse_time_or_die(opt, arg, 1);
2906     return 0;
2907 }
2908
2909 static const OptionDef options[] = {
2910 #include "cmdutils_common_opts.h"
2911     { "x", HAS_ARG, { .func_arg = opt_width }, "force displayed width", "width" },
2912     { "y", HAS_ARG, { .func_arg = opt_height }, "force displayed height", "height" },
2913     { "s", HAS_ARG | OPT_VIDEO, { .func_arg = opt_frame_size }, "set frame size (WxH or abbreviation)", "size" },
2914     { "fs", OPT_BOOL, { &is_full_screen }, "force full screen" },
2915     { "an", OPT_BOOL, { &audio_disable }, "disable audio" },
2916     { "vn", OPT_BOOL, { &video_disable }, "disable video" },
2917     { "ast", OPT_INT | HAS_ARG | OPT_EXPERT, { &wanted_stream[AVMEDIA_TYPE_AUDIO] }, "select desired audio stream", "stream_number" },
2918     { "vst", OPT_INT | HAS_ARG | OPT_EXPERT, { &wanted_stream[AVMEDIA_TYPE_VIDEO] }, "select desired video stream", "stream_number" },
2919     { "sst", OPT_INT | HAS_ARG | OPT_EXPERT, { &wanted_stream[AVMEDIA_TYPE_SUBTITLE] }, "select desired subtitle stream", "stream_number" },
2920     { "ss", HAS_ARG, { .func_arg = opt_seek }, "seek to a given position in seconds", "pos" },
2921     { "t", HAS_ARG, { .func_arg = opt_duration }, "play \"duration\" seconds of audio/video", "duration" },
2922     { "bytes", OPT_INT | HAS_ARG, { &seek_by_bytes }, "seek by bytes 0=off 1=on -1=auto", "val" },
2923     { "nodisp", OPT_BOOL, { &display_disable }, "disable graphical display" },
2924     { "f", HAS_ARG, { .func_arg = opt_format }, "force format", "fmt" },
2925     { "pix_fmt", HAS_ARG | OPT_EXPERT | OPT_VIDEO, { .func_arg = opt_frame_pix_fmt }, "set pixel format", "format" },
2926     { "stats", OPT_BOOL | OPT_EXPERT, { &show_status }, "show status", "" },
2927     { "bug", OPT_INT | HAS_ARG | OPT_EXPERT, { &workaround_bugs }, "workaround bugs", "" },
2928     { "fast", OPT_BOOL | OPT_EXPERT, { &fast }, "non-spec-compliant optimizations", "" },
2929     { "genpts", OPT_BOOL | OPT_EXPERT, { &genpts }, "generate pts", "" },
2930     { "drp", OPT_INT | HAS_ARG | OPT_EXPERT, { &decoder_reorder_pts }, "let decoder reorder pts 0=off 1=on -1=auto", ""},
2931     { "skiploop", OPT_INT | HAS_ARG | OPT_EXPERT, { &skip_loop_filter }, "set loop filter skipping mode", "mode" },
2932     { "skipframe", OPT_INT | HAS_ARG | OPT_EXPERT, { &skip_frame }, "set frame skipping mode", "mode" },
2933     { "skipidct", OPT_INT | HAS_ARG | OPT_EXPERT, { &skip_idct }, "set IDCT skipping mode", "mode" },
2934     { "idct", OPT_INT | HAS_ARG | OPT_EXPERT, { &idct }, "set idct algo",  "algo" },
2935     { "ec", OPT_INT | HAS_ARG | OPT_EXPERT, { &error_concealment }, "set error concealment options",  "bit_mask" },
2936     { "sync", HAS_ARG | OPT_EXPERT, { .func_arg = opt_sync }, "set audio-video sync. type (type=audio/video/ext)", "type" },
2937     { "noautoexit", OPT_BOOL | OPT_EXPERT, { &noautoexit }, "Do not exit at the end of playback", "" },
2938     { "exitonkeydown", OPT_BOOL | OPT_EXPERT, { &exit_on_keydown }, "exit on key down", "" },
2939     { "exitonmousedown", OPT_BOOL | OPT_EXPERT, { &exit_on_mousedown }, "exit on mouse down", "" },
2940     { "loop", OPT_INT | HAS_ARG | OPT_EXPERT, { &loop }, "set number of times the playback shall be looped", "loop count" },
2941     { "framedrop", OPT_BOOL | OPT_EXPERT, { &framedrop }, "drop frames when cpu is too slow", "" },
2942     { "infbuf", OPT_BOOL | OPT_EXPERT, { &infinite_buffer }, "don't limit the input buffer size (useful with realtime streams)", "" },
2943     { "window_title", OPT_STRING | HAS_ARG, { &window_title }, "set window title", "window title" },
2944 #if CONFIG_AVFILTER
2945     { "vf", OPT_STRING | HAS_ARG, { &vfilters }, "video filters", "filter list" },
2946 #endif
2947     { "rdftspeed", OPT_INT | HAS_ARG| OPT_AUDIO | OPT_EXPERT, { &rdftspeed }, "rdft speed", "msecs" },
2948     { "default", HAS_ARG | OPT_AUDIO | OPT_VIDEO | OPT_EXPERT, { opt_default }, "generic catch all option", "" },
2949     { "i", 0, { NULL }, "avconv compatibility dummy option", ""},
2950     { "autorotate", OPT_BOOL, { &autorotate }, "automatically rotate video", "" },
2951     { NULL, },
2952 };
2953
2954 static void show_usage(void)
2955 {
2956     printf("Simple media player\n");
2957     printf("usage: %s [options] input_file\n", program_name);
2958     printf("\n");
2959 }
2960
2961 void show_help_default(const char *opt, const char *arg)
2962 {
2963     av_log_set_callback(log_callback_help);
2964     show_usage();
2965     show_help_options(options, "Main options:", 0, OPT_EXPERT, 0);
2966     show_help_options(options, "Advanced options:", OPT_EXPERT, 0, 0);
2967     printf("\n");
2968     show_help_children(avcodec_get_class(), AV_OPT_FLAG_DECODING_PARAM);
2969     show_help_children(avformat_get_class(), AV_OPT_FLAG_DECODING_PARAM);
2970 #if !CONFIG_AVFILTER
2971     show_help_children(sws_get_class(), AV_OPT_FLAG_ENCODING_PARAM);
2972 #endif
2973     printf("\nWhile playing:\n"
2974            "q, ESC              quit\n"
2975            "f                   toggle full screen\n"
2976            "p, SPC              pause\n"
2977            "a                   cycle audio channel\n"
2978            "v                   cycle video channel\n"
2979            "t                   cycle subtitle channel\n"
2980            "w                   show audio waves\n"
2981            "s                   activate frame-step mode\n"
2982            "left/right          seek backward/forward 10 seconds\n"
2983            "down/up             seek backward/forward 1 minute\n"
2984            "mouse click         seek to percentage in file corresponding to fraction of width\n"
2985            );
2986 }
2987
2988 static void opt_input_file(void *optctx, const char *filename)
2989 {
2990     if (input_filename) {
2991         fprintf(stderr, "Argument '%s' provided as input filename, but '%s' was already specified.\n",
2992                 filename, input_filename);
2993         exit(1);
2994     }
2995     if (!strcmp(filename, "-"))
2996         filename = "pipe:";
2997     input_filename = filename;
2998 }
2999
3000 /* program entry point */
3001 int main(int argc, char **argv)
3002 {
3003     int flags;
3004
3005     av_log_set_flags(AV_LOG_SKIP_REPEATED);
3006     parse_loglevel(argc, argv, options);
3007
3008     /* register all codecs, demux and protocols */
3009     avcodec_register_all();
3010 #if CONFIG_AVDEVICE
3011     avdevice_register_all();
3012 #endif
3013 #if CONFIG_AVFILTER
3014     avfilter_register_all();
3015 #endif
3016     av_register_all();
3017     avformat_network_init();
3018
3019     init_opts();
3020
3021     show_banner();
3022
3023     parse_options(NULL, argc, argv, options, opt_input_file);
3024
3025     if (!input_filename) {
3026         show_usage();
3027         fprintf(stderr, "An input file must be specified\n");
3028         fprintf(stderr, "Use -h to get full help or, even better, run 'man %s'\n", program_name);
3029         exit(1);
3030     }
3031
3032     if (display_disable) {
3033         video_disable = 1;
3034     }
3035     flags = SDL_INIT_VIDEO | SDL_INIT_AUDIO | SDL_INIT_TIMER;
3036 #if !defined(__MINGW32__) && !defined(__APPLE__)
3037     flags |= SDL_INIT_EVENTTHREAD; /* Not supported on Windows or Mac OS X */
3038 #endif
3039     if (SDL_Init (flags)) {
3040         fprintf(stderr, "Could not initialize SDL - %s\n", SDL_GetError());
3041         exit(1);
3042     }
3043
3044     if (!display_disable) {
3045         const SDL_VideoInfo *vi = SDL_GetVideoInfo();
3046         fs_screen_width = vi->current_w;
3047         fs_screen_height = vi->current_h;
3048     }
3049
3050     SDL_EventState(SDL_ACTIVEEVENT, SDL_IGNORE);
3051     SDL_EventState(SDL_SYSWMEVENT, SDL_IGNORE);
3052     SDL_EventState(SDL_USEREVENT, SDL_IGNORE);
3053
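         /* the flush packet's data pointer is used as a sentinel: the decoding
          * threads compare pkt->data against it to know when to flush their
          * codec buffers after a seek */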
3054     av_init_packet(&flush_pkt);
3055     flush_pkt.data = (uint8_t *)&flush_pkt;
3056
3057     cur_stream = stream_open(input_filename, file_iformat);
3058
3059     event_loop();
3060
3061     /* never returns */
3062
3063     return 0;
3064 }