]> git.sesse.net Git - ffmpeg/blob - avplay.c
avplay: use libavresample for sample format conversion and channel mixing
[ffmpeg] / avplay.c
1 /*
2  * avplay : Simple Media Player based on the Libav libraries
3  * Copyright (c) 2003 Fabrice Bellard
4  *
5  * This file is part of Libav.
6  *
7  * Libav is free software; you can redistribute it and/or
8  * modify it under the terms of the GNU Lesser General Public
9  * License as published by the Free Software Foundation; either
10  * version 2.1 of the License, or (at your option) any later version.
11  *
12  * Libav is distributed in the hope that it will be useful,
13  * but WITHOUT ANY WARRANTY; without even the implied warranty of
14  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
15  * Lesser General Public License for more details.
16  *
17  * You should have received a copy of the GNU Lesser General Public
18  * License along with Libav; if not, write to the Free Software
19  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
20  */
21
22 #include "config.h"
23 #include <inttypes.h>
24 #include <math.h>
25 #include <limits.h>
26 #include "libavutil/avstring.h"
27 #include "libavutil/colorspace.h"
28 #include "libavutil/mathematics.h"
29 #include "libavutil/pixdesc.h"
30 #include "libavutil/imgutils.h"
31 #include "libavutil/dict.h"
32 #include "libavutil/parseutils.h"
33 #include "libavutil/samplefmt.h"
34 #include "libavformat/avformat.h"
35 #include "libavdevice/avdevice.h"
36 #include "libswscale/swscale.h"
37 #include "libavresample/avresample.h"
38 #include "libavutil/opt.h"
39 #include "libavcodec/avfft.h"
40
41 #if CONFIG_AVFILTER
42 # include "libavfilter/avfilter.h"
43 # include "libavfilter/avfiltergraph.h"
44 #endif
45
46 #include "cmdutils.h"
47
48 #include <SDL.h>
49 #include <SDL_thread.h>
50
51 #ifdef __MINGW32__
52 #undef main /* We don't want SDL to override our main() */
53 #endif
54
55 #include <unistd.h>
56 #include <assert.h>
57
const char program_name[] = "avplay";
const int program_birth_year = 2003;

#define MAX_QUEUE_SIZE (15 * 1024 * 1024)
#define MIN_AUDIOQ_SIZE (20 * 16 * 1024)
#define MIN_FRAMES 5

/* SDL audio buffer size, in samples. Should be small to have precise
   A/V sync as SDL does not have hardware buffer fullness info. */
#define SDL_AUDIO_BUFFER_SIZE 1024

/* no AV sync correction is done if below the AV sync threshold */
#define AV_SYNC_THRESHOLD 0.01
/* no AV correction is done if too big error */
#define AV_NOSYNC_THRESHOLD 10.0

#define FRAME_SKIP_FACTOR 0.05

/* maximum audio speed change to get correct sync */
#define SAMPLE_CORRECTION_PERCENT_MAX 10

/* we use about AUDIO_DIFF_AVG_NB A-V differences to make the average */
#define AUDIO_DIFF_AVG_NB   20

/* NOTE: the size must be big enough to compensate the hardware audio buffersize size */
#define SAMPLE_ARRAY_SIZE (2 * 65536)

static int sws_flags = SWS_BICUBIC;  /* libswscale scaling algorithm (bicubic by default) */
86
/* Thread-safe FIFO of demuxed packets, shared between the demux thread
 * and a decoder thread.  All fields are protected by 'mutex'. */
typedef struct PacketQueue {
    AVPacketList *first_pkt, *last_pkt;  /* singly linked list head / tail */
    int nb_packets;       /* number of queued packets */
    int size;             /* payload bytes plus per-node overhead (see packet_queue_put) */
    int abort_request;    /* when set, blocked readers wake up and return -1 */
    SDL_mutex *mutex;
    SDL_cond *cond;       /* signalled on every put and on abort */
} PacketQueue;
95
#define VIDEO_PICTURE_QUEUE_SIZE 2
#define SUBPICTURE_QUEUE_SIZE 4

/* One decoded video frame queued for display. */
typedef struct VideoPicture {
    double pts;                                  ///< presentation time stamp for this picture
    double target_clock;                         ///< av_gettime() time at which this should be displayed ideally
    int64_t pos;                                 ///< byte position in file
    SDL_Overlay *bmp;                            ///< SDL YUV overlay holding the pixels
    int width, height; /* source height & width */
    int allocated;     /* NOTE(review): presumably set once 'bmp' exists — used outside this chunk */
    int reallocate;
    enum PixelFormat pix_fmt;

#if CONFIG_AVFILTER
    AVFilterBufferRef *picref;                   ///< buffer reference of the filtered frame
#endif
} VideoPicture;
113
/* A decoded subtitle together with its presentation time. */
typedef struct SubPicture {
    double pts; /* presentation time stamp for this picture */
    AVSubtitle sub;
} SubPicture;
118
/* Master clock selection: which clock the other streams slave to
 * (see get_master_clock). */
enum {
    AV_SYNC_AUDIO_MASTER, /* default choice */
    AV_SYNC_VIDEO_MASTER,
    AV_SYNC_EXTERNAL_CLOCK, /* synchronize to an external clock */
};
124
/* Global player state: demuxer context, per-stream decoding state,
 * clocks, and the packet/picture/subtitle queues shared between the
 * demux, decode, refresh and audio-callback threads. */
typedef struct VideoState {
    SDL_Thread *parse_tid;    /* demux (read) thread */
    SDL_Thread *video_tid;    /* video decode thread */
    SDL_Thread *refresh_tid;  /* display refresh thread (see refresh_thread) */
    AVInputFormat *iformat;
    int no_background;
    int abort_request;        /* set to make all threads exit */
    int paused;
    int last_paused;
    /* seek request, consumed by the demux thread */
    int seek_req;
    int seek_flags;
    int64_t seek_pos;
    int64_t seek_rel;
    int read_pause_return;
    AVFormatContext *ic;

    int audio_stream;

    int av_sync_type;         /* one of the AV_SYNC_* values above */
    double external_clock; /* external clock base */
    int64_t external_clock_time;

    double audio_clock;       /* pts of last decoded audio data */
    double audio_diff_cum; /* used for AV difference average computation */
    double audio_diff_avg_coef;
    double audio_diff_threshold;
    int audio_diff_avg_count;
    AVStream *audio_st;
    PacketQueue audioq;
    int audio_hw_buf_size;
    uint8_t silence_buf[SDL_AUDIO_BUFFER_SIZE];
    uint8_t *audio_buf;
    uint8_t *audio_buf1;
    unsigned int audio_buf_size; /* in bytes */
    int audio_buf_index; /* in bytes */
    AVPacket audio_pkt_temp;
    AVPacket audio_pkt;
    /* format actually delivered to SDL */
    enum AVSampleFormat sdl_sample_fmt;
    uint64_t sdl_channel_layout;
    int sdl_channels;
    /* format of the decoder output currently configured in 'avr' */
    enum AVSampleFormat resample_sample_fmt;
    uint64_t resample_channel_layout;
    AVAudioResampleContext *avr;  /* converts decoded audio to the SDL format */
    AVFrame *frame;

    int show_audio; /* if true, display audio samples */
    int16_t sample_array[SAMPLE_ARRAY_SIZE];  /* ring buffer for visualization */
    int sample_array_index;
    int last_i_start;
    RDFTContext *rdft;        /* spectrogram transform state */
    int rdft_bits;
    FFTSample *rdft_data;
    int xpos;                 /* current spectrogram column */

    SDL_Thread *subtitle_tid;
    int subtitle_stream;
    int subtitle_stream_changed;
    AVStream *subtitle_st;
    PacketQueue subtitleq;
    SubPicture subpq[SUBPICTURE_QUEUE_SIZE];
    int subpq_size, subpq_rindex, subpq_windex;
    SDL_mutex *subpq_mutex;
    SDL_cond *subpq_cond;

    double frame_timer;
    double frame_last_pts;
    double frame_last_delay;
    double video_clock;                          ///< pts of last decoded frame / predicted pts of next decoded frame
    int video_stream;
    AVStream *video_st;
    PacketQueue videoq;
    double video_current_pts;                    ///< current displayed pts (different from video_clock if frame fifos are used)
    double video_current_pts_drift;              ///< video_current_pts - time (av_gettime) at which we updated video_current_pts - used to have running video pts
    int64_t video_current_pos;                   ///< current displayed file pos
    VideoPicture pictq[VIDEO_PICTURE_QUEUE_SIZE];
    int pictq_size, pictq_rindex, pictq_windex;
    SDL_mutex *pictq_mutex;
    SDL_cond *pictq_cond;
#if !CONFIG_AVFILTER
    struct SwsContext *img_convert_ctx;
#endif

    //    QETimer *video_timer;
    char filename[1024];
    int width, height, xleft, ytop;  /* display area within the SDL surface */

    PtsCorrectionContext pts_ctx;

#if CONFIG_AVFILTER
    AVFilterContext *out_video_filter;          ///< the last filter in the video chain
#endif

    float skip_frames;
    float skip_frames_index;
    int refresh;              /* a refresh event is pending (see refresh_thread) */
} VideoState;
221
static void show_help(void);

/* options specified by the user */
static AVInputFormat *file_iformat;
static const char *input_filename;
static const char *window_title;
static int fs_screen_width;
static int fs_screen_height;
static int screen_width  = 0;
static int screen_height = 0;
static int audio_disable;
static int video_disable;
/* stream index requested per media type; -1 selects automatically */
static int wanted_stream[AVMEDIA_TYPE_NB] = {
    [AVMEDIA_TYPE_AUDIO]    = -1,
    [AVMEDIA_TYPE_VIDEO]    = -1,
    [AVMEDIA_TYPE_SUBTITLE] = -1,
};
static int seek_by_bytes = -1;
static int display_disable;
static int show_status = 1;
static int av_sync_type = AV_SYNC_AUDIO_MASTER;
static int64_t start_time = AV_NOPTS_VALUE;
static int64_t duration = AV_NOPTS_VALUE;
static int debug = 0;
static int debug_mv = 0;
static int step = 0;
static int workaround_bugs = 1;
static int fast = 0;
static int genpts = 0;
static int idct = FF_IDCT_AUTO;
static enum AVDiscard skip_frame       = AVDISCARD_DEFAULT;
static enum AVDiscard skip_idct        = AVDISCARD_DEFAULT;
static enum AVDiscard skip_loop_filter = AVDISCARD_DEFAULT;
static int error_concealment = 3;
static int decoder_reorder_pts = -1;
static int autoexit;
static int exit_on_keydown;
static int exit_on_mousedown;
static int loop = 1;
static int framedrop = 1;

static int rdftspeed = 20;  /* spectrogram refresh period in ms (see refresh_thread) */
#if CONFIG_AVFILTER
static char *vfilters = NULL;
#endif

/* current context */
static int is_full_screen;
static VideoState *cur_stream;
static int64_t audio_callback_time;  /* av_gettime() at the last audio callback */

/* special sentinel packet queued to request a decoder flush (see packet_queue_init) */
static AVPacket flush_pkt;

/* custom SDL event types */
#define FF_ALLOC_EVENT   (SDL_USEREVENT)
#define FF_REFRESH_EVENT (SDL_USEREVENT + 1)
#define FF_QUIT_EVENT    (SDL_USEREVENT + 2)

static SDL_Surface *screen;
280
/* Exit hook required by cmdutils: terminate the process immediately
 * with status 'ret'. */
void exit_program(int ret)
{
    exit(ret);
}
285
static int packet_queue_put(PacketQueue *q, AVPacket *pkt);

/* packet queue handling */
/* Initialize a queue and prime it with the global flush packet
 * (presumably handled specially by the decoder threads — defined
 * outside this chunk). */
static void packet_queue_init(PacketQueue *q)
{
    memset(q, 0, sizeof(PacketQueue));
    q->mutex = SDL_CreateMutex();
    q->cond = SDL_CreateCond();
    packet_queue_put(q, &flush_pkt);
}
296
297 static void packet_queue_flush(PacketQueue *q)
298 {
299     AVPacketList *pkt, *pkt1;
300
301     SDL_LockMutex(q->mutex);
302     for (pkt = q->first_pkt; pkt != NULL; pkt = pkt1) {
303         pkt1 = pkt->next;
304         av_free_packet(&pkt->pkt);
305         av_freep(&pkt);
306     }
307     q->last_pkt = NULL;
308     q->first_pkt = NULL;
309     q->nb_packets = 0;
310     q->size = 0;
311     SDL_UnlockMutex(q->mutex);
312 }
313
/* Free all queued packets and destroy the queue's synchronization
 * primitives.  The queue must not be used afterwards. */
static void packet_queue_end(PacketQueue *q)
{
    packet_queue_flush(q);
    SDL_DestroyMutex(q->mutex);
    SDL_DestroyCond(q->cond);
}
320
321 static int packet_queue_put(PacketQueue *q, AVPacket *pkt)
322 {
323     AVPacketList *pkt1;
324
325     /* duplicate the packet */
326     if (pkt != &flush_pkt && av_dup_packet(pkt) < 0)
327         return -1;
328
329     pkt1 = av_malloc(sizeof(AVPacketList));
330     if (!pkt1)
331         return -1;
332     pkt1->pkt = *pkt;
333     pkt1->next = NULL;
334
335
336     SDL_LockMutex(q->mutex);
337
338     if (!q->last_pkt)
339
340         q->first_pkt = pkt1;
341     else
342         q->last_pkt->next = pkt1;
343     q->last_pkt = pkt1;
344     q->nb_packets++;
345     q->size += pkt1->pkt.size + sizeof(*pkt1);
346     /* XXX: should duplicate packet data in DV case */
347     SDL_CondSignal(q->cond);
348
349     SDL_UnlockMutex(q->mutex);
350     return 0;
351 }
352
/* Wake up any thread blocked in packet_queue_get and make all further
 * get calls return -1. */
static void packet_queue_abort(PacketQueue *q)
{
    SDL_LockMutex(q->mutex);

    q->abort_request = 1;

    SDL_CondSignal(q->cond);

    SDL_UnlockMutex(q->mutex);
}
363
364 /* return < 0 if aborted, 0 if no packet and > 0 if packet.  */
365 static int packet_queue_get(PacketQueue *q, AVPacket *pkt, int block)
366 {
367     AVPacketList *pkt1;
368     int ret;
369
370     SDL_LockMutex(q->mutex);
371
372     for (;;) {
373         if (q->abort_request) {
374             ret = -1;
375             break;
376         }
377
378         pkt1 = q->first_pkt;
379         if (pkt1) {
380             q->first_pkt = pkt1->next;
381             if (!q->first_pkt)
382                 q->last_pkt = NULL;
383             q->nb_packets--;
384             q->size -= pkt1->pkt.size + sizeof(*pkt1);
385             *pkt = pkt1->pkt;
386             av_free(pkt1);
387             ret = 1;
388             break;
389         } else if (!block) {
390             ret = 0;
391             break;
392         } else {
393             SDL_CondWait(q->cond, q->mutex);
394         }
395     }
396     SDL_UnlockMutex(q->mutex);
397     return ret;
398 }
399
400 static inline void fill_rectangle(SDL_Surface *screen,
401                                   int x, int y, int w, int h, int color)
402 {
403     SDL_Rect rect;
404     rect.x = x;
405     rect.y = y;
406     rect.w = w;
407     rect.h = h;
408     SDL_FillRect(screen, &rect, color);
409 }
410
/* Blend 'newp' over 'oldp' with alpha 'a' (0..255).  's' is the number
 * of source samples already summed into 'newp' (as a shift), so averaged
 * chroma sums can be blended without a separate division. */
#define ALPHA_BLEND(a, oldp, newp, s)\
((((oldp << s) * (255 - (a))) + (newp * (a))) / (255 << s))

/* Unpack the 32-bit ARGB word at 's' into its components. */
#define RGBA_IN(r, g, b, a, s)\
{\
    unsigned int v = ((const uint32_t *)(s))[0];\
    a = (v >> 24) & 0xff;\
    r = (v >> 16) & 0xff;\
    g = (v >> 8) & 0xff;\
    b = v & 0xff;\
}

/* Look up palette entry for the 8-bit index at 's' and unpack it as
 * AYUV components. */
#define YUVA_IN(y, u, v, a, s, pal)\
{\
    unsigned int val = ((const uint32_t *)(pal))[*(const uint8_t*)(s)];\
    a = (val >> 24) & 0xff;\
    y = (val >> 16) & 0xff;\
    u = (val >> 8) & 0xff;\
    v = val & 0xff;\
}

/* Pack AYUV components into the 32-bit word at 'd'. */
#define YUVA_OUT(d, y, u, v, a)\
{\
    ((uint32_t *)(d))[0] = (a << 24) | (y << 16) | (u << 8) | v;\
}


/* bytes per palettized source pixel */
#define BPP 1
439
440 static void blend_subrect(AVPicture *dst, const AVSubtitleRect *rect, int imgw, int imgh)
441 {
442     int wrap, wrap3, width2, skip2;
443     int y, u, v, a, u1, v1, a1, w, h;
444     uint8_t *lum, *cb, *cr;
445     const uint8_t *p;
446     const uint32_t *pal;
447     int dstx, dsty, dstw, dsth;
448
449     dstw = av_clip(rect->w, 0, imgw);
450     dsth = av_clip(rect->h, 0, imgh);
451     dstx = av_clip(rect->x, 0, imgw - dstw);
452     dsty = av_clip(rect->y, 0, imgh - dsth);
453     lum = dst->data[0] + dsty * dst->linesize[0];
454     cb  = dst->data[1] + (dsty >> 1) * dst->linesize[1];
455     cr  = dst->data[2] + (dsty >> 1) * dst->linesize[2];
456
457     width2 = ((dstw + 1) >> 1) + (dstx & ~dstw & 1);
458     skip2 = dstx >> 1;
459     wrap = dst->linesize[0];
460     wrap3 = rect->pict.linesize[0];
461     p = rect->pict.data[0];
462     pal = (const uint32_t *)rect->pict.data[1];  /* Now in YCrCb! */
463
464     if (dsty & 1) {
465         lum += dstx;
466         cb += skip2;
467         cr += skip2;
468
469         if (dstx & 1) {
470             YUVA_IN(y, u, v, a, p, pal);
471             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
472             cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
473             cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
474             cb++;
475             cr++;
476             lum++;
477             p += BPP;
478         }
479         for (w = dstw - (dstx & 1); w >= 2; w -= 2) {
480             YUVA_IN(y, u, v, a, p, pal);
481             u1 = u;
482             v1 = v;
483             a1 = a;
484             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
485
486             YUVA_IN(y, u, v, a, p + BPP, pal);
487             u1 += u;
488             v1 += v;
489             a1 += a;
490             lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
491             cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
492             cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
493             cb++;
494             cr++;
495             p += 2 * BPP;
496             lum += 2;
497         }
498         if (w) {
499             YUVA_IN(y, u, v, a, p, pal);
500             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
501             cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
502             cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
503             p++;
504             lum++;
505         }
506         p += wrap3 - dstw * BPP;
507         lum += wrap - dstw - dstx;
508         cb += dst->linesize[1] - width2 - skip2;
509         cr += dst->linesize[2] - width2 - skip2;
510     }
511     for (h = dsth - (dsty & 1); h >= 2; h -= 2) {
512         lum += dstx;
513         cb += skip2;
514         cr += skip2;
515
516         if (dstx & 1) {
517             YUVA_IN(y, u, v, a, p, pal);
518             u1 = u;
519             v1 = v;
520             a1 = a;
521             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
522             p += wrap3;
523             lum += wrap;
524             YUVA_IN(y, u, v, a, p, pal);
525             u1 += u;
526             v1 += v;
527             a1 += a;
528             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
529             cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
530             cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
531             cb++;
532             cr++;
533             p += -wrap3 + BPP;
534             lum += -wrap + 1;
535         }
536         for (w = dstw - (dstx & 1); w >= 2; w -= 2) {
537             YUVA_IN(y, u, v, a, p, pal);
538             u1 = u;
539             v1 = v;
540             a1 = a;
541             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
542
543             YUVA_IN(y, u, v, a, p + BPP, pal);
544             u1 += u;
545             v1 += v;
546             a1 += a;
547             lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
548             p += wrap3;
549             lum += wrap;
550
551             YUVA_IN(y, u, v, a, p, pal);
552             u1 += u;
553             v1 += v;
554             a1 += a;
555             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
556
557             YUVA_IN(y, u, v, a, p + BPP, pal);
558             u1 += u;
559             v1 += v;
560             a1 += a;
561             lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
562
563             cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 2);
564             cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 2);
565
566             cb++;
567             cr++;
568             p += -wrap3 + 2 * BPP;
569             lum += -wrap + 2;
570         }
571         if (w) {
572             YUVA_IN(y, u, v, a, p, pal);
573             u1 = u;
574             v1 = v;
575             a1 = a;
576             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
577             p += wrap3;
578             lum += wrap;
579             YUVA_IN(y, u, v, a, p, pal);
580             u1 += u;
581             v1 += v;
582             a1 += a;
583             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
584             cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
585             cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
586             cb++;
587             cr++;
588             p += -wrap3 + BPP;
589             lum += -wrap + 1;
590         }
591         p += wrap3 + (wrap3 - dstw * BPP);
592         lum += wrap + (wrap - dstw - dstx);
593         cb += dst->linesize[1] - width2 - skip2;
594         cr += dst->linesize[2] - width2 - skip2;
595     }
596     /* handle odd height */
597     if (h) {
598         lum += dstx;
599         cb += skip2;
600         cr += skip2;
601
602         if (dstx & 1) {
603             YUVA_IN(y, u, v, a, p, pal);
604             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
605             cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
606             cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
607             cb++;
608             cr++;
609             lum++;
610             p += BPP;
611         }
612         for (w = dstw - (dstx & 1); w >= 2; w -= 2) {
613             YUVA_IN(y, u, v, a, p, pal);
614             u1 = u;
615             v1 = v;
616             a1 = a;
617             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
618
619             YUVA_IN(y, u, v, a, p + BPP, pal);
620             u1 += u;
621             v1 += v;
622             a1 += a;
623             lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
624             cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u, 1);
625             cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v, 1);
626             cb++;
627             cr++;
628             p += 2 * BPP;
629             lum += 2;
630         }
631         if (w) {
632             YUVA_IN(y, u, v, a, p, pal);
633             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
634             cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
635             cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
636         }
637     }
638 }
639
/* Release the decoded subtitle held by 'sp'; the SubPicture slot itself
 * lives in the fixed-size subpq array and is not freed. */
static void free_subpicture(SubPicture *sp)
{
    avsubtitle_free(&sp->sub);
}
644
/* Display the picture at the read index of the picture queue, blending
 * any due subtitle on top, letterboxed to preserve the aspect ratio. */
static void video_image_display(VideoState *is)
{
    VideoPicture *vp;
    SubPicture *sp;
    AVPicture pict;
    float aspect_ratio;
    int width, height, x, y;
    SDL_Rect rect;
    int i;

    vp = &is->pictq[is->pictq_rindex];
    if (vp->bmp) {
#if CONFIG_AVFILTER
         if (vp->picref->video->pixel_aspect.num == 0)
             aspect_ratio = 0;
         else
             aspect_ratio = av_q2d(vp->picref->video->pixel_aspect);
#else

        /* XXX: use variable in the frame */
        if (is->video_st->sample_aspect_ratio.num)
            aspect_ratio = av_q2d(is->video_st->sample_aspect_ratio);
        else if (is->video_st->codec->sample_aspect_ratio.num)
            aspect_ratio = av_q2d(is->video_st->codec->sample_aspect_ratio);
        else
            aspect_ratio = 0;
#endif
        if (aspect_ratio <= 0.0)
            aspect_ratio = 1.0;
        /* turn the sample aspect ratio into a display aspect ratio */
        aspect_ratio *= (float)vp->width / (float)vp->height;

        if (is->subtitle_st)
        {
            if (is->subpq_size > 0)
            {
                sp = &is->subpq[is->subpq_rindex];

                /* blend only once the subtitle's display time has come */
                if (vp->pts >= sp->pts + ((float) sp->sub.start_display_time / 1000))
                {
                    SDL_LockYUVOverlay (vp->bmp);

                    /* note the 1/2 swap: the overlay stores its chroma
                     * planes in the opposite order from AVPicture's Cb/Cr */
                    pict.data[0] = vp->bmp->pixels[0];
                    pict.data[1] = vp->bmp->pixels[2];
                    pict.data[2] = vp->bmp->pixels[1];

                    pict.linesize[0] = vp->bmp->pitches[0];
                    pict.linesize[1] = vp->bmp->pitches[2];
                    pict.linesize[2] = vp->bmp->pitches[1];

                    for (i = 0; i < sp->sub.num_rects; i++)
                        blend_subrect(&pict, sp->sub.rects[i],
                                      vp->bmp->w, vp->bmp->h);

                    SDL_UnlockYUVOverlay (vp->bmp);
                }
            }
        }


        /* XXX: we suppose the screen has a 1.0 pixel ratio */
        /* fit the picture into the window, keeping the aspect ratio and
         * even dimensions, then center it */
        height = is->height;
        width = ((int)rint(height * aspect_ratio)) & ~1;
        if (width > is->width) {
            width = is->width;
            height = ((int)rint(width / aspect_ratio)) & ~1;
        }
        x = (is->width - width) / 2;
        y = (is->height - height) / 2;
        is->no_background = 0;
        rect.x = is->xleft + x;
        rect.y = is->ytop  + y;
        rect.w = width;
        rect.h = height;
        SDL_DisplayYUVOverlay(vp->bmp, &rect);
    }
}
721
/* get the current audio output buffer size, in samples. With SDL, we
   cannot have a precise information */
static int audio_write_get_buf_size(VideoState *is)
{
    /* NOTE(review): despite the comment above, this value is in BYTES,
     * not samples — audio_buf_size/audio_buf_index are documented as
     * bytes and get_audio_clock divides it by bytes per second */
    return is->audio_buf_size - is->audio_buf_index;
}
728
/* Mathematical modulo: unlike C's '%', the result is always
 * non-negative (in [0, b) for positive b). */
static inline int compute_mod(int a, int b)
{
    int r = a % b;

    return r >= 0 ? r : r + b;
}
737
738 static void video_audio_display(VideoState *s)
739 {
740     int i, i_start, x, y1, y, ys, delay, n, nb_display_channels;
741     int ch, channels, h, h2, bgcolor, fgcolor;
742     int16_t time_diff;
743     int rdft_bits, nb_freq;
744
745     for (rdft_bits = 1; (1 << rdft_bits) < 2 * s->height; rdft_bits++)
746         ;
747     nb_freq = 1 << (rdft_bits - 1);
748
749     /* compute display index : center on currently output samples */
750     channels = s->sdl_channels;
751     nb_display_channels = channels;
752     if (!s->paused) {
753         int data_used = s->show_audio == 1 ? s->width : (2 * nb_freq);
754         n = 2 * channels;
755         delay = audio_write_get_buf_size(s);
756         delay /= n;
757
758         /* to be more precise, we take into account the time spent since
759            the last buffer computation */
760         if (audio_callback_time) {
761             time_diff = av_gettime() - audio_callback_time;
762             delay -= (time_diff * s->audio_st->codec->sample_rate) / 1000000;
763         }
764
765         delay += 2 * data_used;
766         if (delay < data_used)
767             delay = data_used;
768
769         i_start= x = compute_mod(s->sample_array_index - delay * channels, SAMPLE_ARRAY_SIZE);
770         if (s->show_audio == 1) {
771             h = INT_MIN;
772             for (i = 0; i < 1000; i += channels) {
773                 int idx = (SAMPLE_ARRAY_SIZE + x - i) % SAMPLE_ARRAY_SIZE;
774                 int a = s->sample_array[idx];
775                 int b = s->sample_array[(idx + 4 * channels) % SAMPLE_ARRAY_SIZE];
776                 int c = s->sample_array[(idx + 5 * channels) % SAMPLE_ARRAY_SIZE];
777                 int d = s->sample_array[(idx + 9 * channels) % SAMPLE_ARRAY_SIZE];
778                 int score = a - d;
779                 if (h < score && (b ^ c) < 0) {
780                     h = score;
781                     i_start = idx;
782                 }
783             }
784         }
785
786         s->last_i_start = i_start;
787     } else {
788         i_start = s->last_i_start;
789     }
790
791     bgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0x00);
792     if (s->show_audio == 1) {
793         fill_rectangle(screen,
794                        s->xleft, s->ytop, s->width, s->height,
795                        bgcolor);
796
797         fgcolor = SDL_MapRGB(screen->format, 0xff, 0xff, 0xff);
798
799         /* total height for one channel */
800         h = s->height / nb_display_channels;
801         /* graph height / 2 */
802         h2 = (h * 9) / 20;
803         for (ch = 0; ch < nb_display_channels; ch++) {
804             i = i_start + ch;
805             y1 = s->ytop + ch * h + (h / 2); /* position of center line */
806             for (x = 0; x < s->width; x++) {
807                 y = (s->sample_array[i] * h2) >> 15;
808                 if (y < 0) {
809                     y = -y;
810                     ys = y1 - y;
811                 } else {
812                     ys = y1;
813                 }
814                 fill_rectangle(screen,
815                                s->xleft + x, ys, 1, y,
816                                fgcolor);
817                 i += channels;
818                 if (i >= SAMPLE_ARRAY_SIZE)
819                     i -= SAMPLE_ARRAY_SIZE;
820             }
821         }
822
823         fgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0xff);
824
825         for (ch = 1; ch < nb_display_channels; ch++) {
826             y = s->ytop + ch * h;
827             fill_rectangle(screen,
828                            s->xleft, y, s->width, 1,
829                            fgcolor);
830         }
831         SDL_UpdateRect(screen, s->xleft, s->ytop, s->width, s->height);
832     } else {
833         nb_display_channels= FFMIN(nb_display_channels, 2);
834         if (rdft_bits != s->rdft_bits) {
835             av_rdft_end(s->rdft);
836             av_free(s->rdft_data);
837             s->rdft = av_rdft_init(rdft_bits, DFT_R2C);
838             s->rdft_bits = rdft_bits;
839             s->rdft_data = av_malloc(4 * nb_freq * sizeof(*s->rdft_data));
840         }
841         {
842             FFTSample *data[2];
843             for (ch = 0; ch < nb_display_channels; ch++) {
844                 data[ch] = s->rdft_data + 2 * nb_freq * ch;
845                 i = i_start + ch;
846                 for (x = 0; x < 2 * nb_freq; x++) {
847                     double w = (x-nb_freq) * (1.0 / nb_freq);
848                     data[ch][x] = s->sample_array[i] * (1.0 - w * w);
849                     i += channels;
850                     if (i >= SAMPLE_ARRAY_SIZE)
851                         i -= SAMPLE_ARRAY_SIZE;
852                 }
853                 av_rdft_calc(s->rdft, data[ch]);
854             }
855             // least efficient way to do this, we should of course directly access it but its more than fast enough
856             for (y = 0; y < s->height; y++) {
857                 double w = 1 / sqrt(nb_freq);
858                 int a = sqrt(w * sqrt(data[0][2 * y + 0] * data[0][2 * y + 0] + data[0][2 * y + 1] * data[0][2 * y + 1]));
859                 int b = (nb_display_channels == 2 ) ? sqrt(w * sqrt(data[1][2 * y + 0] * data[1][2 * y + 0]
860                        + data[1][2 * y + 1] * data[1][2 * y + 1])) : a;
861                 a = FFMIN(a, 255);
862                 b = FFMIN(b, 255);
863                 fgcolor = SDL_MapRGB(screen->format, a, b, (a + b) / 2);
864
865                 fill_rectangle(screen,
866                             s->xpos, s->height-y, 1, 1,
867                             fgcolor);
868             }
869         }
870         SDL_UpdateRect(screen, s->xpos, s->ytop, 1, s->height);
871         s->xpos++;
872         if (s->xpos >= s->width)
873             s->xpos= s->xleft;
874     }
875 }
876
/* (Re)create the SDL output surface.  The window size is picked from,
 * in order: the fullscreen dimensions, an explicit user request, the
 * filter/codec output dimensions, or a 640x480 fallback.  Returns 0
 * on success (including "already the right size"), -1 if SDL cannot
 * set the video mode. */
static int video_open(VideoState *is)
{
    int flags = SDL_HWSURFACE | SDL_ASYNCBLIT | SDL_HWACCEL;
    int w,h;

    if (is_full_screen) flags |= SDL_FULLSCREEN;
    else                flags |= SDL_RESIZABLE;

    if (is_full_screen && fs_screen_width) {
        w = fs_screen_width;
        h = fs_screen_height;
    } else if (!is_full_screen && screen_width) {
        w = screen_width;
        h = screen_height;
#if CONFIG_AVFILTER
    } else if (is->out_video_filter && is->out_video_filter->inputs[0]) {
        w = is->out_video_filter->inputs[0]->w;
        h = is->out_video_filter->inputs[0]->h;
#else
    } else if (is->video_st && is->video_st->codec->width) {
        w = is->video_st->codec->width;
        h = is->video_st->codec->height;
#endif
    } else {
        w = 640;
        h = 480;
    }
    /* nothing to do if the current surface already has the wanted size */
    if (screen && is->width == screen->w && screen->w == w
       && is->height== screen->h && screen->h == h)
        return 0;

#if defined(__APPLE__) && !SDL_VERSION_ATLEAST(1, 2, 14)
    /* setting bits_per_pixel = 0 or 32 causes blank video on OS X and older SDL */
    screen = SDL_SetVideoMode(w, h, 24, flags);
#else
    screen = SDL_SetVideoMode(w, h, 0, flags);
#endif
    if (!screen) {
        fprintf(stderr, "SDL: could not set video mode - exiting\n");
        return -1;
    }
    if (!window_title)
        window_title = input_filename;
    SDL_WM_SetCaption(window_title, window_title);

    is->width  = screen->w;
    is->height = screen->h;

    return 0;
}
927
928 /* display the current picture, if any */
929 static void video_display(VideoState *is)
930 {
931     if (!screen)
932         video_open(cur_stream);
933     if (is->audio_st && is->show_audio)
934         video_audio_display(is);
935     else if (is->video_st)
936         video_image_display(is);
937 }
938
939 static int refresh_thread(void *opaque)
940 {
941     VideoState *is= opaque;
942     while (!is->abort_request) {
943         SDL_Event event;
944         event.type = FF_REFRESH_EVENT;
945         event.user.data1 = opaque;
946         if (!is->refresh) {
947             is->refresh = 1;
948             SDL_PushEvent(&event);
949         }
950         usleep(is->audio_st && is->show_audio ? rdftspeed * 1000 : 5000); // FIXME ideally we should wait the correct time but SDLs event passing is so slow it would be silly
951     }
952     return 0;
953 }
954
955 /* get the current audio clock value */
956 static double get_audio_clock(VideoState *is)
957 {
958     double pts;
959     int hw_buf_size, bytes_per_sec;
960     pts = is->audio_clock;
961     hw_buf_size = audio_write_get_buf_size(is);
962     bytes_per_sec = 0;
963     if (is->audio_st) {
964         bytes_per_sec = is->audio_st->codec->sample_rate * is->sdl_channels *
965                         av_get_bytes_per_sample(is->sdl_sample_fmt);
966     }
967     if (bytes_per_sec)
968         pts -= (double)hw_buf_size / bytes_per_sec;
969     return pts;
970 }
971
972 /* get the current video clock value */
973 static double get_video_clock(VideoState *is)
974 {
975     if (is->paused) {
976         return is->video_current_pts;
977     } else {
978         return is->video_current_pts_drift + av_gettime() / 1000000.0;
979     }
980 }
981
982 /* get the current external clock value */
983 static double get_external_clock(VideoState *is)
984 {
985     int64_t ti;
986     ti = av_gettime();
987     return is->external_clock + ((ti - is->external_clock_time) * 1e-6);
988 }
989
990 /* get the current master clock value */
991 static double get_master_clock(VideoState *is)
992 {
993     double val;
994
995     if (is->av_sync_type == AV_SYNC_VIDEO_MASTER) {
996         if (is->video_st)
997             val = get_video_clock(is);
998         else
999             val = get_audio_clock(is);
1000     } else if (is->av_sync_type == AV_SYNC_AUDIO_MASTER) {
1001         if (is->audio_st)
1002             val = get_audio_clock(is);
1003         else
1004             val = get_video_clock(is);
1005     } else {
1006         val = get_external_clock(is);
1007     }
1008     return val;
1009 }
1010
1011 /* seek in the stream */
1012 static void stream_seek(VideoState *is, int64_t pos, int64_t rel, int seek_by_bytes)
1013 {
1014     if (!is->seek_req) {
1015         is->seek_pos = pos;
1016         is->seek_rel = rel;
1017         is->seek_flags &= ~AVSEEK_FLAG_BYTE;
1018         if (seek_by_bytes)
1019             is->seek_flags |= AVSEEK_FLAG_BYTE;
1020         is->seek_req = 1;
1021     }
1022 }
1023
/* pause or resume the video */
static void stream_pause(VideoState *is)
{
    if (is->paused) {
        /* resuming: frame_timer must skip over the wall-clock time spent
         * paused; drift was frozen at pause time, so (now + drift - pts)
         * is exactly the paused interval */
        is->frame_timer += av_gettime() / 1000000.0 + is->video_current_pts_drift - is->video_current_pts;
        /* NOTE(review): read_pause_return presumably holds the result of
         * av_read_pause() from the read thread — confirm against caller */
        if (is->read_pause_return != AVERROR(ENOSYS)) {
            is->video_current_pts = is->video_current_pts_drift + av_gettime() / 1000000.0;
        }
        /* re-anchor the drift to the current wall clock */
        is->video_current_pts_drift = is->video_current_pts - av_gettime() / 1000000.0;
    }
    is->paused = !is->paused;
}
1036
1037 static double compute_target_time(double frame_current_pts, VideoState *is)
1038 {
1039     double delay, sync_threshold, diff;
1040
1041     /* compute nominal delay */
1042     delay = frame_current_pts - is->frame_last_pts;
1043     if (delay <= 0 || delay >= 10.0) {
1044         /* if incorrect delay, use previous one */
1045         delay = is->frame_last_delay;
1046     } else {
1047         is->frame_last_delay = delay;
1048     }
1049     is->frame_last_pts = frame_current_pts;
1050
1051     /* update delay to follow master synchronisation source */
1052     if (((is->av_sync_type == AV_SYNC_AUDIO_MASTER && is->audio_st) ||
1053          is->av_sync_type == AV_SYNC_EXTERNAL_CLOCK)) {
1054         /* if video is slave, we try to correct big delays by
1055            duplicating or deleting a frame */
1056         diff = get_video_clock(is) - get_master_clock(is);
1057
1058         /* skip or repeat frame. We take into account the
1059            delay to compute the threshold. I still don't know
1060            if it is the best guess */
1061         sync_threshold = FFMAX(AV_SYNC_THRESHOLD, delay);
1062         if (fabs(diff) < AV_NOSYNC_THRESHOLD) {
1063             if (diff <= -sync_threshold)
1064                 delay = 0;
1065             else if (diff >= sync_threshold)
1066                 delay = 2 * delay;
1067         }
1068     }
1069     is->frame_timer += delay;
1070
1071     av_dlog(NULL, "video: delay=%0.3f pts=%0.3f A-V=%f\n",
1072             delay, frame_current_pts, -diff);
1073
1074     return is->frame_timer;
1075 }
1076
/* called to display each frame; driven by FF_REFRESH_EVENT from
 * refresh_thread(). Dequeues due pictures, drops late ones, retires
 * expired subtitles, draws, and periodically prints the status line. */
static void video_refresh_timer(void *opaque)
{
    VideoState *is = opaque;
    VideoPicture *vp;

    SubPicture *sp, *sp2;

    if (is->video_st) {
retry:
        if (is->pictq_size == 0) {
            // nothing to do, no picture to display in the queue
        } else {
            double time = av_gettime() / 1000000.0;
            double next_target;
            /* dequeue the picture */
            vp = &is->pictq[is->pictq_rindex];

            /* not yet due: wait for the next refresh event */
            if (time < vp->target_clock)
                return;
            /* update current video pts */
            is->video_current_pts = vp->pts;
            is->video_current_pts_drift = is->video_current_pts - time;
            is->video_current_pos = vp->pos;
            /* determine when the following picture is due, to decide
             * whether this one is already too late */
            if (is->pictq_size > 1) {
                VideoPicture *nextvp = &is->pictq[(is->pictq_rindex + 1) % VIDEO_PICTURE_QUEUE_SIZE];
                assert(nextvp->target_clock >= vp->target_clock);
                next_target= nextvp->target_clock;
            } else {
                next_target = vp->target_clock + is->video_clock - vp->pts; // FIXME pass durations cleanly
            }
            if (framedrop && time > next_target) {
                /* we are late: increase the decoder-side skip ratio and,
                 * if possible, drop this picture and retry with the next */
                is->skip_frames *= 1.0 + FRAME_SKIP_FACTOR;
                if (is->pictq_size > 1 || time > next_target + 0.5) {
                    /* update queue size and signal for next picture */
                    if (++is->pictq_rindex == VIDEO_PICTURE_QUEUE_SIZE)
                        is->pictq_rindex = 0;

                    SDL_LockMutex(is->pictq_mutex);
                    is->pictq_size--;
                    SDL_CondSignal(is->pictq_cond);
                    SDL_UnlockMutex(is->pictq_mutex);
                    goto retry;
                }
            }

            if (is->subtitle_st) {
                if (is->subtitle_stream_changed) {
                    /* subtitle stream switched: flush the whole queue */
                    SDL_LockMutex(is->subpq_mutex);

                    while (is->subpq_size) {
                        free_subpicture(&is->subpq[is->subpq_rindex]);

                        /* update queue size and signal for next picture */
                        if (++is->subpq_rindex == SUBPICTURE_QUEUE_SIZE)
                            is->subpq_rindex = 0;

                        is->subpq_size--;
                    }
                    is->subtitle_stream_changed = 0;

                    SDL_CondSignal(is->subpq_cond);
                    SDL_UnlockMutex(is->subpq_mutex);
                } else {
                    if (is->subpq_size > 0) {
                        sp = &is->subpq[is->subpq_rindex];

                        if (is->subpq_size > 1)
                            sp2 = &is->subpq[(is->subpq_rindex + 1) % SUBPICTURE_QUEUE_SIZE];
                        else
                            sp2 = NULL;

                        /* drop the subtitle once it expired or once the
                         * following one has become active */
                        if ((is->video_current_pts > (sp->pts + ((float) sp->sub.end_display_time / 1000)))
                                || (sp2 && is->video_current_pts > (sp2->pts + ((float) sp2->sub.start_display_time / 1000))))
                        {
                            free_subpicture(sp);

                            /* update queue size and signal for next picture */
                            if (++is->subpq_rindex == SUBPICTURE_QUEUE_SIZE)
                                is->subpq_rindex = 0;

                            SDL_LockMutex(is->subpq_mutex);
                            is->subpq_size--;
                            SDL_CondSignal(is->subpq_cond);
                            SDL_UnlockMutex(is->subpq_mutex);
                        }
                    }
                }
            }

            /* display picture */
            if (!display_disable)
                video_display(is);

            /* update queue size and signal for next picture */
            if (++is->pictq_rindex == VIDEO_PICTURE_QUEUE_SIZE)
                is->pictq_rindex = 0;

            SDL_LockMutex(is->pictq_mutex);
            is->pictq_size--;
            SDL_CondSignal(is->pictq_cond);
            SDL_UnlockMutex(is->pictq_mutex);
        }
    } else if (is->audio_st) {
        /* draw the next audio frame */

        /* if only audio stream, then display the audio bars (better
           than nothing, just to test the implementation) */

        /* display picture */
        if (!display_disable)
            video_display(is);
    }
    if (show_status) {
        static int64_t last_time;
        int64_t cur_time;
        int aqsize, vqsize, sqsize;
        double av_diff;

        cur_time = av_gettime();
        /* print at most every 30ms */
        if (!last_time || (cur_time - last_time) >= 30000) {
            aqsize = 0;
            vqsize = 0;
            sqsize = 0;
            if (is->audio_st)
                aqsize = is->audioq.size;
            if (is->video_st)
                vqsize = is->videoq.size;
            if (is->subtitle_st)
                sqsize = is->subtitleq.size;
            av_diff = 0;
            if (is->audio_st && is->video_st)
                av_diff = get_audio_clock(is) - get_video_clock(is);
            printf("%7.2f A-V:%7.3f s:%3.1f aq=%5dKB vq=%5dKB sq=%5dB f=%"PRId64"/%"PRId64"   \r",
                   get_master_clock(is), av_diff, FFMAX(is->skip_frames - 1, 0), aqsize / 1024,
                   vqsize / 1024, sqsize, is->pts_ctx.num_faulty_dts, is->pts_ctx.num_faulty_pts);
            fflush(stdout);
            last_time = cur_time;
        }
    }
}
1218
/* Tear down a VideoState: stop the worker threads first, then free all
 * queued pictures and the synchronization primitives. */
static void stream_close(VideoState *is)
{
    VideoPicture *vp;
    int i;
    /* XXX: use a special url_shutdown call to abort parse cleanly */
    is->abort_request = 1;
    /* threads must be joined before freeing anything they may touch */
    SDL_WaitThread(is->parse_tid, NULL);
    SDL_WaitThread(is->refresh_tid, NULL);

    /* free all pictures */
    for (i = 0; i < VIDEO_PICTURE_QUEUE_SIZE; i++) {
        vp = &is->pictq[i];
#if CONFIG_AVFILTER
        if (vp->picref) {
            avfilter_unref_buffer(vp->picref);
            vp->picref = NULL;
        }
#endif
        if (vp->bmp) {
            SDL_FreeYUVOverlay(vp->bmp);
            vp->bmp = NULL;
        }
    }
    SDL_DestroyMutex(is->pictq_mutex);
    SDL_DestroyCond(is->pictq_cond);
    SDL_DestroyMutex(is->subpq_mutex);
    SDL_DestroyCond(is->subpq_cond);
#if !CONFIG_AVFILTER
    if (is->img_convert_ctx)
        sws_freeContext(is->img_convert_ctx);
#endif
    av_free(is);
}
1252
/* Global shutdown: close the stream, release library state and quit.
 * Never returns. */
static void do_exit(void)
{
    if (cur_stream) {
        stream_close(cur_stream);
        cur_stream = NULL;
    }
    uninit_opts();
#if CONFIG_AVFILTER
    avfilter_uninit();
#endif
    avformat_network_deinit();
    if (show_status)
        printf("\n");
    SDL_Quit();
    /* flush any buffered log output */
    av_log(NULL, AV_LOG_QUIET, "");
    exit(0);
}
1270
/* allocate a picture (needs to do that in main thread to avoid
   potential locking problems); runs in response to FF_ALLOC_EVENT and
   signals pictq_cond so the decoder thread waiting in queue_picture()
   can proceed */
static void alloc_picture(void *opaque)
{
    VideoState *is = opaque;
    VideoPicture *vp;

    vp = &is->pictq[is->pictq_windex];

    /* drop any previous overlay before (re)allocating */
    if (vp->bmp)
        SDL_FreeYUVOverlay(vp->bmp);

#if CONFIG_AVFILTER
    if (vp->picref)
        avfilter_unref_buffer(vp->picref);
    vp->picref = NULL;

    /* dimensions come from the filter graph's output when filtering */
    vp->width   = is->out_video_filter->inputs[0]->w;
    vp->height  = is->out_video_filter->inputs[0]->h;
    vp->pix_fmt = is->out_video_filter->inputs[0]->format;
#else
    vp->width   = is->video_st->codec->width;
    vp->height  = is->video_st->codec->height;
    vp->pix_fmt = is->video_st->codec->pix_fmt;
#endif

    vp->bmp = SDL_CreateYUVOverlay(vp->width, vp->height,
                                   SDL_YV12_OVERLAY,
                                   screen);
    if (!vp->bmp || vp->bmp->pitches[0] < vp->width) {
        /* SDL allocates a buffer smaller than requested if the video
         * overlay hardware is unable to support the requested size. */
        fprintf(stderr, "Error: the video system does not support an image\n"
                        "size of %dx%d pixels. Try using -vf \"scale=w:h\"\n"
                        "to reduce the image size.\n", vp->width, vp->height );
        do_exit();
    }

    /* publish the allocation under the lock and wake the decoder thread */
    SDL_LockMutex(is->pictq_mutex);
    vp->allocated = 1;
    SDL_CondSignal(is->pictq_cond);
    SDL_UnlockMutex(is->pictq_mutex);
}
1314
1315 /**
1316  *
1317  * @param pts the dts of the pkt / pts of the frame and guessed if not known
1318  */
1319 static int queue_picture(VideoState *is, AVFrame *src_frame, double pts, int64_t pos)
1320 {
1321     VideoPicture *vp;
1322 #if CONFIG_AVFILTER
1323     AVPicture pict_src;
1324 #else
1325     int dst_pix_fmt = PIX_FMT_YUV420P;
1326 #endif
1327     /* wait until we have space to put a new picture */
1328     SDL_LockMutex(is->pictq_mutex);
1329
1330     if (is->pictq_size >= VIDEO_PICTURE_QUEUE_SIZE && !is->refresh)
1331         is->skip_frames = FFMAX(1.0 - FRAME_SKIP_FACTOR, is->skip_frames * (1.0 - FRAME_SKIP_FACTOR));
1332
1333     while (is->pictq_size >= VIDEO_PICTURE_QUEUE_SIZE &&
1334            !is->videoq.abort_request) {
1335         SDL_CondWait(is->pictq_cond, is->pictq_mutex);
1336     }
1337     SDL_UnlockMutex(is->pictq_mutex);
1338
1339     if (is->videoq.abort_request)
1340         return -1;
1341
1342     vp = &is->pictq[is->pictq_windex];
1343
1344     /* alloc or resize hardware picture buffer */
1345     if (!vp->bmp || vp->reallocate ||
1346 #if CONFIG_AVFILTER
1347         vp->width  != is->out_video_filter->inputs[0]->w ||
1348         vp->height != is->out_video_filter->inputs[0]->h) {
1349 #else
1350         vp->width != is->video_st->codec->width ||
1351         vp->height != is->video_st->codec->height) {
1352 #endif
1353         SDL_Event event;
1354
1355         vp->allocated  = 0;
1356         vp->reallocate = 0;
1357
1358         /* the allocation must be done in the main thread to avoid
1359            locking problems */
1360         event.type = FF_ALLOC_EVENT;
1361         event.user.data1 = is;
1362         SDL_PushEvent(&event);
1363
1364         /* wait until the picture is allocated */
1365         SDL_LockMutex(is->pictq_mutex);
1366         while (!vp->allocated && !is->videoq.abort_request) {
1367             SDL_CondWait(is->pictq_cond, is->pictq_mutex);
1368         }
1369         SDL_UnlockMutex(is->pictq_mutex);
1370
1371         if (is->videoq.abort_request)
1372             return -1;
1373     }
1374
1375     /* if the frame is not skipped, then display it */
1376     if (vp->bmp) {
1377         AVPicture pict = { { 0 } };
1378 #if CONFIG_AVFILTER
1379         if (vp->picref)
1380             avfilter_unref_buffer(vp->picref);
1381         vp->picref = src_frame->opaque;
1382 #endif
1383
1384         /* get a pointer on the bitmap */
1385         SDL_LockYUVOverlay (vp->bmp);
1386
1387         pict.data[0] = vp->bmp->pixels[0];
1388         pict.data[1] = vp->bmp->pixels[2];
1389         pict.data[2] = vp->bmp->pixels[1];
1390
1391         pict.linesize[0] = vp->bmp->pitches[0];
1392         pict.linesize[1] = vp->bmp->pitches[2];
1393         pict.linesize[2] = vp->bmp->pitches[1];
1394
1395 #if CONFIG_AVFILTER
1396         pict_src.data[0] = src_frame->data[0];
1397         pict_src.data[1] = src_frame->data[1];
1398         pict_src.data[2] = src_frame->data[2];
1399
1400         pict_src.linesize[0] = src_frame->linesize[0];
1401         pict_src.linesize[1] = src_frame->linesize[1];
1402         pict_src.linesize[2] = src_frame->linesize[2];
1403
1404         // FIXME use direct rendering
1405         av_picture_copy(&pict, &pict_src,
1406                         vp->pix_fmt, vp->width, vp->height);
1407 #else
1408         sws_flags = av_get_int(sws_opts, "sws_flags", NULL);
1409         is->img_convert_ctx = sws_getCachedContext(is->img_convert_ctx,
1410             vp->width, vp->height, vp->pix_fmt, vp->width, vp->height,
1411             dst_pix_fmt, sws_flags, NULL, NULL, NULL);
1412         if (is->img_convert_ctx == NULL) {
1413             fprintf(stderr, "Cannot initialize the conversion context\n");
1414             exit(1);
1415         }
1416         sws_scale(is->img_convert_ctx, src_frame->data, src_frame->linesize,
1417                   0, vp->height, pict.data, pict.linesize);
1418 #endif
1419         /* update the bitmap content */
1420         SDL_UnlockYUVOverlay(vp->bmp);
1421
1422         vp->pts = pts;
1423         vp->pos = pos;
1424
1425         /* now we can update the picture count */
1426         if (++is->pictq_windex == VIDEO_PICTURE_QUEUE_SIZE)
1427             is->pictq_windex = 0;
1428         SDL_LockMutex(is->pictq_mutex);
1429         vp->target_clock = compute_target_time(vp->pts, is);
1430
1431         is->pictq_size++;
1432         SDL_UnlockMutex(is->pictq_mutex);
1433     }
1434     return 0;
1435 }
1436
1437 /**
1438  * compute the exact PTS for the picture if it is omitted in the stream
1439  * @param pts1 the dts of the pkt / pts of the frame
1440  */
1441 static int output_picture2(VideoState *is, AVFrame *src_frame, double pts1, int64_t pos)
1442 {
1443     double frame_delay, pts;
1444
1445     pts = pts1;
1446
1447     if (pts != 0) {
1448         /* update video clock with pts, if present */
1449         is->video_clock = pts;
1450     } else {
1451         pts = is->video_clock;
1452     }
1453     /* update video clock for next frame */
1454     frame_delay = av_q2d(is->video_st->codec->time_base);
1455     /* for MPEG2, the frame can be repeated, so we update the
1456        clock accordingly */
1457     frame_delay += src_frame->repeat_pict * (frame_delay * 0.5);
1458     is->video_clock += frame_delay;
1459
1460     return queue_picture(is, src_frame, pts, pos);
1461 }
1462
/* Pull the next packet from the video queue and decode it.
 * Returns 1 when a frame was produced and should be displayed,
 * 0 when no frame is available (or the frame is skipped, or the queue
 * was flushed), and -1 on abort. On success *pts holds the chosen
 * timestamp in stream time_base units (0 if unknown). */
static int get_video_frame(VideoState *is, AVFrame *frame, int64_t *pts, AVPacket *pkt)
{
    int got_picture, i;

    if (packet_queue_get(&is->videoq, pkt, 1) < 0)
        return -1;

    /* a flush packet signals a seek: reset decoder and timing state */
    if (pkt->data == flush_pkt.data) {
        avcodec_flush_buffers(is->video_st->codec);

        SDL_LockMutex(is->pictq_mutex);
        // Make sure there are no long delay timers (ideally we should just flush the queue but thats harder)
        for (i = 0; i < VIDEO_PICTURE_QUEUE_SIZE; i++) {
            is->pictq[i].target_clock= 0;
        }
        /* wait for the refresh thread to drain the picture queue */
        while (is->pictq_size && !is->videoq.abort_request) {
            SDL_CondWait(is->pictq_cond, is->pictq_mutex);
        }
        is->video_current_pos = -1;
        SDL_UnlockMutex(is->pictq_mutex);

        init_pts_correction(&is->pts_ctx);
        is->frame_last_pts = AV_NOPTS_VALUE;
        is->frame_last_delay = 0;
        is->frame_timer = (double)av_gettime() / 1000000.0;
        is->skip_frames = 1;
        is->skip_frames_index = 0;
        return 0;
    }

    avcodec_decode_video2(is->video_st->codec, frame, &got_picture, pkt);

    if (got_picture) {
        /* pick a timestamp source per -drp: guess (default), pts, or dts */
        if (decoder_reorder_pts == -1) {
            *pts = guess_correct_pts(&is->pts_ctx, frame->pkt_pts, frame->pkt_dts);
        } else if (decoder_reorder_pts) {
            *pts = frame->pkt_pts;
        } else {
            *pts = frame->pkt_dts;
        }

        if (*pts == AV_NOPTS_VALUE) {
            *pts = 0;
        }

        /* frame-skip accounting: only every skip_frames-th frame is shown */
        is->skip_frames_index += 1;
        if (is->skip_frames_index >= is->skip_frames) {
            is->skip_frames_index -= FFMAX(is->skip_frames, 1.0);
            return 1;
        }

    }
    return 0;
}
1517
1518 #if CONFIG_AVFILTER
/* private state of the avplay_input source filter */
typedef struct {
    VideoState *is;     // owning player state
    AVFrame *frame;     // reusable decode target, allocated in input_init()
    int use_dr1;        // non-zero when direct rendering callbacks are installed
} FilterPriv;
1524
/* get_buffer callback for direct rendering: back the decoder's frame with
 * an AVFilterBufferRef so decoded data lands directly in a filter buffer */
static int input_get_buffer(AVCodecContext *codec, AVFrame *pic)
{
    AVFilterContext *ctx = codec->opaque;
    AVFilterBufferRef  *ref;
    int perms = AV_PERM_WRITE;
    int i, w, h, stride[AV_NUM_DATA_POINTERS];
    unsigned edge;
    int pixel_size;

    if (codec->codec->capabilities & CODEC_CAP_NEG_LINESIZES)
        perms |= AV_PERM_NEG_LINESIZES;

    /* translate the decoder's buffer hints into filter buffer permissions */
    if (pic->buffer_hints & FF_BUFFER_HINTS_VALID) {
        if (pic->buffer_hints & FF_BUFFER_HINTS_READABLE) perms |= AV_PERM_READ;
        if (pic->buffer_hints & FF_BUFFER_HINTS_PRESERVE) perms |= AV_PERM_PRESERVE;
        if (pic->buffer_hints & FF_BUFFER_HINTS_REUSABLE) perms |= AV_PERM_REUSE2;
    }
    if (pic->reference) perms |= AV_PERM_READ | AV_PERM_PRESERVE;

    /* pad dimensions for alignment and (unless EMU_EDGE) the edge border
     * the decoder needs around the visible picture */
    w = codec->width;
    h = codec->height;
    avcodec_align_dimensions2(codec, &w, &h, stride);
    edge = codec->flags & CODEC_FLAG_EMU_EDGE ? 0 : avcodec_get_edge_width();
    w += edge << 1;
    h += edge << 1;

    if (!(ref = avfilter_get_video_buffer(ctx->outputs[0], perms, w, h)))
        return -1;

    pixel_size = av_pix_fmt_descriptors[ref->format].comp[0].step_minus1 + 1;
    ref->video->w = codec->width;
    ref->video->h = codec->height;
    for (i = 0; i < 4; i ++) {
        unsigned hshift = (i == 1 || i == 2) ? av_pix_fmt_descriptors[ref->format].log2_chroma_w : 0;
        unsigned vshift = (i == 1 || i == 2) ? av_pix_fmt_descriptors[ref->format].log2_chroma_h : 0;

        /* offset each plane pointer past the edge border (chroma planes
         * use shifted offsets) */
        if (ref->data[i]) {
            ref->data[i]    += ((edge * pixel_size) >> hshift) + ((edge * ref->linesize[i]) >> vshift);
        }
        pic->data[i]     = ref->data[i];
        pic->linesize[i] = ref->linesize[i];
    }
    /* stash the buffer ref so release/reget and the filter can find it */
    pic->opaque = ref;
    pic->type   = FF_BUFFER_TYPE_USER;
    pic->reordered_opaque = codec->reordered_opaque;
    pic->width               = codec->width;
    pic->height              = codec->height;
    pic->format              = codec->pix_fmt;
    pic->sample_aspect_ratio = codec->sample_aspect_ratio;
    if (codec->pkt) pic->pkt_pts = codec->pkt->pts;
    else            pic->pkt_pts = AV_NOPTS_VALUE;
    return 0;
}
1578
/* release_buffer callback: drop the filter buffer ref backing the frame */
static void input_release_buffer(AVCodecContext *codec, AVFrame *pic)
{
    memset(pic->data, 0, sizeof(pic->data));
    avfilter_unref_buffer(pic->opaque);
}
1584
/* reget_buffer callback: reuse the existing filter buffer if the picture
 * properties are unchanged, otherwise fail (allocate fresh if unset) */
static int input_reget_buffer(AVCodecContext *codec, AVFrame *pic)
{
    AVFilterBufferRef *ref = pic->opaque;

    /* no buffer yet: fall back to a normal readable get_buffer */
    if (pic->data[0] == NULL) {
        pic->buffer_hints |= FF_BUFFER_HINTS_READABLE;
        return codec->get_buffer(codec, pic);
    }

    if ((codec->width != ref->video->w) || (codec->height != ref->video->h) ||
        (codec->pix_fmt != ref->format)) {
        av_log(codec, AV_LOG_ERROR, "Picture properties changed.\n");
        return -1;
    }

    pic->reordered_opaque = codec->reordered_opaque;
    if (codec->pkt) pic->pkt_pts = codec->pkt->pts;
    else            pic->pkt_pts = AV_NOPTS_VALUE;
    return 0;
}
1605
1606 static int input_init(AVFilterContext *ctx, const char *args, void *opaque)
1607 {
1608     FilterPriv *priv = ctx->priv;
1609     AVCodecContext *codec;
1610     if (!opaque) return -1;
1611
1612     priv->is = opaque;
1613     codec    = priv->is->video_st->codec;
1614     codec->opaque = ctx;
1615     if (codec->codec->capabilities & CODEC_CAP_DR1) {
1616         priv->use_dr1 = 1;
1617         codec->get_buffer     = input_get_buffer;
1618         codec->release_buffer = input_release_buffer;
1619         codec->reget_buffer   = input_reget_buffer;
1620         codec->thread_safe_callbacks = 1;
1621     }
1622
1623     priv->frame = avcodec_alloc_frame();
1624
1625     return 0;
1626 }
1627
/* uninit callback: free the decode frame allocated in input_init() */
static void input_uninit(AVFilterContext *ctx)
{
    FilterPriv *priv = ctx->priv;
    av_free(priv->frame);
}
1633
1634 static int input_request_frame(AVFilterLink *link)
1635 {
1636     FilterPriv *priv = link->src->priv;
1637     AVFilterBufferRef *picref;
1638     int64_t pts = 0;
1639     AVPacket pkt;
1640     int ret;
1641
1642     while (!(ret = get_video_frame(priv->is, priv->frame, &pts, &pkt)))
1643         av_free_packet(&pkt);
1644     if (ret < 0)
1645         return -1;
1646
1647     if (priv->use_dr1) {
1648         picref = avfilter_ref_buffer(priv->frame->opaque, ~0);
1649     } else {
1650         picref = avfilter_get_video_buffer(link, AV_PERM_WRITE, link->w, link->h);
1651         av_image_copy(picref->data, picref->linesize,
1652                       priv->frame->data, priv->frame->linesize,
1653                       picref->format, link->w, link->h);
1654     }
1655     av_free_packet(&pkt);
1656
1657     avfilter_copy_frame_props(picref, priv->frame);
1658     picref->pts = pts;
1659
1660     avfilter_start_frame(link, picref);
1661     avfilter_draw_slice(link, 0, link->h, 1);
1662     avfilter_end_frame(link);
1663
1664     return 0;
1665 }
1666
/* query_formats callback: the source produces exactly the decoder's
 * pixel format */
static int input_query_formats(AVFilterContext *ctx)
{
    FilterPriv *priv = ctx->priv;
    enum PixelFormat pix_fmts[] = {
        priv->is->video_st->codec->pix_fmt, PIX_FMT_NONE
    };

    avfilter_set_common_formats(ctx, avfilter_make_format_list(pix_fmts));
    return 0;
}
1677
/* config_props callback: advertise the decoder's dimensions and the
 * stream time base on the output link */
static int input_config_props(AVFilterLink *link)
{
    FilterPriv *priv  = link->src->priv;
    AVCodecContext *c = priv->is->video_st->codec;

    link->w = c->width;
    link->h = c->height;
    link->time_base = priv->is->video_st->time_base;

    return 0;
}
1689
/* Source filter feeding decoded frames from the player into the filter
 * graph: no inputs, one video output driven by input_request_frame(). */
static AVFilter input_filter =
{
    .name      = "avplay_input",

    .priv_size = sizeof(FilterPriv),

    .init      = input_init,
    .uninit    = input_uninit,

    .query_formats = input_query_formats,

    .inputs    = (AVFilterPad[]) {{ .name = NULL }},
    .outputs   = (AVFilterPad[]) {{ .name = "default",
                                    .type = AVMEDIA_TYPE_VIDEO,
                                    .request_frame = input_request_frame,
                                    .config_props  = input_config_props, },
                                  { .name = NULL }},
};
1708
/* Build the video filter graph: avplay_input source -> optional user
 * filter chain (vfilters) -> YUV420P sink. On success stores the sink in
 * is->out_video_filter. Returns 0 or a negative error code. */
static int configure_video_filters(AVFilterGraph *graph, VideoState *is, const char *vfilters)
{
    static const enum PixelFormat pix_fmts[] = { PIX_FMT_YUV420P, PIX_FMT_NONE };
    char sws_flags_str[128];
    int ret;
    SinkContext sink_ctx = { .pix_fmts = pix_fmts };
    AVFilterContext *filt_src = NULL, *filt_out = NULL;
    /* propagate the global scaler flags to auto-inserted scale filters */
    snprintf(sws_flags_str, sizeof(sws_flags_str), "flags=%d", sws_flags);
    graph->scale_sws_opts = av_strdup(sws_flags_str);

    if ((ret = avfilter_graph_create_filter(&filt_src, &input_filter, "src",
                                            NULL, is, graph)) < 0)
        return ret;
    if ((ret = avfilter_graph_create_filter(&filt_out, &sink, "out",
                                            NULL, &sink_ctx, graph)) < 0)
        return ret;

    if (vfilters) {
        /* splice the user chain between source ("in") and sink ("out");
         * NOTE(review): in this API version avfilter_graph_parse appears
         * to take ownership of inputs/outputs — confirm before changing */
        AVFilterInOut *outputs = avfilter_inout_alloc();
        AVFilterInOut *inputs  = avfilter_inout_alloc();

        outputs->name    = av_strdup("in");
        outputs->filter_ctx = filt_src;
        outputs->pad_idx = 0;
        outputs->next    = NULL;

        inputs->name    = av_strdup("out");
        inputs->filter_ctx = filt_out;
        inputs->pad_idx = 0;
        inputs->next    = NULL;

        if ((ret = avfilter_graph_parse(graph, vfilters, inputs, outputs, NULL)) < 0)
            return ret;
    } else {
        /* no user filters: connect source directly to sink */
        if ((ret = avfilter_link(filt_src, 0, filt_out, 0)) < 0)
            return ret;
    }

    if ((ret = avfilter_graph_config(graph, NULL)) < 0)
        return ret;

    is->out_video_filter = filt_out;

    return ret;
}
1754
1755 #endif  /* CONFIG_AVFILTER */
1756
/* Video decoding thread: pulls frames (directly, or through the filter
 * graph when CONFIG_AVFILTER), converts pts to seconds and hands frames
 * to output_picture2() until the queue aborts. */
static int video_thread(void *arg)
{
    VideoState *is = arg;
    AVFrame *frame = avcodec_alloc_frame();
    int64_t pts_int;
    double pts;
    int ret;

#if CONFIG_AVFILTER
    AVFilterGraph *graph = avfilter_graph_alloc();
    AVFilterContext *filt_out = NULL;
    int64_t pos;
    /* track coded size so the graph can be rebuilt on mid-stream changes */
    int last_w = is->video_st->codec->width;
    int last_h = is->video_st->codec->height;

    if ((ret = configure_video_filters(graph, is, vfilters)) < 0)
        goto the_end;
    filt_out = is->out_video_filter;
#endif

    for (;;) {
#if !CONFIG_AVFILTER
        AVPacket pkt;
#else
        AVFilterBufferRef *picref;
        AVRational tb;
#endif
        while (is->paused && !is->videoq.abort_request)
            SDL_Delay(10);
#if CONFIG_AVFILTER
        /* resolution changed: tear down and rebuild the filter graph */
        if (   last_w != is->video_st->codec->width
            || last_h != is->video_st->codec->height) {
            av_dlog(NULL, "Changing size %dx%d -> %dx%d\n", last_w, last_h,
                    is->video_st->codec->width, is->video_st->codec->height);
            avfilter_graph_free(&graph);
            graph = avfilter_graph_alloc();
            if ((ret = configure_video_filters(graph, is, vfilters)) < 0)
                goto the_end;
            filt_out = is->out_video_filter;
            last_w = is->video_st->codec->width;
            last_h = is->video_st->codec->height;
        }
        ret = get_filtered_video_frame(filt_out, frame, &picref, &tb);
        if (picref) {
            pts_int = picref->pts;
            pos     = picref->pos;
            /* keep the buffer ref alive; queue_picture() takes it over */
            frame->opaque = picref;
        }

        /* rescale pts when the filter output time base differs from the
         * stream's */
        if (ret >= 0 && av_cmp_q(tb, is->video_st->time_base)) {
            av_unused int64_t pts1 = pts_int;
            pts_int = av_rescale_q(pts_int, tb, is->video_st->time_base);
            av_dlog(NULL, "video_thread(): "
                    "tb:%d/%d pts:%"PRId64" -> tb:%d/%d pts:%"PRId64"\n",
                    tb.num, tb.den, pts1,
                    is->video_st->time_base.num, is->video_st->time_base.den, pts_int);
        }
#else
        ret = get_video_frame(is, frame, &pts_int, &pkt);
#endif

        if (ret < 0)
            goto the_end;

        /* 0 means "no displayable frame yet" — keep decoding */
        if (!ret)
            continue;

        pts = pts_int * av_q2d(is->video_st->time_base);

#if CONFIG_AVFILTER
        ret = output_picture2(is, frame, pts, pos);
#else
        ret = output_picture2(is, frame, pts,  pkt.pos);
        av_free_packet(&pkt);
#endif
        if (ret < 0)
            goto the_end;

        /* in single-step mode, pause again after each displayed frame */
        if (step)
            if (cur_stream)
                stream_pause(cur_stream);
    }
 the_end:
#if CONFIG_AVFILTER
    av_freep(&vfilters);
    avfilter_graph_free(&graph);
#endif
    av_free(frame);
    return 0;
}
1847
/* Subtitle decoding thread: pulls packets from the subtitle queue,
 * decodes them and stores the result in the subpicture ring buffer
 * (is->subpq) for the video-refresh code to blend onto frames.
 * Returns 0 on queue abort or thread exit. */
static int subtitle_thread(void *arg)
{
    VideoState *is = arg;
    SubPicture *sp;
    AVPacket pkt1, *pkt = &pkt1;
    int got_subtitle;
    double pts;
    int i, j;
    int r, g, b, y, u, v, a;

    for (;;) {
        /* busy-wait (10 ms steps) while playback is paused, unless an
         * abort was requested on the queue */
        while (is->paused && !is->subtitleq.abort_request) {
            SDL_Delay(10);
        }
        /* blocking get; negative return means the queue was aborted */
        if (packet_queue_get(&is->subtitleq, pkt, 1) < 0)
            break;

        /* a flush packet (sent after a seek) resets decoder state */
        if (pkt->data == flush_pkt.data) {
            avcodec_flush_buffers(is->subtitle_st->codec);
            continue;
        }
        /* wait until there is a free slot in the subpicture queue */
        SDL_LockMutex(is->subpq_mutex);
        while (is->subpq_size >= SUBPICTURE_QUEUE_SIZE &&
               !is->subtitleq.abort_request) {
            SDL_CondWait(is->subpq_cond, is->subpq_mutex);
        }
        SDL_UnlockMutex(is->subpq_mutex);

        if (is->subtitleq.abort_request)
            return 0;

        /* next write slot in the ring buffer */
        sp = &is->subpq[is->subpq_windex];

        /* NOTE: ipts is the PTS of the _first_ picture beginning in
           this packet, if any */
        pts = 0;
        if (pkt->pts != AV_NOPTS_VALUE)
            pts = av_q2d(is->subtitle_st->time_base) * pkt->pts;

        /* NOTE(review): the return value of avcodec_decode_subtitle2()
         * is not checked here; on a decode error got_subtitle stays 0
         * and the packet is silently dropped — confirm that is the
         * intended behavior */
        avcodec_decode_subtitle2(is->subtitle_st->codec, &sp->sub,
                                 &got_subtitle, pkt);

        /* format == 0 means a bitmap (paletted) subtitle */
        if (got_subtitle && sp->sub.format == 0) {
            sp->pts = pts;

            /* convert each rect's RGBA palette to YUVA in place so it
             * can be blended directly onto YUV video frames */
            for (i = 0; i < sp->sub.num_rects; i++)
            {
                for (j = 0; j < sp->sub.rects[i]->nb_colors; j++)
                {
                    RGBA_IN(r, g, b, a, (uint32_t*)sp->sub.rects[i]->pict.data[1] + j);
                    y = RGB_TO_Y_CCIR(r, g, b);
                    u = RGB_TO_U_CCIR(r, g, b, 0);
                    v = RGB_TO_V_CCIR(r, g, b, 0);
                    YUVA_OUT((uint32_t*)sp->sub.rects[i]->pict.data[1] + j, y, u, v, a);
                }
            }

            /* now we can update the picture count */
            if (++is->subpq_windex == SUBPICTURE_QUEUE_SIZE)
                is->subpq_windex = 0;
            SDL_LockMutex(is->subpq_mutex);
            is->subpq_size++;
            SDL_UnlockMutex(is->subpq_mutex);
        }
        av_free_packet(pkt);
    }
    return 0;
}
1916
1917 /* copy samples for viewing in editor window */
1918 static void update_sample_display(VideoState *is, short *samples, int samples_size)
1919 {
1920     int size, len;
1921
1922     size = samples_size / sizeof(short);
1923     while (size > 0) {
1924         len = SAMPLE_ARRAY_SIZE - is->sample_array_index;
1925         if (len > size)
1926             len = size;
1927         memcpy(is->sample_array + is->sample_array_index, samples, len * sizeof(short));
1928         samples += len;
1929         is->sample_array_index += len;
1930         if (is->sample_array_index >= SAMPLE_ARRAY_SIZE)
1931             is->sample_array_index = 0;
1932         size -= len;
1933     }
1934 }
1935
1936 /* return the new audio buffer size (samples can be added or deleted
1937    to get better sync if video or external master clock) */
1938 static int synchronize_audio(VideoState *is, short *samples,
1939                              int samples_size1, double pts)
1940 {
1941     int n, samples_size;
1942     double ref_clock;
1943
1944     n = is->sdl_channels * av_get_bytes_per_sample(is->sdl_sample_fmt);
1945     samples_size = samples_size1;
1946
1947     /* if not master, then we try to remove or add samples to correct the clock */
1948     if (((is->av_sync_type == AV_SYNC_VIDEO_MASTER && is->video_st) ||
1949          is->av_sync_type == AV_SYNC_EXTERNAL_CLOCK)) {
1950         double diff, avg_diff;
1951         int wanted_size, min_size, max_size, nb_samples;
1952
1953         ref_clock = get_master_clock(is);
1954         diff = get_audio_clock(is) - ref_clock;
1955
1956         if (diff < AV_NOSYNC_THRESHOLD) {
1957             is->audio_diff_cum = diff + is->audio_diff_avg_coef * is->audio_diff_cum;
1958             if (is->audio_diff_avg_count < AUDIO_DIFF_AVG_NB) {
1959                 /* not enough measures to have a correct estimate */
1960                 is->audio_diff_avg_count++;
1961             } else {
1962                 /* estimate the A-V difference */
1963                 avg_diff = is->audio_diff_cum * (1.0 - is->audio_diff_avg_coef);
1964
1965                 if (fabs(avg_diff) >= is->audio_diff_threshold) {
1966                     wanted_size = samples_size + ((int)(diff * is->audio_st->codec->sample_rate) * n);
1967                     nb_samples = samples_size / n;
1968
1969                     min_size = ((nb_samples * (100 - SAMPLE_CORRECTION_PERCENT_MAX)) / 100) * n;
1970                     max_size = ((nb_samples * (100 + SAMPLE_CORRECTION_PERCENT_MAX)) / 100) * n;
1971                     if (wanted_size < min_size)
1972                         wanted_size = min_size;
1973                     else if (wanted_size > max_size)
1974                         wanted_size = max_size;
1975
1976                     /* add or remove samples to correction the synchro */
1977                     if (wanted_size < samples_size) {
1978                         /* remove samples */
1979                         samples_size = wanted_size;
1980                     } else if (wanted_size > samples_size) {
1981                         uint8_t *samples_end, *q;
1982                         int nb;
1983
1984                         /* add samples */
1985                         nb = (samples_size - wanted_size);
1986                         samples_end = (uint8_t *)samples + samples_size - n;
1987                         q = samples_end + n;
1988                         while (nb > 0) {
1989                             memcpy(q, samples_end, n);
1990                             q += n;
1991                             nb -= n;
1992                         }
1993                         samples_size = wanted_size;
1994                     }
1995                 }
1996                 av_dlog(NULL, "diff=%f adiff=%f sample_diff=%d apts=%0.3f vpts=%0.3f %f\n",
1997                         diff, avg_diff, samples_size - samples_size1,
1998                         is->audio_clock, is->video_clock, is->audio_diff_threshold);
1999             }
2000         } else {
2001             /* too big difference : may be initial PTS errors, so
2002                reset A-V filter */
2003             is->audio_diff_avg_count = 0;
2004             is->audio_diff_cum       = 0;
2005         }
2006     }
2007
2008     return samples_size;
2009 }
2010
/* decode one audio frame and returns its uncompressed size */
/* On success, stores the decoded (and possibly resampled) samples'
 * pointer in is->audio_buf, writes the frame's presentation time to
 * *pts_ptr and returns the byte size of the data. Returns -1 when
 * paused or aborted, or a negative AVERROR on allocation failure. */
static int audio_decode_frame(VideoState *is, double *pts_ptr)
{
    AVPacket *pkt_temp = &is->audio_pkt_temp;  /* cursor into the current packet */
    AVPacket *pkt = &is->audio_pkt;            /* packet that owns the data */
    AVCodecContext *dec = is->audio_st->codec;
    int n, len1, data_size, got_frame;
    double pts;
    int new_packet = 0;
    int flush_complete = 0;

    for (;;) {
        /* NOTE: the audio packet can contain several frames */
        /* keep decoding while bytes remain, or while draining the
         * decoder with empty packets at EOF */
        while (pkt_temp->size > 0 || (!pkt_temp->data && new_packet)) {
            int resample_changed, audio_resample;

            /* lazily allocate / reset the reusable decode frame */
            if (!is->frame) {
                if (!(is->frame = avcodec_alloc_frame()))
                    return AVERROR(ENOMEM);
            } else
                avcodec_get_frame_defaults(is->frame);

            if (flush_complete)
                break;
            new_packet = 0;
            len1 = avcodec_decode_audio4(dec, is->frame, &got_frame, pkt_temp);
            if (len1 < 0) {
                /* if error, we skip the frame */
                pkt_temp->size = 0;
                break;
            }

            /* advance past the bytes consumed by the decoder */
            pkt_temp->data += len1;
            pkt_temp->size -= len1;

            if (!got_frame) {
                /* stop sending empty packets if the decoder is finished */
                if (!pkt_temp->data && dec->codec->capabilities & CODEC_CAP_DELAY)
                    flush_complete = 1;
                continue;
            }
            data_size = av_samples_get_buffer_size(NULL, dec->channels,
                                                   is->frame->nb_samples,
                                                   dec->sample_fmt, 1);

            /* do we need conversion to reach the SDL output format/layout? */
            audio_resample = dec->sample_fmt     != is->sdl_sample_fmt ||
                             dec->channel_layout != is->sdl_channel_layout;

            /* did the decoder's output format change since the
             * resampler was last configured? */
            resample_changed = dec->sample_fmt     != is->resample_sample_fmt ||
                               dec->channel_layout != is->resample_channel_layout;

            if ((!is->avr && audio_resample) || resample_changed) {
                if (is->avr)
                    avresample_close(is->avr);
                else if (audio_resample) {
                    int ret;
                    is->avr = avresample_alloc_context();
                    if (!is->avr) {
                        fprintf(stderr, "error allocating AVAudioResampleContext\n");
                        break;
                    }
                    av_opt_set_int(is->avr, "in_channel_layout",  dec->channel_layout,    0);
                    av_opt_set_int(is->avr, "in_sample_fmt",      dec->sample_fmt,        0);
                    av_opt_set_int(is->avr, "in_sample_rate",     dec->sample_rate,       0);
                    av_opt_set_int(is->avr, "out_channel_layout", is->sdl_channel_layout, 0);
                    av_opt_set_int(is->avr, "out_sample_fmt",     is->sdl_sample_fmt,     0);
                    av_opt_set_int(is->avr, "out_sample_rate",    dec->sample_rate,       0);
                    /* use a 16-bit internal format when the input is at
                     * most 16 bits per sample */
                    if (av_get_bytes_per_sample(dec->sample_fmt) <= 2)
                        av_opt_set_int(is->avr, "internal_sample_fmt", AV_SAMPLE_FMT_S16P, 0);

                    if ((ret = avresample_open(is->avr)) < 0) {
                        fprintf(stderr, "error initializing libavresample\n");
                        break;
                    }
                }
                /* remember the format the resampler was configured for */
                is->resample_sample_fmt     = dec->sample_fmt;
                is->resample_channel_layout = dec->channel_layout;
            }

            if (audio_resample) {
                void *tmp_out;
                int out_samples, out_size, out_linesize;
                int osize      = av_get_bytes_per_sample(is->sdl_sample_fmt);
                int nb_samples = is->frame->nb_samples;

                out_size = av_samples_get_buffer_size(&out_linesize,
                                                      is->sdl_channels,
                                                      nb_samples,
                                                      is->sdl_sample_fmt, 0);
                /* grow the conversion buffer; keep the old pointer on
                 * failure so it is not leaked */
                tmp_out = av_realloc(is->audio_buf1, out_size);
                if (!tmp_out)
                    return AVERROR(ENOMEM);
                is->audio_buf1 = tmp_out;

                out_samples = avresample_convert(is->avr,
                                                 (void **)&is->audio_buf1,
                                                 out_linesize, nb_samples,
                                                 (void **)is->frame->data,
                                                 is->frame->linesize[0],
                                                 is->frame->nb_samples);
                if (out_samples < 0) {
                    fprintf(stderr, "avresample_convert() failed\n");
                    break;
                }
                is->audio_buf = is->audio_buf1;
                data_size = out_samples * osize * is->sdl_channels;
            } else {
                /* decoder output is already in the SDL format: use it directly */
                is->audio_buf = is->frame->data[0];
            }

            /* if no pts, then compute it */
            pts = is->audio_clock;
            *pts_ptr = pts;
            /* advance the audio clock by the duration of this buffer */
            n = is->sdl_channels * av_get_bytes_per_sample(is->sdl_sample_fmt);
            is->audio_clock += (double)data_size /
                (double)(n * dec->sample_rate);
#ifdef DEBUG
            {
                static double last_clock;
                printf("audio: delay=%0.3f clock=%0.3f pts=%0.3f\n",
                       is->audio_clock - last_clock,
                       is->audio_clock, pts);
                last_clock = is->audio_clock;
            }
#endif
            return data_size;
        }

        /* free the current packet */
        if (pkt->data)
            av_free_packet(pkt);
        memset(pkt_temp, 0, sizeof(*pkt_temp));

        if (is->paused || is->audioq.abort_request) {
            return -1;
        }

        /* read next packet */
        if ((new_packet = packet_queue_get(&is->audioq, pkt, 1)) < 0)
            return -1;

        /* a flush packet (after a seek) resets the decoder */
        if (pkt->data == flush_pkt.data) {
            avcodec_flush_buffers(dec);
            flush_complete = 0;
        }

        *pkt_temp = *pkt;

        /* if update the audio clock with the pts */
        if (pkt->pts != AV_NOPTS_VALUE) {
            is->audio_clock = av_q2d(is->audio_st->time_base)*pkt->pts;
        }
    }
}
2165
2166 /* prepare a new audio buffer */
2167 static void sdl_audio_callback(void *opaque, Uint8 *stream, int len)
2168 {
2169     VideoState *is = opaque;
2170     int audio_size, len1;
2171     double pts;
2172
2173     audio_callback_time = av_gettime();
2174
2175     while (len > 0) {
2176         if (is->audio_buf_index >= is->audio_buf_size) {
2177            audio_size = audio_decode_frame(is, &pts);
2178            if (audio_size < 0) {
2179                 /* if error, just output silence */
2180                is->audio_buf      = is->silence_buf;
2181                is->audio_buf_size = sizeof(is->silence_buf);
2182            } else {
2183                if (is->show_audio)
2184                    update_sample_display(is, (int16_t *)is->audio_buf, audio_size);
2185                audio_size = synchronize_audio(is, (int16_t *)is->audio_buf, audio_size,
2186                                               pts);
2187                is->audio_buf_size = audio_size;
2188            }
2189            is->audio_buf_index = 0;
2190         }
2191         len1 = is->audio_buf_size - is->audio_buf_index;
2192         if (len1 > len)
2193             len1 = len;
2194         memcpy(stream, (uint8_t *)is->audio_buf + is->audio_buf_index, len1);
2195         len -= len1;
2196         stream += len1;
2197         is->audio_buf_index += len1;
2198     }
2199 }
2200
2201 /* open a given stream. Return 0 if OK */
2202 static int stream_component_open(VideoState *is, int stream_index)
2203 {
2204     AVFormatContext *ic = is->ic;
2205     AVCodecContext *avctx;
2206     AVCodec *codec;
2207     SDL_AudioSpec wanted_spec, spec;
2208     AVDictionary *opts;
2209     AVDictionaryEntry *t = NULL;
2210
2211     if (stream_index < 0 || stream_index >= ic->nb_streams)
2212         return -1;
2213     avctx = ic->streams[stream_index]->codec;
2214
2215     opts = filter_codec_opts(codec_opts, avctx->codec_id, ic, ic->streams[stream_index]);
2216
2217     codec = avcodec_find_decoder(avctx->codec_id);
2218     avctx->debug_mv          = debug_mv;
2219     avctx->debug             = debug;
2220     avctx->workaround_bugs   = workaround_bugs;
2221     avctx->idct_algo         = idct;
2222     avctx->skip_frame        = skip_frame;
2223     avctx->skip_idct         = skip_idct;
2224     avctx->skip_loop_filter  = skip_loop_filter;
2225     avctx->error_concealment = error_concealment;
2226
2227     if (fast)   avctx->flags2 |= CODEC_FLAG2_FAST;
2228
2229     if (!av_dict_get(opts, "threads", NULL, 0))
2230         av_dict_set(&opts, "threads", "auto", 0);
2231     if (!codec ||
2232         avcodec_open2(avctx, codec, &opts) < 0)
2233         return -1;
2234     if ((t = av_dict_get(opts, "", NULL, AV_DICT_IGNORE_SUFFIX))) {
2235         av_log(NULL, AV_LOG_ERROR, "Option %s not found.\n", t->key);
2236         return AVERROR_OPTION_NOT_FOUND;
2237     }
2238
2239     /* prepare audio output */
2240     if (avctx->codec_type == AVMEDIA_TYPE_AUDIO) {
2241         wanted_spec.freq = avctx->sample_rate;
2242         wanted_spec.format = AUDIO_S16SYS;
2243
2244         if (!avctx->channel_layout)
2245             avctx->channel_layout = av_get_default_channel_layout(avctx->channels);
2246         if (!avctx->channel_layout) {
2247             fprintf(stderr, "unable to guess channel layout\n");
2248             return -1;
2249         }
2250         if (avctx->channels == 1)
2251             is->sdl_channel_layout = AV_CH_LAYOUT_MONO;
2252         else
2253             is->sdl_channel_layout = AV_CH_LAYOUT_STEREO;
2254         is->sdl_channels = av_get_channel_layout_nb_channels(is->sdl_channel_layout);
2255
2256         wanted_spec.channels = is->sdl_channels;
2257         wanted_spec.silence = 0;
2258         wanted_spec.samples = SDL_AUDIO_BUFFER_SIZE;
2259         wanted_spec.callback = sdl_audio_callback;
2260         wanted_spec.userdata = is;
2261         if (SDL_OpenAudio(&wanted_spec, &spec) < 0) {
2262             fprintf(stderr, "SDL_OpenAudio: %s\n", SDL_GetError());
2263             return -1;
2264         }
2265         is->audio_hw_buf_size = spec.size;
2266         is->sdl_sample_fmt          = AV_SAMPLE_FMT_S16;
2267         is->resample_sample_fmt     = is->sdl_sample_fmt;
2268         is->resample_channel_layout = is->sdl_channel_layout;
2269     }
2270
2271     ic->streams[stream_index]->discard = AVDISCARD_DEFAULT;
2272     switch (avctx->codec_type) {
2273     case AVMEDIA_TYPE_AUDIO:
2274         is->audio_stream = stream_index;
2275         is->audio_st = ic->streams[stream_index];
2276         is->audio_buf_size  = 0;
2277         is->audio_buf_index = 0;
2278
2279         /* init averaging filter */
2280         is->audio_diff_avg_coef  = exp(log(0.01) / AUDIO_DIFF_AVG_NB);
2281         is->audio_diff_avg_count = 0;
2282         /* since we do not have a precise anough audio fifo fullness,
2283            we correct audio sync only if larger than this threshold */
2284         is->audio_diff_threshold = 2.0 * SDL_AUDIO_BUFFER_SIZE / avctx->sample_rate;
2285
2286         memset(&is->audio_pkt, 0, sizeof(is->audio_pkt));
2287         packet_queue_init(&is->audioq);
2288         SDL_PauseAudio(0);
2289         break;
2290     case AVMEDIA_TYPE_VIDEO:
2291         is->video_stream = stream_index;
2292         is->video_st = ic->streams[stream_index];
2293
2294         packet_queue_init(&is->videoq);
2295         is->video_tid = SDL_CreateThread(video_thread, is);
2296         break;
2297     case AVMEDIA_TYPE_SUBTITLE:
2298         is->subtitle_stream = stream_index;
2299         is->subtitle_st = ic->streams[stream_index];
2300         packet_queue_init(&is->subtitleq);
2301
2302         is->subtitle_tid = SDL_CreateThread(subtitle_thread, is);
2303         break;
2304     default:
2305         break;
2306     }
2307     return 0;
2308 }
2309
/* Tear down one open stream: abort its packet queue, stop/join the
 * worker (audio device, video thread or subtitle thread), release the
 * per-stream resources and mark the stream as closed on `is`. */
static void stream_component_close(VideoState *is, int stream_index)
{
    AVFormatContext *ic = is->ic;
    AVCodecContext *avctx;

    if (stream_index < 0 || stream_index >= ic->nb_streams)
        return;
    avctx = ic->streams[stream_index]->codec;

    switch (avctx->codec_type) {
    case AVMEDIA_TYPE_AUDIO:
        /* abort first so the SDL callback stops pulling data, then
         * close the device before releasing the buffers it reads */
        packet_queue_abort(&is->audioq);

        SDL_CloseAudio();

        packet_queue_end(&is->audioq);
        av_free_packet(&is->audio_pkt);
        if (is->avr)
            avresample_free(&is->avr);
        av_freep(&is->audio_buf1);
        /* audio_buf may alias audio_buf1 or the decode frame; just clear it */
        is->audio_buf = NULL;
        av_freep(&is->frame);

        /* release spectrum-display FFT state, if any */
        if (is->rdft) {
            av_rdft_end(is->rdft);
            av_freep(&is->rdft_data);
            is->rdft = NULL;
            is->rdft_bits = 0;
        }
        break;
    case AVMEDIA_TYPE_VIDEO:
        packet_queue_abort(&is->videoq);

        /* note: we also signal this mutex to make sure we deblock the
           video thread in all cases */
        SDL_LockMutex(is->pictq_mutex);
        SDL_CondSignal(is->pictq_cond);
        SDL_UnlockMutex(is->pictq_mutex);

        SDL_WaitThread(is->video_tid, NULL);

        packet_queue_end(&is->videoq);
        break;
    case AVMEDIA_TYPE_SUBTITLE:
        packet_queue_abort(&is->subtitleq);

        /* note: we also signal this mutex to make sure we deblock the
           video thread in all cases */
        SDL_LockMutex(is->subpq_mutex);
        is->subtitle_stream_changed = 1;

        SDL_CondSignal(is->subpq_cond);
        SDL_UnlockMutex(is->subpq_mutex);

        SDL_WaitThread(is->subtitle_tid, NULL);

        packet_queue_end(&is->subtitleq);
        break;
    default:
        break;
    }

    /* stop the demuxer from queueing packets for this stream */
    ic->streams[stream_index]->discard = AVDISCARD_ALL;
    avcodec_close(avctx);
    switch (avctx->codec_type) {
    case AVMEDIA_TYPE_AUDIO:
        is->audio_st = NULL;
        is->audio_stream = -1;
        break;
    case AVMEDIA_TYPE_VIDEO:
        is->video_st = NULL;
        is->video_stream = -1;
        break;
    case AVMEDIA_TYPE_SUBTITLE:
        is->subtitle_st = NULL;
        is->subtitle_stream = -1;
        break;
    default:
        break;
    }
}
2391
2392 /* since we have only one decoding thread, we can use a global
2393    variable instead of a thread local variable */
2394 static VideoState *global_video_state;
2395
2396 static int decode_interrupt_cb(void *ctx)
2397 {
2398     return global_video_state && global_video_state->abort_request;
2399 }
2400
2401 /* this thread gets the stream from the disk or the network */
2402 static int decode_thread(void *arg)
2403 {
2404     VideoState *is = arg;
2405     AVFormatContext *ic = NULL;
2406     int err, i, ret;
2407     int st_index[AVMEDIA_TYPE_NB];
2408     AVPacket pkt1, *pkt = &pkt1;
2409     int eof = 0;
2410     int pkt_in_play_range = 0;
2411     AVDictionaryEntry *t;
2412     AVDictionary **opts;
2413     int orig_nb_streams;
2414
2415     memset(st_index, -1, sizeof(st_index));
2416     is->video_stream = -1;
2417     is->audio_stream = -1;
2418     is->subtitle_stream = -1;
2419
2420     global_video_state = is;
2421
2422     ic = avformat_alloc_context();
2423     ic->interrupt_callback.callback = decode_interrupt_cb;
2424     err = avformat_open_input(&ic, is->filename, is->iformat, &format_opts);
2425     if (err < 0) {
2426         print_error(is->filename, err);
2427         ret = -1;
2428         goto fail;
2429     }
2430     if ((t = av_dict_get(format_opts, "", NULL, AV_DICT_IGNORE_SUFFIX))) {
2431         av_log(NULL, AV_LOG_ERROR, "Option %s not found.\n", t->key);
2432         ret = AVERROR_OPTION_NOT_FOUND;
2433         goto fail;
2434     }
2435     is->ic = ic;
2436
2437     if (genpts)
2438         ic->flags |= AVFMT_FLAG_GENPTS;
2439
2440     opts = setup_find_stream_info_opts(ic, codec_opts);
2441     orig_nb_streams = ic->nb_streams;
2442
2443     err = avformat_find_stream_info(ic, opts);
2444     if (err < 0) {
2445         fprintf(stderr, "%s: could not find codec parameters\n", is->filename);
2446         ret = -1;
2447         goto fail;
2448     }
2449     for (i = 0; i < orig_nb_streams; i++)
2450         av_dict_free(&opts[i]);
2451     av_freep(&opts);
2452
2453     if (ic->pb)
2454         ic->pb->eof_reached = 0; // FIXME hack, avplay maybe should not use url_feof() to test for the end
2455
2456     if (seek_by_bytes < 0)
2457         seek_by_bytes = !!(ic->iformat->flags & AVFMT_TS_DISCONT);
2458
2459     /* if seeking requested, we execute it */
2460     if (start_time != AV_NOPTS_VALUE) {
2461         int64_t timestamp;
2462
2463         timestamp = start_time;
2464         /* add the stream start time */
2465         if (ic->start_time != AV_NOPTS_VALUE)
2466             timestamp += ic->start_time;
2467         ret = avformat_seek_file(ic, -1, INT64_MIN, timestamp, INT64_MAX, 0);
2468         if (ret < 0) {
2469             fprintf(stderr, "%s: could not seek to position %0.3f\n",
2470                     is->filename, (double)timestamp / AV_TIME_BASE);
2471         }
2472     }
2473
2474     for (i = 0; i < ic->nb_streams; i++)
2475         ic->streams[i]->discard = AVDISCARD_ALL;
2476     if (!video_disable)
2477         st_index[AVMEDIA_TYPE_VIDEO] =
2478             av_find_best_stream(ic, AVMEDIA_TYPE_VIDEO,
2479                                 wanted_stream[AVMEDIA_TYPE_VIDEO], -1, NULL, 0);
2480     if (!audio_disable)
2481         st_index[AVMEDIA_TYPE_AUDIO] =
2482             av_find_best_stream(ic, AVMEDIA_TYPE_AUDIO,
2483                                 wanted_stream[AVMEDIA_TYPE_AUDIO],
2484                                 st_index[AVMEDIA_TYPE_VIDEO],
2485                                 NULL, 0);
2486     if (!video_disable)
2487         st_index[AVMEDIA_TYPE_SUBTITLE] =
2488             av_find_best_stream(ic, AVMEDIA_TYPE_SUBTITLE,
2489                                 wanted_stream[AVMEDIA_TYPE_SUBTITLE],
2490                                 (st_index[AVMEDIA_TYPE_AUDIO] >= 0 ?
2491                                  st_index[AVMEDIA_TYPE_AUDIO] :
2492                                  st_index[AVMEDIA_TYPE_VIDEO]),
2493                                 NULL, 0);
2494     if (show_status) {
2495         av_dump_format(ic, 0, is->filename, 0);
2496     }
2497
2498     /* open the streams */
2499     if (st_index[AVMEDIA_TYPE_AUDIO] >= 0) {
2500         stream_component_open(is, st_index[AVMEDIA_TYPE_AUDIO]);
2501     }
2502
2503     ret = -1;
2504     if (st_index[AVMEDIA_TYPE_VIDEO] >= 0) {
2505         ret = stream_component_open(is, st_index[AVMEDIA_TYPE_VIDEO]);
2506     }
2507     is->refresh_tid = SDL_CreateThread(refresh_thread, is);
2508     if (ret < 0) {
2509         if (!display_disable)
2510             is->show_audio = 2;
2511     }
2512
2513     if (st_index[AVMEDIA_TYPE_SUBTITLE] >= 0) {
2514         stream_component_open(is, st_index[AVMEDIA_TYPE_SUBTITLE]);
2515     }
2516
2517     if (is->video_stream < 0 && is->audio_stream < 0) {
2518         fprintf(stderr, "%s: could not open codecs\n", is->filename);
2519         ret = -1;
2520         goto fail;
2521     }
2522
2523     for (;;) {
2524         if (is->abort_request)
2525             break;
2526         if (is->paused != is->last_paused) {
2527             is->last_paused = is->paused;
2528             if (is->paused)
2529                 is->read_pause_return = av_read_pause(ic);
2530             else
2531                 av_read_play(ic);
2532         }
2533 #if CONFIG_RTSP_DEMUXER
2534         if (is->paused && !strcmp(ic->iformat->name, "rtsp")) {
2535             /* wait 10 ms to avoid trying to get another packet */
2536             /* XXX: horrible */
2537             SDL_Delay(10);
2538             continue;
2539         }
2540 #endif
2541         if (is->seek_req) {
2542             int64_t seek_target = is->seek_pos;
2543             int64_t seek_min    = is->seek_rel > 0 ? seek_target - is->seek_rel + 2: INT64_MIN;
2544             int64_t seek_max    = is->seek_rel < 0 ? seek_target - is->seek_rel - 2: INT64_MAX;
2545 // FIXME the +-2 is due to rounding being not done in the correct direction in generation
2546 //      of the seek_pos/seek_rel variables
2547
2548             ret = avformat_seek_file(is->ic, -1, seek_min, seek_target, seek_max, is->seek_flags);
2549             if (ret < 0) {
2550                 fprintf(stderr, "%s: error while seeking\n", is->ic->filename);
2551             } else {
2552                 if (is->audio_stream >= 0) {
2553                     packet_queue_flush(&is->audioq);
2554                     packet_queue_put(&is->audioq, &flush_pkt);
2555                 }
2556                 if (is->subtitle_stream >= 0) {
2557                     packet_queue_flush(&is->subtitleq);
2558                     packet_queue_put(&is->subtitleq, &flush_pkt);
2559                 }
2560                 if (is->video_stream >= 0) {
2561                     packet_queue_flush(&is->videoq);
2562                     packet_queue_put(&is->videoq, &flush_pkt);
2563                 }
2564             }
2565             is->seek_req = 0;
2566             eof = 0;
2567         }
2568
2569         /* if the queue are full, no need to read more */
2570         if (   is->audioq.size + is->videoq.size + is->subtitleq.size > MAX_QUEUE_SIZE
2571             || (   (is->audioq   .size  > MIN_AUDIOQ_SIZE || is->audio_stream < 0)
2572                 && (is->videoq   .nb_packets > MIN_FRAMES || is->video_stream < 0)
2573                 && (is->subtitleq.nb_packets > MIN_FRAMES || is->subtitle_stream < 0))) {
2574             /* wait 10 ms */
2575             SDL_Delay(10);
2576             continue;
2577         }
2578         if (eof) {
2579             if (is->video_stream >= 0) {
2580                 av_init_packet(pkt);
2581                 pkt->data = NULL;
2582                 pkt->size = 0;
2583                 pkt->stream_index = is->video_stream;
2584                 packet_queue_put(&is->videoq, pkt);
2585             }
2586             if (is->audio_stream >= 0 &&
2587                 is->audio_st->codec->codec->capabilities & CODEC_CAP_DELAY) {
2588                 av_init_packet(pkt);
2589                 pkt->data = NULL;
2590                 pkt->size = 0;
2591                 pkt->stream_index = is->audio_stream;
2592                 packet_queue_put(&is->audioq, pkt);
2593             }
2594             SDL_Delay(10);
2595             if (is->audioq.size + is->videoq.size + is->subtitleq.size == 0) {
2596                 if (loop != 1 && (!loop || --loop)) {
2597                     stream_seek(cur_stream, start_time != AV_NOPTS_VALUE ? start_time : 0, 0, 0);
2598                 } else if (autoexit) {
2599                     ret = AVERROR_EOF;
2600                     goto fail;
2601                 }
2602             }
2603             continue;
2604         }
2605         ret = av_read_frame(ic, pkt);
2606         if (ret < 0) {
2607             if (ret == AVERROR_EOF || (ic->pb && ic->pb->eof_reached))
2608                 eof = 1;
2609             if (ic->pb && ic->pb->error)
2610                 break;
2611             SDL_Delay(100); /* wait for user event */
2612             continue;
2613         }
2614         /* check if packet is in play range specified by user, then queue, otherwise discard */
2615         pkt_in_play_range = duration == AV_NOPTS_VALUE ||
2616                 (pkt->pts - ic->streams[pkt->stream_index]->start_time) *
2617                 av_q2d(ic->streams[pkt->stream_index]->time_base) -
2618                 (double)(start_time != AV_NOPTS_VALUE ? start_time : 0) / 1000000
2619                 <= ((double)duration / 1000000);
2620         if (pkt->stream_index == is->audio_stream && pkt_in_play_range) {
2621             packet_queue_put(&is->audioq, pkt);
2622         } else if (pkt->stream_index == is->video_stream && pkt_in_play_range) {
2623             packet_queue_put(&is->videoq, pkt);
2624         } else if (pkt->stream_index == is->subtitle_stream && pkt_in_play_range) {
2625             packet_queue_put(&is->subtitleq, pkt);
2626         } else {
2627             av_free_packet(pkt);
2628         }
2629     }
2630     /* wait until the end */
2631     while (!is->abort_request) {
2632         SDL_Delay(100);
2633     }
2634
2635     ret = 0;
2636  fail:
2637     /* disable interrupting */
2638     global_video_state = NULL;
2639
2640     /* close each stream */
2641     if (is->audio_stream >= 0)
2642         stream_component_close(is, is->audio_stream);
2643     if (is->video_stream >= 0)
2644         stream_component_close(is, is->video_stream);
2645     if (is->subtitle_stream >= 0)
2646         stream_component_close(is, is->subtitle_stream);
2647     if (is->ic) {
2648         avformat_close_input(&is->ic);
2649     }
2650
2651     if (ret != 0) {
2652         SDL_Event event;
2653
2654         event.type = FF_QUIT_EVENT;
2655         event.user.data1 = is;
2656         SDL_PushEvent(&event);
2657     }
2658     return 0;
2659 }
2660
2661 static VideoState *stream_open(const char *filename, AVInputFormat *iformat)
2662 {
2663     VideoState *is;
2664
2665     is = av_mallocz(sizeof(VideoState));
2666     if (!is)
2667         return NULL;
2668     av_strlcpy(is->filename, filename, sizeof(is->filename));
2669     is->iformat = iformat;
2670     is->ytop    = 0;
2671     is->xleft   = 0;
2672
2673     /* start video display */
2674     is->pictq_mutex = SDL_CreateMutex();
2675     is->pictq_cond  = SDL_CreateCond();
2676
2677     is->subpq_mutex = SDL_CreateMutex();
2678     is->subpq_cond  = SDL_CreateCond();
2679
2680     is->av_sync_type = av_sync_type;
2681     is->parse_tid    = SDL_CreateThread(decode_thread, is);
2682     if (!is->parse_tid) {
2683         av_free(is);
2684         return NULL;
2685     }
2686     return is;
2687 }
2688
static void stream_cycle_channel(VideoState *is, int codec_type)
{
    /* Switch to the next usable stream of the given media type,
     * wrapping around the stream list.  For subtitles, wrapping past
     * the end selects "no stream" (-1) instead. */
    AVFormatContext *ic = is->ic;
    int start_index, stream_index;
    AVStream *st;

    /* start searching from the currently active stream of this type */
    if (codec_type == AVMEDIA_TYPE_VIDEO)
        start_index = is->video_stream;
    else if (codec_type == AVMEDIA_TYPE_AUDIO)
        start_index = is->audio_stream;
    else
        start_index = is->subtitle_stream;
    /* audio/video must already have an open stream to cycle away from;
     * subtitles are allowed to start from "none" (-1) */
    if (start_index < (codec_type == AVMEDIA_TYPE_SUBTITLE ? -1 : 0))
        return;
    stream_index = start_index;
    for (;;) {
        if (++stream_index >= is->ic->nb_streams)
        {
            if (codec_type == AVMEDIA_TYPE_SUBTITLE)
            {
                /* wrapped past the last stream: disable subtitles */
                stream_index = -1;
                goto the_end;
            } else
                stream_index = 0;
        }
        /* came all the way back around: nothing else to switch to */
        if (stream_index == start_index)
            return;
        st = ic->streams[stream_index];
        if (st->codec->codec_type == codec_type) {
            /* check that parameters are OK */
            switch (codec_type) {
            case AVMEDIA_TYPE_AUDIO:
                /* audio is only usable with a known rate/channel count */
                if (st->codec->sample_rate != 0 &&
                    st->codec->channels != 0)
                    goto the_end;
                break;
            case AVMEDIA_TYPE_VIDEO:
            case AVMEDIA_TYPE_SUBTITLE:
                goto the_end;
            default:
                break;
            }
        }
    }
 the_end:
    stream_component_close(is, start_index);
    stream_component_open(is, stream_index);
}
2737
2738
2739 static void toggle_full_screen(void)
2740 {
2741     is_full_screen = !is_full_screen;
2742 #if defined(__APPLE__) && SDL_VERSION_ATLEAST(1, 2, 14)
2743     /* OS X needs to empty the picture_queue */
2744     for (int i = 0; i < VIDEO_PICTURE_QUEUE_SIZE; i++) {
2745         cur_stream->pictq[i].reallocate = 1;
2746     }
2747 #endif
2748     video_open(cur_stream);
2749 }
2750
static void toggle_pause(void)
{
    /* Toggle playback pause on the current stream (if any) and leave
     * single-frame stepping mode. */
    if (cur_stream)
        stream_pause(cur_stream);
    step = 0;
}
2757
static void step_to_next_frame(void)
{
    /* Advance playback by a single frame: ensure the stream is running,
     * then arm the one-shot step flag (consumed by the refresh logic). */
    if (cur_stream) {
        /* if the stream is paused unpause it, then step */
        if (cur_stream->paused)
            stream_pause(cur_stream);
    }
    step = 1;
}
2767
2768 static void toggle_audio_display(void)
2769 {
2770     if (cur_stream) {
2771         int bgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0x00);
2772         cur_stream->show_audio = (cur_stream->show_audio + 1) % 3;
2773         fill_rectangle(screen,
2774                        cur_stream->xleft, cur_stream->ytop, cur_stream->width, cur_stream->height,
2775                        bgcolor);
2776         SDL_UpdateRect(screen, cur_stream->xleft, cur_stream->ytop, cur_stream->width, cur_stream->height);
2777     }
2778 }
2779
2780 /* handle an event sent by the GUI */
static void event_loop(void)
{
    /* Main SDL event pump: translates keyboard, mouse and window events
     * into player actions.  Never returns normally; the process ends via
     * do_exit(). */
    SDL_Event event;
    double incr, pos, frac;

    for (;;) {
        double x;
        SDL_WaitEvent(&event);
        switch (event.type) {
        case SDL_KEYDOWN:
            if (exit_on_keydown) {
                do_exit();
                break;
            }
            switch (event.key.keysym.sym) {
            case SDLK_ESCAPE:
            case SDLK_q:
                do_exit();
                break;
            case SDLK_f:
                toggle_full_screen();
                break;
            case SDLK_p:
            case SDLK_SPACE:
                toggle_pause();
                break;
            case SDLK_s: // S: Step to next frame
                step_to_next_frame();
                break;
            case SDLK_a:
                if (cur_stream)
                    stream_cycle_channel(cur_stream, AVMEDIA_TYPE_AUDIO);
                break;
            case SDLK_v:
                if (cur_stream)
                    stream_cycle_channel(cur_stream, AVMEDIA_TYPE_VIDEO);
                break;
            case SDLK_t:
                if (cur_stream)
                    stream_cycle_channel(cur_stream, AVMEDIA_TYPE_SUBTITLE);
                break;
            case SDLK_w:
                toggle_audio_display();
                break;
            /* arrow keys: relative seek by +-10 s / +-60 s */
            case SDLK_LEFT:
                incr = -10.0;
                goto do_seek;
            case SDLK_RIGHT:
                incr = 10.0;
                goto do_seek;
            case SDLK_UP:
                incr = 60.0;
                goto do_seek;
            case SDLK_DOWN:
                incr = -60.0;
            do_seek:
                if (cur_stream) {
                    if (seek_by_bytes) {
                        /* convert the time increment into a byte offset,
                         * using the container bit rate when available and
                         * a 180 kB/s fallback otherwise */
                        if (cur_stream->video_stream >= 0 && cur_stream->video_current_pos >= 0) {
                            pos = cur_stream->video_current_pos;
                        } else if (cur_stream->audio_stream >= 0 && cur_stream->audio_pkt.pos >= 0) {
                            pos = cur_stream->audio_pkt.pos;
                        } else
                            pos = avio_tell(cur_stream->ic->pb);
                        if (cur_stream->ic->bit_rate)
                            incr *= cur_stream->ic->bit_rate / 8.0;
                        else
                            incr *= 180000.0;
                        pos += incr;
                        stream_seek(cur_stream, pos, incr, 1);
                    } else {
                        /* time-based seek relative to the master clock */
                        pos = get_master_clock(cur_stream);
                        pos += incr;
                        stream_seek(cur_stream, (int64_t)(pos * AV_TIME_BASE), (int64_t)(incr * AV_TIME_BASE), 0);
                    }
                }
                break;
            default:
                break;
            }
            break;
        case SDL_MOUSEBUTTONDOWN:
            if (exit_on_mousedown) {
                do_exit();
                break;
            }
            /* fallthrough: a click seeks exactly like a drag below */
        case SDL_MOUSEMOTION:
            if (event.type == SDL_MOUSEBUTTONDOWN) {
                x = event.button.x;
            } else {
                /* only seek while a mouse button is held (drag) */
                if (event.motion.state != SDL_PRESSED)
                    break;
                x = event.motion.x;
            }
            if (cur_stream) {
                /* map the horizontal position to a fraction of the file,
                 * by size when duration is unknown or byte-seeking */
                if (seek_by_bytes || cur_stream->ic->duration <= 0) {
                    /* NOTE(review): avio_size() can return a negative
                     * error code, which would wrap around in this
                     * uint64_t — confirm callers never hit that path */
                    uint64_t size =  avio_size(cur_stream->ic->pb);
                    stream_seek(cur_stream, size*x/cur_stream->width, 0, 1);
                } else {
                    int64_t ts;
                    int ns, hh, mm, ss;
                    int tns, thh, tmm, tss;
                    /* duration is in AV_TIME_BASE (microsecond) units */
                    tns  = cur_stream->ic->duration / 1000000LL;
                    thh  = tns / 3600;
                    tmm  = (tns % 3600) / 60;
                    tss  = (tns % 60);
                    frac = x / cur_stream->width;
                    ns   = frac * tns;
                    hh   = ns / 3600;
                    mm   = (ns % 3600) / 60;
                    ss   = (ns % 60);
                    fprintf(stderr, "Seek to %2.0f%% (%2d:%02d:%02d) of total duration (%2d:%02d:%02d)       \n", frac*100,
                            hh, mm, ss, thh, tmm, tss);
                    ts = frac * cur_stream->ic->duration;
                    if (cur_stream->ic->start_time != AV_NOPTS_VALUE)
                        ts += cur_stream->ic->start_time;
                    stream_seek(cur_stream, ts, 0, 0);
                }
            }
            break;
        case SDL_VIDEORESIZE:
            if (cur_stream) {
                screen = SDL_SetVideoMode(event.resize.w, event.resize.h, 0,
                                          SDL_HWSURFACE|SDL_RESIZABLE|SDL_ASYNCBLIT|SDL_HWACCEL);
                screen_width  = cur_stream->width  = event.resize.w;
                screen_height = cur_stream->height = event.resize.h;
            }
            break;
        case SDL_QUIT:
        case FF_QUIT_EVENT:
            do_exit();
            break;
        case FF_ALLOC_EVENT:
            /* posted by the video thread so that the window/surface work
             * happens on this (the main) thread */
            video_open(event.user.data1);
            alloc_picture(event.user.data1);
            break;
        case FF_REFRESH_EVENT:
            video_refresh_timer(event.user.data1);
            cur_stream->refresh = 0;
            break;
        default:
            break;
        }
    }
}
2926
static int opt_frame_size(const char *opt, const char *arg)
{
    /* "-s" has been removed; the stub exists only to print a helpful
     * error instead of "unknown option". */
    av_log(NULL, AV_LOG_ERROR,
           "Option '%s' has been removed, use private format options instead\n", opt);
    return AVERROR(EINVAL);
}
2933
static int opt_width(const char *opt, const char *arg)
{
    /* "-x": force the displayed width (pixels); parse_number_or_die
     * presumably terminates the process on a malformed value. */
    screen_width = parse_number_or_die(opt, arg, OPT_INT64, 1, INT_MAX);
    return 0;
}
2939
static int opt_height(const char *opt, const char *arg)
{
    /* "-y": force the displayed height (pixels); parse_number_or_die
     * presumably terminates the process on a malformed value. */
    screen_height = parse_number_or_die(opt, arg, OPT_INT64, 1, INT_MAX);
    return 0;
}
2945
2946 static int opt_format(const char *opt, const char *arg)
2947 {
2948     file_iformat = av_find_input_format(arg);
2949     if (!file_iformat) {
2950         fprintf(stderr, "Unknown input format: %s\n", arg);
2951         return AVERROR(EINVAL);
2952     }
2953     return 0;
2954 }
2955
static int opt_frame_pix_fmt(const char *opt, const char *arg)
{
    /* "-pix_fmt" has been removed; the stub exists only to print a
     * helpful error instead of "unknown option". */
    av_log(NULL, AV_LOG_ERROR,
           "Option '%s' has been removed, use private format options instead\n", opt);
    return AVERROR(EINVAL);
}
2962
2963 static int opt_sync(const char *opt, const char *arg)
2964 {
2965     if (!strcmp(arg, "audio"))
2966         av_sync_type = AV_SYNC_AUDIO_MASTER;
2967     else if (!strcmp(arg, "video"))
2968         av_sync_type = AV_SYNC_VIDEO_MASTER;
2969     else if (!strcmp(arg, "ext"))
2970         av_sync_type = AV_SYNC_EXTERNAL_CLOCK;
2971     else {
2972         fprintf(stderr, "Unknown value for %s: %s\n", opt, arg);
2973         exit(1);
2974     }
2975     return 0;
2976 }
2977
static int opt_seek(const char *opt, const char *arg)
{
    /* "-ss": initial seek position; parse_time_or_die presumably
     * terminates the process on a malformed time specification. */
    start_time = parse_time_or_die(opt, arg, 1);
    return 0;
}
2983
static int opt_duration(const char *opt, const char *arg)
{
    /* "-t": limit how long to play; parse_time_or_die presumably
     * terminates the process on a malformed time specification. */
    duration = parse_time_or_die(opt, arg, 1);
    return 0;
}
2989
static int opt_debug(const char *opt, const char *arg)
{
    /* "-debug": crank the log level to maximum, then store the numeric
     * debug flags (applied to the codec context elsewhere). */
    av_log_set_level(99);
    debug = parse_number_or_die(opt, arg, OPT_INT64, 0, INT_MAX);
    return 0;
}
2996
static int opt_vismv(const char *opt, const char *arg)
{
    /* "-vismv": motion-vector visualization bit mask for the decoder. */
    debug_mv = parse_number_or_die(opt, arg, OPT_INT64, INT_MIN, INT_MAX);
    return 0;
}
3002
/* Command line options understood by avplay.  Each entry maps an option
 * name either to a variable (OPT_BOOL/OPT_INT/OPT_STRING) or to a
 * handler callback (HAS_ARG); the generic options (-h, -version, ...)
 * are pulled in from cmdutils_common_opts.h. */
static const OptionDef options[] = {
#include "cmdutils_common_opts.h"
    { "x", HAS_ARG, { (void*)opt_width }, "force displayed width", "width" },
    { "y", HAS_ARG, { (void*)opt_height }, "force displayed height", "height" },
    { "s", HAS_ARG | OPT_VIDEO, { (void*)opt_frame_size }, "set frame size (WxH or abbreviation)", "size" },
    { "fs", OPT_BOOL, { (void*)&is_full_screen }, "force full screen" },
    { "an", OPT_BOOL, { (void*)&audio_disable }, "disable audio" },
    { "vn", OPT_BOOL, { (void*)&video_disable }, "disable video" },
    { "ast", OPT_INT | HAS_ARG | OPT_EXPERT, { (void*)&wanted_stream[AVMEDIA_TYPE_AUDIO] }, "select desired audio stream", "stream_number" },
    { "vst", OPT_INT | HAS_ARG | OPT_EXPERT, { (void*)&wanted_stream[AVMEDIA_TYPE_VIDEO] }, "select desired video stream", "stream_number" },
    { "sst", OPT_INT | HAS_ARG | OPT_EXPERT, { (void*)&wanted_stream[AVMEDIA_TYPE_SUBTITLE] }, "select desired subtitle stream", "stream_number" },
    { "ss", HAS_ARG, { (void*)&opt_seek }, "seek to a given position in seconds", "pos" },
    { "t", HAS_ARG, { (void*)&opt_duration }, "play  \"duration\" seconds of audio/video", "duration" },
    { "bytes", OPT_INT | HAS_ARG, { (void*)&seek_by_bytes }, "seek by bytes 0=off 1=on -1=auto", "val" },
    { "nodisp", OPT_BOOL, { (void*)&display_disable }, "disable graphical display" },
    { "f", HAS_ARG, { (void*)opt_format }, "force format", "fmt" },
    { "pix_fmt", HAS_ARG | OPT_EXPERT | OPT_VIDEO, { (void*)opt_frame_pix_fmt }, "set pixel format", "format" },
    { "stats", OPT_BOOL | OPT_EXPERT, { (void*)&show_status }, "show status", "" },
    { "debug", HAS_ARG | OPT_EXPERT, { (void*)opt_debug }, "print specific debug info", "" },
    { "bug", OPT_INT | HAS_ARG | OPT_EXPERT, { (void*)&workaround_bugs }, "workaround bugs", "" },
    { "vismv", HAS_ARG | OPT_EXPERT, { (void*)opt_vismv }, "visualize motion vectors", "" },
    { "fast", OPT_BOOL | OPT_EXPERT, { (void*)&fast }, "non spec compliant optimizations", "" },
    { "genpts", OPT_BOOL | OPT_EXPERT, { (void*)&genpts }, "generate pts", "" },
    { "drp", OPT_INT | HAS_ARG | OPT_EXPERT, { (void*)&decoder_reorder_pts }, "let decoder reorder pts 0=off 1=on -1=auto", ""},
    { "skiploop", OPT_INT | HAS_ARG | OPT_EXPERT, { (void*)&skip_loop_filter }, "", "" },
    { "skipframe", OPT_INT | HAS_ARG | OPT_EXPERT, { (void*)&skip_frame }, "", "" },
    { "skipidct", OPT_INT | HAS_ARG | OPT_EXPERT, { (void*)&skip_idct }, "", "" },
    { "idct", OPT_INT | HAS_ARG | OPT_EXPERT, { (void*)&idct }, "set idct algo",  "algo" },
    { "ec", OPT_INT | HAS_ARG | OPT_EXPERT, { (void*)&error_concealment }, "set error concealment options",  "bit_mask" },
    { "sync", HAS_ARG | OPT_EXPERT, { (void*)opt_sync }, "set audio-video sync. type (type=audio/video/ext)", "type" },
    { "autoexit", OPT_BOOL | OPT_EXPERT, { (void*)&autoexit }, "exit at the end", "" },
    { "exitonkeydown", OPT_BOOL | OPT_EXPERT, { (void*)&exit_on_keydown }, "exit on key down", "" },
    { "exitonmousedown", OPT_BOOL | OPT_EXPERT, { (void*)&exit_on_mousedown }, "exit on mouse down", "" },
    { "loop", OPT_INT | HAS_ARG | OPT_EXPERT, { (void*)&loop }, "set number of times the playback shall be looped", "loop count" },
    { "framedrop", OPT_BOOL | OPT_EXPERT, { (void*)&framedrop }, "drop frames when cpu is too slow", "" },
    { "window_title", OPT_STRING | HAS_ARG, { (void*)&window_title }, "set window title", "window title" },
#if CONFIG_AVFILTER
    { "vf", OPT_STRING | HAS_ARG, { (void*)&vfilters }, "video filters", "filter list" },
#endif
    { "rdftspeed", OPT_INT | HAS_ARG| OPT_AUDIO | OPT_EXPERT, { (void*)&rdftspeed }, "rdft speed", "msecs" },
    { "default", HAS_ARG | OPT_AUDIO | OPT_VIDEO | OPT_EXPERT, { (void*)opt_default }, "generic catch all option", "" },
    { "i", 0, { NULL }, "avconv compatibility dummy option", ""},
    /* sentinel: parse_options stops at the NULL-named entry */
    { NULL, },
};
3047
3048 static void show_usage(void)
3049 {
3050     printf("Simple media player\n");
3051     printf("usage: %s [options] input_file\n", program_name);
3052     printf("\n");
3053 }
3054
static void show_help(void)
{
    /* Full "-h" output: avplay's own options (split into main and
     * advanced by the OPT_EXPERT flag), the generic decoding options of
     * libavcodec/libavformat (and libswscale when avfilter is not
     * compiled in), and finally the interactive key bindings. */
    av_log_set_callback(log_callback_help);
    show_usage();
    show_help_options(options, "Main options:\n",
                      OPT_EXPERT, 0);
    show_help_options(options, "\nAdvanced options:\n",
                      OPT_EXPERT, OPT_EXPERT);
    printf("\n");
    show_help_children(avcodec_get_class(), AV_OPT_FLAG_DECODING_PARAM);
    show_help_children(avformat_get_class(), AV_OPT_FLAG_DECODING_PARAM);
#if !CONFIG_AVFILTER
    show_help_children(sws_get_class(), AV_OPT_FLAG_ENCODING_PARAM);
#endif
    printf("\nWhile playing:\n"
           "q, ESC              quit\n"
           "f                   toggle full screen\n"
           "p, SPC              pause\n"
           "a                   cycle audio channel\n"
           "v                   cycle video channel\n"
           "t                   cycle subtitle channel\n"
           "w                   show audio waves\n"
           "s                   activate frame-step mode\n"
           "left/right          seek backward/forward 10 seconds\n"
           "down/up             seek backward/forward 1 minute\n"
           "mouse click         seek to percentage in file corresponding to fraction of width\n"
           );
}
3083
3084 static void opt_input_file(void *optctx, const char *filename)
3085 {
3086     if (input_filename) {
3087         fprintf(stderr, "Argument '%s' provided as input filename, but '%s' was already specified.\n",
3088                 filename, input_filename);
3089         exit(1);
3090     }
3091     if (!strcmp(filename, "-"))
3092         filename = "pipe:";
3093     input_filename = filename;
3094 }
3095
3096 /* Called from the main */
3097 int main(int argc, char **argv)
3098 {
3099     int flags;
3100
3101     av_log_set_flags(AV_LOG_SKIP_REPEATED);
3102     parse_loglevel(argc, argv, options);
3103
3104     /* register all codecs, demux and protocols */
3105     avcodec_register_all();
3106 #if CONFIG_AVDEVICE
3107     avdevice_register_all();
3108 #endif
3109 #if CONFIG_AVFILTER
3110     avfilter_register_all();
3111 #endif
3112     av_register_all();
3113     avformat_network_init();
3114
3115     init_opts();
3116
3117     show_banner();
3118
3119     parse_options(NULL, argc, argv, options, opt_input_file);
3120
3121     if (!input_filename) {
3122         show_usage();
3123         fprintf(stderr, "An input file must be specified\n");
3124         fprintf(stderr, "Use -h to get full help or, even better, run 'man %s'\n", program_name);
3125         exit(1);
3126     }
3127
3128     if (display_disable) {
3129         video_disable = 1;
3130     }
3131     flags = SDL_INIT_VIDEO | SDL_INIT_AUDIO | SDL_INIT_TIMER;
3132 #if !defined(__MINGW32__) && !defined(__APPLE__)
3133     flags |= SDL_INIT_EVENTTHREAD; /* Not supported on Windows or Mac OS X */
3134 #endif
3135     if (SDL_Init (flags)) {
3136         fprintf(stderr, "Could not initialize SDL - %s\n", SDL_GetError());
3137         exit(1);
3138     }
3139
3140     if (!display_disable) {
3141 #if HAVE_SDL_VIDEO_SIZE
3142         const SDL_VideoInfo *vi = SDL_GetVideoInfo();
3143         fs_screen_width = vi->current_w;
3144         fs_screen_height = vi->current_h;
3145 #endif
3146     }
3147
3148     SDL_EventState(SDL_ACTIVEEVENT, SDL_IGNORE);
3149     SDL_EventState(SDL_SYSWMEVENT, SDL_IGNORE);
3150     SDL_EventState(SDL_USEREVENT, SDL_IGNORE);
3151
3152     av_init_packet(&flush_pkt);
3153     flush_pkt.data = "FLUSH";
3154
3155     cur_stream = stream_open(input_filename, file_iformat);
3156
3157     event_loop();
3158
3159     /* never returns */
3160
3161     return 0;
3162 }