1 /*
2  * avplay : Simple Media Player based on the Libav libraries
3  * Copyright (c) 2003 Fabrice Bellard
4  *
5  * This file is part of Libav.
6  *
7  * Libav is free software; you can redistribute it and/or
8  * modify it under the terms of the GNU Lesser General Public
9  * License as published by the Free Software Foundation; either
10  * version 2.1 of the License, or (at your option) any later version.
11  *
12  * Libav is distributed in the hope that it will be useful,
13  * but WITHOUT ANY WARRANTY; without even the implied warranty of
14  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
15  * Lesser General Public License for more details.
16  *
17  * You should have received a copy of the GNU Lesser General Public
18  * License along with Libav; if not, write to the Free Software
19  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
20  */
21
22 #include "config.h"
23 #include <inttypes.h>
24 #include <math.h>
25 #include <limits.h>
26 #include "libavutil/avstring.h"
27 #include "libavutil/colorspace.h"
28 #include "libavutil/mathematics.h"
29 #include "libavutil/pixdesc.h"
30 #include "libavutil/imgutils.h"
31 #include "libavutil/dict.h"
32 #include "libavutil/parseutils.h"
33 #include "libavutil/samplefmt.h"
34 #include "libavutil/time.h"
35 #include "libavformat/avformat.h"
36 #include "libavdevice/avdevice.h"
37 #include "libswscale/swscale.h"
38 #include "libavresample/avresample.h"
39 #include "libavutil/opt.h"
40 #include "libavcodec/avfft.h"
41
42 #if CONFIG_AVFILTER
43 # include "libavfilter/avfilter.h"
44 # include "libavfilter/avfiltergraph.h"
45 # include "libavfilter/buffersink.h"
46 # include "libavfilter/buffersrc.h"
47 #endif
48
49 #include "cmdutils.h"
50
51 #include <SDL.h>
52 #include <SDL_thread.h>
53
54 #ifdef __MINGW32__
55 #undef main /* We don't want SDL to override our main() */
56 #endif
57
58 #include <assert.h>
59
60 const char program_name[] = "avplay";
61 const int program_birth_year = 2003;
62
63 #define MAX_QUEUE_SIZE (15 * 1024 * 1024)
64 #define MIN_AUDIOQ_SIZE (20 * 16 * 1024)
65 #define MIN_FRAMES 5
66
67 /* SDL audio buffer size, in samples. Should be small to have precise
68    A/V sync as SDL does not have hardware buffer fullness info. */
69 #define SDL_AUDIO_BUFFER_SIZE 1024
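/* For example, at an assumed output rate of 44.1 kHz this is roughly
   1024 / 44100 ~= 23 ms of audio per SDL callback. */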
70
71 /* no AV sync correction is done if below the AV sync threshold */
72 #define AV_SYNC_THRESHOLD 0.01
74 /* no AV correction is done if the error is too big */
74 #define AV_NOSYNC_THRESHOLD 10.0
75
76 #define FRAME_SKIP_FACTOR 0.05
77
78 /* maximum audio speed change to get correct sync */
79 #define SAMPLE_CORRECTION_PERCENT_MAX 10
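/* i.e. when audio is resampled to correct drift, at most 10% of the samples
   may be added or dropped; at an assumed 44.1 kHz that is roughly +/- 4410
   samples per second of audio. */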
80
81 /* we use about AUDIO_DIFF_AVG_NB A-V differences to make the average */
82 #define AUDIO_DIFF_AVG_NB   20
83
84 /* NOTE: the size must be big enough to compensate for the hardware audio buffer size */
85 #define SAMPLE_ARRAY_SIZE (2 * 65536)
86
87 static int sws_flags = SWS_BICUBIC;
88
89 typedef struct PacketQueue {
90     AVPacketList *first_pkt, *last_pkt;
91     int nb_packets;
92     int size;
93     int abort_request;
94     SDL_mutex *mutex;
95     SDL_cond *cond;
96 } PacketQueue;
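/* A PacketQueue is a mutex/condition-protected FIFO of demuxed packets:
   packet_queue_put() appends and signals, packet_queue_get() blocks until a
   packet arrives or abort_request is set; size tracks the bytes queued. */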
97
98 #define VIDEO_PICTURE_QUEUE_SIZE 2
99 #define SUBPICTURE_QUEUE_SIZE 4
100
101 typedef struct VideoPicture {
102     double pts;                                  ///< presentation time stamp for this picture
103     double target_clock;                         ///< av_gettime() time at which this should be displayed ideally
104     int64_t pos;                                 ///< byte position in file
105     SDL_Overlay *bmp;
106     int width, height; /* source height & width */
107     int allocated;
108     int reallocate;
109     enum PixelFormat pix_fmt;
110
111 #if CONFIG_AVFILTER
112     AVFilterBufferRef *picref;
113 #endif
114 } VideoPicture;
115
116 typedef struct SubPicture {
117     double pts; /* presentation time stamp for this picture */
118     AVSubtitle sub;
119 } SubPicture;
120
121 enum {
122     AV_SYNC_AUDIO_MASTER, /* default choice */
123     AV_SYNC_VIDEO_MASTER,
124     AV_SYNC_EXTERNAL_CLOCK, /* synchronize to an external clock */
125 };
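/* get_master_clock() below returns the clock selected here; audio master is
   the default, so video is normally scheduled against the audio clock. */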
126
127 typedef struct VideoState {
128     SDL_Thread *parse_tid;
129     SDL_Thread *video_tid;
130     SDL_Thread *refresh_tid;
131     AVInputFormat *iformat;
132     int no_background;
133     int abort_request;
134     int paused;
135     int last_paused;
136     int seek_req;
137     int seek_flags;
138     int64_t seek_pos;
139     int64_t seek_rel;
140     int read_pause_return;
141     AVFormatContext *ic;
142
143     int audio_stream;
144
145     int av_sync_type;
146     double external_clock; /* external clock base */
147     int64_t external_clock_time;
148
149     double audio_clock;
150     double audio_diff_cum; /* used for AV difference average computation */
151     double audio_diff_avg_coef;
152     double audio_diff_threshold;
153     int audio_diff_avg_count;
154     AVStream *audio_st;
155     PacketQueue audioq;
156     int audio_hw_buf_size;
157     uint8_t silence_buf[SDL_AUDIO_BUFFER_SIZE];
158     uint8_t *audio_buf;
159     uint8_t *audio_buf1;
160     unsigned int audio_buf_size; /* in bytes */
161     int audio_buf_index; /* in bytes */
162     AVPacket audio_pkt_temp;
163     AVPacket audio_pkt;
164     enum AVSampleFormat sdl_sample_fmt;
165     uint64_t sdl_channel_layout;
166     int sdl_channels;
167     enum AVSampleFormat resample_sample_fmt;
168     uint64_t resample_channel_layout;
169     AVAudioResampleContext *avr;
170     AVFrame *frame;
171
172     int show_audio; /* if true, display audio samples */
173     int16_t sample_array[SAMPLE_ARRAY_SIZE];
174     int sample_array_index;
175     int last_i_start;
176     RDFTContext *rdft;
177     int rdft_bits;
178     FFTSample *rdft_data;
179     int xpos;
180
181     SDL_Thread *subtitle_tid;
182     int subtitle_stream;
183     int subtitle_stream_changed;
184     AVStream *subtitle_st;
185     PacketQueue subtitleq;
186     SubPicture subpq[SUBPICTURE_QUEUE_SIZE];
187     int subpq_size, subpq_rindex, subpq_windex;
188     SDL_mutex *subpq_mutex;
189     SDL_cond *subpq_cond;
190
191     double frame_timer;
192     double frame_last_pts;
193     double frame_last_delay;
194     double video_clock;                          ///< pts of last decoded frame / predicted pts of next decoded frame
195     int video_stream;
196     AVStream *video_st;
197     PacketQueue videoq;
198     double video_current_pts;                    ///< current displayed pts (different from video_clock if frame fifos are used)
199     double video_current_pts_drift;              ///< video_current_pts - time (av_gettime) at which we updated video_current_pts - used to have running video pts
200     int64_t video_current_pos;                   ///< current displayed file pos
201     VideoPicture pictq[VIDEO_PICTURE_QUEUE_SIZE];
202     int pictq_size, pictq_rindex, pictq_windex;
203     SDL_mutex *pictq_mutex;
204     SDL_cond *pictq_cond;
205 #if !CONFIG_AVFILTER
206     struct SwsContext *img_convert_ctx;
207 #endif
208
209     //    QETimer *video_timer;
210     char filename[1024];
211     int width, height, xleft, ytop;
212
213     PtsCorrectionContext pts_ctx;
214
215 #if CONFIG_AVFILTER
216     AVFilterContext *in_video_filter;           ///< the first filter in the video chain
217     AVFilterContext *out_video_filter;          ///< the last filter in the video chain
218     int use_dr1;
219     FrameBuffer *buffer_pool;
220 #endif
221
222     float skip_frames;
223     float skip_frames_index;
224     int refresh;
225 } VideoState;
226
227 static void show_help(void);
228
229 /* options specified by the user */
230 static AVInputFormat *file_iformat;
231 static const char *input_filename;
232 static const char *window_title;
233 static int fs_screen_width;
234 static int fs_screen_height;
235 static int screen_width  = 0;
236 static int screen_height = 0;
237 static int audio_disable;
238 static int video_disable;
239 static int wanted_stream[AVMEDIA_TYPE_NB] = {
240     [AVMEDIA_TYPE_AUDIO]    = -1,
241     [AVMEDIA_TYPE_VIDEO]    = -1,
242     [AVMEDIA_TYPE_SUBTITLE] = -1,
243 };
244 static int seek_by_bytes = -1;
245 static int display_disable;
246 static int show_status = 1;
247 static int av_sync_type = AV_SYNC_AUDIO_MASTER;
248 static int64_t start_time = AV_NOPTS_VALUE;
249 static int64_t duration = AV_NOPTS_VALUE;
250 static int debug = 0;
251 static int debug_mv = 0;
252 static int step = 0;
253 static int workaround_bugs = 1;
254 static int fast = 0;
255 static int genpts = 0;
256 static int idct = FF_IDCT_AUTO;
257 static enum AVDiscard skip_frame       = AVDISCARD_DEFAULT;
258 static enum AVDiscard skip_idct        = AVDISCARD_DEFAULT;
259 static enum AVDiscard skip_loop_filter = AVDISCARD_DEFAULT;
260 static int error_concealment = 3;
261 static int decoder_reorder_pts = -1;
262 static int autoexit;
263 static int exit_on_keydown;
264 static int exit_on_mousedown;
265 static int loop = 1;
266 static int framedrop = 1;
267 static int infinite_buffer = 0;
268
269 static int rdftspeed = 20;
270 #if CONFIG_AVFILTER
271 static char *vfilters = NULL;
272 #endif
273
274 /* current context */
275 static int is_full_screen;
276 static VideoState *cur_stream;
277 static int64_t audio_callback_time;
278
279 static AVPacket flush_pkt;
280
281 #define FF_ALLOC_EVENT   (SDL_USEREVENT)
282 #define FF_REFRESH_EVENT (SDL_USEREVENT + 1)
283 #define FF_QUIT_EVENT    (SDL_USEREVENT + 2)
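/* Custom SDL events used to hand work back to the main thread: worker threads
   push FF_ALLOC_EVENT (overlay allocation), FF_REFRESH_EVENT (display refresh)
   and FF_QUIT_EVENT, which are serviced by the main event loop. */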
284
285 static SDL_Surface *screen;
286
287 void exit_program(int ret)
288 {
289     exit(ret);
290 }
291
292 static int packet_queue_put(PacketQueue *q, AVPacket *pkt);
293
294 /* packet queue handling */
295 static void packet_queue_init(PacketQueue *q)
296 {
297     memset(q, 0, sizeof(PacketQueue));
298     q->mutex = SDL_CreateMutex();
299     q->cond = SDL_CreateCond();
300     packet_queue_put(q, &flush_pkt);
301 }
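/* Note that the queue is created already holding flush_pkt, so the first
   packet a decoder thread dequeues is a flush request (see e.g.
   get_video_frame() below). */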
302
303 static void packet_queue_flush(PacketQueue *q)
304 {
305     AVPacketList *pkt, *pkt1;
306
307     SDL_LockMutex(q->mutex);
308     for (pkt = q->first_pkt; pkt != NULL; pkt = pkt1) {
309         pkt1 = pkt->next;
310         av_free_packet(&pkt->pkt);
311         av_freep(&pkt);
312     }
313     q->last_pkt = NULL;
314     q->first_pkt = NULL;
315     q->nb_packets = 0;
316     q->size = 0;
317     SDL_UnlockMutex(q->mutex);
318 }
319
320 static void packet_queue_end(PacketQueue *q)
321 {
322     packet_queue_flush(q);
323     SDL_DestroyMutex(q->mutex);
324     SDL_DestroyCond(q->cond);
325 }
326
327 static int packet_queue_put(PacketQueue *q, AVPacket *pkt)
328 {
329     AVPacketList *pkt1;
330
331     /* duplicate the packet */
332     if (pkt != &flush_pkt && av_dup_packet(pkt) < 0)
333         return -1;
334
335     pkt1 = av_malloc(sizeof(AVPacketList));
336     if (!pkt1)
337         return -1;
338     pkt1->pkt = *pkt;
339     pkt1->next = NULL;
340
341
342     SDL_LockMutex(q->mutex);
343
344     if (!q->last_pkt)
345
346         q->first_pkt = pkt1;
347     else
348         q->last_pkt->next = pkt1;
349     q->last_pkt = pkt1;
350     q->nb_packets++;
351     q->size += pkt1->pkt.size + sizeof(*pkt1);
352     /* XXX: should duplicate packet data in DV case */
353     SDL_CondSignal(q->cond);
354
355     SDL_UnlockMutex(q->mutex);
356     return 0;
357 }
358
359 static void packet_queue_abort(PacketQueue *q)
360 {
361     SDL_LockMutex(q->mutex);
362
363     q->abort_request = 1;
364
365     SDL_CondSignal(q->cond);
366
367     SDL_UnlockMutex(q->mutex);
368 }
369
370 /* return < 0 if aborted, 0 if no packet and > 0 if packet.  */
371 static int packet_queue_get(PacketQueue *q, AVPacket *pkt, int block)
372 {
373     AVPacketList *pkt1;
374     int ret;
375
376     SDL_LockMutex(q->mutex);
377
378     for (;;) {
379         if (q->abort_request) {
380             ret = -1;
381             break;
382         }
383
384         pkt1 = q->first_pkt;
385         if (pkt1) {
386             q->first_pkt = pkt1->next;
387             if (!q->first_pkt)
388                 q->last_pkt = NULL;
389             q->nb_packets--;
390             q->size -= pkt1->pkt.size + sizeof(*pkt1);
391             *pkt = pkt1->pkt;
392             av_free(pkt1);
393             ret = 1;
394             break;
395         } else if (!block) {
396             ret = 0;
397             break;
398         } else {
399             SDL_CondWait(q->cond, q->mutex);
400         }
401     }
402     SDL_UnlockMutex(q->mutex);
403     return ret;
404 }
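/* Typical consumer pattern (sketch; see get_video_frame() for the real use):
 *
 *     AVPacket pkt;
 *     if (packet_queue_get(&q, &pkt, 1) < 0)
 *         return;                 // queue was aborted
 *     ...decode pkt...
 *     av_free_packet(&pkt);
 */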
405
406 static inline void fill_rectangle(SDL_Surface *screen,
407                                   int x, int y, int w, int h, int color)
408 {
409     SDL_Rect rect;
410     rect.x = x;
411     rect.y = y;
412     rect.w = w;
413     rect.h = h;
414     SDL_FillRect(screen, &rect, color);
415 }
416
417 #define ALPHA_BLEND(a, oldp, newp, s)\
418 ((((oldp << s) * (255 - (a))) + (newp * (a))) / (255 << s))
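/* i.e. a standard alpha blend (oldp * (255 - a) + newp * a) / 255, where newp
   may be a sum of 2^s samples: the extra factor of 1 << s in the divisor
   averages those samples while blending. */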
419
420 #define RGBA_IN(r, g, b, a, s)\
421 {\
422     unsigned int v = ((const uint32_t *)(s))[0];\
423     a = (v >> 24) & 0xff;\
424     r = (v >> 16) & 0xff;\
425     g = (v >> 8) & 0xff;\
426     b = v & 0xff;\
427 }
428
429 #define YUVA_IN(y, u, v, a, s, pal)\
430 {\
431     unsigned int val = ((const uint32_t *)(pal))[*(const uint8_t*)(s)];\
432     a = (val >> 24) & 0xff;\
433     y = (val >> 16) & 0xff;\
434     u = (val >> 8) & 0xff;\
435     v = val & 0xff;\
436 }
437
438 #define YUVA_OUT(d, y, u, v, a)\
439 {\
440     ((uint32_t *)(d))[0] = (a << 24) | (y << 16) | (u << 8) | v;\
441 }
442
443
444 #define BPP 1
445
446 static void blend_subrect(AVPicture *dst, const AVSubtitleRect *rect, int imgw, int imgh)
447 {
448     int wrap, wrap3, width2, skip2;
449     int y, u, v, a, u1, v1, a1, w, h;
450     uint8_t *lum, *cb, *cr;
451     const uint8_t *p;
452     const uint32_t *pal;
453     int dstx, dsty, dstw, dsth;
454
455     dstw = av_clip(rect->w, 0, imgw);
456     dsth = av_clip(rect->h, 0, imgh);
457     dstx = av_clip(rect->x, 0, imgw - dstw);
458     dsty = av_clip(rect->y, 0, imgh - dsth);
459     lum = dst->data[0] + dsty * dst->linesize[0];
460     cb  = dst->data[1] + (dsty >> 1) * dst->linesize[1];
461     cr  = dst->data[2] + (dsty >> 1) * dst->linesize[2];
462
463     width2 = ((dstw + 1) >> 1) + (dstx & ~dstw & 1);
464     skip2 = dstx >> 1;
465     wrap = dst->linesize[0];
466     wrap3 = rect->pict.linesize[0];
467     p = rect->pict.data[0];
468     pal = (const uint32_t *)rect->pict.data[1];  /* Now in YCrCb! */
469
470     if (dsty & 1) {
471         lum += dstx;
472         cb += skip2;
473         cr += skip2;
474
475         if (dstx & 1) {
476             YUVA_IN(y, u, v, a, p, pal);
477             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
478             cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
479             cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
480             cb++;
481             cr++;
482             lum++;
483             p += BPP;
484         }
485         for (w = dstw - (dstx & 1); w >= 2; w -= 2) {
486             YUVA_IN(y, u, v, a, p, pal);
487             u1 = u;
488             v1 = v;
489             a1 = a;
490             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
491
492             YUVA_IN(y, u, v, a, p + BPP, pal);
493             u1 += u;
494             v1 += v;
495             a1 += a;
496             lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
497             cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
498             cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
499             cb++;
500             cr++;
501             p += 2 * BPP;
502             lum += 2;
503         }
504         if (w) {
505             YUVA_IN(y, u, v, a, p, pal);
506             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
507             cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
508             cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
509             p++;
510             lum++;
511         }
512         p += wrap3 - dstw * BPP;
513         lum += wrap - dstw - dstx;
514         cb += dst->linesize[1] - width2 - skip2;
515         cr += dst->linesize[2] - width2 - skip2;
516     }
517     for (h = dsth - (dsty & 1); h >= 2; h -= 2) {
518         lum += dstx;
519         cb += skip2;
520         cr += skip2;
521
522         if (dstx & 1) {
523             YUVA_IN(y, u, v, a, p, pal);
524             u1 = u;
525             v1 = v;
526             a1 = a;
527             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
528             p += wrap3;
529             lum += wrap;
530             YUVA_IN(y, u, v, a, p, pal);
531             u1 += u;
532             v1 += v;
533             a1 += a;
534             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
535             cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
536             cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
537             cb++;
538             cr++;
539             p += -wrap3 + BPP;
540             lum += -wrap + 1;
541         }
542         for (w = dstw - (dstx & 1); w >= 2; w -= 2) {
543             YUVA_IN(y, u, v, a, p, pal);
544             u1 = u;
545             v1 = v;
546             a1 = a;
547             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
548
549             YUVA_IN(y, u, v, a, p + BPP, pal);
550             u1 += u;
551             v1 += v;
552             a1 += a;
553             lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
554             p += wrap3;
555             lum += wrap;
556
557             YUVA_IN(y, u, v, a, p, pal);
558             u1 += u;
559             v1 += v;
560             a1 += a;
561             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
562
563             YUVA_IN(y, u, v, a, p + BPP, pal);
564             u1 += u;
565             v1 += v;
566             a1 += a;
567             lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
568
569             cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 2);
570             cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 2);
571
572             cb++;
573             cr++;
574             p += -wrap3 + 2 * BPP;
575             lum += -wrap + 2;
576         }
577         if (w) {
578             YUVA_IN(y, u, v, a, p, pal);
579             u1 = u;
580             v1 = v;
581             a1 = a;
582             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
583             p += wrap3;
584             lum += wrap;
585             YUVA_IN(y, u, v, a, p, pal);
586             u1 += u;
587             v1 += v;
588             a1 += a;
589             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
590             cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
591             cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
592             cb++;
593             cr++;
594             p += -wrap3 + BPP;
595             lum += -wrap + 1;
596         }
597         p += wrap3 + (wrap3 - dstw * BPP);
598         lum += wrap + (wrap - dstw - dstx);
599         cb += dst->linesize[1] - width2 - skip2;
600         cr += dst->linesize[2] - width2 - skip2;
601     }
602     /* handle odd height */
603     if (h) {
604         lum += dstx;
605         cb += skip2;
606         cr += skip2;
607
608         if (dstx & 1) {
609             YUVA_IN(y, u, v, a, p, pal);
610             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
611             cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
612             cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
613             cb++;
614             cr++;
615             lum++;
616             p += BPP;
617         }
618         for (w = dstw - (dstx & 1); w >= 2; w -= 2) {
619             YUVA_IN(y, u, v, a, p, pal);
620             u1 = u;
621             v1 = v;
622             a1 = a;
623             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
624
625             YUVA_IN(y, u, v, a, p + BPP, pal);
626             u1 += u;
627             v1 += v;
628             a1 += a;
629             lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
630             cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u, 1);
631             cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v, 1);
632             cb++;
633             cr++;
634             p += 2 * BPP;
635             lum += 2;
636         }
637         if (w) {
638             YUVA_IN(y, u, v, a, p, pal);
639             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
640             cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
641             cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
642         }
643     }
644 }
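/* blend_subrect() above alpha-blends one palettized subtitle rectangle (PAL8
   pixels with a YUVA palette) onto the YUV 4:2:0 destination: luma is blended
   per pixel, chroma and alpha are accumulated over 2x2 blocks (smaller at odd
   edges) before blending, and the three sections handle an odd first row, the
   even row pairs, and an odd last row. */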
645
646 static void free_subpicture(SubPicture *sp)
647 {
648     avsubtitle_free(&sp->sub);
649 }
650
651 static void video_image_display(VideoState *is)
652 {
653     VideoPicture *vp;
654     SubPicture *sp;
655     AVPicture pict;
656     float aspect_ratio;
657     int width, height, x, y;
658     SDL_Rect rect;
659     int i;
660
661     vp = &is->pictq[is->pictq_rindex];
662     if (vp->bmp) {
663 #if CONFIG_AVFILTER
664          if (vp->picref->video->pixel_aspect.num == 0)
665              aspect_ratio = 0;
666          else
667              aspect_ratio = av_q2d(vp->picref->video->pixel_aspect);
668 #else
669
670         /* XXX: use variable in the frame */
671         if (is->video_st->sample_aspect_ratio.num)
672             aspect_ratio = av_q2d(is->video_st->sample_aspect_ratio);
673         else if (is->video_st->codec->sample_aspect_ratio.num)
674             aspect_ratio = av_q2d(is->video_st->codec->sample_aspect_ratio);
675         else
676             aspect_ratio = 0;
677 #endif
678         if (aspect_ratio <= 0.0)
679             aspect_ratio = 1.0;
680         aspect_ratio *= (float)vp->width / (float)vp->height;
681
682         if (is->subtitle_st)
683         {
684             if (is->subpq_size > 0)
685             {
686                 sp = &is->subpq[is->subpq_rindex];
687
688                 if (vp->pts >= sp->pts + ((float) sp->sub.start_display_time / 1000))
689                 {
690                     SDL_LockYUVOverlay (vp->bmp);
691
692                     pict.data[0] = vp->bmp->pixels[0];
693                     pict.data[1] = vp->bmp->pixels[2];
694                     pict.data[2] = vp->bmp->pixels[1];
695
696                     pict.linesize[0] = vp->bmp->pitches[0];
697                     pict.linesize[1] = vp->bmp->pitches[2];
698                     pict.linesize[2] = vp->bmp->pitches[1];
699
700                     for (i = 0; i < sp->sub.num_rects; i++)
701                         blend_subrect(&pict, sp->sub.rects[i],
702                                       vp->bmp->w, vp->bmp->h);
703
704                     SDL_UnlockYUVOverlay (vp->bmp);
705                 }
706             }
707         }
708
709
710         /* XXX: we assume the screen has a 1.0 pixel aspect ratio */
711         height = is->height;
712         width = ((int)rint(height * aspect_ratio)) & ~1;
713         if (width > is->width) {
714             width = is->width;
715             height = ((int)rint(width / aspect_ratio)) & ~1;
716         }
717         x = (is->width - width) / 2;
718         y = (is->height - height) / 2;
719         is->no_background = 0;
720         rect.x = is->xleft + x;
721         rect.y = is->ytop  + y;
722         rect.w = width;
723         rect.h = height;
724         SDL_DisplayYUVOverlay(vp->bmp, &rect);
725     }
726 }
727
728 /* get the amount of buffered audio output remaining, in bytes. With SDL, we
729    cannot get precise information. */
730 static int audio_write_get_buf_size(VideoState *is)
731 {
732     return is->audio_buf_size - is->audio_buf_index;
733 }
734
735 static inline int compute_mod(int a, int b)
736 {
737     a = a % b;
738     if (a >= 0)
739         return a;
740     else
741         return a + b;
742 }
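/* compute_mod() returns the mathematically non-negative modulus, unlike C's %
   operator for negative operands: e.g. compute_mod(-3, SAMPLE_ARRAY_SIZE)
   yields SAMPLE_ARRAY_SIZE - 3 rather than -3. */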
743
744 static void video_audio_display(VideoState *s)
745 {
746     int i, i_start, x, y1, y, ys, delay, n, nb_display_channels;
747     int ch, channels, h, h2, bgcolor, fgcolor;
748     int16_t time_diff;
749     int rdft_bits, nb_freq;
750
751     for (rdft_bits = 1; (1 << rdft_bits) < 2 * s->height; rdft_bits++)
752         ;
753     nb_freq = 1 << (rdft_bits - 1);
754
755     /* compute display index: center on currently output samples */
756     channels = s->sdl_channels;
757     nb_display_channels = channels;
758     if (!s->paused) {
759         int data_used = s->show_audio == 1 ? s->width : (2 * nb_freq);
760         n = 2 * channels;
761         delay = audio_write_get_buf_size(s);
762         delay /= n;
763
764         /* to be more precise, we take into account the time spent since
765            the last buffer computation */
766         if (audio_callback_time) {
767             time_diff = av_gettime() - audio_callback_time;
768             delay -= (time_diff * s->audio_st->codec->sample_rate) / 1000000;
769         }
770
771         delay += 2 * data_used;
772         if (delay < data_used)
773             delay = data_used;
774
775         i_start= x = compute_mod(s->sample_array_index - delay * channels, SAMPLE_ARRAY_SIZE);
776         if (s->show_audio == 1) {
777             h = INT_MIN;
778             for (i = 0; i < 1000; i += channels) {
779                 int idx = (SAMPLE_ARRAY_SIZE + x - i) % SAMPLE_ARRAY_SIZE;
780                 int a = s->sample_array[idx];
781                 int b = s->sample_array[(idx + 4 * channels) % SAMPLE_ARRAY_SIZE];
782                 int c = s->sample_array[(idx + 5 * channels) % SAMPLE_ARRAY_SIZE];
783                 int d = s->sample_array[(idx + 9 * channels) % SAMPLE_ARRAY_SIZE];
784                 int score = a - d;
785                 if (h < score && (b ^ c) < 0) {
786                     h = score;
787                     i_start = idx;
788                 }
789             }
790         }
791
792         s->last_i_start = i_start;
793     } else {
794         i_start = s->last_i_start;
795     }
796
797     bgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0x00);
798     if (s->show_audio == 1) {
799         fill_rectangle(screen,
800                        s->xleft, s->ytop, s->width, s->height,
801                        bgcolor);
802
803         fgcolor = SDL_MapRGB(screen->format, 0xff, 0xff, 0xff);
804
805         /* total height for one channel */
806         h = s->height / nb_display_channels;
807         /* graph height / 2 */
808         h2 = (h * 9) / 20;
809         for (ch = 0; ch < nb_display_channels; ch++) {
810             i = i_start + ch;
811             y1 = s->ytop + ch * h + (h / 2); /* position of center line */
812             for (x = 0; x < s->width; x++) {
813                 y = (s->sample_array[i] * h2) >> 15;
814                 if (y < 0) {
815                     y = -y;
816                     ys = y1 - y;
817                 } else {
818                     ys = y1;
819                 }
820                 fill_rectangle(screen,
821                                s->xleft + x, ys, 1, y,
822                                fgcolor);
823                 i += channels;
824                 if (i >= SAMPLE_ARRAY_SIZE)
825                     i -= SAMPLE_ARRAY_SIZE;
826             }
827         }
828
829         fgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0xff);
830
831         for (ch = 1; ch < nb_display_channels; ch++) {
832             y = s->ytop + ch * h;
833             fill_rectangle(screen,
834                            s->xleft, y, s->width, 1,
835                            fgcolor);
836         }
837         SDL_UpdateRect(screen, s->xleft, s->ytop, s->width, s->height);
838     } else {
839         nb_display_channels= FFMIN(nb_display_channels, 2);
840         if (rdft_bits != s->rdft_bits) {
841             av_rdft_end(s->rdft);
842             av_free(s->rdft_data);
843             s->rdft = av_rdft_init(rdft_bits, DFT_R2C);
844             s->rdft_bits = rdft_bits;
845             s->rdft_data = av_malloc(4 * nb_freq * sizeof(*s->rdft_data));
846         }
847         {
848             FFTSample *data[2];
849             for (ch = 0; ch < nb_display_channels; ch++) {
850                 data[ch] = s->rdft_data + 2 * nb_freq * ch;
851                 i = i_start + ch;
852                 for (x = 0; x < 2 * nb_freq; x++) {
853                     double w = (x-nb_freq) * (1.0 / nb_freq);
854                     data[ch][x] = s->sample_array[i] * (1.0 - w * w);
855                     i += channels;
856                     if (i >= SAMPLE_ARRAY_SIZE)
857                         i -= SAMPLE_ARRAY_SIZE;
858                 }
859                 av_rdft_calc(s->rdft, data[ch]);
860             }
861             // Least efficient way to do this; we could access the data directly, but it is more than fast enough.
862             for (y = 0; y < s->height; y++) {
863                 double w = 1 / sqrt(nb_freq);
864                 int a = sqrt(w * sqrt(data[0][2 * y + 0] * data[0][2 * y + 0] + data[0][2 * y + 1] * data[0][2 * y + 1]));
865                 int b = (nb_display_channels == 2 ) ? sqrt(w * sqrt(data[1][2 * y + 0] * data[1][2 * y + 0]
866                        + data[1][2 * y + 1] * data[1][2 * y + 1])) : a;
867                 a = FFMIN(a, 255);
868                 b = FFMIN(b, 255);
869                 fgcolor = SDL_MapRGB(screen->format, a, b, (a + b) / 2);
870
871                 fill_rectangle(screen,
872                             s->xpos, s->height-y, 1, 1,
873                             fgcolor);
874             }
875         }
876         SDL_UpdateRect(screen, s->xpos, s->ytop, 1, s->height);
877         s->xpos++;
878         if (s->xpos >= s->width)
879             s->xpos= s->xleft;
880     }
881 }
882
883 static int video_open(VideoState *is)
884 {
885     int flags = SDL_HWSURFACE | SDL_ASYNCBLIT | SDL_HWACCEL;
886     int w,h;
887
888     if (is_full_screen) flags |= SDL_FULLSCREEN;
889     else                flags |= SDL_RESIZABLE;
890
891     if (is_full_screen && fs_screen_width) {
892         w = fs_screen_width;
893         h = fs_screen_height;
894     } else if (!is_full_screen && screen_width) {
895         w = screen_width;
896         h = screen_height;
897 #if CONFIG_AVFILTER
898     } else if (is->out_video_filter && is->out_video_filter->inputs[0]) {
899         w = is->out_video_filter->inputs[0]->w;
900         h = is->out_video_filter->inputs[0]->h;
901 #else
902     } else if (is->video_st && is->video_st->codec->width) {
903         w = is->video_st->codec->width;
904         h = is->video_st->codec->height;
905 #endif
906     } else {
907         w = 640;
908         h = 480;
909     }
910     if (screen && is->width == screen->w && screen->w == w
911        && is->height== screen->h && screen->h == h)
912         return 0;
913
914 #if defined(__APPLE__) && !SDL_VERSION_ATLEAST(1, 2, 14)
915     /* setting bits_per_pixel = 0 or 32 causes blank video on OS X and older SDL */
916     screen = SDL_SetVideoMode(w, h, 24, flags);
917 #else
918     screen = SDL_SetVideoMode(w, h, 0, flags);
919 #endif
920     if (!screen) {
921         fprintf(stderr, "SDL: could not set video mode - exiting\n");
922         return -1;
923     }
924     if (!window_title)
925         window_title = input_filename;
926     SDL_WM_SetCaption(window_title, window_title);
927
928     is->width  = screen->w;
929     is->height = screen->h;
930
931     return 0;
932 }
933
934 /* display the current picture, if any */
935 static void video_display(VideoState *is)
936 {
937     if (!screen)
938         video_open(cur_stream);
939     if (is->audio_st && is->show_audio)
940         video_audio_display(is);
941     else if (is->video_st)
942         video_image_display(is);
943 }
944
945 static int refresh_thread(void *opaque)
946 {
947     VideoState *is= opaque;
948     while (!is->abort_request) {
949         SDL_Event event;
950         event.type = FF_REFRESH_EVENT;
951         event.user.data1 = opaque;
952         if (!is->refresh) {
953             is->refresh = 1;
954             SDL_PushEvent(&event);
955         }
956         av_usleep(is->audio_st && is->show_audio ? rdftspeed * 1000 : 5000); // FIXME: ideally we should wait for the correct time, but SDL's event passing is so slow it would be silly
957     }
958     return 0;
959 }
960
961 /* get the current audio clock value */
962 static double get_audio_clock(VideoState *is)
963 {
964     double pts;
965     int hw_buf_size, bytes_per_sec;
966     pts = is->audio_clock;
967     hw_buf_size = audio_write_get_buf_size(is);
968     bytes_per_sec = 0;
969     if (is->audio_st) {
970         bytes_per_sec = is->audio_st->codec->sample_rate * is->sdl_channels *
971                         av_get_bytes_per_sample(is->sdl_sample_fmt);
972     }
973     if (bytes_per_sec)
974         pts -= (double)hw_buf_size / bytes_per_sec;
975     return pts;
976 }
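/* In other words, the audio clock reported here is
   pts = audio_clock - buffered_bytes / bytes_per_sec,
   compensating for data already decoded but not yet played out of audio_buf. */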
977
978 /* get the current video clock value */
979 static double get_video_clock(VideoState *is)
980 {
981     if (is->paused) {
982         return is->video_current_pts;
983     } else {
984         return is->video_current_pts_drift + av_gettime() / 1000000.0;
985     }
986 }
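/* video_current_pts_drift stores pts minus the wall-clock time at which it was
   set, so drift + av_gettime() extrapolates the presentation time while the
   stream keeps playing; when paused, the stored pts is returned unchanged. */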
987
988 /* get the current external clock value */
989 static double get_external_clock(VideoState *is)
990 {
991     int64_t ti;
992     ti = av_gettime();
993     return is->external_clock + ((ti - is->external_clock_time) * 1e-6);
994 }
995
996 /* get the current master clock value */
997 static double get_master_clock(VideoState *is)
998 {
999     double val;
1000
1001     if (is->av_sync_type == AV_SYNC_VIDEO_MASTER) {
1002         if (is->video_st)
1003             val = get_video_clock(is);
1004         else
1005             val = get_audio_clock(is);
1006     } else if (is->av_sync_type == AV_SYNC_AUDIO_MASTER) {
1007         if (is->audio_st)
1008             val = get_audio_clock(is);
1009         else
1010             val = get_video_clock(is);
1011     } else {
1012         val = get_external_clock(is);
1013     }
1014     return val;
1015 }
1016
1017 /* seek in the stream */
1018 static void stream_seek(VideoState *is, int64_t pos, int64_t rel, int seek_by_bytes)
1019 {
1020     if (!is->seek_req) {
1021         is->seek_pos = pos;
1022         is->seek_rel = rel;
1023         is->seek_flags &= ~AVSEEK_FLAG_BYTE;
1024         if (seek_by_bytes)
1025             is->seek_flags |= AVSEEK_FLAG_BYTE;
1026         is->seek_req = 1;
1027     }
1028 }
1029
1030 /* pause or resume the video */
1031 static void stream_pause(VideoState *is)
1032 {
1033     if (is->paused) {
1034         is->frame_timer += av_gettime() / 1000000.0 + is->video_current_pts_drift - is->video_current_pts;
1035         if (is->read_pause_return != AVERROR(ENOSYS)) {
1036             is->video_current_pts = is->video_current_pts_drift + av_gettime() / 1000000.0;
1037         }
1038         is->video_current_pts_drift = is->video_current_pts - av_gettime() / 1000000.0;
1039     }
1040     is->paused = !is->paused;
1041 }
1042
1043 static double compute_target_time(double frame_current_pts, VideoState *is)
1044 {
1045     double delay, sync_threshold, diff;
1046
1047     /* compute nominal delay */
1048     delay = frame_current_pts - is->frame_last_pts;
1049     if (delay <= 0 || delay >= 10.0) {
1050         /* if incorrect delay, use previous one */
1051         delay = is->frame_last_delay;
1052     } else {
1053         is->frame_last_delay = delay;
1054     }
1055     is->frame_last_pts = frame_current_pts;
1056
1057     /* update delay to follow master synchronisation source */
1058     if (((is->av_sync_type == AV_SYNC_AUDIO_MASTER && is->audio_st) ||
1059          is->av_sync_type == AV_SYNC_EXTERNAL_CLOCK)) {
1060         /* if video is slave, we try to correct big delays by
1061            duplicating or deleting a frame */
1062         diff = get_video_clock(is) - get_master_clock(is);
1063
1064         /* skip or repeat frame. We take into account the
1065            delay to compute the threshold. I still don't know
1066            if it is the best guess */
1067         sync_threshold = FFMAX(AV_SYNC_THRESHOLD, delay);
1068         if (fabs(diff) < AV_NOSYNC_THRESHOLD) {
1069             if (diff <= -sync_threshold)
1070                 delay = 0;
1071             else if (diff >= sync_threshold)
1072                 delay = 2 * delay;
1073         }
1074     }
1075     is->frame_timer += delay;
1076
1077     av_dlog(NULL, "video: delay=%0.3f pts=%0.3f A-V=%f\n",
1078             delay, frame_current_pts, -diff);
1079
1080     return is->frame_timer;
1081 }
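/* Example of the correction above: with the default audio-master sync, if the
   video clock lags the master by more than sync_threshold the next delay is
   forced to 0 (the frame is shown immediately), and if it leads by more than
   the threshold the delay is doubled; differences beyond AV_NOSYNC_THRESHOLD
   (10 s) are left uncorrected. */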
1082
1083 /* called to display each frame */
1084 static void video_refresh_timer(void *opaque)
1085 {
1086     VideoState *is = opaque;
1087     VideoPicture *vp;
1088
1089     SubPicture *sp, *sp2;
1090
1091     if (is->video_st) {
1092 retry:
1093         if (is->pictq_size == 0) {
1094             // nothing to do, no picture to display in the queue
1095         } else {
1096             double time = av_gettime() / 1000000.0;
1097             double next_target;
1098             /* dequeue the picture */
1099             vp = &is->pictq[is->pictq_rindex];
1100
1101             if (time < vp->target_clock)
1102                 return;
1103             /* update current video pts */
1104             is->video_current_pts = vp->pts;
1105             is->video_current_pts_drift = is->video_current_pts - time;
1106             is->video_current_pos = vp->pos;
1107             if (is->pictq_size > 1) {
1108                 VideoPicture *nextvp = &is->pictq[(is->pictq_rindex + 1) % VIDEO_PICTURE_QUEUE_SIZE];
1109                 assert(nextvp->target_clock >= vp->target_clock);
1110                 next_target= nextvp->target_clock;
1111             } else {
1112                 next_target = vp->target_clock + is->video_clock - vp->pts; // FIXME pass durations cleanly
1113             }
1114             if (framedrop && time > next_target) {
1115                 is->skip_frames *= 1.0 + FRAME_SKIP_FACTOR;
1116                 if (is->pictq_size > 1 || time > next_target + 0.5) {
1117                     /* update queue size and signal for next picture */
1118                     if (++is->pictq_rindex == VIDEO_PICTURE_QUEUE_SIZE)
1119                         is->pictq_rindex = 0;
1120
1121                     SDL_LockMutex(is->pictq_mutex);
1122                     is->pictq_size--;
1123                     SDL_CondSignal(is->pictq_cond);
1124                     SDL_UnlockMutex(is->pictq_mutex);
1125                     goto retry;
1126                 }
1127             }
1128
1129             if (is->subtitle_st) {
1130                 if (is->subtitle_stream_changed) {
1131                     SDL_LockMutex(is->subpq_mutex);
1132
1133                     while (is->subpq_size) {
1134                         free_subpicture(&is->subpq[is->subpq_rindex]);
1135
1136                         /* update queue size and signal for next picture */
1137                         if (++is->subpq_rindex == SUBPICTURE_QUEUE_SIZE)
1138                             is->subpq_rindex = 0;
1139
1140                         is->subpq_size--;
1141                     }
1142                     is->subtitle_stream_changed = 0;
1143
1144                     SDL_CondSignal(is->subpq_cond);
1145                     SDL_UnlockMutex(is->subpq_mutex);
1146                 } else {
1147                     if (is->subpq_size > 0) {
1148                         sp = &is->subpq[is->subpq_rindex];
1149
1150                         if (is->subpq_size > 1)
1151                             sp2 = &is->subpq[(is->subpq_rindex + 1) % SUBPICTURE_QUEUE_SIZE];
1152                         else
1153                             sp2 = NULL;
1154
1155                         if ((is->video_current_pts > (sp->pts + ((float) sp->sub.end_display_time / 1000)))
1156                                 || (sp2 && is->video_current_pts > (sp2->pts + ((float) sp2->sub.start_display_time / 1000))))
1157                         {
1158                             free_subpicture(sp);
1159
1160                             /* update queue size and signal for next picture */
1161                             if (++is->subpq_rindex == SUBPICTURE_QUEUE_SIZE)
1162                                 is->subpq_rindex = 0;
1163
1164                             SDL_LockMutex(is->subpq_mutex);
1165                             is->subpq_size--;
1166                             SDL_CondSignal(is->subpq_cond);
1167                             SDL_UnlockMutex(is->subpq_mutex);
1168                         }
1169                     }
1170                 }
1171             }
1172
1173             /* display picture */
1174             if (!display_disable)
1175                 video_display(is);
1176
1177             /* update queue size and signal for next picture */
1178             if (++is->pictq_rindex == VIDEO_PICTURE_QUEUE_SIZE)
1179                 is->pictq_rindex = 0;
1180
1181             SDL_LockMutex(is->pictq_mutex);
1182             is->pictq_size--;
1183             SDL_CondSignal(is->pictq_cond);
1184             SDL_UnlockMutex(is->pictq_mutex);
1185         }
1186     } else if (is->audio_st) {
1187         /* draw the next audio frame */
1188
1189         /* if there is only an audio stream, then display the audio bars (better
1190            than nothing, just to test the implementation) */
1191
1192         /* display picture */
1193         if (!display_disable)
1194             video_display(is);
1195     }
1196     if (show_status) {
1197         static int64_t last_time;
1198         int64_t cur_time;
1199         int aqsize, vqsize, sqsize;
1200         double av_diff;
1201
1202         cur_time = av_gettime();
1203         if (!last_time || (cur_time - last_time) >= 30000) {
1204             aqsize = 0;
1205             vqsize = 0;
1206             sqsize = 0;
1207             if (is->audio_st)
1208                 aqsize = is->audioq.size;
1209             if (is->video_st)
1210                 vqsize = is->videoq.size;
1211             if (is->subtitle_st)
1212                 sqsize = is->subtitleq.size;
1213             av_diff = 0;
1214             if (is->audio_st && is->video_st)
1215                 av_diff = get_audio_clock(is) - get_video_clock(is);
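            /* The status line shows the master clock, the audio-video clock
               difference, the current frame-skip factor, the audio/video/
               subtitle queue sizes and the counts of faulty dts/pts values
               seen by the pts correction code. */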
1216             printf("%7.2f A-V:%7.3f s:%3.1f aq=%5dKB vq=%5dKB sq=%5dB f=%"PRId64"/%"PRId64"   \r",
1217                    get_master_clock(is), av_diff, FFMAX(is->skip_frames - 1, 0), aqsize / 1024,
1218                    vqsize / 1024, sqsize, is->pts_ctx.num_faulty_dts, is->pts_ctx.num_faulty_pts);
1219             fflush(stdout);
1220             last_time = cur_time;
1221         }
1222     }
1223 }
1224
1225 static void stream_close(VideoState *is)
1226 {
1227     VideoPicture *vp;
1228     int i;
1229     /* XXX: use a special url_shutdown call to abort parse cleanly */
1230     is->abort_request = 1;
1231     SDL_WaitThread(is->parse_tid, NULL);
1232     SDL_WaitThread(is->refresh_tid, NULL);
1233
1234     /* free all pictures */
1235     for (i = 0; i < VIDEO_PICTURE_QUEUE_SIZE; i++) {
1236         vp = &is->pictq[i];
1237 #if CONFIG_AVFILTER
1238         if (vp->picref) {
1239             avfilter_unref_buffer(vp->picref);
1240             vp->picref = NULL;
1241         }
1242 #endif
1243         if (vp->bmp) {
1244             SDL_FreeYUVOverlay(vp->bmp);
1245             vp->bmp = NULL;
1246         }
1247     }
1248     SDL_DestroyMutex(is->pictq_mutex);
1249     SDL_DestroyCond(is->pictq_cond);
1250     SDL_DestroyMutex(is->subpq_mutex);
1251     SDL_DestroyCond(is->subpq_cond);
1252 #if !CONFIG_AVFILTER
1253     if (is->img_convert_ctx)
1254         sws_freeContext(is->img_convert_ctx);
1255 #endif
1256     av_free(is);
1257 }
1258
1259 static void do_exit(void)
1260 {
1261     if (cur_stream) {
1262         stream_close(cur_stream);
1263         cur_stream = NULL;
1264     }
1265     uninit_opts();
1266 #if CONFIG_AVFILTER
1267     avfilter_uninit();
1268 #endif
1269     avformat_network_deinit();
1270     if (show_status)
1271         printf("\n");
1272     SDL_Quit();
1273     av_log(NULL, AV_LOG_QUIET, "");
1274     exit(0);
1275 }
1276
1277 /* allocate a picture (this needs to be done in the main thread to avoid
1278    potential locking problems) */
1279 static void alloc_picture(void *opaque)
1280 {
1281     VideoState *is = opaque;
1282     VideoPicture *vp;
1283
1284     vp = &is->pictq[is->pictq_windex];
1285
1286     if (vp->bmp)
1287         SDL_FreeYUVOverlay(vp->bmp);
1288
1289 #if CONFIG_AVFILTER
1290     if (vp->picref)
1291         avfilter_unref_buffer(vp->picref);
1292     vp->picref = NULL;
1293
1294     vp->width   = is->out_video_filter->inputs[0]->w;
1295     vp->height  = is->out_video_filter->inputs[0]->h;
1296     vp->pix_fmt = is->out_video_filter->inputs[0]->format;
1297 #else
1298     vp->width   = is->video_st->codec->width;
1299     vp->height  = is->video_st->codec->height;
1300     vp->pix_fmt = is->video_st->codec->pix_fmt;
1301 #endif
1302
1303     vp->bmp = SDL_CreateYUVOverlay(vp->width, vp->height,
1304                                    SDL_YV12_OVERLAY,
1305                                    screen);
1306     if (!vp->bmp || vp->bmp->pitches[0] < vp->width) {
1307         /* SDL allocates a buffer smaller than requested if the video
1308          * overlay hardware is unable to support the requested size. */
1309         fprintf(stderr, "Error: the video system does not support an image\n"
1310                         "size of %dx%d pixels. Try using -vf \"scale=w:h\"\n"
1311                         "to reduce the image size.\n", vp->width, vp->height );
1312         do_exit();
1313     }
1314
1315     SDL_LockMutex(is->pictq_mutex);
1316     vp->allocated = 1;
1317     SDL_CondSignal(is->pictq_cond);
1318     SDL_UnlockMutex(is->pictq_mutex);
1319 }
1320
1321 /**
1322  *
1323  * @param pts the dts of the packet / the pts of the frame, guessed if not known
1324  */
1325 static int queue_picture(VideoState *is, AVFrame *src_frame, double pts, int64_t pos)
1326 {
1327     VideoPicture *vp;
1328 #if CONFIG_AVFILTER
1329     AVPicture pict_src;
1330 #else
1331     int dst_pix_fmt = PIX_FMT_YUV420P;
1332 #endif
1333     /* wait until we have space to put a new picture */
1334     SDL_LockMutex(is->pictq_mutex);
1335
1336     if (is->pictq_size >= VIDEO_PICTURE_QUEUE_SIZE && !is->refresh)
1337         is->skip_frames = FFMAX(1.0 - FRAME_SKIP_FACTOR, is->skip_frames * (1.0 - FRAME_SKIP_FACTOR));
1338
1339     while (is->pictq_size >= VIDEO_PICTURE_QUEUE_SIZE &&
1340            !is->videoq.abort_request) {
1341         SDL_CondWait(is->pictq_cond, is->pictq_mutex);
1342     }
1343     SDL_UnlockMutex(is->pictq_mutex);
1344
1345     if (is->videoq.abort_request)
1346         return -1;
1347
1348     vp = &is->pictq[is->pictq_windex];
1349
1350     /* alloc or resize hardware picture buffer */
1351     if (!vp->bmp || vp->reallocate ||
1352 #if CONFIG_AVFILTER
1353         vp->width  != is->out_video_filter->inputs[0]->w ||
1354         vp->height != is->out_video_filter->inputs[0]->h) {
1355 #else
1356         vp->width != is->video_st->codec->width ||
1357         vp->height != is->video_st->codec->height) {
1358 #endif
1359         SDL_Event event;
1360
1361         vp->allocated  = 0;
1362         vp->reallocate = 0;
1363
1364         /* the allocation must be done in the main thread to avoid
1365            locking problems */
1366         event.type = FF_ALLOC_EVENT;
1367         event.user.data1 = is;
1368         SDL_PushEvent(&event);
1369
1370         /* wait until the picture is allocated */
1371         SDL_LockMutex(is->pictq_mutex);
1372         while (!vp->allocated && !is->videoq.abort_request) {
1373             SDL_CondWait(is->pictq_cond, is->pictq_mutex);
1374         }
1375         SDL_UnlockMutex(is->pictq_mutex);
1376
1377         if (is->videoq.abort_request)
1378             return -1;
1379     }
1380
1381     /* if the frame is not skipped, then display it */
1382     if (vp->bmp) {
1383         AVPicture pict = { { 0 } };
1384 #if CONFIG_AVFILTER
1385         if (vp->picref)
1386             avfilter_unref_buffer(vp->picref);
1387         vp->picref = src_frame->opaque;
1388 #endif
1389
1390         /* get a pointer to the bitmap */
1391         SDL_LockYUVOverlay (vp->bmp);
1392
1393         pict.data[0] = vp->bmp->pixels[0];
1394         pict.data[1] = vp->bmp->pixels[2];
1395         pict.data[2] = vp->bmp->pixels[1];
1396
1397         pict.linesize[0] = vp->bmp->pitches[0];
1398         pict.linesize[1] = vp->bmp->pitches[2];
1399         pict.linesize[2] = vp->bmp->pitches[1];
1400
1401 #if CONFIG_AVFILTER
1402         pict_src.data[0] = src_frame->data[0];
1403         pict_src.data[1] = src_frame->data[1];
1404         pict_src.data[2] = src_frame->data[2];
1405
1406         pict_src.linesize[0] = src_frame->linesize[0];
1407         pict_src.linesize[1] = src_frame->linesize[1];
1408         pict_src.linesize[2] = src_frame->linesize[2];
1409
1410         // FIXME use direct rendering
1411         av_picture_copy(&pict, &pict_src,
1412                         vp->pix_fmt, vp->width, vp->height);
1413 #else
1414         sws_flags = av_get_int(sws_opts, "sws_flags", NULL);
1415         is->img_convert_ctx = sws_getCachedContext(is->img_convert_ctx,
1416             vp->width, vp->height, vp->pix_fmt, vp->width, vp->height,
1417             dst_pix_fmt, sws_flags, NULL, NULL, NULL);
1418         if (is->img_convert_ctx == NULL) {
1419             fprintf(stderr, "Cannot initialize the conversion context\n");
1420             exit(1);
1421         }
1422         sws_scale(is->img_convert_ctx, src_frame->data, src_frame->linesize,
1423                   0, vp->height, pict.data, pict.linesize);
1424 #endif
1425         /* update the bitmap content */
1426         SDL_UnlockYUVOverlay(vp->bmp);
1427
1428         vp->pts = pts;
1429         vp->pos = pos;
1430
1431         /* now we can update the picture count */
1432         if (++is->pictq_windex == VIDEO_PICTURE_QUEUE_SIZE)
1433             is->pictq_windex = 0;
1434         SDL_LockMutex(is->pictq_mutex);
1435         vp->target_clock = compute_target_time(vp->pts, is);
1436
1437         is->pictq_size++;
1438         SDL_UnlockMutex(is->pictq_mutex);
1439     }
1440     return 0;
1441 }
1442
1443 /**
1444  * compute the exact PTS for the picture if it is omitted in the stream
1445  * @param pts1 the dts of the pkt / pts of the frame
1446  */
1447 static int output_picture2(VideoState *is, AVFrame *src_frame, double pts1, int64_t pos)
1448 {
1449     double frame_delay, pts;
1450
1451     pts = pts1;
1452
1453     if (pts != 0) {
1454         /* update video clock with pts, if present */
1455         is->video_clock = pts;
1456     } else {
1457         pts = is->video_clock;
1458     }
1459     /* update video clock for next frame */
1460     frame_delay = av_q2d(is->video_st->codec->time_base);
1461     /* for MPEG2, the frame can be repeated, so we update the
1462        clock accordingly */
1463     frame_delay += src_frame->repeat_pict * (frame_delay * 0.5);
1464     is->video_clock += frame_delay;
1465
1466     return queue_picture(is, src_frame, pts, pos);
1467 }
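/* Worked example (illustrative numbers): with a codec time_base of 1/25 the
   nominal frame_delay is 0.04 s; a frame with repeat_pict == 1 advances the
   video clock by 0.04 + 0.02 = 0.06 s, i.e. the "repeat one field" case. */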
1468
1469 static int get_video_frame(VideoState *is, AVFrame *frame, int64_t *pts, AVPacket *pkt)
1470 {
1471     int got_picture, i;
1472
1473     if (packet_queue_get(&is->videoq, pkt, 1) < 0)
1474         return -1;
1475
1476     if (pkt->data == flush_pkt.data) {
1477         avcodec_flush_buffers(is->video_st->codec);
1478
1479         SDL_LockMutex(is->pictq_mutex);
1480         // Make sure there are no long delay timers (ideally we should just flush the queue, but that's harder)
1481         for (i = 0; i < VIDEO_PICTURE_QUEUE_SIZE; i++) {
1482             is->pictq[i].target_clock= 0;
1483         }
1484         while (is->pictq_size && !is->videoq.abort_request) {
1485             SDL_CondWait(is->pictq_cond, is->pictq_mutex);
1486         }
1487         is->video_current_pos = -1;
1488         SDL_UnlockMutex(is->pictq_mutex);
1489
1490         init_pts_correction(&is->pts_ctx);
1491         is->frame_last_pts = AV_NOPTS_VALUE;
1492         is->frame_last_delay = 0;
1493         is->frame_timer = (double)av_gettime() / 1000000.0;
1494         is->skip_frames = 1;
1495         is->skip_frames_index = 0;
1496         return 0;
1497     }
1498
1499     avcodec_decode_video2(is->video_st->codec, frame, &got_picture, pkt);
1500
1501     if (got_picture) {
1502         if (decoder_reorder_pts == -1) {
1503             *pts = guess_correct_pts(&is->pts_ctx, frame->pkt_pts, frame->pkt_dts);
1504         } else if (decoder_reorder_pts) {
1505             *pts = frame->pkt_pts;
1506         } else {
1507             *pts = frame->pkt_dts;
1508         }
1509
1510         if (*pts == AV_NOPTS_VALUE) {
1511             *pts = 0;
1512         }
1513
1514         is->skip_frames_index += 1;
1515         if (is->skip_frames_index >= is->skip_frames) {
1516             is->skip_frames_index -= FFMAX(is->skip_frames, 1.0);
1517             return 1;
1518         }
1519
1520     }
1521     return 0;
1522 }
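/* get_video_frame() returns -1 if the packet queue was aborted, 1 when a
   decoded frame is ready to be displayed, and 0 otherwise (no picture was
   produced, a flush packet was handled, or the frame was dropped by the
   skip_frames mechanism). */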
1523
1524 #if CONFIG_AVFILTER
1525 static int configure_video_filters(AVFilterGraph *graph, VideoState *is, const char *vfilters)
1526 {
1527     char sws_flags_str[128];
1528     char buffersrc_args[256];
1529     int ret;
1530     AVFilterContext *filt_src = NULL, *filt_out = NULL, *filt_format;
1531     AVCodecContext *codec = is->video_st->codec;
1532
1533     snprintf(sws_flags_str, sizeof(sws_flags_str), "flags=%d", sws_flags);
1534     graph->scale_sws_opts = av_strdup(sws_flags_str);
1535
1536     snprintf(buffersrc_args, sizeof(buffersrc_args), "%d:%d:%d:%d:%d:%d:%d",
1537              codec->width, codec->height, codec->pix_fmt,
1538              is->video_st->time_base.num, is->video_st->time_base.den,
1539              codec->sample_aspect_ratio.num, codec->sample_aspect_ratio.den);
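    /* i.e. the buffer source is configured as
       width:height:pix_fmt:tb_num:tb_den:sar_num:sar_den, for example
       "1280:720:0:1:25:1:1" (illustrative values only). */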
1540
1541
1542     if ((ret = avfilter_graph_create_filter(&filt_src,
1543                                             avfilter_get_by_name("buffer"),
1544                                             "src", buffersrc_args, NULL,
1545                                             graph)) < 0)
1546         return ret;
1547     if ((ret = avfilter_graph_create_filter(&filt_out,
1548                                             avfilter_get_by_name("buffersink"),
1549                                             "out", NULL, NULL, graph)) < 0)
1550         return ret;
1551
1552     if ((ret = avfilter_graph_create_filter(&filt_format,
1553                                             avfilter_get_by_name("format"),
1554                                             "format", "yuv420p", NULL, graph)) < 0)
1555         return ret;
1556     if ((ret = avfilter_link(filt_format, 0, filt_out, 0)) < 0)
1557         return ret;
1558
1559
1560     if (vfilters) {
1561         AVFilterInOut *outputs = avfilter_inout_alloc();
1562         AVFilterInOut *inputs  = avfilter_inout_alloc();
1563
1564         outputs->name    = av_strdup("in");
1565         outputs->filter_ctx = filt_src;
1566         outputs->pad_idx = 0;
1567         outputs->next    = NULL;
1568
1569         inputs->name    = av_strdup("out");
1570         inputs->filter_ctx = filt_format;
1571         inputs->pad_idx = 0;
1572         inputs->next    = NULL;
1573
1574         if ((ret = avfilter_graph_parse(graph, vfilters, inputs, outputs, NULL)) < 0)
1575             return ret;
1576     } else {
1577         if ((ret = avfilter_link(filt_src, 0, filt_format, 0)) < 0)
1578             return ret;
1579     }
1580
1581     if ((ret = avfilter_graph_config(graph, NULL)) < 0)
1582         return ret;
1583
1584     is->in_video_filter  = filt_src;
1585     is->out_video_filter = filt_out;
1586
1587     if (codec->codec->capabilities & CODEC_CAP_DR1) {
1588         is->use_dr1 = 1;
1589         codec->get_buffer     = codec_get_buffer;
1590         codec->release_buffer = codec_release_buffer;
1591         codec->opaque         = &is->buffer_pool;
1592     }
1593
1594     return ret;
1595 }
1596
1597 #endif  /* CONFIG_AVFILTER */
1598
1599 static int video_thread(void *arg)
1600 {
1601     AVPacket pkt = { 0 };
1602     VideoState *is = arg;
1603     AVFrame *frame = avcodec_alloc_frame();
1604     int64_t pts_int;
1605     double pts;
1606     int ret;
1607
1608 #if CONFIG_AVFILTER
1609     AVFilterGraph *graph = avfilter_graph_alloc();
1610     AVFilterContext *filt_out = NULL, *filt_in = NULL;
1611     int64_t pos;
1612     int last_w = is->video_st->codec->width;
1613     int last_h = is->video_st->codec->height;
1614
1615     if ((ret = configure_video_filters(graph, is, vfilters)) < 0)
1616         goto the_end;
1617     filt_in  = is->in_video_filter;
1618     filt_out = is->out_video_filter;
1619 #endif
1620
1621     for (;;) {
1622 #if CONFIG_AVFILTER
1623         AVFilterBufferRef *picref;
1624         AVRational tb;
1625 #endif
1626         while (is->paused && !is->videoq.abort_request)
1627             SDL_Delay(10);
1628
1629         av_free_packet(&pkt);
1630
1631         ret = get_video_frame(is, frame, &pts_int, &pkt);
1632         if (ret < 0)
1633             goto the_end;
1634
1635         if (!ret)
1636             continue;
1637
1638 #if CONFIG_AVFILTER
1639         if (   last_w != is->video_st->codec->width
1640             || last_h != is->video_st->codec->height) {
1641             av_dlog(NULL, "Changing size %dx%d -> %dx%d\n", last_w, last_h,
1642                     is->video_st->codec->width, is->video_st->codec->height);
1643             avfilter_graph_free(&graph);
1644             graph = avfilter_graph_alloc();
1645             if ((ret = configure_video_filters(graph, is, vfilters)) < 0)
1646                 goto the_end;
1647             filt_in  = is->in_video_filter;
1648             filt_out = is->out_video_filter;
1649             last_w = is->video_st->codec->width;
1650             last_h = is->video_st->codec->height;
1651         }
1652
1653         frame->pts = pts_int;
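             /* With direct rendering (DR1) the decoder wrote into one of our
                pooled buffers, so wrap the data in an AVFilterBufferRef and
                pass it to the buffer source without copying; the extra
                refcount keeps the buffer alive while the filter graph still
                uses it.  Without DR1, the frame is handed to
                av_buffersrc_write_frame() instead. */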
1654         if (is->use_dr1) {
1655             FrameBuffer      *buf = frame->opaque;
1656             AVFilterBufferRef *fb = avfilter_get_video_buffer_ref_from_arrays(
1657                                         frame->data, frame->linesize,
1658                                         AV_PERM_READ | AV_PERM_PRESERVE,
1659                                         frame->width, frame->height,
1660                                         frame->format);
1661
1662             avfilter_copy_frame_props(fb, frame);
1663             fb->buf->priv           = buf;
1664             fb->buf->free           = filter_release_buffer;
1665
1666             buf->refcount++;
1667             av_buffersrc_buffer(filt_in, fb);
1668
1669         } else
1670             av_buffersrc_write_frame(filt_in, frame);
1671
1672         while (ret >= 0) {
1673             ret = av_buffersink_read(filt_out, &picref);
1674             if (ret < 0) {
1675                 ret = 0;
1676                 break;
1677             }
1678
1679             avfilter_copy_buf_props(frame, picref);
1680
1681             pts_int = picref->pts;
1682             tb      = filt_out->inputs[0]->time_base;
1683             pos     = picref->pos;
1684             frame->opaque = picref;
1685
1686             if (av_cmp_q(tb, is->video_st->time_base)) {
1687                 av_unused int64_t pts1 = pts_int;
1688                 pts_int = av_rescale_q(pts_int, tb, is->video_st->time_base);
1689                 av_dlog(NULL, "video_thread(): "
1690                         "tb:%d/%d pts:%"PRId64" -> tb:%d/%d pts:%"PRId64"\n",
1691                         tb.num, tb.den, pts1,
1692                         is->video_st->time_base.num, is->video_st->time_base.den, pts_int);
1693             }
1694             pts = pts_int * av_q2d(is->video_st->time_base);
1695             ret = output_picture2(is, frame, pts, pos);
1696         }
1697 #else
1698         pts = pts_int * av_q2d(is->video_st->time_base);
1699         ret = output_picture2(is, frame, pts,  pkt.pos);
1700 #endif
1701
1702         if (ret < 0)
1703             goto the_end;
1704
1705         if (step)
1706             if (cur_stream)
1707                 stream_pause(cur_stream);
1708     }
1709  the_end:
1710 #if CONFIG_AVFILTER
1711     av_freep(&vfilters);
1712     avfilter_graph_free(&graph);
1713 #endif
1714     av_free_packet(&pkt);
1715     av_free(frame);
1716     return 0;
1717 }
1718
1719 static int subtitle_thread(void *arg)
1720 {
1721     VideoState *is = arg;
1722     SubPicture *sp;
1723     AVPacket pkt1, *pkt = &pkt1;
1724     int got_subtitle;
1725     double pts;
1726     int i, j;
1727     int r, g, b, y, u, v, a;
1728
1729     for (;;) {
1730         while (is->paused && !is->subtitleq.abort_request) {
1731             SDL_Delay(10);
1732         }
1733         if (packet_queue_get(&is->subtitleq, pkt, 1) < 0)
1734             break;
1735
1736         if (pkt->data == flush_pkt.data) {
1737             avcodec_flush_buffers(is->subtitle_st->codec);
1738             continue;
1739         }
1740         SDL_LockMutex(is->subpq_mutex);
1741         while (is->subpq_size >= SUBPICTURE_QUEUE_SIZE &&
1742                !is->subtitleq.abort_request) {
1743             SDL_CondWait(is->subpq_cond, is->subpq_mutex);
1744         }
1745         SDL_UnlockMutex(is->subpq_mutex);
1746
1747         if (is->subtitleq.abort_request)
1748             return 0;
1749
1750         sp = &is->subpq[is->subpq_windex];
1751
1752         /* NOTE: pts is the PTS of the _first_ picture beginning in
1753            this packet, if any */
1754         pts = 0;
1755         if (pkt->pts != AV_NOPTS_VALUE)
1756             pts = av_q2d(is->subtitle_st->time_base) * pkt->pts;
1757
1758         avcodec_decode_subtitle2(is->subtitle_st->codec, &sp->sub,
1759                                  &got_subtitle, pkt);
1760
1761         if (got_subtitle && sp->sub.format == 0) {
1762             sp->pts = pts;
1763
1764             for (i = 0; i < sp->sub.num_rects; i++)
1765             {
1766                 for (j = 0; j < sp->sub.rects[i]->nb_colors; j++)
1767                 {
1768                     RGBA_IN(r, g, b, a, (uint32_t*)sp->sub.rects[i]->pict.data[1] + j);
1769                     y = RGB_TO_Y_CCIR(r, g, b);
1770                     u = RGB_TO_U_CCIR(r, g, b, 0);
1771                     v = RGB_TO_V_CCIR(r, g, b, 0);
1772                     YUVA_OUT((uint32_t*)sp->sub.rects[i]->pict.data[1] + j, y, u, v, a);
1773                 }
1774             }
1775
1776             /* now we can update the picture count */
1777             if (++is->subpq_windex == SUBPICTURE_QUEUE_SIZE)
1778                 is->subpq_windex = 0;
1779             SDL_LockMutex(is->subpq_mutex);
1780             is->subpq_size++;
1781             SDL_UnlockMutex(is->subpq_mutex);
1782         }
1783         av_free_packet(pkt);
1784     }
1785     return 0;
1786 }
1787
1788 /* copy decoded samples into the ring buffer used by the audio visualization */
1789 static void update_sample_display(VideoState *is, short *samples, int samples_size)
1790 {
1791     int size, len;
1792
1793     size = samples_size / sizeof(short);
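         /* sample_array is a ring buffer: copy in chunks and wrap the write
            index once it reaches SAMPLE_ARRAY_SIZE */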
1794     while (size > 0) {
1795         len = SAMPLE_ARRAY_SIZE - is->sample_array_index;
1796         if (len > size)
1797             len = size;
1798         memcpy(is->sample_array + is->sample_array_index, samples, len * sizeof(short));
1799         samples += len;
1800         is->sample_array_index += len;
1801         if (is->sample_array_index >= SAMPLE_ARRAY_SIZE)
1802             is->sample_array_index = 0;
1803         size -= len;
1804     }
1805 }
1806
1807 /* return the new audio buffer size (samples can be added or removed
1808    to get better sync when the video or external clock is the master) */
1809 static int synchronize_audio(VideoState *is, short *samples,
1810                              int samples_size1, double pts)
1811 {
1812     int n, samples_size;
1813     double ref_clock;
1814
1815     n = is->sdl_channels * av_get_bytes_per_sample(is->sdl_sample_fmt);
1816     samples_size = samples_size1;
1817
1818     /* if not master, then we try to remove or add samples to correct the clock */
1819     if (((is->av_sync_type == AV_SYNC_VIDEO_MASTER && is->video_st) ||
1820          is->av_sync_type == AV_SYNC_EXTERNAL_CLOCK)) {
1821         double diff, avg_diff;
1822         int wanted_size, min_size, max_size, nb_samples;
1823
1824         ref_clock = get_master_clock(is);
1825         diff = get_audio_clock(is) - ref_clock;
1826
1827         if (diff < AV_NOSYNC_THRESHOLD) {
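                 /* audio_diff_cum is an exponentially weighted sum of recent
                    A-V differences (cum = diff + coef * cum); multiplying it by
                    (1 - coef) below yields the weighted average once enough
                    samples (AUDIO_DIFF_AVG_NB) have been accumulated */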
1828             is->audio_diff_cum = diff + is->audio_diff_avg_coef * is->audio_diff_cum;
1829             if (is->audio_diff_avg_count < AUDIO_DIFF_AVG_NB) {
1830                 /* not enough measurements yet for a reliable estimate */
1831                 is->audio_diff_avg_count++;
1832             } else {
1833                 /* estimate the A-V difference */
1834                 avg_diff = is->audio_diff_cum * (1.0 - is->audio_diff_avg_coef);
1835
1836                 if (fabs(avg_diff) >= is->audio_diff_threshold) {
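                         /* convert the clock error into a byte correction:
                            diff seconds * sample_rate gives sample frames,
                            times n gives bytes; the result is clamped to within
                            SAMPLE_CORRECTION_PERCENT_MAX percent of the buffer */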
1837                     wanted_size = samples_size + ((int)(diff * is->audio_st->codec->sample_rate) * n);
1838                     nb_samples = samples_size / n;
1839
1840                     min_size = ((nb_samples * (100 - SAMPLE_CORRECTION_PERCENT_MAX)) / 100) * n;
1841                     max_size = ((nb_samples * (100 + SAMPLE_CORRECTION_PERCENT_MAX)) / 100) * n;
1842                     if (wanted_size < min_size)
1843                         wanted_size = min_size;
1844                     else if (wanted_size > max_size)
1845                         wanted_size = max_size;
1846
1847                     /* add or remove samples to correct the sync */
1848                     if (wanted_size < samples_size) {
1849                         /* remove samples */
1850                         samples_size = wanted_size;
1851                     } else if (wanted_size > samples_size) {
1852                         uint8_t *samples_end, *q;
1853                         int nb;
1854
1855                         /* add samples */
1856                         nb = (wanted_size - samples_size);
1857                         samples_end = (uint8_t *)samples + samples_size - n;
1858                         q = samples_end + n;
1859                         while (nb > 0) {
1860                             memcpy(q, samples_end, n);
1861                             q += n;
1862                             nb -= n;
1863                         }
1864                         samples_size = wanted_size;
1865                     }
1866                 }
1867                 av_dlog(NULL, "diff=%f adiff=%f sample_diff=%d apts=%0.3f vpts=%0.3f %f\n",
1868                         diff, avg_diff, samples_size - samples_size1,
1869                         is->audio_clock, is->video_clock, is->audio_diff_threshold);
1870             }
1871         } else {
1872             /* too big a difference: may be initial PTS errors, so
1873                reset the A-V filter */
1874             is->audio_diff_avg_count = 0;
1875             is->audio_diff_cum       = 0;
1876         }
1877     }
1878
1879     return samples_size;
1880 }
1881
1882 /* decode one audio frame and return its uncompressed size */
1883 static int audio_decode_frame(VideoState *is, double *pts_ptr)
1884 {
1885     AVPacket *pkt_temp = &is->audio_pkt_temp;
1886     AVPacket *pkt = &is->audio_pkt;
1887     AVCodecContext *dec = is->audio_st->codec;
1888     int n, len1, data_size, got_frame;
1889     double pts;
1890     int new_packet = 0;
1891     int flush_complete = 0;
1892
1893     for (;;) {
1894         /* NOTE: the audio packet can contain several frames */
1895         while (pkt_temp->size > 0 || (!pkt_temp->data && new_packet)) {
1896             int resample_changed, audio_resample;
1897
1898             if (!is->frame) {
1899                 if (!(is->frame = avcodec_alloc_frame()))
1900                     return AVERROR(ENOMEM);
1901             } else
1902                 avcodec_get_frame_defaults(is->frame);
1903
1904             if (flush_complete)
1905                 break;
1906             new_packet = 0;
1907             len1 = avcodec_decode_audio4(dec, is->frame, &got_frame, pkt_temp);
1908             if (len1 < 0) {
1909                 /* if error, we skip the frame */
1910                 pkt_temp->size = 0;
1911                 break;
1912             }
1913
1914             pkt_temp->data += len1;
1915             pkt_temp->size -= len1;
1916
1917             if (!got_frame) {
1918                 /* stop sending empty packets if the decoder is finished */
1919                 if (!pkt_temp->data && dec->codec->capabilities & CODEC_CAP_DELAY)
1920                     flush_complete = 1;
1921                 continue;
1922             }
1923             data_size = av_samples_get_buffer_size(NULL, dec->channels,
1924                                                    is->frame->nb_samples,
1925                                                    dec->sample_fmt, 1);
1926
1927             audio_resample = dec->sample_fmt     != is->sdl_sample_fmt ||
1928                              dec->channel_layout != is->sdl_channel_layout;
1929
1930             resample_changed = dec->sample_fmt     != is->resample_sample_fmt ||
1931                                dec->channel_layout != is->resample_channel_layout;
1932
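                 /* (re)configure libavresample whenever the decoder output no
                    longer matches the SDL output format, or the decoder format
                    changed since the resampler was last set up */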
1933             if ((!is->avr && audio_resample) || resample_changed) {
1934                 int ret;
1935                 if (is->avr)
1936                     avresample_close(is->avr);
1937                 else if (audio_resample) {
1938                     is->avr = avresample_alloc_context();
1939                     if (!is->avr) {
1940                         fprintf(stderr, "error allocating AVAudioResampleContext\n");
1941                         break;
1942                     }
1943                 }
1944                 if (audio_resample) {
1945                     av_opt_set_int(is->avr, "in_channel_layout",  dec->channel_layout,    0);
1946                     av_opt_set_int(is->avr, "in_sample_fmt",      dec->sample_fmt,        0);
1947                     av_opt_set_int(is->avr, "in_sample_rate",     dec->sample_rate,       0);
1948                     av_opt_set_int(is->avr, "out_channel_layout", is->sdl_channel_layout, 0);
1949                     av_opt_set_int(is->avr, "out_sample_fmt",     is->sdl_sample_fmt,     0);
1950                     av_opt_set_int(is->avr, "out_sample_rate",    dec->sample_rate,       0);
1951
1952                     if ((ret = avresample_open(is->avr)) < 0) {
1953                         fprintf(stderr, "error initializing libavresample\n");
1954                         break;
1955                     }
1956                 }
1957                 is->resample_sample_fmt     = dec->sample_fmt;
1958                 is->resample_channel_layout = dec->channel_layout;
1959             }
1960
1961             if (audio_resample) {
1962                 void *tmp_out;
1963                 int out_samples, out_size, out_linesize;
1964                 int osize      = av_get_bytes_per_sample(is->sdl_sample_fmt);
1965                 int nb_samples = is->frame->nb_samples;
1966
1967                 out_size = av_samples_get_buffer_size(&out_linesize,
1968                                                       is->sdl_channels,
1969                                                       nb_samples,
1970                                                       is->sdl_sample_fmt, 0);
1971                 tmp_out = av_realloc(is->audio_buf1, out_size);
1972                 if (!tmp_out)
1973                     return AVERROR(ENOMEM);
1974                 is->audio_buf1 = tmp_out;
1975
1976                 out_samples = avresample_convert(is->avr,
1977                                                  (void **)&is->audio_buf1,
1978                                                  out_linesize, nb_samples,
1979                                                  (void **)is->frame->data,
1980                                                  is->frame->linesize[0],
1981                                                  is->frame->nb_samples);
1982                 if (out_samples < 0) {
1983                     fprintf(stderr, "avresample_convert() failed\n");
1984                     break;
1985                 }
1986                 is->audio_buf = is->audio_buf1;
1987                 data_size = out_samples * osize * is->sdl_channels;
1988             } else {
1989                 is->audio_buf = is->frame->data[0];
1990             }
1991
1992             /* derive the pts from the running audio clock and advance the clock by this frame's duration */
1993             pts = is->audio_clock;
1994             *pts_ptr = pts;
1995             n = is->sdl_channels * av_get_bytes_per_sample(is->sdl_sample_fmt);
1996             is->audio_clock += (double)data_size /
1997                 (double)(n * dec->sample_rate);
1998 #ifdef DEBUG
1999             {
2000                 static double last_clock;
2001                 printf("audio: delay=%0.3f clock=%0.3f pts=%0.3f\n",
2002                        is->audio_clock - last_clock,
2003                        is->audio_clock, pts);
2004                 last_clock = is->audio_clock;
2005             }
2006 #endif
2007             return data_size;
2008         }
2009
2010         /* free the current packet */
2011         if (pkt->data)
2012             av_free_packet(pkt);
2013         memset(pkt_temp, 0, sizeof(*pkt_temp));
2014
2015         if (is->paused || is->audioq.abort_request) {
2016             return -1;
2017         }
2018
2019         /* read next packet */
2020         if ((new_packet = packet_queue_get(&is->audioq, pkt, 1)) < 0)
2021             return -1;
2022
2023         if (pkt->data == flush_pkt.data) {
2024             avcodec_flush_buffers(dec);
2025             flush_complete = 0;
2026         }
2027
2028         *pkt_temp = *pkt;
2029
2030         /* if the packet has a pts, update the audio clock with it */
2031         if (pkt->pts != AV_NOPTS_VALUE) {
2032             is->audio_clock = av_q2d(is->audio_st->time_base)*pkt->pts;
2033         }
2034     }
2035 }
2036
2037 /* prepare a new audio buffer */
2038 static void sdl_audio_callback(void *opaque, Uint8 *stream, int len)
2039 {
2040     VideoState *is = opaque;
2041     int audio_size, len1;
2042     double pts;
2043
2044     audio_callback_time = av_gettime();
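         /* keep filling the SDL stream: whenever the local buffer is exhausted,
            decode another audio frame; on error, substitute silence so the
            callback always delivers len bytes */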
2045
2046     while (len > 0) {
2047         if (is->audio_buf_index >= is->audio_buf_size) {
2048            audio_size = audio_decode_frame(is, &pts);
2049            if (audio_size < 0) {
2050                 /* if error, just output silence */
2051                is->audio_buf      = is->silence_buf;
2052                is->audio_buf_size = sizeof(is->silence_buf);
2053            } else {
2054                if (is->show_audio)
2055                    update_sample_display(is, (int16_t *)is->audio_buf, audio_size);
2056                audio_size = synchronize_audio(is, (int16_t *)is->audio_buf, audio_size,
2057                                               pts);
2058                is->audio_buf_size = audio_size;
2059            }
2060            is->audio_buf_index = 0;
2061         }
2062         len1 = is->audio_buf_size - is->audio_buf_index;
2063         if (len1 > len)
2064             len1 = len;
2065         memcpy(stream, (uint8_t *)is->audio_buf + is->audio_buf_index, len1);
2066         len -= len1;
2067         stream += len1;
2068         is->audio_buf_index += len1;
2069     }
2070 }
2071
2072 /* open a given stream. Return 0 if OK */
2073 static int stream_component_open(VideoState *is, int stream_index)
2074 {
2075     AVFormatContext *ic = is->ic;
2076     AVCodecContext *avctx;
2077     AVCodec *codec;
2078     SDL_AudioSpec wanted_spec, spec;
2079     AVDictionary *opts;
2080     AVDictionaryEntry *t = NULL;
2081
2082     if (stream_index < 0 || stream_index >= ic->nb_streams)
2083         return -1;
2084     avctx = ic->streams[stream_index]->codec;
2085
2086     opts = filter_codec_opts(codec_opts, avctx->codec_id, ic, ic->streams[stream_index], NULL);
2087
2088     codec = avcodec_find_decoder(avctx->codec_id);
2089     avctx->debug_mv          = debug_mv;
2090     avctx->debug             = debug;
2091     avctx->workaround_bugs   = workaround_bugs;
2092     avctx->idct_algo         = idct;
2093     avctx->skip_frame        = skip_frame;
2094     avctx->skip_idct         = skip_idct;
2095     avctx->skip_loop_filter  = skip_loop_filter;
2096     avctx->error_concealment = error_concealment;
2097
2098     if (fast)   avctx->flags2 |= CODEC_FLAG2_FAST;
2099
2100     if (!av_dict_get(opts, "threads", NULL, 0))
2101         av_dict_set(&opts, "threads", "auto", 0);
2102     if (!codec ||
2103         avcodec_open2(avctx, codec, &opts) < 0)
2104         return -1;
2105     if ((t = av_dict_get(opts, "", NULL, AV_DICT_IGNORE_SUFFIX))) {
2106         av_log(NULL, AV_LOG_ERROR, "Option %s not found.\n", t->key);
2107         return AVERROR_OPTION_NOT_FOUND;
2108     }
2109
2110     /* prepare audio output */
2111     if (avctx->codec_type == AVMEDIA_TYPE_AUDIO) {
2112         wanted_spec.freq = avctx->sample_rate;
2113         wanted_spec.format = AUDIO_S16SYS;
2114
2115         if (!avctx->channel_layout)
2116             avctx->channel_layout = av_get_default_channel_layout(avctx->channels);
2117         if (!avctx->channel_layout) {
2118             fprintf(stderr, "unable to guess channel layout\n");
2119             return -1;
2120         }
2121         if (avctx->channels == 1)
2122             is->sdl_channel_layout = AV_CH_LAYOUT_MONO;
2123         else
2124             is->sdl_channel_layout = AV_CH_LAYOUT_STEREO;
2125         is->sdl_channels = av_get_channel_layout_nb_channels(is->sdl_channel_layout);
2126
2127         wanted_spec.channels = is->sdl_channels;
2128         wanted_spec.silence = 0;
2129         wanted_spec.samples = SDL_AUDIO_BUFFER_SIZE;
2130         wanted_spec.callback = sdl_audio_callback;
2131         wanted_spec.userdata = is;
2132         if (SDL_OpenAudio(&wanted_spec, &spec) < 0) {
2133             fprintf(stderr, "SDL_OpenAudio: %s\n", SDL_GetError());
2134             return -1;
2135         }
2136         is->audio_hw_buf_size = spec.size;
2137         is->sdl_sample_fmt          = AV_SAMPLE_FMT_S16;
2138         is->resample_sample_fmt     = is->sdl_sample_fmt;
2139         is->resample_channel_layout = is->sdl_channel_layout;
2140     }
2141
2142     ic->streams[stream_index]->discard = AVDISCARD_DEFAULT;
2143     switch (avctx->codec_type) {
2144     case AVMEDIA_TYPE_AUDIO:
2145         is->audio_stream = stream_index;
2146         is->audio_st = ic->streams[stream_index];
2147         is->audio_buf_size  = 0;
2148         is->audio_buf_index = 0;
2149
2150         /* init averaging filter */
2151         is->audio_diff_avg_coef  = exp(log(0.01) / AUDIO_DIFF_AVG_NB);
2152         is->audio_diff_avg_count = 0;
2153         /* since we do not have a precise enough audio FIFO fullness measure,
2154            we correct audio sync only if the error is larger than this threshold */
2155         is->audio_diff_threshold = 2.0 * SDL_AUDIO_BUFFER_SIZE / avctx->sample_rate;
2156
2157         memset(&is->audio_pkt, 0, sizeof(is->audio_pkt));
2158         packet_queue_init(&is->audioq);
2159         SDL_PauseAudio(0);
2160         break;
2161     case AVMEDIA_TYPE_VIDEO:
2162         is->video_stream = stream_index;
2163         is->video_st = ic->streams[stream_index];
2164
2165         packet_queue_init(&is->videoq);
2166         is->video_tid = SDL_CreateThread(video_thread, is);
2167         break;
2168     case AVMEDIA_TYPE_SUBTITLE:
2169         is->subtitle_stream = stream_index;
2170         is->subtitle_st = ic->streams[stream_index];
2171         packet_queue_init(&is->subtitleq);
2172
2173         is->subtitle_tid = SDL_CreateThread(subtitle_thread, is);
2174         break;
2175     default:
2176         break;
2177     }
2178     return 0;
2179 }
2180
2181 static void stream_component_close(VideoState *is, int stream_index)
2182 {
2183     AVFormatContext *ic = is->ic;
2184     AVCodecContext *avctx;
2185
2186     if (stream_index < 0 || stream_index >= ic->nb_streams)
2187         return;
2188     avctx = ic->streams[stream_index]->codec;
2189
2190     switch (avctx->codec_type) {
2191     case AVMEDIA_TYPE_AUDIO:
2192         packet_queue_abort(&is->audioq);
2193
2194         SDL_CloseAudio();
2195
2196         packet_queue_end(&is->audioq);
2197         av_free_packet(&is->audio_pkt);
2198         if (is->avr)
2199             avresample_free(&is->avr);
2200         av_freep(&is->audio_buf1);
2201         is->audio_buf = NULL;
2202         av_freep(&is->frame);
2203
2204         if (is->rdft) {
2205             av_rdft_end(is->rdft);
2206             av_freep(&is->rdft_data);
2207             is->rdft = NULL;
2208             is->rdft_bits = 0;
2209         }
2210         break;
2211     case AVMEDIA_TYPE_VIDEO:
2212         packet_queue_abort(&is->videoq);
2213
2214         /* note: we also signal this condition to make sure we unblock the
2215            video thread in all cases */
2216         SDL_LockMutex(is->pictq_mutex);
2217         SDL_CondSignal(is->pictq_cond);
2218         SDL_UnlockMutex(is->pictq_mutex);
2219
2220         SDL_WaitThread(is->video_tid, NULL);
2221
2222         packet_queue_end(&is->videoq);
2223         break;
2224     case AVMEDIA_TYPE_SUBTITLE:
2225         packet_queue_abort(&is->subtitleq);
2226
2227         /* note: we also signal this condition to make sure we unblock the
2228            subtitle thread in all cases */
2229         SDL_LockMutex(is->subpq_mutex);
2230         is->subtitle_stream_changed = 1;
2231
2232         SDL_CondSignal(is->subpq_cond);
2233         SDL_UnlockMutex(is->subpq_mutex);
2234
2235         SDL_WaitThread(is->subtitle_tid, NULL);
2236
2237         packet_queue_end(&is->subtitleq);
2238         break;
2239     default:
2240         break;
2241     }
2242
2243     ic->streams[stream_index]->discard = AVDISCARD_ALL;
2244     avcodec_close(avctx);
2245     free_buffer_pool(&is->buffer_pool);
2246     switch (avctx->codec_type) {
2247     case AVMEDIA_TYPE_AUDIO:
2248         is->audio_st = NULL;
2249         is->audio_stream = -1;
2250         break;
2251     case AVMEDIA_TYPE_VIDEO:
2252         is->video_st = NULL;
2253         is->video_stream = -1;
2254         break;
2255     case AVMEDIA_TYPE_SUBTITLE:
2256         is->subtitle_st = NULL;
2257         is->subtitle_stream = -1;
2258         break;
2259     default:
2260         break;
2261     }
2262 }
2263
2264 /* since we have only one decoding thread, we can use a global
2265    variable instead of a thread local variable */
2266 static VideoState *global_video_state;
2267
2268 static int decode_interrupt_cb(void *ctx)
2269 {
2270     return global_video_state && global_video_state->abort_request;
2271 }
2272
2273 /* this thread gets the stream from the disk or the network */
2274 static int decode_thread(void *arg)
2275 {
2276     VideoState *is = arg;
2277     AVFormatContext *ic = NULL;
2278     int err, i, ret;
2279     int st_index[AVMEDIA_TYPE_NB];
2280     AVPacket pkt1, *pkt = &pkt1;
2281     int eof = 0;
2282     int pkt_in_play_range = 0;
2283     AVDictionaryEntry *t;
2284     AVDictionary **opts;
2285     int orig_nb_streams;
2286
2287     memset(st_index, -1, sizeof(st_index));
2288     is->video_stream = -1;
2289     is->audio_stream = -1;
2290     is->subtitle_stream = -1;
2291
2292     global_video_state = is;
2293
2294     ic = avformat_alloc_context();
2295     ic->interrupt_callback.callback = decode_interrupt_cb;
2296     err = avformat_open_input(&ic, is->filename, is->iformat, &format_opts);
2297     if (err < 0) {
2298         print_error(is->filename, err);
2299         ret = -1;
2300         goto fail;
2301     }
2302     if ((t = av_dict_get(format_opts, "", NULL, AV_DICT_IGNORE_SUFFIX))) {
2303         av_log(NULL, AV_LOG_ERROR, "Option %s not found.\n", t->key);
2304         ret = AVERROR_OPTION_NOT_FOUND;
2305         goto fail;
2306     }
2307     is->ic = ic;
2308
2309     if (genpts)
2310         ic->flags |= AVFMT_FLAG_GENPTS;
2311
2312     opts = setup_find_stream_info_opts(ic, codec_opts);
2313     orig_nb_streams = ic->nb_streams;
2314
2315     err = avformat_find_stream_info(ic, opts);
2316     if (err < 0) {
2317         fprintf(stderr, "%s: could not find codec parameters\n", is->filename);
2318         ret = -1;
2319         goto fail;
2320     }
2321     for (i = 0; i < orig_nb_streams; i++)
2322         av_dict_free(&opts[i]);
2323     av_freep(&opts);
2324
2325     if (ic->pb)
2326         ic->pb->eof_reached = 0; // FIXME hack, avplay should probably not use url_feof() to test for the end
2327
2328     if (seek_by_bytes < 0)
2329         seek_by_bytes = !!(ic->iformat->flags & AVFMT_TS_DISCONT);
2330
2331     /* if seeking requested, we execute it */
2332     if (start_time != AV_NOPTS_VALUE) {
2333         int64_t timestamp;
2334
2335         timestamp = start_time;
2336         /* add the stream start time */
2337         if (ic->start_time != AV_NOPTS_VALUE)
2338             timestamp += ic->start_time;
2339         ret = avformat_seek_file(ic, -1, INT64_MIN, timestamp, INT64_MAX, 0);
2340         if (ret < 0) {
2341             fprintf(stderr, "%s: could not seek to position %0.3f\n",
2342                     is->filename, (double)timestamp / AV_TIME_BASE);
2343         }
2344     }
2345
2346     for (i = 0; i < ic->nb_streams; i++)
2347         ic->streams[i]->discard = AVDISCARD_ALL;
2348     if (!video_disable)
2349         st_index[AVMEDIA_TYPE_VIDEO] =
2350             av_find_best_stream(ic, AVMEDIA_TYPE_VIDEO,
2351                                 wanted_stream[AVMEDIA_TYPE_VIDEO], -1, NULL, 0);
2352     if (!audio_disable)
2353         st_index[AVMEDIA_TYPE_AUDIO] =
2354             av_find_best_stream(ic, AVMEDIA_TYPE_AUDIO,
2355                                 wanted_stream[AVMEDIA_TYPE_AUDIO],
2356                                 st_index[AVMEDIA_TYPE_VIDEO],
2357                                 NULL, 0);
2358     if (!video_disable)
2359         st_index[AVMEDIA_TYPE_SUBTITLE] =
2360             av_find_best_stream(ic, AVMEDIA_TYPE_SUBTITLE,
2361                                 wanted_stream[AVMEDIA_TYPE_SUBTITLE],
2362                                 (st_index[AVMEDIA_TYPE_AUDIO] >= 0 ?
2363                                  st_index[AVMEDIA_TYPE_AUDIO] :
2364                                  st_index[AVMEDIA_TYPE_VIDEO]),
2365                                 NULL, 0);
2366     if (show_status) {
2367         av_dump_format(ic, 0, is->filename, 0);
2368     }
2369
2370     /* open the streams */
2371     if (st_index[AVMEDIA_TYPE_AUDIO] >= 0) {
2372         stream_component_open(is, st_index[AVMEDIA_TYPE_AUDIO]);
2373     }
2374
2375     ret = -1;
2376     if (st_index[AVMEDIA_TYPE_VIDEO] >= 0) {
2377         ret = stream_component_open(is, st_index[AVMEDIA_TYPE_VIDEO]);
2378     }
2379     is->refresh_tid = SDL_CreateThread(refresh_thread, is);
2380     if (ret < 0) {
2381         if (!display_disable)
2382             is->show_audio = 2;
2383     }
2384
2385     if (st_index[AVMEDIA_TYPE_SUBTITLE] >= 0) {
2386         stream_component_open(is, st_index[AVMEDIA_TYPE_SUBTITLE]);
2387     }
2388
2389     if (is->video_stream < 0 && is->audio_stream < 0) {
2390         fprintf(stderr, "%s: could not open codecs\n", is->filename);
2391         ret = -1;
2392         goto fail;
2393     }
2394
2395     for (;;) {
2396         if (is->abort_request)
2397             break;
2398         if (is->paused != is->last_paused) {
2399             is->last_paused = is->paused;
2400             if (is->paused)
2401                 is->read_pause_return = av_read_pause(ic);
2402             else
2403                 av_read_play(ic);
2404         }
2405 #if CONFIG_RTSP_DEMUXER
2406         if (is->paused && !strcmp(ic->iformat->name, "rtsp")) {
2407             /* wait 10 ms to avoid trying to get another packet */
2408             /* XXX: horrible */
2409             SDL_Delay(10);
2410             continue;
2411         }
2412 #endif
2413         if (is->seek_req) {
2414             int64_t seek_target = is->seek_pos;
2415             int64_t seek_min    = is->seek_rel > 0 ? seek_target - is->seek_rel + 2: INT64_MIN;
2416             int64_t seek_max    = is->seek_rel < 0 ? seek_target - is->seek_rel - 2: INT64_MAX;
2417 // FIXME the +-2 is because rounding is not done in the correct direction when generating
2418 //      the seek_pos/seek_rel variables
2419
2420             ret = avformat_seek_file(is->ic, -1, seek_min, seek_target, seek_max, is->seek_flags);
2421             if (ret < 0) {
2422                 fprintf(stderr, "%s: error while seeking\n", is->ic->filename);
2423             } else {
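                     /* after a successful seek, flush each packet queue and
                        queue the flush_pkt sentinel so the decoding threads
                        drop their buffered data (avcodec_flush_buffers) */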
2424                 if (is->audio_stream >= 0) {
2425                     packet_queue_flush(&is->audioq);
2426                     packet_queue_put(&is->audioq, &flush_pkt);
2427                 }
2428                 if (is->subtitle_stream >= 0) {
2429                     packet_queue_flush(&is->subtitleq);
2430                     packet_queue_put(&is->subtitleq, &flush_pkt);
2431                 }
2432                 if (is->video_stream >= 0) {
2433                     packet_queue_flush(&is->videoq);
2434                     packet_queue_put(&is->videoq, &flush_pkt);
2435                 }
2436             }
2437             is->seek_req = 0;
2438             eof = 0;
2439         }
2440
2441         /* if the queues are full, no need to read more */
2442         if (!infinite_buffer &&
2443               (is->audioq.size + is->videoq.size + is->subtitleq.size > MAX_QUEUE_SIZE
2444             || (   (is->audioq   .size  > MIN_AUDIOQ_SIZE || is->audio_stream < 0)
2445                 && (is->videoq   .nb_packets > MIN_FRAMES || is->video_stream < 0)
2446                 && (is->subtitleq.nb_packets > MIN_FRAMES || is->subtitle_stream < 0)))) {
2447             /* wait 10 ms */
2448             SDL_Delay(10);
2449             continue;
2450         }
2451         if (eof) {
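             /* at end of file, queue empty packets so decoders with
                CODEC_CAP_DELAY can drain their buffered frames; once all
                queues are empty, either loop from the start or exit if
                -autoexit was given */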
2452             if (is->video_stream >= 0) {
2453                 av_init_packet(pkt);
2454                 pkt->data = NULL;
2455                 pkt->size = 0;
2456                 pkt->stream_index = is->video_stream;
2457                 packet_queue_put(&is->videoq, pkt);
2458             }
2459             if (is->audio_stream >= 0 &&
2460                 is->audio_st->codec->codec->capabilities & CODEC_CAP_DELAY) {
2461                 av_init_packet(pkt);
2462                 pkt->data = NULL;
2463                 pkt->size = 0;
2464                 pkt->stream_index = is->audio_stream;
2465                 packet_queue_put(&is->audioq, pkt);
2466             }
2467             SDL_Delay(10);
2468             if (is->audioq.size + is->videoq.size + is->subtitleq.size == 0) {
2469                 if (loop != 1 && (!loop || --loop)) {
2470                     stream_seek(cur_stream, start_time != AV_NOPTS_VALUE ? start_time : 0, 0, 0);
2471                 } else if (autoexit) {
2472                     ret = AVERROR_EOF;
2473                     goto fail;
2474                 }
2475             }
2476             continue;
2477         }
2478         ret = av_read_frame(ic, pkt);
2479         if (ret < 0) {
2480             if (ret == AVERROR_EOF || (ic->pb && ic->pb->eof_reached))
2481                 eof = 1;
2482             if (ic->pb && ic->pb->error)
2483                 break;
2484             SDL_Delay(100); /* wait for user event */
2485             continue;
2486         }
2487         /* check if the packet is in the play range specified by the user; queue it if so, otherwise discard it */
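             /* in-range means: packet time (pts minus stream start, in seconds)
                minus the requested start_time must not exceed the requested
                duration; start_time and duration are in microseconds */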
2488         pkt_in_play_range = duration == AV_NOPTS_VALUE ||
2489                 (pkt->pts - ic->streams[pkt->stream_index]->start_time) *
2490                 av_q2d(ic->streams[pkt->stream_index]->time_base) -
2491                 (double)(start_time != AV_NOPTS_VALUE ? start_time : 0) / 1000000
2492                 <= ((double)duration / 1000000);
2493         if (pkt->stream_index == is->audio_stream && pkt_in_play_range) {
2494             packet_queue_put(&is->audioq, pkt);
2495         } else if (pkt->stream_index == is->video_stream && pkt_in_play_range) {
2496             packet_queue_put(&is->videoq, pkt);
2497         } else if (pkt->stream_index == is->subtitle_stream && pkt_in_play_range) {
2498             packet_queue_put(&is->subtitleq, pkt);
2499         } else {
2500             av_free_packet(pkt);
2501         }
2502     }
2503     /* wait until the end */
2504     while (!is->abort_request) {
2505         SDL_Delay(100);
2506     }
2507
2508     ret = 0;
2509  fail:
2510     /* disable interrupting */
2511     global_video_state = NULL;
2512
2513     /* close each stream */
2514     if (is->audio_stream >= 0)
2515         stream_component_close(is, is->audio_stream);
2516     if (is->video_stream >= 0)
2517         stream_component_close(is, is->video_stream);
2518     if (is->subtitle_stream >= 0)
2519         stream_component_close(is, is->subtitle_stream);
2520     if (is->ic) {
2521         avformat_close_input(&is->ic);
2522     }
2523
2524     if (ret != 0) {
2525         SDL_Event event;
2526
2527         event.type = FF_QUIT_EVENT;
2528         event.user.data1 = is;
2529         SDL_PushEvent(&event);
2530     }
2531     return 0;
2532 }
2533
2534 static VideoState *stream_open(const char *filename, AVInputFormat *iformat)
2535 {
2536     VideoState *is;
2537
2538     is = av_mallocz(sizeof(VideoState));
2539     if (!is)
2540         return NULL;
2541     av_strlcpy(is->filename, filename, sizeof(is->filename));
2542     is->iformat = iformat;
2543     is->ytop    = 0;
2544     is->xleft   = 0;
2545
2546     /* start video display */
2547     is->pictq_mutex = SDL_CreateMutex();
2548     is->pictq_cond  = SDL_CreateCond();
2549
2550     is->subpq_mutex = SDL_CreateMutex();
2551     is->subpq_cond  = SDL_CreateCond();
2552
2553     is->av_sync_type = av_sync_type;
2554     is->parse_tid    = SDL_CreateThread(decode_thread, is);
2555     if (!is->parse_tid) {
2556         av_free(is);
2557         return NULL;
2558     }
2559     return is;
2560 }
2561
2562 static void stream_cycle_channel(VideoState *is, int codec_type)
2563 {
2564     AVFormatContext *ic = is->ic;
2565     int start_index, stream_index;
2566     AVStream *st;
2567
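         /* cycle to the next usable stream of the same type, wrapping around;
            for subtitles, wrapping past the last stream disables them */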
2568     if (codec_type == AVMEDIA_TYPE_VIDEO)
2569         start_index = is->video_stream;
2570     else if (codec_type == AVMEDIA_TYPE_AUDIO)
2571         start_index = is->audio_stream;
2572     else
2573         start_index = is->subtitle_stream;
2574     if (start_index < (codec_type == AVMEDIA_TYPE_SUBTITLE ? -1 : 0))
2575         return;
2576     stream_index = start_index;
2577     for (;;) {
2578         if (++stream_index >= is->ic->nb_streams)
2579         {
2580             if (codec_type == AVMEDIA_TYPE_SUBTITLE)
2581             {
2582                 stream_index = -1;
2583                 goto the_end;
2584             } else
2585                 stream_index = 0;
2586         }
2587         if (stream_index == start_index)
2588             return;
2589         st = ic->streams[stream_index];
2590         if (st->codec->codec_type == codec_type) {
2591             /* check that parameters are OK */
2592             switch (codec_type) {
2593             case AVMEDIA_TYPE_AUDIO:
2594                 if (st->codec->sample_rate != 0 &&
2595                     st->codec->channels != 0)
2596                     goto the_end;
2597                 break;
2598             case AVMEDIA_TYPE_VIDEO:
2599             case AVMEDIA_TYPE_SUBTITLE:
2600                 goto the_end;
2601             default:
2602                 break;
2603             }
2604         }
2605     }
2606  the_end:
2607     stream_component_close(is, start_index);
2608     stream_component_open(is, stream_index);
2609 }
2610
2611
2612 static void toggle_full_screen(void)
2613 {
2614 #if defined(__APPLE__) && SDL_VERSION_ATLEAST(1, 2, 14)
2615     /* OS X needs to empty the picture_queue */
2616     int i;
2617     for (i = 0; i < VIDEO_PICTURE_QUEUE_SIZE; i++)
2618         cur_stream->pictq[i].reallocate = 1;
2619 #endif
2620     is_full_screen = !is_full_screen;
2621     video_open(cur_stream);
2622 }
2623
2624 static void toggle_pause(void)
2625 {
2626     if (cur_stream)
2627         stream_pause(cur_stream);
2628     step = 0;
2629 }
2630
2631 static void step_to_next_frame(void)
2632 {
2633     if (cur_stream) {
2634         /* if the stream is paused unpause it, then step */
2635         if (cur_stream->paused)
2636             stream_pause(cur_stream);
2637     }
2638     step = 1;
2639 }
2640
2641 static void toggle_audio_display(void)
2642 {
2643     if (cur_stream) {
2644         int bgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0x00);
2645         cur_stream->show_audio = (cur_stream->show_audio + 1) % 3;
2646         fill_rectangle(screen,
2647                        cur_stream->xleft, cur_stream->ytop, cur_stream->width, cur_stream->height,
2648                        bgcolor);
2649         SDL_UpdateRect(screen, cur_stream->xleft, cur_stream->ytop, cur_stream->width, cur_stream->height);
2650     }
2651 }
2652
2653 /* handle an event sent by the GUI */
2654 static void event_loop(void)
2655 {
2656     SDL_Event event;
2657     double incr, pos, frac;
2658
2659     for (;;) {
2660         double x;
2661         SDL_WaitEvent(&event);
2662         switch (event.type) {
2663         case SDL_KEYDOWN:
2664             if (exit_on_keydown) {
2665                 do_exit();
2666                 break;
2667             }
2668             switch (event.key.keysym.sym) {
2669             case SDLK_ESCAPE:
2670             case SDLK_q:
2671                 do_exit();
2672                 break;
2673             case SDLK_f:
2674                 toggle_full_screen();
2675                 break;
2676             case SDLK_p:
2677             case SDLK_SPACE:
2678                 toggle_pause();
2679                 break;
2680             case SDLK_s: // S: Step to next frame
2681                 step_to_next_frame();
2682                 break;
2683             case SDLK_a:
2684                 if (cur_stream)
2685                     stream_cycle_channel(cur_stream, AVMEDIA_TYPE_AUDIO);
2686                 break;
2687             case SDLK_v:
2688                 if (cur_stream)
2689                     stream_cycle_channel(cur_stream, AVMEDIA_TYPE_VIDEO);
2690                 break;
2691             case SDLK_t:
2692                 if (cur_stream)
2693                     stream_cycle_channel(cur_stream, AVMEDIA_TYPE_SUBTITLE);
2694                 break;
2695             case SDLK_w:
2696                 toggle_audio_display();
2697                 break;
2698             case SDLK_LEFT:
2699                 incr = -10.0;
2700                 goto do_seek;
2701             case SDLK_RIGHT:
2702                 incr = 10.0;
2703                 goto do_seek;
2704             case SDLK_UP:
2705                 incr = 60.0;
2706                 goto do_seek;
2707             case SDLK_DOWN:
2708                 incr = -60.0;
2709             do_seek:
2710                 if (cur_stream) {
2711                     if (seek_by_bytes) {
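                             /* byte seeking: convert the time increment into a
                                byte offset using the container bit rate, or a
                                fallback of ~180 kB/s, relative to the current
                                read position */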
2712                         if (cur_stream->video_stream >= 0 && cur_stream->video_current_pos >= 0) {
2713                             pos = cur_stream->video_current_pos;
2714                         } else if (cur_stream->audio_stream >= 0 && cur_stream->audio_pkt.pos >= 0) {
2715                             pos = cur_stream->audio_pkt.pos;
2716                         } else
2717                             pos = avio_tell(cur_stream->ic->pb);
2718                         if (cur_stream->ic->bit_rate)
2719                             incr *= cur_stream->ic->bit_rate / 8.0;
2720                         else
2721                             incr *= 180000.0;
2722                         pos += incr;
2723                         stream_seek(cur_stream, pos, incr, 1);
2724                     } else {
2725                         pos = get_master_clock(cur_stream);
2726                         pos += incr;
2727                         stream_seek(cur_stream, (int64_t)(pos * AV_TIME_BASE), (int64_t)(incr * AV_TIME_BASE), 0);
2728                     }
2729                 }
2730                 break;
2731             default:
2732                 break;
2733             }
2734             break;
2735         case SDL_MOUSEBUTTONDOWN:
2736             if (exit_on_mousedown) {
2737                 do_exit();
2738                 break;
2739             }
2740         case SDL_MOUSEMOTION:
2741             if (event.type == SDL_MOUSEBUTTONDOWN) {
2742                 x = event.button.x;
2743             } else {
2744                 if (event.motion.state != SDL_PRESSED)
2745                     break;
2746                 x = event.motion.x;
2747             }
2748             if (cur_stream) {
2749                 if (seek_by_bytes || cur_stream->ic->duration <= 0) {
2750                     uint64_t size =  avio_size(cur_stream->ic->pb);
2751                     stream_seek(cur_stream, size*x/cur_stream->width, 0, 1);
2752                 } else {
2753                     int64_t ts;
2754                     int ns, hh, mm, ss;
2755                     int tns, thh, tmm, tss;
2756                     tns  = cur_stream->ic->duration / 1000000LL;
2757                     thh  = tns / 3600;
2758                     tmm  = (tns % 3600) / 60;
2759                     tss  = (tns % 60);
2760                     frac = x / cur_stream->width;
2761                     ns   = frac * tns;
2762                     hh   = ns / 3600;
2763                     mm   = (ns % 3600) / 60;
2764                     ss   = (ns % 60);
2765                     fprintf(stderr, "Seek to %2.0f%% (%2d:%02d:%02d) of total duration (%2d:%02d:%02d)       \n", frac*100,
2766                             hh, mm, ss, thh, tmm, tss);
2767                     ts = frac * cur_stream->ic->duration;
2768                     if (cur_stream->ic->start_time != AV_NOPTS_VALUE)
2769                         ts += cur_stream->ic->start_time;
2770                     stream_seek(cur_stream, ts, 0, 0);
2771                 }
2772             }
2773             break;
2774         case SDL_VIDEORESIZE:
2775             if (cur_stream) {
2776                 screen = SDL_SetVideoMode(event.resize.w, event.resize.h, 0,
2777                                           SDL_HWSURFACE|SDL_RESIZABLE|SDL_ASYNCBLIT|SDL_HWACCEL);
2778                 screen_width  = cur_stream->width  = event.resize.w;
2779                 screen_height = cur_stream->height = event.resize.h;
2780             }
2781             break;
2782         case SDL_QUIT:
2783         case FF_QUIT_EVENT:
2784             do_exit();
2785             break;
2786         case FF_ALLOC_EVENT:
2787             video_open(event.user.data1);
2788             alloc_picture(event.user.data1);
2789             break;
2790         case FF_REFRESH_EVENT:
2791             video_refresh_timer(event.user.data1);
2792             cur_stream->refresh = 0;
2793             break;
2794         default:
2795             break;
2796         }
2797     }
2798 }
2799
2800 static int opt_frame_size(const char *opt, const char *arg)
2801 {
2802     av_log(NULL, AV_LOG_ERROR,
2803            "Option '%s' has been removed, use private format options instead\n", opt);
2804     return AVERROR(EINVAL);
2805 }
2806
2807 static int opt_width(const char *opt, const char *arg)
2808 {
2809     screen_width = parse_number_or_die(opt, arg, OPT_INT64, 1, INT_MAX);
2810     return 0;
2811 }
2812
2813 static int opt_height(const char *opt, const char *arg)
2814 {
2815     screen_height = parse_number_or_die(opt, arg, OPT_INT64, 1, INT_MAX);
2816     return 0;
2817 }
2818
2819 static int opt_format(const char *opt, const char *arg)
2820 {
2821     file_iformat = av_find_input_format(arg);
2822     if (!file_iformat) {
2823         fprintf(stderr, "Unknown input format: %s\n", arg);
2824         return AVERROR(EINVAL);
2825     }
2826     return 0;
2827 }
2828
2829 static int opt_frame_pix_fmt(const char *opt, const char *arg)
2830 {
2831     av_log(NULL, AV_LOG_ERROR,
2832            "Option '%s' has been removed, use private format options instead\n", opt);
2833     return AVERROR(EINVAL);
2834 }
2835
2836 static int opt_sync(const char *opt, const char *arg)
2837 {
2838     if (!strcmp(arg, "audio"))
2839         av_sync_type = AV_SYNC_AUDIO_MASTER;
2840     else if (!strcmp(arg, "video"))
2841         av_sync_type = AV_SYNC_VIDEO_MASTER;
2842     else if (!strcmp(arg, "ext"))
2843         av_sync_type = AV_SYNC_EXTERNAL_CLOCK;
2844     else {
2845         fprintf(stderr, "Unknown value for %s: %s\n", opt, arg);
2846         exit(1);
2847     }
2848     return 0;
2849 }
2850
2851 static int opt_seek(const char *opt, const char *arg)
2852 {
2853     start_time = parse_time_or_die(opt, arg, 1);
2854     return 0;
2855 }
2856
2857 static int opt_duration(const char *opt, const char *arg)
2858 {
2859     duration = parse_time_or_die(opt, arg, 1);
2860     return 0;
2861 }
2862
2863 static int opt_debug(const char *opt, const char *arg)
2864 {
2865     av_log_set_level(99);
2866     debug = parse_number_or_die(opt, arg, OPT_INT64, 0, INT_MAX);
2867     return 0;
2868 }
2869
2870 static int opt_vismv(const char *opt, const char *arg)
2871 {
2872     debug_mv = parse_number_or_die(opt, arg, OPT_INT64, INT_MIN, INT_MAX);
2873     return 0;
2874 }
2875
2876 static const OptionDef options[] = {
2877 #include "cmdutils_common_opts.h"
2878     { "x", HAS_ARG, { (void*)opt_width }, "force displayed width", "width" },
2879     { "y", HAS_ARG, { (void*)opt_height }, "force displayed height", "height" },
2880     { "s", HAS_ARG | OPT_VIDEO, { (void*)opt_frame_size }, "set frame size (WxH or abbreviation)", "size" },
2881     { "fs", OPT_BOOL, { (void*)&is_full_screen }, "force full screen" },
2882     { "an", OPT_BOOL, { (void*)&audio_disable }, "disable audio" },
2883     { "vn", OPT_BOOL, { (void*)&video_disable }, "disable video" },
2884     { "ast", OPT_INT | HAS_ARG | OPT_EXPERT, { (void*)&wanted_stream[AVMEDIA_TYPE_AUDIO] }, "select desired audio stream", "stream_number" },
2885     { "vst", OPT_INT | HAS_ARG | OPT_EXPERT, { (void*)&wanted_stream[AVMEDIA_TYPE_VIDEO] }, "select desired video stream", "stream_number" },
2886     { "sst", OPT_INT | HAS_ARG | OPT_EXPERT, { (void*)&wanted_stream[AVMEDIA_TYPE_SUBTITLE] }, "select desired subtitle stream", "stream_number" },
2887     { "ss", HAS_ARG, { (void*)&opt_seek }, "seek to a given position in seconds", "pos" },
2888     { "t", HAS_ARG, { (void*)&opt_duration }, "play \"duration\" seconds of audio/video", "duration" },
2889     { "bytes", OPT_INT | HAS_ARG, { (void*)&seek_by_bytes }, "seek by bytes 0=off 1=on -1=auto", "val" },
2890     { "nodisp", OPT_BOOL, { (void*)&display_disable }, "disable graphical display" },
2891     { "f", HAS_ARG, { (void*)opt_format }, "force format", "fmt" },
2892     { "pix_fmt", HAS_ARG | OPT_EXPERT | OPT_VIDEO, { (void*)opt_frame_pix_fmt }, "set pixel format", "format" },
2893     { "stats", OPT_BOOL | OPT_EXPERT, { (void*)&show_status }, "show status", "" },
2894     { "debug", HAS_ARG | OPT_EXPERT, { (void*)opt_debug }, "print specific debug info", "" },
2895     { "bug", OPT_INT | HAS_ARG | OPT_EXPERT, { (void*)&workaround_bugs }, "workaround bugs", "" },
2896     { "vismv", HAS_ARG | OPT_EXPERT, { (void*)opt_vismv }, "visualize motion vectors", "" },
2897     { "fast", OPT_BOOL | OPT_EXPERT, { (void*)&fast }, "non spec compliant optimizations", "" },
2898     { "genpts", OPT_BOOL | OPT_EXPERT, { (void*)&genpts }, "generate pts", "" },
2899     { "drp", OPT_INT | HAS_ARG | OPT_EXPERT, { (void*)&decoder_reorder_pts }, "let decoder reorder pts 0=off 1=on -1=auto", ""},
2900     { "skiploop", OPT_INT | HAS_ARG | OPT_EXPERT, { (void*)&skip_loop_filter }, "", "" },
2901     { "skipframe", OPT_INT | HAS_ARG | OPT_EXPERT, { (void*)&skip_frame }, "", "" },
2902     { "skipidct", OPT_INT | HAS_ARG | OPT_EXPERT, { (void*)&skip_idct }, "", "" },
2903     { "idct", OPT_INT | HAS_ARG | OPT_EXPERT, { (void*)&idct }, "set idct algo",  "algo" },
2904     { "ec", OPT_INT | HAS_ARG | OPT_EXPERT, { (void*)&error_concealment }, "set error concealment options",  "bit_mask" },
2905     { "sync", HAS_ARG | OPT_EXPERT, { (void*)opt_sync }, "set audio-video sync. type (type=audio/video/ext)", "type" },
2906     { "autoexit", OPT_BOOL | OPT_EXPERT, { (void*)&autoexit }, "exit at the end", "" },
2907     { "exitonkeydown", OPT_BOOL | OPT_EXPERT, { (void*)&exit_on_keydown }, "exit on key down", "" },
2908     { "exitonmousedown", OPT_BOOL | OPT_EXPERT, { (void*)&exit_on_mousedown }, "exit on mouse down", "" },
2909     { "loop", OPT_INT | HAS_ARG | OPT_EXPERT, { (void*)&loop }, "set number of times the playback shall be looped", "loop count" },
2910     { "framedrop", OPT_BOOL | OPT_EXPERT, { (void*)&framedrop }, "drop frames when cpu is too slow", "" },
2911     { "infbuf", OPT_BOOL | OPT_EXPERT, { (void*)&infinite_buffer }, "don't limit the input buffer size (useful with realtime streams)", "" },
2912     { "window_title", OPT_STRING | HAS_ARG, { (void*)&window_title }, "set window title", "window title" },
2913 #if CONFIG_AVFILTER
2914     { "vf", OPT_STRING | HAS_ARG, { (void*)&vfilters }, "video filters", "filter list" },
2915 #endif
2916     { "rdftspeed", OPT_INT | HAS_ARG| OPT_AUDIO | OPT_EXPERT, { (void*)&rdftspeed }, "rdft speed", "msecs" },
2917     { "default", HAS_ARG | OPT_AUDIO | OPT_VIDEO | OPT_EXPERT, { (void*)opt_default }, "generic catch all option", "" },
2918     { "i", 0, { NULL }, "avconv compatibility dummy option", ""},
2919     { NULL, },
2920 };
2921
2922 static void show_usage(void)
2923 {
2924     printf("Simple media player\n");
2925     printf("usage: %s [options] input_file\n", program_name);
2926     printf("\n");
2927 }
2928
2929 static void show_help(void)
2930 {
2931     av_log_set_callback(log_callback_help);
2932     show_usage();
2933     show_help_options(options, "Main options:\n",
2934                       OPT_EXPERT, 0);
2935     show_help_options(options, "\nAdvanced options:\n",
2936                       OPT_EXPERT, OPT_EXPERT);
2937     printf("\n");
2938     show_help_children(avcodec_get_class(), AV_OPT_FLAG_DECODING_PARAM);
2939     show_help_children(avformat_get_class(), AV_OPT_FLAG_DECODING_PARAM);
2940 #if !CONFIG_AVFILTER
2941     show_help_children(sws_get_class(), AV_OPT_FLAG_ENCODING_PARAM);
2942 #endif
2943     printf("\nWhile playing:\n"
2944            "q, ESC              quit\n"
2945            "f                   toggle full screen\n"
2946            "p, SPC              pause\n"
2947            "a                   cycle audio channel\n"
2948            "v                   cycle video channel\n"
2949            "t                   cycle subtitle channel\n"
2950            "w                   show audio waves\n"
2951            "s                   activate frame-step mode\n"
2952            "left/right          seek backward/forward 10 seconds\n"
2953            "down/up             seek backward/forward 1 minute\n"
2954            "mouse click         seek to percentage in file corresponding to fraction of width\n"
2955            );
2956 }
2957
2958 static void opt_input_file(void *optctx, const char *filename)
2959 {
2960     if (input_filename) {
2961         fprintf(stderr, "Argument '%s' provided as input filename, but '%s' was already specified.\n",
2962                 filename, input_filename);
2963         exit(1);
2964     }
2965     if (!strcmp(filename, "-"))
2966         filename = "pipe:";
2967     input_filename = filename;
2968 }
2969
2970 /* program entry point */
2971 int main(int argc, char **argv)
2972 {
2973     int flags;
2974
2975     av_log_set_flags(AV_LOG_SKIP_REPEATED);
2976     parse_loglevel(argc, argv, options);
2977
2978     /* register all codecs, demux and protocols */
2979     avcodec_register_all();
2980 #if CONFIG_AVDEVICE
2981     avdevice_register_all();
2982 #endif
2983 #if CONFIG_AVFILTER
2984     avfilter_register_all();
2985 #endif
2986     av_register_all();
2987     avformat_network_init();
2988
2989     init_opts();
2990
2991     show_banner();
2992
2993     parse_options(NULL, argc, argv, options, opt_input_file);
2994
2995     if (!input_filename) {
2996         show_usage();
2997         fprintf(stderr, "An input file must be specified\n");
2998         fprintf(stderr, "Use -h to get full help or, even better, run 'man %s'\n", program_name);
2999         exit(1);
3000     }
3001
3002     if (display_disable) {
3003         video_disable = 1;
3004     }
3005     flags = SDL_INIT_VIDEO | SDL_INIT_AUDIO | SDL_INIT_TIMER;
3006 #if !defined(__MINGW32__) && !defined(__APPLE__)
3007     flags |= SDL_INIT_EVENTTHREAD; /* Not supported on Windows or Mac OS X */
3008 #endif
3009     if (SDL_Init (flags)) {
3010         fprintf(stderr, "Could not initialize SDL - %s\n", SDL_GetError());
3011         exit(1);
3012     }
3013
3014     if (!display_disable) {
3015 #if HAVE_SDL_VIDEO_SIZE
3016         const SDL_VideoInfo *vi = SDL_GetVideoInfo();
3017         fs_screen_width = vi->current_w;
3018         fs_screen_height = vi->current_h;
3019 #endif
3020     }
3021
3022     SDL_EventState(SDL_ACTIVEEVENT, SDL_IGNORE);
3023     SDL_EventState(SDL_SYSWMEVENT, SDL_IGNORE);
3024     SDL_EventState(SDL_USEREVENT, SDL_IGNORE);
3025
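         /* flush_pkt is a sentinel: the decoding threads compare a packet's
            data pointer against flush_pkt.data to detect a flush request after
            a seek */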
3026     av_init_packet(&flush_pkt);
3027     flush_pkt.data = (uint8_t *)"FLUSH";
3028
3029     cur_stream = stream_open(input_filename, file_iformat);
3030
3031     event_loop();
3032
3033     /* never returns */
3034
3035     return 0;
3036 }