1 /*
2  * avplay : Simple Media Player based on the Libav libraries
3  * Copyright (c) 2003 Fabrice Bellard
4  *
5  * This file is part of Libav.
6  *
7  * Libav is free software; you can redistribute it and/or
8  * modify it under the terms of the GNU Lesser General Public
9  * License as published by the Free Software Foundation; either
10  * version 2.1 of the License, or (at your option) any later version.
11  *
12  * Libav is distributed in the hope that it will be useful,
13  * but WITHOUT ANY WARRANTY; without even the implied warranty of
14  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
15  * Lesser General Public License for more details.
16  *
17  * You should have received a copy of the GNU Lesser General Public
18  * License along with Libav; if not, write to the Free Software
19  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
20  */
21
22 #include "config.h"
23 #include <inttypes.h>
24 #include <math.h>
25 #include <limits.h>
26 #include "libavutil/avstring.h"
27 #include "libavutil/colorspace.h"
28 #include "libavutil/mathematics.h"
29 #include "libavutil/pixdesc.h"
30 #include "libavutil/imgutils.h"
31 #include "libavutil/dict.h"
32 #include "libavutil/parseutils.h"
33 #include "libavutil/samplefmt.h"
34 #include "libavutil/time.h"
35 #include "libavformat/avformat.h"
36 #include "libavdevice/avdevice.h"
37 #include "libswscale/swscale.h"
38 #include "libavresample/avresample.h"
39 #include "libavutil/opt.h"
40 #include "libavcodec/avfft.h"
41
42 #if CONFIG_AVFILTER
43 # include "libavfilter/avfilter.h"
44 # include "libavfilter/avfiltergraph.h"
45 # include "libavfilter/buffersink.h"
46 # include "libavfilter/buffersrc.h"
47 #endif
48
49 #include "cmdutils.h"
50
51 #include <SDL.h>
52 #include <SDL_thread.h>
53
54 #ifdef __MINGW32__
55 #undef main /* We don't want SDL to override our main() */
56 #endif
57
58 #include <assert.h>
59
60 const char program_name[] = "avplay";
61 const int program_birth_year = 2003;
62
63 #define MAX_QUEUE_SIZE (15 * 1024 * 1024)
64 #define MIN_AUDIOQ_SIZE (20 * 16 * 1024)
65 #define MIN_FRAMES 5
66
67 /* SDL audio buffer size, in samples. Should be small to have precise
68    A/V sync as SDL does not have hardware buffer fullness info. */
69 #define SDL_AUDIO_BUFFER_SIZE 1024
70
71 /* no AV sync correction is done if below the AV sync threshold */
72 #define AV_SYNC_THRESHOLD 0.01
73 /* no AV correction is done if the error is too big */
74 #define AV_NOSYNC_THRESHOLD 10.0
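   /* when the video clock drifts from the master clock by more than FFMAX(AV_SYNC_THRESHOLD, frame delay)
      but less than AV_NOSYNC_THRESHOLD, compute_target_time() doubles or zeroes the nominal frame delay
      to pull video back toward the master clock; larger errors are left uncorrected */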
75
76 #define FRAME_SKIP_FACTOR 0.05
77
78 /* maximum audio speed change to get correct sync */
79 #define SAMPLE_CORRECTION_PERCENT_MAX 10
80
81 /* we use about AUDIO_DIFF_AVG_NB A-V differences to make the average */
82 #define AUDIO_DIFF_AVG_NB   20
83
84 /* NOTE: the size must be big enough to compensate for the hardware audio buffer size */
85 #define SAMPLE_ARRAY_SIZE (2 * 65536)
86
87 static int sws_flags = SWS_BICUBIC;
88
89 typedef struct PacketQueue {
90     AVPacketList *first_pkt, *last_pkt;
91     int nb_packets;
92     int size;
93     int abort_request;
94     SDL_mutex *mutex;
95     SDL_cond *cond;
96 } PacketQueue;
97
98 #define VIDEO_PICTURE_QUEUE_SIZE 2
99 #define SUBPICTURE_QUEUE_SIZE 4
100
101 typedef struct VideoPicture {
102     double pts;                                  ///< presentation time stamp for this picture
103     double target_clock;                         ///< av_gettime() time at which this should be displayed ideally
104     int64_t pos;                                 ///< byte position in file
105     SDL_Overlay *bmp;
106     int width, height; /* source height & width */
107     int allocated;
108     int reallocate;
109     enum PixelFormat pix_fmt;
110
111 #if CONFIG_AVFILTER
112     AVFilterBufferRef *picref;
113 #endif
114 } VideoPicture;
115
116 typedef struct SubPicture {
117     double pts; /* presentation time stamp for this picture */
118     AVSubtitle sub;
119 } SubPicture;
120
121 enum {
122     AV_SYNC_AUDIO_MASTER, /* default choice */
123     AV_SYNC_VIDEO_MASTER,
124     AV_SYNC_EXTERNAL_CLOCK, /* synchronize to an external clock */
125 };
126
127 typedef struct VideoState {
128     SDL_Thread *parse_tid;
129     SDL_Thread *video_tid;
130     SDL_Thread *refresh_tid;
131     AVInputFormat *iformat;
132     int no_background;
133     int abort_request;
134     int paused;
135     int last_paused;
136     int seek_req;
137     int seek_flags;
138     int64_t seek_pos;
139     int64_t seek_rel;
140     int read_pause_return;
141     AVFormatContext *ic;
142
143     int audio_stream;
144
145     int av_sync_type;
146     double external_clock; /* external clock base */
147     int64_t external_clock_time;
148
149     double audio_clock;
150     double audio_diff_cum; /* used for AV difference average computation */
151     double audio_diff_avg_coef;
152     double audio_diff_threshold;
153     int audio_diff_avg_count;
154     AVStream *audio_st;
155     PacketQueue audioq;
156     int audio_hw_buf_size;
157     uint8_t silence_buf[SDL_AUDIO_BUFFER_SIZE];
158     uint8_t *audio_buf;
159     uint8_t *audio_buf1;
160     unsigned int audio_buf_size; /* in bytes */
161     int audio_buf_index; /* in bytes */
162     AVPacket audio_pkt_temp;
163     AVPacket audio_pkt;
164     enum AVSampleFormat sdl_sample_fmt;
165     uint64_t sdl_channel_layout;
166     int sdl_channels;
167     enum AVSampleFormat resample_sample_fmt;
168     uint64_t resample_channel_layout;
169     AVAudioResampleContext *avr;
170     AVFrame *frame;
171
172     int show_audio; /* if true, display audio samples */
173     int16_t sample_array[SAMPLE_ARRAY_SIZE];
174     int sample_array_index;
175     int last_i_start;
176     RDFTContext *rdft;
177     int rdft_bits;
178     FFTSample *rdft_data;
179     int xpos;
180
181     SDL_Thread *subtitle_tid;
182     int subtitle_stream;
183     int subtitle_stream_changed;
184     AVStream *subtitle_st;
185     PacketQueue subtitleq;
186     SubPicture subpq[SUBPICTURE_QUEUE_SIZE];
187     int subpq_size, subpq_rindex, subpq_windex;
188     SDL_mutex *subpq_mutex;
189     SDL_cond *subpq_cond;
190
191     double frame_timer;
192     double frame_last_pts;
193     double frame_last_delay;
194     double video_clock;                          ///< pts of last decoded frame / predicted pts of next decoded frame
195     int video_stream;
196     AVStream *video_st;
197     PacketQueue videoq;
198     double video_current_pts;                    ///< current displayed pts (different from video_clock if frame fifos are used)
199     double video_current_pts_drift;              ///< video_current_pts - time (av_gettime) at which we updated video_current_pts - used to have running video pts
200     int64_t video_current_pos;                   ///< current displayed file pos
201     VideoPicture pictq[VIDEO_PICTURE_QUEUE_SIZE];
202     int pictq_size, pictq_rindex, pictq_windex;
203     SDL_mutex *pictq_mutex;
204     SDL_cond *pictq_cond;
205 #if !CONFIG_AVFILTER
206     struct SwsContext *img_convert_ctx;
207 #endif
208
209     //    QETimer *video_timer;
210     char filename[1024];
211     int width, height, xleft, ytop;
212
213     PtsCorrectionContext pts_ctx;
214
215 #if CONFIG_AVFILTER
216     AVFilterContext *in_video_filter;           ///< the first filter in the video chain
217     AVFilterContext *out_video_filter;          ///< the last filter in the video chain
218     int use_dr1;
219     FrameBuffer *buffer_pool;
220 #endif
221
222     float skip_frames;
223     float skip_frames_index;
224     int refresh;
225 } VideoState;
226
227 /* options specified by the user */
228 static AVInputFormat *file_iformat;
229 static const char *input_filename;
230 static const char *window_title;
231 static int fs_screen_width;
232 static int fs_screen_height;
233 static int screen_width  = 0;
234 static int screen_height = 0;
235 static int audio_disable;
236 static int video_disable;
237 static int wanted_stream[AVMEDIA_TYPE_NB] = {
238     [AVMEDIA_TYPE_AUDIO]    = -1,
239     [AVMEDIA_TYPE_VIDEO]    = -1,
240     [AVMEDIA_TYPE_SUBTITLE] = -1,
241 };
242 static int seek_by_bytes = -1;
243 static int display_disable;
244 static int show_status = 1;
245 static int av_sync_type = AV_SYNC_AUDIO_MASTER;
246 static int64_t start_time = AV_NOPTS_VALUE;
247 static int64_t duration = AV_NOPTS_VALUE;
248 static int debug = 0;
249 static int debug_mv = 0;
250 static int step = 0;
251 static int workaround_bugs = 1;
252 static int fast = 0;
253 static int genpts = 0;
254 static int idct = FF_IDCT_AUTO;
255 static enum AVDiscard skip_frame       = AVDISCARD_DEFAULT;
256 static enum AVDiscard skip_idct        = AVDISCARD_DEFAULT;
257 static enum AVDiscard skip_loop_filter = AVDISCARD_DEFAULT;
258 static int error_concealment = 3;
259 static int decoder_reorder_pts = -1;
260 static int autoexit;
261 static int exit_on_keydown;
262 static int exit_on_mousedown;
263 static int loop = 1;
264 static int framedrop = 1;
265 static int infinite_buffer = 0;
266
267 static int rdftspeed = 20;
268 #if CONFIG_AVFILTER
269 static char *vfilters = NULL;
270 #endif
271
272 /* current context */
273 static int is_full_screen;
274 static VideoState *cur_stream;
275 static int64_t audio_callback_time;
276
277 static AVPacket flush_pkt;
278
279 #define FF_ALLOC_EVENT   (SDL_USEREVENT)
280 #define FF_REFRESH_EVENT (SDL_USEREVENT + 1)
281 #define FF_QUIT_EVENT    (SDL_USEREVENT + 2)
282
283 static SDL_Surface *screen;
284
285 static int packet_queue_put(PacketQueue *q, AVPacket *pkt);
286
287 /* packet queue handling */
288 static void packet_queue_init(PacketQueue *q)
289 {
290     memset(q, 0, sizeof(PacketQueue));
291     q->mutex = SDL_CreateMutex();
292     q->cond = SDL_CreateCond();
293     packet_queue_put(q, &flush_pkt);
294 }
295
296 static void packet_queue_flush(PacketQueue *q)
297 {
298     AVPacketList *pkt, *pkt1;
299
300     SDL_LockMutex(q->mutex);
301     for (pkt = q->first_pkt; pkt != NULL; pkt = pkt1) {
302         pkt1 = pkt->next;
303         av_free_packet(&pkt->pkt);
304         av_freep(&pkt);
305     }
306     q->last_pkt = NULL;
307     q->first_pkt = NULL;
308     q->nb_packets = 0;
309     q->size = 0;
310     SDL_UnlockMutex(q->mutex);
311 }
312
313 static void packet_queue_end(PacketQueue *q)
314 {
315     packet_queue_flush(q);
316     SDL_DestroyMutex(q->mutex);
317     SDL_DestroyCond(q->cond);
318 }
319
320 static int packet_queue_put(PacketQueue *q, AVPacket *pkt)
321 {
322     AVPacketList *pkt1;
323
324     /* duplicate the packet */
325     if (pkt != &flush_pkt && av_dup_packet(pkt) < 0)
326         return -1;
327
328     pkt1 = av_malloc(sizeof(AVPacketList));
329     if (!pkt1)
330         return -1;
331     pkt1->pkt = *pkt;
332     pkt1->next = NULL;
333
334
335     SDL_LockMutex(q->mutex);
336
337     if (!q->last_pkt)
338
339         q->first_pkt = pkt1;
340     else
341         q->last_pkt->next = pkt1;
342     q->last_pkt = pkt1;
343     q->nb_packets++;
344     q->size += pkt1->pkt.size + sizeof(*pkt1);
345     /* XXX: should duplicate packet data in DV case */
346     SDL_CondSignal(q->cond);
347
348     SDL_UnlockMutex(q->mutex);
349     return 0;
350 }
351
352 static void packet_queue_abort(PacketQueue *q)
353 {
354     SDL_LockMutex(q->mutex);
355
356     q->abort_request = 1;
357
358     SDL_CondSignal(q->cond);
359
360     SDL_UnlockMutex(q->mutex);
361 }
362
363 /* return < 0 if aborted, 0 if no packet and > 0 if packet.  */
364 static int packet_queue_get(PacketQueue *q, AVPacket *pkt, int block)
365 {
366     AVPacketList *pkt1;
367     int ret;
368
369     SDL_LockMutex(q->mutex);
370
371     for (;;) {
372         if (q->abort_request) {
373             ret = -1;
374             break;
375         }
376
377         pkt1 = q->first_pkt;
378         if (pkt1) {
379             q->first_pkt = pkt1->next;
380             if (!q->first_pkt)
381                 q->last_pkt = NULL;
382             q->nb_packets--;
383             q->size -= pkt1->pkt.size + sizeof(*pkt1);
384             *pkt = pkt1->pkt;
385             av_free(pkt1);
386             ret = 1;
387             break;
388         } else if (!block) {
389             ret = 0;
390             break;
391         } else {
392             SDL_CondWait(q->cond, q->mutex);
393         }
394     }
395     SDL_UnlockMutex(q->mutex);
396     return ret;
397 }
398
399 static inline void fill_rectangle(SDL_Surface *screen,
400                                   int x, int y, int w, int h, int color)
401 {
402     SDL_Rect rect;
403     rect.x = x;
404     rect.y = y;
405     rect.w = w;
406     rect.h = h;
407     SDL_FillRect(screen, &rect, color);
408 }
409
410 #define ALPHA_BLEND(a, oldp, newp, s)\
411 ((((oldp << s) * (255 - (a))) + (newp * (a))) / (255 << s))
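    /* blends newp over oldp with alpha a; newp may be the sum of 2^s source samples
       (s = 1 or 2 when chroma samples are accumulated), and the << s keeps oldp on the same scale */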
412
413 #define RGBA_IN(r, g, b, a, s)\
414 {\
415     unsigned int v = ((const uint32_t *)(s))[0];\
416     a = (v >> 24) & 0xff;\
417     r = (v >> 16) & 0xff;\
418     g = (v >> 8) & 0xff;\
419     b = v & 0xff;\
420 }
421
422 #define YUVA_IN(y, u, v, a, s, pal)\
423 {\
424     unsigned int val = ((const uint32_t *)(pal))[*(const uint8_t*)(s)];\
425     a = (val >> 24) & 0xff;\
426     y = (val >> 16) & 0xff;\
427     u = (val >> 8) & 0xff;\
428     v = val & 0xff;\
429 }
430
431 #define YUVA_OUT(d, y, u, v, a)\
432 {\
433     ((uint32_t *)(d))[0] = (a << 24) | (y << 16) | (u << 8) | v;\
434 }
435
436
437 #define BPP 1
438
439 static void blend_subrect(AVPicture *dst, const AVSubtitleRect *rect, int imgw, int imgh)
440 {
441     int wrap, wrap3, width2, skip2;
442     int y, u, v, a, u1, v1, a1, w, h;
443     uint8_t *lum, *cb, *cr;
444     const uint8_t *p;
445     const uint32_t *pal;
446     int dstx, dsty, dstw, dsth;
447
448     dstw = av_clip(rect->w, 0, imgw);
449     dsth = av_clip(rect->h, 0, imgh);
450     dstx = av_clip(rect->x, 0, imgw - dstw);
451     dsty = av_clip(rect->y, 0, imgh - dsth);
452     lum = dst->data[0] + dsty * dst->linesize[0];
453     cb  = dst->data[1] + (dsty >> 1) * dst->linesize[1];
454     cr  = dst->data[2] + (dsty >> 1) * dst->linesize[2];
455
456     width2 = ((dstw + 1) >> 1) + (dstx & ~dstw & 1);
457     skip2 = dstx >> 1;
458     wrap = dst->linesize[0];
459     wrap3 = rect->pict.linesize[0];
460     p = rect->pict.data[0];
461     pal = (const uint32_t *)rect->pict.data[1];  /* Now in YCrCb! */
462
463     if (dsty & 1) {
464         lum += dstx;
465         cb += skip2;
466         cr += skip2;
467
468         if (dstx & 1) {
469             YUVA_IN(y, u, v, a, p, pal);
470             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
471             cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
472             cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
473             cb++;
474             cr++;
475             lum++;
476             p += BPP;
477         }
478         for (w = dstw - (dstx & 1); w >= 2; w -= 2) {
479             YUVA_IN(y, u, v, a, p, pal);
480             u1 = u;
481             v1 = v;
482             a1 = a;
483             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
484
485             YUVA_IN(y, u, v, a, p + BPP, pal);
486             u1 += u;
487             v1 += v;
488             a1 += a;
489             lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
490             cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
491             cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
492             cb++;
493             cr++;
494             p += 2 * BPP;
495             lum += 2;
496         }
497         if (w) {
498             YUVA_IN(y, u, v, a, p, pal);
499             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
500             cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
501             cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
502             p++;
503             lum++;
504         }
505         p += wrap3 - dstw * BPP;
506         lum += wrap - dstw - dstx;
507         cb += dst->linesize[1] - width2 - skip2;
508         cr += dst->linesize[2] - width2 - skip2;
509     }
510     for (h = dsth - (dsty & 1); h >= 2; h -= 2) {
511         lum += dstx;
512         cb += skip2;
513         cr += skip2;
514
515         if (dstx & 1) {
516             YUVA_IN(y, u, v, a, p, pal);
517             u1 = u;
518             v1 = v;
519             a1 = a;
520             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
521             p += wrap3;
522             lum += wrap;
523             YUVA_IN(y, u, v, a, p, pal);
524             u1 += u;
525             v1 += v;
526             a1 += a;
527             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
528             cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
529             cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
530             cb++;
531             cr++;
532             p += -wrap3 + BPP;
533             lum += -wrap + 1;
534         }
535         for (w = dstw - (dstx & 1); w >= 2; w -= 2) {
536             YUVA_IN(y, u, v, a, p, pal);
537             u1 = u;
538             v1 = v;
539             a1 = a;
540             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
541
542             YUVA_IN(y, u, v, a, p + BPP, pal);
543             u1 += u;
544             v1 += v;
545             a1 += a;
546             lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
547             p += wrap3;
548             lum += wrap;
549
550             YUVA_IN(y, u, v, a, p, pal);
551             u1 += u;
552             v1 += v;
553             a1 += a;
554             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
555
556             YUVA_IN(y, u, v, a, p + BPP, pal);
557             u1 += u;
558             v1 += v;
559             a1 += a;
560             lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
561
562             cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 2);
563             cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 2);
564
565             cb++;
566             cr++;
567             p += -wrap3 + 2 * BPP;
568             lum += -wrap + 2;
569         }
570         if (w) {
571             YUVA_IN(y, u, v, a, p, pal);
572             u1 = u;
573             v1 = v;
574             a1 = a;
575             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
576             p += wrap3;
577             lum += wrap;
578             YUVA_IN(y, u, v, a, p, pal);
579             u1 += u;
580             v1 += v;
581             a1 += a;
582             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
583             cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
584             cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
585             cb++;
586             cr++;
587             p += -wrap3 + BPP;
588             lum += -wrap + 1;
589         }
590         p += wrap3 + (wrap3 - dstw * BPP);
591         lum += wrap + (wrap - dstw - dstx);
592         cb += dst->linesize[1] - width2 - skip2;
593         cr += dst->linesize[2] - width2 - skip2;
594     }
595     /* handle odd height */
596     if (h) {
597         lum += dstx;
598         cb += skip2;
599         cr += skip2;
600
601         if (dstx & 1) {
602             YUVA_IN(y, u, v, a, p, pal);
603             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
604             cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
605             cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
606             cb++;
607             cr++;
608             lum++;
609             p += BPP;
610         }
611         for (w = dstw - (dstx & 1); w >= 2; w -= 2) {
612             YUVA_IN(y, u, v, a, p, pal);
613             u1 = u;
614             v1 = v;
615             a1 = a;
616             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
617
618             YUVA_IN(y, u, v, a, p + BPP, pal);
619             u1 += u;
620             v1 += v;
621             a1 += a;
622             lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
623             cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
624             cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
625             cb++;
626             cr++;
627             p += 2 * BPP;
628             lum += 2;
629         }
630         if (w) {
631             YUVA_IN(y, u, v, a, p, pal);
632             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
633             cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
634             cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
635         }
636     }
637 }
638
639 static void free_subpicture(SubPicture *sp)
640 {
641     avsubtitle_free(&sp->sub);
642 }
643
644 static void video_image_display(VideoState *is)
645 {
646     VideoPicture *vp;
647     SubPicture *sp;
648     AVPicture pict;
649     float aspect_ratio;
650     int width, height, x, y;
651     SDL_Rect rect;
652     int i;
653
654     vp = &is->pictq[is->pictq_rindex];
655     if (vp->bmp) {
656 #if CONFIG_AVFILTER
657          if (vp->picref->video->pixel_aspect.num == 0)
658              aspect_ratio = 0;
659          else
660              aspect_ratio = av_q2d(vp->picref->video->pixel_aspect);
661 #else
662
663         /* XXX: use variable in the frame */
664         if (is->video_st->sample_aspect_ratio.num)
665             aspect_ratio = av_q2d(is->video_st->sample_aspect_ratio);
666         else if (is->video_st->codec->sample_aspect_ratio.num)
667             aspect_ratio = av_q2d(is->video_st->codec->sample_aspect_ratio);
668         else
669             aspect_ratio = 0;
670 #endif
671         if (aspect_ratio <= 0.0)
672             aspect_ratio = 1.0;
673         aspect_ratio *= (float)vp->width / (float)vp->height;
674
675         if (is->subtitle_st)
676         {
677             if (is->subpq_size > 0)
678             {
679                 sp = &is->subpq[is->subpq_rindex];
680
681                 if (vp->pts >= sp->pts + ((float) sp->sub.start_display_time / 1000))
682                 {
683                     SDL_LockYUVOverlay (vp->bmp);
684
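                        /* SDL's YV12 overlay stores its planes in Y, V, U order while AVPicture
                           expects Y, U, V, hence the swapped chroma plane indices below */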
685                     pict.data[0] = vp->bmp->pixels[0];
686                     pict.data[1] = vp->bmp->pixels[2];
687                     pict.data[2] = vp->bmp->pixels[1];
688
689                     pict.linesize[0] = vp->bmp->pitches[0];
690                     pict.linesize[1] = vp->bmp->pitches[2];
691                     pict.linesize[2] = vp->bmp->pitches[1];
692
693                     for (i = 0; i < sp->sub.num_rects; i++)
694                         blend_subrect(&pict, sp->sub.rects[i],
695                                       vp->bmp->w, vp->bmp->h);
696
697                     SDL_UnlockYUVOverlay (vp->bmp);
698                 }
699             }
700         }
701
702
703         /* XXX: we suppose the screen has a 1.0 pixel ratio */
704         height = is->height;
705         width = ((int)rint(height * aspect_ratio)) & ~1;
706         if (width > is->width) {
707             width = is->width;
708             height = ((int)rint(width / aspect_ratio)) & ~1;
709         }
710         x = (is->width - width) / 2;
711         y = (is->height - height) / 2;
712         is->no_background = 0;
713         rect.x = is->xleft + x;
714         rect.y = is->ytop  + y;
715         rect.w = width;
716         rect.h = height;
717         SDL_DisplayYUVOverlay(vp->bmp, &rect);
718     }
719 }
720
721 /* get the current audio output buffer size, in bytes. With SDL, we
722    cannot have precise information */
723 static int audio_write_get_buf_size(VideoState *is)
724 {
725     return is->audio_buf_size - is->audio_buf_index;
726 }
727
728 static inline int compute_mod(int a, int b)
729 {
730     a = a % b;
731     if (a >= 0)
732         return a;
733     else
734         return a + b;
735 }
736
737 static void video_audio_display(VideoState *s)
738 {
739     int i, i_start, x, y1, y, ys, delay, n, nb_display_channels;
740     int ch, channels, h, h2, bgcolor, fgcolor;
741     int64_t time_diff; /* microseconds elapsed since the last audio callback; int64_t avoids overflow */
742     int rdft_bits, nb_freq;
743
744     for (rdft_bits = 1; (1 << rdft_bits) < 2 * s->height; rdft_bits++)
745         ;
746     nb_freq = 1 << (rdft_bits - 1);
747
748     /* compute display index: center on currently output samples */
749     channels = s->sdl_channels;
750     nb_display_channels = channels;
751     if (!s->paused) {
752         int data_used = s->show_audio == 1 ? s->width : (2 * nb_freq);
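            /* the output buffer holds interleaved 16-bit samples, so dividing the byte count
               by n = 2 bytes * channels gives the number of sample frames not yet played */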
753         n = 2 * channels;
754         delay = audio_write_get_buf_size(s);
755         delay /= n;
756
757         /* to be more precise, we take into account the time spent since
758            the last buffer computation */
759         if (audio_callback_time) {
760             time_diff = av_gettime() - audio_callback_time;
761             delay -= (time_diff * s->audio_st->codec->sample_rate) / 1000000;
762         }
763
764         delay += 2 * data_used;
765         if (delay < data_used)
766             delay = data_used;
767
768         i_start = x = compute_mod(s->sample_array_index - delay * channels, SAMPLE_ARRAY_SIZE);
769         if (s->show_audio == 1) {
770             h = INT_MIN;
771             for (i = 0; i < 1000; i += channels) {
772                 int idx = (SAMPLE_ARRAY_SIZE + x - i) % SAMPLE_ARRAY_SIZE;
773                 int a = s->sample_array[idx];
774                 int b = s->sample_array[(idx + 4 * channels) % SAMPLE_ARRAY_SIZE];
775                 int c = s->sample_array[(idx + 5 * channels) % SAMPLE_ARRAY_SIZE];
776                 int d = s->sample_array[(idx + 9 * channels) % SAMPLE_ARRAY_SIZE];
777                 int score = a - d;
778                 if (h < score && (b ^ c) < 0) {
779                     h = score;
780                     i_start = idx;
781                 }
782             }
783         }
784
785         s->last_i_start = i_start;
786     } else {
787         i_start = s->last_i_start;
788     }
789
790     bgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0x00);
791     if (s->show_audio == 1) {
792         fill_rectangle(screen,
793                        s->xleft, s->ytop, s->width, s->height,
794                        bgcolor);
795
796         fgcolor = SDL_MapRGB(screen->format, 0xff, 0xff, 0xff);
797
798         /* total height for one channel */
799         h = s->height / nb_display_channels;
800         /* graph height / 2 */
801         h2 = (h * 9) / 20;
802         for (ch = 0; ch < nb_display_channels; ch++) {
803             i = i_start + ch;
804             y1 = s->ytop + ch * h + (h / 2); /* position of center line */
805             for (x = 0; x < s->width; x++) {
806                 y = (s->sample_array[i] * h2) >> 15;
807                 if (y < 0) {
808                     y = -y;
809                     ys = y1 - y;
810                 } else {
811                     ys = y1;
812                 }
813                 fill_rectangle(screen,
814                                s->xleft + x, ys, 1, y,
815                                fgcolor);
816                 i += channels;
817                 if (i >= SAMPLE_ARRAY_SIZE)
818                     i -= SAMPLE_ARRAY_SIZE;
819             }
820         }
821
822         fgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0xff);
823
824         for (ch = 1; ch < nb_display_channels; ch++) {
825             y = s->ytop + ch * h;
826             fill_rectangle(screen,
827                            s->xleft, y, s->width, 1,
828                            fgcolor);
829         }
830         SDL_UpdateRect(screen, s->xleft, s->ytop, s->width, s->height);
831     } else {
832         nb_display_channels = FFMIN(nb_display_channels, 2);
833         if (rdft_bits != s->rdft_bits) {
834             av_rdft_end(s->rdft);
835             av_free(s->rdft_data);
836             s->rdft = av_rdft_init(rdft_bits, DFT_R2C);
837             s->rdft_bits = rdft_bits;
838             s->rdft_data = av_malloc(4 * nb_freq * sizeof(*s->rdft_data));
839         }
840         {
841             FFTSample *data[2];
842             for (ch = 0; ch < nb_display_channels; ch++) {
843                 data[ch] = s->rdft_data + 2 * nb_freq * ch;
844                 i = i_start + ch;
845                 for (x = 0; x < 2 * nb_freq; x++) {
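                        /* 1 - w*w is a Welch (parabolic) window over the sample block; it tapers
                           the edges to reduce spectral leakage in the RDFT */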
846                     double w = (x - nb_freq) * (1.0 / nb_freq);
847                     data[ch][x] = s->sample_array[i] * (1.0 - w * w);
848                     i += channels;
849                     if (i >= SAMPLE_ARRAY_SIZE)
850                         i -= SAMPLE_ARRAY_SIZE;
851                 }
852                 av_rdft_calc(s->rdft, data[ch]);
853             }
854             // Least efficient way to do this; we should of course access the data directly, but this is more than fast enough
855             for (y = 0; y < s->height; y++) {
856                 double w = 1 / sqrt(nb_freq);
857                 int a = sqrt(w * sqrt(data[0][2 * y + 0] * data[0][2 * y + 0] + data[0][2 * y + 1] * data[0][2 * y + 1]));
858                 int b = (nb_display_channels == 2 ) ? sqrt(w * sqrt(data[1][2 * y + 0] * data[1][2 * y + 0]
859                        + data[1][2 * y + 1] * data[1][2 * y + 1])) : a;
860                 a = FFMIN(a, 255);
861                 b = FFMIN(b, 255);
862                 fgcolor = SDL_MapRGB(screen->format, a, b, (a + b) / 2);
863
864                 fill_rectangle(screen,
865                             s->xpos, s->height-y, 1, 1,
866                             fgcolor);
867             }
868         }
869         SDL_UpdateRect(screen, s->xpos, s->ytop, 1, s->height);
870         s->xpos++;
871         if (s->xpos >= s->width)
872             s->xpos = s->xleft;
873     }
874 }
875
876 static int video_open(VideoState *is)
877 {
878     int flags = SDL_HWSURFACE | SDL_ASYNCBLIT | SDL_HWACCEL;
879     int w,h;
880
881     if (is_full_screen) flags |= SDL_FULLSCREEN;
882     else                flags |= SDL_RESIZABLE;
883
884     if (is_full_screen && fs_screen_width) {
885         w = fs_screen_width;
886         h = fs_screen_height;
887     } else if (!is_full_screen && screen_width) {
888         w = screen_width;
889         h = screen_height;
890 #if CONFIG_AVFILTER
891     } else if (is->out_video_filter && is->out_video_filter->inputs[0]) {
892         w = is->out_video_filter->inputs[0]->w;
893         h = is->out_video_filter->inputs[0]->h;
894 #else
895     } else if (is->video_st && is->video_st->codec->width) {
896         w = is->video_st->codec->width;
897         h = is->video_st->codec->height;
898 #endif
899     } else {
900         w = 640;
901         h = 480;
902     }
903     if (screen && is->width == screen->w && screen->w == w
904        && is->height == screen->h && screen->h == h)
905         return 0;
906
907 #if defined(__APPLE__) && !SDL_VERSION_ATLEAST(1, 2, 14)
908     /* setting bits_per_pixel = 0 or 32 causes blank video on OS X and older SDL */
909     screen = SDL_SetVideoMode(w, h, 24, flags);
910 #else
911     screen = SDL_SetVideoMode(w, h, 0, flags);
912 #endif
913     if (!screen) {
914         fprintf(stderr, "SDL: could not set video mode - exiting\n");
915         return -1;
916     }
917     if (!window_title)
918         window_title = input_filename;
919     SDL_WM_SetCaption(window_title, window_title);
920
921     is->width  = screen->w;
922     is->height = screen->h;
923
924     return 0;
925 }
926
927 /* display the current picture, if any */
928 static void video_display(VideoState *is)
929 {
930     if (!screen)
931         video_open(cur_stream);
932     if (is->audio_st && is->show_audio)
933         video_audio_display(is);
934     else if (is->video_st)
935         video_image_display(is);
936 }
937
938 static int refresh_thread(void *opaque)
939 {
940     VideoState *is = opaque;
941     while (!is->abort_request) {
942         SDL_Event event;
943         event.type = FF_REFRESH_EVENT;
944         event.user.data1 = opaque;
945         if (!is->refresh) {
946             is->refresh = 1;
947             SDL_PushEvent(&event);
948         }
949         av_usleep(is->audio_st && is->show_audio ? rdftspeed * 1000 : 5000); // FIXME: ideally we should wait for the correct time, but SDL's event passing is so slow it would be silly
950     }
951     return 0;
952 }
953
954 /* get the current audio clock value */
955 static double get_audio_clock(VideoState *is)
956 {
957     double pts;
958     int hw_buf_size, bytes_per_sec;
959     pts = is->audio_clock;
960     hw_buf_size = audio_write_get_buf_size(is);
961     bytes_per_sec = 0;
962     if (is->audio_st) {
963         bytes_per_sec = is->audio_st->codec->sample_rate * is->sdl_channels *
964                         av_get_bytes_per_sample(is->sdl_sample_fmt);
965     }
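        /* audio_clock is the pts at the end of the last decoded data; subtract the playback time
           of the data still waiting in the output buffer to get the pts of the sample being heard */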
966     if (bytes_per_sec)
967         pts -= (double)hw_buf_size / bytes_per_sec;
968     return pts;
969 }
970
971 /* get the current video clock value */
972 static double get_video_clock(VideoState *is)
973 {
974     if (is->paused) {
975         return is->video_current_pts;
976     } else {
977         return is->video_current_pts_drift + av_gettime() / 1000000.0;
978     }
979 }
980
981 /* get the current external clock value */
982 static double get_external_clock(VideoState *is)
983 {
984     int64_t ti;
985     ti = av_gettime();
986     return is->external_clock + ((ti - is->external_clock_time) * 1e-6);
987 }
988
989 /* get the current master clock value */
990 static double get_master_clock(VideoState *is)
991 {
992     double val;
993
994     if (is->av_sync_type == AV_SYNC_VIDEO_MASTER) {
995         if (is->video_st)
996             val = get_video_clock(is);
997         else
998             val = get_audio_clock(is);
999     } else if (is->av_sync_type == AV_SYNC_AUDIO_MASTER) {
1000         if (is->audio_st)
1001             val = get_audio_clock(is);
1002         else
1003             val = get_video_clock(is);
1004     } else {
1005         val = get_external_clock(is);
1006     }
1007     return val;
1008 }
1009
1010 /* seek in the stream */
1011 static void stream_seek(VideoState *is, int64_t pos, int64_t rel, int seek_by_bytes)
1012 {
1013     if (!is->seek_req) {
1014         is->seek_pos = pos;
1015         is->seek_rel = rel;
1016         is->seek_flags &= ~AVSEEK_FLAG_BYTE;
1017         if (seek_by_bytes)
1018             is->seek_flags |= AVSEEK_FLAG_BYTE;
1019         is->seek_req = 1;
1020     }
1021 }
1022
1023 /* pause or resume the video */
1024 static void stream_pause(VideoState *is)
1025 {
1026     if (is->paused) {
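             /* resuming: advance frame_timer by the wall-clock time elapsed since the last pts
                update (roughly the time spent paused) so frame scheduling continues seamlessly */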
1027         is->frame_timer += av_gettime() / 1000000.0 + is->video_current_pts_drift - is->video_current_pts;
1028         if (is->read_pause_return != AVERROR(ENOSYS)) {
1029             is->video_current_pts = is->video_current_pts_drift + av_gettime() / 1000000.0;
1030         }
1031         is->video_current_pts_drift = is->video_current_pts - av_gettime() / 1000000.0;
1032     }
1033     is->paused = !is->paused;
1034 }
1035
1036 static double compute_target_time(double frame_current_pts, VideoState *is)
1037 {
1038     double delay, sync_threshold, diff;
1039
1040     /* compute nominal delay */
1041     delay = frame_current_pts - is->frame_last_pts;
1042     if (delay <= 0 || delay >= 10.0) {
1043         /* if incorrect delay, use previous one */
1044         delay = is->frame_last_delay;
1045     } else {
1046         is->frame_last_delay = delay;
1047     }
1048     is->frame_last_pts = frame_current_pts;
1049
1050     /* update delay to follow master synchronisation source */
1051     if (((is->av_sync_type == AV_SYNC_AUDIO_MASTER && is->audio_st) ||
1052          is->av_sync_type == AV_SYNC_EXTERNAL_CLOCK)) {
1053         /* if video is slave, we try to correct big delays by
1054            duplicating or deleting a frame */
1055         diff = get_video_clock(is) - get_master_clock(is);
1056
1057         /* skip or repeat frame. We take into account the
1058            delay to compute the threshold. I still don't know
1059            if it is the best guess */
1060         sync_threshold = FFMAX(AV_SYNC_THRESHOLD, delay);
1061         if (fabs(diff) < AV_NOSYNC_THRESHOLD) {
1062             if (diff <= -sync_threshold)
1063                 delay = 0;
1064             else if (diff >= sync_threshold)
1065                 delay = 2 * delay;
1066         }
1067     }
1068     is->frame_timer += delay;
1069
1070     av_dlog(NULL, "video: delay=%0.3f pts=%0.3f A-V=%f\n",
1071             delay, frame_current_pts, -diff);
1072
1073     return is->frame_timer;
1074 }
1075
1076 /* called to display each frame */
1077 static void video_refresh_timer(void *opaque)
1078 {
1079     VideoState *is = opaque;
1080     VideoPicture *vp;
1081
1082     SubPicture *sp, *sp2;
1083
1084     if (is->video_st) {
1085 retry:
1086         if (is->pictq_size == 0) {
1087             // nothing to do, no picture to display in the queue
1088         } else {
1089             double time = av_gettime() / 1000000.0;
1090             double next_target;
1091             /* dequeue the picture */
1092             vp = &is->pictq[is->pictq_rindex];
1093
1094             if (time < vp->target_clock)
1095                 return;
1096             /* update current video pts */
1097             is->video_current_pts = vp->pts;
1098             is->video_current_pts_drift = is->video_current_pts - time;
1099             is->video_current_pos = vp->pos;
1100             if (is->pictq_size > 1) {
1101                 VideoPicture *nextvp = &is->pictq[(is->pictq_rindex + 1) % VIDEO_PICTURE_QUEUE_SIZE];
1102                 assert(nextvp->target_clock >= vp->target_clock);
1103                 next_target = nextvp->target_clock;
1104             } else {
1105                 next_target = vp->target_clock + is->video_clock - vp->pts; // FIXME pass durations cleanly
1106             }
1107             if (framedrop && time > next_target) {
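                     /* we are past the target display time: raise the frame-skip ratio so that
                        get_video_frame() keeps only about one frame in every skip_frames;
                        queue_picture() lowers the ratio again while the picture queue stays full */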
1108                 is->skip_frames *= 1.0 + FRAME_SKIP_FACTOR;
1109                 if (is->pictq_size > 1 || time > next_target + 0.5) {
1110                     /* update queue size and signal for next picture */
1111                     if (++is->pictq_rindex == VIDEO_PICTURE_QUEUE_SIZE)
1112                         is->pictq_rindex = 0;
1113
1114                     SDL_LockMutex(is->pictq_mutex);
1115                     is->pictq_size--;
1116                     SDL_CondSignal(is->pictq_cond);
1117                     SDL_UnlockMutex(is->pictq_mutex);
1118                     goto retry;
1119                 }
1120             }
1121
1122             if (is->subtitle_st) {
1123                 if (is->subtitle_stream_changed) {
1124                     SDL_LockMutex(is->subpq_mutex);
1125
1126                     while (is->subpq_size) {
1127                         free_subpicture(&is->subpq[is->subpq_rindex]);
1128
1129                         /* update queue size and signal for next picture */
1130                         if (++is->subpq_rindex == SUBPICTURE_QUEUE_SIZE)
1131                             is->subpq_rindex = 0;
1132
1133                         is->subpq_size--;
1134                     }
1135                     is->subtitle_stream_changed = 0;
1136
1137                     SDL_CondSignal(is->subpq_cond);
1138                     SDL_UnlockMutex(is->subpq_mutex);
1139                 } else {
1140                     if (is->subpq_size > 0) {
1141                         sp = &is->subpq[is->subpq_rindex];
1142
1143                         if (is->subpq_size > 1)
1144                             sp2 = &is->subpq[(is->subpq_rindex + 1) % SUBPICTURE_QUEUE_SIZE];
1145                         else
1146                             sp2 = NULL;
1147
1148                         if ((is->video_current_pts > (sp->pts + ((float) sp->sub.end_display_time / 1000)))
1149                                 || (sp2 && is->video_current_pts > (sp2->pts + ((float) sp2->sub.start_display_time / 1000))))
1150                         {
1151                             free_subpicture(sp);
1152
1153                             /* update queue size and signal for next picture */
1154                             if (++is->subpq_rindex == SUBPICTURE_QUEUE_SIZE)
1155                                 is->subpq_rindex = 0;
1156
1157                             SDL_LockMutex(is->subpq_mutex);
1158                             is->subpq_size--;
1159                             SDL_CondSignal(is->subpq_cond);
1160                             SDL_UnlockMutex(is->subpq_mutex);
1161                         }
1162                     }
1163                 }
1164             }
1165
1166             /* display picture */
1167             if (!display_disable)
1168                 video_display(is);
1169
1170             /* update queue size and signal for next picture */
1171             if (++is->pictq_rindex == VIDEO_PICTURE_QUEUE_SIZE)
1172                 is->pictq_rindex = 0;
1173
1174             SDL_LockMutex(is->pictq_mutex);
1175             is->pictq_size--;
1176             SDL_CondSignal(is->pictq_cond);
1177             SDL_UnlockMutex(is->pictq_mutex);
1178         }
1179     } else if (is->audio_st) {
1180         /* draw the next audio frame */
1181
1182         /* if there is only an audio stream, then display the audio bars (better
1183            than nothing, just to test the implementation) */
1184
1185         /* display picture */
1186         if (!display_disable)
1187             video_display(is);
1188     }
1189     if (show_status) {
1190         static int64_t last_time;
1191         int64_t cur_time;
1192         int aqsize, vqsize, sqsize;
1193         double av_diff;
1194
1195         cur_time = av_gettime();
1196         if (!last_time || (cur_time - last_time) >= 30000) {
1197             aqsize = 0;
1198             vqsize = 0;
1199             sqsize = 0;
1200             if (is->audio_st)
1201                 aqsize = is->audioq.size;
1202             if (is->video_st)
1203                 vqsize = is->videoq.size;
1204             if (is->subtitle_st)
1205                 sqsize = is->subtitleq.size;
1206             av_diff = 0;
1207             if (is->audio_st && is->video_st)
1208                 av_diff = get_audio_clock(is) - get_video_clock(is);
1209             printf("%7.2f A-V:%7.3f s:%3.1f aq=%5dKB vq=%5dKB sq=%5dB f=%"PRId64"/%"PRId64"   \r",
1210                    get_master_clock(is), av_diff, FFMAX(is->skip_frames - 1, 0), aqsize / 1024,
1211                    vqsize / 1024, sqsize, is->pts_ctx.num_faulty_dts, is->pts_ctx.num_faulty_pts);
1212             fflush(stdout);
1213             last_time = cur_time;
1214         }
1215     }
1216 }
1217
1218 static void stream_close(VideoState *is)
1219 {
1220     VideoPicture *vp;
1221     int i;
1222     /* XXX: use a special url_shutdown call to abort parse cleanly */
1223     is->abort_request = 1;
1224     SDL_WaitThread(is->parse_tid, NULL);
1225     SDL_WaitThread(is->refresh_tid, NULL);
1226
1227     /* free all pictures */
1228     for (i = 0; i < VIDEO_PICTURE_QUEUE_SIZE; i++) {
1229         vp = &is->pictq[i];
1230 #if CONFIG_AVFILTER
1231         avfilter_unref_bufferp(&vp->picref);
1232 #endif
1233         if (vp->bmp) {
1234             SDL_FreeYUVOverlay(vp->bmp);
1235             vp->bmp = NULL;
1236         }
1237     }
1238     SDL_DestroyMutex(is->pictq_mutex);
1239     SDL_DestroyCond(is->pictq_cond);
1240     SDL_DestroyMutex(is->subpq_mutex);
1241     SDL_DestroyCond(is->subpq_cond);
1242 #if !CONFIG_AVFILTER
1243     if (is->img_convert_ctx)
1244         sws_freeContext(is->img_convert_ctx);
1245 #endif
1246     av_free(is);
1247 }
1248
1249 static void do_exit(void)
1250 {
1251     if (cur_stream) {
1252         stream_close(cur_stream);
1253         cur_stream = NULL;
1254     }
1255     uninit_opts();
1256 #if CONFIG_AVFILTER
1257     avfilter_uninit();
1258 #endif
1259     avformat_network_deinit();
1260     if (show_status)
1261         printf("\n");
1262     SDL_Quit();
1263     av_log(NULL, AV_LOG_QUIET, "");
1264     exit(0);
1265 }
1266
1267 /* allocate a picture (this needs to be done in the main thread to avoid
1268    potential locking problems) */
1269 static void alloc_picture(void *opaque)
1270 {
1271     VideoState *is = opaque;
1272     VideoPicture *vp;
1273
1274     vp = &is->pictq[is->pictq_windex];
1275
1276     if (vp->bmp)
1277         SDL_FreeYUVOverlay(vp->bmp);
1278
1279 #if CONFIG_AVFILTER
1280     avfilter_unref_bufferp(&vp->picref);
1281
1282     vp->width   = is->out_video_filter->inputs[0]->w;
1283     vp->height  = is->out_video_filter->inputs[0]->h;
1284     vp->pix_fmt = is->out_video_filter->inputs[0]->format;
1285 #else
1286     vp->width   = is->video_st->codec->width;
1287     vp->height  = is->video_st->codec->height;
1288     vp->pix_fmt = is->video_st->codec->pix_fmt;
1289 #endif
1290
1291     vp->bmp = SDL_CreateYUVOverlay(vp->width, vp->height,
1292                                    SDL_YV12_OVERLAY,
1293                                    screen);
1294     if (!vp->bmp || vp->bmp->pitches[0] < vp->width) {
1295         /* SDL allocates a buffer smaller than requested if the video
1296          * overlay hardware is unable to support the requested size. */
1297         fprintf(stderr, "Error: the video system does not support an image\n"
1298                         "size of %dx%d pixels. Try using -vf \"scale=w:h\"\n"
1299                         "to reduce the image size.\n", vp->width, vp->height );
1300         do_exit();
1301     }
1302
1303     SDL_LockMutex(is->pictq_mutex);
1304     vp->allocated = 1;
1305     SDL_CondSignal(is->pictq_cond);
1306     SDL_UnlockMutex(is->pictq_mutex);
1307 }
1308
1309 /**
1310  *
1311  * @param pts the dts of the pkt / pts of the frame and guessed if not known
1312  */
1313 static int queue_picture(VideoState *is, AVFrame *src_frame, double pts, int64_t pos)
1314 {
1315     VideoPicture *vp;
1316 #if CONFIG_AVFILTER
1317     AVPicture pict_src;
1318 #else
1319     int dst_pix_fmt = PIX_FMT_YUV420P;
1320 #endif
1321     /* wait until we have space to put a new picture */
1322     SDL_LockMutex(is->pictq_mutex);
1323
1324     if (is->pictq_size >= VIDEO_PICTURE_QUEUE_SIZE && !is->refresh)
1325         is->skip_frames = FFMAX(1.0 - FRAME_SKIP_FACTOR, is->skip_frames * (1.0 - FRAME_SKIP_FACTOR));
1326
1327     while (is->pictq_size >= VIDEO_PICTURE_QUEUE_SIZE &&
1328            !is->videoq.abort_request) {
1329         SDL_CondWait(is->pictq_cond, is->pictq_mutex);
1330     }
1331     SDL_UnlockMutex(is->pictq_mutex);
1332
1333     if (is->videoq.abort_request)
1334         return -1;
1335
1336     vp = &is->pictq[is->pictq_windex];
1337
1338     /* alloc or resize hardware picture buffer */
1339     if (!vp->bmp || vp->reallocate ||
1340 #if CONFIG_AVFILTER
1341         vp->width  != is->out_video_filter->inputs[0]->w ||
1342         vp->height != is->out_video_filter->inputs[0]->h) {
1343 #else
1344         vp->width != is->video_st->codec->width ||
1345         vp->height != is->video_st->codec->height) {
1346 #endif
1347         SDL_Event event;
1348
1349         vp->allocated  = 0;
1350         vp->reallocate = 0;
1351
1352         /* the allocation must be done in the main thread to avoid
1353            locking problems */
1354         event.type = FF_ALLOC_EVENT;
1355         event.user.data1 = is;
1356         SDL_PushEvent(&event);
1357
1358         /* wait until the picture is allocated */
1359         SDL_LockMutex(is->pictq_mutex);
1360         while (!vp->allocated && !is->videoq.abort_request) {
1361             SDL_CondWait(is->pictq_cond, is->pictq_mutex);
1362         }
1363         SDL_UnlockMutex(is->pictq_mutex);
1364
1365         if (is->videoq.abort_request)
1366             return -1;
1367     }
1368
1369     /* if the frame is not skipped, then display it */
1370     if (vp->bmp) {
1371         AVPicture pict = { { 0 } };
1372 #if CONFIG_AVFILTER
1373         avfilter_unref_bufferp(&vp->picref);
1374         vp->picref = src_frame->opaque;
1375 #endif
1376
1377         /* get a pointer on the bitmap */
1378         SDL_LockYUVOverlay (vp->bmp);
1379
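             /* same Y/V/U vs. Y/U/V plane swap as in video_image_display(): SDL's YV12 overlay
                keeps the V plane before the U plane */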
1380         pict.data[0] = vp->bmp->pixels[0];
1381         pict.data[1] = vp->bmp->pixels[2];
1382         pict.data[2] = vp->bmp->pixels[1];
1383
1384         pict.linesize[0] = vp->bmp->pitches[0];
1385         pict.linesize[1] = vp->bmp->pitches[2];
1386         pict.linesize[2] = vp->bmp->pitches[1];
1387
1388 #if CONFIG_AVFILTER
1389         pict_src.data[0] = src_frame->data[0];
1390         pict_src.data[1] = src_frame->data[1];
1391         pict_src.data[2] = src_frame->data[2];
1392
1393         pict_src.linesize[0] = src_frame->linesize[0];
1394         pict_src.linesize[1] = src_frame->linesize[1];
1395         pict_src.linesize[2] = src_frame->linesize[2];
1396
1397         // FIXME use direct rendering
1398         av_picture_copy(&pict, &pict_src,
1399                         vp->pix_fmt, vp->width, vp->height);
1400 #else
1401         sws_flags = av_get_int(sws_opts, "sws_flags", NULL);
1402         is->img_convert_ctx = sws_getCachedContext(is->img_convert_ctx,
1403             vp->width, vp->height, vp->pix_fmt, vp->width, vp->height,
1404             dst_pix_fmt, sws_flags, NULL, NULL, NULL);
1405         if (is->img_convert_ctx == NULL) {
1406             fprintf(stderr, "Cannot initialize the conversion context\n");
1407             exit(1);
1408         }
1409         sws_scale(is->img_convert_ctx, src_frame->data, src_frame->linesize,
1410                   0, vp->height, pict.data, pict.linesize);
1411 #endif
1412         /* update the bitmap content */
1413         SDL_UnlockYUVOverlay(vp->bmp);
1414
1415         vp->pts = pts;
1416         vp->pos = pos;
1417
1418         /* now we can update the picture count */
1419         if (++is->pictq_windex == VIDEO_PICTURE_QUEUE_SIZE)
1420             is->pictq_windex = 0;
1421         SDL_LockMutex(is->pictq_mutex);
1422         vp->target_clock = compute_target_time(vp->pts, is);
1423
1424         is->pictq_size++;
1425         SDL_UnlockMutex(is->pictq_mutex);
1426     }
1427     return 0;
1428 }
1429
1430 /**
1431  * compute the exact PTS for the picture if it is omitted in the stream
1432  * @param pts1 the dts of the pkt / pts of the frame
1433  */
1434 static int output_picture2(VideoState *is, AVFrame *src_frame, double pts1, int64_t pos)
1435 {
1436     double frame_delay, pts;
1437
1438     pts = pts1;
1439
1440     if (pts != 0) {
1441         /* update video clock with pts, if present */
1442         is->video_clock = pts;
1443     } else {
1444         pts = is->video_clock;
1445     }
1446     /* update video clock for next frame */
1447     frame_delay = av_q2d(is->video_st->codec->time_base);
1448     /* for MPEG2, the frame can be repeated, so we update the
1449        clock accordingly */
1450     frame_delay += src_frame->repeat_pict * (frame_delay * 0.5);
1451     is->video_clock += frame_delay;
1452
1453     return queue_picture(is, src_frame, pts, pos);
1454 }
1455
1456 static int get_video_frame(VideoState *is, AVFrame *frame, int64_t *pts, AVPacket *pkt)
1457 {
1458     int got_picture, i;
1459
1460     if (packet_queue_get(&is->videoq, pkt, 1) < 0)
1461         return -1;
1462
1463     if (pkt->data == flush_pkt.data) {
1464         avcodec_flush_buffers(is->video_st->codec);
1465
1466         SDL_LockMutex(is->pictq_mutex);
1467         // Make sure there are no long delay timers (ideally we should just flush the queue, but that's harder)
1468         for (i = 0; i < VIDEO_PICTURE_QUEUE_SIZE; i++) {
1469             is->pictq[i].target_clock = 0;
1470         }
1471         while (is->pictq_size && !is->videoq.abort_request) {
1472             SDL_CondWait(is->pictq_cond, is->pictq_mutex);
1473         }
1474         is->video_current_pos = -1;
1475         SDL_UnlockMutex(is->pictq_mutex);
1476
1477         init_pts_correction(&is->pts_ctx);
1478         is->frame_last_pts = AV_NOPTS_VALUE;
1479         is->frame_last_delay = 0;
1480         is->frame_timer = (double)av_gettime() / 1000000.0;
1481         is->skip_frames = 1;
1482         is->skip_frames_index = 0;
1483         return 0;
1484     }
1485
1486     avcodec_decode_video2(is->video_st->codec, frame, &got_picture, pkt);
1487
1488     if (got_picture) {
1489         if (decoder_reorder_pts == -1) {
1490             *pts = guess_correct_pts(&is->pts_ctx, frame->pkt_pts, frame->pkt_dts);
1491         } else if (decoder_reorder_pts) {
1492             *pts = frame->pkt_pts;
1493         } else {
1494             *pts = frame->pkt_dts;
1495         }
1496
1497         if (*pts == AV_NOPTS_VALUE) {
1498             *pts = 0;
1499         }
1500
1501         is->skip_frames_index += 1;
1502         if (is->skip_frames_index >= is->skip_frames) {
1503             is->skip_frames_index -= FFMAX(is->skip_frames, 1.0);
1504             return 1;
1505         }
1506
1507     }
1508     return 0;
1509 }
1510
1511 #if CONFIG_AVFILTER
1512 static int configure_video_filters(AVFilterGraph *graph, VideoState *is, const char *vfilters)
1513 {
1514     char sws_flags_str[128];
1515     char buffersrc_args[256];
1516     int ret;
1517     AVFilterContext *filt_src = NULL, *filt_out = NULL, *filt_format;
1518     AVCodecContext *codec = is->video_st->codec;
1519
1520     snprintf(sws_flags_str, sizeof(sws_flags_str), "flags=%d", sws_flags);
1521     graph->scale_sws_opts = av_strdup(sws_flags_str);
1522
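         /* the buffer source is configured with the decoder's geometry and pixel format plus the
            stream time base and sample aspect ratio, in the order the args string below lists them */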
1523     snprintf(buffersrc_args, sizeof(buffersrc_args), "%d:%d:%d:%d:%d:%d:%d",
1524              codec->width, codec->height, codec->pix_fmt,
1525              is->video_st->time_base.num, is->video_st->time_base.den,
1526              codec->sample_aspect_ratio.num, codec->sample_aspect_ratio.den);
1527
1528
1529     if ((ret = avfilter_graph_create_filter(&filt_src,
1530                                             avfilter_get_by_name("buffer"),
1531                                             "src", buffersrc_args, NULL,
1532                                             graph)) < 0)
1533         return ret;
1534     if ((ret = avfilter_graph_create_filter(&filt_out,
1535                                             avfilter_get_by_name("buffersink"),
1536                                             "out", NULL, NULL, graph)) < 0)
1537         return ret;
1538
1539     if ((ret = avfilter_graph_create_filter(&filt_format,
1540                                             avfilter_get_by_name("format"),
1541                                             "format", "yuv420p", NULL, graph)) < 0)
1542         return ret;
1543     if ((ret = avfilter_link(filt_format, 0, filt_out, 0)) < 0)
1544         return ret;
1545
1546
1547     if (vfilters) {
1548         AVFilterInOut *outputs = avfilter_inout_alloc();
1549         AVFilterInOut *inputs  = avfilter_inout_alloc();
1550
1551         outputs->name    = av_strdup("in");
1552         outputs->filter_ctx = filt_src;
1553         outputs->pad_idx = 0;
1554         outputs->next    = NULL;
1555
1556         inputs->name    = av_strdup("out");
1557         inputs->filter_ctx = filt_format;
1558         inputs->pad_idx = 0;
1559         inputs->next    = NULL;
1560
1561         if ((ret = avfilter_graph_parse(graph, vfilters, inputs, outputs, NULL)) < 0)
1562             return ret;
1563     } else {
1564         if ((ret = avfilter_link(filt_src, 0, filt_format, 0)) < 0)
1565             return ret;
1566     }
1567
1568     if ((ret = avfilter_graph_config(graph, NULL)) < 0)
1569         return ret;
1570
1571     is->in_video_filter  = filt_src;
1572     is->out_video_filter = filt_out;
1573
1574     if (codec->codec->capabilities & CODEC_CAP_DR1) {
1575         is->use_dr1 = 1;
1576         codec->get_buffer     = codec_get_buffer;
1577         codec->release_buffer = codec_release_buffer;
1578         codec->opaque         = &is->buffer_pool;
1579     }
1580
1581     return ret;
1582 }
1583
1584 #endif  /* CONFIG_AVFILTER */
1585
1586 static int video_thread(void *arg)
1587 {
1588     AVPacket pkt = { 0 };
1589     VideoState *is = arg;
1590     AVFrame *frame = avcodec_alloc_frame();
1591     int64_t pts_int;
1592     double pts;
1593     int ret;
1594
1595 #if CONFIG_AVFILTER
1596     AVFilterGraph *graph = avfilter_graph_alloc();
1597     AVFilterContext *filt_out = NULL, *filt_in = NULL;
1598     int64_t pos;
1599     int last_w = is->video_st->codec->width;
1600     int last_h = is->video_st->codec->height;
1601
1602     if ((ret = configure_video_filters(graph, is, vfilters)) < 0)
1603         goto the_end;
1604     filt_in  = is->in_video_filter;
1605     filt_out = is->out_video_filter;
1606 #endif
1607
1608     for (;;) {
1609 #if CONFIG_AVFILTER
1610         AVFilterBufferRef *picref;
1611         AVRational tb;
1612 #endif
1613         while (is->paused && !is->videoq.abort_request)
1614             SDL_Delay(10);
1615
1616         av_free_packet(&pkt);
1617
1618         ret = get_video_frame(is, frame, &pts_int, &pkt);
1619         if (ret < 0)
1620             goto the_end;
1621
1622         if (!ret)
1623             continue;
1624
1625 #if CONFIG_AVFILTER
1626         if (   last_w != is->video_st->codec->width
1627             || last_h != is->video_st->codec->height) {
1628             av_dlog(NULL, "Changing size %dx%d -> %dx%d\n", last_w, last_h,
1629                     is->video_st->codec->width, is->video_st->codec->height);
1630             avfilter_graph_free(&graph);
1631             graph = avfilter_graph_alloc();
1632             if ((ret = configure_video_filters(graph, is, vfilters)) < 0)
1633                 goto the_end;
1634             filt_in  = is->in_video_filter;
1635             filt_out = is->out_video_filter;
1636             last_w = is->video_st->codec->width;
1637             last_h = is->video_st->codec->height;
1638         }
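        /* The buffer source's width/height/pixel format are baked into
         * buffersrc_args when the graph is created, so a mid-stream
         * resolution change cannot be handled in place: the whole graph is
         * torn down and rebuilt with the new decoder dimensions. */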
1639
1640         frame->pts = pts_int;
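        /* Two ways into the filter graph: with direct rendering (use_dr1)
         * the decoder's own buffer is wrapped in an AVFilterBufferRef and
         * handed over by reference (the FrameBuffer refcount is bumped and
         * later dropped via filter_release_buffer); otherwise
         * av_buffersrc_write_frame() copies the frame data into the graph. */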
1641         if (is->use_dr1) {
1642             FrameBuffer      *buf = frame->opaque;
1643             AVFilterBufferRef *fb = avfilter_get_video_buffer_ref_from_arrays(
1644                                         frame->data, frame->linesize,
1645                                         AV_PERM_READ | AV_PERM_PRESERVE,
1646                                         frame->width, frame->height,
1647                                         frame->format);
1648
1649             avfilter_copy_frame_props(fb, frame);
1650             fb->buf->priv           = buf;
1651             fb->buf->free           = filter_release_buffer;
1652
1653             buf->refcount++;
1654             av_buffersrc_buffer(filt_in, fb);
1655
1656         } else
1657             av_buffersrc_write_frame(filt_in, frame);
1658
1659         while (ret >= 0) {
1660             ret = av_buffersink_read(filt_out, &picref);
1661             if (ret < 0) {
1662                 ret = 0;
1663                 break;
1664             }
1665
1666             avfilter_copy_buf_props(frame, picref);
1667
1668             pts_int = picref->pts;
1669             tb      = filt_out->inputs[0]->time_base;
1670             pos     = picref->pos;
1671             frame->opaque = picref;
1672
1673             if (av_cmp_q(tb, is->video_st->time_base)) {
1674                 av_unused int64_t pts1 = pts_int;
1675                 pts_int = av_rescale_q(pts_int, tb, is->video_st->time_base);
1676                 av_dlog(NULL, "video_thread(): "
1677                         "tb:%d/%d pts:%"PRId64" -> tb:%d/%d pts:%"PRId64"\n",
1678                         tb.num, tb.den, pts1,
1679                         is->video_st->time_base.num, is->video_st->time_base.den, pts_int);
1680             }
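            /* pts_int is now expressed in the video stream's time base; the
             * multiplication below turns it into seconds for the sync code.
             * Illustrative numbers only: a filter output time base of 1/25
             * with pts 50 becomes pts 180000 in a 1/90000 stream time base,
             * i.e. 2.0 seconds either way. */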
1681             pts = pts_int * av_q2d(is->video_st->time_base);
1682             ret = output_picture2(is, frame, pts, pos);
1683         }
1684 #else
1685         pts = pts_int * av_q2d(is->video_st->time_base);
1686         ret = output_picture2(is, frame, pts, pkt.pos);
1687 #endif
1688
1689         if (ret < 0)
1690             goto the_end;
1691
1692         if (step)
1693             if (cur_stream)
1694                 stream_pause(cur_stream);
1695     }
1696  the_end:
1697 #if CONFIG_AVFILTER
1698     av_freep(&vfilters);
1699     avfilter_graph_free(&graph);
1700 #endif
1701     av_free_packet(&pkt);
1702     avcodec_free_frame(&frame);
1703     return 0;
1704 }
1705
1706 static int subtitle_thread(void *arg)
1707 {
1708     VideoState *is = arg;
1709     SubPicture *sp;
1710     AVPacket pkt1, *pkt = &pkt1;
1711     int got_subtitle;
1712     double pts;
1713     int i, j;
1714     int r, g, b, y, u, v, a;
1715
1716     for (;;) {
1717         while (is->paused && !is->subtitleq.abort_request) {
1718             SDL_Delay(10);
1719         }
1720         if (packet_queue_get(&is->subtitleq, pkt, 1) < 0)
1721             break;
1722
1723         if (pkt->data == flush_pkt.data) {
1724             avcodec_flush_buffers(is->subtitle_st->codec);
1725             continue;
1726         }
1727         SDL_LockMutex(is->subpq_mutex);
1728         while (is->subpq_size >= SUBPICTURE_QUEUE_SIZE &&
1729                !is->subtitleq.abort_request) {
1730             SDL_CondWait(is->subpq_cond, is->subpq_mutex);
1731         }
1732         SDL_UnlockMutex(is->subpq_mutex);
1733
1734         if (is->subtitleq.abort_request)
1735             return 0;
1736
1737         sp = &is->subpq[is->subpq_windex];
1738
1739         /* NOTE: pts is the PTS of the _first_ subtitle beginning in
1740            this packet, if any */
1741         pts = 0;
1742         if (pkt->pts != AV_NOPTS_VALUE)
1743             pts = av_q2d(is->subtitle_st->time_base) * pkt->pts;
1744
1745         avcodec_decode_subtitle2(is->subtitle_st->codec, &sp->sub,
1746                                  &got_subtitle, pkt);
1747
1748         if (got_subtitle && sp->sub.format == 0) {
1749             sp->pts = pts;
1750
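            /* Convert the palette of every bitmap subtitle rectangle from
             * RGBA to CCIR YUVA in place, so the rectangles can later be
             * blended directly onto the YUV overlay. */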
1751             for (i = 0; i < sp->sub.num_rects; i++)
1752             {
1753                 for (j = 0; j < sp->sub.rects[i]->nb_colors; j++)
1754                 {
1755                     RGBA_IN(r, g, b, a, (uint32_t*)sp->sub.rects[i]->pict.data[1] + j);
1756                     y = RGB_TO_Y_CCIR(r, g, b);
1757                     u = RGB_TO_U_CCIR(r, g, b, 0);
1758                     v = RGB_TO_V_CCIR(r, g, b, 0);
1759                     YUVA_OUT((uint32_t*)sp->sub.rects[i]->pict.data[1] + j, y, u, v, a);
1760                 }
1761             }
1762
1763             /* now we can update the picture count */
1764             if (++is->subpq_windex == SUBPICTURE_QUEUE_SIZE)
1765                 is->subpq_windex = 0;
1766             SDL_LockMutex(is->subpq_mutex);
1767             is->subpq_size++;
1768             SDL_UnlockMutex(is->subpq_mutex);
1769         }
1770         av_free_packet(pkt);
1771     }
1772     return 0;
1773 }
1774
1775 /* copy samples into the ring buffer used by the audio waveform/spectrum display */
1776 static void update_sample_display(VideoState *is, short *samples, int samples_size)
1777 {
1778     int size, len;
1779
1780     size = samples_size / sizeof(short);
1781     while (size > 0) {
1782         len = SAMPLE_ARRAY_SIZE - is->sample_array_index;
1783         if (len > size)
1784             len = size;
1785         memcpy(is->sample_array + is->sample_array_index, samples, len * sizeof(short));
1786         samples += len;
1787         is->sample_array_index += len;
1788         if (is->sample_array_index >= SAMPLE_ARRAY_SIZE)
1789             is->sample_array_index = 0;
1790         size -= len;
1791     }
1792 }
1793
1794 /* return the new audio buffer size (samples can be added or deleted
1795    to improve sync when video or an external clock is the master) */
1796 static int synchronize_audio(VideoState *is, short *samples,
1797                              int samples_size1, double pts)
1798 {
1799     int n, samples_size;
1800     double ref_clock;
1801
1802     n = is->sdl_channels * av_get_bytes_per_sample(is->sdl_sample_fmt);
1803     samples_size = samples_size1;
1804
1805     /* if not master, then we try to remove or add samples to correct the clock */
1806     if (((is->av_sync_type == AV_SYNC_VIDEO_MASTER && is->video_st) ||
1807          is->av_sync_type == AV_SYNC_EXTERNAL_CLOCK)) {
1808         double diff, avg_diff;
1809         int wanted_size, min_size, max_size, nb_samples;
1810
1811         ref_clock = get_master_clock(is);
1812         diff = get_audio_clock(is) - ref_clock;
1813
1814         if (diff < AV_NOSYNC_THRESHOLD) {
1815             is->audio_diff_cum = diff + is->audio_diff_avg_coef * is->audio_diff_cum;
1816             if (is->audio_diff_avg_count < AUDIO_DIFF_AVG_NB) {
1817                 /* not enough measures to have a correct estimate */
1818                 is->audio_diff_avg_count++;
1819             } else {
1820                 /* estimate the A-V difference */
1821                 avg_diff = is->audio_diff_cum * (1.0 - is->audio_diff_avg_coef);
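                /* audio_diff_cum is an exponentially weighted sum of the
                 * recent A-V differences: cum = diff + coef * cum gives the
                 * k-th oldest sample a weight of coef^k, and the weights sum
                 * to 1 / (1 - coef), so multiplying by (1 - coef) yields a
                 * weighted average.  With coef = exp(log(0.01) / 20) the
                 * 20th oldest sample still contributes about 1%. */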
1822
1823                 if (fabs(avg_diff) >= is->audio_diff_threshold) {
1824                     wanted_size = samples_size + ((int)(diff * is->audio_st->codec->sample_rate) * n);
1825                     nb_samples = samples_size / n;
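                    /* Illustrative numbers only: for stereo S16 at 44100 Hz
                     * (n = 4 bytes per sample frame) and diff = +0.02 s, about
                     * 0.02 * 44100 = 882 extra sample frames (3528 bytes) are
                     * requested; the result is then clamped to within
                     * SAMPLE_CORRECTION_PERCENT_MAX of the original size. */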
1826
1827                     min_size = ((nb_samples * (100 - SAMPLE_CORRECTION_PERCENT_MAX)) / 100) * n;
1828                     max_size = ((nb_samples * (100 + SAMPLE_CORRECTION_PERCENT_MAX)) / 100) * n;
1829                     if (wanted_size < min_size)
1830                         wanted_size = min_size;
1831                     else if (wanted_size > max_size)
1832                         wanted_size = max_size;
1833
1834                     /* add or remove samples to correct the sync */
1835                     if (wanted_size < samples_size) {
1836                         /* remove samples */
1837                         samples_size = wanted_size;
1838                     } else if (wanted_size > samples_size) {
1839                         uint8_t *samples_end, *q;
1840                         int nb;
1841
1842                         /* add samples */
1843                         nb = (wanted_size - samples_size);
1844                         samples_end = (uint8_t *)samples + samples_size - n;
1845                         q = samples_end + n;
1846                         while (nb > 0) {
1847                             memcpy(q, samples_end, n);
1848                             q += n;
1849                             nb -= n;
1850                         }
1851                         samples_size = wanted_size;
1852                     }
1853                 }
1854                 av_dlog(NULL, "diff=%f adiff=%f sample_diff=%d apts=%0.3f vpts=%0.3f %f\n",
1855                         diff, avg_diff, samples_size - samples_size1,
1856                         is->audio_clock, is->video_clock, is->audio_diff_threshold);
1857             }
1858         } else {
1859             /* too big difference : may be initial PTS errors, so
1860                reset A-V filter */
1861             is->audio_diff_avg_count = 0;
1862             is->audio_diff_cum       = 0;
1863         }
1864     }
1865
1866     return samples_size;
1867 }
1868
1869 /* decode one audio frame and return its uncompressed size */
1870 static int audio_decode_frame(VideoState *is, double *pts_ptr)
1871 {
1872     AVPacket *pkt_temp = &is->audio_pkt_temp;
1873     AVPacket *pkt = &is->audio_pkt;
1874     AVCodecContext *dec = is->audio_st->codec;
1875     int n, len1, data_size, got_frame;
1876     double pts;
1877     int new_packet = 0;
1878     int flush_complete = 0;
1879
1880     for (;;) {
1881         /* NOTE: the audio packet can contain several frames */
1882         while (pkt_temp->size > 0 || (!pkt_temp->data && new_packet)) {
1883             int resample_changed, audio_resample;
1884
1885             if (!is->frame) {
1886                 if (!(is->frame = avcodec_alloc_frame()))
1887                     return AVERROR(ENOMEM);
1888             } else
1889                 avcodec_get_frame_defaults(is->frame);
1890
1891             if (flush_complete)
1892                 break;
1893             new_packet = 0;
1894             len1 = avcodec_decode_audio4(dec, is->frame, &got_frame, pkt_temp);
1895             if (len1 < 0) {
1896                 /* if error, we skip the frame */
1897                 pkt_temp->size = 0;
1898                 break;
1899             }
1900
1901             pkt_temp->data += len1;
1902             pkt_temp->size -= len1;
1903
1904             if (!got_frame) {
1905                 /* stop sending empty packets if the decoder is finished */
1906                 if (!pkt_temp->data && dec->codec->capabilities & CODEC_CAP_DELAY)
1907                     flush_complete = 1;
1908                 continue;
1909             }
1910             data_size = av_samples_get_buffer_size(NULL, dec->channels,
1911                                                    is->frame->nb_samples,
1912                                                    dec->sample_fmt, 1);
1913
1914             audio_resample = dec->sample_fmt     != is->sdl_sample_fmt ||
1915                              dec->channel_layout != is->sdl_channel_layout;
1916
1917             resample_changed = dec->sample_fmt     != is->resample_sample_fmt ||
1918                                dec->channel_layout != is->resample_channel_layout;
1919
1920             if ((!is->avr && audio_resample) || resample_changed) {
1921                 int ret;
1922                 if (is->avr)
1923                     avresample_close(is->avr);
1924                 else if (audio_resample) {
1925                     is->avr = avresample_alloc_context();
1926                     if (!is->avr) {
1927                         fprintf(stderr, "error allocating AVAudioResampleContext\n");
1928                         break;
1929                     }
1930                 }
1931                 if (audio_resample) {
1932                     av_opt_set_int(is->avr, "in_channel_layout",  dec->channel_layout,    0);
1933                     av_opt_set_int(is->avr, "in_sample_fmt",      dec->sample_fmt,        0);
1934                     av_opt_set_int(is->avr, "in_sample_rate",     dec->sample_rate,       0);
1935                     av_opt_set_int(is->avr, "out_channel_layout", is->sdl_channel_layout, 0);
1936                     av_opt_set_int(is->avr, "out_sample_fmt",     is->sdl_sample_fmt,     0);
1937                     av_opt_set_int(is->avr, "out_sample_rate",    dec->sample_rate,       0);
1938
1939                     if ((ret = avresample_open(is->avr)) < 0) {
1940                         fprintf(stderr, "error initializing libavresample\n");
1941                         break;
1942                     }
1943                 }
1944                 is->resample_sample_fmt     = dec->sample_fmt;
1945                 is->resample_channel_layout = dec->channel_layout;
1946             }
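            /* At this point is->avr, when needed, converts whatever the
             * decoder outputs into the fixed layout/format SDL was opened
             * with (mono or stereo, signed 16-bit).  It is (re)created here
             * whenever the decoder's sample format or channel layout changes
             * mid-stream. */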
1947
1948             if (audio_resample) {
1949                 void *tmp_out;
1950                 int out_samples, out_size, out_linesize;
1951                 int osize      = av_get_bytes_per_sample(is->sdl_sample_fmt);
1952                 int nb_samples = is->frame->nb_samples;
1953
1954                 out_size = av_samples_get_buffer_size(&out_linesize,
1955                                                       is->sdl_channels,
1956                                                       nb_samples,
1957                                                       is->sdl_sample_fmt, 0);
1958                 tmp_out = av_realloc(is->audio_buf1, out_size);
1959                 if (!tmp_out)
1960                     return AVERROR(ENOMEM);
1961                 is->audio_buf1 = tmp_out;
1962
1963                 out_samples = avresample_convert(is->avr,
1964                                                  &is->audio_buf1,
1965                                                  out_linesize, nb_samples,
1966                                                  is->frame->data,
1967                                                  is->frame->linesize[0],
1968                                                  is->frame->nb_samples);
1969                 if (out_samples < 0) {
1970                     fprintf(stderr, "avresample_convert() failed\n");
1971                     break;
1972                 }
1973                 is->audio_buf = is->audio_buf1;
1974                 data_size = out_samples * osize * is->sdl_channels;
1975             } else {
1976                 is->audio_buf = is->frame->data[0];
1977             }
1978
1979             /* use the running audio clock as the pts of this frame */
1980             pts = is->audio_clock;
1981             *pts_ptr = pts;
1982             n = is->sdl_channels * av_get_bytes_per_sample(is->sdl_sample_fmt);
1983             is->audio_clock += (double)data_size /
1984                 (double)(n * dec->sample_rate);
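            /* data_size / (bytes per sample * channels * sample rate) is the
             * duration of the decoded chunk, so the audio clock advances by
             * exactly the amount of audio handed to the output.  Illustrative
             * numbers only: 4096 bytes of stereo S16 at 44100 Hz advance the
             * clock by 4096 / (4 * 44100) ~= 23.2 ms. */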
1985 #ifdef DEBUG
1986             {
1987                 static double last_clock;
1988                 printf("audio: delay=%0.3f clock=%0.3f pts=%0.3f\n",
1989                        is->audio_clock - last_clock,
1990                        is->audio_clock, pts);
1991                 last_clock = is->audio_clock;
1992             }
1993 #endif
1994             return data_size;
1995         }
1996
1997         /* free the current packet */
1998         if (pkt->data)
1999             av_free_packet(pkt);
2000         memset(pkt_temp, 0, sizeof(*pkt_temp));
2001
2002         if (is->paused || is->audioq.abort_request) {
2003             return -1;
2004         }
2005
2006         /* read next packet */
2007         if ((new_packet = packet_queue_get(&is->audioq, pkt, 1)) < 0)
2008             return -1;
2009
2010         if (pkt->data == flush_pkt.data) {
2011             avcodec_flush_buffers(dec);
2012             flush_complete = 0;
2013         }
2014
2015         *pkt_temp = *pkt;
2016
2017         /* update the audio clock with the packet pts, if available */
2018         if (pkt->pts != AV_NOPTS_VALUE) {
2019             is->audio_clock = av_q2d(is->audio_st->time_base)*pkt->pts;
2020         }
2021     }
2022 }
2023
2024 /* prepare a new audio buffer */
2025 static void sdl_audio_callback(void *opaque, Uint8 *stream, int len)
2026 {
2027     VideoState *is = opaque;
2028     int audio_size, len1;
2029     double pts;
2030
2031     audio_callback_time = av_gettime();
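    /* SDL pulls exactly 'len' bytes on each callback.  The loop below keeps
     * refilling is->audio_buf by decoding more frames whenever the previous
     * buffer has been fully consumed, and falls back to silence on decode
     * errors so the audio device is never starved. */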
2032
2033     while (len > 0) {
2034         if (is->audio_buf_index >= is->audio_buf_size) {
2035             audio_size = audio_decode_frame(is, &pts);
2036             if (audio_size < 0) {
2037                 /* if error, just output silence */
2038                 is->audio_buf      = is->silence_buf;
2039                 is->audio_buf_size = sizeof(is->silence_buf);
2040             } else {
2041                 if (is->show_audio)
2042                     update_sample_display(is, (int16_t *)is->audio_buf, audio_size);
2043                 audio_size = synchronize_audio(is, (int16_t *)is->audio_buf, audio_size,
2044                                                pts);
2045                 is->audio_buf_size = audio_size;
2046             }
2047             is->audio_buf_index = 0;
2048         }
2049         len1 = is->audio_buf_size - is->audio_buf_index;
2050         if (len1 > len)
2051             len1 = len;
2052         memcpy(stream, (uint8_t *)is->audio_buf + is->audio_buf_index, len1);
2053         len -= len1;
2054         stream += len1;
2055         is->audio_buf_index += len1;
2056     }
2057 }
2058
2059 /* open a given stream. Return 0 if OK */
2060 static int stream_component_open(VideoState *is, int stream_index)
2061 {
2062     AVFormatContext *ic = is->ic;
2063     AVCodecContext *avctx;
2064     AVCodec *codec;
2065     SDL_AudioSpec wanted_spec, spec;
2066     AVDictionary *opts;
2067     AVDictionaryEntry *t = NULL;
2068
2069     if (stream_index < 0 || stream_index >= ic->nb_streams)
2070         return -1;
2071     avctx = ic->streams[stream_index]->codec;
2072
2073     opts = filter_codec_opts(codec_opts, avctx->codec_id, ic, ic->streams[stream_index], NULL);
2074
2075     codec = avcodec_find_decoder(avctx->codec_id);
2076     avctx->debug_mv          = debug_mv;
2077     avctx->debug             = debug;
2078     avctx->workaround_bugs   = workaround_bugs;
2079     avctx->idct_algo         = idct;
2080     avctx->skip_frame        = skip_frame;
2081     avctx->skip_idct         = skip_idct;
2082     avctx->skip_loop_filter  = skip_loop_filter;
2083     avctx->error_concealment = error_concealment;
2084
2085     if (fast)   avctx->flags2 |= CODEC_FLAG2_FAST;
2086
2087     if (!av_dict_get(opts, "threads", NULL, 0))
2088         av_dict_set(&opts, "threads", "auto", 0);
2089     if (!codec ||
2090         avcodec_open2(avctx, codec, &opts) < 0)
2091         return -1;
2092     if ((t = av_dict_get(opts, "", NULL, AV_DICT_IGNORE_SUFFIX))) {
2093         av_log(NULL, AV_LOG_ERROR, "Option %s not found.\n", t->key);
2094         return AVERROR_OPTION_NOT_FOUND;
2095     }
2096
2097     /* prepare audio output */
2098     if (avctx->codec_type == AVMEDIA_TYPE_AUDIO) {
2099         wanted_spec.freq = avctx->sample_rate;
2100         wanted_spec.format = AUDIO_S16SYS;
2101
2102         if (!avctx->channel_layout)
2103             avctx->channel_layout = av_get_default_channel_layout(avctx->channels);
2104         if (!avctx->channel_layout) {
2105             fprintf(stderr, "unable to guess channel layout\n");
2106             return -1;
2107         }
2108         if (avctx->channels == 1)
2109             is->sdl_channel_layout = AV_CH_LAYOUT_MONO;
2110         else
2111             is->sdl_channel_layout = AV_CH_LAYOUT_STEREO;
2112         is->sdl_channels = av_get_channel_layout_nb_channels(is->sdl_channel_layout);
2113
2114         wanted_spec.channels = is->sdl_channels;
2115         wanted_spec.silence = 0;
2116         wanted_spec.samples = SDL_AUDIO_BUFFER_SIZE;
2117         wanted_spec.callback = sdl_audio_callback;
2118         wanted_spec.userdata = is;
2119         if (SDL_OpenAudio(&wanted_spec, &spec) < 0) {
2120             fprintf(stderr, "SDL_OpenAudio: %s\n", SDL_GetError());
2121             return -1;
2122         }
2123         is->audio_hw_buf_size = spec.size;
2124         is->sdl_sample_fmt          = AV_SAMPLE_FMT_S16;
2125         is->resample_sample_fmt     = is->sdl_sample_fmt;
2126         is->resample_channel_layout = is->sdl_channel_layout;
2127     }
2128
2129     ic->streams[stream_index]->discard = AVDISCARD_DEFAULT;
2130     switch (avctx->codec_type) {
2131     case AVMEDIA_TYPE_AUDIO:
2132         is->audio_stream = stream_index;
2133         is->audio_st = ic->streams[stream_index];
2134         is->audio_buf_size  = 0;
2135         is->audio_buf_index = 0;
2136
2137         /* init averaging filter */
2138         is->audio_diff_avg_coef  = exp(log(0.01) / AUDIO_DIFF_AVG_NB);
2139         is->audio_diff_avg_count = 0;
2140         /* since we do not have a precise enough measure of audio fifo fullness,
2141            we correct audio sync only if the error is larger than this threshold */
2142         is->audio_diff_threshold = 2.0 * SDL_AUDIO_BUFFER_SIZE / avctx->sample_rate;
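        /* i.e. two SDL audio buffers expressed in seconds; illustrative
         * numbers only: with 1024 samples per buffer at 44100 Hz this is
         * about 46 ms. */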
2143
2144         memset(&is->audio_pkt, 0, sizeof(is->audio_pkt));
2145         packet_queue_init(&is->audioq);
2146         SDL_PauseAudio(0);
2147         break;
2148     case AVMEDIA_TYPE_VIDEO:
2149         is->video_stream = stream_index;
2150         is->video_st = ic->streams[stream_index];
2151
2152         packet_queue_init(&is->videoq);
2153         is->video_tid = SDL_CreateThread(video_thread, is);
2154         break;
2155     case AVMEDIA_TYPE_SUBTITLE:
2156         is->subtitle_stream = stream_index;
2157         is->subtitle_st = ic->streams[stream_index];
2158         packet_queue_init(&is->subtitleq);
2159
2160         is->subtitle_tid = SDL_CreateThread(subtitle_thread, is);
2161         break;
2162     default:
2163         break;
2164     }
2165     return 0;
2166 }
2167
2168 static void stream_component_close(VideoState *is, int stream_index)
2169 {
2170     AVFormatContext *ic = is->ic;
2171     AVCodecContext *avctx;
2172
2173     if (stream_index < 0 || stream_index >= ic->nb_streams)
2174         return;
2175     avctx = ic->streams[stream_index]->codec;
2176
2177     switch (avctx->codec_type) {
2178     case AVMEDIA_TYPE_AUDIO:
2179         packet_queue_abort(&is->audioq);
2180
2181         SDL_CloseAudio();
2182
2183         packet_queue_end(&is->audioq);
2184         av_free_packet(&is->audio_pkt);
2185         if (is->avr)
2186             avresample_free(&is->avr);
2187         av_freep(&is->audio_buf1);
2188         is->audio_buf = NULL;
2189         avcodec_free_frame(&is->frame);
2190
2191         if (is->rdft) {
2192             av_rdft_end(is->rdft);
2193             av_freep(&is->rdft_data);
2194             is->rdft = NULL;
2195             is->rdft_bits = 0;
2196         }
2197         break;
2198     case AVMEDIA_TYPE_VIDEO:
2199         packet_queue_abort(&is->videoq);
2200
2201         /* note: we also signal this mutex to make sure we deblock the
2202            video thread in all cases */
2203         SDL_LockMutex(is->pictq_mutex);
2204         SDL_CondSignal(is->pictq_cond);
2205         SDL_UnlockMutex(is->pictq_mutex);
2206
2207         SDL_WaitThread(is->video_tid, NULL);
2208
2209         packet_queue_end(&is->videoq);
2210         break;
2211     case AVMEDIA_TYPE_SUBTITLE:
2212         packet_queue_abort(&is->subtitleq);
2213
2214         /* note: we also signal this mutex to make sure we deblock the
2215            subtitle thread in all cases */
2216         SDL_LockMutex(is->subpq_mutex);
2217         is->subtitle_stream_changed = 1;
2218
2219         SDL_CondSignal(is->subpq_cond);
2220         SDL_UnlockMutex(is->subpq_mutex);
2221
2222         SDL_WaitThread(is->subtitle_tid, NULL);
2223
2224         packet_queue_end(&is->subtitleq);
2225         break;
2226     default:
2227         break;
2228     }
2229
2230     ic->streams[stream_index]->discard = AVDISCARD_ALL;
2231     avcodec_close(avctx);
2232 #if CONFIG_AVFILTER
2233     free_buffer_pool(&is->buffer_pool);
2234 #endif
2235     switch (avctx->codec_type) {
2236     case AVMEDIA_TYPE_AUDIO:
2237         is->audio_st = NULL;
2238         is->audio_stream = -1;
2239         break;
2240     case AVMEDIA_TYPE_VIDEO:
2241         is->video_st = NULL;
2242         is->video_stream = -1;
2243         break;
2244     case AVMEDIA_TYPE_SUBTITLE:
2245         is->subtitle_st = NULL;
2246         is->subtitle_stream = -1;
2247         break;
2248     default:
2249         break;
2250     }
2251 }
2252
2253 /* since we have only one decoding thread, we can use a global
2254    variable instead of a thread local variable */
2255 static VideoState *global_video_state;
2256
2257 static int decode_interrupt_cb(void *ctx)
2258 {
2259     return global_video_state && global_video_state->abort_request;
2260 }
2261
2262 /* this thread gets the stream from the disk or the network */
2263 static int decode_thread(void *arg)
2264 {
2265     VideoState *is = arg;
2266     AVFormatContext *ic = NULL;
2267     int err, i, ret;
2268     int st_index[AVMEDIA_TYPE_NB];
2269     AVPacket pkt1, *pkt = &pkt1;
2270     int eof = 0;
2271     int pkt_in_play_range = 0;
2272     AVDictionaryEntry *t;
2273     AVDictionary **opts;
2274     int orig_nb_streams;
2275
2276     memset(st_index, -1, sizeof(st_index));
2277     is->video_stream = -1;
2278     is->audio_stream = -1;
2279     is->subtitle_stream = -1;
2280
2281     global_video_state = is;
2282
2283     ic = avformat_alloc_context();
2284     ic->interrupt_callback.callback = decode_interrupt_cb;
2285     err = avformat_open_input(&ic, is->filename, is->iformat, &format_opts);
2286     if (err < 0) {
2287         print_error(is->filename, err);
2288         ret = -1;
2289         goto fail;
2290     }
2291     if ((t = av_dict_get(format_opts, "", NULL, AV_DICT_IGNORE_SUFFIX))) {
2292         av_log(NULL, AV_LOG_ERROR, "Option %s not found.\n", t->key);
2293         ret = AVERROR_OPTION_NOT_FOUND;
2294         goto fail;
2295     }
2296     is->ic = ic;
2297
2298     if (genpts)
2299         ic->flags |= AVFMT_FLAG_GENPTS;
2300
2301     opts = setup_find_stream_info_opts(ic, codec_opts);
2302     orig_nb_streams = ic->nb_streams;
2303
2304     err = avformat_find_stream_info(ic, opts);
2305     if (err < 0) {
2306         fprintf(stderr, "%s: could not find codec parameters\n", is->filename);
2307         ret = -1;
2308         goto fail;
2309     }
2310     for (i = 0; i < orig_nb_streams; i++)
2311         av_dict_free(&opts[i]);
2312     av_freep(&opts);
2313
2314     if (ic->pb)
2315         ic->pb->eof_reached = 0; // FIXME hack, avplay maybe should not use url_feof() to test for the end
2316
2317     if (seek_by_bytes < 0)
2318         seek_by_bytes = !!(ic->iformat->flags & AVFMT_TS_DISCONT);
2319
2320     /* if seeking was requested, execute it */
2321     if (start_time != AV_NOPTS_VALUE) {
2322         int64_t timestamp;
2323
2324         timestamp = start_time;
2325         /* add the stream start time */
2326         if (ic->start_time != AV_NOPTS_VALUE)
2327             timestamp += ic->start_time;
2328         ret = avformat_seek_file(ic, -1, INT64_MIN, timestamp, INT64_MAX, 0);
2329         if (ret < 0) {
2330             fprintf(stderr, "%s: could not seek to position %0.3f\n",
2331                     is->filename, (double)timestamp / AV_TIME_BASE);
2332         }
2333     }
2334
2335     for (i = 0; i < ic->nb_streams; i++)
2336         ic->streams[i]->discard = AVDISCARD_ALL;
2337     if (!video_disable)
2338         st_index[AVMEDIA_TYPE_VIDEO] =
2339             av_find_best_stream(ic, AVMEDIA_TYPE_VIDEO,
2340                                 wanted_stream[AVMEDIA_TYPE_VIDEO], -1, NULL, 0);
2341     if (!audio_disable)
2342         st_index[AVMEDIA_TYPE_AUDIO] =
2343             av_find_best_stream(ic, AVMEDIA_TYPE_AUDIO,
2344                                 wanted_stream[AVMEDIA_TYPE_AUDIO],
2345                                 st_index[AVMEDIA_TYPE_VIDEO],
2346                                 NULL, 0);
2347     if (!video_disable)
2348         st_index[AVMEDIA_TYPE_SUBTITLE] =
2349             av_find_best_stream(ic, AVMEDIA_TYPE_SUBTITLE,
2350                                 wanted_stream[AVMEDIA_TYPE_SUBTITLE],
2351                                 (st_index[AVMEDIA_TYPE_AUDIO] >= 0 ?
2352                                  st_index[AVMEDIA_TYPE_AUDIO] :
2353                                  st_index[AVMEDIA_TYPE_VIDEO]),
2354                                 NULL, 0);
2355     if (show_status) {
2356         av_dump_format(ic, 0, is->filename, 0);
2357     }
2358
2359     /* open the streams */
2360     if (st_index[AVMEDIA_TYPE_AUDIO] >= 0) {
2361         stream_component_open(is, st_index[AVMEDIA_TYPE_AUDIO]);
2362     }
2363
2364     ret = -1;
2365     if (st_index[AVMEDIA_TYPE_VIDEO] >= 0) {
2366         ret = stream_component_open(is, st_index[AVMEDIA_TYPE_VIDEO]);
2367     }
2368     is->refresh_tid = SDL_CreateThread(refresh_thread, is);
2369     if (ret < 0) {
2370         if (!display_disable)
2371             is->show_audio = 2;
2372     }
2373
2374     if (st_index[AVMEDIA_TYPE_SUBTITLE] >= 0) {
2375         stream_component_open(is, st_index[AVMEDIA_TYPE_SUBTITLE]);
2376     }
2377
2378     if (is->video_stream < 0 && is->audio_stream < 0) {
2379         fprintf(stderr, "%s: could not open codecs\n", is->filename);
2380         ret = -1;
2381         goto fail;
2382     }
2383
2384     for (;;) {
2385         if (is->abort_request)
2386             break;
2387         if (is->paused != is->last_paused) {
2388             is->last_paused = is->paused;
2389             if (is->paused)
2390                 is->read_pause_return = av_read_pause(ic);
2391             else
2392                 av_read_play(ic);
2393         }
2394 #if CONFIG_RTSP_DEMUXER
2395         if (is->paused && !strcmp(ic->iformat->name, "rtsp")) {
2396             /* wait 10 ms to avoid trying to get another packet */
2397             /* XXX: horrible */
2398             SDL_Delay(10);
2399             continue;
2400         }
2401 #endif
2402         if (is->seek_req) {
2403             int64_t seek_target = is->seek_pos;
2404             int64_t seek_min    = is->seek_rel > 0 ? seek_target - is->seek_rel + 2: INT64_MIN;
2405             int64_t seek_max    = is->seek_rel < 0 ? seek_target - is->seek_rel - 2: INT64_MAX;
2406 // FIXME the +-2 is due to rounding not being done in the correct direction in generation
2407 //      of the seek_pos/seek_rel variables
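            /* For a relative seek, seek_min/seek_max bound where
             * avformat_seek_file() may land: a forward seek must not end up
             * before the position we started from, and a backward seek must
             * not end up after it, so the seek always moves in the requested
             * direction. */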
2408
2409             ret = avformat_seek_file(is->ic, -1, seek_min, seek_target, seek_max, is->seek_flags);
2410             if (ret < 0) {
2411                 fprintf(stderr, "%s: error while seeking\n", is->ic->filename);
2412             } else {
2413                 if (is->audio_stream >= 0) {
2414                     packet_queue_flush(&is->audioq);
2415                     packet_queue_put(&is->audioq, &flush_pkt);
2416                 }
2417                 if (is->subtitle_stream >= 0) {
2418                     packet_queue_flush(&is->subtitleq);
2419                     packet_queue_put(&is->subtitleq, &flush_pkt);
2420                 }
2421                 if (is->video_stream >= 0) {
2422                     packet_queue_flush(&is->videoq);
2423                     packet_queue_put(&is->videoq, &flush_pkt);
2424                 }
2425             }
2426             is->seek_req = 0;
2427             eof = 0;
2428         }
2429
2430         /* if the queues are full, no need to read more */
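        /* Backpressure on the demuxer: stop reading once the packet queues
         * together hold more than MAX_QUEUE_SIZE bytes, or once every open
         * stream already has a comfortable amount of data queued. */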
2431         if (!infinite_buffer &&
2432               (is->audioq.size + is->videoq.size + is->subtitleq.size > MAX_QUEUE_SIZE
2433             || (   (is->audioq   .size  > MIN_AUDIOQ_SIZE || is->audio_stream < 0)
2434                 && (is->videoq   .nb_packets > MIN_FRAMES || is->video_stream < 0)
2435                 && (is->subtitleq.nb_packets > MIN_FRAMES || is->subtitle_stream < 0)))) {
2436             /* wait 10 ms */
2437             SDL_Delay(10);
2438             continue;
2439         }
2440         if (eof) {
2441             if (is->video_stream >= 0) {
2442                 av_init_packet(pkt);
2443                 pkt->data = NULL;
2444                 pkt->size = 0;
2445                 pkt->stream_index = is->video_stream;
2446                 packet_queue_put(&is->videoq, pkt);
2447             }
2448             if (is->audio_stream >= 0 &&
2449                 is->audio_st->codec->codec->capabilities & CODEC_CAP_DELAY) {
2450                 av_init_packet(pkt);
2451                 pkt->data = NULL;
2452                 pkt->size = 0;
2453                 pkt->stream_index = is->audio_stream;
2454                 packet_queue_put(&is->audioq, pkt);
2455             }
2456             SDL_Delay(10);
2457             if (is->audioq.size + is->videoq.size + is->subtitleq.size == 0) {
2458                 if (loop != 1 && (!loop || --loop)) {
2459                     stream_seek(cur_stream, start_time != AV_NOPTS_VALUE ? start_time : 0, 0, 0);
2460                 } else if (autoexit) {
2461                     ret = AVERROR_EOF;
2462                     goto fail;
2463                 }
2464             }
2465             continue;
2466         }
2467         ret = av_read_frame(ic, pkt);
2468         if (ret < 0) {
2469             if (ret == AVERROR_EOF || (ic->pb && ic->pb->eof_reached))
2470                 eof = 1;
2471             if (ic->pb && ic->pb->error)
2472                 break;
2473             SDL_Delay(100); /* wait for user event */
2474             continue;
2475         }
2476         /* check if packet is in play range specified by user, then queue, otherwise discard */
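        /* The packet pts, converted to seconds and made relative to the -ss
         * start position, must not exceed the -t duration; packets outside
         * that window are dropped below instead of being queued. */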
2477         pkt_in_play_range = duration == AV_NOPTS_VALUE ||
2478                 (pkt->pts - ic->streams[pkt->stream_index]->start_time) *
2479                 av_q2d(ic->streams[pkt->stream_index]->time_base) -
2480                 (double)(start_time != AV_NOPTS_VALUE ? start_time : 0) / 1000000
2481                 <= ((double)duration / 1000000);
2482         if (pkt->stream_index == is->audio_stream && pkt_in_play_range) {
2483             packet_queue_put(&is->audioq, pkt);
2484         } else if (pkt->stream_index == is->video_stream && pkt_in_play_range) {
2485             packet_queue_put(&is->videoq, pkt);
2486         } else if (pkt->stream_index == is->subtitle_stream && pkt_in_play_range) {
2487             packet_queue_put(&is->subtitleq, pkt);
2488         } else {
2489             av_free_packet(pkt);
2490         }
2491     }
2492     /* wait until the end */
2493     while (!is->abort_request) {
2494         SDL_Delay(100);
2495     }
2496
2497     ret = 0;
2498  fail:
2499     /* disable interrupting */
2500     global_video_state = NULL;
2501
2502     /* close each stream */
2503     if (is->audio_stream >= 0)
2504         stream_component_close(is, is->audio_stream);
2505     if (is->video_stream >= 0)
2506         stream_component_close(is, is->video_stream);
2507     if (is->subtitle_stream >= 0)
2508         stream_component_close(is, is->subtitle_stream);
2509     if (is->ic) {
2510         avformat_close_input(&is->ic);
2511     }
2512
2513     if (ret != 0) {
2514         SDL_Event event;
2515
2516         event.type = FF_QUIT_EVENT;
2517         event.user.data1 = is;
2518         SDL_PushEvent(&event);
2519     }
2520     return 0;
2521 }
2522
2523 static VideoState *stream_open(const char *filename, AVInputFormat *iformat)
2524 {
2525     VideoState *is;
2526
2527     is = av_mallocz(sizeof(VideoState));
2528     if (!is)
2529         return NULL;
2530     av_strlcpy(is->filename, filename, sizeof(is->filename));
2531     is->iformat = iformat;
2532     is->ytop    = 0;
2533     is->xleft   = 0;
2534
2535     /* start video display */
2536     is->pictq_mutex = SDL_CreateMutex();
2537     is->pictq_cond  = SDL_CreateCond();
2538
2539     is->subpq_mutex = SDL_CreateMutex();
2540     is->subpq_cond  = SDL_CreateCond();
2541
2542     is->av_sync_type = av_sync_type;
2543     is->parse_tid    = SDL_CreateThread(decode_thread, is);
2544     if (!is->parse_tid) {
2545         av_free(is);
2546         return NULL;
2547     }
2548     return is;
2549 }
2550
2551 static void stream_cycle_channel(VideoState *is, int codec_type)
2552 {
2553     AVFormatContext *ic = is->ic;
2554     int start_index, stream_index;
2555     AVStream *st;
2556
2557     if (codec_type == AVMEDIA_TYPE_VIDEO)
2558         start_index = is->video_stream;
2559     else if (codec_type == AVMEDIA_TYPE_AUDIO)
2560         start_index = is->audio_stream;
2561     else
2562         start_index = is->subtitle_stream;
2563     if (start_index < (codec_type == AVMEDIA_TYPE_SUBTITLE ? -1 : 0))
2564         return;
2565     stream_index = start_index;
2566     for (;;) {
2567         if (++stream_index >= is->ic->nb_streams)
2568         {
2569             if (codec_type == AVMEDIA_TYPE_SUBTITLE)
2570             {
2571                 stream_index = -1;
2572                 goto the_end;
2573             } else
2574                 stream_index = 0;
2575         }
2576         if (stream_index == start_index)
2577             return;
2578         st = ic->streams[stream_index];
2579         if (st->codec->codec_type == codec_type) {
2580             /* check that parameters are OK */
2581             switch (codec_type) {
2582             case AVMEDIA_TYPE_AUDIO:
2583                 if (st->codec->sample_rate != 0 &&
2584                     st->codec->channels != 0)
2585                     goto the_end;
2586                 break;
2587             case AVMEDIA_TYPE_VIDEO:
2588             case AVMEDIA_TYPE_SUBTITLE:
2589                 goto the_end;
2590             default:
2591                 break;
2592             }
2593         }
2594     }
2595  the_end:
2596     stream_component_close(is, start_index);
2597     stream_component_open(is, stream_index);
2598 }
2599
2600
2601 static void toggle_full_screen(void)
2602 {
2603 #if defined(__APPLE__) && SDL_VERSION_ATLEAST(1, 2, 14)
2604     /* OS X needs to empty the picture_queue */
2605     int i;
2606     for (i = 0; i < VIDEO_PICTURE_QUEUE_SIZE; i++)
2607         cur_stream->pictq[i].reallocate = 1;
2608 #endif
2609     is_full_screen = !is_full_screen;
2610     video_open(cur_stream);
2611 }
2612
2613 static void toggle_pause(void)
2614 {
2615     if (cur_stream)
2616         stream_pause(cur_stream);
2617     step = 0;
2618 }
2619
2620 static void step_to_next_frame(void)
2621 {
2622     if (cur_stream) {
2623         /* if the stream is paused, unpause it, then step */
2624         if (cur_stream->paused)
2625             stream_pause(cur_stream);
2626     }
2627     step = 1;
2628 }
2629
2630 static void toggle_audio_display(void)
2631 {
2632     if (cur_stream) {
2633         int bgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0x00);
2634         cur_stream->show_audio = (cur_stream->show_audio + 1) % 3;
2635         fill_rectangle(screen,
2636                        cur_stream->xleft, cur_stream->ytop, cur_stream->width, cur_stream->height,
2637                        bgcolor);
2638         SDL_UpdateRect(screen, cur_stream->xleft, cur_stream->ytop, cur_stream->width, cur_stream->height);
2639     }
2640 }
2641
2642 /* handle an event sent by the GUI */
2643 static void event_loop(void)
2644 {
2645     SDL_Event event;
2646     double incr, pos, frac;
2647
2648     for (;;) {
2649         double x;
2650         SDL_WaitEvent(&event);
2651         switch (event.type) {
2652         case SDL_KEYDOWN:
2653             if (exit_on_keydown) {
2654                 do_exit();
2655                 break;
2656             }
2657             switch (event.key.keysym.sym) {
2658             case SDLK_ESCAPE:
2659             case SDLK_q:
2660                 do_exit();
2661                 break;
2662             case SDLK_f:
2663                 toggle_full_screen();
2664                 break;
2665             case SDLK_p:
2666             case SDLK_SPACE:
2667                 toggle_pause();
2668                 break;
2669             case SDLK_s: // S: Step to next frame
2670                 step_to_next_frame();
2671                 break;
2672             case SDLK_a:
2673                 if (cur_stream)
2674                     stream_cycle_channel(cur_stream, AVMEDIA_TYPE_AUDIO);
2675                 break;
2676             case SDLK_v:
2677                 if (cur_stream)
2678                     stream_cycle_channel(cur_stream, AVMEDIA_TYPE_VIDEO);
2679                 break;
2680             case SDLK_t:
2681                 if (cur_stream)
2682                     stream_cycle_channel(cur_stream, AVMEDIA_TYPE_SUBTITLE);
2683                 break;
2684             case SDLK_w:
2685                 toggle_audio_display();
2686                 break;
2687             case SDLK_LEFT:
2688                 incr = -10.0;
2689                 goto do_seek;
2690             case SDLK_RIGHT:
2691                 incr = 10.0;
2692                 goto do_seek;
2693             case SDLK_UP:
2694                 incr = 60.0;
2695                 goto do_seek;
2696             case SDLK_DOWN:
2697                 incr = -60.0;
2698             do_seek:
2699                 if (cur_stream) {
2700                     if (seek_by_bytes) {
2701                         if (cur_stream->video_stream >= 0 && cur_stream->video_current_pos >= 0) {
2702                             pos = cur_stream->video_current_pos;
2703                         } else if (cur_stream->audio_stream >= 0 && cur_stream->audio_pkt.pos >= 0) {
2704                             pos = cur_stream->audio_pkt.pos;
2705                         } else
2706                             pos = avio_tell(cur_stream->ic->pb);
2707                         if (cur_stream->ic->bit_rate)
2708                             incr *= cur_stream->ic->bit_rate / 8.0;
2709                         else
2710                             incr *= 180000.0;
2711                         pos += incr;
2712                         stream_seek(cur_stream, pos, incr, 1);
2713                     } else {
2714                         pos = get_master_clock(cur_stream);
2715                         pos += incr;
2716                         stream_seek(cur_stream, (int64_t)(pos * AV_TIME_BASE), (int64_t)(incr * AV_TIME_BASE), 0);
2717                     }
2718                 }
2719                 break;
2720             default:
2721                 break;
2722             }
2723             break;
2724         case SDL_MOUSEBUTTONDOWN:
2725             if (exit_on_mousedown) {
2726                 do_exit();
2727                 break;
2728             }
2729         case SDL_MOUSEMOTION:
2730             if (event.type == SDL_MOUSEBUTTONDOWN) {
2731                 x = event.button.x;
2732             } else {
2733                 if (event.motion.state != SDL_PRESSED)
2734                     break;
2735                 x = event.motion.x;
2736             }
2737             if (cur_stream) {
2738                 if (seek_by_bytes || cur_stream->ic->duration <= 0) {
2739                     uint64_t size =  avio_size(cur_stream->ic->pb);
2740                     stream_seek(cur_stream, size*x/cur_stream->width, 0, 1);
2741                 } else {
2742                     int64_t ts;
2743                     int ns, hh, mm, ss;
2744                     int tns, thh, tmm, tss;
2745                     tns  = cur_stream->ic->duration / 1000000LL;
2746                     thh  = tns / 3600;
2747                     tmm  = (tns % 3600) / 60;
2748                     tss  = (tns % 60);
2749                     frac = x / cur_stream->width;
2750                     ns   = frac * tns;
2751                     hh   = ns / 3600;
2752                     mm   = (ns % 3600) / 60;
2753                     ss   = (ns % 60);
2754                     fprintf(stderr, "Seek to %2.0f%% (%2d:%02d:%02d) of total duration (%2d:%02d:%02d)       \n", frac*100,
2755                             hh, mm, ss, thh, tmm, tss);
2756                     ts = frac * cur_stream->ic->duration;
2757                     if (cur_stream->ic->start_time != AV_NOPTS_VALUE)
2758                         ts += cur_stream->ic->start_time;
2759                     stream_seek(cur_stream, ts, 0, 0);
2760                 }
2761             }
2762             break;
2763         case SDL_VIDEORESIZE:
2764             if (cur_stream) {
2765                 screen = SDL_SetVideoMode(event.resize.w, event.resize.h, 0,
2766                                           SDL_HWSURFACE|SDL_RESIZABLE|SDL_ASYNCBLIT|SDL_HWACCEL);
2767                 screen_width  = cur_stream->width  = event.resize.w;
2768                 screen_height = cur_stream->height = event.resize.h;
2769             }
2770             break;
2771         case SDL_QUIT:
2772         case FF_QUIT_EVENT:
2773             do_exit();
2774             break;
2775         case FF_ALLOC_EVENT:
2776             video_open(event.user.data1);
2777             alloc_picture(event.user.data1);
2778             break;
2779         case FF_REFRESH_EVENT:
2780             video_refresh_timer(event.user.data1);
2781             cur_stream->refresh = 0;
2782             break;
2783         default:
2784             break;
2785         }
2786     }
2787 }
2788
2789 static int opt_frame_size(void *optctx, const char *opt, const char *arg)
2790 {
2791     av_log(NULL, AV_LOG_ERROR,
2792            "Option '%s' has been removed, use private format options instead\n", opt);
2793     return AVERROR(EINVAL);
2794 }
2795
2796 static int opt_width(void *optctx, const char *opt, const char *arg)
2797 {
2798     screen_width = parse_number_or_die(opt, arg, OPT_INT64, 1, INT_MAX);
2799     return 0;
2800 }
2801
2802 static int opt_height(void *optctx, const char *opt, const char *arg)
2803 {
2804     screen_height = parse_number_or_die(opt, arg, OPT_INT64, 1, INT_MAX);
2805     return 0;
2806 }
2807
2808 static int opt_format(void *optctx, const char *opt, const char *arg)
2809 {
2810     file_iformat = av_find_input_format(arg);
2811     if (!file_iformat) {
2812         fprintf(stderr, "Unknown input format: %s\n", arg);
2813         return AVERROR(EINVAL);
2814     }
2815     return 0;
2816 }
2817
2818 static int opt_frame_pix_fmt(void *optctx, const char *opt, const char *arg)
2819 {
2820     av_log(NULL, AV_LOG_ERROR,
2821            "Option '%s' has been removed, use private format options instead\n", opt);
2822     return AVERROR(EINVAL);
2823 }
2824
2825 static int opt_sync(void *optctx, const char *opt, const char *arg)
2826 {
2827     if (!strcmp(arg, "audio"))
2828         av_sync_type = AV_SYNC_AUDIO_MASTER;
2829     else if (!strcmp(arg, "video"))
2830         av_sync_type = AV_SYNC_VIDEO_MASTER;
2831     else if (!strcmp(arg, "ext"))
2832         av_sync_type = AV_SYNC_EXTERNAL_CLOCK;
2833     else {
2834         fprintf(stderr, "Unknown value for %s: %s\n", opt, arg);
2835         exit(1);
2836     }
2837     return 0;
2838 }
2839
2840 static int opt_seek(void *optctx, const char *opt, const char *arg)
2841 {
2842     start_time = parse_time_or_die(opt, arg, 1);
2843     return 0;
2844 }
2845
2846 static int opt_duration(void *optctx, const char *opt, const char *arg)
2847 {
2848     duration = parse_time_or_die(opt, arg, 1);
2849     return 0;
2850 }
2851
2852 static int opt_debug(void *optctx, const char *opt, const char *arg)
2853 {
2854     av_log_set_level(99);
2855     debug = parse_number_or_die(opt, arg, OPT_INT64, 0, INT_MAX);
2856     return 0;
2857 }
2858
2859 static int opt_vismv(void *optctx, const char *opt, const char *arg)
2860 {
2861     debug_mv = parse_number_or_die(opt, arg, OPT_INT64, INT_MIN, INT_MAX);
2862     return 0;
2863 }
2864
2865 static const OptionDef options[] = {
2866 #include "cmdutils_common_opts.h"
2867     { "x", HAS_ARG, { .func_arg = opt_width }, "force displayed width", "width" },
2868     { "y", HAS_ARG, { .func_arg = opt_height }, "force displayed height", "height" },
2869     { "s", HAS_ARG | OPT_VIDEO, { .func_arg = opt_frame_size }, "set frame size (WxH or abbreviation)", "size" },
2870     { "fs", OPT_BOOL, { &is_full_screen }, "force full screen" },
2871     { "an", OPT_BOOL, { &audio_disable }, "disable audio" },
2872     { "vn", OPT_BOOL, { &video_disable }, "disable video" },
2873     { "ast", OPT_INT | HAS_ARG | OPT_EXPERT, { &wanted_stream[AVMEDIA_TYPE_AUDIO] }, "select desired audio stream", "stream_number" },
2874     { "vst", OPT_INT | HAS_ARG | OPT_EXPERT, { &wanted_stream[AVMEDIA_TYPE_VIDEO] }, "select desired video stream", "stream_number" },
2875     { "sst", OPT_INT | HAS_ARG | OPT_EXPERT, { &wanted_stream[AVMEDIA_TYPE_SUBTITLE] }, "select desired subtitle stream", "stream_number" },
2876     { "ss", HAS_ARG, { .func_arg = opt_seek }, "seek to a given position in seconds", "pos" },
2877     { "t", HAS_ARG, { .func_arg = opt_duration }, "play \"duration\" seconds of audio/video", "duration" },
2878     { "bytes", OPT_INT | HAS_ARG, { &seek_by_bytes }, "seek by bytes 0=off 1=on -1=auto", "val" },
2879     { "nodisp", OPT_BOOL, { &display_disable }, "disable graphical display" },
2880     { "f", HAS_ARG, { .func_arg = opt_format }, "force format", "fmt" },
2881     { "pix_fmt", HAS_ARG | OPT_EXPERT | OPT_VIDEO, { .func_arg = opt_frame_pix_fmt }, "set pixel format", "format" },
2882     { "stats", OPT_BOOL | OPT_EXPERT, { &show_status }, "show status", "" },
2883     { "debug", HAS_ARG | OPT_EXPERT, { .func_arg = opt_debug }, "print specific debug info", "" },
2884     { "bug", OPT_INT | HAS_ARG | OPT_EXPERT, { &workaround_bugs }, "workaround bugs", "" },
2885     { "vismv", HAS_ARG | OPT_EXPERT, { .func_arg = opt_vismv }, "visualize motion vectors", "" },
2886     { "fast", OPT_BOOL | OPT_EXPERT, { &fast }, "non spec compliant optimizations", "" },
2887     { "genpts", OPT_BOOL | OPT_EXPERT, { &genpts }, "generate pts", "" },
2888     { "drp", OPT_INT | HAS_ARG | OPT_EXPERT, { &decoder_reorder_pts }, "let decoder reorder pts 0=off 1=on -1=auto", ""},
2889     { "skiploop", OPT_INT | HAS_ARG | OPT_EXPERT, { &skip_loop_filter }, "", "" },
2890     { "skipframe", OPT_INT | HAS_ARG | OPT_EXPERT, { &skip_frame }, "", "" },
2891     { "skipidct", OPT_INT | HAS_ARG | OPT_EXPERT, { &skip_idct }, "", "" },
2892     { "idct", OPT_INT | HAS_ARG | OPT_EXPERT, { &idct }, "set idct algo",  "algo" },
2893     { "ec", OPT_INT | HAS_ARG | OPT_EXPERT, { &error_concealment }, "set error concealment options",  "bit_mask" },
2894     { "sync", HAS_ARG | OPT_EXPERT, { .func_arg = opt_sync }, "set audio-video sync. type (type=audio/video/ext)", "type" },
2895     { "autoexit", OPT_BOOL | OPT_EXPERT, { &autoexit }, "exit at the end", "" },
2896     { "exitonkeydown", OPT_BOOL | OPT_EXPERT, { &exit_on_keydown }, "exit on key down", "" },
2897     { "exitonmousedown", OPT_BOOL | OPT_EXPERT, { &exit_on_mousedown }, "exit on mouse down", "" },
2898     { "loop", OPT_INT | HAS_ARG | OPT_EXPERT, { &loop }, "set number of times the playback shall be looped", "loop count" },
2899     { "framedrop", OPT_BOOL | OPT_EXPERT, { &framedrop }, "drop frames when cpu is too slow", "" },
2900     { "infbuf", OPT_BOOL | OPT_EXPERT, { &infinite_buffer }, "don't limit the input buffer size (useful with realtime streams)", "" },
2901     { "window_title", OPT_STRING | HAS_ARG, { &window_title }, "set window title", "window title" },
2902 #if CONFIG_AVFILTER
2903     { "vf", OPT_STRING | HAS_ARG, { &vfilters }, "video filters", "filter list" },
2904 #endif
2905     { "rdftspeed", OPT_INT | HAS_ARG| OPT_AUDIO | OPT_EXPERT, { &rdftspeed }, "rdft speed", "msecs" },
2906     { "default", HAS_ARG | OPT_AUDIO | OPT_VIDEO | OPT_EXPERT, { opt_default }, "generic catch all option", "" },
2907     { "i", 0, { NULL }, "avconv compatibility dummy option", ""},
2908     { NULL, },
2909 };
2910
2911 static void show_usage(void)
2912 {
2913     printf("Simple media player\n");
2914     printf("usage: %s [options] input_file\n", program_name);
2915     printf("\n");
2916 }
2917
2918 void show_help_default(const char *opt, const char *arg)
2919 {
2920     av_log_set_callback(log_callback_help);
2921     show_usage();
2922     show_help_options(options, "Main options:", 0, OPT_EXPERT, 0);
2923     show_help_options(options, "Advanced options:", OPT_EXPERT, 0, 0);
2924     printf("\n");
2925     show_help_children(avcodec_get_class(), AV_OPT_FLAG_DECODING_PARAM);
2926     show_help_children(avformat_get_class(), AV_OPT_FLAG_DECODING_PARAM);
2927 #if !CONFIG_AVFILTER
2928     show_help_children(sws_get_class(), AV_OPT_FLAG_ENCODING_PARAM);
2929 #endif
2930     printf("\nWhile playing:\n"
2931            "q, ESC              quit\n"
2932            "f                   toggle full screen\n"
2933            "p, SPC              pause\n"
2934            "a                   cycle audio channel\n"
2935            "v                   cycle video channel\n"
2936            "t                   cycle subtitle channel\n"
2937            "w                   show audio waves\n"
2938            "s                   activate frame-step mode\n"
2939            "left/right          seek backward/forward 10 seconds\n"
2940            "down/up             seek backward/forward 1 minute\n"
2941            "mouse click         seek to percentage in file corresponding to fraction of width\n"
2942            );
2943 }
2944
2945 static void opt_input_file(void *optctx, const char *filename)
2946 {
2947     if (input_filename) {
2948         fprintf(stderr, "Argument '%s' provided as input filename, but '%s' was already specified.\n",
2949                 filename, input_filename);
2950         exit(1);
2951     }
2952     if (!strcmp(filename, "-"))
2953         filename = "pipe:";
2954     input_filename = filename;
2955 }
2956
2957 /* program entry point */
2958 int main(int argc, char **argv)
2959 {
2960     int flags;
2961
2962     av_log_set_flags(AV_LOG_SKIP_REPEATED);
2963     parse_loglevel(argc, argv, options);
2964
2965     /* register all codecs, demuxers and protocols */
2966     avcodec_register_all();
2967 #if CONFIG_AVDEVICE
2968     avdevice_register_all();
2969 #endif
2970 #if CONFIG_AVFILTER
2971     avfilter_register_all();
2972 #endif
2973     av_register_all();
2974     avformat_network_init();
2975
2976     init_opts();
2977
2978     show_banner();
2979
2980     parse_options(NULL, argc, argv, options, opt_input_file);
2981
2982     if (!input_filename) {
2983         show_usage();
2984         fprintf(stderr, "An input file must be specified\n");
2985         fprintf(stderr, "Use -h to get full help or, even better, run 'man %s'\n", program_name);
2986         exit(1);
2987     }
2988
2989     if (display_disable) {
2990         video_disable = 1;
2991     }
2992     flags = SDL_INIT_VIDEO | SDL_INIT_AUDIO | SDL_INIT_TIMER;
2993 #if !defined(__MINGW32__) && !defined(__APPLE__)
2994     flags |= SDL_INIT_EVENTTHREAD; /* Not supported on Windows or Mac OS X */
2995 #endif
2996     if (SDL_Init (flags)) {
2997         fprintf(stderr, "Could not initialize SDL - %s\n", SDL_GetError());
2998         exit(1);
2999     }
3000
3001     if (!display_disable) {
3002 #if HAVE_SDL_VIDEO_SIZE
3003         const SDL_VideoInfo *vi = SDL_GetVideoInfo();
3004         fs_screen_width = vi->current_w;
3005         fs_screen_height = vi->current_h;
3006 #endif
3007     }
3008
3009     SDL_EventState(SDL_ACTIVEEVENT, SDL_IGNORE);
3010     SDL_EventState(SDL_SYSWMEVENT, SDL_IGNORE);
3011     SDL_EventState(SDL_USEREVENT, SDL_IGNORE);
3012
3013     av_init_packet(&flush_pkt);
3014     flush_pkt.data = "FLUSH";
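    /* flush_pkt is a sentinel: after a seek it is pushed into each packet
     * queue, and the decoding threads compare the data pointer against
     * flush_pkt.data to know when to call avcodec_flush_buffers(). */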
3015
3016     cur_stream = stream_open(input_filename, file_iformat);
3017
3018     event_loop();
3019
3020     /* never returns */
3021
3022     return 0;
3023 }