1 /*
2  * Copyright (c) 2003 Fabrice Bellard
3  *
4  * This file is part of FFmpeg.
5  *
6  * FFmpeg is free software; you can redistribute it and/or
7  * modify it under the terms of the GNU Lesser General Public
8  * License as published by the Free Software Foundation; either
9  * version 2.1 of the License, or (at your option) any later version.
10  *
11  * FFmpeg is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
14  * Lesser General Public License for more details.
15  *
16  * You should have received a copy of the GNU Lesser General Public
17  * License along with FFmpeg; if not, write to the Free Software
18  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
19  */
20
21 /**
22  * @file
23  * simple media player based on the FFmpeg libraries
24  */
25
26 #include "config.h"
27 #include <inttypes.h>
28 #include <math.h>
29 #include <limits.h>
30 #include <signal.h>
31 #include "libavutil/avstring.h"
32 #include "libavutil/colorspace.h"
33 #include "libavutil/mathematics.h"
34 #include "libavutil/pixdesc.h"
35 #include "libavutil/imgutils.h"
36 #include "libavutil/dict.h"
37 #include "libavutil/parseutils.h"
38 #include "libavutil/samplefmt.h"
39 #include "libavutil/avassert.h"
40 #include "libavformat/avformat.h"
41 #include "libavdevice/avdevice.h"
42 #include "libswscale/swscale.h"
43 #include "libavutil/opt.h"
44 #include "libavcodec/avfft.h"
45 #include "libswresample/swresample.h"
46
47 #if CONFIG_AVFILTER
48 # include "libavfilter/avcodec.h"
49 # include "libavfilter/avfilter.h"
50 # include "libavfilter/avfiltergraph.h"
51 # include "libavfilter/buffersink.h"
52 #endif
53
54 #include <SDL.h>
55 #include <SDL_thread.h>
56
57 #include "cmdutils.h"
58
59 #include <unistd.h>
60 #include <assert.h>
61
62 const char program_name[] = "ffplay";
63 const int program_birth_year = 2003;
64
65 #define MAX_QUEUE_SIZE (15 * 1024 * 1024)
66 #define MIN_FRAMES 5
67
68 /* SDL audio buffer size, in samples. Should be small to have precise
69    A/V sync as SDL does not have hardware buffer fullness info. */
70 #define SDL_AUDIO_BUFFER_SIZE 1024
71
72 /* no AV sync correction is done if below the AV sync threshold */
73 #define AV_SYNC_THRESHOLD 0.01
74 /* no AV sync correction is done if the error is too large */
75 #define AV_NOSYNC_THRESHOLD 10.0
76
77 /* maximum audio speed change to get correct sync */
78 #define SAMPLE_CORRECTION_PERCENT_MAX 10
79
80 /* we use about AUDIO_DIFF_AVG_NB A-V differences to compute the average */
81 #define AUDIO_DIFF_AVG_NB   20
82
83 /* NOTE: the size must be big enough to compensate for the hardware audio buffer size */
84 #define SAMPLE_ARRAY_SIZE (2 * 65536)
85
86 static int sws_flags = SWS_BICUBIC;
87
88 typedef struct PacketQueue {
89     AVPacketList *first_pkt, *last_pkt;
90     int nb_packets;
91     int size;
92     int abort_request;
93     SDL_mutex *mutex;
94     SDL_cond *cond;
95 } PacketQueue;
96
97 #define VIDEO_PICTURE_QUEUE_SIZE 2
98 #define SUBPICTURE_QUEUE_SIZE 4
99
100 typedef struct VideoPicture {
101     double pts;                                  ///< presentation time stamp for this picture
102     double duration;                             ///< expected duration of the frame
103     int64_t pos;                                 ///< byte position in file
104     int skip;
105     SDL_Overlay *bmp;
106     int width, height; /* source height & width */
107     int allocated;
108     int reallocate;
109     enum PixelFormat pix_fmt;
110
111 #if CONFIG_AVFILTER
112     AVFilterBufferRef *picref;
113 #endif
114 } VideoPicture;
115
116 typedef struct SubPicture {
117     double pts; /* presentation time stamp for this picture */
118     AVSubtitle sub;
119 } SubPicture;
120
121 enum {
122     AV_SYNC_AUDIO_MASTER, /* default choice */
123     AV_SYNC_VIDEO_MASTER,
124     AV_SYNC_EXTERNAL_CLOCK, /* synchronize to an external clock */
125 };
126
127 typedef struct VideoState {
128     SDL_Thread *read_tid;
129     SDL_Thread *video_tid;
130     SDL_Thread *refresh_tid;
131     AVInputFormat *iformat;
132     int no_background;
133     int abort_request;
134     int force_refresh;
135     int paused;
136     int last_paused;
137     int seek_req;
138     int seek_flags;
139     int64_t seek_pos;
140     int64_t seek_rel;
141     int read_pause_return;
142     AVFormatContext *ic;
143
144     int audio_stream;
145
146     int av_sync_type;
147     double external_clock; /* external clock base */
148     int64_t external_clock_time;
149
150     double audio_clock;
151     double audio_diff_cum; /* used for AV difference average computation */
152     double audio_diff_avg_coef;
153     double audio_diff_threshold;
154     int audio_diff_avg_count;
155     AVStream *audio_st;
156     PacketQueue audioq;
157     int audio_hw_buf_size;
158     DECLARE_ALIGNED(16,uint8_t,audio_buf2)[AVCODEC_MAX_AUDIO_FRAME_SIZE * 4];
159     uint8_t silence_buf[SDL_AUDIO_BUFFER_SIZE];
160     uint8_t *audio_buf;
161     uint8_t *audio_buf1;
162     unsigned int audio_buf_size; /* in bytes */
163     int audio_buf_index; /* in bytes */
164     int audio_write_buf_size;
165     AVPacket audio_pkt_temp;
166     AVPacket audio_pkt;
167     enum AVSampleFormat audio_src_fmt;
168     enum AVSampleFormat audio_tgt_fmt;
169     int audio_src_channels;
170     int audio_tgt_channels;
171     int64_t audio_src_channel_layout;
172     int64_t audio_tgt_channel_layout;
173     int audio_src_freq;
174     int audio_tgt_freq;
175     struct SwrContext *swr_ctx;
176     double audio_current_pts;
177     double audio_current_pts_drift;
178     int frame_drops_early;
179     int frame_drops_late;
180     AVFrame *frame;
181
182     enum ShowMode {
183         SHOW_MODE_NONE = -1, SHOW_MODE_VIDEO = 0, SHOW_MODE_WAVES, SHOW_MODE_RDFT, SHOW_MODE_NB
184     } show_mode;
185     int16_t sample_array[SAMPLE_ARRAY_SIZE];
186     int sample_array_index;
187     int last_i_start;
188     RDFTContext *rdft;
189     int rdft_bits;
190     FFTSample *rdft_data;
191     int xpos;
192
193     SDL_Thread *subtitle_tid;
194     int subtitle_stream;
195     int subtitle_stream_changed;
196     AVStream *subtitle_st;
197     PacketQueue subtitleq;
198     SubPicture subpq[SUBPICTURE_QUEUE_SIZE];
199     int subpq_size, subpq_rindex, subpq_windex;
200     SDL_mutex *subpq_mutex;
201     SDL_cond *subpq_cond;
202
203     double frame_timer;
204     double frame_last_pts;
205     double frame_last_duration;
206     double frame_last_dropped_pts;
207     double frame_last_returned_time;
208     double frame_last_filter_delay;
209     int64_t frame_last_dropped_pos;
210     double video_clock;                          ///< pts of last decoded frame / predicted pts of next decoded frame
211     int video_stream;
212     AVStream *video_st;
213     PacketQueue videoq;
214     double video_current_pts;                    ///< current displayed pts (different from video_clock if frame fifos are used)
215     double video_current_pts_drift;              ///< video_current_pts - time (av_gettime) at which we updated video_current_pts - used to have running video pts
216     int64_t video_current_pos;                   ///< current displayed file pos
217     VideoPicture pictq[VIDEO_PICTURE_QUEUE_SIZE];
218     int pictq_size, pictq_rindex, pictq_windex;
219     SDL_mutex *pictq_mutex;
220     SDL_cond *pictq_cond;
221 #if !CONFIG_AVFILTER
222     struct SwsContext *img_convert_ctx;
223 #endif
224
225     char filename[1024];
226     int width, height, xleft, ytop;
227     int step;
228
229 #if CONFIG_AVFILTER
230     AVFilterContext *out_video_filter;          ///< the last filter in the video chain
231 #endif
232
233     int refresh;
234 } VideoState;
235
236 static int opt_help(const char *opt, const char *arg);
237
238 /* options specified by the user */
239 static AVInputFormat *file_iformat;
240 static const char *input_filename;
241 static const char *window_title;
242 static int fs_screen_width;
243 static int fs_screen_height;
244 static int screen_width  = 0;
245 static int screen_height = 0;
246 static int audio_disable;
247 static int video_disable;
248 static int wanted_stream[AVMEDIA_TYPE_NB] = {
249     [AVMEDIA_TYPE_AUDIO]    = -1,
250     [AVMEDIA_TYPE_VIDEO]    = -1,
251     [AVMEDIA_TYPE_SUBTITLE] = -1,
252 };
253 static int seek_by_bytes = -1;
254 static int display_disable;
255 static int show_status = 1;
256 static int av_sync_type = AV_SYNC_AUDIO_MASTER;
257 static int64_t start_time = AV_NOPTS_VALUE;
258 static int64_t duration = AV_NOPTS_VALUE;
259 static int workaround_bugs = 1;
260 static int fast = 0;
261 static int genpts = 0;
262 static int lowres = 0;
263 static int idct = FF_IDCT_AUTO;
264 static enum AVDiscard skip_frame       = AVDISCARD_DEFAULT;
265 static enum AVDiscard skip_idct        = AVDISCARD_DEFAULT;
266 static enum AVDiscard skip_loop_filter = AVDISCARD_DEFAULT;
267 static int error_concealment = 3;
268 static int decoder_reorder_pts = -1;
269 static int autoexit;
270 static int exit_on_keydown;
271 static int exit_on_mousedown;
272 static int loop = 1;
273 static int framedrop = -1;
274 static enum ShowMode show_mode = SHOW_MODE_NONE;
275 static const char *audio_codec_name;
276 static const char *subtitle_codec_name;
277 static const char *video_codec_name;
278 static int rdftspeed = 20;
279 #if CONFIG_AVFILTER
280 static char *vfilters = NULL;
281 #endif
282
283 /* current context */
284 static int is_full_screen;
285 static int64_t audio_callback_time;
286
287 static AVPacket flush_pkt;
288
289 #define FF_ALLOC_EVENT   (SDL_USEREVENT)
290 #define FF_REFRESH_EVENT (SDL_USEREVENT + 1)
291 #define FF_QUIT_EVENT    (SDL_USEREVENT + 2)
292
293 static SDL_Surface *screen;
294
295 void av_noreturn exit_program(int ret)
296 {
297     exit(ret);
298 }
299
300 static int packet_queue_put(PacketQueue *q, AVPacket *pkt)
301 {
302     AVPacketList *pkt1;
303
304     /* duplicate the packet */
305     if (pkt != &flush_pkt && av_dup_packet(pkt) < 0)
306         return -1;
307
308     pkt1 = av_malloc(sizeof(AVPacketList));
309     if (!pkt1)
310         return -1;
311     pkt1->pkt = *pkt;
312     pkt1->next = NULL;
313
314
315     SDL_LockMutex(q->mutex);
316
317     if (!q->last_pkt)
318
319         q->first_pkt = pkt1;
320     else
321         q->last_pkt->next = pkt1;
322     q->last_pkt = pkt1;
323     q->nb_packets++;
324     q->size += pkt1->pkt.size + sizeof(*pkt1);
325     /* XXX: should duplicate packet data in DV case */
326     SDL_CondSignal(q->cond);
327
328     SDL_UnlockMutex(q->mutex);
329     return 0;
330 }
331
332 /* packet queue handling */
333 static void packet_queue_init(PacketQueue *q)
334 {
335     memset(q, 0, sizeof(PacketQueue));
336     q->mutex = SDL_CreateMutex();
337     q->cond = SDL_CreateCond();
338     packet_queue_put(q, &flush_pkt);
339 }
340
341 static void packet_queue_flush(PacketQueue *q)
342 {
343     AVPacketList *pkt, *pkt1;
344
345     SDL_LockMutex(q->mutex);
346     for (pkt = q->first_pkt; pkt != NULL; pkt = pkt1) {
347         pkt1 = pkt->next;
348         av_free_packet(&pkt->pkt);
349         av_freep(&pkt);
350     }
351     q->last_pkt = NULL;
352     q->first_pkt = NULL;
353     q->nb_packets = 0;
354     q->size = 0;
355     SDL_UnlockMutex(q->mutex);
356 }
357
358 static void packet_queue_end(PacketQueue *q)
359 {
360     packet_queue_flush(q);
361     SDL_DestroyMutex(q->mutex);
362     SDL_DestroyCond(q->cond);
363 }
364
365 static void packet_queue_abort(PacketQueue *q)
366 {
367     SDL_LockMutex(q->mutex);
368
369     q->abort_request = 1;
370
371     SDL_CondSignal(q->cond);
372
373     SDL_UnlockMutex(q->mutex);
374 }
375
376 /* return < 0 if aborted, 0 if no packet, and > 0 if a packet was returned.  */
377 static int packet_queue_get(PacketQueue *q, AVPacket *pkt, int block)
378 {
379     AVPacketList *pkt1;
380     int ret;
381
382     SDL_LockMutex(q->mutex);
383
384     for (;;) {
385         if (q->abort_request) {
386             ret = -1;
387             break;
388         }
389
390         pkt1 = q->first_pkt;
391         if (pkt1) {
392             q->first_pkt = pkt1->next;
393             if (!q->first_pkt)
394                 q->last_pkt = NULL;
395             q->nb_packets--;
396             q->size -= pkt1->pkt.size + sizeof(*pkt1);
397             *pkt = pkt1->pkt;
398             av_free(pkt1);
399             ret = 1;
400             break;
401         } else if (!block) {
402             ret = 0;
403             break;
404         } else {
405             SDL_CondWait(q->cond, q->mutex);
406         }
407     }
408     SDL_UnlockMutex(q->mutex);
409     return ret;
410 }
411
412 static inline void fill_rectangle(SDL_Surface *screen,
413                                   int x, int y, int w, int h, int color)
414 {
415     SDL_Rect rect;
416     rect.x = x;
417     rect.y = y;
418     rect.w = w;
419     rect.h = h;
420     SDL_FillRect(screen, &rect, color);
421 }
422
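/* ALPHA_BLEND blends newp over oldp with alpha a (0..255); the extra shift s
   compensates for newp being a sum of 2^s subsampled chroma values, as used
   by blend_subrect() below. */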
423 #define ALPHA_BLEND(a, oldp, newp, s)\
424 ((((oldp << s) * (255 - (a))) + (newp * (a))) / (255 << s))
425
426 #define RGBA_IN(r, g, b, a, s)\
427 {\
428     unsigned int v = ((const uint32_t *)(s))[0];\
429     a = (v >> 24) & 0xff;\
430     r = (v >> 16) & 0xff;\
431     g = (v >> 8) & 0xff;\
432     b = v & 0xff;\
433 }
434
435 #define YUVA_IN(y, u, v, a, s, pal)\
436 {\
437     unsigned int val = ((const uint32_t *)(pal))[*(const uint8_t*)(s)];\
438     a = (val >> 24) & 0xff;\
439     y = (val >> 16) & 0xff;\
440     u = (val >> 8) & 0xff;\
441     v = val & 0xff;\
442 }
443
444 #define YUVA_OUT(d, y, u, v, a)\
445 {\
446     ((uint32_t *)(d))[0] = (a << 24) | (y << 16) | (u << 8) | v;\
447 }
448
449
450 #define BPP 1
451
452 static void blend_subrect(AVPicture *dst, const AVSubtitleRect *rect, int imgw, int imgh)
453 {
454     int wrap, wrap3, width2, skip2;
455     int y, u, v, a, u1, v1, a1, w, h;
456     uint8_t *lum, *cb, *cr;
457     const uint8_t *p;
458     const uint32_t *pal;
459     int dstx, dsty, dstw, dsth;
460
461     dstw = av_clip(rect->w, 0, imgw);
462     dsth = av_clip(rect->h, 0, imgh);
463     dstx = av_clip(rect->x, 0, imgw - dstw);
464     dsty = av_clip(rect->y, 0, imgh - dsth);
465     lum = dst->data[0] + dsty * dst->linesize[0];
466     cb  = dst->data[1] + (dsty >> 1) * dst->linesize[1];
467     cr  = dst->data[2] + (dsty >> 1) * dst->linesize[2];
468
469     width2 = ((dstw + 1) >> 1) + (dstx & ~dstw & 1);
470     skip2 = dstx >> 1;
471     wrap = dst->linesize[0];
472     wrap3 = rect->pict.linesize[0];
473     p = rect->pict.data[0];
474     pal = (const uint32_t *)rect->pict.data[1];  /* Now in YCrCb! */
475
476     if (dsty & 1) {
477         lum += dstx;
478         cb += skip2;
479         cr += skip2;
480
481         if (dstx & 1) {
482             YUVA_IN(y, u, v, a, p, pal);
483             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
484             cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
485             cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
486             cb++;
487             cr++;
488             lum++;
489             p += BPP;
490         }
491         for (w = dstw - (dstx & 1); w >= 2; w -= 2) {
492             YUVA_IN(y, u, v, a, p, pal);
493             u1 = u;
494             v1 = v;
495             a1 = a;
496             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
497
498             YUVA_IN(y, u, v, a, p + BPP, pal);
499             u1 += u;
500             v1 += v;
501             a1 += a;
502             lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
503             cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
504             cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
505             cb++;
506             cr++;
507             p += 2 * BPP;
508             lum += 2;
509         }
510         if (w) {
511             YUVA_IN(y, u, v, a, p, pal);
512             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
513             cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
514             cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
515             p++;
516             lum++;
517         }
518         p += wrap3 - dstw * BPP;
519         lum += wrap - dstw - dstx;
520         cb += dst->linesize[1] - width2 - skip2;
521         cr += dst->linesize[2] - width2 - skip2;
522     }
523     for (h = dsth - (dsty & 1); h >= 2; h -= 2) {
524         lum += dstx;
525         cb += skip2;
526         cr += skip2;
527
528         if (dstx & 1) {
529             YUVA_IN(y, u, v, a, p, pal);
530             u1 = u;
531             v1 = v;
532             a1 = a;
533             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
534             p += wrap3;
535             lum += wrap;
536             YUVA_IN(y, u, v, a, p, pal);
537             u1 += u;
538             v1 += v;
539             a1 += a;
540             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
541             cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
542             cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
543             cb++;
544             cr++;
545             p += -wrap3 + BPP;
546             lum += -wrap + 1;
547         }
548         for (w = dstw - (dstx & 1); w >= 2; w -= 2) {
549             YUVA_IN(y, u, v, a, p, pal);
550             u1 = u;
551             v1 = v;
552             a1 = a;
553             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
554
555             YUVA_IN(y, u, v, a, p + BPP, pal);
556             u1 += u;
557             v1 += v;
558             a1 += a;
559             lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
560             p += wrap3;
561             lum += wrap;
562
563             YUVA_IN(y, u, v, a, p, pal);
564             u1 += u;
565             v1 += v;
566             a1 += a;
567             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
568
569             YUVA_IN(y, u, v, a, p + BPP, pal);
570             u1 += u;
571             v1 += v;
572             a1 += a;
573             lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
574
575             cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 2);
576             cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 2);
577
578             cb++;
579             cr++;
580             p += -wrap3 + 2 * BPP;
581             lum += -wrap + 2;
582         }
583         if (w) {
584             YUVA_IN(y, u, v, a, p, pal);
585             u1 = u;
586             v1 = v;
587             a1 = a;
588             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
589             p += wrap3;
590             lum += wrap;
591             YUVA_IN(y, u, v, a, p, pal);
592             u1 += u;
593             v1 += v;
594             a1 += a;
595             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
596             cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
597             cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
598             cb++;
599             cr++;
600             p += -wrap3 + BPP;
601             lum += -wrap + 1;
602         }
603         p += wrap3 + (wrap3 - dstw * BPP);
604         lum += wrap + (wrap - dstw - dstx);
605         cb += dst->linesize[1] - width2 - skip2;
606         cr += dst->linesize[2] - width2 - skip2;
607     }
608     /* handle odd height */
609     if (h) {
610         lum += dstx;
611         cb += skip2;
612         cr += skip2;
613
614         if (dstx & 1) {
615             YUVA_IN(y, u, v, a, p, pal);
616             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
617             cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
618             cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
619             cb++;
620             cr++;
621             lum++;
622             p += BPP;
623         }
624         for (w = dstw - (dstx & 1); w >= 2; w -= 2) {
625             YUVA_IN(y, u, v, a, p, pal);
626             u1 = u;
627             v1 = v;
628             a1 = a;
629             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
630
631             YUVA_IN(y, u, v, a, p + BPP, pal);
632             u1 += u;
633             v1 += v;
634             a1 += a;
635             lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
636             cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u, 1);
637             cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v, 1);
638             cb++;
639             cr++;
640             p += 2 * BPP;
641             lum += 2;
642         }
643         if (w) {
644             YUVA_IN(y, u, v, a, p, pal);
645             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
646             cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
647             cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
648         }
649     }
650 }
651
652 static void free_subpicture(SubPicture *sp)
653 {
654     avsubtitle_free(&sp->sub);
655 }
656
657 static void video_image_display(VideoState *is)
658 {
659     VideoPicture *vp;
660     SubPicture *sp;
661     AVPicture pict;
662     float aspect_ratio;
663     int width, height, x, y;
664     SDL_Rect rect;
665     int i;
666
667     vp = &is->pictq[is->pictq_rindex];
668     if (vp->bmp) {
669 #if CONFIG_AVFILTER
670          if (vp->picref->video->sample_aspect_ratio.num == 0)
671              aspect_ratio = 0;
672          else
673              aspect_ratio = av_q2d(vp->picref->video->sample_aspect_ratio);
674 #else
675
676         /* XXX: use the sample aspect ratio stored in the frame itself */
677         if (is->video_st->sample_aspect_ratio.num)
678             aspect_ratio = av_q2d(is->video_st->sample_aspect_ratio);
679         else if (is->video_st->codec->sample_aspect_ratio.num)
680             aspect_ratio = av_q2d(is->video_st->codec->sample_aspect_ratio);
681         else
682             aspect_ratio = 0;
683 #endif
684         if (aspect_ratio <= 0.0)
685             aspect_ratio = 1.0;
686         aspect_ratio *= (float)vp->width / (float)vp->height;
687
688         if (is->subtitle_st) {
689             if (is->subpq_size > 0) {
690                 sp = &is->subpq[is->subpq_rindex];
691
692                 if (vp->pts >= sp->pts + ((float) sp->sub.start_display_time / 1000)) {
693                     SDL_LockYUVOverlay (vp->bmp);
694
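                    /* SDL's YV12 overlay stores its planes in Y, V, U order, so
                       FFmpeg's U (data[1]) and V (data[2]) planes map to the
                       overlay's pixels[2] and pixels[1] respectively. */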
695                     pict.data[0] = vp->bmp->pixels[0];
696                     pict.data[1] = vp->bmp->pixels[2];
697                     pict.data[2] = vp->bmp->pixels[1];
698
699                     pict.linesize[0] = vp->bmp->pitches[0];
700                     pict.linesize[1] = vp->bmp->pitches[2];
701                     pict.linesize[2] = vp->bmp->pitches[1];
702
703                     for (i = 0; i < sp->sub.num_rects; i++)
704                         blend_subrect(&pict, sp->sub.rects[i],
705                                       vp->bmp->w, vp->bmp->h);
706
707                     SDL_UnlockYUVOverlay (vp->bmp);
708                 }
709             }
710         }
711
712
713         /* XXX: we assume the screen has a 1:1 pixel aspect ratio */
714         height = is->height;
715         width = ((int)rint(height * aspect_ratio)) & ~1;
716         if (width > is->width) {
717             width = is->width;
718             height = ((int)rint(width / aspect_ratio)) & ~1;
719         }
720         x = (is->width - width) / 2;
721         y = (is->height - height) / 2;
722         is->no_background = 0;
723         rect.x = is->xleft + x;
724         rect.y = is->ytop  + y;
725         rect.w = FFMAX(width,  1);
726         rect.h = FFMAX(height, 1);
727         SDL_DisplayYUVOverlay(vp->bmp, &rect);
728     }
729 }
730
731 static inline int compute_mod(int a, int b)
732 {
733     return a < 0 ? a%b + b : a%b;
734 }
735
736 static void video_audio_display(VideoState *s)
737 {
738     int i, i_start, x, y1, y, ys, delay, n, nb_display_channels;
739     int ch, channels, h, h2, bgcolor, fgcolor;
740     int16_t time_diff;
741     int rdft_bits, nb_freq;
742
743     for (rdft_bits = 1; (1 << rdft_bits) < 2 * s->height; rdft_bits++)
744         ;
745     nb_freq = 1 << (rdft_bits - 1);
746
747     /* compute the display index: center on the currently output samples */
748     channels = s->audio_tgt_channels;
749     nb_display_channels = channels;
750     if (!s->paused) {
751         int data_used= s->show_mode == SHOW_MODE_WAVES ? s->width : (2*nb_freq);
752         n = 2 * channels;
753         delay = s->audio_write_buf_size;
754         delay /= n;
755
756         /* to be more precise, we take into account the time spent since
757            the last buffer computation */
758         if (audio_callback_time) {
759             time_diff = av_gettime() - audio_callback_time;
760             delay -= (time_diff * s->audio_tgt_freq) / 1000000;
761         }
762
763         delay += 2 * data_used;
764         if (delay < data_used)
765             delay = data_used;
766
767         i_start= x = compute_mod(s->sample_array_index - delay * channels, SAMPLE_ARRAY_SIZE);
768         if (s->show_mode == SHOW_MODE_WAVES) {
769             h = INT_MIN;
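            /* pick a start index at a zero crossing ((b ^ c) < 0 means b and c have
               opposite signs) with the largest a - d score, so that the displayed
               waveform stays reasonably stable between refreshes */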
770             for (i = 0; i < 1000; i += channels) {
771                 int idx = (SAMPLE_ARRAY_SIZE + x - i) % SAMPLE_ARRAY_SIZE;
772                 int a = s->sample_array[idx];
773                 int b = s->sample_array[(idx + 4 * channels) % SAMPLE_ARRAY_SIZE];
774                 int c = s->sample_array[(idx + 5 * channels) % SAMPLE_ARRAY_SIZE];
775                 int d = s->sample_array[(idx + 9 * channels) % SAMPLE_ARRAY_SIZE];
776                 int score = a - d;
777                 if (h < score && (b ^ c) < 0) {
778                     h = score;
779                     i_start = idx;
780                 }
781             }
782         }
783
784         s->last_i_start = i_start;
785     } else {
786         i_start = s->last_i_start;
787     }
788
789     bgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0x00);
790     if (s->show_mode == SHOW_MODE_WAVES) {
791         fill_rectangle(screen,
792                        s->xleft, s->ytop, s->width, s->height,
793                        bgcolor);
794
795         fgcolor = SDL_MapRGB(screen->format, 0xff, 0xff, 0xff);
796
797         /* total height for one channel */
798         h = s->height / nb_display_channels;
799         /* graph height / 2 */
800         h2 = (h * 9) / 20;
801         for (ch = 0; ch < nb_display_channels; ch++) {
802             i = i_start + ch;
803             y1 = s->ytop + ch * h + (h / 2); /* position of center line */
804             for (x = 0; x < s->width; x++) {
805                 y = (s->sample_array[i] * h2) >> 15;
806                 if (y < 0) {
807                     y = -y;
808                     ys = y1 - y;
809                 } else {
810                     ys = y1;
811                 }
812                 fill_rectangle(screen,
813                                s->xleft + x, ys, 1, y,
814                                fgcolor);
815                 i += channels;
816                 if (i >= SAMPLE_ARRAY_SIZE)
817                     i -= SAMPLE_ARRAY_SIZE;
818             }
819         }
820
821         fgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0xff);
822
823         for (ch = 1; ch < nb_display_channels; ch++) {
824             y = s->ytop + ch * h;
825             fill_rectangle(screen,
826                            s->xleft, y, s->width, 1,
827                            fgcolor);
828         }
829         SDL_UpdateRect(screen, s->xleft, s->ytop, s->width, s->height);
830     } else {
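        /* SHOW_MODE_RDFT: draw one spectrogram column at x = xpos per call */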
831         nb_display_channels= FFMIN(nb_display_channels, 2);
832         if (rdft_bits != s->rdft_bits) {
833             av_rdft_end(s->rdft);
834             av_free(s->rdft_data);
835             s->rdft = av_rdft_init(rdft_bits, DFT_R2C);
836             s->rdft_bits = rdft_bits;
837             s->rdft_data = av_malloc(4 * nb_freq * sizeof(*s->rdft_data));
838         }
839         {
840             FFTSample *data[2];
841             for (ch = 0; ch < nb_display_channels; ch++) {
842                 data[ch] = s->rdft_data + 2 * nb_freq * ch;
843                 i = i_start + ch;
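                /* apply a parabolic (Welch) window, 1 - w^2, to the samples before
                   the real FFT to reduce spectral leakage */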
844                 for (x = 0; x < 2 * nb_freq; x++) {
845                     double w = (x-nb_freq) * (1.0 / nb_freq);
846                     data[ch][x] = s->sample_array[i] * (1.0 - w * w);
847                     i += channels;
848                     if (i >= SAMPLE_ARRAY_SIZE)
849                         i -= SAMPLE_ARRAY_SIZE;
850                 }
851                 av_rdft_calc(s->rdft, data[ch]);
852             }
853             // Least efficient way to do this; we could of course access the pixels directly, but this is more than fast enough.
854             for (y = 0; y < s->height; y++) {
855                 double w = 1 / sqrt(nb_freq);
856                 int a = sqrt(w * sqrt(data[0][2 * y + 0] * data[0][2 * y + 0] + data[0][2 * y + 1] * data[0][2 * y + 1]));
857                 int b = (nb_display_channels == 2 ) ? sqrt(w * sqrt(data[1][2 * y + 0] * data[1][2 * y + 0]
858                        + data[1][2 * y + 1] * data[1][2 * y + 1])) : a;
859                 a = FFMIN(a, 255);
860                 b = FFMIN(b, 255);
861                 fgcolor = SDL_MapRGB(screen->format, a, b, (a + b) / 2);
862
863                 fill_rectangle(screen,
864                             s->xpos, s->height-y, 1, 1,
865                             fgcolor);
866             }
867         }
868         SDL_UpdateRect(screen, s->xpos, s->ytop, 1, s->height);
869         if (!s->paused)
870             s->xpos++;
871         if (s->xpos >= s->width)
872             s->xpos= s->xleft;
873     }
874 }
875
876 static void stream_close(VideoState *is)
877 {
878     VideoPicture *vp;
879     int i;
880     /* XXX: use a special url_shutdown call to abort parsing cleanly */
881     is->abort_request = 1;
882     SDL_WaitThread(is->read_tid, NULL);
883     SDL_WaitThread(is->refresh_tid, NULL);
884
885     /* free all pictures */
886     for (i = 0; i < VIDEO_PICTURE_QUEUE_SIZE; i++) {
887         vp = &is->pictq[i];
888 #if CONFIG_AVFILTER
889         if (vp->picref) {
890             avfilter_unref_buffer(vp->picref);
891             vp->picref = NULL;
892         }
893 #endif
894         if (vp->bmp) {
895             SDL_FreeYUVOverlay(vp->bmp);
896             vp->bmp = NULL;
897         }
898     }
899     SDL_DestroyMutex(is->pictq_mutex);
900     SDL_DestroyCond(is->pictq_cond);
901     SDL_DestroyMutex(is->subpq_mutex);
902     SDL_DestroyCond(is->subpq_cond);
903 #if !CONFIG_AVFILTER
904     if (is->img_convert_ctx)
905         sws_freeContext(is->img_convert_ctx);
906 #endif
907     av_free(is);
908 }
909
910 static void do_exit(VideoState *is)
911 {
912     if (is) {
913         stream_close(is);
914     }
915     av_lockmgr_register(NULL);
916     uninit_opts();
917 #if CONFIG_AVFILTER
918     avfilter_uninit();
919 #endif
920     avformat_network_deinit();
921     if (show_status)
922         printf("\n");
923     SDL_Quit();
924     av_log(NULL, AV_LOG_QUIET, "%s", "");
925     exit(0);
926 }
927
928 static void sigterm_handler(int sig)
929 {
930     exit(123);
931 }
932
933 static int video_open(VideoState *is, int force_set_video_mode)
934 {
935     int flags = SDL_HWSURFACE | SDL_ASYNCBLIT | SDL_HWACCEL;
936     int w,h;
937
938     if (is_full_screen) flags |= SDL_FULLSCREEN;
939     else                flags |= SDL_RESIZABLE;
940
941     if (is_full_screen && fs_screen_width) {
942         w = fs_screen_width;
943         h = fs_screen_height;
944     } else if (!is_full_screen && screen_width) {
945         w = screen_width;
946         h = screen_height;
947 #if CONFIG_AVFILTER
948     } else if (is->out_video_filter && is->out_video_filter->inputs[0]) {
949         w = is->out_video_filter->inputs[0]->w;
950         h = is->out_video_filter->inputs[0]->h;
951 #else
952     } else if (is->video_st && is->video_st->codec->width) {
953         w = is->video_st->codec->width;
954         h = is->video_st->codec->height;
955 #endif
956     } else {
957         w = 640;
958         h = 480;
959     }
960     if (screen && is->width == screen->w && screen->w == w
961        && is->height== screen->h && screen->h == h && !force_set_video_mode)
962         return 0;
963     screen = SDL_SetVideoMode(w, h, 0, flags);
964     if (!screen) {
965         fprintf(stderr, "SDL: could not set video mode - exiting\n");
966         do_exit(is);
967     }
968     if (!window_title)
969         window_title = input_filename;
970     SDL_WM_SetCaption(window_title, window_title);
971
972     is->width  = screen->w;
973     is->height = screen->h;
974
975     return 0;
976 }
977
978 /* display the current picture, if any */
979 static void video_display(VideoState *is)
980 {
981     if (!screen)
982         video_open(is, 0);
983     if (is->audio_st && is->show_mode != SHOW_MODE_VIDEO)
984         video_audio_display(is);
985     else if (is->video_st)
986         video_image_display(is);
987 }
988
989 static int refresh_thread(void *opaque)
990 {
991     VideoState *is= opaque;
992     while (!is->abort_request) {
993         SDL_Event event;
994         event.type = FF_REFRESH_EVENT;
995         event.user.data1 = opaque;
996         if (!is->refresh && (!is->paused || is->force_refresh)) {
997             is->refresh = 1;
998             SDL_PushEvent(&event);
999         }
1000         // FIXME: ideally we should wait for the correct time, but SDL's event passing is so slow it would be silly
1001         usleep(is->audio_st && is->show_mode != SHOW_MODE_VIDEO ? rdftspeed*1000 : 5000);
1002     }
1003     return 0;
1004 }
1005
1006 /* get the current audio clock value */
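/* Note: the *_pts_drift values hold the pts minus the system time at the last
   update, so adding the current system time extrapolates the clock while playing. */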
1007 static double get_audio_clock(VideoState *is)
1008 {
1009     if (is->paused) {
1010         return is->audio_current_pts;
1011     } else {
1012         return is->audio_current_pts_drift + av_gettime() / 1000000.0;
1013     }
1014 }
1015
1016 /* get the current video clock value */
1017 static double get_video_clock(VideoState *is)
1018 {
1019     if (is->paused) {
1020         return is->video_current_pts;
1021     } else {
1022         return is->video_current_pts_drift + av_gettime() / 1000000.0;
1023     }
1024 }
1025
1026 /* get the current external clock value */
1027 static double get_external_clock(VideoState *is)
1028 {
1029     int64_t ti;
1030     ti = av_gettime();
1031     return is->external_clock + ((ti - is->external_clock_time) * 1e-6);
1032 }
1033
1034 /* get the current master clock value */
1035 static double get_master_clock(VideoState *is)
1036 {
1037     double val;
1038
1039     if (is->av_sync_type == AV_SYNC_VIDEO_MASTER) {
1040         if (is->video_st)
1041             val = get_video_clock(is);
1042         else
1043             val = get_audio_clock(is);
1044     } else if (is->av_sync_type == AV_SYNC_AUDIO_MASTER) {
1045         if (is->audio_st)
1046             val = get_audio_clock(is);
1047         else
1048             val = get_video_clock(is);
1049     } else {
1050         val = get_external_clock(is);
1051     }
1052     return val;
1053 }
1054
1055 /* seek in the stream */
1056 static void stream_seek(VideoState *is, int64_t pos, int64_t rel, int seek_by_bytes)
1057 {
1058     if (!is->seek_req) {
1059         is->seek_pos = pos;
1060         is->seek_rel = rel;
1061         is->seek_flags &= ~AVSEEK_FLAG_BYTE;
1062         if (seek_by_bytes)
1063             is->seek_flags |= AVSEEK_FLAG_BYTE;
1064         is->seek_req = 1;
1065     }
1066 }
1067
1068 /* pause or resume the video */
1069 static void stream_toggle_pause(VideoState *is)
1070 {
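    /* when resuming, advance frame_timer by (roughly) the wall-clock time spent
       paused so that the next frame is not considered late */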
1071     if (is->paused) {
1072         is->frame_timer += av_gettime() / 1000000.0 + is->video_current_pts_drift - is->video_current_pts;
1073         if (is->read_pause_return != AVERROR(ENOSYS)) {
1074             is->video_current_pts = is->video_current_pts_drift + av_gettime() / 1000000.0;
1075         }
1076         is->video_current_pts_drift = is->video_current_pts - av_gettime() / 1000000.0;
1077     }
1078     is->paused = !is->paused;
1079 }
1080
1081 static double compute_target_delay(double delay, VideoState *is)
1082 {
1083     double sync_threshold, diff;
1084
1085     /* update delay to follow master synchronisation source */
1086     if (((is->av_sync_type == AV_SYNC_AUDIO_MASTER && is->audio_st) ||
1087          is->av_sync_type == AV_SYNC_EXTERNAL_CLOCK)) {
1088         /* if video is slave, we try to correct big delays by
1089            duplicating or deleting a frame */
1090         diff = get_video_clock(is) - get_master_clock(is);
1091
1092         /* skip or repeat frame. We take into account the
1093            delay to compute the threshold. I still don't know
1094            if it is the best guess */
1095         sync_threshold = FFMAX(AV_SYNC_THRESHOLD, delay);
1096         if (fabs(diff) < AV_NOSYNC_THRESHOLD) {
1097             if (diff <= -sync_threshold)
1098                 delay = 0;
1099             else if (diff >= sync_threshold)
1100                 delay = 2 * delay;
1101         }
1102     }
1103
1104     av_dlog(NULL, "video: delay=%0.3f A-V=%f\n",
1105             delay, -diff);
1106
1107     return delay;
1108 }
1109
1110 static void pictq_next_picture(VideoState *is) {
1111     /* update queue size and signal for next picture */
1112     if (++is->pictq_rindex == VIDEO_PICTURE_QUEUE_SIZE)
1113         is->pictq_rindex = 0;
1114
1115     SDL_LockMutex(is->pictq_mutex);
1116     is->pictq_size--;
1117     SDL_CondSignal(is->pictq_cond);
1118     SDL_UnlockMutex(is->pictq_mutex);
1119 }
1120
1121 static void update_video_pts(VideoState *is, double pts, int64_t pos) {
1122     double time = av_gettime() / 1000000.0;
1123     /* update current video pts */
1124     is->video_current_pts = pts;
1125     is->video_current_pts_drift = is->video_current_pts - time;
1126     is->video_current_pos = pos;
1127     is->frame_last_pts = pts;
1128 }
1129
1130 /* called to display each frame */
1131 static void video_refresh(void *opaque)
1132 {
1133     VideoState *is = opaque;
1134     VideoPicture *vp;
1135     double time;
1136
1137     SubPicture *sp, *sp2;
1138
1139     if (is->video_st) {
1140 retry:
1141         if (is->pictq_size == 0) {
1142             SDL_LockMutex(is->pictq_mutex);
1143             if (is->frame_last_dropped_pts != AV_NOPTS_VALUE && is->frame_last_dropped_pts > is->frame_last_pts) {
1144                 update_video_pts(is, is->frame_last_dropped_pts, is->frame_last_dropped_pos);
1145                 is->frame_last_dropped_pts = AV_NOPTS_VALUE;
1146             }
1147             SDL_UnlockMutex(is->pictq_mutex);
1148             // nothing to do, no picture to display in the queue
1149         } else {
1150             double last_duration, duration, delay;
1151             /* dequeue the picture */
1152             vp = &is->pictq[is->pictq_rindex];
1153
1154             if (vp->skip) {
1155                 pictq_next_picture(is);
1156                 goto retry;
1157             }
1158
1159             if (is->paused)
1160                 goto display;
1161
1162             /* compute nominal last_duration */
1163             last_duration = vp->pts - is->frame_last_pts;
1164             if (last_duration > 0 && last_duration < 10.0) {
1165                 /* if duration of the last frame was sane, update last_duration in video state */
1166                 is->frame_last_duration = last_duration;
1167             }
1168             delay = compute_target_delay(is->frame_last_duration, is);
1169
1170             time= av_gettime()/1000000.0;
1171             if (time < is->frame_timer + delay)
1172                 return;
1173
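            /* if we have fallen behind by more than one delay, advance frame_timer by
               a whole number of delays so it ends up within one delay of the current time */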
1174             if (delay > 0)
1175                 is->frame_timer += delay * FFMAX(1, floor((time-is->frame_timer) / delay));
1176
1177             SDL_LockMutex(is->pictq_mutex);
1178             update_video_pts(is, vp->pts, vp->pos);
1179             SDL_UnlockMutex(is->pictq_mutex);
1180
1181             if (is->pictq_size > 1) {
1182                 VideoPicture *nextvp = &is->pictq[(is->pictq_rindex + 1) % VIDEO_PICTURE_QUEUE_SIZE];
1183                 duration = nextvp->pts - vp->pts; // More accurate this way, since 1/time_base often does not reflect the real frame rate
1184             } else {
1185                 duration = vp->duration;
1186             }
1187
1188             if((framedrop>0 || (framedrop && is->audio_st)) && time > is->frame_timer + duration){
1189                 if(is->pictq_size > 1){
1190                     is->frame_drops_late++;
1191                     pictq_next_picture(is);
1192                     goto retry;
1193                 }
1194             }
1195
1196             if (is->subtitle_st) {
1197                 if (is->subtitle_stream_changed) {
1198                     SDL_LockMutex(is->subpq_mutex);
1199
1200                     while (is->subpq_size) {
1201                         free_subpicture(&is->subpq[is->subpq_rindex]);
1202
1203                         /* update queue size and signal for next picture */
1204                         if (++is->subpq_rindex == SUBPICTURE_QUEUE_SIZE)
1205                             is->subpq_rindex = 0;
1206
1207                         is->subpq_size--;
1208                     }
1209                     is->subtitle_stream_changed = 0;
1210
1211                     SDL_CondSignal(is->subpq_cond);
1212                     SDL_UnlockMutex(is->subpq_mutex);
1213                 } else {
1214                     if (is->subpq_size > 0) {
1215                         sp = &is->subpq[is->subpq_rindex];
1216
1217                         if (is->subpq_size > 1)
1218                             sp2 = &is->subpq[(is->subpq_rindex + 1) % SUBPICTURE_QUEUE_SIZE];
1219                         else
1220                             sp2 = NULL;
1221
1222                         if ((is->video_current_pts > (sp->pts + ((float) sp->sub.end_display_time / 1000)))
1223                                 || (sp2 && is->video_current_pts > (sp2->pts + ((float) sp2->sub.start_display_time / 1000))))
1224                         {
1225                             free_subpicture(sp);
1226
1227                             /* update queue size and signal for next picture */
1228                             if (++is->subpq_rindex == SUBPICTURE_QUEUE_SIZE)
1229                                 is->subpq_rindex = 0;
1230
1231                             SDL_LockMutex(is->subpq_mutex);
1232                             is->subpq_size--;
1233                             SDL_CondSignal(is->subpq_cond);
1234                             SDL_UnlockMutex(is->subpq_mutex);
1235                         }
1236                     }
1237                 }
1238             }
1239
1240 display:
1241             /* display picture */
1242             if (!display_disable)
1243                 video_display(is);
1244
1245             if (!is->paused)
1246                 pictq_next_picture(is);
1247         }
1248     } else if (is->audio_st) {
1249         /* draw the next audio frame */
1250
1251         /* if there is only an audio stream, then display the audio bars (better
1252            than nothing, just to test the implementation) */
1253
1254         /* display picture */
1255         if (!display_disable)
1256             video_display(is);
1257     }
1258     is->force_refresh = 0;
1259     if (show_status) {
1260         static int64_t last_time;
1261         int64_t cur_time;
1262         int aqsize, vqsize, sqsize;
1263         double av_diff;
1264
1265         cur_time = av_gettime();
1266         if (!last_time || (cur_time - last_time) >= 30000) {
1267             aqsize = 0;
1268             vqsize = 0;
1269             sqsize = 0;
1270             if (is->audio_st)
1271                 aqsize = is->audioq.size;
1272             if (is->video_st)
1273                 vqsize = is->videoq.size;
1274             if (is->subtitle_st)
1275                 sqsize = is->subtitleq.size;
1276             av_diff = 0;
1277             if (is->audio_st && is->video_st)
1278                 av_diff = get_audio_clock(is) - get_video_clock(is);
1279             printf("%7.2f A-V:%7.3f fd=%4d aq=%5dKB vq=%5dKB sq=%5dB f=%"PRId64"/%"PRId64"   \r",
1280                    get_master_clock(is),
1281                    av_diff,
1282                    is->frame_drops_early + is->frame_drops_late,
1283                    aqsize / 1024,
1284                    vqsize / 1024,
1285                    sqsize,
1286                    is->video_st ? is->video_st->codec->pts_correction_num_faulty_dts : 0,
1287                    is->video_st ? is->video_st->codec->pts_correction_num_faulty_pts : 0);
1288             fflush(stdout);
1289             last_time = cur_time;
1290         }
1291     }
1292 }
1293
1294 /* allocate a picture (this needs to be done in the main thread to avoid
1295    potential locking problems) */
1296 static void alloc_picture(void *opaque)
1297 {
1298     VideoState *is = opaque;
1299     VideoPicture *vp;
1300
1301     vp = &is->pictq[is->pictq_windex];
1302
1303     if (vp->bmp)
1304         SDL_FreeYUVOverlay(vp->bmp);
1305
1306 #if CONFIG_AVFILTER
1307     if (vp->picref)
1308         avfilter_unref_buffer(vp->picref);
1309     vp->picref = NULL;
1310
1311     vp->width   = is->out_video_filter->inputs[0]->w;
1312     vp->height  = is->out_video_filter->inputs[0]->h;
1313     vp->pix_fmt = is->out_video_filter->inputs[0]->format;
1314 #else
1315     vp->width   = is->video_st->codec->width;
1316     vp->height  = is->video_st->codec->height;
1317     vp->pix_fmt = is->video_st->codec->pix_fmt;
1318 #endif
1319
1320     vp->bmp = SDL_CreateYUVOverlay(vp->width, vp->height,
1321                                    SDL_YV12_OVERLAY,
1322                                    screen);
1323     if (!vp->bmp || vp->bmp->pitches[0] < vp->width) {
1324         /* SDL allocates a buffer smaller than requested if the video
1325          * overlay hardware is unable to support the requested size. */
1326         fprintf(stderr, "Error: the video system does not support an image\n"
1327                         "size of %dx%d pixels. Try using -lowres or -vf \"scale=w:h\"\n"
1328                         "to reduce the image size.\n", vp->width, vp->height );
1329         do_exit(is);
1330     }
1331
1332     SDL_LockMutex(is->pictq_mutex);
1333     vp->allocated = 1;
1334     SDL_CondSignal(is->pictq_cond);
1335     SDL_UnlockMutex(is->pictq_mutex);
1336 }
1337
1338 static int queue_picture(VideoState *is, AVFrame *src_frame, double pts1, int64_t pos)
1339 {
1340     VideoPicture *vp;
1341     double frame_delay, pts = pts1;
1342
1343     /* compute the exact PTS for the picture if it is omitted in the stream
1344      * pts1 is the dts of the pkt / pts of the frame */
1345     if (pts != 0) {
1346         /* update video clock with pts, if present */
1347         is->video_clock = pts;
1348     } else {
1349         pts = is->video_clock;
1350     }
1351     /* update video clock for next frame */
1352     frame_delay = av_q2d(is->video_st->codec->time_base);
1353     /* for MPEG2, the frame can be repeated, so we update the
1354        clock accordingly */
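    /* repeat_pict is expressed in halves of a frame duration, hence the 0.5 factor */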
1355     frame_delay += src_frame->repeat_pict * (frame_delay * 0.5);
1356     is->video_clock += frame_delay;
1357
1358 #if defined(DEBUG_SYNC) && 0
1359     printf("frame_type=%c clock=%0.3f pts=%0.3f\n",
1360            av_get_picture_type_char(src_frame->pict_type), pts, pts1);
1361 #endif
1362
1363     /* wait until we have space to put a new picture */
1364     SDL_LockMutex(is->pictq_mutex);
1365
1366     while (is->pictq_size >= VIDEO_PICTURE_QUEUE_SIZE &&
1367            !is->videoq.abort_request) {
1368         SDL_CondWait(is->pictq_cond, is->pictq_mutex);
1369     }
1370     SDL_UnlockMutex(is->pictq_mutex);
1371
1372     if (is->videoq.abort_request)
1373         return -1;
1374
1375     vp = &is->pictq[is->pictq_windex];
1376
1377     vp->duration = frame_delay;
1378
1379     /* alloc or resize hardware picture buffer */
1380     if (!vp->bmp || vp->reallocate ||
1381 #if CONFIG_AVFILTER
1382         vp->width  != is->out_video_filter->inputs[0]->w ||
1383         vp->height != is->out_video_filter->inputs[0]->h) {
1384 #else
1385         vp->width != is->video_st->codec->width ||
1386         vp->height != is->video_st->codec->height) {
1387 #endif
1388         SDL_Event event;
1389
1390         vp->allocated  = 0;
1391         vp->reallocate = 0;
1392
1393         /* the allocation must be done in the main thread to avoid
1394            locking problems */
1395         event.type = FF_ALLOC_EVENT;
1396         event.user.data1 = is;
1397         SDL_PushEvent(&event);
1398
1399         /* wait until the picture is allocated */
1400         SDL_LockMutex(is->pictq_mutex);
1401         while (!vp->allocated && !is->videoq.abort_request) {
1402             SDL_CondWait(is->pictq_cond, is->pictq_mutex);
1403         }
1404         /* if the queue is aborted, we have to pop the pending ALLOC event or wait for the allocation to complete */
1405         if (is->videoq.abort_request && SDL_PeepEvents(&event, 1, SDL_GETEVENT, SDL_EVENTMASK(FF_ALLOC_EVENT)) != 1) {
1406             while (!vp->allocated) {
1407                 SDL_CondWait(is->pictq_cond, is->pictq_mutex);
1408             }
1409         }
1410         SDL_UnlockMutex(is->pictq_mutex);
1411
1412         if (is->videoq.abort_request)
1413             return -1;
1414     }
1415
1416     /* if the frame is not skipped, then display it */
1417     if (vp->bmp) {
1418         AVPicture pict = { { 0 } };
1419 #if CONFIG_AVFILTER
1420         if (vp->picref)
1421             avfilter_unref_buffer(vp->picref);
1422         vp->picref = src_frame->opaque;
1423 #endif
1424
1425         /* get a pointer to the bitmap */
1426         SDL_LockYUVOverlay (vp->bmp);
1427
1428         pict.data[0] = vp->bmp->pixels[0];
1429         pict.data[1] = vp->bmp->pixels[2];
1430         pict.data[2] = vp->bmp->pixels[1];
1431
1432         pict.linesize[0] = vp->bmp->pitches[0];
1433         pict.linesize[1] = vp->bmp->pitches[2];
1434         pict.linesize[2] = vp->bmp->pitches[1];
1435
1436 #if CONFIG_AVFILTER
1437         // FIXME use direct rendering
1438         av_picture_copy(&pict, (AVPicture *)src_frame,
1439                         vp->pix_fmt, vp->width, vp->height);
1440 #else
1441         sws_flags = av_get_int(sws_opts, "sws_flags", NULL);
1442         is->img_convert_ctx = sws_getCachedContext(is->img_convert_ctx,
1443             vp->width, vp->height, vp->pix_fmt, vp->width, vp->height,
1444             PIX_FMT_YUV420P, sws_flags, NULL, NULL, NULL);
1445         if (is->img_convert_ctx == NULL) {
1446             fprintf(stderr, "Cannot initialize the conversion context\n");
1447             exit(1);
1448         }
1449         sws_scale(is->img_convert_ctx, src_frame->data, src_frame->linesize,
1450                   0, vp->height, pict.data, pict.linesize);
1451 #endif
1452         /* update the bitmap content */
1453         SDL_UnlockYUVOverlay(vp->bmp);
1454
1455         vp->pts = pts;
1456         vp->pos = pos;
1457         vp->skip = 0;
1458
1459         /* now we can update the picture count */
1460         if (++is->pictq_windex == VIDEO_PICTURE_QUEUE_SIZE)
1461             is->pictq_windex = 0;
1462         SDL_LockMutex(is->pictq_mutex);
1463         is->pictq_size++;
1464         SDL_UnlockMutex(is->pictq_mutex);
1465     }
1466     return 0;
1467 }
1468
1469 static int get_video_frame(VideoState *is, AVFrame *frame, int64_t *pts, AVPacket *pkt)
1470 {
1471     int got_picture, i;
1472
1473     if (packet_queue_get(&is->videoq, pkt, 1) < 0)
1474         return -1;
1475
1476     if (pkt->data == flush_pkt.data) {
1477         avcodec_flush_buffers(is->video_st->codec);
1478
1479         SDL_LockMutex(is->pictq_mutex);
1480         // Make sure there are no long delay timers (ideally we should just flush the queue, but that's harder)
1481         for (i = 0; i < VIDEO_PICTURE_QUEUE_SIZE; i++) {
1482             is->pictq[i].skip = 1;
1483         }
1484         while (is->pictq_size && !is->videoq.abort_request) {
1485             SDL_CondWait(is->pictq_cond, is->pictq_mutex);
1486         }
1487         is->video_current_pos = -1;
1488         is->frame_last_pts = AV_NOPTS_VALUE;
1489         is->frame_last_duration = 0;
1490         is->frame_timer = (double)av_gettime() / 1000000.0;
1491         is->frame_last_dropped_pts = AV_NOPTS_VALUE;
1492         SDL_UnlockMutex(is->pictq_mutex);
1493
1494         return 0;
1495     }
1496
1497     avcodec_decode_video2(is->video_st->codec, frame, &got_picture, pkt);
1498
1499     if (got_picture) {
1500         int ret = 1;
1501
1502         if (decoder_reorder_pts == -1) {
1503             *pts = av_frame_get_best_effort_timestamp(frame);
1504         } else if (decoder_reorder_pts) {
1505             *pts = frame->pkt_pts;
1506         } else {
1507             *pts = frame->pkt_dts;
1508         }
1509
1510         if (*pts == AV_NOPTS_VALUE) {
1511             *pts = 0;
1512         }
1513
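        /* early frame drop: if the video clock lags the master clock by more than
           this frame would advance it (allowing for the last filter delay), drop the
           frame before it is queued */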
1514         if (((is->av_sync_type == AV_SYNC_AUDIO_MASTER && is->audio_st) || is->av_sync_type == AV_SYNC_EXTERNAL_CLOCK) &&
1515              (framedrop>0 || (framedrop && is->audio_st))) {
1516             SDL_LockMutex(is->pictq_mutex);
1517             if (is->frame_last_pts != AV_NOPTS_VALUE && *pts) {
1518                 double clockdiff = get_video_clock(is) - get_master_clock(is);
1519                 double dpts = av_q2d(is->video_st->time_base) * *pts;
1520                 double ptsdiff = dpts - is->frame_last_pts;
1521                 if (fabs(clockdiff) < AV_NOSYNC_THRESHOLD &&
1522                      ptsdiff > 0 && ptsdiff < AV_NOSYNC_THRESHOLD &&
1523                      clockdiff + ptsdiff - is->frame_last_filter_delay < 0) {
1524                     is->frame_last_dropped_pos = pkt->pos;
1525                     is->frame_last_dropped_pts = dpts;
1526                     is->frame_drops_early++;
1527                     ret = 0;
1528                 }
1529             }
1530             SDL_UnlockMutex(is->pictq_mutex);
1531         }
1532
1533         if (ret)
1534             is->frame_last_returned_time = av_gettime() / 1000000.0;
1535
1536         return ret;
1537     }
1538     return 0;
1539 }
1540
1541 #if CONFIG_AVFILTER
1542 typedef struct {
1543     VideoState *is;
1544     AVFrame *frame;
1545     int use_dr1;
1546 } FilterPriv;
1547
1548 static int input_get_buffer(AVCodecContext *codec, AVFrame *pic)
1549 {
1550     AVFilterContext *ctx = codec->opaque;
1551     AVFilterBufferRef  *ref;
1552     int perms = AV_PERM_WRITE;
1553     int i, w, h, stride[AV_NUM_DATA_POINTERS];
1554     unsigned edge;
1555     int pixel_size;
1556
1557     av_assert0(codec->flags & CODEC_FLAG_EMU_EDGE);
1558
1559     if (codec->codec->capabilities & CODEC_CAP_NEG_LINESIZES)
1560         perms |= AV_PERM_NEG_LINESIZES;
1561
1562     if (pic->buffer_hints & FF_BUFFER_HINTS_VALID) {
1563         if (pic->buffer_hints & FF_BUFFER_HINTS_READABLE) perms |= AV_PERM_READ;
1564         if (pic->buffer_hints & FF_BUFFER_HINTS_PRESERVE) perms |= AV_PERM_PRESERVE;
1565         if (pic->buffer_hints & FF_BUFFER_HINTS_REUSABLE) perms |= AV_PERM_REUSE2;
1566     }
1567     if (pic->reference) perms |= AV_PERM_READ | AV_PERM_PRESERVE;
1568
1569     w = codec->width;
1570     h = codec->height;
1571
1572     if(av_image_check_size(w, h, 0, codec) || codec->pix_fmt<0)
1573         return -1;
1574
1575     avcodec_align_dimensions2(codec, &w, &h, stride);
1576     edge = codec->flags & CODEC_FLAG_EMU_EDGE ? 0 : avcodec_get_edge_width();
1577     w += edge << 1;
1578     h += edge << 1;
1579     if (codec->pix_fmt != ctx->outputs[0]->format) {
1580         av_log(codec, AV_LOG_ERROR, "Pixel format mismatches %d %d\n", codec->pix_fmt, ctx->outputs[0]->format);
1581         return -1;
1582     }
1583     if (!(ref = avfilter_get_video_buffer(ctx->outputs[0], perms, w, h)))
1584         return -1;
1585
1586     pixel_size = av_pix_fmt_descriptors[ref->format].comp[0].step_minus1 + 1;
1587     ref->video->w = codec->width;
1588     ref->video->h = codec->height;
1589     for (i = 0; i < 4; i ++) {
1590         unsigned hshift = (i == 1 || i == 2) ? av_pix_fmt_descriptors[ref->format].log2_chroma_w : 0;
1591         unsigned vshift = (i == 1 || i == 2) ? av_pix_fmt_descriptors[ref->format].log2_chroma_h : 0;
1592
1593         if (ref->data[i]) {
1594             ref->data[i]    += ((edge * pixel_size) >> hshift) + ((edge * ref->linesize[i]) >> vshift);
1595         }
1596         pic->data[i]     = ref->data[i];
1597         pic->linesize[i] = ref->linesize[i];
1598     }
1599     pic->opaque = ref;
1600     pic->type   = FF_BUFFER_TYPE_USER;
1601     pic->reordered_opaque = codec->reordered_opaque;
1602     pic->width               = codec->width;
1603     pic->height              = codec->height;
1604     pic->format              = codec->pix_fmt;
1605     pic->sample_aspect_ratio = codec->sample_aspect_ratio;
1606     if (codec->pkt) pic->pkt_pts = codec->pkt->pts;
1607     else            pic->pkt_pts = AV_NOPTS_VALUE;
1608     return 0;
1609 }
1610
1611 static void input_release_buffer(AVCodecContext *codec, AVFrame *pic)
1612 {
1613     memset(pic->data, 0, sizeof(pic->data));
1614     avfilter_unref_buffer(pic->opaque);
1615 }
1616
1617 static int input_reget_buffer(AVCodecContext *codec, AVFrame *pic)
1618 {
1619     AVFilterBufferRef *ref = pic->opaque;
1620
1621     if (pic->data[0] == NULL) {
1622         pic->buffer_hints |= FF_BUFFER_HINTS_READABLE;
1623         return codec->get_buffer(codec, pic);
1624     }
1625
1626     if ((codec->width != ref->video->w) || (codec->height != ref->video->h) ||
1627         (codec->pix_fmt != ref->format)) {
1628         av_log(codec, AV_LOG_ERROR, "Picture properties changed.\n");
1629         return -1;
1630     }
1631
1632     pic->reordered_opaque = codec->reordered_opaque;
1633     if (codec->pkt) pic->pkt_pts = codec->pkt->pts;
1634     else            pic->pkt_pts = AV_NOPTS_VALUE;
1635     return 0;
1636 }
1637
1638 static int input_init(AVFilterContext *ctx, const char *args, void *opaque)
1639 {
1640     FilterPriv *priv = ctx->priv;
1641     AVCodecContext *codec;
1642     if (!opaque) return -1;
1643
1644     priv->is = opaque;
1645     codec    = priv->is->video_st->codec;
1646     codec->opaque = ctx;
1647     if (codec->codec->capabilities & CODEC_CAP_DR1) {
1648         av_assert0(codec->flags & CODEC_FLAG_EMU_EDGE);
1649         priv->use_dr1 = 1;
1650         codec->get_buffer     = input_get_buffer;
1651         codec->release_buffer = input_release_buffer;
1652         codec->reget_buffer   = input_reget_buffer;
1653         codec->thread_safe_callbacks = 1;
1654     }
1655
1656     priv->frame = avcodec_alloc_frame();
1657
1658     return 0;
1659 }
1660
1661 static void input_uninit(AVFilterContext *ctx)
1662 {
1663     FilterPriv *priv = ctx->priv;
1664     av_free(priv->frame);
1665 }
1666
1667 static int input_request_frame(AVFilterLink *link)
1668 {
1669     FilterPriv *priv = link->src->priv;
1670     AVFilterBufferRef *picref;
1671     int64_t pts = 0;
1672     AVPacket pkt;
1673     int ret;
1674
1675     while (!(ret = get_video_frame(priv->is, priv->frame, &pts, &pkt)))
1676         av_free_packet(&pkt);
1677     if (ret < 0)
1678         return -1;
1679
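         /* with DR1 the decoder already wrote into an AVFilterBufferRef (saved
            in frame->opaque by input_get_buffer()), so only a new reference is
            taken; otherwise the data is copied into a fresh filter buffer */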
1680     if (priv->use_dr1 && priv->frame->opaque) {
1681         picref = avfilter_ref_buffer(priv->frame->opaque, ~0);
1682     } else {
1683         picref = avfilter_get_video_buffer(link, AV_PERM_WRITE, link->w, link->h);
1684         av_image_copy(picref->data, picref->linesize,
1685                       (const uint8_t **)(void **)priv->frame->data, priv->frame->linesize,
1686                       picref->format, link->w, link->h);
1687     }
1688     av_free_packet(&pkt);
1689
1690     avfilter_copy_frame_props(picref, priv->frame);
1691     picref->pts = pts;
1692
1693     avfilter_start_frame(link, picref);
1694     avfilter_draw_slice(link, 0, link->h, 1);
1695     avfilter_end_frame(link);
1696
1697     return 0;
1698 }
1699
1700 static int input_query_formats(AVFilterContext *ctx)
1701 {
1702     FilterPriv *priv = ctx->priv;
1703     enum PixelFormat pix_fmts[] = {
1704         priv->is->video_st->codec->pix_fmt, PIX_FMT_NONE
1705     };
1706
1707     avfilter_set_common_pixel_formats(ctx, avfilter_make_format_list(pix_fmts));
1708     return 0;
1709 }
1710
1711 static int input_config_props(AVFilterLink *link)
1712 {
1713     FilterPriv *priv  = link->src->priv;
1714     AVStream *s = priv->is->video_st;
1715
1716     link->w = s->codec->width;
1717     link->h = s->codec->height;
1718     link->sample_aspect_ratio = s->sample_aspect_ratio.num ?
1719         s->sample_aspect_ratio : s->codec->sample_aspect_ratio;
1720     link->time_base = s->time_base;
1721
1722     return 0;
1723 }
1724
1725 static AVFilter input_filter =
1726 {
1727     .name      = "ffplay_input",
1728
1729     .priv_size = sizeof(FilterPriv),
1730
1731     .init      = input_init,
1732     .uninit    = input_uninit,
1733
1734     .query_formats = input_query_formats,
1735
1736     .inputs    = (AVFilterPad[]) {{ .name = NULL }},
1737     .outputs   = (AVFilterPad[]) {{ .name = "default",
1738                                     .type = AVMEDIA_TYPE_VIDEO,
1739                                     .request_frame = input_request_frame,
1740                                     .config_props  = input_config_props, },
1741                                   { .name = NULL }},
1742 };
1743
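     /* Build the video filter graph:
      *   ffplay_input (src) -> [user vfilters, if any] -> format=yuv420p -> buffersink
      * The format filter forces YUV420P output, which the SDL overlay display
      * code uses; the buffersink end is stored in is->out_video_filter so the
      * video thread can pull filtered frames from it. */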
1744 static int configure_video_filters(AVFilterGraph *graph, VideoState *is, const char *vfilters)
1745 {
1746     static const enum PixelFormat pix_fmts[] = { PIX_FMT_YUV420P, PIX_FMT_NONE };
1747     char sws_flags_str[128];
1748     int ret;
1749     AVBufferSinkParams *buffersink_params = av_buffersink_params_alloc();
1750     AVFilterContext *filt_src = NULL, *filt_out = NULL, *filt_format;
1751     snprintf(sws_flags_str, sizeof(sws_flags_str), "flags=%d", sws_flags);
1752     graph->scale_sws_opts = av_strdup(sws_flags_str);
1753
1754     if ((ret = avfilter_graph_create_filter(&filt_src, &input_filter, "src",
1755                                             NULL, is, graph)) < 0)
1756         return ret;
1757
1758 #if FF_API_OLD_VSINK_API
1759     ret = avfilter_graph_create_filter(&filt_out,
1760                                        avfilter_get_by_name("buffersink"),
1761                                        "out", NULL, pix_fmts, graph);
1762 #else
1763     buffersink_params->pixel_fmts = pix_fmts;
1764     ret = avfilter_graph_create_filter(&filt_out,
1765                                        avfilter_get_by_name("buffersink"),
1766                                        "out", NULL, buffersink_params, graph);
1767 #endif
1768     av_freep(&buffersink_params);
1769     if (ret < 0)
1770         return ret;
1771
1772     if ((ret = avfilter_graph_create_filter(&filt_format,
1773                                             avfilter_get_by_name("format"),
1774                                             "format", "yuv420p", NULL, graph)) < 0)
1775         return ret;
1776     if ((ret = avfilter_link(filt_format, 0, filt_out, 0)) < 0)
1777         return ret;
1778
1779
1780     if (vfilters) {
1781         AVFilterInOut *outputs = avfilter_inout_alloc();
1782         AVFilterInOut *inputs  = avfilter_inout_alloc();
1783
1784         outputs->name    = av_strdup("in");
1785         outputs->filter_ctx = filt_src;
1786         outputs->pad_idx = 0;
1787         outputs->next    = NULL;
1788
1789         inputs->name    = av_strdup("out");
1790         inputs->filter_ctx = filt_format;
1791         inputs->pad_idx = 0;
1792         inputs->next    = NULL;
1793
1794         if ((ret = avfilter_graph_parse(graph, vfilters, &inputs, &outputs, NULL)) < 0)
1795             return ret;
1796     } else {
1797         if ((ret = avfilter_link(filt_src, 0, filt_format, 0)) < 0)
1798             return ret;
1799     }
1800
1801     if ((ret = avfilter_graph_config(graph, NULL)) < 0)
1802         return ret;
1803
1804     is->out_video_filter = filt_out;
1805
1806     return ret;
1807 }
1808
1809 #endif  /* CONFIG_AVFILTER */
1810
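     /* video decoding thread: pulls frames either through the filter graph
      * (CONFIG_AVFILTER) or directly from get_video_frame(), converts their
      * pts to seconds and hands them to queue_picture() for display */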
1811 static int video_thread(void *arg)
1812 {
1813     VideoState *is = arg;
1814     AVFrame *frame = avcodec_alloc_frame();
1815     int64_t pts_int = AV_NOPTS_VALUE, pos = -1;
1816     double pts;
1817     int ret;
1818
1819 #if CONFIG_AVFILTER
1820     AVFilterGraph *graph = avfilter_graph_alloc();
1821     AVFilterContext *filt_out = NULL;
1822     int last_w = is->video_st->codec->width;
1823     int last_h = is->video_st->codec->height;
1824
1825     if ((ret = configure_video_filters(graph, is, vfilters)) < 0)
1826         goto the_end;
1827     filt_out = is->out_video_filter;
1828 #endif
1829
1830     for (;;) {
1831 #if !CONFIG_AVFILTER
1832         AVPacket pkt;
1833 #else
1834         AVFilterBufferRef *picref;
1835         AVRational tb = filt_out->inputs[0]->time_base;
1836 #endif
1837         while (is->paused && !is->videoq.abort_request)
1838             SDL_Delay(10);
1839 #if CONFIG_AVFILTER
1840         if (   last_w != is->video_st->codec->width
1841             || last_h != is->video_st->codec->height) {
1842             av_log(NULL, AV_LOG_INFO, "Frame changed from size:%dx%d to size:%dx%d\n",
1843                    last_w, last_h, is->video_st->codec->width, is->video_st->codec->height);
1844             avfilter_graph_free(&graph);
1845             graph = avfilter_graph_alloc();
1846             if ((ret = configure_video_filters(graph, is, vfilters)) < 0)
1847                 goto the_end;
1848             filt_out = is->out_video_filter;
1849             last_w = is->video_st->codec->width;
1850             last_h = is->video_st->codec->height;
1851         }
1852         ret = av_buffersink_get_buffer_ref(filt_out, &picref, 0);
1853         if (picref) {
1854             avfilter_fill_frame_from_video_buffer_ref(frame, picref);
1855             pts_int = picref->pts;
1856             tb      = filt_out->inputs[0]->time_base;
1857             pos     = picref->pos;
1858             frame->opaque = picref;
1859
1860             ret = 1;
1861         }
1862
1863         if (ret >= 0 && av_cmp_q(tb, is->video_st->time_base)) {
1864             av_unused int64_t pts1 = pts_int;
1865             pts_int = av_rescale_q(pts_int, tb, is->video_st->time_base);
1866             av_dlog(NULL, "video_thread(): "
1867                     "tb:%d/%d pts:%"PRId64" -> tb:%d/%d pts:%"PRId64"\n",
1868                     tb.num, tb.den, pts1,
1869                     is->video_st->time_base.num, is->video_st->time_base.den, pts_int);
1870         }
1871 #else
1872         ret = get_video_frame(is, frame, &pts_int, &pkt);
1873         pos = pkt.pos;
1874         av_free_packet(&pkt);
1875 #endif
1876
1877         if (ret < 0)
1878             goto the_end;
1879
1880         is->frame_last_filter_delay = av_gettime() / 1000000.0 - is->frame_last_returned_time;
1881         if (fabs(is->frame_last_filter_delay) > AV_NOSYNC_THRESHOLD / 10.0)
1882             is->frame_last_filter_delay = 0;
1883
1884 #if CONFIG_AVFILTER
1885         if (!picref)
1886             continue;
1887 #endif
1888
1889         pts = pts_int * av_q2d(is->video_st->time_base);
1890
1891         ret = queue_picture(is, frame, pts, pos);
1892
1893         if (ret < 0)
1894             goto the_end;
1895
1896         if (is->step)
1897             stream_toggle_pause(is);
1898     }
1899  the_end:
1900 #if CONFIG_AVFILTER
1901     av_freep(&vfilters);
1902     avfilter_graph_free(&graph);
1903 #endif
1904     av_free(frame);
1905     return 0;
1906 }
1907
1908 static int subtitle_thread(void *arg)
1909 {
1910     VideoState *is = arg;
1911     SubPicture *sp;
1912     AVPacket pkt1, *pkt = &pkt1;
1913     int got_subtitle;
1914     double pts;
1915     int i, j;
1916     int r, g, b, y, u, v, a;
1917
1918     for (;;) {
1919         while (is->paused && !is->subtitleq.abort_request) {
1920             SDL_Delay(10);
1921         }
1922         if (packet_queue_get(&is->subtitleq, pkt, 1) < 0)
1923             break;
1924
1925         if (pkt->data == flush_pkt.data) {
1926             avcodec_flush_buffers(is->subtitle_st->codec);
1927             continue;
1928         }
1929         SDL_LockMutex(is->subpq_mutex);
1930         while (is->subpq_size >= SUBPICTURE_QUEUE_SIZE &&
1931                !is->subtitleq.abort_request) {
1932             SDL_CondWait(is->subpq_cond, is->subpq_mutex);
1933         }
1934         SDL_UnlockMutex(is->subpq_mutex);
1935
1936         if (is->subtitleq.abort_request)
1937             return 0;
1938
1939         sp = &is->subpq[is->subpq_windex];
1940
1941         /* NOTE: pts is the PTS of the _first_ picture beginning in
1942            this packet, if any */
1943         pts = 0;
1944         if (pkt->pts != AV_NOPTS_VALUE)
1945             pts = av_q2d(is->subtitle_st->time_base) * pkt->pts;
1946
1947         avcodec_decode_subtitle2(is->subtitle_st->codec, &sp->sub,
1948                                  &got_subtitle, pkt);
1949
1950         if (got_subtitle && sp->sub.format == 0) {
1951             sp->pts = pts;
1952
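             /* convert each palette entry of the bitmap subtitle from RGBA to
                CCIR YUVA in place, since the overlay blending code works on the
                YUV picture */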
1953             for (i = 0; i < sp->sub.num_rects; i++)
1954             {
1955                 for (j = 0; j < sp->sub.rects[i]->nb_colors; j++)
1956                 {
1957                     RGBA_IN(r, g, b, a, (uint32_t*)sp->sub.rects[i]->pict.data[1] + j);
1958                     y = RGB_TO_Y_CCIR(r, g, b);
1959                     u = RGB_TO_U_CCIR(r, g, b, 0);
1960                     v = RGB_TO_V_CCIR(r, g, b, 0);
1961                     YUVA_OUT((uint32_t*)sp->sub.rects[i]->pict.data[1] + j, y, u, v, a);
1962                 }
1963             }
1964
1965             /* now we can update the picture count */
1966             if (++is->subpq_windex == SUBPICTURE_QUEUE_SIZE)
1967                 is->subpq_windex = 0;
1968             SDL_LockMutex(is->subpq_mutex);
1969             is->subpq_size++;
1970             SDL_UnlockMutex(is->subpq_mutex);
1971         }
1972         av_free_packet(pkt);
1973     }
1974     return 0;
1975 }
1976
1977 /* copy samples into the sample array used by the waveform / RDFT display */
1978 static void update_sample_display(VideoState *is, short *samples, int samples_size)
1979 {
1980     int size, len;
1981
1982     size = samples_size / sizeof(short);
1983     while (size > 0) {
1984         len = SAMPLE_ARRAY_SIZE - is->sample_array_index;
1985         if (len > size)
1986             len = size;
1987         memcpy(is->sample_array + is->sample_array_index, samples, len * sizeof(short));
1988         samples += len;
1989         is->sample_array_index += len;
1990         if (is->sample_array_index >= SAMPLE_ARRAY_SIZE)
1991             is->sample_array_index = 0;
1992         size -= len;
1993     }
1994 }
1995
1996 /* return the wanted number of samples to get better sync if sync_type is video
1997  * or external master clock */
1998 static int synchronize_audio(VideoState *is, int nb_samples)
1999 {
2000     int wanted_nb_samples = nb_samples;
2001
2002     /* if not master, then we try to remove or add samples to correct the clock */
2003     if (((is->av_sync_type == AV_SYNC_VIDEO_MASTER && is->video_st) ||
2004          is->av_sync_type == AV_SYNC_EXTERNAL_CLOCK)) {
2005         double diff, avg_diff;
2006         int min_nb_samples, max_nb_samples;
2007
2008         diff = get_audio_clock(is) - get_master_clock(is);
2009
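         /* audio_diff_cum is an exponentially weighted sum of the A-V clock
            differences: cum = diff + coef * cum, with
            coef = exp(log(0.01) / AUDIO_DIFF_AVG_NB) (set in
            stream_component_open), so a difference measured AUDIO_DIFF_AVG_NB
            calls ago only contributes 1%; multiplying by (1 - coef) below
            turns the sum into a weighted average */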
2010         if (diff < AV_NOSYNC_THRESHOLD) {
2011             is->audio_diff_cum = diff + is->audio_diff_avg_coef * is->audio_diff_cum;
2012             if (is->audio_diff_avg_count < AUDIO_DIFF_AVG_NB) {
2013                 /* not enough measurements yet for a reliable estimate */
2014                 is->audio_diff_avg_count++;
2015             } else {
2016                 /* estimate the A-V difference */
2017                 avg_diff = is->audio_diff_cum * (1.0 - is->audio_diff_avg_coef);
2018
2019                 if (fabs(avg_diff) >= is->audio_diff_threshold) {
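                         /* convert the clock error (in seconds) to a sample
                            count at the source rate and clamp the correction
                            to +/- SAMPLE_CORRECTION_PERCENT_MAX percent */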
2020                     wanted_nb_samples = nb_samples + (int)(diff * is->audio_src_freq);
2021                     min_nb_samples = ((nb_samples * (100 - SAMPLE_CORRECTION_PERCENT_MAX) / 100));
2022                     max_nb_samples = ((nb_samples * (100 + SAMPLE_CORRECTION_PERCENT_MAX) / 100));
2023                     wanted_nb_samples = FFMIN(FFMAX(wanted_nb_samples, min_nb_samples), max_nb_samples);
2024                 }
2025                 av_dlog(NULL, "diff=%f adiff=%f sample_diff=%d apts=%0.3f vpts=%0.3f %f\n",
2026                         diff, avg_diff, wanted_nb_samples - nb_samples,
2027                         is->audio_clock, is->video_clock, is->audio_diff_threshold);
2028             }
2029         } else {
2030             /* the difference is too big: probably initial PTS errors, so
2031                reset the A-V filter */
2032             is->audio_diff_avg_count = 0;
2033             is->audio_diff_cum       = 0;
2034         }
2035     }
2036
2037     return wanted_nb_samples;
2038 }
2039
2040 /* decode one audio frame and return its uncompressed size */
2041 static int audio_decode_frame(VideoState *is, double *pts_ptr)
2042 {
2043     AVPacket *pkt_temp = &is->audio_pkt_temp;
2044     AVPacket *pkt = &is->audio_pkt;
2045     AVCodecContext *dec = is->audio_st->codec;
2046     int len1, len2, data_size, resampled_data_size;
2047     int64_t dec_channel_layout;
2048     int got_frame;
2049     double pts;
2050     int new_packet = 0;
2051     int flush_complete = 0;
2052     int wanted_nb_samples;
2053
2054     for (;;) {
2055         /* NOTE: the audio packet can contain several frames */
2056         while (pkt_temp->size > 0 || (!pkt_temp->data && new_packet)) {
2057             if (!is->frame) {
2058                 if (!(is->frame = avcodec_alloc_frame()))
2059                     return AVERROR(ENOMEM);
2060             } else
2061                 avcodec_get_frame_defaults(is->frame);
2062
2063             if (flush_complete)
2064                 break;
2065             new_packet = 0;
2066             len1 = avcodec_decode_audio4(dec, is->frame, &got_frame, pkt_temp);
2067             if (len1 < 0) {
2068                 /* if error, we skip the frame */
2069                 pkt_temp->size = 0;
2070                 break;
2071             }
2072
2073             pkt_temp->data += len1;
2074             pkt_temp->size -= len1;
2075
2076             if (!got_frame) {
2077                 /* stop sending empty packets if the decoder is finished */
2078                 if (!pkt_temp->data && dec->codec->capabilities & CODEC_CAP_DELAY)
2079                     flush_complete = 1;
2080                 continue;
2081             }
2082             data_size = av_samples_get_buffer_size(NULL, dec->channels,
2083                                                    is->frame->nb_samples,
2084                                                    dec->sample_fmt, 1);
2085
2086             dec_channel_layout = (dec->channel_layout && dec->channels == av_get_channel_layout_nb_channels(dec->channel_layout)) ? dec->channel_layout : av_get_default_channel_layout(dec->channels);
2087             wanted_nb_samples = synchronize_audio(is, is->frame->nb_samples);
2088
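             /* (re)create the resampler whenever the decoded format, channel
                layout or rate differs from the SDL output format, or when sync
                wants to add/drop samples and no resampler exists yet */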
2089             if (dec->sample_fmt != is->audio_src_fmt ||
2090                 dec_channel_layout != is->audio_src_channel_layout ||
2091                 dec->sample_rate != is->audio_src_freq ||
2092                 (wanted_nb_samples != is->frame->nb_samples && !is->swr_ctx)) {
2093                 if (is->swr_ctx)
2094                     swr_free(&is->swr_ctx);
2095                 is->swr_ctx = swr_alloc_set_opts(NULL,
2096                                                  is->audio_tgt_channel_layout, is->audio_tgt_fmt, is->audio_tgt_freq,
2097                                                  dec_channel_layout,           dec->sample_fmt,   dec->sample_rate,
2098                                                  0, NULL);
2099                 if (!is->swr_ctx || swr_init(is->swr_ctx) < 0) {
2100                     fprintf(stderr, "Cannot create sample rate converter for conversion of %d Hz %s %d channels to %d Hz %s %d channels!\n",
2101                         dec->sample_rate,
2102                         av_get_sample_fmt_name(dec->sample_fmt),
2103                         dec->channels,
2104                         is->audio_tgt_freq,
2105                         av_get_sample_fmt_name(is->audio_tgt_fmt),
2106                         is->audio_tgt_channels);
2107                     break;
2108                 }
2109                 is->audio_src_channel_layout = dec_channel_layout;
2110                 is->audio_src_channels = dec->channels;
2111                 is->audio_src_freq = dec->sample_rate;
2112                 is->audio_src_fmt = dec->sample_fmt;
2113             }
2114
2115             resampled_data_size = data_size;
2116             if (is->swr_ctx) {
2117                 const uint8_t *in[] = { is->frame->data[0] };
2118                 uint8_t *out[] = {is->audio_buf2};
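                 /* ask libswresample to add or drop (wanted - got) samples,
                    converted to the output sample rate, spread over the
                    duration of this frame */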
2119                 if (wanted_nb_samples != is->frame->nb_samples) {
2120                     if (swr_set_compensation(is->swr_ctx, (wanted_nb_samples - is->frame->nb_samples) * is->audio_tgt_freq / dec->sample_rate,
2121                                                 wanted_nb_samples * is->audio_tgt_freq / dec->sample_rate) < 0) {
2122                         fprintf(stderr, "swr_set_compensation() failed\n");
2123                         break;
2124                     }
2125                 }
2126                 len2 = swr_convert(is->swr_ctx, out, sizeof(is->audio_buf2) / is->audio_tgt_channels / av_get_bytes_per_sample(is->audio_tgt_fmt),
2127                                                 in, is->frame->nb_samples);
2128                 if (len2 < 0) {
2129                     fprintf(stderr, "swr_convert() failed\n");
2130                     break;
2131                 }
2132                 if (len2 == sizeof(is->audio_buf2) / is->audio_tgt_channels / av_get_bytes_per_sample(is->audio_tgt_fmt)) {
2133                     fprintf(stderr, "warning: audio buffer is probably too small\n");
2134                     swr_init(is->swr_ctx);
2135                 }
2136                 is->audio_buf = is->audio_buf2;
2137                 resampled_data_size = len2 * is->audio_tgt_channels * av_get_bytes_per_sample(is->audio_tgt_fmt);
2138             } else {
2139                 is->audio_buf = is->frame->data[0];
2140             }
2141
2142             /* use the running audio clock as the pts of this data, then advance it */
2143             pts = is->audio_clock;
2144             *pts_ptr = pts;
2145             is->audio_clock += (double)data_size /
2146                 (dec->channels * dec->sample_rate * av_get_bytes_per_sample(dec->sample_fmt));
2147 #ifdef DEBUG
2148             {
2149                 static double last_clock;
2150                 printf("audio: delay=%0.3f clock=%0.3f pts=%0.3f\n",
2151                        is->audio_clock - last_clock,
2152                        is->audio_clock, pts);
2153                 last_clock = is->audio_clock;
2154             }
2155 #endif
2156             return resampled_data_size;
2157         }
2158
2159         /* free the current packet */
2160         if (pkt->data)
2161             av_free_packet(pkt);
2162         memset(pkt_temp, 0, sizeof(*pkt_temp));
2163
2164         if (is->paused || is->audioq.abort_request) {
2165             return -1;
2166         }
2167
2168         /* read next packet */
2169         if ((new_packet = packet_queue_get(&is->audioq, pkt, 1)) < 0)
2170             return -1;
2171
2172         if (pkt->data == flush_pkt.data) {
2173             avcodec_flush_buffers(dec);
2174             flush_complete = 0;
2175         }
2176
2177         *pkt_temp = *pkt;
2178
2179         /* update the audio clock with the packet pts, if available */
2180         if (pkt->pts != AV_NOPTS_VALUE) {
2181             is->audio_clock = av_q2d(is->audio_st->time_base)*pkt->pts;
2182         }
2183     }
2184 }
2185
2186 /* prepare a new audio buffer */
2187 static void sdl_audio_callback(void *opaque, Uint8 *stream, int len)
2188 {
2189     VideoState *is = opaque;
2190     int audio_size, len1;
2191     int bytes_per_sec;
2192     int frame_size = av_samples_get_buffer_size(NULL, is->audio_tgt_channels, 1, is->audio_tgt_fmt, 1);
2193     double pts;
2194
2195     audio_callback_time = av_gettime();
2196
2197     while (len > 0) {
2198         if (is->audio_buf_index >= is->audio_buf_size) {
2199            audio_size = audio_decode_frame(is, &pts);
2200            if (audio_size < 0) {
2201                 /* if error, just output silence */
2202                is->audio_buf      = is->silence_buf;
2203                is->audio_buf_size = sizeof(is->silence_buf) / frame_size * frame_size;
2204            } else {
2205                if (is->show_mode != SHOW_MODE_VIDEO)
2206                    update_sample_display(is, (int16_t *)is->audio_buf, audio_size);
2207                is->audio_buf_size = audio_size;
2208            }
2209            is->audio_buf_index = 0;
2210         }
2211         len1 = is->audio_buf_size - is->audio_buf_index;
2212         if (len1 > len)
2213             len1 = len;
2214         memcpy(stream, (uint8_t *)is->audio_buf + is->audio_buf_index, len1);
2215         len -= len1;
2216         stream += len1;
2217         is->audio_buf_index += len1;
2218     }
2219     bytes_per_sec = is->audio_tgt_freq * is->audio_tgt_channels * av_get_bytes_per_sample(is->audio_tgt_fmt);
2220     is->audio_write_buf_size = is->audio_buf_size - is->audio_buf_index;
2221     /* Let's assume the audio driver that is used by SDL has two periods. */
2222     is->audio_current_pts = is->audio_clock - (double)(2 * is->audio_hw_buf_size + is->audio_write_buf_size) / bytes_per_sec;
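         /* the drift is the clock value minus the wall-clock time at which it
            was taken; get_audio_clock() adds the current time back, so the
            audio clock keeps advancing smoothly between callbacks */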
2223     is->audio_current_pts_drift = is->audio_current_pts - audio_callback_time / 1000000.0;
2224 }
2225
2226 /* open a given stream. Return 0 if OK */
2227 static int stream_component_open(VideoState *is, int stream_index)
2228 {
2229     AVFormatContext *ic = is->ic;
2230     AVCodecContext *avctx;
2231     AVCodec *codec;
2232     SDL_AudioSpec wanted_spec, spec;
2233     AVDictionary *opts;
2234     AVDictionaryEntry *t = NULL;
2235     int64_t wanted_channel_layout = 0;
2236     int wanted_nb_channels;
2237     const char *env;
2238
2239     if (stream_index < 0 || stream_index >= ic->nb_streams)
2240         return -1;
2241     avctx = ic->streams[stream_index]->codec;
2242
2243     codec = avcodec_find_decoder(avctx->codec_id);
2244     opts = filter_codec_opts(codec_opts, codec, ic, ic->streams[stream_index]);
2245
2246     switch(avctx->codec_type){
2247         case AVMEDIA_TYPE_AUDIO   : if(audio_codec_name   ) codec= avcodec_find_decoder_by_name(   audio_codec_name); break;
2248         case AVMEDIA_TYPE_SUBTITLE: if(subtitle_codec_name) codec= avcodec_find_decoder_by_name(subtitle_codec_name); break;
2249         case AVMEDIA_TYPE_VIDEO   : if(video_codec_name   ) codec= avcodec_find_decoder_by_name(   video_codec_name); break;
2250     }
2251     if (!codec)
2252         return -1;
2253
2254     avctx->workaround_bugs   = workaround_bugs;
2255     avctx->lowres            = lowres;
2256     if(avctx->lowres > codec->max_lowres){
2257         av_log(avctx, AV_LOG_WARNING, "The maximum value for lowres supported by the decoder is %d\n",
2258                 codec->max_lowres);
2259         avctx->lowres= codec->max_lowres;
2260     }
2261     avctx->idct_algo         = idct;
2262     avctx->skip_frame        = skip_frame;
2263     avctx->skip_idct         = skip_idct;
2264     avctx->skip_loop_filter  = skip_loop_filter;
2265     avctx->error_concealment = error_concealment;
2266
2267     if(avctx->lowres) avctx->flags |= CODEC_FLAG_EMU_EDGE;
2268     if (fast)   avctx->flags2 |= CODEC_FLAG2_FAST;
2269     if(codec->capabilities & CODEC_CAP_DR1)
2270         avctx->flags |= CODEC_FLAG_EMU_EDGE;
2271
2272     if (avctx->codec_type == AVMEDIA_TYPE_AUDIO) {
2273         memset(&is->audio_pkt_temp, 0, sizeof(is->audio_pkt_temp));
2274         env = SDL_getenv("SDL_AUDIO_CHANNELS");
2275         if (env)
2276             wanted_channel_layout = av_get_default_channel_layout(SDL_atoi(env));
2277         if (!wanted_channel_layout) {
2278             wanted_channel_layout = (avctx->channel_layout && avctx->channels == av_get_channel_layout_nb_channels(avctx->channel_layout)) ? avctx->channel_layout : av_get_default_channel_layout(avctx->channels);
2279             wanted_channel_layout &= ~AV_CH_LAYOUT_STEREO_DOWNMIX;
2280             wanted_nb_channels = av_get_channel_layout_nb_channels(wanted_channel_layout);
2281             /* SDL only supports 1, 2, 4 or 6 channels at the moment, so we have to make sure not to request anything else. */
2282             while (wanted_nb_channels > 0 && (wanted_nb_channels == 3 || wanted_nb_channels == 5 || wanted_nb_channels > 6)) {
2283                 wanted_nb_channels--;
2284                 wanted_channel_layout = av_get_default_channel_layout(wanted_nb_channels);
2285             }
2286         }
2287         wanted_spec.channels = av_get_channel_layout_nb_channels(wanted_channel_layout);
2288         wanted_spec.freq = avctx->sample_rate;
2289         if (wanted_spec.freq <= 0 || wanted_spec.channels <= 0) {
2290             fprintf(stderr, "Invalid sample rate or channel count!\n");
2291             return -1;
2292         }
2293     }
2294
2295     if (!av_dict_get(opts, "threads", NULL, 0))
2296         av_dict_set(&opts, "threads", "auto", 0);
2297     if (!codec ||
2298         avcodec_open2(avctx, codec, &opts) < 0)
2299         return -1;
2300     if ((t = av_dict_get(opts, "", NULL, AV_DICT_IGNORE_SUFFIX))) {
2301         av_log(NULL, AV_LOG_ERROR, "Option %s not found.\n", t->key);
2302         return AVERROR_OPTION_NOT_FOUND;
2303     }
2304
2305     /* prepare audio output */
2306     if (avctx->codec_type == AVMEDIA_TYPE_AUDIO) {
2307         wanted_spec.format = AUDIO_S16SYS;
2308         wanted_spec.silence = 0;
2309         wanted_spec.samples = SDL_AUDIO_BUFFER_SIZE;
2310         wanted_spec.callback = sdl_audio_callback;
2311         wanted_spec.userdata = is;
2312         if (SDL_OpenAudio(&wanted_spec, &spec) < 0) {
2313             fprintf(stderr, "SDL_OpenAudio: %s\n", SDL_GetError());
2314             return -1;
2315         }
2316         is->audio_hw_buf_size = spec.size;
2317         if (spec.format != AUDIO_S16SYS) {
2318             fprintf(stderr, "SDL advised audio format %d is not supported!\n", spec.format);
2319             return -1;
2320         }
2321         if (spec.channels != wanted_spec.channels) {
2322             wanted_channel_layout = av_get_default_channel_layout(spec.channels);
2323             if (!wanted_channel_layout) {
2324                 fprintf(stderr, "SDL advised channel count %d is not supported!\n", spec.channels);
2325                 return -1;
2326             }
2327         }
2328         is->audio_src_fmt = is->audio_tgt_fmt = AV_SAMPLE_FMT_S16;
2329         is->audio_src_freq = is->audio_tgt_freq = spec.freq;
2330         is->audio_src_channel_layout = is->audio_tgt_channel_layout = wanted_channel_layout;
2331         is->audio_src_channels = is->audio_tgt_channels = spec.channels;
2332     }
2333
2334     ic->streams[stream_index]->discard = AVDISCARD_DEFAULT;
2335     switch (avctx->codec_type) {
2336     case AVMEDIA_TYPE_AUDIO:
2337         is->audio_stream = stream_index;
2338         is->audio_st = ic->streams[stream_index];
2339         is->audio_buf_size  = 0;
2340         is->audio_buf_index = 0;
2341
2342         /* init averaging filter */
2343         is->audio_diff_avg_coef  = exp(log(0.01) / AUDIO_DIFF_AVG_NB);
2344         is->audio_diff_avg_count = 0;
2345         /* since we do not have a precise enough audio FIFO fullness,
2346            we correct audio sync only if the error is larger than this threshold */
2347         is->audio_diff_threshold = 2.0 * SDL_AUDIO_BUFFER_SIZE / wanted_spec.freq;
2348
2349         memset(&is->audio_pkt, 0, sizeof(is->audio_pkt));
2350         packet_queue_init(&is->audioq);
2351         SDL_PauseAudio(0);
2352         break;
2353     case AVMEDIA_TYPE_VIDEO:
2354         is->video_stream = stream_index;
2355         is->video_st = ic->streams[stream_index];
2356
2357         packet_queue_init(&is->videoq);
2358         is->video_tid = SDL_CreateThread(video_thread, is);
2359         break;
2360     case AVMEDIA_TYPE_SUBTITLE:
2361         is->subtitle_stream = stream_index;
2362         is->subtitle_st = ic->streams[stream_index];
2363         packet_queue_init(&is->subtitleq);
2364
2365         is->subtitle_tid = SDL_CreateThread(subtitle_thread, is);
2366         break;
2367     default:
2368         break;
2369     }
2370     return 0;
2371 }
2372
2373 static void stream_component_close(VideoState *is, int stream_index)
2374 {
2375     AVFormatContext *ic = is->ic;
2376     AVCodecContext *avctx;
2377
2378     if (stream_index < 0 || stream_index >= ic->nb_streams)
2379         return;
2380     avctx = ic->streams[stream_index]->codec;
2381
2382     switch (avctx->codec_type) {
2383     case AVMEDIA_TYPE_AUDIO:
2384         packet_queue_abort(&is->audioq);
2385
2386         SDL_CloseAudio();
2387
2388         packet_queue_end(&is->audioq);
2389         av_free_packet(&is->audio_pkt);
2390         if (is->swr_ctx)
2391             swr_free(&is->swr_ctx);
2392         av_freep(&is->audio_buf1);
2393         is->audio_buf = NULL;
2394         av_freep(&is->frame);
2395
2396         if (is->rdft) {
2397             av_rdft_end(is->rdft);
2398             av_freep(&is->rdft_data);
2399             is->rdft = NULL;
2400             is->rdft_bits = 0;
2401         }
2402         break;
2403     case AVMEDIA_TYPE_VIDEO:
2404         packet_queue_abort(&is->videoq);
2405
2406         /* note: we also signal this mutex to make sure we deblock the
2407            video thread in all cases */
2408         SDL_LockMutex(is->pictq_mutex);
2409         SDL_CondSignal(is->pictq_cond);
2410         SDL_UnlockMutex(is->pictq_mutex);
2411
2412         SDL_WaitThread(is->video_tid, NULL);
2413
2414         packet_queue_end(&is->videoq);
2415         break;
2416     case AVMEDIA_TYPE_SUBTITLE:
2417         packet_queue_abort(&is->subtitleq);
2418
2419         /* note: we also signal this mutex to make sure we deblock the
2420            subtitle thread in all cases */
2421         SDL_LockMutex(is->subpq_mutex);
2422         is->subtitle_stream_changed = 1;
2423
2424         SDL_CondSignal(is->subpq_cond);
2425         SDL_UnlockMutex(is->subpq_mutex);
2426
2427         SDL_WaitThread(is->subtitle_tid, NULL);
2428
2429         packet_queue_end(&is->subtitleq);
2430         break;
2431     default:
2432         break;
2433     }
2434
2435     ic->streams[stream_index]->discard = AVDISCARD_ALL;
2436     avcodec_close(avctx);
2437     switch (avctx->codec_type) {
2438     case AVMEDIA_TYPE_AUDIO:
2439         is->audio_st = NULL;
2440         is->audio_stream = -1;
2441         break;
2442     case AVMEDIA_TYPE_VIDEO:
2443         is->video_st = NULL;
2444         is->video_stream = -1;
2445         break;
2446     case AVMEDIA_TYPE_SUBTITLE:
2447         is->subtitle_st = NULL;
2448         is->subtitle_stream = -1;
2449         break;
2450     default:
2451         break;
2452     }
2453 }
2454
2455 static int decode_interrupt_cb(void *ctx)
2456 {
2457     VideoState *is = ctx;
2458     return is->abort_request;
2459 }
2460
2461 /* this thread gets the stream from the disk or the network */
2462 static int read_thread(void *arg)
2463 {
2464     VideoState *is = arg;
2465     AVFormatContext *ic = NULL;
2466     int err, i, ret;
2467     int st_index[AVMEDIA_TYPE_NB];
2468     AVPacket pkt1, *pkt = &pkt1;
2469     int eof = 0;
2470     int pkt_in_play_range = 0;
2471     AVDictionaryEntry *t;
2472     AVDictionary **opts;
2473     int orig_nb_streams;
2474
2475     memset(st_index, -1, sizeof(st_index));
2476     is->video_stream = -1;
2477     is->audio_stream = -1;
2478     is->subtitle_stream = -1;
2479
2480     ic = avformat_alloc_context();
2481     ic->interrupt_callback.callback = decode_interrupt_cb;
2482     ic->interrupt_callback.opaque = is;
2483     err = avformat_open_input(&ic, is->filename, is->iformat, &format_opts);
2484     if (err < 0) {
2485         print_error(is->filename, err);
2486         ret = -1;
2487         goto fail;
2488     }
2489     if ((t = av_dict_get(format_opts, "", NULL, AV_DICT_IGNORE_SUFFIX))) {
2490         av_log(NULL, AV_LOG_ERROR, "Option %s not found.\n", t->key);
2491         ret = AVERROR_OPTION_NOT_FOUND;
2492         goto fail;
2493     }
2494     is->ic = ic;
2495
2496     if (genpts)
2497         ic->flags |= AVFMT_FLAG_GENPTS;
2498
2499     opts = setup_find_stream_info_opts(ic, codec_opts);
2500     orig_nb_streams = ic->nb_streams;
2501
2502     err = avformat_find_stream_info(ic, opts);
2503     if (err < 0) {
2504         fprintf(stderr, "%s: could not find codec parameters\n", is->filename);
2505         ret = -1;
2506         goto fail;
2507     }
2508     for (i = 0; i < orig_nb_streams; i++)
2509         av_dict_free(&opts[i]);
2510     av_freep(&opts);
2511
2512     if (ic->pb)
2513         ic->pb->eof_reached = 0; // FIXME hack, ffplay should probably not use url_feof() to test for the end
2514
2515     if (seek_by_bytes < 0)
2516         seek_by_bytes = !!(ic->iformat->flags & AVFMT_TS_DISCONT);
2517
2518     /* if seeking requested, we execute it */
2519     if (start_time != AV_NOPTS_VALUE) {
2520         int64_t timestamp;
2521
2522         timestamp = start_time;
2523         /* add the stream start time */
2524         if (ic->start_time != AV_NOPTS_VALUE)
2525             timestamp += ic->start_time;
2526         ret = avformat_seek_file(ic, -1, INT64_MIN, timestamp, INT64_MAX, 0);
2527         if (ret < 0) {
2528             fprintf(stderr, "%s: could not seek to position %0.3f\n",
2529                     is->filename, (double)timestamp / AV_TIME_BASE);
2530         }
2531     }
2532
2533     for (i = 0; i < ic->nb_streams; i++)
2534         ic->streams[i]->discard = AVDISCARD_ALL;
2535     if (!video_disable)
2536         st_index[AVMEDIA_TYPE_VIDEO] =
2537             av_find_best_stream(ic, AVMEDIA_TYPE_VIDEO,
2538                                 wanted_stream[AVMEDIA_TYPE_VIDEO], -1, NULL, 0);
2539     if (!audio_disable)
2540         st_index[AVMEDIA_TYPE_AUDIO] =
2541             av_find_best_stream(ic, AVMEDIA_TYPE_AUDIO,
2542                                 wanted_stream[AVMEDIA_TYPE_AUDIO],
2543                                 st_index[AVMEDIA_TYPE_VIDEO],
2544                                 NULL, 0);
2545     if (!video_disable)
2546         st_index[AVMEDIA_TYPE_SUBTITLE] =
2547             av_find_best_stream(ic, AVMEDIA_TYPE_SUBTITLE,
2548                                 wanted_stream[AVMEDIA_TYPE_SUBTITLE],
2549                                 (st_index[AVMEDIA_TYPE_AUDIO] >= 0 ?
2550                                  st_index[AVMEDIA_TYPE_AUDIO] :
2551                                  st_index[AVMEDIA_TYPE_VIDEO]),
2552                                 NULL, 0);
2553     if (show_status) {
2554         av_dump_format(ic, 0, is->filename, 0);
2555     }
2556
2557     is->show_mode = show_mode;
2558
2559     /* open the streams */
2560     if (st_index[AVMEDIA_TYPE_AUDIO] >= 0) {
2561         stream_component_open(is, st_index[AVMEDIA_TYPE_AUDIO]);
2562     }
2563
2564     ret = -1;
2565     if (st_index[AVMEDIA_TYPE_VIDEO] >= 0) {
2566         ret = stream_component_open(is, st_index[AVMEDIA_TYPE_VIDEO]);
2567     }
2568     is->refresh_tid = SDL_CreateThread(refresh_thread, is);
2569     if (is->show_mode == SHOW_MODE_NONE)
2570         is->show_mode = ret >= 0 ? SHOW_MODE_VIDEO : SHOW_MODE_RDFT;
2571
2572     if (st_index[AVMEDIA_TYPE_SUBTITLE] >= 0) {
2573         stream_component_open(is, st_index[AVMEDIA_TYPE_SUBTITLE]);
2574     }
2575
2576     if (is->video_stream < 0 && is->audio_stream < 0) {
2577         fprintf(stderr, "%s: could not open codecs\n", is->filename);
2578         ret = -1;
2579         goto fail;
2580     }
2581
2582     for (;;) {
2583         if (is->abort_request)
2584             break;
2585         if (is->paused != is->last_paused) {
2586             is->last_paused = is->paused;
2587             if (is->paused)
2588                 is->read_pause_return = av_read_pause(ic);
2589             else
2590                 av_read_play(ic);
2591         }
2592 #if CONFIG_RTSP_DEMUXER || CONFIG_MMSH_PROTOCOL
2593         if (is->paused &&
2594                 (!strcmp(ic->iformat->name, "rtsp") ||
2595                  (ic->pb && !strncmp(input_filename, "mmsh:", 5)))) {
2596             /* wait 10 ms to avoid trying to get another packet */
2597             /* XXX: horrible */
2598             SDL_Delay(10);
2599             continue;
2600         }
2601 #endif
2602         if (is->seek_req) {
2603             int64_t seek_target = is->seek_pos;
2604             int64_t seek_min    = is->seek_rel > 0 ? seek_target - is->seek_rel + 2: INT64_MIN;
2605             int64_t seek_max    = is->seek_rel < 0 ? seek_target - is->seek_rel - 2: INT64_MAX;
2606 // FIXME the +-2 is due to rounding not being done in the correct direction in the generation
2607 //      of the seek_pos/seek_rel variables
2608
2609             ret = avformat_seek_file(is->ic, -1, seek_min, seek_target, seek_max, is->seek_flags);
2610             if (ret < 0) {
2611                 fprintf(stderr, "%s: error while seeking\n", is->ic->filename);
2612             } else {
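                 /* the packet queues are flushed and a special flush_pkt is
                    queued so that each decoder thread calls
                    avcodec_flush_buffers() before decoding post-seek data */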
2613                 if (is->audio_stream >= 0) {
2614                     packet_queue_flush(&is->audioq);
2615                     packet_queue_put(&is->audioq, &flush_pkt);
2616                 }
2617                 if (is->subtitle_stream >= 0) {
2618                     packet_queue_flush(&is->subtitleq);
2619                     packet_queue_put(&is->subtitleq, &flush_pkt);
2620                 }
2621                 if (is->video_stream >= 0) {
2622                     packet_queue_flush(&is->videoq);
2623                     packet_queue_put(&is->videoq, &flush_pkt);
2624                 }
2625             }
2626             is->seek_req = 0;
2627             eof = 0;
2628         }
2629
2630         /* if the queues are full, no need to read more */
2631         if (   is->audioq.size + is->videoq.size + is->subtitleq.size > MAX_QUEUE_SIZE
2632             || (   (is->audioq   .nb_packets > MIN_FRAMES || is->audio_stream < 0)
2633                 && (is->videoq   .nb_packets > MIN_FRAMES || is->video_stream < 0)
2634                 && (is->subtitleq.nb_packets > MIN_FRAMES || is->subtitle_stream < 0))) {
2635             /* wait 10 ms */
2636             SDL_Delay(10);
2637             continue;
2638         }
2639         if (eof) {
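             /* at EOF, queue empty packets so that decoders which buffer frames
                internally (CODEC_CAP_DELAY) output what they still hold */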
2640             if (is->video_stream >= 0) {
2641                 av_init_packet(pkt);
2642                 pkt->data = NULL;
2643                 pkt->size = 0;
2644                 pkt->stream_index = is->video_stream;
2645                 packet_queue_put(&is->videoq, pkt);
2646             }
2647             if (is->audio_stream >= 0 &&
2648                 is->audio_st->codec->codec->capabilities & CODEC_CAP_DELAY) {
2649                 av_init_packet(pkt);
2650                 pkt->data = NULL;
2651                 pkt->size = 0;
2652                 pkt->stream_index = is->audio_stream;
2653                 packet_queue_put(&is->audioq, pkt);
2654             }
2655             SDL_Delay(10);
2656             if (is->audioq.size + is->videoq.size + is->subtitleq.size == 0) {
2657                 if (loop != 1 && (!loop || --loop)) {
2658                     stream_seek(is, start_time != AV_NOPTS_VALUE ? start_time : 0, 0, 0);
2659                 } else if (autoexit) {
2660                     ret = AVERROR_EOF;
2661                     goto fail;
2662                 }
2663             }
2664             eof=0;
2665             continue;
2666         }
2667         ret = av_read_frame(ic, pkt);
2668         if (ret < 0) {
2669             if (ret == AVERROR_EOF || url_feof(ic->pb))
2670                 eof = 1;
2671             if (ic->pb && ic->pb->error)
2672                 break;
2673             SDL_Delay(100); /* wait for user event */
2674             continue;
2675         }
2676         /* check if the packet is within the play range specified by the user, then queue it, otherwise discard it */
2677         pkt_in_play_range = duration == AV_NOPTS_VALUE ||
2678                 (pkt->pts - ic->streams[pkt->stream_index]->start_time) *
2679                 av_q2d(ic->streams[pkt->stream_index]->time_base) -
2680                 (double)(start_time != AV_NOPTS_VALUE ? start_time : 0) / 1000000
2681                 <= ((double)duration / 1000000);
2682         if (pkt->stream_index == is->audio_stream && pkt_in_play_range) {
2683             packet_queue_put(&is->audioq, pkt);
2684         } else if (pkt->stream_index == is->video_stream && pkt_in_play_range) {
2685             packet_queue_put(&is->videoq, pkt);
2686         } else if (pkt->stream_index == is->subtitle_stream && pkt_in_play_range) {
2687             packet_queue_put(&is->subtitleq, pkt);
2688         } else {
2689             av_free_packet(pkt);
2690         }
2691     }
2692     /* wait until the end */
2693     while (!is->abort_request) {
2694         SDL_Delay(100);
2695     }
2696
2697     ret = 0;
2698  fail:
2699     /* close each stream */
2700     if (is->audio_stream >= 0)
2701         stream_component_close(is, is->audio_stream);
2702     if (is->video_stream >= 0)
2703         stream_component_close(is, is->video_stream);
2704     if (is->subtitle_stream >= 0)
2705         stream_component_close(is, is->subtitle_stream);
2706     if (is->ic) {
2707         avformat_close_input(&is->ic);
2708     }
2709
2710     if (ret != 0) {
2711         SDL_Event event;
2712
2713         event.type = FF_QUIT_EVENT;
2714         event.user.data1 = is;
2715         SDL_PushEvent(&event);
2716     }
2717     return 0;
2718 }
2719
2720 static VideoState *stream_open(const char *filename, AVInputFormat *iformat)
2721 {
2722     VideoState *is;
2723
2724     is = av_mallocz(sizeof(VideoState));
2725     if (!is)
2726         return NULL;
2727     av_strlcpy(is->filename, filename, sizeof(is->filename));
2728     is->iformat = iformat;
2729     is->ytop    = 0;
2730     is->xleft   = 0;
2731
2732     /* start video display */
2733     is->pictq_mutex = SDL_CreateMutex();
2734     is->pictq_cond  = SDL_CreateCond();
2735
2736     is->subpq_mutex = SDL_CreateMutex();
2737     is->subpq_cond  = SDL_CreateCond();
2738
2739     is->av_sync_type = av_sync_type;
2740     is->read_tid     = SDL_CreateThread(read_thread, is);
2741     if (!is->read_tid) {
2742         av_free(is);
2743         return NULL;
2744     }
2745     return is;
2746 }
2747
2748 static void stream_cycle_channel(VideoState *is, int codec_type)
2749 {
2750     AVFormatContext *ic = is->ic;
2751     int start_index, stream_index;
2752     AVStream *st;
2753
2754     if (codec_type == AVMEDIA_TYPE_VIDEO)
2755         start_index = is->video_stream;
2756     else if (codec_type == AVMEDIA_TYPE_AUDIO)
2757         start_index = is->audio_stream;
2758     else
2759         start_index = is->subtitle_stream;
2760     if (start_index < (codec_type == AVMEDIA_TYPE_SUBTITLE ? -1 : 0))
2761         return;
2762     stream_index = start_index;
2763     for (;;) {
2764         if (++stream_index >= is->ic->nb_streams)
2765         {
2766             if (codec_type == AVMEDIA_TYPE_SUBTITLE)
2767             {
2768                 stream_index = -1;
2769                 goto the_end;
2770             } else
2771                 stream_index = 0;
2772         }
2773         if (stream_index == start_index)
2774             return;
2775         st = ic->streams[stream_index];
2776         if (st->codec->codec_type == codec_type) {
2777             /* check that parameters are OK */
2778             switch (codec_type) {
2779             case AVMEDIA_TYPE_AUDIO:
2780                 if (st->codec->sample_rate != 0 &&
2781                     st->codec->channels != 0)
2782                     goto the_end;
2783                 break;
2784             case AVMEDIA_TYPE_VIDEO:
2785             case AVMEDIA_TYPE_SUBTITLE:
2786                 goto the_end;
2787             default:
2788                 break;
2789             }
2790         }
2791     }
2792  the_end:
2793     stream_component_close(is, start_index);
2794     stream_component_open(is, stream_index);
2795 }
2796
2797
2798 static void toggle_full_screen(VideoState *is)
2799 {
2800     av_unused int i;
2801     is_full_screen = !is_full_screen;
2802 #if defined(__APPLE__) && SDL_VERSION_ATLEAST(1, 2, 14)
2803     /* OS X needs to reallocate the SDL overlays */
2804     for (i = 0; i < VIDEO_PICTURE_QUEUE_SIZE; i++) {
2805         is->pictq[i].reallocate = 1;
2806     }
2807 #endif
2808     video_open(is, 1);
2809 }
2810
2811 static void toggle_pause(VideoState *is)
2812 {
2813     stream_toggle_pause(is);
2814     is->step = 0;
2815 }
2816
2817 static void step_to_next_frame(VideoState *is)
2818 {
2819     /* if the stream is paused unpause it, then step */
2820     if (is->paused)
2821         stream_toggle_pause(is);
2822     is->step = 1;
2823 }
2824
2825 static void toggle_audio_display(VideoState *is)
2826 {
2827     int bgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0x00);
2828     is->show_mode = (is->show_mode + 1) % SHOW_MODE_NB;
2829     fill_rectangle(screen,
2830                 is->xleft, is->ytop, is->width, is->height,
2831                 bgcolor);
2832     SDL_UpdateRect(screen, is->xleft, is->ytop, is->width, is->height);
2833 }
2834
2835 /* handle an event sent by the GUI */
2836 static void event_loop(VideoState *cur_stream)
2837 {
2838     SDL_Event event;
2839     double incr, pos, frac;
2840
2841     for (;;) {
2842         double x;
2843         SDL_WaitEvent(&event);
2844         switch (event.type) {
2845         case SDL_KEYDOWN:
2846             if (exit_on_keydown) {
2847                 do_exit(cur_stream);
2848                 break;
2849             }
2850             switch (event.key.keysym.sym) {
2851             case SDLK_ESCAPE:
2852             case SDLK_q:
2853                 do_exit(cur_stream);
2854                 break;
2855             case SDLK_f:
2856                 toggle_full_screen(cur_stream);
2857                 cur_stream->force_refresh = 1;
2858                 break;
2859             case SDLK_p:
2860             case SDLK_SPACE:
2861                 toggle_pause(cur_stream);
2862                 break;
2863             case SDLK_s: // S: Step to next frame
2864                 step_to_next_frame(cur_stream);
2865                 break;
2866             case SDLK_a:
2867                 stream_cycle_channel(cur_stream, AVMEDIA_TYPE_AUDIO);
2868                 break;
2869             case SDLK_v:
2870                 stream_cycle_channel(cur_stream, AVMEDIA_TYPE_VIDEO);
2871                 break;
2872             case SDLK_t:
2873                 stream_cycle_channel(cur_stream, AVMEDIA_TYPE_SUBTITLE);
2874                 break;
2875             case SDLK_w:
2876                 toggle_audio_display(cur_stream);
2877                 cur_stream->force_refresh = 1;
2878                 break;
2879             case SDLK_PAGEUP:
2880                 incr = 600.0;
2881                 goto do_seek;
2882             case SDLK_PAGEDOWN:
2883                 incr = -600.0;
2884                 goto do_seek;
2885             case SDLK_LEFT:
2886                 incr = -10.0;
2887                 goto do_seek;
2888             case SDLK_RIGHT:
2889                 incr = 10.0;
2890                 goto do_seek;
2891             case SDLK_UP:
2892                 incr = 60.0;
2893                 goto do_seek;
2894             case SDLK_DOWN:
2895                 incr = -60.0;
2896             do_seek:
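                         /* when seeking by bytes, convert the time increment to
                            a byte offset using the stream bit rate (or a rough
                            180 kB/s guess); otherwise seek relative to the
                            master clock in AV_TIME_BASE units */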
2897                     if (seek_by_bytes) {
2898                         if (cur_stream->video_stream >= 0 && cur_stream->video_current_pos >= 0) {
2899                             pos = cur_stream->video_current_pos;
2900                         } else if (cur_stream->audio_stream >= 0 && cur_stream->audio_pkt.pos >= 0) {
2901                             pos = cur_stream->audio_pkt.pos;
2902                         } else
2903                             pos = avio_tell(cur_stream->ic->pb);
2904                         if (cur_stream->ic->bit_rate)
2905                             incr *= cur_stream->ic->bit_rate / 8.0;
2906                         else
2907                             incr *= 180000.0;
2908                         pos += incr;
2909                         stream_seek(cur_stream, pos, incr, 1);
2910                     } else {
2911                         pos = get_master_clock(cur_stream);
2912                         pos += incr;
2913                         stream_seek(cur_stream, (int64_t)(pos * AV_TIME_BASE), (int64_t)(incr * AV_TIME_BASE), 0);
2914                     }
2915                 break;
2916             default:
2917                 break;
2918             }
2919             break;
2920         case SDL_VIDEOEXPOSE:
2921             cur_stream->force_refresh = 1;
2922             break;
2923         case SDL_MOUSEBUTTONDOWN:
2924             if (exit_on_mousedown) {
2925                 do_exit(cur_stream);
2926                 break;
2927             }
2928         case SDL_MOUSEMOTION:
2929             if (event.type == SDL_MOUSEBUTTONDOWN) {
2930                 x = event.button.x;
2931             } else {
2932                 if (event.motion.state != SDL_PRESSED)
2933                     break;
2934                 x = event.motion.x;
2935             }
2936                 if (seek_by_bytes || cur_stream->ic->duration <= 0) {
2937                     uint64_t size =  avio_size(cur_stream->ic->pb);
2938                     stream_seek(cur_stream, size*x/cur_stream->width, 0, 1);
2939                 } else {
2940                     int64_t ts;
2941                     int ns, hh, mm, ss;
2942                     int tns, thh, tmm, tss;
2943                     tns  = cur_stream->ic->duration / 1000000LL;
2944                     thh  = tns / 3600;
2945                     tmm  = (tns % 3600) / 60;
2946                     tss  = (tns % 60);
2947                     frac = x / cur_stream->width;
2948                     ns   = frac * tns;
2949                     hh   = ns / 3600;
2950                     mm   = (ns % 3600) / 60;
2951                     ss   = (ns % 60);
2952                     fprintf(stderr, "Seek to %2.0f%% (%2d:%02d:%02d) of total duration (%2d:%02d:%02d)       \n", frac*100,
2953                             hh, mm, ss, thh, tmm, tss);
2954                     ts = frac * cur_stream->ic->duration;
2955                     if (cur_stream->ic->start_time != AV_NOPTS_VALUE)
2956                         ts += cur_stream->ic->start_time;
2957                     stream_seek(cur_stream, ts, 0, 0);
2958                 }
2959             break;
2960         case SDL_VIDEORESIZE:
2961                 screen = SDL_SetVideoMode(event.resize.w, event.resize.h, 0,
2962                                           SDL_HWSURFACE|SDL_RESIZABLE|SDL_ASYNCBLIT|SDL_HWACCEL);
2963                 screen_width  = cur_stream->width  = event.resize.w;
2964                 screen_height = cur_stream->height = event.resize.h;
2965                 cur_stream->force_refresh = 1;
2966             break;
2967         case SDL_QUIT:
2968         case FF_QUIT_EVENT:
2969             do_exit(cur_stream);
2970             break;
2971         case FF_ALLOC_EVENT:
2972             video_open(event.user.data1, 0);
2973             alloc_picture(event.user.data1);
2974             break;
2975         case FF_REFRESH_EVENT:
2976             video_refresh(event.user.data1);
2977             cur_stream->refresh = 0;
2978             break;
2979         default:
2980             break;
2981         }
2982     }
2983 }
2984
2985 static int opt_frame_size(const char *opt, const char *arg)
2986 {
2987     av_log(NULL, AV_LOG_WARNING, "Option -s is deprecated, use -video_size.\n");
2988     return opt_default("video_size", arg);
2989 }
2990
2991 static int opt_width(const char *opt, const char *arg)
2992 {
2993     screen_width = parse_number_or_die(opt, arg, OPT_INT64, 1, INT_MAX);
2994     return 0;
2995 }
2996
2997 static int opt_height(const char *opt, const char *arg)
2998 {
2999     screen_height = parse_number_or_die(opt, arg, OPT_INT64, 1, INT_MAX);
3000     return 0;
3001 }
3002
3003 static int opt_format(const char *opt, const char *arg)
3004 {
3005     file_iformat = av_find_input_format(arg);
3006     if (!file_iformat) {
3007         fprintf(stderr, "Unknown input format: %s\n", arg);
3008         return AVERROR(EINVAL);
3009     }
3010     return 0;
3011 }
3012
3013 static int opt_frame_pix_fmt(const char *opt, const char *arg)
3014 {
3015     av_log(NULL, AV_LOG_WARNING, "Option -pix_fmt is deprecated, use -pixel_format.\n");
3016     return opt_default("pixel_format", arg);
3017 }
3018
3019 static int opt_sync(const char *opt, const char *arg)
3020 {
3021     if (!strcmp(arg, "audio"))
3022         av_sync_type = AV_SYNC_AUDIO_MASTER;
3023     else if (!strcmp(arg, "video"))
3024         av_sync_type = AV_SYNC_VIDEO_MASTER;
3025     else if (!strcmp(arg, "ext"))
3026         av_sync_type = AV_SYNC_EXTERNAL_CLOCK;
3027     else {
3028         fprintf(stderr, "Unknown value for %s: %s\n", opt, arg);
3029         exit(1);
3030     }
3031     return 0;
3032 }
3033
3034 static int opt_seek(const char *opt, const char *arg)
3035 {
3036     start_time = parse_time_or_die(opt, arg, 1);
3037     return 0;
3038 }
3039
3040 static int opt_duration(const char *opt, const char *arg)
3041 {
3042     duration = parse_time_or_die(opt, arg, 1);
3043     return 0;
3044 }
3045
3046 static int opt_show_mode(const char *opt, const char *arg)
3047 {
3048     show_mode = !strcmp(arg, "video") ? SHOW_MODE_VIDEO :
3049                 !strcmp(arg, "waves") ? SHOW_MODE_WAVES :
3050                 !strcmp(arg, "rdft" ) ? SHOW_MODE_RDFT  :
3051                 parse_number_or_die(opt, arg, OPT_INT, 0, SHOW_MODE_NB-1);
3052     return 0;
3053 }
3054
3055 static void opt_input_file(void *optctx, const char *filename)
3056 {
3057     if (input_filename) {
3058         fprintf(stderr, "Argument '%s' provided as input filename, but '%s' was already specified.\n",
3059                 filename, input_filename);
3060         exit_program(1);
3061     }
3062     if (!strcmp(filename, "-"))
3063         filename = "pipe:";
3064     input_filename = filename;
3065 }
3066
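      /* -codec:a, -codec:s, -codec:v: the last character of the option name
         selects whether the audio, subtitle or video decoder is forced */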
3067 static int opt_codec(void *o, const char *opt, const char *arg)
3068 {
3069     switch (opt[strlen(opt) - 1]) {
3070     case 'a' :    audio_codec_name = arg; break;
3071     case 's' : subtitle_codec_name = arg; break;
3072     case 'v' :    video_codec_name = arg; break;
3073     }
3074     return 0;
3075 }
3076
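      /* sink for the ignored -i option, accepted only for ffmpeg command line compatibility */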
3077 static int dummy;
3078
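      /* command line option table: name, flags, destination or handler, help text, argument name */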
3079 static const OptionDef options[] = {
3080 #include "cmdutils_common_opts.h"
3081     { "x", HAS_ARG, { (void*)opt_width }, "force displayed width", "width" },
3082     { "y", HAS_ARG, { (void*)opt_height }, "force displayed height", "height" },
3083     { "s", HAS_ARG | OPT_VIDEO, { (void*)opt_frame_size }, "set frame size (WxH or abbreviation)", "size" },
3084     { "fs", OPT_BOOL, { (void*)&is_full_screen }, "force full screen" },
3085     { "an", OPT_BOOL, { (void*)&audio_disable }, "disable audio" },
3086     { "vn", OPT_BOOL, { (void*)&video_disable }, "disable video" },
3087     { "ast", OPT_INT | HAS_ARG | OPT_EXPERT, { (void*)&wanted_stream[AVMEDIA_TYPE_AUDIO] }, "select desired audio stream", "stream_number" },
3088     { "vst", OPT_INT | HAS_ARG | OPT_EXPERT, { (void*)&wanted_stream[AVMEDIA_TYPE_VIDEO] }, "select desired video stream", "stream_number" },
3089     { "sst", OPT_INT | HAS_ARG | OPT_EXPERT, { (void*)&wanted_stream[AVMEDIA_TYPE_SUBTITLE] }, "select desired subtitle stream", "stream_number" },
3090     { "ss", HAS_ARG, { (void*)&opt_seek }, "seek to a given position in seconds", "pos" },
3091     { "t", HAS_ARG, { (void*)&opt_duration }, "play \"duration\" seconds of audio/video", "duration" },
3092     { "bytes", OPT_INT | HAS_ARG, { (void*)&seek_by_bytes }, "seek by bytes 0=off 1=on -1=auto", "val" },
3093     { "nodisp", OPT_BOOL, { (void*)&display_disable }, "disable graphical display" },
3094     { "f", HAS_ARG, { (void*)opt_format }, "force format", "fmt" },
3095     { "pix_fmt", HAS_ARG | OPT_EXPERT | OPT_VIDEO, { (void*)opt_frame_pix_fmt }, "set pixel format", "format" },
3096     { "stats", OPT_BOOL | OPT_EXPERT, { (void*)&show_status }, "show status", "" },
3097     { "bug", OPT_INT | HAS_ARG | OPT_EXPERT, { (void*)&workaround_bugs }, "work around encoder bugs", "bit_mask" },
3098     { "fast", OPT_BOOL | OPT_EXPERT, { (void*)&fast }, "non spec compliant optimizations", "" },
3099     { "genpts", OPT_BOOL | OPT_EXPERT, { (void*)&genpts }, "generate pts", "" },
3100     { "drp", OPT_INT | HAS_ARG | OPT_EXPERT, { (void*)&decoder_reorder_pts }, "let decoder reorder pts 0=off 1=on -1=auto", ""},
3101     { "lowres", OPT_INT | HAS_ARG | OPT_EXPERT, { (void*)&lowres }, "force low resolution decoding", "factor" },
3102     { "skiploop", OPT_INT | HAS_ARG | OPT_EXPERT, { (void*)&skip_loop_filter }, "skip loop filtering for selected frame types", "mode" },
3103     { "skipframe", OPT_INT | HAS_ARG | OPT_EXPERT, { (void*)&skip_frame }, "skip decoding of selected frame types", "mode" },
3104     { "skipidct", OPT_INT | HAS_ARG | OPT_EXPERT, { (void*)&skip_idct }, "skip the IDCT for selected frame types", "mode" },
3105     { "idct", OPT_INT | HAS_ARG | OPT_EXPERT, { (void*)&idct }, "set idct algo",  "algo" },
3106     { "ec", OPT_INT | HAS_ARG | OPT_EXPERT, { (void*)&error_concealment }, "set error concealment options",  "bit_mask" },
3107     { "sync", HAS_ARG | OPT_EXPERT, { (void*)opt_sync }, "set audio-video sync type (type=audio/video/ext)", "type" },
3108     { "autoexit", OPT_BOOL | OPT_EXPERT, { (void*)&autoexit }, "exit at the end", "" },
3109     { "exitonkeydown", OPT_BOOL | OPT_EXPERT, { (void*)&exit_on_keydown }, "exit on key down", "" },
3110     { "exitonmousedown", OPT_BOOL | OPT_EXPERT, { (void*)&exit_on_mousedown }, "exit on mouse down", "" },
3111     { "loop", OPT_INT | HAS_ARG | OPT_EXPERT, { (void*)&loop }, "set number of times the playback shall be looped", "loop count" },
3112     { "framedrop", OPT_BOOL | OPT_EXPERT, { (void*)&framedrop }, "drop frames when CPU is too slow", "" },
3113     { "window_title", OPT_STRING | HAS_ARG, { (void*)&window_title }, "set window title", "window title" },
3114 #if CONFIG_AVFILTER
3115     { "vf", OPT_STRING | HAS_ARG, { (void*)&vfilters }, "video filters", "filter list" },
3116 #endif
3117     { "rdftspeed", OPT_INT | HAS_ARG| OPT_AUDIO | OPT_EXPERT, { (void*)&rdftspeed }, "rdft speed", "msecs" },
3118     { "showmode", HAS_ARG, {(void*)opt_show_mode}, "select show mode (0 = video, 1 = waves, 2 = RDFT)", "mode" },
3119     { "default", HAS_ARG | OPT_AUDIO | OPT_VIDEO | OPT_EXPERT, { (void*)opt_default }, "generic catch all option", "" },
3120     { "i", OPT_BOOL, {(void *)&dummy}, "read specified file", "input_file"},
3121     { "codec", HAS_ARG | OPT_FUNC2, {(void*)opt_codec}, "force decoder", "decoder" },
3122     { NULL, },
3123 };
3124
3125 static void show_usage(void)
3126 {
3127     av_log(NULL, AV_LOG_INFO, "Simple media player\n");
3128     av_log(NULL, AV_LOG_INFO, "usage: %s [options] input_file\n", program_name);
3129     av_log(NULL, AV_LOG_INFO, "\n");
3130 }
3131
3132 static int opt_help(const char *opt, const char *arg)
3133 {
3134     av_log_set_callback(log_callback_help);
3135     show_usage();
3136     show_help_options(options, "Main options:\n",
3137                       OPT_EXPERT, 0);
3138     show_help_options(options, "\nAdvanced options:\n",
3139                       OPT_EXPERT, OPT_EXPERT);
3140     printf("\n");
3141     show_help_children(avcodec_get_class(), AV_OPT_FLAG_DECODING_PARAM);
3142     show_help_children(avformat_get_class(), AV_OPT_FLAG_DECODING_PARAM);
3143 #if !CONFIG_AVFILTER
3144     show_help_children(sws_get_class(), AV_OPT_FLAG_ENCODING_PARAM);
3145 #endif
3146     printf("\nWhile playing:\n"
3147            "q, ESC              quit\n"
3148            "f                   toggle full screen\n"
3149            "p, SPC              pause\n"
3150            "a                   cycle audio channel\n"
3151            "v                   cycle video channel\n"
3152            "t                   cycle subtitle channel\n"
3153            "w                   show audio waves\n"
3154            "s                   activate frame-step mode\n"
3155            "left/right          seek backward/forward 10 seconds\n"
3156            "down/up             seek backward/forward 1 minute\n"
3157            "page down/page up   seek backward/forward 10 minutes\n"
3158            "mouse click         seek to percentage in file corresponding to fraction of width\n"
3159            );
3160     return 0;
3161 }
3162
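      /* lock manager callback registered with av_lockmgr_register() below so that
         libavcodec protects its non-thread-safe parts with SDL mutexes;
         returns 0 on success, nonzero on failure */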
3163 static int lockmgr(void **mtx, enum AVLockOp op)
3164 {
3165     switch (op) {
3166     case AV_LOCK_CREATE:
3167         *mtx = SDL_CreateMutex();
3168         if (!*mtx)
3169             return 1;
3170         return 0;
3171     case AV_LOCK_OBTAIN:
3172         return !!SDL_LockMutex(*mtx);
3173     case AV_LOCK_RELEASE:
3174         return !!SDL_UnlockMutex(*mtx);
3175     case AV_LOCK_DESTROY:
3176         SDL_DestroyMutex(*mtx);
3177         return 0;
3178     }
3179     return 1;
3180 }
3181
3182 /* program entry point */
3183 int main(int argc, char **argv)
3184 {
3185     int flags;
3186     VideoState *is;
3187
3188     av_log_set_flags(AV_LOG_SKIP_REPEATED);
3189     parse_loglevel(argc, argv, options);
3190
3191     /* register all codecs, demuxers and protocols */
3192     avcodec_register_all();
3193 #if CONFIG_AVDEVICE
3194     avdevice_register_all();
3195 #endif
3196 #if CONFIG_AVFILTER
3197     avfilter_register_all();
3198 #endif
3199     av_register_all();
3200     avformat_network_init();
3201
3202     init_opts();
3203
3204     signal(SIGINT , sigterm_handler); /* Interrupt (ANSI).    */
3205     signal(SIGTERM, sigterm_handler); /* Termination (ANSI).  */
3206
3207     show_banner(argc, argv, options);
3208
3209     parse_options(NULL, argc, argv, options, opt_input_file);
3210
3211     if (!input_filename) {
3212         show_usage();
3213         fprintf(stderr, "An input file must be specified\n");
3214         fprintf(stderr, "Use -h to get full help or, even better, run 'man %s'\n", program_name);
3215         exit(1);
3216     }
3217
3218     if (display_disable) {
3219         video_disable = 1;
3220     }
3221     flags = SDL_INIT_VIDEO | SDL_INIT_AUDIO | SDL_INIT_TIMER;
3222     if (audio_disable)
3223         flags &= ~SDL_INIT_AUDIO;
3224 #if !defined(__MINGW32__) && !defined(__APPLE__)
3225     flags |= SDL_INIT_EVENTTHREAD; /* Not supported on Windows or Mac OS X */
3226 #endif
3227     if (SDL_Init(flags)) {
3228         fprintf(stderr, "Could not initialize SDL - %s\n", SDL_GetError());
3229         fprintf(stderr, "(Did you set the DISPLAY variable?)\n");
3230         exit(1);
3231     }
3232
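      /* remember the desktop resolution; video_open() uses it for full screen mode */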
3233     if (!display_disable) {
3234 #if HAVE_SDL_VIDEO_SIZE
3235         const SDL_VideoInfo *vi = SDL_GetVideoInfo();
3236         fs_screen_width = vi->current_w;
3237         fs_screen_height = vi->current_h;
3238 #endif
3239     }
3240
3241     SDL_EventState(SDL_ACTIVEEVENT, SDL_IGNORE);
3242     SDL_EventState(SDL_SYSWMEVENT, SDL_IGNORE);
3243     SDL_EventState(SDL_USEREVENT, SDL_IGNORE);
3244
3245     if (av_lockmgr_register(lockmgr)) {
3246         fprintf(stderr, "Could not initialize lock manager!\n");
3247         do_exit(NULL);
3248     }
3249
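      /* the flush packet is queued into the packet queues on seek; the decoder
         threads flush their codec buffers when they dequeue it */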
3250     av_init_packet(&flush_pkt);
3251     flush_pkt.data = (uint8_t *)(intptr_t)"FLUSH";
3252
3253     is = stream_open(input_filename, file_iformat);
3254     if (!is) {
3255         fprintf(stderr, "Failed to initialize VideoState!\n");
3256         do_exit(NULL);
3257     }
3258
3259     event_loop(is);
3260
3261     /* event_loop() never returns; the player exits through do_exit() */
3262
3263     return 0;
3264 }