1 /*
2  * Copyright (c) 2003 Fabrice Bellard
3  *
4  * This file is part of FFmpeg.
5  *
6  * FFmpeg is free software; you can redistribute it and/or
7  * modify it under the terms of the GNU Lesser General Public
8  * License as published by the Free Software Foundation; either
9  * version 2.1 of the License, or (at your option) any later version.
10  *
11  * FFmpeg is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
14  * Lesser General Public License for more details.
15  *
16  * You should have received a copy of the GNU Lesser General Public
17  * License along with FFmpeg; if not, write to the Free Software
18  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
19  */
20
21 /**
22  * @file
23  * simple media player based on the FFmpeg libraries
24  */
25
26 #include "config.h"
27 #include <inttypes.h>
28 #include <math.h>
29 #include <limits.h>
30 #include <signal.h>
31 #include "libavutil/avstring.h"
32 #include "libavutil/colorspace.h"
33 #include "libavutil/mathematics.h"
34 #include "libavutil/pixdesc.h"
35 #include "libavutil/imgutils.h"
36 #include "libavutil/dict.h"
37 #include "libavutil/parseutils.h"
38 #include "libavutil/samplefmt.h"
39 #include "libavutil/avassert.h"
40 #include "libavformat/avformat.h"
41 #include "libavdevice/avdevice.h"
42 #include "libswscale/swscale.h"
43 #include "libavutil/opt.h"
44 #include "libavcodec/avfft.h"
45 #include "libswresample/swresample.h"
46
47 #if CONFIG_AVFILTER
48 # include "libavfilter/avcodec.h"
49 # include "libavfilter/avfilter.h"
50 # include "libavfilter/avfiltergraph.h"
51 # include "libavfilter/buffersink.h"
52 #endif
53
54 #include <SDL.h>
55 #include <SDL_thread.h>
56
57 #include "cmdutils.h"
58
59 #include <unistd.h>
60 #include <assert.h>
61
62 const char program_name[] = "ffplay";
63 const int program_birth_year = 2003;
64
65 #define MAX_QUEUE_SIZE (15 * 1024 * 1024)
66 #define MIN_FRAMES 5
67
68 /* SDL audio buffer size, in samples. Should be small to have precise
69    A/V sync as SDL does not have hardware buffer fullness info. */
70 #define SDL_AUDIO_BUFFER_SIZE 1024
71
72 /* no AV sync correction is done if below the AV sync threshold */
73 #define AV_SYNC_THRESHOLD 0.01
74 /* no AV correction is done if too big error */
75 #define AV_NOSYNC_THRESHOLD 10.0
76
77 /* maximum audio speed change to get correct sync */
78 #define SAMPLE_CORRECTION_PERCENT_MAX 10
79
80 /* we use about AUDIO_DIFF_AVG_NB A-V differences to make the average */
81 #define AUDIO_DIFF_AVG_NB   20
82
83 /* NOTE: the size must be big enough to compensate for the hardware audio buffer size */
84 #define SAMPLE_ARRAY_SIZE (2 * 65536)
85
86 static int sws_flags = SWS_BICUBIC;
87
88 typedef struct PacketQueue {
89     AVPacketList *first_pkt, *last_pkt;
90     int nb_packets;
91     int size;
92     int abort_request;
93     SDL_mutex *mutex;
94     SDL_cond *cond;
95 } PacketQueue;
96
97 #define VIDEO_PICTURE_QUEUE_SIZE 2
98 #define SUBPICTURE_QUEUE_SIZE 4
99
100 typedef struct VideoPicture {
101     double pts;                                  ///< presentation time stamp for this picture
102     double duration;                             ///< expected duration of the frame
103     int64_t pos;                                 ///< byte position in file
104     int skip;
105     SDL_Overlay *bmp;
106     int width, height; /* source height & width */
107     AVRational sample_aspect_ratio;
108     int allocated;
109     int reallocate;
110     enum PixelFormat pix_fmt;
111
112 #if CONFIG_AVFILTER
113     AVFilterBufferRef *picref;
114 #endif
115 } VideoPicture;
116
117 typedef struct SubPicture {
118     double pts; /* presentation time stamp for this picture */
119     AVSubtitle sub;
120 } SubPicture;
121
122 enum {
123     AV_SYNC_AUDIO_MASTER, /* default choice */
124     AV_SYNC_VIDEO_MASTER,
125     AV_SYNC_EXTERNAL_CLOCK, /* synchronize to an external clock */
126 };
127
128 typedef struct VideoState {
129     SDL_Thread *read_tid;
130     SDL_Thread *video_tid;
131     SDL_Thread *refresh_tid;
132     AVInputFormat *iformat;
133     int no_background;
134     int abort_request;
135     int force_refresh;
136     int paused;
137     int last_paused;
138     int seek_req;
139     int seek_flags;
140     int64_t seek_pos;
141     int64_t seek_rel;
142     int read_pause_return;
143     AVFormatContext *ic;
144
145     int audio_stream;
146
147     int av_sync_type;
148     double external_clock; /* external clock base */
149     int64_t external_clock_time;
150
151     double audio_clock;
152     double audio_diff_cum; /* used for AV difference average computation */
153     double audio_diff_avg_coef;
154     double audio_diff_threshold;
155     int audio_diff_avg_count;
156     AVStream *audio_st;
157     PacketQueue audioq;
158     int audio_hw_buf_size;
159     DECLARE_ALIGNED(16,uint8_t,audio_buf2)[AVCODEC_MAX_AUDIO_FRAME_SIZE * 4];
160     uint8_t silence_buf[SDL_AUDIO_BUFFER_SIZE];
161     uint8_t *audio_buf;
162     uint8_t *audio_buf1;
163     unsigned int audio_buf_size; /* in bytes */
164     int audio_buf_index; /* in bytes */
165     int audio_write_buf_size;
166     AVPacket audio_pkt_temp;
167     AVPacket audio_pkt;
168     enum AVSampleFormat audio_src_fmt;
169     enum AVSampleFormat audio_tgt_fmt;
170     int audio_src_channels;
171     int audio_tgt_channels;
172     int64_t audio_src_channel_layout;
173     int64_t audio_tgt_channel_layout;
174     int audio_src_freq;
175     int audio_tgt_freq;
176     struct SwrContext *swr_ctx;
177     double audio_current_pts;
178     double audio_current_pts_drift;
179     int frame_drops_early;
180     int frame_drops_late;
181     AVFrame *frame;
182
183     enum ShowMode {
184         SHOW_MODE_NONE = -1, SHOW_MODE_VIDEO = 0, SHOW_MODE_WAVES, SHOW_MODE_RDFT, SHOW_MODE_NB
185     } show_mode;
186     int16_t sample_array[SAMPLE_ARRAY_SIZE];
187     int sample_array_index;
188     int last_i_start;
189     RDFTContext *rdft;
190     int rdft_bits;
191     FFTSample *rdft_data;
192     int xpos;
193
194     SDL_Thread *subtitle_tid;
195     int subtitle_stream;
196     int subtitle_stream_changed;
197     AVStream *subtitle_st;
198     PacketQueue subtitleq;
199     SubPicture subpq[SUBPICTURE_QUEUE_SIZE];
200     int subpq_size, subpq_rindex, subpq_windex;
201     SDL_mutex *subpq_mutex;
202     SDL_cond *subpq_cond;
203
204     double frame_timer;
205     double frame_last_pts;
206     double frame_last_duration;
207     double frame_last_dropped_pts;
208     double frame_last_returned_time;
209     double frame_last_filter_delay;
210     int64_t frame_last_dropped_pos;
211     double video_clock;                          ///< pts of last decoded frame / predicted pts of next decoded frame
212     int video_stream;
213     AVStream *video_st;
214     PacketQueue videoq;
215     double video_current_pts;                    ///< current displayed pts (different from video_clock if frame fifos are used)
216     double video_current_pts_drift;              ///< video_current_pts - time (av_gettime) at which we updated video_current_pts - used to have running video pts
217     int64_t video_current_pos;                   ///< current displayed file pos
218     VideoPicture pictq[VIDEO_PICTURE_QUEUE_SIZE];
219     int pictq_size, pictq_rindex, pictq_windex;
220     SDL_mutex *pictq_mutex;
221     SDL_cond *pictq_cond;
222 #if !CONFIG_AVFILTER
223     struct SwsContext *img_convert_ctx;
224 #endif
225
226     char filename[1024];
227     int width, height, xleft, ytop;
228     int step;
229
230 #if CONFIG_AVFILTER
231     AVFilterContext *out_video_filter;          ///< the last filter in the video chain
232 #endif
233
234     int refresh;
235     int last_video_stream, last_audio_stream, last_subtitle_stream;
236 } VideoState;
237
238 typedef struct AllocEventProps {
239     VideoState *is;
240     AVFrame *frame;
241 } AllocEventProps;
242
243 static int opt_help(const char *opt, const char *arg);
244
245 /* options specified by the user */
246 static AVInputFormat *file_iformat;
247 static const char *input_filename;
248 static const char *window_title;
249 static int fs_screen_width;
250 static int fs_screen_height;
251 static int screen_width  = 0;
252 static int screen_height = 0;
253 static int audio_disable;
254 static int video_disable;
255 static int wanted_stream[AVMEDIA_TYPE_NB] = {
256     [AVMEDIA_TYPE_AUDIO]    = -1,
257     [AVMEDIA_TYPE_VIDEO]    = -1,
258     [AVMEDIA_TYPE_SUBTITLE] = -1,
259 };
260 static int seek_by_bytes = -1;
261 static int display_disable;
262 static int show_status = 1;
263 static int av_sync_type = AV_SYNC_AUDIO_MASTER;
264 static int64_t start_time = AV_NOPTS_VALUE;
265 static int64_t duration = AV_NOPTS_VALUE;
266 static int workaround_bugs = 1;
267 static int fast = 0;
268 static int genpts = 0;
269 static int lowres = 0;
270 static int idct = FF_IDCT_AUTO;
271 static enum AVDiscard skip_frame       = AVDISCARD_DEFAULT;
272 static enum AVDiscard skip_idct        = AVDISCARD_DEFAULT;
273 static enum AVDiscard skip_loop_filter = AVDISCARD_DEFAULT;
274 static int error_concealment = 3;
275 static int decoder_reorder_pts = -1;
276 static int autoexit;
277 static int exit_on_keydown;
278 static int exit_on_mousedown;
279 static int loop = 1;
280 static int framedrop = -1;
281 static enum ShowMode show_mode = SHOW_MODE_NONE;
282 static const char *audio_codec_name;
283 static const char *subtitle_codec_name;
284 static const char *video_codec_name;
285 static int rdftspeed = 20;
286 #if CONFIG_AVFILTER
287 static char *vfilters = NULL;
288 #endif
289
290 /* current context */
291 static int is_full_screen;
292 static int64_t audio_callback_time;
293
294 static AVPacket flush_pkt;
295
296 #define FF_ALLOC_EVENT   (SDL_USEREVENT)
297 #define FF_REFRESH_EVENT (SDL_USEREVENT + 1)
298 #define FF_QUIT_EVENT    (SDL_USEREVENT + 2)
299
300 static SDL_Surface *screen;
301
302 void av_noreturn exit_program(int ret)
303 {
304     exit(ret);
305 }
306
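/* append pkt to the queue; the caller must hold q->mutex */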
307 static int packet_queue_put_private(PacketQueue *q, AVPacket *pkt)
308 {
309     AVPacketList *pkt1;
310
311     if (q->abort_request)
312        return -1;
313
314     pkt1 = av_malloc(sizeof(AVPacketList));
315     if (!pkt1)
316         return -1;
317     pkt1->pkt = *pkt;
318     pkt1->next = NULL;
319
320     if (!q->last_pkt)
321         q->first_pkt = pkt1;
322     else
323         q->last_pkt->next = pkt1;
324     q->last_pkt = pkt1;
325     q->nb_packets++;
326     q->size += pkt1->pkt.size + sizeof(*pkt1);
327     /* XXX: should duplicate packet data in DV case */
328     SDL_CondSignal(q->cond);
329     return 0;
330 }
331
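/* duplicate pkt if necessary and queue it; returns 0 on success, -1 on failure */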
332 static int packet_queue_put(PacketQueue *q, AVPacket *pkt)
333 {
334     int ret;
335
336     /* duplicate the packet */
337     if (pkt != &flush_pkt && av_dup_packet(pkt) < 0)
338         return -1;
339
340     SDL_LockMutex(q->mutex);
341     ret = packet_queue_put_private(q, pkt);
342     SDL_UnlockMutex(q->mutex);
343
344     if (pkt != &flush_pkt && ret < 0)
345         av_free_packet(pkt);
346
347     return ret;
348 }
349
350 /* packet queue handling */
351 static void packet_queue_init(PacketQueue *q)
352 {
353     memset(q, 0, sizeof(PacketQueue));
354     q->mutex = SDL_CreateMutex();
355     q->cond = SDL_CreateCond();
356     q->abort_request = 1;
357 }
358
359 static void packet_queue_flush(PacketQueue *q)
360 {
361     AVPacketList *pkt, *pkt1;
362
363     SDL_LockMutex(q->mutex);
364     for (pkt = q->first_pkt; pkt != NULL; pkt = pkt1) {
365         pkt1 = pkt->next;
366         av_free_packet(&pkt->pkt);
367         av_freep(&pkt);
368     }
369     q->last_pkt = NULL;
370     q->first_pkt = NULL;
371     q->nb_packets = 0;
372     q->size = 0;
373     SDL_UnlockMutex(q->mutex);
374 }
375
376 static void packet_queue_destroy(PacketQueue *q)
377 {
378     packet_queue_flush(q);
379     SDL_DestroyMutex(q->mutex);
380     SDL_DestroyCond(q->cond);
381 }
382
383 static void packet_queue_abort(PacketQueue *q)
384 {
385     SDL_LockMutex(q->mutex);
386
387     q->abort_request = 1;
388
389     SDL_CondSignal(q->cond);
390
391     SDL_UnlockMutex(q->mutex);
392 }
393
394 static void packet_queue_start(PacketQueue *q)
395 {
396     SDL_LockMutex(q->mutex);
397     q->abort_request = 0;
398     packet_queue_put_private(q, &flush_pkt);
399     SDL_UnlockMutex(q->mutex);
400 }
401
402 /* return < 0 if aborted, 0 if no packet and > 0 if packet.  */
403 static int packet_queue_get(PacketQueue *q, AVPacket *pkt, int block)
404 {
405     AVPacketList *pkt1;
406     int ret;
407
408     SDL_LockMutex(q->mutex);
409
410     for (;;) {
411         if (q->abort_request) {
412             ret = -1;
413             break;
414         }
415
416         pkt1 = q->first_pkt;
417         if (pkt1) {
418             q->first_pkt = pkt1->next;
419             if (!q->first_pkt)
420                 q->last_pkt = NULL;
421             q->nb_packets--;
422             q->size -= pkt1->pkt.size + sizeof(*pkt1);
423             *pkt = pkt1->pkt;
424             av_free(pkt1);
425             ret = 1;
426             break;
427         } else if (!block) {
428             ret = 0;
429             break;
430         } else {
431             SDL_CondWait(q->cond, q->mutex);
432         }
433     }
434     SDL_UnlockMutex(q->mutex);
435     return ret;
436 }
437
438 static inline void fill_rectangle(SDL_Surface *screen,
439                                   int x, int y, int w, int h, int color)
440 {
441     SDL_Rect rect;
442     rect.x = x;
443     rect.y = y;
444     rect.w = w;
445     rect.h = h;
446     SDL_FillRect(screen, &rect, color);
447 }
448
449 #define ALPHA_BLEND(a, oldp, newp, s)\
450 ((((oldp << s) * (255 - (a))) + (newp * (a))) / (255 << s))
451
452 #define RGBA_IN(r, g, b, a, s)\
453 {\
454     unsigned int v = ((const uint32_t *)(s))[0];\
455     a = (v >> 24) & 0xff;\
456     r = (v >> 16) & 0xff;\
457     g = (v >> 8) & 0xff;\
458     b = v & 0xff;\
459 }
460
461 #define YUVA_IN(y, u, v, a, s, pal)\
462 {\
463     unsigned int val = ((const uint32_t *)(pal))[*(const uint8_t*)(s)];\
464     a = (val >> 24) & 0xff;\
465     y = (val >> 16) & 0xff;\
466     u = (val >> 8) & 0xff;\
467     v = val & 0xff;\
468 }
469
470 #define YUVA_OUT(d, y, u, v, a)\
471 {\
472     ((uint32_t *)(d))[0] = (a << 24) | (y << 16) | (u << 8) | v;\
473 }
474
475
476 #define BPP 1
477
478 static void blend_subrect(AVPicture *dst, const AVSubtitleRect *rect, int imgw, int imgh)
479 {
480     int wrap, wrap3, width2, skip2;
481     int y, u, v, a, u1, v1, a1, w, h;
482     uint8_t *lum, *cb, *cr;
483     const uint8_t *p;
484     const uint32_t *pal;
485     int dstx, dsty, dstw, dsth;
486
487     dstw = av_clip(rect->w, 0, imgw);
488     dsth = av_clip(rect->h, 0, imgh);
489     dstx = av_clip(rect->x, 0, imgw - dstw);
490     dsty = av_clip(rect->y, 0, imgh - dsth);
491     lum = dst->data[0] + dsty * dst->linesize[0];
492     cb  = dst->data[1] + (dsty >> 1) * dst->linesize[1];
493     cr  = dst->data[2] + (dsty >> 1) * dst->linesize[2];
494
495     width2 = ((dstw + 1) >> 1) + (dstx & ~dstw & 1);
496     skip2 = dstx >> 1;
497     wrap = dst->linesize[0];
498     wrap3 = rect->pict.linesize[0];
499     p = rect->pict.data[0];
500     pal = (const uint32_t *)rect->pict.data[1];  /* Now in YCrCb! */
501
502     if (dsty & 1) {
503         lum += dstx;
504         cb += skip2;
505         cr += skip2;
506
507         if (dstx & 1) {
508             YUVA_IN(y, u, v, a, p, pal);
509             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
510             cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
511             cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
512             cb++;
513             cr++;
514             lum++;
515             p += BPP;
516         }
517         for (w = dstw - (dstx & 1); w >= 2; w -= 2) {
518             YUVA_IN(y, u, v, a, p, pal);
519             u1 = u;
520             v1 = v;
521             a1 = a;
522             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
523
524             YUVA_IN(y, u, v, a, p + BPP, pal);
525             u1 += u;
526             v1 += v;
527             a1 += a;
528             lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
529             cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
530             cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
531             cb++;
532             cr++;
533             p += 2 * BPP;
534             lum += 2;
535         }
536         if (w) {
537             YUVA_IN(y, u, v, a, p, pal);
538             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
539             cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
540             cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
541             p++;
542             lum++;
543         }
544         p += wrap3 - dstw * BPP;
545         lum += wrap - dstw - dstx;
546         cb += dst->linesize[1] - width2 - skip2;
547         cr += dst->linesize[2] - width2 - skip2;
548     }
549     for (h = dsth - (dsty & 1); h >= 2; h -= 2) {
550         lum += dstx;
551         cb += skip2;
552         cr += skip2;
553
554         if (dstx & 1) {
555             YUVA_IN(y, u, v, a, p, pal);
556             u1 = u;
557             v1 = v;
558             a1 = a;
559             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
560             p += wrap3;
561             lum += wrap;
562             YUVA_IN(y, u, v, a, p, pal);
563             u1 += u;
564             v1 += v;
565             a1 += a;
566             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
567             cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
568             cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
569             cb++;
570             cr++;
571             p += -wrap3 + BPP;
572             lum += -wrap + 1;
573         }
574         for (w = dstw - (dstx & 1); w >= 2; w -= 2) {
575             YUVA_IN(y, u, v, a, p, pal);
576             u1 = u;
577             v1 = v;
578             a1 = a;
579             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
580
581             YUVA_IN(y, u, v, a, p + BPP, pal);
582             u1 += u;
583             v1 += v;
584             a1 += a;
585             lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
586             p += wrap3;
587             lum += wrap;
588
589             YUVA_IN(y, u, v, a, p, pal);
590             u1 += u;
591             v1 += v;
592             a1 += a;
593             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
594
595             YUVA_IN(y, u, v, a, p + BPP, pal);
596             u1 += u;
597             v1 += v;
598             a1 += a;
599             lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
600
601             cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 2);
602             cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 2);
603
604             cb++;
605             cr++;
606             p += -wrap3 + 2 * BPP;
607             lum += -wrap + 2;
608         }
609         if (w) {
610             YUVA_IN(y, u, v, a, p, pal);
611             u1 = u;
612             v1 = v;
613             a1 = a;
614             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
615             p += wrap3;
616             lum += wrap;
617             YUVA_IN(y, u, v, a, p, pal);
618             u1 += u;
619             v1 += v;
620             a1 += a;
621             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
622             cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
623             cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
624             cb++;
625             cr++;
626             p += -wrap3 + BPP;
627             lum += -wrap + 1;
628         }
629         p += wrap3 + (wrap3 - dstw * BPP);
630         lum += wrap + (wrap - dstw - dstx);
631         cb += dst->linesize[1] - width2 - skip2;
632         cr += dst->linesize[2] - width2 - skip2;
633     }
634     /* handle odd height */
635     if (h) {
636         lum += dstx;
637         cb += skip2;
638         cr += skip2;
639
640         if (dstx & 1) {
641             YUVA_IN(y, u, v, a, p, pal);
642             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
643             cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
644             cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
645             cb++;
646             cr++;
647             lum++;
648             p += BPP;
649         }
650         for (w = dstw - (dstx & 1); w >= 2; w -= 2) {
651             YUVA_IN(y, u, v, a, p, pal);
652             u1 = u;
653             v1 = v;
654             a1 = a;
655             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
656
657             YUVA_IN(y, u, v, a, p + BPP, pal);
658             u1 += u;
659             v1 += v;
660             a1 += a;
661             lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
662             cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u, 1);
663             cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v, 1);
664             cb++;
665             cr++;
666             p += 2 * BPP;
667             lum += 2;
668         }
669         if (w) {
670             YUVA_IN(y, u, v, a, p, pal);
671             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
672             cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
673             cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
674         }
675     }
676 }
677
678 static void free_subpicture(SubPicture *sp)
679 {
680     avsubtitle_free(&sp->sub);
681 }
682
683 static void video_image_display(VideoState *is)
684 {
685     VideoPicture *vp;
686     SubPicture *sp;
687     AVPicture pict;
688     float aspect_ratio;
689     int width, height, x, y;
690     SDL_Rect rect;
691     int i;
692
693     vp = &is->pictq[is->pictq_rindex];
694     if (vp->bmp) {
695         if (vp->sample_aspect_ratio.num == 0)
696             aspect_ratio = 0;
697         else
698             aspect_ratio = av_q2d(vp->sample_aspect_ratio);
699
700         if (aspect_ratio <= 0.0)
701             aspect_ratio = 1.0;
702         aspect_ratio *= (float)vp->width / (float)vp->height;
703
704         if (is->subtitle_st) {
705             if (is->subpq_size > 0) {
706                 sp = &is->subpq[is->subpq_rindex];
707
708                 if (vp->pts >= sp->pts + ((float) sp->sub.start_display_time / 1000)) {
709                     SDL_LockYUVOverlay (vp->bmp);
710
711                     pict.data[0] = vp->bmp->pixels[0];
712                     pict.data[1] = vp->bmp->pixels[2];
713                     pict.data[2] = vp->bmp->pixels[1];
714
715                     pict.linesize[0] = vp->bmp->pitches[0];
716                     pict.linesize[1] = vp->bmp->pitches[2];
717                     pict.linesize[2] = vp->bmp->pitches[1];
718
719                     for (i = 0; i < sp->sub.num_rects; i++)
720                         blend_subrect(&pict, sp->sub.rects[i],
721                                       vp->bmp->w, vp->bmp->h);
722
723                     SDL_UnlockYUVOverlay (vp->bmp);
724                 }
725             }
726         }
727
728
729         /* XXX: we assume the screen has a 1:1 pixel aspect ratio */
730         height = is->height;
731         width = ((int)rint(height * aspect_ratio)) & ~1;
732         if (width > is->width) {
733             width = is->width;
734             height = ((int)rint(width / aspect_ratio)) & ~1;
735         }
736         x = (is->width - width) / 2;
737         y = (is->height - height) / 2;
738         is->no_background = 0;
739         rect.x = is->xleft + x;
740         rect.y = is->ytop  + y;
741         rect.w = FFMAX(width,  1);
742         rect.h = FFMAX(height, 1);
743         SDL_DisplayYUVOverlay(vp->bmp, &rect);
744     }
745 }
746
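/* positive modulo: wraps a into [0, b) even when a is negative */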
747 static inline int compute_mod(int a, int b)
748 {
749     return a < 0 ? a%b + b : a%b;
750 }
751
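/* draw the audio waveform or RDFT spectrum for the audio-only display modes */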
752 static void video_audio_display(VideoState *s)
753 {
754     int i, i_start, x, y1, y, ys, delay, n, nb_display_channels;
755     int ch, channels, h, h2, bgcolor, fgcolor;
756     int64_t time_diff;
757     int rdft_bits, nb_freq;
758
759     for (rdft_bits = 1; (1 << rdft_bits) < 2 * s->height; rdft_bits++)
760         ;
761     nb_freq = 1 << (rdft_bits - 1);
762
763     /* compute display index : center on currently output samples */
764     channels = s->audio_tgt_channels;
765     nb_display_channels = channels;
766     if (!s->paused) {
767         int data_used= s->show_mode == SHOW_MODE_WAVES ? s->width : (2*nb_freq);
768         n = 2 * channels;
769         delay = s->audio_write_buf_size;
770         delay /= n;
771
772         /* to be more precise, we take into account the time spent since
773            the last buffer computation */
774         if (audio_callback_time) {
775             time_diff = av_gettime() - audio_callback_time;
776             delay -= (time_diff * s->audio_tgt_freq) / 1000000;
777         }
778
779         delay += 2 * data_used;
780         if (delay < data_used)
781             delay = data_used;
782
783         i_start= x = compute_mod(s->sample_array_index - delay * channels, SAMPLE_ARRAY_SIZE);
784         if (s->show_mode == SHOW_MODE_WAVES) {
785             h = INT_MIN;
786             for (i = 0; i < 1000; i += channels) {
787                 int idx = (SAMPLE_ARRAY_SIZE + x - i) % SAMPLE_ARRAY_SIZE;
788                 int a = s->sample_array[idx];
789                 int b = s->sample_array[(idx + 4 * channels) % SAMPLE_ARRAY_SIZE];
790                 int c = s->sample_array[(idx + 5 * channels) % SAMPLE_ARRAY_SIZE];
791                 int d = s->sample_array[(idx + 9 * channels) % SAMPLE_ARRAY_SIZE];
792                 int score = a - d;
793                 if (h < score && (b ^ c) < 0) {
794                     h = score;
795                     i_start = idx;
796                 }
797             }
798         }
799
800         s->last_i_start = i_start;
801     } else {
802         i_start = s->last_i_start;
803     }
804
805     bgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0x00);
806     if (s->show_mode == SHOW_MODE_WAVES) {
807         fill_rectangle(screen,
808                        s->xleft, s->ytop, s->width, s->height,
809                        bgcolor);
810
811         fgcolor = SDL_MapRGB(screen->format, 0xff, 0xff, 0xff);
812
813         /* total height for one channel */
814         h = s->height / nb_display_channels;
815         /* graph height / 2 */
816         h2 = (h * 9) / 20;
817         for (ch = 0; ch < nb_display_channels; ch++) {
818             i = i_start + ch;
819             y1 = s->ytop + ch * h + (h / 2); /* position of center line */
820             for (x = 0; x < s->width; x++) {
821                 y = (s->sample_array[i] * h2) >> 15;
822                 if (y < 0) {
823                     y = -y;
824                     ys = y1 - y;
825                 } else {
826                     ys = y1;
827                 }
828                 fill_rectangle(screen,
829                                s->xleft + x, ys, 1, y,
830                                fgcolor);
831                 i += channels;
832                 if (i >= SAMPLE_ARRAY_SIZE)
833                     i -= SAMPLE_ARRAY_SIZE;
834             }
835         }
836
837         fgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0xff);
838
839         for (ch = 1; ch < nb_display_channels; ch++) {
840             y = s->ytop + ch * h;
841             fill_rectangle(screen,
842                            s->xleft, y, s->width, 1,
843                            fgcolor);
844         }
845         SDL_UpdateRect(screen, s->xleft, s->ytop, s->width, s->height);
846     } else {
847         nb_display_channels= FFMIN(nb_display_channels, 2);
848         if (rdft_bits != s->rdft_bits) {
849             av_rdft_end(s->rdft);
850             av_free(s->rdft_data);
851             s->rdft = av_rdft_init(rdft_bits, DFT_R2C);
852             s->rdft_bits = rdft_bits;
853             s->rdft_data = av_malloc(4 * nb_freq * sizeof(*s->rdft_data));
854         }
855         {
856             FFTSample *data[2];
857             for (ch = 0; ch < nb_display_channels; ch++) {
858                 data[ch] = s->rdft_data + 2 * nb_freq * ch;
859                 i = i_start + ch;
860                 for (x = 0; x < 2 * nb_freq; x++) {
861                     double w = (x-nb_freq) * (1.0 / nb_freq);
862                     data[ch][x] = s->sample_array[i] * (1.0 - w * w);
863                     i += channels;
864                     if (i >= SAMPLE_ARRAY_SIZE)
865                         i -= SAMPLE_ARRAY_SIZE;
866                 }
867                 av_rdft_calc(s->rdft, data[ch]);
868             }
869             // least efficient way to do this; we should of course access it directly, but it's more than fast enough
870             for (y = 0; y < s->height; y++) {
871                 double w = 1 / sqrt(nb_freq);
872                 int a = sqrt(w * sqrt(data[0][2 * y + 0] * data[0][2 * y + 0] + data[0][2 * y + 1] * data[0][2 * y + 1]));
873                 int b = (nb_display_channels == 2 ) ? sqrt(w * sqrt(data[1][2 * y + 0] * data[1][2 * y + 0]
874                        + data[1][2 * y + 1] * data[1][2 * y + 1])) : a;
875                 a = FFMIN(a, 255);
876                 b = FFMIN(b, 255);
877                 fgcolor = SDL_MapRGB(screen->format, a, b, (a + b) / 2);
878
879                 fill_rectangle(screen,
880                             s->xpos, s->height-y, 1, 1,
881                             fgcolor);
882             }
883         }
884         SDL_UpdateRect(screen, s->xpos, s->ytop, 1, s->height);
885         if (!s->paused)
886             s->xpos++;
887         if (s->xpos >= s->width)
888             s->xpos= s->xleft;
889     }
890 }
891
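/* tear down a VideoState: stop the read/refresh threads, free the packet queues and pictures */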
892 static void stream_close(VideoState *is)
893 {
894     VideoPicture *vp;
895     int i;
896     /* XXX: use a special url_shutdown call to abort parse cleanly */
897     is->abort_request = 1;
898     SDL_WaitThread(is->read_tid, NULL);
899     SDL_WaitThread(is->refresh_tid, NULL);
900     packet_queue_destroy(&is->videoq);
901     packet_queue_destroy(&is->audioq);
902     packet_queue_destroy(&is->subtitleq);
903
904     /* free all pictures */
905     for (i = 0; i < VIDEO_PICTURE_QUEUE_SIZE; i++) {
906         vp = &is->pictq[i];
907 #if CONFIG_AVFILTER
908         if (vp->picref) {
909             avfilter_unref_buffer(vp->picref);
910             vp->picref = NULL;
911         }
912 #endif
913         if (vp->bmp) {
914             SDL_FreeYUVOverlay(vp->bmp);
915             vp->bmp = NULL;
916         }
917     }
918     SDL_DestroyMutex(is->pictq_mutex);
919     SDL_DestroyCond(is->pictq_cond);
920     SDL_DestroyMutex(is->subpq_mutex);
921     SDL_DestroyCond(is->subpq_cond);
922 #if !CONFIG_AVFILTER
923     if (is->img_convert_ctx)
924         sws_freeContext(is->img_convert_ctx);
925 #endif
926     av_free(is);
927 }
928
929 static void do_exit(VideoState *is)
930 {
931     if (is) {
932         stream_close(is);
933     }
934     av_lockmgr_register(NULL);
935     uninit_opts();
936 #if CONFIG_AVFILTER
937     avfilter_uninit();
938 #endif
939     avformat_network_deinit();
940     if (show_status)
941         printf("\n");
942     SDL_Quit();
943     av_log(NULL, AV_LOG_QUIET, "%s", "");
944     exit(0);
945 }
946
947 static void sigterm_handler(int sig)
948 {
949     exit(123);
950 }
951
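/* open or resize the SDL output surface; falls back to 640x480 if no size is known yet */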
952 static int video_open(VideoState *is, int force_set_video_mode)
953 {
954     int flags = SDL_HWSURFACE | SDL_ASYNCBLIT | SDL_HWACCEL;
955     int w,h;
956     VideoPicture *vp = &is->pictq[is->pictq_rindex];
957
958     if (is_full_screen) flags |= SDL_FULLSCREEN;
959     else                flags |= SDL_RESIZABLE;
960
961     if (is_full_screen && fs_screen_width) {
962         w = fs_screen_width;
963         h = fs_screen_height;
964     } else if (!is_full_screen && screen_width) {
965         w = screen_width;
966         h = screen_height;
967     } else if (vp->width) {
968         w = vp->width;
969         h = vp->height;
970     } else {
971         w = 640;
972         h = 480;
973     }
974     if (screen && is->width == screen->w && screen->w == w
975        && is->height== screen->h && screen->h == h && !force_set_video_mode)
976         return 0;
977     screen = SDL_SetVideoMode(w, h, 0, flags);
978     if (!screen) {
979         fprintf(stderr, "SDL: could not set video mode - exiting\n");
980         do_exit(is);
981     }
982     if (!window_title)
983         window_title = input_filename;
984     SDL_WM_SetCaption(window_title, window_title);
985
986     is->width  = screen->w;
987     is->height = screen->h;
988
989     return 0;
990 }
991
992 /* display the current picture, if any */
993 static void video_display(VideoState *is)
994 {
995     if (!screen)
996         video_open(is, 0);
997     if (is->audio_st && is->show_mode != SHOW_MODE_VIDEO)
998         video_audio_display(is);
999     else if (is->video_st)
1000         video_image_display(is);
1001 }
1002
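/* thread that periodically pushes FF_REFRESH_EVENT to drive the display refresh */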
1003 static int refresh_thread(void *opaque)
1004 {
1005     VideoState *is= opaque;
1006     while (!is->abort_request) {
1007         SDL_Event event;
1008         event.type = FF_REFRESH_EVENT;
1009         event.user.data1 = opaque;
1010         if (!is->refresh && (!is->paused || is->force_refresh)) {
1011             is->refresh = 1;
1012             SDL_PushEvent(&event);
1013         }
1014             // FIXME: ideally we should wait the correct time, but SDL's event passing is so slow it would be silly
1015         usleep(is->audio_st && is->show_mode != SHOW_MODE_VIDEO ? rdftspeed*1000 : 5000);
1016     }
1017     return 0;
1018 }
1019
1020 /* get the current audio clock value */
1021 static double get_audio_clock(VideoState *is)
1022 {
1023     if (is->paused) {
1024         return is->audio_current_pts;
1025     } else {
1026         return is->audio_current_pts_drift + av_gettime() / 1000000.0;
1027     }
1028 }
1029
1030 /* get the current video clock value */
1031 static double get_video_clock(VideoState *is)
1032 {
1033     if (is->paused) {
1034         return is->video_current_pts;
1035     } else {
1036         return is->video_current_pts_drift + av_gettime() / 1000000.0;
1037     }
1038 }
1039
1040 /* get the current external clock value */
1041 static double get_external_clock(VideoState *is)
1042 {
1043     int64_t ti;
1044     ti = av_gettime();
1045     return is->external_clock + ((ti - is->external_clock_time) * 1e-6);
1046 }
1047
1048 /* get the current master clock value */
1049 static double get_master_clock(VideoState *is)
1050 {
1051     double val;
1052
1053     if (is->av_sync_type == AV_SYNC_VIDEO_MASTER) {
1054         if (is->video_st)
1055             val = get_video_clock(is);
1056         else
1057             val = get_audio_clock(is);
1058     } else if (is->av_sync_type == AV_SYNC_AUDIO_MASTER) {
1059         if (is->audio_st)
1060             val = get_audio_clock(is);
1061         else
1062             val = get_video_clock(is);
1063     } else {
1064         val = get_external_clock(is);
1065     }
1066     return val;
1067 }
1068
1069 /* seek in the stream */
1070 static void stream_seek(VideoState *is, int64_t pos, int64_t rel, int seek_by_bytes)
1071 {
1072     if (!is->seek_req) {
1073         is->seek_pos = pos;
1074         is->seek_rel = rel;
1075         is->seek_flags &= ~AVSEEK_FLAG_BYTE;
1076         if (seek_by_bytes)
1077             is->seek_flags |= AVSEEK_FLAG_BYTE;
1078         is->seek_req = 1;
1079     }
1080 }
1081
1082 /* pause or resume the video */
1083 static void stream_toggle_pause(VideoState *is)
1084 {
1085     if (is->paused) {
1086         is->frame_timer += av_gettime() / 1000000.0 + is->video_current_pts_drift - is->video_current_pts;
1087         if (is->read_pause_return != AVERROR(ENOSYS)) {
1088             is->video_current_pts = is->video_current_pts_drift + av_gettime() / 1000000.0;
1089         }
1090         is->video_current_pts_drift = is->video_current_pts - av_gettime() / 1000000.0;
1091     }
1092     is->paused = !is->paused;
1093 }
1094
1095 static double compute_target_delay(double delay, VideoState *is)
1096 {
1097     double sync_threshold, diff = 0;
1098
1099     /* update delay to follow master synchronisation source */
1100     if (((is->av_sync_type == AV_SYNC_AUDIO_MASTER && is->audio_st) ||
1101          is->av_sync_type == AV_SYNC_EXTERNAL_CLOCK)) {
1102         /* if video is slave, we try to correct big delays by
1103            duplicating or deleting a frame */
1104         diff = get_video_clock(is) - get_master_clock(is);
1105
1106         /* skip or repeat frame. We take into account the
1107            delay to compute the threshold. I still don't know
1108            if it is the best guess */
1109         sync_threshold = FFMAX(AV_SYNC_THRESHOLD, delay);
1110         if (fabs(diff) < AV_NOSYNC_THRESHOLD) {
1111             if (diff <= -sync_threshold)
1112                 delay = 0;
1113             else if (diff >= sync_threshold)
1114                 delay = 2 * delay;
1115         }
1116     }
1117
1118     av_dlog(NULL, "video: delay=%0.3f A-V=%f\n",
1119             delay, -diff);
1120
1121     return delay;
1122 }
1123
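/* advance the picture queue read index and free one slot for the decoder */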
1124 static void pictq_next_picture(VideoState *is) {
1125     /* update queue size and signal for next picture */
1126     if (++is->pictq_rindex == VIDEO_PICTURE_QUEUE_SIZE)
1127         is->pictq_rindex = 0;
1128
1129     SDL_LockMutex(is->pictq_mutex);
1130     is->pictq_size--;
1131     SDL_CondSignal(is->pictq_cond);
1132     SDL_UnlockMutex(is->pictq_mutex);
1133 }
1134
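/* remember the pts, position and wall-clock drift of the frame being displayed */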
1135 static void update_video_pts(VideoState *is, double pts, int64_t pos) {
1136     double time = av_gettime() / 1000000.0;
1137     /* update current video pts */
1138     is->video_current_pts = pts;
1139     is->video_current_pts_drift = is->video_current_pts - time;
1140     is->video_current_pos = pos;
1141     is->frame_last_pts = pts;
1142 }
1143
1144 /* called to display each frame */
1145 static void video_refresh(void *opaque)
1146 {
1147     VideoState *is = opaque;
1148     VideoPicture *vp;
1149     double time;
1150
1151     SubPicture *sp, *sp2;
1152
1153     if (is->video_st) {
1154 retry:
1155         if (is->pictq_size == 0) {
1156             SDL_LockMutex(is->pictq_mutex);
1157             if (is->frame_last_dropped_pts != AV_NOPTS_VALUE && is->frame_last_dropped_pts > is->frame_last_pts) {
1158                 update_video_pts(is, is->frame_last_dropped_pts, is->frame_last_dropped_pos);
1159                 is->frame_last_dropped_pts = AV_NOPTS_VALUE;
1160             }
1161             SDL_UnlockMutex(is->pictq_mutex);
1162             // nothing to do, no picture to display in the queue
1163         } else {
1164             double last_duration, duration, delay;
1165             /* dequeue the picture */
1166             vp = &is->pictq[is->pictq_rindex];
1167
1168             if (vp->skip) {
1169                 pictq_next_picture(is);
1170                 goto retry;
1171             }
1172
1173             if (is->paused)
1174                 goto display;
1175
1176             /* compute nominal last_duration */
1177             last_duration = vp->pts - is->frame_last_pts;
1178             if (last_duration > 0 && last_duration < 10.0) {
1179                 /* if duration of the last frame was sane, update last_duration in video state */
1180                 is->frame_last_duration = last_duration;
1181             }
1182             delay = compute_target_delay(is->frame_last_duration, is);
1183
1184             time= av_gettime()/1000000.0;
1185             if (time < is->frame_timer + delay)
1186                 return;
1187
1188             if (delay > 0)
1189                 is->frame_timer += delay * FFMAX(1, floor((time-is->frame_timer) / delay));
1190
1191             SDL_LockMutex(is->pictq_mutex);
1192             update_video_pts(is, vp->pts, vp->pos);
1193             SDL_UnlockMutex(is->pictq_mutex);
1194
1195             if (is->pictq_size > 1) {
1196                 VideoPicture *nextvp = &is->pictq[(is->pictq_rindex + 1) % VIDEO_PICTURE_QUEUE_SIZE];
1197                 duration = nextvp->pts - vp->pts; // More accurate this way; 1/time_base often does not reflect the real frame rate
1198             } else {
1199                 duration = vp->duration;
1200             }
1201
1202             if((framedrop>0 || (framedrop && is->audio_st)) && time > is->frame_timer + duration){
1203                 if(is->pictq_size > 1){
1204                     is->frame_drops_late++;
1205                     pictq_next_picture(is);
1206                     goto retry;
1207                 }
1208             }
1209
1210             if (is->subtitle_st) {
1211                 if (is->subtitle_stream_changed) {
1212                     SDL_LockMutex(is->subpq_mutex);
1213
1214                     while (is->subpq_size) {
1215                         free_subpicture(&is->subpq[is->subpq_rindex]);
1216
1217                         /* update queue size and signal for next picture */
1218                         if (++is->subpq_rindex == SUBPICTURE_QUEUE_SIZE)
1219                             is->subpq_rindex = 0;
1220
1221                         is->subpq_size--;
1222                     }
1223                     is->subtitle_stream_changed = 0;
1224
1225                     SDL_CondSignal(is->subpq_cond);
1226                     SDL_UnlockMutex(is->subpq_mutex);
1227                 } else {
1228                     if (is->subpq_size > 0) {
1229                         sp = &is->subpq[is->subpq_rindex];
1230
1231                         if (is->subpq_size > 1)
1232                             sp2 = &is->subpq[(is->subpq_rindex + 1) % SUBPICTURE_QUEUE_SIZE];
1233                         else
1234                             sp2 = NULL;
1235
1236                         if ((is->video_current_pts > (sp->pts + ((float) sp->sub.end_display_time / 1000)))
1237                                 || (sp2 && is->video_current_pts > (sp2->pts + ((float) sp2->sub.start_display_time / 1000))))
1238                         {
1239                             free_subpicture(sp);
1240
1241                             /* update queue size and signal for next picture */
1242                             if (++is->subpq_rindex == SUBPICTURE_QUEUE_SIZE)
1243                                 is->subpq_rindex = 0;
1244
1245                             SDL_LockMutex(is->subpq_mutex);
1246                             is->subpq_size--;
1247                             SDL_CondSignal(is->subpq_cond);
1248                             SDL_UnlockMutex(is->subpq_mutex);
1249                         }
1250                     }
1251                 }
1252             }
1253
1254 display:
1255             /* display picture */
1256             if (!display_disable)
1257                 video_display(is);
1258
1259             if (!is->paused)
1260                 pictq_next_picture(is);
1261         }
1262     } else if (is->audio_st) {
1263         /* draw the next audio frame */
1264
1265         /* if only audio stream, then display the audio bars (better
1266            than nothing, just to test the implementation) */
1267
1268         /* display picture */
1269         if (!display_disable)
1270             video_display(is);
1271     }
1272     is->force_refresh = 0;
1273     if (show_status) {
1274         static int64_t last_time;
1275         int64_t cur_time;
1276         int aqsize, vqsize, sqsize;
1277         double av_diff;
1278
1279         cur_time = av_gettime();
1280         if (!last_time || (cur_time - last_time) >= 30000) {
1281             aqsize = 0;
1282             vqsize = 0;
1283             sqsize = 0;
1284             if (is->audio_st)
1285                 aqsize = is->audioq.size;
1286             if (is->video_st)
1287                 vqsize = is->videoq.size;
1288             if (is->subtitle_st)
1289                 sqsize = is->subtitleq.size;
1290             av_diff = 0;
1291             if (is->audio_st && is->video_st)
1292                 av_diff = get_audio_clock(is) - get_video_clock(is);
1293             printf("%7.2f A-V:%7.3f fd=%4d aq=%5dKB vq=%5dKB sq=%5dB f=%"PRId64"/%"PRId64"   \r",
1294                    get_master_clock(is),
1295                    av_diff,
1296                    is->frame_drops_early + is->frame_drops_late,
1297                    aqsize / 1024,
1298                    vqsize / 1024,
1299                    sqsize,
1300                    is->video_st ? is->video_st->codec->pts_correction_num_faulty_dts : 0,
1301                    is->video_st ? is->video_st->codec->pts_correction_num_faulty_pts : 0);
1302             fflush(stdout);
1303             last_time = cur_time;
1304         }
1305     }
1306 }
1307
1308 /* allocate a picture (this needs to be done in the main thread to avoid
1309    potential locking problems) */
1310 static void alloc_picture(AllocEventProps *event_props)
1311 {
1312     VideoState *is = event_props->is;
1313     AVFrame *frame = event_props->frame;
1314     VideoPicture *vp;
1315
1316     vp = &is->pictq[is->pictq_windex];
1317
1318     if (vp->bmp)
1319         SDL_FreeYUVOverlay(vp->bmp);
1320
1321 #if CONFIG_AVFILTER
1322     if (vp->picref)
1323         avfilter_unref_buffer(vp->picref);
1324     vp->picref = NULL;
1325 #endif
1326
1327     vp->width   = frame->width;
1328     vp->height  = frame->height;
1329     vp->pix_fmt = frame->format;
1330
1331     video_open(event_props->is, 0);
1332
1333     vp->bmp = SDL_CreateYUVOverlay(vp->width, vp->height,
1334                                    SDL_YV12_OVERLAY,
1335                                    screen);
1336     if (!vp->bmp || vp->bmp->pitches[0] < vp->width) {
1337         /* SDL allocates a buffer smaller than requested if the video
1338          * overlay hardware is unable to support the requested size. */
1339         fprintf(stderr, "Error: the video system does not support an image\n"
1340                         "size of %dx%d pixels. Try using -lowres or -vf \"scale=w:h\"\n"
1341                         "to reduce the image size.\n", vp->width, vp->height );
1342         do_exit(is);
1343     }
1344
1345     SDL_LockMutex(is->pictq_mutex);
1346     vp->allocated = 1;
1347     SDL_CondSignal(is->pictq_cond);
1348     SDL_UnlockMutex(is->pictq_mutex);
1349 }
1350
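/* convert src_frame to the SDL overlay format and append it to the picture queue */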
1351 static int queue_picture(VideoState *is, AVFrame *src_frame, double pts1, int64_t pos)
1352 {
1353     VideoPicture *vp;
1354     double frame_delay, pts = pts1;
1355
1356     /* compute the exact PTS for the picture if it is omitted in the stream
1357      * pts1 is the dts of the pkt / pts of the frame */
1358     if (pts != 0) {
1359         /* update video clock with pts, if present */
1360         is->video_clock = pts;
1361     } else {
1362         pts = is->video_clock;
1363     }
1364     /* update video clock for next frame */
1365     frame_delay = av_q2d(is->video_st->codec->time_base);
1366     /* for MPEG2, the frame can be repeated, so we update the
1367        clock accordingly */
1368     frame_delay += src_frame->repeat_pict * (frame_delay * 0.5);
1369     is->video_clock += frame_delay;
1370
1371 #if defined(DEBUG_SYNC) && 0
1372     printf("frame_type=%c clock=%0.3f pts=%0.3f\n",
1373            av_get_picture_type_char(src_frame->pict_type), pts, pts1);
1374 #endif
1375
1376     /* wait until we have space to put a new picture */
1377     SDL_LockMutex(is->pictq_mutex);
1378
1379     while (is->pictq_size >= VIDEO_PICTURE_QUEUE_SIZE &&
1380            !is->videoq.abort_request) {
1381         SDL_CondWait(is->pictq_cond, is->pictq_mutex);
1382     }
1383     SDL_UnlockMutex(is->pictq_mutex);
1384
1385     if (is->videoq.abort_request)
1386         return -1;
1387
1388     vp = &is->pictq[is->pictq_windex];
1389
1390     vp->duration = frame_delay;
1391
1392     /* alloc or resize hardware picture buffer */
1393     if (!vp->bmp || vp->reallocate ||
1394         vp->width  != src_frame->width ||
1395         vp->height != src_frame->height) {
1396         SDL_Event event;
1397         AllocEventProps event_props;
1398
1399         event_props.frame = src_frame;
1400         event_props.is = is;
1401
1402         vp->allocated  = 0;
1403         vp->reallocate = 0;
1404
1405         /* the allocation must be done in the main thread to avoid
1406            locking problems. We wait in this block for the event to complete,
1407            so we can pass a pointer to event_props to it. */
1408         event.type = FF_ALLOC_EVENT;
1409         event.user.data1 = &event_props;
1410         SDL_PushEvent(&event);
1411
1412         /* wait until the picture is allocated */
1413         SDL_LockMutex(is->pictq_mutex);
1414         while (!vp->allocated && !is->videoq.abort_request) {
1415             SDL_CondWait(is->pictq_cond, is->pictq_mutex);
1416         }
1417         /* if the queue is aborted, we have to pop the pending ALLOC event or wait for the allocation to complete */
1418         if (is->videoq.abort_request && SDL_PeepEvents(&event, 1, SDL_GETEVENT, SDL_EVENTMASK(FF_ALLOC_EVENT)) != 1) {
1419             while (!vp->allocated) {
1420                 SDL_CondWait(is->pictq_cond, is->pictq_mutex);
1421             }
1422         }
1423         SDL_UnlockMutex(is->pictq_mutex);
1424
1425         if (is->videoq.abort_request)
1426             return -1;
1427     }
1428
1429     /* if the frame is not skipped, then display it */
1430     if (vp->bmp) {
1431         AVPicture pict = { { 0 } };
1432 #if CONFIG_AVFILTER
1433         if (vp->picref)
1434             avfilter_unref_buffer(vp->picref);
1435         vp->picref = src_frame->opaque;
1436 #endif
1437
1438         /* get a pointer on the bitmap */
1439         SDL_LockYUVOverlay (vp->bmp);
1440
1441         pict.data[0] = vp->bmp->pixels[0];
1442         pict.data[1] = vp->bmp->pixels[2];
1443         pict.data[2] = vp->bmp->pixels[1];
1444
1445         pict.linesize[0] = vp->bmp->pitches[0];
1446         pict.linesize[1] = vp->bmp->pitches[2];
1447         pict.linesize[2] = vp->bmp->pitches[1];
1448
1449 #if CONFIG_AVFILTER
1450         // FIXME use direct rendering
1451         av_picture_copy(&pict, (AVPicture *)src_frame,
1452                         vp->pix_fmt, vp->width, vp->height);
1453         vp->sample_aspect_ratio = vp->picref->video->sample_aspect_ratio;
1454 #else
1455         sws_flags = av_get_int(sws_opts, "sws_flags", NULL);
1456         is->img_convert_ctx = sws_getCachedContext(is->img_convert_ctx,
1457             vp->width, vp->height, vp->pix_fmt, vp->width, vp->height,
1458             PIX_FMT_YUV420P, sws_flags, NULL, NULL, NULL);
1459         if (is->img_convert_ctx == NULL) {
1460             fprintf(stderr, "Cannot initialize the conversion context\n");
1461             exit(1);
1462         }
1463         sws_scale(is->img_convert_ctx, src_frame->data, src_frame->linesize,
1464                   0, vp->height, pict.data, pict.linesize);
1465         vp->sample_aspect_ratio = av_guess_sample_aspect_ratio(is->ic, is->video_st, src_frame);
1466 #endif
1467         /* update the bitmap content */
1468         SDL_UnlockYUVOverlay(vp->bmp);
1469
1470         vp->pts = pts;
1471         vp->pos = pos;
1472         vp->skip = 0;
1473
1474         /* now we can update the picture count */
1475         if (++is->pictq_windex == VIDEO_PICTURE_QUEUE_SIZE)
1476             is->pictq_windex = 0;
1477         SDL_LockMutex(is->pictq_mutex);
1478         is->pictq_size++;
1479         SDL_UnlockMutex(is->pictq_mutex);
1480     }
1481     return 0;
1482 }
1483
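/* decode the next video packet; returns 1 if a frame was produced, 0 otherwise, < 0 on abort */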
1484 static int get_video_frame(VideoState *is, AVFrame *frame, int64_t *pts, AVPacket *pkt)
1485 {
1486     int got_picture, i;
1487
1488     if (packet_queue_get(&is->videoq, pkt, 1) < 0)
1489         return -1;
1490
1491     if (pkt->data == flush_pkt.data) {
1492         avcodec_flush_buffers(is->video_st->codec);
1493
1494         SDL_LockMutex(is->pictq_mutex);
1495         // Make sure there are no long delay timers (ideally we should just flush the queue, but that's harder)
1496         for (i = 0; i < VIDEO_PICTURE_QUEUE_SIZE; i++) {
1497             is->pictq[i].skip = 1;
1498         }
1499         while (is->pictq_size && !is->videoq.abort_request) {
1500             SDL_CondWait(is->pictq_cond, is->pictq_mutex);
1501         }
1502         is->video_current_pos = -1;
1503         is->frame_last_pts = AV_NOPTS_VALUE;
1504         is->frame_last_duration = 0;
1505         is->frame_timer = (double)av_gettime() / 1000000.0;
1506         is->frame_last_dropped_pts = AV_NOPTS_VALUE;
1507         SDL_UnlockMutex(is->pictq_mutex);
1508
1509         return 0;
1510     }
1511
1512     avcodec_decode_video2(is->video_st->codec, frame, &got_picture, pkt);
1513
1514     if (got_picture) {
1515         int ret = 1;
1516
1517         if (decoder_reorder_pts == -1) {
1518             *pts = av_frame_get_best_effort_timestamp(frame);
1519         } else if (decoder_reorder_pts) {
1520             *pts = frame->pkt_pts;
1521         } else {
1522             *pts = frame->pkt_dts;
1523         }
1524
1525         if (*pts == AV_NOPTS_VALUE) {
1526             *pts = 0;
1527         }
1528
1529         if (((is->av_sync_type == AV_SYNC_AUDIO_MASTER && is->audio_st) || is->av_sync_type == AV_SYNC_EXTERNAL_CLOCK) &&
1530              (framedrop>0 || (framedrop && is->audio_st))) {
1531             SDL_LockMutex(is->pictq_mutex);
1532             if (is->frame_last_pts != AV_NOPTS_VALUE && *pts) {
1533                 double clockdiff = get_video_clock(is) - get_master_clock(is);
1534                 double dpts = av_q2d(is->video_st->time_base) * *pts;
1535                 double ptsdiff = dpts - is->frame_last_pts;
1536                 if (fabs(clockdiff) < AV_NOSYNC_THRESHOLD &&
1537                      ptsdiff > 0 && ptsdiff < AV_NOSYNC_THRESHOLD &&
1538                      clockdiff + ptsdiff - is->frame_last_filter_delay < 0) {
1539                     is->frame_last_dropped_pos = pkt->pos;
1540                     is->frame_last_dropped_pts = dpts;
1541                     is->frame_drops_early++;
1542                     ret = 0;
1543                 }
1544             }
1545             SDL_UnlockMutex(is->pictq_mutex);
1546         }
1547
1548         if (ret)
1549             is->frame_last_returned_time = av_gettime() / 1000000.0;
1550
1551         return ret;
1552     }
1553     return 0;
1554 }
1555
1556 #if CONFIG_AVFILTER
1557 typedef struct {
1558     VideoState *is;
1559     AVFrame *frame;
1560     int use_dr1;
1561 } FilterPriv;
1562
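/* get_buffer callback that lets the decoder render directly into a buffer from the filter graph */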
1563 static int input_get_buffer(AVCodecContext *codec, AVFrame *pic)
1564 {
1565     AVFilterContext *ctx = codec->opaque;
1566     AVFilterBufferRef  *ref;
1567     int perms = AV_PERM_WRITE;
1568     int i, w, h, stride[AV_NUM_DATA_POINTERS];
1569     unsigned edge;
1570     int pixel_size;
1571
1572     av_assert0(codec->flags & CODEC_FLAG_EMU_EDGE);
1573
1574     if (codec->codec->capabilities & CODEC_CAP_NEG_LINESIZES)
1575         perms |= AV_PERM_NEG_LINESIZES;
1576
1577     if (pic->buffer_hints & FF_BUFFER_HINTS_VALID) {
1578         if (pic->buffer_hints & FF_BUFFER_HINTS_READABLE) perms |= AV_PERM_READ;
1579         if (pic->buffer_hints & FF_BUFFER_HINTS_PRESERVE) perms |= AV_PERM_PRESERVE;
1580         if (pic->buffer_hints & FF_BUFFER_HINTS_REUSABLE) perms |= AV_PERM_REUSE2;
1581     }
1582     if (pic->reference) perms |= AV_PERM_READ | AV_PERM_PRESERVE;
1583
1584     w = codec->width;
1585     h = codec->height;
1586
1587     if(av_image_check_size(w, h, 0, codec) || codec->pix_fmt<0)
1588         return -1;
1589
1590     avcodec_align_dimensions2(codec, &w, &h, stride);
1591     edge = codec->flags & CODEC_FLAG_EMU_EDGE ? 0 : avcodec_get_edge_width();
1592     w += edge << 1;
1593     h += edge << 1;
1594     if (codec->pix_fmt != ctx->outputs[0]->format) {
1595         av_log(codec, AV_LOG_ERROR, "Pixel format mismatches %d %d\n", codec->pix_fmt, ctx->outputs[0]->format);
1596         return -1;
1597     }
1598     if (!(ref = avfilter_get_video_buffer(ctx->outputs[0], perms, w, h)))
1599         return -1;
1600
1601     pixel_size = av_pix_fmt_descriptors[ref->format].comp[0].step_minus1 + 1;
1602     ref->video->w = codec->width;
1603     ref->video->h = codec->height;
1604     for (i = 0; i < 4; i ++) {
1605         unsigned hshift = (i == 1 || i == 2) ? av_pix_fmt_descriptors[ref->format].log2_chroma_w : 0;
1606         unsigned vshift = (i == 1 || i == 2) ? av_pix_fmt_descriptors[ref->format].log2_chroma_h : 0;
1607
1608         pic->base[i]     = ref->data[i];
1609         if (ref->data[i]) {
1610             ref->data[i]    += ((edge * pixel_size) >> hshift) + ((edge * ref->linesize[i]) >> vshift);
1611         }
1612         pic->data[i]     = ref->data[i];
1613         pic->linesize[i] = ref->linesize[i];
1614     }
1615     pic->opaque = ref;
1616     pic->type   = FF_BUFFER_TYPE_USER;
1617     pic->reordered_opaque = codec->reordered_opaque;
1618     pic->width               = codec->width;
1619     pic->height              = codec->height;
1620     pic->format              = codec->pix_fmt;
1621     pic->sample_aspect_ratio = codec->sample_aspect_ratio;
1622     if (codec->pkt) pic->pkt_pts = codec->pkt->pts;
1623     else            pic->pkt_pts = AV_NOPTS_VALUE;
1624     return 0;
1625 }
1626
1627 static void input_release_buffer(AVCodecContext *codec, AVFrame *pic)
1628 {
1629     memset(pic->data, 0, sizeof(pic->data));
1630     avfilter_unref_buffer(pic->opaque);
1631 }
1632
1633 static int input_reget_buffer(AVCodecContext *codec, AVFrame *pic)
1634 {
1635     AVFilterBufferRef *ref = pic->opaque;
1636
1637     if (pic->data[0] == NULL) {
1638         pic->buffer_hints |= FF_BUFFER_HINTS_READABLE;
1639         return codec->get_buffer(codec, pic);
1640     }
1641
1642     if ((codec->width != ref->video->w) || (codec->height != ref->video->h) ||
1643         (codec->pix_fmt != ref->format)) {
1644         av_log(codec, AV_LOG_ERROR, "Picture properties changed.\n");
1645         return -1;
1646     }
1647
1648     pic->reordered_opaque = codec->reordered_opaque;
1649     if (codec->pkt) pic->pkt_pts = codec->pkt->pts;
1650     else            pic->pkt_pts = AV_NOPTS_VALUE;
1651     return 0;
1652 }
1653
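/* init callback of the custom "ffplay_input" source filter: store the
 * VideoState, let the decoder find this filter context through its opaque
 * pointer and, if the codec supports CODEC_CAP_DR1, install the
 * get/release/reget_buffer callbacks defined above. */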
1654 static int input_init(AVFilterContext *ctx, const char *args, void *opaque)
1655 {
1656     FilterPriv *priv = ctx->priv;
1657     AVCodecContext *codec;
1658     if (!opaque) return -1;
1659
1660     priv->is = opaque;
1661     codec    = priv->is->video_st->codec;
1662     codec->opaque = ctx;
1663     if (codec->codec->capabilities & CODEC_CAP_DR1) {
1664         av_assert0(codec->flags & CODEC_FLAG_EMU_EDGE);
1665         priv->use_dr1 = 1;
1666         codec->get_buffer     = input_get_buffer;
1667         codec->release_buffer = input_release_buffer;
1668         codec->reget_buffer   = input_reget_buffer;
1669         codec->thread_safe_callbacks = 1;
1670     }
1671
1672     priv->frame = avcodec_alloc_frame();
1673
1674     return 0;
1675 }
1676
1677 static void input_uninit(AVFilterContext *ctx)
1678 {
1679     FilterPriv *priv = ctx->priv;
1680     av_free(priv->frame);
1681 }
1682
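/* request_frame callback of the source filter: decode one video frame,
 * wrap it in an AVFilterBufferRef (reusing the DR1 buffer when available,
 * copying the data otherwise) and send it downstream as a single slice. */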
1683 static int input_request_frame(AVFilterLink *link)
1684 {
1685     FilterPriv *priv = link->src->priv;
1686     AVFilterBufferRef *picref;
1687     int64_t pts = 0;
1688     AVPacket pkt;
1689     int ret;
1690
1691     while (!(ret = get_video_frame(priv->is, priv->frame, &pts, &pkt)))
1692         av_free_packet(&pkt);
1693     if (ret < 0)
1694         return -1;
1695
1696     if (priv->use_dr1 && priv->frame->opaque) {
1697         picref = avfilter_ref_buffer(priv->frame->opaque, ~0);
1698     } else {
1699         picref = avfilter_get_video_buffer(link, AV_PERM_WRITE, priv->frame->width, priv->frame->height);
1700         av_image_copy(picref->data, picref->linesize,
1701                       (const uint8_t **)(void **)priv->frame->data, priv->frame->linesize,
1702                       picref->format, priv->frame->width, priv->frame->height);
1703     }
1704     av_free_packet(&pkt);
1705
1706     avfilter_copy_frame_props(picref, priv->frame);
1707     picref->video->sample_aspect_ratio = av_guess_sample_aspect_ratio(priv->is->ic, priv->is->video_st, priv->frame);
1708     picref->pts = pts;
1709
1710     avfilter_start_frame(link, picref);
1711     avfilter_draw_slice(link, 0, picref->video->h, 1);
1712     avfilter_end_frame(link);
1713
1714     return 0;
1715 }
1716
1717 static int input_query_formats(AVFilterContext *ctx)
1718 {
1719     FilterPriv *priv = ctx->priv;
1720     enum PixelFormat pix_fmts[] = {
1721         priv->is->video_st->codec->pix_fmt, PIX_FMT_NONE
1722     };
1723
1724     avfilter_set_common_pixel_formats(ctx, avfilter_make_format_list(pix_fmts));
1725     return 0;
1726 }
1727
1728 static int input_config_props(AVFilterLink *link)
1729 {
1730     FilterPriv *priv  = link->src->priv;
1731     AVStream *s = priv->is->video_st;
1732
1733     link->w = s->codec->width;
1734     link->h = s->codec->height;
1735     link->sample_aspect_ratio = s->sample_aspect_ratio.num ?
1736         s->sample_aspect_ratio : s->codec->sample_aspect_ratio;
1737     link->time_base = s->time_base;
1738
1739     return 0;
1740 }
1741
1742 static AVFilter input_filter =
1743 {
1744     .name      = "ffplay_input",
1745
1746     .priv_size = sizeof(FilterPriv),
1747
1748     .init      = input_init,
1749     .uninit    = input_uninit,
1750
1751     .query_formats = input_query_formats,
1752
1753     .inputs    = (AVFilterPad[]) {{ .name = NULL }},
1754     .outputs   = (AVFilterPad[]) {{ .name = "default",
1755                                     .type = AVMEDIA_TYPE_VIDEO,
1756                                     .request_frame = input_request_frame,
1757                                     .config_props  = input_config_props, },
1758                                   { .name = NULL }},
1759 };
1760
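/* Build the video filter graph:
 *
 *   ffplay_input (source) -> [optional user chain from -vf] -> format=yuv420p -> buffersink
 *
 * The "format" filter forces the frames reaching the sink to YUV420P.  The
 * user chain can be any libavfilter graph description, for example something
 * like "crop=in_w/2:in_h". */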
1761 static int configure_video_filters(AVFilterGraph *graph, VideoState *is, const char *vfilters)
1762 {
1763     static const enum PixelFormat pix_fmts[] = { PIX_FMT_YUV420P, PIX_FMT_NONE };
1764     char sws_flags_str[128];
1765     int ret;
1766     AVBufferSinkParams *buffersink_params = av_buffersink_params_alloc();
1767     AVFilterContext *filt_src = NULL, *filt_out = NULL, *filt_format;
1768     snprintf(sws_flags_str, sizeof(sws_flags_str), "flags=%d", sws_flags);
1769     graph->scale_sws_opts = av_strdup(sws_flags_str);
1770
1771     if ((ret = avfilter_graph_create_filter(&filt_src, &input_filter, "src",
1772                                             NULL, is, graph)) < 0)
1773         return ret;
1774
1775 #if FF_API_OLD_VSINK_API
1776     ret = avfilter_graph_create_filter(&filt_out,
1777                                        avfilter_get_by_name("buffersink"),
1778                                        "out", NULL, pix_fmts, graph);
1779 #else
1780     buffersink_params->pixel_fmts = pix_fmts;
1781     ret = avfilter_graph_create_filter(&filt_out,
1782                                        avfilter_get_by_name("buffersink"),
1783                                        "out", NULL, buffersink_params, graph);
1784 #endif
1785     av_freep(&buffersink_params);
1786     if (ret < 0)
1787         return ret;
1788
1789     if ((ret = avfilter_graph_create_filter(&filt_format,
1790                                             avfilter_get_by_name("format"),
1791                                             "format", "yuv420p", NULL, graph)) < 0)
1792         return ret;
1793     if ((ret = avfilter_link(filt_format, 0, filt_out, 0)) < 0)
1794         return ret;
1795
1796
1797     if (vfilters) {
1798         AVFilterInOut *outputs = avfilter_inout_alloc();
1799         AVFilterInOut *inputs  = avfilter_inout_alloc();
1800
1801         outputs->name    = av_strdup("in");
1802         outputs->filter_ctx = filt_src;
1803         outputs->pad_idx = 0;
1804         outputs->next    = NULL;
1805
1806         inputs->name    = av_strdup("out");
1807         inputs->filter_ctx = filt_format;
1808         inputs->pad_idx = 0;
1809         inputs->next    = NULL;
1810
1811         if ((ret = avfilter_graph_parse(graph, vfilters, &inputs, &outputs, NULL)) < 0)
1812             return ret;
1813     } else {
1814         if ((ret = avfilter_link(filt_src, 0, filt_format, 0)) < 0)
1815             return ret;
1816     }
1817
1818     if ((ret = avfilter_graph_config(graph, NULL)) < 0)
1819         return ret;
1820
1821     is->out_video_filter = filt_out;
1822
1823     return ret;
1824 }
1825
1826 #endif  /* CONFIG_AVFILTER */
1827
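/* Video decoding thread: obtains decoded frames (from the decoder directly,
 * or from the buffersink when compiled with libavfilter), rescales their PTS
 * into the stream time base, measures the decode/filter delay used by the
 * frame-dropping logic, and hands the frames to queue_picture(). When the
 * picture size changes, the filter graph is rebuilt. */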
1828 static int video_thread(void *arg)
1829 {
1830     VideoState *is = arg;
1831     AVFrame *frame = avcodec_alloc_frame();
1832     int64_t pts_int = AV_NOPTS_VALUE, pos = -1;
1833     double pts;
1834     int ret;
1835
1836 #if CONFIG_AVFILTER
1837     AVFilterGraph *graph = avfilter_graph_alloc();
1838     AVFilterContext *filt_out = NULL;
1839     int last_w = is->video_st->codec->width;
1840     int last_h = is->video_st->codec->height;
1841
1842     if ((ret = configure_video_filters(graph, is, vfilters)) < 0) {
1843         SDL_Event event;
1844         event.type = FF_QUIT_EVENT;
1845         event.user.data1 = is;
1846         SDL_PushEvent(&event);
1847         goto the_end;
1848     }
1849     filt_out = is->out_video_filter;
1850 #endif
1851
1852     for (;;) {
1853 #if !CONFIG_AVFILTER
1854         AVPacket pkt;
1855 #else
1856         AVFilterBufferRef *picref;
1857         AVRational tb = filt_out->inputs[0]->time_base;
1858 #endif
1859         while (is->paused && !is->videoq.abort_request)
1860             SDL_Delay(10);
1861 #if CONFIG_AVFILTER
1862         if (   last_w != is->video_st->codec->width
1863             || last_h != is->video_st->codec->height) {
1864             av_log(NULL, AV_LOG_INFO, "Frame changed from size:%dx%d to size:%dx%d\n",
1865                    last_w, last_h, is->video_st->codec->width, is->video_st->codec->height);
1866             avfilter_graph_free(&graph);
1867             graph = avfilter_graph_alloc();
1868             if ((ret = configure_video_filters(graph, is, vfilters)) < 0)
1869                 goto the_end;
1870             filt_out = is->out_video_filter;
1871             last_w = is->video_st->codec->width;
1872             last_h = is->video_st->codec->height;
1873         }
1874         ret = av_buffersink_get_buffer_ref(filt_out, &picref, 0);
1875         if (picref) {
1876             avfilter_fill_frame_from_video_buffer_ref(frame, picref);
1877             pts_int = picref->pts;
1878             tb      = filt_out->inputs[0]->time_base;
1879             pos     = picref->pos;
1880             frame->opaque = picref;
1881
1882             ret = 1;
1883         }
1884
1885         if (ret >= 0 && av_cmp_q(tb, is->video_st->time_base)) {
1886             av_unused int64_t pts1 = pts_int;
1887             pts_int = av_rescale_q(pts_int, tb, is->video_st->time_base);
1888             av_dlog(NULL, "video_thread(): "
1889                     "tb:%d/%d pts:%"PRId64" -> tb:%d/%d pts:%"PRId64"\n",
1890                     tb.num, tb.den, pts1,
1891                     is->video_st->time_base.num, is->video_st->time_base.den, pts_int);
1892         }
1893 #else
1894         ret = get_video_frame(is, frame, &pts_int, &pkt);
1895         pos = pkt.pos;
1896         av_free_packet(&pkt);
1897         if (ret == 0)
1898             continue;
1899 #endif
1900
1901         if (ret < 0)
1902             goto the_end;
1903
1904         is->frame_last_filter_delay = av_gettime() / 1000000.0 - is->frame_last_returned_time;
1905         if (fabs(is->frame_last_filter_delay) > AV_NOSYNC_THRESHOLD / 10.0)
1906             is->frame_last_filter_delay = 0;
1907
1908 #if CONFIG_AVFILTER
1909         if (!picref)
1910             continue;
1911 #endif
1912
1913         pts = pts_int * av_q2d(is->video_st->time_base);
1914
1915         ret = queue_picture(is, frame, pts, pos);
1916
1917         if (ret < 0)
1918             goto the_end;
1919
1920         if (is->step)
1921             stream_toggle_pause(is);
1922     }
1923  the_end:
1924     avcodec_flush_buffers(is->video_st->codec);
1925 #if CONFIG_AVFILTER
1926     av_freep(&vfilters);
1927     avfilter_graph_free(&graph);
1928 #endif
1929     av_free(frame);
1930     return 0;
1931 }
1932
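/* Subtitle decoding thread: reads packets from the subtitle queue, decodes
 * them, converts the palette of bitmap subtitles from RGBA to YUVA using the
 * CCIR coefficients, and stores the result in the subpicture queue. */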
1933 static int subtitle_thread(void *arg)
1934 {
1935     VideoState *is = arg;
1936     SubPicture *sp;
1937     AVPacket pkt1, *pkt = &pkt1;
1938     int got_subtitle;
1939     double pts;
1940     int i, j;
1941     int r, g, b, y, u, v, a;
1942
1943     for (;;) {
1944         while (is->paused && !is->subtitleq.abort_request) {
1945             SDL_Delay(10);
1946         }
1947         if (packet_queue_get(&is->subtitleq, pkt, 1) < 0)
1948             break;
1949
1950         if (pkt->data == flush_pkt.data) {
1951             avcodec_flush_buffers(is->subtitle_st->codec);
1952             continue;
1953         }
1954         SDL_LockMutex(is->subpq_mutex);
1955         while (is->subpq_size >= SUBPICTURE_QUEUE_SIZE &&
1956                !is->subtitleq.abort_request) {
1957             SDL_CondWait(is->subpq_cond, is->subpq_mutex);
1958         }
1959         SDL_UnlockMutex(is->subpq_mutex);
1960
1961         if (is->subtitleq.abort_request)
1962             return 0;
1963
1964         sp = &is->subpq[is->subpq_windex];
1965
1966         /* NOTE: pts is the PTS of the _first_ subtitle picture beginning in
1967            this packet, if any */
1968         pts = 0;
1969         if (pkt->pts != AV_NOPTS_VALUE)
1970             pts = av_q2d(is->subtitle_st->time_base) * pkt->pts;
1971
1972         avcodec_decode_subtitle2(is->subtitle_st->codec, &sp->sub,
1973                                  &got_subtitle, pkt);
1974
1975         if (got_subtitle && sp->sub.format == 0) {
1976             sp->pts = pts;
1977
1978             for (i = 0; i < sp->sub.num_rects; i++)
1979             {
1980                 for (j = 0; j < sp->sub.rects[i]->nb_colors; j++)
1981                 {
1982                     RGBA_IN(r, g, b, a, (uint32_t*)sp->sub.rects[i]->pict.data[1] + j);
1983                     y = RGB_TO_Y_CCIR(r, g, b);
1984                     u = RGB_TO_U_CCIR(r, g, b, 0);
1985                     v = RGB_TO_V_CCIR(r, g, b, 0);
1986                     YUVA_OUT((uint32_t*)sp->sub.rects[i]->pict.data[1] + j, y, u, v, a);
1987                 }
1988             }
1989
1990             /* now we can update the picture count */
1991             if (++is->subpq_windex == SUBPICTURE_QUEUE_SIZE)
1992                 is->subpq_windex = 0;
1993             SDL_LockMutex(is->subpq_mutex);
1994             is->subpq_size++;
1995             SDL_UnlockMutex(is->subpq_mutex);
1996         }
1997         av_free_packet(pkt);
1998     }
1999     return 0;
2000 }
2001
2002 /* copy samples so they can be displayed in the audio visualization (waves / RDFT) window */
2003 static void update_sample_display(VideoState *is, short *samples, int samples_size)
2004 {
2005     int size, len;
2006
2007     size = samples_size / sizeof(short);
2008     while (size > 0) {
2009         len = SAMPLE_ARRAY_SIZE - is->sample_array_index;
2010         if (len > size)
2011             len = size;
2012         memcpy(is->sample_array + is->sample_array_index, samples, len * sizeof(short));
2013         samples += len;
2014         is->sample_array_index += len;
2015         if (is->sample_array_index >= SAMPLE_ARRAY_SIZE)
2016             is->sample_array_index = 0;
2017         size -= len;
2018     }
2019 }
2020
2021 /* return the wanted number of samples to get better sync if sync_type is video
2022  * or external master clock */
2023 static int synchronize_audio(VideoState *is, int nb_samples)
2024 {
2025     int wanted_nb_samples = nb_samples;
2026
2027     /* if not master, then we try to remove or add samples to correct the clock */
2028     if (((is->av_sync_type == AV_SYNC_VIDEO_MASTER && is->video_st) ||
2029          is->av_sync_type == AV_SYNC_EXTERNAL_CLOCK)) {
2030         double diff, avg_diff;
2031         int min_nb_samples, max_nb_samples;
2032
2033         diff = get_audio_clock(is) - get_master_clock(is);
2034
2035         if (diff < AV_NOSYNC_THRESHOLD) {
2036             is->audio_diff_cum = diff + is->audio_diff_avg_coef * is->audio_diff_cum;
2037             if (is->audio_diff_avg_count < AUDIO_DIFF_AVG_NB) {
2038                 /* not enough measurements yet for a reliable estimate */
2039                 is->audio_diff_avg_count++;
2040             } else {
2041                 /* estimate the A-V difference */
2042                 avg_diff = is->audio_diff_cum * (1.0 - is->audio_diff_avg_coef);
2043
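                /* if the averaged error is above the threshold, turn the
                   current clock error (in seconds) into a sample count at the
                   source rate, clamped to +/- SAMPLE_CORRECTION_PERCENT_MAX
                   percent of the frame size */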
2044                 if (fabs(avg_diff) >= is->audio_diff_threshold) {
2045                     wanted_nb_samples = nb_samples + (int)(diff * is->audio_src_freq);
2046                     min_nb_samples = ((nb_samples * (100 - SAMPLE_CORRECTION_PERCENT_MAX) / 100));
2047                     max_nb_samples = ((nb_samples * (100 + SAMPLE_CORRECTION_PERCENT_MAX) / 100));
2048                     wanted_nb_samples = FFMIN(FFMAX(wanted_nb_samples, min_nb_samples), max_nb_samples);
2049                 }
2050                 av_dlog(NULL, "diff=%f adiff=%f sample_diff=%d apts=%0.3f vpts=%0.3f %f\n",
2051                         diff, avg_diff, wanted_nb_samples - nb_samples,
2052                         is->audio_clock, is->video_clock, is->audio_diff_threshold);
2053             }
2054         } else {
2055             /* difference is too big: probably initial PTS errors, so
2056                reset the A-V averaging filter */
2057             is->audio_diff_avg_count = 0;
2058             is->audio_diff_cum       = 0;
2059         }
2060     }
2061
2062     return wanted_nb_samples;
2063 }
2064
2065 /* decode one audio frame and return its uncompressed size */
2066 static int audio_decode_frame(VideoState *is, double *pts_ptr)
2067 {
2068     AVPacket *pkt_temp = &is->audio_pkt_temp;
2069     AVPacket *pkt = &is->audio_pkt;
2070     AVCodecContext *dec = is->audio_st->codec;
2071     int len1, len2, data_size, resampled_data_size;
2072     int64_t dec_channel_layout;
2073     int got_frame;
2074     double pts;
2075     int new_packet = 0;
2076     int flush_complete = 0;
2077     int wanted_nb_samples;
2078
2079     for (;;) {
2080         /* NOTE: the audio packet can contain several frames */
2081         while (pkt_temp->size > 0 || (!pkt_temp->data && new_packet)) {
2082             if (!is->frame) {
2083                 if (!(is->frame = avcodec_alloc_frame()))
2084                     return AVERROR(ENOMEM);
2085             } else
2086                 avcodec_get_frame_defaults(is->frame);
2087
2088             if (flush_complete)
2089                 break;
2090             new_packet = 0;
2091             len1 = avcodec_decode_audio4(dec, is->frame, &got_frame, pkt_temp);
2092             if (len1 < 0) {
2093                 /* if error, we skip the frame */
2094                 pkt_temp->size = 0;
2095                 break;
2096             }
2097
2098             pkt_temp->data += len1;
2099             pkt_temp->size -= len1;
2100
2101             if (!got_frame) {
2102                 /* stop sending empty packets if the decoder is finished */
2103                 if (!pkt_temp->data && dec->codec->capabilities & CODEC_CAP_DELAY)
2104                     flush_complete = 1;
2105                 continue;
2106             }
2107             data_size = av_samples_get_buffer_size(NULL, dec->channels,
2108                                                    is->frame->nb_samples,
2109                                                    dec->sample_fmt, 1);
2110
2111             dec_channel_layout = (dec->channel_layout && dec->channels == av_get_channel_layout_nb_channels(dec->channel_layout)) ? dec->channel_layout : av_get_default_channel_layout(dec->channels);
2112             wanted_nb_samples = synchronize_audio(is, is->frame->nb_samples);
2113
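            /* (re)configure the resampler whenever the decoded sample format,
               channel layout or rate differs from what was last seen, or when
               sample-count compensation is needed but no swr context exists */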
2114             if (dec->sample_fmt != is->audio_src_fmt ||
2115                 dec_channel_layout != is->audio_src_channel_layout ||
2116                 dec->sample_rate != is->audio_src_freq ||
2117                 (wanted_nb_samples != is->frame->nb_samples && !is->swr_ctx)) {
2118                 if (is->swr_ctx)
2119                     swr_free(&is->swr_ctx);
2120                 is->swr_ctx = swr_alloc_set_opts(NULL,
2121                                                  is->audio_tgt_channel_layout, is->audio_tgt_fmt, is->audio_tgt_freq,
2122                                                  dec_channel_layout,           dec->sample_fmt,   dec->sample_rate,
2123                                                  0, NULL);
2124                 if (!is->swr_ctx || swr_init(is->swr_ctx) < 0) {
2125                     fprintf(stderr, "Cannot create sample rate converter for conversion of %d Hz %s %d channels to %d Hz %s %d channels!\n",
2126                         dec->sample_rate,
2127                         av_get_sample_fmt_name(dec->sample_fmt),
2128                         dec->channels,
2129                         is->audio_tgt_freq,
2130                         av_get_sample_fmt_name(is->audio_tgt_fmt),
2131                         is->audio_tgt_channels);
2132                     break;
2133                 }
2134                 is->audio_src_channel_layout = dec_channel_layout;
2135                 is->audio_src_channels = dec->channels;
2136                 is->audio_src_freq = dec->sample_rate;
2137                 is->audio_src_fmt = dec->sample_fmt;
2138             }
2139
2140             resampled_data_size = data_size;
2141             if (is->swr_ctx) {
2142                 const uint8_t *in[] = { is->frame->data[0] };
2143                 uint8_t *out[] = {is->audio_buf2};
2144                 if (wanted_nb_samples != is->frame->nb_samples) {
2145                     if (swr_set_compensation(is->swr_ctx, (wanted_nb_samples - is->frame->nb_samples) * is->audio_tgt_freq / dec->sample_rate,
2146                                                 wanted_nb_samples * is->audio_tgt_freq / dec->sample_rate) < 0) {
2147                         fprintf(stderr, "swr_set_compensation() failed\n");
2148                         break;
2149                     }
2150                 }
2151                 len2 = swr_convert(is->swr_ctx, out, sizeof(is->audio_buf2) / is->audio_tgt_channels / av_get_bytes_per_sample(is->audio_tgt_fmt),
2152                                                 in, is->frame->nb_samples);
2153                 if (len2 < 0) {
2154                     fprintf(stderr, "swr_convert() failed\n");
2155                     break;
2156                 }
2157                 if (len2 == sizeof(is->audio_buf2) / is->audio_tgt_channels / av_get_bytes_per_sample(is->audio_tgt_fmt)) {
2158                     fprintf(stderr, "warning: audio buffer is probably too small\n");
2159                     swr_init(is->swr_ctx);
2160                 }
2161                 is->audio_buf = is->audio_buf2;
2162                 resampled_data_size = len2 * is->audio_tgt_channels * av_get_bytes_per_sample(is->audio_tgt_fmt);
2163             } else {
2164                 is->audio_buf = is->frame->data[0];
2165             }
2166
2167             /* no per-frame pts here, so derive it from the running audio clock */
2168             pts = is->audio_clock;
2169             *pts_ptr = pts;
2170             is->audio_clock += (double)data_size /
2171                 (dec->channels * dec->sample_rate * av_get_bytes_per_sample(dec->sample_fmt));
2172 #ifdef DEBUG
2173             {
2174                 static double last_clock;
2175                 printf("audio: delay=%0.3f clock=%0.3f pts=%0.3f\n",
2176                        is->audio_clock - last_clock,
2177                        is->audio_clock, pts);
2178                 last_clock = is->audio_clock;
2179             }
2180 #endif
2181             return resampled_data_size;
2182         }
2183
2184         /* free the current packet */
2185         if (pkt->data)
2186             av_free_packet(pkt);
2187         memset(pkt_temp, 0, sizeof(*pkt_temp));
2188
2189         if (is->paused || is->audioq.abort_request) {
2190             return -1;
2191         }
2192
2193         /* read next packet */
2194         if ((new_packet = packet_queue_get(&is->audioq, pkt, 1)) < 0)
2195             return -1;
2196
2197         if (pkt->data == flush_pkt.data) {
2198             avcodec_flush_buffers(dec);
2199             flush_complete = 0;
2200         }
2201
2202         *pkt_temp = *pkt;
2203
2204         /* if the packet has a pts, update the audio clock with it */
2205         if (pkt->pts != AV_NOPTS_VALUE) {
2206             is->audio_clock = av_q2d(is->audio_st->time_base)*pkt->pts;
2207         }
2208     }
2209 }
2210
2211 /* prepare a new audio buffer */
2212 static void sdl_audio_callback(void *opaque, Uint8 *stream, int len)
2213 {
2214     VideoState *is = opaque;
2215     int audio_size, len1;
2216     int bytes_per_sec;
2217     int frame_size = av_samples_get_buffer_size(NULL, is->audio_tgt_channels, 1, is->audio_tgt_fmt, 1);
2218     double pts;
2219
2220     audio_callback_time = av_gettime();
2221
2222     while (len > 0) {
2223         if (is->audio_buf_index >= is->audio_buf_size) {
2224             audio_size = audio_decode_frame(is, &pts);
2225             if (audio_size < 0) {
2226                 /* if error, just output silence */
2227                 is->audio_buf      = is->silence_buf;
2228                 is->audio_buf_size = sizeof(is->silence_buf) / frame_size * frame_size;
2229             } else {
2230                 if (is->show_mode != SHOW_MODE_VIDEO)
2231                     update_sample_display(is, (int16_t *)is->audio_buf, audio_size);
2232                 is->audio_buf_size = audio_size;
2233             }
2234             is->audio_buf_index = 0;
2235         }
2236         len1 = is->audio_buf_size - is->audio_buf_index;
2237         if (len1 > len)
2238             len1 = len;
2239         memcpy(stream, (uint8_t *)is->audio_buf + is->audio_buf_index, len1);
2240         len -= len1;
2241         stream += len1;
2242         is->audio_buf_index += len1;
2243     }
2244     bytes_per_sec = is->audio_tgt_freq * is->audio_tgt_channels * av_get_bytes_per_sample(is->audio_tgt_fmt);
2245     is->audio_write_buf_size = is->audio_buf_size - is->audio_buf_index;
2246     /* Let's assume the audio driver that is used by SDL has two periods. */
2247     is->audio_current_pts = is->audio_clock - (double)(2 * is->audio_hw_buf_size + is->audio_write_buf_size) / bytes_per_sec;
2248     is->audio_current_pts_drift = is->audio_current_pts - audio_callback_time / 1000000.0;
2249 }
2250
2251 /* open a given stream. Return 0 if OK */
2252 static int stream_component_open(VideoState *is, int stream_index)
2253 {
2254     AVFormatContext *ic = is->ic;
2255     AVCodecContext *avctx;
2256     AVCodec *codec;
2257     SDL_AudioSpec wanted_spec, spec;
2258     AVDictionary *opts;
2259     AVDictionaryEntry *t = NULL;
2260     int64_t wanted_channel_layout = 0;
2261     int wanted_nb_channels;
2262     const char *env;
2263
2264     if (stream_index < 0 || stream_index >= ic->nb_streams)
2265         return -1;
2266     avctx = ic->streams[stream_index]->codec;
2267
2268     codec = avcodec_find_decoder(avctx->codec_id);
2269     opts = filter_codec_opts(codec_opts, codec, ic, ic->streams[stream_index]);
2270
2271     switch(avctx->codec_type){
2272         case AVMEDIA_TYPE_AUDIO   : is->last_audio_stream    = stream_index; if(audio_codec_name   ) codec= avcodec_find_decoder_by_name(   audio_codec_name); break;
2273         case AVMEDIA_TYPE_SUBTITLE: is->last_subtitle_stream = stream_index; if(subtitle_codec_name) codec= avcodec_find_decoder_by_name(subtitle_codec_name); break;
2274         case AVMEDIA_TYPE_VIDEO   : is->last_video_stream    = stream_index; if(video_codec_name   ) codec= avcodec_find_decoder_by_name(   video_codec_name); break;
2275     }
2276     if (!codec)
2277         return -1;
2278
2279     avctx->workaround_bugs   = workaround_bugs;
2280     avctx->lowres            = lowres;
2281     if(avctx->lowres > codec->max_lowres){
2282         av_log(avctx, AV_LOG_WARNING, "The maximum value for lowres supported by the decoder is %d\n",
2283                 codec->max_lowres);
2284         avctx->lowres= codec->max_lowres;
2285     }
2286     avctx->idct_algo         = idct;
2287     avctx->skip_frame        = skip_frame;
2288     avctx->skip_idct         = skip_idct;
2289     avctx->skip_loop_filter  = skip_loop_filter;
2290     avctx->error_concealment = error_concealment;
2291
2292     if(avctx->lowres) avctx->flags |= CODEC_FLAG_EMU_EDGE;
2293     if (fast)   avctx->flags2 |= CODEC_FLAG2_FAST;
2294     if(codec->capabilities & CODEC_CAP_DR1)
2295         avctx->flags |= CODEC_FLAG_EMU_EDGE;
2296
2297     if (avctx->codec_type == AVMEDIA_TYPE_AUDIO) {
2298         memset(&is->audio_pkt_temp, 0, sizeof(is->audio_pkt_temp));
2299         env = SDL_getenv("SDL_AUDIO_CHANNELS");
2300         if (env)
2301             wanted_channel_layout = av_get_default_channel_layout(SDL_atoi(env));
2302         if (!wanted_channel_layout) {
2303             wanted_channel_layout = (avctx->channel_layout && avctx->channels == av_get_channel_layout_nb_channels(avctx->channel_layout)) ? avctx->channel_layout : av_get_default_channel_layout(avctx->channels);
2304             wanted_channel_layout &= ~AV_CH_LAYOUT_STEREO_DOWNMIX;
2305             wanted_nb_channels = av_get_channel_layout_nb_channels(wanted_channel_layout);
2306             /* SDL only supports 1, 2, 4 or 6 channels at the moment, so we have to make sure not to request anything else. */
2307             while (wanted_nb_channels > 0 && (wanted_nb_channels == 3 || wanted_nb_channels == 5 || wanted_nb_channels > 6)) {
2308                 wanted_nb_channels--;
2309                 wanted_channel_layout = av_get_default_channel_layout(wanted_nb_channels);
2310             }
2311         }
2312         wanted_spec.channels = av_get_channel_layout_nb_channels(wanted_channel_layout);
2313         wanted_spec.freq = avctx->sample_rate;
2314         if (wanted_spec.freq <= 0 || wanted_spec.channels <= 0) {
2315             fprintf(stderr, "Invalid sample rate or channel count!\n");
2316             return -1;
2317         }
2318     }
2319
2320     if (!av_dict_get(opts, "threads", NULL, 0))
2321         av_dict_set(&opts, "threads", "auto", 0);
2322     if (!codec ||
2323         avcodec_open2(avctx, codec, &opts) < 0)
2324         return -1;
2325     if ((t = av_dict_get(opts, "", NULL, AV_DICT_IGNORE_SUFFIX))) {
2326         av_log(NULL, AV_LOG_ERROR, "Option %s not found.\n", t->key);
2327         return AVERROR_OPTION_NOT_FOUND;
2328     }
2329
2330     /* prepare audio output */
2331     if (avctx->codec_type == AVMEDIA_TYPE_AUDIO) {
2332         wanted_spec.format = AUDIO_S16SYS;
2333         wanted_spec.silence = 0;
2334         wanted_spec.samples = SDL_AUDIO_BUFFER_SIZE;
2335         wanted_spec.callback = sdl_audio_callback;
2336         wanted_spec.userdata = is;
2337         if (SDL_OpenAudio(&wanted_spec, &spec) < 0) {
2338             fprintf(stderr, "SDL_OpenAudio: %s\n", SDL_GetError());
2339             return -1;
2340         }
2341         is->audio_hw_buf_size = spec.size;
2342         if (spec.format != AUDIO_S16SYS) {
2343             fprintf(stderr, "SDL advised audio format %d is not supported!\n", spec.format);
2344             return -1;
2345         }
2346         if (spec.channels != wanted_spec.channels) {
2347             wanted_channel_layout = av_get_default_channel_layout(spec.channels);
2348             if (!wanted_channel_layout) {
2349                 fprintf(stderr, "SDL advised channel count %d is not supported!\n", spec.channels);
2350                 return -1;
2351             }
2352         }
2353         is->audio_src_fmt = is->audio_tgt_fmt = AV_SAMPLE_FMT_S16;
2354         is->audio_src_freq = is->audio_tgt_freq = spec.freq;
2355         is->audio_src_channel_layout = is->audio_tgt_channel_layout = wanted_channel_layout;
2356         is->audio_src_channels = is->audio_tgt_channels = spec.channels;
2357     }
2358
2359     ic->streams[stream_index]->discard = AVDISCARD_DEFAULT;
2360     switch (avctx->codec_type) {
2361     case AVMEDIA_TYPE_AUDIO:
2362         is->audio_stream = stream_index;
2363         is->audio_st = ic->streams[stream_index];
2364         is->audio_buf_size  = 0;
2365         is->audio_buf_index = 0;
2366
2367         /* init averaging filter */
2368         is->audio_diff_avg_coef  = exp(log(0.01) / AUDIO_DIFF_AVG_NB);
2369         is->audio_diff_avg_count = 0;
2370         /* since we do not have a precise enough audio FIFO fullness,
2371            we only correct audio sync if the error is larger than this threshold */
2372         is->audio_diff_threshold = 2.0 * SDL_AUDIO_BUFFER_SIZE / wanted_spec.freq;
2373
2374         memset(&is->audio_pkt, 0, sizeof(is->audio_pkt));
2375         packet_queue_start(&is->audioq);
2376         SDL_PauseAudio(0);
2377         break;
2378     case AVMEDIA_TYPE_VIDEO:
2379         is->video_stream = stream_index;
2380         is->video_st = ic->streams[stream_index];
2381
2382         packet_queue_start(&is->videoq);
2383         is->video_tid = SDL_CreateThread(video_thread, is);
2384         break;
2385     case AVMEDIA_TYPE_SUBTITLE:
2386         is->subtitle_stream = stream_index;
2387         is->subtitle_st = ic->streams[stream_index];
2388         packet_queue_start(&is->subtitleq);
2389
2390         is->subtitle_tid = SDL_CreateThread(subtitle_thread, is);
2391         break;
2392     default:
2393         break;
2394     }
2395     return 0;
2396 }
2397
2398 static void stream_component_close(VideoState *is, int stream_index)
2399 {
2400     AVFormatContext *ic = is->ic;
2401     AVCodecContext *avctx;
2402
2403     if (stream_index < 0 || stream_index >= ic->nb_streams)
2404         return;
2405     avctx = ic->streams[stream_index]->codec;
2406
2407     switch (avctx->codec_type) {
2408     case AVMEDIA_TYPE_AUDIO:
2409         packet_queue_abort(&is->audioq);
2410
2411         SDL_CloseAudio();
2412
2413         packet_queue_flush(&is->audioq);
2414         av_free_packet(&is->audio_pkt);
2415         if (is->swr_ctx)
2416             swr_free(&is->swr_ctx);
2417         av_freep(&is->audio_buf1);
2418         is->audio_buf = NULL;
2419         av_freep(&is->frame);
2420
2421         if (is->rdft) {
2422             av_rdft_end(is->rdft);
2423             av_freep(&is->rdft_data);
2424             is->rdft = NULL;
2425             is->rdft_bits = 0;
2426         }
2427         break;
2428     case AVMEDIA_TYPE_VIDEO:
2429         packet_queue_abort(&is->videoq);
2430
2431         /* note: we also signal this mutex to make sure we unblock the
2432            video thread in all cases */
2433         SDL_LockMutex(is->pictq_mutex);
2434         SDL_CondSignal(is->pictq_cond);
2435         SDL_UnlockMutex(is->pictq_mutex);
2436
2437         SDL_WaitThread(is->video_tid, NULL);
2438
2439         packet_queue_flush(&is->videoq);
2440         break;
2441     case AVMEDIA_TYPE_SUBTITLE:
2442         packet_queue_abort(&is->subtitleq);
2443
2444         /* note: we also signal this mutex to make sure we unblock the
2445            subtitle thread in all cases */
2446         SDL_LockMutex(is->subpq_mutex);
2447         is->subtitle_stream_changed = 1;
2448
2449         SDL_CondSignal(is->subpq_cond);
2450         SDL_UnlockMutex(is->subpq_mutex);
2451
2452         SDL_WaitThread(is->subtitle_tid, NULL);
2453
2454         packet_queue_flush(&is->subtitleq);
2455         break;
2456     default:
2457         break;
2458     }
2459
2460     ic->streams[stream_index]->discard = AVDISCARD_ALL;
2461     avcodec_close(avctx);
2462     switch (avctx->codec_type) {
2463     case AVMEDIA_TYPE_AUDIO:
2464         is->audio_st = NULL;
2465         is->audio_stream = -1;
2466         break;
2467     case AVMEDIA_TYPE_VIDEO:
2468         is->video_st = NULL;
2469         is->video_stream = -1;
2470         break;
2471     case AVMEDIA_TYPE_SUBTITLE:
2472         is->subtitle_st = NULL;
2473         is->subtitle_stream = -1;
2474         break;
2475     default:
2476         break;
2477     }
2478 }
2479
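/* interrupt callback registered on the AVFormatContext below: lets blocking
 * demuxer/protocol I/O return early once abort_request has been set */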
2480 static int decode_interrupt_cb(void *ctx)
2481 {
2482     VideoState *is = ctx;
2483     return is->abort_request;
2484 }
2485
2486 /* this thread gets the stream from the disk or the network */
2487 static int read_thread(void *arg)
2488 {
2489     VideoState *is = arg;
2490     AVFormatContext *ic = NULL;
2491     int err, i, ret;
2492     int st_index[AVMEDIA_TYPE_NB];
2493     AVPacket pkt1, *pkt = &pkt1;
2494     int eof = 0;
2495     int pkt_in_play_range = 0;
2496     AVDictionaryEntry *t;
2497     AVDictionary **opts;
2498     int orig_nb_streams;
2499
2500     memset(st_index, -1, sizeof(st_index));
2501     is->last_video_stream = is->video_stream = -1;
2502     is->last_audio_stream = is->audio_stream = -1;
2503     is->last_subtitle_stream = is->subtitle_stream = -1;
2504
2505     ic = avformat_alloc_context();
2506     ic->interrupt_callback.callback = decode_interrupt_cb;
2507     ic->interrupt_callback.opaque = is;
2508     err = avformat_open_input(&ic, is->filename, is->iformat, &format_opts);
2509     if (err < 0) {
2510         print_error(is->filename, err);
2511         ret = -1;
2512         goto fail;
2513     }
2514     if ((t = av_dict_get(format_opts, "", NULL, AV_DICT_IGNORE_SUFFIX))) {
2515         av_log(NULL, AV_LOG_ERROR, "Option %s not found.\n", t->key);
2516         ret = AVERROR_OPTION_NOT_FOUND;
2517         goto fail;
2518     }
2519     is->ic = ic;
2520
2521     if (genpts)
2522         ic->flags |= AVFMT_FLAG_GENPTS;
2523
2524     opts = setup_find_stream_info_opts(ic, codec_opts);
2525     orig_nb_streams = ic->nb_streams;
2526
2527     err = avformat_find_stream_info(ic, opts);
2528     if (err < 0) {
2529         fprintf(stderr, "%s: could not find codec parameters\n", is->filename);
2530         ret = -1;
2531         goto fail;
2532     }
2533     for (i = 0; i < orig_nb_streams; i++)
2534         av_dict_free(&opts[i]);
2535     av_freep(&opts);
2536
2537     if (ic->pb)
2538         ic->pb->eof_reached = 0; // FIXME hack: ffplay should probably not use url_feof() to test for the end
2539
2540     if (seek_by_bytes < 0)
2541         seek_by_bytes = !!(ic->iformat->flags & AVFMT_TS_DISCONT);
2542
2543     /* if seeking was requested, execute it */
2544     if (start_time != AV_NOPTS_VALUE) {
2545         int64_t timestamp;
2546
2547         timestamp = start_time;
2548         /* add the stream start time */
2549         if (ic->start_time != AV_NOPTS_VALUE)
2550             timestamp += ic->start_time;
2551         ret = avformat_seek_file(ic, -1, INT64_MIN, timestamp, INT64_MAX, 0);
2552         if (ret < 0) {
2553             fprintf(stderr, "%s: could not seek to position %0.3f\n",
2554                     is->filename, (double)timestamp / AV_TIME_BASE);
2555         }
2556     }
2557
2558     for (i = 0; i < ic->nb_streams; i++)
2559         ic->streams[i]->discard = AVDISCARD_ALL;
2560     if (!video_disable)
2561         st_index[AVMEDIA_TYPE_VIDEO] =
2562             av_find_best_stream(ic, AVMEDIA_TYPE_VIDEO,
2563                                 wanted_stream[AVMEDIA_TYPE_VIDEO], -1, NULL, 0);
2564     if (!audio_disable)
2565         st_index[AVMEDIA_TYPE_AUDIO] =
2566             av_find_best_stream(ic, AVMEDIA_TYPE_AUDIO,
2567                                 wanted_stream[AVMEDIA_TYPE_AUDIO],
2568                                 st_index[AVMEDIA_TYPE_VIDEO],
2569                                 NULL, 0);
2570     if (!video_disable)
2571         st_index[AVMEDIA_TYPE_SUBTITLE] =
2572             av_find_best_stream(ic, AVMEDIA_TYPE_SUBTITLE,
2573                                 wanted_stream[AVMEDIA_TYPE_SUBTITLE],
2574                                 (st_index[AVMEDIA_TYPE_AUDIO] >= 0 ?
2575                                  st_index[AVMEDIA_TYPE_AUDIO] :
2576                                  st_index[AVMEDIA_TYPE_VIDEO]),
2577                                 NULL, 0);
2578     if (show_status) {
2579         av_dump_format(ic, 0, is->filename, 0);
2580     }
2581
2582     is->show_mode = show_mode;
2583
2584     /* open the streams */
2585     if (st_index[AVMEDIA_TYPE_AUDIO] >= 0) {
2586         stream_component_open(is, st_index[AVMEDIA_TYPE_AUDIO]);
2587     }
2588
2589     ret = -1;
2590     if (st_index[AVMEDIA_TYPE_VIDEO] >= 0) {
2591         ret = stream_component_open(is, st_index[AVMEDIA_TYPE_VIDEO]);
2592     }
2593     is->refresh_tid = SDL_CreateThread(refresh_thread, is);
2594     if (is->show_mode == SHOW_MODE_NONE)
2595         is->show_mode = ret >= 0 ? SHOW_MODE_VIDEO : SHOW_MODE_RDFT;
2596
2597     if (st_index[AVMEDIA_TYPE_SUBTITLE] >= 0) {
2598         stream_component_open(is, st_index[AVMEDIA_TYPE_SUBTITLE]);
2599     }
2600
2601     if (is->video_stream < 0 && is->audio_stream < 0) {
2602         fprintf(stderr, "%s: could not open codecs\n", is->filename);
2603         ret = -1;
2604         goto fail;
2605     }
2606
2607     for (;;) {
2608         if (is->abort_request)
2609             break;
2610         if (is->paused != is->last_paused) {
2611             is->last_paused = is->paused;
2612             if (is->paused)
2613                 is->read_pause_return = av_read_pause(ic);
2614             else
2615                 av_read_play(ic);
2616         }
2617 #if CONFIG_RTSP_DEMUXER || CONFIG_MMSH_PROTOCOL
2618         if (is->paused &&
2619                 (!strcmp(ic->iformat->name, "rtsp") ||
2620                  (ic->pb && !strncmp(input_filename, "mmsh:", 5)))) {
2621             /* wait 10 ms to avoid trying to get another packet */
2622             /* XXX: horrible */
2623             SDL_Delay(10);
2624             continue;
2625         }
2626 #endif
2627         if (is->seek_req) {
2628             int64_t seek_target = is->seek_pos;
2629             int64_t seek_min    = is->seek_rel > 0 ? seek_target - is->seek_rel + 2: INT64_MIN;
2630             int64_t seek_max    = is->seek_rel < 0 ? seek_target - is->seek_rel - 2: INT64_MAX;
2631 // FIXME the +-2 is there because rounding is not done in the correct direction when
2632 //       the seek_pos/seek_rel variables are generated
2633
2634             ret = avformat_seek_file(is->ic, -1, seek_min, seek_target, seek_max, is->seek_flags);
2635             if (ret < 0) {
2636                 fprintf(stderr, "%s: error while seeking\n", is->ic->filename);
2637             } else {
2638                 if (is->audio_stream >= 0) {
2639                     packet_queue_flush(&is->audioq);
2640                     packet_queue_put(&is->audioq, &flush_pkt);
2641                 }
2642                 if (is->subtitle_stream >= 0) {
2643                     packet_queue_flush(&is->subtitleq);
2644                     packet_queue_put(&is->subtitleq, &flush_pkt);
2645                 }
2646                 if (is->video_stream >= 0) {
2647                     packet_queue_flush(&is->videoq);
2648                     packet_queue_put(&is->videoq, &flush_pkt);
2649                 }
2650             }
2651             is->seek_req = 0;
2652             eof = 0;
2653         }
2654
2655         /* if the queues are full, no need to read more */
2656         if (   is->audioq.size + is->videoq.size + is->subtitleq.size > MAX_QUEUE_SIZE
2657             || (   (is->audioq   .nb_packets > MIN_FRAMES || is->audio_stream < 0 || is->audioq.abort_request)
2658                 && (is->videoq   .nb_packets > MIN_FRAMES || is->video_stream < 0 || is->videoq.abort_request)
2659                 && (is->subtitleq.nb_packets > MIN_FRAMES || is->subtitle_stream < 0 || is->subtitleq.abort_request))) {
2660             /* wait 10 ms */
2661             SDL_Delay(10);
2662             continue;
2663         }
2664         if (eof) {
2665             if (is->video_stream >= 0) {
2666                 av_init_packet(pkt);
2667                 pkt->data = NULL;
2668                 pkt->size = 0;
2669                 pkt->stream_index = is->video_stream;
2670                 packet_queue_put(&is->videoq, pkt);
2671             }
2672             if (is->audio_stream >= 0 &&
2673                 is->audio_st->codec->codec->capabilities & CODEC_CAP_DELAY) {
2674                 av_init_packet(pkt);
2675                 pkt->data = NULL;
2676                 pkt->size = 0;
2677                 pkt->stream_index = is->audio_stream;
2678                 packet_queue_put(&is->audioq, pkt);
2679             }
2680             SDL_Delay(10);
2681             if (is->audioq.size + is->videoq.size + is->subtitleq.size == 0) {
2682                 if (loop != 1 && (!loop || --loop)) {
2683                     stream_seek(is, start_time != AV_NOPTS_VALUE ? start_time : 0, 0, 0);
2684                 } else if (autoexit) {
2685                     ret = AVERROR_EOF;
2686                     goto fail;
2687                 }
2688             }
2689             eof=0;
2690             continue;
2691         }
2692         ret = av_read_frame(ic, pkt);
2693         if (ret < 0) {
2694             if (ret == AVERROR_EOF || url_feof(ic->pb))
2695                 eof = 1;
2696             if (ic->pb && ic->pb->error)
2697                 break;
2698             SDL_Delay(100); /* wait for user event */
2699             continue;
2700         }
2701         /* check if the packet is in the play range specified by the user; if so queue it, otherwise discard it */
2702         pkt_in_play_range = duration == AV_NOPTS_VALUE ||
2703                 (pkt->pts - ic->streams[pkt->stream_index]->start_time) *
2704                 av_q2d(ic->streams[pkt->stream_index]->time_base) -
2705                 (double)(start_time != AV_NOPTS_VALUE ? start_time : 0) / 1000000
2706                 <= ((double)duration / 1000000);
2707         if (pkt->stream_index == is->audio_stream && pkt_in_play_range) {
2708             packet_queue_put(&is->audioq, pkt);
2709         } else if (pkt->stream_index == is->video_stream && pkt_in_play_range) {
2710             packet_queue_put(&is->videoq, pkt);
2711         } else if (pkt->stream_index == is->subtitle_stream && pkt_in_play_range) {
2712             packet_queue_put(&is->subtitleq, pkt);
2713         } else {
2714             av_free_packet(pkt);
2715         }
2716     }
2717     /* wait until the end */
2718     while (!is->abort_request) {
2719         SDL_Delay(100);
2720     }
2721
2722     ret = 0;
2723  fail:
2724     /* close each stream */
2725     if (is->audio_stream >= 0)
2726         stream_component_close(is, is->audio_stream);
2727     if (is->video_stream >= 0)
2728         stream_component_close(is, is->video_stream);
2729     if (is->subtitle_stream >= 0)
2730         stream_component_close(is, is->subtitle_stream);
2731     if (is->ic) {
2732         avformat_close_input(&is->ic);
2733     }
2734
2735     if (ret != 0) {
2736         SDL_Event event;
2737
2738         event.type = FF_QUIT_EVENT;
2739         event.user.data1 = is;
2740         SDL_PushEvent(&event);
2741     }
2742     return 0;
2743 }
2744
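/* Allocate and initialize a VideoState: create the picture/subpicture mutexes
 * and condition variables, initialize the packet queues and spawn the
 * demuxing thread (read_thread). Returns NULL on failure. */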
2745 static VideoState *stream_open(const char *filename, AVInputFormat *iformat)
2746 {
2747     VideoState *is;
2748
2749     is = av_mallocz(sizeof(VideoState));
2750     if (!is)
2751         return NULL;
2752     av_strlcpy(is->filename, filename, sizeof(is->filename));
2753     is->iformat = iformat;
2754     is->ytop    = 0;
2755     is->xleft   = 0;
2756
2757     /* start video display */
2758     is->pictq_mutex = SDL_CreateMutex();
2759     is->pictq_cond  = SDL_CreateCond();
2760
2761     is->subpq_mutex = SDL_CreateMutex();
2762     is->subpq_cond  = SDL_CreateCond();
2763
2764     packet_queue_init(&is->videoq);
2765     packet_queue_init(&is->audioq);
2766     packet_queue_init(&is->subtitleq);
2767
2768     is->av_sync_type = av_sync_type;
2769     is->read_tid     = SDL_CreateThread(read_thread, is);
2770     if (!is->read_tid) {
2771         av_free(is);
2772         return NULL;
2773     }
2774     return is;
2775 }
2776
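/* Switch to the next stream of the given type, wrapping around at the end.
 * For subtitles the cycle may also land on "no stream" (index -1), which
 * effectively switches subtitles off. */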
2777 static void stream_cycle_channel(VideoState *is, int codec_type)
2778 {
2779     AVFormatContext *ic = is->ic;
2780     int start_index, stream_index;
2781     int old_index;
2782     AVStream *st;
2783
2784     if (codec_type == AVMEDIA_TYPE_VIDEO) {
2785         start_index = is->last_video_stream;
2786         old_index = is->video_stream;
2787     } else if (codec_type == AVMEDIA_TYPE_AUDIO) {
2788         start_index = is->last_audio_stream;
2789         old_index = is->audio_stream;
2790     } else {
2791         start_index = is->last_subtitle_stream;
2792         old_index = is->subtitle_stream;
2793     }
2794     stream_index = start_index;
2795     for (;;) {
2796         if (++stream_index >= is->ic->nb_streams)
2797         {
2798             if (codec_type == AVMEDIA_TYPE_SUBTITLE)
2799             {
2800                 stream_index = -1;
2801                 is->last_subtitle_stream = -1;
2802                 goto the_end;
2803             }
2804             if (start_index == -1)
2805                 return;
2806             stream_index = 0;
2807         }
2808         if (stream_index == start_index)
2809             return;
2810         st = ic->streams[stream_index];
2811         if (st->codec->codec_type == codec_type) {
2812             /* check that parameters are OK */
2813             switch (codec_type) {
2814             case AVMEDIA_TYPE_AUDIO:
2815                 if (st->codec->sample_rate != 0 &&
2816                     st->codec->channels != 0)
2817                     goto the_end;
2818                 break;
2819             case AVMEDIA_TYPE_VIDEO:
2820             case AVMEDIA_TYPE_SUBTITLE:
2821                 goto the_end;
2822             default:
2823                 break;
2824             }
2825         }
2826     }
2827  the_end:
2828     stream_component_close(is, old_index);
2829     stream_component_open(is, stream_index);
2830 }
2831
2832
2833 static void toggle_full_screen(VideoState *is)
2834 {
2835     av_unused int i;
2836     is_full_screen = !is_full_screen;
2837 #if defined(__APPLE__) && SDL_VERSION_ATLEAST(1, 2, 14)
2838     /* OS X needs to reallocate the SDL overlays */
2839     for (i = 0; i < VIDEO_PICTURE_QUEUE_SIZE; i++) {
2840         is->pictq[i].reallocate = 1;
2841     }
2842 #endif
2843     video_open(is, 1);
2844 }
2845
2846 static void toggle_pause(VideoState *is)
2847 {
2848     stream_toggle_pause(is);
2849     is->step = 0;
2850 }
2851
2852 static void step_to_next_frame(VideoState *is)
2853 {
2854     /* if the stream is paused, unpause it, then step */
2855     if (is->paused)
2856         stream_toggle_pause(is);
2857     is->step = 1;
2858 }
2859
2860 static void toggle_audio_display(VideoState *is)
2861 {
2862     int bgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0x00);
2863     is->show_mode = (is->show_mode + 1) % SHOW_MODE_NB;
2864     fill_rectangle(screen,
2865                 is->xleft, is->ytop, is->width, is->height,
2866                 bgcolor);
2867     SDL_UpdateRect(screen, is->xleft, is->ytop, is->width, is->height);
2868 }
2869
2870 /* handle an event sent by the GUI */
2871 static void event_loop(VideoState *cur_stream)
2872 {
2873     SDL_Event event;
2874     double incr, pos, frac;
2875
2876     for (;;) {
2877         double x;
2878         SDL_WaitEvent(&event);
2879         switch (event.type) {
2880         case SDL_KEYDOWN:
2881             if (exit_on_keydown) {
2882                 do_exit(cur_stream);
2883                 break;
2884             }
2885             switch (event.key.keysym.sym) {
2886             case SDLK_ESCAPE:
2887             case SDLK_q:
2888                 do_exit(cur_stream);
2889                 break;
2890             case SDLK_f:
2891                 toggle_full_screen(cur_stream);
2892                 cur_stream->force_refresh = 1;
2893                 break;
2894             case SDLK_p:
2895             case SDLK_SPACE:
2896                 toggle_pause(cur_stream);
2897                 break;
2898             case SDLK_s: // S: Step to next frame
2899                 step_to_next_frame(cur_stream);
2900                 break;
2901             case SDLK_a:
2902                 stream_cycle_channel(cur_stream, AVMEDIA_TYPE_AUDIO);
2903                 break;
2904             case SDLK_v:
2905                 stream_cycle_channel(cur_stream, AVMEDIA_TYPE_VIDEO);
2906                 break;
2907             case SDLK_t:
2908                 stream_cycle_channel(cur_stream, AVMEDIA_TYPE_SUBTITLE);
2909                 break;
2910             case SDLK_w:
2911                 toggle_audio_display(cur_stream);
2912                 cur_stream->force_refresh = 1;
2913                 break;
2914             case SDLK_PAGEUP:
2915                 incr = 600.0;
2916                 goto do_seek;
2917             case SDLK_PAGEDOWN:
2918                 incr = -600.0;
2919                 goto do_seek;
2920             case SDLK_LEFT:
2921                 incr = -10.0;
2922                 goto do_seek;
2923             case SDLK_RIGHT:
2924                 incr = 10.0;
2925                 goto do_seek;
2926             case SDLK_UP:
2927                 incr = 60.0;
2928                 goto do_seek;
2929             case SDLK_DOWN:
2930                 incr = -60.0;
2931             do_seek:
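                    /* two seek modes: when seeking by bytes the increment in
                       seconds is turned into a byte offset using the stream
                       bit rate (or a rough default), otherwise we seek
                       relative to the master clock in AV_TIME_BASE units */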
2932                     if (seek_by_bytes) {
2933                         if (cur_stream->video_stream >= 0 && cur_stream->video_current_pos >= 0) {
2934                             pos = cur_stream->video_current_pos;
2935                         } else if (cur_stream->audio_stream >= 0 && cur_stream->audio_pkt.pos >= 0) {
2936                             pos = cur_stream->audio_pkt.pos;
2937                         } else
2938                             pos = avio_tell(cur_stream->ic->pb);
2939                         if (cur_stream->ic->bit_rate)
2940                             incr *= cur_stream->ic->bit_rate / 8.0;
2941                         else
2942                             incr *= 180000.0;
2943                         pos += incr;
2944                         stream_seek(cur_stream, pos, incr, 1);
2945                     } else {
2946                         pos = get_master_clock(cur_stream);
2947                         pos += incr;
2948                         stream_seek(cur_stream, (int64_t)(pos * AV_TIME_BASE), (int64_t)(incr * AV_TIME_BASE), 0);
2949                     }
2950                 break;
2951             default:
2952                 break;
2953             }
2954             break;
2955         case SDL_VIDEOEXPOSE:
2956             cur_stream->force_refresh = 1;
2957             break;
2958         case SDL_MOUSEBUTTONDOWN:
2959             if (exit_on_mousedown) {
2960                 do_exit(cur_stream);
2961                 break;
2962             }
2963         case SDL_MOUSEMOTION:
2964             if (event.type == SDL_MOUSEBUTTONDOWN) {
2965                 x = event.button.x;
2966             } else {
2967                 if (event.motion.state != SDL_PRESSED)
2968                     break;
2969                 x = event.motion.x;
2970             }
2971                 if (seek_by_bytes || cur_stream->ic->duration <= 0) {
2972                     uint64_t size =  avio_size(cur_stream->ic->pb);
2973                     stream_seek(cur_stream, size*x/cur_stream->width, 0, 1);
2974                 } else {
2975                     int64_t ts;
2976                     int ns, hh, mm, ss;
2977                     int tns, thh, tmm, tss;
2978                     tns  = cur_stream->ic->duration / 1000000LL;
2979                     thh  = tns / 3600;
2980                     tmm  = (tns % 3600) / 60;
2981                     tss  = (tns % 60);
2982                     frac = x / cur_stream->width;
2983                     ns   = frac * tns;
2984                     hh   = ns / 3600;
2985                     mm   = (ns % 3600) / 60;
2986                     ss   = (ns % 60);
2987                     fprintf(stderr, "Seek to %2.0f%% (%2d:%02d:%02d) of total duration (%2d:%02d:%02d)       \n", frac*100,
2988                             hh, mm, ss, thh, tmm, tss);
2989                     ts = frac * cur_stream->ic->duration;
2990                     if (cur_stream->ic->start_time != AV_NOPTS_VALUE)
2991                         ts += cur_stream->ic->start_time;
2992                     stream_seek(cur_stream, ts, 0, 0);
2993                 }
2994             break;
2995         case SDL_VIDEORESIZE:
2996                 screen = SDL_SetVideoMode(event.resize.w, event.resize.h, 0,
2997                                           SDL_HWSURFACE|SDL_RESIZABLE|SDL_ASYNCBLIT|SDL_HWACCEL);
2998                 screen_width  = cur_stream->width  = event.resize.w;
2999                 screen_height = cur_stream->height = event.resize.h;
3000                 cur_stream->force_refresh = 1;
3001             break;
3002         case SDL_QUIT:
3003         case FF_QUIT_EVENT:
3004             do_exit(cur_stream);
3005             break;
3006         case FF_ALLOC_EVENT:
3007             alloc_picture(event.user.data1);
3008             break;
3009         case FF_REFRESH_EVENT:
3010             video_refresh(event.user.data1);
3011             cur_stream->refresh = 0;
3012             break;
3013         default:
3014             break;
3015         }
3016     }
3017 }
3018
3019 static int opt_frame_size(const char *opt, const char *arg)
3020 {
3021     av_log(NULL, AV_LOG_WARNING, "Option -s is deprecated, use -video_size.\n");
3022     return opt_default("video_size", arg);
3023 }
3024
3025 static int opt_width(const char *opt, const char *arg)
3026 {
3027     screen_width = parse_number_or_die(opt, arg, OPT_INT64, 1, INT_MAX);
3028     return 0;
3029 }
3030
3031 static int opt_height(const char *opt, const char *arg)
3032 {
3033     screen_height = parse_number_or_die(opt, arg, OPT_INT64, 1, INT_MAX);
3034     return 0;
3035 }
3036
3037 static int opt_format(const char *opt, const char *arg)
3038 {
3039     file_iformat = av_find_input_format(arg);
3040     if (!file_iformat) {
3041         fprintf(stderr, "Unknown input format: %s\n", arg);
3042         return AVERROR(EINVAL);
3043     }
3044     return 0;
3045 }
3046
3047 static int opt_frame_pix_fmt(const char *opt, const char *arg)
3048 {
3049     av_log(NULL, AV_LOG_WARNING, "Option -pix_fmt is deprecated, use -pixel_format.\n");
3050     return opt_default("pixel_format", arg);
3051 }
3052
3053 static int opt_sync(const char *opt, const char *arg)
3054 {
3055     if (!strcmp(arg, "audio"))
3056         av_sync_type = AV_SYNC_AUDIO_MASTER;
3057     else if (!strcmp(arg, "video"))
3058         av_sync_type = AV_SYNC_VIDEO_MASTER;
3059     else if (!strcmp(arg, "ext"))
3060         av_sync_type = AV_SYNC_EXTERNAL_CLOCK;
3061     else {
3062         fprintf(stderr, "Unknown value for %s: %s\n", opt, arg);
3063         exit(1);
3064     }
3065     return 0;
3066 }
3067
3068 static int opt_seek(const char *opt, const char *arg)
3069 {
3070     start_time = parse_time_or_die(opt, arg, 1);
3071     return 0;
3072 }
3073
3074 static int opt_duration(const char *opt, const char *arg)
3075 {
3076     duration = parse_time_or_die(opt, arg, 1);
3077     return 0;
3078 }
3079
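     /* -showmode accepts the symbolic names "video", "waves" and "rdft", or a
      * numeric index in the range [0, SHOW_MODE_NB-1]. */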
3080 static int opt_show_mode(const char *opt, const char *arg)
3081 {
3082     show_mode = !strcmp(arg, "video") ? SHOW_MODE_VIDEO :
3083                 !strcmp(arg, "waves") ? SHOW_MODE_WAVES :
3084                 !strcmp(arg, "rdft" ) ? SHOW_MODE_RDFT  :
3085                 parse_number_or_die(opt, arg, OPT_INT, 0, SHOW_MODE_NB-1);
3086     return 0;
3087 }
3088
3089 static void opt_input_file(void *optctx, const char *filename)
3090 {
3091     if (input_filename) {
3092         fprintf(stderr, "Argument '%s' provided as input filename, but '%s' was already specified.\n",
3093                 filename, input_filename);
3094         exit_program(1);
3095     }
3096     if (!strcmp(filename, "-"))
3097         filename = "pipe:";
3098     input_filename = filename;
3099 }
3100
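     /* Forced decoder selection: dispatch on the last character of the option
      * name as typed ('a', 's' or 'v'), which selects the audio, subtitle or
      * video decoder name respectively. */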
3101 static int opt_codec(void *o, const char *opt, const char *arg)
3102 {
3103     switch(opt[strlen(opt)-1]){
3104     case 'a' :    audio_codec_name = arg; break;
3105     case 's' : subtitle_codec_name = arg; break;
3106     case 'v' :    video_codec_name = arg; break;
3107     }
3108     return 0;
3109 }
3110
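     /* Dummy storage for the no-op -i option below, apparently kept only so that
      * ffmpeg-style command lines work; the input file itself is a positional
      * argument. */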
3111 static int dummy;
3112
3113 static const OptionDef options[] = {
3114 #include "cmdutils_common_opts.h"
3115     { "x", HAS_ARG, { (void*)opt_width }, "force displayed width", "width" },
3116     { "y", HAS_ARG, { (void*)opt_height }, "force displayed height", "height" },
3117     { "s", HAS_ARG | OPT_VIDEO, { (void*)opt_frame_size }, "set frame size (WxH or abbreviation)", "size" },
3118     { "fs", OPT_BOOL, { (void*)&is_full_screen }, "force full screen" },
3119     { "an", OPT_BOOL, { (void*)&audio_disable }, "disable audio" },
3120     { "vn", OPT_BOOL, { (void*)&video_disable }, "disable video" },
3121     { "ast", OPT_INT | HAS_ARG | OPT_EXPERT, { (void*)&wanted_stream[AVMEDIA_TYPE_AUDIO] }, "select desired audio stream", "stream_number" },
3122     { "vst", OPT_INT | HAS_ARG | OPT_EXPERT, { (void*)&wanted_stream[AVMEDIA_TYPE_VIDEO] }, "select desired video stream", "stream_number" },
3123     { "sst", OPT_INT | HAS_ARG | OPT_EXPERT, { (void*)&wanted_stream[AVMEDIA_TYPE_SUBTITLE] }, "select desired subtitle stream", "stream_number" },
3124     { "ss", HAS_ARG, { (void*)&opt_seek }, "seek to a given position in seconds", "pos" },
3125     { "t", HAS_ARG, { (void*)&opt_duration }, "play \"duration\" seconds of audio/video", "duration" },
3126     { "bytes", OPT_INT | HAS_ARG, { (void*)&seek_by_bytes }, "seek by bytes 0=off 1=on -1=auto", "val" },
3127     { "nodisp", OPT_BOOL, { (void*)&display_disable }, "disable graphical display" },
3128     { "f", HAS_ARG, { (void*)opt_format }, "force format", "fmt" },
3129     { "pix_fmt", HAS_ARG | OPT_EXPERT | OPT_VIDEO, { (void*)opt_frame_pix_fmt }, "set pixel format", "format" },
3130     { "stats", OPT_BOOL | OPT_EXPERT, { (void*)&show_status }, "show status", "" },
3131     { "bug", OPT_INT | HAS_ARG | OPT_EXPERT, { (void*)&workaround_bugs }, "work around bugs", "" },
3132     { "fast", OPT_BOOL | OPT_EXPERT, { (void*)&fast }, "non spec compliant optimizations", "" },
3133     { "genpts", OPT_BOOL | OPT_EXPERT, { (void*)&genpts }, "generate pts", "" },
3134     { "drp", OPT_INT | HAS_ARG | OPT_EXPERT, { (void*)&decoder_reorder_pts }, "let decoder reorder pts 0=off 1=on -1=auto", ""},
3135     { "lowres", OPT_INT | HAS_ARG | OPT_EXPERT, { (void*)&lowres }, "enable low-resolution decoding (1=1/2 size, 2=1/4 size, ...)", "level" },
3136     { "skiploop", OPT_INT | HAS_ARG | OPT_EXPERT, { (void*)&skip_loop_filter }, "set loop filter skipping mode", "mode" },
3137     { "skipframe", OPT_INT | HAS_ARG | OPT_EXPERT, { (void*)&skip_frame }, "set frame skipping mode", "mode" },
3138     { "skipidct", OPT_INT | HAS_ARG | OPT_EXPERT, { (void*)&skip_idct }, "set IDCT skipping mode", "mode" },
3139     { "idct", OPT_INT | HAS_ARG | OPT_EXPERT, { (void*)&idct }, "set idct algo",  "algo" },
3140     { "ec", OPT_INT | HAS_ARG | OPT_EXPERT, { (void*)&error_concealment }, "set error concealment options",  "bit_mask" },
3141     { "sync", HAS_ARG | OPT_EXPERT, { (void*)opt_sync }, "set audio-video sync type (type=audio/video/ext)", "type" },
3142     { "autoexit", OPT_BOOL | OPT_EXPERT, { (void*)&autoexit }, "exit at the end", "" },
3143     { "exitonkeydown", OPT_BOOL | OPT_EXPERT, { (void*)&exit_on_keydown }, "exit on key down", "" },
3144     { "exitonmousedown", OPT_BOOL | OPT_EXPERT, { (void*)&exit_on_mousedown }, "exit on mouse down", "" },
3145     { "loop", OPT_INT | HAS_ARG | OPT_EXPERT, { (void*)&loop }, "set number of times the playback shall be looped", "loop count" },
3146     { "framedrop", OPT_BOOL | OPT_EXPERT, { (void*)&framedrop }, "drop frames when cpu is too slow", "" },
3147     { "window_title", OPT_STRING | HAS_ARG, { (void*)&window_title }, "set window title", "window title" },
3148 #if CONFIG_AVFILTER
3149     { "vf", OPT_STRING | HAS_ARG, { (void*)&vfilters }, "video filters", "filter list" },
3150 #endif
3151     { "rdftspeed", OPT_INT | HAS_ARG| OPT_AUDIO | OPT_EXPERT, { (void*)&rdftspeed }, "rdft speed", "msecs" },
3152     { "showmode", HAS_ARG, {(void*)opt_show_mode}, "select show mode (0 = video, 1 = waves, 2 = RDFT)", "mode" },
3153     { "default", HAS_ARG | OPT_AUDIO | OPT_VIDEO | OPT_EXPERT, { (void*)opt_default }, "generic catch-all option", "" },
3154     { "i", OPT_BOOL, {(void *)&dummy}, "read specified file", "input_file"},
3155     { "codec", HAS_ARG | OPT_FUNC2, {(void*)opt_codec}, "force decoder", "decoder" },
3156     { NULL, },
3157 };
3158
3159 static void show_usage(void)
3160 {
3161     av_log(NULL, AV_LOG_INFO, "Simple media player\n");
3162     av_log(NULL, AV_LOG_INFO, "usage: %s [options] input_file\n", program_name);
3163     av_log(NULL, AV_LOG_INFO, "\n");
3164 }
3165
3166 static int opt_help(const char *opt, const char *arg)
3167 {
3168     av_log_set_callback(log_callback_help);
3169     show_usage();
3170     show_help_options(options, "Main options:\n",
3171                       OPT_EXPERT, 0);
3172     show_help_options(options, "\nAdvanced options:\n",
3173                       OPT_EXPERT, OPT_EXPERT);
3174     printf("\n");
3175     show_help_children(avcodec_get_class(), AV_OPT_FLAG_DECODING_PARAM);
3176     show_help_children(avformat_get_class(), AV_OPT_FLAG_DECODING_PARAM);
3177 #if !CONFIG_AVFILTER
3178     show_help_children(sws_get_class(), AV_OPT_FLAG_ENCODING_PARAM);
3179 #endif
3180     printf("\nWhile playing:\n"
3181            "q, ESC              quit\n"
3182            "f                   toggle full screen\n"
3183            "p, SPC              pause\n"
3184            "a                   cycle audio channel\n"
3185            "v                   cycle video channel\n"
3186            "t                   cycle subtitle channel\n"
3187            "w                   show audio waves\n"
3188            "s                   activate frame-step mode\n"
3189            "left/right          seek backward/forward 10 seconds\n"
3190            "down/up             seek backward/forward 1 minute\n"
3191            "page down/page up   seek backward/forward 10 minutes\n"
3192            "mouse click         seek to the fraction of the file corresponding to the click position\n"
3193            );
3194     return 0;
3195 }
3196
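     /* Lock manager callback for av_lockmgr_register(): implements the four
      * AVLockOp operations with SDL mutexes. Returns 0 on success and nonzero
      * on failure, as the libavcodec/libavformat locking API expects. */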
3197 static int lockmgr(void **mtx, enum AVLockOp op)
3198 {
3199    switch (op) {
3200    case AV_LOCK_CREATE:
3201        *mtx = SDL_CreateMutex();
3202        if (!*mtx)
3203            return 1;
3204        return 0;
3205    case AV_LOCK_OBTAIN:
3206        return !!SDL_LockMutex(*mtx);
3207    case AV_LOCK_RELEASE:
3208        return !!SDL_UnlockMutex(*mtx);
3209    case AV_LOCK_DESTROY:
3210        SDL_DestroyMutex(*mtx);
3211        return 0;
3212    }
3213    return 1;
3214 }
3215
3216 /* Program entry point */
3217 int main(int argc, char **argv)
3218 {
3219     int flags;
3220     VideoState *is;
3221
3222     av_log_set_flags(AV_LOG_SKIP_REPEATED);
3223     parse_loglevel(argc, argv, options);
3224
3225     /* register all codecs, demuxers and protocols */
3226     avcodec_register_all();
3227 #if CONFIG_AVDEVICE
3228     avdevice_register_all();
3229 #endif
3230 #if CONFIG_AVFILTER
3231     avfilter_register_all();
3232 #endif
3233     av_register_all();
3234     avformat_network_init();
3235
3236     init_opts();
3237
3238     signal(SIGINT , sigterm_handler); /* Interrupt (ANSI).    */
3239     signal(SIGTERM, sigterm_handler); /* Termination (ANSI).  */
3240
3241     show_banner(argc, argv, options);
3242
3243     parse_options(NULL, argc, argv, options, opt_input_file);
3244
3245     if (!input_filename) {
3246         show_usage();
3247         fprintf(stderr, "An input file must be specified\n");
3248         fprintf(stderr, "Use -h to get full help or, even better, run 'man %s'\n", program_name);
3249         exit(1);
3250     }
3251
3252     if (display_disable) {
3253         video_disable = 1;
3254     }
3255     flags = SDL_INIT_VIDEO | SDL_INIT_AUDIO | SDL_INIT_TIMER;
3256     if (audio_disable)
3257         flags &= ~SDL_INIT_AUDIO;
3258 #if !defined(__MINGW32__) && !defined(__APPLE__)
3259     flags |= SDL_INIT_EVENTTHREAD; /* Not supported on Windows or Mac OS X */
3260 #endif
3261     if (SDL_Init (flags)) {
3262         fprintf(stderr, "Could not initialize SDL - %s\n", SDL_GetError());
3263         fprintf(stderr, "(Did you set the DISPLAY variable?)\n");
3264         exit(1);
3265     }
3266
3267     if (!display_disable) {
3268 #if HAVE_SDL_VIDEO_SIZE
3269         const SDL_VideoInfo *vi = SDL_GetVideoInfo();
3270         fs_screen_width = vi->current_w;
3271         fs_screen_height = vi->current_h;
3272 #endif
3273     }
3274
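     /* Ignore event classes that the event loop never handles, so SDL does not
      * queue them at all. */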
3275     SDL_EventState(SDL_ACTIVEEVENT, SDL_IGNORE);
3276     SDL_EventState(SDL_SYSWMEVENT, SDL_IGNORE);
3277     SDL_EventState(SDL_USEREVENT, SDL_IGNORE);
3278
3279     if (av_lockmgr_register(lockmgr)) {
3280         fprintf(stderr, "Could not initialize lock manager!\n");
3281         do_exit(NULL);
3282     }
3283
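     /* flush_pkt is a sentinel packet: queueing it after a seek tells the
      * decoder threads to flush the codec buffers before decoding new data. */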
3284     av_init_packet(&flush_pkt);
3285     flush_pkt.data = (uint8_t *)(intptr_t)"FLUSH";
3286
3287     is = stream_open(input_filename, file_iformat);
3288     if (!is) {
3289         fprintf(stderr, "Failed to initialize VideoState!\n");
3290         do_exit(NULL);
3291     }
3292
3293     event_loop(is);
3294
3295     /* not reached: event_loop() only exits via do_exit(), which calls exit() */
3296
3297     return 0;
3298 }