/*
 * Retrieved from the git.sesse.net FFmpeg mirror:
 * ffplay.c, blob a0d8a18f5b14b69f799366cb35f17533f14aa736
 */
1 /*
2  * FFplay : Simple Media Player based on the FFmpeg libraries
3  * Copyright (c) 2003 Fabrice Bellard
4  *
5  * This file is part of FFmpeg.
6  *
7  * FFmpeg is free software; you can redistribute it and/or
8  * modify it under the terms of the GNU Lesser General Public
9  * License as published by the Free Software Foundation; either
10  * version 2.1 of the License, or (at your option) any later version.
11  *
12  * FFmpeg is distributed in the hope that it will be useful,
13  * but WITHOUT ANY WARRANTY; without even the implied warranty of
14  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
15  * Lesser General Public License for more details.
16  *
17  * You should have received a copy of the GNU Lesser General Public
18  * License along with FFmpeg; if not, write to the Free Software
19  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
20  */
21
22 #include "config.h"
23 #include <inttypes.h>
24 #include <math.h>
25 #include <limits.h>
26 #include "libavutil/avstring.h"
27 #include "libavutil/pixdesc.h"
28 #include "libavformat/avformat.h"
29 #include "libavdevice/avdevice.h"
30 #include "libswscale/swscale.h"
31 #include "libavcodec/audioconvert.h"
32 #include "libavcodec/colorspace.h"
33 #include "libavcodec/opt.h"
34 #include "libavcodec/avfft.h"
35
36 #if CONFIG_AVFILTER
37 # include "libavfilter/avfilter.h"
38 # include "libavfilter/avfiltergraph.h"
39 # include "libavfilter/graphparser.h"
40 #endif
41
42 #include "cmdutils.h"
43
44 #include <SDL.h>
45 #include <SDL_thread.h>
46
47 #ifdef __MINGW32__
48 #undef main /* We don't want SDL to override our main() */
49 #endif
50
51 #include <unistd.h>
52 #include <assert.h>
53
54 const char program_name[] = "FFplay";
55 const int program_birth_year = 2003;
56
57 //#define DEBUG_SYNC
58
59 #define MAX_QUEUE_SIZE (15 * 1024 * 1024)
60 #define MIN_AUDIOQ_SIZE (20 * 16 * 1024)
61 #define MIN_FRAMES 5
62
63 /* SDL audio buffer size, in samples. Should be small to have precise
64    A/V sync as SDL does not have hardware buffer fullness info. */
65 #define SDL_AUDIO_BUFFER_SIZE 1024
66
67 /* no AV sync correction is done if below the AV sync threshold */
68 #define AV_SYNC_THRESHOLD 0.01
69 /* no AV correction is done if too big error */
70 #define AV_NOSYNC_THRESHOLD 10.0
71
72 #define FRAME_SKIP_FACTOR 0.05
73
74 /* maximum audio speed change to get correct sync */
75 #define SAMPLE_CORRECTION_PERCENT_MAX 10
76
77 /* we use about AUDIO_DIFF_AVG_NB A-V differences to make the average */
78 #define AUDIO_DIFF_AVG_NB   20
79
80 /* NOTE: the size must be big enough to compensate the hardware audio buffersize size */
81 #define SAMPLE_ARRAY_SIZE (2*65536)
82
83 #if !CONFIG_AVFILTER
84 static int sws_flags = SWS_BICUBIC;
85 #endif
86
/* Thread-safe FIFO of demuxed AVPackets shared between the read thread
   (producer) and a decoder thread (consumer). */
typedef struct PacketQueue {
    AVPacketList *first_pkt, *last_pkt; /* head and tail of the linked list */
    int nb_packets;                     /* number of packets currently queued */
    int size;                           /* payload bytes + per-node overhead */
    int abort_request;                  /* when set, blocked readers return -1 */
    SDL_mutex *mutex;                   /* protects every field above */
    SDL_cond *cond;                     /* signalled on put and on abort */
} PacketQueue;
95
#define VIDEO_PICTURE_QUEUE_SIZE 2
#define SUBPICTURE_QUEUE_SIZE 4

/* One decoded video frame queued for display. */
typedef struct VideoPicture {
    double pts;                                  ///<presentation time stamp for this picture
    double target_clock;                         ///<av_gettime() time at which this should be displayed ideally
    int64_t pos;                                 ///<byte position in file
    SDL_Overlay *bmp;                            ///<SDL YUV overlay holding the pixels (NULL until allocated)
    int width, height; /* source height & width */
    int allocated;                               ///<nonzero once bmp allocation completed (set outside this chunk — confirm in alloc handler)
    enum PixelFormat pix_fmt;                    ///<pixel format of the source frame

#if CONFIG_AVFILTER
    AVFilterPicRef *picref;                      ///<filtered picture reference (carries pixel_aspect; see video_image_display)
#endif
} VideoPicture;
112
/* A decoded subtitle paired with its display timestamp. */
typedef struct SubPicture {
    double pts; /* presentation time stamp for this picture */
    AVSubtitle sub; /* decoded subtitle rectangles (blended in blend_subrect) */
} SubPicture;

/* Master-clock selection: which clock the other streams are slaved to. */
enum {
    AV_SYNC_AUDIO_MASTER, /* default choice */
    AV_SYNC_VIDEO_MASTER,
    AV_SYNC_EXTERNAL_CLOCK, /* synchronize to an external clock */
};
123
/* All state for one open media file: demuxer, per-stream packet queues,
   decoder threads, A/V sync clocks and the display-side picture/subtitle
   queues. One instance is active at a time (see cur_stream). */
typedef struct VideoState {
    SDL_Thread *parse_tid;                       ///<demux/read thread handle
    SDL_Thread *video_tid;                       ///<video decoding thread handle
    SDL_Thread *refresh_tid;                     ///<thread that pushes FF_REFRESH_EVENTs (see refresh_thread)
    AVInputFormat *iformat;
    int no_background;                           ///<nonzero suppresses one background repaint (see video_image_display)
    int abort_request;                           ///<set to make all threads exit
    int paused;
    int last_paused;
    int seek_req;                                ///<a seek has been requested
    int seek_flags;
    int64_t seek_pos;
    int64_t seek_rel;
    int read_pause_return;
    AVFormatContext *ic;
    int dtg_active_format;

    int audio_stream;

    int av_sync_type;                            ///<one of the AV_SYNC_* values above
    double external_clock; /* external clock base */
    int64_t external_clock_time;

    double audio_clock;
    double audio_diff_cum; /* used for AV difference average computation */
    double audio_diff_avg_coef;
    double audio_diff_threshold;
    int audio_diff_avg_count;
    AVStream *audio_st;
    PacketQueue audioq;
    int audio_hw_buf_size;
    /* samples output by the codec. we reserve more space for avsync
       compensation */
    DECLARE_ALIGNED(16,uint8_t,audio_buf1)[(AVCODEC_MAX_AUDIO_FRAME_SIZE * 3) / 2];
    DECLARE_ALIGNED(16,uint8_t,audio_buf2)[(AVCODEC_MAX_AUDIO_FRAME_SIZE * 3) / 2];
    uint8_t *audio_buf;
    unsigned int audio_buf_size; /* in bytes */
    int audio_buf_index; /* in bytes */
    AVPacket audio_pkt_temp;
    AVPacket audio_pkt;
    enum SampleFormat audio_src_fmt;
    AVAudioConvert *reformat_ctx;

    int show_audio; /* if true, display audio samples */
    int16_t sample_array[SAMPLE_ARRAY_SIZE];     ///<ring of recent samples, read modularly by video_audio_display
    int sample_array_index;
    int last_i_start;                            ///<display start index cached while paused
    RDFTContext *rdft;                           ///<FFT context for the spectrum display (show_audio==2)
    int rdft_bits;
    int xpos;                                    ///<current column of the scrolling spectrum display

    SDL_Thread *subtitle_tid;
    int subtitle_stream;
    int subtitle_stream_changed;
    AVStream *subtitle_st;
    PacketQueue subtitleq;
    SubPicture subpq[SUBPICTURE_QUEUE_SIZE];
    int subpq_size, subpq_rindex, subpq_windex;
    SDL_mutex *subpq_mutex;
    SDL_cond *subpq_cond;

    double frame_timer;
    double frame_last_pts;
    double frame_last_delay;
    double video_clock;                          ///<pts of last decoded frame / predicted pts of next decoded frame
    int video_stream;
    AVStream *video_st;
    PacketQueue videoq;
    double video_current_pts;                    ///<current displayed pts (different from video_clock if frame fifos are used)
    double video_current_pts_drift;              ///<video_current_pts - time (av_gettime) at which we updated video_current_pts - used to have running video pts
    int64_t video_current_pos;                   ///<current displayed file pos
    VideoPicture pictq[VIDEO_PICTURE_QUEUE_SIZE];
    int pictq_size, pictq_rindex, pictq_windex;
    SDL_mutex *pictq_mutex;
    SDL_cond *pictq_cond;
#if !CONFIG_AVFILTER
    struct SwsContext *img_convert_ctx;
#endif

    //    QETimer *video_timer;
    char filename[1024];
    int width, height, xleft, ytop;              ///<window geometry of the rendered area

    /* NOTE(review): presumably counters/last values for pts/dts plausibility
       checking in the decode path — confirm against the frame decoder */
    int64_t faulty_pts;
    int64_t faulty_dts;
    int64_t last_dts_for_fault_detection;
    int64_t last_pts_for_fault_detection;

#if CONFIG_AVFILTER
    AVFilterContext *out_video_filter;          ///<the last filter in the video chain
#endif

    float skip_frames;
    float skip_frames_index;
    int refresh;                                 ///<set while an FF_REFRESH_EVENT is pending (see refresh_thread)
} VideoState;
220
static void show_help(void);
static int audio_write_get_buf_size(VideoState *is);

/* options specified by the user */
static AVInputFormat *file_iformat;
static const char *input_filename;
static const char *window_title;
static int fs_screen_width;
static int fs_screen_height;
static int screen_width = 0;
static int screen_height = 0;
static int frame_width = 0;
static int frame_height = 0;
static enum PixelFormat frame_pix_fmt = PIX_FMT_NONE;
static int audio_disable;
static int video_disable;
static int wanted_stream[AVMEDIA_TYPE_NB]={   /* -1: no specific stream requested */
    [AVMEDIA_TYPE_AUDIO]=-1,
    [AVMEDIA_TYPE_VIDEO]=-1,
    [AVMEDIA_TYPE_SUBTITLE]=-1,
};
static int seek_by_bytes=-1;
static int display_disable;
static int show_status = 1;
static int av_sync_type = AV_SYNC_AUDIO_MASTER;
static int64_t start_time = AV_NOPTS_VALUE;
/* decoder tuning options passed through to the codec contexts */
static int debug = 0;
static int debug_mv = 0;
static int step = 0;
static int thread_count = 1;
static int workaround_bugs = 1;
static int fast = 0;
static int genpts = 0;
static int lowres = 0;
static int idct = FF_IDCT_AUTO;
static enum AVDiscard skip_frame= AVDISCARD_DEFAULT;
static enum AVDiscard skip_idct= AVDISCARD_DEFAULT;
static enum AVDiscard skip_loop_filter= AVDISCARD_DEFAULT;
static int error_recognition = FF_ER_CAREFUL;
static int error_concealment = 3;
static int decoder_reorder_pts= -1;
static int autoexit;
static int loop=1;
static int framedrop=1;

static int rdftspeed=20;   /* refresh interval (ms) for the spectrum display */
#if CONFIG_AVFILTER
static char *vfilters = NULL;
#endif

/* current context */
static int is_full_screen;
static VideoState *cur_stream;
static int64_t audio_callback_time;   /* av_gettime() at the last audio callback */

static AVPacket flush_pkt;            /* sentinel packet marking a decoder flush */

#define FF_ALLOC_EVENT   (SDL_USEREVENT)
#define FF_REFRESH_EVENT (SDL_USEREVENT + 1)
#define FF_QUIT_EVENT    (SDL_USEREVENT + 2)

static SDL_Surface *screen;           /* the SDL output surface */

static int packet_queue_put(PacketQueue *q, AVPacket *pkt);
286 /* packet queue handling */
287 static void packet_queue_init(PacketQueue *q)
288 {
289     memset(q, 0, sizeof(PacketQueue));
290     q->mutex = SDL_CreateMutex();
291     q->cond = SDL_CreateCond();
292     packet_queue_put(q, &flush_pkt);
293 }
294
295 static void packet_queue_flush(PacketQueue *q)
296 {
297     AVPacketList *pkt, *pkt1;
298
299     SDL_LockMutex(q->mutex);
300     for(pkt = q->first_pkt; pkt != NULL; pkt = pkt1) {
301         pkt1 = pkt->next;
302         av_free_packet(&pkt->pkt);
303         av_freep(&pkt);
304     }
305     q->last_pkt = NULL;
306     q->first_pkt = NULL;
307     q->nb_packets = 0;
308     q->size = 0;
309     SDL_UnlockMutex(q->mutex);
310 }
311
312 static void packet_queue_end(PacketQueue *q)
313 {
314     packet_queue_flush(q);
315     SDL_DestroyMutex(q->mutex);
316     SDL_DestroyCond(q->cond);
317 }
318
319 static int packet_queue_put(PacketQueue *q, AVPacket *pkt)
320 {
321     AVPacketList *pkt1;
322
323     /* duplicate the packet */
324     if (pkt!=&flush_pkt && av_dup_packet(pkt) < 0)
325         return -1;
326
327     pkt1 = av_malloc(sizeof(AVPacketList));
328     if (!pkt1)
329         return -1;
330     pkt1->pkt = *pkt;
331     pkt1->next = NULL;
332
333
334     SDL_LockMutex(q->mutex);
335
336     if (!q->last_pkt)
337
338         q->first_pkt = pkt1;
339     else
340         q->last_pkt->next = pkt1;
341     q->last_pkt = pkt1;
342     q->nb_packets++;
343     q->size += pkt1->pkt.size + sizeof(*pkt1);
344     /* XXX: should duplicate packet data in DV case */
345     SDL_CondSignal(q->cond);
346
347     SDL_UnlockMutex(q->mutex);
348     return 0;
349 }
350
351 static void packet_queue_abort(PacketQueue *q)
352 {
353     SDL_LockMutex(q->mutex);
354
355     q->abort_request = 1;
356
357     SDL_CondSignal(q->cond);
358
359     SDL_UnlockMutex(q->mutex);
360 }
361
362 /* return < 0 if aborted, 0 if no packet and > 0 if packet.  */
363 static int packet_queue_get(PacketQueue *q, AVPacket *pkt, int block)
364 {
365     AVPacketList *pkt1;
366     int ret;
367
368     SDL_LockMutex(q->mutex);
369
370     for(;;) {
371         if (q->abort_request) {
372             ret = -1;
373             break;
374         }
375
376         pkt1 = q->first_pkt;
377         if (pkt1) {
378             q->first_pkt = pkt1->next;
379             if (!q->first_pkt)
380                 q->last_pkt = NULL;
381             q->nb_packets--;
382             q->size -= pkt1->pkt.size + sizeof(*pkt1);
383             *pkt = pkt1->pkt;
384             av_free(pkt1);
385             ret = 1;
386             break;
387         } else if (!block) {
388             ret = 0;
389             break;
390         } else {
391             SDL_CondWait(q->cond, q->mutex);
392         }
393     }
394     SDL_UnlockMutex(q->mutex);
395     return ret;
396 }
397
398 static inline void fill_rectangle(SDL_Surface *screen,
399                                   int x, int y, int w, int h, int color)
400 {
401     SDL_Rect rect;
402     rect.x = x;
403     rect.y = y;
404     rect.w = w;
405     rect.h = h;
406     SDL_FillRect(screen, &rect, color);
407 }
408
#if 0
/* draw only the border of a rectangle */
/* Currently disabled: paints the four background strips surrounding the
   (x, y, w, h) video area inside the window, clamping each strip to the
   window bounds. */
void fill_border(VideoState *s, int x, int y, int w, int h, int color)
{
    int w1, w2, h1, h2;

    /* fill the background */
    w1 = x;                    /* width of the left strip */
    if (w1 < 0)
        w1 = 0;
    w2 = s->width - (x + w);   /* width of the right strip */
    if (w2 < 0)
        w2 = 0;
    h1 = y;                    /* height of the top strip */
    if (h1 < 0)
        h1 = 0;
    h2 = s->height - (y + h);  /* height of the bottom strip */
    if (h2 < 0)
        h2 = 0;
    fill_rectangle(screen,
                   s->xleft, s->ytop,
                   w1, s->height,
                   color);
    fill_rectangle(screen,
                   s->xleft + s->width - w2, s->ytop,
                   w2, s->height,
                   color);
    fill_rectangle(screen,
                   s->xleft + w1, s->ytop,
                   s->width - w1 - w2, h1,
                   color);
    fill_rectangle(screen,
                   s->xleft + w1, s->ytop + s->height - h2,
                   s->width - w1 - w2, h2,
                   color);
}
#endif
446
/* Blend newp over oldp with 8-bit alpha a; s is the number of extra
   fractional bits carried by oldp/newp sums (0..2 in blend_subrect). */
#define ALPHA_BLEND(a, oldp, newp, s)\
((((oldp << s) * (255 - (a))) + (newp * (a))) / (255 << s))

/* Unpack a 32-bit ARGB word at s into r, g, b, a. */
#define RGBA_IN(r, g, b, a, s)\
{\
    unsigned int v = ((const uint32_t *)(s))[0];\
    a = (v >> 24) & 0xff;\
    r = (v >> 16) & 0xff;\
    g = (v >> 8) & 0xff;\
    b = v & 0xff;\
}

/* Look up the paletted pixel *s in pal and unpack it as A, Y, U, V. */
#define YUVA_IN(y, u, v, a, s, pal)\
{\
    unsigned int val = ((const uint32_t *)(pal))[*(const uint8_t*)(s)];\
    a = (val >> 24) & 0xff;\
    y = (val >> 16) & 0xff;\
    u = (val >> 8) & 0xff;\
    v = val & 0xff;\
}

/* Pack a, y, u, v back into a 32-bit word at d. */
#define YUVA_OUT(d, y, u, v, a)\
{\
    ((uint32_t *)(d))[0] = (a << 24) | (y << 16) | (u << 8) | v;\
}


/* bytes per source pixel: subtitle bitmaps are 8-bit paletted */
#define BPP 1
475
476 static void blend_subrect(AVPicture *dst, const AVSubtitleRect *rect, int imgw, int imgh)
477 {
478     int wrap, wrap3, width2, skip2;
479     int y, u, v, a, u1, v1, a1, w, h;
480     uint8_t *lum, *cb, *cr;
481     const uint8_t *p;
482     const uint32_t *pal;
483     int dstx, dsty, dstw, dsth;
484
485     dstw = av_clip(rect->w, 0, imgw);
486     dsth = av_clip(rect->h, 0, imgh);
487     dstx = av_clip(rect->x, 0, imgw - dstw);
488     dsty = av_clip(rect->y, 0, imgh - dsth);
489     lum = dst->data[0] + dsty * dst->linesize[0];
490     cb = dst->data[1] + (dsty >> 1) * dst->linesize[1];
491     cr = dst->data[2] + (dsty >> 1) * dst->linesize[2];
492
493     width2 = ((dstw + 1) >> 1) + (dstx & ~dstw & 1);
494     skip2 = dstx >> 1;
495     wrap = dst->linesize[0];
496     wrap3 = rect->pict.linesize[0];
497     p = rect->pict.data[0];
498     pal = (const uint32_t *)rect->pict.data[1];  /* Now in YCrCb! */
499
500     if (dsty & 1) {
501         lum += dstx;
502         cb += skip2;
503         cr += skip2;
504
505         if (dstx & 1) {
506             YUVA_IN(y, u, v, a, p, pal);
507             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
508             cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
509             cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
510             cb++;
511             cr++;
512             lum++;
513             p += BPP;
514         }
515         for(w = dstw - (dstx & 1); w >= 2; w -= 2) {
516             YUVA_IN(y, u, v, a, p, pal);
517             u1 = u;
518             v1 = v;
519             a1 = a;
520             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
521
522             YUVA_IN(y, u, v, a, p + BPP, pal);
523             u1 += u;
524             v1 += v;
525             a1 += a;
526             lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
527             cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
528             cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
529             cb++;
530             cr++;
531             p += 2 * BPP;
532             lum += 2;
533         }
534         if (w) {
535             YUVA_IN(y, u, v, a, p, pal);
536             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
537             cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
538             cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
539             p++;
540             lum++;
541         }
542         p += wrap3 - dstw * BPP;
543         lum += wrap - dstw - dstx;
544         cb += dst->linesize[1] - width2 - skip2;
545         cr += dst->linesize[2] - width2 - skip2;
546     }
547     for(h = dsth - (dsty & 1); h >= 2; h -= 2) {
548         lum += dstx;
549         cb += skip2;
550         cr += skip2;
551
552         if (dstx & 1) {
553             YUVA_IN(y, u, v, a, p, pal);
554             u1 = u;
555             v1 = v;
556             a1 = a;
557             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
558             p += wrap3;
559             lum += wrap;
560             YUVA_IN(y, u, v, a, p, pal);
561             u1 += u;
562             v1 += v;
563             a1 += a;
564             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
565             cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
566             cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
567             cb++;
568             cr++;
569             p += -wrap3 + BPP;
570             lum += -wrap + 1;
571         }
572         for(w = dstw - (dstx & 1); w >= 2; w -= 2) {
573             YUVA_IN(y, u, v, a, p, pal);
574             u1 = u;
575             v1 = v;
576             a1 = a;
577             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
578
579             YUVA_IN(y, u, v, a, p + BPP, pal);
580             u1 += u;
581             v1 += v;
582             a1 += a;
583             lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
584             p += wrap3;
585             lum += wrap;
586
587             YUVA_IN(y, u, v, a, p, pal);
588             u1 += u;
589             v1 += v;
590             a1 += a;
591             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
592
593             YUVA_IN(y, u, v, a, p + BPP, pal);
594             u1 += u;
595             v1 += v;
596             a1 += a;
597             lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
598
599             cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 2);
600             cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 2);
601
602             cb++;
603             cr++;
604             p += -wrap3 + 2 * BPP;
605             lum += -wrap + 2;
606         }
607         if (w) {
608             YUVA_IN(y, u, v, a, p, pal);
609             u1 = u;
610             v1 = v;
611             a1 = a;
612             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
613             p += wrap3;
614             lum += wrap;
615             YUVA_IN(y, u, v, a, p, pal);
616             u1 += u;
617             v1 += v;
618             a1 += a;
619             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
620             cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
621             cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
622             cb++;
623             cr++;
624             p += -wrap3 + BPP;
625             lum += -wrap + 1;
626         }
627         p += wrap3 + (wrap3 - dstw * BPP);
628         lum += wrap + (wrap - dstw - dstx);
629         cb += dst->linesize[1] - width2 - skip2;
630         cr += dst->linesize[2] - width2 - skip2;
631     }
632     /* handle odd height */
633     if (h) {
634         lum += dstx;
635         cb += skip2;
636         cr += skip2;
637
638         if (dstx & 1) {
639             YUVA_IN(y, u, v, a, p, pal);
640             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
641             cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
642             cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
643             cb++;
644             cr++;
645             lum++;
646             p += BPP;
647         }
648         for(w = dstw - (dstx & 1); w >= 2; w -= 2) {
649             YUVA_IN(y, u, v, a, p, pal);
650             u1 = u;
651             v1 = v;
652             a1 = a;
653             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
654
655             YUVA_IN(y, u, v, a, p + BPP, pal);
656             u1 += u;
657             v1 += v;
658             a1 += a;
659             lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
660             cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u, 1);
661             cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v, 1);
662             cb++;
663             cr++;
664             p += 2 * BPP;
665             lum += 2;
666         }
667         if (w) {
668             YUVA_IN(y, u, v, a, p, pal);
669             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
670             cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
671             cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
672         }
673     }
674 }
675
676 static void free_subpicture(SubPicture *sp)
677 {
678     int i;
679
680     for (i = 0; i < sp->sub.num_rects; i++)
681     {
682         av_freep(&sp->sub.rects[i]->pict.data[0]);
683         av_freep(&sp->sub.rects[i]->pict.data[1]);
684         av_freep(&sp->sub.rects[i]);
685     }
686
687     av_free(sp->sub.rects);
688
689     memset(&sp->sub, 0, sizeof(AVSubtitle));
690 }
691
/* Blit the picture at the read index of the picture queue to the screen,
   first blending any subtitle whose display time has come, letterboxed so
   the derived display aspect ratio is preserved. */
static void video_image_display(VideoState *is)
{
    VideoPicture *vp;
    SubPicture *sp;
    AVPicture pict;
    float aspect_ratio;
    int width, height, x, y;
    SDL_Rect rect;
    int i;

    vp = &is->pictq[is->pictq_rindex];
    if (vp->bmp) {
#if CONFIG_AVFILTER
         /* pixel (sample) aspect ratio comes from the filter output */
         if (vp->picref->pixel_aspect.num == 0)
             aspect_ratio = 0;
         else
             aspect_ratio = av_q2d(vp->picref->pixel_aspect);
#else

        /* XXX: use variable in the frame */
        /* prefer the container's SAR, fall back to the codec's */
        if (is->video_st->sample_aspect_ratio.num)
            aspect_ratio = av_q2d(is->video_st->sample_aspect_ratio);
        else if (is->video_st->codec->sample_aspect_ratio.num)
            aspect_ratio = av_q2d(is->video_st->codec->sample_aspect_ratio);
        else
            aspect_ratio = 0;
#endif
        if (aspect_ratio <= 0.0)
            aspect_ratio = 1.0;
        /* convert sample aspect ratio to display aspect ratio */
        aspect_ratio *= (float)vp->width / (float)vp->height;
        /* if an active format is indicated, then it overrides the
           mpeg format */
#if 0
        if (is->video_st->codec->dtg_active_format != is->dtg_active_format) {
            is->dtg_active_format = is->video_st->codec->dtg_active_format;
            printf("dtg_active_format=%d\n", is->dtg_active_format);
        }
#endif
#if 0
        switch(is->video_st->codec->dtg_active_format) {
        case FF_DTG_AFD_SAME:
        default:
            /* nothing to do */
            break;
        case FF_DTG_AFD_4_3:
            aspect_ratio = 4.0 / 3.0;
            break;
        case FF_DTG_AFD_16_9:
            aspect_ratio = 16.0 / 9.0;
            break;
        case FF_DTG_AFD_14_9:
            aspect_ratio = 14.0 / 9.0;
            break;
        case FF_DTG_AFD_4_3_SP_14_9:
            aspect_ratio = 14.0 / 9.0;
            break;
        case FF_DTG_AFD_16_9_SP_14_9:
            aspect_ratio = 14.0 / 9.0;
            break;
        case FF_DTG_AFD_SP_4_3:
            aspect_ratio = 4.0 / 3.0;
            break;
        }
#endif

        /* blend the current subtitle (if due) directly into the overlay */
        if (is->subtitle_st)
        {
            if (is->subpq_size > 0)
            {
                sp = &is->subpq[is->subpq_rindex];

                /* only blend once the subtitle's start time has been reached */
                if (vp->pts >= sp->pts + ((float) sp->sub.start_display_time / 1000))
                {
                    SDL_LockYUVOverlay (vp->bmp);

                    /* note the swapped chroma planes: the SDL overlay
                       presumably stores V before U (YV12) — pict is built
                       as Y, U, V for blend_subrect */
                    pict.data[0] = vp->bmp->pixels[0];
                    pict.data[1] = vp->bmp->pixels[2];
                    pict.data[2] = vp->bmp->pixels[1];

                    pict.linesize[0] = vp->bmp->pitches[0];
                    pict.linesize[1] = vp->bmp->pitches[2];
                    pict.linesize[2] = vp->bmp->pitches[1];

                    for (i = 0; i < sp->sub.num_rects; i++)
                        blend_subrect(&pict, sp->sub.rects[i],
                                      vp->bmp->w, vp->bmp->h);

                    SDL_UnlockYUVOverlay (vp->bmp);
                }
            }
        }


        /* XXX: we suppose the screen has a 1.0 pixel ratio */
        /* fit the largest even-sized rectangle with the right aspect */
        height = is->height;
        width = ((int)rint(height * aspect_ratio)) & ~1;
        if (width > is->width) {
            width = is->width;
            height = ((int)rint(width / aspect_ratio)) & ~1;
        }
        /* center the picture inside the window */
        x = (is->width - width) / 2;
        y = (is->height - height) / 2;
        if (!is->no_background) {
            /* fill the background */
            //            fill_border(is, x, y, width, height, QERGB(0x00, 0x00, 0x00));
        } else {
            is->no_background = 0;
        }
        rect.x = is->xleft + x;
        rect.y = is->ytop  + y;
        rect.w = width;
        rect.h = height;
        SDL_DisplayYUVOverlay(vp->bmp, &rect);
    } else {
#if 0
        fill_rectangle(screen,
                       is->xleft, is->ytop, is->width, is->height,
                       QERGB(0x00, 0x00, 0x00));
#endif
    }
}
813
/* Mathematical modulo: always returns a value in [0, b) for positive b,
   unlike C's % operator, which is negative for negative a. */
static inline int compute_mod(int a, int b)
{
    int r = a % b;
    return r < 0 ? r + b : r;
}
822
823 static void video_audio_display(VideoState *s)
824 {
825     int i, i_start, x, y1, y, ys, delay, n, nb_display_channels;
826     int ch, channels, h, h2, bgcolor, fgcolor;
827     int16_t time_diff;
828     int rdft_bits, nb_freq;
829
830     for(rdft_bits=1; (1<<rdft_bits)<2*s->height; rdft_bits++)
831         ;
832     nb_freq= 1<<(rdft_bits-1);
833
834     /* compute display index : center on currently output samples */
835     channels = s->audio_st->codec->channels;
836     nb_display_channels = channels;
837     if (!s->paused) {
838         int data_used= s->show_audio==1 ? s->width : (2*nb_freq);
839         n = 2 * channels;
840         delay = audio_write_get_buf_size(s);
841         delay /= n;
842
843         /* to be more precise, we take into account the time spent since
844            the last buffer computation */
845         if (audio_callback_time) {
846             time_diff = av_gettime() - audio_callback_time;
847             delay -= (time_diff * s->audio_st->codec->sample_rate) / 1000000;
848         }
849
850         delay += 2*data_used;
851         if (delay < data_used)
852             delay = data_used;
853
854         i_start= x = compute_mod(s->sample_array_index - delay * channels, SAMPLE_ARRAY_SIZE);
855         if(s->show_audio==1){
856             h= INT_MIN;
857             for(i=0; i<1000; i+=channels){
858                 int idx= (SAMPLE_ARRAY_SIZE + x - i) % SAMPLE_ARRAY_SIZE;
859                 int a= s->sample_array[idx];
860                 int b= s->sample_array[(idx + 4*channels)%SAMPLE_ARRAY_SIZE];
861                 int c= s->sample_array[(idx + 5*channels)%SAMPLE_ARRAY_SIZE];
862                 int d= s->sample_array[(idx + 9*channels)%SAMPLE_ARRAY_SIZE];
863                 int score= a-d;
864                 if(h<score && (b^c)<0){
865                     h= score;
866                     i_start= idx;
867                 }
868             }
869         }
870
871         s->last_i_start = i_start;
872     } else {
873         i_start = s->last_i_start;
874     }
875
876     bgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0x00);
877     if(s->show_audio==1){
878         fill_rectangle(screen,
879                        s->xleft, s->ytop, s->width, s->height,
880                        bgcolor);
881
882         fgcolor = SDL_MapRGB(screen->format, 0xff, 0xff, 0xff);
883
884         /* total height for one channel */
885         h = s->height / nb_display_channels;
886         /* graph height / 2 */
887         h2 = (h * 9) / 20;
888         for(ch = 0;ch < nb_display_channels; ch++) {
889             i = i_start + ch;
890             y1 = s->ytop + ch * h + (h / 2); /* position of center line */
891             for(x = 0; x < s->width; x++) {
892                 y = (s->sample_array[i] * h2) >> 15;
893                 if (y < 0) {
894                     y = -y;
895                     ys = y1 - y;
896                 } else {
897                     ys = y1;
898                 }
899                 fill_rectangle(screen,
900                                s->xleft + x, ys, 1, y,
901                                fgcolor);
902                 i += channels;
903                 if (i >= SAMPLE_ARRAY_SIZE)
904                     i -= SAMPLE_ARRAY_SIZE;
905             }
906         }
907
908         fgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0xff);
909
910         for(ch = 1;ch < nb_display_channels; ch++) {
911             y = s->ytop + ch * h;
912             fill_rectangle(screen,
913                            s->xleft, y, s->width, 1,
914                            fgcolor);
915         }
916         SDL_UpdateRect(screen, s->xleft, s->ytop, s->width, s->height);
917     }else{
918         nb_display_channels= FFMIN(nb_display_channels, 2);
919         if(rdft_bits != s->rdft_bits){
920             av_rdft_end(s->rdft);
921             s->rdft = av_rdft_init(rdft_bits, DFT_R2C);
922             s->rdft_bits= rdft_bits;
923         }
924         {
925             FFTSample data[2][2*nb_freq];
926             for(ch = 0;ch < nb_display_channels; ch++) {
927                 i = i_start + ch;
928                 for(x = 0; x < 2*nb_freq; x++) {
929                     double w= (x-nb_freq)*(1.0/nb_freq);
930                     data[ch][x]= s->sample_array[i]*(1.0-w*w);
931                     i += channels;
932                     if (i >= SAMPLE_ARRAY_SIZE)
933                         i -= SAMPLE_ARRAY_SIZE;
934                 }
935                 av_rdft_calc(s->rdft, data[ch]);
936             }
937             //least efficient way to do this, we should of course directly access it but its more than fast enough
938             for(y=0; y<s->height; y++){
939                 double w= 1/sqrt(nb_freq);
940                 int a= sqrt(w*sqrt(data[0][2*y+0]*data[0][2*y+0] + data[0][2*y+1]*data[0][2*y+1]));
941                 int b= sqrt(w*sqrt(data[1][2*y+0]*data[1][2*y+0] + data[1][2*y+1]*data[1][2*y+1]));
942                 a= FFMIN(a,255);
943                 b= FFMIN(b,255);
944                 fgcolor = SDL_MapRGB(screen->format, a, b, (a+b)/2);
945
946                 fill_rectangle(screen,
947                             s->xpos, s->height-y, 1, 1,
948                             fgcolor);
949             }
950         }
951         SDL_UpdateRect(screen, s->xpos, s->ytop, 1, s->height);
952         s->xpos++;
953         if(s->xpos >= s->width)
954             s->xpos= s->xleft;
955     }
956 }
957
/* (Re)create the SDL output surface for the current content.
 * Size preference order: forced fullscreen size, forced window size,
 * the filter/codec frame size, then a 640x480 fallback.
 * Returns 0 on success (including when the existing surface already
 * matches), -1 if SDL cannot set the video mode. */
static int video_open(VideoState *is){
    int flags = SDL_HWSURFACE|SDL_ASYNCBLIT|SDL_HWACCEL;
    int w,h;

    if(is_full_screen) flags |= SDL_FULLSCREEN;
    else               flags |= SDL_RESIZABLE;

    if (is_full_screen && fs_screen_width) {
        w = fs_screen_width;
        h = fs_screen_height;
    } else if(!is_full_screen && screen_width){
        w = screen_width;
        h = screen_height;
#if CONFIG_AVFILTER
    }else if (is->out_video_filter && is->out_video_filter->inputs[0]){
        w = is->out_video_filter->inputs[0]->w;
        h = is->out_video_filter->inputs[0]->h;
#else
    }else if (is->video_st && is->video_st->codec->width){
        w = is->video_st->codec->width;
        h = is->video_st->codec->height;
#endif
    } else {
        w = 640;
        h = 480;
    }
    /* nothing to do if the current surface already has the wanted size */
    if(screen && is->width == screen->w && screen->w == w
       && is->height== screen->h && screen->h == h)
        return 0;

#ifndef __APPLE__
    screen = SDL_SetVideoMode(w, h, 0, flags);
#else
    /* setting bits_per_pixel = 0 or 32 causes blank video on OS X */
    screen = SDL_SetVideoMode(w, h, 24, flags);
#endif
    if (!screen) {
        fprintf(stderr, "SDL: could not set video mode - exiting\n");
        return -1;
    }
    if (!window_title)
        window_title = input_filename;
    SDL_WM_SetCaption(window_title, window_title);

    /* remember the size actually granted by SDL */
    is->width = screen->w;
    is->height = screen->h;

    return 0;
}
1007
1008 /* display the current picture, if any */
1009 static void video_display(VideoState *is)
1010 {
1011     if(!screen)
1012         video_open(cur_stream);
1013     if (is->audio_st && is->show_audio)
1014         video_audio_display(is);
1015     else if (is->video_st)
1016         video_image_display(is);
1017 }
1018
1019 static int refresh_thread(void *opaque)
1020 {
1021     VideoState *is= opaque;
1022     while(!is->abort_request){
1023     SDL_Event event;
1024     event.type = FF_REFRESH_EVENT;
1025     event.user.data1 = opaque;
1026         if(!is->refresh){
1027             is->refresh=1;
1028     SDL_PushEvent(&event);
1029         }
1030         usleep(is->audio_st && is->show_audio ? rdftspeed*1000 : 5000); //FIXME ideally we should wait the correct time but SDLs event passing is so slow it would be silly
1031     }
1032     return 0;
1033 }
1034
1035 /* get the current audio clock value */
1036 static double get_audio_clock(VideoState *is)
1037 {
1038     double pts;
1039     int hw_buf_size, bytes_per_sec;
1040     pts = is->audio_clock;
1041     hw_buf_size = audio_write_get_buf_size(is);
1042     bytes_per_sec = 0;
1043     if (is->audio_st) {
1044         bytes_per_sec = is->audio_st->codec->sample_rate *
1045             2 * is->audio_st->codec->channels;
1046     }
1047     if (bytes_per_sec)
1048         pts -= (double)hw_buf_size / bytes_per_sec;
1049     return pts;
1050 }
1051
1052 /* get the current video clock value */
1053 static double get_video_clock(VideoState *is)
1054 {
1055     if (is->paused) {
1056         return is->video_current_pts;
1057     } else {
1058         return is->video_current_pts_drift + av_gettime() / 1000000.0;
1059     }
1060 }
1061
1062 /* get the current external clock value */
1063 static double get_external_clock(VideoState *is)
1064 {
1065     int64_t ti;
1066     ti = av_gettime();
1067     return is->external_clock + ((ti - is->external_clock_time) * 1e-6);
1068 }
1069
1070 /* get the current master clock value */
1071 static double get_master_clock(VideoState *is)
1072 {
1073     double val;
1074
1075     if (is->av_sync_type == AV_SYNC_VIDEO_MASTER) {
1076         if (is->video_st)
1077             val = get_video_clock(is);
1078         else
1079             val = get_audio_clock(is);
1080     } else if (is->av_sync_type == AV_SYNC_AUDIO_MASTER) {
1081         if (is->audio_st)
1082             val = get_audio_clock(is);
1083         else
1084             val = get_video_clock(is);
1085     } else {
1086         val = get_external_clock(is);
1087     }
1088     return val;
1089 }
1090
1091 /* seek in the stream */
1092 static void stream_seek(VideoState *is, int64_t pos, int64_t rel, int seek_by_bytes)
1093 {
1094     if (!is->seek_req) {
1095         is->seek_pos = pos;
1096         is->seek_rel = rel;
1097         is->seek_flags &= ~AVSEEK_FLAG_BYTE;
1098         if (seek_by_bytes)
1099             is->seek_flags |= AVSEEK_FLAG_BYTE;
1100         is->seek_req = 1;
1101     }
1102 }
1103
/* pause or resume the video */
static void stream_pause(VideoState *is)
{
    if (is->paused) {
        /* resuming: drift was frozen at pause time, so the expression below
           equals the wall-clock time spent paused; push frame_timer forward
           by that amount so playback does not try to "catch up" */
        is->frame_timer += av_gettime() / 1000000.0 + is->video_current_pts_drift - is->video_current_pts;
        if(is->read_pause_return != AVERROR(ENOSYS)){
            /* the demuxer honoured av_read_pause(): its clock kept running,
               so resync video_current_pts to "now" */
            is->video_current_pts = is->video_current_pts_drift + av_gettime() / 1000000.0;
        }
        /* re-anchor the drift so get_video_clock() resumes from here */
        is->video_current_pts_drift = is->video_current_pts - av_gettime() / 1000000.0;
    }
    is->paused = !is->paused;
}
1116
1117 static double compute_target_time(double frame_current_pts, VideoState *is)
1118 {
1119     double delay, sync_threshold, diff;
1120
1121     /* compute nominal delay */
1122     delay = frame_current_pts - is->frame_last_pts;
1123     if (delay <= 0 || delay >= 10.0) {
1124         /* if incorrect delay, use previous one */
1125         delay = is->frame_last_delay;
1126     } else {
1127         is->frame_last_delay = delay;
1128     }
1129     is->frame_last_pts = frame_current_pts;
1130
1131     /* update delay to follow master synchronisation source */
1132     if (((is->av_sync_type == AV_SYNC_AUDIO_MASTER && is->audio_st) ||
1133          is->av_sync_type == AV_SYNC_EXTERNAL_CLOCK)) {
1134         /* if video is slave, we try to correct big delays by
1135            duplicating or deleting a frame */
1136         diff = get_video_clock(is) - get_master_clock(is);
1137
1138         /* skip or repeat frame. We take into account the
1139            delay to compute the threshold. I still don't know
1140            if it is the best guess */
1141         sync_threshold = FFMAX(AV_SYNC_THRESHOLD, delay);
1142         if (fabs(diff) < AV_NOSYNC_THRESHOLD) {
1143             if (diff <= -sync_threshold)
1144                 delay = 0;
1145             else if (diff >= sync_threshold)
1146                 delay = 2 * delay;
1147         }
1148     }
1149     is->frame_timer += delay;
1150 #if defined(DEBUG_SYNC)
1151     printf("video: delay=%0.3f actual_delay=%0.3f pts=%0.3f A-V=%f\n",
1152             delay, actual_delay, frame_current_pts, -diff);
1153 #endif
1154
1155     return is->frame_timer;
1156 }
1157
/* called to display each frame; runs on the main thread in response to
   FF_REFRESH_EVENT.  Dequeues the next picture when its target time has
   arrived, drops late frames if enabled, expires subtitles, and prints
   the periodic status line. */
static void video_refresh_timer(void *opaque)
{
    VideoState *is = opaque;
    VideoPicture *vp;

    SubPicture *sp, *sp2;

    if (is->video_st) {
retry:
        if (is->pictq_size == 0) {
            //nothing to do, no picture to display in the que
        } else {
            double time= av_gettime()/1000000.0;
            double next_target;
            /* dequeue the picture */
            vp = &is->pictq[is->pictq_rindex];

            /* too early: keep the frame queued until the next refresh */
            if(time < vp->target_clock)
                return;
            /* update current video pts */
            is->video_current_pts = vp->pts;
            is->video_current_pts_drift = is->video_current_pts - time;
            is->video_current_pos = vp->pos;
            if(is->pictq_size > 1){
                VideoPicture *nextvp= &is->pictq[(is->pictq_rindex+1)%VIDEO_PICTURE_QUEUE_SIZE];
                assert(nextvp->target_clock >= vp->target_clock);
                next_target= nextvp->target_clock;
            }else{
                /* only one picture queued: estimate the next display time
                   from the current frame's duration */
                next_target= vp->target_clock + is->video_clock - vp->pts; //FIXME pass durations cleanly
            }
            /* frame dropping: if we are already late for the next frame,
               raise the decoder-side skip ratio and possibly discard this
               picture without ever displaying it */
            if(framedrop && time > next_target){
                is->skip_frames *= 1.0 + FRAME_SKIP_FACTOR;
                if(is->pictq_size > 1 || time > next_target + 0.5){
                    /* update queue size and signal for next picture */
                    if (++is->pictq_rindex == VIDEO_PICTURE_QUEUE_SIZE)
                        is->pictq_rindex = 0;

                    SDL_LockMutex(is->pictq_mutex);
                    is->pictq_size--;
                    SDL_CondSignal(is->pictq_cond);
                    SDL_UnlockMutex(is->pictq_mutex);
                    goto retry;
                }
            }

            if(is->subtitle_st) {
                if (is->subtitle_stream_changed) {
                    /* subtitle stream switched: flush every queued subpicture */
                    SDL_LockMutex(is->subpq_mutex);

                    while (is->subpq_size) {
                        free_subpicture(&is->subpq[is->subpq_rindex]);

                        /* update queue size and signal for next picture */
                        if (++is->subpq_rindex == SUBPICTURE_QUEUE_SIZE)
                            is->subpq_rindex = 0;

                        is->subpq_size--;
                    }
                    is->subtitle_stream_changed = 0;

                    SDL_CondSignal(is->subpq_cond);
                    SDL_UnlockMutex(is->subpq_mutex);
                } else {
                    if (is->subpq_size > 0) {
                        sp = &is->subpq[is->subpq_rindex];

                        if (is->subpq_size > 1)
                            sp2 = &is->subpq[(is->subpq_rindex + 1) % SUBPICTURE_QUEUE_SIZE];
                        else
                            sp2 = NULL;

                        /* drop the current subpicture once it has expired or
                           the following one is due to start */
                        if ((is->video_current_pts > (sp->pts + ((float) sp->sub.end_display_time / 1000)))
                                || (sp2 && is->video_current_pts > (sp2->pts + ((float) sp2->sub.start_display_time / 1000))))
                        {
                            free_subpicture(sp);

                            /* update queue size and signal for next picture */
                            if (++is->subpq_rindex == SUBPICTURE_QUEUE_SIZE)
                                is->subpq_rindex = 0;

                            SDL_LockMutex(is->subpq_mutex);
                            is->subpq_size--;
                            SDL_CondSignal(is->subpq_cond);
                            SDL_UnlockMutex(is->subpq_mutex);
                        }
                    }
                }
            }

            /* display picture */
            video_display(is);

            /* update queue size and signal for next picture */
            if (++is->pictq_rindex == VIDEO_PICTURE_QUEUE_SIZE)
                is->pictq_rindex = 0;

            SDL_LockMutex(is->pictq_mutex);
            is->pictq_size--;
            SDL_CondSignal(is->pictq_cond);
            SDL_UnlockMutex(is->pictq_mutex);
        }
    } else if (is->audio_st) {
        /* draw the next audio frame */

        /* if only audio stream, then display the audio bars (better
           than nothing, just to test the implementation */

        /* display picture */
        video_display(is);
    }
    if (show_status) {
        static int64_t last_time;
        int64_t cur_time;
        int aqsize, vqsize, sqsize;
        double av_diff;

        cur_time = av_gettime();
        /* refresh the status line at most every 30 ms */
        if (!last_time || (cur_time - last_time) >= 30000) {
            aqsize = 0;
            vqsize = 0;
            sqsize = 0;
            if (is->audio_st)
                aqsize = is->audioq.size;
            if (is->video_st)
                vqsize = is->videoq.size;
            if (is->subtitle_st)
                sqsize = is->subtitleq.size;
            av_diff = 0;
            if (is->audio_st && is->video_st)
                av_diff = get_audio_clock(is) - get_video_clock(is);
            printf("%7.2f A-V:%7.3f s:%3.1f aq=%5dKB vq=%5dKB sq=%5dB f=%"PRId64"/%"PRId64"   \r",
                   get_master_clock(is), av_diff, FFMAX(is->skip_frames-1, 0), aqsize / 1024, vqsize / 1024, sqsize, is->faulty_dts, is->faulty_pts);
            fflush(stdout);
            last_time = cur_time;
        }
    }
}
1296
/* allocate a picture (needs to do that in main thread to avoid
   potential locking problems); triggered via FF_ALLOC_EVENT from
   queue_picture(), which blocks on pictq_cond until vp->allocated is set */
static void alloc_picture(void *opaque)
{
    VideoState *is = opaque;
    VideoPicture *vp;

    vp = &is->pictq[is->pictq_windex];

    /* release any previous overlay before (re)allocating at the new size */
    if (vp->bmp)
        SDL_FreeYUVOverlay(vp->bmp);

#if CONFIG_AVFILTER
    if (vp->picref)
        avfilter_unref_pic(vp->picref);
    vp->picref = NULL;

    /* picture geometry comes from the filter graph output */
    vp->width   = is->out_video_filter->inputs[0]->w;
    vp->height  = is->out_video_filter->inputs[0]->h;
    vp->pix_fmt = is->out_video_filter->inputs[0]->format;
#else
    vp->width   = is->video_st->codec->width;
    vp->height  = is->video_st->codec->height;
    vp->pix_fmt = is->video_st->codec->pix_fmt;
#endif

    vp->bmp = SDL_CreateYUVOverlay(vp->width, vp->height,
                                   SDL_YV12_OVERLAY,
                                   screen);

    /* wake the video thread waiting in queue_picture() */
    SDL_LockMutex(is->pictq_mutex);
    vp->allocated = 1;
    SDL_CondSignal(is->pictq_cond);
    SDL_UnlockMutex(is->pictq_mutex);
}
1332
/**
 * Queue a decoded frame for display: convert/copy it into the SDL YUV
 * overlay of the next write slot, blocking until a slot is free.
 * Runs on the video thread; the overlay itself must be (re)allocated on
 * the main thread, which is requested via FF_ALLOC_EVENT.
 *
 * @param pts the dts of the pkt / pts of the frame and guessed if not known
 * @param pos byte position of the frame's packet in the input file
 * @return 0 on success, -1 if the video queue was aborted
 */
static int queue_picture(VideoState *is, AVFrame *src_frame, double pts, int64_t pos)
{
    VideoPicture *vp;
    int dst_pix_fmt;
#if CONFIG_AVFILTER
    AVPicture pict_src;
#endif
    /* wait until we have space to put a new picture */
    SDL_LockMutex(is->pictq_mutex);

    /* queue full outside a refresh: decay the decoder-side skip ratio */
    if(is->pictq_size>=VIDEO_PICTURE_QUEUE_SIZE && !is->refresh)
        is->skip_frames= FFMAX(1.0 - FRAME_SKIP_FACTOR, is->skip_frames * (1.0-FRAME_SKIP_FACTOR));

    while (is->pictq_size >= VIDEO_PICTURE_QUEUE_SIZE &&
           !is->videoq.abort_request) {
        SDL_CondWait(is->pictq_cond, is->pictq_mutex);
    }
    SDL_UnlockMutex(is->pictq_mutex);

    if (is->videoq.abort_request)
        return -1;

    vp = &is->pictq[is->pictq_windex];

    /* alloc or resize hardware picture buffer */
    if (!vp->bmp ||
#if CONFIG_AVFILTER
        vp->width  != is->out_video_filter->inputs[0]->w ||
        vp->height != is->out_video_filter->inputs[0]->h) {
#else
        vp->width != is->video_st->codec->width ||
        vp->height != is->video_st->codec->height) {
#endif
        SDL_Event event;

        vp->allocated = 0;

        /* the allocation must be done in the main thread to avoid
           locking problems */
        event.type = FF_ALLOC_EVENT;
        event.user.data1 = is;
        SDL_PushEvent(&event);

        /* wait until the picture is allocated */
        SDL_LockMutex(is->pictq_mutex);
        while (!vp->allocated && !is->videoq.abort_request) {
            SDL_CondWait(is->pictq_cond, is->pictq_mutex);
        }
        SDL_UnlockMutex(is->pictq_mutex);

        if (is->videoq.abort_request)
            return -1;
    }

    /* if the frame is not skipped, then display it */
    if (vp->bmp) {
        AVPicture pict;
#if CONFIG_AVFILTER
        if(vp->picref)
            avfilter_unref_pic(vp->picref);
        vp->picref = src_frame->opaque;
#endif

        /* get a pointer on the bitmap */
        SDL_LockYUVOverlay (vp->bmp);

        dst_pix_fmt = PIX_FMT_YUV420P;
        memset(&pict,0,sizeof(AVPicture));
        /* note: SDL's YV12 layout stores the V plane before U, hence the
           1 <-> 2 swap for data and linesize below */
        pict.data[0] = vp->bmp->pixels[0];
        pict.data[1] = vp->bmp->pixels[2];
        pict.data[2] = vp->bmp->pixels[1];

        pict.linesize[0] = vp->bmp->pitches[0];
        pict.linesize[1] = vp->bmp->pitches[2];
        pict.linesize[2] = vp->bmp->pitches[1];

#if CONFIG_AVFILTER
        pict_src.data[0] = src_frame->data[0];
        pict_src.data[1] = src_frame->data[1];
        pict_src.data[2] = src_frame->data[2];

        pict_src.linesize[0] = src_frame->linesize[0];
        pict_src.linesize[1] = src_frame->linesize[1];
        pict_src.linesize[2] = src_frame->linesize[2];

        //FIXME use direct rendering
        av_picture_copy(&pict, &pict_src,
                        vp->pix_fmt, vp->width, vp->height);
#else
        sws_flags = av_get_int(sws_opts, "sws_flags", NULL);
        is->img_convert_ctx = sws_getCachedContext(is->img_convert_ctx,
            vp->width, vp->height, vp->pix_fmt, vp->width, vp->height,
            dst_pix_fmt, sws_flags, NULL, NULL, NULL);
        if (is->img_convert_ctx == NULL) {
            fprintf(stderr, "Cannot initialize the conversion context\n");
            exit(1);
        }
        sws_scale(is->img_convert_ctx, src_frame->data, src_frame->linesize,
                  0, vp->height, pict.data, pict.linesize);
#endif
        /* update the bitmap content */
        SDL_UnlockYUVOverlay(vp->bmp);

        vp->pts = pts;
        vp->pos = pos;

        /* now we can update the picture count */
        if (++is->pictq_windex == VIDEO_PICTURE_QUEUE_SIZE)
            is->pictq_windex = 0;
        SDL_LockMutex(is->pictq_mutex);
        vp->target_clock= compute_target_time(vp->pts, is);

        is->pictq_size++;
        SDL_UnlockMutex(is->pictq_mutex);
    }
    return 0;
}
1454
1455 /**
1456  * compute the exact PTS for the picture if it is omitted in the stream
1457  * @param pts1 the dts of the pkt / pts of the frame
1458  */
1459 static int output_picture2(VideoState *is, AVFrame *src_frame, double pts1, int64_t pos)
1460 {
1461     double frame_delay, pts;
1462
1463     pts = pts1;
1464
1465     if (pts != 0) {
1466         /* update video clock with pts, if present */
1467         is->video_clock = pts;
1468     } else {
1469         pts = is->video_clock;
1470     }
1471     /* update video clock for next frame */
1472     frame_delay = av_q2d(is->video_st->codec->time_base);
1473     /* for MPEG2, the frame can be repeated, so we update the
1474        clock accordingly */
1475     frame_delay += src_frame->repeat_pict * (frame_delay * 0.5);
1476     is->video_clock += frame_delay;
1477
1478 #if defined(DEBUG_SYNC) && 0
1479     printf("frame_type=%c clock=%0.3f pts=%0.3f\n",
1480            av_get_pict_type_char(src_frame->pict_type), pts, pts1);
1481 #endif
1482     return queue_picture(is, src_frame, pts, pos);
1483 }
1484
/* Pull the next packet from the video queue and decode it.
 *
 * Returns 1 when a picture is ready in *frame (with *pts in stream
 * time_base units), 0 when no picture was produced (flush packet,
 * skipped frame, or decoder needs more data), -1 on abort. */
static int get_video_frame(VideoState *is, AVFrame *frame, int64_t *pts, AVPacket *pkt)
{
    int len1, got_picture, i;

        if (packet_queue_get(&is->videoq, pkt, 1) < 0)
            return -1;

        /* a flush packet marks a seek: reset decoder and timing state */
        if(pkt->data == flush_pkt.data){
            avcodec_flush_buffers(is->video_st->codec);

            SDL_LockMutex(is->pictq_mutex);
            //Make sure there are no long delay timers (ideally we should just flush the que but thats harder)
            for(i=0; i<VIDEO_PICTURE_QUEUE_SIZE; i++){
                is->pictq[i].target_clock= 0;
            }
            /* wait until the display side has drained the picture queue */
            while (is->pictq_size && !is->videoq.abort_request) {
                SDL_CondWait(is->pictq_cond, is->pictq_mutex);
            }
            is->video_current_pos= -1;
            SDL_UnlockMutex(is->pictq_mutex);

            is->last_dts_for_fault_detection=
            is->last_pts_for_fault_detection= INT64_MIN;
            is->frame_last_pts= AV_NOPTS_VALUE;
            is->frame_last_delay = 0;
            is->frame_timer = (double)av_gettime() / 1000000.0;
            is->skip_frames= 1;
            is->skip_frames_index= 0;
            return 0;
        }

        /* NOTE: ipts is the PTS of the _first_ picture beginning in
           this packet, if any */
        is->video_st->codec->reordered_opaque= pkt->pts;
        len1 = avcodec_decode_video2(is->video_st->codec,
                                    frame, &got_picture,
                                    pkt);

        /* count non-monotone dts/pts occurrences to decide later which
           timestamp source is more trustworthy */
        if (got_picture) {
            if(pkt->dts != AV_NOPTS_VALUE){
                is->faulty_dts += pkt->dts <= is->last_dts_for_fault_detection;
                is->last_dts_for_fault_detection= pkt->dts;
            }
            if(frame->reordered_opaque != AV_NOPTS_VALUE){
                is->faulty_pts += frame->reordered_opaque <= is->last_pts_for_fault_detection;
                is->last_pts_for_fault_detection= frame->reordered_opaque;
            }
        }

        /* pick the timestamp: reordered pts when forced or statistically
           more reliable, else the packet dts, else 0 */
        if(   (   decoder_reorder_pts==1
               || (decoder_reorder_pts && is->faulty_pts<is->faulty_dts)
               || pkt->dts == AV_NOPTS_VALUE)
           && frame->reordered_opaque != AV_NOPTS_VALUE)
            *pts= frame->reordered_opaque;
        else if(pkt->dts != AV_NOPTS_VALUE)
            *pts= pkt->dts;
        else
            *pts= 0;

//            if (len1 < 0)
//                break;
    /* frame skipping: only report a picture when the skip ratio allows it */
    if (got_picture){
        is->skip_frames_index += 1;
        if(is->skip_frames_index >= is->skip_frames){
            is->skip_frames_index -= FFMAX(is->skip_frames, 1.0);
            return 1;
        }

    }
    return 0;
}
1556
#if CONFIG_AVFILTER
/* private context of the ffplay source filter */
typedef struct {
    VideoState *is;   /* owning player state */
    AVFrame *frame;   /* scratch frame reused for each decode call */
    int use_dr1;      /* nonzero when decoding directly into filter buffers */
} FilterPriv;
1563
/* get_buffer callback: hand the decoder a buffer allocated by the filter
   graph so the decoded picture can travel downstream without a copy
   (direct rendering). */
static int input_get_buffer(AVCodecContext *codec, AVFrame *pic)
{
    AVFilterContext *ctx = codec->opaque;
    AVFilterPicRef  *ref;
    int perms = AV_PERM_WRITE;
    int w, h, stride[4];
    unsigned edge;

    /* translate the codec's buffer hints into filter permissions */
    if(pic->buffer_hints & FF_BUFFER_HINTS_VALID) {
        if(pic->buffer_hints & FF_BUFFER_HINTS_READABLE) perms |= AV_PERM_READ;
        if(pic->buffer_hints & FF_BUFFER_HINTS_PRESERVE) perms |= AV_PERM_PRESERVE;
        if(pic->buffer_hints & FF_BUFFER_HINTS_REUSABLE) perms |= AV_PERM_REUSE2;
    }
    if(pic->reference) perms |= AV_PERM_READ | AV_PERM_PRESERVE;

    w = codec->width;
    h = codec->height;
    avcodec_align_dimensions2(codec, &w, &h, stride);
    /* reserve edge room unless the codec handles edge emulation itself */
    edge = codec->flags & CODEC_FLAG_EMU_EDGE ? 0 : avcodec_get_edge_width();
    w += edge << 1;
    h += edge << 1;

    if(!(ref = avfilter_get_video_buffer(ctx->outputs[0], perms, w, h)))
        return -1;

    ref->w = codec->width;
    ref->h = codec->height;
    for(int i = 0; i < 3; i ++) {
        /* luma plane (i == 0) is never subsampled; chroma shifts come
           from the pixel format descriptor */
        unsigned hshift = i == 0 ? 0 : av_pix_fmt_descriptors[ref->pic->format].log2_chroma_w;
        unsigned vshift = i == 0 ? 0 : av_pix_fmt_descriptors[ref->pic->format].log2_chroma_h;

        /* advance each plane pointer past the edge area */
        ref->data[i]    += (edge >> hshift) + ((edge * ref->linesize[i]) >> vshift);
        pic->data[i]     = ref->data[i];
        pic->linesize[i] = ref->linesize[i];
    }
    pic->opaque = ref;
    pic->age    = INT_MAX;
    pic->type   = FF_BUFFER_TYPE_USER;
    return 0;
}
1604
/* release_buffer callback: drop the filter picture reference backing the
   frame; data pointers are cleared first so the codec cannot keep using
   the freed memory */
static void input_release_buffer(AVCodecContext *codec, AVFrame *pic)
{
    memset(pic->data, 0, sizeof(pic->data));
    avfilter_unref_pic(pic->opaque);
}
1610
1611 static int input_init(AVFilterContext *ctx, const char *args, void *opaque)
1612 {
1613     FilterPriv *priv = ctx->priv;
1614     AVCodecContext *codec;
1615     if(!opaque) return -1;
1616
1617     priv->is = opaque;
1618     codec    = priv->is->video_st->codec;
1619     codec->opaque = ctx;
1620     if(codec->codec->capabilities & CODEC_CAP_DR1) {
1621         priv->use_dr1 = 1;
1622         codec->get_buffer     = input_get_buffer;
1623         codec->release_buffer = input_release_buffer;
1624     }
1625
1626     priv->frame = avcodec_alloc_frame();
1627
1628     return 0;
1629 }
1630
1631 static void input_uninit(AVFilterContext *ctx)
1632 {
1633     FilterPriv *priv = ctx->priv;
1634     av_free(priv->frame);
1635 }
1636
/* request_frame callback of the source filter: decode one picture from
   the video queue and push it down the output link */
static int input_request_frame(AVFilterLink *link)
{
    FilterPriv *priv = link->src->priv;
    AVFilterPicRef *picref;
    int64_t pts = 0;
    AVPacket pkt;
    int ret;

    /* keep decoding until a picture comes out (0 means "no output yet") */
    while (!(ret = get_video_frame(priv->is, priv->frame, &pts, &pkt)))
        av_free_packet(&pkt);
    if (ret < 0)
        return -1;

    if(priv->use_dr1) {
        /* direct rendering: the frame already lives in a filter buffer */
        picref = priv->frame->opaque;
    } else {
    picref = avfilter_get_video_buffer(link, AV_PERM_WRITE, link->w, link->h);
    av_picture_copy((AVPicture *)&picref->data, (AVPicture *)priv->frame,
                    picref->pic->format, link->w, link->h);
    }
    av_free_packet(&pkt);

    picref->pts = pts;
    /* pkt.pos is still valid here: av_free_packet only frees pkt.data */
    picref->pos = pkt.pos;
    picref->pixel_aspect = priv->is->video_st->codec->sample_aspect_ratio;
    avfilter_start_frame(link, avfilter_ref_pic(picref, ~0));
    avfilter_draw_slice(link, 0, link->h, 1);
    avfilter_end_frame(link);
    /* with DR1 the decoder still owns the reference; otherwise drop ours */
    if(!priv->use_dr1)
    avfilter_unref_pic(picref);

    return 0;
}
1670
1671 static int input_query_formats(AVFilterContext *ctx)
1672 {
1673     FilterPriv *priv = ctx->priv;
1674     enum PixelFormat pix_fmts[] = {
1675         priv->is->video_st->codec->pix_fmt, PIX_FMT_NONE
1676     };
1677
1678     avfilter_set_common_formats(ctx, avfilter_make_format_list(pix_fmts));
1679     return 0;
1680 }
1681
1682 static int input_config_props(AVFilterLink *link)
1683 {
1684     FilterPriv *priv  = link->src->priv;
1685     AVCodecContext *c = priv->is->video_st->codec;
1686
1687     link->w = c->width;
1688     link->h = c->height;
1689
1690     return 0;
1691 }
1692
/* source filter: feeds decoded frames from ffplay's video decoder into
   the filter graph (no inputs, a single video output pad) */
static AVFilter input_filter =
{
    .name      = "ffplay_input",

    .priv_size = sizeof(FilterPriv),

    .init      = input_init,
    .uninit    = input_uninit,

    .query_formats = input_query_formats,

    .inputs    = (AVFilterPad[]) {{ .name = NULL }},
    .outputs   = (AVFilterPad[]) {{ .name = "default",
                                    .type = AVMEDIA_TYPE_VIDEO,
                                    .request_frame = input_request_frame,
                                    .config_props  = input_config_props, },
                                  { .name = NULL }},
};
1711
/* end_frame callback of the sink: intentionally a no-op -- the picture is
   left on the input link and retrieved by get_filtered_video_frame() */
static void output_end_frame(AVFilterLink *link)
{
}
1715
1716 static int output_query_formats(AVFilterContext *ctx)
1717 {
1718     enum PixelFormat pix_fmts[] = { PIX_FMT_YUV420P, PIX_FMT_NONE };
1719
1720     avfilter_set_common_formats(ctx, avfilter_make_format_list(pix_fmts));
1721     return 0;
1722 }
1723
/* Pull one frame out of the filter graph through the sink's input link.
 * Returns 1 and fills frame/pts/pos on success, -1 when no frame is
 * available.  The picture reference is transferred to frame->opaque and
 * must be released by the caller. */
static int get_filtered_video_frame(AVFilterContext *ctx, AVFrame *frame,
                                    int64_t *pts, int64_t *pos)
{
    AVFilterPicRef *pic;

    if(avfilter_request_frame(ctx->inputs[0]))
        return -1;
    if(!(pic = ctx->inputs[0]->cur_pic))
        return -1;
    /* take ownership of the picture reference */
    ctx->inputs[0]->cur_pic = NULL;

    frame->opaque = pic;
    *pts          = pic->pts;
    *pos          = pic->pos;

    memcpy(frame->data,     pic->data,     sizeof(frame->data));
    memcpy(frame->linesize, pic->linesize, sizeof(frame->linesize));

    return 1;
}
1744
/* sink filter: terminates the graph; frames are pulled from its input
   link by get_filtered_video_frame() rather than consumed in end_frame */
static AVFilter output_filter =
{
    .name      = "ffplay_output",

    .query_formats = output_query_formats,

    .inputs    = (AVFilterPad[]) {{ .name          = "default",
                                    .type          = AVMEDIA_TYPE_VIDEO,
                                    .end_frame     = output_end_frame,
                                    .min_perms     = AV_PERM_READ, },
                                  { .name = NULL }},
    .outputs   = (AVFilterPad[]) {{ .name = NULL }},
};
1758 #endif  /* CONFIG_AVFILTER */
1759
/* Video decoding thread: optionally builds the avfilter graph around the
   ffplay source/sink filters, then loops decoding frames and queueing
   them for display until abort or error. */
static int video_thread(void *arg)
{
    VideoState *is = arg;
    AVFrame *frame= avcodec_alloc_frame();
    int64_t pts_int;
    double pts;
    int ret;

#if CONFIG_AVFILTER
    int64_t pos;
    AVFilterContext *filt_src = NULL, *filt_out = NULL;
    AVFilterGraph *graph = av_mallocz(sizeof(AVFilterGraph));
    graph->scale_sws_opts = av_strdup("sws_flags=bilinear");

    /* create and initialize the ffplay source and sink filters */
    if(!(filt_src = avfilter_open(&input_filter,  "src")))   goto the_end;
    if(!(filt_out = avfilter_open(&output_filter, "out")))   goto the_end;

    if(avfilter_init_filter(filt_src, NULL, is))             goto the_end;
    if(avfilter_init_filter(filt_out, NULL, frame))          goto the_end;


    if(vfilters) {
        /* user-specified -vf chain: parse it between src and out */
        AVFilterInOut *outputs = av_malloc(sizeof(AVFilterInOut));
        AVFilterInOut *inputs  = av_malloc(sizeof(AVFilterInOut));

        outputs->name    = av_strdup("in");
        outputs->filter  = filt_src;
        outputs->pad_idx = 0;
        outputs->next    = NULL;

        inputs->name    = av_strdup("out");
        inputs->filter  = filt_out;
        inputs->pad_idx = 0;
        inputs->next    = NULL;

        if (avfilter_graph_parse(graph, vfilters, inputs, outputs, NULL) < 0)
            goto the_end;
        av_freep(&vfilters);
    } else {
        /* no user chain: connect source directly to sink */
        if(avfilter_link(filt_src, 0, filt_out, 0) < 0)          goto the_end;
    }
    avfilter_graph_add_filter(graph, filt_src);
    avfilter_graph_add_filter(graph, filt_out);

    if(avfilter_graph_check_validity(graph, NULL))           goto the_end;
    if(avfilter_graph_config_formats(graph, NULL))           goto the_end;
    if(avfilter_graph_config_links(graph, NULL))             goto the_end;

    is->out_video_filter = filt_out;
#endif

    for(;;) {
#if !CONFIG_AVFILTER
        AVPacket pkt;
#endif
        /* idle (with small sleeps) while playback is paused */
        while (is->paused && !is->videoq.abort_request)
            SDL_Delay(10);
#if CONFIG_AVFILTER
        ret = get_filtered_video_frame(filt_out, frame, &pts_int, &pos);
#else
        ret = get_video_frame(is, frame, &pts_int, &pkt);
#endif

        if (ret < 0) goto the_end;

        /* no picture produced (flush/skip): try the next packet */
        if (!ret)
            continue;

        /* convert pts from stream time_base units to seconds */
        pts = pts_int*av_q2d(is->video_st->time_base);

#if CONFIG_AVFILTER
        ret = output_picture2(is, frame, pts, pos);
#else
        ret = output_picture2(is, frame, pts,  pkt.pos);
        av_free_packet(&pkt);
#endif
        if (ret < 0)
            goto the_end;

        /* single-step mode: pause again after each displayed frame */
        if (step)
            if (cur_stream)
                stream_pause(cur_stream);
    }
 the_end:
#if CONFIG_AVFILTER
    avfilter_graph_destroy(graph);
    av_freep(&graph);
#endif
    av_free(frame);
    return 0;
}
1851
/* Subtitle decoding thread: pulls packets from the subtitle queue,
   decodes them, converts each rect's palette from RGBA to CCIR YUVA
   in place, and stores the result in the subpicture queue for the
   video display path.  Returns 0 when the queue is aborted. */
static int subtitle_thread(void *arg)
{
    VideoState *is = arg;
    SubPicture *sp;
    AVPacket pkt1, *pkt = &pkt1;
    int len1, got_subtitle;
    double pts;
    int i, j;
    int r, g, b, y, u, v, a;

    for(;;) {
        /* poll in 10ms steps while playback is paused */
        while (is->paused && !is->subtitleq.abort_request) {
            SDL_Delay(10);
        }
        if (packet_queue_get(&is->subtitleq, pkt, 1) < 0)
            break;

        /* a flush packet (queued on seek) resets the decoder state */
        if(pkt->data == flush_pkt.data){
            avcodec_flush_buffers(is->subtitle_st->codec);
            continue;
        }
        /* wait until a slot frees up in the subpicture queue */
        SDL_LockMutex(is->subpq_mutex);
        while (is->subpq_size >= SUBPICTURE_QUEUE_SIZE &&
               !is->subtitleq.abort_request) {
            SDL_CondWait(is->subpq_cond, is->subpq_mutex);
        }
        SDL_UnlockMutex(is->subpq_mutex);

        if (is->subtitleq.abort_request)
            goto the_end;

        /* fill the current write slot; subpq_windex is only advanced
           below once the subpicture is complete */
        sp = &is->subpq[is->subpq_windex];

       /* NOTE: ipts is the PTS of the _first_ picture beginning in
           this packet, if any */
        pts = 0;
        if (pkt->pts != AV_NOPTS_VALUE)
            pts = av_q2d(is->subtitle_st->time_base)*pkt->pts;

        len1 = avcodec_decode_subtitle2(is->subtitle_st->codec,
                                    &sp->sub, &got_subtitle,
                                    pkt);
//            if (len1 < 0)
//                break;
        /* only graphical subtitles (format == 0) are handled here */
        if (got_subtitle && sp->sub.format == 0) {
            sp->pts = pts;

            /* convert every palette entry from RGBA to YUVA in place,
               since the blending code expects CCIR YUV values */
            for (i = 0; i < sp->sub.num_rects; i++)
            {
                for (j = 0; j < sp->sub.rects[i]->nb_colors; j++)
                {
                    RGBA_IN(r, g, b, a, (uint32_t*)sp->sub.rects[i]->pict.data[1] + j);
                    y = RGB_TO_Y_CCIR(r, g, b);
                    u = RGB_TO_U_CCIR(r, g, b, 0);
                    v = RGB_TO_V_CCIR(r, g, b, 0);
                    YUVA_OUT((uint32_t*)sp->sub.rects[i]->pict.data[1] + j, y, u, v, a);
                }
            }

            /* now we can update the picture count */
            if (++is->subpq_windex == SUBPICTURE_QUEUE_SIZE)
                is->subpq_windex = 0;
            SDL_LockMutex(is->subpq_mutex);
            is->subpq_size++;
            SDL_UnlockMutex(is->subpq_mutex);
        }
        av_free_packet(pkt);
//        if (step)
//            if (cur_stream)
//                stream_pause(cur_stream);
    }
 the_end:
    return 0;
}
1926
1927 /* copy samples for viewing in editor window */
1928 static void update_sample_display(VideoState *is, short *samples, int samples_size)
1929 {
1930     int size, len, channels;
1931
1932     channels = is->audio_st->codec->channels;
1933
1934     size = samples_size / sizeof(short);
1935     while (size > 0) {
1936         len = SAMPLE_ARRAY_SIZE - is->sample_array_index;
1937         if (len > size)
1938             len = size;
1939         memcpy(is->sample_array + is->sample_array_index, samples, len * sizeof(short));
1940         samples += len;
1941         is->sample_array_index += len;
1942         if (is->sample_array_index >= SAMPLE_ARRAY_SIZE)
1943             is->sample_array_index = 0;
1944         size -= len;
1945     }
1946 }
1947
1948 /* return the new audio buffer size (samples can be added or deleted
1949    to get better sync if video or external master clock) */
1950 static int synchronize_audio(VideoState *is, short *samples,
1951                              int samples_size1, double pts)
1952 {
1953     int n, samples_size;
1954     double ref_clock;
1955
1956     n = 2 * is->audio_st->codec->channels;
1957     samples_size = samples_size1;
1958
1959     /* if not master, then we try to remove or add samples to correct the clock */
1960     if (((is->av_sync_type == AV_SYNC_VIDEO_MASTER && is->video_st) ||
1961          is->av_sync_type == AV_SYNC_EXTERNAL_CLOCK)) {
1962         double diff, avg_diff;
1963         int wanted_size, min_size, max_size, nb_samples;
1964
1965         ref_clock = get_master_clock(is);
1966         diff = get_audio_clock(is) - ref_clock;
1967
1968         if (diff < AV_NOSYNC_THRESHOLD) {
1969             is->audio_diff_cum = diff + is->audio_diff_avg_coef * is->audio_diff_cum;
1970             if (is->audio_diff_avg_count < AUDIO_DIFF_AVG_NB) {
1971                 /* not enough measures to have a correct estimate */
1972                 is->audio_diff_avg_count++;
1973             } else {
1974                 /* estimate the A-V difference */
1975                 avg_diff = is->audio_diff_cum * (1.0 - is->audio_diff_avg_coef);
1976
1977                 if (fabs(avg_diff) >= is->audio_diff_threshold) {
1978                     wanted_size = samples_size + ((int)(diff * is->audio_st->codec->sample_rate) * n);
1979                     nb_samples = samples_size / n;
1980
1981                     min_size = ((nb_samples * (100 - SAMPLE_CORRECTION_PERCENT_MAX)) / 100) * n;
1982                     max_size = ((nb_samples * (100 + SAMPLE_CORRECTION_PERCENT_MAX)) / 100) * n;
1983                     if (wanted_size < min_size)
1984                         wanted_size = min_size;
1985                     else if (wanted_size > max_size)
1986                         wanted_size = max_size;
1987
1988                     /* add or remove samples to correction the synchro */
1989                     if (wanted_size < samples_size) {
1990                         /* remove samples */
1991                         samples_size = wanted_size;
1992                     } else if (wanted_size > samples_size) {
1993                         uint8_t *samples_end, *q;
1994                         int nb;
1995
1996                         /* add samples */
1997                         nb = (samples_size - wanted_size);
1998                         samples_end = (uint8_t *)samples + samples_size - n;
1999                         q = samples_end + n;
2000                         while (nb > 0) {
2001                             memcpy(q, samples_end, n);
2002                             q += n;
2003                             nb -= n;
2004                         }
2005                         samples_size = wanted_size;
2006                     }
2007                 }
2008 #if 0
2009                 printf("diff=%f adiff=%f sample_diff=%d apts=%0.3f vpts=%0.3f %f\n",
2010                        diff, avg_diff, samples_size - samples_size1,
2011                        is->audio_clock, is->video_clock, is->audio_diff_threshold);
2012 #endif
2013             }
2014         } else {
2015             /* too big difference : may be initial PTS errors, so
2016                reset A-V filter */
2017             is->audio_diff_avg_count = 0;
2018             is->audio_diff_cum = 0;
2019         }
2020     }
2021
2022     return samples_size;
2023 }
2024
/* Decode one audio frame and return its uncompressed size in bytes,
   or -1 on abort/pause.  The decoded (and, if needed, S16-converted)
   data is left in is->audio_buf; *pts_ptr receives the frame's
   presentation time.  The pending packet state lives in
   is->audio_pkt / is->audio_pkt_temp so partially consumed packets
   survive across calls. */
static int audio_decode_frame(VideoState *is, double *pts_ptr)
{
    AVPacket *pkt_temp = &is->audio_pkt_temp;
    AVPacket *pkt = &is->audio_pkt;
    AVCodecContext *dec= is->audio_st->codec;
    int n, len1, data_size;
    double pts;

    for(;;) {
        /* NOTE: the audio packet can contain several frames */
        while (pkt_temp->size > 0) {
            data_size = sizeof(is->audio_buf1);
            len1 = avcodec_decode_audio3(dec,
                                        (int16_t *)is->audio_buf1, &data_size,
                                        pkt_temp);
            if (len1 < 0) {
                /* if error, we skip the frame */
                pkt_temp->size = 0;
                break;
            }

            /* advance past the consumed bytes of the packet */
            pkt_temp->data += len1;
            pkt_temp->size -= len1;
            if (data_size <= 0)
                continue;

            /* (re)create the converter when the decoder output format
               differs from what we last set up */
            if (dec->sample_fmt != is->audio_src_fmt) {
                if (is->reformat_ctx)
                    av_audio_convert_free(is->reformat_ctx);
                is->reformat_ctx= av_audio_convert_alloc(SAMPLE_FMT_S16, 1,
                                                         dec->sample_fmt, 1, NULL, 0);
                if (!is->reformat_ctx) {
                    fprintf(stderr, "Cannot convert %s sample format to %s sample format\n",
                        avcodec_get_sample_fmt_name(dec->sample_fmt),
                        avcodec_get_sample_fmt_name(SAMPLE_FMT_S16));
                        break;
                }
                is->audio_src_fmt= dec->sample_fmt;
            }

            if (is->reformat_ctx) {
                /* convert audio_buf1 -> audio_buf2 as packed S16 */
                const void *ibuf[6]= {is->audio_buf1};
                void *obuf[6]= {is->audio_buf2};
                int istride[6]= {av_get_bits_per_sample_format(dec->sample_fmt)/8};
                int ostride[6]= {2};
                int len= data_size/istride[0];
                if (av_audio_convert(is->reformat_ctx, obuf, ostride, ibuf, istride, len)<0) {
                    printf("av_audio_convert() failed\n");
                    break;
                }
                is->audio_buf= is->audio_buf2;
                /* FIXME: existing code assume that data_size equals framesize*channels*2
                          remove this legacy cruft */
                data_size= len*2;
            }else{
                is->audio_buf= is->audio_buf1;
            }

            /* if no pts, then compute it */
            pts = is->audio_clock;
            *pts_ptr = pts;
            n = 2 * dec->channels;
            /* advance the audio clock by the duration of this frame */
            is->audio_clock += (double)data_size /
                (double)(n * dec->sample_rate);
#if defined(DEBUG_SYNC)
            {
                static double last_clock;
                printf("audio: delay=%0.3f clock=%0.3f pts=%0.3f\n",
                       is->audio_clock - last_clock,
                       is->audio_clock, pts);
                last_clock = is->audio_clock;
            }
#endif
            return data_size;
        }

        /* free the current packet */
        if (pkt->data)
            av_free_packet(pkt);

        if (is->paused || is->audioq.abort_request) {
            return -1;
        }

        /* read next packet */
        if (packet_queue_get(&is->audioq, pkt, 1) < 0)
            return -1;
        /* a flush packet (queued on seek) resets the decoder state */
        if(pkt->data == flush_pkt.data){
            avcodec_flush_buffers(dec);
            continue;
        }

        pkt_temp->data = pkt->data;
        pkt_temp->size = pkt->size;

        /* if update the audio clock with the pts */
        if (pkt->pts != AV_NOPTS_VALUE) {
            is->audio_clock = av_q2d(is->audio_st->time_base)*pkt->pts;
        }
    }
}
2127
2128 /* get the current audio output buffer size, in samples. With SDL, we
2129    cannot have a precise information */
2130 static int audio_write_get_buf_size(VideoState *is)
2131 {
2132     return is->audio_buf_size - is->audio_buf_index;
2133 }
2134
2135
2136 /* prepare a new audio buffer */
2137 static void sdl_audio_callback(void *opaque, Uint8 *stream, int len)
2138 {
2139     VideoState *is = opaque;
2140     int audio_size, len1;
2141     double pts;
2142
2143     audio_callback_time = av_gettime();
2144
2145     while (len > 0) {
2146         if (is->audio_buf_index >= is->audio_buf_size) {
2147            audio_size = audio_decode_frame(is, &pts);
2148            if (audio_size < 0) {
2149                 /* if error, just output silence */
2150                is->audio_buf = is->audio_buf1;
2151                is->audio_buf_size = 1024;
2152                memset(is->audio_buf, 0, is->audio_buf_size);
2153            } else {
2154                if (is->show_audio)
2155                    update_sample_display(is, (int16_t *)is->audio_buf, audio_size);
2156                audio_size = synchronize_audio(is, (int16_t *)is->audio_buf, audio_size,
2157                                               pts);
2158                is->audio_buf_size = audio_size;
2159            }
2160            is->audio_buf_index = 0;
2161         }
2162         len1 = is->audio_buf_size - is->audio_buf_index;
2163         if (len1 > len)
2164             len1 = len;
2165         memcpy(stream, (uint8_t *)is->audio_buf + is->audio_buf_index, len1);
2166         len -= len1;
2167         stream += len1;
2168         is->audio_buf_index += len1;
2169     }
2170 }
2171
/* Open the stream component (audio, video or subtitle) at
   'stream_index' in is->ic: find and open its decoder, apply the
   global command-line codec options, set up SDL audio output for
   audio streams, and spawn the per-type decoding thread.
   Return 0 if OK, -1 on any failure. */
static int stream_component_open(VideoState *is, int stream_index)
{
    AVFormatContext *ic = is->ic;
    AVCodecContext *avctx;
    AVCodec *codec;
    SDL_AudioSpec wanted_spec, spec;

    if (stream_index < 0 || stream_index >= ic->nb_streams)
        return -1;
    avctx = ic->streams[stream_index]->codec;

    /* prepare audio output */
    if (avctx->codec_type == AVMEDIA_TYPE_AUDIO) {
        /* ask the decoder to downmix to at most 2 channels */
        if (avctx->channels > 0) {
            avctx->request_channels = FFMIN(2, avctx->channels);
        } else {
            avctx->request_channels = 2;
        }
    }

    /* apply the global decoding options set on the command line */
    codec = avcodec_find_decoder(avctx->codec_id);
    avctx->debug_mv = debug_mv;
    avctx->debug = debug;
    avctx->workaround_bugs = workaround_bugs;
    avctx->lowres = lowres;
    if(lowres) avctx->flags |= CODEC_FLAG_EMU_EDGE;
    avctx->idct_algo= idct;
    if(fast) avctx->flags2 |= CODEC_FLAG2_FAST;
    avctx->skip_frame= skip_frame;
    avctx->skip_idct= skip_idct;
    avctx->skip_loop_filter= skip_loop_filter;
    avctx->error_recognition= error_recognition;
    avctx->error_concealment= error_concealment;
    avcodec_thread_init(avctx, thread_count);

    set_context_opts(avctx, avcodec_opts[avctx->codec_type], 0);

    if (!codec ||
        avcodec_open(avctx, codec) < 0)
        return -1;

    /* prepare audio output */
    if (avctx->codec_type == AVMEDIA_TYPE_AUDIO) {
        wanted_spec.freq = avctx->sample_rate;
        wanted_spec.format = AUDIO_S16SYS;
        wanted_spec.channels = avctx->channels;
        wanted_spec.silence = 0;
        wanted_spec.samples = SDL_AUDIO_BUFFER_SIZE;
        wanted_spec.callback = sdl_audio_callback;
        wanted_spec.userdata = is;
        if (SDL_OpenAudio(&wanted_spec, &spec) < 0) {
            fprintf(stderr, "SDL_OpenAudio: %s\n", SDL_GetError());
            return -1;
        }
        is->audio_hw_buf_size = spec.size;
        is->audio_src_fmt= SAMPLE_FMT_S16;
    }

    ic->streams[stream_index]->discard = AVDISCARD_DEFAULT;
    switch(avctx->codec_type) {
    case AVMEDIA_TYPE_AUDIO:
        is->audio_stream = stream_index;
        is->audio_st = ic->streams[stream_index];
        is->audio_buf_size = 0;
        is->audio_buf_index = 0;

        /* init averaging filter */
        is->audio_diff_avg_coef = exp(log(0.01) / AUDIO_DIFF_AVG_NB);
        is->audio_diff_avg_count = 0;
        /* since we do not have a precise anough audio fifo fullness,
           we correct audio sync only if larger than this threshold */
        is->audio_diff_threshold = 2.0 * SDL_AUDIO_BUFFER_SIZE / avctx->sample_rate;

        memset(&is->audio_pkt, 0, sizeof(is->audio_pkt));
        packet_queue_init(&is->audioq);
        SDL_PauseAudio(0);
        break;
    case AVMEDIA_TYPE_VIDEO:
        is->video_stream = stream_index;
        is->video_st = ic->streams[stream_index];

//        is->video_current_pts_time = av_gettime();

        packet_queue_init(&is->videoq);
        is->video_tid = SDL_CreateThread(video_thread, is);
        break;
    case AVMEDIA_TYPE_SUBTITLE:
        is->subtitle_stream = stream_index;
        is->subtitle_st = ic->streams[stream_index];
        packet_queue_init(&is->subtitleq);

        is->subtitle_tid = SDL_CreateThread(subtitle_thread, is);
        break;
    default:
        break;
    }
    return 0;
}
2271
/* Close the stream component at 'stream_index': abort its packet
   queue, wake and join the corresponding decoding thread, tear down
   the queue, close the codec and clear the per-type state in 'is'.
   Counterpart of stream_component_open(). */
static void stream_component_close(VideoState *is, int stream_index)
{
    AVFormatContext *ic = is->ic;
    AVCodecContext *avctx;

    if (stream_index < 0 || stream_index >= ic->nb_streams)
        return;
    avctx = ic->streams[stream_index]->codec;

    switch(avctx->codec_type) {
    case AVMEDIA_TYPE_AUDIO:
        packet_queue_abort(&is->audioq);

        /* stops the SDL audio thread before freeing the queue */
        SDL_CloseAudio();

        packet_queue_end(&is->audioq);
        if (is->reformat_ctx)
            av_audio_convert_free(is->reformat_ctx);
        is->reformat_ctx = NULL;
        break;
    case AVMEDIA_TYPE_VIDEO:
        packet_queue_abort(&is->videoq);

        /* note: we also signal this mutex to make sure we deblock the
           video thread in all cases */
        SDL_LockMutex(is->pictq_mutex);
        SDL_CondSignal(is->pictq_cond);
        SDL_UnlockMutex(is->pictq_mutex);

        SDL_WaitThread(is->video_tid, NULL);

        packet_queue_end(&is->videoq);
        break;
    case AVMEDIA_TYPE_SUBTITLE:
        packet_queue_abort(&is->subtitleq);

        /* note: we also signal this mutex to make sure we deblock the
           video thread in all cases */
        SDL_LockMutex(is->subpq_mutex);
        is->subtitle_stream_changed = 1;

        SDL_CondSignal(is->subpq_cond);
        SDL_UnlockMutex(is->subpq_mutex);

        SDL_WaitThread(is->subtitle_tid, NULL);

        packet_queue_end(&is->subtitleq);
        break;
    default:
        break;
    }

    ic->streams[stream_index]->discard = AVDISCARD_ALL;
    avcodec_close(avctx);
    /* finally clear the per-type references so the rest of the player
       knows this component is gone */
    switch(avctx->codec_type) {
    case AVMEDIA_TYPE_AUDIO:
        is->audio_st = NULL;
        is->audio_stream = -1;
        break;
    case AVMEDIA_TYPE_VIDEO:
        is->video_st = NULL;
        is->video_stream = -1;
        break;
    case AVMEDIA_TYPE_SUBTITLE:
        is->subtitle_st = NULL;
        is->subtitle_stream = -1;
        break;
    default:
        break;
    }
}
2343
2344 /* since we have only one decoding thread, we can use a global
2345    variable instead of a thread local variable */
2346 static VideoState *global_video_state;
2347
2348 static int decode_interrupt_cb(void)
2349 {
2350     return (global_video_state && global_video_state->abort_request);
2351 }
2352
/* Demuxing thread: opens the input, selects and opens the best
   audio/video/subtitle streams, then loops reading packets and
   dispatching them to the per-stream queues, handling pause, seek
   requests, queue back-pressure, EOF and looping.  On exit (or any
   failure) it closes everything it opened and, if it did not finish
   cleanly, posts an FF_QUIT_EVENT to the main loop. */
static int decode_thread(void *arg)
{
    VideoState *is = arg;
    AVFormatContext *ic;
    int err, i, ret;
    int st_index[AVMEDIA_TYPE_NB];
    int st_count[AVMEDIA_TYPE_NB]={0};
    int st_best_packet_count[AVMEDIA_TYPE_NB];
    AVPacket pkt1, *pkt = &pkt1;
    AVFormatParameters params, *ap = &params;
    int eof=0;

    ic = avformat_alloc_context();

    memset(st_index, -1, sizeof(st_index));
    memset(st_best_packet_count, -1, sizeof(st_best_packet_count));
    is->video_stream = -1;
    is->audio_stream = -1;
    is->subtitle_stream = -1;

    /* let blocking I/O be interrupted by abort_request */
    global_video_state = is;
    url_set_interrupt_cb(decode_interrupt_cb);

    memset(ap, 0, sizeof(*ap));

    /* format parameters from the command line (used by raw demuxers) */
    ap->prealloced_context = 1;
    ap->width = frame_width;
    ap->height= frame_height;
    ap->time_base= (AVRational){1, 25};
    ap->pix_fmt = frame_pix_fmt;

    set_context_opts(ic, avformat_opts, AV_OPT_FLAG_DECODING_PARAM);

    err = av_open_input_file(&ic, is->filename, is->iformat, 0, ap);
    if (err < 0) {
        print_error(is->filename, err);
        ret = -1;
        goto fail;
    }
    is->ic = ic;

    if(genpts)
        ic->flags |= AVFMT_FLAG_GENPTS;

    err = av_find_stream_info(ic);
    if (err < 0) {
        fprintf(stderr, "%s: could not find codec parameters\n", is->filename);
        ret = -1;
        goto fail;
    }
    if(ic->pb)
        ic->pb->eof_reached= 0; //FIXME hack, ffplay maybe should not use url_feof() to test for the end

    /* default seek mode: by bytes only for formats with discontinuous timestamps */
    if(seek_by_bytes<0)
        seek_by_bytes= !!(ic->iformat->flags & AVFMT_TS_DISCONT);

    /* if seeking requested, we execute it */
    if (start_time != AV_NOPTS_VALUE) {
        int64_t timestamp;

        timestamp = start_time;
        /* add the stream start time */
        if (ic->start_time != AV_NOPTS_VALUE)
            timestamp += ic->start_time;
        ret = avformat_seek_file(ic, -1, INT64_MIN, timestamp, INT64_MAX, 0);
        if (ret < 0) {
            fprintf(stderr, "%s: could not seek to position %0.3f\n",
                    is->filename, (double)timestamp / AV_TIME_BASE);
        }
    }

    /* pick, per media type, the wanted stream (or the one with the
       most frames seen during probing); everything starts discarded
       and is re-enabled by stream_component_open() */
    for(i = 0; i < ic->nb_streams; i++) {
        AVStream *st= ic->streams[i];
        AVCodecContext *avctx = st->codec;
        ic->streams[i]->discard = AVDISCARD_ALL;
        if(avctx->codec_type >= (unsigned)AVMEDIA_TYPE_NB)
            continue;
        if(st_count[avctx->codec_type]++ != wanted_stream[avctx->codec_type] && wanted_stream[avctx->codec_type] >= 0)
            continue;

        if(st_best_packet_count[avctx->codec_type] >= st->codec_info_nb_frames)
            continue;
        st_best_packet_count[avctx->codec_type]= st->codec_info_nb_frames;

        switch(avctx->codec_type) {
        case AVMEDIA_TYPE_AUDIO:
            if (!audio_disable)
                st_index[AVMEDIA_TYPE_AUDIO] = i;
            break;
        case AVMEDIA_TYPE_VIDEO:
        case AVMEDIA_TYPE_SUBTITLE:
            if (!video_disable)
                st_index[avctx->codec_type] = i;
            break;
        default:
            break;
        }
    }
    if (show_status) {
        dump_format(ic, 0, is->filename, 0);
    }

    /* open the streams */
    if (st_index[AVMEDIA_TYPE_AUDIO] >= 0) {
        stream_component_open(is, st_index[AVMEDIA_TYPE_AUDIO]);
    }

    ret=-1;
    if (st_index[AVMEDIA_TYPE_VIDEO] >= 0) {
        ret= stream_component_open(is, st_index[AVMEDIA_TYPE_VIDEO]);
    }
    is->refresh_tid = SDL_CreateThread(refresh_thread, is);
    /* no video: fall back to the audio visualization display */
    if(ret<0) {
        if (!display_disable)
            is->show_audio = 2;
    }

    if (st_index[AVMEDIA_TYPE_SUBTITLE] >= 0) {
        stream_component_open(is, st_index[AVMEDIA_TYPE_SUBTITLE]);
    }

    if (is->video_stream < 0 && is->audio_stream < 0) {
        fprintf(stderr, "%s: could not open codecs\n", is->filename);
        ret = -1;
        goto fail;
    }

    /* main demux loop */
    for(;;) {
        if (is->abort_request)
            break;
        /* propagate pause/play transitions to the demuxer (for network streams) */
        if (is->paused != is->last_paused) {
            is->last_paused = is->paused;
            if (is->paused)
                is->read_pause_return= av_read_pause(ic);
            else
                av_read_play(ic);
        }
#if CONFIG_RTSP_DEMUXER
        if (is->paused && !strcmp(ic->iformat->name, "rtsp")) {
            /* wait 10 ms to avoid trying to get another packet */
            /* XXX: horrible */
            SDL_Delay(10);
            continue;
        }
#endif
        /* service a pending seek request, then flush all queues */
        if (is->seek_req) {
            int64_t seek_target= is->seek_pos;
            int64_t seek_min= is->seek_rel > 0 ? seek_target - is->seek_rel + 2: INT64_MIN;
            int64_t seek_max= is->seek_rel < 0 ? seek_target - is->seek_rel - 2: INT64_MAX;
//FIXME the +-2 is due to rounding being not done in the correct direction in generation
//      of the seek_pos/seek_rel variables

            ret = avformat_seek_file(is->ic, -1, seek_min, seek_target, seek_max, is->seek_flags);
            if (ret < 0) {
                fprintf(stderr, "%s: error while seeking\n", is->ic->filename);
            }else{
                if (is->audio_stream >= 0) {
                    packet_queue_flush(&is->audioq);
                    packet_queue_put(&is->audioq, &flush_pkt);
                }
                if (is->subtitle_stream >= 0) {
                    packet_queue_flush(&is->subtitleq);
                    packet_queue_put(&is->subtitleq, &flush_pkt);
                }
                if (is->video_stream >= 0) {
                    packet_queue_flush(&is->videoq);
                    packet_queue_put(&is->videoq, &flush_pkt);
                }
            }
            is->seek_req = 0;
            eof= 0;
        }

        /* if the queue are full, no need to read more */
        if (   is->audioq.size + is->videoq.size + is->subtitleq.size > MAX_QUEUE_SIZE
            || (   (is->audioq   .size  > MIN_AUDIOQ_SIZE || is->audio_stream<0)
                && (is->videoq   .nb_packets > MIN_FRAMES || is->video_stream<0)
                && (is->subtitleq.nb_packets > MIN_FRAMES || is->subtitle_stream<0))) {
            /* wait 10 ms */
            SDL_Delay(10);
            continue;
        }
        /* at EOF: queue a null packet to flush the video decoder, and
           once all queues drain either loop or exit */
        if(url_feof(ic->pb) || eof) {
            if(is->video_stream >= 0){
                av_init_packet(pkt);
                pkt->data=NULL;
                pkt->size=0;
                pkt->stream_index= is->video_stream;
                packet_queue_put(&is->videoq, pkt);
            }
            SDL_Delay(10);
            if(is->audioq.size + is->videoq.size + is->subtitleq.size ==0){
                if(loop!=1 && (!loop || --loop)){
                    stream_seek(cur_stream, start_time != AV_NOPTS_VALUE ? start_time : 0, 0, 0);
                }else if(autoexit){
                    ret=AVERROR_EOF;
                    goto fail;
                }
            }
            continue;
        }
        ret = av_read_frame(ic, pkt);
        if (ret < 0) {
            if (ret == AVERROR_EOF)
                eof=1;
            if (url_ferror(ic->pb))
                break;
            SDL_Delay(100); /* wait for user event */
            continue;
        }
        /* dispatch the packet to the queue of its stream, or drop it */
        if (pkt->stream_index == is->audio_stream) {
            packet_queue_put(&is->audioq, pkt);
        } else if (pkt->stream_index == is->video_stream) {
            packet_queue_put(&is->videoq, pkt);
        } else if (pkt->stream_index == is->subtitle_stream) {
            packet_queue_put(&is->subtitleq, pkt);
        } else {
            av_free_packet(pkt);
        }
    }
    /* wait until the end */
    while (!is->abort_request) {
        SDL_Delay(100);
    }

    ret = 0;
 fail:
    /* disable interrupting */
    global_video_state = NULL;

    /* close each stream */
    if (is->audio_stream >= 0)
        stream_component_close(is, is->audio_stream);
    if (is->video_stream >= 0)
        stream_component_close(is, is->video_stream);
    if (is->subtitle_stream >= 0)
        stream_component_close(is, is->subtitle_stream);
    if (is->ic) {
        av_close_input_file(is->ic);
        is->ic = NULL; /* safety */
    }
    url_set_interrupt_cb(NULL);

    if (ret != 0) {
        SDL_Event event;

        event.type = FF_QUIT_EVENT;
        event.user.data1 = is;
        SDL_PushEvent(&event);
    }
    return 0;
}
2606
2607 static VideoState *stream_open(const char *filename, AVInputFormat *iformat)
2608 {
2609     VideoState *is;
2610
2611     is = av_mallocz(sizeof(VideoState));
2612     if (!is)
2613         return NULL;
2614     av_strlcpy(is->filename, filename, sizeof(is->filename));
2615     is->iformat = iformat;
2616     is->ytop = 0;
2617     is->xleft = 0;
2618
2619     /* start video display */
2620     is->pictq_mutex = SDL_CreateMutex();
2621     is->pictq_cond = SDL_CreateCond();
2622
2623     is->subpq_mutex = SDL_CreateMutex();
2624     is->subpq_cond = SDL_CreateCond();
2625
2626     is->av_sync_type = av_sync_type;
2627     is->parse_tid = SDL_CreateThread(decode_thread, is);
2628     if (!is->parse_tid) {
2629         av_free(is);
2630         return NULL;
2631     }
2632     return is;
2633 }
2634
2635 static void stream_close(VideoState *is)
2636 {
2637     VideoPicture *vp;
2638     int i;
2639     /* XXX: use a special url_shutdown call to abort parse cleanly */
2640     is->abort_request = 1;
2641     SDL_WaitThread(is->parse_tid, NULL);
2642     SDL_WaitThread(is->refresh_tid, NULL);
2643
2644     /* free all pictures */
2645     for(i=0;i<VIDEO_PICTURE_QUEUE_SIZE; i++) {
2646         vp = &is->pictq[i];
2647 #if CONFIG_AVFILTER
2648         if (vp->picref) {
2649             avfilter_unref_pic(vp->picref);
2650             vp->picref = NULL;
2651         }
2652 #endif
2653         if (vp->bmp) {
2654             SDL_FreeYUVOverlay(vp->bmp);
2655             vp->bmp = NULL;
2656         }
2657     }
2658     SDL_DestroyMutex(is->pictq_mutex);
2659     SDL_DestroyCond(is->pictq_cond);
2660     SDL_DestroyMutex(is->subpq_mutex);
2661     SDL_DestroyCond(is->subpq_cond);
2662 #if !CONFIG_AVFILTER
2663     if (is->img_convert_ctx)
2664         sws_freeContext(is->img_convert_ctx);
2665 #endif
2666     av_free(is);
2667 }
2668
/* Cycle to the next usable stream of the given media type: starting
   from the currently open stream, scan forward (wrapping around) for
   the next stream of that type with acceptable parameters, then close
   the old component and open the new one.  For subtitles, cycling
   past the last stream disables subtitles (stream_index == -1). */
static void stream_cycle_channel(VideoState *is, int codec_type)
{
    AVFormatContext *ic = is->ic;
    int start_index, stream_index;
    AVStream *st;

    if (codec_type == AVMEDIA_TYPE_VIDEO)
        start_index = is->video_stream;
    else if (codec_type == AVMEDIA_TYPE_AUDIO)
        start_index = is->audio_stream;
    else
        start_index = is->subtitle_stream;
    /* nothing to cycle from (subtitles may legitimately start at -1) */
    if (start_index < (codec_type == AVMEDIA_TYPE_SUBTITLE ? -1 : 0))
        return;
    stream_index = start_index;
    for(;;) {
        if (++stream_index >= is->ic->nb_streams)
        {
            if (codec_type == AVMEDIA_TYPE_SUBTITLE)
            {
                /* wrapped past the end: turn subtitles off */
                stream_index = -1;
                goto the_end;
            } else
                stream_index = 0;
        }
        /* came full circle without finding another stream */
        if (stream_index == start_index)
            return;
        st = ic->streams[stream_index];
        if (st->codec->codec_type == codec_type) {
            /* check that parameters are OK */
            switch(codec_type) {
            case AVMEDIA_TYPE_AUDIO:
                if (st->codec->sample_rate != 0 &&
                    st->codec->channels != 0)
                    goto the_end;
                break;
            case AVMEDIA_TYPE_VIDEO:
            case AVMEDIA_TYPE_SUBTITLE:
                goto the_end;
            default:
                break;
            }
        }
    }
 the_end:
    stream_component_close(is, start_index);
    stream_component_open(is, stream_index);
}
2717
2718
2719 static void toggle_full_screen(void)
2720 {
2721     is_full_screen = !is_full_screen;
2722     if (!fs_screen_width) {
2723         /* use default SDL method */
2724 //        SDL_WM_ToggleFullScreen(screen);
2725     }
2726     video_open(cur_stream);
2727 }
2728
2729 static void toggle_pause(void)
2730 {
2731     if (cur_stream)
2732         stream_pause(cur_stream);
2733     step = 0;
2734 }
2735
2736 static void step_to_next_frame(void)
2737 {
2738     if (cur_stream) {
2739         /* if the stream is paused unpause it, then step */
2740         if (cur_stream->paused)
2741             stream_pause(cur_stream);
2742     }
2743     step = 1;
2744 }
2745
2746 static void do_exit(void)
2747 {
2748     int i;
2749     if (cur_stream) {
2750         stream_close(cur_stream);
2751         cur_stream = NULL;
2752     }
2753     for (i = 0; i < AVMEDIA_TYPE_NB; i++)
2754         av_free(avcodec_opts[i]);
2755     av_free(avformat_opts);
2756     av_free(sws_opts);
2757 #if CONFIG_AVFILTER
2758     avfilter_uninit();
2759 #endif
2760     if (show_status)
2761         printf("\n");
2762     SDL_Quit();
2763     exit(0);
2764 }
2765
2766 static void toggle_audio_display(void)
2767 {
2768     if (cur_stream) {
2769         int bgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0x00);
2770         cur_stream->show_audio = (cur_stream->show_audio + 1) % 3;
2771         fill_rectangle(screen,
2772                     cur_stream->xleft, cur_stream->ytop, cur_stream->width, cur_stream->height,
2773                     bgcolor);
2774         SDL_UpdateRect(screen, cur_stream->xleft, cur_stream->ytop, cur_stream->width, cur_stream->height);
2775     }
2776 }
2777
2778 /* handle an event sent by the GUI */
static void event_loop(void)
{
    SDL_Event event;
    double incr, pos, frac;

    /* main thread loop: block on SDL events and dispatch key, mouse,
     * resize and application-defined (FF_*) events */
    for(;;) {
        double x;
        SDL_WaitEvent(&event);
        switch(event.type) {
        case SDL_KEYDOWN:
            switch(event.key.keysym.sym) {
            case SDLK_ESCAPE:
            case SDLK_q:
                do_exit();
                break;
            case SDLK_f:
                toggle_full_screen();
                break;
            case SDLK_p:
            case SDLK_SPACE:
                toggle_pause();
                break;
            case SDLK_s: //S: Step to next frame
                step_to_next_frame();
                break;
            case SDLK_a:
                if (cur_stream)
                    stream_cycle_channel(cur_stream, AVMEDIA_TYPE_AUDIO);
                break;
            case SDLK_v:
                if (cur_stream)
                    stream_cycle_channel(cur_stream, AVMEDIA_TYPE_VIDEO);
                break;
            case SDLK_t:
                if (cur_stream)
                    stream_cycle_channel(cur_stream, AVMEDIA_TYPE_SUBTITLE);
                break;
            case SDLK_w:
                toggle_audio_display();
                break;
            /* arrow keys: relative seek, +/-10 s or +/-60 s */
            case SDLK_LEFT:
                incr = -10.0;
                goto do_seek;
            case SDLK_RIGHT:
                incr = 10.0;
                goto do_seek;
            case SDLK_UP:
                incr = 60.0;
                goto do_seek;
            case SDLK_DOWN:
                incr = -60.0;
            do_seek:
                if (cur_stream) {
                    if (seek_by_bytes) {
                        /* byte-based seek: start from the position of the
                         * most recently decoded video/audio packet, falling
                         * back to the current I/O position */
                        if (cur_stream->video_stream >= 0 && cur_stream->video_current_pos>=0){
                            pos= cur_stream->video_current_pos;
                        }else if(cur_stream->audio_stream >= 0 && cur_stream->audio_pkt.pos>=0){
                            pos= cur_stream->audio_pkt.pos;
                        }else
                            pos = url_ftell(cur_stream->ic->pb);
                        /* convert the time increment to bytes; assume
                         * 180000 bytes/s when the bit rate is unknown */
                        if (cur_stream->ic->bit_rate)
                            incr *= cur_stream->ic->bit_rate / 8.0;
                        else
                            incr *= 180000.0;
                        pos += incr;
                        stream_seek(cur_stream, pos, incr, 1);
                    } else {
                        /* time-based seek relative to the master clock */
                        pos = get_master_clock(cur_stream);
                        pos += incr;
                        stream_seek(cur_stream, (int64_t)(pos * AV_TIME_BASE), (int64_t)(incr * AV_TIME_BASE), 0);
                    }
                }
                break;
            default:
                break;
            }
            break;
        /* click or drag on the window seeks to the corresponding fraction
         * of the file (by bytes when timestamps are unusable) */
        case SDL_MOUSEBUTTONDOWN:
        case SDL_MOUSEMOTION:
            if(event.type ==SDL_MOUSEBUTTONDOWN){
                x= event.button.x;
            }else{
                if(event.motion.state != SDL_PRESSED)
                    break;
                x= event.motion.x;
            }
            if (cur_stream) {
                if(seek_by_bytes || cur_stream->ic->duration<=0){
                    uint64_t size=  url_fsize(cur_stream->ic->pb);
                    stream_seek(cur_stream, size*x/cur_stream->width, 0, 1);
                }else{
                    int64_t ts;
                    int ns, hh, mm, ss;
                    int tns, thh, tmm, tss;
                    /* total and target positions in h:m:s, for the log line */
                    tns = cur_stream->ic->duration/1000000LL;
                    thh = tns/3600;
                    tmm = (tns%3600)/60;
                    tss = (tns%60);
                    frac = x/cur_stream->width;
                    ns = frac*tns;
                    hh = ns/3600;
                    mm = (ns%3600)/60;
                    ss = (ns%60);
                    fprintf(stderr, "Seek to %2.0f%% (%2d:%02d:%02d) of total duration (%2d:%02d:%02d)       \n", frac*100,
                            hh, mm, ss, thh, tmm, tss);
                    ts = frac*cur_stream->ic->duration;
                    if (cur_stream->ic->start_time != AV_NOPTS_VALUE)
                        ts += cur_stream->ic->start_time;
                    stream_seek(cur_stream, ts, 0, 0);
                }
            }
            break;
        case SDL_VIDEORESIZE:
            if (cur_stream) {
                screen = SDL_SetVideoMode(event.resize.w, event.resize.h, 0,
                                          SDL_HWSURFACE|SDL_RESIZABLE|SDL_ASYNCBLIT|SDL_HWACCEL);
                screen_width = cur_stream->width = event.resize.w;
                screen_height= cur_stream->height= event.resize.h;
            }
            break;
        case SDL_QUIT:
        case FF_QUIT_EVENT:
            do_exit();
            break;
        case FF_ALLOC_EVENT:
            /* the video thread asked for an overlay (re)allocation, which
             * must happen in this (the SDL) thread */
            video_open(event.user.data1);
            alloc_picture(event.user.data1);
            break;
        case FF_REFRESH_EVENT:
            /* NOTE(review): cur_stream is dereferenced without a NULL
             * check here, unlike the key handlers above — presumably a
             * refresh event cannot outlive the stream; confirm */
            video_refresh_timer(event.user.data1);
            cur_stream->refresh=0;
            break;
        default:
            break;
        }
    }
}
2916
2917 static void opt_frame_size(const char *arg)
2918 {
2919     if (av_parse_video_frame_size(&frame_width, &frame_height, arg) < 0) {
2920         fprintf(stderr, "Incorrect frame size\n");
2921         exit(1);
2922     }
2923     if ((frame_width % 2) != 0 || (frame_height % 2) != 0) {
2924         fprintf(stderr, "Frame size must be a multiple of 2\n");
2925         exit(1);
2926     }
2927 }
2928
2929 static int opt_width(const char *opt, const char *arg)
2930 {
2931     screen_width = parse_number_or_die(opt, arg, OPT_INT64, 1, INT_MAX);
2932     return 0;
2933 }
2934
2935 static int opt_height(const char *opt, const char *arg)
2936 {
2937     screen_height = parse_number_or_die(opt, arg, OPT_INT64, 1, INT_MAX);
2938     return 0;
2939 }
2940
2941 static void opt_format(const char *arg)
2942 {
2943     file_iformat = av_find_input_format(arg);
2944     if (!file_iformat) {
2945         fprintf(stderr, "Unknown input format: %s\n", arg);
2946         exit(1);
2947     }
2948 }
2949
2950 static void opt_frame_pix_fmt(const char *arg)
2951 {
2952     frame_pix_fmt = av_get_pix_fmt(arg);
2953 }
2954
2955 static int opt_sync(const char *opt, const char *arg)
2956 {
2957     if (!strcmp(arg, "audio"))
2958         av_sync_type = AV_SYNC_AUDIO_MASTER;
2959     else if (!strcmp(arg, "video"))
2960         av_sync_type = AV_SYNC_VIDEO_MASTER;
2961     else if (!strcmp(arg, "ext"))
2962         av_sync_type = AV_SYNC_EXTERNAL_CLOCK;
2963     else {
2964         fprintf(stderr, "Unknown value for %s: %s\n", opt, arg);
2965         exit(1);
2966     }
2967     return 0;
2968 }
2969
2970 static int opt_seek(const char *opt, const char *arg)
2971 {
2972     start_time = parse_time_or_die(opt, arg, 1);
2973     return 0;
2974 }
2975
2976 static int opt_debug(const char *opt, const char *arg)
2977 {
2978     av_log_set_level(99);
2979     debug = parse_number_or_die(opt, arg, OPT_INT64, 0, INT_MAX);
2980     return 0;
2981 }
2982
2983 static int opt_vismv(const char *opt, const char *arg)
2984 {
2985     debug_mv = parse_number_or_die(opt, arg, OPT_INT64, INT_MIN, INT_MAX);
2986     return 0;
2987 }
2988
2989 static int opt_thread_count(const char *opt, const char *arg)
2990 {
2991     thread_count= parse_number_or_die(opt, arg, OPT_INT64, 0, INT_MAX);
2992 #if !HAVE_THREADS
2993     fprintf(stderr, "Warning: not compiled with thread support, using thread emulation\n");
2994 #endif
2995     return 0;
2996 }
2997
/* Command-line option table. Generic options (-h, -version, -formats, ...)
 * come from cmdutils_common_opts.h; OPT_FUNC2 entries dispatch to the
 * opt_* handlers above, the rest write directly into the globals. */
static const OptionDef options[] = {
#include "cmdutils_common_opts.h"
    { "x", HAS_ARG | OPT_FUNC2, {(void*)opt_width}, "force displayed width", "width" },
    { "y", HAS_ARG | OPT_FUNC2, {(void*)opt_height}, "force displayed height", "height" },
    { "s", HAS_ARG | OPT_VIDEO, {(void*)opt_frame_size}, "set frame size (WxH or abbreviation)", "size" },
    { "fs", OPT_BOOL, {(void*)&is_full_screen}, "force full screen" },
    { "an", OPT_BOOL, {(void*)&audio_disable}, "disable audio" },
    { "vn", OPT_BOOL, {(void*)&video_disable}, "disable video" },
    { "ast", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&wanted_stream[AVMEDIA_TYPE_AUDIO]}, "select desired audio stream", "stream_number" },
    { "vst", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&wanted_stream[AVMEDIA_TYPE_VIDEO]}, "select desired video stream", "stream_number" },
    { "sst", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&wanted_stream[AVMEDIA_TYPE_SUBTITLE]}, "select desired subtitle stream", "stream_number" },
    { "ss", HAS_ARG | OPT_FUNC2, {(void*)&opt_seek}, "seek to a given position in seconds", "pos" },
    { "bytes", OPT_INT | HAS_ARG, {(void*)&seek_by_bytes}, "seek by bytes 0=off 1=on -1=auto", "val" },
    { "nodisp", OPT_BOOL, {(void*)&display_disable}, "disable graphical display" },
    { "f", HAS_ARG, {(void*)opt_format}, "force format", "fmt" },
    { "pix_fmt", HAS_ARG | OPT_EXPERT | OPT_VIDEO, {(void*)opt_frame_pix_fmt}, "set pixel format", "format" },
    { "stats", OPT_BOOL | OPT_EXPERT, {(void*)&show_status}, "show status", "" },
    { "debug", HAS_ARG | OPT_FUNC2 | OPT_EXPERT, {(void*)opt_debug}, "print specific debug info", "" },
    { "bug", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&workaround_bugs}, "workaround bugs", "" },
    { "vismv", HAS_ARG | OPT_FUNC2 | OPT_EXPERT, {(void*)opt_vismv}, "visualize motion vectors", "" },
    { "fast", OPT_BOOL | OPT_EXPERT, {(void*)&fast}, "non spec compliant optimizations", "" },
    { "genpts", OPT_BOOL | OPT_EXPERT, {(void*)&genpts}, "generate pts", "" },
    { "drp", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&decoder_reorder_pts}, "let decoder reorder pts 0=off 1=on -1=auto", ""},
    { "lowres", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&lowres}, "", "" },
    { "skiploop", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&skip_loop_filter}, "", "" },
    { "skipframe", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&skip_frame}, "", "" },
    { "skipidct", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&skip_idct}, "", "" },
    { "idct", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&idct}, "set idct algo",  "algo" },
    { "er", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&error_recognition}, "set error detection threshold (0-4)",  "threshold" },
    { "ec", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&error_concealment}, "set error concealment options",  "bit_mask" },
    { "sync", HAS_ARG | OPT_FUNC2 | OPT_EXPERT, {(void*)opt_sync}, "set audio-video sync. type (type=audio/video/ext)", "type" },
    { "threads", HAS_ARG | OPT_FUNC2 | OPT_EXPERT, {(void*)opt_thread_count}, "thread count", "count" },
    { "autoexit", OPT_BOOL | OPT_EXPERT, {(void*)&autoexit}, "exit at the end", "" },
    { "loop", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&loop}, "set number of times the playback shall be looped", "loop count" },
    { "framedrop", OPT_BOOL | OPT_EXPERT, {(void*)&framedrop}, "drop frames when cpu is too slow", "" },
    { "window_title", OPT_STRING | HAS_ARG, {(void*)&window_title}, "set window title", "window title" },
#if CONFIG_AVFILTER
    { "vfilters", OPT_STRING | HAS_ARG, {(void*)&vfilters}, "video filters", "filter list" },
#endif
    { "rdftspeed", OPT_INT | HAS_ARG| OPT_AUDIO | OPT_EXPERT, {(void*)&rdftspeed}, "rdft speed", "msecs" },
    { "default", OPT_FUNC2 | HAS_ARG | OPT_AUDIO | OPT_VIDEO | OPT_EXPERT, {(void*)opt_default}, "generic catch all option", "" },
    { NULL, },
};
3041
/* Print the one-line program description and the usage synopsis. */
static void show_usage(void)
{
    fputs("Simple media player\n", stdout);
    fputs("usage: ffplay [options] input_file\n", stdout);
    fputs("\n", stdout);
}
3048
/* Print the usage line, the option tables (main, then expert) and the
 * interactive key/mouse bindings. */
static void show_help(void)
{
    show_usage();
    /* first call: entries without OPT_EXPERT; second: expert entries only */
    show_help_options(options, "Main options:\n",
                      OPT_EXPERT, 0);
    show_help_options(options, "\nAdvanced options:\n",
                      OPT_EXPERT, OPT_EXPERT);
    printf("\nWhile playing:\n"
           "q, ESC              quit\n"
           "f                   toggle full screen\n"
           "p, SPC              pause\n"
           "a                   cycle audio channel\n"
           "v                   cycle video channel\n"
           "t                   cycle subtitle channel\n"
           "w                   show audio waves\n"
           "left/right          seek backward/forward 10 seconds\n"
           "down/up             seek backward/forward 1 minute\n"
           "mouse click         seek to percentage in file corresponding to fraction of width\n"
           );
}
3069
3070 static void opt_input_file(const char *filename)
3071 {
3072     if (input_filename) {
3073         fprintf(stderr, "Argument '%s' provided as input filename, but '%s' was already specified.\n",
3074                 filename, input_filename);
3075         exit(1);
3076     }
3077     if (!strcmp(filename, "-"))
3078         filename = "pipe:";
3079     input_filename = filename;
3080 }
3081
/* Entry point: register the FFmpeg components, parse the command line,
 * initialize SDL, open the input stream and run the event loop (which
 * only returns via do_exit()). */
int main(int argc, char **argv)
{
    int flags, i;

    /* register all codecs, demux and protocols */
    avcodec_register_all();
    avdevice_register_all();
#if CONFIG_AVFILTER
    avfilter_register_all();
#endif
    av_register_all();

    /* per-media-type codec option contexts, freed in do_exit() */
    for(i=0; i<AVMEDIA_TYPE_NB; i++){
        avcodec_opts[i]= avcodec_alloc_context2(i);
    }
    avformat_opts = avformat_alloc_context();
#if !CONFIG_AVFILTER
    /* dummy scaler context, used only to hold -sws_flags option values */
    sws_opts = sws_getContext(16,16,0, 16,16,0, sws_flags, NULL,NULL,NULL);
#endif

    show_banner();

    parse_options(argc, argv, options, opt_input_file);

    if (!input_filename) {
        show_usage();
        fprintf(stderr, "An input file must be specified\n");
        fprintf(stderr, "Use -h to get full help or, even better, run 'man ffplay'\n");
        exit(1);
    }

    if (display_disable) {
        video_disable = 1;
    }
    flags = SDL_INIT_VIDEO | SDL_INIT_AUDIO | SDL_INIT_TIMER;
#if !defined(__MINGW32__) && !defined(__APPLE__)
    flags |= SDL_INIT_EVENTTHREAD; /* Not supported on Windows or Mac OS X */
#endif
    if (SDL_Init (flags)) {
        fprintf(stderr, "Could not initialize SDL - %s\n", SDL_GetError());
        exit(1);
    }

    if (!display_disable) {
#if HAVE_SDL_VIDEO_SIZE
        /* remember the desktop resolution for fullscreen mode */
        const SDL_VideoInfo *vi = SDL_GetVideoInfo();
        fs_screen_width = vi->current_w;
        fs_screen_height = vi->current_h;
#endif
    }

    /* drop event classes the player never handles */
    SDL_EventState(SDL_ACTIVEEVENT, SDL_IGNORE);
    SDL_EventState(SDL_SYSWMEVENT, SDL_IGNORE);
    SDL_EventState(SDL_USEREVENT, SDL_IGNORE);

    /* sentinel packet used to flush decoder queues on seek.
     * NOTE(review): assigns a string literal to the packet data pointer —
     * presumably this packet is never freed or written through; confirm */
    av_init_packet(&flush_pkt);
    flush_pkt.data= "FLUSH";

    cur_stream = stream_open(input_filename, file_iformat);

    event_loop();

    /* never returns */

    return 0;
}