1 /*
2  * ffplay : Simple Media Player based on the Libav libraries
3  * Copyright (c) 2003 Fabrice Bellard
4  *
5  * This file is part of Libav.
6  *
7  * Libav is free software; you can redistribute it and/or
8  * modify it under the terms of the GNU Lesser General Public
9  * License as published by the Free Software Foundation; either
10  * version 2.1 of the License, or (at your option) any later version.
11  *
12  * Libav is distributed in the hope that it will be useful,
13  * but WITHOUT ANY WARRANTY; without even the implied warranty of
14  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
15  * Lesser General Public License for more details.
16  *
17  * You should have received a copy of the GNU Lesser General Public
18  * License along with Libav; if not, write to the Free Software
19  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
20  */
21
22 #include "config.h"
23 #include <inttypes.h>
24 #include <math.h>
25 #include <limits.h>
26 #include "libavutil/avstring.h"
27 #include "libavutil/colorspace.h"
28 #include "libavutil/pixdesc.h"
29 #include "libavutil/imgutils.h"
30 #include "libavutil/parseutils.h"
31 #include "libavutil/samplefmt.h"
32 #include "libavformat/avformat.h"
33 #include "libavdevice/avdevice.h"
34 #include "libswscale/swscale.h"
35 #include "libavcodec/audioconvert.h"
36 #include "libavutil/opt.h"
37 #include "libavcodec/avfft.h"
38
39 #if CONFIG_AVFILTER
40 # include "libavfilter/avfilter.h"
41 # include "libavfilter/avfiltergraph.h"
42 #endif
43
44 #include "cmdutils.h"
45
46 #include <SDL.h>
47 #include <SDL_thread.h>
48
49 #ifdef __MINGW32__
50 #undef main /* We don't want SDL to override our main() */
51 #endif
52
53 #include <unistd.h>
54 #include <assert.h>
55
56 const char program_name[] = "ffplay";
57 const int program_birth_year = 2003;
58
59 //#define DEBUG
60 //#define DEBUG_SYNC
61
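/* thresholds used by the read thread to decide when enough packets are buffered */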
62 #define MAX_QUEUE_SIZE (15 * 1024 * 1024)
63 #define MIN_AUDIOQ_SIZE (20 * 16 * 1024)
64 #define MIN_FRAMES 5
65
66 /* SDL audio buffer size, in samples. Should be small to have precise
67    A/V sync as SDL does not have hardware buffer fullness info. */
68 #define SDL_AUDIO_BUFFER_SIZE 1024
69
70 /* no AV sync correction is done if below the AV sync threshold */
71 #define AV_SYNC_THRESHOLD 0.01
72 /* no AV correction is done if too big error */
73 #define AV_NOSYNC_THRESHOLD 10.0
74
75 #define FRAME_SKIP_FACTOR 0.05
76
77 /* maximum audio speed change to get correct sync */
78 #define SAMPLE_CORRECTION_PERCENT_MAX 10
79
80 /* we use about AUDIO_DIFF_AVG_NB A-V differences to make the average */
81 #define AUDIO_DIFF_AVG_NB   20
82
83 /* NOTE: the size must be big enough to compensate for the hardware audio buffer size */
84 #define SAMPLE_ARRAY_SIZE (2*65536)
85
86 static int sws_flags = SWS_BICUBIC;
87
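/* thread-safe FIFO of demuxed packets, shared between the read thread and the
   decoder threads */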
88 typedef struct PacketQueue {
89     AVPacketList *first_pkt, *last_pkt;
90     int nb_packets;
91     int size;
92     int abort_request;
93     SDL_mutex *mutex;
94     SDL_cond *cond;
95 } PacketQueue;
96
97 #define VIDEO_PICTURE_QUEUE_SIZE 2
98 #define SUBPICTURE_QUEUE_SIZE 4
99
100 typedef struct VideoPicture {
101     double pts;                                  ///<presentation time stamp for this picture
102     double target_clock;                         ///<av_gettime() time at which this should be displayed ideally
103     int64_t pos;                                 ///<byte position in file
104     SDL_Overlay *bmp;
105     int width, height; /* source height & width */
106     int allocated;
107     enum PixelFormat pix_fmt;
108
109 #if CONFIG_AVFILTER
110     AVFilterBufferRef *picref;
111 #endif
112 } VideoPicture;
113
114 typedef struct SubPicture {
115     double pts; /* presentation time stamp for this picture */
116     AVSubtitle sub;
117 } SubPicture;
118
119 enum {
120     AV_SYNC_AUDIO_MASTER, /* default choice */
121     AV_SYNC_VIDEO_MASTER,
122     AV_SYNC_EXTERNAL_CLOCK, /* synchronize to an external clock */
123 };
124
125 typedef struct VideoState {
126     SDL_Thread *parse_tid;
127     SDL_Thread *video_tid;
128     SDL_Thread *refresh_tid;
129     AVInputFormat *iformat;
130     int no_background;
131     int abort_request;
132     int paused;
133     int last_paused;
134     int seek_req;
135     int seek_flags;
136     int64_t seek_pos;
137     int64_t seek_rel;
138     int read_pause_return;
139     AVFormatContext *ic;
140     int dtg_active_format;
141
142     int audio_stream;
143
144     int av_sync_type;
145     double external_clock; /* external clock base */
146     int64_t external_clock_time;
147
148     double audio_clock;
149     double audio_diff_cum; /* used for AV difference average computation */
150     double audio_diff_avg_coef;
151     double audio_diff_threshold;
152     int audio_diff_avg_count;
153     AVStream *audio_st;
154     PacketQueue audioq;
155     int audio_hw_buf_size;
156     /* samples output by the codec. We reserve more space for A/V sync
157        compensation */
158     DECLARE_ALIGNED(16,uint8_t,audio_buf1)[(AVCODEC_MAX_AUDIO_FRAME_SIZE * 3) / 2];
159     DECLARE_ALIGNED(16,uint8_t,audio_buf2)[(AVCODEC_MAX_AUDIO_FRAME_SIZE * 3) / 2];
160     uint8_t *audio_buf;
161     unsigned int audio_buf_size; /* in bytes */
162     int audio_buf_index; /* in bytes */
163     AVPacket audio_pkt_temp;
164     AVPacket audio_pkt;
165     enum AVSampleFormat audio_src_fmt;
166     AVAudioConvert *reformat_ctx;
167
168     int show_audio; /* if true, display audio samples */
169     int16_t sample_array[SAMPLE_ARRAY_SIZE];
170     int sample_array_index;
171     int last_i_start;
172     RDFTContext *rdft;
173     int rdft_bits;
174     FFTSample *rdft_data;
175     int xpos;
176
177     SDL_Thread *subtitle_tid;
178     int subtitle_stream;
179     int subtitle_stream_changed;
180     AVStream *subtitle_st;
181     PacketQueue subtitleq;
182     SubPicture subpq[SUBPICTURE_QUEUE_SIZE];
183     int subpq_size, subpq_rindex, subpq_windex;
184     SDL_mutex *subpq_mutex;
185     SDL_cond *subpq_cond;
186
187     double frame_timer;
188     double frame_last_pts;
189     double frame_last_delay;
190     double video_clock;                          ///<pts of last decoded frame / predicted pts of next decoded frame
191     int video_stream;
192     AVStream *video_st;
193     PacketQueue videoq;
194     double video_current_pts;                    ///<current displayed pts (different from video_clock if frame fifos are used)
195     double video_current_pts_drift;              ///<video_current_pts - time (av_gettime) at which we updated video_current_pts - used to have running video pts
196     int64_t video_current_pos;                   ///<current displayed file pos
197     VideoPicture pictq[VIDEO_PICTURE_QUEUE_SIZE];
198     int pictq_size, pictq_rindex, pictq_windex;
199     SDL_mutex *pictq_mutex;
200     SDL_cond *pictq_cond;
201 #if !CONFIG_AVFILTER
202     struct SwsContext *img_convert_ctx;
203 #endif
204
205     //    QETimer *video_timer;
206     char filename[1024];
207     int width, height, xleft, ytop;
208
209     PtsCorrectionContext pts_ctx;
210
211 #if CONFIG_AVFILTER
212     AVFilterContext *out_video_filter;          ///<the last filter in the video chain
213 #endif
214
215     float skip_frames;
216     float skip_frames_index;
217     int refresh;
218 } VideoState;
219
220 static void show_help(void);
221
222 /* options specified by the user */
223 static AVInputFormat *file_iformat;
224 static const char *input_filename;
225 static const char *window_title;
226 static int fs_screen_width;
227 static int fs_screen_height;
228 static int screen_width = 0;
229 static int screen_height = 0;
230 static int frame_width = 0;
231 static int frame_height = 0;
232 static enum PixelFormat frame_pix_fmt = PIX_FMT_NONE;
233 static int audio_disable;
234 static int video_disable;
235 static int wanted_stream[AVMEDIA_TYPE_NB]={
236     [AVMEDIA_TYPE_AUDIO]=-1,
237     [AVMEDIA_TYPE_VIDEO]=-1,
238     [AVMEDIA_TYPE_SUBTITLE]=-1,
239 };
240 static int seek_by_bytes=-1;
241 static int display_disable;
242 static int show_status = 1;
243 static int av_sync_type = AV_SYNC_AUDIO_MASTER;
244 static int64_t start_time = AV_NOPTS_VALUE;
245 static int64_t duration = AV_NOPTS_VALUE;
246 static int debug = 0;
247 static int debug_mv = 0;
248 static int step = 0;
249 static int thread_count = 1;
250 static int workaround_bugs = 1;
251 static int fast = 0;
252 static int genpts = 0;
253 static int lowres = 0;
254 static int idct = FF_IDCT_AUTO;
255 static enum AVDiscard skip_frame= AVDISCARD_DEFAULT;
256 static enum AVDiscard skip_idct= AVDISCARD_DEFAULT;
257 static enum AVDiscard skip_loop_filter= AVDISCARD_DEFAULT;
258 static int error_recognition = FF_ER_CAREFUL;
259 static int error_concealment = 3;
260 static int decoder_reorder_pts= -1;
261 static int autoexit;
262 static int exit_on_keydown;
263 static int exit_on_mousedown;
264 static int loop=1;
265 static int framedrop=1;
266
267 static int rdftspeed=20;
268 #if CONFIG_AVFILTER
269 static char *vfilters = NULL;
270 #endif
271
272 /* current context */
273 static int is_full_screen;
274 static VideoState *cur_stream;
275 static int64_t audio_callback_time;
276
277 static AVPacket flush_pkt;
278
279 #define FF_ALLOC_EVENT   (SDL_USEREVENT)
280 #define FF_REFRESH_EVENT (SDL_USEREVENT + 1)
281 #define FF_QUIT_EVENT    (SDL_USEREVENT + 2)
282
283 static SDL_Surface *screen;
284
285 static int packet_queue_put(PacketQueue *q, AVPacket *pkt);
286
287 /* packet queue handling */
288 static void packet_queue_init(PacketQueue *q)
289 {
290     memset(q, 0, sizeof(PacketQueue));
291     q->mutex = SDL_CreateMutex();
292     q->cond = SDL_CreateCond();
293     packet_queue_put(q, &flush_pkt);
294 }
295
296 static void packet_queue_flush(PacketQueue *q)
297 {
298     AVPacketList *pkt, *pkt1;
299
300     SDL_LockMutex(q->mutex);
301     for(pkt = q->first_pkt; pkt != NULL; pkt = pkt1) {
302         pkt1 = pkt->next;
303         av_free_packet(&pkt->pkt);
304         av_freep(&pkt);
305     }
306     q->last_pkt = NULL;
307     q->first_pkt = NULL;
308     q->nb_packets = 0;
309     q->size = 0;
310     SDL_UnlockMutex(q->mutex);
311 }
312
313 static void packet_queue_end(PacketQueue *q)
314 {
315     packet_queue_flush(q);
316     SDL_DestroyMutex(q->mutex);
317     SDL_DestroyCond(q->cond);
318 }
319
320 static int packet_queue_put(PacketQueue *q, AVPacket *pkt)
321 {
322     AVPacketList *pkt1;
323
324     /* duplicate the packet */
325     if (pkt!=&flush_pkt && av_dup_packet(pkt) < 0)
326         return -1;
327
328     pkt1 = av_malloc(sizeof(AVPacketList));
329     if (!pkt1)
330         return -1;
331     pkt1->pkt = *pkt;
332     pkt1->next = NULL;
333
334
335     SDL_LockMutex(q->mutex);
336
337     if (!q->last_pkt)
339         q->first_pkt = pkt1;
340     else
341         q->last_pkt->next = pkt1;
342     q->last_pkt = pkt1;
343     q->nb_packets++;
344     q->size += pkt1->pkt.size + sizeof(*pkt1);
345     /* XXX: should duplicate packet data in DV case */
346     SDL_CondSignal(q->cond);
347
348     SDL_UnlockMutex(q->mutex);
349     return 0;
350 }
351
352 static void packet_queue_abort(PacketQueue *q)
353 {
354     SDL_LockMutex(q->mutex);
355
356     q->abort_request = 1;
357
358     SDL_CondSignal(q->cond);
359
360     SDL_UnlockMutex(q->mutex);
361 }
362
363 /* return < 0 if aborted, 0 if no packet and > 0 if packet.  */
364 static int packet_queue_get(PacketQueue *q, AVPacket *pkt, int block)
365 {
366     AVPacketList *pkt1;
367     int ret;
368
369     SDL_LockMutex(q->mutex);
370
371     for(;;) {
372         if (q->abort_request) {
373             ret = -1;
374             break;
375         }
376
377         pkt1 = q->first_pkt;
378         if (pkt1) {
379             q->first_pkt = pkt1->next;
380             if (!q->first_pkt)
381                 q->last_pkt = NULL;
382             q->nb_packets--;
383             q->size -= pkt1->pkt.size + sizeof(*pkt1);
384             *pkt = pkt1->pkt;
385             av_free(pkt1);
386             ret = 1;
387             break;
388         } else if (!block) {
389             ret = 0;
390             break;
391         } else {
392             SDL_CondWait(q->cond, q->mutex);
393         }
394     }
395     SDL_UnlockMutex(q->mutex);
396     return ret;
397 }
398
399 static inline void fill_rectangle(SDL_Surface *screen,
400                                   int x, int y, int w, int h, int color)
401 {
402     SDL_Rect rect;
403     rect.x = x;
404     rect.y = y;
405     rect.w = w;
406     rect.h = h;
407     SDL_FillRect(screen, &rect, color);
408 }
409
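/* Blend newp onto oldp with alpha a.  newp may be the sum of 2^s source
   samples (as done below for the subsampled chroma planes): oldp is scaled
   by 2^s so both operands cover the same range, and the result is
   normalized by 255 << s. */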
410 #define ALPHA_BLEND(a, oldp, newp, s)\
411 ((((oldp << s) * (255 - (a))) + (newp * (a))) / (255 << s))
412
413 #define RGBA_IN(r, g, b, a, s)\
414 {\
415     unsigned int v = ((const uint32_t *)(s))[0];\
416     a = (v >> 24) & 0xff;\
417     r = (v >> 16) & 0xff;\
418     g = (v >> 8) & 0xff;\
419     b = v & 0xff;\
420 }
421
422 #define YUVA_IN(y, u, v, a, s, pal)\
423 {\
424     unsigned int val = ((const uint32_t *)(pal))[*(const uint8_t*)(s)];\
425     a = (val >> 24) & 0xff;\
426     y = (val >> 16) & 0xff;\
427     u = (val >> 8) & 0xff;\
428     v = val & 0xff;\
429 }
430
431 #define YUVA_OUT(d, y, u, v, a)\
432 {\
433     ((uint32_t *)(d))[0] = (a << 24) | (y << 16) | (u << 8) | v;\
434 }
435
436
437 #define BPP 1
438
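/* blend a paletted subtitle rectangle (indexes in rect->pict.data[0],
   YUVA palette in rect->pict.data[1]) onto a YUV420P destination picture;
   chroma samples are accumulated in pairs/quads because of the 2x2
   subsampling */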
439 static void blend_subrect(AVPicture *dst, const AVSubtitleRect *rect, int imgw, int imgh)
440 {
441     int wrap, wrap3, width2, skip2;
442     int y, u, v, a, u1, v1, a1, w, h;
443     uint8_t *lum, *cb, *cr;
444     const uint8_t *p;
445     const uint32_t *pal;
446     int dstx, dsty, dstw, dsth;
447
448     dstw = av_clip(rect->w, 0, imgw);
449     dsth = av_clip(rect->h, 0, imgh);
450     dstx = av_clip(rect->x, 0, imgw - dstw);
451     dsty = av_clip(rect->y, 0, imgh - dsth);
452     lum = dst->data[0] + dsty * dst->linesize[0];
453     cb = dst->data[1] + (dsty >> 1) * dst->linesize[1];
454     cr = dst->data[2] + (dsty >> 1) * dst->linesize[2];
455
456     width2 = ((dstw + 1) >> 1) + (dstx & ~dstw & 1);
457     skip2 = dstx >> 1;
458     wrap = dst->linesize[0];
459     wrap3 = rect->pict.linesize[0];
460     p = rect->pict.data[0];
461     pal = (const uint32_t *)rect->pict.data[1];  /* Now in YCrCb! */
462
463     if (dsty & 1) {
464         lum += dstx;
465         cb += skip2;
466         cr += skip2;
467
468         if (dstx & 1) {
469             YUVA_IN(y, u, v, a, p, pal);
470             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
471             cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
472             cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
473             cb++;
474             cr++;
475             lum++;
476             p += BPP;
477         }
478         for(w = dstw - (dstx & 1); w >= 2; w -= 2) {
479             YUVA_IN(y, u, v, a, p, pal);
480             u1 = u;
481             v1 = v;
482             a1 = a;
483             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
484
485             YUVA_IN(y, u, v, a, p + BPP, pal);
486             u1 += u;
487             v1 += v;
488             a1 += a;
489             lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
490             cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
491             cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
492             cb++;
493             cr++;
494             p += 2 * BPP;
495             lum += 2;
496         }
497         if (w) {
498             YUVA_IN(y, u, v, a, p, pal);
499             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
500             cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
501             cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
502             p++;
503             lum++;
504         }
505         p += wrap3 - dstw * BPP;
506         lum += wrap - dstw - dstx;
507         cb += dst->linesize[1] - width2 - skip2;
508         cr += dst->linesize[2] - width2 - skip2;
509     }
510     for(h = dsth - (dsty & 1); h >= 2; h -= 2) {
511         lum += dstx;
512         cb += skip2;
513         cr += skip2;
514
515         if (dstx & 1) {
516             YUVA_IN(y, u, v, a, p, pal);
517             u1 = u;
518             v1 = v;
519             a1 = a;
520             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
521             p += wrap3;
522             lum += wrap;
523             YUVA_IN(y, u, v, a, p, pal);
524             u1 += u;
525             v1 += v;
526             a1 += a;
527             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
528             cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
529             cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
530             cb++;
531             cr++;
532             p += -wrap3 + BPP;
533             lum += -wrap + 1;
534         }
535         for(w = dstw - (dstx & 1); w >= 2; w -= 2) {
536             YUVA_IN(y, u, v, a, p, pal);
537             u1 = u;
538             v1 = v;
539             a1 = a;
540             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
541
542             YUVA_IN(y, u, v, a, p + BPP, pal);
543             u1 += u;
544             v1 += v;
545             a1 += a;
546             lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
547             p += wrap3;
548             lum += wrap;
549
550             YUVA_IN(y, u, v, a, p, pal);
551             u1 += u;
552             v1 += v;
553             a1 += a;
554             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
555
556             YUVA_IN(y, u, v, a, p + BPP, pal);
557             u1 += u;
558             v1 += v;
559             a1 += a;
560             lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
561
562             cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 2);
563             cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 2);
564
565             cb++;
566             cr++;
567             p += -wrap3 + 2 * BPP;
568             lum += -wrap + 2;
569         }
570         if (w) {
571             YUVA_IN(y, u, v, a, p, pal);
572             u1 = u;
573             v1 = v;
574             a1 = a;
575             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
576             p += wrap3;
577             lum += wrap;
578             YUVA_IN(y, u, v, a, p, pal);
579             u1 += u;
580             v1 += v;
581             a1 += a;
582             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
583             cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
584             cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
585             cb++;
586             cr++;
587             p += -wrap3 + BPP;
588             lum += -wrap + 1;
589         }
590         p += wrap3 + (wrap3 - dstw * BPP);
591         lum += wrap + (wrap - dstw - dstx);
592         cb += dst->linesize[1] - width2 - skip2;
593         cr += dst->linesize[2] - width2 - skip2;
594     }
595     /* handle odd height */
596     if (h) {
597         lum += dstx;
598         cb += skip2;
599         cr += skip2;
600
601         if (dstx & 1) {
602             YUVA_IN(y, u, v, a, p, pal);
603             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
604             cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
605             cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
606             cb++;
607             cr++;
608             lum++;
609             p += BPP;
610         }
611         for(w = dstw - (dstx & 1); w >= 2; w -= 2) {
612             YUVA_IN(y, u, v, a, p, pal);
613             u1 = u;
614             v1 = v;
615             a1 = a;
616             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
617
618             YUVA_IN(y, u, v, a, p + BPP, pal);
619             u1 += u;
620             v1 += v;
621             a1 += a;
622             lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
623             cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u, 1);
624             cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v, 1);
625             cb++;
626             cr++;
627             p += 2 * BPP;
628             lum += 2;
629         }
630         if (w) {
631             YUVA_IN(y, u, v, a, p, pal);
632             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
633             cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
634             cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
635         }
636     }
637 }
638
639 static void free_subpicture(SubPicture *sp)
640 {
641     avsubtitle_free(&sp->sub);
642 }
643
644 static void video_image_display(VideoState *is)
645 {
646     VideoPicture *vp;
647     SubPicture *sp;
648     AVPicture pict;
649     float aspect_ratio;
650     int width, height, x, y;
651     SDL_Rect rect;
652     int i;
653
654     vp = &is->pictq[is->pictq_rindex];
655     if (vp->bmp) {
656 #if CONFIG_AVFILTER
657          if (vp->picref->video->pixel_aspect.num == 0)
658              aspect_ratio = 0;
659          else
660              aspect_ratio = av_q2d(vp->picref->video->pixel_aspect);
661 #else
662
663         /* XXX: use variable in the frame */
664         if (is->video_st->sample_aspect_ratio.num)
665             aspect_ratio = av_q2d(is->video_st->sample_aspect_ratio);
666         else if (is->video_st->codec->sample_aspect_ratio.num)
667             aspect_ratio = av_q2d(is->video_st->codec->sample_aspect_ratio);
668         else
669             aspect_ratio = 0;
670 #endif
671         if (aspect_ratio <= 0.0)
672             aspect_ratio = 1.0;
673         aspect_ratio *= (float)vp->width / (float)vp->height;
674
675         if (is->subtitle_st)
676         {
677             if (is->subpq_size > 0)
678             {
679                 sp = &is->subpq[is->subpq_rindex];
680
681                 if (vp->pts >= sp->pts + ((float) sp->sub.start_display_time / 1000))
682                 {
683                     SDL_LockYUVOverlay (vp->bmp);
684
685                     pict.data[0] = vp->bmp->pixels[0];
686                     pict.data[1] = vp->bmp->pixels[2];
687                     pict.data[2] = vp->bmp->pixels[1];
688
689                     pict.linesize[0] = vp->bmp->pitches[0];
690                     pict.linesize[1] = vp->bmp->pitches[2];
691                     pict.linesize[2] = vp->bmp->pitches[1];
692
693                     for (i = 0; i < sp->sub.num_rects; i++)
694                         blend_subrect(&pict, sp->sub.rects[i],
695                                       vp->bmp->w, vp->bmp->h);
696
697                     SDL_UnlockYUVOverlay (vp->bmp);
698                 }
699             }
700         }
701
702
703         /* XXX: we suppose the screen has a 1.0 pixel ratio */
704         height = is->height;
705         width = ((int)rint(height * aspect_ratio)) & ~1;
706         if (width > is->width) {
707             width = is->width;
708             height = ((int)rint(width / aspect_ratio)) & ~1;
709         }
710         x = (is->width - width) / 2;
711         y = (is->height - height) / 2;
712         is->no_background = 0;
713         rect.x = is->xleft + x;
714         rect.y = is->ytop  + y;
715         rect.w = width;
716         rect.h = height;
717         SDL_DisplayYUVOverlay(vp->bmp, &rect);
718     }
719 }
720
721 /* get the current audio output buffer size, in bytes. With SDL, we
722    cannot get precise information. */
723 static int audio_write_get_buf_size(VideoState *is)
724 {
725     return is->audio_buf_size - is->audio_buf_index;
726 }
727
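/* modulo that always returns a value in [0, b), even for negative a;
   used to index the circular sample_array */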
728 static inline int compute_mod(int a, int b)
729 {
730     a = a % b;
731     if (a >= 0)
732         return a;
733     else
734         return a + b;
735 }
736
737 static void video_audio_display(VideoState *s)
738 {
739     int i, i_start, x, y1, y, ys, delay, n, nb_display_channels;
740     int ch, channels, h, h2, bgcolor, fgcolor;
741     int64_t time_diff;
742     int rdft_bits, nb_freq;
743
744     for(rdft_bits=1; (1<<rdft_bits)<2*s->height; rdft_bits++)
745         ;
746     nb_freq= 1<<(rdft_bits-1);
747
748     /* compute display index : center on currently output samples */
749     channels = s->audio_st->codec->channels;
750     nb_display_channels = channels;
751     if (!s->paused) {
752         int data_used= s->show_audio==1 ? s->width : (2*nb_freq);
753         n = 2 * channels;
754         delay = audio_write_get_buf_size(s);
755         delay /= n;
756
757         /* to be more precise, we take into account the time spent since
758            the last buffer computation */
759         if (audio_callback_time) {
760             time_diff = av_gettime() - audio_callback_time;
761             delay -= (time_diff * s->audio_st->codec->sample_rate) / 1000000;
762         }
763
764         delay += 2*data_used;
765         if (delay < data_used)
766             delay = data_used;
767
768         i_start= x = compute_mod(s->sample_array_index - delay * channels, SAMPLE_ARRAY_SIZE);
769         if(s->show_audio==1){
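            /* heuristic: among the last ~1000 samples, pick the start index
               with a sign change (b and c have opposite signs) and the largest
               local swing (a - d), so successive refreshes begin at a similar
               phase and the waveform does not jitter */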
770             h= INT_MIN;
771             for(i=0; i<1000; i+=channels){
772                 int idx= (SAMPLE_ARRAY_SIZE + x - i) % SAMPLE_ARRAY_SIZE;
773                 int a= s->sample_array[idx];
774                 int b= s->sample_array[(idx + 4*channels)%SAMPLE_ARRAY_SIZE];
775                 int c= s->sample_array[(idx + 5*channels)%SAMPLE_ARRAY_SIZE];
776                 int d= s->sample_array[(idx + 9*channels)%SAMPLE_ARRAY_SIZE];
777                 int score= a-d;
778                 if(h<score && (b^c)<0){
779                     h= score;
780                     i_start= idx;
781                 }
782             }
783         }
784
785         s->last_i_start = i_start;
786     } else {
787         i_start = s->last_i_start;
788     }
789
790     bgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0x00);
791     if(s->show_audio==1){
792         fill_rectangle(screen,
793                        s->xleft, s->ytop, s->width, s->height,
794                        bgcolor);
795
796         fgcolor = SDL_MapRGB(screen->format, 0xff, 0xff, 0xff);
797
798         /* total height for one channel */
799         h = s->height / nb_display_channels;
800         /* graph height / 2 */
801         h2 = (h * 9) / 20;
802         for(ch = 0;ch < nb_display_channels; ch++) {
803             i = i_start + ch;
804             y1 = s->ytop + ch * h + (h / 2); /* position of center line */
805             for(x = 0; x < s->width; x++) {
806                 y = (s->sample_array[i] * h2) >> 15;
807                 if (y < 0) {
808                     y = -y;
809                     ys = y1 - y;
810                 } else {
811                     ys = y1;
812                 }
813                 fill_rectangle(screen,
814                                s->xleft + x, ys, 1, y,
815                                fgcolor);
816                 i += channels;
817                 if (i >= SAMPLE_ARRAY_SIZE)
818                     i -= SAMPLE_ARRAY_SIZE;
819             }
820         }
821
822         fgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0xff);
823
824         for(ch = 1;ch < nb_display_channels; ch++) {
825             y = s->ytop + ch * h;
826             fill_rectangle(screen,
827                            s->xleft, y, s->width, 1,
828                            fgcolor);
829         }
830         SDL_UpdateRect(screen, s->xleft, s->ytop, s->width, s->height);
831     }else{
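        /* spectrum analyser: apply a parabolic window to the most recent
           2*nb_freq samples, run a real FFT per channel and draw one column
           of magnitudes per refresh, scrolling xpos across the display */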
832         nb_display_channels= FFMIN(nb_display_channels, 2);
833         if(rdft_bits != s->rdft_bits){
834             av_rdft_end(s->rdft);
835             av_free(s->rdft_data);
836             s->rdft = av_rdft_init(rdft_bits, DFT_R2C);
837             s->rdft_bits= rdft_bits;
838             s->rdft_data= av_malloc(4*nb_freq*sizeof(*s->rdft_data));
839         }
840         {
841             FFTSample *data[2];
842             for(ch = 0;ch < nb_display_channels; ch++) {
843                 data[ch] = s->rdft_data + 2*nb_freq*ch;
844                 i = i_start + ch;
845                 for(x = 0; x < 2*nb_freq; x++) {
846                     double w= (x-nb_freq)*(1.0/nb_freq);
847                     data[ch][x]= s->sample_array[i]*(1.0-w*w);
848                     i += channels;
849                     if (i >= SAMPLE_ARRAY_SIZE)
850                         i -= SAMPLE_ARRAY_SIZE;
851                 }
852                 av_rdft_calc(s->rdft, data[ch]);
853             }
854             // least efficient way to do this; we should of course access the pixels directly, but it's more than fast enough
855             for(y=0; y<s->height; y++){
856                 double w= 1/sqrt(nb_freq);
857                 int a= sqrt(w*sqrt(data[0][2*y+0]*data[0][2*y+0] + data[0][2*y+1]*data[0][2*y+1]));
858                 int b= (nb_display_channels == 2 ) ? sqrt(w*sqrt(data[1][2*y+0]*data[1][2*y+0]
859                        + data[1][2*y+1]*data[1][2*y+1])) : a;
860                 a= FFMIN(a,255);
861                 b= FFMIN(b,255);
862                 fgcolor = SDL_MapRGB(screen->format, a, b, (a+b)/2);
863
864                 fill_rectangle(screen,
865                             s->xpos, s->height-y, 1, 1,
866                             fgcolor);
867             }
868         }
869         SDL_UpdateRect(screen, s->xpos, s->ytop, 1, s->height);
870         s->xpos++;
871         if(s->xpos >= s->width)
872             s->xpos= s->xleft;
873     }
874 }
875
876 static int video_open(VideoState *is){
877     int flags = SDL_HWSURFACE|SDL_ASYNCBLIT|SDL_HWACCEL;
878     int w,h;
879
880     if(is_full_screen) flags |= SDL_FULLSCREEN;
881     else               flags |= SDL_RESIZABLE;
882
883     if (is_full_screen && fs_screen_width) {
884         w = fs_screen_width;
885         h = fs_screen_height;
886     } else if(!is_full_screen && screen_width){
887         w = screen_width;
888         h = screen_height;
889 #if CONFIG_AVFILTER
890     }else if (is->out_video_filter && is->out_video_filter->inputs[0]){
891         w = is->out_video_filter->inputs[0]->w;
892         h = is->out_video_filter->inputs[0]->h;
893 #else
894     }else if (is->video_st && is->video_st->codec->width){
895         w = is->video_st->codec->width;
896         h = is->video_st->codec->height;
897 #endif
898     } else {
899         w = 640;
900         h = 480;
901     }
902     if(screen && is->width == screen->w && screen->w == w
903        && is->height== screen->h && screen->h == h)
904         return 0;
905
906 #ifndef __APPLE__
907     screen = SDL_SetVideoMode(w, h, 0, flags);
908 #else
909     /* setting bits_per_pixel = 0 or 32 causes blank video on OS X */
910     screen = SDL_SetVideoMode(w, h, 24, flags);
911 #endif
912     if (!screen) {
913         fprintf(stderr, "SDL: could not set video mode - exiting\n");
914         return -1;
915     }
916     if (!window_title)
917         window_title = input_filename;
918     SDL_WM_SetCaption(window_title, window_title);
919
920     is->width = screen->w;
921     is->height = screen->h;
922
923     return 0;
924 }
925
926 /* display the current picture, if any */
927 static void video_display(VideoState *is)
928 {
929     if(!screen)
930         video_open(cur_stream);
931     if (is->audio_st && is->show_audio)
932         video_audio_display(is);
933     else if (is->video_st)
934         video_image_display(is);
935 }
936
937 static int refresh_thread(void *opaque)
938 {
939     VideoState *is= opaque;
940     while(!is->abort_request){
941         SDL_Event event;
942         event.type = FF_REFRESH_EVENT;
943         event.user.data1 = opaque;
944         if(!is->refresh){
945             is->refresh=1;
946             SDL_PushEvent(&event);
947         }
948         usleep(is->audio_st && is->show_audio ? rdftspeed*1000 : 5000); //FIXME ideally we should wait for the correct time, but SDL's event passing is so slow it would be silly
949     }
950     return 0;
951 }
952
953 /* get the current audio clock value */
954 static double get_audio_clock(VideoState *is)
955 {
956     double pts;
957     int hw_buf_size, bytes_per_sec;
958     pts = is->audio_clock;
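    /* audio_clock is the pts at the end of the last decoded data; subtract
       the data still sitting in the output buffer (2 bytes per sample per
       channel, i.e. S16) to get the pts of the sample being played now */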
959     hw_buf_size = audio_write_get_buf_size(is);
960     bytes_per_sec = 0;
961     if (is->audio_st) {
962         bytes_per_sec = is->audio_st->codec->sample_rate *
963             2 * is->audio_st->codec->channels;
964     }
965     if (bytes_per_sec)
966         pts -= (double)hw_buf_size / bytes_per_sec;
967     return pts;
968 }
969
970 /* get the current video clock value */
971 static double get_video_clock(VideoState *is)
972 {
973     if (is->paused) {
974         return is->video_current_pts;
975     } else {
976         return is->video_current_pts_drift + av_gettime() / 1000000.0;
977     }
978 }
979
980 /* get the current external clock value */
981 static double get_external_clock(VideoState *is)
982 {
983     int64_t ti;
984     ti = av_gettime();
985     return is->external_clock + ((ti - is->external_clock_time) * 1e-6);
986 }
987
988 /* get the current master clock value */
989 static double get_master_clock(VideoState *is)
990 {
991     double val;
992
993     if (is->av_sync_type == AV_SYNC_VIDEO_MASTER) {
994         if (is->video_st)
995             val = get_video_clock(is);
996         else
997             val = get_audio_clock(is);
998     } else if (is->av_sync_type == AV_SYNC_AUDIO_MASTER) {
999         if (is->audio_st)
1000             val = get_audio_clock(is);
1001         else
1002             val = get_video_clock(is);
1003     } else {
1004         val = get_external_clock(is);
1005     }
1006     return val;
1007 }
1008
1009 /* seek in the stream */
1010 static void stream_seek(VideoState *is, int64_t pos, int64_t rel, int seek_by_bytes)
1011 {
1012     if (!is->seek_req) {
1013         is->seek_pos = pos;
1014         is->seek_rel = rel;
1015         is->seek_flags &= ~AVSEEK_FLAG_BYTE;
1016         if (seek_by_bytes)
1017             is->seek_flags |= AVSEEK_FLAG_BYTE;
1018         is->seek_req = 1;
1019     }
1020 }
1021
1022 /* pause or resume the video */
1023 static void stream_pause(VideoState *is)
1024 {
1025     if (is->paused) {
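        /* about to resume: advance frame_timer by the time elapsed since
           video_current_pts was last updated (roughly the time spent paused)
           so the upcoming frames are not considered late */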
1026         is->frame_timer += av_gettime() / 1000000.0 + is->video_current_pts_drift - is->video_current_pts;
1027         if(is->read_pause_return != AVERROR(ENOSYS)){
1028             is->video_current_pts = is->video_current_pts_drift + av_gettime() / 1000000.0;
1029         }
1030         is->video_current_pts_drift = is->video_current_pts - av_gettime() / 1000000.0;
1031     }
1032     is->paused = !is->paused;
1033 }
1034
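/* compute the time at which the given picture should be displayed: accumulate
   the nominal frame delay into is->frame_timer, stretching or shrinking the
   delay to follow the master clock when video is not the master */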
1035 static double compute_target_time(double frame_current_pts, VideoState *is)
1036 {
1037     double delay, sync_threshold, diff;
1038
1039     /* compute nominal delay */
1040     delay = frame_current_pts - is->frame_last_pts;
1041     if (delay <= 0 || delay >= 10.0) {
1042         /* if incorrect delay, use previous one */
1043         delay = is->frame_last_delay;
1044     } else {
1045         is->frame_last_delay = delay;
1046     }
1047     is->frame_last_pts = frame_current_pts;
1048
1049     /* update delay to follow master synchronisation source */
1050     if (((is->av_sync_type == AV_SYNC_AUDIO_MASTER && is->audio_st) ||
1051          is->av_sync_type == AV_SYNC_EXTERNAL_CLOCK)) {
1052         /* if video is slave, we try to correct big delays by
1053            duplicating or deleting a frame */
1054         diff = get_video_clock(is) - get_master_clock(is);
1055
1056         /* skip or repeat frame. We take into account the
1057            delay to compute the threshold. I still don't know
1058            if it is the best guess */
1059         sync_threshold = FFMAX(AV_SYNC_THRESHOLD, delay);
1060         if (fabs(diff) < AV_NOSYNC_THRESHOLD) {
1061             if (diff <= -sync_threshold)
1062                 delay = 0;
1063             else if (diff >= sync_threshold)
1064                 delay = 2 * delay;
1065         }
1066     }
1067     is->frame_timer += delay;
1068 #if defined(DEBUG_SYNC)
1069     printf("video: delay=%0.3f actual_delay=%0.3f pts=%0.3f A-V=%f\n",
1070             delay, actual_delay, frame_current_pts, -diff);
1071 #endif
1072
1073     return is->frame_timer;
1074 }
1075
1076 /* called to display each frame */
1077 static void video_refresh_timer(void *opaque)
1078 {
1079     VideoState *is = opaque;
1080     VideoPicture *vp;
1081
1082     SubPicture *sp, *sp2;
1083
1084     if (is->video_st) {
1085 retry:
1086         if (is->pictq_size == 0) {
1087             //nothing to do, no picture to display in the queue
1088         } else {
1089             double time= av_gettime()/1000000.0;
1090             double next_target;
1091             /* dequeue the picture */
1092             vp = &is->pictq[is->pictq_rindex];
1093
1094             if(time < vp->target_clock)
1095                 return;
1096             /* update current video pts */
1097             is->video_current_pts = vp->pts;
1098             is->video_current_pts_drift = is->video_current_pts - time;
1099             is->video_current_pos = vp->pos;
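            /* work out when the following picture is due: from the next queued
               picture if there is one, otherwise estimated from the stream clock;
               if we are already past that time, drop this picture to catch up */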
1100             if(is->pictq_size > 1){
1101                 VideoPicture *nextvp= &is->pictq[(is->pictq_rindex+1)%VIDEO_PICTURE_QUEUE_SIZE];
1102                 assert(nextvp->target_clock >= vp->target_clock);
1103                 next_target= nextvp->target_clock;
1104             }else{
1105                 next_target= vp->target_clock + is->video_clock - vp->pts; //FIXME pass durations cleanly
1106             }
1107             if(framedrop && time > next_target){
1108                 is->skip_frames *= 1.0 + FRAME_SKIP_FACTOR;
1109                 if(is->pictq_size > 1 || time > next_target + 0.5){
1110                     /* update queue size and signal for next picture */
1111                     if (++is->pictq_rindex == VIDEO_PICTURE_QUEUE_SIZE)
1112                         is->pictq_rindex = 0;
1113
1114                     SDL_LockMutex(is->pictq_mutex);
1115                     is->pictq_size--;
1116                     SDL_CondSignal(is->pictq_cond);
1117                     SDL_UnlockMutex(is->pictq_mutex);
1118                     goto retry;
1119                 }
1120             }
1121
1122             if(is->subtitle_st) {
1123                 if (is->subtitle_stream_changed) {
1124                     SDL_LockMutex(is->subpq_mutex);
1125
1126                     while (is->subpq_size) {
1127                         free_subpicture(&is->subpq[is->subpq_rindex]);
1128
1129                         /* update queue size and signal for next picture */
1130                         if (++is->subpq_rindex == SUBPICTURE_QUEUE_SIZE)
1131                             is->subpq_rindex = 0;
1132
1133                         is->subpq_size--;
1134                     }
1135                     is->subtitle_stream_changed = 0;
1136
1137                     SDL_CondSignal(is->subpq_cond);
1138                     SDL_UnlockMutex(is->subpq_mutex);
1139                 } else {
1140                     if (is->subpq_size > 0) {
1141                         sp = &is->subpq[is->subpq_rindex];
1142
1143                         if (is->subpq_size > 1)
1144                             sp2 = &is->subpq[(is->subpq_rindex + 1) % SUBPICTURE_QUEUE_SIZE];
1145                         else
1146                             sp2 = NULL;
1147
1148                         if ((is->video_current_pts > (sp->pts + ((float) sp->sub.end_display_time / 1000)))
1149                                 || (sp2 && is->video_current_pts > (sp2->pts + ((float) sp2->sub.start_display_time / 1000))))
1150                         {
1151                             free_subpicture(sp);
1152
1153                             /* update queue size and signal for next picture */
1154                             if (++is->subpq_rindex == SUBPICTURE_QUEUE_SIZE)
1155                                 is->subpq_rindex = 0;
1156
1157                             SDL_LockMutex(is->subpq_mutex);
1158                             is->subpq_size--;
1159                             SDL_CondSignal(is->subpq_cond);
1160                             SDL_UnlockMutex(is->subpq_mutex);
1161                         }
1162                     }
1163                 }
1164             }
1165
1166             /* display picture */
1167             if (!display_disable)
1168                 video_display(is);
1169
1170             /* update queue size and signal for next picture */
1171             if (++is->pictq_rindex == VIDEO_PICTURE_QUEUE_SIZE)
1172                 is->pictq_rindex = 0;
1173
1174             SDL_LockMutex(is->pictq_mutex);
1175             is->pictq_size--;
1176             SDL_CondSignal(is->pictq_cond);
1177             SDL_UnlockMutex(is->pictq_mutex);
1178         }
1179     } else if (is->audio_st) {
1180         /* draw the next audio frame */
1181
1182         /* if only audio stream, then display the audio bars (better
1183            than nothing, just to test the implementation) */
1184
1185         /* display picture */
1186         if (!display_disable)
1187             video_display(is);
1188     }
1189     if (show_status) {
1190         static int64_t last_time;
1191         int64_t cur_time;
1192         int aqsize, vqsize, sqsize;
1193         double av_diff;
1194
1195         cur_time = av_gettime();
1196         if (!last_time || (cur_time - last_time) >= 30000) {
1197             aqsize = 0;
1198             vqsize = 0;
1199             sqsize = 0;
1200             if (is->audio_st)
1201                 aqsize = is->audioq.size;
1202             if (is->video_st)
1203                 vqsize = is->videoq.size;
1204             if (is->subtitle_st)
1205                 sqsize = is->subtitleq.size;
1206             av_diff = 0;
1207             if (is->audio_st && is->video_st)
1208                 av_diff = get_audio_clock(is) - get_video_clock(is);
1209             printf("%7.2f A-V:%7.3f s:%3.1f aq=%5dKB vq=%5dKB sq=%5dB f=%"PRId64"/%"PRId64"   \r",
1210                    get_master_clock(is), av_diff, FFMAX(is->skip_frames-1, 0), aqsize / 1024, vqsize / 1024, sqsize, is->pts_ctx.num_faulty_dts, is->pts_ctx.num_faulty_pts);
1211             fflush(stdout);
1212             last_time = cur_time;
1213         }
1214     }
1215 }
1216
1217 static void stream_close(VideoState *is)
1218 {
1219     VideoPicture *vp;
1220     int i;
1221     /* XXX: use a special url_shutdown call to abort parse cleanly */
1222     is->abort_request = 1;
1223     SDL_WaitThread(is->parse_tid, NULL);
1224     SDL_WaitThread(is->refresh_tid, NULL);
1225
1226     /* free all pictures */
1227     for(i=0;i<VIDEO_PICTURE_QUEUE_SIZE; i++) {
1228         vp = &is->pictq[i];
1229 #if CONFIG_AVFILTER
1230         if (vp->picref) {
1231             avfilter_unref_buffer(vp->picref);
1232             vp->picref = NULL;
1233         }
1234 #endif
1235         if (vp->bmp) {
1236             SDL_FreeYUVOverlay(vp->bmp);
1237             vp->bmp = NULL;
1238         }
1239     }
1240     SDL_DestroyMutex(is->pictq_mutex);
1241     SDL_DestroyCond(is->pictq_cond);
1242     SDL_DestroyMutex(is->subpq_mutex);
1243     SDL_DestroyCond(is->subpq_cond);
1244 #if !CONFIG_AVFILTER
1245     if (is->img_convert_ctx)
1246         sws_freeContext(is->img_convert_ctx);
1247 #endif
1248     av_free(is);
1249 }
1250
1251 static void do_exit(void)
1252 {
1253     if (cur_stream) {
1254         stream_close(cur_stream);
1255         cur_stream = NULL;
1256     }
1257     uninit_opts();
1258 #if CONFIG_AVFILTER
1259     avfilter_uninit();
1260 #endif
1261     if (show_status)
1262         printf("\n");
1263     SDL_Quit();
1264     av_log(NULL, AV_LOG_QUIET, "");
1265     exit(0);
1266 }
1267
1268 /* allocate a picture (this must be done in the main thread to avoid
1269    potential locking problems) */
1270 static void alloc_picture(void *opaque)
1271 {
1272     VideoState *is = opaque;
1273     VideoPicture *vp;
1274
1275     vp = &is->pictq[is->pictq_windex];
1276
1277     if (vp->bmp)
1278         SDL_FreeYUVOverlay(vp->bmp);
1279
1280 #if CONFIG_AVFILTER
1281     if (vp->picref)
1282         avfilter_unref_buffer(vp->picref);
1283     vp->picref = NULL;
1284
1285     vp->width   = is->out_video_filter->inputs[0]->w;
1286     vp->height  = is->out_video_filter->inputs[0]->h;
1287     vp->pix_fmt = is->out_video_filter->inputs[0]->format;
1288 #else
1289     vp->width   = is->video_st->codec->width;
1290     vp->height  = is->video_st->codec->height;
1291     vp->pix_fmt = is->video_st->codec->pix_fmt;
1292 #endif
1293
1294     vp->bmp = SDL_CreateYUVOverlay(vp->width, vp->height,
1295                                    SDL_YV12_OVERLAY,
1296                                    screen);
1297     if (!vp->bmp || vp->bmp->pitches[0] < vp->width) {
1298         /* SDL allocates a buffer smaller than requested if the video
1299          * overlay hardware is unable to support the requested size. */
1300         fprintf(stderr, "Error: the video system does not support an image\n"
1301                         "size of %dx%d pixels. Try using -lowres or -vf \"scale=w:h\"\n"
1302                         "to reduce the image size.\n", vp->width, vp->height );
1303         do_exit();
1304     }
1305
1306     SDL_LockMutex(is->pictq_mutex);
1307     vp->allocated = 1;
1308     SDL_CondSignal(is->pictq_cond);
1309     SDL_UnlockMutex(is->pictq_mutex);
1310 }
1311
1312 /**
1313  * Queue the given frame for display.
1314  * @param pts the pts of the frame (or the dts of the packet), guessed if not known
1315  */
1316 static int queue_picture(VideoState *is, AVFrame *src_frame, double pts, int64_t pos)
1317 {
1318     VideoPicture *vp;
1319     int dst_pix_fmt;
1320 #if CONFIG_AVFILTER
1321     AVPicture pict_src;
1322 #endif
1323     /* wait until we have space to put a new picture */
1324     SDL_LockMutex(is->pictq_mutex);
1325
1326     if(is->pictq_size>=VIDEO_PICTURE_QUEUE_SIZE && !is->refresh)
1327         is->skip_frames= FFMAX(1.0 - FRAME_SKIP_FACTOR, is->skip_frames * (1.0-FRAME_SKIP_FACTOR));
1328
1329     while (is->pictq_size >= VIDEO_PICTURE_QUEUE_SIZE &&
1330            !is->videoq.abort_request) {
1331         SDL_CondWait(is->pictq_cond, is->pictq_mutex);
1332     }
1333     SDL_UnlockMutex(is->pictq_mutex);
1334
1335     if (is->videoq.abort_request)
1336         return -1;
1337
1338     vp = &is->pictq[is->pictq_windex];
1339
1340     /* alloc or resize hardware picture buffer */
1341     if (!vp->bmp ||
1342 #if CONFIG_AVFILTER
1343         vp->width  != is->out_video_filter->inputs[0]->w ||
1344         vp->height != is->out_video_filter->inputs[0]->h) {
1345 #else
1346         vp->width != is->video_st->codec->width ||
1347         vp->height != is->video_st->codec->height) {
1348 #endif
1349         SDL_Event event;
1350
1351         vp->allocated = 0;
1352
1353         /* the allocation must be done in the main thread to avoid
1354            locking problems */
1355         event.type = FF_ALLOC_EVENT;
1356         event.user.data1 = is;
1357         SDL_PushEvent(&event);
1358
1359         /* wait until the picture is allocated */
1360         SDL_LockMutex(is->pictq_mutex);
1361         while (!vp->allocated && !is->videoq.abort_request) {
1362             SDL_CondWait(is->pictq_cond, is->pictq_mutex);
1363         }
1364         SDL_UnlockMutex(is->pictq_mutex);
1365
1366         if (is->videoq.abort_request)
1367             return -1;
1368     }
1369
1370     /* if the frame is not skipped, then display it */
1371     if (vp->bmp) {
1372         AVPicture pict;
1373 #if CONFIG_AVFILTER
1374         if(vp->picref)
1375             avfilter_unref_buffer(vp->picref);
1376         vp->picref = src_frame->opaque;
1377 #endif
1378
1379         /* get a pointer on the bitmap */
1380         SDL_LockYUVOverlay (vp->bmp);
1381
1382         dst_pix_fmt = PIX_FMT_YUV420P;
1383         memset(&pict,0,sizeof(AVPicture));
1384         pict.data[0] = vp->bmp->pixels[0];
1385         pict.data[1] = vp->bmp->pixels[2];
1386         pict.data[2] = vp->bmp->pixels[1];
1387
1388         pict.linesize[0] = vp->bmp->pitches[0];
1389         pict.linesize[1] = vp->bmp->pitches[2];
1390         pict.linesize[2] = vp->bmp->pitches[1];
1391
1392 #if CONFIG_AVFILTER
1393         pict_src.data[0] = src_frame->data[0];
1394         pict_src.data[1] = src_frame->data[1];
1395         pict_src.data[2] = src_frame->data[2];
1396
1397         pict_src.linesize[0] = src_frame->linesize[0];
1398         pict_src.linesize[1] = src_frame->linesize[1];
1399         pict_src.linesize[2] = src_frame->linesize[2];
1400
1401         //FIXME use direct rendering
1402         av_picture_copy(&pict, &pict_src,
1403                         vp->pix_fmt, vp->width, vp->height);
1404 #else
1405         sws_flags = av_get_int(sws_opts, "sws_flags", NULL);
1406         is->img_convert_ctx = sws_getCachedContext(is->img_convert_ctx,
1407             vp->width, vp->height, vp->pix_fmt, vp->width, vp->height,
1408             dst_pix_fmt, sws_flags, NULL, NULL, NULL);
1409         if (is->img_convert_ctx == NULL) {
1410             fprintf(stderr, "Cannot initialize the conversion context\n");
1411             exit(1);
1412         }
1413         sws_scale(is->img_convert_ctx, src_frame->data, src_frame->linesize,
1414                   0, vp->height, pict.data, pict.linesize);
1415 #endif
1416         /* update the bitmap content */
1417         SDL_UnlockYUVOverlay(vp->bmp);
1418
1419         vp->pts = pts;
1420         vp->pos = pos;
1421
1422         /* now we can update the picture count */
1423         if (++is->pictq_windex == VIDEO_PICTURE_QUEUE_SIZE)
1424             is->pictq_windex = 0;
1425         SDL_LockMutex(is->pictq_mutex);
1426         vp->target_clock= compute_target_time(vp->pts, is);
1427
1428         is->pictq_size++;
1429         SDL_UnlockMutex(is->pictq_mutex);
1430     }
1431     return 0;
1432 }
1433
1434 /**
1435  * compute the exact PTS for the picture if it is omitted in the stream
1436  * @param pts1 the dts of the pkt / pts of the frame
1437  */
1438 static int output_picture2(VideoState *is, AVFrame *src_frame, double pts1, int64_t pos)
1439 {
1440     double frame_delay, pts;
1441
1442     pts = pts1;
1443
1444     if (pts != 0) {
1445         /* update video clock with pts, if present */
1446         is->video_clock = pts;
1447     } else {
1448         pts = is->video_clock;
1449     }
1450     /* update video clock for next frame */
1451     frame_delay = av_q2d(is->video_st->codec->time_base);
1452     /* for MPEG2, the frame can be repeated, so we update the
1453        clock accordingly */
1454     frame_delay += src_frame->repeat_pict * (frame_delay * 0.5);
1455     is->video_clock += frame_delay;
1456
1457     return queue_picture(is, src_frame, pts, pos);
1458 }
1459
1460 static int get_video_frame(VideoState *is, AVFrame *frame, int64_t *pts, AVPacket *pkt)
1461 {
1462     int len1, got_picture, i;
1463
1464     if (packet_queue_get(&is->videoq, pkt, 1) < 0)
1465         return -1;
1466
1467     if (pkt->data == flush_pkt.data) {
1468         avcodec_flush_buffers(is->video_st->codec);
1469
1470         SDL_LockMutex(is->pictq_mutex);
1471         //Make sure there are no long delay timers (ideally we should just flush the queue, but that's harder)
1472         for (i = 0; i < VIDEO_PICTURE_QUEUE_SIZE; i++) {
1473             is->pictq[i].target_clock= 0;
1474         }
1475         while (is->pictq_size && !is->videoq.abort_request) {
1476             SDL_CondWait(is->pictq_cond, is->pictq_mutex);
1477         }
1478         is->video_current_pos = -1;
1479         SDL_UnlockMutex(is->pictq_mutex);
1480
1481         init_pts_correction(&is->pts_ctx);
1482         is->frame_last_pts = AV_NOPTS_VALUE;
1483         is->frame_last_delay = 0;
1484         is->frame_timer = (double)av_gettime() / 1000000.0;
1485         is->skip_frames = 1;
1486         is->skip_frames_index = 0;
1487         return 0;
1488     }
1489
1490     len1 = avcodec_decode_video2(is->video_st->codec,
1491                                  frame, &got_picture,
1492                                  pkt);
1493
1494     if (got_picture) {
1495         if (decoder_reorder_pts == -1) {
1496             *pts = guess_correct_pts(&is->pts_ctx, frame->pkt_pts, frame->pkt_dts);
1497         } else if (decoder_reorder_pts) {
1498             *pts = frame->pkt_pts;
1499         } else {
1500             *pts = frame->pkt_dts;
1501         }
1502
1503         if (*pts == AV_NOPTS_VALUE) {
1504             *pts = 0;
1505         }
1506
1507         is->skip_frames_index += 1;
1508         if(is->skip_frames_index >= is->skip_frames){
1509             is->skip_frames_index -= FFMAX(is->skip_frames, 1.0);
1510             return 1;
1511         }
1512
1513     }
1514     return 0;
1515 }
1516
1517 #if CONFIG_AVFILTER
1518 typedef struct {
1519     VideoState *is;
1520     AVFrame *frame;
1521     int use_dr1;
1522 } FilterPriv;
1523
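/* get_buffer() callback installed on the video decoder when direct rendering
   is possible: the decoder writes straight into a buffer obtained from the
   filter graph, offset to leave room for the codec's edge padding */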
1524 static int input_get_buffer(AVCodecContext *codec, AVFrame *pic)
1525 {
1526     AVFilterContext *ctx = codec->opaque;
1527     AVFilterBufferRef  *ref;
1528     int perms = AV_PERM_WRITE;
1529     int i, w, h, stride[4];
1530     unsigned edge;
1531     int pixel_size;
1532
1533     if (codec->codec->capabilities & CODEC_CAP_NEG_LINESIZES)
1534         perms |= AV_PERM_NEG_LINESIZES;
1535
1536     if(pic->buffer_hints & FF_BUFFER_HINTS_VALID) {
1537         if(pic->buffer_hints & FF_BUFFER_HINTS_READABLE) perms |= AV_PERM_READ;
1538         if(pic->buffer_hints & FF_BUFFER_HINTS_PRESERVE) perms |= AV_PERM_PRESERVE;
1539         if(pic->buffer_hints & FF_BUFFER_HINTS_REUSABLE) perms |= AV_PERM_REUSE2;
1540     }
1541     if(pic->reference) perms |= AV_PERM_READ | AV_PERM_PRESERVE;
1542
1543     w = codec->width;
1544     h = codec->height;
1545     avcodec_align_dimensions2(codec, &w, &h, stride);
1546     edge = codec->flags & CODEC_FLAG_EMU_EDGE ? 0 : avcodec_get_edge_width();
1547     w += edge << 1;
1548     h += edge << 1;
1549
1550     if(!(ref = avfilter_get_video_buffer(ctx->outputs[0], perms, w, h)))
1551         return -1;
1552
1553     pixel_size = av_pix_fmt_descriptors[ref->format].comp[0].step_minus1+1;
1554     ref->video->w = codec->width;
1555     ref->video->h = codec->height;
1556     for(i = 0; i < 4; i ++) {
1557         unsigned hshift = (i == 1 || i == 2) ? av_pix_fmt_descriptors[ref->format].log2_chroma_w : 0;
1558         unsigned vshift = (i == 1 || i == 2) ? av_pix_fmt_descriptors[ref->format].log2_chroma_h : 0;
1559
1560         if (ref->data[i]) {
1561             ref->data[i]    += ((edge * pixel_size) >> hshift) + ((edge * ref->linesize[i]) >> vshift);
1562         }
1563         pic->data[i]     = ref->data[i];
1564         pic->linesize[i] = ref->linesize[i];
1565     }
1566     pic->opaque = ref;
1567     pic->age    = INT_MAX;
1568     pic->type   = FF_BUFFER_TYPE_USER;
1569     pic->reordered_opaque = codec->reordered_opaque;
1570     if(codec->pkt) pic->pkt_pts = codec->pkt->pts;
1571     else           pic->pkt_pts = AV_NOPTS_VALUE;
1572     return 0;
1573 }
1574
1575 static void input_release_buffer(AVCodecContext *codec, AVFrame *pic)
1576 {
1577     memset(pic->data, 0, sizeof(pic->data));
1578     avfilter_unref_buffer(pic->opaque);
1579 }
1580
1581 static int input_reget_buffer(AVCodecContext *codec, AVFrame *pic)
1582 {
1583     AVFilterBufferRef *ref = pic->opaque;
1584
1585     if (pic->data[0] == NULL) {
1586         pic->buffer_hints |= FF_BUFFER_HINTS_READABLE;
1587         return codec->get_buffer(codec, pic);
1588     }
1589
1590     if ((codec->width != ref->video->w) || (codec->height != ref->video->h) ||
1591         (codec->pix_fmt != ref->format)) {
1592         av_log(codec, AV_LOG_ERROR, "Picture properties changed.\n");
1593         return -1;
1594     }
1595
1596     pic->reordered_opaque = codec->reordered_opaque;
1597     if(codec->pkt) pic->pkt_pts = codec->pkt->pts;
1598     else           pic->pkt_pts = AV_NOPTS_VALUE;
1599     return 0;
1600 }
1601
1602 static int input_init(AVFilterContext *ctx, const char *args, void *opaque)
1603 {
1604     FilterPriv *priv = ctx->priv;
1605     AVCodecContext *codec;
1606     if(!opaque) return -1;
1607
1608     priv->is = opaque;
1609     codec    = priv->is->video_st->codec;
1610     codec->opaque = ctx;
1611     if(codec->codec->capabilities & CODEC_CAP_DR1) {
1612         priv->use_dr1 = 1;
1613         codec->get_buffer     = input_get_buffer;
1614         codec->release_buffer = input_release_buffer;
1615         codec->reget_buffer   = input_reget_buffer;
1616         codec->thread_safe_callbacks = 1;
1617     }
1618
1619     priv->frame = avcodec_alloc_frame();
1620
1621     return 0;
1622 }
1623
1624 static void input_uninit(AVFilterContext *ctx)
1625 {
1626     FilterPriv *priv = ctx->priv;
1627     av_free(priv->frame);
1628 }
1629
1630 static int input_request_frame(AVFilterLink *link)
1631 {
1632     FilterPriv *priv = link->src->priv;
1633     AVFilterBufferRef *picref;
1634     int64_t pts = 0;
1635     AVPacket pkt;
1636     int ret;
1637
1638     while (!(ret = get_video_frame(priv->is, priv->frame, &pts, &pkt)))
1639         av_free_packet(&pkt);
1640     if (ret < 0)
1641         return -1;
1642
1643     if(priv->use_dr1) {
1644         picref = avfilter_ref_buffer(priv->frame->opaque, ~0);
1645     } else {
1646         picref = avfilter_get_video_buffer(link, AV_PERM_WRITE, link->w, link->h);
1647         av_image_copy(picref->data, picref->linesize,
1648                       priv->frame->data, priv->frame->linesize,
1649                       picref->format, link->w, link->h);
1650     }
1651     av_free_packet(&pkt);
1652
1653     picref->pts = pts;
1654     picref->pos = pkt.pos;
1655     picref->video->pixel_aspect = priv->is->video_st->codec->sample_aspect_ratio;
1656     avfilter_start_frame(link, picref);
1657     avfilter_draw_slice(link, 0, link->h, 1);
1658     avfilter_end_frame(link);
1659
1660     return 0;
1661 }
1662
1663 static int input_query_formats(AVFilterContext *ctx)
1664 {
1665     FilterPriv *priv = ctx->priv;
1666     enum PixelFormat pix_fmts[] = {
1667         priv->is->video_st->codec->pix_fmt, PIX_FMT_NONE
1668     };
1669
1670     avfilter_set_common_formats(ctx, avfilter_make_format_list(pix_fmts));
1671     return 0;
1672 }
1673
1674 static int input_config_props(AVFilterLink *link)
1675 {
1676     FilterPriv *priv  = link->src->priv;
1677     AVCodecContext *c = priv->is->video_st->codec;
1678
1679     link->w = c->width;
1680     link->h = c->height;
1681     link->time_base = priv->is->video_st->time_base;
1682
1683     return 0;
1684 }
1685
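/* source filter that feeds decoded frames from the video stream into the
   filter graph; it has no inputs and a single video output pad */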
1686 static AVFilter input_filter =
1687 {
1688     .name      = "ffplay_input",
1689
1690     .priv_size = sizeof(FilterPriv),
1691
1692     .init      = input_init,
1693     .uninit    = input_uninit,
1694
1695     .query_formats = input_query_formats,
1696
1697     .inputs    = (AVFilterPad[]) {{ .name = NULL }},
1698     .outputs   = (AVFilterPad[]) {{ .name = "default",
1699                                     .type = AVMEDIA_TYPE_VIDEO,
1700                                     .request_frame = input_request_frame,
1701                                     .config_props  = input_config_props, },
1702                                   { .name = NULL }},
1703 };
1704
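/* build the video filter graph: ffplay_input -> [user filter chain from -vf] -> ffsink */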
1705 static int configure_video_filters(AVFilterGraph *graph, VideoState *is, const char *vfilters)
1706 {
1707     char sws_flags_str[128];
1708     int ret;
1709     FFSinkContext ffsink_ctx = { .pix_fmt = PIX_FMT_YUV420P };
1710     AVFilterContext *filt_src = NULL, *filt_out = NULL;
1711     snprintf(sws_flags_str, sizeof(sws_flags_str), "flags=%d", sws_flags);
1712     graph->scale_sws_opts = av_strdup(sws_flags_str);
1713
1714     if ((ret = avfilter_graph_create_filter(&filt_src, &input_filter, "src",
1715                                             NULL, is, graph)) < 0)
1716         goto the_end;
1717     if ((ret = avfilter_graph_create_filter(&filt_out, &ffsink, "out",
1718                                             NULL, &ffsink_ctx, graph)) < 0)
1719         goto the_end;
1720
1721     if(vfilters) {
1722         AVFilterInOut *outputs = av_malloc(sizeof(AVFilterInOut));
1723         AVFilterInOut *inputs  = av_malloc(sizeof(AVFilterInOut));
1724
1725         outputs->name    = av_strdup("in");
1726         outputs->filter_ctx = filt_src;
1727         outputs->pad_idx = 0;
1728         outputs->next    = NULL;
1729
1730         inputs->name    = av_strdup("out");
1731         inputs->filter_ctx = filt_out;
1732         inputs->pad_idx = 0;
1733         inputs->next    = NULL;
1734
1735         if ((ret = avfilter_graph_parse(graph, vfilters, inputs, outputs, NULL)) < 0)
1736             goto the_end;
1737         av_freep(&vfilters);
1738     } else {
1739         if ((ret = avfilter_link(filt_src, 0, filt_out, 0)) < 0)
1740             goto the_end;
1741     }
1742
1743     if ((ret = avfilter_graph_config(graph, NULL)) < 0)
1744         goto the_end;
1745
1746     is->out_video_filter = filt_out;
1747 the_end:
1748     return ret;
1749 }
1750
1751 #endif  /* CONFIG_AVFILTER */
1752
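/* video decoding thread: pull decoded (and optionally filtered) frames,
   convert their pts to seconds and queue them for display */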
1753 static int video_thread(void *arg)
1754 {
1755     VideoState *is = arg;
1756     AVFrame *frame= avcodec_alloc_frame();
1757     int64_t pts_int;
1758     double pts;
1759     int ret;
1760
1761 #if CONFIG_AVFILTER
1762     AVFilterGraph *graph = avfilter_graph_alloc();
1763     AVFilterContext *filt_out = NULL;
1764     int64_t pos;
1765
1766     if ((ret = configure_video_filters(graph, is, vfilters)) < 0)
1767         goto the_end;
1768     filt_out = is->out_video_filter;
1769 #endif
1770
1771     for(;;) {
1772 #if !CONFIG_AVFILTER
1773         AVPacket pkt;
1774 #else
1775         AVFilterBufferRef *picref;
1776         AVRational tb;
1777 #endif
1778         while (is->paused && !is->videoq.abort_request)
1779             SDL_Delay(10);
1780 #if CONFIG_AVFILTER
1781         ret = get_filtered_video_frame(filt_out, frame, &picref, &tb);
1782         if (picref) {
1783             pts_int = picref->pts;
1784             pos     = picref->pos;
1785             frame->opaque = picref;
1786         }
1787
1788         if (av_cmp_q(tb, is->video_st->time_base)) {
1789             av_unused int64_t pts1 = pts_int;
1790             pts_int = av_rescale_q(pts_int, tb, is->video_st->time_base);
1791             av_dlog(NULL, "video_thread(): "
1792                     "tb:%d/%d pts:%"PRId64" -> tb:%d/%d pts:%"PRId64"\n",
1793                     tb.num, tb.den, pts1,
1794                     is->video_st->time_base.num, is->video_st->time_base.den, pts_int);
1795         }
1796 #else
1797         ret = get_video_frame(is, frame, &pts_int, &pkt);
1798 #endif
1799
1800         if (ret < 0) goto the_end;
1801
1802         if (!ret)
1803             continue;
1804
1805         pts = pts_int*av_q2d(is->video_st->time_base);
1806
1807 #if CONFIG_AVFILTER
1808         ret = output_picture2(is, frame, pts, pos);
1809 #else
1810         ret = output_picture2(is, frame, pts,  pkt.pos);
1811         av_free_packet(&pkt);
1812 #endif
1813         if (ret < 0)
1814             goto the_end;
1815
1816         if (step)
1817             if (cur_stream)
1818                 stream_pause(cur_stream);
1819     }
1820  the_end:
1821 #if CONFIG_AVFILTER
1822     avfilter_graph_free(&graph);
1823 #endif
1824     av_free(frame);
1825     return 0;
1826 }
1827
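/* subtitle decoding thread: decode subtitle packets, convert their palettes
   from RGBA to YUVA and queue them in the subpicture queue */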
1828 static int subtitle_thread(void *arg)
1829 {
1830     VideoState *is = arg;
1831     SubPicture *sp;
1832     AVPacket pkt1, *pkt = &pkt1;
1833     int len1, got_subtitle;
1834     double pts;
1835     int i, j;
1836     int r, g, b, y, u, v, a;
1837
1838     for(;;) {
1839         while (is->paused && !is->subtitleq.abort_request) {
1840             SDL_Delay(10);
1841         }
1842         if (packet_queue_get(&is->subtitleq, pkt, 1) < 0)
1843             break;
1844
1845         if(pkt->data == flush_pkt.data){
1846             avcodec_flush_buffers(is->subtitle_st->codec);
1847             continue;
1848         }
1849         SDL_LockMutex(is->subpq_mutex);
1850         while (is->subpq_size >= SUBPICTURE_QUEUE_SIZE &&
1851                !is->subtitleq.abort_request) {
1852             SDL_CondWait(is->subpq_cond, is->subpq_mutex);
1853         }
1854         SDL_UnlockMutex(is->subpq_mutex);
1855
1856         if (is->subtitleq.abort_request)
1857             goto the_end;
1858
1859         sp = &is->subpq[is->subpq_windex];
1860
1861         /* NOTE: pts is the PTS of the _first_ picture beginning in
1862            this packet, if any */
1863         pts = 0;
1864         if (pkt->pts != AV_NOPTS_VALUE)
1865             pts = av_q2d(is->subtitle_st->time_base)*pkt->pts;
1866
1867         len1 = avcodec_decode_subtitle2(is->subtitle_st->codec,
1868                                     &sp->sub, &got_subtitle,
1869                                     pkt);
1870         if (got_subtitle && sp->sub.format == 0) {
1871             sp->pts = pts;
1872
1873             for (i = 0; i < sp->sub.num_rects; i++)
1874             {
1875                 for (j = 0; j < sp->sub.rects[i]->nb_colors; j++)
1876                 {
1877                     RGBA_IN(r, g, b, a, (uint32_t*)sp->sub.rects[i]->pict.data[1] + j);
1878                     y = RGB_TO_Y_CCIR(r, g, b);
1879                     u = RGB_TO_U_CCIR(r, g, b, 0);
1880                     v = RGB_TO_V_CCIR(r, g, b, 0);
1881                     YUVA_OUT((uint32_t*)sp->sub.rects[i]->pict.data[1] + j, y, u, v, a);
1882                 }
1883             }
1884
1885             /* now we can update the picture count */
1886             if (++is->subpq_windex == SUBPICTURE_QUEUE_SIZE)
1887                 is->subpq_windex = 0;
1888             SDL_LockMutex(is->subpq_mutex);
1889             is->subpq_size++;
1890             SDL_UnlockMutex(is->subpq_mutex);
1891         }
1892         av_free_packet(pkt);
1893     }
1894  the_end:
1895     return 0;
1896 }
1897
1898 /* copy samples for the audio visualization (wave/spectrum display) */
1899 static void update_sample_display(VideoState *is, short *samples, int samples_size)
1900 {
1901     int size, len, channels;
1902
1903     channels = is->audio_st->codec->channels;
1904
1905     size = samples_size / sizeof(short);
1906     while (size > 0) {
1907         len = SAMPLE_ARRAY_SIZE - is->sample_array_index;
1908         if (len > size)
1909             len = size;
1910         memcpy(is->sample_array + is->sample_array_index, samples, len * sizeof(short));
1911         samples += len;
1912         is->sample_array_index += len;
1913         if (is->sample_array_index >= SAMPLE_ARRAY_SIZE)
1914             is->sample_array_index = 0;
1915         size -= len;
1916     }
1917 }
1918
1919 /* return the new audio buffer size (samples can be added or deleted
1920    to get better sync when video or the external clock is the master) */
1921 static int synchronize_audio(VideoState *is, short *samples,
1922                              int samples_size1, double pts)
1923 {
1924     int n, samples_size;
1925     double ref_clock;
1926
1927     n = 2 * is->audio_st->codec->channels;
1928     samples_size = samples_size1;
1929
1930     /* if not master, then we try to remove or add samples to correct the clock */
1931     if (((is->av_sync_type == AV_SYNC_VIDEO_MASTER && is->video_st) ||
1932          is->av_sync_type == AV_SYNC_EXTERNAL_CLOCK)) {
1933         double diff, avg_diff;
1934         int wanted_size, min_size, max_size, nb_samples;
1935
1936         ref_clock = get_master_clock(is);
1937         diff = get_audio_clock(is) - ref_clock;
1938
1939         if (diff < AV_NOSYNC_THRESHOLD) {
1940             is->audio_diff_cum = diff + is->audio_diff_avg_coef * is->audio_diff_cum;
1941             if (is->audio_diff_avg_count < AUDIO_DIFF_AVG_NB) {
1942                 /* not enough measurements yet to have a correct estimate */
1943                 is->audio_diff_avg_count++;
1944             } else {
1945                 /* estimate the A-V difference */
1946                 avg_diff = is->audio_diff_cum * (1.0 - is->audio_diff_avg_coef);
1947
1948                 if (fabs(avg_diff) >= is->audio_diff_threshold) {
1949                     wanted_size = samples_size + ((int)(diff * is->audio_st->codec->sample_rate) * n);
1950                     nb_samples = samples_size / n;
1951
1952                     min_size = ((nb_samples * (100 - SAMPLE_CORRECTION_PERCENT_MAX)) / 100) * n;
1953                     max_size = ((nb_samples * (100 + SAMPLE_CORRECTION_PERCENT_MAX)) / 100) * n;
1954                     if (wanted_size < min_size)
1955                         wanted_size = min_size;
1956                     else if (wanted_size > max_size)
1957                         wanted_size = max_size;
1958
1959                     /* add or remove samples to correct the sync */
1960                     if (wanted_size < samples_size) {
1961                         /* remove samples */
1962                         samples_size = wanted_size;
1963                     } else if (wanted_size > samples_size) {
1964                         uint8_t *samples_end, *q;
1965                         int nb;
1966
1967                         /* add samples by duplicating the final sample */
1968                         nb = (wanted_size - samples_size);
1969                         samples_end = (uint8_t *)samples + samples_size - n;
1970                         q = samples_end + n;
1971                         while (nb > 0) {
1972                             memcpy(q, samples_end, n);
1973                             q += n;
1974                             nb -= n;
1975                         }
1976                         samples_size = wanted_size;
1977                     }
1978                 }
1979                 av_dlog(NULL, "diff=%f adiff=%f sample_diff=%d apts=%0.3f vpts=%0.3f %f\n",
1980                         diff, avg_diff, samples_size - samples_size1,
1981                         is->audio_clock, is->video_clock, is->audio_diff_threshold);
1982             }
1983         } else {
1984             /* difference is too big: may be due to initial PTS errors, so
1985                reset the A-V filter */
1986             is->audio_diff_avg_count = 0;
1987             is->audio_diff_cum = 0;
1988         }
1989     }
1990
1991     return samples_size;
1992 }
1993
1994 /* decode one audio frame and return its uncompressed size */
1995 static int audio_decode_frame(VideoState *is, double *pts_ptr)
1996 {
1997     AVPacket *pkt_temp = &is->audio_pkt_temp;
1998     AVPacket *pkt = &is->audio_pkt;
1999     AVCodecContext *dec= is->audio_st->codec;
2000     int n, len1, data_size;
2001     double pts;
2002
2003     for(;;) {
2004         /* NOTE: the audio packet can contain several frames */
2005         while (pkt_temp->size > 0) {
2006             data_size = sizeof(is->audio_buf1);
2007             len1 = avcodec_decode_audio3(dec,
2008                                         (int16_t *)is->audio_buf1, &data_size,
2009                                         pkt_temp);
2010             if (len1 < 0) {
2011                 /* if error, we skip the frame */
2012                 pkt_temp->size = 0;
2013                 break;
2014             }
2015
2016             pkt_temp->data += len1;
2017             pkt_temp->size -= len1;
2018             if (data_size <= 0)
2019                 continue;
2020
2021             if (dec->sample_fmt != is->audio_src_fmt) {
2022                 if (is->reformat_ctx)
2023                     av_audio_convert_free(is->reformat_ctx);
2024                 is->reformat_ctx= av_audio_convert_alloc(AV_SAMPLE_FMT_S16, 1,
2025                                                          dec->sample_fmt, 1, NULL, 0);
2026                 if (!is->reformat_ctx) {
2027                     fprintf(stderr, "Cannot convert %s sample format to %s sample format\n",
2028                         av_get_sample_fmt_name(dec->sample_fmt),
2029                         av_get_sample_fmt_name(AV_SAMPLE_FMT_S16));
2030                         break;
2031                 }
2032                 is->audio_src_fmt= dec->sample_fmt;
2033             }
2034
2035             if (is->reformat_ctx) {
2036                 const void *ibuf[6]= {is->audio_buf1};
2037                 void *obuf[6]= {is->audio_buf2};
2038                 int istride[6]= {av_get_bits_per_sample_fmt(dec->sample_fmt)/8};
2039                 int ostride[6]= {2};
2040                 int len= data_size/istride[0];
2041                 if (av_audio_convert(is->reformat_ctx, obuf, ostride, ibuf, istride, len)<0) {
2042                     printf("av_audio_convert() failed\n");
2043                     break;
2044                 }
2045                 is->audio_buf= is->audio_buf2;
2046                 /* FIXME: existing code assumes that data_size equals framesize*channels*2;
2047                           remove this legacy cruft */
2048                 data_size= len*2;
2049             }else{
2050                 is->audio_buf= is->audio_buf1;
2051             }
2052
2053             /* the pts is the running audio clock (updated from the packet pts when available) */
2054             pts = is->audio_clock;
2055             *pts_ptr = pts;
2056             n = 2 * dec->channels;
2057             is->audio_clock += (double)data_size /
2058                 (double)(n * dec->sample_rate);
2059 #if defined(DEBUG_SYNC)
2060             {
2061                 static double last_clock;
2062                 printf("audio: delay=%0.3f clock=%0.3f pts=%0.3f\n",
2063                        is->audio_clock - last_clock,
2064                        is->audio_clock, pts);
2065                 last_clock = is->audio_clock;
2066             }
2067 #endif
2068             return data_size;
2069         }
2070
2071         /* free the current packet */
2072         if (pkt->data)
2073             av_free_packet(pkt);
2074
2075         if (is->paused || is->audioq.abort_request) {
2076             return -1;
2077         }
2078
2079         /* read next packet */
2080         if (packet_queue_get(&is->audioq, pkt, 1) < 0)
2081             return -1;
2082         if(pkt->data == flush_pkt.data){
2083             avcodec_flush_buffers(dec);
2084             continue;
2085         }
2086
2087         pkt_temp->data = pkt->data;
2088         pkt_temp->size = pkt->size;
2089
2090         /* update the audio clock with the packet pts, if available */
2091         if (pkt->pts != AV_NOPTS_VALUE) {
2092             is->audio_clock = av_q2d(is->audio_st->time_base)*pkt->pts;
2093         }
2094     }
2095 }
2096
2097 /* prepare a new audio buffer */
2098 static void sdl_audio_callback(void *opaque, Uint8 *stream, int len)
2099 {
2100     VideoState *is = opaque;
2101     int audio_size, len1;
2102     double pts;
2103
2104     audio_callback_time = av_gettime();
2105
2106     while (len > 0) {
2107         if (is->audio_buf_index >= is->audio_buf_size) {
2108            audio_size = audio_decode_frame(is, &pts);
2109            if (audio_size < 0) {
2110                 /* if error, just output silence */
2111                is->audio_buf = is->audio_buf1;
2112                is->audio_buf_size = 1024;
2113                memset(is->audio_buf, 0, is->audio_buf_size);
2114            } else {
2115                if (is->show_audio)
2116                    update_sample_display(is, (int16_t *)is->audio_buf, audio_size);
2117                audio_size = synchronize_audio(is, (int16_t *)is->audio_buf, audio_size,
2118                                               pts);
2119                is->audio_buf_size = audio_size;
2120            }
2121            is->audio_buf_index = 0;
2122         }
2123         len1 = is->audio_buf_size - is->audio_buf_index;
2124         if (len1 > len)
2125             len1 = len;
2126         memcpy(stream, (uint8_t *)is->audio_buf + is->audio_buf_index, len1);
2127         len -= len1;
2128         stream += len1;
2129         is->audio_buf_index += len1;
2130     }
2131 }
2132
2133 /* open a given stream. Return 0 if OK */
2134 static int stream_component_open(VideoState *is, int stream_index)
2135 {
2136     AVFormatContext *ic = is->ic;
2137     AVCodecContext *avctx;
2138     AVCodec *codec;
2139     SDL_AudioSpec wanted_spec, spec;
2140
2141     if (stream_index < 0 || stream_index >= ic->nb_streams)
2142         return -1;
2143     avctx = ic->streams[stream_index]->codec;
2144
2145     /* request at most two channels from the audio decoder (downmix) */
2146     if (avctx->codec_type == AVMEDIA_TYPE_AUDIO) {
2147         if (avctx->channels > 0) {
2148             avctx->request_channels = FFMIN(2, avctx->channels);
2149         } else {
2150             avctx->request_channels = 2;
2151         }
2152     }
2153
2154     codec = avcodec_find_decoder(avctx->codec_id);
2155     avctx->debug_mv = debug_mv;
2156     avctx->debug = debug;
2157     avctx->workaround_bugs = workaround_bugs;
2158     avctx->lowres = lowres;
2159     if(lowres) avctx->flags |= CODEC_FLAG_EMU_EDGE;
2160     avctx->idct_algo= idct;
2161     if(fast) avctx->flags2 |= CODEC_FLAG2_FAST;
2162     avctx->skip_frame= skip_frame;
2163     avctx->skip_idct= skip_idct;
2164     avctx->skip_loop_filter= skip_loop_filter;
2165     avctx->error_recognition= error_recognition;
2166     avctx->error_concealment= error_concealment;
2167     avctx->thread_count= thread_count;
2168
2169     set_context_opts(avctx, avcodec_opts[avctx->codec_type], 0, codec);
2170
2171     if (!codec ||
2172         avcodec_open(avctx, codec) < 0)
2173         return -1;
2174
2175     /* prepare audio output */
2176     if (avctx->codec_type == AVMEDIA_TYPE_AUDIO) {
2177         wanted_spec.freq = avctx->sample_rate;
2178         wanted_spec.format = AUDIO_S16SYS;
2179         wanted_spec.channels = avctx->channels;
2180         wanted_spec.silence = 0;
2181         wanted_spec.samples = SDL_AUDIO_BUFFER_SIZE;
2182         wanted_spec.callback = sdl_audio_callback;
2183         wanted_spec.userdata = is;
2184         if (SDL_OpenAudio(&wanted_spec, &spec) < 0) {
2185             fprintf(stderr, "SDL_OpenAudio: %s\n", SDL_GetError());
2186             return -1;
2187         }
2188         is->audio_hw_buf_size = spec.size;
2189         is->audio_src_fmt= AV_SAMPLE_FMT_S16;
2190     }
2191
2192     ic->streams[stream_index]->discard = AVDISCARD_DEFAULT;
2193     switch(avctx->codec_type) {
2194     case AVMEDIA_TYPE_AUDIO:
2195         is->audio_stream = stream_index;
2196         is->audio_st = ic->streams[stream_index];
2197         is->audio_buf_size = 0;
2198         is->audio_buf_index = 0;
2199
2200         /* init averaging filter */
2201         is->audio_diff_avg_coef = exp(log(0.01) / AUDIO_DIFF_AVG_NB);
2202         is->audio_diff_avg_count = 0;
2203         /* since we do not have a precise enough measure of the audio fifo fullness,
2204            we correct audio sync only if the error is larger than this threshold */
2205         is->audio_diff_threshold = 2.0 * SDL_AUDIO_BUFFER_SIZE / avctx->sample_rate;
2206
2207         memset(&is->audio_pkt, 0, sizeof(is->audio_pkt));
2208         packet_queue_init(&is->audioq);
2209         SDL_PauseAudio(0);
2210         break;
2211     case AVMEDIA_TYPE_VIDEO:
2212         is->video_stream = stream_index;
2213         is->video_st = ic->streams[stream_index];
2214
2215         packet_queue_init(&is->videoq);
2216         is->video_tid = SDL_CreateThread(video_thread, is);
2217         break;
2218     case AVMEDIA_TYPE_SUBTITLE:
2219         is->subtitle_stream = stream_index;
2220         is->subtitle_st = ic->streams[stream_index];
2221         packet_queue_init(&is->subtitleq);
2222
2223         is->subtitle_tid = SDL_CreateThread(subtitle_thread, is);
2224         break;
2225     default:
2226         break;
2227     }
2228     return 0;
2229 }
2230
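/* close a stream component: abort its packet queue, stop the associated
   decoder thread or close the audio device, and release the codec */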
2231 static void stream_component_close(VideoState *is, int stream_index)
2232 {
2233     AVFormatContext *ic = is->ic;
2234     AVCodecContext *avctx;
2235
2236     if (stream_index < 0 || stream_index >= ic->nb_streams)
2237         return;
2238     avctx = ic->streams[stream_index]->codec;
2239
2240     switch(avctx->codec_type) {
2241     case AVMEDIA_TYPE_AUDIO:
2242         packet_queue_abort(&is->audioq);
2243
2244         SDL_CloseAudio();
2245
2246         packet_queue_end(&is->audioq);
2247         if (is->reformat_ctx)
2248             av_audio_convert_free(is->reformat_ctx);
2249         is->reformat_ctx = NULL;
2250         break;
2251     case AVMEDIA_TYPE_VIDEO:
2252         packet_queue_abort(&is->videoq);
2253
2254         /* note: we also signal this condition to make sure we unblock the
2255            video thread in all cases */
2256         SDL_LockMutex(is->pictq_mutex);
2257         SDL_CondSignal(is->pictq_cond);
2258         SDL_UnlockMutex(is->pictq_mutex);
2259
2260         SDL_WaitThread(is->video_tid, NULL);
2261
2262         packet_queue_end(&is->videoq);
2263         break;
2264     case AVMEDIA_TYPE_SUBTITLE:
2265         packet_queue_abort(&is->subtitleq);
2266
2267         /* note: we also signal this condition to make sure we unblock the
2268            subtitle thread in all cases */
2269         SDL_LockMutex(is->subpq_mutex);
2270         is->subtitle_stream_changed = 1;
2271
2272         SDL_CondSignal(is->subpq_cond);
2273         SDL_UnlockMutex(is->subpq_mutex);
2274
2275         SDL_WaitThread(is->subtitle_tid, NULL);
2276
2277         packet_queue_end(&is->subtitleq);
2278         break;
2279     default:
2280         break;
2281     }
2282
2283     ic->streams[stream_index]->discard = AVDISCARD_ALL;
2284     avcodec_close(avctx);
2285     switch(avctx->codec_type) {
2286     case AVMEDIA_TYPE_AUDIO:
2287         is->audio_st = NULL;
2288         is->audio_stream = -1;
2289         break;
2290     case AVMEDIA_TYPE_VIDEO:
2291         is->video_st = NULL;
2292         is->video_stream = -1;
2293         break;
2294     case AVMEDIA_TYPE_SUBTITLE:
2295         is->subtitle_st = NULL;
2296         is->subtitle_stream = -1;
2297         break;
2298     default:
2299         break;
2300     }
2301 }
2302
2303 /* since we have only one decoding thread, we can use a global
2304    variable instead of a thread local variable */
2305 static VideoState *global_video_state;
2306
2307 static int decode_interrupt_cb(void)
2308 {
2309     return (global_video_state && global_video_state->abort_request);
2310 }
2311
2312 /* this thread gets the stream from the disk or the network */
2313 static int decode_thread(void *arg)
2314 {
2315     VideoState *is = arg;
2316     AVFormatContext *ic;
2317     int err, i, ret;
2318     int st_index[AVMEDIA_TYPE_NB];
2319     AVPacket pkt1, *pkt = &pkt1;
2320     AVFormatParameters params, *ap = &params;
2321     int eof=0;
2322     int pkt_in_play_range = 0;
2323
2324     ic = avformat_alloc_context();
2325
2326     memset(st_index, -1, sizeof(st_index));
2327     is->video_stream = -1;
2328     is->audio_stream = -1;
2329     is->subtitle_stream = -1;
2330
2331     global_video_state = is;
2332     avio_set_interrupt_cb(decode_interrupt_cb);
2333
2334     memset(ap, 0, sizeof(*ap));
2335
2336     ap->prealloced_context = 1;
2337     ap->width = frame_width;
2338     ap->height= frame_height;
2339     ap->time_base= (AVRational){1, 25};
2340     ap->pix_fmt = frame_pix_fmt;
2341
2342     set_context_opts(ic, avformat_opts, AV_OPT_FLAG_DECODING_PARAM, NULL);
2343
2344     err = av_open_input_file(&ic, is->filename, is->iformat, 0, ap);
2345     if (err < 0) {
2346         print_error(is->filename, err);
2347         ret = -1;
2348         goto fail;
2349     }
2350     is->ic = ic;
2351
2352     if(genpts)
2353         ic->flags |= AVFMT_FLAG_GENPTS;
2354
2355     /* Set AVCodecContext options so they will be seen by av_find_stream_info() */
2356     for (i = 0; i < ic->nb_streams; i++) {
2357         AVCodecContext *dec = ic->streams[i]->codec;
2358         switch (dec->codec_type) {
2359         case AVMEDIA_TYPE_AUDIO:
2360             set_context_opts(dec, avcodec_opts[AVMEDIA_TYPE_AUDIO],
2361                              AV_OPT_FLAG_AUDIO_PARAM | AV_OPT_FLAG_DECODING_PARAM,
2362                              NULL);
2363             break;
2364         case AVMEDIA_TYPE_VIDEO:
2365             set_context_opts(dec, avcodec_opts[AVMEDIA_TYPE_VIDEO],
2366                              AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_DECODING_PARAM,
2367                              NULL);
2368             break;
2369         }
2370     }
2371
2372     err = av_find_stream_info(ic);
2373     if (err < 0) {
2374         fprintf(stderr, "%s: could not find codec parameters\n", is->filename);
2375         ret = -1;
2376         goto fail;
2377     }
2378     if(ic->pb)
2379         ic->pb->eof_reached= 0; //FIXME hack, ffplay maybe should not use url_feof() to test for the end
2380
2381     if(seek_by_bytes<0)
2382         seek_by_bytes= !!(ic->iformat->flags & AVFMT_TS_DISCONT);
2383
2384     /* if seeking requested, we execute it */
2385     if (start_time != AV_NOPTS_VALUE) {
2386         int64_t timestamp;
2387
2388         timestamp = start_time;
2389         /* add the stream start time */
2390         if (ic->start_time != AV_NOPTS_VALUE)
2391             timestamp += ic->start_time;
2392         ret = avformat_seek_file(ic, -1, INT64_MIN, timestamp, INT64_MAX, 0);
2393         if (ret < 0) {
2394             fprintf(stderr, "%s: could not seek to position %0.3f\n",
2395                     is->filename, (double)timestamp / AV_TIME_BASE);
2396         }
2397     }
2398
2399     for (i = 0; i < ic->nb_streams; i++)
2400         ic->streams[i]->discard = AVDISCARD_ALL;
2401     if (!video_disable)
2402         st_index[AVMEDIA_TYPE_VIDEO] =
2403             av_find_best_stream(ic, AVMEDIA_TYPE_VIDEO,
2404                                 wanted_stream[AVMEDIA_TYPE_VIDEO], -1, NULL, 0);
2405     if (!audio_disable)
2406         st_index[AVMEDIA_TYPE_AUDIO] =
2407             av_find_best_stream(ic, AVMEDIA_TYPE_AUDIO,
2408                                 wanted_stream[AVMEDIA_TYPE_AUDIO],
2409                                 st_index[AVMEDIA_TYPE_VIDEO],
2410                                 NULL, 0);
2411     if (!video_disable)
2412         st_index[AVMEDIA_TYPE_SUBTITLE] =
2413             av_find_best_stream(ic, AVMEDIA_TYPE_SUBTITLE,
2414                                 wanted_stream[AVMEDIA_TYPE_SUBTITLE],
2415                                 (st_index[AVMEDIA_TYPE_AUDIO] >= 0 ?
2416                                  st_index[AVMEDIA_TYPE_AUDIO] :
2417                                  st_index[AVMEDIA_TYPE_VIDEO]),
2418                                 NULL, 0);
2419     if (show_status) {
2420         av_dump_format(ic, 0, is->filename, 0);
2421     }
2422
2423     /* open the streams */
2424     if (st_index[AVMEDIA_TYPE_AUDIO] >= 0) {
2425         stream_component_open(is, st_index[AVMEDIA_TYPE_AUDIO]);
2426     }
2427
2428     ret=-1;
2429     if (st_index[AVMEDIA_TYPE_VIDEO] >= 0) {
2430         ret= stream_component_open(is, st_index[AVMEDIA_TYPE_VIDEO]);
2431     }
2432     is->refresh_tid = SDL_CreateThread(refresh_thread, is);
2433     if(ret<0) {
2434         if (!display_disable)
2435             is->show_audio = 2;
2436     }
2437
2438     if (st_index[AVMEDIA_TYPE_SUBTITLE] >= 0) {
2439         stream_component_open(is, st_index[AVMEDIA_TYPE_SUBTITLE]);
2440     }
2441
2442     if (is->video_stream < 0 && is->audio_stream < 0) {
2443         fprintf(stderr, "%s: could not open codecs\n", is->filename);
2444         ret = -1;
2445         goto fail;
2446     }
2447
2448     for(;;) {
2449         if (is->abort_request)
2450             break;
2451         if (is->paused != is->last_paused) {
2452             is->last_paused = is->paused;
2453             if (is->paused)
2454                 is->read_pause_return= av_read_pause(ic);
2455             else
2456                 av_read_play(ic);
2457         }
2458 #if CONFIG_RTSP_DEMUXER
2459         if (is->paused && !strcmp(ic->iformat->name, "rtsp")) {
2460             /* wait 10 ms to avoid trying to get another packet */
2461             /* XXX: horrible */
2462             SDL_Delay(10);
2463             continue;
2464         }
2465 #endif
2466         if (is->seek_req) {
2467             int64_t seek_target= is->seek_pos;
2468             int64_t seek_min= is->seek_rel > 0 ? seek_target - is->seek_rel + 2: INT64_MIN;
2469             int64_t seek_max= is->seek_rel < 0 ? seek_target - is->seek_rel - 2: INT64_MAX;
2470 //FIXME the +-2 is because rounding is not done in the correct direction when generating
2471 //      the seek_pos/seek_rel variables
2472
2473             ret = avformat_seek_file(is->ic, -1, seek_min, seek_target, seek_max, is->seek_flags);
2474             if (ret < 0) {
2475                 fprintf(stderr, "%s: error while seeking\n", is->ic->filename);
2476             }else{
2477                 if (is->audio_stream >= 0) {
2478                     packet_queue_flush(&is->audioq);
2479                     packet_queue_put(&is->audioq, &flush_pkt);
2480                 }
2481                 if (is->subtitle_stream >= 0) {
2482                     packet_queue_flush(&is->subtitleq);
2483                     packet_queue_put(&is->subtitleq, &flush_pkt);
2484                 }
2485                 if (is->video_stream >= 0) {
2486                     packet_queue_flush(&is->videoq);
2487                     packet_queue_put(&is->videoq, &flush_pkt);
2488                 }
2489             }
2490             is->seek_req = 0;
2491             eof= 0;
2492         }
2493
2494         /* if the queues are full, no need to read more */
2495         if (   is->audioq.size + is->videoq.size + is->subtitleq.size > MAX_QUEUE_SIZE
2496             || (   (is->audioq   .size  > MIN_AUDIOQ_SIZE || is->audio_stream<0)
2497                 && (is->videoq   .nb_packets > MIN_FRAMES || is->video_stream<0)
2498                 && (is->subtitleq.nb_packets > MIN_FRAMES || is->subtitle_stream<0))) {
2499             /* wait 10 ms */
2500             SDL_Delay(10);
2501             continue;
2502         }
2503         if(eof) {
2504             if(is->video_stream >= 0){
2505                 av_init_packet(pkt);
2506                 pkt->data=NULL;
2507                 pkt->size=0;
2508                 pkt->stream_index= is->video_stream;
2509                 packet_queue_put(&is->videoq, pkt);
2510             }
2511             SDL_Delay(10);
2512             if(is->audioq.size + is->videoq.size + is->subtitleq.size ==0){
2513                 if(loop!=1 && (!loop || --loop)){
2514                     stream_seek(cur_stream, start_time != AV_NOPTS_VALUE ? start_time : 0, 0, 0);
2515                 }else if(autoexit){
2516                     ret=AVERROR_EOF;
2517                     goto fail;
2518                 }
2519             }
2520             continue;
2521         }
2522         ret = av_read_frame(ic, pkt);
2523         if (ret < 0) {
2524             if (ret == AVERROR_EOF || (ic->pb && ic->pb->eof_reached))
2525                 eof=1;
2526             if (ic->pb && ic->pb->error)
2527                 break;
2528             SDL_Delay(100); /* wait for user event */
2529             continue;
2530         }
2531         /* check if the packet is in the play range specified by the user, then queue it, otherwise discard it */
2532         pkt_in_play_range = duration == AV_NOPTS_VALUE ||
2533                 (pkt->pts - ic->streams[pkt->stream_index]->start_time) *
2534                 av_q2d(ic->streams[pkt->stream_index]->time_base) -
2535                 (double)(start_time != AV_NOPTS_VALUE ? start_time : 0)/1000000
2536                 <= ((double)duration/1000000);
2537         if (pkt->stream_index == is->audio_stream && pkt_in_play_range) {
2538             packet_queue_put(&is->audioq, pkt);
2539         } else if (pkt->stream_index == is->video_stream && pkt_in_play_range) {
2540             packet_queue_put(&is->videoq, pkt);
2541         } else if (pkt->stream_index == is->subtitle_stream && pkt_in_play_range) {
2542             packet_queue_put(&is->subtitleq, pkt);
2543         } else {
2544             av_free_packet(pkt);
2545         }
2546     }
2547     /* wait until the end */
2548     while (!is->abort_request) {
2549         SDL_Delay(100);
2550     }
2551
2552     ret = 0;
2553  fail:
2554     /* disable interrupting */
2555     global_video_state = NULL;
2556
2557     /* close each stream */
2558     if (is->audio_stream >= 0)
2559         stream_component_close(is, is->audio_stream);
2560     if (is->video_stream >= 0)
2561         stream_component_close(is, is->video_stream);
2562     if (is->subtitle_stream >= 0)
2563         stream_component_close(is, is->subtitle_stream);
2564     if (is->ic) {
2565         av_close_input_file(is->ic);
2566         is->ic = NULL; /* safety */
2567     }
2568     avio_set_interrupt_cb(NULL);
2569
2570     if (ret != 0) {
2571         SDL_Event event;
2572
2573         event.type = FF_QUIT_EVENT;
2574         event.user.data1 = is;
2575         SDL_PushEvent(&event);
2576     }
2577     return 0;
2578 }
2579
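/* allocate a VideoState for the given file and start the demuxing
   (decode_thread) thread */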
2580 static VideoState *stream_open(const char *filename, AVInputFormat *iformat)
2581 {
2582     VideoState *is;
2583
2584     is = av_mallocz(sizeof(VideoState));
2585     if (!is)
2586         return NULL;
2587     av_strlcpy(is->filename, filename, sizeof(is->filename));
2588     is->iformat = iformat;
2589     is->ytop = 0;
2590     is->xleft = 0;
2591
2592     /* start video display */
2593     is->pictq_mutex = SDL_CreateMutex();
2594     is->pictq_cond = SDL_CreateCond();
2595
2596     is->subpq_mutex = SDL_CreateMutex();
2597     is->subpq_cond = SDL_CreateCond();
2598
2599     is->av_sync_type = av_sync_type;
2600     is->parse_tid = SDL_CreateThread(decode_thread, is);
2601     if (!is->parse_tid) {
2602         av_free(is);
2603         return NULL;
2604     }
2605     return is;
2606 }
2607
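/* switch to the next usable stream of the given media type, wrapping around;
   for subtitles the cycle also includes "no subtitle stream" */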
2608 static void stream_cycle_channel(VideoState *is, int codec_type)
2609 {
2610     AVFormatContext *ic = is->ic;
2611     int start_index, stream_index;
2612     AVStream *st;
2613
2614     if (codec_type == AVMEDIA_TYPE_VIDEO)
2615         start_index = is->video_stream;
2616     else if (codec_type == AVMEDIA_TYPE_AUDIO)
2617         start_index = is->audio_stream;
2618     else
2619         start_index = is->subtitle_stream;
2620     if (start_index < (codec_type == AVMEDIA_TYPE_SUBTITLE ? -1 : 0))
2621         return;
2622     stream_index = start_index;
2623     for(;;) {
2624         if (++stream_index >= is->ic->nb_streams)
2625         {
2626             if (codec_type == AVMEDIA_TYPE_SUBTITLE)
2627             {
2628                 stream_index = -1;
2629                 goto the_end;
2630             } else
2631                 stream_index = 0;
2632         }
2633         if (stream_index == start_index)
2634             return;
2635         st = ic->streams[stream_index];
2636         if (st->codec->codec_type == codec_type) {
2637             /* check that parameters are OK */
2638             switch(codec_type) {
2639             case AVMEDIA_TYPE_AUDIO:
2640                 if (st->codec->sample_rate != 0 &&
2641                     st->codec->channels != 0)
2642                     goto the_end;
2643                 break;
2644             case AVMEDIA_TYPE_VIDEO:
2645             case AVMEDIA_TYPE_SUBTITLE:
2646                 goto the_end;
2647             default:
2648                 break;
2649             }
2650         }
2651     }
2652  the_end:
2653     stream_component_close(is, start_index);
2654     stream_component_open(is, stream_index);
2655 }
2656
2657
2658 static void toggle_full_screen(void)
2659 {
2660     is_full_screen = !is_full_screen;
2661     video_open(cur_stream);
2662 }
2663
2664 static void toggle_pause(void)
2665 {
2666     if (cur_stream)
2667         stream_pause(cur_stream);
2668     step = 0;
2669 }
2670
2671 static void step_to_next_frame(void)
2672 {
2673     if (cur_stream) {
2674         /* if the stream is paused, unpause it, then step */
2675         if (cur_stream->paused)
2676             stream_pause(cur_stream);
2677     }
2678     step = 1;
2679 }
2680
2681 static void toggle_audio_display(void)
2682 {
2683     if (cur_stream) {
2684         int bgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0x00);
2685         cur_stream->show_audio = (cur_stream->show_audio + 1) % 3;
2686         fill_rectangle(screen,
2687                     cur_stream->xleft, cur_stream->ytop, cur_stream->width, cur_stream->height,
2688                     bgcolor);
2689         SDL_UpdateRect(screen, cur_stream->xleft, cur_stream->ytop, cur_stream->width, cur_stream->height);
2690     }
2691 }
2692
2693 /* handle an event sent by the GUI */
2694 static void event_loop(void)
2695 {
2696     SDL_Event event;
2697     double incr, pos, frac;
2698
2699     for(;;) {
2700         double x;
2701         SDL_WaitEvent(&event);
2702         switch(event.type) {
2703         case SDL_KEYDOWN:
2704             if (exit_on_keydown) {
2705                 do_exit();
2706                 break;
2707             }
2708             switch(event.key.keysym.sym) {
2709             case SDLK_ESCAPE:
2710             case SDLK_q:
2711                 do_exit();
2712                 break;
2713             case SDLK_f:
2714                 toggle_full_screen();
2715                 break;
2716             case SDLK_p:
2717             case SDLK_SPACE:
2718                 toggle_pause();
2719                 break;
2720             case SDLK_s: //S: Step to next frame
2721                 step_to_next_frame();
2722                 break;
2723             case SDLK_a:
2724                 if (cur_stream)
2725                     stream_cycle_channel(cur_stream, AVMEDIA_TYPE_AUDIO);
2726                 break;
2727             case SDLK_v:
2728                 if (cur_stream)
2729                     stream_cycle_channel(cur_stream, AVMEDIA_TYPE_VIDEO);
2730                 break;
2731             case SDLK_t:
2732                 if (cur_stream)
2733                     stream_cycle_channel(cur_stream, AVMEDIA_TYPE_SUBTITLE);
2734                 break;
2735             case SDLK_w:
2736                 toggle_audio_display();
2737                 break;
2738             case SDLK_LEFT:
2739                 incr = -10.0;
2740                 goto do_seek;
2741             case SDLK_RIGHT:
2742                 incr = 10.0;
2743                 goto do_seek;
2744             case SDLK_UP:
2745                 incr = 60.0;
2746                 goto do_seek;
2747             case SDLK_DOWN:
2748                 incr = -60.0;
2749             do_seek:
2750                 if (cur_stream) {
2751                     if (seek_by_bytes) {
2752                         if (cur_stream->video_stream >= 0 && cur_stream->video_current_pos>=0){
2753                             pos= cur_stream->video_current_pos;
2754                         }else if(cur_stream->audio_stream >= 0 && cur_stream->audio_pkt.pos>=0){
2755                             pos= cur_stream->audio_pkt.pos;
2756                         }else
2757                             pos = avio_tell(cur_stream->ic->pb);
2758                         if (cur_stream->ic->bit_rate)
2759                             incr *= cur_stream->ic->bit_rate / 8.0;
2760                         else
2761                             incr *= 180000.0;
2762                         pos += incr;
2763                         stream_seek(cur_stream, pos, incr, 1);
2764                     } else {
2765                         pos = get_master_clock(cur_stream);
2766                         pos += incr;
2767                         stream_seek(cur_stream, (int64_t)(pos * AV_TIME_BASE), (int64_t)(incr * AV_TIME_BASE), 0);
2768                     }
2769                 }
2770                 break;
2771             default:
2772                 break;
2773             }
2774             break;
2775         case SDL_MOUSEBUTTONDOWN:
2776             if (exit_on_mousedown) {
2777                 do_exit();
2778                 break;
2779             }
2780         case SDL_MOUSEMOTION:
2781             if(event.type ==SDL_MOUSEBUTTONDOWN){
2782                 x= event.button.x;
2783             }else{
2784                 if(event.motion.state != SDL_PRESSED)
2785                     break;
2786                 x= event.motion.x;
2787             }
2788             if (cur_stream) {
2789                 if(seek_by_bytes || cur_stream->ic->duration<=0){
2790                     uint64_t size=  avio_size(cur_stream->ic->pb);
2791                     stream_seek(cur_stream, size*x/cur_stream->width, 0, 1);
2792                 }else{
2793                     int64_t ts;
2794                     int ns, hh, mm, ss;
2795                     int tns, thh, tmm, tss;
2796                     tns = cur_stream->ic->duration/1000000LL;
2797                     thh = tns/3600;
2798                     tmm = (tns%3600)/60;
2799                     tss = (tns%60);
2800                     frac = x/cur_stream->width;
2801                     ns = frac*tns;
2802                     hh = ns/3600;
2803                     mm = (ns%3600)/60;
2804                     ss = (ns%60);
2805                     fprintf(stderr, "Seek to %2.0f%% (%2d:%02d:%02d) of total duration (%2d:%02d:%02d)       \n", frac*100,
2806                             hh, mm, ss, thh, tmm, tss);
2807                     ts = frac*cur_stream->ic->duration;
2808                     if (cur_stream->ic->start_time != AV_NOPTS_VALUE)
2809                         ts += cur_stream->ic->start_time;
2810                     stream_seek(cur_stream, ts, 0, 0);
2811                 }
2812             }
2813             break;
2814         case SDL_VIDEORESIZE:
2815             if (cur_stream) {
2816                 screen = SDL_SetVideoMode(event.resize.w, event.resize.h, 0,
2817                                           SDL_HWSURFACE|SDL_RESIZABLE|SDL_ASYNCBLIT|SDL_HWACCEL);
2818                 screen_width = cur_stream->width = event.resize.w;
2819                 screen_height= cur_stream->height= event.resize.h;
2820             }
2821             break;
2822         case SDL_QUIT:
2823         case FF_QUIT_EVENT:
2824             do_exit();
2825             break;
2826         case FF_ALLOC_EVENT:
2827             video_open(event.user.data1);
2828             alloc_picture(event.user.data1);
2829             break;
2830         case FF_REFRESH_EVENT:
2831             video_refresh_timer(event.user.data1);
2832             cur_stream->refresh=0;
2833             break;
2834         default:
2835             break;
2836         }
2837     }
2838 }
2839
2840 static void opt_frame_size(const char *arg)
2841 {
2842     if (av_parse_video_size(&frame_width, &frame_height, arg) < 0) {
2843         fprintf(stderr, "Incorrect frame size\n");
2844         exit(1);
2845     }
2846     if ((frame_width % 2) != 0 || (frame_height % 2) != 0) {
2847         fprintf(stderr, "Frame size must be a multiple of 2\n");
2848         exit(1);
2849     }
2850 }
2851
2852 static int opt_width(const char *opt, const char *arg)
2853 {
2854     screen_width = parse_number_or_die(opt, arg, OPT_INT64, 1, INT_MAX);
2855     return 0;
2856 }
2857
2858 static int opt_height(const char *opt, const char *arg)
2859 {
2860     screen_height = parse_number_or_die(opt, arg, OPT_INT64, 1, INT_MAX);
2861     return 0;
2862 }
2863
2864 static void opt_format(const char *arg)
2865 {
2866     file_iformat = av_find_input_format(arg);
2867     if (!file_iformat) {
2868         fprintf(stderr, "Unknown input format: %s\n", arg);
2869         exit(1);
2870     }
2871 }
2872
2873 static void opt_frame_pix_fmt(const char *arg)
2874 {
2875     frame_pix_fmt = av_get_pix_fmt(arg);
2876 }
2877
2878 static int opt_sync(const char *opt, const char *arg)
2879 {
2880     if (!strcmp(arg, "audio"))
2881         av_sync_type = AV_SYNC_AUDIO_MASTER;
2882     else if (!strcmp(arg, "video"))
2883         av_sync_type = AV_SYNC_VIDEO_MASTER;
2884     else if (!strcmp(arg, "ext"))
2885         av_sync_type = AV_SYNC_EXTERNAL_CLOCK;
2886     else {
2887         fprintf(stderr, "Unknown value for %s: %s\n", opt, arg);
2888         exit(1);
2889     }
2890     return 0;
2891 }
2892
2893 static int opt_seek(const char *opt, const char *arg)
2894 {
2895     start_time = parse_time_or_die(opt, arg, 1);
2896     return 0;
2897 }
2898
2899 static int opt_duration(const char *opt, const char *arg)
2900 {
2901     duration = parse_time_or_die(opt, arg, 1);
2902     return 0;
2903 }
2904
2905 static int opt_debug(const char *opt, const char *arg)
2906 {
2907     av_log_set_level(99);
2908     debug = parse_number_or_die(opt, arg, OPT_INT64, 0, INT_MAX);
2909     return 0;
2910 }
2911
2912 static int opt_vismv(const char *opt, const char *arg)
2913 {
2914     debug_mv = parse_number_or_die(opt, arg, OPT_INT64, INT_MIN, INT_MAX);
2915     return 0;
2916 }
2917
2918 static int opt_thread_count(const char *opt, const char *arg)
2919 {
2920     thread_count= parse_number_or_die(opt, arg, OPT_INT64, 0, INT_MAX);
2921 #if !HAVE_THREADS
2922     fprintf(stderr, "Warning: not compiled with thread support, using thread emulation\n");
2923 #endif
2924     return 0;
2925 }
2926
2927 static const OptionDef options[] = {
2928 #include "cmdutils_common_opts.h"
2929     { "x", HAS_ARG | OPT_FUNC2, {(void*)opt_width}, "force displayed width", "width" },
2930     { "y", HAS_ARG | OPT_FUNC2, {(void*)opt_height}, "force displayed height", "height" },
2931     { "s", HAS_ARG | OPT_VIDEO, {(void*)opt_frame_size}, "set frame size (WxH or abbreviation)", "size" },
2932     { "fs", OPT_BOOL, {(void*)&is_full_screen}, "force full screen" },
2933     { "an", OPT_BOOL, {(void*)&audio_disable}, "disable audio" },
2934     { "vn", OPT_BOOL, {(void*)&video_disable}, "disable video" },
2935     { "ast", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&wanted_stream[AVMEDIA_TYPE_AUDIO]}, "select desired audio stream", "stream_number" },
2936     { "vst", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&wanted_stream[AVMEDIA_TYPE_VIDEO]}, "select desired video stream", "stream_number" },
2937     { "sst", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&wanted_stream[AVMEDIA_TYPE_SUBTITLE]}, "select desired subtitle stream", "stream_number" },
2938     { "ss", HAS_ARG | OPT_FUNC2, {(void*)&opt_seek}, "seek to a given position in seconds", "pos" },
2939     { "t", HAS_ARG | OPT_FUNC2, {(void*)&opt_duration}, "play  \"duration\" seconds of audio/video", "duration" },
2940     { "bytes", OPT_INT | HAS_ARG, {(void*)&seek_by_bytes}, "seek by bytes 0=off 1=on -1=auto", "val" },
2941     { "nodisp", OPT_BOOL, {(void*)&display_disable}, "disable graphical display" },
2942     { "f", HAS_ARG, {(void*)opt_format}, "force format", "fmt" },
2943     { "pix_fmt", HAS_ARG | OPT_EXPERT | OPT_VIDEO, {(void*)opt_frame_pix_fmt}, "set pixel format", "format" },
2944     { "stats", OPT_BOOL | OPT_EXPERT, {(void*)&show_status}, "show status", "" },
2945     { "debug", HAS_ARG | OPT_FUNC2 | OPT_EXPERT, {(void*)opt_debug}, "print specific debug info", "" },
2946     { "bug", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&workaround_bugs}, "workaround bugs", "" },
2947     { "vismv", HAS_ARG | OPT_FUNC2 | OPT_EXPERT, {(void*)opt_vismv}, "visualize motion vectors", "" },
2948     { "fast", OPT_BOOL | OPT_EXPERT, {(void*)&fast}, "non spec compliant optimizations", "" },
2949     { "genpts", OPT_BOOL | OPT_EXPERT, {(void*)&genpts}, "generate pts", "" },
2950     { "drp", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&decoder_reorder_pts}, "let decoder reorder pts 0=off 1=on -1=auto", ""},
2951     { "lowres", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&lowres}, "", "" },
2952     { "skiploop", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&skip_loop_filter}, "", "" },
2953     { "skipframe", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&skip_frame}, "", "" },
2954     { "skipidct", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&skip_idct}, "", "" },
2955     { "idct", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&idct}, "set idct algo",  "algo" },
2956     { "er", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&error_recognition}, "set error detection threshold (0-4)",  "threshold" },
2957     { "ec", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&error_concealment}, "set error concealment options",  "bit_mask" },
2958     { "sync", HAS_ARG | OPT_FUNC2 | OPT_EXPERT, {(void*)opt_sync}, "set audio-video sync. type (type=audio/video/ext)", "type" },
2959     { "threads", HAS_ARG | OPT_FUNC2 | OPT_EXPERT, {(void*)opt_thread_count}, "thread count", "count" },
2960     { "autoexit", OPT_BOOL | OPT_EXPERT, {(void*)&autoexit}, "exit at the end", "" },
2961     { "exitonkeydown", OPT_BOOL | OPT_EXPERT, {(void*)&exit_on_keydown}, "exit on key down", "" },
2962     { "exitonmousedown", OPT_BOOL | OPT_EXPERT, {(void*)&exit_on_mousedown}, "exit on mouse down", "" },
2963     { "loop", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&loop}, "set number of times the playback shall be looped", "loop count" },
2964     { "framedrop", OPT_BOOL | OPT_EXPERT, {(void*)&framedrop}, "drop frames when cpu is too slow", "" },
2965     { "window_title", OPT_STRING | HAS_ARG, {(void*)&window_title}, "set window title", "window title" },
2966 #if CONFIG_AVFILTER
2967     { "vf", OPT_STRING | HAS_ARG, {(void*)&vfilters}, "video filters", "filter list" },
2968 #endif
2969     { "rdftspeed", OPT_INT | HAS_ARG| OPT_AUDIO | OPT_EXPERT, {(void*)&rdftspeed}, "rdft speed", "msecs" },
2970     { "default", OPT_FUNC2 | HAS_ARG | OPT_AUDIO | OPT_VIDEO | OPT_EXPERT, {(void*)opt_default}, "generic catch all option", "" },
2971     { "i", 0, {NULL}, "ffmpeg compatibility dummy option", ""},
2972     { NULL, },
2973 };
2974
2975 static void show_usage(void)
2976 {
2977     printf("Simple media player\n");
2978     printf("usage: ffplay [options] input_file\n");
2979     printf("\n");
2980 }
2981
2982 static void show_help(void)
2983 {
2984     av_log_set_callback(log_callback_help);
2985     show_usage();
2986     show_help_options(options, "Main options:\n",
2987                       OPT_EXPERT, 0);
2988     show_help_options(options, "\nAdvanced options:\n",
2989                       OPT_EXPERT, OPT_EXPERT);
2990     printf("\n");
2991     av_opt_show2(avcodec_opts[0], NULL,
2992                  AV_OPT_FLAG_DECODING_PARAM, 0);
2993     printf("\n");
2994     av_opt_show2(avformat_opts, NULL,
2995                  AV_OPT_FLAG_DECODING_PARAM, 0);
2996 #if !CONFIG_AVFILTER
2997     printf("\n");
2998     av_opt_show2(sws_opts, NULL,
2999                  AV_OPT_FLAG_ENCODING_PARAM, 0);
3000 #endif
3001     printf("\nWhile playing:\n"
3002            "q, ESC              quit\n"
3003            "f                   toggle full screen\n"
3004            "p, SPC              pause\n"
3005            "a                   cycle audio channel\n"
3006            "v                   cycle video channel\n"
3007            "t                   cycle subtitle channel\n"
3008            "w                   show audio waves\n"
3009            "s                   activate frame-step mode\n"
3010            "left/right          seek backward/forward 10 seconds\n"
3011            "down/up             seek backward/forward 1 minute\n"
3012            "mouse click         seek to percentage in file corresponding to fraction of width\n"
3013            );
3014 }
3015
3016 static void opt_input_file(const char *filename)
3017 {
3018     if (input_filename) {
3019         fprintf(stderr, "Argument '%s' provided as input filename, but '%s' was already specified.\n",
3020                 filename, input_filename);
3021         exit(1);
3022     }
3023     if (!strcmp(filename, "-"))
3024         filename = "pipe:";
3025     input_filename = filename;
3026 }
3027
3028 /* program entry point */
3029 int main(int argc, char **argv)
3030 {
3031     int flags;
3032
3033     av_log_set_flags(AV_LOG_SKIP_REPEATED);
3034
3035     /* register all codecs, demuxers and protocols */
3036     avcodec_register_all();
3037 #if CONFIG_AVDEVICE
3038     avdevice_register_all();
3039 #endif
3040 #if CONFIG_AVFILTER
3041     avfilter_register_all();
3042 #endif
3043     av_register_all();
3044
3045     init_opts();
3046
3047     show_banner();
3048
3049     parse_options(argc, argv, options, opt_input_file);
3050
3051     if (!input_filename) {
3052         show_usage();
3053         fprintf(stderr, "An input file must be specified\n");
3054         fprintf(stderr, "Use -h to get full help or, even better, run 'man ffplay'\n");
3055         exit(1);
3056     }
3057
3058     if (display_disable) {
3059         video_disable = 1;
3060     }
3061     flags = SDL_INIT_VIDEO | SDL_INIT_AUDIO | SDL_INIT_TIMER;
3062 #if !defined(__MINGW32__) && !defined(__APPLE__)
3063     flags |= SDL_INIT_EVENTTHREAD; /* Not supported on Windows or Mac OS X */
3064 #endif
3065     if (SDL_Init (flags)) {
3066         fprintf(stderr, "Could not initialize SDL - %s\n", SDL_GetError());
3067         exit(1);
3068     }
3069
3070     if (!display_disable) {
3071 #if HAVE_SDL_VIDEO_SIZE
3072         const SDL_VideoInfo *vi = SDL_GetVideoInfo();
3073         fs_screen_width = vi->current_w;
3074         fs_screen_height = vi->current_h;
3075 #endif
3076     }
3077
3078     SDL_EventState(SDL_ACTIVEEVENT, SDL_IGNORE);
3079     SDL_EventState(SDL_SYSWMEVENT, SDL_IGNORE);
3080     SDL_EventState(SDL_USEREVENT, SDL_IGNORE);
3081
3082     av_init_packet(&flush_pkt);
3083     flush_pkt.data= "FLUSH";
3084
3085     cur_stream = stream_open(input_filename, file_iformat);
3086
3087     event_loop();
3088
3089     /* never returns */
3090
3091     return 0;
3092 }