/* ffplay.c — retrieved from a gitweb blob view; HTML header removed. */
1 /*
2  * ffplay : Simple Media Player based on the FFmpeg libraries
3  * Copyright (c) 2003 Fabrice Bellard
4  *
5  * This file is part of FFmpeg.
6  *
7  * FFmpeg is free software; you can redistribute it and/or
8  * modify it under the terms of the GNU Lesser General Public
9  * License as published by the Free Software Foundation; either
10  * version 2.1 of the License, or (at your option) any later version.
11  *
12  * FFmpeg is distributed in the hope that it will be useful,
13  * but WITHOUT ANY WARRANTY; without even the implied warranty of
14  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
15  * Lesser General Public License for more details.
16  *
17  * You should have received a copy of the GNU Lesser General Public
18  * License along with FFmpeg; if not, write to the Free Software
19  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
20  */
21
22 #include "config.h"
23 #include <inttypes.h>
24 #include <math.h>
25 #include <limits.h>
26 #include "libavutil/avstring.h"
27 #include "libavutil/colorspace.h"
28 #include "libavutil/pixdesc.h"
29 #include "libavutil/imgutils.h"
30 #include "libavutil/parseutils.h"
31 #include "libavutil/samplefmt.h"
32 #include "libavutil/avassert.h"
33 #include "libavformat/avformat.h"
34 #include "libavdevice/avdevice.h"
35 #include "libswscale/swscale.h"
36 #include "libavcodec/audioconvert.h"
37 #include "libavutil/opt.h"
38 #include "libavcodec/avfft.h"
39
40 #if CONFIG_AVFILTER
41 # include "libavfilter/avcodec.h"
42 # include "libavfilter/avfilter.h"
43 # include "libavfilter/avfiltergraph.h"
44 #endif
45
46 #include <SDL.h>
47 #include <SDL_thread.h>
48
49 #include "cmdutils.h"
50
51 #include <unistd.h>
52 #include <assert.h>
53
const char program_name[] = "ffplay";
const int program_birth_year = 2003;

//#define DEBUG
//#define DEBUG_SYNC

/* demuxer packet-queue limits — NOTE(review): used by the read thread,
   which is outside this chunk; confirm exact semantics there */
#define MAX_QUEUE_SIZE (15 * 1024 * 1024)
#define MIN_AUDIOQ_SIZE (20 * 16 * 1024)
#define MIN_FRAMES 5

/* SDL audio buffer size, in samples. Should be small to have precise
   A/V sync as SDL does not have hardware buffer fullness info. */
#define SDL_AUDIO_BUFFER_SIZE 1024

/* no AV sync correction is done if below the AV sync threshold */
#define AV_SYNC_THRESHOLD 0.01
/* no AV correction is done if too big error */
#define AV_NOSYNC_THRESHOLD 10.0

#define FRAME_SKIP_FACTOR 0.05

/* maximum audio speed change to get correct sync */
#define SAMPLE_CORRECTION_PERCENT_MAX 10

/* we use about AUDIO_DIFF_AVG_NB A-V differences to make the average */
#define AUDIO_DIFF_AVG_NB   20

/* NOTE: the size must be big enough to compensate the hardware audio buffersize size */
#define SAMPLE_ARRAY_SIZE (2*65536)

/* scaler quality flag used when CONFIG_AVFILTER is disabled */
static int sws_flags = SWS_BICUBIC;
85
/* Thread-safe FIFO of demuxed packets, shared between the read thread
   (producer) and a decoder thread (consumer).  'size' tracks the total
   byte footprint so the reader can bound memory use. */
typedef struct PacketQueue {
    AVPacketList *first_pkt, *last_pkt;  /* singly linked list, head/tail */
    int nb_packets;
    int size;               /* sum of pkt.size + list-node overhead, in bytes */
    int abort_request;      /* set by packet_queue_abort() to unblock getters */
    SDL_mutex *mutex;       /* protects all fields above */
    SDL_cond *cond;         /* signalled on every put and on abort */
} PacketQueue;
94
#define VIDEO_PICTURE_QUEUE_SIZE 2
#define SUBPICTURE_QUEUE_SIZE 4

/* One decoded picture queued for display. */
typedef struct VideoPicture {
    double pts;                                  ///<presentation time stamp for this picture
    double target_clock;                         ///<av_gettime() time at which this should be displayed ideally
    int64_t pos;                                 ///<byte position in file
    SDL_Overlay *bmp;                            /* SDL YUV overlay holding the pixels */
    int width, height; /* source height & width */
    int allocated;                               /* the overlay has been created */
    enum PixelFormat pix_fmt;

#if CONFIG_AVFILTER
    AVFilterBufferRef *picref;                   /* keeps the filter buffer alive while displayed */
#endif
} VideoPicture;
111
/* One decoded subtitle queued for blending over the video. */
typedef struct SubPicture {
    double pts; /* presentation time stamp for this picture */
    AVSubtitle sub;   /* owns its rects/palette; freed with avsubtitle_free() */
} SubPicture;
116
/* Which clock the other streams are slaved to (see get_master_clock). */
enum {
    AV_SYNC_AUDIO_MASTER, /* default choice */
    AV_SYNC_VIDEO_MASTER,
    AV_SYNC_EXTERNAL_CLOCK, /* synchronize to an external clock */
};
122
/* All state for one opened media file: demuxer context, per-stream decoding
   state, the packet/picture/subtitle queues and the clocks used for A/V
   synchronisation.  A single instance is shared by the read, video decode,
   subtitle decode and refresh threads. */
typedef struct VideoState {
    SDL_Thread *read_tid;
    SDL_Thread *video_tid;
    SDL_Thread *refresh_tid;
    AVInputFormat *iformat;
    int no_background;
    int abort_request;      /* tells all worker threads to exit */
    int paused;
    int last_paused;
    int seek_req;           /* a seek is pending (set by stream_seek) */
    int seek_flags;         /* AVSEEK_FLAG_* for the pending seek */
    int64_t seek_pos;
    int64_t seek_rel;
    int read_pause_return;  /* result of av_read_pause — see stream_toggle_pause */
    AVFormatContext *ic;

    int audio_stream;       /* stream index, or negative when absent */

    int av_sync_type;       /* one of the AV_SYNC_* values above */
    double external_clock; /* external clock base */
    int64_t external_clock_time;

    /* --- audio decoding / A-V sync state --- */
    double audio_clock;     /* pts of the end of the decoded audio buffer */
    double audio_diff_cum; /* used for AV difference average computation */
    double audio_diff_avg_coef;
    double audio_diff_threshold;
    int audio_diff_avg_count;
    AVStream *audio_st;
    PacketQueue audioq;
    int audio_hw_buf_size;
    /* samples output by the codec. we reserve more space for avsync
       compensation */
    DECLARE_ALIGNED(16,uint8_t,audio_buf1)[(AVCODEC_MAX_AUDIO_FRAME_SIZE * 3) / 2];
    DECLARE_ALIGNED(16,uint8_t,audio_buf2)[(AVCODEC_MAX_AUDIO_FRAME_SIZE * 3) / 2];
    uint8_t *audio_buf;     /* currently valid decoded data — presumably one of audio_buf1/2; confirm in the audio decoder */
    unsigned int audio_buf_size; /* in bytes */
    int audio_buf_index; /* in bytes */
    AVPacket audio_pkt_temp;
    AVPacket audio_pkt;
    enum AVSampleFormat audio_src_fmt;
    AVAudioConvert *reformat_ctx;

    /* --- audio visualisation (waves / spectrogram) state --- */
    enum ShowMode {
        SHOW_MODE_NONE = -1, SHOW_MODE_VIDEO = 0, SHOW_MODE_WAVES, SHOW_MODE_RDFT, SHOW_MODE_NB
    } show_mode;
    int16_t sample_array[SAMPLE_ARRAY_SIZE];  /* ring buffer of recent output samples */
    int sample_array_index;
    int last_i_start;       /* display start index while paused */
    RDFTContext *rdft;
    int rdft_bits;          /* log2 of the current RDFT size */
    FFTSample *rdft_data;
    int xpos;               /* current spectrogram column */

    /* --- subtitle state --- */
    SDL_Thread *subtitle_tid;
    int subtitle_stream;
    int subtitle_stream_changed;
    AVStream *subtitle_st;
    PacketQueue subtitleq;
    SubPicture subpq[SUBPICTURE_QUEUE_SIZE];
    int subpq_size, subpq_rindex, subpq_windex;
    SDL_mutex *subpq_mutex;
    SDL_cond *subpq_cond;

    /* --- video state --- */
    double frame_timer;
    double frame_last_pts;
    double frame_last_delay;
    double video_clock;                          ///<pts of last decoded frame / predicted pts of next decoded frame
    int video_stream;
    AVStream *video_st;
    PacketQueue videoq;
    double video_current_pts;                    ///<current displayed pts (different from video_clock if frame fifos are used)
    double video_current_pts_drift;              ///<video_current_pts - time (av_gettime) at which we updated video_current_pts - used to have running video pts
    int64_t video_current_pos;                   ///<current displayed file pos
    VideoPicture pictq[VIDEO_PICTURE_QUEUE_SIZE];
    int pictq_size, pictq_rindex, pictq_windex;
    SDL_mutex *pictq_mutex;
    SDL_cond *pictq_cond;
#if !CONFIG_AVFILTER
    struct SwsContext *img_convert_ctx;
#endif

    char filename[1024];
    int width, height, xleft, ytop;   /* display area within the SDL surface */

#if CONFIG_AVFILTER
    AVFilterContext *out_video_filter;          ///<the last filter in the video chain
#endif

    float skip_frames;
    float skip_frames_index;
    int refresh;            /* a refresh event is in flight (see refresh_thread) */
} VideoState;
215
static void show_help(void);

/* options specified by the user */
static AVInputFormat *file_iformat;
static const char *input_filename;
static const char *window_title;
static int fs_screen_width;
static int fs_screen_height;
static int screen_width = 0;
static int screen_height = 0;
static int frame_width = 0;
static int frame_height = 0;
static enum PixelFormat frame_pix_fmt = PIX_FMT_NONE;
static int audio_disable;
static int video_disable;
/* requested stream index per media type; -1 means pick automatically */
static int wanted_stream[AVMEDIA_TYPE_NB]={
    [AVMEDIA_TYPE_AUDIO]=-1,
    [AVMEDIA_TYPE_VIDEO]=-1,
    [AVMEDIA_TYPE_SUBTITLE]=-1,
};
static int seek_by_bytes=-1;   /* -1: decide per container at open time */
static int display_disable;
static int show_status = 1;
static int av_sync_type = AV_SYNC_AUDIO_MASTER;
static int64_t start_time = AV_NOPTS_VALUE;
static int64_t duration = AV_NOPTS_VALUE;
static int debug = 0;
static int debug_mv = 0;
static int step = 0;
static int thread_count = 1;
static int workaround_bugs = 1;
static int fast = 0;
static int genpts = 0;
static int lowres = 0;
static int idct = FF_IDCT_AUTO;
static enum AVDiscard skip_frame= AVDISCARD_DEFAULT;
static enum AVDiscard skip_idct= AVDISCARD_DEFAULT;
static enum AVDiscard skip_loop_filter= AVDISCARD_DEFAULT;
static int error_recognition = FF_ER_CAREFUL;
static int error_concealment = 3;
static int decoder_reorder_pts= -1;
static int autoexit;
static int exit_on_keydown;
static int exit_on_mousedown;
static int loop=1;
static int framedrop=1;
static enum ShowMode show_mode = SHOW_MODE_NONE;

static int rdftspeed=20;   /* spectrogram refresh period, in ms (see refresh_thread) */
#if CONFIG_AVFILTER
static char *vfilters = NULL;
#endif

/* current context */
static int is_full_screen;
static VideoState *cur_stream;
static int64_t audio_callback_time;   /* av_gettime() at the last audio callback */

/* marker packet: bypasses av_dup_packet in packet_queue_put; presumably
   queued to signal a decoder flush — confirm against the read thread */
static AVPacket flush_pkt;

/* custom SDL event types used for inter-thread signalling */
#define FF_ALLOC_EVENT   (SDL_USEREVENT)
#define FF_REFRESH_EVENT (SDL_USEREVENT + 1)
#define FF_QUIT_EVENT    (SDL_USEREVENT + 2)

static SDL_Surface *screen;
281
282 static int packet_queue_put(PacketQueue *q, AVPacket *pkt)
283 {
284     AVPacketList *pkt1;
285
286     /* duplicate the packet */
287     if (pkt!=&flush_pkt && av_dup_packet(pkt) < 0)
288         return -1;
289
290     pkt1 = av_malloc(sizeof(AVPacketList));
291     if (!pkt1)
292         return -1;
293     pkt1->pkt = *pkt;
294     pkt1->next = NULL;
295
296
297     SDL_LockMutex(q->mutex);
298
299     if (!q->last_pkt)
300
301         q->first_pkt = pkt1;
302     else
303         q->last_pkt->next = pkt1;
304     q->last_pkt = pkt1;
305     q->nb_packets++;
306     q->size += pkt1->pkt.size + sizeof(*pkt1);
307     /* XXX: should duplicate packet data in DV case */
308     SDL_CondSignal(q->cond);
309
310     SDL_UnlockMutex(q->mutex);
311     return 0;
312 }
313
314 /* packet queue handling */
315 static void packet_queue_init(PacketQueue *q)
316 {
317     memset(q, 0, sizeof(PacketQueue));
318     q->mutex = SDL_CreateMutex();
319     q->cond = SDL_CreateCond();
320     packet_queue_put(q, &flush_pkt);
321 }
322
323 static void packet_queue_flush(PacketQueue *q)
324 {
325     AVPacketList *pkt, *pkt1;
326
327     SDL_LockMutex(q->mutex);
328     for(pkt = q->first_pkt; pkt != NULL; pkt = pkt1) {
329         pkt1 = pkt->next;
330         av_free_packet(&pkt->pkt);
331         av_freep(&pkt);
332     }
333     q->last_pkt = NULL;
334     q->first_pkt = NULL;
335     q->nb_packets = 0;
336     q->size = 0;
337     SDL_UnlockMutex(q->mutex);
338 }
339
340 static void packet_queue_end(PacketQueue *q)
341 {
342     packet_queue_flush(q);
343     SDL_DestroyMutex(q->mutex);
344     SDL_DestroyCond(q->cond);
345 }
346
347 static void packet_queue_abort(PacketQueue *q)
348 {
349     SDL_LockMutex(q->mutex);
350
351     q->abort_request = 1;
352
353     SDL_CondSignal(q->cond);
354
355     SDL_UnlockMutex(q->mutex);
356 }
357
358 /* return < 0 if aborted, 0 if no packet and > 0 if packet.  */
359 static int packet_queue_get(PacketQueue *q, AVPacket *pkt, int block)
360 {
361     AVPacketList *pkt1;
362     int ret;
363
364     SDL_LockMutex(q->mutex);
365
366     for(;;) {
367         if (q->abort_request) {
368             ret = -1;
369             break;
370         }
371
372         pkt1 = q->first_pkt;
373         if (pkt1) {
374             q->first_pkt = pkt1->next;
375             if (!q->first_pkt)
376                 q->last_pkt = NULL;
377             q->nb_packets--;
378             q->size -= pkt1->pkt.size + sizeof(*pkt1);
379             *pkt = pkt1->pkt;
380             av_free(pkt1);
381             ret = 1;
382             break;
383         } else if (!block) {
384             ret = 0;
385             break;
386         } else {
387             SDL_CondWait(q->cond, q->mutex);
388         }
389     }
390     SDL_UnlockMutex(q->mutex);
391     return ret;
392 }
393
394 static inline void fill_rectangle(SDL_Surface *screen,
395                                   int x, int y, int w, int h, int color)
396 {
397     SDL_Rect rect;
398     rect.x = x;
399     rect.y = y;
400     rect.w = w;
401     rect.h = h;
402     SDL_FillRect(screen, &rect, color);
403 }
404
/* Blend 'newp' over 'oldp' with alpha a (0..255).  's' is the extra
   left-shift carried by newp when it is a sum of 2^s samples (the chroma
   accumulators below pass sums of 2 or 4 samples with s = 1 or 2). */
#define ALPHA_BLEND(a, oldp, newp, s)\
((((oldp << s) * (255 - (a))) + (newp * (a))) / (255 << s))

/* Unpack a 32-bit ARGB word at 's' into its four 8-bit components. */
#define RGBA_IN(r, g, b, a, s)\
{\
    unsigned int v = ((const uint32_t *)(s))[0];\
    a = (v >> 24) & 0xff;\
    r = (v >> 16) & 0xff;\
    g = (v >> 8) & 0xff;\
    b = v & 0xff;\
}

/* Look up the palette entry for the 8-bit pixel at 's' and unpack it
   into YUVA components (palette words are packed A:Y:U:V, high to low). */
#define YUVA_IN(y, u, v, a, s, pal)\
{\
    unsigned int val = ((const uint32_t *)(pal))[*(const uint8_t*)(s)];\
    a = (val >> 24) & 0xff;\
    y = (val >> 16) & 0xff;\
    u = (val >> 8) & 0xff;\
    v = val & 0xff;\
}

/* Pack YUVA components back into one 32-bit word at 'd'. */
#define YUVA_OUT(d, y, u, v, a)\
{\
    ((uint32_t *)(d))[0] = (a << 24) | (y << 16) | (u << 8) | v;\
}

/* bytes per subtitle-bitmap pixel (palettized: one index byte) */
#define BPP 1
433
434 static void blend_subrect(AVPicture *dst, const AVSubtitleRect *rect, int imgw, int imgh)
435 {
436     int wrap, wrap3, width2, skip2;
437     int y, u, v, a, u1, v1, a1, w, h;
438     uint8_t *lum, *cb, *cr;
439     const uint8_t *p;
440     const uint32_t *pal;
441     int dstx, dsty, dstw, dsth;
442
443     dstw = av_clip(rect->w, 0, imgw);
444     dsth = av_clip(rect->h, 0, imgh);
445     dstx = av_clip(rect->x, 0, imgw - dstw);
446     dsty = av_clip(rect->y, 0, imgh - dsth);
447     lum = dst->data[0] + dsty * dst->linesize[0];
448     cb = dst->data[1] + (dsty >> 1) * dst->linesize[1];
449     cr = dst->data[2] + (dsty >> 1) * dst->linesize[2];
450
451     width2 = ((dstw + 1) >> 1) + (dstx & ~dstw & 1);
452     skip2 = dstx >> 1;
453     wrap = dst->linesize[0];
454     wrap3 = rect->pict.linesize[0];
455     p = rect->pict.data[0];
456     pal = (const uint32_t *)rect->pict.data[1];  /* Now in YCrCb! */
457
458     if (dsty & 1) {
459         lum += dstx;
460         cb += skip2;
461         cr += skip2;
462
463         if (dstx & 1) {
464             YUVA_IN(y, u, v, a, p, pal);
465             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
466             cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
467             cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
468             cb++;
469             cr++;
470             lum++;
471             p += BPP;
472         }
473         for(w = dstw - (dstx & 1); w >= 2; w -= 2) {
474             YUVA_IN(y, u, v, a, p, pal);
475             u1 = u;
476             v1 = v;
477             a1 = a;
478             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
479
480             YUVA_IN(y, u, v, a, p + BPP, pal);
481             u1 += u;
482             v1 += v;
483             a1 += a;
484             lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
485             cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
486             cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
487             cb++;
488             cr++;
489             p += 2 * BPP;
490             lum += 2;
491         }
492         if (w) {
493             YUVA_IN(y, u, v, a, p, pal);
494             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
495             cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
496             cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
497             p++;
498             lum++;
499         }
500         p += wrap3 - dstw * BPP;
501         lum += wrap - dstw - dstx;
502         cb += dst->linesize[1] - width2 - skip2;
503         cr += dst->linesize[2] - width2 - skip2;
504     }
505     for(h = dsth - (dsty & 1); h >= 2; h -= 2) {
506         lum += dstx;
507         cb += skip2;
508         cr += skip2;
509
510         if (dstx & 1) {
511             YUVA_IN(y, u, v, a, p, pal);
512             u1 = u;
513             v1 = v;
514             a1 = a;
515             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
516             p += wrap3;
517             lum += wrap;
518             YUVA_IN(y, u, v, a, p, pal);
519             u1 += u;
520             v1 += v;
521             a1 += a;
522             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
523             cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
524             cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
525             cb++;
526             cr++;
527             p += -wrap3 + BPP;
528             lum += -wrap + 1;
529         }
530         for(w = dstw - (dstx & 1); w >= 2; w -= 2) {
531             YUVA_IN(y, u, v, a, p, pal);
532             u1 = u;
533             v1 = v;
534             a1 = a;
535             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
536
537             YUVA_IN(y, u, v, a, p + BPP, pal);
538             u1 += u;
539             v1 += v;
540             a1 += a;
541             lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
542             p += wrap3;
543             lum += wrap;
544
545             YUVA_IN(y, u, v, a, p, pal);
546             u1 += u;
547             v1 += v;
548             a1 += a;
549             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
550
551             YUVA_IN(y, u, v, a, p + BPP, pal);
552             u1 += u;
553             v1 += v;
554             a1 += a;
555             lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
556
557             cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 2);
558             cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 2);
559
560             cb++;
561             cr++;
562             p += -wrap3 + 2 * BPP;
563             lum += -wrap + 2;
564         }
565         if (w) {
566             YUVA_IN(y, u, v, a, p, pal);
567             u1 = u;
568             v1 = v;
569             a1 = a;
570             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
571             p += wrap3;
572             lum += wrap;
573             YUVA_IN(y, u, v, a, p, pal);
574             u1 += u;
575             v1 += v;
576             a1 += a;
577             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
578             cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
579             cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
580             cb++;
581             cr++;
582             p += -wrap3 + BPP;
583             lum += -wrap + 1;
584         }
585         p += wrap3 + (wrap3 - dstw * BPP);
586         lum += wrap + (wrap - dstw - dstx);
587         cb += dst->linesize[1] - width2 - skip2;
588         cr += dst->linesize[2] - width2 - skip2;
589     }
590     /* handle odd height */
591     if (h) {
592         lum += dstx;
593         cb += skip2;
594         cr += skip2;
595
596         if (dstx & 1) {
597             YUVA_IN(y, u, v, a, p, pal);
598             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
599             cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
600             cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
601             cb++;
602             cr++;
603             lum++;
604             p += BPP;
605         }
606         for(w = dstw - (dstx & 1); w >= 2; w -= 2) {
607             YUVA_IN(y, u, v, a, p, pal);
608             u1 = u;
609             v1 = v;
610             a1 = a;
611             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
612
613             YUVA_IN(y, u, v, a, p + BPP, pal);
614             u1 += u;
615             v1 += v;
616             a1 += a;
617             lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
618             cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u, 1);
619             cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v, 1);
620             cb++;
621             cr++;
622             p += 2 * BPP;
623             lum += 2;
624         }
625         if (w) {
626             YUVA_IN(y, u, v, a, p, pal);
627             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
628             cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
629             cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
630         }
631     }
632 }
633
/* Release everything owned by a queued subtitle (rects, bitmaps, palette). */
static void free_subpicture(SubPicture *sp)
{
    avsubtitle_free(&sp->sub);
}
638
/* Display the picture at the read index of the picture queue: blend any
   due subtitle into its overlay, then blit it letterboxed into the window
   at the stream's display aspect ratio. */
static void video_image_display(VideoState *is)
{
    VideoPicture *vp;
    SubPicture *sp;
    AVPicture pict;
    float aspect_ratio;
    int width, height, x, y;
    SDL_Rect rect;
    int i;

    vp = &is->pictq[is->pictq_rindex];
    if (vp->bmp) {
#if CONFIG_AVFILTER
         if (vp->picref->video->sample_aspect_ratio.num == 0)
             aspect_ratio = 0;
         else
             aspect_ratio = av_q2d(vp->picref->video->sample_aspect_ratio);
#else

        /* XXX: use variable in the frame */
        if (is->video_st->sample_aspect_ratio.num)
            aspect_ratio = av_q2d(is->video_st->sample_aspect_ratio);
        else if (is->video_st->codec->sample_aspect_ratio.num)
            aspect_ratio = av_q2d(is->video_st->codec->sample_aspect_ratio);
        else
            aspect_ratio = 0;
#endif
        if (aspect_ratio <= 0.0)
            aspect_ratio = 1.0;
        /* convert the sample (pixel) aspect ratio into a display aspect ratio */
        aspect_ratio *= (float)vp->width / (float)vp->height;

        if (is->subtitle_st) {
            if (is->subpq_size > 0) {
                sp = &is->subpq[is->subpq_rindex];

                /* blend only once the subtitle's start time is reached */
                if (vp->pts >= sp->pts + ((float) sp->sub.start_display_time / 1000)) {
                    SDL_LockYUVOverlay (vp->bmp);

                    /* note the swapped chroma plane indices: the SDL
                       overlay's plane order differs from AVPicture's */
                    pict.data[0] = vp->bmp->pixels[0];
                    pict.data[1] = vp->bmp->pixels[2];
                    pict.data[2] = vp->bmp->pixels[1];

                    pict.linesize[0] = vp->bmp->pitches[0];
                    pict.linesize[1] = vp->bmp->pitches[2];
                    pict.linesize[2] = vp->bmp->pitches[1];

                    for (i = 0; i < sp->sub.num_rects; i++)
                        blend_subrect(&pict, sp->sub.rects[i],
                                      vp->bmp->w, vp->bmp->h);

                    SDL_UnlockYUVOverlay (vp->bmp);
                }
            }
        }


        /* XXX: we suppose the screen has a 1.0 pixel ratio */
        height = is->height;
        width = ((int)rint(height * aspect_ratio)) & ~1;
        if (width > is->width) {
            width = is->width;
            height = ((int)rint(width / aspect_ratio)) & ~1;
        }
        /* center the (even-sized) rectangle in the display area */
        x = (is->width - width) / 2;
        y = (is->height - height) / 2;
        is->no_background = 0;
        rect.x = is->xleft + x;
        rect.y = is->ytop  + y;
        rect.w = FFMAX(width,  1);
        rect.h = FFMAX(height, 1);
        SDL_DisplayYUVOverlay(vp->bmp, &rect);
    }
}
712
713 /* get the current audio output buffer size, in samples. With SDL, we
714    cannot have a precise information */
715 static int audio_write_get_buf_size(VideoState *is)
716 {
717     return is->audio_buf_size - is->audio_buf_index;
718 }
719
/* Mathematical modulo for b > 0: always returns a value in [0, b).
 * Fixes the previous version, which returned b (out of range) whenever
 * a was a negative multiple of b — e.g. compute_mod(-10, 10) gave 10,
 * an invalid index for the SAMPLE_ARRAY_SIZE ring buffer. */
static inline int compute_mod(int a, int b)
{
    int r = a % b;
    return r < 0 ? r + b : r;
}
724
725 static void video_audio_display(VideoState *s)
726 {
727     int i, i_start, x, y1, y, ys, delay, n, nb_display_channels;
728     int ch, channels, h, h2, bgcolor, fgcolor;
729     int16_t time_diff;
730     int rdft_bits, nb_freq;
731
732     for(rdft_bits=1; (1<<rdft_bits)<2*s->height; rdft_bits++)
733         ;
734     nb_freq= 1<<(rdft_bits-1);
735
736     /* compute display index : center on currently output samples */
737     channels = s->audio_st->codec->channels;
738     nb_display_channels = channels;
739     if (!s->paused) {
740         int data_used= s->show_mode == SHOW_MODE_WAVES ? s->width : (2*nb_freq);
741         n = 2 * channels;
742         delay = audio_write_get_buf_size(s);
743         delay /= n;
744
745         /* to be more precise, we take into account the time spent since
746            the last buffer computation */
747         if (audio_callback_time) {
748             time_diff = av_gettime() - audio_callback_time;
749             delay -= (time_diff * s->audio_st->codec->sample_rate) / 1000000;
750         }
751
752         delay += 2*data_used;
753         if (delay < data_used)
754             delay = data_used;
755
756         i_start= x = compute_mod(s->sample_array_index - delay * channels, SAMPLE_ARRAY_SIZE);
757         if (s->show_mode == SHOW_MODE_WAVES) {
758             h= INT_MIN;
759             for(i=0; i<1000; i+=channels){
760                 int idx= (SAMPLE_ARRAY_SIZE + x - i) % SAMPLE_ARRAY_SIZE;
761                 int a= s->sample_array[idx];
762                 int b= s->sample_array[(idx + 4*channels)%SAMPLE_ARRAY_SIZE];
763                 int c= s->sample_array[(idx + 5*channels)%SAMPLE_ARRAY_SIZE];
764                 int d= s->sample_array[(idx + 9*channels)%SAMPLE_ARRAY_SIZE];
765                 int score= a-d;
766                 if(h<score && (b^c)<0){
767                     h= score;
768                     i_start= idx;
769                 }
770             }
771         }
772
773         s->last_i_start = i_start;
774     } else {
775         i_start = s->last_i_start;
776     }
777
778     bgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0x00);
779     if (s->show_mode == SHOW_MODE_WAVES) {
780         fill_rectangle(screen,
781                        s->xleft, s->ytop, s->width, s->height,
782                        bgcolor);
783
784         fgcolor = SDL_MapRGB(screen->format, 0xff, 0xff, 0xff);
785
786         /* total height for one channel */
787         h = s->height / nb_display_channels;
788         /* graph height / 2 */
789         h2 = (h * 9) / 20;
790         for(ch = 0;ch < nb_display_channels; ch++) {
791             i = i_start + ch;
792             y1 = s->ytop + ch * h + (h / 2); /* position of center line */
793             for(x = 0; x < s->width; x++) {
794                 y = (s->sample_array[i] * h2) >> 15;
795                 if (y < 0) {
796                     y = -y;
797                     ys = y1 - y;
798                 } else {
799                     ys = y1;
800                 }
801                 fill_rectangle(screen,
802                                s->xleft + x, ys, 1, y,
803                                fgcolor);
804                 i += channels;
805                 if (i >= SAMPLE_ARRAY_SIZE)
806                     i -= SAMPLE_ARRAY_SIZE;
807             }
808         }
809
810         fgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0xff);
811
812         for(ch = 1;ch < nb_display_channels; ch++) {
813             y = s->ytop + ch * h;
814             fill_rectangle(screen,
815                            s->xleft, y, s->width, 1,
816                            fgcolor);
817         }
818         SDL_UpdateRect(screen, s->xleft, s->ytop, s->width, s->height);
819     }else{
820         nb_display_channels= FFMIN(nb_display_channels, 2);
821         if(rdft_bits != s->rdft_bits){
822             av_rdft_end(s->rdft);
823             av_free(s->rdft_data);
824             s->rdft = av_rdft_init(rdft_bits, DFT_R2C);
825             s->rdft_bits= rdft_bits;
826             s->rdft_data= av_malloc(4*nb_freq*sizeof(*s->rdft_data));
827         }
828         {
829             FFTSample *data[2];
830             for(ch = 0;ch < nb_display_channels; ch++) {
831                 data[ch] = s->rdft_data + 2*nb_freq*ch;
832                 i = i_start + ch;
833                 for(x = 0; x < 2*nb_freq; x++) {
834                     double w= (x-nb_freq)*(1.0/nb_freq);
835                     data[ch][x]= s->sample_array[i]*(1.0-w*w);
836                     i += channels;
837                     if (i >= SAMPLE_ARRAY_SIZE)
838                         i -= SAMPLE_ARRAY_SIZE;
839                 }
840                 av_rdft_calc(s->rdft, data[ch]);
841             }
842             //least efficient way to do this, we should of course directly access it but its more than fast enough
843             for(y=0; y<s->height; y++){
844                 double w= 1/sqrt(nb_freq);
845                 int a= sqrt(w*sqrt(data[0][2*y+0]*data[0][2*y+0] + data[0][2*y+1]*data[0][2*y+1]));
846                 int b= (nb_display_channels == 2 ) ? sqrt(w*sqrt(data[1][2*y+0]*data[1][2*y+0]
847                        + data[1][2*y+1]*data[1][2*y+1])) : a;
848                 a= FFMIN(a,255);
849                 b= FFMIN(b,255);
850                 fgcolor = SDL_MapRGB(screen->format, a, b, (a+b)/2);
851
852                 fill_rectangle(screen,
853                             s->xpos, s->height-y, 1, 1,
854                             fgcolor);
855             }
856         }
857         SDL_UpdateRect(screen, s->xpos, s->ytop, 1, s->height);
858         s->xpos++;
859         if(s->xpos >= s->width)
860             s->xpos= s->xleft;
861     }
862 }
863
/* (Re)create the SDL video surface.  The window size is taken from, in
   order: the fullscreen mode size, a user-requested size (screen_width/
   screen_height), the filter-output or codec frame size, or a 640x480
   fallback.  No-op when the current surface already matches.
   Returns 0 on success, -1 if SDL could not set the video mode. */
static int video_open(VideoState *is){
    int flags = SDL_HWSURFACE|SDL_ASYNCBLIT|SDL_HWACCEL;
    int w,h;

    if(is_full_screen) flags |= SDL_FULLSCREEN;
    else               flags |= SDL_RESIZABLE;

    if (is_full_screen && fs_screen_width) {
        w = fs_screen_width;
        h = fs_screen_height;
    } else if(!is_full_screen && screen_width){
        w = screen_width;
        h = screen_height;
#if CONFIG_AVFILTER
    }else if (is->out_video_filter && is->out_video_filter->inputs[0]){
        w = is->out_video_filter->inputs[0]->w;
        h = is->out_video_filter->inputs[0]->h;
#else
    }else if (is->video_st && is->video_st->codec->width){
        w = is->video_st->codec->width;
        h = is->video_st->codec->height;
#endif
    } else {
        w = 640;
        h = 480;
    }
    /* nothing to do if the existing surface already has this size */
    if(screen && is->width == screen->w && screen->w == w
       && is->height== screen->h && screen->h == h)
        return 0;

#ifndef __APPLE__
    screen = SDL_SetVideoMode(w, h, 0, flags);
#else
    /* setting bits_per_pixel = 0 or 32 causes blank video on OS X */
    screen = SDL_SetVideoMode(w, h, 24, flags);
#endif
    if (!screen) {
        fprintf(stderr, "SDL: could not set video mode - exiting\n");
        return -1;
    }
    if (!window_title)
        window_title = input_filename;
    SDL_WM_SetCaption(window_title, window_title);

    is->width = screen->w;
    is->height = screen->h;

    return 0;
}
913
914 /* display the current picture, if any */
915 static void video_display(VideoState *is)
916 {
917     if(!screen)
918         video_open(cur_stream);
919     if (is->audio_st && is->show_mode != SHOW_MODE_VIDEO)
920         video_audio_display(is);
921     else if (is->video_st)
922         video_image_display(is);
923 }
924
925 static int refresh_thread(void *opaque)
926 {
927     VideoState *is= opaque;
928     while(!is->abort_request){
929         SDL_Event event;
930         event.type = FF_REFRESH_EVENT;
931         event.user.data1 = opaque;
932         if(!is->refresh){
933             is->refresh=1;
934             SDL_PushEvent(&event);
935         }
936         //FIXME ideally we should wait the correct time but SDLs event passing is so slow it would be silly
937         usleep(is->audio_st && is->show_mode != SHOW_MODE_VIDEO ? rdftspeed*1000 : 5000);
938     }
939     return 0;
940 }
941
942 /* get the current audio clock value */
943 static double get_audio_clock(VideoState *is)
944 {
945     double pts;
946     int hw_buf_size, bytes_per_sec;
947     pts = is->audio_clock;
948     hw_buf_size = audio_write_get_buf_size(is);
949     bytes_per_sec = 0;
950     if (is->audio_st) {
951         bytes_per_sec = is->audio_st->codec->sample_rate *
952             2 * is->audio_st->codec->channels;
953     }
954     if (bytes_per_sec)
955         pts -= (double)hw_buf_size / bytes_per_sec;
956     return pts;
957 }
958
959 /* get the current video clock value */
960 static double get_video_clock(VideoState *is)
961 {
962     if (is->paused) {
963         return is->video_current_pts;
964     } else {
965         return is->video_current_pts_drift + av_gettime() / 1000000.0;
966     }
967 }
968
969 /* get the current external clock value */
970 static double get_external_clock(VideoState *is)
971 {
972     int64_t ti;
973     ti = av_gettime();
974     return is->external_clock + ((ti - is->external_clock_time) * 1e-6);
975 }
976
977 /* get the current master clock value */
978 static double get_master_clock(VideoState *is)
979 {
980     double val;
981
982     if (is->av_sync_type == AV_SYNC_VIDEO_MASTER) {
983         if (is->video_st)
984             val = get_video_clock(is);
985         else
986             val = get_audio_clock(is);
987     } else if (is->av_sync_type == AV_SYNC_AUDIO_MASTER) {
988         if (is->audio_st)
989             val = get_audio_clock(is);
990         else
991             val = get_video_clock(is);
992     } else {
993         val = get_external_clock(is);
994     }
995     return val;
996 }
997
998 /* seek in the stream */
999 static void stream_seek(VideoState *is, int64_t pos, int64_t rel, int seek_by_bytes)
1000 {
1001     if (!is->seek_req) {
1002         is->seek_pos = pos;
1003         is->seek_rel = rel;
1004         is->seek_flags &= ~AVSEEK_FLAG_BYTE;
1005         if (seek_by_bytes)
1006             is->seek_flags |= AVSEEK_FLAG_BYTE;
1007         is->seek_req = 1;
1008     }
1009 }
1010
1011 /* pause or resume the video */
1012 static void stream_toggle_pause(VideoState *is)
1013 {
1014     if (is->paused) {
1015         is->frame_timer += av_gettime() / 1000000.0 + is->video_current_pts_drift - is->video_current_pts;
1016         if(is->read_pause_return != AVERROR(ENOSYS)){
1017             is->video_current_pts = is->video_current_pts_drift + av_gettime() / 1000000.0;
1018         }
1019         is->video_current_pts_drift = is->video_current_pts - av_gettime() / 1000000.0;
1020     }
1021     is->paused = !is->paused;
1022 }
1023
1024 static double compute_target_time(double frame_current_pts, VideoState *is)
1025 {
1026     double delay, sync_threshold, diff;
1027
1028     /* compute nominal delay */
1029     delay = frame_current_pts - is->frame_last_pts;
1030     if (delay <= 0 || delay >= 10.0) {
1031         /* if incorrect delay, use previous one */
1032         delay = is->frame_last_delay;
1033     } else {
1034         is->frame_last_delay = delay;
1035     }
1036     is->frame_last_pts = frame_current_pts;
1037
1038     /* update delay to follow master synchronisation source */
1039     if (((is->av_sync_type == AV_SYNC_AUDIO_MASTER && is->audio_st) ||
1040          is->av_sync_type == AV_SYNC_EXTERNAL_CLOCK)) {
1041         /* if video is slave, we try to correct big delays by
1042            duplicating or deleting a frame */
1043         diff = get_video_clock(is) - get_master_clock(is);
1044
1045         /* skip or repeat frame. We take into account the
1046            delay to compute the threshold. I still don't know
1047            if it is the best guess */
1048         sync_threshold = FFMAX(AV_SYNC_THRESHOLD, delay);
1049         if (fabs(diff) < AV_NOSYNC_THRESHOLD) {
1050             if (diff <= -sync_threshold)
1051                 delay = 0;
1052             else if (diff >= sync_threshold)
1053                 delay = 2 * delay;
1054         }
1055     }
1056     is->frame_timer += delay;
1057 #if defined(DEBUG_SYNC)
1058     printf("video: delay=%0.3f actual_delay=%0.3f pts=%0.3f A-V=%f\n",
1059             delay, actual_delay, frame_current_pts, -diff);
1060 #endif
1061
1062     return is->frame_timer;
1063 }
1064
/* Called from the main loop (on FF_REFRESH_EVENT) to display the next frame.
 * Dequeues due pictures, updates the video clock, optionally drops late
 * frames, retires expired subtitles, renders, and prints periodic stats. */
static void video_refresh(void *opaque)
{
    VideoState *is = opaque;
    VideoPicture *vp;

    SubPicture *sp, *sp2;

    if (is->video_st) {
retry:
        if (is->pictq_size == 0) {
            //nothing to do, no picture to display in the que
        } else {
            double time= av_gettime()/1000000.0;
            double next_target;
            /* dequeue the picture */
            vp = &is->pictq[is->pictq_rindex];

            /* not yet due for display: leave it queued and return */
            if(time < vp->target_clock)
                return;
            /* update current video pts */
            is->video_current_pts = vp->pts;
            is->video_current_pts_drift = is->video_current_pts - time;
            is->video_current_pos = vp->pos;
            /* estimate when the *next* picture is due, to decide on dropping */
            if(is->pictq_size > 1){
                VideoPicture *nextvp= &is->pictq[(is->pictq_rindex+1)%VIDEO_PICTURE_QUEUE_SIZE];
                assert(nextvp->target_clock >= vp->target_clock);
                next_target= nextvp->target_clock;
            }else{
                next_target= vp->target_clock + is->video_clock - vp->pts; //FIXME pass durations cleanly
            }
            /* we are already late for the next frame: raise the decoder-side
             * skip factor and, if late enough, drop this picture entirely */
            if(framedrop && time > next_target){
                is->skip_frames *= 1.0 + FRAME_SKIP_FACTOR;
                if(is->pictq_size > 1 || time > next_target + 0.5){
                    /* update queue size and signal for next picture */
                    if (++is->pictq_rindex == VIDEO_PICTURE_QUEUE_SIZE)
                        is->pictq_rindex = 0;

                    SDL_LockMutex(is->pictq_mutex);
                    is->pictq_size--;
                    SDL_CondSignal(is->pictq_cond);
                    SDL_UnlockMutex(is->pictq_mutex);
                    goto retry;
                }
            }

            if(is->subtitle_st) {
                /* on a subtitle stream change, flush the whole subtitle queue */
                if (is->subtitle_stream_changed) {
                    SDL_LockMutex(is->subpq_mutex);

                    while (is->subpq_size) {
                        free_subpicture(&is->subpq[is->subpq_rindex]);

                        /* update queue size and signal for next picture */
                        if (++is->subpq_rindex == SUBPICTURE_QUEUE_SIZE)
                            is->subpq_rindex = 0;

                        is->subpq_size--;
                    }
                    is->subtitle_stream_changed = 0;

                    SDL_CondSignal(is->subpq_cond);
                    SDL_UnlockMutex(is->subpq_mutex);
                } else {
                    if (is->subpq_size > 0) {
                        sp = &is->subpq[is->subpq_rindex];

                        if (is->subpq_size > 1)
                            sp2 = &is->subpq[(is->subpq_rindex + 1) % SUBPICTURE_QUEUE_SIZE];
                        else
                            sp2 = NULL;

                        /* retire the head subtitle once its display window has
                         * ended, or once the following subtitle has started */
                        if ((is->video_current_pts > (sp->pts + ((float) sp->sub.end_display_time / 1000)))
                                || (sp2 && is->video_current_pts > (sp2->pts + ((float) sp2->sub.start_display_time / 1000))))
                        {
                            free_subpicture(sp);

                            /* update queue size and signal for next picture */
                            if (++is->subpq_rindex == SUBPICTURE_QUEUE_SIZE)
                                is->subpq_rindex = 0;

                            SDL_LockMutex(is->subpq_mutex);
                            is->subpq_size--;
                            SDL_CondSignal(is->subpq_cond);
                            SDL_UnlockMutex(is->subpq_mutex);
                        }
                    }
                }
            }

            /* display picture */
            if (!display_disable)
                video_display(is);

            /* update queue size and signal for next picture */
            if (++is->pictq_rindex == VIDEO_PICTURE_QUEUE_SIZE)
                is->pictq_rindex = 0;

            SDL_LockMutex(is->pictq_mutex);
            is->pictq_size--;
            SDL_CondSignal(is->pictq_cond);
            SDL_UnlockMutex(is->pictq_mutex);
        }
    } else if (is->audio_st) {
        /* draw the next audio frame */

        /* if only audio stream, then display the audio bars (better
           than nothing, just to test the implementation */

        /* display picture */
        if (!display_disable)
            video_display(is);
    }
    /* print status line (A-V diff, queue fill levels) at most every 30 ms */
    if (show_status) {
        static int64_t last_time;
        int64_t cur_time;
        int aqsize, vqsize, sqsize;
        double av_diff;

        cur_time = av_gettime();
        if (!last_time || (cur_time - last_time) >= 30000) {
            aqsize = 0;
            vqsize = 0;
            sqsize = 0;
            if (is->audio_st)
                aqsize = is->audioq.size;
            if (is->video_st)
                vqsize = is->videoq.size;
            if (is->subtitle_st)
                sqsize = is->subtitleq.size;
            av_diff = 0;
            if (is->audio_st && is->video_st)
                av_diff = get_audio_clock(is) - get_video_clock(is);
            printf("%7.2f A-V:%7.3f s:%3.1f aq=%5dKB vq=%5dKB sq=%5dB f=%"PRId64"/%"PRId64"   \r",
                   get_master_clock(is),
                   av_diff,
                   FFMAX(is->skip_frames-1, 0),
                   aqsize / 1024,
                   vqsize / 1024,
                   sqsize,
                   is->video_st ? is->video_st->codec->pts_correction_num_faulty_dts : 0,
                   is->video_st ? is->video_st->codec->pts_correction_num_faulty_pts : 0);
            fflush(stdout);
            last_time = cur_time;
        }
    }
}
1212
1213 static void stream_close(VideoState *is)
1214 {
1215     VideoPicture *vp;
1216     int i;
1217     /* XXX: use a special url_shutdown call to abort parse cleanly */
1218     is->abort_request = 1;
1219     SDL_WaitThread(is->read_tid, NULL);
1220     SDL_WaitThread(is->refresh_tid, NULL);
1221
1222     /* free all pictures */
1223     for(i=0;i<VIDEO_PICTURE_QUEUE_SIZE; i++) {
1224         vp = &is->pictq[i];
1225 #if CONFIG_AVFILTER
1226         if (vp->picref) {
1227             avfilter_unref_buffer(vp->picref);
1228             vp->picref = NULL;
1229         }
1230 #endif
1231         if (vp->bmp) {
1232             SDL_FreeYUVOverlay(vp->bmp);
1233             vp->bmp = NULL;
1234         }
1235     }
1236     SDL_DestroyMutex(is->pictq_mutex);
1237     SDL_DestroyCond(is->pictq_cond);
1238     SDL_DestroyMutex(is->subpq_mutex);
1239     SDL_DestroyCond(is->subpq_cond);
1240 #if !CONFIG_AVFILTER
1241     if (is->img_convert_ctx)
1242         sws_freeContext(is->img_convert_ctx);
1243 #endif
1244     av_free(is);
1245 }
1246
/* tear down the player and exit the process; the order matters: the stream
 * (and its threads) must be closed before SDL is shut down */
static void do_exit(void)
{
    if (cur_stream) {
        stream_close(cur_stream);
        cur_stream = NULL;
    }
    uninit_opts();
#if CONFIG_AVFILTER
    avfilter_uninit();
#endif
    if (show_status)
        printf("\n");
    SDL_Quit();
    /* silence any further logging from atexit-time cleanup */
    av_log(NULL, AV_LOG_QUIET, "");
    exit(0);
}
1263
/* allocate a picture (needs to do that in main thread to avoid
   potential locking problems).
   Triggered via FF_ALLOC_EVENT from queue_picture(); signals pictq_cond
   once vp->allocated is set so the decoder thread can proceed. */
static void alloc_picture(void *opaque)
{
    VideoState *is = opaque;
    VideoPicture *vp;

    vp = &is->pictq[is->pictq_windex];

    if (vp->bmp)
        SDL_FreeYUVOverlay(vp->bmp);

#if CONFIG_AVFILTER
    if (vp->picref)
        avfilter_unref_buffer(vp->picref);
    vp->picref = NULL;

    /* with filters, the display size comes from the filter graph output */
    vp->width   = is->out_video_filter->inputs[0]->w;
    vp->height  = is->out_video_filter->inputs[0]->h;
    vp->pix_fmt = is->out_video_filter->inputs[0]->format;
#else
    vp->width   = is->video_st->codec->width;
    vp->height  = is->video_st->codec->height;
    vp->pix_fmt = is->video_st->codec->pix_fmt;
#endif

    vp->bmp = SDL_CreateYUVOverlay(vp->width, vp->height,
                                   SDL_YV12_OVERLAY,
                                   screen);
    if (!vp->bmp || vp->bmp->pitches[0] < vp->width) {
        /* SDL allocates a buffer smaller than requested if the video
         * overlay hardware is unable to support the requested size. */
        fprintf(stderr, "Error: the video system does not support an image\n"
                        "size of %dx%d pixels. Try using -lowres or -vf \"scale=w:h\"\n"
                        "to reduce the image size.\n", vp->width, vp->height );
        do_exit();
    }

    /* wake up the decoder thread waiting in queue_picture() */
    SDL_LockMutex(is->pictq_mutex);
    vp->allocated = 1;
    SDL_CondSignal(is->pictq_cond);
    SDL_UnlockMutex(is->pictq_mutex);
}
1307
/* Convert/copy a decoded frame into the picture queue for later display.
 * pts1 is in seconds (already rescaled by the caller); pos is the byte
 * position of the source packet. Blocks while the queue is full; returns
 * 0 on success, -1 if the video queue was aborted. */
static int queue_picture(VideoState *is, AVFrame *src_frame, double pts1, int64_t pos)
{
    VideoPicture *vp;
    double frame_delay, pts = pts1;

    /* compute the exact PTS for the picture if it is omitted in the stream
     * pts1 is the dts of the pkt / pts of the frame */
    if (pts != 0) {
        /* update video clock with pts, if present */
        is->video_clock = pts;
    } else {
        pts = is->video_clock;
    }
    /* update video clock for next frame */
    frame_delay = av_q2d(is->video_st->codec->time_base);
    /* for MPEG2, the frame can be repeated, so we update the
       clock accordingly */
    frame_delay += src_frame->repeat_pict * (frame_delay * 0.5);
    is->video_clock += frame_delay;

#if defined(DEBUG_SYNC) && 0
    printf("frame_type=%c clock=%0.3f pts=%0.3f\n",
           av_get_picture_type_char(src_frame->pict_type), pts, pts1);
#endif

    /* wait until we have space to put a new picture */
    SDL_LockMutex(is->pictq_mutex);

    /* queue full while no refresh is pending: the display side is falling
       behind, so raise the frame-skip factor */
    if(is->pictq_size>=VIDEO_PICTURE_QUEUE_SIZE && !is->refresh)
        is->skip_frames= FFMAX(1.0 - FRAME_SKIP_FACTOR, is->skip_frames * (1.0-FRAME_SKIP_FACTOR));

    while (is->pictq_size >= VIDEO_PICTURE_QUEUE_SIZE &&
           !is->videoq.abort_request) {
        SDL_CondWait(is->pictq_cond, is->pictq_mutex);
    }
    SDL_UnlockMutex(is->pictq_mutex);

    if (is->videoq.abort_request)
        return -1;

    vp = &is->pictq[is->pictq_windex];

    /* alloc or resize hardware picture buffer */
    if (!vp->bmp ||
#if CONFIG_AVFILTER
        vp->width  != is->out_video_filter->inputs[0]->w ||
        vp->height != is->out_video_filter->inputs[0]->h) {
#else
        vp->width != is->video_st->codec->width ||
        vp->height != is->video_st->codec->height) {
#endif
        SDL_Event event;

        vp->allocated = 0;

        /* the allocation must be done in the main thread to avoid
           locking problems */
        event.type = FF_ALLOC_EVENT;
        event.user.data1 = is;
        SDL_PushEvent(&event);

        /* wait until the picture is allocated */
        SDL_LockMutex(is->pictq_mutex);
        while (!vp->allocated && !is->videoq.abort_request) {
            SDL_CondWait(is->pictq_cond, is->pictq_mutex);
        }
        SDL_UnlockMutex(is->pictq_mutex);

        if (is->videoq.abort_request)
            return -1;
    }

    /* if the frame is not skipped, then display it */
    if (vp->bmp) {
        AVPicture pict;
#if CONFIG_AVFILTER
        if(vp->picref)
            avfilter_unref_buffer(vp->picref);
        vp->picref = src_frame->opaque;
#endif

        /* get a pointer on the bitmap */
        SDL_LockYUVOverlay (vp->bmp);

        /* SDL's YV12 stores the V plane before U, hence the 1<->2 swap
           between AVPicture planes and overlay planes */
        memset(&pict,0,sizeof(AVPicture));
        pict.data[0] = vp->bmp->pixels[0];
        pict.data[1] = vp->bmp->pixels[2];
        pict.data[2] = vp->bmp->pixels[1];

        pict.linesize[0] = vp->bmp->pitches[0];
        pict.linesize[1] = vp->bmp->pitches[2];
        pict.linesize[2] = vp->bmp->pitches[1];

#if CONFIG_AVFILTER
        //FIXME use direct rendering
        av_picture_copy(&pict, (AVPicture *)src_frame,
                        vp->pix_fmt, vp->width, vp->height);
#else
        /* without filters, convert to YUV420P with swscale */
        sws_flags = av_get_int(sws_opts, "sws_flags", NULL);
        is->img_convert_ctx = sws_getCachedContext(is->img_convert_ctx,
            vp->width, vp->height, vp->pix_fmt, vp->width, vp->height,
            PIX_FMT_YUV420P, sws_flags, NULL, NULL, NULL);
        if (is->img_convert_ctx == NULL) {
            fprintf(stderr, "Cannot initialize the conversion context\n");
            exit(1);
        }
        sws_scale(is->img_convert_ctx, src_frame->data, src_frame->linesize,
                  0, vp->height, pict.data, pict.linesize);
#endif
        /* update the bitmap content */
        SDL_UnlockYUVOverlay(vp->bmp);

        vp->pts = pts;
        vp->pos = pos;

        /* now we can update the picture count */
        if (++is->pictq_windex == VIDEO_PICTURE_QUEUE_SIZE)
            is->pictq_windex = 0;
        SDL_LockMutex(is->pictq_mutex);
        vp->target_clock= compute_target_time(vp->pts, is);

        is->pictq_size++;
        SDL_UnlockMutex(is->pictq_mutex);
    }
    return 0;
}
1434
/* Pull one packet from the video queue and decode it.
 * Returns 1 when a frame was produced (and not skipped), 0 when no frame
 * is available yet (or the packet was a flush / the frame was skipped),
 * and -1 when the queue was aborted. *pts is in stream time_base units. */
static int get_video_frame(VideoState *is, AVFrame *frame, int64_t *pts, AVPacket *pkt)
{
    int len1 av_unused, got_picture, i;

    if (packet_queue_get(&is->videoq, pkt, 1) < 0)
        return -1;

    /* a flush packet (pushed on seek) resets the decoder and all timing state */
    if (pkt->data == flush_pkt.data) {
        avcodec_flush_buffers(is->video_st->codec);

        SDL_LockMutex(is->pictq_mutex);
        //Make sure there are no long delay timers (ideally we should just flush the que but thats harder)
        for (i = 0; i < VIDEO_PICTURE_QUEUE_SIZE; i++) {
            is->pictq[i].target_clock= 0;
        }
        /* wait for the display side to drain the picture queue */
        while (is->pictq_size && !is->videoq.abort_request) {
            SDL_CondWait(is->pictq_cond, is->pictq_mutex);
        }
        is->video_current_pos = -1;
        SDL_UnlockMutex(is->pictq_mutex);

        is->frame_last_pts = AV_NOPTS_VALUE;
        is->frame_last_delay = 0;
        is->frame_timer = (double)av_gettime() / 1000000.0;
        is->skip_frames = 1;
        is->skip_frames_index = 0;
        return 0;
    }

    len1 = avcodec_decode_video2(is->video_st->codec,
                                 frame, &got_picture,
                                 pkt);

    if (got_picture) {
        /* choose the timestamp source according to -drp:
           -1 = best-effort heuristic, 1 = packet pts, 0 = packet dts */
        if (decoder_reorder_pts == -1) {
            *pts = frame->best_effort_timestamp;
        } else if (decoder_reorder_pts) {
            *pts = frame->pkt_pts;
        } else {
            *pts = frame->pkt_dts;
        }

        if (*pts == AV_NOPTS_VALUE) {
            *pts = 0;
        }

        /* frame-skip accounting: only report a frame once the fractional
           skip counter crosses the threshold */
        is->skip_frames_index += 1;
        if(is->skip_frames_index >= is->skip_frames){
            is->skip_frames_index -= FFMAX(is->skip_frames, 1.0);
            return 1;
        }

    }
    return 0;
}
1490
1491 #if CONFIG_AVFILTER
/* private context of the "ffplay_input" source filter */
typedef struct {
    VideoState *is;   /* owning player state */
    AVFrame *frame;   /* scratch frame filled by get_video_frame() */
    int use_dr1;      /* non-zero when decoder buffers come from the filter graph */
} FilterPriv;
1497
/* get_buffer callback: hand the decoder a buffer allocated by the filter
 * graph (direct rendering). codec->opaque holds the source filter context.
 * Returns 0 on success, -1 on failure. */
static int input_get_buffer(AVCodecContext *codec, AVFrame *pic)
{
    AVFilterContext *ctx = codec->opaque;
    AVFilterBufferRef  *ref;
    int perms = AV_PERM_WRITE;
    int i, w, h, stride[4];
    unsigned edge;
    int pixel_size;

    av_assert0(codec->flags & CODEC_FLAG_EMU_EDGE);

    if (codec->codec->capabilities & CODEC_CAP_NEG_LINESIZES)
        perms |= AV_PERM_NEG_LINESIZES;

    /* translate the decoder's buffer hints into filter buffer permissions */
    if(pic->buffer_hints & FF_BUFFER_HINTS_VALID) {
        if(pic->buffer_hints & FF_BUFFER_HINTS_READABLE) perms |= AV_PERM_READ;
        if(pic->buffer_hints & FF_BUFFER_HINTS_PRESERVE) perms |= AV_PERM_PRESERVE;
        if(pic->buffer_hints & FF_BUFFER_HINTS_REUSABLE) perms |= AV_PERM_REUSE2;
    }
    if(pic->reference) perms |= AV_PERM_READ | AV_PERM_PRESERVE;

    w = codec->width;
    h = codec->height;

    if(av_image_check_size(w, h, 0, codec))
        return -1;

    /* pad the requested size for alignment and edge emulation */
    avcodec_align_dimensions2(codec, &w, &h, stride);
    edge = codec->flags & CODEC_FLAG_EMU_EDGE ? 0 : avcodec_get_edge_width();
    w += edge << 1;
    h += edge << 1;

    if(!(ref = avfilter_get_video_buffer(ctx->outputs[0], perms, w, h)))
        return -1;

    pixel_size = av_pix_fmt_descriptors[ref->format].comp[0].step_minus1+1;
    ref->video->w = codec->width;
    ref->video->h = codec->height;
    for(i = 0; i < 4; i ++) {
        unsigned hshift = (i == 1 || i == 2) ? av_pix_fmt_descriptors[ref->format].log2_chroma_w : 0;
        unsigned vshift = (i == 1 || i == 2) ? av_pix_fmt_descriptors[ref->format].log2_chroma_h : 0;

        /* offset each plane pointer past the edge padding */
        if (ref->data[i]) {
            ref->data[i]    += ((edge * pixel_size) >> hshift) + ((edge * ref->linesize[i]) >> vshift);
        }
        pic->data[i]     = ref->data[i];
        pic->linesize[i] = ref->linesize[i];
    }
    /* stash the buffer ref so release/reget and the filter can find it */
    pic->opaque = ref;
    pic->age    = INT_MAX;
    pic->type   = FF_BUFFER_TYPE_USER;
    pic->reordered_opaque = codec->reordered_opaque;
    if(codec->pkt) pic->pkt_pts = codec->pkt->pts;
    else           pic->pkt_pts = AV_NOPTS_VALUE;
    return 0;
}
1554
/* release_buffer callback: drop the filter buffer ref stored in pic->opaque
 * and clear the plane pointers so the decoder cannot reuse them */
static void input_release_buffer(AVCodecContext *codec, AVFrame *pic)
{
    memset(pic->data, 0, sizeof(pic->data));
    avfilter_unref_buffer(pic->opaque);
}
1560
/* reget_buffer callback: reuse the existing filter buffer if the picture
 * properties are unchanged; allocate a fresh one if the frame has no data.
 * Returns 0 on success, negative on property mismatch. */
static int input_reget_buffer(AVCodecContext *codec, AVFrame *pic)
{
    AVFilterBufferRef *ref = pic->opaque;

    if (pic->data[0] == NULL) {
        pic->buffer_hints |= FF_BUFFER_HINTS_READABLE;
        return codec->get_buffer(codec, pic);
    }

    if ((codec->width != ref->video->w) || (codec->height != ref->video->h) ||
        (codec->pix_fmt != ref->format)) {
        av_log(codec, AV_LOG_ERROR, "Picture properties changed.\n");
        return -1;
    }

    pic->reordered_opaque = codec->reordered_opaque;
    if(codec->pkt) pic->pkt_pts = codec->pkt->pts;
    else           pic->pkt_pts = AV_NOPTS_VALUE;
    return 0;
}
1581
1582 static int input_init(AVFilterContext *ctx, const char *args, void *opaque)
1583 {
1584     FilterPriv *priv = ctx->priv;
1585     AVCodecContext *codec;
1586     if(!opaque) return -1;
1587
1588     priv->is = opaque;
1589     codec    = priv->is->video_st->codec;
1590     codec->opaque = ctx;
1591     if((codec->codec->capabilities & CODEC_CAP_DR1)
1592     ) {
1593         av_assert0(codec->flags & CODEC_FLAG_EMU_EDGE);
1594         priv->use_dr1 = 1;
1595         codec->get_buffer     = input_get_buffer;
1596         codec->release_buffer = input_release_buffer;
1597         codec->reget_buffer   = input_reget_buffer;
1598         codec->thread_safe_callbacks = 1;
1599     }
1600
1601     priv->frame = avcodec_alloc_frame();
1602
1603     return 0;
1604 }
1605
/* uninit callback: free the scratch frame allocated in input_init() */
static void input_uninit(AVFilterContext *ctx)
{
    FilterPriv *priv = ctx->priv;
    av_free(priv->frame);
}
1611
/* request_frame callback: decode until a frame is available and push it
 * into the filter graph. Returns 0 on success, -1 when decoding failed
 * or the queue was aborted. */
static int input_request_frame(AVFilterLink *link)
{
    FilterPriv *priv = link->src->priv;
    AVFilterBufferRef *picref;
    int64_t pts = 0;
    AVPacket pkt;
    int ret;

    /* loop until get_video_frame() yields a frame (1) or fails (<0);
       packets that produced no frame are freed inside the loop */
    while (!(ret = get_video_frame(priv->is, priv->frame, &pts, &pkt)))
        av_free_packet(&pkt);
    if (ret < 0)
        return -1;

    if(priv->use_dr1) {
        /* direct rendering: the decoder filled a graph-owned buffer */
        picref = avfilter_ref_buffer(priv->frame->opaque, ~0);
    } else {
        /* otherwise copy the frame data into a fresh filter buffer */
        picref = avfilter_get_video_buffer(link, AV_PERM_WRITE, link->w, link->h);
        av_image_copy(picref->data, picref->linesize,
                      priv->frame->data, priv->frame->linesize,
                      picref->format, link->w, link->h);
    }
    av_free_packet(&pkt);

    avfilter_copy_frame_props(picref, priv->frame);
    picref->pts = pts;

    avfilter_start_frame(link, picref);
    avfilter_draw_slice(link, 0, link->h, 1);
    avfilter_end_frame(link);

    return 0;
}
1644
1645 static int input_query_formats(AVFilterContext *ctx)
1646 {
1647     FilterPriv *priv = ctx->priv;
1648     enum PixelFormat pix_fmts[] = {
1649         priv->is->video_st->codec->pix_fmt, PIX_FMT_NONE
1650     };
1651
1652     avfilter_set_common_formats(ctx, avfilter_make_format_list(pix_fmts));
1653     return 0;
1654 }
1655
1656 static int input_config_props(AVFilterLink *link)
1657 {
1658     FilterPriv *priv  = link->src->priv;
1659     AVCodecContext *c = priv->is->video_st->codec;
1660
1661     link->w = c->width;
1662     link->h = c->height;
1663     link->time_base = priv->is->video_st->time_base;
1664
1665     return 0;
1666 }
1667
/* definition of the video source filter that feeds decoded frames from
 * ffplay's decoder into the user filter graph */
static AVFilter input_filter =
{
    .name      = "ffplay_input",

    .priv_size = sizeof(FilterPriv),

    .init      = input_init,
    .uninit    = input_uninit,

    .query_formats = input_query_formats,

    /* pure source: no inputs, one video output */
    .inputs    = (AVFilterPad[]) {{ .name = NULL }},
    .outputs   = (AVFilterPad[]) {{ .name = "default",
                                    .type = AVMEDIA_TYPE_VIDEO,
                                    .request_frame = input_request_frame,
                                    .config_props  = input_config_props, },
                                  { .name = NULL }},
};
1686
1687 static int configure_video_filters(AVFilterGraph *graph, VideoState *is, const char *vfilters)
1688 {
1689     char sws_flags_str[128];
1690     int ret;
1691     FFSinkContext ffsink_ctx = { .pix_fmt = PIX_FMT_YUV420P };
1692     AVFilterContext *filt_src = NULL, *filt_out = NULL;
1693     snprintf(sws_flags_str, sizeof(sws_flags_str), "flags=%d", sws_flags);
1694     graph->scale_sws_opts = av_strdup(sws_flags_str);
1695
1696     if ((ret = avfilter_graph_create_filter(&filt_src, &input_filter, "src",
1697                                             NULL, is, graph)) < 0)
1698         goto the_end;
1699     if ((ret = avfilter_graph_create_filter(&filt_out, &ffsink, "out",
1700                                             NULL, &ffsink_ctx, graph)) < 0)
1701         goto the_end;
1702
1703     if(vfilters) {
1704         AVFilterInOut *outputs = av_malloc(sizeof(AVFilterInOut));
1705         AVFilterInOut *inputs  = av_malloc(sizeof(AVFilterInOut));
1706
1707         outputs->name    = av_strdup("in");
1708         outputs->filter_ctx = filt_src;
1709         outputs->pad_idx = 0;
1710         outputs->next    = NULL;
1711
1712         inputs->name    = av_strdup("out");
1713         inputs->filter_ctx = filt_out;
1714         inputs->pad_idx = 0;
1715         inputs->next    = NULL;
1716
1717         if ((ret = avfilter_graph_parse(graph, vfilters, inputs, outputs, NULL)) < 0)
1718             goto the_end;
1719         av_freep(&vfilters);
1720     } else {
1721         if ((ret = avfilter_link(filt_src, 0, filt_out, 0)) < 0)
1722             goto the_end;
1723     }
1724
1725     if ((ret = avfilter_graph_config(graph, NULL)) < 0)
1726         goto the_end;
1727
1728     is->out_video_filter = filt_out;
1729 the_end:
1730     return ret;
1731 }
1732
1733 #endif  /* CONFIG_AVFILTER */
1734
/* Video decoding thread: pulls frames (directly or through the filter
 * graph), converts their PTS to seconds, and queues them for display.
 * Runs until the queue is aborted or an error occurs. */
static int video_thread(void *arg)
{
    VideoState *is = arg;
    AVFrame *frame= avcodec_alloc_frame();
    int64_t pts_int = AV_NOPTS_VALUE, pos = -1;
    double pts;
    int ret;

#if CONFIG_AVFILTER
    AVFilterGraph *graph = avfilter_graph_alloc();
    AVFilterContext *filt_out = NULL;

    if ((ret = configure_video_filters(graph, is, vfilters)) < 0)
        goto the_end;
    filt_out = is->out_video_filter;
#endif

    for(;;) {
#if !CONFIG_AVFILTER
        AVPacket pkt;
#else
        AVFilterBufferRef *picref;
        AVRational tb;
#endif
        while (is->paused && !is->videoq.abort_request)
            SDL_Delay(10);
#if CONFIG_AVFILTER
        /* with filters, frames come out of the graph with the graph's
           time base; rescale to the stream time base if they differ */
        ret = get_filtered_video_frame(filt_out, frame, &picref, &tb);
        if (picref) {
            pts_int = picref->pts;
            pos     = picref->pos;
            frame->opaque = picref;
        }

        if (av_cmp_q(tb, is->video_st->time_base)) {
            av_unused int64_t pts1 = pts_int;
            pts_int = av_rescale_q(pts_int, tb, is->video_st->time_base);
            av_dlog(NULL, "video_thread(): "
                    "tb:%d/%d pts:%"PRId64" -> tb:%d/%d pts:%"PRId64"\n",
                    tb.num, tb.den, pts1,
                    is->video_st->time_base.num, is->video_st->time_base.den, pts_int);
        }
#else
        ret = get_video_frame(is, frame, &pts_int, &pkt);
        pos = pkt.pos;
        av_free_packet(&pkt);
#endif

        if (ret < 0) goto the_end;

        /* ret == 0: no frame available yet, keep decoding */
        if (!ret)
            continue;

        /* convert PTS from stream time base to seconds */
        pts = pts_int*av_q2d(is->video_st->time_base);

        ret = queue_picture(is, frame, pts, pos);

        if (ret < 0)
            goto the_end;

        /* single-step mode: pause again after each queued frame */
        if (step)
            if (cur_stream)
                stream_toggle_pause(cur_stream);
    }
 the_end:
#if CONFIG_AVFILTER
    avfilter_graph_free(&graph);
#endif
    av_free(frame);
    return 0;
}
1806
/* Subtitle decoding thread: decodes subtitle packets and queues bitmap
 * subtitles (format == 0) for display, converting their palettes from
 * RGBA to YUVA. Runs until the subtitle queue is aborted. */
static int subtitle_thread(void *arg)
{
    VideoState *is = arg;
    SubPicture *sp;
    AVPacket pkt1, *pkt = &pkt1;
    int len1 av_unused, got_subtitle;
    double pts;
    int i, j;
    int r, g, b, y, u, v, a;

    for(;;) {
        while (is->paused && !is->subtitleq.abort_request) {
            SDL_Delay(10);
        }
        if (packet_queue_get(&is->subtitleq, pkt, 1) < 0)
            break;

        /* flush packet (pushed on seek): reset the decoder */
        if(pkt->data == flush_pkt.data){
            avcodec_flush_buffers(is->subtitle_st->codec);
            continue;
        }
        /* wait for a free slot in the subtitle queue */
        SDL_LockMutex(is->subpq_mutex);
        while (is->subpq_size >= SUBPICTURE_QUEUE_SIZE &&
               !is->subtitleq.abort_request) {
            SDL_CondWait(is->subpq_cond, is->subpq_mutex);
        }
        SDL_UnlockMutex(is->subpq_mutex);

        if (is->subtitleq.abort_request)
            goto the_end;

        sp = &is->subpq[is->subpq_windex];

       /* NOTE: ipts is the PTS of the _first_ picture beginning in
           this packet, if any */
        pts = 0;
        if (pkt->pts != AV_NOPTS_VALUE)
            pts = av_q2d(is->subtitle_st->time_base)*pkt->pts;

        len1 = avcodec_decode_subtitle2(is->subtitle_st->codec,
                                    &sp->sub, &got_subtitle,
                                    pkt);
        /* only bitmap subtitles (format 0) are handled */
        if (got_subtitle && sp->sub.format == 0) {
            sp->pts = pts;

            /* convert each rect's palette from RGBA to YUVA in place */
            for (i = 0; i < sp->sub.num_rects; i++)
            {
                for (j = 0; j < sp->sub.rects[i]->nb_colors; j++)
                {
                    RGBA_IN(r, g, b, a, (uint32_t*)sp->sub.rects[i]->pict.data[1] + j);
                    y = RGB_TO_Y_CCIR(r, g, b);
                    u = RGB_TO_U_CCIR(r, g, b, 0);
                    v = RGB_TO_V_CCIR(r, g, b, 0);
                    YUVA_OUT((uint32_t*)sp->sub.rects[i]->pict.data[1] + j, y, u, v, a);
                }
            }

            /* now we can update the picture count */
            if (++is->subpq_windex == SUBPICTURE_QUEUE_SIZE)
                is->subpq_windex = 0;
            SDL_LockMutex(is->subpq_mutex);
            is->subpq_size++;
            SDL_UnlockMutex(is->subpq_mutex);
        }
        av_free_packet(pkt);
    }
 the_end:
    return 0;
}
1876
1877 /* copy samples for viewing in editor window */
1878 static void update_sample_display(VideoState *is, short *samples, int samples_size)
1879 {
1880     int size, len;
1881
1882     size = samples_size / sizeof(short);
1883     while (size > 0) {
1884         len = SAMPLE_ARRAY_SIZE - is->sample_array_index;
1885         if (len > size)
1886             len = size;
1887         memcpy(is->sample_array + is->sample_array_index, samples, len * sizeof(short));
1888         samples += len;
1889         is->sample_array_index += len;
1890         if (is->sample_array_index >= SAMPLE_ARRAY_SIZE)
1891             is->sample_array_index = 0;
1892         size -= len;
1893     }
1894 }
1895
1896 /* return the new audio buffer size (samples can be added or deleted
1897    to get better sync if video or external master clock) */
1898 static int synchronize_audio(VideoState *is, short *samples,
1899                              int samples_size1, double pts)
1900 {
1901     int n, samples_size;
1902     double ref_clock;
1903
1904     n = 2 * is->audio_st->codec->channels;
1905     samples_size = samples_size1;
1906
1907     /* if not master, then we try to remove or add samples to correct the clock */
1908     if (((is->av_sync_type == AV_SYNC_VIDEO_MASTER && is->video_st) ||
1909          is->av_sync_type == AV_SYNC_EXTERNAL_CLOCK)) {
1910         double diff, avg_diff;
1911         int wanted_size, min_size, max_size, nb_samples;
1912
1913         ref_clock = get_master_clock(is);
1914         diff = get_audio_clock(is) - ref_clock;
1915
1916         if (diff < AV_NOSYNC_THRESHOLD) {
1917             is->audio_diff_cum = diff + is->audio_diff_avg_coef * is->audio_diff_cum;
1918             if (is->audio_diff_avg_count < AUDIO_DIFF_AVG_NB) {
1919                 /* not enough measures to have a correct estimate */
1920                 is->audio_diff_avg_count++;
1921             } else {
1922                 /* estimate the A-V difference */
1923                 avg_diff = is->audio_diff_cum * (1.0 - is->audio_diff_avg_coef);
1924
1925                 if (fabs(avg_diff) >= is->audio_diff_threshold) {
1926                     wanted_size = samples_size + ((int)(diff * is->audio_st->codec->sample_rate) * n);
1927                     nb_samples = samples_size / n;
1928
1929                     min_size = ((nb_samples * (100 - SAMPLE_CORRECTION_PERCENT_MAX)) / 100) * n;
1930                     max_size = ((nb_samples * (100 + SAMPLE_CORRECTION_PERCENT_MAX)) / 100) * n;
1931                     if (wanted_size < min_size)
1932                         wanted_size = min_size;
1933                     else if (wanted_size > max_size)
1934                         wanted_size = max_size;
1935
1936                     /* add or remove samples to correction the synchro */
1937                     if (wanted_size < samples_size) {
1938                         /* remove samples */
1939                         samples_size = wanted_size;
1940                     } else if (wanted_size > samples_size) {
1941                         uint8_t *samples_end, *q;
1942                         int nb;
1943
1944                         /* add samples */
1945                         nb = (samples_size - wanted_size);
1946                         samples_end = (uint8_t *)samples + samples_size - n;
1947                         q = samples_end + n;
1948                         while (nb > 0) {
1949                             memcpy(q, samples_end, n);
1950                             q += n;
1951                             nb -= n;
1952                         }
1953                         samples_size = wanted_size;
1954                     }
1955                 }
1956 #if 0
1957                 printf("diff=%f adiff=%f sample_diff=%d apts=%0.3f vpts=%0.3f %f\n",
1958                        diff, avg_diff, samples_size - samples_size1,
1959                        is->audio_clock, is->video_clock, is->audio_diff_threshold);
1960 #endif
1961             }
1962         } else {
1963             /* too big difference : may be initial PTS errors, so
1964                reset A-V filter */
1965             is->audio_diff_avg_count = 0;
1966             is->audio_diff_cum = 0;
1967         }
1968     }
1969
1970     return samples_size;
1971 }
1972
/* Decode one audio frame and return its uncompressed size in bytes.
 * Sets *pts_ptr to the presentation time of the decoded data and
 * leaves the decoded (possibly format-converted) samples in
 * is->audio_buf. Returns -1 on pause/abort. Loops internally: a
 * single input packet may yield several frames, and several packets
 * may be needed to produce one frame. */
static int audio_decode_frame(VideoState *is, double *pts_ptr)
{
    AVPacket *pkt_temp = &is->audio_pkt_temp;  /* cursor into the current packet */
    AVPacket *pkt = &is->audio_pkt;
    AVCodecContext *dec= is->audio_st->codec;
    int n, len1, data_size;
    double pts;

    for(;;) {
        /* NOTE: the audio packet can contain several frames */
        while (pkt_temp->size > 0) {
            data_size = sizeof(is->audio_buf1);
            len1 = avcodec_decode_audio3(dec,
                                        (int16_t *)is->audio_buf1, &data_size,
                                        pkt_temp);
            if (len1 < 0) {
                /* if error, we skip the frame */
                pkt_temp->size = 0;
                break;
            }

            /* advance past the consumed bytes of the packet */
            pkt_temp->data += len1;
            pkt_temp->size -= len1;
            if (data_size <= 0)
                continue;

            /* (re)create the sample format converter if the decoder's
               output format differs from what SDL was opened with (S16) */
            if (dec->sample_fmt != is->audio_src_fmt) {
                if (is->reformat_ctx)
                    av_audio_convert_free(is->reformat_ctx);
                is->reformat_ctx= av_audio_convert_alloc(AV_SAMPLE_FMT_S16, 1,
                                                         dec->sample_fmt, 1, NULL, 0);
                if (!is->reformat_ctx) {
                    fprintf(stderr, "Cannot convert %s sample format to %s sample format\n",
                        av_get_sample_fmt_name(dec->sample_fmt),
                        av_get_sample_fmt_name(AV_SAMPLE_FMT_S16));
                        break;
                }
                is->audio_src_fmt= dec->sample_fmt;
            }

            if (is->reformat_ctx) {
                /* convert audio_buf1 -> audio_buf2 (interleaved S16) */
                const void *ibuf[6]= {is->audio_buf1};
                void *obuf[6]= {is->audio_buf2};
                int istride[6]= {av_get_bits_per_sample_fmt(dec->sample_fmt)/8};
                int ostride[6]= {2};
                int len= data_size/istride[0];
                if (av_audio_convert(is->reformat_ctx, obuf, ostride, ibuf, istride, len)<0) {
                    printf("av_audio_convert() failed\n");
                    break;
                }
                is->audio_buf= is->audio_buf2;
                /* FIXME: existing code assume that data_size equals framesize*channels*2
                          remove this legacy cruft */
                data_size= len*2;
            }else{
                is->audio_buf= is->audio_buf1;
            }

            /* if no pts, then compute it */
            pts = is->audio_clock;
            *pts_ptr = pts;
            n = 2 * dec->channels;
            /* advance the audio clock by the duration of this frame */
            is->audio_clock += (double)data_size /
                (double)(n * dec->sample_rate);
#if defined(DEBUG_SYNC)
            {
                static double last_clock;
                printf("audio: delay=%0.3f clock=%0.3f pts=%0.3f\n",
                       is->audio_clock - last_clock,
                       is->audio_clock, pts);
                last_clock = is->audio_clock;
            }
#endif
            return data_size;
        }

        /* free the current packet */
        if (pkt->data)
            av_free_packet(pkt);

        if (is->paused || is->audioq.abort_request) {
            return -1;
        }

        /* read next packet */
        if (packet_queue_get(&is->audioq, pkt, 1) < 0)
            return -1;
        /* a flush packet (queued on seek) resets the decoder */
        if(pkt->data == flush_pkt.data){
            avcodec_flush_buffers(dec);
            continue;
        }

        pkt_temp->data = pkt->data;
        pkt_temp->size = pkt->size;

        /* if update the audio clock with the pts */
        if (pkt->pts != AV_NOPTS_VALUE) {
            is->audio_clock = av_q2d(is->audio_st->time_base)*pkt->pts;
        }
    }
}
2075
2076 /* prepare a new audio buffer */
2077 static void sdl_audio_callback(void *opaque, Uint8 *stream, int len)
2078 {
2079     VideoState *is = opaque;
2080     int audio_size, len1;
2081     double pts;
2082
2083     audio_callback_time = av_gettime();
2084
2085     while (len > 0) {
2086         if (is->audio_buf_index >= is->audio_buf_size) {
2087            audio_size = audio_decode_frame(is, &pts);
2088            if (audio_size < 0) {
2089                 /* if error, just output silence */
2090                is->audio_buf = is->audio_buf1;
2091                is->audio_buf_size = 1024;
2092                memset(is->audio_buf, 0, is->audio_buf_size);
2093            } else {
2094                if (is->show_mode != SHOW_MODE_VIDEO)
2095                    update_sample_display(is, (int16_t *)is->audio_buf, audio_size);
2096                audio_size = synchronize_audio(is, (int16_t *)is->audio_buf, audio_size,
2097                                               pts);
2098                is->audio_buf_size = audio_size;
2099            }
2100            is->audio_buf_index = 0;
2101         }
2102         len1 = is->audio_buf_size - is->audio_buf_index;
2103         if (len1 > len)
2104             len1 = len;
2105         memcpy(stream, (uint8_t *)is->audio_buf + is->audio_buf_index, len1);
2106         len -= len1;
2107         stream += len1;
2108         is->audio_buf_index += len1;
2109     }
2110 }
2111
/* Open a given stream (audio, video or subtitle) of is->ic by index:
 * find and open its decoder, apply user options, start the matching
 * output (SDL audio device or decoder thread). Return 0 if OK, -1 on
 * any failure. */
static int stream_component_open(VideoState *is, int stream_index)
{
    AVFormatContext *ic = is->ic;
    AVCodecContext *avctx;
    AVCodec *codec;
    SDL_AudioSpec wanted_spec, spec;

    if (stream_index < 0 || stream_index >= ic->nb_streams)
        return -1;
    avctx = ic->streams[stream_index]->codec;

    /* request at most stereo output from the decoder */
    if (avctx->codec_type == AVMEDIA_TYPE_AUDIO) {
        if (avctx->channels > 0) {
            avctx->request_channels = FFMIN(2, avctx->channels);
        } else {
            avctx->request_channels = 2;
        }
    }

    codec = avcodec_find_decoder(avctx->codec_id);
    if (!codec)
        return -1;

    /* apply the command-line decoding options (globals) */
    avctx->debug_mv = debug_mv;
    avctx->debug = debug;
    avctx->workaround_bugs = workaround_bugs;
    avctx->lowres = lowres;
    if(lowres) avctx->flags |= CODEC_FLAG_EMU_EDGE;
    avctx->idct_algo= idct;
    if(fast) avctx->flags2 |= CODEC_FLAG2_FAST;
    avctx->skip_frame= skip_frame;
    avctx->skip_idct= skip_idct;
    avctx->skip_loop_filter= skip_loop_filter;
    avctx->error_recognition= error_recognition;
    avctx->error_concealment= error_concealment;
    avctx->thread_count= thread_count;

    set_context_opts(avctx, avcodec_opts[avctx->codec_type], 0, codec);

    if(codec->capabilities & CODEC_CAP_DR1)
        avctx->flags |= CODEC_FLAG_EMU_EDGE;

    if (avcodec_open(avctx, codec) < 0)
        return -1;

    /* prepare audio output */
    if (avctx->codec_type == AVMEDIA_TYPE_AUDIO) {
        wanted_spec.freq = avctx->sample_rate;
        wanted_spec.format = AUDIO_S16SYS;
        wanted_spec.channels = avctx->channels;
        wanted_spec.silence = 0;
        wanted_spec.samples = SDL_AUDIO_BUFFER_SIZE;
        wanted_spec.callback = sdl_audio_callback;
        wanted_spec.userdata = is;
        if (SDL_OpenAudio(&wanted_spec, &spec) < 0) {
            fprintf(stderr, "SDL_OpenAudio: %s\n", SDL_GetError());
            return -1;
        }
        is->audio_hw_buf_size = spec.size;
        is->audio_src_fmt= AV_SAMPLE_FMT_S16;
    }

    ic->streams[stream_index]->discard = AVDISCARD_DEFAULT;
    switch(avctx->codec_type) {
    case AVMEDIA_TYPE_AUDIO:
        is->audio_stream = stream_index;
        is->audio_st = ic->streams[stream_index];
        is->audio_buf_size = 0;
        is->audio_buf_index = 0;

        /* init averaging filter */
        is->audio_diff_avg_coef = exp(log(0.01) / AUDIO_DIFF_AVG_NB);
        is->audio_diff_avg_count = 0;
        /* since we do not have a precise anough audio fifo fullness,
           we correct audio sync only if larger than this threshold */
        is->audio_diff_threshold = 2.0 * SDL_AUDIO_BUFFER_SIZE / avctx->sample_rate;

        memset(&is->audio_pkt, 0, sizeof(is->audio_pkt));
        packet_queue_init(&is->audioq);
        SDL_PauseAudio(0);
        break;
    case AVMEDIA_TYPE_VIDEO:
        is->video_stream = stream_index;
        is->video_st = ic->streams[stream_index];

        packet_queue_init(&is->videoq);
        is->video_tid = SDL_CreateThread(video_thread, is);
        break;
    case AVMEDIA_TYPE_SUBTITLE:
        is->subtitle_stream = stream_index;
        is->subtitle_st = ic->streams[stream_index];
        packet_queue_init(&is->subtitleq);

        is->subtitle_tid = SDL_CreateThread(subtitle_thread, is);
        break;
    default:
        break;
    }
    return 0;
}
2214
/* Close one open stream component: abort its packet queue, stop the
 * associated output (SDL audio device or decoder thread), join the
 * thread and clear the per-stream state in 'is'. The mutex signalling
 * before SDL_WaitThread is needed so a thread blocked on a queue
 * condition variable wakes up and can exit. */
static void stream_component_close(VideoState *is, int stream_index)
{
    AVFormatContext *ic = is->ic;
    AVCodecContext *avctx;

    if (stream_index < 0 || stream_index >= ic->nb_streams)
        return;
    avctx = ic->streams[stream_index]->codec;

    switch(avctx->codec_type) {
    case AVMEDIA_TYPE_AUDIO:
        packet_queue_abort(&is->audioq);

        /* stops the audio callback before the queue is destroyed */
        SDL_CloseAudio();

        packet_queue_end(&is->audioq);
        if (is->reformat_ctx)
            av_audio_convert_free(is->reformat_ctx);
        is->reformat_ctx = NULL;
        break;
    case AVMEDIA_TYPE_VIDEO:
        packet_queue_abort(&is->videoq);

        /* note: we also signal this mutex to make sure we deblock the
           video thread in all cases */
        SDL_LockMutex(is->pictq_mutex);
        SDL_CondSignal(is->pictq_cond);
        SDL_UnlockMutex(is->pictq_mutex);

        SDL_WaitThread(is->video_tid, NULL);

        packet_queue_end(&is->videoq);
        break;
    case AVMEDIA_TYPE_SUBTITLE:
        packet_queue_abort(&is->subtitleq);

        /* note: we also signal this mutex to make sure we deblock the
           video thread in all cases */
        SDL_LockMutex(is->subpq_mutex);
        is->subtitle_stream_changed = 1;

        SDL_CondSignal(is->subpq_cond);
        SDL_UnlockMutex(is->subpq_mutex);

        SDL_WaitThread(is->subtitle_tid, NULL);

        packet_queue_end(&is->subtitleq);
        break;
    default:
        break;
    }

    ic->streams[stream_index]->discard = AVDISCARD_ALL;
    avcodec_close(avctx);
    /* finally clear the per-type references so other threads see the
       stream as closed */
    switch(avctx->codec_type) {
    case AVMEDIA_TYPE_AUDIO:
        is->audio_st = NULL;
        is->audio_stream = -1;
        break;
    case AVMEDIA_TYPE_VIDEO:
        is->video_st = NULL;
        is->video_stream = -1;
        break;
    case AVMEDIA_TYPE_SUBTITLE:
        is->subtitle_st = NULL;
        is->subtitle_stream = -1;
        break;
    default:
        break;
    }
}
2286
/* since we have only one decoding thread, we can use a global
   variable instead of a thread local variable */
static VideoState *global_video_state;

/* avio interrupt callback: returns non-zero to make blocking I/O
 * abort when the current stream has requested shutdown. */
static int decode_interrupt_cb(void)
{
    return (global_video_state && global_video_state->abort_request);
}
2295
/* this thread gets the stream from the disk or the network */
/* Demux thread: opens the input, selects and opens the best streams,
 * then loops reading packets and dispatching them to the audio/video/
 * subtitle queues. Also services seek requests, pause/resume, queue
 * back-pressure and end-of-file/loop handling. Always returns 0; on
 * fatal errors it pushes an FF_QUIT_EVENT to the GUI loop instead. */
static int read_thread(void *arg)
{
    VideoState *is = arg;
    AVFormatContext *ic;
    int err, i, ret;
    int st_index[AVMEDIA_TYPE_NB];
    AVPacket pkt1, *pkt = &pkt1;
    AVFormatParameters params, *ap = &params;
    int eof=0;
    int pkt_in_play_range = 0;

    ic = avformat_alloc_context();

    memset(st_index, -1, sizeof(st_index));
    is->video_stream = -1;
    is->audio_stream = -1;
    is->subtitle_stream = -1;

    /* allow blocking I/O to be interrupted on abort */
    global_video_state = is;
    avio_set_interrupt_cb(decode_interrupt_cb);

    memset(ap, 0, sizeof(*ap));

    /* forced demuxer parameters from the command line */
    ap->prealloced_context = 1;
    ap->width = frame_width;
    ap->height= frame_height;
    ap->time_base= (AVRational){1, 25};
    ap->pix_fmt = frame_pix_fmt;
    ic->flags |= AVFMT_FLAG_PRIV_OPT;


    err = av_open_input_file(&ic, is->filename, is->iformat, 0, ap);
    if (err >= 0) {
        set_context_opts(ic, avformat_opts, AV_OPT_FLAG_DECODING_PARAM, NULL);
        err = av_demuxer_open(ic, ap);
        if(err < 0){
            avformat_free_context(ic);
            ic= NULL;
        }
    }
    if (err < 0) {
        print_error(is->filename, err);
        ret = -1;
        goto fail;
    }
    is->ic = ic;

    if(genpts)
        ic->flags |= AVFMT_FLAG_GENPTS;

    err = av_find_stream_info(ic);
    if (err < 0) {
        fprintf(stderr, "%s: could not find codec parameters\n", is->filename);
        ret = -1;
        goto fail;
    }
    if(ic->pb)
        ic->pb->eof_reached= 0; //FIXME hack, ffplay maybe should not use url_feof() to test for the end

    /* default: seek by bytes only for formats with discontinuous timestamps */
    if(seek_by_bytes<0)
        seek_by_bytes= !!(ic->iformat->flags & AVFMT_TS_DISCONT);

    /* if seeking requested, we execute it */
    if (start_time != AV_NOPTS_VALUE) {
        int64_t timestamp;

        timestamp = start_time;
        /* add the stream start time */
        if (ic->start_time != AV_NOPTS_VALUE)
            timestamp += ic->start_time;
        ret = avformat_seek_file(ic, -1, INT64_MIN, timestamp, INT64_MAX, 0);
        if (ret < 0) {
            fprintf(stderr, "%s: could not seek to position %0.3f\n",
                    is->filename, (double)timestamp / AV_TIME_BASE);
        }
    }

    /* discard everything by default; stream_component_open re-enables
       the streams that are actually opened */
    for (i = 0; i < ic->nb_streams; i++)
        ic->streams[i]->discard = AVDISCARD_ALL;
    if (!video_disable)
        st_index[AVMEDIA_TYPE_VIDEO] =
            av_find_best_stream(ic, AVMEDIA_TYPE_VIDEO,
                                wanted_stream[AVMEDIA_TYPE_VIDEO], -1, NULL, 0);
    if (!audio_disable)
        st_index[AVMEDIA_TYPE_AUDIO] =
            av_find_best_stream(ic, AVMEDIA_TYPE_AUDIO,
                                wanted_stream[AVMEDIA_TYPE_AUDIO],
                                st_index[AVMEDIA_TYPE_VIDEO],
                                NULL, 0);
    /* NOTE(review): subtitle selection is gated on video_disable (not a
       separate subtitle flag) — subtitles are only useful with video */
    if (!video_disable)
        st_index[AVMEDIA_TYPE_SUBTITLE] =
            av_find_best_stream(ic, AVMEDIA_TYPE_SUBTITLE,
                                wanted_stream[AVMEDIA_TYPE_SUBTITLE],
                                (st_index[AVMEDIA_TYPE_AUDIO] >= 0 ?
                                 st_index[AVMEDIA_TYPE_AUDIO] :
                                 st_index[AVMEDIA_TYPE_VIDEO]),
                                NULL, 0);
    if (show_status) {
        av_dump_format(ic, 0, is->filename, 0);
    }

    is->show_mode = show_mode;

    /* open the streams */
    if (st_index[AVMEDIA_TYPE_AUDIO] >= 0) {
        stream_component_open(is, st_index[AVMEDIA_TYPE_AUDIO]);
    }

    ret=-1;
    if (st_index[AVMEDIA_TYPE_VIDEO] >= 0) {
        ret= stream_component_open(is, st_index[AVMEDIA_TYPE_VIDEO]);
    }
    is->refresh_tid = SDL_CreateThread(refresh_thread, is);
    /* no video: fall back to the RDFT (spectrum) visualization */
    if (is->show_mode == SHOW_MODE_NONE)
        is->show_mode = ret >= 0 ? SHOW_MODE_VIDEO : SHOW_MODE_RDFT;

    if (st_index[AVMEDIA_TYPE_SUBTITLE] >= 0) {
        stream_component_open(is, st_index[AVMEDIA_TYPE_SUBTITLE]);
    }

    if (is->video_stream < 0 && is->audio_stream < 0) {
        fprintf(stderr, "%s: could not open codecs\n", is->filename);
        ret = -1;
        goto fail;
    }

    /* main demux loop */
    for(;;) {
        if (is->abort_request)
            break;
        /* propagate pause state changes to the demuxer (for network streams) */
        if (is->paused != is->last_paused) {
            is->last_paused = is->paused;
            if (is->paused)
                is->read_pause_return= av_read_pause(ic);
            else
                av_read_play(ic);
        }
#if CONFIG_RTSP_DEMUXER
        if (is->paused && !strcmp(ic->iformat->name, "rtsp")) {
            /* wait 10 ms to avoid trying to get another packet */
            /* XXX: horrible */
            SDL_Delay(10);
            continue;
        }
#endif
        if (is->seek_req) {
            int64_t seek_target= is->seek_pos;
            int64_t seek_min= is->seek_rel > 0 ? seek_target - is->seek_rel + 2: INT64_MIN;
            int64_t seek_max= is->seek_rel < 0 ? seek_target - is->seek_rel - 2: INT64_MAX;
//FIXME the +-2 is due to rounding being not done in the correct direction in generation
//      of the seek_pos/seek_rel variables

            ret = avformat_seek_file(is->ic, -1, seek_min, seek_target, seek_max, is->seek_flags);
            if (ret < 0) {
                fprintf(stderr, "%s: error while seeking\n", is->ic->filename);
            }else{
                /* after a successful seek, drop queued packets and insert a
                   flush packet so each decoder resets its state */
                if (is->audio_stream >= 0) {
                    packet_queue_flush(&is->audioq);
                    packet_queue_put(&is->audioq, &flush_pkt);
                }
                if (is->subtitle_stream >= 0) {
                    packet_queue_flush(&is->subtitleq);
                    packet_queue_put(&is->subtitleq, &flush_pkt);
                }
                if (is->video_stream >= 0) {
                    packet_queue_flush(&is->videoq);
                    packet_queue_put(&is->videoq, &flush_pkt);
                }
            }
            is->seek_req = 0;
            eof= 0;
        }

        /* if the queue are full, no need to read more */
        if (   is->audioq.size + is->videoq.size + is->subtitleq.size > MAX_QUEUE_SIZE
            || (   (is->audioq   .size  > MIN_AUDIOQ_SIZE || is->audio_stream<0)
                && (is->videoq   .nb_packets > MIN_FRAMES || is->video_stream<0)
                && (is->subtitleq.nb_packets > MIN_FRAMES || is->subtitle_stream<0))) {
            /* wait 10 ms */
            SDL_Delay(10);
            continue;
        }
        if(eof) {
            /* queue an empty packet to drain delayed frames from the
               video decoder */
            if(is->video_stream >= 0){
                av_init_packet(pkt);
                pkt->data=NULL;
                pkt->size=0;
                pkt->stream_index= is->video_stream;
                packet_queue_put(&is->videoq, pkt);
            }
            SDL_Delay(10);
            /* once everything is consumed, loop or exit as requested */
            if(is->audioq.size + is->videoq.size + is->subtitleq.size ==0){
                if(loop!=1 && (!loop || --loop)){
                    stream_seek(cur_stream, start_time != AV_NOPTS_VALUE ? start_time : 0, 0, 0);
                }else if(autoexit){
                    ret=AVERROR_EOF;
                    goto fail;
                }
            }
            eof=0;
            continue;
        }
        ret = av_read_frame(ic, pkt);
        if (ret < 0) {
            if (ret == AVERROR_EOF || url_feof(ic->pb))
                eof=1;
            if (ic->pb && ic->pb->error)
                break;
            SDL_Delay(100); /* wait for user event */
            continue;
        }
        /* check if packet is in play range specified by user, then queue, otherwise discard */
        pkt_in_play_range = duration == AV_NOPTS_VALUE ||
                (pkt->pts - ic->streams[pkt->stream_index]->start_time) *
                av_q2d(ic->streams[pkt->stream_index]->time_base) -
                (double)(start_time != AV_NOPTS_VALUE ? start_time : 0)/1000000
                <= ((double)duration/1000000);
        if (pkt->stream_index == is->audio_stream && pkt_in_play_range) {
            packet_queue_put(&is->audioq, pkt);
        } else if (pkt->stream_index == is->video_stream && pkt_in_play_range) {
            packet_queue_put(&is->videoq, pkt);
        } else if (pkt->stream_index == is->subtitle_stream && pkt_in_play_range) {
            packet_queue_put(&is->subtitleq, pkt);
        } else {
            av_free_packet(pkt);
        }
    }
    /* wait until the end */
    while (!is->abort_request) {
        SDL_Delay(100);
    }

    ret = 0;
 fail:
    /* disable interrupting */
    global_video_state = NULL;

    /* close each stream */
    if (is->audio_stream >= 0)
        stream_component_close(is, is->audio_stream);
    if (is->video_stream >= 0)
        stream_component_close(is, is->video_stream);
    if (is->subtitle_stream >= 0)
        stream_component_close(is, is->subtitle_stream);
    if (is->ic) {
        av_close_input_file(is->ic);
        is->ic = NULL; /* safety */
    }
    avio_set_interrupt_cb(NULL);

    if (ret != 0) {
        SDL_Event event;

        event.type = FF_QUIT_EVENT;
        event.user.data1 = is;
        SDL_PushEvent(&event);
    }
    return 0;
}
2555
2556 static VideoState *stream_open(const char *filename, AVInputFormat *iformat)
2557 {
2558     VideoState *is;
2559
2560     is = av_mallocz(sizeof(VideoState));
2561     if (!is)
2562         return NULL;
2563     av_strlcpy(is->filename, filename, sizeof(is->filename));
2564     is->iformat = iformat;
2565     is->ytop = 0;
2566     is->xleft = 0;
2567
2568     /* start video display */
2569     is->pictq_mutex = SDL_CreateMutex();
2570     is->pictq_cond = SDL_CreateCond();
2571
2572     is->subpq_mutex = SDL_CreateMutex();
2573     is->subpq_cond = SDL_CreateCond();
2574
2575     is->av_sync_type = av_sync_type;
2576     is->read_tid = SDL_CreateThread(read_thread, is);
2577     if (!is->read_tid) {
2578         av_free(is);
2579         return NULL;
2580     }
2581     return is;
2582 }
2583
/* Cycle to the next stream of the given media type: scan forward
 * (wrapping) from the currently open stream until another usable
 * stream of that type is found, then close the old one and open the
 * new one. For subtitles, wrapping past the end selects "no subtitle"
 * (index -1) before starting over. */
static void stream_cycle_channel(VideoState *is, int codec_type)
{
    AVFormatContext *ic = is->ic;
    int start_index, stream_index;
    AVStream *st;

    if (codec_type == AVMEDIA_TYPE_VIDEO)
        start_index = is->video_stream;
    else if (codec_type == AVMEDIA_TYPE_AUDIO)
        start_index = is->audio_stream;
    else
        start_index = is->subtitle_stream;
    /* nothing open to cycle from (subtitles may legitimately be -1) */
    if (start_index < (codec_type == AVMEDIA_TYPE_SUBTITLE ? -1 : 0))
        return;
    stream_index = start_index;
    for(;;) {
        if (++stream_index >= is->ic->nb_streams)
        {
            if (codec_type == AVMEDIA_TYPE_SUBTITLE)
            {
                /* wrap to "subtitles off" before restarting the scan */
                stream_index = -1;
                goto the_end;
            } else
                stream_index = 0;
        }
        /* went all the way around without finding another stream */
        if (stream_index == start_index)
            return;
        st = ic->streams[stream_index];
        if (st->codec->codec_type == codec_type) {
            /* check that parameters are OK */
            switch(codec_type) {
            case AVMEDIA_TYPE_AUDIO:
                if (st->codec->sample_rate != 0 &&
                    st->codec->channels != 0)
                    goto the_end;
                break;
            case AVMEDIA_TYPE_VIDEO:
            case AVMEDIA_TYPE_SUBTITLE:
                goto the_end;
            default:
                break;
            }
        }
    }
 the_end:
    stream_component_close(is, start_index);
    stream_component_open(is, stream_index);
}
2632
2633
/* Toggle between windowed and full-screen display by reopening the
 * video surface with the new mode. */
static void toggle_full_screen(void)
{
    is_full_screen = !is_full_screen;
    video_open(cur_stream);
}
2639
/* Toggle pause on the current stream and cancel any pending
 * single-frame step request. */
static void toggle_pause(void)
{
    if (cur_stream)
        stream_toggle_pause(cur_stream);
    step = 0;
}
2646
2647 static void step_to_next_frame(void)
2648 {
2649     if (cur_stream) {
2650         /* if the stream is paused unpause it, then step */
2651         if (cur_stream->paused)
2652             stream_toggle_pause(cur_stream);
2653     }
2654     step = 1;
2655 }
2656
2657 static void toggle_audio_display(void)
2658 {
2659     if (cur_stream) {
2660         int bgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0x00);
2661         cur_stream->show_mode = (cur_stream->show_mode + 1) % SHOW_MODE_NB;
2662         fill_rectangle(screen,
2663                     cur_stream->xleft, cur_stream->ytop, cur_stream->width, cur_stream->height,
2664                     bgcolor);
2665         SDL_UpdateRect(screen, cur_stream->xleft, cur_stream->ytop, cur_stream->width, cur_stream->height);
2666     }
2667 }
2668
/* Main SDL event loop: dispatches keyboard, mouse, resize and the
 * application-defined FF_* events until do_exit() terminates the
 * process. Does not return normally. */
static void event_loop(void)
{
    SDL_Event event;
    double incr, pos, frac;

    for(;;) {
        double x;
        /* Block until SDL delivers the next event. */
        SDL_WaitEvent(&event);
        switch(event.type) {
        case SDL_KEYDOWN:
            if (exit_on_keydown) {
                do_exit();
                break;
            }
            switch(event.key.keysym.sym) {
            case SDLK_ESCAPE:
            case SDLK_q:
                do_exit();
                break;
            case SDLK_f:
                toggle_full_screen();
                break;
            case SDLK_p:
            case SDLK_SPACE:
                toggle_pause();
                break;
            case SDLK_s: //S: Step to next frame
                step_to_next_frame();
                break;
            case SDLK_a:
                if (cur_stream)
                    stream_cycle_channel(cur_stream, AVMEDIA_TYPE_AUDIO);
                break;
            case SDLK_v:
                if (cur_stream)
                    stream_cycle_channel(cur_stream, AVMEDIA_TYPE_VIDEO);
                break;
            case SDLK_t:
                if (cur_stream)
                    stream_cycle_channel(cur_stream, AVMEDIA_TYPE_SUBTITLE);
                break;
            case SDLK_w:
                toggle_audio_display();
                break;
            /* Arrow keys: relative seek; incr is in seconds and is
             * converted to bytes below when seeking by bytes. */
            case SDLK_LEFT:
                incr = -10.0;
                goto do_seek;
            case SDLK_RIGHT:
                incr = 10.0;
                goto do_seek;
            case SDLK_UP:
                incr = 60.0;
                goto do_seek;
            case SDLK_DOWN:
                incr = -60.0;
            do_seek:
                if (cur_stream) {
                    if (seek_by_bytes) {
                        /* Pick the best-known current byte position:
                         * video frame position, then audio packet
                         * position, then the raw I/O position. */
                        if (cur_stream->video_stream >= 0 && cur_stream->video_current_pos>=0){
                            pos= cur_stream->video_current_pos;
                        }else if(cur_stream->audio_stream >= 0 && cur_stream->audio_pkt.pos>=0){
                            pos= cur_stream->audio_pkt.pos;
                        }else
                            pos = avio_tell(cur_stream->ic->pb);
                        /* Convert the seconds increment into bytes using
                         * the container bit rate, with a fixed fallback
                         * when the bit rate is unknown. */
                        if (cur_stream->ic->bit_rate)
                            incr *= cur_stream->ic->bit_rate / 8.0;
                        else
                            incr *= 180000.0;
                        pos += incr;
                        stream_seek(cur_stream, pos, incr, 1);
                    } else {
                        /* Time-based seek relative to the master clock. */
                        pos = get_master_clock(cur_stream);
                        pos += incr;
                        stream_seek(cur_stream, (int64_t)(pos * AV_TIME_BASE), (int64_t)(incr * AV_TIME_BASE), 0);
                    }
                }
                break;
            default:
                break;
            }
            break;
        case SDL_MOUSEBUTTONDOWN:
            if (exit_on_mousedown) {
                do_exit();
                break;
            }
            /* fallthrough: a click seeks just like a drag below */
        case SDL_MOUSEMOTION:
            if(event.type ==SDL_MOUSEBUTTONDOWN){
                x= event.button.x;
            }else{
                /* Only drags with a button held down trigger a seek. */
                if(event.motion.state != SDL_PRESSED)
                    break;
                x= event.motion.x;
            }
            if (cur_stream) {
                /* Map the horizontal position to a fraction of the file
                 * size (byte seek) or of its duration (time seek). */
                if(seek_by_bytes || cur_stream->ic->duration<=0){
                    uint64_t size=  avio_size(cur_stream->ic->pb);
                    stream_seek(cur_stream, size*x/cur_stream->width, 0, 1);
                }else{
                    int64_t ts;
                    int ns, hh, mm, ss;
                    int tns, thh, tmm, tss;
                    /* Total duration and seek target, both broken down
                     * into h:mm:ss for the progress message. */
                    tns = cur_stream->ic->duration/1000000LL;
                    thh = tns/3600;
                    tmm = (tns%3600)/60;
                    tss = (tns%60);
                    frac = x/cur_stream->width;
                    ns = frac*tns;
                    hh = ns/3600;
                    mm = (ns%3600)/60;
                    ss = (ns%60);
                    fprintf(stderr, "Seek to %2.0f%% (%2d:%02d:%02d) of total duration (%2d:%02d:%02d)       \n", frac*100,
                            hh, mm, ss, thh, tmm, tss);
                    ts = frac*cur_stream->ic->duration;
                    if (cur_stream->ic->start_time != AV_NOPTS_VALUE)
                        ts += cur_stream->ic->start_time;
                    stream_seek(cur_stream, ts, 0, 0);
                }
            }
            break;
        case SDL_VIDEORESIZE:
            if (cur_stream) {
                /* Recreate the SDL surface at the new window size. */
                screen = SDL_SetVideoMode(event.resize.w, event.resize.h, 0,
                                          SDL_HWSURFACE|SDL_RESIZABLE|SDL_ASYNCBLIT|SDL_HWACCEL);
                screen_width = cur_stream->width = event.resize.w;
                screen_height= cur_stream->height= event.resize.h;
            }
            break;
        case SDL_QUIT:
        case FF_QUIT_EVENT:
            do_exit();
            break;
        case FF_ALLOC_EVENT:
            /* Open the window and allocate the picture buffer on this
             * (the SDL/main) thread on behalf of the poster. */
            video_open(event.user.data1);
            alloc_picture(event.user.data1);
            break;
        case FF_REFRESH_EVENT:
            /* Redraw the display for the stream carried in the event.
             * NOTE(review): cur_stream is dereferenced without a NULL
             * check here, unlike the other cases — confirm it is always
             * set before a refresh event can be posted. */
            video_refresh(event.user.data1);
            cur_stream->refresh=0;
            break;
        default:
            break;
        }
    }
}
2815
2816 static int opt_frame_size(const char *opt, const char *arg)
2817 {
2818     if (av_parse_video_size(&frame_width, &frame_height, arg) < 0) {
2819         fprintf(stderr, "Incorrect frame size\n");
2820         return AVERROR(EINVAL);
2821     }
2822     if ((frame_width % 2) != 0 || (frame_height % 2) != 0) {
2823         fprintf(stderr, "Frame size must be a multiple of 2\n");
2824         return AVERROR(EINVAL);
2825     }
2826     return 0;
2827 }
2828
2829 static int opt_width(const char *opt, const char *arg)
2830 {
2831     screen_width = parse_number_or_die(opt, arg, OPT_INT64, 1, INT_MAX);
2832     return 0;
2833 }
2834
2835 static int opt_height(const char *opt, const char *arg)
2836 {
2837     screen_height = parse_number_or_die(opt, arg, OPT_INT64, 1, INT_MAX);
2838     return 0;
2839 }
2840
2841 static int opt_format(const char *opt, const char *arg)
2842 {
2843     file_iformat = av_find_input_format(arg);
2844     if (!file_iformat) {
2845         fprintf(stderr, "Unknown input format: %s\n", arg);
2846         return AVERROR(EINVAL);
2847     }
2848     return 0;
2849 }
2850
2851 static int opt_frame_pix_fmt(const char *opt, const char *arg)
2852 {
2853     frame_pix_fmt = av_get_pix_fmt(arg);
2854     return 0;
2855 }
2856
2857 static int opt_sync(const char *opt, const char *arg)
2858 {
2859     if (!strcmp(arg, "audio"))
2860         av_sync_type = AV_SYNC_AUDIO_MASTER;
2861     else if (!strcmp(arg, "video"))
2862         av_sync_type = AV_SYNC_VIDEO_MASTER;
2863     else if (!strcmp(arg, "ext"))
2864         av_sync_type = AV_SYNC_EXTERNAL_CLOCK;
2865     else {
2866         fprintf(stderr, "Unknown value for %s: %s\n", opt, arg);
2867         exit(1);
2868     }
2869     return 0;
2870 }
2871
2872 static int opt_seek(const char *opt, const char *arg)
2873 {
2874     start_time = parse_time_or_die(opt, arg, 1);
2875     return 0;
2876 }
2877
2878 static int opt_duration(const char *opt, const char *arg)
2879 {
2880     duration = parse_time_or_die(opt, arg, 1);
2881     return 0;
2882 }
2883
2884 static int opt_debug(const char *opt, const char *arg)
2885 {
2886     av_log_set_level(99);
2887     debug = parse_number_or_die(opt, arg, OPT_INT64, 0, INT_MAX);
2888     return 0;
2889 }
2890
2891 static int opt_vismv(const char *opt, const char *arg)
2892 {
2893     debug_mv = parse_number_or_die(opt, arg, OPT_INT64, INT_MIN, INT_MAX);
2894     return 0;
2895 }
2896
2897 static int opt_thread_count(const char *opt, const char *arg)
2898 {
2899     thread_count= parse_number_or_die(opt, arg, OPT_INT64, 0, INT_MAX);
2900 #if !HAVE_THREADS
2901     fprintf(stderr, "Warning: not compiled with thread support, using thread emulation\n");
2902 #endif
2903     return 0;
2904 }
2905
2906 static int opt_show_mode(const char *opt, const char *arg)
2907 {
2908     show_mode = !strcmp(arg, "video") ? SHOW_MODE_VIDEO :
2909                 !strcmp(arg, "waves") ? SHOW_MODE_WAVES :
2910                 !strcmp(arg, "rdft" ) ? SHOW_MODE_RDFT  :
2911                 parse_number_or_die(opt, arg, OPT_INT, 0, SHOW_MODE_NB-1);
2912     return 0;
2913 }
2914
/* Command-line option table; cmdutils_common_opts.h contributes the
 * shared generic entries. Each row: name, flags, handler/variable,
 * help text, argument name. */
static const OptionDef options[] = {
#include "cmdutils_common_opts.h"
    /* display geometry */
    { "x", HAS_ARG, {(void*)opt_width}, "force displayed width", "width" },
    { "y", HAS_ARG, {(void*)opt_height}, "force displayed height", "height" },
    { "s", HAS_ARG | OPT_VIDEO, {(void*)opt_frame_size}, "set frame size (WxH or abbreviation)", "size" },
    { "fs", OPT_BOOL, {(void*)&is_full_screen}, "force full screen" },
    /* stream selection */
    { "an", OPT_BOOL, {(void*)&audio_disable}, "disable audio" },
    { "vn", OPT_BOOL, {(void*)&video_disable}, "disable video" },
    { "ast", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&wanted_stream[AVMEDIA_TYPE_AUDIO]}, "select desired audio stream", "stream_number" },
    { "vst", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&wanted_stream[AVMEDIA_TYPE_VIDEO]}, "select desired video stream", "stream_number" },
    { "sst", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&wanted_stream[AVMEDIA_TYPE_SUBTITLE]}, "select desired subtitle stream", "stream_number" },
    /* playback range and input control */
    { "ss", HAS_ARG, {(void*)&opt_seek}, "seek to a given position in seconds", "pos" },
    { "t", HAS_ARG, {(void*)&opt_duration}, "play  \"duration\" seconds of audio/video", "duration" },
    { "bytes", OPT_INT | HAS_ARG, {(void*)&seek_by_bytes}, "seek by bytes 0=off 1=on -1=auto", "val" },
    { "nodisp", OPT_BOOL, {(void*)&display_disable}, "disable graphical display" },
    { "f", HAS_ARG, {(void*)opt_format}, "force format", "fmt" },
    { "pix_fmt", HAS_ARG | OPT_EXPERT | OPT_VIDEO, {(void*)opt_frame_pix_fmt}, "set pixel format", "format" },
    { "stats", OPT_BOOL | OPT_EXPERT, {(void*)&show_status}, "show status", "" },
    /* expert decoding options */
    { "debug", HAS_ARG | OPT_EXPERT, {(void*)opt_debug}, "print specific debug info", "" },
    { "bug", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&workaround_bugs}, "workaround bugs", "" },
    { "vismv", HAS_ARG | OPT_EXPERT, {(void*)opt_vismv}, "visualize motion vectors", "" },
    { "fast", OPT_BOOL | OPT_EXPERT, {(void*)&fast}, "non spec compliant optimizations", "" },
    { "genpts", OPT_BOOL | OPT_EXPERT, {(void*)&genpts}, "generate pts", "" },
    { "drp", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&decoder_reorder_pts}, "let decoder reorder pts 0=off 1=on -1=auto", ""},
    { "lowres", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&lowres}, "", "" },
    { "skiploop", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&skip_loop_filter}, "", "" },
    { "skipframe", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&skip_frame}, "", "" },
    { "skipidct", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&skip_idct}, "", "" },
    { "idct", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&idct}, "set idct algo",  "algo" },
    { "er", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&error_recognition}, "set error detection threshold (0-4)",  "threshold" },
    { "ec", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&error_concealment}, "set error concealment options",  "bit_mask" },
    { "sync", HAS_ARG | OPT_EXPERT, {(void*)opt_sync}, "set audio-video sync. type (type=audio/video/ext)", "type" },
    { "threads", HAS_ARG | OPT_EXPERT, {(void*)opt_thread_count}, "thread count", "count" },
    /* behaviour on exit / UI */
    { "autoexit", OPT_BOOL | OPT_EXPERT, {(void*)&autoexit}, "exit at the end", "" },
    { "exitonkeydown", OPT_BOOL | OPT_EXPERT, {(void*)&exit_on_keydown}, "exit on key down", "" },
    { "exitonmousedown", OPT_BOOL | OPT_EXPERT, {(void*)&exit_on_mousedown}, "exit on mouse down", "" },
    { "loop", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&loop}, "set number of times the playback shall be looped", "loop count" },
    { "framedrop", OPT_BOOL | OPT_EXPERT, {(void*)&framedrop}, "drop frames when cpu is too slow", "" },
    { "window_title", OPT_STRING | HAS_ARG, {(void*)&window_title}, "set window title", "window title" },
#if CONFIG_AVFILTER
    { "vf", OPT_STRING | HAS_ARG, {(void*)&vfilters}, "video filters", "filter list" },
#endif
    { "rdftspeed", OPT_INT | HAS_ARG| OPT_AUDIO | OPT_EXPERT, {(void*)&rdftspeed}, "rdft speed", "msecs" },
    { "showmode", HAS_ARG, {(void*)opt_show_mode}, "select show mode (0 = video, 1 = waves, 2 = RDFT)", "mode" },
    { "default", HAS_ARG | OPT_AUDIO | OPT_VIDEO | OPT_EXPERT, {(void*)opt_default}, "generic catch all option", "" },
    { "i", OPT_DUMMY, {NULL}, "ffmpeg compatibility dummy option", ""},
    { NULL, },
};
2963
static void show_usage(void)
{
    /* One-line synopsis printed for -h and on missing-input errors. */
    printf("Simple media player\n"
           "usage: ffplay [options] input_file\n"
           "\n");
}
2970
/* Print the full help: usage line, the option table (main and advanced
 * sections), the AVOption help for codec/format (and, without avfilter,
 * swscale) contexts, and the interactive key bindings. */
static void show_help(void)
{
    av_log_set_callback(log_callback_help);
    show_usage();
    show_help_options(options, "Main options:\n",
                      OPT_EXPERT, 0);
    show_help_options(options, "\nAdvanced options:\n",
                      OPT_EXPERT, OPT_EXPERT);
    printf("\n");
    av_opt_show2(avcodec_opts[0], NULL,
                 AV_OPT_FLAG_DECODING_PARAM, 0);
    printf("\n");
    av_opt_show2(avformat_opts, NULL,
                 AV_OPT_FLAG_DECODING_PARAM, 0);
#if !CONFIG_AVFILTER
    /* sws options are only shown when avfilter is not compiled in. */
    printf("\n");
    av_opt_show2(sws_opts, NULL,
                 AV_OPT_FLAG_ENCODING_PARAM, 0);
#endif
    printf("\nWhile playing:\n"
           "q, ESC              quit\n"
           "f                   toggle full screen\n"
           "p, SPC              pause\n"
           "a                   cycle audio channel\n"
           "v                   cycle video channel\n"
           "t                   cycle subtitle channel\n"
           "w                   show audio waves\n"
           "s                   activate frame-step mode\n"
           "left/right          seek backward/forward 10 seconds\n"
           "down/up             seek backward/forward 1 minute\n"
           "mouse click         seek to percentage in file corresponding to fraction of width\n"
           );
}
3004
3005 static void opt_input_file(const char *filename)
3006 {
3007     if (input_filename) {
3008         fprintf(stderr, "Argument '%s' provided as input filename, but '%s' was already specified.\n",
3009                 filename, input_filename);
3010         exit(1);
3011     }
3012     if (!strcmp(filename, "-"))
3013         filename = "pipe:";
3014     input_filename = filename;
3015 }
3016
/* Entry point: register libav components, parse the command line,
 * initialize SDL, open the input stream and run the event loop. */
int main(int argc, char **argv)
{
    int flags;

    av_log_set_flags(AV_LOG_SKIP_REPEATED);

    /* register all codecs, demux and protocols */
    avcodec_register_all();
#if CONFIG_AVDEVICE
    avdevice_register_all();
#endif
#if CONFIG_AVFILTER
    avfilter_register_all();
#endif
    av_register_all();

    init_opts();

    show_banner();

    parse_options(argc, argv, options, opt_input_file);

    if (!input_filename) {
        show_usage();
        fprintf(stderr, "An input file must be specified\n");
        fprintf(stderr, "Use -h to get full help or, even better, run 'man ffplay'\n");
        exit(1);
    }

    if (display_disable) {
        video_disable = 1;
    }
    flags = SDL_INIT_VIDEO | SDL_INIT_AUDIO | SDL_INIT_TIMER;
#if !defined(__MINGW32__) && !defined(__APPLE__)
    flags |= SDL_INIT_EVENTTHREAD; /* Not supported on Windows or Mac OS X */
#endif
    if (SDL_Init (flags)) {
        fprintf(stderr, "Could not initialize SDL - %s\n", SDL_GetError());
        exit(1);
    }

    if (!display_disable) {
#if HAVE_SDL_VIDEO_SIZE
        /* Remember the desktop size for full-screen display. */
        const SDL_VideoInfo *vi = SDL_GetVideoInfo();
        fs_screen_width = vi->current_w;
        fs_screen_height = vi->current_h;
#endif
    }

    /* Ignore event classes ffplay does not handle. */
    SDL_EventState(SDL_ACTIVEEVENT, SDL_IGNORE);
    SDL_EventState(SDL_SYSWMEVENT, SDL_IGNORE);
    SDL_EventState(SDL_USEREVENT, SDL_IGNORE);

    /* Sentinel packet — presumably queued to decoder threads as a flush
     * marker after seeks; confirm against the packet queue code. */
    av_init_packet(&flush_pkt);
    flush_pkt.data= "FLUSH";

    cur_stream = stream_open(input_filename, file_iformat);

    event_loop();

    /* never returns */

    return 0;
}