1 /*
2  * ffplay : Simple Media Player based on the FFmpeg libraries
3  * Copyright (c) 2003 Fabrice Bellard
4  *
5  * This file is part of FFmpeg.
6  *
7  * FFmpeg is free software; you can redistribute it and/or
8  * modify it under the terms of the GNU Lesser General Public
9  * License as published by the Free Software Foundation; either
10  * version 2.1 of the License, or (at your option) any later version.
11  *
12  * FFmpeg is distributed in the hope that it will be useful,
13  * but WITHOUT ANY WARRANTY; without even the implied warranty of
14  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
15  * Lesser General Public License for more details.
16  *
17  * You should have received a copy of the GNU Lesser General Public
18  * License along with FFmpeg; if not, write to the Free Software
19  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
20  */
21
22 #include "config.h"
23 #include <inttypes.h>
24 #include <math.h>
25 #include <limits.h>
26 #include "libavutil/avstring.h"
27 #include "libavutil/colorspace.h"
28 #include "libavutil/pixdesc.h"
29 #include "libavutil/imgutils.h"
30 #include "libavutil/dict.h"
31 #include "libavutil/parseutils.h"
32 #include "libavutil/samplefmt.h"
33 #include "libavutil/avassert.h"
34 #include "libavformat/avformat.h"
35 #include "libavdevice/avdevice.h"
36 #include "libswscale/swscale.h"
37 #include "libavcodec/audioconvert.h"
38 #include "libavutil/opt.h"
39 #include "libavcodec/avfft.h"
40
41 #if CONFIG_AVFILTER
42 # include "libavfilter/avcodec.h"
43 # include "libavfilter/avfilter.h"
44 # include "libavfilter/avfiltergraph.h"
45 # include "libavfilter/vsink_buffer.h"
46 #endif
47
48 #include <SDL.h>
49 #include <SDL_thread.h>
50
51 #include "cmdutils.h"
52
53 #include <unistd.h>
54 #include <assert.h>
55
56 const char program_name[] = "ffplay";
57 const int program_birth_year = 2003;
58
59 #define MAX_QUEUE_SIZE (15 * 1024 * 1024)
60 #define MIN_AUDIOQ_SIZE (20 * 16 * 1024)
61 #define MIN_FRAMES 5
62
63 /* SDL audio buffer size, in samples. Should be small to have precise
64    A/V sync as SDL does not have hardware buffer fullness info. */
65 #define SDL_AUDIO_BUFFER_SIZE 1024
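
/* Illustrative arithmetic: 1024 samples at a 44100 Hz sample rate is roughly
   23 ms of audio (1024 / 44100 ~= 0.023 s), so this buffer size trades a
   small, predictable latency for more frequent audio callbacks. */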
66
67 /* no AV sync correction is done if below the AV sync threshold */
68 #define AV_SYNC_THRESHOLD 0.01
69 /* no AV correction is done if too big error */
70 #define AV_NOSYNC_THRESHOLD 10.0
71
72 #define FRAME_SKIP_FACTOR 0.05
73
74 /* maximum audio speed change to get correct sync */
75 #define SAMPLE_CORRECTION_PERCENT_MAX 10
76
77 /* we use about AUDIO_DIFF_AVG_NB A-V differences to make the average */
78 #define AUDIO_DIFF_AVG_NB   20
79
80 /* NOTE: the size must be big enough to compensate for the hardware audio buffer size */
81 #define SAMPLE_ARRAY_SIZE (2*65536)
82
83 static int sws_flags = SWS_BICUBIC;
84
85 typedef struct PacketQueue {
86     AVPacketList *first_pkt, *last_pkt;
87     int nb_packets;
88     int size;
89     int abort_request;
90     SDL_mutex *mutex;
91     SDL_cond *cond;
92 } PacketQueue;
93
94 #define VIDEO_PICTURE_QUEUE_SIZE 2
95 #define SUBPICTURE_QUEUE_SIZE 4
96
97 typedef struct VideoPicture {
98     double pts;                                  ///<presentation time stamp for this picture
99     double target_clock;                         ///<av_gettime() time at which this should be displayed ideally
100     int64_t pos;                                 ///<byte position in file
101     SDL_Overlay *bmp;
102     int width, height; /* source height & width */
103     int allocated;
104     enum PixelFormat pix_fmt;
105
106 #if CONFIG_AVFILTER
107     AVFilterBufferRef *picref;
108 #endif
109 } VideoPicture;
110
111 typedef struct SubPicture {
112     double pts; /* presentation time stamp for this picture */
113     AVSubtitle sub;
114 } SubPicture;
115
116 enum {
117     AV_SYNC_AUDIO_MASTER, /* default choice */
118     AV_SYNC_VIDEO_MASTER,
119     AV_SYNC_EXTERNAL_CLOCK, /* synchronize to an external clock */
120 };
121
122 typedef struct VideoState {
123     SDL_Thread *read_tid;
124     SDL_Thread *video_tid;
125     SDL_Thread *refresh_tid;
126     AVInputFormat *iformat;
127     int no_background;
128     int abort_request;
129     int paused;
130     int last_paused;
131     int seek_req;
132     int seek_flags;
133     int64_t seek_pos;
134     int64_t seek_rel;
135     int read_pause_return;
136     AVFormatContext *ic;
137
138     int audio_stream;
139
140     int av_sync_type;
141     double external_clock; /* external clock base */
142     int64_t external_clock_time;
143
144     double audio_clock;
145     double audio_diff_cum; /* used for AV difference average computation */
146     double audio_diff_avg_coef;
147     double audio_diff_threshold;
148     int audio_diff_avg_count;
149     AVStream *audio_st;
150     PacketQueue audioq;
151     int audio_hw_buf_size;
152     /* samples output by the codec. we reserve more space for avsync
153        compensation */
154     DECLARE_ALIGNED(16,uint8_t,audio_buf1)[(AVCODEC_MAX_AUDIO_FRAME_SIZE * 3) / 2];
155     DECLARE_ALIGNED(16,uint8_t,audio_buf2)[(AVCODEC_MAX_AUDIO_FRAME_SIZE * 3) / 2];
156     uint8_t *audio_buf;
157     unsigned int audio_buf_size; /* in bytes */
158     int audio_buf_index; /* in bytes */
159     AVPacket audio_pkt_temp;
160     AVPacket audio_pkt;
161     enum AVSampleFormat audio_src_fmt;
162     AVAudioConvert *reformat_ctx;
163
164     enum ShowMode {
165         SHOW_MODE_NONE = -1, SHOW_MODE_VIDEO = 0, SHOW_MODE_WAVES, SHOW_MODE_RDFT, SHOW_MODE_NB
166     } show_mode;
167     int16_t sample_array[SAMPLE_ARRAY_SIZE];
168     int sample_array_index;
169     int last_i_start;
170     RDFTContext *rdft;
171     int rdft_bits;
172     FFTSample *rdft_data;
173     int xpos;
174
175     SDL_Thread *subtitle_tid;
176     int subtitle_stream;
177     int subtitle_stream_changed;
178     AVStream *subtitle_st;
179     PacketQueue subtitleq;
180     SubPicture subpq[SUBPICTURE_QUEUE_SIZE];
181     int subpq_size, subpq_rindex, subpq_windex;
182     SDL_mutex *subpq_mutex;
183     SDL_cond *subpq_cond;
184
185     double frame_timer;
186     double frame_last_pts;
187     double frame_last_delay;
188     double video_clock;                          ///<pts of last decoded frame / predicted pts of next decoded frame
189     int video_stream;
190     AVStream *video_st;
191     PacketQueue videoq;
192     double video_current_pts;                    ///<current displayed pts (different from video_clock if frame fifos are used)
193     double video_current_pts_drift;              ///<video_current_pts - time (av_gettime) at which we updated video_current_pts - used to have running video pts
194     int64_t video_current_pos;                   ///<current displayed file pos
195     VideoPicture pictq[VIDEO_PICTURE_QUEUE_SIZE];
196     int pictq_size, pictq_rindex, pictq_windex;
197     SDL_mutex *pictq_mutex;
198     SDL_cond *pictq_cond;
199 #if !CONFIG_AVFILTER
200     struct SwsContext *img_convert_ctx;
201 #endif
202
203     char filename[1024];
204     int width, height, xleft, ytop;
205
206 #if CONFIG_AVFILTER
207     AVFilterContext *out_video_filter;          ///<the last filter in the video chain
208 #endif
209
210     float skip_frames;
211     float skip_frames_index;
212     int refresh;
213 } VideoState;
214
215 static void show_help(void);
216
217 /* options specified by the user */
218 static AVInputFormat *file_iformat;
219 static const char *input_filename;
220 static const char *window_title;
221 static int fs_screen_width;
222 static int fs_screen_height;
223 static int screen_width = 0;
224 static int screen_height = 0;
225 static int frame_width = 0;
226 static int frame_height = 0;
227 static enum PixelFormat frame_pix_fmt = PIX_FMT_NONE;
228 static int audio_disable;
229 static int video_disable;
230 static int wanted_stream[AVMEDIA_TYPE_NB]={
231     [AVMEDIA_TYPE_AUDIO]=-1,
232     [AVMEDIA_TYPE_VIDEO]=-1,
233     [AVMEDIA_TYPE_SUBTITLE]=-1,
234 };
235 static int seek_by_bytes=-1;
236 static int display_disable;
237 static int show_status = 1;
238 static int av_sync_type = AV_SYNC_AUDIO_MASTER;
239 static int64_t start_time = AV_NOPTS_VALUE;
240 static int64_t duration = AV_NOPTS_VALUE;
241 static int step = 0;
242 static int thread_count = 1;
243 static int workaround_bugs = 1;
244 static int fast = 0;
245 static int genpts = 0;
246 static int lowres = 0;
247 static int idct = FF_IDCT_AUTO;
248 static enum AVDiscard skip_frame= AVDISCARD_DEFAULT;
249 static enum AVDiscard skip_idct= AVDISCARD_DEFAULT;
250 static enum AVDiscard skip_loop_filter= AVDISCARD_DEFAULT;
251 static int error_recognition = FF_ER_CAREFUL;
252 static int error_concealment = 3;
253 static int decoder_reorder_pts= -1;
254 static int autoexit;
255 static int exit_on_keydown;
256 static int exit_on_mousedown;
257 static int loop=1;
258 static int framedrop=-1;
259 static enum ShowMode show_mode = SHOW_MODE_NONE;
260
261 static int rdftspeed=20;
262 #if CONFIG_AVFILTER
263 static char *vfilters = NULL;
264 #endif
265
266 /* current context */
267 static int is_full_screen;
268 static VideoState *cur_stream;
269 static int64_t audio_callback_time;
270
271 static AVPacket flush_pkt;
272
273 #define FF_ALLOC_EVENT   (SDL_USEREVENT)
274 #define FF_REFRESH_EVENT (SDL_USEREVENT + 1)
275 #define FF_QUIT_EVENT    (SDL_USEREVENT + 2)
276
277 static SDL_Surface *screen;
278
279 static int packet_queue_put(PacketQueue *q, AVPacket *pkt)
280 {
281     AVPacketList *pkt1;
282
283     /* duplicate the packet */
284     if (pkt!=&flush_pkt && av_dup_packet(pkt) < 0)
285         return -1;
286
287     pkt1 = av_malloc(sizeof(AVPacketList));
288     if (!pkt1)
289         return -1;
290     pkt1->pkt = *pkt;
291     pkt1->next = NULL;
292
293
294     SDL_LockMutex(q->mutex);
295
296     if (!q->last_pkt)
297
298         q->first_pkt = pkt1;
299     else
300         q->last_pkt->next = pkt1;
301     q->last_pkt = pkt1;
302     q->nb_packets++;
303     q->size += pkt1->pkt.size + sizeof(*pkt1);
304     /* XXX: should duplicate packet data in DV case */
305     SDL_CondSignal(q->cond);
306
307     SDL_UnlockMutex(q->mutex);
308     return 0;
309 }
310
311 /* packet queue handling */
312 static void packet_queue_init(PacketQueue *q)
313 {
314     memset(q, 0, sizeof(PacketQueue));
315     q->mutex = SDL_CreateMutex();
316     q->cond = SDL_CreateCond();
317     packet_queue_put(q, &flush_pkt);
318 }
319
320 static void packet_queue_flush(PacketQueue *q)
321 {
322     AVPacketList *pkt, *pkt1;
323
324     SDL_LockMutex(q->mutex);
325     for(pkt = q->first_pkt; pkt != NULL; pkt = pkt1) {
326         pkt1 = pkt->next;
327         av_free_packet(&pkt->pkt);
328         av_freep(&pkt);
329     }
330     q->last_pkt = NULL;
331     q->first_pkt = NULL;
332     q->nb_packets = 0;
333     q->size = 0;
334     SDL_UnlockMutex(q->mutex);
335 }
336
337 static void packet_queue_end(PacketQueue *q)
338 {
339     packet_queue_flush(q);
340     SDL_DestroyMutex(q->mutex);
341     SDL_DestroyCond(q->cond);
342 }
343
344 static void packet_queue_abort(PacketQueue *q)
345 {
346     SDL_LockMutex(q->mutex);
347
348     q->abort_request = 1;
349
350     SDL_CondSignal(q->cond);
351
352     SDL_UnlockMutex(q->mutex);
353 }
354
355 /* return < 0 if aborted, 0 if no packet and > 0 if packet.  */
356 static int packet_queue_get(PacketQueue *q, AVPacket *pkt, int block)
357 {
358     AVPacketList *pkt1;
359     int ret;
360
361     SDL_LockMutex(q->mutex);
362
363     for(;;) {
364         if (q->abort_request) {
365             ret = -1;
366             break;
367         }
368
369         pkt1 = q->first_pkt;
370         if (pkt1) {
371             q->first_pkt = pkt1->next;
372             if (!q->first_pkt)
373                 q->last_pkt = NULL;
374             q->nb_packets--;
375             q->size -= pkt1->pkt.size + sizeof(*pkt1);
376             *pkt = pkt1->pkt;
377             av_free(pkt1);
378             ret = 1;
379             break;
380         } else if (!block) {
381             ret = 0;
382             break;
383         } else {
384             SDL_CondWait(q->cond, q->mutex);
385         }
386     }
387     SDL_UnlockMutex(q->mutex);
388     return ret;
389 }
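
/* Illustrative sketch of how the queue above is meant to be consumed: a
 * decoder thread blocks on packet_queue_get() and stops once the queue is
 * aborted.  The function name and the AVCodecContext parameter are
 * hypothetical; only the packet_queue_*() calls come from this file. */
#if 0
static int example_packet_consumer(PacketQueue *q, AVCodecContext *avctx)
{
    AVPacket pkt;

    for (;;) {
        /* block until a packet is available, or the queue is aborted */
        if (packet_queue_get(q, &pkt, 1) < 0)
            return -1;                      /* abort requested */
        if (pkt.data == flush_pkt.data) {
            /* a seek was requested: drop the decoder state */
            avcodec_flush_buffers(avctx);
            continue;
        }
        /* ... decode pkt here ... */
        av_free_packet(&pkt);
    }
}
#endif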
390
391 static inline void fill_rectangle(SDL_Surface *screen,
392                                   int x, int y, int w, int h, int color)
393 {
394     SDL_Rect rect;
395     rect.x = x;
396     rect.y = y;
397     rect.w = w;
398     rect.h = h;
399     SDL_FillRect(screen, &rect, color);
400 }
401
402 #define ALPHA_BLEND(a, oldp, newp, s)\
403 ((((oldp << s) * (255 - (a))) + (newp * (a))) / (255 << s))
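
/* Illustrative arithmetic: with s == 0 this reduces to the usual alpha blend
   (oldp * (255 - a) + newp * a) / 255, so a == 0 keeps oldp and a == 255
   yields newp.  The shift s compensates for chroma values that blend_subrect()
   below accumulates over 2 (s == 1) or 4 (s == 2) samples. */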
404
405 #define RGBA_IN(r, g, b, a, s)\
406 {\
407     unsigned int v = ((const uint32_t *)(s))[0];\
408     a = (v >> 24) & 0xff;\
409     r = (v >> 16) & 0xff;\
410     g = (v >> 8) & 0xff;\
411     b = v & 0xff;\
412 }
413
414 #define YUVA_IN(y, u, v, a, s, pal)\
415 {\
416     unsigned int val = ((const uint32_t *)(pal))[*(const uint8_t*)(s)];\
417     a = (val >> 24) & 0xff;\
418     y = (val >> 16) & 0xff;\
419     u = (val >> 8) & 0xff;\
420     v = val & 0xff;\
421 }
422
423 #define YUVA_OUT(d, y, u, v, a)\
424 {\
425     ((uint32_t *)(d))[0] = (a << 24) | (y << 16) | (u << 8) | v;\
426 }
427
428
429 #define BPP 1
430
431 static void blend_subrect(AVPicture *dst, const AVSubtitleRect *rect, int imgw, int imgh)
432 {
433     int wrap, wrap3, width2, skip2;
434     int y, u, v, a, u1, v1, a1, w, h;
435     uint8_t *lum, *cb, *cr;
436     const uint8_t *p;
437     const uint32_t *pal;
438     int dstx, dsty, dstw, dsth;
439
440     dstw = av_clip(rect->w, 0, imgw);
441     dsth = av_clip(rect->h, 0, imgh);
442     dstx = av_clip(rect->x, 0, imgw - dstw);
443     dsty = av_clip(rect->y, 0, imgh - dsth);
444     lum = dst->data[0] + dsty * dst->linesize[0];
445     cb = dst->data[1] + (dsty >> 1) * dst->linesize[1];
446     cr = dst->data[2] + (dsty >> 1) * dst->linesize[2];
447
448     width2 = ((dstw + 1) >> 1) + (dstx & ~dstw & 1);
449     skip2 = dstx >> 1;
450     wrap = dst->linesize[0];
451     wrap3 = rect->pict.linesize[0];
452     p = rect->pict.data[0];
453     pal = (const uint32_t *)rect->pict.data[1];  /* Now in YCrCb! */
454
455     if (dsty & 1) {
456         lum += dstx;
457         cb += skip2;
458         cr += skip2;
459
460         if (dstx & 1) {
461             YUVA_IN(y, u, v, a, p, pal);
462             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
463             cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
464             cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
465             cb++;
466             cr++;
467             lum++;
468             p += BPP;
469         }
470         for(w = dstw - (dstx & 1); w >= 2; w -= 2) {
471             YUVA_IN(y, u, v, a, p, pal);
472             u1 = u;
473             v1 = v;
474             a1 = a;
475             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
476
477             YUVA_IN(y, u, v, a, p + BPP, pal);
478             u1 += u;
479             v1 += v;
480             a1 += a;
481             lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
482             cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
483             cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
484             cb++;
485             cr++;
486             p += 2 * BPP;
487             lum += 2;
488         }
489         if (w) {
490             YUVA_IN(y, u, v, a, p, pal);
491             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
492             cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
493             cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
494             p++;
495             lum++;
496         }
497         p += wrap3 - dstw * BPP;
498         lum += wrap - dstw - dstx;
499         cb += dst->linesize[1] - width2 - skip2;
500         cr += dst->linesize[2] - width2 - skip2;
501     }
502     for(h = dsth - (dsty & 1); h >= 2; h -= 2) {
503         lum += dstx;
504         cb += skip2;
505         cr += skip2;
506
507         if (dstx & 1) {
508             YUVA_IN(y, u, v, a, p, pal);
509             u1 = u;
510             v1 = v;
511             a1 = a;
512             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
513             p += wrap3;
514             lum += wrap;
515             YUVA_IN(y, u, v, a, p, pal);
516             u1 += u;
517             v1 += v;
518             a1 += a;
519             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
520             cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
521             cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
522             cb++;
523             cr++;
524             p += -wrap3 + BPP;
525             lum += -wrap + 1;
526         }
527         for(w = dstw - (dstx & 1); w >= 2; w -= 2) {
528             YUVA_IN(y, u, v, a, p, pal);
529             u1 = u;
530             v1 = v;
531             a1 = a;
532             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
533
534             YUVA_IN(y, u, v, a, p + BPP, pal);
535             u1 += u;
536             v1 += v;
537             a1 += a;
538             lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
539             p += wrap3;
540             lum += wrap;
541
542             YUVA_IN(y, u, v, a, p, pal);
543             u1 += u;
544             v1 += v;
545             a1 += a;
546             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
547
548             YUVA_IN(y, u, v, a, p + BPP, pal);
549             u1 += u;
550             v1 += v;
551             a1 += a;
552             lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
553
554             cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 2);
555             cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 2);
556
557             cb++;
558             cr++;
559             p += -wrap3 + 2 * BPP;
560             lum += -wrap + 2;
561         }
562         if (w) {
563             YUVA_IN(y, u, v, a, p, pal);
564             u1 = u;
565             v1 = v;
566             a1 = a;
567             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
568             p += wrap3;
569             lum += wrap;
570             YUVA_IN(y, u, v, a, p, pal);
571             u1 += u;
572             v1 += v;
573             a1 += a;
574             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
575             cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
576             cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
577             cb++;
578             cr++;
579             p += -wrap3 + BPP;
580             lum += -wrap + 1;
581         }
582         p += wrap3 + (wrap3 - dstw * BPP);
583         lum += wrap + (wrap - dstw - dstx);
584         cb += dst->linesize[1] - width2 - skip2;
585         cr += dst->linesize[2] - width2 - skip2;
586     }
587     /* handle odd height */
588     if (h) {
589         lum += dstx;
590         cb += skip2;
591         cr += skip2;
592
593         if (dstx & 1) {
594             YUVA_IN(y, u, v, a, p, pal);
595             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
596             cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
597             cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
598             cb++;
599             cr++;
600             lum++;
601             p += BPP;
602         }
603         for(w = dstw - (dstx & 1); w >= 2; w -= 2) {
604             YUVA_IN(y, u, v, a, p, pal);
605             u1 = u;
606             v1 = v;
607             a1 = a;
608             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
609
610             YUVA_IN(y, u, v, a, p + BPP, pal);
611             u1 += u;
612             v1 += v;
613             a1 += a;
614             lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
615             cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
616             cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
617             cb++;
618             cr++;
619             p += 2 * BPP;
620             lum += 2;
621         }
622         if (w) {
623             YUVA_IN(y, u, v, a, p, pal);
624             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
625             cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
626             cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
627         }
628     }
629 }
630
631 static void free_subpicture(SubPicture *sp)
632 {
633     avsubtitle_free(&sp->sub);
634 }
635
636 static void video_image_display(VideoState *is)
637 {
638     VideoPicture *vp;
639     SubPicture *sp;
640     AVPicture pict;
641     float aspect_ratio;
642     int width, height, x, y;
643     SDL_Rect rect;
644     int i;
645
646     vp = &is->pictq[is->pictq_rindex];
647     if (vp->bmp) {
648 #if CONFIG_AVFILTER
649          if (vp->picref->video->sample_aspect_ratio.num == 0)
650              aspect_ratio = 0;
651          else
652              aspect_ratio = av_q2d(vp->picref->video->sample_aspect_ratio);
653 #else
654
655         /* XXX: use variable in the frame */
656         if (is->video_st->sample_aspect_ratio.num)
657             aspect_ratio = av_q2d(is->video_st->sample_aspect_ratio);
658         else if (is->video_st->codec->sample_aspect_ratio.num)
659             aspect_ratio = av_q2d(is->video_st->codec->sample_aspect_ratio);
660         else
661             aspect_ratio = 0;
662 #endif
663         if (aspect_ratio <= 0.0)
664             aspect_ratio = 1.0;
665         aspect_ratio *= (float)vp->width / (float)vp->height;
666
667         if (is->subtitle_st) {
668             if (is->subpq_size > 0) {
669                 sp = &is->subpq[is->subpq_rindex];
670
671                 if (vp->pts >= sp->pts + ((float) sp->sub.start_display_time / 1000)) {
672                     SDL_LockYUVOverlay (vp->bmp);
673
674                     pict.data[0] = vp->bmp->pixels[0];
675                     pict.data[1] = vp->bmp->pixels[2];
676                     pict.data[2] = vp->bmp->pixels[1];
677
678                     pict.linesize[0] = vp->bmp->pitches[0];
679                     pict.linesize[1] = vp->bmp->pitches[2];
680                     pict.linesize[2] = vp->bmp->pitches[1];
681
682                     for (i = 0; i < sp->sub.num_rects; i++)
683                         blend_subrect(&pict, sp->sub.rects[i],
684                                       vp->bmp->w, vp->bmp->h);
685
686                     SDL_UnlockYUVOverlay (vp->bmp);
687                 }
688             }
689         }
690
691
692         /* XXX: we assume the screen has a 1:1 pixel aspect ratio */
693         height = is->height;
694         width = ((int)rint(height * aspect_ratio)) & ~1;
695         if (width > is->width) {
696             width = is->width;
697             height = ((int)rint(width / aspect_ratio)) & ~1;
698         }
699         x = (is->width - width) / 2;
700         y = (is->height - height) / 2;
701         is->no_background = 0;
702         rect.x = is->xleft + x;
703         rect.y = is->ytop  + y;
704         rect.w = FFMAX(width,  1);
705         rect.h = FFMAX(height, 1);
706         SDL_DisplayYUVOverlay(vp->bmp, &rect);
707     }
708 }
709
710 /* get the remaining size of the current audio output buffer, in bytes.
711    With SDL, we cannot get precise hardware buffer fullness information */
712 static int audio_write_get_buf_size(VideoState *is)
713 {
714     return is->audio_buf_size - is->audio_buf_index;
715 }
716
717 static inline int compute_mod(int a, int b)
718 {
719     return a < 0 ? a%b + b : a%b;
720 }
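
/* Illustrative arithmetic: compute_mod() is a floored modulo, so the result
   always lands in [0, b).  For example compute_mod(-3, SAMPLE_ARRAY_SIZE)
   yields SAMPLE_ARRAY_SIZE - 3, which is what the circular indexing into
   sample_array below relies on. */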
721
722 static void video_audio_display(VideoState *s)
723 {
724     int i, i_start, x, y1, y, ys, delay, n, nb_display_channels;
725     int ch, channels, h, h2, bgcolor, fgcolor;
726     int64_t time_diff;
727     int rdft_bits, nb_freq;
728
729     for(rdft_bits=1; (1<<rdft_bits)<2*s->height; rdft_bits++)
730         ;
731     nb_freq= 1<<(rdft_bits-1);
732
733     /* compute display index: center on currently output samples */
734     channels = s->audio_st->codec->channels;
735     nb_display_channels = channels;
736     if (!s->paused) {
737         int data_used= s->show_mode == SHOW_MODE_WAVES ? s->width : (2*nb_freq);
738         n = 2 * channels;
739         delay = audio_write_get_buf_size(s);
740         delay /= n;
741
742         /* to be more precise, we take into account the time spent since
743            the last buffer computation */
744         if (audio_callback_time) {
745             time_diff = av_gettime() - audio_callback_time;
746             delay -= (time_diff * s->audio_st->codec->sample_rate) / 1000000;
747         }
748
749         delay += 2*data_used;
750         if (delay < data_used)
751             delay = data_used;
752
753         i_start= x = compute_mod(s->sample_array_index - delay * channels, SAMPLE_ARRAY_SIZE);
754         if (s->show_mode == SHOW_MODE_WAVES) {
755             h= INT_MIN;
756             for(i=0; i<1000; i+=channels){
757                 int idx= (SAMPLE_ARRAY_SIZE + x - i) % SAMPLE_ARRAY_SIZE;
758                 int a= s->sample_array[idx];
759                 int b= s->sample_array[(idx + 4*channels)%SAMPLE_ARRAY_SIZE];
760                 int c= s->sample_array[(idx + 5*channels)%SAMPLE_ARRAY_SIZE];
761                 int d= s->sample_array[(idx + 9*channels)%SAMPLE_ARRAY_SIZE];
762                 int score= a-d;
763                 if(h<score && (b^c)<0){
764                     h= score;
765                     i_start= idx;
766                 }
767             }
768         }
769
770         s->last_i_start = i_start;
771     } else {
772         i_start = s->last_i_start;
773     }
774
775     bgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0x00);
776     if (s->show_mode == SHOW_MODE_WAVES) {
777         fill_rectangle(screen,
778                        s->xleft, s->ytop, s->width, s->height,
779                        bgcolor);
780
781         fgcolor = SDL_MapRGB(screen->format, 0xff, 0xff, 0xff);
782
783         /* total height for one channel */
784         h = s->height / nb_display_channels;
785         /* graph height / 2 */
786         h2 = (h * 9) / 20;
787         for(ch = 0;ch < nb_display_channels; ch++) {
788             i = i_start + ch;
789             y1 = s->ytop + ch * h + (h / 2); /* position of center line */
790             for(x = 0; x < s->width; x++) {
791                 y = (s->sample_array[i] * h2) >> 15;
792                 if (y < 0) {
793                     y = -y;
794                     ys = y1 - y;
795                 } else {
796                     ys = y1;
797                 }
798                 fill_rectangle(screen,
799                                s->xleft + x, ys, 1, y,
800                                fgcolor);
801                 i += channels;
802                 if (i >= SAMPLE_ARRAY_SIZE)
803                     i -= SAMPLE_ARRAY_SIZE;
804             }
805         }
806
807         fgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0xff);
808
809         for(ch = 1;ch < nb_display_channels; ch++) {
810             y = s->ytop + ch * h;
811             fill_rectangle(screen,
812                            s->xleft, y, s->width, 1,
813                            fgcolor);
814         }
815         SDL_UpdateRect(screen, s->xleft, s->ytop, s->width, s->height);
816     }else{
817         nb_display_channels= FFMIN(nb_display_channels, 2);
818         if(rdft_bits != s->rdft_bits){
819             av_rdft_end(s->rdft);
820             av_free(s->rdft_data);
821             s->rdft = av_rdft_init(rdft_bits, DFT_R2C);
822             s->rdft_bits= rdft_bits;
823             s->rdft_data= av_malloc(4*nb_freq*sizeof(*s->rdft_data));
824         }
825         {
826             FFTSample *data[2];
827             for(ch = 0;ch < nb_display_channels; ch++) {
828                 data[ch] = s->rdft_data + 2*nb_freq*ch;
829                 i = i_start + ch;
830                 for(x = 0; x < 2*nb_freq; x++) {
831                     double w= (x-nb_freq)*(1.0/nb_freq);
832                     data[ch][x]= s->sample_array[i]*(1.0-w*w);
833                     i += channels;
834                     if (i >= SAMPLE_ARRAY_SIZE)
835                         i -= SAMPLE_ARRAY_SIZE;
836                 }
837                 av_rdft_calc(s->rdft, data[ch]);
838             }
839             // Least efficient way to do this; we should of course access the data directly, but it's more than fast enough
840             for(y=0; y<s->height; y++){
841                 double w= 1/sqrt(nb_freq);
842                 int a= sqrt(w*sqrt(data[0][2*y+0]*data[0][2*y+0] + data[0][2*y+1]*data[0][2*y+1]));
843                 int b= (nb_display_channels == 2 ) ? sqrt(w*sqrt(data[1][2*y+0]*data[1][2*y+0]
844                        + data[1][2*y+1]*data[1][2*y+1])) : a;
845                 a= FFMIN(a,255);
846                 b= FFMIN(b,255);
847                 fgcolor = SDL_MapRGB(screen->format, a, b, (a+b)/2);
848
849                 fill_rectangle(screen,
850                             s->xpos, s->height-y, 1, 1,
851                             fgcolor);
852             }
853         }
854         SDL_UpdateRect(screen, s->xpos, s->ytop, 1, s->height);
855         s->xpos++;
856         if(s->xpos >= s->width)
857             s->xpos= s->xleft;
858     }
859 }
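
/* Illustrative arithmetic: the loop at the top of video_audio_display() picks
   the smallest rdft_bits with 2^rdft_bits >= 2 * height.  For a 480 pixel
   high window that gives rdft_bits == 10 (1024 >= 960) and nb_freq == 512,
   i.e. at least one frequency bin per displayed row. */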
860
861 static void stream_close(VideoState *is)
862 {
863     VideoPicture *vp;
864     int i;
865     /* XXX: use a special url_shutdown call to abort parse cleanly */
866     is->abort_request = 1;
867     SDL_WaitThread(is->read_tid, NULL);
868     SDL_WaitThread(is->refresh_tid, NULL);
869
870     /* free all pictures */
871     for(i=0;i<VIDEO_PICTURE_QUEUE_SIZE; i++) {
872         vp = &is->pictq[i];
873 #if CONFIG_AVFILTER
874         if (vp->picref) {
875             avfilter_unref_buffer(vp->picref);
876             vp->picref = NULL;
877         }
878 #endif
879         if (vp->bmp) {
880             SDL_FreeYUVOverlay(vp->bmp);
881             vp->bmp = NULL;
882         }
883     }
884     SDL_DestroyMutex(is->pictq_mutex);
885     SDL_DestroyCond(is->pictq_cond);
886     SDL_DestroyMutex(is->subpq_mutex);
887     SDL_DestroyCond(is->subpq_cond);
888 #if !CONFIG_AVFILTER
889     if (is->img_convert_ctx)
890         sws_freeContext(is->img_convert_ctx);
891 #endif
892     av_free(is);
893 }
894
895 static void do_exit(void)
896 {
897     if (cur_stream) {
898         stream_close(cur_stream);
899         cur_stream = NULL;
900     }
901     uninit_opts();
902 #if CONFIG_AVFILTER
903     avfilter_uninit();
904 #endif
905     if (show_status)
906         printf("\n");
907     SDL_Quit();
908     av_log(NULL, AV_LOG_QUIET, "%s", "");
909     exit(0);
910 }
911
912 static int video_open(VideoState *is){
913     int flags = SDL_HWSURFACE|SDL_ASYNCBLIT|SDL_HWACCEL;
914     int w,h;
915
916     if(is_full_screen) flags |= SDL_FULLSCREEN;
917     else               flags |= SDL_RESIZABLE;
918
919     if (is_full_screen && fs_screen_width) {
920         w = fs_screen_width;
921         h = fs_screen_height;
922     } else if(!is_full_screen && screen_width){
923         w = screen_width;
924         h = screen_height;
925 #if CONFIG_AVFILTER
926     }else if (is->out_video_filter && is->out_video_filter->inputs[0]){
927         w = is->out_video_filter->inputs[0]->w;
928         h = is->out_video_filter->inputs[0]->h;
929 #else
930     }else if (is->video_st && is->video_st->codec->width){
931         w = is->video_st->codec->width;
932         h = is->video_st->codec->height;
933 #endif
934     } else {
935         w = 640;
936         h = 480;
937     }
938     if(screen && is->width == screen->w && screen->w == w
939        && is->height== screen->h && screen->h == h)
940         return 0;
941
942 #ifndef __APPLE__
943     screen = SDL_SetVideoMode(w, h, 0, flags);
944 #else
945     /* setting bits_per_pixel = 0 or 32 causes blank video on OS X */
946     screen = SDL_SetVideoMode(w, h, 24, flags);
947 #endif
948     if (!screen) {
949         fprintf(stderr, "SDL: could not set video mode - exiting\n");
950         do_exit();
951     }
952     if (!window_title)
953         window_title = input_filename;
954     SDL_WM_SetCaption(window_title, window_title);
955
956     is->width = screen->w;
957     is->height = screen->h;
958
959     return 0;
960 }
961
962 /* display the current picture, if any */
963 static void video_display(VideoState *is)
964 {
965     if(!screen)
966         video_open(cur_stream);
967     if (is->audio_st && is->show_mode != SHOW_MODE_VIDEO)
968         video_audio_display(is);
969     else if (is->video_st)
970         video_image_display(is);
971 }
972
973 static int refresh_thread(void *opaque)
974 {
975     VideoState *is= opaque;
976     while(!is->abort_request){
977         SDL_Event event;
978         event.type = FF_REFRESH_EVENT;
979         event.user.data1 = opaque;
980         if(!is->refresh){
981             is->refresh=1;
982             SDL_PushEvent(&event);
983         }
984         // FIXME: ideally we should wait for the correct time, but SDL's event passing is so slow it would be silly
985         usleep(is->audio_st && is->show_mode != SHOW_MODE_VIDEO ? rdftspeed*1000 : 5000);
986     }
987     return 0;
988 }
989
990 /* get the current audio clock value */
991 static double get_audio_clock(VideoState *is)
992 {
993     double pts;
994     int hw_buf_size, bytes_per_sec;
995     pts = is->audio_clock;
996     hw_buf_size = audio_write_get_buf_size(is);
997     bytes_per_sec = 0;
998     if (is->audio_st) {
999         bytes_per_sec = is->audio_st->codec->sample_rate *
1000             2 * is->audio_st->codec->channels;
1001     }
1002     if (bytes_per_sec)
1003         pts -= (double)hw_buf_size / bytes_per_sec;
1004     return pts;
1005 }
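
/* Illustrative arithmetic: the correction above converts the bytes not yet
   written to the audio device into seconds.  For 44100 Hz stereo 16-bit
   audio, bytes_per_sec is 44100 * 2 * 2 == 176400, so 8192 buffered bytes
   move the returned clock back by roughly 46 ms. */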
1006
1007 /* get the current video clock value */
1008 static double get_video_clock(VideoState *is)
1009 {
1010     if (is->paused) {
1011         return is->video_current_pts;
1012     } else {
1013         return is->video_current_pts_drift + av_gettime() / 1000000.0;
1014     }
1015 }
1016
1017 /* get the current external clock value */
1018 static double get_external_clock(VideoState *is)
1019 {
1020     int64_t ti;
1021     ti = av_gettime();
1022     return is->external_clock + ((ti - is->external_clock_time) * 1e-6);
1023 }
1024
1025 /* get the current master clock value */
1026 static double get_master_clock(VideoState *is)
1027 {
1028     double val;
1029
1030     if (is->av_sync_type == AV_SYNC_VIDEO_MASTER) {
1031         if (is->video_st)
1032             val = get_video_clock(is);
1033         else
1034             val = get_audio_clock(is);
1035     } else if (is->av_sync_type == AV_SYNC_AUDIO_MASTER) {
1036         if (is->audio_st)
1037             val = get_audio_clock(is);
1038         else
1039             val = get_video_clock(is);
1040     } else {
1041         val = get_external_clock(is);
1042     }
1043     return val;
1044 }
1045
1046 /* seek in the stream */
1047 static void stream_seek(VideoState *is, int64_t pos, int64_t rel, int seek_by_bytes)
1048 {
1049     if (!is->seek_req) {
1050         is->seek_pos = pos;
1051         is->seek_rel = rel;
1052         is->seek_flags &= ~AVSEEK_FLAG_BYTE;
1053         if (seek_by_bytes)
1054             is->seek_flags |= AVSEEK_FLAG_BYTE;
1055         is->seek_req = 1;
1056     }
1057 }
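
/* Illustrative sketch of issuing a relative 10 second seek by timestamp;
 * the function name is hypothetical, and pos/rel are in AV_TIME_BASE units
 * when not seeking by bytes. */
#if 0
static void example_seek_forward_10s(void)
{
    double incr = 10.0;
    double pos  = get_master_clock(cur_stream) + incr;

    stream_seek(cur_stream, (int64_t)(pos * AV_TIME_BASE),
                (int64_t)(incr * AV_TIME_BASE), 0);
}
#endif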
1058
1059 /* pause or resume the video */
1060 static void stream_toggle_pause(VideoState *is)
1061 {
1062     if (is->paused) {
1063         is->frame_timer += av_gettime() / 1000000.0 + is->video_current_pts_drift - is->video_current_pts;
1064         if(is->read_pause_return != AVERROR(ENOSYS)){
1065             is->video_current_pts = is->video_current_pts_drift + av_gettime() / 1000000.0;
1066         }
1067         is->video_current_pts_drift = is->video_current_pts - av_gettime() / 1000000.0;
1068     }
1069     is->paused = !is->paused;
1070 }
1071
1072 static double compute_target_time(double frame_current_pts, VideoState *is)
1073 {
1074     double delay, sync_threshold, diff;
1075
1076     /* compute nominal delay */
1077     delay = frame_current_pts - is->frame_last_pts;
1078     if (delay <= 0 || delay >= 10.0) {
1079         /* if incorrect delay, use previous one */
1080         delay = is->frame_last_delay;
1081     } else {
1082         is->frame_last_delay = delay;
1083     }
1084     is->frame_last_pts = frame_current_pts;
1085
1086     /* update delay to follow master synchronisation source */
1087     if (((is->av_sync_type == AV_SYNC_AUDIO_MASTER && is->audio_st) ||
1088          is->av_sync_type == AV_SYNC_EXTERNAL_CLOCK)) {
1089         /* if video is slave, we try to correct big delays by
1090            duplicating or deleting a frame */
1091         diff = get_video_clock(is) - get_master_clock(is);
1092
1093         /* skip or repeat frame. We take into account the
1094            delay to compute the threshold. I still don't know
1095            if it is the best guess */
1096         sync_threshold = FFMAX(AV_SYNC_THRESHOLD, delay);
1097         if (fabs(diff) < AV_NOSYNC_THRESHOLD) {
1098             if (diff <= -sync_threshold)
1099                 delay = 0;
1100             else if (diff >= sync_threshold)
1101                 delay = 2 * delay;
1102         }
1103     }
1104     is->frame_timer += delay;
1105
1106     av_dlog(NULL, "video: delay=%0.3f pts=%0.3f A-V=%f\n",
1107             delay, frame_current_pts, -diff);
1108
1109     return is->frame_timer;
1110 }
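
/* Illustrative arithmetic: suppose the nominal delay is 40 ms and video is
   100 ms behind the master clock (diff == -0.1).  Then sync_threshold is
   FFMAX(0.01, 0.04) == 0.04 and diff <= -sync_threshold, so the delay is
   forced to 0 and the frame is shown immediately to catch up; had video been
   100 ms ahead, the delay would instead have been doubled to 80 ms. */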
1111
1112 /* called to display each frame */
1113 static void video_refresh(void *opaque)
1114 {
1115     VideoState *is = opaque;
1116     VideoPicture *vp;
1117
1118     SubPicture *sp, *sp2;
1119
1120     if (is->video_st) {
1121 retry:
1122         if (is->pictq_size == 0) {
1123             // nothing to do: no picture to display in the queue
1124         } else {
1125             double time= av_gettime()/1000000.0;
1126             double next_target;
1127             /* dequeue the picture */
1128             vp = &is->pictq[is->pictq_rindex];
1129
1130             if(time < vp->target_clock)
1131                 return;
1132             /* update current video pts */
1133             is->video_current_pts = vp->pts;
1134             is->video_current_pts_drift = is->video_current_pts - time;
1135             is->video_current_pos = vp->pos;
1136             if(is->pictq_size > 1){
1137                 VideoPicture *nextvp= &is->pictq[(is->pictq_rindex+1)%VIDEO_PICTURE_QUEUE_SIZE];
1138                 assert(nextvp->target_clock >= vp->target_clock);
1139                 next_target= nextvp->target_clock;
1140             }else{
1141                 next_target= vp->target_clock + is->video_clock - vp->pts; //FIXME pass durations cleanly
1142             }
1143             if((framedrop>0 || (framedrop && is->audio_st)) && time > next_target){
1144                 is->skip_frames *= 1.0 + FRAME_SKIP_FACTOR;
1145                 if(is->pictq_size > 1 || time > next_target + 0.5){
1146                     /* update queue size and signal for next picture */
1147                     if (++is->pictq_rindex == VIDEO_PICTURE_QUEUE_SIZE)
1148                         is->pictq_rindex = 0;
1149
1150                     SDL_LockMutex(is->pictq_mutex);
1151                     is->pictq_size--;
1152                     SDL_CondSignal(is->pictq_cond);
1153                     SDL_UnlockMutex(is->pictq_mutex);
1154                     goto retry;
1155                 }
1156             }
1157
1158             if(is->subtitle_st) {
1159                 if (is->subtitle_stream_changed) {
1160                     SDL_LockMutex(is->subpq_mutex);
1161
1162                     while (is->subpq_size) {
1163                         free_subpicture(&is->subpq[is->subpq_rindex]);
1164
1165                         /* update queue size and signal for next picture */
1166                         if (++is->subpq_rindex == SUBPICTURE_QUEUE_SIZE)
1167                             is->subpq_rindex = 0;
1168
1169                         is->subpq_size--;
1170                     }
1171                     is->subtitle_stream_changed = 0;
1172
1173                     SDL_CondSignal(is->subpq_cond);
1174                     SDL_UnlockMutex(is->subpq_mutex);
1175                 } else {
1176                     if (is->subpq_size > 0) {
1177                         sp = &is->subpq[is->subpq_rindex];
1178
1179                         if (is->subpq_size > 1)
1180                             sp2 = &is->subpq[(is->subpq_rindex + 1) % SUBPICTURE_QUEUE_SIZE];
1181                         else
1182                             sp2 = NULL;
1183
1184                         if ((is->video_current_pts > (sp->pts + ((float) sp->sub.end_display_time / 1000)))
1185                                 || (sp2 && is->video_current_pts > (sp2->pts + ((float) sp2->sub.start_display_time / 1000))))
1186                         {
1187                             free_subpicture(sp);
1188
1189                             /* update queue size and signal for next picture */
1190                             if (++is->subpq_rindex == SUBPICTURE_QUEUE_SIZE)
1191                                 is->subpq_rindex = 0;
1192
1193                             SDL_LockMutex(is->subpq_mutex);
1194                             is->subpq_size--;
1195                             SDL_CondSignal(is->subpq_cond);
1196                             SDL_UnlockMutex(is->subpq_mutex);
1197                         }
1198                     }
1199                 }
1200             }
1201
1202             /* display picture */
1203             if (!display_disable)
1204                 video_display(is);
1205
1206             /* update queue size and signal for next picture */
1207             if (++is->pictq_rindex == VIDEO_PICTURE_QUEUE_SIZE)
1208                 is->pictq_rindex = 0;
1209
1210             SDL_LockMutex(is->pictq_mutex);
1211             is->pictq_size--;
1212             SDL_CondSignal(is->pictq_cond);
1213             SDL_UnlockMutex(is->pictq_mutex);
1214         }
1215     } else if (is->audio_st) {
1216         /* draw the next audio frame */
1217
1218         /* if only audio stream, then display the audio bars (better
1219            than nothing, just to test the implementation) */
1220
1221         /* display picture */
1222         if (!display_disable)
1223             video_display(is);
1224     }
1225     if (show_status) {
1226         static int64_t last_time;
1227         int64_t cur_time;
1228         int aqsize, vqsize, sqsize;
1229         double av_diff;
1230
1231         cur_time = av_gettime();
1232         if (!last_time || (cur_time - last_time) >= 30000) {
1233             aqsize = 0;
1234             vqsize = 0;
1235             sqsize = 0;
1236             if (is->audio_st)
1237                 aqsize = is->audioq.size;
1238             if (is->video_st)
1239                 vqsize = is->videoq.size;
1240             if (is->subtitle_st)
1241                 sqsize = is->subtitleq.size;
1242             av_diff = 0;
1243             if (is->audio_st && is->video_st)
1244                 av_diff = get_audio_clock(is) - get_video_clock(is);
1245             printf("%7.2f A-V:%7.3f s:%3.1f aq=%5dKB vq=%5dKB sq=%5dB f=%"PRId64"/%"PRId64"   \r",
1246                    get_master_clock(is),
1247                    av_diff,
1248                    FFMAX(is->skip_frames-1, 0),
1249                    aqsize / 1024,
1250                    vqsize / 1024,
1251                    sqsize,
1252                    is->video_st ? is->video_st->codec->pts_correction_num_faulty_dts : 0,
1253                    is->video_st ? is->video_st->codec->pts_correction_num_faulty_pts : 0);
1254             fflush(stdout);
1255             last_time = cur_time;
1256         }
1257     }
1258 }
1259
1260 /* allocate a picture (this needs to be done in the main thread to avoid
1261    potential locking problems) */
1262 static void alloc_picture(void *opaque)
1263 {
1264     VideoState *is = opaque;
1265     VideoPicture *vp;
1266
1267     vp = &is->pictq[is->pictq_windex];
1268
1269     if (vp->bmp)
1270         SDL_FreeYUVOverlay(vp->bmp);
1271
1272 #if CONFIG_AVFILTER
1273     if (vp->picref)
1274         avfilter_unref_buffer(vp->picref);
1275     vp->picref = NULL;
1276
1277     vp->width   = is->out_video_filter->inputs[0]->w;
1278     vp->height  = is->out_video_filter->inputs[0]->h;
1279     vp->pix_fmt = is->out_video_filter->inputs[0]->format;
1280 #else
1281     vp->width   = is->video_st->codec->width;
1282     vp->height  = is->video_st->codec->height;
1283     vp->pix_fmt = is->video_st->codec->pix_fmt;
1284 #endif
1285
1286     vp->bmp = SDL_CreateYUVOverlay(vp->width, vp->height,
1287                                    SDL_YV12_OVERLAY,
1288                                    screen);
1289     if (!vp->bmp || vp->bmp->pitches[0] < vp->width) {
1290         /* SDL allocates a buffer smaller than requested if the video
1291          * overlay hardware is unable to support the requested size. */
1292         fprintf(stderr, "Error: the video system does not support an image\n"
1293                         "size of %dx%d pixels. Try using -lowres or -vf \"scale=w:h\"\n"
1294                         "to reduce the image size.\n", vp->width, vp->height );
1295         do_exit();
1296     }
1297
1298     SDL_LockMutex(is->pictq_mutex);
1299     vp->allocated = 1;
1300     SDL_CondSignal(is->pictq_cond);
1301     SDL_UnlockMutex(is->pictq_mutex);
1302 }
1303
1304 static int queue_picture(VideoState *is, AVFrame *src_frame, double pts1, int64_t pos)
1305 {
1306     VideoPicture *vp;
1307     double frame_delay, pts = pts1;
1308
1309     /* compute the exact PTS for the picture if it is omitted in the stream
1310      * pts1 is the dts of the pkt / pts of the frame */
1311     if (pts != 0) {
1312         /* update video clock with pts, if present */
1313         is->video_clock = pts;
1314     } else {
1315         pts = is->video_clock;
1316     }
1317     /* update video clock for next frame */
1318     frame_delay = av_q2d(is->video_st->codec->time_base);
1319     /* for MPEG2, the frame can be repeated, so we update the
1320        clock accordingly */
1321     frame_delay += src_frame->repeat_pict * (frame_delay * 0.5);
1322     is->video_clock += frame_delay;
1323
1324 #if defined(DEBUG_SYNC) && 0
1325     printf("frame_type=%c clock=%0.3f pts=%0.3f\n",
1326            av_get_picture_type_char(src_frame->pict_type), pts, pts1);
1327 #endif
1328
1329     /* wait until we have space to put a new picture */
1330     SDL_LockMutex(is->pictq_mutex);
1331
1332     if(is->pictq_size>=VIDEO_PICTURE_QUEUE_SIZE && !is->refresh)
1333         is->skip_frames= FFMAX(1.0 - FRAME_SKIP_FACTOR, is->skip_frames * (1.0-FRAME_SKIP_FACTOR));
1334
1335     while (is->pictq_size >= VIDEO_PICTURE_QUEUE_SIZE &&
1336            !is->videoq.abort_request) {
1337         SDL_CondWait(is->pictq_cond, is->pictq_mutex);
1338     }
1339     SDL_UnlockMutex(is->pictq_mutex);
1340
1341     if (is->videoq.abort_request)
1342         return -1;
1343
1344     vp = &is->pictq[is->pictq_windex];
1345
1346     /* alloc or resize hardware picture buffer */
1347     if (!vp->bmp ||
1348 #if CONFIG_AVFILTER
1349         vp->width  != is->out_video_filter->inputs[0]->w ||
1350         vp->height != is->out_video_filter->inputs[0]->h) {
1351 #else
1352         vp->width != is->video_st->codec->width ||
1353         vp->height != is->video_st->codec->height) {
1354 #endif
1355         SDL_Event event;
1356
1357         vp->allocated = 0;
1358
1359         /* the allocation must be done in the main thread to avoid
1360            locking problems */
1361         event.type = FF_ALLOC_EVENT;
1362         event.user.data1 = is;
1363         SDL_PushEvent(&event);
1364
1365         /* wait until the picture is allocated */
1366         SDL_LockMutex(is->pictq_mutex);
1367         while (!vp->allocated && !is->videoq.abort_request) {
1368             SDL_CondWait(is->pictq_cond, is->pictq_mutex);
1369         }
1370         SDL_UnlockMutex(is->pictq_mutex);
1371
1372         if (is->videoq.abort_request)
1373             return -1;
1374     }
1375
1376     /* if the frame is not skipped, then display it */
1377     if (vp->bmp) {
1378         AVPicture pict;
1379 #if CONFIG_AVFILTER
1380         if(vp->picref)
1381             avfilter_unref_buffer(vp->picref);
1382         vp->picref = src_frame->opaque;
1383 #endif
1384
1385         /* get a pointer to the bitmap */
1386         SDL_LockYUVOverlay (vp->bmp);
1387
1388         memset(&pict,0,sizeof(AVPicture));
1389         pict.data[0] = vp->bmp->pixels[0];
1390         pict.data[1] = vp->bmp->pixels[2];
1391         pict.data[2] = vp->bmp->pixels[1];
1392
1393         pict.linesize[0] = vp->bmp->pitches[0];
1394         pict.linesize[1] = vp->bmp->pitches[2];
1395         pict.linesize[2] = vp->bmp->pitches[1];
1396
1397 #if CONFIG_AVFILTER
1398         //FIXME use direct rendering
1399         av_picture_copy(&pict, (AVPicture *)src_frame,
1400                         vp->pix_fmt, vp->width, vp->height);
1401 #else
1402         sws_flags = av_get_int(sws_opts, "sws_flags", NULL);
1403         is->img_convert_ctx = sws_getCachedContext(is->img_convert_ctx,
1404             vp->width, vp->height, vp->pix_fmt, vp->width, vp->height,
1405             PIX_FMT_YUV420P, sws_flags, NULL, NULL, NULL);
1406         if (is->img_convert_ctx == NULL) {
1407             fprintf(stderr, "Cannot initialize the conversion context\n");
1408             exit(1);
1409         }
1410         sws_scale(is->img_convert_ctx, src_frame->data, src_frame->linesize,
1411                   0, vp->height, pict.data, pict.linesize);
1412 #endif
1413         /* update the bitmap content */
1414         SDL_UnlockYUVOverlay(vp->bmp);
1415
1416         vp->pts = pts;
1417         vp->pos = pos;
1418
1419         /* now we can update the picture count */
1420         if (++is->pictq_windex == VIDEO_PICTURE_QUEUE_SIZE)
1421             is->pictq_windex = 0;
1422         SDL_LockMutex(is->pictq_mutex);
1423         vp->target_clock= compute_target_time(vp->pts, is);
1424
1425         is->pictq_size++;
1426         SDL_UnlockMutex(is->pictq_mutex);
1427     }
1428     return 0;
1429 }
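
/* Illustrative arithmetic: the video clock update in queue_picture() advances
   by one nominal frame duration plus half a frame per repeated field.  With a
   1/25 time base that is 40 ms for a normal frame and 60 ms when
   repeat_pict == 1 (40 + 1 * 40/2), which keeps repeated-field (pulldown)
   material in sync. */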
1430
1431 static int get_video_frame(VideoState *is, AVFrame *frame, int64_t *pts, AVPacket *pkt)
1432 {
1433     int len1 av_unused, got_picture, i;
1434
1435     if (packet_queue_get(&is->videoq, pkt, 1) < 0)
1436         return -1;
1437
1438     if (pkt->data == flush_pkt.data) {
1439         avcodec_flush_buffers(is->video_st->codec);
1440
1441         SDL_LockMutex(is->pictq_mutex);
1442         // Make sure there are no long-delay timers (ideally we should just flush the queue, but that's harder)
1443         for (i = 0; i < VIDEO_PICTURE_QUEUE_SIZE; i++) {
1444             is->pictq[i].target_clock= 0;
1445         }
1446         while (is->pictq_size && !is->videoq.abort_request) {
1447             SDL_CondWait(is->pictq_cond, is->pictq_mutex);
1448         }
1449         is->video_current_pos = -1;
1450         SDL_UnlockMutex(is->pictq_mutex);
1451
1452         is->frame_last_pts = AV_NOPTS_VALUE;
1453         is->frame_last_delay = 0;
1454         is->frame_timer = (double)av_gettime() / 1000000.0;
1455         is->skip_frames = 1;
1456         is->skip_frames_index = 0;
1457         return 0;
1458     }
1459
1460     len1 = avcodec_decode_video2(is->video_st->codec,
1461                                  frame, &got_picture,
1462                                  pkt);
1463
1464     if (got_picture) {
1465         if (decoder_reorder_pts == -1) {
1466             *pts = frame->best_effort_timestamp;
1467         } else if (decoder_reorder_pts) {
1468             *pts = frame->pkt_pts;
1469         } else {
1470             *pts = frame->pkt_dts;
1471         }
1472
1473         if (*pts == AV_NOPTS_VALUE) {
1474             *pts = 0;
1475         }
1476
1477         is->skip_frames_index += 1;
1478         if(is->skip_frames_index >= is->skip_frames){
1479             is->skip_frames_index -= FFMAX(is->skip_frames, 1.0);
1480             return 1;
1481         }
1482
1483     }
1484     return 0;
1485 }
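
/* Note on the pts selection above: decoder_reorder_pts == -1 (the default)
   trusts frame->best_effort_timestamp, 1 forces the reordered pkt_pts and
   0 forces the decode-order pkt_dts; AV_NOPTS_VALUE is then mapped to 0 so
   the later arithmetic never sees an invalid timestamp. */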
1486
1487 #if CONFIG_AVFILTER
1488 typedef struct {
1489     VideoState *is;
1490     AVFrame *frame;
1491     int use_dr1;
1492 } FilterPriv;
1493
1494 static int input_get_buffer(AVCodecContext *codec, AVFrame *pic)
1495 {
1496     AVFilterContext *ctx = codec->opaque;
1497     AVFilterBufferRef  *ref;
1498     int perms = AV_PERM_WRITE;
1499     int i, w, h, stride[4];
1500     unsigned edge;
1501     int pixel_size;
1502
1503     av_assert0(codec->flags & CODEC_FLAG_EMU_EDGE);
1504
1505     if (codec->codec->capabilities & CODEC_CAP_NEG_LINESIZES)
1506         perms |= AV_PERM_NEG_LINESIZES;
1507
1508     if(pic->buffer_hints & FF_BUFFER_HINTS_VALID) {
1509         if(pic->buffer_hints & FF_BUFFER_HINTS_READABLE) perms |= AV_PERM_READ;
1510         if(pic->buffer_hints & FF_BUFFER_HINTS_PRESERVE) perms |= AV_PERM_PRESERVE;
1511         if(pic->buffer_hints & FF_BUFFER_HINTS_REUSABLE) perms |= AV_PERM_REUSE2;
1512     }
1513     if(pic->reference) perms |= AV_PERM_READ | AV_PERM_PRESERVE;
1514
1515     w = codec->width;
1516     h = codec->height;
1517
1518     if(av_image_check_size(w, h, 0, codec))
1519         return -1;
1520
1521     avcodec_align_dimensions2(codec, &w, &h, stride);
1522     edge = codec->flags & CODEC_FLAG_EMU_EDGE ? 0 : avcodec_get_edge_width();
1523     w += edge << 1;
1524     h += edge << 1;
1525
1526     if(!(ref = avfilter_get_video_buffer(ctx->outputs[0], perms, w, h)))
1527         return -1;
1528
1529     pixel_size = av_pix_fmt_descriptors[ref->format].comp[0].step_minus1+1;
1530     ref->video->w = codec->width;
1531     ref->video->h = codec->height;
1532     for(i = 0; i < 4; i ++) {
1533         unsigned hshift = (i == 1 || i == 2) ? av_pix_fmt_descriptors[ref->format].log2_chroma_w : 0;
1534         unsigned vshift = (i == 1 || i == 2) ? av_pix_fmt_descriptors[ref->format].log2_chroma_h : 0;
1535
1536         if (ref->data[i]) {
1537             ref->data[i]    += ((edge * pixel_size) >> hshift) + ((edge * ref->linesize[i]) >> vshift);
1538         }
1539         pic->data[i]     = ref->data[i];
1540         pic->linesize[i] = ref->linesize[i];
1541     }
1542     pic->opaque = ref;
1543     pic->age    = INT_MAX;
1544     pic->type   = FF_BUFFER_TYPE_USER;
1545     pic->reordered_opaque = codec->reordered_opaque;
1546     if(codec->pkt) pic->pkt_pts = codec->pkt->pts;
1547     else           pic->pkt_pts = AV_NOPTS_VALUE;
1548     return 0;
1549 }
1550
1551 static void input_release_buffer(AVCodecContext *codec, AVFrame *pic)
1552 {
1553     memset(pic->data, 0, sizeof(pic->data));
1554     avfilter_unref_buffer(pic->opaque);
1555 }
1556
1557 static int input_reget_buffer(AVCodecContext *codec, AVFrame *pic)
1558 {
1559     AVFilterBufferRef *ref = pic->opaque;
1560
1561     if (pic->data[0] == NULL) {
1562         pic->buffer_hints |= FF_BUFFER_HINTS_READABLE;
1563         return codec->get_buffer(codec, pic);
1564     }
1565
1566     if ((codec->width != ref->video->w) || (codec->height != ref->video->h) ||
1567         (codec->pix_fmt != ref->format)) {
1568         av_log(codec, AV_LOG_ERROR, "Picture properties changed.\n");
1569         return -1;
1570     }
1571
1572     pic->reordered_opaque = codec->reordered_opaque;
1573     if(codec->pkt) pic->pkt_pts = codec->pkt->pts;
1574     else           pic->pkt_pts = AV_NOPTS_VALUE;
1575     return 0;
1576 }
1577
1578 static int input_init(AVFilterContext *ctx, const char *args, void *opaque)
1579 {
1580     FilterPriv *priv = ctx->priv;
1581     AVCodecContext *codec;
1582     if(!opaque) return -1;
1583
1584     priv->is = opaque;
1585     codec    = priv->is->video_st->codec;
1586     codec->opaque = ctx;
1587     if((codec->codec->capabilities & CODEC_CAP_DR1)
1588     ) {
1589         av_assert0(codec->flags & CODEC_FLAG_EMU_EDGE);
1590         priv->use_dr1 = 1;
1591         codec->get_buffer     = input_get_buffer;
1592         codec->release_buffer = input_release_buffer;
1593         codec->reget_buffer   = input_reget_buffer;
1594         codec->thread_safe_callbacks = 1;
1595     }
1596
1597     priv->frame = avcodec_alloc_frame();
1598
1599     return 0;
1600 }
1601
1602 static void input_uninit(AVFilterContext *ctx)
1603 {
1604     FilterPriv *priv = ctx->priv;
1605     av_free(priv->frame);
1606 }
1607
1608 static int input_request_frame(AVFilterLink *link)
1609 {
1610     FilterPriv *priv = link->src->priv;
1611     AVFilterBufferRef *picref;
1612     int64_t pts = 0;
1613     AVPacket pkt;
1614     int ret;
1615
1616     while (!(ret = get_video_frame(priv->is, priv->frame, &pts, &pkt)))
1617         av_free_packet(&pkt);
1618     if (ret < 0)
1619         return -1;
1620
1621     if(priv->use_dr1 && priv->frame->opaque) {
1622         picref = avfilter_ref_buffer(priv->frame->opaque, ~0);
1623     } else {
1624         picref = avfilter_get_video_buffer(link, AV_PERM_WRITE, link->w, link->h);
1625         av_image_copy(picref->data, picref->linesize,
1626                       priv->frame->data, priv->frame->linesize,
1627                       picref->format, link->w, link->h);
1628     }
1629     av_free_packet(&pkt);
1630
1631     avfilter_copy_frame_props(picref, priv->frame);
1632     picref->pts = pts;
1633
1634     avfilter_start_frame(link, picref);
1635     avfilter_draw_slice(link, 0, link->h, 1);
1636     avfilter_end_frame(link);
1637
1638     return 0;
1639 }
1640
1641 static int input_query_formats(AVFilterContext *ctx)
1642 {
1643     FilterPriv *priv = ctx->priv;
1644     enum PixelFormat pix_fmts[] = {
1645         priv->is->video_st->codec->pix_fmt, PIX_FMT_NONE
1646     };
1647
1648     avfilter_set_common_pixel_formats(ctx, avfilter_make_format_list(pix_fmts));
1649     return 0;
1650 }
1651
1652 static int input_config_props(AVFilterLink *link)
1653 {
1654     FilterPriv *priv  = link->src->priv;
1655     AVCodecContext *c = priv->is->video_st->codec;
1656
1657     link->w = c->width;
1658     link->h = c->height;
1659     link->time_base = priv->is->video_st->time_base;
1660
1661     return 0;
1662 }
1663
1664 static AVFilter input_filter =
1665 {
1666     .name      = "ffplay_input",
1667
1668     .priv_size = sizeof(FilterPriv),
1669
1670     .init      = input_init,
1671     .uninit    = input_uninit,
1672
1673     .query_formats = input_query_formats,
1674
1675     .inputs    = (AVFilterPad[]) {{ .name = NULL }},
1676     .outputs   = (AVFilterPad[]) {{ .name = "default",
1677                                     .type = AVMEDIA_TYPE_VIDEO,
1678                                     .request_frame = input_request_frame,
1679                                     .config_props  = input_config_props, },
1680                                   { .name = NULL }},
1681 };
1682
1683 static int configure_video_filters(AVFilterGraph *graph, VideoState *is, const char *vfilters)
1684 {
1685     char sws_flags_str[128];
1686     int ret;
1687     enum PixelFormat pix_fmts[] = { PIX_FMT_YUV420P, PIX_FMT_NONE };
1688     AVFilterContext *filt_src = NULL, *filt_out = NULL;
1689     snprintf(sws_flags_str, sizeof(sws_flags_str), "flags=%d", sws_flags);
1690     graph->scale_sws_opts = av_strdup(sws_flags_str);
1691
1692     if ((ret = avfilter_graph_create_filter(&filt_src, &input_filter, "src",
1693                                             NULL, is, graph)) < 0)
1694         goto the_end;
1695     if ((ret = avfilter_graph_create_filter(&filt_out, avfilter_get_by_name("buffersink"), "out",
1696                                             NULL, pix_fmts, graph)) < 0)
1697         goto the_end;
1698
1699     if(vfilters) {
1700         AVFilterInOut *outputs = avfilter_inout_alloc();
1701         AVFilterInOut *inputs  = avfilter_inout_alloc();
1702
1703         outputs->name    = av_strdup("in");
1704         outputs->filter_ctx = filt_src;
1705         outputs->pad_idx = 0;
1706         outputs->next    = NULL;
1707
1708         inputs->name    = av_strdup("out");
1709         inputs->filter_ctx = filt_out;
1710         inputs->pad_idx = 0;
1711         inputs->next    = NULL;
1712
1713         if ((ret = avfilter_graph_parse(graph, vfilters, &inputs, &outputs, NULL)) < 0)
1714             goto the_end;
1715         av_freep(&vfilters);
1716     } else {
1717         if ((ret = avfilter_link(filt_src, 0, filt_out, 0)) < 0)
1718             goto the_end;
1719     }
1720
1721     if ((ret = avfilter_graph_config(graph, NULL)) < 0)
1722         goto the_end;
1723
1724     is->out_video_filter = filt_out;
1725 the_end:
1726     return ret;
1727 }
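
/*
 * A minimal sketch (not ffplay code) of roughly what avfilter_graph_parse()
 * ends up doing for a one-filter -vf string such as "hflip": create the
 * filter and link it between the source and the sink by hand.  The helper
 * name and the choice of "hflip" are only examples; error handling is
 * reduced to early returns.
 */
static av_unused int insert_one_filter_sketch(AVFilterGraph *graph,
                                              AVFilterContext *filt_src,
                                              AVFilterContext *filt_out)
{
    AVFilterContext *filt = NULL;
    int ret;

    if ((ret = avfilter_graph_create_filter(&filt, avfilter_get_by_name("hflip"),
                                            "flip", NULL, NULL, graph)) < 0)
        return ret;
    if ((ret = avfilter_link(filt_src, 0, filt, 0)) < 0)
        return ret;
    return avfilter_link(filt, 0, filt_out, 0);
}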
1728
1729 #endif  /* CONFIG_AVFILTER */
1730
1731 static int video_thread(void *arg)
1732 {
1733     VideoState *is = arg;
1734     AVFrame *frame= avcodec_alloc_frame();
1735     int64_t pts_int = AV_NOPTS_VALUE, pos = -1;
1736     double pts;
1737     int ret;
1738
1739 #if CONFIG_AVFILTER
1740     AVFilterGraph *graph = avfilter_graph_alloc();
1741     AVFilterContext *filt_out = NULL;
1742
1743     if ((ret = configure_video_filters(graph, is, vfilters)) < 0)
1744         goto the_end;
1745     filt_out = is->out_video_filter;
1746 #endif
1747
1748     for(;;) {
1749 #if !CONFIG_AVFILTER
1750         AVPacket pkt;
1751 #else
1752         AVFilterBufferRef *picref;
1753         AVRational tb = filt_out->inputs[0]->time_base;
1754 #endif
1755         while (is->paused && !is->videoq.abort_request)
1756             SDL_Delay(10);
1757 #if CONFIG_AVFILTER
1758         ret = av_vsink_buffer_get_video_buffer_ref(filt_out, &picref, 0);
1759         if (picref) {
1760             avfilter_fill_frame_from_video_buffer_ref(frame, picref);
1761             pts_int = picref->pts;
1762             pos     = picref->pos;
1763             frame->opaque = picref;
1764         }
1765
1766         if (av_cmp_q(tb, is->video_st->time_base)) {
1767             av_unused int64_t pts1 = pts_int;
1768             pts_int = av_rescale_q(pts_int, tb, is->video_st->time_base);
1769             av_dlog(NULL, "video_thread(): "
1770                     "tb:%d/%d pts:%"PRId64" -> tb:%d/%d pts:%"PRId64"\n",
1771                     tb.num, tb.den, pts1,
1772                     is->video_st->time_base.num, is->video_st->time_base.den, pts_int);
1773         }
1774 #else
1775         ret = get_video_frame(is, frame, &pts_int, &pkt);
1776         pos = pkt.pos;
1777         av_free_packet(&pkt);
1778 #endif
1779
1780         if (ret < 0) goto the_end;
1781 #if CONFIG_AVFILTER
1782         if (!picref) continue;
1783 #endif
1784
1785         pts = pts_int*av_q2d(is->video_st->time_base);
1786
1787         ret = queue_picture(is, frame, pts, pos);
1788
1789         if (ret < 0)
1790             goto the_end;
1791
1792         if (step)
1793             if (cur_stream)
1794                 stream_toggle_pause(cur_stream);
1795     }
1796  the_end:
1797 #if CONFIG_AVFILTER
1798     avfilter_graph_free(&graph);
1799 #endif
1800     av_free(frame);
1801     return 0;
1802 }
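
/*
 * Worked example for the av_rescale_q() call in video_thread(), with
 * illustrative numbers: if the filter output time base tb is 1/1000 and the
 * stream time base is 1/90000, a pts_int of 40 (40 ms) is rescaled to
 * 40 * 90000 / 1000 = 3600 ticks, and the later pts_int * av_q2d(time_base)
 * conversion turns that into 3600 / 90000 = 0.04 seconds.
 */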
1803
1804 static int subtitle_thread(void *arg)
1805 {
1806     VideoState *is = arg;
1807     SubPicture *sp;
1808     AVPacket pkt1, *pkt = &pkt1;
1809     int len1 av_unused, got_subtitle;
1810     double pts;
1811     int i, j;
1812     int r, g, b, y, u, v, a;
1813
1814     for(;;) {
1815         while (is->paused && !is->subtitleq.abort_request) {
1816             SDL_Delay(10);
1817         }
1818         if (packet_queue_get(&is->subtitleq, pkt, 1) < 0)
1819             break;
1820
1821         if(pkt->data == flush_pkt.data){
1822             avcodec_flush_buffers(is->subtitle_st->codec);
1823             continue;
1824         }
1825         SDL_LockMutex(is->subpq_mutex);
1826         while (is->subpq_size >= SUBPICTURE_QUEUE_SIZE &&
1827                !is->subtitleq.abort_request) {
1828             SDL_CondWait(is->subpq_cond, is->subpq_mutex);
1829         }
1830         SDL_UnlockMutex(is->subpq_mutex);
1831
1832         if (is->subtitleq.abort_request)
1833             goto the_end;
1834
1835         sp = &is->subpq[is->subpq_windex];
1836
1837         /* NOTE: pts is the PTS of the _first_ subtitle beginning in
1838            this packet, if any */
1839         pts = 0;
1840         if (pkt->pts != AV_NOPTS_VALUE)
1841             pts = av_q2d(is->subtitle_st->time_base)*pkt->pts;
1842
1843         len1 = avcodec_decode_subtitle2(is->subtitle_st->codec,
1844                                     &sp->sub, &got_subtitle,
1845                                     pkt);
1846         if (got_subtitle && sp->sub.format == 0) {
1847             sp->pts = pts;
1848
1849             for (i = 0; i < sp->sub.num_rects; i++)
1850             {
1851                 for (j = 0; j < sp->sub.rects[i]->nb_colors; j++)
1852                 {
1853                     RGBA_IN(r, g, b, a, (uint32_t*)sp->sub.rects[i]->pict.data[1] + j);
1854                     y = RGB_TO_Y_CCIR(r, g, b);
1855                     u = RGB_TO_U_CCIR(r, g, b, 0);
1856                     v = RGB_TO_V_CCIR(r, g, b, 0);
1857                     YUVA_OUT((uint32_t*)sp->sub.rects[i]->pict.data[1] + j, y, u, v, a);
1858                 }
1859             }
1860
1861             /* now we can update the picture count */
1862             if (++is->subpq_windex == SUBPICTURE_QUEUE_SIZE)
1863                 is->subpq_windex = 0;
1864             SDL_LockMutex(is->subpq_mutex);
1865             is->subpq_size++;
1866             SDL_UnlockMutex(is->subpq_mutex);
1867         }
1868         av_free_packet(pkt);
1869     }
1870  the_end:
1871     return 0;
1872 }
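
/*
 * Worked example for the RGBA -> YUVA palette conversion in subtitle_thread()
 * (illustrative values): the RGB_TO_*_CCIR macros produce studio-range
 * components, so an opaque white entry (r = g = b = 255) maps to y = 235,
 * u = v = 128, and black (r = g = b = 0) maps to y = 16, u = v = 128; the
 * alpha value a is written back unchanged by YUVA_OUT().
 */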
1873
1874 /* copy samples for viewing in the audio display (waves/RDFT) window */
1875 static void update_sample_display(VideoState *is, short *samples, int samples_size)
1876 {
1877     int size, len;
1878
1879     size = samples_size / sizeof(short);
1880     while (size > 0) {
1881         len = SAMPLE_ARRAY_SIZE - is->sample_array_index;
1882         if (len > size)
1883             len = size;
1884         memcpy(is->sample_array + is->sample_array_index, samples, len * sizeof(short));
1885         samples += len;
1886         is->sample_array_index += len;
1887         if (is->sample_array_index >= SAMPLE_ARRAY_SIZE)
1888             is->sample_array_index = 0;
1889         size -= len;
1890     }
1891 }
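
/*
 * Worked example for the wrap-around above (illustrative numbers): with
 * SAMPLE_ARRAY_SIZE = 2 * 65536 = 131072 and sample_array_index = 131000,
 * a call with 200 samples copies 72 samples up to the end of the array,
 * resets the index to 0, then copies the remaining 128 samples at the start.
 */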
1892
1893 /* return the new audio buffer size (samples can be added or deleted
1894    to get better sync when video or the external clock is the master) */
1895 static int synchronize_audio(VideoState *is, short *samples,
1896                              int samples_size1, double pts)
1897 {
1898     int n, samples_size;
1899     double ref_clock;
1900
1901     n = 2 * is->audio_st->codec->channels;
1902     samples_size = samples_size1;
1903
1904     /* if not master, then we try to remove or add samples to correct the clock */
1905     if (((is->av_sync_type == AV_SYNC_VIDEO_MASTER && is->video_st) ||
1906          is->av_sync_type == AV_SYNC_EXTERNAL_CLOCK)) {
1907         double diff, avg_diff;
1908         int wanted_size, min_size, max_size, nb_samples;
1909
1910         ref_clock = get_master_clock(is);
1911         diff = get_audio_clock(is) - ref_clock;
1912
1913         if (diff < AV_NOSYNC_THRESHOLD) {
1914             is->audio_diff_cum = diff + is->audio_diff_avg_coef * is->audio_diff_cum;
1915             if (is->audio_diff_avg_count < AUDIO_DIFF_AVG_NB) {
1916                 /* not enough measurements to have a correct estimate */
1917                 is->audio_diff_avg_count++;
1918             } else {
1919                 /* estimate the A-V difference */
1920                 avg_diff = is->audio_diff_cum * (1.0 - is->audio_diff_avg_coef);
1921
1922                 if (fabs(avg_diff) >= is->audio_diff_threshold) {
1923                     wanted_size = samples_size + ((int)(diff * is->audio_st->codec->sample_rate) * n);
1924                     nb_samples = samples_size / n;
1925
1926                     min_size = ((nb_samples * (100 - SAMPLE_CORRECTION_PERCENT_MAX)) / 100) * n;
1927                     max_size = ((nb_samples * (100 + SAMPLE_CORRECTION_PERCENT_MAX)) / 100) * n;
1928                     if (wanted_size < min_size)
1929                         wanted_size = min_size;
1930                     else if (wanted_size > max_size)
1931                         wanted_size = max_size;
1932
1933                     /* add or remove samples to correct the sync */
1934                     if (wanted_size < samples_size) {
1935                         /* remove samples */
1936                         samples_size = wanted_size;
1937                     } else if (wanted_size > samples_size) {
1938                         uint8_t *samples_end, *q;
1939                         int nb;
1940
1941                         /* add samples */
1942                         nb = (wanted_size - samples_size);
1943                         samples_end = (uint8_t *)samples + samples_size - n;
1944                         q = samples_end + n;
1945                         while (nb > 0) {
1946                             memcpy(q, samples_end, n);
1947                             q += n;
1948                             nb -= n;
1949                         }
1950                         samples_size = wanted_size;
1951                     }
1952                 }
1953 #if 0
1954                 printf("diff=%f adiff=%f sample_diff=%d apts=%0.3f vpts=%0.3f %f\n",
1955                        diff, avg_diff, samples_size - samples_size1,
1956                        is->audio_clock, is->video_clock, is->audio_diff_threshold);
1957 #endif
1958             }
1959         } else {
1960             /* the difference is too big: it may be caused by initial PTS
1961                errors, so reset the A-V filter */
1962             is->audio_diff_avg_count = 0;
1963             is->audio_diff_cum = 0;
1964         }
1965     }
1966
1967     return samples_size;
1968 }
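
/*
 * Worked example for the correction above (illustrative numbers): for stereo
 * S16 audio at 44100 Hz, n = 4 bytes per sample frame.  With samples_size1 =
 * 4096 bytes (1024 frames) and diff = +0.010 s, wanted_size is 4096 +
 * (int)(0.010 * 44100) * 4 = 5860 bytes, but SAMPLE_CORRECTION_PERCENT_MAX = 10
 * clamps it to max_size = ((1024 * 110) / 100) * 4 = 4504 bytes, so at most
 * 102 extra sample frames are duplicated in a single call.
 */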
1969
1970 /* decode one audio frame and return its uncompressed size in bytes */
1971 static int audio_decode_frame(VideoState *is, double *pts_ptr)
1972 {
1973     AVPacket *pkt_temp = &is->audio_pkt_temp;
1974     AVPacket *pkt = &is->audio_pkt;
1975     AVCodecContext *dec= is->audio_st->codec;
1976     int n, len1, data_size;
1977     double pts;
1978
1979     for(;;) {
1980         /* NOTE: the audio packet can contain several frames */
1981         while (pkt_temp->size > 0) {
1982             data_size = sizeof(is->audio_buf1);
1983             len1 = avcodec_decode_audio3(dec,
1984                                         (int16_t *)is->audio_buf1, &data_size,
1985                                         pkt_temp);
1986             if (len1 < 0) {
1987                 /* if error, we skip the frame */
1988                 pkt_temp->size = 0;
1989                 break;
1990             }
1991
1992             pkt_temp->data += len1;
1993             pkt_temp->size -= len1;
1994             if (data_size <= 0)
1995                 continue;
1996
1997             if (dec->sample_fmt != is->audio_src_fmt) {
1998                 if (is->reformat_ctx)
1999                     av_audio_convert_free(is->reformat_ctx);
2000                 is->reformat_ctx= av_audio_convert_alloc(AV_SAMPLE_FMT_S16, 1,
2001                                                          dec->sample_fmt, 1, NULL, 0);
2002                 if (!is->reformat_ctx) {
2003                     fprintf(stderr, "Cannot convert %s sample format to %s sample format\n",
2004                         av_get_sample_fmt_name(dec->sample_fmt),
2005                         av_get_sample_fmt_name(AV_SAMPLE_FMT_S16));
2006                     break;
2007                 }
2008                 is->audio_src_fmt= dec->sample_fmt;
2009             }
2010
2011             if (is->reformat_ctx) {
2012                 const void *ibuf[6]= {is->audio_buf1};
2013                 void *obuf[6]= {is->audio_buf2};
2014                 int istride[6]= {av_get_bytes_per_sample(dec->sample_fmt)};
2015                 int ostride[6]= {2};
2016                 int len= data_size/istride[0];
2017                 if (av_audio_convert(is->reformat_ctx, obuf, ostride, ibuf, istride, len)<0) {
2018                     printf("av_audio_convert() failed\n");
2019                     break;
2020                 }
2021                 is->audio_buf= is->audio_buf2;
2022                 /* FIXME: existing code assumes that data_size equals framesize*channels*2;
2023                           remove this legacy cruft */
2024                 data_size= len*2;
2025             }else{
2026                 is->audio_buf= is->audio_buf1;
2027             }
2028
2029             /* if no pts, then compute it */
2030             pts = is->audio_clock;
2031             *pts_ptr = pts;
2032             n = 2 * dec->channels;
2033             is->audio_clock += (double)data_size /
2034                 (double)(n * dec->sample_rate);
2035 #ifdef DEBUG
2036             {
2037                 static double last_clock;
2038                 printf("audio: delay=%0.3f clock=%0.3f pts=%0.3f\n",
2039                        is->audio_clock - last_clock,
2040                        is->audio_clock, pts);
2041                 last_clock = is->audio_clock;
2042             }
2043 #endif
2044             return data_size;
2045         }
2046
2047         /* free the current packet */
2048         if (pkt->data)
2049             av_free_packet(pkt);
2050
2051         if (is->paused || is->audioq.abort_request) {
2052             return -1;
2053         }
2054
2055         /* read next packet */
2056         if (packet_queue_get(&is->audioq, pkt, 1) < 0)
2057             return -1;
2058         if(pkt->data == flush_pkt.data){
2059             avcodec_flush_buffers(dec);
2060             continue;
2061         }
2062
2063         pkt_temp->data = pkt->data;
2064         pkt_temp->size = pkt->size;
2065
2066         /* update the audio clock with the packet pts */
2067         if (pkt->pts != AV_NOPTS_VALUE) {
2068             is->audio_clock = av_q2d(is->audio_st->time_base)*pkt->pts;
2069         }
2070     }
2071 }
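
/*
 * Worked example for the audio clock update in audio_decode_frame()
 * (illustrative numbers): for stereo S16 audio at 44100 Hz, n = 2 * 2 = 4
 * bytes per sample frame, so a decoded chunk of data_size = 4096 bytes
 * advances is->audio_clock by 4096 / (4 * 44100) ~= 0.023 s, i.e. about
 * 23 ms of audio.
 */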
2072
2073 /* prepare a new audio buffer */
2074 static void sdl_audio_callback(void *opaque, Uint8 *stream, int len)
2075 {
2076     VideoState *is = opaque;
2077     int audio_size, len1;
2078     double pts;
2079
2080     audio_callback_time = av_gettime();
2081
2082     while (len > 0) {
2083         if (is->audio_buf_index >= is->audio_buf_size) {
2084            audio_size = audio_decode_frame(is, &pts);
2085            if (audio_size < 0) {
2086                 /* if error, just output silence */
2087                is->audio_buf = is->audio_buf1;
2088                is->audio_buf_size = 1024;
2089                memset(is->audio_buf, 0, is->audio_buf_size);
2090            } else {
2091                if (is->show_mode != SHOW_MODE_VIDEO)
2092                    update_sample_display(is, (int16_t *)is->audio_buf, audio_size);
2093                audio_size = synchronize_audio(is, (int16_t *)is->audio_buf, audio_size,
2094                                               pts);
2095                is->audio_buf_size = audio_size;
2096            }
2097            is->audio_buf_index = 0;
2098         }
2099         len1 = is->audio_buf_size - is->audio_buf_index;
2100         if (len1 > len)
2101             len1 = len;
2102         memcpy(stream, (uint8_t *)is->audio_buf + is->audio_buf_index, len1);
2103         len -= len1;
2104         stream += len1;
2105         is->audio_buf_index += len1;
2106     }
2107 }
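
/*
 * Worked example for the callback above (illustrative numbers): with
 * SDL_AUDIO_BUFFER_SIZE = 1024 sample frames of stereo S16 audio, SDL asks
 * for len = 1024 * 2 * 2 = 4096 bytes per call.  If the current decoded
 * buffer holds audio_buf_size = 4608 bytes, the first call copies 4096 bytes
 * and the next call copies the remaining 512 bytes before decoding more.
 */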
2108
2109 /* open a given stream. Return 0 if OK */
2110 static int stream_component_open(VideoState *is, int stream_index)
2111 {
2112     AVFormatContext *ic = is->ic;
2113     AVCodecContext *avctx;
2114     AVCodec *codec;
2115     SDL_AudioSpec wanted_spec, spec;
2116
2117     if (stream_index < 0 || stream_index >= ic->nb_streams)
2118         return -1;
2119     avctx = ic->streams[stream_index]->codec;
2120
2121     /* prepare audio output: request at most 2 channels from the decoder */
2122     if (avctx->codec_type == AVMEDIA_TYPE_AUDIO) {
2123         if (avctx->channels > 0) {
2124             avctx->request_channels = FFMIN(2, avctx->channels);
2125         } else {
2126             avctx->request_channels = 2;
2127         }
2128     }
2129
2130     codec = avcodec_find_decoder(avctx->codec_id);
2131     if (!codec)
2132         return -1;
2133
2134     avctx->workaround_bugs = workaround_bugs;
2135     avctx->lowres = lowres;
2136     if(lowres) avctx->flags |= CODEC_FLAG_EMU_EDGE;
2137     avctx->idct_algo= idct;
2138     if(fast) avctx->flags2 |= CODEC_FLAG2_FAST;
2139     avctx->skip_frame= skip_frame;
2140     avctx->skip_idct= skip_idct;
2141     avctx->skip_loop_filter= skip_loop_filter;
2142     avctx->error_recognition= error_recognition;
2143     avctx->error_concealment= error_concealment;
2144     avctx->thread_count= thread_count;
2145
2146     set_context_opts(avctx, avcodec_opts[avctx->codec_type], 0, codec);
2147
2148     if(codec->capabilities & CODEC_CAP_DR1)
2149         avctx->flags |= CODEC_FLAG_EMU_EDGE;
2150
2151     if (avcodec_open(avctx, codec) < 0)
2152         return -1;
2153
2154     /* prepare audio output */
2155     if (avctx->codec_type == AVMEDIA_TYPE_AUDIO) {
2156         if(avctx->sample_rate <= 0 || avctx->channels <= 0){
2157             fprintf(stderr, "Invalid sample rate or channel count\n");
2158             return -1;
2159         }
2160         wanted_spec.freq = avctx->sample_rate;
2161         wanted_spec.format = AUDIO_S16SYS;
2162         wanted_spec.channels = avctx->channels;
2163         wanted_spec.silence = 0;
2164         wanted_spec.samples = SDL_AUDIO_BUFFER_SIZE;
2165         wanted_spec.callback = sdl_audio_callback;
2166         wanted_spec.userdata = is;
2167         if (SDL_OpenAudio(&wanted_spec, &spec) < 0) {
2168             fprintf(stderr, "SDL_OpenAudio: %s\n", SDL_GetError());
2169             return -1;
2170         }
2171         is->audio_hw_buf_size = spec.size;
2172         is->audio_src_fmt= AV_SAMPLE_FMT_S16;
2173     }
2174
2175     ic->streams[stream_index]->discard = AVDISCARD_DEFAULT;
2176     switch(avctx->codec_type) {
2177     case AVMEDIA_TYPE_AUDIO:
2178         is->audio_stream = stream_index;
2179         is->audio_st = ic->streams[stream_index];
2180         is->audio_buf_size = 0;
2181         is->audio_buf_index = 0;
2182
2183         /* init averaging filter */
2184         is->audio_diff_avg_coef = exp(log(0.01) / AUDIO_DIFF_AVG_NB);
2185         is->audio_diff_avg_count = 0;
2186         /* since we do not have a precise enough measure of the audio fifo
2187            fullness, we only correct audio sync above this threshold */
2188         is->audio_diff_threshold = 2.0 * SDL_AUDIO_BUFFER_SIZE / avctx->sample_rate;
2189
2190         memset(&is->audio_pkt, 0, sizeof(is->audio_pkt));
2191         packet_queue_init(&is->audioq);
2192         SDL_PauseAudio(0);
2193         break;
2194     case AVMEDIA_TYPE_VIDEO:
2195         is->video_stream = stream_index;
2196         is->video_st = ic->streams[stream_index];
2197
2198         packet_queue_init(&is->videoq);
2199         is->video_tid = SDL_CreateThread(video_thread, is);
2200         break;
2201     case AVMEDIA_TYPE_SUBTITLE:
2202         is->subtitle_stream = stream_index;
2203         is->subtitle_st = ic->streams[stream_index];
2204         packet_queue_init(&is->subtitleq);
2205
2206         is->subtitle_tid = SDL_CreateThread(subtitle_thread, is);
2207         break;
2208     default:
2209         break;
2210     }
2211     return 0;
2212 }
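
/*
 * Worked numbers for the audio sync parameters initialized above
 * (illustrative): audio_diff_avg_coef = exp(log(0.01) / 20) ~= 0.794, so a
 * measured A-V difference has decayed to about 1% of its weight after 20
 * further measurements; at 44100 Hz, audio_diff_threshold =
 * 2.0 * 1024 / 44100 ~= 0.046 s, so corrections only start once the average
 * drift exceeds roughly 46 ms.
 */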
2213
2214 static void stream_component_close(VideoState *is, int stream_index)
2215 {
2216     AVFormatContext *ic = is->ic;
2217     AVCodecContext *avctx;
2218
2219     if (stream_index < 0 || stream_index >= ic->nb_streams)
2220         return;
2221     avctx = ic->streams[stream_index]->codec;
2222
2223     switch(avctx->codec_type) {
2224     case AVMEDIA_TYPE_AUDIO:
2225         packet_queue_abort(&is->audioq);
2226
2227         SDL_CloseAudio();
2228
2229         packet_queue_end(&is->audioq);
2230         if (is->reformat_ctx)
2231             av_audio_convert_free(is->reformat_ctx);
2232         is->reformat_ctx = NULL;
2233         break;
2234     case AVMEDIA_TYPE_VIDEO:
2235         packet_queue_abort(&is->videoq);
2236
2237         /* note: we also signal this mutex to make sure we unblock the
2238            video thread in all cases */
2239         SDL_LockMutex(is->pictq_mutex);
2240         SDL_CondSignal(is->pictq_cond);
2241         SDL_UnlockMutex(is->pictq_mutex);
2242
2243         SDL_WaitThread(is->video_tid, NULL);
2244
2245         packet_queue_end(&is->videoq);
2246         break;
2247     case AVMEDIA_TYPE_SUBTITLE:
2248         packet_queue_abort(&is->subtitleq);
2249
2250         /* note: we also signal this mutex to make sure we unblock the
2251            subtitle thread in all cases */
2252         SDL_LockMutex(is->subpq_mutex);
2253         is->subtitle_stream_changed = 1;
2254
2255         SDL_CondSignal(is->subpq_cond);
2256         SDL_UnlockMutex(is->subpq_mutex);
2257
2258         SDL_WaitThread(is->subtitle_tid, NULL);
2259
2260         packet_queue_end(&is->subtitleq);
2261         break;
2262     default:
2263         break;
2264     }
2265
2266     ic->streams[stream_index]->discard = AVDISCARD_ALL;
2267     avcodec_close(avctx);
2268     switch(avctx->codec_type) {
2269     case AVMEDIA_TYPE_AUDIO:
2270         is->audio_st = NULL;
2271         is->audio_stream = -1;
2272         break;
2273     case AVMEDIA_TYPE_VIDEO:
2274         is->video_st = NULL;
2275         is->video_stream = -1;
2276         break;
2277     case AVMEDIA_TYPE_SUBTITLE:
2278         is->subtitle_st = NULL;
2279         is->subtitle_stream = -1;
2280         break;
2281     default:
2282         break;
2283     }
2284 }
2285
2286 /* since we have only one decoding thread, we can use a global
2287    variable instead of a thread local variable */
2288 static VideoState *global_video_state;
2289
2290 static int decode_interrupt_cb(void)
2291 {
2292     return (global_video_state && global_video_state->abort_request);
2293 }
2294
2295 /* this thread gets the stream from the disk or the network */
2296 static int read_thread(void *arg)
2297 {
2298     VideoState *is = arg;
2299     AVFormatContext *ic = NULL;
2300     int err, i, ret;
2301     int st_index[AVMEDIA_TYPE_NB];
2302     AVPacket pkt1, *pkt = &pkt1;
2303     int eof=0;
2304     int pkt_in_play_range = 0;
2305     AVDictionaryEntry *t;
2306
2307     memset(st_index, -1, sizeof(st_index));
2308     is->video_stream = -1;
2309     is->audio_stream = -1;
2310     is->subtitle_stream = -1;
2311
2312     global_video_state = is;
2313     avio_set_interrupt_cb(decode_interrupt_cb);
2314
2315     err = avformat_open_input(&ic, is->filename, is->iformat, &format_opts);
2316     if (err < 0) {
2317         print_error(is->filename, err);
2318         ret = -1;
2319         goto fail;
2320     }
2321     if ((t = av_dict_get(format_opts, "", NULL, AV_DICT_IGNORE_SUFFIX))) {
2322         av_log(NULL, AV_LOG_ERROR, "Option %s not found.\n", t->key);
2323         ret = AVERROR_OPTION_NOT_FOUND;
2324         goto fail;
2325     }
2326     is->ic = ic;
2327
2328     if(genpts)
2329         ic->flags |= AVFMT_FLAG_GENPTS;
2330
2331     err = av_find_stream_info(ic);
2332     if (err < 0) {
2333         fprintf(stderr, "%s: could not find codec parameters\n", is->filename);
2334         ret = -1;
2335         goto fail;
2336     }
2337     if(ic->pb)
2338         ic->pb->eof_reached= 0; //FIXME hack, ffplay maybe should not use url_feof() to test for the end
2339
2340     if(seek_by_bytes<0)
2341         seek_by_bytes= !!(ic->iformat->flags & AVFMT_TS_DISCONT);
2342
2343     /* if a start time was requested, seek to it */
2344     if (start_time != AV_NOPTS_VALUE) {
2345         int64_t timestamp;
2346
2347         timestamp = start_time;
2348         /* add the stream start time */
2349         if (ic->start_time != AV_NOPTS_VALUE)
2350             timestamp += ic->start_time;
2351         ret = avformat_seek_file(ic, -1, INT64_MIN, timestamp, INT64_MAX, 0);
2352         if (ret < 0) {
2353             fprintf(stderr, "%s: could not seek to position %0.3f\n",
2354                     is->filename, (double)timestamp / AV_TIME_BASE);
2355         }
2356     }
2357
2358     for (i = 0; i < ic->nb_streams; i++)
2359         ic->streams[i]->discard = AVDISCARD_ALL;
2360     if (!video_disable)
2361         st_index[AVMEDIA_TYPE_VIDEO] =
2362             av_find_best_stream(ic, AVMEDIA_TYPE_VIDEO,
2363                                 wanted_stream[AVMEDIA_TYPE_VIDEO], -1, NULL, 0);
2364     if (!audio_disable)
2365         st_index[AVMEDIA_TYPE_AUDIO] =
2366             av_find_best_stream(ic, AVMEDIA_TYPE_AUDIO,
2367                                 wanted_stream[AVMEDIA_TYPE_AUDIO],
2368                                 st_index[AVMEDIA_TYPE_VIDEO],
2369                                 NULL, 0);
2370     if (!video_disable)
2371         st_index[AVMEDIA_TYPE_SUBTITLE] =
2372             av_find_best_stream(ic, AVMEDIA_TYPE_SUBTITLE,
2373                                 wanted_stream[AVMEDIA_TYPE_SUBTITLE],
2374                                 (st_index[AVMEDIA_TYPE_AUDIO] >= 0 ?
2375                                  st_index[AVMEDIA_TYPE_AUDIO] :
2376                                  st_index[AVMEDIA_TYPE_VIDEO]),
2377                                 NULL, 0);
2378     if (show_status) {
2379         av_dump_format(ic, 0, is->filename, 0);
2380     }
2381
2382     is->show_mode = show_mode;
2383
2384     /* open the streams */
2385     if (st_index[AVMEDIA_TYPE_AUDIO] >= 0) {
2386         stream_component_open(is, st_index[AVMEDIA_TYPE_AUDIO]);
2387     }
2388
2389     ret=-1;
2390     if (st_index[AVMEDIA_TYPE_VIDEO] >= 0) {
2391         ret= stream_component_open(is, st_index[AVMEDIA_TYPE_VIDEO]);
2392     }
2393     is->refresh_tid = SDL_CreateThread(refresh_thread, is);
2394     if (is->show_mode == SHOW_MODE_NONE)
2395         is->show_mode = ret >= 0 ? SHOW_MODE_VIDEO : SHOW_MODE_RDFT;
2396
2397     if (st_index[AVMEDIA_TYPE_SUBTITLE] >= 0) {
2398         stream_component_open(is, st_index[AVMEDIA_TYPE_SUBTITLE]);
2399     }
2400
2401     if (is->video_stream < 0 && is->audio_stream < 0) {
2402         fprintf(stderr, "%s: could not open codecs\n", is->filename);
2403         ret = -1;
2404         goto fail;
2405     }
2406
2407     for(;;) {
2408         if (is->abort_request)
2409             break;
2410         if (is->paused != is->last_paused) {
2411             is->last_paused = is->paused;
2412             if (is->paused)
2413                 is->read_pause_return= av_read_pause(ic);
2414             else
2415                 av_read_play(ic);
2416         }
2417 #if CONFIG_RTSP_DEMUXER
2418         if (is->paused && !strcmp(ic->iformat->name, "rtsp")) {
2419             /* wait 10 ms to avoid trying to get another packet */
2420             /* XXX: horrible */
2421             SDL_Delay(10);
2422             continue;
2423         }
2424 #endif
2425         if (is->seek_req) {
2426             int64_t seek_target= is->seek_pos;
2427             int64_t seek_min= is->seek_rel > 0 ? seek_target - is->seek_rel + 2: INT64_MIN;
2428             int64_t seek_max= is->seek_rel < 0 ? seek_target - is->seek_rel - 2: INT64_MAX;
2429 //FIXME the +-2 is because rounding is not done in the correct direction when
2430 //      the seek_pos/seek_rel variables are generated
2431
2432             ret = avformat_seek_file(is->ic, -1, seek_min, seek_target, seek_max, is->seek_flags);
2433             if (ret < 0) {
2434                 fprintf(stderr, "%s: error while seeking\n", is->ic->filename);
2435             }else{
2436                 if (is->audio_stream >= 0) {
2437                     packet_queue_flush(&is->audioq);
2438                     packet_queue_put(&is->audioq, &flush_pkt);
2439                 }
2440                 if (is->subtitle_stream >= 0) {
2441                     packet_queue_flush(&is->subtitleq);
2442                     packet_queue_put(&is->subtitleq, &flush_pkt);
2443                 }
2444                 if (is->video_stream >= 0) {
2445                     packet_queue_flush(&is->videoq);
2446                     packet_queue_put(&is->videoq, &flush_pkt);
2447                 }
2448             }
2449             is->seek_req = 0;
2450             eof= 0;
2451         }
2452
2453         /* if the queues are full, no need to read more */
2454         if (   is->audioq.size + is->videoq.size + is->subtitleq.size > MAX_QUEUE_SIZE
2455             || (   (is->audioq   .size  > MIN_AUDIOQ_SIZE || is->audio_stream<0)
2456                 && (is->videoq   .nb_packets > MIN_FRAMES || is->video_stream<0)
2457                 && (is->subtitleq.nb_packets > MIN_FRAMES || is->subtitle_stream<0))) {
2458             /* wait 10 ms */
2459             SDL_Delay(10);
2460             continue;
2461         }
2462         if(eof) {
2463             if(is->video_stream >= 0){
2464                 av_init_packet(pkt);
2465                 pkt->data=NULL;
2466                 pkt->size=0;
2467                 pkt->stream_index= is->video_stream;
2468                 packet_queue_put(&is->videoq, pkt);
2469             }
2470             SDL_Delay(10);
2471             if(is->audioq.size + is->videoq.size + is->subtitleq.size ==0){
2472                 if(loop!=1 && (!loop || --loop)){
2473                     stream_seek(cur_stream, start_time != AV_NOPTS_VALUE ? start_time : 0, 0, 0);
2474                 }else if(autoexit){
2475                     ret=AVERROR_EOF;
2476                     goto fail;
2477                 }
2478             }
2479             eof=0;
2480             continue;
2481         }
2482         ret = av_read_frame(ic, pkt);
2483         if (ret < 0) {
2484             if (ret == AVERROR_EOF || url_feof(ic->pb))
2485                 eof=1;
2486             if (ic->pb && ic->pb->error)
2487                 break;
2488             SDL_Delay(100); /* wait for user event */
2489             continue;
2490         }
2491         /* check if the packet is in the user-specified play range; queue it if so, otherwise discard it */
2492         pkt_in_play_range = duration == AV_NOPTS_VALUE ||
2493                 (pkt->pts - ic->streams[pkt->stream_index]->start_time) *
2494                 av_q2d(ic->streams[pkt->stream_index]->time_base) -
2495                 (double)(start_time != AV_NOPTS_VALUE ? start_time : 0)/1000000
2496                 <= ((double)duration/1000000);
2497         if (pkt->stream_index == is->audio_stream && pkt_in_play_range) {
2498             packet_queue_put(&is->audioq, pkt);
2499         } else if (pkt->stream_index == is->video_stream && pkt_in_play_range) {
2500             packet_queue_put(&is->videoq, pkt);
2501         } else if (pkt->stream_index == is->subtitle_stream && pkt_in_play_range) {
2502             packet_queue_put(&is->subtitleq, pkt);
2503         } else {
2504             av_free_packet(pkt);
2505         }
2506     }
2507     /* wait until the end */
2508     while (!is->abort_request) {
2509         SDL_Delay(100);
2510     }
2511
2512     ret = 0;
2513  fail:
2514     /* disable interrupting */
2515     global_video_state = NULL;
2516
2517     /* close each stream */
2518     if (is->audio_stream >= 0)
2519         stream_component_close(is, is->audio_stream);
2520     if (is->video_stream >= 0)
2521         stream_component_close(is, is->video_stream);
2522     if (is->subtitle_stream >= 0)
2523         stream_component_close(is, is->subtitle_stream);
2524     if (is->ic) {
2525         av_close_input_file(is->ic);
2526         is->ic = NULL; /* safety */
2527     }
2528     avio_set_interrupt_cb(NULL);
2529
2530     if (ret != 0) {
2531         SDL_Event event;
2532
2533         event.type = FF_QUIT_EVENT;
2534         event.user.data1 = is;
2535         SDL_PushEvent(&event);
2536     }
2537     return 0;
2538 }
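
/*
 * Worked example for the play-range check in read_thread() (illustrative
 * values): with "-ss 30 -t 10", start_time = 30000000 and duration = 10000000
 * microseconds.  A packet whose pts corresponds to 38 s after the stream
 * start gives 38 - 30 = 8 <= 10 and is queued; a packet at 41 s gives
 * 11 > 10 and is freed instead.
 */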
2539
2540 static VideoState *stream_open(const char *filename, AVInputFormat *iformat)
2541 {
2542     VideoState *is;
2543
2544     is = av_mallocz(sizeof(VideoState));
2545     if (!is)
2546         return NULL;
2547     av_strlcpy(is->filename, filename, sizeof(is->filename));
2548     is->iformat = iformat;
2549     is->ytop = 0;
2550     is->xleft = 0;
2551
2552     /* start video display */
2553     is->pictq_mutex = SDL_CreateMutex();
2554     is->pictq_cond = SDL_CreateCond();
2555
2556     is->subpq_mutex = SDL_CreateMutex();
2557     is->subpq_cond = SDL_CreateCond();
2558
2559     is->av_sync_type = av_sync_type;
2560     is->read_tid = SDL_CreateThread(read_thread, is);
2561     if (!is->read_tid) {
2562         av_free(is);
2563         return NULL;
2564     }
2565     return is;
2566 }
2567
2568 static void stream_cycle_channel(VideoState *is, int codec_type)
2569 {
2570     AVFormatContext *ic = is->ic;
2571     int start_index, stream_index;
2572     AVStream *st;
2573
2574     if (codec_type == AVMEDIA_TYPE_VIDEO)
2575         start_index = is->video_stream;
2576     else if (codec_type == AVMEDIA_TYPE_AUDIO)
2577         start_index = is->audio_stream;
2578     else
2579         start_index = is->subtitle_stream;
2580     if (start_index < (codec_type == AVMEDIA_TYPE_SUBTITLE ? -1 : 0))
2581         return;
2582     stream_index = start_index;
2583     for(;;) {
2584         if (++stream_index >= is->ic->nb_streams)
2585         {
2586             if (codec_type == AVMEDIA_TYPE_SUBTITLE)
2587             {
2588                 stream_index = -1;
2589                 goto the_end;
2590             } else
2591                 stream_index = 0;
2592         }
2593         if (stream_index == start_index)
2594             return;
2595         st = ic->streams[stream_index];
2596         if (st->codec->codec_type == codec_type) {
2597             /* check that parameters are OK */
2598             switch(codec_type) {
2599             case AVMEDIA_TYPE_AUDIO:
2600                 if (st->codec->sample_rate != 0 &&
2601                     st->codec->channels != 0)
2602                     goto the_end;
2603                 break;
2604             case AVMEDIA_TYPE_VIDEO:
2605             case AVMEDIA_TYPE_SUBTITLE:
2606                 goto the_end;
2607             default:
2608                 break;
2609             }
2610         }
2611     }
2612  the_end:
2613     stream_component_close(is, start_index);
2614     stream_component_open(is, stream_index);
2615 }
2616
2617
2618 static void toggle_full_screen(void)
2619 {
2620     is_full_screen = !is_full_screen;
2621     video_open(cur_stream);
2622 }
2623
2624 static void toggle_pause(void)
2625 {
2626     if (cur_stream)
2627         stream_toggle_pause(cur_stream);
2628     step = 0;
2629 }
2630
2631 static void step_to_next_frame(void)
2632 {
2633     if (cur_stream) {
2634         /* if the stream is paused, unpause it, then step */
2635         if (cur_stream->paused)
2636             stream_toggle_pause(cur_stream);
2637     }
2638     step = 1;
2639 }
2640
2641 static void toggle_audio_display(void)
2642 {
2643     if (cur_stream) {
2644         int bgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0x00);
2645         cur_stream->show_mode = (cur_stream->show_mode + 1) % SHOW_MODE_NB;
2646         fill_rectangle(screen,
2647                     cur_stream->xleft, cur_stream->ytop, cur_stream->width, cur_stream->height,
2648                     bgcolor);
2649         SDL_UpdateRect(screen, cur_stream->xleft, cur_stream->ytop, cur_stream->width, cur_stream->height);
2650     }
2651 }
2652
2653 /* handle an event sent by the GUI */
2654 static void event_loop(void)
2655 {
2656     SDL_Event event;
2657     double incr, pos, frac;
2658
2659     for(;;) {
2660         double x;
2661         SDL_WaitEvent(&event);
2662         switch(event.type) {
2663         case SDL_KEYDOWN:
2664             if (exit_on_keydown) {
2665                 do_exit();
2666                 break;
2667             }
2668             switch(event.key.keysym.sym) {
2669             case SDLK_ESCAPE:
2670             case SDLK_q:
2671                 do_exit();
2672                 break;
2673             case SDLK_f:
2674                 toggle_full_screen();
2675                 break;
2676             case SDLK_p:
2677             case SDLK_SPACE:
2678                 toggle_pause();
2679                 break;
2680             case SDLK_s: //S: Step to next frame
2681                 step_to_next_frame();
2682                 break;
2683             case SDLK_a:
2684                 if (cur_stream)
2685                     stream_cycle_channel(cur_stream, AVMEDIA_TYPE_AUDIO);
2686                 break;
2687             case SDLK_v:
2688                 if (cur_stream)
2689                     stream_cycle_channel(cur_stream, AVMEDIA_TYPE_VIDEO);
2690                 break;
2691             case SDLK_t:
2692                 if (cur_stream)
2693                     stream_cycle_channel(cur_stream, AVMEDIA_TYPE_SUBTITLE);
2694                 break;
2695             case SDLK_w:
2696                 toggle_audio_display();
2697                 break;
2698             case SDLK_LEFT:
2699                 incr = -10.0;
2700                 goto do_seek;
2701             case SDLK_RIGHT:
2702                 incr = 10.0;
2703                 goto do_seek;
2704             case SDLK_UP:
2705                 incr = 60.0;
2706                 goto do_seek;
2707             case SDLK_DOWN:
2708                 incr = -60.0;
2709             do_seek:
2710                 if (cur_stream) {
2711                     if (seek_by_bytes) {
2712                         if (cur_stream->video_stream >= 0 && cur_stream->video_current_pos>=0){
2713                             pos= cur_stream->video_current_pos;
2714                         }else if(cur_stream->audio_stream >= 0 && cur_stream->audio_pkt.pos>=0){
2715                             pos= cur_stream->audio_pkt.pos;
2716                         }else
2717                             pos = avio_tell(cur_stream->ic->pb);
2718                         if (cur_stream->ic->bit_rate)
2719                             incr *= cur_stream->ic->bit_rate / 8.0;
2720                         else
2721                             incr *= 180000.0;
2722                         pos += incr;
2723                         stream_seek(cur_stream, pos, incr, 1);
2724                     } else {
2725                         pos = get_master_clock(cur_stream);
2726                         pos += incr;
2727                         stream_seek(cur_stream, (int64_t)(pos * AV_TIME_BASE), (int64_t)(incr * AV_TIME_BASE), 0);
2728                     }
2729                 }
2730                 break;
2731             default:
2732                 break;
2733             }
2734             break;
2735         case SDL_MOUSEBUTTONDOWN:
2736             if (exit_on_mousedown) {
2737                 do_exit();
2738                 break;
2739             }
2740         case SDL_MOUSEMOTION:
2741             if(event.type ==SDL_MOUSEBUTTONDOWN){
2742                 x= event.button.x;
2743             }else{
2744                 if(event.motion.state != SDL_PRESSED)
2745                     break;
2746                 x= event.motion.x;
2747             }
2748             if (cur_stream) {
2749                 if(seek_by_bytes || cur_stream->ic->duration<=0){
2750                     uint64_t size=  avio_size(cur_stream->ic->pb);
2751                     stream_seek(cur_stream, size*x/cur_stream->width, 0, 1);
2752                 }else{
2753                     int64_t ts;
2754                     int ns, hh, mm, ss;
2755                     int tns, thh, tmm, tss;
2756                     tns = cur_stream->ic->duration/1000000LL;
2757                     thh = tns/3600;
2758                     tmm = (tns%3600)/60;
2759                     tss = (tns%60);
2760                     frac = x/cur_stream->width;
2761                     ns = frac*tns;
2762                     hh = ns/3600;
2763                     mm = (ns%3600)/60;
2764                     ss = (ns%60);
2765                     fprintf(stderr, "Seek to %2.0f%% (%2d:%02d:%02d) of total duration (%2d:%02d:%02d)       \n", frac*100,
2766                             hh, mm, ss, thh, tmm, tss);
2767                     ts = frac*cur_stream->ic->duration;
2768                     if (cur_stream->ic->start_time != AV_NOPTS_VALUE)
2769                         ts += cur_stream->ic->start_time;
2770                     stream_seek(cur_stream, ts, 0, 0);
2771                 }
2772             }
2773             break;
2774         case SDL_VIDEORESIZE:
2775             if (cur_stream) {
2776                 screen = SDL_SetVideoMode(event.resize.w, event.resize.h, 0,
2777                                           SDL_HWSURFACE|SDL_RESIZABLE|SDL_ASYNCBLIT|SDL_HWACCEL);
2778                 screen_width = cur_stream->width = event.resize.w;
2779                 screen_height= cur_stream->height= event.resize.h;
2780             }
2781             break;
2782         case SDL_QUIT:
2783         case FF_QUIT_EVENT:
2784             do_exit();
2785             break;
2786         case FF_ALLOC_EVENT:
2787             video_open(event.user.data1);
2788             alloc_picture(event.user.data1);
2789             break;
2790         case FF_REFRESH_EVENT:
2791             video_refresh(event.user.data1);
2792             cur_stream->refresh=0;
2793             break;
2794         default:
2795             break;
2796         }
2797     }
2798 }
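
/*
 * Worked example for the keyboard seek above (illustrative numbers): a
 * right-arrow press starts from incr = 10.0.  In byte-seek mode with a
 * stream bit rate of 8 Mb/s this becomes 10 * 8000000 / 8.0 = 10000000
 * bytes (about 10 MB forward); with an unknown bit rate the fallback factor
 * gives 10 * 180000.0 = 1800000 bytes.  In time-based mode the same key
 * seeks 10 seconds forward of the master clock.
 */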
2799
2800 static int opt_frame_size(const char *opt, const char *arg)
2801 {
2802     if (av_parse_video_size(&frame_width, &frame_height, arg) < 0) {
2803         fprintf(stderr, "Incorrect frame size\n");
2804         return AVERROR(EINVAL);
2805     }
2806     if ((frame_width % 2) != 0 || (frame_height % 2) != 0) {
2807         fprintf(stderr, "Frame size must be a multiple of 2\n");
2808         return AVERROR(EINVAL);
2809     }
2810     return 0;
2811 }
2812
2813 static int opt_width(const char *opt, const char *arg)
2814 {
2815     screen_width = parse_number_or_die(opt, arg, OPT_INT64, 1, INT_MAX);
2816     return 0;
2817 }
2818
2819 static int opt_height(const char *opt, const char *arg)
2820 {
2821     screen_height = parse_number_or_die(opt, arg, OPT_INT64, 1, INT_MAX);
2822     return 0;
2823 }
2824
2825 static int opt_format(const char *opt, const char *arg)
2826 {
2827     file_iformat = av_find_input_format(arg);
2828     if (!file_iformat) {
2829         fprintf(stderr, "Unknown input format: %s\n", arg);
2830         return AVERROR(EINVAL);
2831     }
2832     return 0;
2833 }
2834
2835 static int opt_frame_pix_fmt(const char *opt, const char *arg)
2836 {
2837     frame_pix_fmt = av_get_pix_fmt(arg);
2838     return 0;
2839 }
2840
2841 static int opt_sync(const char *opt, const char *arg)
2842 {
2843     if (!strcmp(arg, "audio"))
2844         av_sync_type = AV_SYNC_AUDIO_MASTER;
2845     else if (!strcmp(arg, "video"))
2846         av_sync_type = AV_SYNC_VIDEO_MASTER;
2847     else if (!strcmp(arg, "ext"))
2848         av_sync_type = AV_SYNC_EXTERNAL_CLOCK;
2849     else {
2850         fprintf(stderr, "Unknown value for %s: %s\n", opt, arg);
2851         exit(1);
2852     }
2853     return 0;
2854 }
2855
2856 static int opt_seek(const char *opt, const char *arg)
2857 {
2858     start_time = parse_time_or_die(opt, arg, 1);
2859     return 0;
2860 }
2861
2862 static int opt_duration(const char *opt, const char *arg)
2863 {
2864     duration = parse_time_or_die(opt, arg, 1);
2865     return 0;
2866 }
2867
2868 static int opt_thread_count(const char *opt, const char *arg)
2869 {
2870     thread_count= parse_number_or_die(opt, arg, OPT_INT64, 0, INT_MAX);
2871 #if !HAVE_THREADS
2872     fprintf(stderr, "Warning: not compiled with thread support, using thread emulation\n");
2873 #endif
2874     return 0;
2875 }
2876
2877 static int opt_show_mode(const char *opt, const char *arg)
2878 {
2879     show_mode = !strcmp(arg, "video") ? SHOW_MODE_VIDEO :
2880                 !strcmp(arg, "waves") ? SHOW_MODE_WAVES :
2881                 !strcmp(arg, "rdft" ) ? SHOW_MODE_RDFT  :
2882                 parse_number_or_die(opt, arg, OPT_INT, 0, SHOW_MODE_NB-1);
2883     return 0;
2884 }
2885
2886 static int opt_input_file(const char *opt, const char *filename)
2887 {
2888     if (input_filename) {
2889         fprintf(stderr, "Argument '%s' provided as input filename, but '%s' was already specified.\n",
2890                 filename, input_filename);
2891         exit(1);
2892     }
2893     if (!strcmp(filename, "-"))
2894         filename = "pipe:";
2895     input_filename = filename;
2896     return 0;
2897 }
2898
2899 static const OptionDef options[] = {
2900 #include "cmdutils_common_opts.h"
2901     { "x", HAS_ARG, {(void*)opt_width}, "force displayed width", "width" },
2902     { "y", HAS_ARG, {(void*)opt_height}, "force displayed height", "height" },
2903     { "s", HAS_ARG | OPT_VIDEO, {(void*)opt_frame_size}, "set frame size (WxH or abbreviation)", "size" },
2904     { "fs", OPT_BOOL, {(void*)&is_full_screen}, "force full screen" },
2905     { "an", OPT_BOOL, {(void*)&audio_disable}, "disable audio" },
2906     { "vn", OPT_BOOL, {(void*)&video_disable}, "disable video" },
2907     { "ast", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&wanted_stream[AVMEDIA_TYPE_AUDIO]}, "select desired audio stream", "stream_number" },
2908     { "vst", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&wanted_stream[AVMEDIA_TYPE_VIDEO]}, "select desired video stream", "stream_number" },
2909     { "sst", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&wanted_stream[AVMEDIA_TYPE_SUBTITLE]}, "select desired subtitle stream", "stream_number" },
2910     { "ss", HAS_ARG, {(void*)&opt_seek}, "seek to a given position in seconds", "pos" },
2911     { "t", HAS_ARG, {(void*)&opt_duration}, "play \"duration\" seconds of audio/video", "duration" },
2912     { "bytes", OPT_INT | HAS_ARG, {(void*)&seek_by_bytes}, "seek by bytes 0=off 1=on -1=auto", "val" },
2913     { "nodisp", OPT_BOOL, {(void*)&display_disable}, "disable graphical display" },
2914     { "f", HAS_ARG, {(void*)opt_format}, "force format", "fmt" },
2915     { "pix_fmt", HAS_ARG | OPT_EXPERT | OPT_VIDEO, {(void*)opt_frame_pix_fmt}, "set pixel format", "format" },
2916     { "stats", OPT_BOOL | OPT_EXPERT, {(void*)&show_status}, "show status", "" },
2917     { "bug", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&workaround_bugs}, "work around bugs", "" },
2918     { "fast", OPT_BOOL | OPT_EXPERT, {(void*)&fast}, "non-spec-compliant optimizations", "" },
2919     { "genpts", OPT_BOOL | OPT_EXPERT, {(void*)&genpts}, "generate pts", "" },
2920     { "drp", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&decoder_reorder_pts}, "let decoder reorder pts 0=off 1=on -1=auto", ""},
2921     { "lowres", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&lowres}, "", "" },
2922     { "skiploop", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&skip_loop_filter}, "", "" },
2923     { "skipframe", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&skip_frame}, "", "" },
2924     { "skipidct", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&skip_idct}, "", "" },
2925     { "idct", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&idct}, "set idct algo",  "algo" },
2926     { "er", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&error_recognition}, "set error detection threshold (0-4)",  "threshold" },
2927     { "ec", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&error_concealment}, "set error concealment options",  "bit_mask" },
2928     { "sync", HAS_ARG | OPT_EXPERT, {(void*)opt_sync}, "set audio-video sync. type (type=audio/video/ext)", "type" },
2929     { "threads", HAS_ARG | OPT_EXPERT, {(void*)opt_thread_count}, "thread count", "count" },
2930     { "autoexit", OPT_BOOL | OPT_EXPERT, {(void*)&autoexit}, "exit at the end", "" },
2931     { "exitonkeydown", OPT_BOOL | OPT_EXPERT, {(void*)&exit_on_keydown}, "exit on key down", "" },
2932     { "exitonmousedown", OPT_BOOL | OPT_EXPERT, {(void*)&exit_on_mousedown}, "exit on mouse down", "" },
2933     { "loop", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&loop}, "set number of times the playback shall be looped", "loop count" },
2934     { "framedrop", OPT_BOOL | OPT_EXPERT, {(void*)&framedrop}, "drop frames when cpu is too slow", "" },
2935     { "window_title", OPT_STRING | HAS_ARG, {(void*)&window_title}, "set window title", "window title" },
2936 #if CONFIG_AVFILTER
2937     { "vf", OPT_STRING | HAS_ARG, {(void*)&vfilters}, "video filters", "filter list" },
2938 #endif
2939     { "rdftspeed", OPT_INT | HAS_ARG| OPT_AUDIO | OPT_EXPERT, {(void*)&rdftspeed}, "rdft speed", "msecs" },
2940     { "showmode", HAS_ARG, {(void*)opt_show_mode}, "select show mode (0 = video, 1 = waves, 2 = RDFT)", "mode" },
2941     { "default", HAS_ARG | OPT_AUDIO | OPT_VIDEO | OPT_EXPERT, {(void*)opt_default}, "generic catch all option", "" },
2942     { "i", HAS_ARG, {(void *)opt_input_file}, "read specified file", "input_file"},
2943     { NULL, },
2944 };
2945
2946 static void show_usage(void)
2947 {
2948     printf("Simple media player\n");
2949     printf("usage: ffplay [options] input_file\n");
2950     printf("\n");
2951 }
2952
2953 static void show_help(void)
2954 {
2955     av_log_set_callback(log_callback_help);
2956     show_usage();
2957     show_help_options(options, "Main options:\n",
2958                       OPT_EXPERT, 0);
2959     show_help_options(options, "\nAdvanced options:\n",
2960                       OPT_EXPERT, OPT_EXPERT);
2961     printf("\n");
2962     av_opt_show2(avcodec_opts[0], NULL,
2963                  AV_OPT_FLAG_DECODING_PARAM, 0);
2964     printf("\n");
2965     av_opt_show2(avformat_opts, NULL,
2966                  AV_OPT_FLAG_DECODING_PARAM, 0);
2967 #if !CONFIG_AVFILTER
2968     printf("\n");
2969     av_opt_show2(sws_opts, NULL,
2970                  AV_OPT_FLAG_ENCODING_PARAM, 0);
2971 #endif
2972     printf("\nWhile playing:\n"
2973            "q, ESC              quit\n"
2974            "f                   toggle full screen\n"
2975            "p, SPC              pause\n"
2976            "a                   cycle audio channel\n"
2977            "v                   cycle video channel\n"
2978            "t                   cycle subtitle channel\n"
2979            "w                   cycle audio display modes (video/waves/RDFT)\n"
2980            "s                   activate frame-step mode\n"
2981            "left/right          seek backward/forward 10 seconds\n"
2982            "down/up             seek backward/forward 1 minute\n"
2983            "mouse click         seek to percentage in file corresponding to fraction of width\n"
2984            );
2985 }
2986
2987 /* program entry point */
2988 int main(int argc, char **argv)
2989 {
2990     int flags;
2991
2992     av_log_set_flags(AV_LOG_SKIP_REPEATED);
2993
2994     /* register all codecs, demuxers and protocols */
2995     avcodec_register_all();
2996 #if CONFIG_AVDEVICE
2997     avdevice_register_all();
2998 #endif
2999 #if CONFIG_AVFILTER
3000     avfilter_register_all();
3001 #endif
3002     av_register_all();
3003
3004     init_opts();
3005
3006     show_banner();
3007
3008     parse_options(argc, argv, options, opt_input_file);
3009
3010     if (!input_filename) {
3011         show_usage();
3012         fprintf(stderr, "An input file must be specified\n");
3013         fprintf(stderr, "Use -h to get full help or, even better, run 'man ffplay'\n");
3014         exit(1);
3015     }
3016
3017     if (display_disable) {
3018         video_disable = 1;
3019     }
3020     flags = SDL_INIT_VIDEO | SDL_INIT_AUDIO | SDL_INIT_TIMER;
3021     if (audio_disable)
3022         flags &= ~SDL_INIT_AUDIO;
3023 #if !defined(__MINGW32__) && !defined(__APPLE__)
3024     flags |= SDL_INIT_EVENTTHREAD; /* Not supported on Windows or Mac OS X */
3025 #endif
3026     if (SDL_Init (flags)) {
3027         fprintf(stderr, "Could not initialize SDL - %s\n", SDL_GetError());
3028         exit(1);
3029     }
3030
3031     if (!display_disable) {
3032 #if HAVE_SDL_VIDEO_SIZE
3033         const SDL_VideoInfo *vi = SDL_GetVideoInfo();
3034         fs_screen_width = vi->current_w;
3035         fs_screen_height = vi->current_h;
3036 #endif
3037     }
3038
3039     SDL_EventState(SDL_ACTIVEEVENT, SDL_IGNORE);
3040     SDL_EventState(SDL_SYSWMEVENT, SDL_IGNORE);
3041     SDL_EventState(SDL_USEREVENT, SDL_IGNORE);
3042
3043     av_init_packet(&flush_pkt);
3044     flush_pkt.data = (uint8_t *)"FLUSH";
3045
3046     cur_stream = stream_open(input_filename, file_iformat);
3047
3048     event_loop();
3049
3050     /* never returns */
3051
3052     return 0;
3053 }