1 /*
2  * ffplay : Simple Media Player based on the FFmpeg libraries
3  * Copyright (c) 2003 Fabrice Bellard
4  *
5  * This file is part of FFmpeg.
6  *
7  * FFmpeg is free software; you can redistribute it and/or
8  * modify it under the terms of the GNU Lesser General Public
9  * License as published by the Free Software Foundation; either
10  * version 2.1 of the License, or (at your option) any later version.
11  *
12  * FFmpeg is distributed in the hope that it will be useful,
13  * but WITHOUT ANY WARRANTY; without even the implied warranty of
14  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
15  * Lesser General Public License for more details.
16  *
17  * You should have received a copy of the GNU Lesser General Public
18  * License along with FFmpeg; if not, write to the Free Software
19  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
20  */
21
22 #include "config.h"
23 #include <inttypes.h>
24 #include <math.h>
25 #include <limits.h>
26 #include "libavutil/avstring.h"
27 #include "libavutil/colorspace.h"
28 #include "libavutil/pixdesc.h"
29 #include "libavutil/imgutils.h"
30 #include "libavutil/parseutils.h"
31 #include "libavutil/samplefmt.h"
32 #include "libavutil/avassert.h"
33 #include "libavformat/avformat.h"
34 #include "libavdevice/avdevice.h"
35 #include "libswscale/swscale.h"
36 #include "libavcodec/audioconvert.h"
37 #include "libavutil/opt.h"
38 #include "libavcodec/avfft.h"
39
40 #if CONFIG_AVFILTER
41 # include "libavfilter/avcodec.h"
42 # include "libavfilter/avfilter.h"
43 # include "libavfilter/avfiltergraph.h"
44 #endif
45
46 #include "cmdutils.h"
47
48 #include <SDL.h>
49 #include <SDL_thread.h>
50
51 #ifdef __MINGW32__
52 #undef main /* We don't want SDL to override our main() */
53 #endif
54
55 #include <unistd.h>
56 #include <assert.h>
57
58 const char program_name[] = "ffplay";
59 const int program_birth_year = 2003;
60
61 //#define DEBUG
62 //#define DEBUG_SYNC
63
64 #define MAX_QUEUE_SIZE (15 * 1024 * 1024)
65 #define MIN_AUDIOQ_SIZE (20 * 16 * 1024)
66 #define MIN_FRAMES 5
67
68 /* SDL audio buffer size, in samples. Should be small to have precise
69    A/V sync as SDL does not have hardware buffer fullness info. */
70 #define SDL_AUDIO_BUFFER_SIZE 1024
71
72 /* no AV sync correction is done if below the AV sync threshold */
73 #define AV_SYNC_THRESHOLD 0.01
74 /* no AV correction is done if too big error */
75 #define AV_NOSYNC_THRESHOLD 10.0
76
77 #define FRAME_SKIP_FACTOR 0.05
78
79 /* maximum audio speed change to get correct sync */
80 #define SAMPLE_CORRECTION_PERCENT_MAX 10
81
82 /* we use about AUDIO_DIFF_AVG_NB A-V differences to make the average */
83 #define AUDIO_DIFF_AVG_NB   20
84
85 /* NOTE: the size must be big enough to compensate for the hardware audio buffer size */
86 #define SAMPLE_ARRAY_SIZE (2*65536)
87
88 static int sws_flags = SWS_BICUBIC;
89
90 typedef struct PacketQueue {
91     AVPacketList *first_pkt, *last_pkt;
92     int nb_packets;
93     int size;
94     int abort_request;
95     SDL_mutex *mutex;
96     SDL_cond *cond;
97 } PacketQueue;
98
99 #define VIDEO_PICTURE_QUEUE_SIZE 2
100 #define SUBPICTURE_QUEUE_SIZE 4
101
102 typedef struct VideoPicture {
103     double pts;                                  ///<presentation time stamp for this picture
104     double target_clock;                         ///<av_gettime() time at which this should be displayed ideally
105     int64_t pos;                                 ///<byte position in file
106     SDL_Overlay *bmp;
107     int width, height; /* source height & width */
108     int allocated;
109     enum PixelFormat pix_fmt;
110
111 #if CONFIG_AVFILTER
112     AVFilterBufferRef *picref;
113 #endif
114 } VideoPicture;
115
116 typedef struct SubPicture {
117     double pts; /* presentation time stamp for this picture */
118     AVSubtitle sub;
119 } SubPicture;
120
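/* Which clock the other streams are slaved to; get_master_clock() below picks
   between them at runtime. ffplay defaults to AV_SYNC_AUDIO_MASTER (see the
   av_sync_type option), so video and subtitles are scheduled against audio. */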
121 enum {
122     AV_SYNC_AUDIO_MASTER, /* default choice */
123     AV_SYNC_VIDEO_MASTER,
124     AV_SYNC_EXTERNAL_CLOCK, /* synchronize to an external clock */
125 };
126
127 typedef struct VideoState {
128     SDL_Thread *read_tid;
129     SDL_Thread *video_tid;
130     SDL_Thread *refresh_tid;
131     AVInputFormat *iformat;
132     int no_background;
133     int abort_request;
134     int paused;
135     int last_paused;
136     int seek_req;
137     int seek_flags;
138     int64_t seek_pos;
139     int64_t seek_rel;
140     int read_pause_return;
141     AVFormatContext *ic;
142
143     int audio_stream;
144
145     int av_sync_type;
146     double external_clock; /* external clock base */
147     int64_t external_clock_time;
148
149     double audio_clock;
150     double audio_diff_cum; /* used for AV difference average computation */
151     double audio_diff_avg_coef;
152     double audio_diff_threshold;
153     int audio_diff_avg_count;
154     AVStream *audio_st;
155     PacketQueue audioq;
156     int audio_hw_buf_size;
157     /* samples output by the codec. we reserve more space for avsync
158        compensation */
159     DECLARE_ALIGNED(16,uint8_t,audio_buf1)[(AVCODEC_MAX_AUDIO_FRAME_SIZE * 3) / 2];
160     DECLARE_ALIGNED(16,uint8_t,audio_buf2)[(AVCODEC_MAX_AUDIO_FRAME_SIZE * 3) / 2];
161     uint8_t *audio_buf;
162     unsigned int audio_buf_size; /* in bytes */
163     int audio_buf_index; /* in bytes */
164     AVPacket audio_pkt_temp;
165     AVPacket audio_pkt;
166     enum AVSampleFormat audio_src_fmt;
167     AVAudioConvert *reformat_ctx;
168
169     enum ShowMode {
170         SHOW_MODE_NONE = -1, SHOW_MODE_VIDEO = 0, SHOW_MODE_WAVES, SHOW_MODE_RDFT, SHOW_MODE_NB
171     } show_mode;
172     int16_t sample_array[SAMPLE_ARRAY_SIZE];
173     int sample_array_index;
174     int last_i_start;
175     RDFTContext *rdft;
176     int rdft_bits;
177     FFTSample *rdft_data;
178     int xpos;
179
180     SDL_Thread *subtitle_tid;
181     int subtitle_stream;
182     int subtitle_stream_changed;
183     AVStream *subtitle_st;
184     PacketQueue subtitleq;
185     SubPicture subpq[SUBPICTURE_QUEUE_SIZE];
186     int subpq_size, subpq_rindex, subpq_windex;
187     SDL_mutex *subpq_mutex;
188     SDL_cond *subpq_cond;
189
190     double frame_timer;
191     double frame_last_pts;
192     double frame_last_delay;
193     double video_clock;                          ///<pts of last decoded frame / predicted pts of next decoded frame
194     int video_stream;
195     AVStream *video_st;
196     PacketQueue videoq;
197     double video_current_pts;                    ///<current displayed pts (different from video_clock if frame fifos are used)
198     double video_current_pts_drift;              ///<video_current_pts - time (av_gettime) at which we updated video_current_pts - used to have running video pts
199     int64_t video_current_pos;                   ///<current displayed file pos
200     VideoPicture pictq[VIDEO_PICTURE_QUEUE_SIZE];
201     int pictq_size, pictq_rindex, pictq_windex;
202     SDL_mutex *pictq_mutex;
203     SDL_cond *pictq_cond;
204 #if !CONFIG_AVFILTER
205     struct SwsContext *img_convert_ctx;
206 #endif
207
208     char filename[1024];
209     int width, height, xleft, ytop;
210
211 #if CONFIG_AVFILTER
212     AVFilterContext *out_video_filter;          ///<the last filter in the video chain
213 #endif
214
215     float skip_frames;
216     float skip_frames_index;
217     int refresh;
218 } VideoState;
219
220 static void show_help(void);
221
222 /* options specified by the user */
223 static AVInputFormat *file_iformat;
224 static const char *input_filename;
225 static const char *window_title;
226 static int fs_screen_width;
227 static int fs_screen_height;
228 static int screen_width = 0;
229 static int screen_height = 0;
230 static int frame_width = 0;
231 static int frame_height = 0;
232 static enum PixelFormat frame_pix_fmt = PIX_FMT_NONE;
233 static int audio_disable;
234 static int video_disable;
235 static int wanted_stream[AVMEDIA_TYPE_NB]={
236     [AVMEDIA_TYPE_AUDIO]=-1,
237     [AVMEDIA_TYPE_VIDEO]=-1,
238     [AVMEDIA_TYPE_SUBTITLE]=-1,
239 };
240 static int seek_by_bytes=-1;
241 static int display_disable;
242 static int show_status = 1;
243 static int av_sync_type = AV_SYNC_AUDIO_MASTER;
244 static int64_t start_time = AV_NOPTS_VALUE;
245 static int64_t duration = AV_NOPTS_VALUE;
246 static int debug = 0;
247 static int debug_mv = 0;
248 static int step = 0;
249 static int thread_count = 1;
250 static int workaround_bugs = 1;
251 static int fast = 0;
252 static int genpts = 0;
253 static int lowres = 0;
254 static int idct = FF_IDCT_AUTO;
255 static enum AVDiscard skip_frame= AVDISCARD_DEFAULT;
256 static enum AVDiscard skip_idct= AVDISCARD_DEFAULT;
257 static enum AVDiscard skip_loop_filter= AVDISCARD_DEFAULT;
258 static int error_recognition = FF_ER_CAREFUL;
259 static int error_concealment = 3;
260 static int decoder_reorder_pts= -1;
261 static int autoexit;
262 static int exit_on_keydown;
263 static int exit_on_mousedown;
264 static int loop=1;
265 static int framedrop=1;
266 static enum ShowMode show_mode = SHOW_MODE_NONE;
267
268 static int rdftspeed=20;
269 #if CONFIG_AVFILTER
270 static char *vfilters = NULL;
271 #endif
272
273 /* current context */
274 static int is_full_screen;
275 static VideoState *cur_stream;
276 static int64_t audio_callback_time;
277
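/* Sentinel packet queued on seek: when a decoder thread dequeues a packet whose
   data pointer matches flush_pkt.data, it calls avcodec_flush_buffers() and
   resets its timing state instead of decoding it (see get_video_frame()). */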
278 static AVPacket flush_pkt;
279
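/* Custom SDL user events: FF_ALLOC_EVENT asks the main thread to (re)allocate a
   YUV overlay (see alloc_picture()), FF_REFRESH_EVENT drives video_refresh()
   from refresh_thread(), and FF_QUIT_EVENT tells the event loop to shut down. */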
280 #define FF_ALLOC_EVENT   (SDL_USEREVENT)
281 #define FF_REFRESH_EVENT (SDL_USEREVENT + 1)
282 #define FF_QUIT_EVENT    (SDL_USEREVENT + 2)
283
284 static SDL_Surface *screen;
285
286 static int packet_queue_put(PacketQueue *q, AVPacket *pkt)
287 {
288     AVPacketList *pkt1;
289
290     /* duplicate the packet */
291     if (pkt!=&flush_pkt && av_dup_packet(pkt) < 0)
292         return -1;
293
294     pkt1 = av_malloc(sizeof(AVPacketList));
295     if (!pkt1)
296         return -1;
297     pkt1->pkt = *pkt;
298     pkt1->next = NULL;
299
300
301     SDL_LockMutex(q->mutex);
302
303     if (!q->last_pkt)
304
305         q->first_pkt = pkt1;
306     else
307         q->last_pkt->next = pkt1;
308     q->last_pkt = pkt1;
309     q->nb_packets++;
310     q->size += pkt1->pkt.size + sizeof(*pkt1);
311     /* XXX: should duplicate packet data in DV case */
312     SDL_CondSignal(q->cond);
313
314     SDL_UnlockMutex(q->mutex);
315     return 0;
316 }
317
318 /* packet queue handling */
319 static void packet_queue_init(PacketQueue *q)
320 {
321     memset(q, 0, sizeof(PacketQueue));
322     q->mutex = SDL_CreateMutex();
323     q->cond = SDL_CreateCond();
324     packet_queue_put(q, &flush_pkt);
325 }
326
327 static void packet_queue_flush(PacketQueue *q)
328 {
329     AVPacketList *pkt, *pkt1;
330
331     SDL_LockMutex(q->mutex);
332     for(pkt = q->first_pkt; pkt != NULL; pkt = pkt1) {
333         pkt1 = pkt->next;
334         av_free_packet(&pkt->pkt);
335         av_freep(&pkt);
336     }
337     q->last_pkt = NULL;
338     q->first_pkt = NULL;
339     q->nb_packets = 0;
340     q->size = 0;
341     SDL_UnlockMutex(q->mutex);
342 }
343
344 static void packet_queue_end(PacketQueue *q)
345 {
346     packet_queue_flush(q);
347     SDL_DestroyMutex(q->mutex);
348     SDL_DestroyCond(q->cond);
349 }
350
351 static void packet_queue_abort(PacketQueue *q)
352 {
353     SDL_LockMutex(q->mutex);
354
355     q->abort_request = 1;
356
357     SDL_CondSignal(q->cond);
358
359     SDL_UnlockMutex(q->mutex);
360 }
361
362 /* return < 0 if aborted, 0 if no packet and > 0 if packet.  */
363 static int packet_queue_get(PacketQueue *q, AVPacket *pkt, int block)
364 {
365     AVPacketList *pkt1;
366     int ret;
367
368     SDL_LockMutex(q->mutex);
369
370     for(;;) {
371         if (q->abort_request) {
372             ret = -1;
373             break;
374         }
375
376         pkt1 = q->first_pkt;
377         if (pkt1) {
378             q->first_pkt = pkt1->next;
379             if (!q->first_pkt)
380                 q->last_pkt = NULL;
381             q->nb_packets--;
382             q->size -= pkt1->pkt.size + sizeof(*pkt1);
383             *pkt = pkt1->pkt;
384             av_free(pkt1);
385             ret = 1;
386             break;
387         } else if (!block) {
388             ret = 0;
389             break;
390         } else {
391             SDL_CondWait(q->cond, q->mutex);
392         }
393     }
394     SDL_UnlockMutex(q->mutex);
395     return ret;
396 }
397
398 static inline void fill_rectangle(SDL_Surface *screen,
399                                   int x, int y, int w, int h, int color)
400 {
401     SDL_Rect rect;
402     rect.x = x;
403     rect.y = y;
404     rect.w = w;
405     rect.h = h;
406     SDL_FillRect(screen, &rect, color);
407 }
408
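/* Blend an existing sample oldp with a new sample newp using alpha a (0..255).
   The shift s compensates for call sites that pass the sum of 2^s subsampled
   chroma values instead of a single sample. */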
409 #define ALPHA_BLEND(a, oldp, newp, s)\
410 ((((oldp << s) * (255 - (a))) + (newp * (a))) / (255 << s))
411
412 #define RGBA_IN(r, g, b, a, s)\
413 {\
414     unsigned int v = ((const uint32_t *)(s))[0];\
415     a = (v >> 24) & 0xff;\
416     r = (v >> 16) & 0xff;\
417     g = (v >> 8) & 0xff;\
418     b = v & 0xff;\
419 }
420
421 #define YUVA_IN(y, u, v, a, s, pal)\
422 {\
423     unsigned int val = ((const uint32_t *)(pal))[*(const uint8_t*)(s)];\
424     a = (val >> 24) & 0xff;\
425     y = (val >> 16) & 0xff;\
426     u = (val >> 8) & 0xff;\
427     v = val & 0xff;\
428 }
429
430 #define YUVA_OUT(d, y, u, v, a)\
431 {\
432     ((uint32_t *)(d))[0] = (a << 24) | (y << 16) | (u << 8) | v;\
433 }
434
435
436 #define BPP 1
437
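/* Blend one palettized subtitle rectangle onto a YUV420P destination picture.
   rect->pict.data[0] holds palette indices, rect->pict.data[1] the 32-bit YUVA
   palette (already converted to YCbCr); chroma is handled at half resolution,
   with special cases for odd x/y offsets and odd widths/heights. */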
438 static void blend_subrect(AVPicture *dst, const AVSubtitleRect *rect, int imgw, int imgh)
439 {
440     int wrap, wrap3, width2, skip2;
441     int y, u, v, a, u1, v1, a1, w, h;
442     uint8_t *lum, *cb, *cr;
443     const uint8_t *p;
444     const uint32_t *pal;
445     int dstx, dsty, dstw, dsth;
446
447     dstw = av_clip(rect->w, 0, imgw);
448     dsth = av_clip(rect->h, 0, imgh);
449     dstx = av_clip(rect->x, 0, imgw - dstw);
450     dsty = av_clip(rect->y, 0, imgh - dsth);
451     lum = dst->data[0] + dsty * dst->linesize[0];
452     cb = dst->data[1] + (dsty >> 1) * dst->linesize[1];
453     cr = dst->data[2] + (dsty >> 1) * dst->linesize[2];
454
455     width2 = ((dstw + 1) >> 1) + (dstx & ~dstw & 1);
456     skip2 = dstx >> 1;
457     wrap = dst->linesize[0];
458     wrap3 = rect->pict.linesize[0];
459     p = rect->pict.data[0];
460     pal = (const uint32_t *)rect->pict.data[1];  /* Now in YCrCb! */
461
462     if (dsty & 1) {
463         lum += dstx;
464         cb += skip2;
465         cr += skip2;
466
467         if (dstx & 1) {
468             YUVA_IN(y, u, v, a, p, pal);
469             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
470             cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
471             cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
472             cb++;
473             cr++;
474             lum++;
475             p += BPP;
476         }
477         for(w = dstw - (dstx & 1); w >= 2; w -= 2) {
478             YUVA_IN(y, u, v, a, p, pal);
479             u1 = u;
480             v1 = v;
481             a1 = a;
482             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
483
484             YUVA_IN(y, u, v, a, p + BPP, pal);
485             u1 += u;
486             v1 += v;
487             a1 += a;
488             lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
489             cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
490             cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
491             cb++;
492             cr++;
493             p += 2 * BPP;
494             lum += 2;
495         }
496         if (w) {
497             YUVA_IN(y, u, v, a, p, pal);
498             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
499             cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
500             cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
501             p++;
502             lum++;
503         }
504         p += wrap3 - dstw * BPP;
505         lum += wrap - dstw - dstx;
506         cb += dst->linesize[1] - width2 - skip2;
507         cr += dst->linesize[2] - width2 - skip2;
508     }
509     for(h = dsth - (dsty & 1); h >= 2; h -= 2) {
510         lum += dstx;
511         cb += skip2;
512         cr += skip2;
513
514         if (dstx & 1) {
515             YUVA_IN(y, u, v, a, p, pal);
516             u1 = u;
517             v1 = v;
518             a1 = a;
519             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
520             p += wrap3;
521             lum += wrap;
522             YUVA_IN(y, u, v, a, p, pal);
523             u1 += u;
524             v1 += v;
525             a1 += a;
526             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
527             cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
528             cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
529             cb++;
530             cr++;
531             p += -wrap3 + BPP;
532             lum += -wrap + 1;
533         }
534         for(w = dstw - (dstx & 1); w >= 2; w -= 2) {
535             YUVA_IN(y, u, v, a, p, pal);
536             u1 = u;
537             v1 = v;
538             a1 = a;
539             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
540
541             YUVA_IN(y, u, v, a, p + BPP, pal);
542             u1 += u;
543             v1 += v;
544             a1 += a;
545             lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
546             p += wrap3;
547             lum += wrap;
548
549             YUVA_IN(y, u, v, a, p, pal);
550             u1 += u;
551             v1 += v;
552             a1 += a;
553             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
554
555             YUVA_IN(y, u, v, a, p + BPP, pal);
556             u1 += u;
557             v1 += v;
558             a1 += a;
559             lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
560
561             cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 2);
562             cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 2);
563
564             cb++;
565             cr++;
566             p += -wrap3 + 2 * BPP;
567             lum += -wrap + 2;
568         }
569         if (w) {
570             YUVA_IN(y, u, v, a, p, pal);
571             u1 = u;
572             v1 = v;
573             a1 = a;
574             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
575             p += wrap3;
576             lum += wrap;
577             YUVA_IN(y, u, v, a, p, pal);
578             u1 += u;
579             v1 += v;
580             a1 += a;
581             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
582             cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
583             cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
584             cb++;
585             cr++;
586             p += -wrap3 + BPP;
587             lum += -wrap + 1;
588         }
589         p += wrap3 + (wrap3 - dstw * BPP);
590         lum += wrap + (wrap - dstw - dstx);
591         cb += dst->linesize[1] - width2 - skip2;
592         cr += dst->linesize[2] - width2 - skip2;
593     }
594     /* handle odd height */
595     if (h) {
596         lum += dstx;
597         cb += skip2;
598         cr += skip2;
599
600         if (dstx & 1) {
601             YUVA_IN(y, u, v, a, p, pal);
602             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
603             cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
604             cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
605             cb++;
606             cr++;
607             lum++;
608             p += BPP;
609         }
610         for(w = dstw - (dstx & 1); w >= 2; w -= 2) {
611             YUVA_IN(y, u, v, a, p, pal);
612             u1 = u;
613             v1 = v;
614             a1 = a;
615             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
616
617             YUVA_IN(y, u, v, a, p + BPP, pal);
618             u1 += u;
619             v1 += v;
620             a1 += a;
621             lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
622             cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
623             cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
624             cb++;
625             cr++;
626             p += 2 * BPP;
627             lum += 2;
628         }
629         if (w) {
630             YUVA_IN(y, u, v, a, p, pal);
631             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
632             cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
633             cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
634         }
635     }
636 }
637
638 static void free_subpicture(SubPicture *sp)
639 {
640     avsubtitle_free(&sp->sub);
641 }
642
643 static void video_image_display(VideoState *is)
644 {
645     VideoPicture *vp;
646     SubPicture *sp;
647     AVPicture pict;
648     float aspect_ratio;
649     int width, height, x, y;
650     SDL_Rect rect;
651     int i;
652
653     vp = &is->pictq[is->pictq_rindex];
654     if (vp->bmp) {
655 #if CONFIG_AVFILTER
656          if (vp->picref->video->sample_aspect_ratio.num == 0)
657              aspect_ratio = 0;
658          else
659              aspect_ratio = av_q2d(vp->picref->video->sample_aspect_ratio);
660 #else
661
662         /* XXX: use variable in the frame */
663         if (is->video_st->sample_aspect_ratio.num)
664             aspect_ratio = av_q2d(is->video_st->sample_aspect_ratio);
665         else if (is->video_st->codec->sample_aspect_ratio.num)
666             aspect_ratio = av_q2d(is->video_st->codec->sample_aspect_ratio);
667         else
668             aspect_ratio = 0;
669 #endif
670         if (aspect_ratio <= 0.0)
671             aspect_ratio = 1.0;
672         aspect_ratio *= (float)vp->width / (float)vp->height;
673
674         if (is->subtitle_st) {
675             if (is->subpq_size > 0) {
676                 sp = &is->subpq[is->subpq_rindex];
677
678                 if (vp->pts >= sp->pts + ((float) sp->sub.start_display_time / 1000)) {
679                     SDL_LockYUVOverlay (vp->bmp);
680
681                     pict.data[0] = vp->bmp->pixels[0];
682                     pict.data[1] = vp->bmp->pixels[2];
683                     pict.data[2] = vp->bmp->pixels[1];
684
685                     pict.linesize[0] = vp->bmp->pitches[0];
686                     pict.linesize[1] = vp->bmp->pitches[2];
687                     pict.linesize[2] = vp->bmp->pitches[1];
688
689                     for (i = 0; i < sp->sub.num_rects; i++)
690                         blend_subrect(&pict, sp->sub.rects[i],
691                                       vp->bmp->w, vp->bmp->h);
692
693                     SDL_UnlockYUVOverlay (vp->bmp);
694                 }
695             }
696         }
697
698
699         /* XXX: we assume the screen has a 1:1 pixel aspect ratio */
700         height = is->height;
701         width = ((int)rint(height * aspect_ratio)) & ~1;
702         if (width > is->width) {
703             width = is->width;
704             height = ((int)rint(width / aspect_ratio)) & ~1;
705         }
706         x = (is->width - width) / 2;
707         y = (is->height - height) / 2;
708         is->no_background = 0;
709         rect.x = is->xleft + x;
710         rect.y = is->ytop  + y;
711         rect.w = FFMAX(width,  1);
712         rect.h = FFMAX(height, 1);
713         SDL_DisplayYUVOverlay(vp->bmp, &rect);
714     }
715 }
716
717 /* get the amount of decoded audio data remaining in the output buffer,
718    in bytes. With SDL we cannot get precise hardware buffer fullness info. */
719 static int audio_write_get_buf_size(VideoState *is)
720 {
721     return is->audio_buf_size - is->audio_buf_index;
722 }
723
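/* Modulo that always returns a value in [0, b), even for negative a; used to
   wrap indices into the circular sample_array. */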
724 static inline int compute_mod(int a, int b)
725 {
726     return a < 0 ? a%b + b : a%b;
727 }
728
729 static void video_audio_display(VideoState *s)
730 {
731     int i, i_start, x, y1, y, ys, delay, n, nb_display_channels;
732     int ch, channels, h, h2, bgcolor, fgcolor;
733     int16_t time_diff;
734     int rdft_bits, nb_freq;
735
736     for(rdft_bits=1; (1<<rdft_bits)<2*s->height; rdft_bits++)
737         ;
738     nb_freq= 1<<(rdft_bits-1);
739
740     /* compute display index: center on currently output samples */
741     channels = s->audio_st->codec->channels;
742     nb_display_channels = channels;
743     if (!s->paused) {
744         int data_used= s->show_mode == SHOW_MODE_WAVES ? s->width : (2*nb_freq);
745         n = 2 * channels;
746         delay = audio_write_get_buf_size(s);
747         delay /= n;
748
749         /* to be more precise, we take into account the time spent since
750            the last buffer computation */
751         if (audio_callback_time) {
752             time_diff = av_gettime() - audio_callback_time;
753             delay -= (time_diff * s->audio_st->codec->sample_rate) / 1000000;
754         }
755
756         delay += 2*data_used;
757         if (delay < data_used)
758             delay = data_used;
759
760         i_start= x = compute_mod(s->sample_array_index - delay * channels, SAMPLE_ARRAY_SIZE);
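        /* Oscilloscope-style trigger (heuristic): look back over the last 1000
           samples for a zero crossing with a large amplitude swing, so the
           drawn waveform stays roughly stable from one refresh to the next. */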
761         if (s->show_mode == SHOW_MODE_WAVES) {
762             h= INT_MIN;
763             for(i=0; i<1000; i+=channels){
764                 int idx= (SAMPLE_ARRAY_SIZE + x - i) % SAMPLE_ARRAY_SIZE;
765                 int a= s->sample_array[idx];
766                 int b= s->sample_array[(idx + 4*channels)%SAMPLE_ARRAY_SIZE];
767                 int c= s->sample_array[(idx + 5*channels)%SAMPLE_ARRAY_SIZE];
768                 int d= s->sample_array[(idx + 9*channels)%SAMPLE_ARRAY_SIZE];
769                 int score= a-d;
770                 if(h<score && (b^c)<0){
771                     h= score;
772                     i_start= idx;
773                 }
774             }
775         }
776
777         s->last_i_start = i_start;
778     } else {
779         i_start = s->last_i_start;
780     }
781
782     bgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0x00);
783     if (s->show_mode == SHOW_MODE_WAVES) {
784         fill_rectangle(screen,
785                        s->xleft, s->ytop, s->width, s->height,
786                        bgcolor);
787
788         fgcolor = SDL_MapRGB(screen->format, 0xff, 0xff, 0xff);
789
790         /* total height for one channel */
791         h = s->height / nb_display_channels;
792         /* graph height / 2 */
793         h2 = (h * 9) / 20;
794         for(ch = 0;ch < nb_display_channels; ch++) {
795             i = i_start + ch;
796             y1 = s->ytop + ch * h + (h / 2); /* position of center line */
797             for(x = 0; x < s->width; x++) {
798                 y = (s->sample_array[i] * h2) >> 15;
799                 if (y < 0) {
800                     y = -y;
801                     ys = y1 - y;
802                 } else {
803                     ys = y1;
804                 }
805                 fill_rectangle(screen,
806                                s->xleft + x, ys, 1, y,
807                                fgcolor);
808                 i += channels;
809                 if (i >= SAMPLE_ARRAY_SIZE)
810                     i -= SAMPLE_ARRAY_SIZE;
811             }
812         }
813
814         fgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0xff);
815
816         for(ch = 1;ch < nb_display_channels; ch++) {
817             y = s->ytop + ch * h;
818             fill_rectangle(screen,
819                            s->xleft, y, s->width, 1,
820                            fgcolor);
821         }
822         SDL_UpdateRect(screen, s->xleft, s->ytop, s->width, s->height);
823     }else{
824         nb_display_channels= FFMIN(nb_display_channels, 2);
825         if(rdft_bits != s->rdft_bits){
826             av_rdft_end(s->rdft);
827             av_free(s->rdft_data);
828             s->rdft = av_rdft_init(rdft_bits, DFT_R2C);
829             s->rdft_bits= rdft_bits;
830             s->rdft_data= av_malloc(4*nb_freq*sizeof(*s->rdft_data));
831         }
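        /* Scrolling spectrogram: window the most recent samples (the 1.0 - w*w
           factor below is a parabolic/Welch window), run a real FFT per channel
           and draw the bin magnitudes as a single pixel column at x = s->xpos. */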
832         {
833             FFTSample *data[2];
834             for(ch = 0;ch < nb_display_channels; ch++) {
835                 data[ch] = s->rdft_data + 2*nb_freq*ch;
836                 i = i_start + ch;
837                 for(x = 0; x < 2*nb_freq; x++) {
838                     double w= (x-nb_freq)*(1.0/nb_freq);
839                     data[ch][x]= s->sample_array[i]*(1.0-w*w);
840                     i += channels;
841                     if (i >= SAMPLE_ARRAY_SIZE)
842                         i -= SAMPLE_ARRAY_SIZE;
843                 }
844                 av_rdft_calc(s->rdft, data[ch]);
845             }
846             // Least efficient way to do this; we could access the pixel data directly, but this is more than fast enough
847             for(y=0; y<s->height; y++){
848                 double w= 1/sqrt(nb_freq);
849                 int a= sqrt(w*sqrt(data[0][2*y+0]*data[0][2*y+0] + data[0][2*y+1]*data[0][2*y+1]));
850                 int b= (nb_display_channels == 2 ) ? sqrt(w*sqrt(data[1][2*y+0]*data[1][2*y+0]
851                        + data[1][2*y+1]*data[1][2*y+1])) : a;
852                 a= FFMIN(a,255);
853                 b= FFMIN(b,255);
854                 fgcolor = SDL_MapRGB(screen->format, a, b, (a+b)/2);
855
856                 fill_rectangle(screen,
857                             s->xpos, s->height-y, 1, 1,
858                             fgcolor);
859             }
860         }
861         SDL_UpdateRect(screen, s->xpos, s->ytop, 1, s->height);
862         s->xpos++;
863         if(s->xpos >= s->width)
864             s->xpos= s->xleft;
865     }
866 }
867
868 static int video_open(VideoState *is){
869     int flags = SDL_HWSURFACE|SDL_ASYNCBLIT|SDL_HWACCEL;
870     int w,h;
871
872     if(is_full_screen) flags |= SDL_FULLSCREEN;
873     else               flags |= SDL_RESIZABLE;
874
875     if (is_full_screen && fs_screen_width) {
876         w = fs_screen_width;
877         h = fs_screen_height;
878     } else if(!is_full_screen && screen_width){
879         w = screen_width;
880         h = screen_height;
881 #if CONFIG_AVFILTER
882     }else if (is->out_video_filter && is->out_video_filter->inputs[0]){
883         w = is->out_video_filter->inputs[0]->w;
884         h = is->out_video_filter->inputs[0]->h;
885 #else
886     }else if (is->video_st && is->video_st->codec->width){
887         w = is->video_st->codec->width;
888         h = is->video_st->codec->height;
889 #endif
890     } else {
891         w = 640;
892         h = 480;
893     }
894     if(screen && is->width == screen->w && screen->w == w
895        && is->height== screen->h && screen->h == h)
896         return 0;
897
898 #ifndef __APPLE__
899     screen = SDL_SetVideoMode(w, h, 0, flags);
900 #else
901     /* setting bits_per_pixel = 0 or 32 causes blank video on OS X */
902     screen = SDL_SetVideoMode(w, h, 24, flags);
903 #endif
904     if (!screen) {
905         fprintf(stderr, "SDL: could not set video mode - exiting\n");
906         return -1;
907     }
908     if (!window_title)
909         window_title = input_filename;
910     SDL_WM_SetCaption(window_title, window_title);
911
912     is->width = screen->w;
913     is->height = screen->h;
914
915     return 0;
916 }
917
918 /* display the current picture, if any */
919 static void video_display(VideoState *is)
920 {
921     if(!screen)
922         video_open(cur_stream);
923     if (is->audio_st && is->show_mode != SHOW_MODE_VIDEO)
924         video_audio_display(is);
925     else if (is->video_st)
926         video_image_display(is);
927 }
928
929 static int refresh_thread(void *opaque)
930 {
931     VideoState *is= opaque;
932     while(!is->abort_request){
933         SDL_Event event;
934         event.type = FF_REFRESH_EVENT;
935         event.user.data1 = opaque;
936         if(!is->refresh){
937             is->refresh=1;
938             SDL_PushEvent(&event);
939         }
940         //FIXME ideally we should wait for the correct time, but SDL's event passing is so slow it would be pointless
941         usleep(is->audio_st && is->show_mode != SHOW_MODE_VIDEO ? rdftspeed*1000 : 5000);
942     }
943     return 0;
944 }
945
946 /* get the current audio clock value */
947 static double get_audio_clock(VideoState *is)
948 {
949     double pts;
950     int hw_buf_size, bytes_per_sec;
951     pts = is->audio_clock;
952     hw_buf_size = audio_write_get_buf_size(is);
953     bytes_per_sec = 0;
954     if (is->audio_st) {
955         bytes_per_sec = is->audio_st->codec->sample_rate *
956             2 * is->audio_st->codec->channels;
957     }
958     if (bytes_per_sec)
959         pts -= (double)hw_buf_size / bytes_per_sec;
960     return pts;
961 }
962
963 /* get the current video clock value */
964 static double get_video_clock(VideoState *is)
965 {
966     if (is->paused) {
967         return is->video_current_pts;
968     } else {
969         return is->video_current_pts_drift + av_gettime() / 1000000.0;
970     }
971 }
972
973 /* get the current external clock value */
974 static double get_external_clock(VideoState *is)
975 {
976     int64_t ti;
977     ti = av_gettime();
978     return is->external_clock + ((ti - is->external_clock_time) * 1e-6);
979 }
980
981 /* get the current master clock value */
982 static double get_master_clock(VideoState *is)
983 {
984     double val;
985
986     if (is->av_sync_type == AV_SYNC_VIDEO_MASTER) {
987         if (is->video_st)
988             val = get_video_clock(is);
989         else
990             val = get_audio_clock(is);
991     } else if (is->av_sync_type == AV_SYNC_AUDIO_MASTER) {
992         if (is->audio_st)
993             val = get_audio_clock(is);
994         else
995             val = get_video_clock(is);
996     } else {
997         val = get_external_clock(is);
998     }
999     return val;
1000 }
1001
1002 /* seek in the stream */
1003 static void stream_seek(VideoState *is, int64_t pos, int64_t rel, int seek_by_bytes)
1004 {
1005     if (!is->seek_req) {
1006         is->seek_pos = pos;
1007         is->seek_rel = rel;
1008         is->seek_flags &= ~AVSEEK_FLAG_BYTE;
1009         if (seek_by_bytes)
1010             is->seek_flags |= AVSEEK_FLAG_BYTE;
1011         is->seek_req = 1;
1012     }
1013 }
1014
1015 /* pause or resume the video */
1016 static void stream_toggle_pause(VideoState *is)
1017 {
1018     if (is->paused) {
1019         is->frame_timer += av_gettime() / 1000000.0 + is->video_current_pts_drift - is->video_current_pts;
1020         if(is->read_pause_return != AVERROR(ENOSYS)){
1021             is->video_current_pts = is->video_current_pts_drift + av_gettime() / 1000000.0;
1022         }
1023         is->video_current_pts_drift = is->video_current_pts - av_gettime() / 1000000.0;
1024     }
1025     is->paused = !is->paused;
1026 }
1027
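/* Compute the absolute time (is->frame_timer) at which the given frame should
   be displayed. The nominal delay is the pts difference to the previous frame;
   when video is not the master clock, the delay is dropped to 0 if video lags
   the master by more than the sync threshold, or doubled if video runs ahead. */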
1028 static double compute_target_time(double frame_current_pts, VideoState *is)
1029 {
1030     double delay, sync_threshold, diff = 0;
1031
1032     /* compute nominal delay */
1033     delay = frame_current_pts - is->frame_last_pts;
1034     if (delay <= 0 || delay >= 10.0) {
1035         /* if incorrect delay, use previous one */
1036         delay = is->frame_last_delay;
1037     } else {
1038         is->frame_last_delay = delay;
1039     }
1040     is->frame_last_pts = frame_current_pts;
1041
1042     /* update delay to follow master synchronisation source */
1043     if (((is->av_sync_type == AV_SYNC_AUDIO_MASTER && is->audio_st) ||
1044          is->av_sync_type == AV_SYNC_EXTERNAL_CLOCK)) {
1045         /* if video is slave, we try to correct big delays by
1046            duplicating or deleting a frame */
1047         diff = get_video_clock(is) - get_master_clock(is);
1048
1049         /* skip or repeat frame. We take into account the
1050            delay to compute the threshold. I still don't know
1051            if it is the best guess */
1052         sync_threshold = FFMAX(AV_SYNC_THRESHOLD, delay);
1053         if (fabs(diff) < AV_NOSYNC_THRESHOLD) {
1054             if (diff <= -sync_threshold)
1055                 delay = 0;
1056             else if (diff >= sync_threshold)
1057                 delay = 2 * delay;
1058         }
1059     }
1060     is->frame_timer += delay;
1061 #if defined(DEBUG_SYNC)
1062     printf("video: delay=%0.3f target_clock=%0.3f pts=%0.3f A-V=%f\n",
1063             delay, is->frame_timer, frame_current_pts, -diff);
1064 #endif
1065
1066     return is->frame_timer;
1067 }
1068
1069 /* called to display each frame */
1070 static void video_refresh(void *opaque)
1071 {
1072     VideoState *is = opaque;
1073     VideoPicture *vp;
1074
1075     SubPicture *sp, *sp2;
1076
1077     if (is->video_st) {
1078 retry:
1079         if (is->pictq_size == 0) {
1080             // nothing to do, no picture to display in the queue
1081         } else {
1082             double time= av_gettime()/1000000.0;
1083             double next_target;
1084             /* dequeue the picture */
1085             vp = &is->pictq[is->pictq_rindex];
1086
1087             if(time < vp->target_clock)
1088                 return;
1089             /* update current video pts */
1090             is->video_current_pts = vp->pts;
1091             is->video_current_pts_drift = is->video_current_pts - time;
1092             is->video_current_pos = vp->pos;
1093             if(is->pictq_size > 1){
1094                 VideoPicture *nextvp= &is->pictq[(is->pictq_rindex+1)%VIDEO_PICTURE_QUEUE_SIZE];
1095                 assert(nextvp->target_clock >= vp->target_clock);
1096                 next_target= nextvp->target_clock;
1097             }else{
1098                 next_target= vp->target_clock + is->video_clock - vp->pts; //FIXME pass durations cleanly
1099             }
1100             if(framedrop && time > next_target){
1101                 is->skip_frames *= 1.0 + FRAME_SKIP_FACTOR;
1102                 if(is->pictq_size > 1 || time > next_target + 0.5){
1103                     /* update queue size and signal for next picture */
1104                     if (++is->pictq_rindex == VIDEO_PICTURE_QUEUE_SIZE)
1105                         is->pictq_rindex = 0;
1106
1107                     SDL_LockMutex(is->pictq_mutex);
1108                     is->pictq_size--;
1109                     SDL_CondSignal(is->pictq_cond);
1110                     SDL_UnlockMutex(is->pictq_mutex);
1111                     goto retry;
1112                 }
1113             }
1114
1115             if(is->subtitle_st) {
1116                 if (is->subtitle_stream_changed) {
1117                     SDL_LockMutex(is->subpq_mutex);
1118
1119                     while (is->subpq_size) {
1120                         free_subpicture(&is->subpq[is->subpq_rindex]);
1121
1122                         /* update queue size and signal for next picture */
1123                         if (++is->subpq_rindex == SUBPICTURE_QUEUE_SIZE)
1124                             is->subpq_rindex = 0;
1125
1126                         is->subpq_size--;
1127                     }
1128                     is->subtitle_stream_changed = 0;
1129
1130                     SDL_CondSignal(is->subpq_cond);
1131                     SDL_UnlockMutex(is->subpq_mutex);
1132                 } else {
1133                     if (is->subpq_size > 0) {
1134                         sp = &is->subpq[is->subpq_rindex];
1135
1136                         if (is->subpq_size > 1)
1137                             sp2 = &is->subpq[(is->subpq_rindex + 1) % SUBPICTURE_QUEUE_SIZE];
1138                         else
1139                             sp2 = NULL;
1140
1141                         if ((is->video_current_pts > (sp->pts + ((float) sp->sub.end_display_time / 1000)))
1142                                 || (sp2 && is->video_current_pts > (sp2->pts + ((float) sp2->sub.start_display_time / 1000))))
1143                         {
1144                             free_subpicture(sp);
1145
1146                             /* update queue size and signal for next picture */
1147                             if (++is->subpq_rindex == SUBPICTURE_QUEUE_SIZE)
1148                                 is->subpq_rindex = 0;
1149
1150                             SDL_LockMutex(is->subpq_mutex);
1151                             is->subpq_size--;
1152                             SDL_CondSignal(is->subpq_cond);
1153                             SDL_UnlockMutex(is->subpq_mutex);
1154                         }
1155                     }
1156                 }
1157             }
1158
1159             /* display picture */
1160             if (!display_disable)
1161                 video_display(is);
1162
1163             /* update queue size and signal for next picture */
1164             if (++is->pictq_rindex == VIDEO_PICTURE_QUEUE_SIZE)
1165                 is->pictq_rindex = 0;
1166
1167             SDL_LockMutex(is->pictq_mutex);
1168             is->pictq_size--;
1169             SDL_CondSignal(is->pictq_cond);
1170             SDL_UnlockMutex(is->pictq_mutex);
1171         }
1172     } else if (is->audio_st) {
1173         /* draw the next audio frame */
1174
1175         /* if there is only an audio stream, then display the audio bars (better
1176            than nothing, just to test the implementation) */
1177
1178         /* display picture */
1179         if (!display_disable)
1180             video_display(is);
1181     }
1182     if (show_status) {
1183         static int64_t last_time;
1184         int64_t cur_time;
1185         int aqsize, vqsize, sqsize;
1186         double av_diff;
1187
1188         cur_time = av_gettime();
1189         if (!last_time || (cur_time - last_time) >= 30000) {
1190             aqsize = 0;
1191             vqsize = 0;
1192             sqsize = 0;
1193             if (is->audio_st)
1194                 aqsize = is->audioq.size;
1195             if (is->video_st)
1196                 vqsize = is->videoq.size;
1197             if (is->subtitle_st)
1198                 sqsize = is->subtitleq.size;
1199             av_diff = 0;
1200             if (is->audio_st && is->video_st)
1201                 av_diff = get_audio_clock(is) - get_video_clock(is);
1202             printf("%7.2f A-V:%7.3f s:%3.1f aq=%5dKB vq=%5dKB sq=%5dB f=%"PRId64"/%"PRId64"   \r",
1203                    get_master_clock(is),
1204                    av_diff,
1205                    FFMAX(is->skip_frames-1, 0),
1206                    aqsize / 1024,
1207                    vqsize / 1024,
1208                    sqsize,
1209                    is->video_st ? is->video_st->codec->pts_correction_num_faulty_dts : 0,
1210                    is->video_st ? is->video_st->codec->pts_correction_num_faulty_pts : 0);
1211             fflush(stdout);
1212             last_time = cur_time;
1213         }
1214     }
1215 }
1216
1217 static void stream_close(VideoState *is)
1218 {
1219     VideoPicture *vp;
1220     int i;
1221     /* XXX: use a special url_shutdown call to abort parse cleanly */
1222     is->abort_request = 1;
1223     SDL_WaitThread(is->read_tid, NULL);
1224     SDL_WaitThread(is->refresh_tid, NULL);
1225
1226     /* free all pictures */
1227     for(i=0;i<VIDEO_PICTURE_QUEUE_SIZE; i++) {
1228         vp = &is->pictq[i];
1229 #if CONFIG_AVFILTER
1230         if (vp->picref) {
1231             avfilter_unref_buffer(vp->picref);
1232             vp->picref = NULL;
1233         }
1234 #endif
1235         if (vp->bmp) {
1236             SDL_FreeYUVOverlay(vp->bmp);
1237             vp->bmp = NULL;
1238         }
1239     }
1240     SDL_DestroyMutex(is->pictq_mutex);
1241     SDL_DestroyCond(is->pictq_cond);
1242     SDL_DestroyMutex(is->subpq_mutex);
1243     SDL_DestroyCond(is->subpq_cond);
1244 #if !CONFIG_AVFILTER
1245     if (is->img_convert_ctx)
1246         sws_freeContext(is->img_convert_ctx);
1247 #endif
1248     av_free(is);
1249 }
1250
1251 static void do_exit(void)
1252 {
1253     if (cur_stream) {
1254         stream_close(cur_stream);
1255         cur_stream = NULL;
1256     }
1257     uninit_opts();
1258 #if CONFIG_AVFILTER
1259     avfilter_uninit();
1260 #endif
1261     if (show_status)
1262         printf("\n");
1263     SDL_Quit();
1264     av_log(NULL, AV_LOG_QUIET, "");
1265     exit(0);
1266 }
1267
1268 /* allocate a picture (this needs to be done in the main thread to avoid
1269    potential locking problems) */
1270 static void alloc_picture(void *opaque)
1271 {
1272     VideoState *is = opaque;
1273     VideoPicture *vp;
1274
1275     vp = &is->pictq[is->pictq_windex];
1276
1277     if (vp->bmp)
1278         SDL_FreeYUVOverlay(vp->bmp);
1279
1280 #if CONFIG_AVFILTER
1281     if (vp->picref)
1282         avfilter_unref_buffer(vp->picref);
1283     vp->picref = NULL;
1284
1285     vp->width   = is->out_video_filter->inputs[0]->w;
1286     vp->height  = is->out_video_filter->inputs[0]->h;
1287     vp->pix_fmt = is->out_video_filter->inputs[0]->format;
1288 #else
1289     vp->width   = is->video_st->codec->width;
1290     vp->height  = is->video_st->codec->height;
1291     vp->pix_fmt = is->video_st->codec->pix_fmt;
1292 #endif
1293
1294     vp->bmp = SDL_CreateYUVOverlay(vp->width, vp->height,
1295                                    SDL_YV12_OVERLAY,
1296                                    screen);
1297     if (!vp->bmp || vp->bmp->pitches[0] < vp->width) {
1298         /* SDL allocates a buffer smaller than requested if the video
1299          * overlay hardware is unable to support the requested size. */
1300         fprintf(stderr, "Error: the video system does not support an image\n"
1301                         "size of %dx%d pixels. Try using -lowres or -vf \"scale=w:h\"\n"
1302                         "to reduce the image size.\n", vp->width, vp->height );
1303         do_exit();
1304     }
1305
1306     SDL_LockMutex(is->pictq_mutex);
1307     vp->allocated = 1;
1308     SDL_CondSignal(is->pictq_cond);
1309     SDL_UnlockMutex(is->pictq_mutex);
1310 }
1311
1312 static int queue_picture(VideoState *is, AVFrame *src_frame, double pts1, int64_t pos)
1313 {
1314     VideoPicture *vp;
1315     double frame_delay, pts = pts1;
1316
1317     /* compute the exact PTS for the picture if it is omitted in the stream
1318      * pts1 is the dts of the pkt / pts of the frame */
1319     if (pts != 0) {
1320         /* update video clock with pts, if present */
1321         is->video_clock = pts;
1322     } else {
1323         pts = is->video_clock;
1324     }
1325     /* update video clock for next frame */
1326     frame_delay = av_q2d(is->video_st->codec->time_base);
1327     /* for MPEG2, the frame can be repeated, so we update the
1328        clock accordingly */
1329     frame_delay += src_frame->repeat_pict * (frame_delay * 0.5);
1330     is->video_clock += frame_delay;
1331
1332 #if defined(DEBUG_SYNC) && 0
1333     printf("frame_type=%c clock=%0.3f pts=%0.3f\n",
1334            av_get_picture_type_char(src_frame->pict_type), pts, pts1);
1335 #endif
1336
1337     /* wait until we have space to put a new picture */
1338     SDL_LockMutex(is->pictq_mutex);
1339
1340     if(is->pictq_size>=VIDEO_PICTURE_QUEUE_SIZE && !is->refresh)
1341         is->skip_frames= FFMAX(1.0 - FRAME_SKIP_FACTOR, is->skip_frames * (1.0-FRAME_SKIP_FACTOR));
1342
1343     while (is->pictq_size >= VIDEO_PICTURE_QUEUE_SIZE &&
1344            !is->videoq.abort_request) {
1345         SDL_CondWait(is->pictq_cond, is->pictq_mutex);
1346     }
1347     SDL_UnlockMutex(is->pictq_mutex);
1348
1349     if (is->videoq.abort_request)
1350         return -1;
1351
1352     vp = &is->pictq[is->pictq_windex];
1353
1354     /* alloc or resize hardware picture buffer */
1355     if (!vp->bmp ||
1356 #if CONFIG_AVFILTER
1357         vp->width  != is->out_video_filter->inputs[0]->w ||
1358         vp->height != is->out_video_filter->inputs[0]->h) {
1359 #else
1360         vp->width != is->video_st->codec->width ||
1361         vp->height != is->video_st->codec->height) {
1362 #endif
1363         SDL_Event event;
1364
1365         vp->allocated = 0;
1366
1367         /* the allocation must be done in the main thread to avoid
1368            locking problems */
1369         event.type = FF_ALLOC_EVENT;
1370         event.user.data1 = is;
1371         SDL_PushEvent(&event);
1372
1373         /* wait until the picture is allocated */
1374         SDL_LockMutex(is->pictq_mutex);
1375         while (!vp->allocated && !is->videoq.abort_request) {
1376             SDL_CondWait(is->pictq_cond, is->pictq_mutex);
1377         }
1378         SDL_UnlockMutex(is->pictq_mutex);
1379
1380         if (is->videoq.abort_request)
1381             return -1;
1382     }
1383
1384     /* if the frame is not skipped, then display it */
1385     if (vp->bmp) {
1386         AVPicture pict;
1387 #if CONFIG_AVFILTER
1388         if(vp->picref)
1389             avfilter_unref_buffer(vp->picref);
1390         vp->picref = src_frame->opaque;
1391 #endif
1392
1393         /* get a pointer to the bitmap */
1394         SDL_LockYUVOverlay (vp->bmp);
1395
1396         memset(&pict,0,sizeof(AVPicture));
1397         pict.data[0] = vp->bmp->pixels[0];
1398         pict.data[1] = vp->bmp->pixels[2];
1399         pict.data[2] = vp->bmp->pixels[1];
1400
1401         pict.linesize[0] = vp->bmp->pitches[0];
1402         pict.linesize[1] = vp->bmp->pitches[2];
1403         pict.linesize[2] = vp->bmp->pitches[1];
1404
1405 #if CONFIG_AVFILTER
1406         //FIXME use direct rendering
1407         av_picture_copy(&pict, (AVPicture *)src_frame,
1408                         vp->pix_fmt, vp->width, vp->height);
1409 #else
1410         sws_flags = av_get_int(sws_opts, "sws_flags", NULL);
1411         is->img_convert_ctx = sws_getCachedContext(is->img_convert_ctx,
1412             vp->width, vp->height, vp->pix_fmt, vp->width, vp->height,
1413             PIX_FMT_YUV420P, sws_flags, NULL, NULL, NULL);
1414         if (is->img_convert_ctx == NULL) {
1415             fprintf(stderr, "Cannot initialize the conversion context\n");
1416             exit(1);
1417         }
1418         sws_scale(is->img_convert_ctx, src_frame->data, src_frame->linesize,
1419                   0, vp->height, pict.data, pict.linesize);
1420 #endif
1421         /* update the bitmap content */
1422         SDL_UnlockYUVOverlay(vp->bmp);
1423
1424         vp->pts = pts;
1425         vp->pos = pos;
1426
1427         /* now we can update the picture count */
1428         if (++is->pictq_windex == VIDEO_PICTURE_QUEUE_SIZE)
1429             is->pictq_windex = 0;
1430         SDL_LockMutex(is->pictq_mutex);
1431         vp->target_clock= compute_target_time(vp->pts, is);
1432
1433         is->pictq_size++;
1434         SDL_UnlockMutex(is->pictq_mutex);
1435     }
1436     return 0;
1437 }
1438
1439 static int get_video_frame(VideoState *is, AVFrame *frame, int64_t *pts, AVPacket *pkt)
1440 {
1441     int len1, got_picture, i;
1442
1443     if (packet_queue_get(&is->videoq, pkt, 1) < 0)
1444         return -1;
1445
1446     if (pkt->data == flush_pkt.data) {
1447         avcodec_flush_buffers(is->video_st->codec);
1448
1449         SDL_LockMutex(is->pictq_mutex);
1450         // Make sure there are no long-delay timers (ideally we should just flush the queue, but that's harder)
1451         for (i = 0; i < VIDEO_PICTURE_QUEUE_SIZE; i++) {
1452             is->pictq[i].target_clock= 0;
1453         }
1454         while (is->pictq_size && !is->videoq.abort_request) {
1455             SDL_CondWait(is->pictq_cond, is->pictq_mutex);
1456         }
1457         is->video_current_pos = -1;
1458         SDL_UnlockMutex(is->pictq_mutex);
1459
1460         is->frame_last_pts = AV_NOPTS_VALUE;
1461         is->frame_last_delay = 0;
1462         is->frame_timer = (double)av_gettime() / 1000000.0;
1463         is->skip_frames = 1;
1464         is->skip_frames_index = 0;
1465         return 0;
1466     }
1467
1468     len1 = avcodec_decode_video2(is->video_st->codec,
1469                                  frame, &got_picture,
1470                                  pkt);
1471
1472     if (got_picture) {
1473         if (decoder_reorder_pts == -1) {
1474             *pts = frame->best_effort_timestamp;
1475         } else if (decoder_reorder_pts) {
1476             *pts = frame->pkt_pts;
1477         } else {
1478             *pts = frame->pkt_dts;
1479         }
1480
1481         if (*pts == AV_NOPTS_VALUE) {
1482             *pts = 0;
1483         }
1484
1485         is->skip_frames_index += 1;
1486         if(is->skip_frames_index >= is->skip_frames){
1487             is->skip_frames_index -= FFMAX(is->skip_frames, 1.0);
1488             return 1;
1489         }
1490
1491     }
1492     return 0;
1493 }
1494
1495 #if CONFIG_AVFILTER
1496 typedef struct {
1497     VideoState *is;
1498     AVFrame *frame;
1499     int use_dr1;
1500 } FilterPriv;
1501
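/* Replacement AVCodecContext.get_buffer() used when the decoder supports direct
   rendering (CODEC_CAP_DR1, see input_init() below): the decoder writes into a
   buffer obtained from the filter graph, so input_request_frame() can pass the
   frame on without an extra copy. */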
1502 static int input_get_buffer(AVCodecContext *codec, AVFrame *pic)
1503 {
1504     AVFilterContext *ctx = codec->opaque;
1505     AVFilterBufferRef  *ref;
1506     int perms = AV_PERM_WRITE;
1507     int i, w, h, stride[4];
1508     unsigned edge;
1509     int pixel_size;
1510
1511     av_assert0(codec->flags & CODEC_FLAG_EMU_EDGE);
1512
1513     if (codec->codec->capabilities & CODEC_CAP_NEG_LINESIZES)
1514         perms |= AV_PERM_NEG_LINESIZES;
1515
1516     if(pic->buffer_hints & FF_BUFFER_HINTS_VALID) {
1517         if(pic->buffer_hints & FF_BUFFER_HINTS_READABLE) perms |= AV_PERM_READ;
1518         if(pic->buffer_hints & FF_BUFFER_HINTS_PRESERVE) perms |= AV_PERM_PRESERVE;
1519         if(pic->buffer_hints & FF_BUFFER_HINTS_REUSABLE) perms |= AV_PERM_REUSE2;
1520     }
1521     if(pic->reference) perms |= AV_PERM_READ | AV_PERM_PRESERVE;
1522
1523     w = codec->width;
1524     h = codec->height;
1525
1526     if(av_image_check_size(w, h, 0, codec))
1527         return -1;
1528
1529     avcodec_align_dimensions2(codec, &w, &h, stride);
1530     edge = codec->flags & CODEC_FLAG_EMU_EDGE ? 0 : avcodec_get_edge_width();
1531     w += edge << 1;
1532     h += edge << 1;
1533
1534     if(!(ref = avfilter_get_video_buffer(ctx->outputs[0], perms, w, h)))
1535         return -1;
1536
1537     pixel_size = av_pix_fmt_descriptors[ref->format].comp[0].step_minus1+1;
1538     ref->video->w = codec->width;
1539     ref->video->h = codec->height;
1540     for(i = 0; i < 4; i ++) {
1541         unsigned hshift = (i == 1 || i == 2) ? av_pix_fmt_descriptors[ref->format].log2_chroma_w : 0;
1542         unsigned vshift = (i == 1 || i == 2) ? av_pix_fmt_descriptors[ref->format].log2_chroma_h : 0;
1543
1544         if (ref->data[i]) {
1545             ref->data[i]    += ((edge * pixel_size) >> hshift) + ((edge * ref->linesize[i]) >> vshift);
1546         }
1547         pic->data[i]     = ref->data[i];
1548         pic->linesize[i] = ref->linesize[i];
1549     }
1550     pic->opaque = ref;
1551     pic->age    = INT_MAX;
1552     pic->type   = FF_BUFFER_TYPE_USER;
1553     pic->reordered_opaque = codec->reordered_opaque;
1554     if(codec->pkt) pic->pkt_pts = codec->pkt->pts;
1555     else           pic->pkt_pts = AV_NOPTS_VALUE;
1556     return 0;
1557 }
1558
1559 static void input_release_buffer(AVCodecContext *codec, AVFrame *pic)
1560 {
1561     memset(pic->data, 0, sizeof(pic->data));
1562     avfilter_unref_buffer(pic->opaque);
1563 }
1564
1565 static int input_reget_buffer(AVCodecContext *codec, AVFrame *pic)
1566 {
1567     AVFilterBufferRef *ref = pic->opaque;
1568
1569     if (pic->data[0] == NULL) {
1570         pic->buffer_hints |= FF_BUFFER_HINTS_READABLE;
1571         return codec->get_buffer(codec, pic);
1572     }
1573
1574     if ((codec->width != ref->video->w) || (codec->height != ref->video->h) ||
1575         (codec->pix_fmt != ref->format)) {
1576         av_log(codec, AV_LOG_ERROR, "Picture properties changed.\n");
1577         return -1;
1578     }
1579
1580     pic->reordered_opaque = codec->reordered_opaque;
1581     if(codec->pkt) pic->pkt_pts = codec->pkt->pts;
1582     else           pic->pkt_pts = AV_NOPTS_VALUE;
1583     return 0;
1584 }
1585
1586 static int input_init(AVFilterContext *ctx, const char *args, void *opaque)
1587 {
1588     FilterPriv *priv = ctx->priv;
1589     AVCodecContext *codec;
1590     if(!opaque) return -1;
1591
1592     priv->is = opaque;
1593     codec    = priv->is->video_st->codec;
1594     codec->opaque = ctx;
1595     if((codec->codec->capabilities & CODEC_CAP_DR1)
1596     ) {
1597         av_assert0(codec->flags & CODEC_FLAG_EMU_EDGE);
1598         priv->use_dr1 = 1;
1599         codec->get_buffer     = input_get_buffer;
1600         codec->release_buffer = input_release_buffer;
1601         codec->reget_buffer   = input_reget_buffer;
1602         codec->thread_safe_callbacks = 1;
1603     }
1604
1605     priv->frame = avcodec_alloc_frame();
1606
1607     return 0;
1608 }
1609
1610 static void input_uninit(AVFilterContext *ctx)
1611 {
1612     FilterPriv *priv = ctx->priv;
1613     av_free(priv->frame);
1614 }
1615
1616 static int input_request_frame(AVFilterLink *link)
1617 {
1618     FilterPriv *priv = link->src->priv;
1619     AVFilterBufferRef *picref;
1620     int64_t pts = 0;
1621     AVPacket pkt;
1622     int ret;
1623
1624     while (!(ret = get_video_frame(priv->is, priv->frame, &pts, &pkt)))
1625         av_free_packet(&pkt);
1626     if (ret < 0)
1627         return -1;
1628
1629     if(priv->use_dr1) {
1630         picref = avfilter_ref_buffer(priv->frame->opaque, ~0);
1631     } else {
1632         picref = avfilter_get_video_buffer(link, AV_PERM_WRITE, link->w, link->h);
1633         av_image_copy(picref->data, picref->linesize,
1634                       priv->frame->data, priv->frame->linesize,
1635                       picref->format, link->w, link->h);
1636     }
1637     av_free_packet(&pkt);
1638
1639     avfilter_copy_frame_props(picref, priv->frame);
1640     picref->pts = pts;
1641
1642     avfilter_start_frame(link, picref);
1643     avfilter_draw_slice(link, 0, link->h, 1);
1644     avfilter_end_frame(link);
1645
1646     return 0;
1647 }
1648
1649 static int input_query_formats(AVFilterContext *ctx)
1650 {
1651     FilterPriv *priv = ctx->priv;
1652     enum PixelFormat pix_fmts[] = {
1653         priv->is->video_st->codec->pix_fmt, PIX_FMT_NONE
1654     };
1655
1656     avfilter_set_common_formats(ctx, avfilter_make_format_list(pix_fmts));
1657     return 0;
1658 }
1659
1660 static int input_config_props(AVFilterLink *link)
1661 {
1662     FilterPriv *priv  = link->src->priv;
1663     AVCodecContext *c = priv->is->video_st->codec;
1664
1665     link->w = c->width;
1666     link->h = c->height;
1667     link->time_base = priv->is->video_st->time_base;
1668
1669     return 0;
1670 }
1671
1672 static AVFilter input_filter =
1673 {
1674     .name      = "ffplay_input",
1675
1676     .priv_size = sizeof(FilterPriv),
1677
1678     .init      = input_init,
1679     .uninit    = input_uninit,
1680
1681     .query_formats = input_query_formats,
1682
1683     .inputs    = (AVFilterPad[]) {{ .name = NULL }},
1684     .outputs   = (AVFilterPad[]) {{ .name = "default",
1685                                     .type = AVMEDIA_TYPE_VIDEO,
1686                                     .request_frame = input_request_frame,
1687                                     .config_props  = input_config_props, },
1688                                   { .name = NULL }},
1689 };
1690
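/* Build the filter graph: the "ffplay_input" source above feeds either the
   user-supplied chain from the -vf option or, if none was given, is linked
   straight to the ffsink output that video_thread() reads from.  For
   illustration, something like
       ffplay -vf "scale=640:360" input.mkv
   would insert a scale filter between the two (assuming the scale filter is
   available in this libavfilter build). */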
1691 static int configure_video_filters(AVFilterGraph *graph, VideoState *is, const char *vfilters)
1692 {
1693     char sws_flags_str[128];
1694     int ret;
1695     FFSinkContext ffsink_ctx = { .pix_fmt = PIX_FMT_YUV420P };
1696     AVFilterContext *filt_src = NULL, *filt_out = NULL;
1697     snprintf(sws_flags_str, sizeof(sws_flags_str), "flags=%d", sws_flags);
1698     graph->scale_sws_opts = av_strdup(sws_flags_str);
1699
1700     if ((ret = avfilter_graph_create_filter(&filt_src, &input_filter, "src",
1701                                             NULL, is, graph)) < 0)
1702         goto the_end;
1703     if ((ret = avfilter_graph_create_filter(&filt_out, &ffsink, "out",
1704                                             NULL, &ffsink_ctx, graph)) < 0)
1705         goto the_end;
1706
1707     if(vfilters) {
1708         AVFilterInOut *outputs = av_malloc(sizeof(AVFilterInOut));
1709         AVFilterInOut *inputs  = av_malloc(sizeof(AVFilterInOut));
1710
1711         outputs->name    = av_strdup("in");
1712         outputs->filter_ctx = filt_src;
1713         outputs->pad_idx = 0;
1714         outputs->next    = NULL;
1715
1716         inputs->name    = av_strdup("out");
1717         inputs->filter_ctx = filt_out;
1718         inputs->pad_idx = 0;
1719         inputs->next    = NULL;
1720
1721         if ((ret = avfilter_graph_parse(graph, vfilters, inputs, outputs, NULL)) < 0)
1722             goto the_end;
1723         av_freep(&vfilters);
1724     } else {
1725         if ((ret = avfilter_link(filt_src, 0, filt_out, 0)) < 0)
1726             goto the_end;
1727     }
1728
1729     if ((ret = avfilter_graph_config(graph, NULL)) < 0)
1730         goto the_end;
1731
1732     is->out_video_filter = filt_out;
1733 the_end:
1734     return ret;
1735 }
1736
1737 #endif  /* CONFIG_AVFILTER */
1738
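/* Video decoding thread: pull decoded frames (through the filter graph when
   CONFIG_AVFILTER is set, directly from the video packet queue otherwise),
   convert their pts to seconds and hand them to queue_picture() for display. */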
1739 static int video_thread(void *arg)
1740 {
1741     VideoState *is = arg;
1742     AVFrame *frame= avcodec_alloc_frame();
1743     int64_t pts_int, pos;
1744     double pts;
1745     int ret;
1746
1747 #if CONFIG_AVFILTER
1748     AVFilterGraph *graph = avfilter_graph_alloc();
1749     AVFilterContext *filt_out = NULL;
1750
1751     if ((ret = configure_video_filters(graph, is, vfilters)) < 0)
1752         goto the_end;
1753     filt_out = is->out_video_filter;
1754 #endif
1755
1756     for(;;) {
1757 #if !CONFIG_AVFILTER
1758         AVPacket pkt;
1759 #else
1760         AVFilterBufferRef *picref;
1761         AVRational tb;
1762 #endif
1763         while (is->paused && !is->videoq.abort_request)
1764             SDL_Delay(10);
1765 #if CONFIG_AVFILTER
1766         ret = get_filtered_video_frame(filt_out, frame, &picref, &tb);
1767         if (picref) {
1768             pts_int = picref->pts;
1769             pos     = picref->pos;
1770             frame->opaque = picref;
1771         }
1772
1773         if (av_cmp_q(tb, is->video_st->time_base)) {
1774             av_unused int64_t pts1 = pts_int;
1775             pts_int = av_rescale_q(pts_int, tb, is->video_st->time_base);
1776             av_dlog(NULL, "video_thread(): "
1777                     "tb:%d/%d pts:%"PRId64" -> tb:%d/%d pts:%"PRId64"\n",
1778                     tb.num, tb.den, pts1,
1779                     is->video_st->time_base.num, is->video_st->time_base.den, pts_int);
1780         }
1781 #else
1782         ret = get_video_frame(is, frame, &pts_int, &pkt);
1783         pos = pkt.pos;
1784         av_free_packet(&pkt);
1785 #endif
1786
1787         if (ret < 0) goto the_end;
1788
1789         if (!ret)
1790             continue;
1791
1792         pts = pts_int*av_q2d(is->video_st->time_base);
1793
1794         ret = queue_picture(is, frame, pts, pos);
1795
1796         if (ret < 0)
1797             goto the_end;
1798
1799         if (step)
1800             if (cur_stream)
1801                 stream_toggle_pause(cur_stream);
1802     }
1803  the_end:
1804 #if CONFIG_AVFILTER
1805     avfilter_graph_free(&graph);
1806 #endif
1807     av_free(frame);
1808     return 0;
1809 }
1810
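/* Subtitle decoding thread: decode packets from the subtitle queue, convert
   the palette of bitmap subtitles from RGBA to YUVA so they can be blended
   onto the video, and store the result in the subpicture queue. */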
1811 static int subtitle_thread(void *arg)
1812 {
1813     VideoState *is = arg;
1814     SubPicture *sp;
1815     AVPacket pkt1, *pkt = &pkt1;
1816     int len1, got_subtitle;
1817     double pts;
1818     int i, j;
1819     int r, g, b, y, u, v, a;
1820
1821     for(;;) {
1822         while (is->paused && !is->subtitleq.abort_request) {
1823             SDL_Delay(10);
1824         }
1825         if (packet_queue_get(&is->subtitleq, pkt, 1) < 0)
1826             break;
1827
1828         if(pkt->data == flush_pkt.data){
1829             avcodec_flush_buffers(is->subtitle_st->codec);
1830             continue;
1831         }
1832         SDL_LockMutex(is->subpq_mutex);
1833         while (is->subpq_size >= SUBPICTURE_QUEUE_SIZE &&
1834                !is->subtitleq.abort_request) {
1835             SDL_CondWait(is->subpq_cond, is->subpq_mutex);
1836         }
1837         SDL_UnlockMutex(is->subpq_mutex);
1838
1839         if (is->subtitleq.abort_request)
1840             goto the_end;
1841
1842         sp = &is->subpq[is->subpq_windex];
1843
1844         /* NOTE: pts is the PTS of the _first_ subtitle beginning in
1845            this packet, if any */
1846         pts = 0;
1847         if (pkt->pts != AV_NOPTS_VALUE)
1848             pts = av_q2d(is->subtitle_st->time_base)*pkt->pts;
1849
1850         len1 = avcodec_decode_subtitle2(is->subtitle_st->codec,
1851                                     &sp->sub, &got_subtitle,
1852                                     pkt);
1853         if (got_subtitle && sp->sub.format == 0) {
1854             sp->pts = pts;
1855
1856             for (i = 0; i < sp->sub.num_rects; i++)
1857             {
1858                 for (j = 0; j < sp->sub.rects[i]->nb_colors; j++)
1859                 {
1860                     RGBA_IN(r, g, b, a, (uint32_t*)sp->sub.rects[i]->pict.data[1] + j);
1861                     y = RGB_TO_Y_CCIR(r, g, b);
1862                     u = RGB_TO_U_CCIR(r, g, b, 0);
1863                     v = RGB_TO_V_CCIR(r, g, b, 0);
1864                     YUVA_OUT((uint32_t*)sp->sub.rects[i]->pict.data[1] + j, y, u, v, a);
1865                 }
1866             }
1867
1868             /* now we can update the picture count */
1869             if (++is->subpq_windex == SUBPICTURE_QUEUE_SIZE)
1870                 is->subpq_windex = 0;
1871             SDL_LockMutex(is->subpq_mutex);
1872             is->subpq_size++;
1873             SDL_UnlockMutex(is->subpq_mutex);
1874         }
1875         av_free_packet(pkt);
1876     }
1877  the_end:
1878     return 0;
1879 }
1880
1881 /* copy samples for viewing in the audio waveform/spectrum display */
1882 static void update_sample_display(VideoState *is, short *samples, int samples_size)
1883 {
1884     int size, len;
1885
1886     size = samples_size / sizeof(short);
1887     while (size > 0) {
1888         len = SAMPLE_ARRAY_SIZE - is->sample_array_index;
1889         if (len > size)
1890             len = size;
1891         memcpy(is->sample_array + is->sample_array_index, samples, len * sizeof(short));
1892         samples += len;
1893         is->sample_array_index += len;
1894         if (is->sample_array_index >= SAMPLE_ARRAY_SIZE)
1895             is->sample_array_index = 0;
1896         size -= len;
1897     }
1898 }
1899
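/* The A-V difference is smoothed with an exponential average:
       cum_n = diff_n + coef * cum_{n-1},  coef = exp(log(0.01) / AUDIO_DIFF_AVG_NB)
   so a measurement AUDIO_DIFF_AVG_NB (20) steps old has decayed to 1% weight
   (coef ~= 0.794, coef^20 = 0.01).  Multiplying the accumulator by (1 - coef)
   normalizes the geometric series and yields the avg_diff estimate used below.
   The correction itself is clamped to +/- SAMPLE_CORRECTION_PERCENT_MAX (10%)
   of the buffer so the resulting speed change stays small. */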
1900 /* return the new audio buffer size (samples can be added or deleted
1901    to get better sync if video or an external clock is the master) */
1902 static int synchronize_audio(VideoState *is, short *samples,
1903                              int samples_size1, double pts)
1904 {
1905     int n, samples_size;
1906     double ref_clock;
1907
1908     n = 2 * is->audio_st->codec->channels;
1909     samples_size = samples_size1;
1910
1911     /* if not master, then we try to remove or add samples to correct the clock */
1912     if (((is->av_sync_type == AV_SYNC_VIDEO_MASTER && is->video_st) ||
1913          is->av_sync_type == AV_SYNC_EXTERNAL_CLOCK)) {
1914         double diff, avg_diff;
1915         int wanted_size, min_size, max_size, nb_samples;
1916
1917         ref_clock = get_master_clock(is);
1918         diff = get_audio_clock(is) - ref_clock;
1919
1920         if (fabs(diff) < AV_NOSYNC_THRESHOLD) {
1921             is->audio_diff_cum = diff + is->audio_diff_avg_coef * is->audio_diff_cum;
1922             if (is->audio_diff_avg_count < AUDIO_DIFF_AVG_NB) {
1923                 /* not enough measures to have a correct estimate */
1924                 is->audio_diff_avg_count++;
1925             } else {
1926                 /* estimate the A-V difference */
1927                 avg_diff = is->audio_diff_cum * (1.0 - is->audio_diff_avg_coef);
1928
1929                 if (fabs(avg_diff) >= is->audio_diff_threshold) {
1930                     wanted_size = samples_size + ((int)(diff * is->audio_st->codec->sample_rate) * n);
1931                     nb_samples = samples_size / n;
1932
1933                     min_size = ((nb_samples * (100 - SAMPLE_CORRECTION_PERCENT_MAX)) / 100) * n;
1934                     max_size = ((nb_samples * (100 + SAMPLE_CORRECTION_PERCENT_MAX)) / 100) * n;
1935                     if (wanted_size < min_size)
1936                         wanted_size = min_size;
1937                     else if (wanted_size > max_size)
1938                         wanted_size = max_size;
1939
1940                     /* add or remove samples to correct the sync */
1941                     if (wanted_size < samples_size) {
1942                         /* remove samples */
1943                         samples_size = wanted_size;
1944                     } else if (wanted_size > samples_size) {
1945                         uint8_t *samples_end, *q;
1946                         int nb;
1947
1948                         /* add samples */
1949                         nb = (wanted_size - samples_size);
1950                         samples_end = (uint8_t *)samples + samples_size - n;
1951                         q = samples_end + n;
1952                         while (nb > 0) {
1953                             memcpy(q, samples_end, n);
1954                             q += n;
1955                             nb -= n;
1956                         }
1957                         samples_size = wanted_size;
1958                     }
1959                 }
1960 #if 0
1961                 printf("diff=%f adiff=%f sample_diff=%d apts=%0.3f vpts=%0.3f %f\n",
1962                        diff, avg_diff, samples_size - samples_size1,
1963                        is->audio_clock, is->video_clock, is->audio_diff_threshold);
1964 #endif
1965             }
1966         } else {
1967             /* too big a difference: may be initial PTS errors, so
1968                reset the A-V filter */
1969             is->audio_diff_avg_count = 0;
1970             is->audio_diff_cum = 0;
1971         }
1972     }
1973
1974     return samples_size;
1975 }
1976
1977 /* decode one audio frame and return its uncompressed size */
1978 static int audio_decode_frame(VideoState *is, double *pts_ptr)
1979 {
1980     AVPacket *pkt_temp = &is->audio_pkt_temp;
1981     AVPacket *pkt = &is->audio_pkt;
1982     AVCodecContext *dec= is->audio_st->codec;
1983     int n, len1, data_size;
1984     double pts;
1985
1986     for(;;) {
1987         /* NOTE: the audio packet can contain several frames */
1988         while (pkt_temp->size > 0) {
1989             data_size = sizeof(is->audio_buf1);
1990             len1 = avcodec_decode_audio3(dec,
1991                                         (int16_t *)is->audio_buf1, &data_size,
1992                                         pkt_temp);
1993             if (len1 < 0) {
1994                 /* if error, we skip the frame */
1995                 pkt_temp->size = 0;
1996                 break;
1997             }
1998
1999             pkt_temp->data += len1;
2000             pkt_temp->size -= len1;
2001             if (data_size <= 0)
2002                 continue;
2003
2004             if (dec->sample_fmt != is->audio_src_fmt) {
2005                 if (is->reformat_ctx)
2006                     av_audio_convert_free(is->reformat_ctx);
2007                 is->reformat_ctx= av_audio_convert_alloc(AV_SAMPLE_FMT_S16, 1,
2008                                                          dec->sample_fmt, 1, NULL, 0);
2009                 if (!is->reformat_ctx) {
2010                     fprintf(stderr, "Cannot convert %s sample format to %s sample format\n",
2011                         av_get_sample_fmt_name(dec->sample_fmt),
2012                         av_get_sample_fmt_name(AV_SAMPLE_FMT_S16));
2013                     break;
2014                 }
2015                 is->audio_src_fmt= dec->sample_fmt;
2016             }
2017
2018             if (is->reformat_ctx) {
2019                 const void *ibuf[6]= {is->audio_buf1};
2020                 void *obuf[6]= {is->audio_buf2};
2021                 int istride[6]= {av_get_bits_per_sample_fmt(dec->sample_fmt)/8};
2022                 int ostride[6]= {2};
2023                 int len= data_size/istride[0];
2024                 if (av_audio_convert(is->reformat_ctx, obuf, ostride, ibuf, istride, len)<0) {
2025                     printf("av_audio_convert() failed\n");
2026                     break;
2027                 }
2028                 is->audio_buf= is->audio_buf2;
2029                 /* FIXME: existing code assumes that data_size equals framesize*channels*2;
2030                           remove this legacy cruft */
2031                 data_size= len*2;
2032             }else{
2033                 is->audio_buf= is->audio_buf1;
2034             }
2035
2036             /* if no pts, then compute it */
2037             pts = is->audio_clock;
2038             *pts_ptr = pts;
2039             n = 2 * dec->channels;
2040             is->audio_clock += (double)data_size /
2041                 (double)(n * dec->sample_rate);
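            /* e.g. with 16-bit stereo at 44100 Hz, a 4096-byte frame advances
               the clock by 4096 / (2 * 2 * 44100) ~= 23.2 ms */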
2042 #if defined(DEBUG_SYNC)
2043             {
2044                 static double last_clock;
2045                 printf("audio: delay=%0.3f clock=%0.3f pts=%0.3f\n",
2046                        is->audio_clock - last_clock,
2047                        is->audio_clock, pts);
2048                 last_clock = is->audio_clock;
2049             }
2050 #endif
2051             return data_size;
2052         }
2053
2054         /* free the current packet */
2055         if (pkt->data)
2056             av_free_packet(pkt);
2057
2058         if (is->paused || is->audioq.abort_request) {
2059             return -1;
2060         }
2061
2062         /* read next packet */
2063         if (packet_queue_get(&is->audioq, pkt, 1) < 0)
2064             return -1;
2065         if(pkt->data == flush_pkt.data){
2066             avcodec_flush_buffers(dec);
2067             continue;
2068         }
2069
2070         pkt_temp->data = pkt->data;
2071         pkt_temp->size = pkt->size;
2072
2073         /* update the audio clock with the packet pts, if available */
2074         if (pkt->pts != AV_NOPTS_VALUE) {
2075             is->audio_clock = av_q2d(is->audio_st->time_base)*pkt->pts;
2076         }
2077     }
2078 }
2079
2080 /* prepare a new audio buffer */
2081 static void sdl_audio_callback(void *opaque, Uint8 *stream, int len)
2082 {
2083     VideoState *is = opaque;
2084     int audio_size, len1;
2085     double pts;
2086
2087     audio_callback_time = av_gettime();
2088
2089     while (len > 0) {
2090         if (is->audio_buf_index >= is->audio_buf_size) {
2091            audio_size = audio_decode_frame(is, &pts);
2092            if (audio_size < 0) {
2093                 /* if error, just output silence */
2094                is->audio_buf = is->audio_buf1;
2095                is->audio_buf_size = 1024;
2096                memset(is->audio_buf, 0, is->audio_buf_size);
2097            } else {
2098                if (is->show_mode != SHOW_MODE_VIDEO)
2099                    update_sample_display(is, (int16_t *)is->audio_buf, audio_size);
2100                audio_size = synchronize_audio(is, (int16_t *)is->audio_buf, audio_size,
2101                                               pts);
2102                is->audio_buf_size = audio_size;
2103            }
2104            is->audio_buf_index = 0;
2105         }
2106         len1 = is->audio_buf_size - is->audio_buf_index;
2107         if (len1 > len)
2108             len1 = len;
2109         memcpy(stream, (uint8_t *)is->audio_buf + is->audio_buf_index, len1);
2110         len -= len1;
2111         stream += len1;
2112         is->audio_buf_index += len1;
2113     }
2114 }
2115
2116 /* open a given stream. Return 0 if OK */
2117 static int stream_component_open(VideoState *is, int stream_index)
2118 {
2119     AVFormatContext *ic = is->ic;
2120     AVCodecContext *avctx;
2121     AVCodec *codec;
2122     SDL_AudioSpec wanted_spec, spec;
2123
2124     if (stream_index < 0 || stream_index >= ic->nb_streams)
2125         return -1;
2126     avctx = ic->streams[stream_index]->codec;
2127
2128     /* request at most 2 decoded channels for the audio output */
2129     if (avctx->codec_type == AVMEDIA_TYPE_AUDIO) {
2130         if (avctx->channels > 0) {
2131             avctx->request_channels = FFMIN(2, avctx->channels);
2132         } else {
2133             avctx->request_channels = 2;
2134         }
2135     }
2136
2137     codec = avcodec_find_decoder(avctx->codec_id);
2138     if (!codec)
2139         return -1;
2140
2141     avctx->debug_mv = debug_mv;
2142     avctx->debug = debug;
2143     avctx->workaround_bugs = workaround_bugs;
2144     avctx->lowres = lowres;
2145     if(lowres) avctx->flags |= CODEC_FLAG_EMU_EDGE;
2146     avctx->idct_algo= idct;
2147     if(fast) avctx->flags2 |= CODEC_FLAG2_FAST;
2148     avctx->skip_frame= skip_frame;
2149     avctx->skip_idct= skip_idct;
2150     avctx->skip_loop_filter= skip_loop_filter;
2151     avctx->error_recognition= error_recognition;
2152     avctx->error_concealment= error_concealment;
2153     avctx->thread_count= thread_count;
2154
2155     set_context_opts(avctx, avcodec_opts[avctx->codec_type], 0, codec);
2156
2157     if(codec->capabilities & CODEC_CAP_DR1)
2158         avctx->flags |= CODEC_FLAG_EMU_EDGE;
2159
2160     if (avcodec_open(avctx, codec) < 0)
2161         return -1;
2162
2163     /* prepare audio output */
2164     if (avctx->codec_type == AVMEDIA_TYPE_AUDIO) {
2165         wanted_spec.freq = avctx->sample_rate;
2166         wanted_spec.format = AUDIO_S16SYS;
2167         wanted_spec.channels = avctx->channels;
2168         wanted_spec.silence = 0;
2169         wanted_spec.samples = SDL_AUDIO_BUFFER_SIZE;
2170         wanted_spec.callback = sdl_audio_callback;
2171         wanted_spec.userdata = is;
2172         if (SDL_OpenAudio(&wanted_spec, &spec) < 0) {
2173             fprintf(stderr, "SDL_OpenAudio: %s\n", SDL_GetError());
2174             return -1;
2175         }
2176         is->audio_hw_buf_size = spec.size;
2177         is->audio_src_fmt= AV_SAMPLE_FMT_S16;
2178     }
2179
2180     ic->streams[stream_index]->discard = AVDISCARD_DEFAULT;
2181     switch(avctx->codec_type) {
2182     case AVMEDIA_TYPE_AUDIO:
2183         is->audio_stream = stream_index;
2184         is->audio_st = ic->streams[stream_index];
2185         is->audio_buf_size = 0;
2186         is->audio_buf_index = 0;
2187
2188         /* init averaging filter */
2189         is->audio_diff_avg_coef = exp(log(0.01) / AUDIO_DIFF_AVG_NB);
2190         is->audio_diff_avg_count = 0;
2191         /* since we do not have a precise enough measure of the audio fifo fullness,
2192            we correct audio sync only if the error is larger than this threshold */
2193         is->audio_diff_threshold = 2.0 * SDL_AUDIO_BUFFER_SIZE / avctx->sample_rate;
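        /* e.g. 2 * 1024 samples at 44100 Hz gives a threshold of about 46 ms */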
2194
2195         memset(&is->audio_pkt, 0, sizeof(is->audio_pkt));
2196         packet_queue_init(&is->audioq);
2197         SDL_PauseAudio(0);
2198         break;
2199     case AVMEDIA_TYPE_VIDEO:
2200         is->video_stream = stream_index;
2201         is->video_st = ic->streams[stream_index];
2202
2203         packet_queue_init(&is->videoq);
2204         is->video_tid = SDL_CreateThread(video_thread, is);
2205         break;
2206     case AVMEDIA_TYPE_SUBTITLE:
2207         is->subtitle_stream = stream_index;
2208         is->subtitle_st = ic->streams[stream_index];
2209         packet_queue_init(&is->subtitleq);
2210
2211         is->subtitle_tid = SDL_CreateThread(subtitle_thread, is);
2212         break;
2213     default:
2214         break;
2215     }
2216     return 0;
2217 }
2218
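/* close a stream component opened by stream_component_open(): abort and free
   its packet queue, wait for the associated decoding thread (video/subtitle)
   to exit, and close the codec */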
2219 static void stream_component_close(VideoState *is, int stream_index)
2220 {
2221     AVFormatContext *ic = is->ic;
2222     AVCodecContext *avctx;
2223
2224     if (stream_index < 0 || stream_index >= ic->nb_streams)
2225         return;
2226     avctx = ic->streams[stream_index]->codec;
2227
2228     switch(avctx->codec_type) {
2229     case AVMEDIA_TYPE_AUDIO:
2230         packet_queue_abort(&is->audioq);
2231
2232         SDL_CloseAudio();
2233
2234         packet_queue_end(&is->audioq);
2235         if (is->reformat_ctx)
2236             av_audio_convert_free(is->reformat_ctx);
2237         is->reformat_ctx = NULL;
2238         break;
2239     case AVMEDIA_TYPE_VIDEO:
2240         packet_queue_abort(&is->videoq);
2241
2242         /* note: we also signal this mutex to make sure we unblock the
2243            video thread in all cases */
2244         SDL_LockMutex(is->pictq_mutex);
2245         SDL_CondSignal(is->pictq_cond);
2246         SDL_UnlockMutex(is->pictq_mutex);
2247
2248         SDL_WaitThread(is->video_tid, NULL);
2249
2250         packet_queue_end(&is->videoq);
2251         break;
2252     case AVMEDIA_TYPE_SUBTITLE:
2253         packet_queue_abort(&is->subtitleq);
2254
2255         /* note: we also signal this mutex to make sure we unblock the
2256            subtitle thread in all cases */
2257         SDL_LockMutex(is->subpq_mutex);
2258         is->subtitle_stream_changed = 1;
2259
2260         SDL_CondSignal(is->subpq_cond);
2261         SDL_UnlockMutex(is->subpq_mutex);
2262
2263         SDL_WaitThread(is->subtitle_tid, NULL);
2264
2265         packet_queue_end(&is->subtitleq);
2266         break;
2267     default:
2268         break;
2269     }
2270
2271     ic->streams[stream_index]->discard = AVDISCARD_ALL;
2272     avcodec_close(avctx);
2273     switch(avctx->codec_type) {
2274     case AVMEDIA_TYPE_AUDIO:
2275         is->audio_st = NULL;
2276         is->audio_stream = -1;
2277         break;
2278     case AVMEDIA_TYPE_VIDEO:
2279         is->video_st = NULL;
2280         is->video_stream = -1;
2281         break;
2282     case AVMEDIA_TYPE_SUBTITLE:
2283         is->subtitle_st = NULL;
2284         is->subtitle_stream = -1;
2285         break;
2286     default:
2287         break;
2288     }
2289 }
2290
2291 /* since we have only one decoding thread, we can use a global
2292    variable instead of a thread local variable */
2293 static VideoState *global_video_state;
2294
2295 static int decode_interrupt_cb(void)
2296 {
2297     return (global_video_state && global_video_state->abort_request);
2298 }
2299
2300 /* this thread gets the stream from the disk or the network */
2301 static int read_thread(void *arg)
2302 {
2303     VideoState *is = arg;
2304     AVFormatContext *ic;
2305     int err, i, ret;
2306     int st_index[AVMEDIA_TYPE_NB];
2307     AVPacket pkt1, *pkt = &pkt1;
2308     AVFormatParameters params, *ap = &params;
2309     int eof=0;
2310     int pkt_in_play_range = 0;
2311
2312     ic = avformat_alloc_context();
2313
2314     memset(st_index, -1, sizeof(st_index));
2315     is->video_stream = -1;
2316     is->audio_stream = -1;
2317     is->subtitle_stream = -1;
2318
2319     global_video_state = is;
2320     avio_set_interrupt_cb(decode_interrupt_cb);
2321
2322     memset(ap, 0, sizeof(*ap));
2323
2324     ap->prealloced_context = 1;
2325     ap->width = frame_width;
2326     ap->height= frame_height;
2327     ap->time_base= (AVRational){1, 25};
2328     ap->pix_fmt = frame_pix_fmt;
2329     ic->flags |= AVFMT_FLAG_PRIV_OPT;
2330
2331
2332     err = av_open_input_file(&ic, is->filename, is->iformat, 0, ap);
2333     if (err >= 0) {
2334         set_context_opts(ic, avformat_opts, AV_OPT_FLAG_DECODING_PARAM, NULL);
2335         err = av_demuxer_open(ic, ap);
2336         if(err < 0){
2337             avformat_free_context(ic);
2338             ic= NULL;
2339         }
2340     }
2341     if (err < 0) {
2342         print_error(is->filename, err);
2343         ret = -1;
2344         goto fail;
2345     }
2346     is->ic = ic;
2347
2348     if(genpts)
2349         ic->flags |= AVFMT_FLAG_GENPTS;
2350
2351     err = av_find_stream_info(ic);
2352     if (err < 0) {
2353         fprintf(stderr, "%s: could not find codec parameters\n", is->filename);
2354         ret = -1;
2355         goto fail;
2356     }
2357     if(ic->pb)
2358         ic->pb->eof_reached= 0; //FIXME hack, ffplay maybe should not use url_feof() to test for the end
2359
2360     if(seek_by_bytes<0)
2361         seek_by_bytes= !!(ic->iformat->flags & AVFMT_TS_DISCONT);
2362
2363     /* if seeking was requested, execute it now */
2364     if (start_time != AV_NOPTS_VALUE) {
2365         int64_t timestamp;
2366
2367         timestamp = start_time;
2368         /* add the stream start time */
2369         if (ic->start_time != AV_NOPTS_VALUE)
2370             timestamp += ic->start_time;
2371         ret = avformat_seek_file(ic, -1, INT64_MIN, timestamp, INT64_MAX, 0);
2372         if (ret < 0) {
2373             fprintf(stderr, "%s: could not seek to position %0.3f\n",
2374                     is->filename, (double)timestamp / AV_TIME_BASE);
2375         }
2376     }
2377
2378     for (i = 0; i < ic->nb_streams; i++)
2379         ic->streams[i]->discard = AVDISCARD_ALL;
2380     if (!video_disable)
2381         st_index[AVMEDIA_TYPE_VIDEO] =
2382             av_find_best_stream(ic, AVMEDIA_TYPE_VIDEO,
2383                                 wanted_stream[AVMEDIA_TYPE_VIDEO], -1, NULL, 0);
2384     if (!audio_disable)
2385         st_index[AVMEDIA_TYPE_AUDIO] =
2386             av_find_best_stream(ic, AVMEDIA_TYPE_AUDIO,
2387                                 wanted_stream[AVMEDIA_TYPE_AUDIO],
2388                                 st_index[AVMEDIA_TYPE_VIDEO],
2389                                 NULL, 0);
2390     if (!video_disable)
2391         st_index[AVMEDIA_TYPE_SUBTITLE] =
2392             av_find_best_stream(ic, AVMEDIA_TYPE_SUBTITLE,
2393                                 wanted_stream[AVMEDIA_TYPE_SUBTITLE],
2394                                 (st_index[AVMEDIA_TYPE_AUDIO] >= 0 ?
2395                                  st_index[AVMEDIA_TYPE_AUDIO] :
2396                                  st_index[AVMEDIA_TYPE_VIDEO]),
2397                                 NULL, 0);
2398     if (show_status) {
2399         av_dump_format(ic, 0, is->filename, 0);
2400     }
2401
2402     is->show_mode = show_mode;
2403
2404     /* open the streams */
2405     if (st_index[AVMEDIA_TYPE_AUDIO] >= 0) {
2406         stream_component_open(is, st_index[AVMEDIA_TYPE_AUDIO]);
2407     }
2408
2409     ret=-1;
2410     if (st_index[AVMEDIA_TYPE_VIDEO] >= 0) {
2411         ret= stream_component_open(is, st_index[AVMEDIA_TYPE_VIDEO]);
2412     }
2413     is->refresh_tid = SDL_CreateThread(refresh_thread, is);
2414     if (is->show_mode == SHOW_MODE_NONE)
2415         is->show_mode = ret >= 0 ? SHOW_MODE_VIDEO : SHOW_MODE_RDFT;
2416
2417     if (st_index[AVMEDIA_TYPE_SUBTITLE] >= 0) {
2418         stream_component_open(is, st_index[AVMEDIA_TYPE_SUBTITLE]);
2419     }
2420
2421     if (is->video_stream < 0 && is->audio_stream < 0) {
2422         fprintf(stderr, "%s: could not open codecs\n", is->filename);
2423         ret = -1;
2424         goto fail;
2425     }
2426
2427     for(;;) {
2428         if (is->abort_request)
2429             break;
2430         if (is->paused != is->last_paused) {
2431             is->last_paused = is->paused;
2432             if (is->paused)
2433                 is->read_pause_return= av_read_pause(ic);
2434             else
2435                 av_read_play(ic);
2436         }
2437 #if CONFIG_RTSP_DEMUXER
2438         if (is->paused && !strcmp(ic->iformat->name, "rtsp")) {
2439             /* wait 10 ms to avoid trying to get another packet */
2440             /* XXX: horrible */
2441             SDL_Delay(10);
2442             continue;
2443         }
2444 #endif
2445         if (is->seek_req) {
2446             int64_t seek_target= is->seek_pos;
2447             int64_t seek_min= is->seek_rel > 0 ? seek_target - is->seek_rel + 2: INT64_MIN;
2448             int64_t seek_max= is->seek_rel < 0 ? seek_target - is->seek_rel - 2: INT64_MAX;
2449 //FIXME the +-2 is due to rounding not being done in the correct direction when
2450 //      generating the seek_pos/seek_rel variables
2451
2452             ret = avformat_seek_file(is->ic, -1, seek_min, seek_target, seek_max, is->seek_flags);
2453             if (ret < 0) {
2454                 fprintf(stderr, "%s: error while seeking\n", is->ic->filename);
2455             }else{
2456                 if (is->audio_stream >= 0) {
2457                     packet_queue_flush(&is->audioq);
2458                     packet_queue_put(&is->audioq, &flush_pkt);
2459                 }
2460                 if (is->subtitle_stream >= 0) {
2461                     packet_queue_flush(&is->subtitleq);
2462                     packet_queue_put(&is->subtitleq, &flush_pkt);
2463                 }
2464                 if (is->video_stream >= 0) {
2465                     packet_queue_flush(&is->videoq);
2466                     packet_queue_put(&is->videoq, &flush_pkt);
2467                 }
2468             }
2469             is->seek_req = 0;
2470             eof= 0;
2471         }
2472
2473         /* if the queues are full, no need to read more */
2474         if (   is->audioq.size + is->videoq.size + is->subtitleq.size > MAX_QUEUE_SIZE
2475             || (   (is->audioq   .size  > MIN_AUDIOQ_SIZE || is->audio_stream<0)
2476                 && (is->videoq   .nb_packets > MIN_FRAMES || is->video_stream<0)
2477                 && (is->subtitleq.nb_packets > MIN_FRAMES || is->subtitle_stream<0))) {
2478             /* wait 10 ms */
2479             SDL_Delay(10);
2480             continue;
2481         }
2482         if(eof) {
2483             if(is->video_stream >= 0){
2484                 av_init_packet(pkt);
2485                 pkt->data=NULL;
2486                 pkt->size=0;
2487                 pkt->stream_index= is->video_stream;
2488                 packet_queue_put(&is->videoq, pkt);
2489             }
2490             SDL_Delay(10);
2491             if(is->audioq.size + is->videoq.size + is->subtitleq.size ==0){
2492                 if(loop!=1 && (!loop || --loop)){
2493                     stream_seek(cur_stream, start_time != AV_NOPTS_VALUE ? start_time : 0, 0, 0);
2494                 }else if(autoexit){
2495                     ret=AVERROR_EOF;
2496                     goto fail;
2497                 }
2498             }
2499             eof=0;
2500             continue;
2501         }
2502         ret = av_read_frame(ic, pkt);
2503         if (ret < 0) {
2504             if (ret == AVERROR_EOF || url_feof(ic->pb))
2505                 eof=1;
2506             if (ic->pb && ic->pb->error)
2507                 break;
2508             SDL_Delay(100); /* wait for user event */
2509             continue;
2510         }
2511         /* check if packet is in play range specified by user, then queue, otherwise discard */
2512         pkt_in_play_range = duration == AV_NOPTS_VALUE ||
2513                 (pkt->pts - ic->streams[pkt->stream_index]->start_time) *
2514                 av_q2d(ic->streams[pkt->stream_index]->time_base) -
2515                 (double)(start_time != AV_NOPTS_VALUE ? start_time : 0)/1000000
2516                 <= ((double)duration/1000000);
2517         if (pkt->stream_index == is->audio_stream && pkt_in_play_range) {
2518             packet_queue_put(&is->audioq, pkt);
2519         } else if (pkt->stream_index == is->video_stream && pkt_in_play_range) {
2520             packet_queue_put(&is->videoq, pkt);
2521         } else if (pkt->stream_index == is->subtitle_stream && pkt_in_play_range) {
2522             packet_queue_put(&is->subtitleq, pkt);
2523         } else {
2524             av_free_packet(pkt);
2525         }
2526     }
2527     /* wait until the end */
2528     while (!is->abort_request) {
2529         SDL_Delay(100);
2530     }
2531
2532     ret = 0;
2533  fail:
2534     /* disable interrupting */
2535     global_video_state = NULL;
2536
2537     /* close each stream */
2538     if (is->audio_stream >= 0)
2539         stream_component_close(is, is->audio_stream);
2540     if (is->video_stream >= 0)
2541         stream_component_close(is, is->video_stream);
2542     if (is->subtitle_stream >= 0)
2543         stream_component_close(is, is->subtitle_stream);
2544     if (is->ic) {
2545         av_close_input_file(is->ic);
2546         is->ic = NULL; /* safety */
2547     }
2548     avio_set_interrupt_cb(NULL);
2549
2550     if (ret != 0) {
2551         SDL_Event event;
2552
2553         event.type = FF_QUIT_EVENT;
2554         event.user.data1 = is;
2555         SDL_PushEvent(&event);
2556     }
2557     return 0;
2558 }
2559
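/* allocate a VideoState for the given file and start the read_thread() that
   will open the streams and feed the packet queues; returns NULL on failure */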
2560 static VideoState *stream_open(const char *filename, AVInputFormat *iformat)
2561 {
2562     VideoState *is;
2563
2564     is = av_mallocz(sizeof(VideoState));
2565     if (!is)
2566         return NULL;
2567     av_strlcpy(is->filename, filename, sizeof(is->filename));
2568     is->iformat = iformat;
2569     is->ytop = 0;
2570     is->xleft = 0;
2571
2572     /* start video display */
2573     is->pictq_mutex = SDL_CreateMutex();
2574     is->pictq_cond = SDL_CreateCond();
2575
2576     is->subpq_mutex = SDL_CreateMutex();
2577     is->subpq_cond = SDL_CreateCond();
2578
2579     is->av_sync_type = av_sync_type;
2580     is->read_tid = SDL_CreateThread(read_thread, is);
2581     if (!is->read_tid) {
2582         av_free(is);
2583         return NULL;
2584     }
2585     return is;
2586 }
2587
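/* switch to the next usable stream of the given type (audio, video or
   subtitle), wrapping around; for subtitles the cycle also includes
   "no subtitle stream" */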
2588 static void stream_cycle_channel(VideoState *is, int codec_type)
2589 {
2590     AVFormatContext *ic = is->ic;
2591     int start_index, stream_index;
2592     AVStream *st;
2593
2594     if (codec_type == AVMEDIA_TYPE_VIDEO)
2595         start_index = is->video_stream;
2596     else if (codec_type == AVMEDIA_TYPE_AUDIO)
2597         start_index = is->audio_stream;
2598     else
2599         start_index = is->subtitle_stream;
2600     if (start_index < (codec_type == AVMEDIA_TYPE_SUBTITLE ? -1 : 0))
2601         return;
2602     stream_index = start_index;
2603     for(;;) {
2604         if (++stream_index >= is->ic->nb_streams)
2605         {
2606             if (codec_type == AVMEDIA_TYPE_SUBTITLE)
2607             {
2608                 stream_index = -1;
2609                 goto the_end;
2610             } else
2611                 stream_index = 0;
2612         }
2613         if (stream_index == start_index)
2614             return;
2615         st = ic->streams[stream_index];
2616         if (st->codec->codec_type == codec_type) {
2617             /* check that parameters are OK */
2618             switch(codec_type) {
2619             case AVMEDIA_TYPE_AUDIO:
2620                 if (st->codec->sample_rate != 0 &&
2621                     st->codec->channels != 0)
2622                     goto the_end;
2623                 break;
2624             case AVMEDIA_TYPE_VIDEO:
2625             case AVMEDIA_TYPE_SUBTITLE:
2626                 goto the_end;
2627             default:
2628                 break;
2629             }
2630         }
2631     }
2632  the_end:
2633     stream_component_close(is, start_index);
2634     stream_component_open(is, stream_index);
2635 }
2636
2637
2638 static void toggle_full_screen(void)
2639 {
2640     is_full_screen = !is_full_screen;
2641     video_open(cur_stream);
2642 }
2643
2644 static void toggle_pause(void)
2645 {
2646     if (cur_stream)
2647         stream_toggle_pause(cur_stream);
2648     step = 0;
2649 }
2650
2651 static void step_to_next_frame(void)
2652 {
2653     if (cur_stream) {
2654         /* if the stream is paused, unpause it, then step */
2655         if (cur_stream->paused)
2656             stream_toggle_pause(cur_stream);
2657     }
2658     step = 1;
2659 }
2660
2661 static void toggle_audio_display(void)
2662 {
2663     if (cur_stream) {
2664         int bgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0x00);
2665         cur_stream->show_mode = (cur_stream->show_mode + 1) % SHOW_MODE_NB;
2666         fill_rectangle(screen,
2667                     cur_stream->xleft, cur_stream->ytop, cur_stream->width, cur_stream->height,
2668                     bgcolor);
2669         SDL_UpdateRect(screen, cur_stream->xleft, cur_stream->ytop, cur_stream->width, cur_stream->height);
2670     }
2671 }
2672
2673 /* handle an event sent by the GUI */
2674 static void event_loop(void)
2675 {
2676     SDL_Event event;
2677     double incr, pos, frac;
2678
2679     for(;;) {
2680         double x;
2681         SDL_WaitEvent(&event);
2682         switch(event.type) {
2683         case SDL_KEYDOWN:
2684             if (exit_on_keydown) {
2685                 do_exit();
2686                 break;
2687             }
2688             switch(event.key.keysym.sym) {
2689             case SDLK_ESCAPE:
2690             case SDLK_q:
2691                 do_exit();
2692                 break;
2693             case SDLK_f:
2694                 toggle_full_screen();
2695                 break;
2696             case SDLK_p:
2697             case SDLK_SPACE:
2698                 toggle_pause();
2699                 break;
2700             case SDLK_s: //S: Step to next frame
2701                 step_to_next_frame();
2702                 break;
2703             case SDLK_a:
2704                 if (cur_stream)
2705                     stream_cycle_channel(cur_stream, AVMEDIA_TYPE_AUDIO);
2706                 break;
2707             case SDLK_v:
2708                 if (cur_stream)
2709                     stream_cycle_channel(cur_stream, AVMEDIA_TYPE_VIDEO);
2710                 break;
2711             case SDLK_t:
2712                 if (cur_stream)
2713                     stream_cycle_channel(cur_stream, AVMEDIA_TYPE_SUBTITLE);
2714                 break;
2715             case SDLK_w:
2716                 toggle_audio_display();
2717                 break;
2718             case SDLK_LEFT:
2719                 incr = -10.0;
2720                 goto do_seek;
2721             case SDLK_RIGHT:
2722                 incr = 10.0;
2723                 goto do_seek;
2724             case SDLK_UP:
2725                 incr = 60.0;
2726                 goto do_seek;
2727             case SDLK_DOWN:
2728                 incr = -60.0;
2729             do_seek:
2730                 if (cur_stream) {
2731                     if (seek_by_bytes) {
2732                         if (cur_stream->video_stream >= 0 && cur_stream->video_current_pos>=0){
2733                             pos= cur_stream->video_current_pos;
2734                         }else if(cur_stream->audio_stream >= 0 && cur_stream->audio_pkt.pos>=0){
2735                             pos= cur_stream->audio_pkt.pos;
2736                         }else
2737                             pos = avio_tell(cur_stream->ic->pb);
2738                         if (cur_stream->ic->bit_rate)
2739                             incr *= cur_stream->ic->bit_rate / 8.0;
2740                         else
2741                             incr *= 180000.0;
2742                         pos += incr;
2743                         stream_seek(cur_stream, pos, incr, 1);
2744                     } else {
2745                         pos = get_master_clock(cur_stream);
2746                         pos += incr;
2747                         stream_seek(cur_stream, (int64_t)(pos * AV_TIME_BASE), (int64_t)(incr * AV_TIME_BASE), 0);
2748                     }
2749                 }
2750                 break;
2751             default:
2752                 break;
2753             }
2754             break;
2755         case SDL_MOUSEBUTTONDOWN:
2756             if (exit_on_mousedown) {
2757                 do_exit();
2758                 break;
2759             }
2760         case SDL_MOUSEMOTION:
2761             if(event.type ==SDL_MOUSEBUTTONDOWN){
2762                 x= event.button.x;
2763             }else{
2764                 if(event.motion.state != SDL_PRESSED)
2765                     break;
2766                 x= event.motion.x;
2767             }
2768             if (cur_stream) {
2769                 if(seek_by_bytes || cur_stream->ic->duration<=0){
2770                     uint64_t size=  avio_size(cur_stream->ic->pb);
2771                     stream_seek(cur_stream, size*x/cur_stream->width, 0, 1);
2772                 }else{
2773                     int64_t ts;
2774                     int ns, hh, mm, ss;
2775                     int tns, thh, tmm, tss;
2776                     tns = cur_stream->ic->duration/1000000LL;
2777                     thh = tns/3600;
2778                     tmm = (tns%3600)/60;
2779                     tss = (tns%60);
2780                     frac = x/cur_stream->width;
2781                     ns = frac*tns;
2782                     hh = ns/3600;
2783                     mm = (ns%3600)/60;
2784                     ss = (ns%60);
2785                     fprintf(stderr, "Seek to %2.0f%% (%2d:%02d:%02d) of total duration (%2d:%02d:%02d)       \n", frac*100,
2786                             hh, mm, ss, thh, tmm, tss);
2787                     ts = frac*cur_stream->ic->duration;
2788                     if (cur_stream->ic->start_time != AV_NOPTS_VALUE)
2789                         ts += cur_stream->ic->start_time;
2790                     stream_seek(cur_stream, ts, 0, 0);
2791                 }
2792             }
2793             break;
2794         case SDL_VIDEORESIZE:
2795             if (cur_stream) {
2796                 screen = SDL_SetVideoMode(event.resize.w, event.resize.h, 0,
2797                                           SDL_HWSURFACE|SDL_RESIZABLE|SDL_ASYNCBLIT|SDL_HWACCEL);
2798                 screen_width = cur_stream->width = event.resize.w;
2799                 screen_height= cur_stream->height= event.resize.h;
2800             }
2801             break;
2802         case SDL_QUIT:
2803         case FF_QUIT_EVENT:
2804             do_exit();
2805             break;
2806         case FF_ALLOC_EVENT:
2807             video_open(event.user.data1);
2808             alloc_picture(event.user.data1);
2809             break;
2810         case FF_REFRESH_EVENT:
2811             video_refresh(event.user.data1);
2812             cur_stream->refresh=0;
2813             break;
2814         default:
2815             break;
2816         }
2817     }
2818 }
2819
2820 static void opt_frame_size(const char *arg)
2821 {
2822     if (av_parse_video_size(&frame_width, &frame_height, arg) < 0) {
2823         fprintf(stderr, "Incorrect frame size\n");
2824         exit(1);
2825     }
2826     if ((frame_width % 2) != 0 || (frame_height % 2) != 0) {
2827         fprintf(stderr, "Frame size must be a multiple of 2\n");
2828         exit(1);
2829     }
2830 }
2831
2832 static int opt_width(const char *opt, const char *arg)
2833 {
2834     screen_width = parse_number_or_die(opt, arg, OPT_INT64, 1, INT_MAX);
2835     return 0;
2836 }
2837
2838 static int opt_height(const char *opt, const char *arg)
2839 {
2840     screen_height = parse_number_or_die(opt, arg, OPT_INT64, 1, INT_MAX);
2841     return 0;
2842 }
2843
2844 static void opt_format(const char *arg)
2845 {
2846     file_iformat = av_find_input_format(arg);
2847     if (!file_iformat) {
2848         fprintf(stderr, "Unknown input format: %s\n", arg);
2849         exit(1);
2850     }
2851 }
2852
2853 static void opt_frame_pix_fmt(const char *arg)
2854 {
2855     frame_pix_fmt = av_get_pix_fmt(arg);
2856 }
2857
2858 static int opt_sync(const char *opt, const char *arg)
2859 {
2860     if (!strcmp(arg, "audio"))
2861         av_sync_type = AV_SYNC_AUDIO_MASTER;
2862     else if (!strcmp(arg, "video"))
2863         av_sync_type = AV_SYNC_VIDEO_MASTER;
2864     else if (!strcmp(arg, "ext"))
2865         av_sync_type = AV_SYNC_EXTERNAL_CLOCK;
2866     else {
2867         fprintf(stderr, "Unknown value for %s: %s\n", opt, arg);
2868         exit(1);
2869     }
2870     return 0;
2871 }
2872
2873 static int opt_seek(const char *opt, const char *arg)
2874 {
2875     start_time = parse_time_or_die(opt, arg, 1);
2876     return 0;
2877 }
2878
2879 static int opt_duration(const char *opt, const char *arg)
2880 {
2881     duration = parse_time_or_die(opt, arg, 1);
2882     return 0;
2883 }
2884
2885 static int opt_debug(const char *opt, const char *arg)
2886 {
2887     av_log_set_level(99);
2888     debug = parse_number_or_die(opt, arg, OPT_INT64, 0, INT_MAX);
2889     return 0;
2890 }
2891
2892 static int opt_vismv(const char *opt, const char *arg)
2893 {
2894     debug_mv = parse_number_or_die(opt, arg, OPT_INT64, INT_MIN, INT_MAX);
2895     return 0;
2896 }
2897
2898 static int opt_thread_count(const char *opt, const char *arg)
2899 {
2900     thread_count= parse_number_or_die(opt, arg, OPT_INT64, 0, INT_MAX);
2901 #if !HAVE_THREADS
2902     fprintf(stderr, "Warning: not compiled with thread support, using thread emulation\n");
2903 #endif
2904     return 0;
2905 }
2906
2907 static int opt_show_mode(const char *opt, const char *arg)
2908 {
2909     show_mode = !strcmp(arg, "video") ? SHOW_MODE_VIDEO :
2910                 !strcmp(arg, "waves") ? SHOW_MODE_WAVES :
2911                 !strcmp(arg, "rdft" ) ? SHOW_MODE_RDFT  :
2912                 parse_number_or_die(opt, arg, OPT_INT, 0, SHOW_MODE_NB-1);
2913     return 0;
2914 }
2915
2916 static const OptionDef options[] = {
2917 #include "cmdutils_common_opts.h"
2918     { "x", HAS_ARG | OPT_FUNC2, {(void*)opt_width}, "force displayed width", "width" },
2919     { "y", HAS_ARG | OPT_FUNC2, {(void*)opt_height}, "force displayed height", "height" },
2920     { "s", HAS_ARG | OPT_VIDEO, {(void*)opt_frame_size}, "set frame size (WxH or abbreviation)", "size" },
2921     { "fs", OPT_BOOL, {(void*)&is_full_screen}, "force full screen" },
2922     { "an", OPT_BOOL, {(void*)&audio_disable}, "disable audio" },
2923     { "vn", OPT_BOOL, {(void*)&video_disable}, "disable video" },
2924     { "ast", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&wanted_stream[AVMEDIA_TYPE_AUDIO]}, "select desired audio stream", "stream_number" },
2925     { "vst", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&wanted_stream[AVMEDIA_TYPE_VIDEO]}, "select desired video stream", "stream_number" },
2926     { "sst", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&wanted_stream[AVMEDIA_TYPE_SUBTITLE]}, "select desired subtitle stream", "stream_number" },
2927     { "ss", HAS_ARG | OPT_FUNC2, {(void*)&opt_seek}, "seek to a given position in seconds", "pos" },
2928     { "t", HAS_ARG | OPT_FUNC2, {(void*)&opt_duration}, "play \"duration\" seconds of audio/video", "duration" },
2929     { "bytes", OPT_INT | HAS_ARG, {(void*)&seek_by_bytes}, "seek by bytes 0=off 1=on -1=auto", "val" },
2930     { "nodisp", OPT_BOOL, {(void*)&display_disable}, "disable graphical display" },
2931     { "f", HAS_ARG, {(void*)opt_format}, "force format", "fmt" },
2932     { "pix_fmt", HAS_ARG | OPT_EXPERT | OPT_VIDEO, {(void*)opt_frame_pix_fmt}, "set pixel format", "format" },
2933     { "stats", OPT_BOOL | OPT_EXPERT, {(void*)&show_status}, "show status", "" },
2934     { "debug", HAS_ARG | OPT_FUNC2 | OPT_EXPERT, {(void*)opt_debug}, "print specific debug info", "" },
2935     { "bug", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&workaround_bugs}, "workaround bugs", "" },
2936     { "vismv", HAS_ARG | OPT_FUNC2 | OPT_EXPERT, {(void*)opt_vismv}, "visualize motion vectors", "" },
2937     { "fast", OPT_BOOL | OPT_EXPERT, {(void*)&fast}, "non spec compliant optimizations", "" },
2938     { "genpts", OPT_BOOL | OPT_EXPERT, {(void*)&genpts}, "generate pts", "" },
2939     { "drp", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&decoder_reorder_pts}, "let decoder reorder pts 0=off 1=on -1=auto", ""},
2940     { "lowres", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&lowres}, "", "" },
2941     { "skiploop", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&skip_loop_filter}, "", "" },
2942     { "skipframe", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&skip_frame}, "", "" },
2943     { "skipidct", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&skip_idct}, "", "" },
2944     { "idct", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&idct}, "set idct algo",  "algo" },
2945     { "er", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&error_recognition}, "set error detection threshold (0-4)",  "threshold" },
2946     { "ec", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&error_concealment}, "set error concealment options",  "bit_mask" },
2947     { "sync", HAS_ARG | OPT_FUNC2 | OPT_EXPERT, {(void*)opt_sync}, "set audio-video sync. type (type=audio/video/ext)", "type" },
2948     { "threads", HAS_ARG | OPT_FUNC2 | OPT_EXPERT, {(void*)opt_thread_count}, "thread count", "count" },
2949     { "autoexit", OPT_BOOL | OPT_EXPERT, {(void*)&autoexit}, "exit at the end", "" },
2950     { "exitonkeydown", OPT_BOOL | OPT_EXPERT, {(void*)&exit_on_keydown}, "exit on key down", "" },
2951     { "exitonmousedown", OPT_BOOL | OPT_EXPERT, {(void*)&exit_on_mousedown}, "exit on mouse down", "" },
2952     { "loop", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&loop}, "set number of times the playback shall be looped", "loop count" },
2953     { "framedrop", OPT_BOOL | OPT_EXPERT, {(void*)&framedrop}, "drop frames when cpu is too slow", "" },
2954     { "window_title", OPT_STRING | HAS_ARG, {(void*)&window_title}, "set window title", "window title" },
2955 #if CONFIG_AVFILTER
2956     { "vf", OPT_STRING | HAS_ARG, {(void*)&vfilters}, "video filters", "filter list" },
2957 #endif
2958     { "rdftspeed", OPT_INT | HAS_ARG| OPT_AUDIO | OPT_EXPERT, {(void*)&rdftspeed}, "rdft speed", "msecs" },
2959     { "showmode", HAS_ARG | OPT_FUNC2, {(void*)opt_show_mode}, "select show mode (0 = video, 1 = waves, 2 = RDFT)", "mode" },
2960     { "default", OPT_FUNC2 | HAS_ARG | OPT_AUDIO | OPT_VIDEO | OPT_EXPERT, {(void*)opt_default}, "generic catch all option", "" },
2961     { "i", OPT_DUMMY, {NULL}, "ffmpeg compatibility dummy option", ""},
2962     { NULL, },
2963 };
2964
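/* A few illustrative command lines exercising the options above (file names
   are placeholders):
       ffplay -an -vf "scale=640:360" input.mkv    video only, scaled (if built with libavfilter)
       ffplay -ss 30 -t 10 -autoexit input.mkv     play 10 seconds starting at 0:30, then exit
       ffplay -sync ext -loop 2 input.mkv          external clock master, loop the playback
*/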
2965 static void show_usage(void)
2966 {
2967     printf("Simple media player\n");
2968     printf("usage: ffplay [options] input_file\n");
2969     printf("\n");
2970 }
2971
2972 static void show_help(void)
2973 {
2974     av_log_set_callback(log_callback_help);
2975     show_usage();
2976     show_help_options(options, "Main options:\n",
2977                       OPT_EXPERT, 0);
2978     show_help_options(options, "\nAdvanced options:\n",
2979                       OPT_EXPERT, OPT_EXPERT);
2980     printf("\n");
2981     av_opt_show2(avcodec_opts[0], NULL,
2982                  AV_OPT_FLAG_DECODING_PARAM, 0);
2983     printf("\n");
2984     av_opt_show2(avformat_opts, NULL,
2985                  AV_OPT_FLAG_DECODING_PARAM, 0);
2986 #if !CONFIG_AVFILTER
2987     printf("\n");
2988     av_opt_show2(sws_opts, NULL,
2989                  AV_OPT_FLAG_ENCODING_PARAM, 0);
2990 #endif
2991     printf("\nWhile playing:\n"
2992            "q, ESC              quit\n"
2993            "f                   toggle full screen\n"
2994            "p, SPC              pause\n"
2995            "a                   cycle audio channel\n"
2996            "v                   cycle video channel\n"
2997            "t                   cycle subtitle channel\n"
2998            "w                   show audio waves\n"
2999            "s                   activate frame-step mode\n"
3000            "left/right          seek backward/forward 10 seconds\n"
3001            "down/up             seek backward/forward 1 minute\n"
3002            "mouse click         seek to percentage in file corresponding to fraction of width\n"
3003            );
3004 }
3005
3006 static void opt_input_file(const char *filename)
3007 {
3008     if (input_filename) {
3009         fprintf(stderr, "Argument '%s' provided as input filename, but '%s' was already specified.\n",
3010                 filename, input_filename);
3011         exit(1);
3012     }
3013     if (!strcmp(filename, "-"))
3014         filename = "pipe:";
3015     input_filename = filename;
3016 }
3017
3018 /* program entry point */
3019 int main(int argc, char **argv)
3020 {
3021     int flags;
3022
3023     av_log_set_flags(AV_LOG_SKIP_REPEATED);
3024
3025     /* register all codecs, demuxers and protocols */
3026     avcodec_register_all();
3027 #if CONFIG_AVDEVICE
3028     avdevice_register_all();
3029 #endif
3030 #if CONFIG_AVFILTER
3031     avfilter_register_all();
3032 #endif
3033     av_register_all();
3034
3035     init_opts();
3036
3037     show_banner();
3038
3039     parse_options(argc, argv, options, opt_input_file);
3040
3041     if (!input_filename) {
3042         show_usage();
3043         fprintf(stderr, "An input file must be specified\n");
3044         fprintf(stderr, "Use -h to get full help or, even better, run 'man ffplay'\n");
3045         exit(1);
3046     }
3047
3048     if (display_disable) {
3049         video_disable = 1;
3050     }
3051     flags = SDL_INIT_VIDEO | SDL_INIT_AUDIO | SDL_INIT_TIMER;
3052 #if !defined(__MINGW32__) && !defined(__APPLE__)
3053     flags |= SDL_INIT_EVENTTHREAD; /* Not supported on Windows or Mac OS X */
3054 #endif
3055     if (SDL_Init (flags)) {
3056         fprintf(stderr, "Could not initialize SDL - %s\n", SDL_GetError());
3057         exit(1);
3058     }
3059
3060     if (!display_disable) {
3061 #if HAVE_SDL_VIDEO_SIZE
3062         const SDL_VideoInfo *vi = SDL_GetVideoInfo();
3063         fs_screen_width = vi->current_w;
3064         fs_screen_height = vi->current_h;
3065 #endif
3066     }
3067
3068     SDL_EventState(SDL_ACTIVEEVENT, SDL_IGNORE);
3069     SDL_EventState(SDL_SYSWMEVENT, SDL_IGNORE);
3070     SDL_EventState(SDL_USEREVENT, SDL_IGNORE);
3071
3072     av_init_packet(&flush_pkt);
3073     flush_pkt.data= "FLUSH";
3074
3075     cur_stream = stream_open(input_filename, file_iformat);
3076
3077     event_loop();
3078
3079     /* never returns */
3080
3081     return 0;
3082 }