1 /*
2  * ffplay : Simple Media Player based on the FFmpeg libraries
3  * Copyright (c) 2003 Fabrice Bellard
4  *
5  * This file is part of FFmpeg.
6  *
7  * FFmpeg is free software; you can redistribute it and/or
8  * modify it under the terms of the GNU Lesser General Public
9  * License as published by the Free Software Foundation; either
10  * version 2.1 of the License, or (at your option) any later version.
11  *
12  * FFmpeg is distributed in the hope that it will be useful,
13  * but WITHOUT ANY WARRANTY; without even the implied warranty of
14  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
15  * Lesser General Public License for more details.
16  *
17  * You should have received a copy of the GNU Lesser General Public
18  * License along with FFmpeg; if not, write to the Free Software
19  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
20  */
21
22 #include "config.h"
23 #include <inttypes.h>
24 #include <math.h>
25 #include <limits.h>
26 #include "libavutil/avstring.h"
27 #include "libavutil/colorspace.h"
28 #include "libavutil/pixdesc.h"
29 #include "libavutil/imgutils.h"
30 #include "libavutil/parseutils.h"
31 #include "libavutil/samplefmt.h"
32 #include "libavutil/avassert.h"
33 #include "libavformat/avformat.h"
34 #include "libavdevice/avdevice.h"
35 #include "libswscale/swscale.h"
36 #include "libavcodec/audioconvert.h"
37 #include "libavutil/opt.h"
38 #include "libavcodec/avfft.h"
39
40 #if CONFIG_AVFILTER
41 # include "libavfilter/avcodec.h"
42 # include "libavfilter/avfilter.h"
43 # include "libavfilter/avfiltergraph.h"
44 #endif
45
46 #include <SDL.h>
47 #include <SDL_thread.h>
48
49 #include "cmdutils.h"
50
51 #include <unistd.h>
52 #include <assert.h>
53
54 const char program_name[] = "ffplay";
55 const int program_birth_year = 2003;
56
57 #define MAX_QUEUE_SIZE (15 * 1024 * 1024)
58 #define MIN_AUDIOQ_SIZE (20 * 16 * 1024)
59 #define MIN_FRAMES 5
60
61 /* SDL audio buffer size, in samples. Should be small to have precise
62    A/V sync as SDL does not have hardware buffer fullness info. */
63 #define SDL_AUDIO_BUFFER_SIZE 1024
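/* For example, at a 44100 Hz sample rate this buffer covers roughly
   1024 / 44100 ~ 23 ms of audio per callback, which is the granularity
   at which the SDL audio callback pulls data from us. */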
64
65 /* no AV sync correction is done if below the AV sync threshold */
66 #define AV_SYNC_THRESHOLD 0.01
67 /* no AV sync correction is done if the error is too big */
68 #define AV_NOSYNC_THRESHOLD 10.0
69
70 #define FRAME_SKIP_FACTOR 0.05
71
72 /* maximum audio speed change to get correct sync */
73 #define SAMPLE_CORRECTION_PERCENT_MAX 10
74
75 /* we use about AUDIO_DIFF_AVG_NB A-V differences to make the average */
76 #define AUDIO_DIFF_AVG_NB   20
77
78 /* NOTE: the size must be big enough to compensate for the hardware audio buffer size */
79 #define SAMPLE_ARRAY_SIZE (2*65536)
80
81 static int sws_flags = SWS_BICUBIC;
82
83 typedef struct PacketQueue {
84     AVPacketList *first_pkt, *last_pkt;
85     int nb_packets;
86     int size;
87     int abort_request;
88     SDL_mutex *mutex;
89     SDL_cond *cond;
90 } PacketQueue;
91
92 #define VIDEO_PICTURE_QUEUE_SIZE 2
93 #define SUBPICTURE_QUEUE_SIZE 4
94
95 typedef struct VideoPicture {
96     double pts;                                  ///<presentation time stamp for this picture
97     double target_clock;                         ///<av_gettime() time at which this should be displayed ideally
98     int64_t pos;                                 ///<byte position in file
99     SDL_Overlay *bmp;
100     int width, height; /* source height & width */
101     int allocated;
102     enum PixelFormat pix_fmt;
103
104 #if CONFIG_AVFILTER
105     AVFilterBufferRef *picref;
106 #endif
107 } VideoPicture;
108
109 typedef struct SubPicture {
110     double pts; /* presentation time stamp for this picture */
111     AVSubtitle sub;
112 } SubPicture;
113
114 enum {
115     AV_SYNC_AUDIO_MASTER, /* default choice */
116     AV_SYNC_VIDEO_MASTER,
117     AV_SYNC_EXTERNAL_CLOCK, /* synchronize to an external clock */
118 };
119
120 typedef struct VideoState {
121     SDL_Thread *read_tid;
122     SDL_Thread *video_tid;
123     SDL_Thread *refresh_tid;
124     AVInputFormat *iformat;
125     int no_background;
126     int abort_request;
127     int paused;
128     int last_paused;
129     int seek_req;
130     int seek_flags;
131     int64_t seek_pos;
132     int64_t seek_rel;
133     int read_pause_return;
134     AVFormatContext *ic;
135
136     int audio_stream;
137
138     int av_sync_type;
139     double external_clock; /* external clock base */
140     int64_t external_clock_time;
141
142     double audio_clock;
143     double audio_diff_cum; /* used for AV difference average computation */
144     double audio_diff_avg_coef;
145     double audio_diff_threshold;
146     int audio_diff_avg_count;
147     AVStream *audio_st;
148     PacketQueue audioq;
149     int audio_hw_buf_size;
150     /* samples output by the codec. We reserve more space for A/V sync
151        compensation */
152     DECLARE_ALIGNED(16,uint8_t,audio_buf1)[(AVCODEC_MAX_AUDIO_FRAME_SIZE * 3) / 2];
153     DECLARE_ALIGNED(16,uint8_t,audio_buf2)[(AVCODEC_MAX_AUDIO_FRAME_SIZE * 3) / 2];
154     uint8_t *audio_buf;
155     unsigned int audio_buf_size; /* in bytes */
156     int audio_buf_index; /* in bytes */
157     AVPacket audio_pkt_temp;
158     AVPacket audio_pkt;
159     enum AVSampleFormat audio_src_fmt;
160     AVAudioConvert *reformat_ctx;
161
162     enum ShowMode {
163         SHOW_MODE_NONE = -1, SHOW_MODE_VIDEO = 0, SHOW_MODE_WAVES, SHOW_MODE_RDFT, SHOW_MODE_NB
164     } show_mode;
165     int16_t sample_array[SAMPLE_ARRAY_SIZE];
166     int sample_array_index;
167     int last_i_start;
168     RDFTContext *rdft;
169     int rdft_bits;
170     FFTSample *rdft_data;
171     int xpos;
172
173     SDL_Thread *subtitle_tid;
174     int subtitle_stream;
175     int subtitle_stream_changed;
176     AVStream *subtitle_st;
177     PacketQueue subtitleq;
178     SubPicture subpq[SUBPICTURE_QUEUE_SIZE];
179     int subpq_size, subpq_rindex, subpq_windex;
180     SDL_mutex *subpq_mutex;
181     SDL_cond *subpq_cond;
182
183     double frame_timer;
184     double frame_last_pts;
185     double frame_last_delay;
186     double video_clock;                          ///<pts of last decoded frame / predicted pts of next decoded frame
187     int video_stream;
188     AVStream *video_st;
189     PacketQueue videoq;
190     double video_current_pts;                    ///<current displayed pts (different from video_clock if frame fifos are used)
191     double video_current_pts_drift;              ///<video_current_pts - time (av_gettime) at which we updated video_current_pts - used to have running video pts
192     int64_t video_current_pos;                   ///<current displayed file pos
193     VideoPicture pictq[VIDEO_PICTURE_QUEUE_SIZE];
194     int pictq_size, pictq_rindex, pictq_windex;
195     SDL_mutex *pictq_mutex;
196     SDL_cond *pictq_cond;
197 #if !CONFIG_AVFILTER
198     struct SwsContext *img_convert_ctx;
199 #endif
200
201     char filename[1024];
202     int width, height, xleft, ytop;
203
204 #if CONFIG_AVFILTER
205     AVFilterContext *out_video_filter;          ///<the last filter in the video chain
206 #endif
207
208     float skip_frames;
209     float skip_frames_index;
210     int refresh;
211 } VideoState;
212
213 static void show_help(void);
214
215 /* options specified by the user */
216 static AVInputFormat *file_iformat;
217 static const char *input_filename;
218 static const char *window_title;
219 static int fs_screen_width;
220 static int fs_screen_height;
221 static int screen_width = 0;
222 static int screen_height = 0;
223 static int frame_width = 0;
224 static int frame_height = 0;
225 static enum PixelFormat frame_pix_fmt = PIX_FMT_NONE;
226 static int audio_disable;
227 static int video_disable;
228 static int wanted_stream[AVMEDIA_TYPE_NB]={
229     [AVMEDIA_TYPE_AUDIO]=-1,
230     [AVMEDIA_TYPE_VIDEO]=-1,
231     [AVMEDIA_TYPE_SUBTITLE]=-1,
232 };
233 static int seek_by_bytes=-1;
234 static int display_disable;
235 static int show_status = 1;
236 static int av_sync_type = AV_SYNC_AUDIO_MASTER;
237 static int64_t start_time = AV_NOPTS_VALUE;
238 static int64_t duration = AV_NOPTS_VALUE;
239 static int step = 0;
240 static int thread_count = 1;
241 static int workaround_bugs = 1;
242 static int fast = 0;
243 static int genpts = 0;
244 static int lowres = 0;
245 static int idct = FF_IDCT_AUTO;
246 static enum AVDiscard skip_frame= AVDISCARD_DEFAULT;
247 static enum AVDiscard skip_idct= AVDISCARD_DEFAULT;
248 static enum AVDiscard skip_loop_filter= AVDISCARD_DEFAULT;
249 static int error_recognition = FF_ER_CAREFUL;
250 static int error_concealment = 3;
251 static int decoder_reorder_pts= -1;
252 static int autoexit;
253 static int exit_on_keydown;
254 static int exit_on_mousedown;
255 static int loop=1;
256 static int framedrop=1;
257 static enum ShowMode show_mode = SHOW_MODE_NONE;
258
259 static int rdftspeed=20;
260 #if CONFIG_AVFILTER
261 static char *vfilters = NULL;
262 #endif
263
264 /* current context */
265 static int is_full_screen;
266 static VideoState *cur_stream;
267 static int64_t audio_callback_time;
268
269 static AVPacket flush_pkt;
270
271 #define FF_ALLOC_EVENT   (SDL_USEREVENT)
272 #define FF_REFRESH_EVENT (SDL_USEREVENT + 1)
273 #define FF_QUIT_EVENT    (SDL_USEREVENT + 2)
274
275 static SDL_Surface *screen;
276
277 static int packet_queue_put(PacketQueue *q, AVPacket *pkt)
278 {
279     AVPacketList *pkt1;
280
281     /* duplicate the packet */
282     if (pkt!=&flush_pkt && av_dup_packet(pkt) < 0)
283         return -1;
284
285     pkt1 = av_malloc(sizeof(AVPacketList));
286     if (!pkt1)
287         return -1;
288     pkt1->pkt = *pkt;
289     pkt1->next = NULL;
290
291
292     SDL_LockMutex(q->mutex);
293
294     if (!q->last_pkt)
295
296         q->first_pkt = pkt1;
297     else
298         q->last_pkt->next = pkt1;
299     q->last_pkt = pkt1;
300     q->nb_packets++;
301     q->size += pkt1->pkt.size + sizeof(*pkt1);
302     /* XXX: should duplicate packet data in DV case */
303     SDL_CondSignal(q->cond);
304
305     SDL_UnlockMutex(q->mutex);
306     return 0;
307 }
308
309 /* packet queue handling */
310 static void packet_queue_init(PacketQueue *q)
311 {
312     memset(q, 0, sizeof(PacketQueue));
313     q->mutex = SDL_CreateMutex();
314     q->cond = SDL_CreateCond();
315     packet_queue_put(q, &flush_pkt);
316 }
317
318 static void packet_queue_flush(PacketQueue *q)
319 {
320     AVPacketList *pkt, *pkt1;
321
322     SDL_LockMutex(q->mutex);
323     for(pkt = q->first_pkt; pkt != NULL; pkt = pkt1) {
324         pkt1 = pkt->next;
325         av_free_packet(&pkt->pkt);
326         av_freep(&pkt);
327     }
328     q->last_pkt = NULL;
329     q->first_pkt = NULL;
330     q->nb_packets = 0;
331     q->size = 0;
332     SDL_UnlockMutex(q->mutex);
333 }
334
335 static void packet_queue_end(PacketQueue *q)
336 {
337     packet_queue_flush(q);
338     SDL_DestroyMutex(q->mutex);
339     SDL_DestroyCond(q->cond);
340 }
341
342 static void packet_queue_abort(PacketQueue *q)
343 {
344     SDL_LockMutex(q->mutex);
345
346     q->abort_request = 1;
347
348     SDL_CondSignal(q->cond);
349
350     SDL_UnlockMutex(q->mutex);
351 }
352
353 /* return < 0 if aborted, 0 if no packet and > 0 if packet.  */
354 static int packet_queue_get(PacketQueue *q, AVPacket *pkt, int block)
355 {
356     AVPacketList *pkt1;
357     int ret;
358
359     SDL_LockMutex(q->mutex);
360
361     for(;;) {
362         if (q->abort_request) {
363             ret = -1;
364             break;
365         }
366
367         pkt1 = q->first_pkt;
368         if (pkt1) {
369             q->first_pkt = pkt1->next;
370             if (!q->first_pkt)
371                 q->last_pkt = NULL;
372             q->nb_packets--;
373             q->size -= pkt1->pkt.size + sizeof(*pkt1);
374             *pkt = pkt1->pkt;
375             av_free(pkt1);
376             ret = 1;
377             break;
378         } else if (!block) {
379             ret = 0;
380             break;
381         } else {
382             SDL_CondWait(q->cond, q->mutex);
383         }
384     }
385     SDL_UnlockMutex(q->mutex);
386     return ret;
387 }
388
389 static inline void fill_rectangle(SDL_Surface *screen,
390                                   int x, int y, int w, int h, int color)
391 {
392     SDL_Rect rect;
393     rect.x = x;
394     rect.y = y;
395     rect.w = w;
396     rect.h = h;
397     SDL_FillRect(screen, &rect, color);
398 }
399
400 #define ALPHA_BLEND(a, oldp, newp, s)\
401 ((((oldp << s) * (255 - (a))) + (newp * (a))) / (255 << s))
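/* ALPHA_BLEND() mixes newp into oldp with weight a (0..255). The s parameter
   allows newp to be the sum of 2^s samples (as accumulated into u1/v1 in
   blend_subrect() below), so dividing by (255 << s) both normalizes the alpha
   and averages those samples; e.g. with s = 0, a = 255 yields newp and a = 0
   leaves oldp unchanged. */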
402
403 #define RGBA_IN(r, g, b, a, s)\
404 {\
405     unsigned int v = ((const uint32_t *)(s))[0];\
406     a = (v >> 24) & 0xff;\
407     r = (v >> 16) & 0xff;\
408     g = (v >> 8) & 0xff;\
409     b = v & 0xff;\
410 }
411
412 #define YUVA_IN(y, u, v, a, s, pal)\
413 {\
414     unsigned int val = ((const uint32_t *)(pal))[*(const uint8_t*)(s)];\
415     a = (val >> 24) & 0xff;\
416     y = (val >> 16) & 0xff;\
417     u = (val >> 8) & 0xff;\
418     v = val & 0xff;\
419 }
420
421 #define YUVA_OUT(d, y, u, v, a)\
422 {\
423     ((uint32_t *)(d))[0] = (a << 24) | (y << 16) | (u << 8) | v;\
424 }
425
426
427 #define BPP 1
428
429 static void blend_subrect(AVPicture *dst, const AVSubtitleRect *rect, int imgw, int imgh)
430 {
431     int wrap, wrap3, width2, skip2;
432     int y, u, v, a, u1, v1, a1, w, h;
433     uint8_t *lum, *cb, *cr;
434     const uint8_t *p;
435     const uint32_t *pal;
436     int dstx, dsty, dstw, dsth;
437
438     dstw = av_clip(rect->w, 0, imgw);
439     dsth = av_clip(rect->h, 0, imgh);
440     dstx = av_clip(rect->x, 0, imgw - dstw);
441     dsty = av_clip(rect->y, 0, imgh - dsth);
442     lum = dst->data[0] + dsty * dst->linesize[0];
443     cb = dst->data[1] + (dsty >> 1) * dst->linesize[1];
444     cr = dst->data[2] + (dsty >> 1) * dst->linesize[2];
445
446     width2 = ((dstw + 1) >> 1) + (dstx & ~dstw & 1);
447     skip2 = dstx >> 1;
448     wrap = dst->linesize[0];
449     wrap3 = rect->pict.linesize[0];
450     p = rect->pict.data[0];
451     pal = (const uint32_t *)rect->pict.data[1];  /* Now in YCrCb! */
452
453     if (dsty & 1) {
454         lum += dstx;
455         cb += skip2;
456         cr += skip2;
457
458         if (dstx & 1) {
459             YUVA_IN(y, u, v, a, p, pal);
460             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
461             cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
462             cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
463             cb++;
464             cr++;
465             lum++;
466             p += BPP;
467         }
468         for(w = dstw - (dstx & 1); w >= 2; w -= 2) {
469             YUVA_IN(y, u, v, a, p, pal);
470             u1 = u;
471             v1 = v;
472             a1 = a;
473             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
474
475             YUVA_IN(y, u, v, a, p + BPP, pal);
476             u1 += u;
477             v1 += v;
478             a1 += a;
479             lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
480             cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
481             cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
482             cb++;
483             cr++;
484             p += 2 * BPP;
485             lum += 2;
486         }
487         if (w) {
488             YUVA_IN(y, u, v, a, p, pal);
489             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
490             cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
491             cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
492             p++;
493             lum++;
494         }
495         p += wrap3 - dstw * BPP;
496         lum += wrap - dstw - dstx;
497         cb += dst->linesize[1] - width2 - skip2;
498         cr += dst->linesize[2] - width2 - skip2;
499     }
500     for(h = dsth - (dsty & 1); h >= 2; h -= 2) {
501         lum += dstx;
502         cb += skip2;
503         cr += skip2;
504
505         if (dstx & 1) {
506             YUVA_IN(y, u, v, a, p, pal);
507             u1 = u;
508             v1 = v;
509             a1 = a;
510             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
511             p += wrap3;
512             lum += wrap;
513             YUVA_IN(y, u, v, a, p, pal);
514             u1 += u;
515             v1 += v;
516             a1 += a;
517             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
518             cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
519             cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
520             cb++;
521             cr++;
522             p += -wrap3 + BPP;
523             lum += -wrap + 1;
524         }
525         for(w = dstw - (dstx & 1); w >= 2; w -= 2) {
526             YUVA_IN(y, u, v, a, p, pal);
527             u1 = u;
528             v1 = v;
529             a1 = a;
530             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
531
532             YUVA_IN(y, u, v, a, p + BPP, pal);
533             u1 += u;
534             v1 += v;
535             a1 += a;
536             lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
537             p += wrap3;
538             lum += wrap;
539
540             YUVA_IN(y, u, v, a, p, pal);
541             u1 += u;
542             v1 += v;
543             a1 += a;
544             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
545
546             YUVA_IN(y, u, v, a, p + BPP, pal);
547             u1 += u;
548             v1 += v;
549             a1 += a;
550             lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
551
552             cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 2);
553             cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 2);
554
555             cb++;
556             cr++;
557             p += -wrap3 + 2 * BPP;
558             lum += -wrap + 2;
559         }
560         if (w) {
561             YUVA_IN(y, u, v, a, p, pal);
562             u1 = u;
563             v1 = v;
564             a1 = a;
565             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
566             p += wrap3;
567             lum += wrap;
568             YUVA_IN(y, u, v, a, p, pal);
569             u1 += u;
570             v1 += v;
571             a1 += a;
572             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
573             cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
574             cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
575             cb++;
576             cr++;
577             p += -wrap3 + BPP;
578             lum += -wrap + 1;
579         }
580         p += wrap3 + (wrap3 - dstw * BPP);
581         lum += wrap + (wrap - dstw - dstx);
582         cb += dst->linesize[1] - width2 - skip2;
583         cr += dst->linesize[2] - width2 - skip2;
584     }
585     /* handle odd height */
586     if (h) {
587         lum += dstx;
588         cb += skip2;
589         cr += skip2;
590
591         if (dstx & 1) {
592             YUVA_IN(y, u, v, a, p, pal);
593             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
594             cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
595             cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
596             cb++;
597             cr++;
598             lum++;
599             p += BPP;
600         }
601         for(w = dstw - (dstx & 1); w >= 2; w -= 2) {
602             YUVA_IN(y, u, v, a, p, pal);
603             u1 = u;
604             v1 = v;
605             a1 = a;
606             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
607
608             YUVA_IN(y, u, v, a, p + BPP, pal);
609             u1 += u;
610             v1 += v;
611             a1 += a;
612             lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
613             cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
614             cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
615             cb++;
616             cr++;
617             p += 2 * BPP;
618             lum += 2;
619         }
620         if (w) {
621             YUVA_IN(y, u, v, a, p, pal);
622             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
623             cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
624             cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
625         }
626     }
627 }
628
629 static void free_subpicture(SubPicture *sp)
630 {
631     avsubtitle_free(&sp->sub);
632 }
633
634 static void video_image_display(VideoState *is)
635 {
636     VideoPicture *vp;
637     SubPicture *sp;
638     AVPicture pict;
639     float aspect_ratio;
640     int width, height, x, y;
641     SDL_Rect rect;
642     int i;
643
644     vp = &is->pictq[is->pictq_rindex];
645     if (vp->bmp) {
646 #if CONFIG_AVFILTER
647          if (vp->picref->video->sample_aspect_ratio.num == 0)
648              aspect_ratio = 0;
649          else
650              aspect_ratio = av_q2d(vp->picref->video->sample_aspect_ratio);
651 #else
652
653         /* XXX: use the sample aspect ratio stored in the frame itself */
654         if (is->video_st->sample_aspect_ratio.num)
655             aspect_ratio = av_q2d(is->video_st->sample_aspect_ratio);
656         else if (is->video_st->codec->sample_aspect_ratio.num)
657             aspect_ratio = av_q2d(is->video_st->codec->sample_aspect_ratio);
658         else
659             aspect_ratio = 0;
660 #endif
661         if (aspect_ratio <= 0.0)
662             aspect_ratio = 1.0;
663         aspect_ratio *= (float)vp->width / (float)vp->height;
664
665         if (is->subtitle_st) {
666             if (is->subpq_size > 0) {
667                 sp = &is->subpq[is->subpq_rindex];
668
669                 if (vp->pts >= sp->pts + ((float) sp->sub.start_display_time / 1000)) {
670                     SDL_LockYUVOverlay (vp->bmp);
671
672                     pict.data[0] = vp->bmp->pixels[0];
673                     pict.data[1] = vp->bmp->pixels[2];
674                     pict.data[2] = vp->bmp->pixels[1];
675
676                     pict.linesize[0] = vp->bmp->pitches[0];
677                     pict.linesize[1] = vp->bmp->pitches[2];
678                     pict.linesize[2] = vp->bmp->pitches[1];
679
680                     for (i = 0; i < sp->sub.num_rects; i++)
681                         blend_subrect(&pict, sp->sub.rects[i],
682                                       vp->bmp->w, vp->bmp->h);
683
684                     SDL_UnlockYUVOverlay (vp->bmp);
685                 }
686             }
687         }
688
689
690         /* XXX: we assume the screen has a 1.0 pixel aspect ratio */
691         height = is->height;
692         width = ((int)rint(height * aspect_ratio)) & ~1;
693         if (width > is->width) {
694             width = is->width;
695             height = ((int)rint(width / aspect_ratio)) & ~1;
696         }
697         x = (is->width - width) / 2;
698         y = (is->height - height) / 2;
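        /* Example: a 720x480 stream with a 32:27 sample aspect ratio gives
           aspect_ratio = 32/27 * 720/480 = 16/9. In a 1024x768 window the
           first guess rint(768 * 16/9) & ~1 = 1364 exceeds the window width,
           so the picture becomes 1024x576, centered at x = 0, y = 96. */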
699         is->no_background = 0;
700         rect.x = is->xleft + x;
701         rect.y = is->ytop  + y;
702         rect.w = FFMAX(width,  1);
703         rect.h = FFMAX(height, 1);
704         SDL_DisplayYUVOverlay(vp->bmp, &rect);
705     }
706 }
707
708 /* get the current audio output buffer size, in bytes. With SDL, we
709    cannot get precise information. */
710 static int audio_write_get_buf_size(VideoState *is)
711 {
712     return is->audio_buf_size - is->audio_buf_index;
713 }
714
715 static inline int compute_mod(int a, int b)
716 {
717     return a < 0 ? a%b + b : a%b;
718 }
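/* Unlike C's % operator, compute_mod() always returns a value in [0, b):
   e.g. compute_mod(-3, 10) == 7 while -3 % 10 == -3 in C. This makes the
   backwards indexing into sample_array in video_audio_display() wrap
   around correctly. */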
719
720 static void video_audio_display(VideoState *s)
721 {
722     int i, i_start, x, y1, y, ys, delay, n, nb_display_channels;
723     int ch, channels, h, h2, bgcolor, fgcolor;
724     int64_t time_diff; /* in microseconds */
725     int rdft_bits, nb_freq;
726
727     for(rdft_bits=1; (1<<rdft_bits)<2*s->height; rdft_bits++)
728         ;
729     nb_freq= 1<<(rdft_bits-1);
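    /* rdft_bits is the smallest exponent for which the FFT size
       (1 << rdft_bits) is at least twice the display height, so
       nb_freq >= s->height and the spectrogram has at least one
       frequency bin available per displayed row. */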
730
731     /* compute display index: center on the currently output samples */
732     channels = s->audio_st->codec->channels;
733     nb_display_channels = channels;
734     if (!s->paused) {
735         int data_used= s->show_mode == SHOW_MODE_WAVES ? s->width : (2*nb_freq);
736         n = 2 * channels;
737         delay = audio_write_get_buf_size(s);
738         delay /= n;
739
740         /* to be more precise, we take into account the time spent since
741            the last buffer computation */
742         if (audio_callback_time) {
743             time_diff = av_gettime() - audio_callback_time;
744             delay -= (time_diff * s->audio_st->codec->sample_rate) / 1000000;
745         }
746
747         delay += 2*data_used;
748         if (delay < data_used)
749             delay = data_used;
750
751         i_start= x = compute_mod(s->sample_array_index - delay * channels, SAMPLE_ARRAY_SIZE);
752         if (s->show_mode == SHOW_MODE_WAVES) {
753             h= INT_MIN;
754             for(i=0; i<1000; i+=channels){
755                 int idx= (SAMPLE_ARRAY_SIZE + x - i) % SAMPLE_ARRAY_SIZE;
756                 int a= s->sample_array[idx];
757                 int b= s->sample_array[(idx + 4*channels)%SAMPLE_ARRAY_SIZE];
758                 int c= s->sample_array[(idx + 5*channels)%SAMPLE_ARRAY_SIZE];
759                 int d= s->sample_array[(idx + 9*channels)%SAMPLE_ARRAY_SIZE];
760                 int score= a-d;
761                 if(h<score && (b^c)<0){
762                     h= score;
763                     i_start= idx;
764                 }
765             }
766         }
767
768         s->last_i_start = i_start;
769     } else {
770         i_start = s->last_i_start;
771     }
772
773     bgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0x00);
774     if (s->show_mode == SHOW_MODE_WAVES) {
775         fill_rectangle(screen,
776                        s->xleft, s->ytop, s->width, s->height,
777                        bgcolor);
778
779         fgcolor = SDL_MapRGB(screen->format, 0xff, 0xff, 0xff);
780
781         /* total height for one channel */
782         h = s->height / nb_display_channels;
783         /* graph height / 2 */
784         h2 = (h * 9) / 20;
785         for(ch = 0;ch < nb_display_channels; ch++) {
786             i = i_start + ch;
787             y1 = s->ytop + ch * h + (h / 2); /* position of center line */
788             for(x = 0; x < s->width; x++) {
789                 y = (s->sample_array[i] * h2) >> 15;
790                 if (y < 0) {
791                     y = -y;
792                     ys = y1 - y;
793                 } else {
794                     ys = y1;
795                 }
796                 fill_rectangle(screen,
797                                s->xleft + x, ys, 1, y,
798                                fgcolor);
799                 i += channels;
800                 if (i >= SAMPLE_ARRAY_SIZE)
801                     i -= SAMPLE_ARRAY_SIZE;
802             }
803         }
804
805         fgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0xff);
806
807         for(ch = 1;ch < nb_display_channels; ch++) {
808             y = s->ytop + ch * h;
809             fill_rectangle(screen,
810                            s->xleft, y, s->width, 1,
811                            fgcolor);
812         }
813         SDL_UpdateRect(screen, s->xleft, s->ytop, s->width, s->height);
814     }else{
815         nb_display_channels= FFMIN(nb_display_channels, 2);
816         if(rdft_bits != s->rdft_bits){
817             av_rdft_end(s->rdft);
818             av_free(s->rdft_data);
819             s->rdft = av_rdft_init(rdft_bits, DFT_R2C);
820             s->rdft_bits= rdft_bits;
821             s->rdft_data= av_malloc(4*nb_freq*sizeof(*s->rdft_data));
822         }
823         {
824             FFTSample *data[2];
825             for(ch = 0;ch < nb_display_channels; ch++) {
826                 data[ch] = s->rdft_data + 2*nb_freq*ch;
827                 i = i_start + ch;
828                 for(x = 0; x < 2*nb_freq; x++) {
829                     double w= (x-nb_freq)*(1.0/nb_freq);
830                     data[ch][x]= s->sample_array[i]*(1.0-w*w);
831                     i += channels;
832                     if (i >= SAMPLE_ARRAY_SIZE)
833                         i -= SAMPLE_ARRAY_SIZE;
834                 }
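                /* the (1.0 - w*w) factor above is a parabolic (Welch-style)
                   window over [-1, 1]: it fades both ends of the block to
                   zero before the real FFT, reducing spectral leakage. */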
835                 av_rdft_calc(s->rdft, data[ch]);
836             }
837             // least efficient way to do this, we should of course access it directly, but it's more than fast enough
838             for(y=0; y<s->height; y++){
839                 double w= 1/sqrt(nb_freq);
840                 int a= sqrt(w*sqrt(data[0][2*y+0]*data[0][2*y+0] + data[0][2*y+1]*data[0][2*y+1]));
841                 int b= (nb_display_channels == 2 ) ? sqrt(w*sqrt(data[1][2*y+0]*data[1][2*y+0]
842                        + data[1][2*y+1]*data[1][2*y+1])) : a;
843                 a= FFMIN(a,255);
844                 b= FFMIN(b,255);
845                 fgcolor = SDL_MapRGB(screen->format, a, b, (a+b)/2);
846
847                 fill_rectangle(screen,
848                             s->xpos, s->height-y, 1, 1,
849                             fgcolor);
850             }
851         }
852         SDL_UpdateRect(screen, s->xpos, s->ytop, 1, s->height);
853         s->xpos++;
854         if(s->xpos >= s->width)
855             s->xpos= s->xleft;
856     }
857 }
858
859 static int video_open(VideoState *is){
860     int flags = SDL_HWSURFACE|SDL_ASYNCBLIT|SDL_HWACCEL;
861     int w,h;
862
863     if(is_full_screen) flags |= SDL_FULLSCREEN;
864     else               flags |= SDL_RESIZABLE;
865
866     if (is_full_screen && fs_screen_width) {
867         w = fs_screen_width;
868         h = fs_screen_height;
869     } else if(!is_full_screen && screen_width){
870         w = screen_width;
871         h = screen_height;
872 #if CONFIG_AVFILTER
873     }else if (is->out_video_filter && is->out_video_filter->inputs[0]){
874         w = is->out_video_filter->inputs[0]->w;
875         h = is->out_video_filter->inputs[0]->h;
876 #else
877     }else if (is->video_st && is->video_st->codec->width){
878         w = is->video_st->codec->width;
879         h = is->video_st->codec->height;
880 #endif
881     } else {
882         w = 640;
883         h = 480;
884     }
885     if(screen && is->width == screen->w && screen->w == w
886        && is->height== screen->h && screen->h == h)
887         return 0;
888
889 #ifndef __APPLE__
890     screen = SDL_SetVideoMode(w, h, 0, flags);
891 #else
892     /* setting bits_per_pixel = 0 or 32 causes blank video on OS X */
893     screen = SDL_SetVideoMode(w, h, 24, flags);
894 #endif
895     if (!screen) {
896         fprintf(stderr, "SDL: could not set video mode - exiting\n");
897         return -1;
898     }
899     if (!window_title)
900         window_title = input_filename;
901     SDL_WM_SetCaption(window_title, window_title);
902
903     is->width = screen->w;
904     is->height = screen->h;
905
906     return 0;
907 }
908
909 /* display the current picture, if any */
910 static void video_display(VideoState *is)
911 {
912     if(!screen)
913         video_open(cur_stream);
914     if (is->audio_st && is->show_mode != SHOW_MODE_VIDEO)
915         video_audio_display(is);
916     else if (is->video_st)
917         video_image_display(is);
918 }
919
920 static int refresh_thread(void *opaque)
921 {
922     VideoState *is= opaque;
923     while(!is->abort_request){
924         SDL_Event event;
925         event.type = FF_REFRESH_EVENT;
926         event.user.data1 = opaque;
927         if(!is->refresh){
928             is->refresh=1;
929             SDL_PushEvent(&event);
930         }
931         // FIXME: ideally we should wait for the correct time, but SDL's event passing is so slow it would be silly
932         usleep(is->audio_st && is->show_mode != SHOW_MODE_VIDEO ? rdftspeed*1000 : 5000);
933     }
934     return 0;
935 }
936
937 /* get the current audio clock value */
938 static double get_audio_clock(VideoState *is)
939 {
940     double pts;
941     int hw_buf_size, bytes_per_sec;
942     pts = is->audio_clock;
943     hw_buf_size = audio_write_get_buf_size(is);
944     bytes_per_sec = 0;
945     if (is->audio_st) {
946         bytes_per_sec = is->audio_st->codec->sample_rate *
947             2 * is->audio_st->codec->channels;
948     }
949     if (bytes_per_sec)
950         pts -= (double)hw_buf_size / bytes_per_sec;
951     return pts;
952 }
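/* Example: for 44100 Hz stereo audio (2 bytes per sample are assumed above),
   bytes_per_sec = 44100 * 2 * 2 = 176400; if 8820 bytes are still queued,
   the clock is moved back by 8820 / 176400 = 0.05 s, i.e. to the time of
   the sample that is actually being heard right now. */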
953
954 /* get the current video clock value */
955 static double get_video_clock(VideoState *is)
956 {
957     if (is->paused) {
958         return is->video_current_pts;
959     } else {
960         return is->video_current_pts_drift + av_gettime() / 1000000.0;
961     }
962 }
963
964 /* get the current external clock value */
965 static double get_external_clock(VideoState *is)
966 {
967     int64_t ti;
968     ti = av_gettime();
969     return is->external_clock + ((ti - is->external_clock_time) * 1e-6);
970 }
971
972 /* get the current master clock value */
973 static double get_master_clock(VideoState *is)
974 {
975     double val;
976
977     if (is->av_sync_type == AV_SYNC_VIDEO_MASTER) {
978         if (is->video_st)
979             val = get_video_clock(is);
980         else
981             val = get_audio_clock(is);
982     } else if (is->av_sync_type == AV_SYNC_AUDIO_MASTER) {
983         if (is->audio_st)
984             val = get_audio_clock(is);
985         else
986             val = get_video_clock(is);
987     } else {
988         val = get_external_clock(is);
989     }
990     return val;
991 }
992
993 /* seek in the stream */
994 static void stream_seek(VideoState *is, int64_t pos, int64_t rel, int seek_by_bytes)
995 {
996     if (!is->seek_req) {
997         is->seek_pos = pos;
998         is->seek_rel = rel;
999         is->seek_flags &= ~AVSEEK_FLAG_BYTE;
1000         if (seek_by_bytes)
1001             is->seek_flags |= AVSEEK_FLAG_BYTE;
1002         is->seek_req = 1;
1003     }
1004 }
1005
1006 /* pause or resume the video */
1007 static void stream_toggle_pause(VideoState *is)
1008 {
1009     if (is->paused) {
1010         is->frame_timer += av_gettime() / 1000000.0 + is->video_current_pts_drift - is->video_current_pts;
1011         if(is->read_pause_return != AVERROR(ENOSYS)){
1012             is->video_current_pts = is->video_current_pts_drift + av_gettime() / 1000000.0;
1013         }
1014         is->video_current_pts_drift = is->video_current_pts - av_gettime() / 1000000.0;
1015     }
1016     is->paused = !is->paused;
1017 }
1018
1019 static double compute_target_time(double frame_current_pts, VideoState *is)
1020 {
1021     double delay, sync_threshold, diff;
1022
1023     /* compute nominal delay */
1024     delay = frame_current_pts - is->frame_last_pts;
1025     if (delay <= 0 || delay >= 10.0) {
1026         /* if incorrect delay, use previous one */
1027         delay = is->frame_last_delay;
1028     } else {
1029         is->frame_last_delay = delay;
1030     }
1031     is->frame_last_pts = frame_current_pts;
1032
1033     /* update delay to follow master synchronisation source */
1034     if (((is->av_sync_type == AV_SYNC_AUDIO_MASTER && is->audio_st) ||
1035          is->av_sync_type == AV_SYNC_EXTERNAL_CLOCK)) {
1036         /* if video is slave, we try to correct big delays by
1037            duplicating or deleting a frame */
1038         diff = get_video_clock(is) - get_master_clock(is);
1039
1040         /* skip or repeat frame. We take into account the
1041            delay to compute the threshold. I still don't know
1042            if it is the best guess */
1043         sync_threshold = FFMAX(AV_SYNC_THRESHOLD, delay);
1044         if (fabs(diff) < AV_NOSYNC_THRESHOLD) {
1045             if (diff <= -sync_threshold)
1046                 delay = 0;
1047             else if (diff >= sync_threshold)
1048                 delay = 2 * delay;
1049         }
1050     }
1051     is->frame_timer += delay;
1052
1053     av_dlog(NULL, "video: delay=%0.3f pts=%0.3f A-V=%f\n",
1054             delay, frame_current_pts, -diff);
1055
1056     return is->frame_timer;
1057 }
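/* Example: with 40 ms between frames and audio as master, sync_threshold is
   FFMAX(0.01, 0.04) = 0.04 s. If the video clock lags the master by 0.1 s
   (diff <= -0.04) the nominal delay collapses to 0 and the next frame is
   shown immediately; if it leads by 0.1 s the delay is doubled to 0.08 s. */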
1058
1059 /* called to display each frame */
1060 static void video_refresh(void *opaque)
1061 {
1062     VideoState *is = opaque;
1063     VideoPicture *vp;
1064
1065     SubPicture *sp, *sp2;
1066
1067     if (is->video_st) {
1068 retry:
1069         if (is->pictq_size == 0) {
1070             // nothing to do, no picture to display in the queue
1071         } else {
1072             double time= av_gettime()/1000000.0;
1073             double next_target;
1074             /* dequeue the picture */
1075             vp = &is->pictq[is->pictq_rindex];
1076
1077             if(time < vp->target_clock)
1078                 return;
1079             /* update current video pts */
1080             is->video_current_pts = vp->pts;
1081             is->video_current_pts_drift = is->video_current_pts - time;
1082             is->video_current_pos = vp->pos;
1083             if(is->pictq_size > 1){
1084                 VideoPicture *nextvp= &is->pictq[(is->pictq_rindex+1)%VIDEO_PICTURE_QUEUE_SIZE];
1085                 assert(nextvp->target_clock >= vp->target_clock);
1086                 next_target= nextvp->target_clock;
1087             }else{
1088                 next_target= vp->target_clock + is->video_clock - vp->pts; //FIXME pass durations cleanly
1089             }
1090             if(framedrop && time > next_target){
1091                 is->skip_frames *= 1.0 + FRAME_SKIP_FACTOR;
1092                 if(is->pictq_size > 1 || time > next_target + 0.5){
1093                     /* update queue size and signal for next picture */
1094                     if (++is->pictq_rindex == VIDEO_PICTURE_QUEUE_SIZE)
1095                         is->pictq_rindex = 0;
1096
1097                     SDL_LockMutex(is->pictq_mutex);
1098                     is->pictq_size--;
1099                     SDL_CondSignal(is->pictq_cond);
1100                     SDL_UnlockMutex(is->pictq_mutex);
1101                     goto retry;
1102                 }
1103             }
1104
1105             if(is->subtitle_st) {
1106                 if (is->subtitle_stream_changed) {
1107                     SDL_LockMutex(is->subpq_mutex);
1108
1109                     while (is->subpq_size) {
1110                         free_subpicture(&is->subpq[is->subpq_rindex]);
1111
1112                         /* update queue size and signal for next picture */
1113                         if (++is->subpq_rindex == SUBPICTURE_QUEUE_SIZE)
1114                             is->subpq_rindex = 0;
1115
1116                         is->subpq_size--;
1117                     }
1118                     is->subtitle_stream_changed = 0;
1119
1120                     SDL_CondSignal(is->subpq_cond);
1121                     SDL_UnlockMutex(is->subpq_mutex);
1122                 } else {
1123                     if (is->subpq_size > 0) {
1124                         sp = &is->subpq[is->subpq_rindex];
1125
1126                         if (is->subpq_size > 1)
1127                             sp2 = &is->subpq[(is->subpq_rindex + 1) % SUBPICTURE_QUEUE_SIZE];
1128                         else
1129                             sp2 = NULL;
1130
1131                         if ((is->video_current_pts > (sp->pts + ((float) sp->sub.end_display_time / 1000)))
1132                                 || (sp2 && is->video_current_pts > (sp2->pts + ((float) sp2->sub.start_display_time / 1000))))
1133                         {
1134                             free_subpicture(sp);
1135
1136                             /* update queue size and signal for next picture */
1137                             if (++is->subpq_rindex == SUBPICTURE_QUEUE_SIZE)
1138                                 is->subpq_rindex = 0;
1139
1140                             SDL_LockMutex(is->subpq_mutex);
1141                             is->subpq_size--;
1142                             SDL_CondSignal(is->subpq_cond);
1143                             SDL_UnlockMutex(is->subpq_mutex);
1144                         }
1145                     }
1146                 }
1147             }
1148
1149             /* display picture */
1150             if (!display_disable)
1151                 video_display(is);
1152
1153             /* update queue size and signal for next picture */
1154             if (++is->pictq_rindex == VIDEO_PICTURE_QUEUE_SIZE)
1155                 is->pictq_rindex = 0;
1156
1157             SDL_LockMutex(is->pictq_mutex);
1158             is->pictq_size--;
1159             SDL_CondSignal(is->pictq_cond);
1160             SDL_UnlockMutex(is->pictq_mutex);
1161         }
1162     } else if (is->audio_st) {
1163         /* draw the next audio frame */
1164
1165         /* if there is only an audio stream, then display the audio bars (better
1166            than nothing, just to test the implementation) */
1167
1168         /* display picture */
1169         if (!display_disable)
1170             video_display(is);
1171     }
1172     if (show_status) {
1173         static int64_t last_time;
1174         int64_t cur_time;
1175         int aqsize, vqsize, sqsize;
1176         double av_diff;
1177
1178         cur_time = av_gettime();
1179         if (!last_time || (cur_time - last_time) >= 30000) {
1180             aqsize = 0;
1181             vqsize = 0;
1182             sqsize = 0;
1183             if (is->audio_st)
1184                 aqsize = is->audioq.size;
1185             if (is->video_st)
1186                 vqsize = is->videoq.size;
1187             if (is->subtitle_st)
1188                 sqsize = is->subtitleq.size;
1189             av_diff = 0;
1190             if (is->audio_st && is->video_st)
1191                 av_diff = get_audio_clock(is) - get_video_clock(is);
1192             printf("%7.2f A-V:%7.3f s:%3.1f aq=%5dKB vq=%5dKB sq=%5dB f=%"PRId64"/%"PRId64"   \r",
1193                    get_master_clock(is),
1194                    av_diff,
1195                    FFMAX(is->skip_frames-1, 0),
1196                    aqsize / 1024,
1197                    vqsize / 1024,
1198                    sqsize,
1199                    is->video_st ? is->video_st->codec->pts_correction_num_faulty_dts : 0,
1200                    is->video_st ? is->video_st->codec->pts_correction_num_faulty_pts : 0);
1201             fflush(stdout);
1202             last_time = cur_time;
1203         }
1204     }
1205 }
1206
1207 static void stream_close(VideoState *is)
1208 {
1209     VideoPicture *vp;
1210     int i;
1211     /* XXX: use a special url_shutdown call to abort parse cleanly */
1212     is->abort_request = 1;
1213     SDL_WaitThread(is->read_tid, NULL);
1214     SDL_WaitThread(is->refresh_tid, NULL);
1215
1216     /* free all pictures */
1217     for(i=0;i<VIDEO_PICTURE_QUEUE_SIZE; i++) {
1218         vp = &is->pictq[i];
1219 #if CONFIG_AVFILTER
1220         if (vp->picref) {
1221             avfilter_unref_buffer(vp->picref);
1222             vp->picref = NULL;
1223         }
1224 #endif
1225         if (vp->bmp) {
1226             SDL_FreeYUVOverlay(vp->bmp);
1227             vp->bmp = NULL;
1228         }
1229     }
1230     SDL_DestroyMutex(is->pictq_mutex);
1231     SDL_DestroyCond(is->pictq_cond);
1232     SDL_DestroyMutex(is->subpq_mutex);
1233     SDL_DestroyCond(is->subpq_cond);
1234 #if !CONFIG_AVFILTER
1235     if (is->img_convert_ctx)
1236         sws_freeContext(is->img_convert_ctx);
1237 #endif
1238     av_free(is);
1239 }
1240
1241 static void do_exit(void)
1242 {
1243     if (cur_stream) {
1244         stream_close(cur_stream);
1245         cur_stream = NULL;
1246     }
1247     uninit_opts();
1248 #if CONFIG_AVFILTER
1249     avfilter_uninit();
1250 #endif
1251     if (show_status)
1252         printf("\n");
1253     SDL_Quit();
1254     av_log(NULL, AV_LOG_QUIET, "");
1255     exit(0);
1256 }
1257
1258 /* allocate a picture (this needs to be done in the main thread to avoid
1259    potential locking problems) */
1260 static void alloc_picture(void *opaque)
1261 {
1262     VideoState *is = opaque;
1263     VideoPicture *vp;
1264
1265     vp = &is->pictq[is->pictq_windex];
1266
1267     if (vp->bmp)
1268         SDL_FreeYUVOverlay(vp->bmp);
1269
1270 #if CONFIG_AVFILTER
1271     if (vp->picref)
1272         avfilter_unref_buffer(vp->picref);
1273     vp->picref = NULL;
1274
1275     vp->width   = is->out_video_filter->inputs[0]->w;
1276     vp->height  = is->out_video_filter->inputs[0]->h;
1277     vp->pix_fmt = is->out_video_filter->inputs[0]->format;
1278 #else
1279     vp->width   = is->video_st->codec->width;
1280     vp->height  = is->video_st->codec->height;
1281     vp->pix_fmt = is->video_st->codec->pix_fmt;
1282 #endif
1283
1284     vp->bmp = SDL_CreateYUVOverlay(vp->width, vp->height,
1285                                    SDL_YV12_OVERLAY,
1286                                    screen);
1287     if (!vp->bmp || vp->bmp->pitches[0] < vp->width) {
1288         /* SDL allocates a buffer smaller than requested if the video
1289          * overlay hardware is unable to support the requested size. */
1290         fprintf(stderr, "Error: the video system does not support an image\n"
1291                         "size of %dx%d pixels. Try using -lowres or -vf \"scale=w:h\"\n"
1292                         "to reduce the image size.\n", vp->width, vp->height );
1293         do_exit();
1294     }
1295
1296     SDL_LockMutex(is->pictq_mutex);
1297     vp->allocated = 1;
1298     SDL_CondSignal(is->pictq_cond);
1299     SDL_UnlockMutex(is->pictq_mutex);
1300 }
1301
1302 static int queue_picture(VideoState *is, AVFrame *src_frame, double pts1, int64_t pos)
1303 {
1304     VideoPicture *vp;
1305     double frame_delay, pts = pts1;
1306
1307     /* compute the exact PTS for the picture if it is omitted in the stream
1308      * pts1 is the dts of the pkt / pts of the frame */
1309     if (pts != 0) {
1310         /* update video clock with pts, if present */
1311         is->video_clock = pts;
1312     } else {
1313         pts = is->video_clock;
1314     }
1315     /* update video clock for next frame */
1316     frame_delay = av_q2d(is->video_st->codec->time_base);
1317     /* for MPEG2, the frame can be repeated, so we update the
1318        clock accordingly */
1319     frame_delay += src_frame->repeat_pict * (frame_delay * 0.5);
1320     is->video_clock += frame_delay;
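    /* Example: for 25 fps MPEG-2 content frame_delay starts at 0.04 s; a
       frame with repeat_pict == 1 (one repeated field) advances the clock
       by 0.04 * 1.5 = 0.06 s, i.e. three field durations instead of two. */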
1321
1322 #if defined(DEBUG_SYNC) && 0
1323     printf("frame_type=%c clock=%0.3f pts=%0.3f\n",
1324            av_get_picture_type_char(src_frame->pict_type), pts, pts1);
1325 #endif
1326
1327     /* wait until we have space to put a new picture */
1328     SDL_LockMutex(is->pictq_mutex);
1329
1330     if(is->pictq_size>=VIDEO_PICTURE_QUEUE_SIZE && !is->refresh)
1331         is->skip_frames= FFMAX(1.0 - FRAME_SKIP_FACTOR, is->skip_frames * (1.0-FRAME_SKIP_FACTOR));
1332
1333     while (is->pictq_size >= VIDEO_PICTURE_QUEUE_SIZE &&
1334            !is->videoq.abort_request) {
1335         SDL_CondWait(is->pictq_cond, is->pictq_mutex);
1336     }
1337     SDL_UnlockMutex(is->pictq_mutex);
1338
1339     if (is->videoq.abort_request)
1340         return -1;
1341
1342     vp = &is->pictq[is->pictq_windex];
1343
1344     /* alloc or resize hardware picture buffer */
1345     if (!vp->bmp ||
1346 #if CONFIG_AVFILTER
1347         vp->width  != is->out_video_filter->inputs[0]->w ||
1348         vp->height != is->out_video_filter->inputs[0]->h) {
1349 #else
1350         vp->width != is->video_st->codec->width ||
1351         vp->height != is->video_st->codec->height) {
1352 #endif
1353         SDL_Event event;
1354
1355         vp->allocated = 0;
1356
1357         /* the allocation must be done in the main thread to avoid
1358            locking problems */
1359         event.type = FF_ALLOC_EVENT;
1360         event.user.data1 = is;
1361         SDL_PushEvent(&event);
1362
1363         /* wait until the picture is allocated */
1364         SDL_LockMutex(is->pictq_mutex);
1365         while (!vp->allocated && !is->videoq.abort_request) {
1366             SDL_CondWait(is->pictq_cond, is->pictq_mutex);
1367         }
1368         SDL_UnlockMutex(is->pictq_mutex);
1369
1370         if (is->videoq.abort_request)
1371             return -1;
1372     }
1373
1374     /* if the frame is not skipped, then display it */
1375     if (vp->bmp) {
1376         AVPicture pict;
1377 #if CONFIG_AVFILTER
1378         if(vp->picref)
1379             avfilter_unref_buffer(vp->picref);
1380         vp->picref = src_frame->opaque;
1381 #endif
1382
1383         /* get a pointer on the bitmap */
1384         SDL_LockYUVOverlay (vp->bmp);
1385
1386         memset(&pict,0,sizeof(AVPicture));
1387         pict.data[0] = vp->bmp->pixels[0];
1388         pict.data[1] = vp->bmp->pixels[2];
1389         pict.data[2] = vp->bmp->pixels[1];
1390
1391         pict.linesize[0] = vp->bmp->pitches[0];
1392         pict.linesize[1] = vp->bmp->pitches[2];
1393         pict.linesize[2] = vp->bmp->pitches[1];
1394
1395 #if CONFIG_AVFILTER
1396         //FIXME use direct rendering
1397         av_picture_copy(&pict, (AVPicture *)src_frame,
1398                         vp->pix_fmt, vp->width, vp->height);
1399 #else
1400         sws_flags = av_get_int(sws_opts, "sws_flags", NULL);
1401         is->img_convert_ctx = sws_getCachedContext(is->img_convert_ctx,
1402             vp->width, vp->height, vp->pix_fmt, vp->width, vp->height,
1403             PIX_FMT_YUV420P, sws_flags, NULL, NULL, NULL);
1404         if (is->img_convert_ctx == NULL) {
1405             fprintf(stderr, "Cannot initialize the conversion context\n");
1406             exit(1);
1407         }
1408         sws_scale(is->img_convert_ctx, src_frame->data, src_frame->linesize,
1409                   0, vp->height, pict.data, pict.linesize);
1410 #endif
1411         /* update the bitmap content */
1412         SDL_UnlockYUVOverlay(vp->bmp);
1413
1414         vp->pts = pts;
1415         vp->pos = pos;
1416
1417         /* now we can update the picture count */
1418         if (++is->pictq_windex == VIDEO_PICTURE_QUEUE_SIZE)
1419             is->pictq_windex = 0;
1420         SDL_LockMutex(is->pictq_mutex);
1421         vp->target_clock= compute_target_time(vp->pts, is);
1422
1423         is->pictq_size++;
1424         SDL_UnlockMutex(is->pictq_mutex);
1425     }
1426     return 0;
1427 }
1428
1429 static int get_video_frame(VideoState *is, AVFrame *frame, int64_t *pts, AVPacket *pkt)
1430 {
1431     int len1 av_unused, got_picture, i;
1432
1433     if (packet_queue_get(&is->videoq, pkt, 1) < 0)
1434         return -1;
1435
1436     if (pkt->data == flush_pkt.data) {
1437         avcodec_flush_buffers(is->video_st->codec);
1438
1439         SDL_LockMutex(is->pictq_mutex);
1440         // Make sure there are no long delay timers (ideally we should just flush the queue, but that's harder)
1441         for (i = 0; i < VIDEO_PICTURE_QUEUE_SIZE; i++) {
1442             is->pictq[i].target_clock= 0;
1443         }
1444         while (is->pictq_size && !is->videoq.abort_request) {
1445             SDL_CondWait(is->pictq_cond, is->pictq_mutex);
1446         }
1447         is->video_current_pos = -1;
1448         SDL_UnlockMutex(is->pictq_mutex);
1449
1450         is->frame_last_pts = AV_NOPTS_VALUE;
1451         is->frame_last_delay = 0;
1452         is->frame_timer = (double)av_gettime() / 1000000.0;
1453         is->skip_frames = 1;
1454         is->skip_frames_index = 0;
1455         return 0;
1456     }
1457
1458     len1 = avcodec_decode_video2(is->video_st->codec,
1459                                  frame, &got_picture,
1460                                  pkt);
1461
1462     if (got_picture) {
1463         if (decoder_reorder_pts == -1) {
1464             *pts = frame->best_effort_timestamp;
1465         } else if (decoder_reorder_pts) {
1466             *pts = frame->pkt_pts;
1467         } else {
1468             *pts = frame->pkt_dts;
1469         }
1470
1471         if (*pts == AV_NOPTS_VALUE) {
1472             *pts = 0;
1473         }
1474
1475         is->skip_frames_index += 1;
1476         if(is->skip_frames_index >= is->skip_frames){
1477             is->skip_frames_index -= FFMAX(is->skip_frames, 1.0);
1478             return 1;
1479         }
1480
1481     }
1482     return 0;
1483 }
1484
1485 #if CONFIG_AVFILTER
1486 typedef struct {
1487     VideoState *is;
1488     AVFrame *frame;
1489     int use_dr1;
1490 } FilterPriv;
1491
1492 static int input_get_buffer(AVCodecContext *codec, AVFrame *pic)
1493 {
1494     AVFilterContext *ctx = codec->opaque;
1495     AVFilterBufferRef  *ref;
1496     int perms = AV_PERM_WRITE;
1497     int i, w, h, stride[4];
1498     unsigned edge;
1499     int pixel_size;
1500
1501     av_assert0(codec->flags & CODEC_FLAG_EMU_EDGE);
1502
1503     if (codec->codec->capabilities & CODEC_CAP_NEG_LINESIZES)
1504         perms |= AV_PERM_NEG_LINESIZES;
1505
1506     if(pic->buffer_hints & FF_BUFFER_HINTS_VALID) {
1507         if(pic->buffer_hints & FF_BUFFER_HINTS_READABLE) perms |= AV_PERM_READ;
1508         if(pic->buffer_hints & FF_BUFFER_HINTS_PRESERVE) perms |= AV_PERM_PRESERVE;
1509         if(pic->buffer_hints & FF_BUFFER_HINTS_REUSABLE) perms |= AV_PERM_REUSE2;
1510     }
1511     if(pic->reference) perms |= AV_PERM_READ | AV_PERM_PRESERVE;
1512
1513     w = codec->width;
1514     h = codec->height;
1515
1516     if(av_image_check_size(w, h, 0, codec))
1517         return -1;
1518
1519     avcodec_align_dimensions2(codec, &w, &h, stride);
1520     edge = codec->flags & CODEC_FLAG_EMU_EDGE ? 0 : avcodec_get_edge_width();
1521     w += edge << 1;
1522     h += edge << 1;
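    /* edge is the border a decoder without CODEC_FLAG_EMU_EDGE would need
       around the picture; the av_assert0() above guarantees EMU_EDGE is set
       here, so edge is 0 and only the alignment chosen by
       avcodec_align_dimensions2() enlarges the requested buffer. */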
1523
1524     if(!(ref = avfilter_get_video_buffer(ctx->outputs[0], perms, w, h)))
1525         return -1;
1526
1527     pixel_size = av_pix_fmt_descriptors[ref->format].comp[0].step_minus1+1;
1528     ref->video->w = codec->width;
1529     ref->video->h = codec->height;
1530     for(i = 0; i < 4; i ++) {
1531         unsigned hshift = (i == 1 || i == 2) ? av_pix_fmt_descriptors[ref->format].log2_chroma_w : 0;
1532         unsigned vshift = (i == 1 || i == 2) ? av_pix_fmt_descriptors[ref->format].log2_chroma_h : 0;
1533
1534         if (ref->data[i]) {
1535             ref->data[i]    += ((edge * pixel_size) >> hshift) + ((edge * ref->linesize[i]) >> vshift);
1536         }
1537         pic->data[i]     = ref->data[i];
1538         pic->linesize[i] = ref->linesize[i];
1539     }
1540     pic->opaque = ref;
1541     pic->age    = INT_MAX;
1542     pic->type   = FF_BUFFER_TYPE_USER;
1543     pic->reordered_opaque = codec->reordered_opaque;
1544     if(codec->pkt) pic->pkt_pts = codec->pkt->pts;
1545     else           pic->pkt_pts = AV_NOPTS_VALUE;
1546     return 0;
1547 }
1548
1549 static void input_release_buffer(AVCodecContext *codec, AVFrame *pic)
1550 {
1551     memset(pic->data, 0, sizeof(pic->data));
1552     avfilter_unref_buffer(pic->opaque);
1553 }
1554
1555 static int input_reget_buffer(AVCodecContext *codec, AVFrame *pic)
1556 {
1557     AVFilterBufferRef *ref = pic->opaque;
1558
1559     if (pic->data[0] == NULL) {
1560         pic->buffer_hints |= FF_BUFFER_HINTS_READABLE;
1561         return codec->get_buffer(codec, pic);
1562     }
1563
1564     if ((codec->width != ref->video->w) || (codec->height != ref->video->h) ||
1565         (codec->pix_fmt != ref->format)) {
1566         av_log(codec, AV_LOG_ERROR, "Picture properties changed.\n");
1567         return -1;
1568     }
1569
1570     pic->reordered_opaque = codec->reordered_opaque;
1571     if(codec->pkt) pic->pkt_pts = codec->pkt->pts;
1572     else           pic->pkt_pts = AV_NOPTS_VALUE;
1573     return 0;
1574 }
1575
1576 static int input_init(AVFilterContext *ctx, const char *args, void *opaque)
1577 {
1578     FilterPriv *priv = ctx->priv;
1579     AVCodecContext *codec;
1580     if(!opaque) return -1;
1581
1582     priv->is = opaque;
1583     codec    = priv->is->video_st->codec;
1584     codec->opaque = ctx;
1585     if((codec->codec->capabilities & CODEC_CAP_DR1)
1586     ) {
1587         av_assert0(codec->flags & CODEC_FLAG_EMU_EDGE);
1588         priv->use_dr1 = 1;
1589         codec->get_buffer     = input_get_buffer;
1590         codec->release_buffer = input_release_buffer;
1591         codec->reget_buffer   = input_reget_buffer;
1592         codec->thread_safe_callbacks = 1;
1593     }
1594
1595     priv->frame = avcodec_alloc_frame();
1596
1597     return 0;
1598 }
1599
1600 static void input_uninit(AVFilterContext *ctx)
1601 {
1602     FilterPriv *priv = ctx->priv;
1603     av_free(priv->frame);
1604 }
1605
1606 static int input_request_frame(AVFilterLink *link)
1607 {
1608     FilterPriv *priv = link->src->priv;
1609     AVFilterBufferRef *picref;
1610     int64_t pts = 0;
1611     AVPacket pkt;
1612     int ret;
1613
1614     while (!(ret = get_video_frame(priv->is, priv->frame, &pts, &pkt)))
1615         av_free_packet(&pkt);
1616     if (ret < 0)
1617         return -1;
1618
1619     if(priv->use_dr1 && priv->frame->opaque) {
1620         picref = avfilter_ref_buffer(priv->frame->opaque, ~0);
1621     } else {
1622         picref = avfilter_get_video_buffer(link, AV_PERM_WRITE, link->w, link->h);
1623         av_image_copy(picref->data, picref->linesize,
1624                       priv->frame->data, priv->frame->linesize,
1625                       picref->format, link->w, link->h);
1626     }
1627     av_free_packet(&pkt);
1628
1629     avfilter_copy_frame_props(picref, priv->frame);
1630     picref->pts = pts;
1631
1632     avfilter_start_frame(link, picref);
1633     avfilter_draw_slice(link, 0, link->h, 1);
1634     avfilter_end_frame(link);
1635
1636     return 0;
1637 }
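
/*
 * Note on the two paths above: when input_init() detected CODEC_CAP_DR1 it
 * installed input_get_buffer(), so every decoded frame already lives in an
 * AVFilterBufferRef kept in frame->opaque.  In that case the frame is handed
 * to the filter graph with avfilter_ref_buffer() and no pixel data is
 * copied; otherwise a fresh buffer is requested from the output link and the
 * frame is copied into it with av_image_copy().
 */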
1638
1639 static int input_query_formats(AVFilterContext *ctx)
1640 {
1641     FilterPriv *priv = ctx->priv;
1642     enum PixelFormat pix_fmts[] = {
1643         priv->is->video_st->codec->pix_fmt, PIX_FMT_NONE
1644     };
1645
1646     avfilter_set_common_formats(ctx, avfilter_make_format_list(pix_fmts));
1647     return 0;
1648 }
1649
1650 static int input_config_props(AVFilterLink *link)
1651 {
1652     FilterPriv *priv  = link->src->priv;
1653     AVCodecContext *c = priv->is->video_st->codec;
1654
1655     link->w = c->width;
1656     link->h = c->height;
1657     link->time_base = priv->is->video_st->time_base;
1658
1659     return 0;
1660 }
1661
1662 static AVFilter input_filter =
1663 {
1664     .name      = "ffplay_input",
1665
1666     .priv_size = sizeof(FilterPriv),
1667
1668     .init      = input_init,
1669     .uninit    = input_uninit,
1670
1671     .query_formats = input_query_formats,
1672
1673     .inputs    = (AVFilterPad[]) {{ .name = NULL }},
1674     .outputs   = (AVFilterPad[]) {{ .name = "default",
1675                                     .type = AVMEDIA_TYPE_VIDEO,
1676                                     .request_frame = input_request_frame,
1677                                     .config_props  = input_config_props, },
1678                                   { .name = NULL }},
1679 };
1680
1681 static int configure_video_filters(AVFilterGraph *graph, VideoState *is, const char *vfilters)
1682 {
1683     char sws_flags_str[128];
1684     int ret;
1685     FFSinkContext ffsink_ctx = { .pix_fmt = PIX_FMT_YUV420P };
1686     AVFilterContext *filt_src = NULL, *filt_out = NULL;
1687     snprintf(sws_flags_str, sizeof(sws_flags_str), "flags=%d", sws_flags);
1688     graph->scale_sws_opts = av_strdup(sws_flags_str);
1689
1690     if ((ret = avfilter_graph_create_filter(&filt_src, &input_filter, "src",
1691                                             NULL, is, graph)) < 0)
1692         goto the_end;
1693     if ((ret = avfilter_graph_create_filter(&filt_out, &ffsink, "out",
1694                                             NULL, &ffsink_ctx, graph)) < 0)
1695         goto the_end;
1696
1697     if(vfilters) {
1698         AVFilterInOut *outputs = av_malloc(sizeof(AVFilterInOut));
1699         AVFilterInOut *inputs  = av_malloc(sizeof(AVFilterInOut));
1700
1701         outputs->name    = av_strdup("in");
1702         outputs->filter_ctx = filt_src;
1703         outputs->pad_idx = 0;
1704         outputs->next    = NULL;
1705
1706         inputs->name    = av_strdup("out");
1707         inputs->filter_ctx = filt_out;
1708         inputs->pad_idx = 0;
1709         inputs->next    = NULL;
1710
1711         if ((ret = avfilter_graph_parse(graph, vfilters, &inputs, &outputs, NULL)) < 0)
1712             goto the_end;
1713         av_freep(&vfilters);
1714     } else {
1715         if ((ret = avfilter_link(filt_src, 0, filt_out, 0)) < 0)
1716             goto the_end;
1717     }
1718
1719     if ((ret = avfilter_graph_config(graph, NULL)) < 0)
1720         goto the_end;
1721
1722     is->out_video_filter = filt_out;
1723 the_end:
1724     return ret;
1725 }
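
/*
 * The graph built above exposes two labelled endpoints: "in" is the output
 * pad of the ffplay_input source filter and "out" is the input pad of the
 * ffsink sink.  A user-supplied -vf string is spliced between those labels
 * by avfilter_graph_parse(); without one, source and sink are linked
 * directly.  Illustrative invocation (hypothetical file name):
 *
 *     ffplay -vf "scale=640:-1,hflip" movie.mkv
 *
 * which parses into ffplay_input -> scale -> hflip -> ffsink.
 */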
1726
1727 #endif  /* CONFIG_AVFILTER */
1728
1729 static int video_thread(void *arg)
1730 {
1731     VideoState *is = arg;
1732     AVFrame *frame= avcodec_alloc_frame();
1733     int64_t pts_int = AV_NOPTS_VALUE, pos = -1;
1734     double pts;
1735     int ret;
1736
1737 #if CONFIG_AVFILTER
1738     AVFilterGraph *graph = avfilter_graph_alloc();
1739     AVFilterContext *filt_out = NULL;
1740
1741     if ((ret = configure_video_filters(graph, is, vfilters)) < 0)
1742         goto the_end;
1743     filt_out = is->out_video_filter;
1744 #endif
1745
1746     for(;;) {
1747 #if !CONFIG_AVFILTER
1748         AVPacket pkt;
1749 #else
1750         AVFilterBufferRef *picref;
1751         AVRational tb;
1752 #endif
1753         while (is->paused && !is->videoq.abort_request)
1754             SDL_Delay(10);
1755 #if CONFIG_AVFILTER
1756         ret = get_filtered_video_frame(filt_out, frame, &picref, &tb);
1757         if (picref) {
1758             pts_int = picref->pts;
1759             pos     = picref->pos;
1760             frame->opaque = picref;
1761         }
1762
1763         if (av_cmp_q(tb, is->video_st->time_base)) {
1764             av_unused int64_t pts1 = pts_int;
1765             pts_int = av_rescale_q(pts_int, tb, is->video_st->time_base);
1766             av_dlog(NULL, "video_thread(): "
1767                     "tb:%d/%d pts:%"PRId64" -> tb:%d/%d pts:%"PRId64"\n",
1768                     tb.num, tb.den, pts1,
1769                     is->video_st->time_base.num, is->video_st->time_base.den, pts_int);
1770         }
1771 #else
1772         ret = get_video_frame(is, frame, &pts_int, &pkt);
1773         pos = pkt.pos;
1774         av_free_packet(&pkt);
1775 #endif
1776
1777         if (ret < 0) goto the_end;
1778
1779         if (!ret)
1780             continue;
1781
1782         pts = pts_int*av_q2d(is->video_st->time_base);
1783
1784         ret = queue_picture(is, frame, pts, pos);
1785
1786         if (ret < 0)
1787             goto the_end;
1788
1789         if (step)
1790             if (cur_stream)
1791                 stream_toggle_pause(cur_stream);
1792     }
1793  the_end:
1794 #if CONFIG_AVFILTER
1795     avfilter_graph_free(&graph);
1796 #endif
1797     av_free(frame);
1798     return 0;
1799 }
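
/*
 * Timestamp handling in video_thread(): with avfilter enabled the frame pts
 * comes back in the time base of the output link (tb) and is rescaled to the
 * stream time base before being turned into seconds.  Illustrative numbers:
 * with tb = 1/1000 and a stream time base of 1/90000, a pts of 40 becomes
 * av_rescale_q(40, {1,1000}, {1,90000}) = 3600, i.e. 0.040 s after
 * multiplying by av_q2d(time_base).
 */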
1800
1801 static int subtitle_thread(void *arg)
1802 {
1803     VideoState *is = arg;
1804     SubPicture *sp;
1805     AVPacket pkt1, *pkt = &pkt1;
1806     int len1 av_unused, got_subtitle;
1807     double pts;
1808     int i, j;
1809     int r, g, b, y, u, v, a;
1810
1811     for(;;) {
1812         while (is->paused && !is->subtitleq.abort_request) {
1813             SDL_Delay(10);
1814         }
1815         if (packet_queue_get(&is->subtitleq, pkt, 1) < 0)
1816             break;
1817
1818         if(pkt->data == flush_pkt.data){
1819             avcodec_flush_buffers(is->subtitle_st->codec);
1820             continue;
1821         }
1822         SDL_LockMutex(is->subpq_mutex);
1823         while (is->subpq_size >= SUBPICTURE_QUEUE_SIZE &&
1824                !is->subtitleq.abort_request) {
1825             SDL_CondWait(is->subpq_cond, is->subpq_mutex);
1826         }
1827         SDL_UnlockMutex(is->subpq_mutex);
1828
1829         if (is->subtitleq.abort_request)
1830             goto the_end;
1831
1832         sp = &is->subpq[is->subpq_windex];
1833
1834         /* NOTE: pts is the PTS of the subtitle contained in this
1835            packet, if any */
1836         pts = 0;
1837         if (pkt->pts != AV_NOPTS_VALUE)
1838             pts = av_q2d(is->subtitle_st->time_base)*pkt->pts;
1839
1840         len1 = avcodec_decode_subtitle2(is->subtitle_st->codec,
1841                                     &sp->sub, &got_subtitle,
1842                                     pkt);
1843         if (got_subtitle && sp->sub.format == 0) {
1844             sp->pts = pts;
1845
1846             for (i = 0; i < sp->sub.num_rects; i++)
1847             {
1848                 for (j = 0; j < sp->sub.rects[i]->nb_colors; j++)
1849                 {
1850                     RGBA_IN(r, g, b, a, (uint32_t*)sp->sub.rects[i]->pict.data[1] + j);
1851                     y = RGB_TO_Y_CCIR(r, g, b);
1852                     u = RGB_TO_U_CCIR(r, g, b, 0);
1853                     v = RGB_TO_V_CCIR(r, g, b, 0);
1854                     YUVA_OUT((uint32_t*)sp->sub.rects[i]->pict.data[1] + j, y, u, v, a);
1855                 }
1856             }
1857
1858             /* now we can update the picture count */
1859             if (++is->subpq_windex == SUBPICTURE_QUEUE_SIZE)
1860                 is->subpq_windex = 0;
1861             SDL_LockMutex(is->subpq_mutex);
1862             is->subpq_size++;
1863             SDL_UnlockMutex(is->subpq_mutex);
1864         }
1865         av_free_packet(pkt);
1866     }
1867  the_end:
1868     return 0;
1869 }
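
/*
 * For bitmap subtitles (sub.format == 0) each rectangle carries a palette in
 * pict.data[1]; the loop above rewrites every RGBA palette entry as CCIR
 * YUVA, so the rectangles can later be blended onto the YUV overlay without
 * a per-pixel colorspace conversion.
 */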
1870
1871 /* copy samples into the circular buffer used by the audio visualization (waves/RDFT) display */
1872 static void update_sample_display(VideoState *is, short *samples, int samples_size)
1873 {
1874     int size, len;
1875
1876     size = samples_size / sizeof(short);
1877     while (size > 0) {
1878         len = SAMPLE_ARRAY_SIZE - is->sample_array_index;
1879         if (len > size)
1880             len = size;
1881         memcpy(is->sample_array + is->sample_array_index, samples, len * sizeof(short));
1882         samples += len;
1883         is->sample_array_index += len;
1884         if (is->sample_array_index >= SAMPLE_ARRAY_SIZE)
1885             is->sample_array_index = 0;
1886         size -= len;
1887     }
1888 }
1889
1890 /* return the new audio buffer size (samples can be added or removed
1891    to improve sync when the master clock is the video or an external clock) */
1892 static int synchronize_audio(VideoState *is, short *samples,
1893                              int samples_size1, double pts)
1894 {
1895     int n, samples_size;
1896     double ref_clock;
1897
1898     n = 2 * is->audio_st->codec->channels;
1899     samples_size = samples_size1;
1900
1901     /* if not master, then we try to remove or add samples to correct the clock */
1902     if (((is->av_sync_type == AV_SYNC_VIDEO_MASTER && is->video_st) ||
1903          is->av_sync_type == AV_SYNC_EXTERNAL_CLOCK)) {
1904         double diff, avg_diff;
1905         int wanted_size, min_size, max_size, nb_samples;
1906
1907         ref_clock = get_master_clock(is);
1908         diff = get_audio_clock(is) - ref_clock;
1909
1910         if (diff < AV_NOSYNC_THRESHOLD) {
1911             is->audio_diff_cum = diff + is->audio_diff_avg_coef * is->audio_diff_cum;
1912             if (is->audio_diff_avg_count < AUDIO_DIFF_AVG_NB) {
1913                 /* not enough measurements to have a correct estimate */
1914                 is->audio_diff_avg_count++;
1915             } else {
1916                 /* estimate the A-V difference */
1917                 avg_diff = is->audio_diff_cum * (1.0 - is->audio_diff_avg_coef);
1918
1919                 if (fabs(avg_diff) >= is->audio_diff_threshold) {
1920                     wanted_size = samples_size + ((int)(diff * is->audio_st->codec->sample_rate) * n);
1921                     nb_samples = samples_size / n;
1922
1923                     min_size = ((nb_samples * (100 - SAMPLE_CORRECTION_PERCENT_MAX)) / 100) * n;
1924                     max_size = ((nb_samples * (100 + SAMPLE_CORRECTION_PERCENT_MAX)) / 100) * n;
1925                     if (wanted_size < min_size)
1926                         wanted_size = min_size;
1927                     else if (wanted_size > max_size)
1928                         wanted_size = max_size;
1929
1930                     /* add or remove samples to correct the synchronization */
1931                     if (wanted_size < samples_size) {
1932                         /* remove samples */
1933                         samples_size = wanted_size;
1934                     } else if (wanted_size > samples_size) {
1935                         uint8_t *samples_end, *q;
1936                         int nb;
1937
1938                         /* add samples */
1939                         nb = wanted_size - samples_size;
1940                         samples_end = (uint8_t *)samples + samples_size - n;
1941                         q = samples_end + n;
1942                         while (nb > 0) {
1943                             memcpy(q, samples_end, n);
1944                             q += n;
1945                             nb -= n;
1946                         }
1947                         samples_size = wanted_size;
1948                     }
1949                 }
1950 #if 0
1951                 printf("diff=%f adiff=%f sample_diff=%d apts=%0.3f vpts=%0.3f %f\n",
1952                        diff, avg_diff, samples_size - samples_size1,
1953                        is->audio_clock, is->video_clock, is->audio_diff_threshold);
1954 #endif
1955             }
1956         } else {
1957             /* difference is too big: probably initial PTS errors, so
1958                reset the A-V filter */
1959             is->audio_diff_avg_count = 0;
1960             is->audio_diff_cum = 0;
1961         }
1962     }
1963
1964     return samples_size;
1965 }
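
/*
 * Worked example for the correction above (illustrative numbers): stereo
 * 16-bit audio gives n = 4 bytes per sample frame.  At 44100 Hz, if the
 * audio clock runs 0.05 s ahead of the master clock (diff = +0.05),
 * wanted_size grows by 0.05 * 44100 * 4 = 8820 bytes, but the +/-10% clamp
 * (SAMPLE_CORRECTION_PERCENT_MAX) limits how much is applied per buffer, so
 * the drift is corrected gradually over several callbacks.
 */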
1966
1967 /* decode one audio frame and return its uncompressed size */
1968 static int audio_decode_frame(VideoState *is, double *pts_ptr)
1969 {
1970     AVPacket *pkt_temp = &is->audio_pkt_temp;
1971     AVPacket *pkt = &is->audio_pkt;
1972     AVCodecContext *dec= is->audio_st->codec;
1973     int n, len1, data_size;
1974     double pts;
1975
1976     for(;;) {
1977         /* NOTE: the audio packet can contain several frames */
1978         while (pkt_temp->size > 0) {
1979             data_size = sizeof(is->audio_buf1);
1980             len1 = avcodec_decode_audio3(dec,
1981                                         (int16_t *)is->audio_buf1, &data_size,
1982                                         pkt_temp);
1983             if (len1 < 0) {
1984                 /* if error, we skip the frame */
1985                 pkt_temp->size = 0;
1986                 break;
1987             }
1988
1989             pkt_temp->data += len1;
1990             pkt_temp->size -= len1;
1991             if (data_size <= 0)
1992                 continue;
1993
1994             if (dec->sample_fmt != is->audio_src_fmt) {
1995                 if (is->reformat_ctx)
1996                     av_audio_convert_free(is->reformat_ctx);
1997                 is->reformat_ctx= av_audio_convert_alloc(AV_SAMPLE_FMT_S16, 1,
1998                                                          dec->sample_fmt, 1, NULL, 0);
1999                 if (!is->reformat_ctx) {
2000                     fprintf(stderr, "Cannot convert %s sample format to %s sample format\n",
2001                         av_get_sample_fmt_name(dec->sample_fmt),
2002                         av_get_sample_fmt_name(AV_SAMPLE_FMT_S16));
2003                         break;
2004                 }
2005                 is->audio_src_fmt= dec->sample_fmt;
2006             }
2007
2008             if (is->reformat_ctx) {
2009                 const void *ibuf[6]= {is->audio_buf1};
2010                 void *obuf[6]= {is->audio_buf2};
2011                 int istride[6]= {av_get_bits_per_sample_fmt(dec->sample_fmt)/8};
2012                 int ostride[6]= {2};
2013                 int len= data_size/istride[0];
2014                 if (av_audio_convert(is->reformat_ctx, obuf, ostride, ibuf, istride, len)<0) {
2015                     printf("av_audio_convert() failed\n");
2016                     break;
2017                 }
2018                 is->audio_buf= is->audio_buf2;
2019                 /* FIXME: existing code assumes that data_size equals framesize*channels*2;
2020                           remove this legacy cruft */
2021                 data_size= len*2;
2022             }else{
2023                 is->audio_buf= is->audio_buf1;
2024             }
2025
2026             /* if no pts, then compute it */
2027             pts = is->audio_clock;
2028             *pts_ptr = pts;
2029             n = 2 * dec->channels;
2030             is->audio_clock += (double)data_size /
2031                 (double)(n * dec->sample_rate);
2032 #ifdef DEBUG
2033             {
2034                 static double last_clock;
2035                 printf("audio: delay=%0.3f clock=%0.3f pts=%0.3f\n",
2036                        is->audio_clock - last_clock,
2037                        is->audio_clock, pts);
2038                 last_clock = is->audio_clock;
2039             }
2040 #endif
2041             return data_size;
2042         }
2043
2044         /* free the current packet */
2045         if (pkt->data)
2046             av_free_packet(pkt);
2047
2048         if (is->paused || is->audioq.abort_request) {
2049             return -1;
2050         }
2051
2052         /* read next packet */
2053         if (packet_queue_get(&is->audioq, pkt, 1) < 0)
2054             return -1;
2055         if(pkt->data == flush_pkt.data){
2056             avcodec_flush_buffers(dec);
2057             continue;
2058         }
2059
2060         pkt_temp->data = pkt->data;
2061         pkt_temp->size = pkt->size;
2062
2063         /* update the audio clock with the packet pts, if available */
2064         if (pkt->pts != AV_NOPTS_VALUE) {
2065             is->audio_clock = av_q2d(is->audio_st->time_base)*pkt->pts;
2066         }
2067     }
2068 }
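
/*
 * Audio clock arithmetic (illustrative numbers): each returned buffer
 * advances is->audio_clock by data_size / (n * sample_rate) seconds.  For a
 * 4096-byte buffer of stereo S16 at 44100 Hz, n = 4 and the clock advances
 * by 4096 / (4 * 44100) ~= 0.0232 s; this clock is what get_audio_clock()
 * uses as the reference for A-V synchronization.
 */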
2069
2070 /* prepare a new audio buffer */
2071 static void sdl_audio_callback(void *opaque, Uint8 *stream, int len)
2072 {
2073     VideoState *is = opaque;
2074     int audio_size, len1;
2075     double pts;
2076
2077     audio_callback_time = av_gettime();
2078
2079     while (len > 0) {
2080         if (is->audio_buf_index >= is->audio_buf_size) {
2081            audio_size = audio_decode_frame(is, &pts);
2082            if (audio_size < 0) {
2083                 /* if error, just output silence */
2084                is->audio_buf = is->audio_buf1;
2085                is->audio_buf_size = 1024;
2086                memset(is->audio_buf, 0, is->audio_buf_size);
2087            } else {
2088                if (is->show_mode != SHOW_MODE_VIDEO)
2089                    update_sample_display(is, (int16_t *)is->audio_buf, audio_size);
2090                audio_size = synchronize_audio(is, (int16_t *)is->audio_buf, audio_size,
2091                                               pts);
2092                is->audio_buf_size = audio_size;
2093            }
2094            is->audio_buf_index = 0;
2095         }
2096         len1 = is->audio_buf_size - is->audio_buf_index;
2097         if (len1 > len)
2098             len1 = len;
2099         memcpy(stream, (uint8_t *)is->audio_buf + is->audio_buf_index, len1);
2100         len -= len1;
2101         stream += len1;
2102         is->audio_buf_index += len1;
2103     }
2104 }
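
/*
 * SDL calls this callback whenever its hardware buffer needs len more bytes;
 * the loop refills from audio_decode_frame() as often as necessary and, on a
 * decode error, substitutes 1024 bytes of silence so the callback cadence
 * (and the audio clock estimate anchored to audio_callback_time) stays
 * steady.
 */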
2105
2106 /* open a given stream. Return 0 if OK */
2107 static int stream_component_open(VideoState *is, int stream_index)
2108 {
2109     AVFormatContext *ic = is->ic;
2110     AVCodecContext *avctx;
2111     AVCodec *codec;
2112     SDL_AudioSpec wanted_spec, spec;
2113
2114     if (stream_index < 0 || stream_index >= ic->nb_streams)
2115         return -1;
2116     avctx = ic->streams[stream_index]->codec;
2117
2118     /* limit the requested channel count before opening an audio decoder */
2119     if (avctx->codec_type == AVMEDIA_TYPE_AUDIO) {
2120         if (avctx->channels > 0) {
2121             avctx->request_channels = FFMIN(2, avctx->channels);
2122         } else {
2123             avctx->request_channels = 2;
2124         }
2125     }
2126
2127     codec = avcodec_find_decoder(avctx->codec_id);
2128     if (!codec)
2129         return -1;
2130
2131     avctx->workaround_bugs = workaround_bugs;
2132     avctx->lowres = lowres;
2133     if(lowres) avctx->flags |= CODEC_FLAG_EMU_EDGE;
2134     avctx->idct_algo= idct;
2135     if(fast) avctx->flags2 |= CODEC_FLAG2_FAST;
2136     avctx->skip_frame= skip_frame;
2137     avctx->skip_idct= skip_idct;
2138     avctx->skip_loop_filter= skip_loop_filter;
2139     avctx->error_recognition= error_recognition;
2140     avctx->error_concealment= error_concealment;
2141     avctx->thread_count= thread_count;
2142
2143     set_context_opts(avctx, avcodec_opts[avctx->codec_type], 0, codec);
2144
2145     if(codec->capabilities & CODEC_CAP_DR1)
2146         avctx->flags |= CODEC_FLAG_EMU_EDGE;
2147
2148     if (avcodec_open(avctx, codec) < 0)
2149         return -1;
2150
2151     /* prepare audio output */
2152     if (avctx->codec_type == AVMEDIA_TYPE_AUDIO) {
2153         if(avctx->sample_rate <= 0 || avctx->channels <= 0){
2154             fprintf(stderr, "Invalid sample rate or channel count\n");
2155             return -1;
2156         }
2157         wanted_spec.freq = avctx->sample_rate;
2158         wanted_spec.format = AUDIO_S16SYS;
2159         wanted_spec.channels = avctx->channels;
2160         wanted_spec.silence = 0;
2161         wanted_spec.samples = SDL_AUDIO_BUFFER_SIZE;
2162         wanted_spec.callback = sdl_audio_callback;
2163         wanted_spec.userdata = is;
2164         if (SDL_OpenAudio(&wanted_spec, &spec) < 0) {
2165             fprintf(stderr, "SDL_OpenAudio: %s\n", SDL_GetError());
2166             return -1;
2167         }
2168         is->audio_hw_buf_size = spec.size;
2169         is->audio_src_fmt= AV_SAMPLE_FMT_S16;
2170     }
2171
2172     ic->streams[stream_index]->discard = AVDISCARD_DEFAULT;
2173     switch(avctx->codec_type) {
2174     case AVMEDIA_TYPE_AUDIO:
2175         is->audio_stream = stream_index;
2176         is->audio_st = ic->streams[stream_index];
2177         is->audio_buf_size = 0;
2178         is->audio_buf_index = 0;
2179
2180         /* init averaging filter */
2181         is->audio_diff_avg_coef = exp(log(0.01) / AUDIO_DIFF_AVG_NB);
2182         is->audio_diff_avg_count = 0;
2183         /* since we do not have a precise enough audio FIFO fullness,
2184            we only correct audio sync if the error is larger than this threshold */
2185         is->audio_diff_threshold = 2.0 * SDL_AUDIO_BUFFER_SIZE / avctx->sample_rate;
2186
2187         memset(&is->audio_pkt, 0, sizeof(is->audio_pkt));
2188         packet_queue_init(&is->audioq);
2189         SDL_PauseAudio(0);
2190         break;
2191     case AVMEDIA_TYPE_VIDEO:
2192         is->video_stream = stream_index;
2193         is->video_st = ic->streams[stream_index];
2194
2195         packet_queue_init(&is->videoq);
2196         is->video_tid = SDL_CreateThread(video_thread, is);
2197         break;
2198     case AVMEDIA_TYPE_SUBTITLE:
2199         is->subtitle_stream = stream_index;
2200         is->subtitle_st = ic->streams[stream_index];
2201         packet_queue_init(&is->subtitleq);
2202
2203         is->subtitle_tid = SDL_CreateThread(subtitle_thread, is);
2204         break;
2205     default:
2206         break;
2207     }
2208     return 0;
2209 }
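
/*
 * Sync threshold arithmetic (illustrative numbers): for an SDL buffer of
 * 1024 samples and a 44100 Hz stream, audio_diff_threshold =
 * 2.0 * 1024 / 44100 ~= 0.046 s, i.e. samples are only added or dropped once
 * the measured A-V drift exceeds about two SDL buffer durations.
 */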
2210
2211 static void stream_component_close(VideoState *is, int stream_index)
2212 {
2213     AVFormatContext *ic = is->ic;
2214     AVCodecContext *avctx;
2215
2216     if (stream_index < 0 || stream_index >= ic->nb_streams)
2217         return;
2218     avctx = ic->streams[stream_index]->codec;
2219
2220     switch(avctx->codec_type) {
2221     case AVMEDIA_TYPE_AUDIO:
2222         packet_queue_abort(&is->audioq);
2223
2224         SDL_CloseAudio();
2225
2226         packet_queue_end(&is->audioq);
2227         if (is->reformat_ctx)
2228             av_audio_convert_free(is->reformat_ctx);
2229         is->reformat_ctx = NULL;
2230         break;
2231     case AVMEDIA_TYPE_VIDEO:
2232         packet_queue_abort(&is->videoq);
2233
2234         /* note: we also signal this mutex to make sure we deblock the
2235            video thread in all cases */
2236         SDL_LockMutex(is->pictq_mutex);
2237         SDL_CondSignal(is->pictq_cond);
2238         SDL_UnlockMutex(is->pictq_mutex);
2239
2240         SDL_WaitThread(is->video_tid, NULL);
2241
2242         packet_queue_end(&is->videoq);
2243         break;
2244     case AVMEDIA_TYPE_SUBTITLE:
2245         packet_queue_abort(&is->subtitleq);
2246
2247         /* note: we also signal this mutex to make sure we deblock the
2248            subtitle thread in all cases */
2249         SDL_LockMutex(is->subpq_mutex);
2250         is->subtitle_stream_changed = 1;
2251
2252         SDL_CondSignal(is->subpq_cond);
2253         SDL_UnlockMutex(is->subpq_mutex);
2254
2255         SDL_WaitThread(is->subtitle_tid, NULL);
2256
2257         packet_queue_end(&is->subtitleq);
2258         break;
2259     default:
2260         break;
2261     }
2262
2263     ic->streams[stream_index]->discard = AVDISCARD_ALL;
2264     avcodec_close(avctx);
2265     switch(avctx->codec_type) {
2266     case AVMEDIA_TYPE_AUDIO:
2267         is->audio_st = NULL;
2268         is->audio_stream = -1;
2269         break;
2270     case AVMEDIA_TYPE_VIDEO:
2271         is->video_st = NULL;
2272         is->video_stream = -1;
2273         break;
2274     case AVMEDIA_TYPE_SUBTITLE:
2275         is->subtitle_st = NULL;
2276         is->subtitle_stream = -1;
2277         break;
2278     default:
2279         break;
2280     }
2281 }
2282
2283 /* since only the read thread performs blocking demuxer I/O, we can use a
2284    global variable instead of a thread-local variable */
2285 static VideoState *global_video_state;
2286
2287 static int decode_interrupt_cb(void)
2288 {
2289     return (global_video_state && global_video_state->abort_request);
2290 }
2291
2292 /* this thread gets the stream from the disk or the network */
2293 static int read_thread(void *arg)
2294 {
2295     VideoState *is = arg;
2296     AVFormatContext *ic;
2297     int err, i, ret;
2298     int st_index[AVMEDIA_TYPE_NB];
2299     AVPacket pkt1, *pkt = &pkt1;
2300     AVFormatParameters params, *ap = &params;
2301     int eof=0;
2302     int pkt_in_play_range = 0;
2303
2304     ic = avformat_alloc_context();
2305
2306     memset(st_index, -1, sizeof(st_index));
2307     is->video_stream = -1;
2308     is->audio_stream = -1;
2309     is->subtitle_stream = -1;
2310
2311     global_video_state = is;
2312     avio_set_interrupt_cb(decode_interrupt_cb);
2313
2314     memset(ap, 0, sizeof(*ap));
2315
2316     ap->prealloced_context = 1;
2317     ap->width = frame_width;
2318     ap->height= frame_height;
2319     ap->time_base= (AVRational){1, 25};
2320     ap->pix_fmt = frame_pix_fmt;
2321     ic->flags |= AVFMT_FLAG_PRIV_OPT;
2322
2323
2324     err = av_open_input_file(&ic, is->filename, is->iformat, 0, ap);
2325     if (err >= 0) {
2326         set_context_opts(ic, avformat_opts, AV_OPT_FLAG_DECODING_PARAM, NULL);
2327         err = av_demuxer_open(ic, ap);
2328         if(err < 0){
2329             avformat_free_context(ic);
2330             ic= NULL;
2331         }
2332     }
2333     if (err < 0) {
2334         print_error(is->filename, err);
2335         ret = -1;
2336         goto fail;
2337     }
2338     is->ic = ic;
2339
2340     if(genpts)
2341         ic->flags |= AVFMT_FLAG_GENPTS;
2342
2343     err = av_find_stream_info(ic);
2344     if (err < 0) {
2345         fprintf(stderr, "%s: could not find codec parameters\n", is->filename);
2346         ret = -1;
2347         goto fail;
2348     }
2349     if(ic->pb)
2350         ic->pb->eof_reached= 0; //FIXME hack, ffplay maybe should not use url_feof() to test for the end
2351
2352     if(seek_by_bytes<0)
2353         seek_by_bytes= !!(ic->iformat->flags & AVFMT_TS_DISCONT);
2354
2355     /* if seeking requested, we execute it */
2356     if (start_time != AV_NOPTS_VALUE) {
2357         int64_t timestamp;
2358
2359         timestamp = start_time;
2360         /* add the stream start time */
2361         if (ic->start_time != AV_NOPTS_VALUE)
2362             timestamp += ic->start_time;
2363         ret = avformat_seek_file(ic, -1, INT64_MIN, timestamp, INT64_MAX, 0);
2364         if (ret < 0) {
2365             fprintf(stderr, "%s: could not seek to position %0.3f\n",
2366                     is->filename, (double)timestamp / AV_TIME_BASE);
2367         }
2368     }
2369
2370     for (i = 0; i < ic->nb_streams; i++)
2371         ic->streams[i]->discard = AVDISCARD_ALL;
2372     if (!video_disable)
2373         st_index[AVMEDIA_TYPE_VIDEO] =
2374             av_find_best_stream(ic, AVMEDIA_TYPE_VIDEO,
2375                                 wanted_stream[AVMEDIA_TYPE_VIDEO], -1, NULL, 0);
2376     if (!audio_disable)
2377         st_index[AVMEDIA_TYPE_AUDIO] =
2378             av_find_best_stream(ic, AVMEDIA_TYPE_AUDIO,
2379                                 wanted_stream[AVMEDIA_TYPE_AUDIO],
2380                                 st_index[AVMEDIA_TYPE_VIDEO],
2381                                 NULL, 0);
2382     if (!video_disable)
2383         st_index[AVMEDIA_TYPE_SUBTITLE] =
2384             av_find_best_stream(ic, AVMEDIA_TYPE_SUBTITLE,
2385                                 wanted_stream[AVMEDIA_TYPE_SUBTITLE],
2386                                 (st_index[AVMEDIA_TYPE_AUDIO] >= 0 ?
2387                                  st_index[AVMEDIA_TYPE_AUDIO] :
2388                                  st_index[AVMEDIA_TYPE_VIDEO]),
2389                                 NULL, 0);
2390     if (show_status) {
2391         av_dump_format(ic, 0, is->filename, 0);
2392     }
2393
2394     is->show_mode = show_mode;
2395
2396     /* open the streams */
2397     if (st_index[AVMEDIA_TYPE_AUDIO] >= 0) {
2398         stream_component_open(is, st_index[AVMEDIA_TYPE_AUDIO]);
2399     }
2400
2401     ret=-1;
2402     if (st_index[AVMEDIA_TYPE_VIDEO] >= 0) {
2403         ret= stream_component_open(is, st_index[AVMEDIA_TYPE_VIDEO]);
2404     }
2405     is->refresh_tid = SDL_CreateThread(refresh_thread, is);
2406     if (is->show_mode == SHOW_MODE_NONE)
2407         is->show_mode = ret >= 0 ? SHOW_MODE_VIDEO : SHOW_MODE_RDFT;
2408
2409     if (st_index[AVMEDIA_TYPE_SUBTITLE] >= 0) {
2410         stream_component_open(is, st_index[AVMEDIA_TYPE_SUBTITLE]);
2411     }
2412
2413     if (is->video_stream < 0 && is->audio_stream < 0) {
2414         fprintf(stderr, "%s: could not open codecs\n", is->filename);
2415         ret = -1;
2416         goto fail;
2417     }
2418
2419     for(;;) {
2420         if (is->abort_request)
2421             break;
2422         if (is->paused != is->last_paused) {
2423             is->last_paused = is->paused;
2424             if (is->paused)
2425                 is->read_pause_return= av_read_pause(ic);
2426             else
2427                 av_read_play(ic);
2428         }
2429 #if CONFIG_RTSP_DEMUXER
2430         if (is->paused && !strcmp(ic->iformat->name, "rtsp")) {
2431             /* wait 10 ms to avoid trying to get another packet */
2432             /* XXX: horrible */
2433             SDL_Delay(10);
2434             continue;
2435         }
2436 #endif
2437         if (is->seek_req) {
2438             int64_t seek_target= is->seek_pos;
2439             int64_t seek_min= is->seek_rel > 0 ? seek_target - is->seek_rel + 2: INT64_MIN;
2440             int64_t seek_max= is->seek_rel < 0 ? seek_target - is->seek_rel - 2: INT64_MAX;
2441 //FIXME the +-2 is due to rounding being not done in the correct direction in generation
2442 //      of the seek_pos/seek_rel variables
2443
2444             ret = avformat_seek_file(is->ic, -1, seek_min, seek_target, seek_max, is->seek_flags);
2445             if (ret < 0) {
2446                 fprintf(stderr, "%s: error while seeking\n", is->ic->filename);
2447             }else{
2448                 if (is->audio_stream >= 0) {
2449                     packet_queue_flush(&is->audioq);
2450                     packet_queue_put(&is->audioq, &flush_pkt);
2451                 }
2452                 if (is->subtitle_stream >= 0) {
2453                     packet_queue_flush(&is->subtitleq);
2454                     packet_queue_put(&is->subtitleq, &flush_pkt);
2455                 }
2456                 if (is->video_stream >= 0) {
2457                     packet_queue_flush(&is->videoq);
2458                     packet_queue_put(&is->videoq, &flush_pkt);
2459                 }
2460             }
2461             is->seek_req = 0;
2462             eof= 0;
2463         }
2464
2465         /* if the queues are full, no need to read more */
2466         if (   is->audioq.size + is->videoq.size + is->subtitleq.size > MAX_QUEUE_SIZE
2467             || (   (is->audioq   .size  > MIN_AUDIOQ_SIZE || is->audio_stream<0)
2468                 && (is->videoq   .nb_packets > MIN_FRAMES || is->video_stream<0)
2469                 && (is->subtitleq.nb_packets > MIN_FRAMES || is->subtitle_stream<0))) {
2470             /* wait 10 ms */
2471             SDL_Delay(10);
2472             continue;
2473         }
2474         if(eof) {
2475             if(is->video_stream >= 0){
2476                 av_init_packet(pkt);
2477                 pkt->data=NULL;
2478                 pkt->size=0;
2479                 pkt->stream_index= is->video_stream;
2480                 packet_queue_put(&is->videoq, pkt);
2481             }
2482             SDL_Delay(10);
2483             if(is->audioq.size + is->videoq.size + is->subtitleq.size ==0){
2484                 if(loop!=1 && (!loop || --loop)){
2485                     stream_seek(cur_stream, start_time != AV_NOPTS_VALUE ? start_time : 0, 0, 0);
2486                 }else if(autoexit){
2487                     ret=AVERROR_EOF;
2488                     goto fail;
2489                 }
2490             }
2491             eof=0;
2492             continue;
2493         }
2494         ret = av_read_frame(ic, pkt);
2495         if (ret < 0) {
2496             if (ret == AVERROR_EOF || url_feof(ic->pb))
2497                 eof=1;
2498             if (ic->pb && ic->pb->error)
2499                 break;
2500             SDL_Delay(100); /* wait for user event */
2501             continue;
2502         }
2503         /* check if packet is in play range specified by user, then queue, otherwise discard */
2504         pkt_in_play_range = duration == AV_NOPTS_VALUE ||
2505                 (pkt->pts - ic->streams[pkt->stream_index]->start_time) *
2506                 av_q2d(ic->streams[pkt->stream_index]->time_base) -
2507                 (double)(start_time != AV_NOPTS_VALUE ? start_time : 0)/1000000
2508                 <= ((double)duration/1000000);
2509         if (pkt->stream_index == is->audio_stream && pkt_in_play_range) {
2510             packet_queue_put(&is->audioq, pkt);
2511         } else if (pkt->stream_index == is->video_stream && pkt_in_play_range) {
2512             packet_queue_put(&is->videoq, pkt);
2513         } else if (pkt->stream_index == is->subtitle_stream && pkt_in_play_range) {
2514             packet_queue_put(&is->subtitleq, pkt);
2515         } else {
2516             av_free_packet(pkt);
2517         }
2518     }
2519     /* wait until the end */
2520     while (!is->abort_request) {
2521         SDL_Delay(100);
2522     }
2523
2524     ret = 0;
2525  fail:
2526     /* disable interrupting */
2527     global_video_state = NULL;
2528
2529     /* close each stream */
2530     if (is->audio_stream >= 0)
2531         stream_component_close(is, is->audio_stream);
2532     if (is->video_stream >= 0)
2533         stream_component_close(is, is->video_stream);
2534     if (is->subtitle_stream >= 0)
2535         stream_component_close(is, is->subtitle_stream);
2536     if (is->ic) {
2537         av_close_input_file(is->ic);
2538         is->ic = NULL; /* safety */
2539     }
2540     avio_set_interrupt_cb(NULL);
2541
2542     if (ret != 0) {
2543         SDL_Event event;
2544
2545         event.type = FF_QUIT_EVENT;
2546         event.user.data1 = is;
2547         SDL_PushEvent(&event);
2548     }
2549     return 0;
2550 }
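
/*
 * Back-pressure and EOF handling in read_thread(): reading pauses in 10 ms
 * steps once the three packet queues together exceed MAX_QUEUE_SIZE, or once
 * every open stream already holds enough data (MIN_AUDIOQ_SIZE / MIN_FRAMES).
 * At end of file an empty packet is pushed onto the video queue so the
 * decoder is drained of its buffered frames, and playback then loops, exits
 * (autoexit) or idles until the user quits.
 */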
2551
2552 static VideoState *stream_open(const char *filename, AVInputFormat *iformat)
2553 {
2554     VideoState *is;
2555
2556     is = av_mallocz(sizeof(VideoState));
2557     if (!is)
2558         return NULL;
2559     av_strlcpy(is->filename, filename, sizeof(is->filename));
2560     is->iformat = iformat;
2561     is->ytop = 0;
2562     is->xleft = 0;
2563
2564     /* start video display */
2565     is->pictq_mutex = SDL_CreateMutex();
2566     is->pictq_cond = SDL_CreateCond();
2567
2568     is->subpq_mutex = SDL_CreateMutex();
2569     is->subpq_cond = SDL_CreateCond();
2570
2571     is->av_sync_type = av_sync_type;
2572     is->read_tid = SDL_CreateThread(read_thread, is);
2573     if (!is->read_tid) {
2574         av_free(is);
2575         return NULL;
2576     }
2577     return is;
2578 }
2579
2580 static void stream_cycle_channel(VideoState *is, int codec_type)
2581 {
2582     AVFormatContext *ic = is->ic;
2583     int start_index, stream_index;
2584     AVStream *st;
2585
2586     if (codec_type == AVMEDIA_TYPE_VIDEO)
2587         start_index = is->video_stream;
2588     else if (codec_type == AVMEDIA_TYPE_AUDIO)
2589         start_index = is->audio_stream;
2590     else
2591         start_index = is->subtitle_stream;
2592     if (start_index < (codec_type == AVMEDIA_TYPE_SUBTITLE ? -1 : 0))
2593         return;
2594     stream_index = start_index;
2595     for(;;) {
2596         if (++stream_index >= is->ic->nb_streams)
2597         {
2598             if (codec_type == AVMEDIA_TYPE_SUBTITLE)
2599             {
2600                 stream_index = -1;
2601                 goto the_end;
2602             } else
2603                 stream_index = 0;
2604         }
2605         if (stream_index == start_index)
2606             return;
2607         st = ic->streams[stream_index];
2608         if (st->codec->codec_type == codec_type) {
2609             /* check that parameters are OK */
2610             switch(codec_type) {
2611             case AVMEDIA_TYPE_AUDIO:
2612                 if (st->codec->sample_rate != 0 &&
2613                     st->codec->channels != 0)
2614                     goto the_end;
2615                 break;
2616             case AVMEDIA_TYPE_VIDEO:
2617             case AVMEDIA_TYPE_SUBTITLE:
2618                 goto the_end;
2619             default:
2620                 break;
2621             }
2622         }
2623     }
2624  the_end:
2625     stream_component_close(is, start_index);
2626     stream_component_open(is, stream_index);
2627 }
2628
2629
2630 static void toggle_full_screen(void)
2631 {
2632     is_full_screen = !is_full_screen;
2633     video_open(cur_stream);
2634 }
2635
2636 static void toggle_pause(void)
2637 {
2638     if (cur_stream)
2639         stream_toggle_pause(cur_stream);
2640     step = 0;
2641 }
2642
2643 static void step_to_next_frame(void)
2644 {
2645     if (cur_stream) {
2646         /* if the stream is paused, unpause it, then step */
2647         if (cur_stream->paused)
2648             stream_toggle_pause(cur_stream);
2649     }
2650     step = 1;
2651 }
2652
2653 static void toggle_audio_display(void)
2654 {
2655     if (cur_stream) {
2656         int bgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0x00);
2657         cur_stream->show_mode = (cur_stream->show_mode + 1) % SHOW_MODE_NB;
2658         fill_rectangle(screen,
2659                     cur_stream->xleft, cur_stream->ytop, cur_stream->width, cur_stream->height,
2660                     bgcolor);
2661         SDL_UpdateRect(screen, cur_stream->xleft, cur_stream->ytop, cur_stream->width, cur_stream->height);
2662     }
2663 }
2664
2665 /* handle an event sent by the GUI */
2666 static void event_loop(void)
2667 {
2668     SDL_Event event;
2669     double incr, pos, frac;
2670
2671     for(;;) {
2672         double x;
2673         SDL_WaitEvent(&event);
2674         switch(event.type) {
2675         case SDL_KEYDOWN:
2676             if (exit_on_keydown) {
2677                 do_exit();
2678                 break;
2679             }
2680             switch(event.key.keysym.sym) {
2681             case SDLK_ESCAPE:
2682             case SDLK_q:
2683                 do_exit();
2684                 break;
2685             case SDLK_f:
2686                 toggle_full_screen();
2687                 break;
2688             case SDLK_p:
2689             case SDLK_SPACE:
2690                 toggle_pause();
2691                 break;
2692             case SDLK_s: //S: Step to next frame
2693                 step_to_next_frame();
2694                 break;
2695             case SDLK_a:
2696                 if (cur_stream)
2697                     stream_cycle_channel(cur_stream, AVMEDIA_TYPE_AUDIO);
2698                 break;
2699             case SDLK_v:
2700                 if (cur_stream)
2701                     stream_cycle_channel(cur_stream, AVMEDIA_TYPE_VIDEO);
2702                 break;
2703             case SDLK_t:
2704                 if (cur_stream)
2705                     stream_cycle_channel(cur_stream, AVMEDIA_TYPE_SUBTITLE);
2706                 break;
2707             case SDLK_w:
2708                 toggle_audio_display();
2709                 break;
2710             case SDLK_LEFT:
2711                 incr = -10.0;
2712                 goto do_seek;
2713             case SDLK_RIGHT:
2714                 incr = 10.0;
2715                 goto do_seek;
2716             case SDLK_UP:
2717                 incr = 60.0;
2718                 goto do_seek;
2719             case SDLK_DOWN:
2720                 incr = -60.0;
2721             do_seek:
2722                 if (cur_stream) {
2723                     if (seek_by_bytes) {
2724                         if (cur_stream->video_stream >= 0 && cur_stream->video_current_pos>=0){
2725                             pos= cur_stream->video_current_pos;
2726                         }else if(cur_stream->audio_stream >= 0 && cur_stream->audio_pkt.pos>=0){
2727                             pos= cur_stream->audio_pkt.pos;
2728                         }else
2729                             pos = avio_tell(cur_stream->ic->pb);
2730                         if (cur_stream->ic->bit_rate)
2731                             incr *= cur_stream->ic->bit_rate / 8.0;
2732                         else
2733                             incr *= 180000.0;
2734                         pos += incr;
2735                         stream_seek(cur_stream, pos, incr, 1);
2736                     } else {
2737                         pos = get_master_clock(cur_stream);
2738                         pos += incr;
2739                         stream_seek(cur_stream, (int64_t)(pos * AV_TIME_BASE), (int64_t)(incr * AV_TIME_BASE), 0);
2740                     }
2741                 }
2742                 break;
2743             default:
2744                 break;
2745             }
2746             break;
2747         case SDL_MOUSEBUTTONDOWN:
2748             if (exit_on_mousedown) {
2749                 do_exit();
2750                 break;
2751             }
2752         case SDL_MOUSEMOTION:
2753             if(event.type ==SDL_MOUSEBUTTONDOWN){
2754                 x= event.button.x;
2755             }else{
2756                 if(event.motion.state != SDL_PRESSED)
2757                     break;
2758                 x= event.motion.x;
2759             }
2760             if (cur_stream) {
2761                 if(seek_by_bytes || cur_stream->ic->duration<=0){
2762                     uint64_t size=  avio_size(cur_stream->ic->pb);
2763                     stream_seek(cur_stream, size*x/cur_stream->width, 0, 1);
2764                 }else{
2765                     int64_t ts;
2766                     int ns, hh, mm, ss;
2767                     int tns, thh, tmm, tss;
2768                     tns = cur_stream->ic->duration/1000000LL;
2769                     thh = tns/3600;
2770                     tmm = (tns%3600)/60;
2771                     tss = (tns%60);
2772                     frac = x/cur_stream->width;
2773                     ns = frac*tns;
2774                     hh = ns/3600;
2775                     mm = (ns%3600)/60;
2776                     ss = (ns%60);
2777                     fprintf(stderr, "Seek to %2.0f%% (%2d:%02d:%02d) of total duration (%2d:%02d:%02d)       \n", frac*100,
2778                             hh, mm, ss, thh, tmm, tss);
2779                     ts = frac*cur_stream->ic->duration;
2780                     if (cur_stream->ic->start_time != AV_NOPTS_VALUE)
2781                         ts += cur_stream->ic->start_time;
2782                     stream_seek(cur_stream, ts, 0, 0);
2783                 }
2784             }
2785             break;
2786         case SDL_VIDEORESIZE:
2787             if (cur_stream) {
2788                 screen = SDL_SetVideoMode(event.resize.w, event.resize.h, 0,
2789                                           SDL_HWSURFACE|SDL_RESIZABLE|SDL_ASYNCBLIT|SDL_HWACCEL);
2790                 screen_width = cur_stream->width = event.resize.w;
2791                 screen_height= cur_stream->height= event.resize.h;
2792             }
2793             break;
2794         case SDL_QUIT:
2795         case FF_QUIT_EVENT:
2796             do_exit();
2797             break;
2798         case FF_ALLOC_EVENT:
2799             video_open(event.user.data1);
2800             alloc_picture(event.user.data1);
2801             break;
2802         case FF_REFRESH_EVENT:
2803             video_refresh(event.user.data1);
2804             cur_stream->refresh=0;
2805             break;
2806         default:
2807             break;
2808         }
2809     }
2810 }
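
/*
 * Seek increment arithmetic (illustrative numbers): in time-based mode the
 * arrow keys move the master clock by +/-10 s or +/-60 s.  In byte mode the
 * same increment is converted with incr * bit_rate / 8, so a 10 s jump in a
 * 1 Mb/s file becomes 10 * 1000000 / 8 = 1250000 bytes; when the bit rate is
 * unknown, a fallback of 180000 bytes per second is assumed.
 */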
2811
2812 static int opt_frame_size(const char *opt, const char *arg)
2813 {
2814     if (av_parse_video_size(&frame_width, &frame_height, arg) < 0) {
2815         fprintf(stderr, "Incorrect frame size\n");
2816         return AVERROR(EINVAL);
2817     }
2818     if ((frame_width % 2) != 0 || (frame_height % 2) != 0) {
2819         fprintf(stderr, "Frame size must be a multiple of 2\n");
2820         return AVERROR(EINVAL);
2821     }
2822     return 0;
2823 }
2824
2825 static int opt_width(const char *opt, const char *arg)
2826 {
2827     screen_width = parse_number_or_die(opt, arg, OPT_INT64, 1, INT_MAX);
2828     return 0;
2829 }
2830
2831 static int opt_height(const char *opt, const char *arg)
2832 {
2833     screen_height = parse_number_or_die(opt, arg, OPT_INT64, 1, INT_MAX);
2834     return 0;
2835 }
2836
2837 static int opt_format(const char *opt, const char *arg)
2838 {
2839     file_iformat = av_find_input_format(arg);
2840     if (!file_iformat) {
2841         fprintf(stderr, "Unknown input format: %s\n", arg);
2842         return AVERROR(EINVAL);
2843     }
2844     return 0;
2845 }
2846
2847 static int opt_frame_pix_fmt(const char *opt, const char *arg)
2848 {
2849     frame_pix_fmt = av_get_pix_fmt(arg);
2850     return 0;
2851 }
2852
2853 static int opt_sync(const char *opt, const char *arg)
2854 {
2855     if (!strcmp(arg, "audio"))
2856         av_sync_type = AV_SYNC_AUDIO_MASTER;
2857     else if (!strcmp(arg, "video"))
2858         av_sync_type = AV_SYNC_VIDEO_MASTER;
2859     else if (!strcmp(arg, "ext"))
2860         av_sync_type = AV_SYNC_EXTERNAL_CLOCK;
2861     else {
2862         fprintf(stderr, "Unknown value for %s: %s\n", opt, arg);
2863         exit(1);
2864     }
2865     return 0;
2866 }
2867
2868 static int opt_seek(const char *opt, const char *arg)
2869 {
2870     start_time = parse_time_or_die(opt, arg, 1);
2871     return 0;
2872 }
2873
2874 static int opt_duration(const char *opt, const char *arg)
2875 {
2876     duration = parse_time_or_die(opt, arg, 1);
2877     return 0;
2878 }
2879
2880 static int opt_thread_count(const char *opt, const char *arg)
2881 {
2882     thread_count= parse_number_or_die(opt, arg, OPT_INT64, 0, INT_MAX);
2883 #if !HAVE_THREADS
2884     fprintf(stderr, "Warning: not compiled with thread support, using thread emulation\n");
2885 #endif
2886     return 0;
2887 }
2888
2889 static int opt_show_mode(const char *opt, const char *arg)
2890 {
2891     show_mode = !strcmp(arg, "video") ? SHOW_MODE_VIDEO :
2892                 !strcmp(arg, "waves") ? SHOW_MODE_WAVES :
2893                 !strcmp(arg, "rdft" ) ? SHOW_MODE_RDFT  :
2894                 parse_number_or_die(opt, arg, OPT_INT, 0, SHOW_MODE_NB-1);
2895     return 0;
2896 }
2897
2898 static int opt_input_file(const char *opt, const char *filename)
2899 {
2900     if (input_filename) {
2901         fprintf(stderr, "Argument '%s' provided as input filename, but '%s' was already specified.\n",
2902                 filename, input_filename);
2903         exit(1);
2904     }
2905     if (!strcmp(filename, "-"))
2906         filename = "pipe:";
2907     input_filename = filename;
2908     return 0;
2909 }
2910
2911 static const OptionDef options[] = {
2912 #include "cmdutils_common_opts.h"
2913     { "x", HAS_ARG, {(void*)opt_width}, "force displayed width", "width" },
2914     { "y", HAS_ARG, {(void*)opt_height}, "force displayed height", "height" },
2915     { "s", HAS_ARG | OPT_VIDEO, {(void*)opt_frame_size}, "set frame size (WxH or abbreviation)", "size" },
2916     { "fs", OPT_BOOL, {(void*)&is_full_screen}, "force full screen" },
2917     { "an", OPT_BOOL, {(void*)&audio_disable}, "disable audio" },
2918     { "vn", OPT_BOOL, {(void*)&video_disable}, "disable video" },
2919     { "ast", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&wanted_stream[AVMEDIA_TYPE_AUDIO]}, "select desired audio stream", "stream_number" },
2920     { "vst", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&wanted_stream[AVMEDIA_TYPE_VIDEO]}, "select desired video stream", "stream_number" },
2921     { "sst", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&wanted_stream[AVMEDIA_TYPE_SUBTITLE]}, "select desired subtitle stream", "stream_number" },
2922     { "ss", HAS_ARG, {(void*)&opt_seek}, "seek to a given position in seconds", "pos" },
2923     { "t", HAS_ARG, {(void*)&opt_duration}, "play \"duration\" seconds of audio/video", "duration" },
2924     { "bytes", OPT_INT | HAS_ARG, {(void*)&seek_by_bytes}, "seek by bytes 0=off 1=on -1=auto", "val" },
2925     { "nodisp", OPT_BOOL, {(void*)&display_disable}, "disable graphical display" },
2926     { "f", HAS_ARG, {(void*)opt_format}, "force format", "fmt" },
2927     { "pix_fmt", HAS_ARG | OPT_EXPERT | OPT_VIDEO, {(void*)opt_frame_pix_fmt}, "set pixel format", "format" },
2928     { "stats", OPT_BOOL | OPT_EXPERT, {(void*)&show_status}, "show status", "" },
2929     { "bug", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&workaround_bugs}, "workaround bugs", "" },
2930     { "fast", OPT_BOOL | OPT_EXPERT, {(void*)&fast}, "non spec compliant optimizations", "" },
2931     { "genpts", OPT_BOOL | OPT_EXPERT, {(void*)&genpts}, "generate pts", "" },
2932     { "drp", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&decoder_reorder_pts}, "let decoder reorder pts 0=off 1=on -1=auto", ""},
2933     { "lowres", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&lowres}, "decode at reduced resolution", "" },
2934     { "skiploop", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&skip_loop_filter}, "skip loop filtering for the selected frames", "" },
2935     { "skipframe", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&skip_frame}, "skip decoding of the selected frames", "" },
2936     { "skipidct", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&skip_idct}, "skip the IDCT for the selected frames", "" },
2937     { "idct", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&idct}, "set idct algo",  "algo" },
2938     { "er", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&error_recognition}, "set error detection threshold (0-4)",  "threshold" },
2939     { "ec", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&error_concealment}, "set error concealment options",  "bit_mask" },
2940     { "sync", HAS_ARG | OPT_EXPERT, {(void*)opt_sync}, "set audio-video sync. type (type=audio/video/ext)", "type" },
2941     { "threads", HAS_ARG | OPT_EXPERT, {(void*)opt_thread_count}, "thread count", "count" },
2942     { "autoexit", OPT_BOOL | OPT_EXPERT, {(void*)&autoexit}, "exit at the end", "" },
2943     { "exitonkeydown", OPT_BOOL | OPT_EXPERT, {(void*)&exit_on_keydown}, "exit on key down", "" },
2944     { "exitonmousedown", OPT_BOOL | OPT_EXPERT, {(void*)&exit_on_mousedown}, "exit on mouse down", "" },
2945     { "loop", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&loop}, "set number of times the playback shall be looped", "loop count" },
2946     { "framedrop", OPT_BOOL | OPT_EXPERT, {(void*)&framedrop}, "drop frames when cpu is too slow", "" },
2947     { "window_title", OPT_STRING | HAS_ARG, {(void*)&window_title}, "set window title", "window title" },
2948 #if CONFIG_AVFILTER
2949     { "vf", OPT_STRING | HAS_ARG, {(void*)&vfilters}, "video filters", "filter list" },
2950 #endif
2951     { "rdftspeed", OPT_INT | HAS_ARG| OPT_AUDIO | OPT_EXPERT, {(void*)&rdftspeed}, "rdft speed", "msecs" },
2952     { "showmode", HAS_ARG, {(void*)opt_show_mode}, "select show mode (0 = video, 1 = waves, 2 = RDFT)", "mode" },
2953     { "default", HAS_ARG | OPT_AUDIO | OPT_VIDEO | OPT_EXPERT, {(void*)opt_default}, "generic catch all option", "" },
2954     { "i", HAS_ARG, {(void *)opt_input_file}, "read specified file", "input_file"},
2955     { NULL, },
2956 };
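
/*
 * The table above is consumed by parse_options() from cmdutils.  A typical
 * command line combining several of these options (hypothetical file name):
 *
 *     ffplay -ss 00:01:30 -t 10 -sync audio -autoexit movie.ts
 *
 * seeks to 1:30, plays 10 seconds with the audio clock as master, and exits
 * when playback ends.
 */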
2957
2958 static void show_usage(void)
2959 {
2960     printf("Simple media player\n");
2961     printf("usage: ffplay [options] input_file\n");
2962     printf("\n");
2963 }
2964
2965 static void show_help(void)
2966 {
2967     av_log_set_callback(log_callback_help);
2968     show_usage();
2969     show_help_options(options, "Main options:\n",
2970                       OPT_EXPERT, 0);
2971     show_help_options(options, "\nAdvanced options:\n",
2972                       OPT_EXPERT, OPT_EXPERT);
2973     printf("\n");
2974     av_opt_show2(avcodec_opts[0], NULL,
2975                  AV_OPT_FLAG_DECODING_PARAM, 0);
2976     printf("\n");
2977     av_opt_show2(avformat_opts, NULL,
2978                  AV_OPT_FLAG_DECODING_PARAM, 0);
2979 #if !CONFIG_AVFILTER
2980     printf("\n");
2981     av_opt_show2(sws_opts, NULL,
2982                  AV_OPT_FLAG_ENCODING_PARAM, 0);
2983 #endif
2984     printf("\nWhile playing:\n"
2985            "q, ESC              quit\n"
2986            "f                   toggle full screen\n"
2987            "p, SPC              pause\n"
2988            "a                   cycle audio channel\n"
2989            "v                   cycle video channel\n"
2990            "t                   cycle subtitle channel\n"
2991            "w                   show audio waves\n"
2992            "s                   activate frame-step mode\n"
2993            "left/right          seek backward/forward 10 seconds\n"
2994            "down/up             seek backward/forward 1 minute\n"
2995            "mouse click         seek to percentage in file corresponding to fraction of width\n"
2996            );
2997 }
2998
2999 /* Called from the main */
3000 int main(int argc, char **argv)
3001 {
3002     int flags;
3003
3004     av_log_set_flags(AV_LOG_SKIP_REPEATED);
3005
3006     /* register all codecs, demuxers and protocols */
3007     avcodec_register_all();
3008 #if CONFIG_AVDEVICE
3009     avdevice_register_all();
3010 #endif
3011 #if CONFIG_AVFILTER
3012     avfilter_register_all();
3013 #endif
3014     av_register_all();
3015
3016     init_opts();
3017
3018     show_banner();
3019
3020     parse_options(argc, argv, options, opt_input_file);
3021
3022     if (!input_filename) {
3023         show_usage();
3024         fprintf(stderr, "An input file must be specified\n");
3025         fprintf(stderr, "Use -h to get full help or, even better, run 'man ffplay'\n");
3026         exit(1);
3027     }
3028
3029     if (display_disable) {
3030         video_disable = 1;
3031     }
3032     flags = SDL_INIT_VIDEO | SDL_INIT_AUDIO | SDL_INIT_TIMER;
3033 #if !defined(__MINGW32__) && !defined(__APPLE__)
3034     flags |= SDL_INIT_EVENTTHREAD; /* Not supported on Windows or Mac OS X */
3035 #endif
3036     if (SDL_Init (flags)) {
3037         fprintf(stderr, "Could not initialize SDL - %s\n", SDL_GetError());
3038         exit(1);
3039     }
3040
3041     if (!display_disable) {
3042 #if HAVE_SDL_VIDEO_SIZE
3043         const SDL_VideoInfo *vi = SDL_GetVideoInfo();
3044         fs_screen_width = vi->current_w;
3045         fs_screen_height = vi->current_h;
3046 #endif
3047     }
3048
3049     SDL_EventState(SDL_ACTIVEEVENT, SDL_IGNORE);
3050     SDL_EventState(SDL_SYSWMEVENT, SDL_IGNORE);
3051     SDL_EventState(SDL_USEREVENT, SDL_IGNORE);
3052
3053     av_init_packet(&flush_pkt);
3054     flush_pkt.data= "FLUSH";
3055
3056     cur_stream = stream_open(input_filename, file_iformat);
3057
3058     event_loop();
3059
3060     /* not reached: event_loop() never returns */
3061
3062     return 0;
3063 }