1 /*
2  * ffplay : Simple Media Player based on the FFmpeg libraries
3  * Copyright (c) 2003 Fabrice Bellard
4  *
5  * This file is part of FFmpeg.
6  *
7  * FFmpeg is free software; you can redistribute it and/or
8  * modify it under the terms of the GNU Lesser General Public
9  * License as published by the Free Software Foundation; either
10  * version 2.1 of the License, or (at your option) any later version.
11  *
12  * FFmpeg is distributed in the hope that it will be useful,
13  * but WITHOUT ANY WARRANTY; without even the implied warranty of
14  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
15  * Lesser General Public License for more details.
16  *
17  * You should have received a copy of the GNU Lesser General Public
18  * License along with FFmpeg; if not, write to the Free Software
19  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
20  */
21
22 #include "config.h"
23 #include <inttypes.h>
24 #include <math.h>
25 #include <limits.h>
26 #include "libavutil/avstring.h"
27 #include "libavutil/colorspace.h"
28 #include "libavutil/pixdesc.h"
29 #include "libavutil/imgutils.h"
30 #include "libavutil/parseutils.h"
31 #include "libavutil/samplefmt.h"
32 #include "libavutil/avassert.h"
33 #include "libavformat/avformat.h"
34 #include "libavdevice/avdevice.h"
35 #include "libswscale/swscale.h"
36 #include "libavcodec/audioconvert.h"
37 #include "libavutil/opt.h"
38 #include "libavcodec/avfft.h"
39
40 #if CONFIG_AVFILTER
41 # include "libavfilter/avcodec.h"
42 # include "libavfilter/avfilter.h"
43 # include "libavfilter/avfiltergraph.h"
44 #endif
45
46 #include <SDL.h>
47 #include <SDL_thread.h>
48
49 #include "cmdutils.h"
50
51 #include <unistd.h>
52 #include <assert.h>
53
54 const char program_name[] = "ffplay";
55 const int program_birth_year = 2003;
56
57 //#define DEBUG
58 //#define DEBUG_SYNC
59
60 #define MAX_QUEUE_SIZE (15 * 1024 * 1024)
61 #define MIN_AUDIOQ_SIZE (20 * 16 * 1024)
62 #define MIN_FRAMES 5
63
64 /* SDL audio buffer size, in samples. Should be small to have precise
65    A/V sync as SDL does not have hardware buffer fullness info. */
66 #define SDL_AUDIO_BUFFER_SIZE 1024
67
68 /* no AV sync correction is done if below the AV sync threshold */
69 #define AV_SYNC_THRESHOLD 0.01
70 /* no AV correction is done if the error is too big */
71 #define AV_NOSYNC_THRESHOLD 10.0
72
73 #define FRAME_SKIP_FACTOR 0.05
74
75 /* maximum audio speed change to get correct sync */
76 #define SAMPLE_CORRECTION_PERCENT_MAX 10
77
78 /* we use about AUDIO_DIFF_AVG_NB A-V differences to make the average */
79 #define AUDIO_DIFF_AVG_NB   20
80
81 /* NOTE: the size must be big enough to compensate for the hardware audio buffer size */
82 #define SAMPLE_ARRAY_SIZE (2*65536)
83
84 static int sws_flags = SWS_BICUBIC;
85
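/* thread-safe FIFO of demuxed packets, shared between the read thread and the
   decoder threads; nb_packets/size track how much is buffered and abort_request
   wakes up any waiting consumer */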
86 typedef struct PacketQueue {
87     AVPacketList *first_pkt, *last_pkt;
88     int nb_packets;
89     int size;
90     int abort_request;
91     SDL_mutex *mutex;
92     SDL_cond *cond;
93 } PacketQueue;
94
95 #define VIDEO_PICTURE_QUEUE_SIZE 2
96 #define SUBPICTURE_QUEUE_SIZE 4
97
98 typedef struct VideoPicture {
99     double pts;                                  ///<presentation time stamp for this picture
100     double target_clock;                         ///<av_gettime() time at which this should be displayed ideally
101     int64_t pos;                                 ///<byte position in file
102     SDL_Overlay *bmp;
103     int width, height; /* source height & width */
104     int allocated;
105     enum PixelFormat pix_fmt;
106
107 #if CONFIG_AVFILTER
108     AVFilterBufferRef *picref;
109 #endif
110 } VideoPicture;
111
112 typedef struct SubPicture {
113     double pts; /* presentation time stamp for this picture */
114     AVSubtitle sub;
115 } SubPicture;
116
117 enum {
118     AV_SYNC_AUDIO_MASTER, /* default choice */
119     AV_SYNC_VIDEO_MASTER,
120     AV_SYNC_EXTERNAL_CLOCK, /* synchronize to an external clock */
121 };
122
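/* the whole state of the player for one input file: demuxer context, the selected
   audio/video/subtitle streams with their packet queues, the decoded picture and
   subtitle queues, and the clocks used for A/V synchronisation */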
123 typedef struct VideoState {
124     SDL_Thread *read_tid;
125     SDL_Thread *video_tid;
126     SDL_Thread *refresh_tid;
127     AVInputFormat *iformat;
128     int no_background;
129     int abort_request;
130     int paused;
131     int last_paused;
132     int seek_req;
133     int seek_flags;
134     int64_t seek_pos;
135     int64_t seek_rel;
136     int read_pause_return;
137     AVFormatContext *ic;
138
139     int audio_stream;
140
141     int av_sync_type;
142     double external_clock; /* external clock base */
143     int64_t external_clock_time;
144
145     double audio_clock;
146     double audio_diff_cum; /* used for AV difference average computation */
147     double audio_diff_avg_coef;
148     double audio_diff_threshold;
149     int audio_diff_avg_count;
150     AVStream *audio_st;
151     PacketQueue audioq;
152     int audio_hw_buf_size;
153     /* samples output by the codec. we reserve more space for avsync
154        compensation */
155     DECLARE_ALIGNED(16,uint8_t,audio_buf1)[(AVCODEC_MAX_AUDIO_FRAME_SIZE * 3) / 2];
156     DECLARE_ALIGNED(16,uint8_t,audio_buf2)[(AVCODEC_MAX_AUDIO_FRAME_SIZE * 3) / 2];
157     uint8_t *audio_buf;
158     unsigned int audio_buf_size; /* in bytes */
159     int audio_buf_index; /* in bytes */
160     AVPacket audio_pkt_temp;
161     AVPacket audio_pkt;
162     enum AVSampleFormat audio_src_fmt;
163     AVAudioConvert *reformat_ctx;
164
165     enum ShowMode {
166         SHOW_MODE_NONE = -1, SHOW_MODE_VIDEO = 0, SHOW_MODE_WAVES, SHOW_MODE_RDFT, SHOW_MODE_NB
167     } show_mode;
168     int16_t sample_array[SAMPLE_ARRAY_SIZE];
169     int sample_array_index;
170     int last_i_start;
171     RDFTContext *rdft;
172     int rdft_bits;
173     FFTSample *rdft_data;
174     int xpos;
175
176     SDL_Thread *subtitle_tid;
177     int subtitle_stream;
178     int subtitle_stream_changed;
179     AVStream *subtitle_st;
180     PacketQueue subtitleq;
181     SubPicture subpq[SUBPICTURE_QUEUE_SIZE];
182     int subpq_size, subpq_rindex, subpq_windex;
183     SDL_mutex *subpq_mutex;
184     SDL_cond *subpq_cond;
185
186     double frame_timer;
187     double frame_last_pts;
188     double frame_last_delay;
189     double video_clock;                          ///<pts of last decoded frame / predicted pts of next decoded frame
190     int video_stream;
191     AVStream *video_st;
192     PacketQueue videoq;
193     double video_current_pts;                    ///<current displayed pts (different from video_clock if frame fifos are used)
194     double video_current_pts_drift;              ///<video_current_pts - time (av_gettime) at which we updated video_current_pts - used to have running video pts
195     int64_t video_current_pos;                   ///<current displayed file pos
196     VideoPicture pictq[VIDEO_PICTURE_QUEUE_SIZE];
197     int pictq_size, pictq_rindex, pictq_windex;
198     SDL_mutex *pictq_mutex;
199     SDL_cond *pictq_cond;
200 #if !CONFIG_AVFILTER
201     struct SwsContext *img_convert_ctx;
202 #endif
203
204     char filename[1024];
205     int width, height, xleft, ytop;
206
207 #if CONFIG_AVFILTER
208     AVFilterContext *out_video_filter;          ///<the last filter in the video chain
209 #endif
210
211     float skip_frames;
212     float skip_frames_index;
213     int refresh;
214 } VideoState;
215
216 static void show_help(void);
217
218 /* options specified by the user */
219 static AVInputFormat *file_iformat;
220 static const char *input_filename;
221 static const char *window_title;
222 static int fs_screen_width;
223 static int fs_screen_height;
224 static int screen_width = 0;
225 static int screen_height = 0;
226 static int frame_width = 0;
227 static int frame_height = 0;
228 static enum PixelFormat frame_pix_fmt = PIX_FMT_NONE;
229 static int audio_disable;
230 static int video_disable;
231 static int wanted_stream[AVMEDIA_TYPE_NB]={
232     [AVMEDIA_TYPE_AUDIO]=-1,
233     [AVMEDIA_TYPE_VIDEO]=-1,
234     [AVMEDIA_TYPE_SUBTITLE]=-1,
235 };
236 static int seek_by_bytes=-1;
237 static int display_disable;
238 static int show_status = 1;
239 static int av_sync_type = AV_SYNC_AUDIO_MASTER;
240 static int64_t start_time = AV_NOPTS_VALUE;
241 static int64_t duration = AV_NOPTS_VALUE;
242 static int debug = 0;
243 static int debug_mv = 0;
244 static int step = 0;
245 static int thread_count = 1;
246 static int workaround_bugs = 1;
247 static int fast = 0;
248 static int genpts = 0;
249 static int lowres = 0;
250 static int idct = FF_IDCT_AUTO;
251 static enum AVDiscard skip_frame= AVDISCARD_DEFAULT;
252 static enum AVDiscard skip_idct= AVDISCARD_DEFAULT;
253 static enum AVDiscard skip_loop_filter= AVDISCARD_DEFAULT;
254 static int error_recognition = FF_ER_CAREFUL;
255 static int error_concealment = 3;
256 static int decoder_reorder_pts= -1;
257 static int autoexit;
258 static int exit_on_keydown;
259 static int exit_on_mousedown;
260 static int loop=1;
261 static int framedrop=1;
262 static enum ShowMode show_mode = SHOW_MODE_NONE;
263
264 static int rdftspeed=20;
265 #if CONFIG_AVFILTER
266 static char *vfilters = NULL;
267 #endif
268
269 /* current context */
270 static int is_full_screen;
271 static VideoState *cur_stream;
272 static int64_t audio_callback_time;
273
274 static AVPacket flush_pkt;
275
276 #define FF_ALLOC_EVENT   (SDL_USEREVENT)
277 #define FF_REFRESH_EVENT (SDL_USEREVENT + 1)
278 #define FF_QUIT_EVENT    (SDL_USEREVENT + 2)
279
280 static SDL_Surface *screen;
281
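/* append a packet to the queue; the packet data is duplicated unless it is the
   special flush packet. Returns 0 on success, -1 on allocation failure. */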
282 static int packet_queue_put(PacketQueue *q, AVPacket *pkt)
283 {
284     AVPacketList *pkt1;
285
286     /* duplicate the packet */
287     if (pkt!=&flush_pkt && av_dup_packet(pkt) < 0)
288         return -1;
289
290     pkt1 = av_malloc(sizeof(AVPacketList));
291     if (!pkt1)
292         return -1;
293     pkt1->pkt = *pkt;
294     pkt1->next = NULL;
295
296
297     SDL_LockMutex(q->mutex);
298
299     if (!q->last_pkt)
300
301         q->first_pkt = pkt1;
302     else
303         q->last_pkt->next = pkt1;
304     q->last_pkt = pkt1;
305     q->nb_packets++;
306     q->size += pkt1->pkt.size + sizeof(*pkt1);
307     /* XXX: should duplicate packet data in DV case */
308     SDL_CondSignal(q->cond);
309
310     SDL_UnlockMutex(q->mutex);
311     return 0;
312 }
313
314 /* packet queue handling */
315 static void packet_queue_init(PacketQueue *q)
316 {
317     memset(q, 0, sizeof(PacketQueue));
318     q->mutex = SDL_CreateMutex();
319     q->cond = SDL_CreateCond();
320     packet_queue_put(q, &flush_pkt);
321 }
322
323 static void packet_queue_flush(PacketQueue *q)
324 {
325     AVPacketList *pkt, *pkt1;
326
327     SDL_LockMutex(q->mutex);
328     for(pkt = q->first_pkt; pkt != NULL; pkt = pkt1) {
329         pkt1 = pkt->next;
330         av_free_packet(&pkt->pkt);
331         av_freep(&pkt);
332     }
333     q->last_pkt = NULL;
334     q->first_pkt = NULL;
335     q->nb_packets = 0;
336     q->size = 0;
337     SDL_UnlockMutex(q->mutex);
338 }
339
340 static void packet_queue_end(PacketQueue *q)
341 {
342     packet_queue_flush(q);
343     SDL_DestroyMutex(q->mutex);
344     SDL_DestroyCond(q->cond);
345 }
346
347 static void packet_queue_abort(PacketQueue *q)
348 {
349     SDL_LockMutex(q->mutex);
350
351     q->abort_request = 1;
352
353     SDL_CondSignal(q->cond);
354
355     SDL_UnlockMutex(q->mutex);
356 }
357
358 /* return < 0 if aborted, 0 if no packet and > 0 if packet.  */
359 static int packet_queue_get(PacketQueue *q, AVPacket *pkt, int block)
360 {
361     AVPacketList *pkt1;
362     int ret;
363
364     SDL_LockMutex(q->mutex);
365
366     for(;;) {
367         if (q->abort_request) {
368             ret = -1;
369             break;
370         }
371
372         pkt1 = q->first_pkt;
373         if (pkt1) {
374             q->first_pkt = pkt1->next;
375             if (!q->first_pkt)
376                 q->last_pkt = NULL;
377             q->nb_packets--;
378             q->size -= pkt1->pkt.size + sizeof(*pkt1);
379             *pkt = pkt1->pkt;
380             av_free(pkt1);
381             ret = 1;
382             break;
383         } else if (!block) {
384             ret = 0;
385             break;
386         } else {
387             SDL_CondWait(q->cond, q->mutex);
388         }
389     }
390     SDL_UnlockMutex(q->mutex);
391     return ret;
392 }
393
394 static inline void fill_rectangle(SDL_Surface *screen,
395                                   int x, int y, int w, int h, int color)
396 {
397     SDL_Rect rect;
398     rect.x = x;
399     rect.y = y;
400     rect.w = w;
401     rect.h = h;
402     SDL_FillRect(screen, &rect, color);
403 }
404
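/* blend 'newp' over 'oldp' with alpha 'a'; 's' is an extra shift used when 'newp'
   is the sum of 2^s samples, so summed chroma can be averaged and blended in one step */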
405 #define ALPHA_BLEND(a, oldp, newp, s)\
406 ((((oldp << s) * (255 - (a))) + (newp * (a))) / (255 << s))
407
408 #define RGBA_IN(r, g, b, a, s)\
409 {\
410     unsigned int v = ((const uint32_t *)(s))[0];\
411     a = (v >> 24) & 0xff;\
412     r = (v >> 16) & 0xff;\
413     g = (v >> 8) & 0xff;\
414     b = v & 0xff;\
415 }
416
417 #define YUVA_IN(y, u, v, a, s, pal)\
418 {\
419     unsigned int val = ((const uint32_t *)(pal))[*(const uint8_t*)(s)];\
420     a = (val >> 24) & 0xff;\
421     y = (val >> 16) & 0xff;\
422     u = (val >> 8) & 0xff;\
423     v = val & 0xff;\
424 }
425
426 #define YUVA_OUT(d, y, u, v, a)\
427 {\
428     ((uint32_t *)(d))[0] = (a << 24) | (y << 16) | (u << 8) | v;\
429 }
430
431
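/* subtitle bitmaps are 8 bits per pixel (palette indices), hence BPP == 1 */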
432 #define BPP 1
433
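/* alpha-blend a palettized subtitle rectangle onto a YUV420P picture: luma is
   blended per pixel, chroma per 2x2 block (with special handling of odd edges),
   and the rectangle is clipped to the image dimensions */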
434 static void blend_subrect(AVPicture *dst, const AVSubtitleRect *rect, int imgw, int imgh)
435 {
436     int wrap, wrap3, width2, skip2;
437     int y, u, v, a, u1, v1, a1, w, h;
438     uint8_t *lum, *cb, *cr;
439     const uint8_t *p;
440     const uint32_t *pal;
441     int dstx, dsty, dstw, dsth;
442
443     dstw = av_clip(rect->w, 0, imgw);
444     dsth = av_clip(rect->h, 0, imgh);
445     dstx = av_clip(rect->x, 0, imgw - dstw);
446     dsty = av_clip(rect->y, 0, imgh - dsth);
447     lum = dst->data[0] + dsty * dst->linesize[0];
448     cb = dst->data[1] + (dsty >> 1) * dst->linesize[1];
449     cr = dst->data[2] + (dsty >> 1) * dst->linesize[2];
450
451     width2 = ((dstw + 1) >> 1) + (dstx & ~dstw & 1);
452     skip2 = dstx >> 1;
453     wrap = dst->linesize[0];
454     wrap3 = rect->pict.linesize[0];
455     p = rect->pict.data[0];
456     pal = (const uint32_t *)rect->pict.data[1];  /* Now in YCrCb! */
457
458     if (dsty & 1) {
459         lum += dstx;
460         cb += skip2;
461         cr += skip2;
462
463         if (dstx & 1) {
464             YUVA_IN(y, u, v, a, p, pal);
465             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
466             cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
467             cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
468             cb++;
469             cr++;
470             lum++;
471             p += BPP;
472         }
473         for(w = dstw - (dstx & 1); w >= 2; w -= 2) {
474             YUVA_IN(y, u, v, a, p, pal);
475             u1 = u;
476             v1 = v;
477             a1 = a;
478             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
479
480             YUVA_IN(y, u, v, a, p + BPP, pal);
481             u1 += u;
482             v1 += v;
483             a1 += a;
484             lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
485             cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
486             cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
487             cb++;
488             cr++;
489             p += 2 * BPP;
490             lum += 2;
491         }
492         if (w) {
493             YUVA_IN(y, u, v, a, p, pal);
494             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
495             cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
496             cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
497             p++;
498             lum++;
499         }
500         p += wrap3 - dstw * BPP;
501         lum += wrap - dstw - dstx;
502         cb += dst->linesize[1] - width2 - skip2;
503         cr += dst->linesize[2] - width2 - skip2;
504     }
505     for(h = dsth - (dsty & 1); h >= 2; h -= 2) {
506         lum += dstx;
507         cb += skip2;
508         cr += skip2;
509
510         if (dstx & 1) {
511             YUVA_IN(y, u, v, a, p, pal);
512             u1 = u;
513             v1 = v;
514             a1 = a;
515             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
516             p += wrap3;
517             lum += wrap;
518             YUVA_IN(y, u, v, a, p, pal);
519             u1 += u;
520             v1 += v;
521             a1 += a;
522             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
523             cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
524             cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
525             cb++;
526             cr++;
527             p += -wrap3 + BPP;
528             lum += -wrap + 1;
529         }
530         for(w = dstw - (dstx & 1); w >= 2; w -= 2) {
531             YUVA_IN(y, u, v, a, p, pal);
532             u1 = u;
533             v1 = v;
534             a1 = a;
535             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
536
537             YUVA_IN(y, u, v, a, p + BPP, pal);
538             u1 += u;
539             v1 += v;
540             a1 += a;
541             lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
542             p += wrap3;
543             lum += wrap;
544
545             YUVA_IN(y, u, v, a, p, pal);
546             u1 += u;
547             v1 += v;
548             a1 += a;
549             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
550
551             YUVA_IN(y, u, v, a, p + BPP, pal);
552             u1 += u;
553             v1 += v;
554             a1 += a;
555             lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
556
557             cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 2);
558             cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 2);
559
560             cb++;
561             cr++;
562             p += -wrap3 + 2 * BPP;
563             lum += -wrap + 2;
564         }
565         if (w) {
566             YUVA_IN(y, u, v, a, p, pal);
567             u1 = u;
568             v1 = v;
569             a1 = a;
570             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
571             p += wrap3;
572             lum += wrap;
573             YUVA_IN(y, u, v, a, p, pal);
574             u1 += u;
575             v1 += v;
576             a1 += a;
577             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
578             cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
579             cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
580             cb++;
581             cr++;
582             p += -wrap3 + BPP;
583             lum += -wrap + 1;
584         }
585         p += wrap3 + (wrap3 - dstw * BPP);
586         lum += wrap + (wrap - dstw - dstx);
587         cb += dst->linesize[1] - width2 - skip2;
588         cr += dst->linesize[2] - width2 - skip2;
589     }
590     /* handle odd height */
591     if (h) {
592         lum += dstx;
593         cb += skip2;
594         cr += skip2;
595
596         if (dstx & 1) {
597             YUVA_IN(y, u, v, a, p, pal);
598             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
599             cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
600             cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
601             cb++;
602             cr++;
603             lum++;
604             p += BPP;
605         }
606         for(w = dstw - (dstx & 1); w >= 2; w -= 2) {
607             YUVA_IN(y, u, v, a, p, pal);
608             u1 = u;
609             v1 = v;
610             a1 = a;
611             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
612
613             YUVA_IN(y, u, v, a, p + BPP, pal);
614             u1 += u;
615             v1 += v;
616             a1 += a;
617             lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
618             cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
619             cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
620             cb++;
621             cr++;
622             p += 2 * BPP;
623             lum += 2;
624         }
625         if (w) {
626             YUVA_IN(y, u, v, a, p, pal);
627             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
628             cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
629             cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
630         }
631     }
632 }
633
634 static void free_subpicture(SubPicture *sp)
635 {
636     avsubtitle_free(&sp->sub);
637 }
638
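/* display the picture at the read index of the picture queue: compute the display
   aspect ratio, blend any pending subtitle into the YUV overlay, letterbox the
   result into the window and show it with SDL_DisplayYUVOverlay() */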
639 static void video_image_display(VideoState *is)
640 {
641     VideoPicture *vp;
642     SubPicture *sp;
643     AVPicture pict;
644     float aspect_ratio;
645     int width, height, x, y;
646     SDL_Rect rect;
647     int i;
648
649     vp = &is->pictq[is->pictq_rindex];
650     if (vp->bmp) {
651 #if CONFIG_AVFILTER
652          if (vp->picref->video->sample_aspect_ratio.num == 0)
653              aspect_ratio = 0;
654          else
655              aspect_ratio = av_q2d(vp->picref->video->sample_aspect_ratio);
656 #else
657
658         /* XXX: use variable in the frame */
659         if (is->video_st->sample_aspect_ratio.num)
660             aspect_ratio = av_q2d(is->video_st->sample_aspect_ratio);
661         else if (is->video_st->codec->sample_aspect_ratio.num)
662             aspect_ratio = av_q2d(is->video_st->codec->sample_aspect_ratio);
663         else
664             aspect_ratio = 0;
665 #endif
666         if (aspect_ratio <= 0.0)
667             aspect_ratio = 1.0;
668         aspect_ratio *= (float)vp->width / (float)vp->height;
669
670         if (is->subtitle_st) {
671             if (is->subpq_size > 0) {
672                 sp = &is->subpq[is->subpq_rindex];
673
674                 if (vp->pts >= sp->pts + ((float) sp->sub.start_display_time / 1000)) {
675                     SDL_LockYUVOverlay (vp->bmp);
676
677                     pict.data[0] = vp->bmp->pixels[0];
678                     pict.data[1] = vp->bmp->pixels[2];
679                     pict.data[2] = vp->bmp->pixels[1];
680
681                     pict.linesize[0] = vp->bmp->pitches[0];
682                     pict.linesize[1] = vp->bmp->pitches[2];
683                     pict.linesize[2] = vp->bmp->pitches[1];
684
685                     for (i = 0; i < sp->sub.num_rects; i++)
686                         blend_subrect(&pict, sp->sub.rects[i],
687                                       vp->bmp->w, vp->bmp->h);
688
689                     SDL_UnlockYUVOverlay (vp->bmp);
690                 }
691             }
692         }
693
694
695         /* XXX: we assume the screen has a 1:1 pixel aspect ratio */
696         height = is->height;
697         width = ((int)rint(height * aspect_ratio)) & ~1;
698         if (width > is->width) {
699             width = is->width;
700             height = ((int)rint(width / aspect_ratio)) & ~1;
701         }
702         x = (is->width - width) / 2;
703         y = (is->height - height) / 2;
704         is->no_background = 0;
705         rect.x = is->xleft + x;
706         rect.y = is->ytop  + y;
707         rect.w = FFMAX(width,  1);
708         rect.h = FFMAX(height, 1);
709         SDL_DisplayYUVOverlay(vp->bmp, &rect);
710     }
711 }
712
713 /* get the current audio output buffer size, in bytes. With SDL, we
714    cannot get precise information about how much of it has been played */
715 static int audio_write_get_buf_size(VideoState *is)
716 {
717     return is->audio_buf_size - is->audio_buf_index;
718 }
719
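/* positive modulo: always returns a value in [0, b), e.g. compute_mod(-3, 8) == 5 */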
720 static inline int compute_mod(int a, int b)
721 {
722     return a < 0 ? a%b + b : a%b;
723 }
724
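/* visualize the audio: in SHOW_MODE_WAVES draw the waveform of each channel,
   otherwise draw an RDFT-based spectrum, one column per refresh; the display is
   centered on the samples currently being output */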
725 static void video_audio_display(VideoState *s)
726 {
727     int i, i_start, x, y1, y, ys, delay, n, nb_display_channels;
728     int ch, channels, h, h2, bgcolor, fgcolor;
729     int16_t time_diff;
730     int rdft_bits, nb_freq;
731
732     for(rdft_bits=1; (1<<rdft_bits)<2*s->height; rdft_bits++)
733         ;
734     nb_freq= 1<<(rdft_bits-1);
735
736     /* compute display index : center on currently output samples */
737     channels = s->audio_st->codec->channels;
738     nb_display_channels = channels;
739     if (!s->paused) {
740         int data_used= s->show_mode == SHOW_MODE_WAVES ? s->width : (2*nb_freq);
741         n = 2 * channels;
742         delay = audio_write_get_buf_size(s);
743         delay /= n;
744
745         /* to be more precise, we take into account the time spent since
746            the last buffer computation */
747         if (audio_callback_time) {
748             time_diff = av_gettime() - audio_callback_time;
749             delay -= (time_diff * s->audio_st->codec->sample_rate) / 1000000;
750         }
751
752         delay += 2*data_used;
753         if (delay < data_used)
754             delay = data_used;
755
756         i_start= x = compute_mod(s->sample_array_index - delay * channels, SAMPLE_ARRAY_SIZE);
757         if (s->show_mode == SHOW_MODE_WAVES) {
758             h= INT_MIN;
759             for(i=0; i<1000; i+=channels){
760                 int idx= (SAMPLE_ARRAY_SIZE + x - i) % SAMPLE_ARRAY_SIZE;
761                 int a= s->sample_array[idx];
762                 int b= s->sample_array[(idx + 4*channels)%SAMPLE_ARRAY_SIZE];
763                 int c= s->sample_array[(idx + 5*channels)%SAMPLE_ARRAY_SIZE];
764                 int d= s->sample_array[(idx + 9*channels)%SAMPLE_ARRAY_SIZE];
765                 int score= a-d;
766                 if(h<score && (b^c)<0){
767                     h= score;
768                     i_start= idx;
769                 }
770             }
771         }
772
773         s->last_i_start = i_start;
774     } else {
775         i_start = s->last_i_start;
776     }
777
778     bgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0x00);
779     if (s->show_mode == SHOW_MODE_WAVES) {
780         fill_rectangle(screen,
781                        s->xleft, s->ytop, s->width, s->height,
782                        bgcolor);
783
784         fgcolor = SDL_MapRGB(screen->format, 0xff, 0xff, 0xff);
785
786         /* total height for one channel */
787         h = s->height / nb_display_channels;
788         /* graph height / 2 */
789         h2 = (h * 9) / 20;
790         for(ch = 0;ch < nb_display_channels; ch++) {
791             i = i_start + ch;
792             y1 = s->ytop + ch * h + (h / 2); /* position of center line */
793             for(x = 0; x < s->width; x++) {
794                 y = (s->sample_array[i] * h2) >> 15;
795                 if (y < 0) {
796                     y = -y;
797                     ys = y1 - y;
798                 } else {
799                     ys = y1;
800                 }
801                 fill_rectangle(screen,
802                                s->xleft + x, ys, 1, y,
803                                fgcolor);
804                 i += channels;
805                 if (i >= SAMPLE_ARRAY_SIZE)
806                     i -= SAMPLE_ARRAY_SIZE;
807             }
808         }
809
810         fgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0xff);
811
812         for(ch = 1;ch < nb_display_channels; ch++) {
813             y = s->ytop + ch * h;
814             fill_rectangle(screen,
815                            s->xleft, y, s->width, 1,
816                            fgcolor);
817         }
818         SDL_UpdateRect(screen, s->xleft, s->ytop, s->width, s->height);
819     }else{
820         nb_display_channels= FFMIN(nb_display_channels, 2);
821         if(rdft_bits != s->rdft_bits){
822             av_rdft_end(s->rdft);
823             av_free(s->rdft_data);
824             s->rdft = av_rdft_init(rdft_bits, DFT_R2C);
825             s->rdft_bits= rdft_bits;
826             s->rdft_data= av_malloc(4*nb_freq*sizeof(*s->rdft_data));
827         }
828         {
829             FFTSample *data[2];
830             for(ch = 0;ch < nb_display_channels; ch++) {
831                 data[ch] = s->rdft_data + 2*nb_freq*ch;
832                 i = i_start + ch;
833                 for(x = 0; x < 2*nb_freq; x++) {
834                     double w= (x-nb_freq)*(1.0/nb_freq);
835                     data[ch][x]= s->sample_array[i]*(1.0-w*w);
836                     i += channels;
837                     if (i >= SAMPLE_ARRAY_SIZE)
838                         i -= SAMPLE_ARRAY_SIZE;
839                 }
840                 av_rdft_calc(s->rdft, data[ch]);
841             }
842             // least efficient way to do this, we should of course access it directly, but it's more than fast enough
843             for(y=0; y<s->height; y++){
844                 double w= 1/sqrt(nb_freq);
845                 int a= sqrt(w*sqrt(data[0][2*y+0]*data[0][2*y+0] + data[0][2*y+1]*data[0][2*y+1]));
846                 int b= (nb_display_channels == 2 ) ? sqrt(w*sqrt(data[1][2*y+0]*data[1][2*y+0]
847                        + data[1][2*y+1]*data[1][2*y+1])) : a;
848                 a= FFMIN(a,255);
849                 b= FFMIN(b,255);
850                 fgcolor = SDL_MapRGB(screen->format, a, b, (a+b)/2);
851
852                 fill_rectangle(screen,
853                             s->xpos, s->height-y, 1, 1,
854                             fgcolor);
855             }
856         }
857         SDL_UpdateRect(screen, s->xpos, s->ytop, 1, s->height);
858         s->xpos++;
859         if(s->xpos >= s->width)
860             s->xpos= s->xleft;
861     }
862 }
863
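/* (re)open the SDL video surface; the size comes from the fullscreen mode, the
   user-requested screen_width/screen_height, the filter chain output or the codec
   dimensions, whichever is available first */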
864 static int video_open(VideoState *is){
865     int flags = SDL_HWSURFACE|SDL_ASYNCBLIT|SDL_HWACCEL;
866     int w,h;
867
868     if(is_full_screen) flags |= SDL_FULLSCREEN;
869     else               flags |= SDL_RESIZABLE;
870
871     if (is_full_screen && fs_screen_width) {
872         w = fs_screen_width;
873         h = fs_screen_height;
874     } else if(!is_full_screen && screen_width){
875         w = screen_width;
876         h = screen_height;
877 #if CONFIG_AVFILTER
878     }else if (is->out_video_filter && is->out_video_filter->inputs[0]){
879         w = is->out_video_filter->inputs[0]->w;
880         h = is->out_video_filter->inputs[0]->h;
881 #else
882     }else if (is->video_st && is->video_st->codec->width){
883         w = is->video_st->codec->width;
884         h = is->video_st->codec->height;
885 #endif
886     } else {
887         w = 640;
888         h = 480;
889     }
890     if(screen && is->width == screen->w && screen->w == w
891        && is->height== screen->h && screen->h == h)
892         return 0;
893
894 #ifndef __APPLE__
895     screen = SDL_SetVideoMode(w, h, 0, flags);
896 #else
897     /* setting bits_per_pixel = 0 or 32 causes blank video on OS X */
898     screen = SDL_SetVideoMode(w, h, 24, flags);
899 #endif
900     if (!screen) {
901         fprintf(stderr, "SDL: could not set video mode - exiting\n");
902         return -1;
903     }
904     if (!window_title)
905         window_title = input_filename;
906     SDL_WM_SetCaption(window_title, window_title);
907
908     is->width = screen->w;
909     is->height = screen->h;
910
911     return 0;
912 }
913
914 /* display the current picture, if any */
915 static void video_display(VideoState *is)
916 {
917     if(!screen)
918         video_open(cur_stream);
919     if (is->audio_st && is->show_mode != SHOW_MODE_VIDEO)
920         video_audio_display(is);
921     else if (is->video_st)
922         video_image_display(is);
923 }
924
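/* periodically push an FF_REFRESH_EVENT to the event loop so that it calls
   video_refresh(); sleeps rdftspeed ms between events when the audio visualization
   is shown, 5 ms otherwise */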
925 static int refresh_thread(void *opaque)
926 {
927     VideoState *is= opaque;
928     while(!is->abort_request){
929         SDL_Event event;
930         event.type = FF_REFRESH_EVENT;
931         event.user.data1 = opaque;
932         if(!is->refresh){
933             is->refresh=1;
934             SDL_PushEvent(&event);
935         }
936         //FIXME ideally we should wait for the correct time, but SDL's event passing is so slow it would be silly
937         usleep(is->audio_st && is->show_mode != SHOW_MODE_VIDEO ? rdftspeed*1000 : 5000);
938     }
939     return 0;
940 }
941
942 /* get the current audio clock value */
943 static double get_audio_clock(VideoState *is)
944 {
945     double pts;
946     int hw_buf_size, bytes_per_sec;
947     pts = is->audio_clock;
948     hw_buf_size = audio_write_get_buf_size(is);
949     bytes_per_sec = 0;
950     if (is->audio_st) {
951         bytes_per_sec = is->audio_st->codec->sample_rate *
952             2 * is->audio_st->codec->channels;
953     }
954     if (bytes_per_sec)
955         pts -= (double)hw_buf_size / bytes_per_sec;
956     return pts;
957 }
958
959 /* get the current video clock value */
960 static double get_video_clock(VideoState *is)
961 {
962     if (is->paused) {
963         return is->video_current_pts;
964     } else {
965         return is->video_current_pts_drift + av_gettime() / 1000000.0;
966     }
967 }
968
969 /* get the current external clock value */
970 static double get_external_clock(VideoState *is)
971 {
972     int64_t ti;
973     ti = av_gettime();
974     return is->external_clock + ((ti - is->external_clock_time) * 1e-6);
975 }
976
977 /* get the current master clock value */
978 static double get_master_clock(VideoState *is)
979 {
980     double val;
981
982     if (is->av_sync_type == AV_SYNC_VIDEO_MASTER) {
983         if (is->video_st)
984             val = get_video_clock(is);
985         else
986             val = get_audio_clock(is);
987     } else if (is->av_sync_type == AV_SYNC_AUDIO_MASTER) {
988         if (is->audio_st)
989             val = get_audio_clock(is);
990         else
991             val = get_video_clock(is);
992     } else {
993         val = get_external_clock(is);
994     }
995     return val;
996 }
997
998 /* seek in the stream */
999 static void stream_seek(VideoState *is, int64_t pos, int64_t rel, int seek_by_bytes)
1000 {
1001     if (!is->seek_req) {
1002         is->seek_pos = pos;
1003         is->seek_rel = rel;
1004         is->seek_flags &= ~AVSEEK_FLAG_BYTE;
1005         if (seek_by_bytes)
1006             is->seek_flags |= AVSEEK_FLAG_BYTE;
1007         is->seek_req = 1;
1008     }
1009 }
1010
1011 /* pause or resume the video */
1012 static void stream_toggle_pause(VideoState *is)
1013 {
1014     if (is->paused) {
1015         is->frame_timer += av_gettime() / 1000000.0 + is->video_current_pts_drift - is->video_current_pts;
1016         if(is->read_pause_return != AVERROR(ENOSYS)){
1017             is->video_current_pts = is->video_current_pts_drift + av_gettime() / 1000000.0;
1018         }
1019         is->video_current_pts_drift = is->video_current_pts - av_gettime() / 1000000.0;
1020     }
1021     is->paused = !is->paused;
1022 }
1023
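/* compute the absolute time at which the given frame should be displayed: start
   from the previous frame delay and, when audio or the external clock is the
   master, shrink or stretch that delay to drift back towards the master clock,
   then add it to the frame timer */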
1024 static double compute_target_time(double frame_current_pts, VideoState *is)
1025 {
1026     double delay, sync_threshold, diff;
1027
1028     /* compute nominal delay */
1029     delay = frame_current_pts - is->frame_last_pts;
1030     if (delay <= 0 || delay >= 10.0) {
1031         /* if incorrect delay, use previous one */
1032         delay = is->frame_last_delay;
1033     } else {
1034         is->frame_last_delay = delay;
1035     }
1036     is->frame_last_pts = frame_current_pts;
1037
1038     /* update delay to follow master synchronisation source */
1039     if (((is->av_sync_type == AV_SYNC_AUDIO_MASTER && is->audio_st) ||
1040          is->av_sync_type == AV_SYNC_EXTERNAL_CLOCK)) {
1041         /* if video is slave, we try to correct big delays by
1042            duplicating or deleting a frame */
1043         diff = get_video_clock(is) - get_master_clock(is);
1044
1045         /* skip or repeat frame. We take into account the
1046            delay to compute the threshold. I still don't know
1047            if it is the best guess */
1048         sync_threshold = FFMAX(AV_SYNC_THRESHOLD, delay);
1049         if (fabs(diff) < AV_NOSYNC_THRESHOLD) {
1050             if (diff <= -sync_threshold)
1051                 delay = 0;
1052             else if (diff >= sync_threshold)
1053                 delay = 2 * delay;
1054         }
1055     }
1056     is->frame_timer += delay;
1057 #if defined(DEBUG_SYNC)
1058     printf("video: delay=%0.3f pts=%0.3f A-V=%f\n",
1059             delay, frame_current_pts, -diff);
1060 #endif
1061
1062     return is->frame_timer;
1063 }
1064
1065 /* called to display each frame */
1066 static void video_refresh(void *opaque)
1067 {
1068     VideoState *is = opaque;
1069     VideoPicture *vp;
1070
1071     SubPicture *sp, *sp2;
1072
1073     if (is->video_st) {
1074 retry:
1075         if (is->pictq_size == 0) {
1076             // nothing to do, no picture to display in the queue
1077         } else {
1078             double time= av_gettime()/1000000.0;
1079             double next_target;
1080             /* dequeue the picture */
1081             vp = &is->pictq[is->pictq_rindex];
1082
1083             if(time < vp->target_clock)
1084                 return;
1085             /* update current video pts */
1086             is->video_current_pts = vp->pts;
1087             is->video_current_pts_drift = is->video_current_pts - time;
1088             is->video_current_pos = vp->pos;
1089             if(is->pictq_size > 1){
1090                 VideoPicture *nextvp= &is->pictq[(is->pictq_rindex+1)%VIDEO_PICTURE_QUEUE_SIZE];
1091                 assert(nextvp->target_clock >= vp->target_clock);
1092                 next_target= nextvp->target_clock;
1093             }else{
1094                 next_target= vp->target_clock + is->video_clock - vp->pts; //FIXME pass durations cleanly
1095             }
1096             if(framedrop && time > next_target){
1097                 is->skip_frames *= 1.0 + FRAME_SKIP_FACTOR;
1098                 if(is->pictq_size > 1 || time > next_target + 0.5){
1099                     /* update queue size and signal for next picture */
1100                     if (++is->pictq_rindex == VIDEO_PICTURE_QUEUE_SIZE)
1101                         is->pictq_rindex = 0;
1102
1103                     SDL_LockMutex(is->pictq_mutex);
1104                     is->pictq_size--;
1105                     SDL_CondSignal(is->pictq_cond);
1106                     SDL_UnlockMutex(is->pictq_mutex);
1107                     goto retry;
1108                 }
1109             }
1110
1111             if(is->subtitle_st) {
1112                 if (is->subtitle_stream_changed) {
1113                     SDL_LockMutex(is->subpq_mutex);
1114
1115                     while (is->subpq_size) {
1116                         free_subpicture(&is->subpq[is->subpq_rindex]);
1117
1118                         /* update queue size and signal for next picture */
1119                         if (++is->subpq_rindex == SUBPICTURE_QUEUE_SIZE)
1120                             is->subpq_rindex = 0;
1121
1122                         is->subpq_size--;
1123                     }
1124                     is->subtitle_stream_changed = 0;
1125
1126                     SDL_CondSignal(is->subpq_cond);
1127                     SDL_UnlockMutex(is->subpq_mutex);
1128                 } else {
1129                     if (is->subpq_size > 0) {
1130                         sp = &is->subpq[is->subpq_rindex];
1131
1132                         if (is->subpq_size > 1)
1133                             sp2 = &is->subpq[(is->subpq_rindex + 1) % SUBPICTURE_QUEUE_SIZE];
1134                         else
1135                             sp2 = NULL;
1136
1137                         if ((is->video_current_pts > (sp->pts + ((float) sp->sub.end_display_time / 1000)))
1138                                 || (sp2 && is->video_current_pts > (sp2->pts + ((float) sp2->sub.start_display_time / 1000))))
1139                         {
1140                             free_subpicture(sp);
1141
1142                             /* update queue size and signal for next picture */
1143                             if (++is->subpq_rindex == SUBPICTURE_QUEUE_SIZE)
1144                                 is->subpq_rindex = 0;
1145
1146                             SDL_LockMutex(is->subpq_mutex);
1147                             is->subpq_size--;
1148                             SDL_CondSignal(is->subpq_cond);
1149                             SDL_UnlockMutex(is->subpq_mutex);
1150                         }
1151                     }
1152                 }
1153             }
1154
1155             /* display picture */
1156             if (!display_disable)
1157                 video_display(is);
1158
1159             /* update queue size and signal for next picture */
1160             if (++is->pictq_rindex == VIDEO_PICTURE_QUEUE_SIZE)
1161                 is->pictq_rindex = 0;
1162
1163             SDL_LockMutex(is->pictq_mutex);
1164             is->pictq_size--;
1165             SDL_CondSignal(is->pictq_cond);
1166             SDL_UnlockMutex(is->pictq_mutex);
1167         }
1168     } else if (is->audio_st) {
1169         /* draw the next audio frame */
1170
1171         /* if there is only an audio stream, then display the audio bars (better
1172            than nothing, just to test the implementation) */
1173
1174         /* display picture */
1175         if (!display_disable)
1176             video_display(is);
1177     }
1178     if (show_status) {
1179         static int64_t last_time;
1180         int64_t cur_time;
1181         int aqsize, vqsize, sqsize;
1182         double av_diff;
1183
1184         cur_time = av_gettime();
1185         if (!last_time || (cur_time - last_time) >= 30000) {
1186             aqsize = 0;
1187             vqsize = 0;
1188             sqsize = 0;
1189             if (is->audio_st)
1190                 aqsize = is->audioq.size;
1191             if (is->video_st)
1192                 vqsize = is->videoq.size;
1193             if (is->subtitle_st)
1194                 sqsize = is->subtitleq.size;
1195             av_diff = 0;
1196             if (is->audio_st && is->video_st)
1197                 av_diff = get_audio_clock(is) - get_video_clock(is);
1198             printf("%7.2f A-V:%7.3f s:%3.1f aq=%5dKB vq=%5dKB sq=%5dB f=%"PRId64"/%"PRId64"   \r",
1199                    get_master_clock(is),
1200                    av_diff,
1201                    FFMAX(is->skip_frames-1, 0),
1202                    aqsize / 1024,
1203                    vqsize / 1024,
1204                    sqsize,
1205                    is->video_st ? is->video_st->codec->pts_correction_num_faulty_dts : 0,
1206                    is->video_st ? is->video_st->codec->pts_correction_num_faulty_pts : 0);
1207             fflush(stdout);
1208             last_time = cur_time;
1209         }
1210     }
1211 }
1212
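/* tear down a VideoState: stop the read and refresh threads, free the queued
   pictures and their SDL overlays, and destroy the mutexes and condition variables */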
1213 static void stream_close(VideoState *is)
1214 {
1215     VideoPicture *vp;
1216     int i;
1217     /* XXX: use a special url_shutdown call to abort parse cleanly */
1218     is->abort_request = 1;
1219     SDL_WaitThread(is->read_tid, NULL);
1220     SDL_WaitThread(is->refresh_tid, NULL);
1221
1222     /* free all pictures */
1223     for(i=0;i<VIDEO_PICTURE_QUEUE_SIZE; i++) {
1224         vp = &is->pictq[i];
1225 #if CONFIG_AVFILTER
1226         if (vp->picref) {
1227             avfilter_unref_buffer(vp->picref);
1228             vp->picref = NULL;
1229         }
1230 #endif
1231         if (vp->bmp) {
1232             SDL_FreeYUVOverlay(vp->bmp);
1233             vp->bmp = NULL;
1234         }
1235     }
1236     SDL_DestroyMutex(is->pictq_mutex);
1237     SDL_DestroyCond(is->pictq_cond);
1238     SDL_DestroyMutex(is->subpq_mutex);
1239     SDL_DestroyCond(is->subpq_cond);
1240 #if !CONFIG_AVFILTER
1241     if (is->img_convert_ctx)
1242         sws_freeContext(is->img_convert_ctx);
1243 #endif
1244     av_free(is);
1245 }
1246
1247 static void do_exit(void)
1248 {
1249     if (cur_stream) {
1250         stream_close(cur_stream);
1251         cur_stream = NULL;
1252     }
1253     uninit_opts();
1254 #if CONFIG_AVFILTER
1255     avfilter_uninit();
1256 #endif
1257     if (show_status)
1258         printf("\n");
1259     SDL_Quit();
1260     av_log(NULL, AV_LOG_QUIET, "");
1261     exit(0);
1262 }
1263
1264 /* allocate a picture (this needs to be done in the main thread to avoid
1265    potential locking problems) */
1266 static void alloc_picture(void *opaque)
1267 {
1268     VideoState *is = opaque;
1269     VideoPicture *vp;
1270
1271     vp = &is->pictq[is->pictq_windex];
1272
1273     if (vp->bmp)
1274         SDL_FreeYUVOverlay(vp->bmp);
1275
1276 #if CONFIG_AVFILTER
1277     if (vp->picref)
1278         avfilter_unref_buffer(vp->picref);
1279     vp->picref = NULL;
1280
1281     vp->width   = is->out_video_filter->inputs[0]->w;
1282     vp->height  = is->out_video_filter->inputs[0]->h;
1283     vp->pix_fmt = is->out_video_filter->inputs[0]->format;
1284 #else
1285     vp->width   = is->video_st->codec->width;
1286     vp->height  = is->video_st->codec->height;
1287     vp->pix_fmt = is->video_st->codec->pix_fmt;
1288 #endif
1289
1290     vp->bmp = SDL_CreateYUVOverlay(vp->width, vp->height,
1291                                    SDL_YV12_OVERLAY,
1292                                    screen);
1293     if (!vp->bmp || vp->bmp->pitches[0] < vp->width) {
1294         /* SDL allocates a buffer smaller than requested if the video
1295          * overlay hardware is unable to support the requested size. */
1296         fprintf(stderr, "Error: the video system does not support an image\n"
1297                         "size of %dx%d pixels. Try using -lowres or -vf \"scale=w:h\"\n"
1298                         "to reduce the image size.\n", vp->width, vp->height );
1299         do_exit();
1300     }
1301
1302     SDL_LockMutex(is->pictq_mutex);
1303     vp->allocated = 1;
1304     SDL_CondSignal(is->pictq_cond);
1305     SDL_UnlockMutex(is->pictq_mutex);
1306 }
1307
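/* add a decoded frame to the picture queue: update the video clock from the frame
   pts, wait for a free queue slot (requesting an overlay allocation from the main
   thread if needed), convert the frame into the SDL YUV overlay and stamp the entry
   with its target display time */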
1308 static int queue_picture(VideoState *is, AVFrame *src_frame, double pts1, int64_t pos)
1309 {
1310     VideoPicture *vp;
1311     double frame_delay, pts = pts1;
1312
1313     /* compute the exact PTS for the picture if it is omitted in the stream
1314      * pts1 is the dts of the pkt / pts of the frame */
1315     if (pts != 0) {
1316         /* update video clock with pts, if present */
1317         is->video_clock = pts;
1318     } else {
1319         pts = is->video_clock;
1320     }
1321     /* update video clock for next frame */
1322     frame_delay = av_q2d(is->video_st->codec->time_base);
1323     /* for MPEG2, the frame can be repeated, so we update the
1324        clock accordingly */
1325     frame_delay += src_frame->repeat_pict * (frame_delay * 0.5);
1326     is->video_clock += frame_delay;
1327
1328 #if defined(DEBUG_SYNC) && 0
1329     printf("frame_type=%c clock=%0.3f pts=%0.3f\n",
1330            av_get_picture_type_char(src_frame->pict_type), pts, pts1);
1331 #endif
1332
1333     /* wait until we have space to put a new picture */
1334     SDL_LockMutex(is->pictq_mutex);
1335
1336     if(is->pictq_size>=VIDEO_PICTURE_QUEUE_SIZE && !is->refresh)
1337         is->skip_frames= FFMAX(1.0 - FRAME_SKIP_FACTOR, is->skip_frames * (1.0-FRAME_SKIP_FACTOR));
1338
1339     while (is->pictq_size >= VIDEO_PICTURE_QUEUE_SIZE &&
1340            !is->videoq.abort_request) {
1341         SDL_CondWait(is->pictq_cond, is->pictq_mutex);
1342     }
1343     SDL_UnlockMutex(is->pictq_mutex);
1344
1345     if (is->videoq.abort_request)
1346         return -1;
1347
1348     vp = &is->pictq[is->pictq_windex];
1349
1350     /* alloc or resize hardware picture buffer */
1351     if (!vp->bmp ||
1352 #if CONFIG_AVFILTER
1353         vp->width  != is->out_video_filter->inputs[0]->w ||
1354         vp->height != is->out_video_filter->inputs[0]->h) {
1355 #else
1356         vp->width != is->video_st->codec->width ||
1357         vp->height != is->video_st->codec->height) {
1358 #endif
1359         SDL_Event event;
1360
1361         vp->allocated = 0;
1362
1363         /* the allocation must be done in the main thread to avoid
1364            locking problems */
1365         event.type = FF_ALLOC_EVENT;
1366         event.user.data1 = is;
1367         SDL_PushEvent(&event);
1368
1369         /* wait until the picture is allocated */
1370         SDL_LockMutex(is->pictq_mutex);
1371         while (!vp->allocated && !is->videoq.abort_request) {
1372             SDL_CondWait(is->pictq_cond, is->pictq_mutex);
1373         }
1374         SDL_UnlockMutex(is->pictq_mutex);
1375
1376         if (is->videoq.abort_request)
1377             return -1;
1378     }
1379
1380     /* if the frame is not skipped, then display it */
1381     if (vp->bmp) {
1382         AVPicture pict;
1383 #if CONFIG_AVFILTER
1384         if(vp->picref)
1385             avfilter_unref_buffer(vp->picref);
1386         vp->picref = src_frame->opaque;
1387 #endif
1388
1389         /* get a pointer to the bitmap */
1390         SDL_LockYUVOverlay (vp->bmp);
1391
1392         memset(&pict,0,sizeof(AVPicture));
1393         pict.data[0] = vp->bmp->pixels[0];
1394         pict.data[1] = vp->bmp->pixels[2];
1395         pict.data[2] = vp->bmp->pixels[1];
1396
1397         pict.linesize[0] = vp->bmp->pitches[0];
1398         pict.linesize[1] = vp->bmp->pitches[2];
1399         pict.linesize[2] = vp->bmp->pitches[1];
1400
1401 #if CONFIG_AVFILTER
1402         //FIXME use direct rendering
1403         av_picture_copy(&pict, (AVPicture *)src_frame,
1404                         vp->pix_fmt, vp->width, vp->height);
1405 #else
1406         sws_flags = av_get_int(sws_opts, "sws_flags", NULL);
1407         is->img_convert_ctx = sws_getCachedContext(is->img_convert_ctx,
1408             vp->width, vp->height, vp->pix_fmt, vp->width, vp->height,
1409             PIX_FMT_YUV420P, sws_flags, NULL, NULL, NULL);
1410         if (is->img_convert_ctx == NULL) {
1411             fprintf(stderr, "Cannot initialize the conversion context\n");
1412             exit(1);
1413         }
1414         sws_scale(is->img_convert_ctx, src_frame->data, src_frame->linesize,
1415                   0, vp->height, pict.data, pict.linesize);
1416 #endif
1417         /* update the bitmap content */
1418         SDL_UnlockYUVOverlay(vp->bmp);
1419
1420         vp->pts = pts;
1421         vp->pos = pos;
1422
1423         /* now we can update the picture count */
1424         if (++is->pictq_windex == VIDEO_PICTURE_QUEUE_SIZE)
1425             is->pictq_windex = 0;
1426         SDL_LockMutex(is->pictq_mutex);
1427         vp->target_clock= compute_target_time(vp->pts, is);
1428
1429         is->pictq_size++;
1430         SDL_UnlockMutex(is->pictq_mutex);
1431     }
1432     return 0;
1433 }
1434
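/* read one packet from the video queue and decode it; a flush packet resets the
   decoder and the timing state. Returns 1 when a displayable frame was produced
   (and not dropped by frame skipping), 0 otherwise, -1 on abort. */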
1435 static int get_video_frame(VideoState *is, AVFrame *frame, int64_t *pts, AVPacket *pkt)
1436 {
1437     int len1 av_unused, got_picture, i;
1438
1439     if (packet_queue_get(&is->videoq, pkt, 1) < 0)
1440         return -1;
1441
1442     if (pkt->data == flush_pkt.data) {
1443         avcodec_flush_buffers(is->video_st->codec);
1444
1445         SDL_LockMutex(is->pictq_mutex);
1446         // Make sure there are no long delay timers (ideally we should just flush the queue, but that's harder)
1447         for (i = 0; i < VIDEO_PICTURE_QUEUE_SIZE; i++) {
1448             is->pictq[i].target_clock= 0;
1449         }
1450         while (is->pictq_size && !is->videoq.abort_request) {
1451             SDL_CondWait(is->pictq_cond, is->pictq_mutex);
1452         }
1453         is->video_current_pos = -1;
1454         SDL_UnlockMutex(is->pictq_mutex);
1455
1456         is->frame_last_pts = AV_NOPTS_VALUE;
1457         is->frame_last_delay = 0;
1458         is->frame_timer = (double)av_gettime() / 1000000.0;
1459         is->skip_frames = 1;
1460         is->skip_frames_index = 0;
1461         return 0;
1462     }
1463
1464     len1 = avcodec_decode_video2(is->video_st->codec,
1465                                  frame, &got_picture,
1466                                  pkt);
1467
1468     if (got_picture) {
1469         if (decoder_reorder_pts == -1) {
1470             *pts = frame->best_effort_timestamp;
1471         } else if (decoder_reorder_pts) {
1472             *pts = frame->pkt_pts;
1473         } else {
1474             *pts = frame->pkt_dts;
1475         }
1476
1477         if (*pts == AV_NOPTS_VALUE) {
1478             *pts = 0;
1479         }
1480
1481         is->skip_frames_index += 1;
1482         if(is->skip_frames_index >= is->skip_frames){
1483             is->skip_frames_index -= FFMAX(is->skip_frames, 1.0);
1484             return 1;
1485         }
1486
1487     }
1488     return 0;
1489 }
1490
1491 #if CONFIG_AVFILTER
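/* private context of the ffplay_input source filter, which feeds decoded frames
   from the video stream into the filter graph */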
1492 typedef struct {
1493     VideoState *is;
1494     AVFrame *frame;
1495     int use_dr1;
1496 } FilterPriv;
1497
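/* get_buffer() callback for the input filter: hand the decoder a buffer taken
   directly from the filter graph so frames can be passed downstream without a
   copy (direct rendering) */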
1498 static int input_get_buffer(AVCodecContext *codec, AVFrame *pic)
1499 {
1500     AVFilterContext *ctx = codec->opaque;
1501     AVFilterBufferRef  *ref;
1502     int perms = AV_PERM_WRITE;
1503     int i, w, h, stride[4];
1504     unsigned edge;
1505     int pixel_size;
1506
1507     av_assert0(codec->flags & CODEC_FLAG_EMU_EDGE);
1508
1509     if (codec->codec->capabilities & CODEC_CAP_NEG_LINESIZES)
1510         perms |= AV_PERM_NEG_LINESIZES;
1511
1512     if(pic->buffer_hints & FF_BUFFER_HINTS_VALID) {
1513         if(pic->buffer_hints & FF_BUFFER_HINTS_READABLE) perms |= AV_PERM_READ;
1514         if(pic->buffer_hints & FF_BUFFER_HINTS_PRESERVE) perms |= AV_PERM_PRESERVE;
1515         if(pic->buffer_hints & FF_BUFFER_HINTS_REUSABLE) perms |= AV_PERM_REUSE2;
1516     }
1517     if(pic->reference) perms |= AV_PERM_READ | AV_PERM_PRESERVE;
1518
1519     w = codec->width;
1520     h = codec->height;
1521
1522     if(av_image_check_size(w, h, 0, codec))
1523         return -1;
1524
1525     avcodec_align_dimensions2(codec, &w, &h, stride);
1526     edge = codec->flags & CODEC_FLAG_EMU_EDGE ? 0 : avcodec_get_edge_width();
1527     w += edge << 1;
1528     h += edge << 1;
1529
1530     if(!(ref = avfilter_get_video_buffer(ctx->outputs[0], perms, w, h)))
1531         return -1;
1532
1533     pixel_size = av_pix_fmt_descriptors[ref->format].comp[0].step_minus1+1;
1534     ref->video->w = codec->width;
1535     ref->video->h = codec->height;
1536     for(i = 0; i < 4; i ++) {
1537         unsigned hshift = (i == 1 || i == 2) ? av_pix_fmt_descriptors[ref->format].log2_chroma_w : 0;
1538         unsigned vshift = (i == 1 || i == 2) ? av_pix_fmt_descriptors[ref->format].log2_chroma_h : 0;
1539
1540         if (ref->data[i]) {
1541             ref->data[i]    += ((edge * pixel_size) >> hshift) + ((edge * ref->linesize[i]) >> vshift);
1542         }
1543         pic->data[i]     = ref->data[i];
1544         pic->linesize[i] = ref->linesize[i];
1545     }
1546     pic->opaque = ref;
1547     pic->age    = INT_MAX;
1548     pic->type   = FF_BUFFER_TYPE_USER;
1549     pic->reordered_opaque = codec->reordered_opaque;
1550     if(codec->pkt) pic->pkt_pts = codec->pkt->pts;
1551     else           pic->pkt_pts = AV_NOPTS_VALUE;
1552     return 0;
1553 }
1554
1555 static void input_release_buffer(AVCodecContext *codec, AVFrame *pic)
1556 {
1557     memset(pic->data, 0, sizeof(pic->data));
1558     avfilter_unref_buffer(pic->opaque);
1559 }
1560
1561 static int input_reget_buffer(AVCodecContext *codec, AVFrame *pic)
1562 {
1563     AVFilterBufferRef *ref = pic->opaque;
1564
1565     if (pic->data[0] == NULL) {
1566         pic->buffer_hints |= FF_BUFFER_HINTS_READABLE;
1567         return codec->get_buffer(codec, pic);
1568     }
1569
1570     if ((codec->width != ref->video->w) || (codec->height != ref->video->h) ||
1571         (codec->pix_fmt != ref->format)) {
1572         av_log(codec, AV_LOG_ERROR, "Picture properties changed.\n");
1573         return -1;
1574     }
1575
1576     pic->reordered_opaque = codec->reordered_opaque;
1577     if(codec->pkt) pic->pkt_pts = codec->pkt->pts;
1578     else           pic->pkt_pts = AV_NOPTS_VALUE;
1579     return 0;
1580 }
1581
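/* init callback of the ffplay_input filter: store the VideoState and, when the
   decoder supports CODEC_CAP_DR1, install the direct-rendering buffer callbacks */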
1582 static int input_init(AVFilterContext *ctx, const char *args, void *opaque)
1583 {
1584     FilterPriv *priv = ctx->priv;
1585     AVCodecContext *codec;
1586     if(!opaque) return -1;
1587
1588     priv->is = opaque;
1589     codec    = priv->is->video_st->codec;
1590     codec->opaque = ctx;
1591     if((codec->codec->capabilities & CODEC_CAP_DR1)
1592     ) {
1593         av_assert0(codec->flags & CODEC_FLAG_EMU_EDGE);
1594         priv->use_dr1 = 1;
1595         codec->get_buffer     = input_get_buffer;
1596         codec->release_buffer = input_release_buffer;
1597         codec->reget_buffer   = input_reget_buffer;
1598         codec->thread_safe_callbacks = 1;
1599     }
1600
1601     priv->frame = avcodec_alloc_frame();
1602
1603     return 0;
1604 }
1605
1606 static void input_uninit(AVFilterContext *ctx)
1607 {
1608     FilterPriv *priv = ctx->priv;
1609     av_free(priv->frame);
1610 }
1611
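/* request_frame() callback: decode video frames until one is available, wrap it
   in an AVFilterBufferRef (reusing the decoder buffer when direct rendering is
   enabled) and push it into the filter graph */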
1612 static int input_request_frame(AVFilterLink *link)
1613 {
1614     FilterPriv *priv = link->src->priv;
1615     AVFilterBufferRef *picref;
1616     int64_t pts = 0;
1617     AVPacket pkt;
1618     int ret;
1619
1620     while (!(ret = get_video_frame(priv->is, priv->frame, &pts, &pkt)))
1621         av_free_packet(&pkt);
1622     if (ret < 0)
1623         return -1;
1624
1625     if(priv->use_dr1 && priv->frame->opaque) {
1626         picref = avfilter_ref_buffer(priv->frame->opaque, ~0);
1627     } else {
1628         picref = avfilter_get_video_buffer(link, AV_PERM_WRITE, link->w, link->h);
1629         av_image_copy(picref->data, picref->linesize,
1630                       priv->frame->data, priv->frame->linesize,
1631                       picref->format, link->w, link->h);
1632     }
1633     av_free_packet(&pkt);
1634
1635     avfilter_copy_frame_props(picref, priv->frame);
1636     picref->pts = pts;
1637
1638     avfilter_start_frame(link, picref);
1639     avfilter_draw_slice(link, 0, link->h, 1);
1640     avfilter_end_frame(link);
1641
1642     return 0;
1643 }
1644
1645 static int input_query_formats(AVFilterContext *ctx)
1646 {
1647     FilterPriv *priv = ctx->priv;
1648     enum PixelFormat pix_fmts[] = {
1649         priv->is->video_st->codec->pix_fmt, PIX_FMT_NONE
1650     };
1651
1652     avfilter_set_common_formats(ctx, avfilter_make_format_list(pix_fmts));
1653     return 0;
1654 }
1655
1656 static int input_config_props(AVFilterLink *link)
1657 {
1658     FilterPriv *priv  = link->src->priv;
1659     AVCodecContext *c = priv->is->video_st->codec;
1660
1661     link->w = c->width;
1662     link->h = c->height;
1663     link->time_base = priv->is->video_st->time_base;
1664
1665     return 0;
1666 }
1667
1668 static AVFilter input_filter =
1669 {
1670     .name      = "ffplay_input",
1671
1672     .priv_size = sizeof(FilterPriv),
1673
1674     .init      = input_init,
1675     .uninit    = input_uninit,
1676
1677     .query_formats = input_query_formats,
1678
1679     .inputs    = (AVFilterPad[]) {{ .name = NULL }},
1680     .outputs   = (AVFilterPad[]) {{ .name = "default",
1681                                     .type = AVMEDIA_TYPE_VIDEO,
1682                                     .request_frame = input_request_frame,
1683                                     .config_props  = input_config_props, },
1684                                   { .name = NULL }},
1685 };
1686
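/* Build the filter graph: the ffplay_input source feeds decoded frames in and
   the ffsink output converts them to PIX_FMT_YUV420P for display.  If a -vf
   chain was given it is parsed between the two, otherwise source and sink are
   linked directly. */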
1687 static int configure_video_filters(AVFilterGraph *graph, VideoState *is, const char *vfilters)
1688 {
1689     char sws_flags_str[128];
1690     int ret;
1691     FFSinkContext ffsink_ctx = { .pix_fmt = PIX_FMT_YUV420P };
1692     AVFilterContext *filt_src = NULL, *filt_out = NULL;
1693     snprintf(sws_flags_str, sizeof(sws_flags_str), "flags=%d", sws_flags);
1694     graph->scale_sws_opts = av_strdup(sws_flags_str);
1695
1696     if ((ret = avfilter_graph_create_filter(&filt_src, &input_filter, "src",
1697                                             NULL, is, graph)) < 0)
1698         goto the_end;
1699     if ((ret = avfilter_graph_create_filter(&filt_out, &ffsink, "out",
1700                                             NULL, &ffsink_ctx, graph)) < 0)
1701         goto the_end;
1702
1703     if(vfilters) {
1704         AVFilterInOut *outputs = av_malloc(sizeof(AVFilterInOut));
1705         AVFilterInOut *inputs  = av_malloc(sizeof(AVFilterInOut));
1706
1707         outputs->name    = av_strdup("in");
1708         outputs->filter_ctx = filt_src;
1709         outputs->pad_idx = 0;
1710         outputs->next    = NULL;
1711
1712         inputs->name    = av_strdup("out");
1713         inputs->filter_ctx = filt_out;
1714         inputs->pad_idx = 0;
1715         inputs->next    = NULL;
1716
1717         if ((ret = avfilter_graph_parse(graph, vfilters, inputs, outputs, NULL)) < 0)
1718             goto the_end;
1719         av_freep(&vfilters);
1720     } else {
1721         if ((ret = avfilter_link(filt_src, 0, filt_out, 0)) < 0)
1722             goto the_end;
1723     }
1724
1725     if ((ret = avfilter_graph_config(graph, NULL)) < 0)
1726         goto the_end;
1727
1728     is->out_video_filter = filt_out;
1729 the_end:
1730     return ret;
1731 }
1732
1733 #endif  /* CONFIG_AVFILTER */
1734
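/* Video decoding thread: pulls frames either directly from the decoder or
   through the avfilter graph (when compiled with CONFIG_AVFILTER), rescales
   their PTS into the stream time base and hands them to queue_picture() for
   display. */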
1735 static int video_thread(void *arg)
1736 {
1737     VideoState *is = arg;
1738     AVFrame *frame= avcodec_alloc_frame();
1739     int64_t pts_int = AV_NOPTS_VALUE, pos = -1;
1740     double pts;
1741     int ret;
1742
1743 #if CONFIG_AVFILTER
1744     AVFilterGraph *graph = avfilter_graph_alloc();
1745     AVFilterContext *filt_out = NULL;
1746
1747     if ((ret = configure_video_filters(graph, is, vfilters)) < 0)
1748         goto the_end;
1749     filt_out = is->out_video_filter;
1750 #endif
1751
1752     for(;;) {
1753 #if !CONFIG_AVFILTER
1754         AVPacket pkt;
1755 #else
1756         AVFilterBufferRef *picref;
1757         AVRational tb;
1758 #endif
1759         while (is->paused && !is->videoq.abort_request)
1760             SDL_Delay(10);
1761 #if CONFIG_AVFILTER
1762         ret = get_filtered_video_frame(filt_out, frame, &picref, &tb);
1763         if (picref) {
1764             pts_int = picref->pts;
1765             pos     = picref->pos;
1766             frame->opaque = picref;
1767         }
1768
1769         if (av_cmp_q(tb, is->video_st->time_base)) {
1770             av_unused int64_t pts1 = pts_int;
1771             pts_int = av_rescale_q(pts_int, tb, is->video_st->time_base);
1772             av_dlog(NULL, "video_thread(): "
1773                     "tb:%d/%d pts:%"PRId64" -> tb:%d/%d pts:%"PRId64"\n",
1774                     tb.num, tb.den, pts1,
1775                     is->video_st->time_base.num, is->video_st->time_base.den, pts_int);
1776         }
1777 #else
1778         ret = get_video_frame(is, frame, &pts_int, &pkt);
1779         pos = pkt.pos;
1780         av_free_packet(&pkt);
1781 #endif
1782
1783         if (ret < 0) goto the_end;
1784
1785         if (!ret)
1786             continue;
1787
1788         pts = pts_int*av_q2d(is->video_st->time_base);
1789
1790         ret = queue_picture(is, frame, pts, pos);
1791
1792         if (ret < 0)
1793             goto the_end;
1794
1795         if (step)
1796             if (cur_stream)
1797                 stream_toggle_pause(cur_stream);
1798     }
1799  the_end:
1800 #if CONFIG_AVFILTER
1801     avfilter_graph_free(&graph);
1802 #endif
1803     av_free(frame);
1804     return 0;
1805 }
1806
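/* Subtitle decoding thread: decodes subtitle packets, converts each palette
   entry from RGBA to YUVA (CCIR coefficients) so the overlay blending code can
   use it, and stores the result in the subpicture queue. */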
1807 static int subtitle_thread(void *arg)
1808 {
1809     VideoState *is = arg;
1810     SubPicture *sp;
1811     AVPacket pkt1, *pkt = &pkt1;
1812     int len1 av_unused, got_subtitle;
1813     double pts;
1814     int i, j;
1815     int r, g, b, y, u, v, a;
1816
1817     for(;;) {
1818         while (is->paused && !is->subtitleq.abort_request) {
1819             SDL_Delay(10);
1820         }
1821         if (packet_queue_get(&is->subtitleq, pkt, 1) < 0)
1822             break;
1823
1824         if(pkt->data == flush_pkt.data){
1825             avcodec_flush_buffers(is->subtitle_st->codec);
1826             continue;
1827         }
1828         SDL_LockMutex(is->subpq_mutex);
1829         while (is->subpq_size >= SUBPICTURE_QUEUE_SIZE &&
1830                !is->subtitleq.abort_request) {
1831             SDL_CondWait(is->subpq_cond, is->subpq_mutex);
1832         }
1833         SDL_UnlockMutex(is->subpq_mutex);
1834
1835         if (is->subtitleq.abort_request)
1836             goto the_end;
1837
1838         sp = &is->subpq[is->subpq_windex];
1839
1840         /* NOTE: pts is the PTS of the _first_ subtitle beginning in
1841            this packet, if any */
1842         pts = 0;
1843         if (pkt->pts != AV_NOPTS_VALUE)
1844             pts = av_q2d(is->subtitle_st->time_base)*pkt->pts;
1845
1846         len1 = avcodec_decode_subtitle2(is->subtitle_st->codec,
1847                                     &sp->sub, &got_subtitle,
1848                                     pkt);
1849         if (got_subtitle && sp->sub.format == 0) {
1850             sp->pts = pts;
1851
1852             for (i = 0; i < sp->sub.num_rects; i++)
1853             {
1854                 for (j = 0; j < sp->sub.rects[i]->nb_colors; j++)
1855                 {
1856                     RGBA_IN(r, g, b, a, (uint32_t*)sp->sub.rects[i]->pict.data[1] + j);
1857                     y = RGB_TO_Y_CCIR(r, g, b);
1858                     u = RGB_TO_U_CCIR(r, g, b, 0);
1859                     v = RGB_TO_V_CCIR(r, g, b, 0);
1860                     YUVA_OUT((uint32_t*)sp->sub.rects[i]->pict.data[1] + j, y, u, v, a);
1861                 }
1862             }
1863
1864             /* now we can update the picture count */
1865             if (++is->subpq_windex == SUBPICTURE_QUEUE_SIZE)
1866                 is->subpq_windex = 0;
1867             SDL_LockMutex(is->subpq_mutex);
1868             is->subpq_size++;
1869             SDL_UnlockMutex(is->subpq_mutex);
1870         }
1871         av_free_packet(pkt);
1872     }
1873  the_end:
1874     return 0;
1875 }
1876
1877 /* copy samples into the buffer used by the audio visualization (waves/RDFT display) */
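/* sample_array is a fixed-size ring buffer (SAMPLE_ARRAY_SIZE shorts);
   sample_array_index is the write position and wraps around when it reaches
   the end. */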
1878 static void update_sample_display(VideoState *is, short *samples, int samples_size)
1879 {
1880     int size, len;
1881
1882     size = samples_size / sizeof(short);
1883     while (size > 0) {
1884         len = SAMPLE_ARRAY_SIZE - is->sample_array_index;
1885         if (len > size)
1886             len = size;
1887         memcpy(is->sample_array + is->sample_array_index, samples, len * sizeof(short));
1888         samples += len;
1889         is->sample_array_index += len;
1890         if (is->sample_array_index >= SAMPLE_ARRAY_SIZE)
1891             is->sample_array_index = 0;
1892         size -= len;
1893     }
1894 }
1895
1896 /* return the new audio buffer size (samples can be added or removed
1897    to get better sync when video or the external clock is the master) */
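/* The A-V difference is smoothed with an exponential moving average
   (audio_diff_avg_coef); once AUDIO_DIFF_AVG_NB measurements have accumulated
   and the average exceeds audio_diff_threshold, the buffer is shrunk or grown,
   clamped to +/- SAMPLE_CORRECTION_PERCENT_MAX percent of its size. */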
1898 static int synchronize_audio(VideoState *is, short *samples,
1899                              int samples_size1, double pts)
1900 {
1901     int n, samples_size;
1902     double ref_clock;
1903
1904     n = 2 * is->audio_st->codec->channels;
1905     samples_size = samples_size1;
1906
1907     /* if not master, then we try to remove or add samples to correct the clock */
1908     if (((is->av_sync_type == AV_SYNC_VIDEO_MASTER && is->video_st) ||
1909          is->av_sync_type == AV_SYNC_EXTERNAL_CLOCK)) {
1910         double diff, avg_diff;
1911         int wanted_size, min_size, max_size, nb_samples;
1912
1913         ref_clock = get_master_clock(is);
1914         diff = get_audio_clock(is) - ref_clock;
1915
1916         if (fabs(diff) < AV_NOSYNC_THRESHOLD) {
1917             is->audio_diff_cum = diff + is->audio_diff_avg_coef * is->audio_diff_cum;
1918             if (is->audio_diff_avg_count < AUDIO_DIFF_AVG_NB) {
1919                 /* not enough measurements yet for a reliable estimate */
1920                 is->audio_diff_avg_count++;
1921             } else {
1922                 /* estimate the A-V difference */
1923                 avg_diff = is->audio_diff_cum * (1.0 - is->audio_diff_avg_coef);
1924
1925                 if (fabs(avg_diff) >= is->audio_diff_threshold) {
1926                     wanted_size = samples_size + ((int)(diff * is->audio_st->codec->sample_rate) * n);
1927                     nb_samples = samples_size / n;
1928
1929                     min_size = ((nb_samples * (100 - SAMPLE_CORRECTION_PERCENT_MAX)) / 100) * n;
1930                     max_size = ((nb_samples * (100 + SAMPLE_CORRECTION_PERCENT_MAX)) / 100) * n;
1931                     if (wanted_size < min_size)
1932                         wanted_size = min_size;
1933                     else if (wanted_size > max_size)
1934                         wanted_size = max_size;
1935
1936                     /* add or remove samples to correct the synchronization */
1937                     if (wanted_size < samples_size) {
1938                         /* remove samples */
1939                         samples_size = wanted_size;
1940                     } else if (wanted_size > samples_size) {
1941                         uint8_t *samples_end, *q;
1942                         int nb;
1943
1944                         /* add samples */
1945                         nb = (wanted_size - samples_size);
1946                         samples_end = (uint8_t *)samples + samples_size - n;
1947                         q = samples_end + n;
1948                         while (nb > 0) {
1949                             memcpy(q, samples_end, n);
1950                             q += n;
1951                             nb -= n;
1952                         }
1953                         samples_size = wanted_size;
1954                     }
1955                 }
1956 #if 0
1957                 printf("diff=%f adiff=%f sample_diff=%d apts=%0.3f vpts=%0.3f %f\n",
1958                        diff, avg_diff, samples_size - samples_size1,
1959                        is->audio_clock, is->video_clock, is->audio_diff_threshold);
1960 #endif
1961             }
1962         } else {
1963             /* difference is too big: may be initial PTS errors, so
1964                reset the A-V filter */
1965             is->audio_diff_avg_count = 0;
1966             is->audio_diff_cum = 0;
1967         }
1968     }
1969
1970     return samples_size;
1971 }
1972
1973 /* decode one audio frame and return its uncompressed size */
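/* The packet may contain several frames: audio_pkt_temp tracks the part not
   yet consumed by the decoder.  If the decoder's sample format is not S16,
   the samples are converted with av_audio_convert() into audio_buf2. */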
1974 static int audio_decode_frame(VideoState *is, double *pts_ptr)
1975 {
1976     AVPacket *pkt_temp = &is->audio_pkt_temp;
1977     AVPacket *pkt = &is->audio_pkt;
1978     AVCodecContext *dec= is->audio_st->codec;
1979     int n, len1, data_size;
1980     double pts;
1981
1982     for(;;) {
1983         /* NOTE: the audio packet can contain several frames */
1984         while (pkt_temp->size > 0) {
1985             data_size = sizeof(is->audio_buf1);
1986             len1 = avcodec_decode_audio3(dec,
1987                                         (int16_t *)is->audio_buf1, &data_size,
1988                                         pkt_temp);
1989             if (len1 < 0) {
1990                 /* if error, we skip the frame */
1991                 pkt_temp->size = 0;
1992                 break;
1993             }
1994
1995             pkt_temp->data += len1;
1996             pkt_temp->size -= len1;
1997             if (data_size <= 0)
1998                 continue;
1999
2000             if (dec->sample_fmt != is->audio_src_fmt) {
2001                 if (is->reformat_ctx)
2002                     av_audio_convert_free(is->reformat_ctx);
2003                 is->reformat_ctx= av_audio_convert_alloc(AV_SAMPLE_FMT_S16, 1,
2004                                                          dec->sample_fmt, 1, NULL, 0);
2005                 if (!is->reformat_ctx) {
2006                     fprintf(stderr, "Cannot convert %s sample format to %s sample format\n",
2007                         av_get_sample_fmt_name(dec->sample_fmt),
2008                         av_get_sample_fmt_name(AV_SAMPLE_FMT_S16));
2009                         break;
2010                 }
2011                 is->audio_src_fmt= dec->sample_fmt;
2012             }
2013
2014             if (is->reformat_ctx) {
2015                 const void *ibuf[6]= {is->audio_buf1};
2016                 void *obuf[6]= {is->audio_buf2};
2017                 int istride[6]= {av_get_bits_per_sample_fmt(dec->sample_fmt)/8};
2018                 int ostride[6]= {2};
2019                 int len= data_size/istride[0];
2020                 if (av_audio_convert(is->reformat_ctx, obuf, ostride, ibuf, istride, len)<0) {
2021                     printf("av_audio_convert() failed\n");
2022                     break;
2023                 }
2024                 is->audio_buf= is->audio_buf2;
2025                 /* FIXME: existing code assumes that data_size equals framesize*channels*2;
2026                           remove this legacy cruft */
2027                 data_size= len*2;
2028             }else{
2029                 is->audio_buf= is->audio_buf1;
2030             }
2031
2032             /* if no pts, then compute it */
2033             pts = is->audio_clock;
2034             *pts_ptr = pts;
2035             n = 2 * dec->channels;
2036             is->audio_clock += (double)data_size /
2037                 (double)(n * dec->sample_rate);
2038 #if defined(DEBUG_SYNC)
2039             {
2040                 static double last_clock;
2041                 printf("audio: delay=%0.3f clock=%0.3f pts=%0.3f\n",
2042                        is->audio_clock - last_clock,
2043                        is->audio_clock, pts);
2044                 last_clock = is->audio_clock;
2045             }
2046 #endif
2047             return data_size;
2048         }
2049
2050         /* free the current packet */
2051         if (pkt->data)
2052             av_free_packet(pkt);
2053
2054         if (is->paused || is->audioq.abort_request) {
2055             return -1;
2056         }
2057
2058         /* read next packet */
2059         if (packet_queue_get(&is->audioq, pkt, 1) < 0)
2060             return -1;
2061         if(pkt->data == flush_pkt.data){
2062             avcodec_flush_buffers(dec);
2063             continue;
2064         }
2065
2066         pkt_temp->data = pkt->data;
2067         pkt_temp->size = pkt->size;
2068
2069         /* update the audio clock with the packet pts, if available */
2070         if (pkt->pts != AV_NOPTS_VALUE) {
2071             is->audio_clock = av_q2d(is->audio_st->time_base)*pkt->pts;
2072         }
2073     }
2074 }
2075
2076 /* prepare a new audio buffer */
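/* Called by SDL from its audio thread: keep refilling 'stream' with decoded,
   sync-corrected samples; on decode errors a small block of silence is written
   instead so the device still gets data. */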
2077 static void sdl_audio_callback(void *opaque, Uint8 *stream, int len)
2078 {
2079     VideoState *is = opaque;
2080     int audio_size, len1;
2081     double pts;
2082
2083     audio_callback_time = av_gettime();
2084
2085     while (len > 0) {
2086         if (is->audio_buf_index >= is->audio_buf_size) {
2087             audio_size = audio_decode_frame(is, &pts);
2088             if (audio_size < 0) {
2089                 /* if error, just output silence */
2090                 is->audio_buf = is->audio_buf1;
2091                 is->audio_buf_size = 1024;
2092                 memset(is->audio_buf, 0, is->audio_buf_size);
2093             } else {
2094                 if (is->show_mode != SHOW_MODE_VIDEO)
2095                     update_sample_display(is, (int16_t *)is->audio_buf, audio_size);
2096                 audio_size = synchronize_audio(is, (int16_t *)is->audio_buf, audio_size,
2097                                                pts);
2098                 is->audio_buf_size = audio_size;
2099             }
2100             is->audio_buf_index = 0;
2101         }
2102         len1 = is->audio_buf_size - is->audio_buf_index;
2103         if (len1 > len)
2104             len1 = len;
2105         memcpy(stream, (uint8_t *)is->audio_buf + is->audio_buf_index, len1);
2106         len -= len1;
2107         stream += len1;
2108         is->audio_buf_index += len1;
2109     }
2110 }
2111
2112 /* open a given stream. Return 0 if OK */
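/* Finds a decoder for the stream, applies the user decoding options (lowres,
   skip_*, error handling, threads, ...), opens the codec and then, depending
   on the media type, opens the SDL audio device or starts the video/subtitle
   decoding thread. */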
2113 static int stream_component_open(VideoState *is, int stream_index)
2114 {
2115     AVFormatContext *ic = is->ic;
2116     AVCodecContext *avctx;
2117     AVCodec *codec;
2118     SDL_AudioSpec wanted_spec, spec;
2119
2120     if (stream_index < 0 || stream_index >= ic->nb_streams)
2121         return -1;
2122     avctx = ic->streams[stream_index]->codec;
2123
2124     /* for audio, ask the decoder for at most 2 output channels */
2125     if (avctx->codec_type == AVMEDIA_TYPE_AUDIO) {
2126         if (avctx->channels > 0) {
2127             avctx->request_channels = FFMIN(2, avctx->channels);
2128         } else {
2129             avctx->request_channels = 2;
2130         }
2131     }
2132
2133     codec = avcodec_find_decoder(avctx->codec_id);
2134     if (!codec)
2135         return -1;
2136
2137     avctx->debug_mv = debug_mv;
2138     avctx->debug = debug;
2139     avctx->workaround_bugs = workaround_bugs;
2140     avctx->lowres = lowres;
2141     if(lowres) avctx->flags |= CODEC_FLAG_EMU_EDGE;
2142     avctx->idct_algo= idct;
2143     if(fast) avctx->flags2 |= CODEC_FLAG2_FAST;
2144     avctx->skip_frame= skip_frame;
2145     avctx->skip_idct= skip_idct;
2146     avctx->skip_loop_filter= skip_loop_filter;
2147     avctx->error_recognition= error_recognition;
2148     avctx->error_concealment= error_concealment;
2149     avctx->thread_count= thread_count;
2150
2151     set_context_opts(avctx, avcodec_opts[avctx->codec_type], 0, codec);
2152
2153     if(codec->capabilities & CODEC_CAP_DR1)
2154         avctx->flags |= CODEC_FLAG_EMU_EDGE;
2155
2156     if (avcodec_open(avctx, codec) < 0)
2157         return -1;
2158
2159     /* prepare audio output */
2160     if (avctx->codec_type == AVMEDIA_TYPE_AUDIO) {
2161         if(avctx->sample_rate <= 0 || avctx->channels <= 0){
2162             fprintf(stderr, "Invalid sample rate or channel count\n");
2163             return -1;
2164         }
2165         wanted_spec.freq = avctx->sample_rate;
2166         wanted_spec.format = AUDIO_S16SYS;
2167         wanted_spec.channels = avctx->channels;
2168         wanted_spec.silence = 0;
2169         wanted_spec.samples = SDL_AUDIO_BUFFER_SIZE;
2170         wanted_spec.callback = sdl_audio_callback;
2171         wanted_spec.userdata = is;
2172         if (SDL_OpenAudio(&wanted_spec, &spec) < 0) {
2173             fprintf(stderr, "SDL_OpenAudio: %s\n", SDL_GetError());
2174             return -1;
2175         }
2176         is->audio_hw_buf_size = spec.size;
2177         is->audio_src_fmt= AV_SAMPLE_FMT_S16;
2178     }
2179
2180     ic->streams[stream_index]->discard = AVDISCARD_DEFAULT;
2181     switch(avctx->codec_type) {
2182     case AVMEDIA_TYPE_AUDIO:
2183         is->audio_stream = stream_index;
2184         is->audio_st = ic->streams[stream_index];
2185         is->audio_buf_size = 0;
2186         is->audio_buf_index = 0;
2187
2188         /* init averaging filter */
2189         is->audio_diff_avg_coef = exp(log(0.01) / AUDIO_DIFF_AVG_NB);
2190         is->audio_diff_avg_count = 0;
2191         /* since we do not have a precise enough audio FIFO fullness measure,
2192            we correct audio sync only if the error is larger than this threshold */
2193         is->audio_diff_threshold = 2.0 * SDL_AUDIO_BUFFER_SIZE / avctx->sample_rate;
2194
2195         memset(&is->audio_pkt, 0, sizeof(is->audio_pkt));
2196         packet_queue_init(&is->audioq);
2197         SDL_PauseAudio(0);
2198         break;
2199     case AVMEDIA_TYPE_VIDEO:
2200         is->video_stream = stream_index;
2201         is->video_st = ic->streams[stream_index];
2202
2203         packet_queue_init(&is->videoq);
2204         is->video_tid = SDL_CreateThread(video_thread, is);
2205         break;
2206     case AVMEDIA_TYPE_SUBTITLE:
2207         is->subtitle_stream = stream_index;
2208         is->subtitle_st = ic->streams[stream_index];
2209         packet_queue_init(&is->subtitleq);
2210
2211         is->subtitle_tid = SDL_CreateThread(subtitle_thread, is);
2212         break;
2213     default:
2214         break;
2215     }
2216     return 0;
2217 }
2218
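/* Tear a stream component down: abort its packet queue so the decoding thread
   wakes up, join that thread (video/subtitle) or close the SDL audio device
   (audio), then free the queue and mark the stream as inactive. */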
2219 static void stream_component_close(VideoState *is, int stream_index)
2220 {
2221     AVFormatContext *ic = is->ic;
2222     AVCodecContext *avctx;
2223
2224     if (stream_index < 0 || stream_index >= ic->nb_streams)
2225         return;
2226     avctx = ic->streams[stream_index]->codec;
2227
2228     switch(avctx->codec_type) {
2229     case AVMEDIA_TYPE_AUDIO:
2230         packet_queue_abort(&is->audioq);
2231
2232         SDL_CloseAudio();
2233
2234         packet_queue_end(&is->audioq);
2235         if (is->reformat_ctx)
2236             av_audio_convert_free(is->reformat_ctx);
2237         is->reformat_ctx = NULL;
2238         break;
2239     case AVMEDIA_TYPE_VIDEO:
2240         packet_queue_abort(&is->videoq);
2241
2242         /* note: we also signal this condition to make sure we unblock the
2243            video thread in all cases */
2244         SDL_LockMutex(is->pictq_mutex);
2245         SDL_CondSignal(is->pictq_cond);
2246         SDL_UnlockMutex(is->pictq_mutex);
2247
2248         SDL_WaitThread(is->video_tid, NULL);
2249
2250         packet_queue_end(&is->videoq);
2251         break;
2252     case AVMEDIA_TYPE_SUBTITLE:
2253         packet_queue_abort(&is->subtitleq);
2254
2255         /* note: we also signal this condition to make sure we unblock the
2256            subtitle thread in all cases */
2257         SDL_LockMutex(is->subpq_mutex);
2258         is->subtitle_stream_changed = 1;
2259
2260         SDL_CondSignal(is->subpq_cond);
2261         SDL_UnlockMutex(is->subpq_mutex);
2262
2263         SDL_WaitThread(is->subtitle_tid, NULL);
2264
2265         packet_queue_end(&is->subtitleq);
2266         break;
2267     default:
2268         break;
2269     }
2270
2271     ic->streams[stream_index]->discard = AVDISCARD_ALL;
2272     avcodec_close(avctx);
2273     switch(avctx->codec_type) {
2274     case AVMEDIA_TYPE_AUDIO:
2275         is->audio_st = NULL;
2276         is->audio_stream = -1;
2277         break;
2278     case AVMEDIA_TYPE_VIDEO:
2279         is->video_st = NULL;
2280         is->video_stream = -1;
2281         break;
2282     case AVMEDIA_TYPE_SUBTITLE:
2283         is->subtitle_st = NULL;
2284         is->subtitle_stream = -1;
2285         break;
2286     default:
2287         break;
2288     }
2289 }
2290
2291 /* since we have only one decoding thread, we can use a global
2292    variable instead of a thread local variable */
2293 static VideoState *global_video_state;
2294
2295 static int decode_interrupt_cb(void)
2296 {
2297     return (global_video_state && global_video_state->abort_request);
2298 }
2299
2300 /* this thread gets the stream from the disk or the network */
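/* It opens the input, selects the best audio/video/subtitle streams, opens the
   corresponding components and then loops: honouring pause and seek requests,
   throttling when the packet queues are full, and dispatching each packet read
   with av_read_frame() to the matching queue. */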
2301 static int read_thread(void *arg)
2302 {
2303     VideoState *is = arg;
2304     AVFormatContext *ic;
2305     int err, i, ret;
2306     int st_index[AVMEDIA_TYPE_NB];
2307     AVPacket pkt1, *pkt = &pkt1;
2308     AVFormatParameters params, *ap = &params;
2309     int eof=0;
2310     int pkt_in_play_range = 0;
2311
2312     ic = avformat_alloc_context();
2313
2314     memset(st_index, -1, sizeof(st_index));
2315     is->video_stream = -1;
2316     is->audio_stream = -1;
2317     is->subtitle_stream = -1;
2318
2319     global_video_state = is;
2320     avio_set_interrupt_cb(decode_interrupt_cb);
2321
2322     memset(ap, 0, sizeof(*ap));
2323
2324     ap->prealloced_context = 1;
2325     ap->width = frame_width;
2326     ap->height= frame_height;
2327     ap->time_base= (AVRational){1, 25};
2328     ap->pix_fmt = frame_pix_fmt;
2329     ic->flags |= AVFMT_FLAG_PRIV_OPT;
2330
2331
2332     err = av_open_input_file(&ic, is->filename, is->iformat, 0, ap);
2333     if (err >= 0) {
2334         set_context_opts(ic, avformat_opts, AV_OPT_FLAG_DECODING_PARAM, NULL);
2335         err = av_demuxer_open(ic, ap);
2336         if(err < 0){
2337             avformat_free_context(ic);
2338             ic= NULL;
2339         }
2340     }
2341     if (err < 0) {
2342         print_error(is->filename, err);
2343         ret = -1;
2344         goto fail;
2345     }
2346     is->ic = ic;
2347
2348     if(genpts)
2349         ic->flags |= AVFMT_FLAG_GENPTS;
2350
2351     err = av_find_stream_info(ic);
2352     if (err < 0) {
2353         fprintf(stderr, "%s: could not find codec parameters\n", is->filename);
2354         ret = -1;
2355         goto fail;
2356     }
2357     if(ic->pb)
2358         ic->pb->eof_reached= 0; //FIXME hack, ffplay maybe should not use url_feof() to test for the end
2359
2360     if(seek_by_bytes<0)
2361         seek_by_bytes= !!(ic->iformat->flags & AVFMT_TS_DISCONT);
2362
2363     /* if a start position was requested, seek to it */
2364     if (start_time != AV_NOPTS_VALUE) {
2365         int64_t timestamp;
2366
2367         timestamp = start_time;
2368         /* add the stream start time */
2369         if (ic->start_time != AV_NOPTS_VALUE)
2370             timestamp += ic->start_time;
2371         ret = avformat_seek_file(ic, -1, INT64_MIN, timestamp, INT64_MAX, 0);
2372         if (ret < 0) {
2373             fprintf(stderr, "%s: could not seek to position %0.3f\n",
2374                     is->filename, (double)timestamp / AV_TIME_BASE);
2375         }
2376     }
2377
2378     for (i = 0; i < ic->nb_streams; i++)
2379         ic->streams[i]->discard = AVDISCARD_ALL;
2380     if (!video_disable)
2381         st_index[AVMEDIA_TYPE_VIDEO] =
2382             av_find_best_stream(ic, AVMEDIA_TYPE_VIDEO,
2383                                 wanted_stream[AVMEDIA_TYPE_VIDEO], -1, NULL, 0);
2384     if (!audio_disable)
2385         st_index[AVMEDIA_TYPE_AUDIO] =
2386             av_find_best_stream(ic, AVMEDIA_TYPE_AUDIO,
2387                                 wanted_stream[AVMEDIA_TYPE_AUDIO],
2388                                 st_index[AVMEDIA_TYPE_VIDEO],
2389                                 NULL, 0);
2390     if (!video_disable)
2391         st_index[AVMEDIA_TYPE_SUBTITLE] =
2392             av_find_best_stream(ic, AVMEDIA_TYPE_SUBTITLE,
2393                                 wanted_stream[AVMEDIA_TYPE_SUBTITLE],
2394                                 (st_index[AVMEDIA_TYPE_AUDIO] >= 0 ?
2395                                  st_index[AVMEDIA_TYPE_AUDIO] :
2396                                  st_index[AVMEDIA_TYPE_VIDEO]),
2397                                 NULL, 0);
2398     if (show_status) {
2399         av_dump_format(ic, 0, is->filename, 0);
2400     }
2401
2402     is->show_mode = show_mode;
2403
2404     /* open the streams */
2405     if (st_index[AVMEDIA_TYPE_AUDIO] >= 0) {
2406         stream_component_open(is, st_index[AVMEDIA_TYPE_AUDIO]);
2407     }
2408
2409     ret=-1;
2410     if (st_index[AVMEDIA_TYPE_VIDEO] >= 0) {
2411         ret= stream_component_open(is, st_index[AVMEDIA_TYPE_VIDEO]);
2412     }
2413     is->refresh_tid = SDL_CreateThread(refresh_thread, is);
2414     if (is->show_mode == SHOW_MODE_NONE)
2415         is->show_mode = ret >= 0 ? SHOW_MODE_VIDEO : SHOW_MODE_RDFT;
2416
2417     if (st_index[AVMEDIA_TYPE_SUBTITLE] >= 0) {
2418         stream_component_open(is, st_index[AVMEDIA_TYPE_SUBTITLE]);
2419     }
2420
2421     if (is->video_stream < 0 && is->audio_stream < 0) {
2422         fprintf(stderr, "%s: could not open codecs\n", is->filename);
2423         ret = -1;
2424         goto fail;
2425     }
2426
2427     for(;;) {
2428         if (is->abort_request)
2429             break;
2430         if (is->paused != is->last_paused) {
2431             is->last_paused = is->paused;
2432             if (is->paused)
2433                 is->read_pause_return= av_read_pause(ic);
2434             else
2435                 av_read_play(ic);
2436         }
2437 #if CONFIG_RTSP_DEMUXER
2438         if (is->paused && !strcmp(ic->iformat->name, "rtsp")) {
2439             /* wait 10 ms to avoid trying to get another packet */
2440             /* XXX: horrible */
2441             SDL_Delay(10);
2442             continue;
2443         }
2444 #endif
2445         if (is->seek_req) {
2446             int64_t seek_target= is->seek_pos;
2447             int64_t seek_min= is->seek_rel > 0 ? seek_target - is->seek_rel + 2: INT64_MIN;
2448             int64_t seek_max= is->seek_rel < 0 ? seek_target - is->seek_rel - 2: INT64_MAX;
2449 //FIXME the +-2 is due to rounding being not done in the correct direction in generation
2450 //      of the seek_pos/seek_rel variables
2451
2452             ret = avformat_seek_file(is->ic, -1, seek_min, seek_target, seek_max, is->seek_flags);
2453             if (ret < 0) {
2454                 fprintf(stderr, "%s: error while seeking\n", is->ic->filename);
2455             }else{
2456                 if (is->audio_stream >= 0) {
2457                     packet_queue_flush(&is->audioq);
2458                     packet_queue_put(&is->audioq, &flush_pkt);
2459                 }
2460                 if (is->subtitle_stream >= 0) {
2461                     packet_queue_flush(&is->subtitleq);
2462                     packet_queue_put(&is->subtitleq, &flush_pkt);
2463                 }
2464                 if (is->video_stream >= 0) {
2465                     packet_queue_flush(&is->videoq);
2466                     packet_queue_put(&is->videoq, &flush_pkt);
2467                 }
2468             }
2469             is->seek_req = 0;
2470             eof= 0;
2471         }
2472
2473         /* if the queues are full, no need to read more */
2474         if (   is->audioq.size + is->videoq.size + is->subtitleq.size > MAX_QUEUE_SIZE
2475             || (   (is->audioq   .size  > MIN_AUDIOQ_SIZE || is->audio_stream<0)
2476                 && (is->videoq   .nb_packets > MIN_FRAMES || is->video_stream<0)
2477                 && (is->subtitleq.nb_packets > MIN_FRAMES || is->subtitle_stream<0))) {
2478             /* wait 10 ms */
2479             SDL_Delay(10);
2480             continue;
2481         }
2482         if(eof) {
2483             if(is->video_stream >= 0){
2484                 av_init_packet(pkt);
2485                 pkt->data=NULL;
2486                 pkt->size=0;
2487                 pkt->stream_index= is->video_stream;
2488                 packet_queue_put(&is->videoq, pkt);
2489             }
2490             SDL_Delay(10);
2491             if(is->audioq.size + is->videoq.size + is->subtitleq.size ==0){
2492                 if(loop!=1 && (!loop || --loop)){
2493                     stream_seek(cur_stream, start_time != AV_NOPTS_VALUE ? start_time : 0, 0, 0);
2494                 }else if(autoexit){
2495                     ret=AVERROR_EOF;
2496                     goto fail;
2497                 }
2498             }
2499             eof=0;
2500             continue;
2501         }
2502         ret = av_read_frame(ic, pkt);
2503         if (ret < 0) {
2504             if (ret == AVERROR_EOF || url_feof(ic->pb))
2505                 eof=1;
2506             if (ic->pb && ic->pb->error)
2507                 break;
2508             SDL_Delay(100); /* wait for user event */
2509             continue;
2510         }
2511         /* if the packet is in the play range specified by the user, queue it, otherwise discard it */
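        /* i.e. keep the packet when
           pkt_time - start_time <= duration (all in seconds),
           or when no -t duration was given at all */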
2512         pkt_in_play_range = duration == AV_NOPTS_VALUE ||
2513                 (pkt->pts - ic->streams[pkt->stream_index]->start_time) *
2514                 av_q2d(ic->streams[pkt->stream_index]->time_base) -
2515                 (double)(start_time != AV_NOPTS_VALUE ? start_time : 0)/1000000
2516                 <= ((double)duration/1000000);
2517         if (pkt->stream_index == is->audio_stream && pkt_in_play_range) {
2518             packet_queue_put(&is->audioq, pkt);
2519         } else if (pkt->stream_index == is->video_stream && pkt_in_play_range) {
2520             packet_queue_put(&is->videoq, pkt);
2521         } else if (pkt->stream_index == is->subtitle_stream && pkt_in_play_range) {
2522             packet_queue_put(&is->subtitleq, pkt);
2523         } else {
2524             av_free_packet(pkt);
2525         }
2526     }
2527     /* wait until the end */
2528     while (!is->abort_request) {
2529         SDL_Delay(100);
2530     }
2531
2532     ret = 0;
2533  fail:
2534     /* disable interrupting */
2535     global_video_state = NULL;
2536
2537     /* close each stream */
2538     if (is->audio_stream >= 0)
2539         stream_component_close(is, is->audio_stream);
2540     if (is->video_stream >= 0)
2541         stream_component_close(is, is->video_stream);
2542     if (is->subtitle_stream >= 0)
2543         stream_component_close(is, is->subtitle_stream);
2544     if (is->ic) {
2545         av_close_input_file(is->ic);
2546         is->ic = NULL; /* safety */
2547     }
2548     avio_set_interrupt_cb(NULL);
2549
2550     if (ret != 0) {
2551         SDL_Event event;
2552
2553         event.type = FF_QUIT_EVENT;
2554         event.user.data1 = is;
2555         SDL_PushEvent(&event);
2556     }
2557     return 0;
2558 }
2559
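/* Allocate the VideoState, create the picture/subpicture synchronisation
   primitives and start the read thread; the actual demuxer and decoders are
   opened from inside read_thread(). */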
2560 static VideoState *stream_open(const char *filename, AVInputFormat *iformat)
2561 {
2562     VideoState *is;
2563
2564     is = av_mallocz(sizeof(VideoState));
2565     if (!is)
2566         return NULL;
2567     av_strlcpy(is->filename, filename, sizeof(is->filename));
2568     is->iformat = iformat;
2569     is->ytop = 0;
2570     is->xleft = 0;
2571
2572     /* start video display */
2573     is->pictq_mutex = SDL_CreateMutex();
2574     is->pictq_cond = SDL_CreateCond();
2575
2576     is->subpq_mutex = SDL_CreateMutex();
2577     is->subpq_cond = SDL_CreateCond();
2578
2579     is->av_sync_type = av_sync_type;
2580     is->read_tid = SDL_CreateThread(read_thread, is);
2581     if (!is->read_tid) {
2582         av_free(is);
2583         return NULL;
2584     }
2585     return is;
2586 }
2587
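/* Switch to the next stream of the given type, wrapping around; for subtitles,
   "no subtitle stream" (index -1) is part of the cycle. */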
2588 static void stream_cycle_channel(VideoState *is, int codec_type)
2589 {
2590     AVFormatContext *ic = is->ic;
2591     int start_index, stream_index;
2592     AVStream *st;
2593
2594     if (codec_type == AVMEDIA_TYPE_VIDEO)
2595         start_index = is->video_stream;
2596     else if (codec_type == AVMEDIA_TYPE_AUDIO)
2597         start_index = is->audio_stream;
2598     else
2599         start_index = is->subtitle_stream;
2600     if (start_index < (codec_type == AVMEDIA_TYPE_SUBTITLE ? -1 : 0))
2601         return;
2602     stream_index = start_index;
2603     for(;;) {
2604         if (++stream_index >= is->ic->nb_streams)
2605         {
2606             if (codec_type == AVMEDIA_TYPE_SUBTITLE)
2607             {
2608                 stream_index = -1;
2609                 goto the_end;
2610             } else
2611                 stream_index = 0;
2612         }
2613         if (stream_index == start_index)
2614             return;
2615         st = ic->streams[stream_index];
2616         if (st->codec->codec_type == codec_type) {
2617             /* check that parameters are OK */
2618             switch(codec_type) {
2619             case AVMEDIA_TYPE_AUDIO:
2620                 if (st->codec->sample_rate != 0 &&
2621                     st->codec->channels != 0)
2622                     goto the_end;
2623                 break;
2624             case AVMEDIA_TYPE_VIDEO:
2625             case AVMEDIA_TYPE_SUBTITLE:
2626                 goto the_end;
2627             default:
2628                 break;
2629             }
2630         }
2631     }
2632  the_end:
2633     stream_component_close(is, start_index);
2634     stream_component_open(is, stream_index);
2635 }
2636
2637
2638 static void toggle_full_screen(void)
2639 {
2640     is_full_screen = !is_full_screen;
2641     video_open(cur_stream);
2642 }
2643
2644 static void toggle_pause(void)
2645 {
2646     if (cur_stream)
2647         stream_toggle_pause(cur_stream);
2648     step = 0;
2649 }
2650
2651 static void step_to_next_frame(void)
2652 {
2653     if (cur_stream) {
2654         /* if the stream is paused, unpause it, then step */
2655         if (cur_stream->paused)
2656             stream_toggle_pause(cur_stream);
2657     }
2658     step = 1;
2659 }
2660
2661 static void toggle_audio_display(void)
2662 {
2663     if (cur_stream) {
2664         int bgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0x00);
2665         cur_stream->show_mode = (cur_stream->show_mode + 1) % SHOW_MODE_NB;
2666         fill_rectangle(screen,
2667                     cur_stream->xleft, cur_stream->ytop, cur_stream->width, cur_stream->height,
2668                     bgcolor);
2669         SDL_UpdateRect(screen, cur_stream->xleft, cur_stream->ytop, cur_stream->width, cur_stream->height);
2670     }
2671 }
2672
2673 /* handle an event sent by the GUI */
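/* Runs on the main thread: dispatches SDL keyboard, mouse and resize events as
   well as the FF_ALLOC/FF_REFRESH/FF_QUIT events posted by the other threads.
   This loop never returns; quitting goes through do_exit(). */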
2674 static void event_loop(void)
2675 {
2676     SDL_Event event;
2677     double incr, pos, frac;
2678
2679     for(;;) {
2680         double x;
2681         SDL_WaitEvent(&event);
2682         switch(event.type) {
2683         case SDL_KEYDOWN:
2684             if (exit_on_keydown) {
2685                 do_exit();
2686                 break;
2687             }
2688             switch(event.key.keysym.sym) {
2689             case SDLK_ESCAPE:
2690             case SDLK_q:
2691                 do_exit();
2692                 break;
2693             case SDLK_f:
2694                 toggle_full_screen();
2695                 break;
2696             case SDLK_p:
2697             case SDLK_SPACE:
2698                 toggle_pause();
2699                 break;
2700             case SDLK_s: //S: Step to next frame
2701                 step_to_next_frame();
2702                 break;
2703             case SDLK_a:
2704                 if (cur_stream)
2705                     stream_cycle_channel(cur_stream, AVMEDIA_TYPE_AUDIO);
2706                 break;
2707             case SDLK_v:
2708                 if (cur_stream)
2709                     stream_cycle_channel(cur_stream, AVMEDIA_TYPE_VIDEO);
2710                 break;
2711             case SDLK_t:
2712                 if (cur_stream)
2713                     stream_cycle_channel(cur_stream, AVMEDIA_TYPE_SUBTITLE);
2714                 break;
2715             case SDLK_w:
2716                 toggle_audio_display();
2717                 break;
2718             case SDLK_LEFT:
2719                 incr = -10.0;
2720                 goto do_seek;
2721             case SDLK_RIGHT:
2722                 incr = 10.0;
2723                 goto do_seek;
2724             case SDLK_UP:
2725                 incr = 60.0;
2726                 goto do_seek;
2727             case SDLK_DOWN:
2728                 incr = -60.0;
2729             do_seek:
2730                 if (cur_stream) {
2731                     if (seek_by_bytes) {
2732                         if (cur_stream->video_stream >= 0 && cur_stream->video_current_pos>=0){
2733                             pos= cur_stream->video_current_pos;
2734                         }else if(cur_stream->audio_stream >= 0 && cur_stream->audio_pkt.pos>=0){
2735                             pos= cur_stream->audio_pkt.pos;
2736                         }else
2737                             pos = avio_tell(cur_stream->ic->pb);
2738                         if (cur_stream->ic->bit_rate)
2739                             incr *= cur_stream->ic->bit_rate / 8.0;
2740                         else
2741                             incr *= 180000.0;
2742                         pos += incr;
2743                         stream_seek(cur_stream, pos, incr, 1);
2744                     } else {
2745                         pos = get_master_clock(cur_stream);
2746                         pos += incr;
2747                         stream_seek(cur_stream, (int64_t)(pos * AV_TIME_BASE), (int64_t)(incr * AV_TIME_BASE), 0);
2748                     }
2749                 }
2750                 break;
2751             default:
2752                 break;
2753             }
2754             break;
2755         case SDL_MOUSEBUTTONDOWN:
2756             if (exit_on_mousedown) {
2757                 do_exit();
2758                 break;
2759             }
2760         case SDL_MOUSEMOTION:
2761             if(event.type ==SDL_MOUSEBUTTONDOWN){
2762                 x= event.button.x;
2763             }else{
2764                 if(event.motion.state != SDL_PRESSED)
2765                     break;
2766                 x= event.motion.x;
2767             }
2768             if (cur_stream) {
2769                 if(seek_by_bytes || cur_stream->ic->duration<=0){
2770                     uint64_t size=  avio_size(cur_stream->ic->pb);
2771                     stream_seek(cur_stream, size*x/cur_stream->width, 0, 1);
2772                 }else{
2773                     int64_t ts;
2774                     int ns, hh, mm, ss;
2775                     int tns, thh, tmm, tss;
2776                     tns = cur_stream->ic->duration/1000000LL;
2777                     thh = tns/3600;
2778                     tmm = (tns%3600)/60;
2779                     tss = (tns%60);
2780                     frac = x/cur_stream->width;
2781                     ns = frac*tns;
2782                     hh = ns/3600;
2783                     mm = (ns%3600)/60;
2784                     ss = (ns%60);
2785                     fprintf(stderr, "Seek to %2.0f%% (%2d:%02d:%02d) of total duration (%2d:%02d:%02d)       \n", frac*100,
2786                             hh, mm, ss, thh, tmm, tss);
2787                     ts = frac*cur_stream->ic->duration;
2788                     if (cur_stream->ic->start_time != AV_NOPTS_VALUE)
2789                         ts += cur_stream->ic->start_time;
2790                     stream_seek(cur_stream, ts, 0, 0);
2791                 }
2792             }
2793             break;
2794         case SDL_VIDEORESIZE:
2795             if (cur_stream) {
2796                 screen = SDL_SetVideoMode(event.resize.w, event.resize.h, 0,
2797                                           SDL_HWSURFACE|SDL_RESIZABLE|SDL_ASYNCBLIT|SDL_HWACCEL);
2798                 screen_width = cur_stream->width = event.resize.w;
2799                 screen_height= cur_stream->height= event.resize.h;
2800             }
2801             break;
2802         case SDL_QUIT:
2803         case FF_QUIT_EVENT:
2804             do_exit();
2805             break;
2806         case FF_ALLOC_EVENT:
2807             video_open(event.user.data1);
2808             alloc_picture(event.user.data1);
2809             break;
2810         case FF_REFRESH_EVENT:
2811             video_refresh(event.user.data1);
2812             cur_stream->refresh=0;
2813             break;
2814         default:
2815             break;
2816         }
2817     }
2818 }
2819
2820 static int opt_frame_size(const char *opt, const char *arg)
2821 {
2822     if (av_parse_video_size(&frame_width, &frame_height, arg) < 0) {
2823         fprintf(stderr, "Incorrect frame size\n");
2824         return AVERROR(EINVAL);
2825     }
2826     if ((frame_width % 2) != 0 || (frame_height % 2) != 0) {
2827         fprintf(stderr, "Frame size must be a multiple of 2\n");
2828         return AVERROR(EINVAL);
2829     }
2830     return 0;
2831 }
2832
2833 static int opt_width(const char *opt, const char *arg)
2834 {
2835     screen_width = parse_number_or_die(opt, arg, OPT_INT64, 1, INT_MAX);
2836     return 0;
2837 }
2838
2839 static int opt_height(const char *opt, const char *arg)
2840 {
2841     screen_height = parse_number_or_die(opt, arg, OPT_INT64, 1, INT_MAX);
2842     return 0;
2843 }
2844
2845 static int opt_format(const char *opt, const char *arg)
2846 {
2847     file_iformat = av_find_input_format(arg);
2848     if (!file_iformat) {
2849         fprintf(stderr, "Unknown input format: %s\n", arg);
2850         return AVERROR(EINVAL);
2851     }
2852     return 0;
2853 }
2854
2855 static int opt_frame_pix_fmt(const char *opt, const char *arg)
2856 {
2857     frame_pix_fmt = av_get_pix_fmt(arg);
2858     return 0;
2859 }
2860
2861 static int opt_sync(const char *opt, const char *arg)
2862 {
2863     if (!strcmp(arg, "audio"))
2864         av_sync_type = AV_SYNC_AUDIO_MASTER;
2865     else if (!strcmp(arg, "video"))
2866         av_sync_type = AV_SYNC_VIDEO_MASTER;
2867     else if (!strcmp(arg, "ext"))
2868         av_sync_type = AV_SYNC_EXTERNAL_CLOCK;
2869     else {
2870         fprintf(stderr, "Unknown value for %s: %s\n", opt, arg);
2871         exit(1);
2872     }
2873     return 0;
2874 }
2875
2876 static int opt_seek(const char *opt, const char *arg)
2877 {
2878     start_time = parse_time_or_die(opt, arg, 1);
2879     return 0;
2880 }
2881
2882 static int opt_duration(const char *opt, const char *arg)
2883 {
2884     duration = parse_time_or_die(opt, arg, 1);
2885     return 0;
2886 }
2887
2888 static int opt_debug(const char *opt, const char *arg)
2889 {
2890     av_log_set_level(99);
2891     debug = parse_number_or_die(opt, arg, OPT_INT64, 0, INT_MAX);
2892     return 0;
2893 }
2894
2895 static int opt_vismv(const char *opt, const char *arg)
2896 {
2897     debug_mv = parse_number_or_die(opt, arg, OPT_INT64, INT_MIN, INT_MAX);
2898     return 0;
2899 }
2900
2901 static int opt_thread_count(const char *opt, const char *arg)
2902 {
2903     thread_count= parse_number_or_die(opt, arg, OPT_INT64, 0, INT_MAX);
2904 #if !HAVE_THREADS
2905     fprintf(stderr, "Warning: not compiled with thread support, using thread emulation\n");
2906 #endif
2907     return 0;
2908 }
2909
2910 static int opt_show_mode(const char *opt, const char *arg)
2911 {
2912     show_mode = !strcmp(arg, "video") ? SHOW_MODE_VIDEO :
2913                 !strcmp(arg, "waves") ? SHOW_MODE_WAVES :
2914                 !strcmp(arg, "rdft" ) ? SHOW_MODE_RDFT  :
2915                 parse_number_or_die(opt, arg, OPT_INT, 0, SHOW_MODE_NB-1);
2916     return 0;
2917 }
2918
2919 static int opt_input_file(const char *opt, const char *filename)
2920 {
2921     if (input_filename) {
2922         fprintf(stderr, "Argument '%s' provided as input filename, but '%s' was already specified.\n",
2923                 filename, input_filename);
2924         exit(1);
2925     }
2926     if (!strcmp(filename, "-"))
2927         filename = "pipe:";
2928     input_filename = filename;
2929     return 0;
2930 }
2931
2932 static const OptionDef options[] = {
2933 #include "cmdutils_common_opts.h"
2934     { "x", HAS_ARG, {(void*)opt_width}, "force displayed width", "width" },
2935     { "y", HAS_ARG, {(void*)opt_height}, "force displayed height", "height" },
2936     { "s", HAS_ARG | OPT_VIDEO, {(void*)opt_frame_size}, "set frame size (WxH or abbreviation)", "size" },
2937     { "fs", OPT_BOOL, {(void*)&is_full_screen}, "force full screen" },
2938     { "an", OPT_BOOL, {(void*)&audio_disable}, "disable audio" },
2939     { "vn", OPT_BOOL, {(void*)&video_disable}, "disable video" },
2940     { "ast", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&wanted_stream[AVMEDIA_TYPE_AUDIO]}, "select desired audio stream", "stream_number" },
2941     { "vst", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&wanted_stream[AVMEDIA_TYPE_VIDEO]}, "select desired video stream", "stream_number" },
2942     { "sst", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&wanted_stream[AVMEDIA_TYPE_SUBTITLE]}, "select desired subtitle stream", "stream_number" },
2943     { "ss", HAS_ARG, {(void*)&opt_seek}, "seek to a given position in seconds", "pos" },
2944     { "t", HAS_ARG, {(void*)&opt_duration}, "play \"duration\" seconds of audio/video", "duration" },
2945     { "bytes", OPT_INT | HAS_ARG, {(void*)&seek_by_bytes}, "seek by bytes 0=off 1=on -1=auto", "val" },
2946     { "nodisp", OPT_BOOL, {(void*)&display_disable}, "disable graphical display" },
2947     { "f", HAS_ARG, {(void*)opt_format}, "force format", "fmt" },
2948     { "pix_fmt", HAS_ARG | OPT_EXPERT | OPT_VIDEO, {(void*)opt_frame_pix_fmt}, "set pixel format", "format" },
2949     { "stats", OPT_BOOL | OPT_EXPERT, {(void*)&show_status}, "show status", "" },
2950     { "debug", HAS_ARG | OPT_EXPERT, {(void*)opt_debug}, "print specific debug info", "" },
2951     { "bug", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&workaround_bugs}, "workaround bugs", "" },
2952     { "vismv", HAS_ARG | OPT_EXPERT, {(void*)opt_vismv}, "visualize motion vectors", "" },
2953     { "fast", OPT_BOOL | OPT_EXPERT, {(void*)&fast}, "non spec compliant optimizations", "" },
2954     { "genpts", OPT_BOOL | OPT_EXPERT, {(void*)&genpts}, "generate pts", "" },
2955     { "drp", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&decoder_reorder_pts}, "let decoder reorder pts 0=off 1=on -1=auto", ""},
2956     { "lowres", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&lowres}, "", "" },
2957     { "skiploop", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&skip_loop_filter}, "", "" },
2958     { "skipframe", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&skip_frame}, "", "" },
2959     { "skipidct", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&skip_idct}, "", "" },
2960     { "idct", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&idct}, "set idct algo",  "algo" },
2961     { "er", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&error_recognition}, "set error detection threshold (0-4)",  "threshold" },
2962     { "ec", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&error_concealment}, "set error concealment options",  "bit_mask" },
2963     { "sync", HAS_ARG | OPT_EXPERT, {(void*)opt_sync}, "set audio-video sync. type (type=audio/video/ext)", "type" },
2964     { "threads", HAS_ARG | OPT_EXPERT, {(void*)opt_thread_count}, "thread count", "count" },
2965     { "autoexit", OPT_BOOL | OPT_EXPERT, {(void*)&autoexit}, "exit at the end", "" },
2966     { "exitonkeydown", OPT_BOOL | OPT_EXPERT, {(void*)&exit_on_keydown}, "exit on key down", "" },
2967     { "exitonmousedown", OPT_BOOL | OPT_EXPERT, {(void*)&exit_on_mousedown}, "exit on mouse down", "" },
2968     { "loop", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&loop}, "set number of times the playback shall be looped", "loop count" },
2969     { "framedrop", OPT_BOOL | OPT_EXPERT, {(void*)&framedrop}, "drop frames when cpu is too slow", "" },
2970     { "window_title", OPT_STRING | HAS_ARG, {(void*)&window_title}, "set window title", "window title" },
2971 #if CONFIG_AVFILTER
2972     { "vf", OPT_STRING | HAS_ARG, {(void*)&vfilters}, "video filters", "filter list" },
2973 #endif
2974     { "rdftspeed", OPT_INT | HAS_ARG| OPT_AUDIO | OPT_EXPERT, {(void*)&rdftspeed}, "rdft speed", "msecs" },
2975     { "showmode", HAS_ARG, {(void*)opt_show_mode}, "select show mode (0 = video, 1 = waves, 2 = RDFT)", "mode" },
2976     { "default", HAS_ARG | OPT_AUDIO | OPT_VIDEO | OPT_EXPERT, {(void*)opt_default}, "generic catch all option", "" },
2977     { "i", HAS_ARG, {(void *)opt_input_file}, "read specified file", "input_file"},
2978     { NULL, },
2979 };
2980
2981 static void show_usage(void)
2982 {
2983     printf("Simple media player\n");
2984     printf("usage: ffplay [options] input_file\n");
2985     printf("\n");
2986 }
2987
2988 static void show_help(void)
2989 {
2990     av_log_set_callback(log_callback_help);
2991     show_usage();
2992     show_help_options(options, "Main options:\n",
2993                       OPT_EXPERT, 0);
2994     show_help_options(options, "\nAdvanced options:\n",
2995                       OPT_EXPERT, OPT_EXPERT);
2996     printf("\n");
2997     av_opt_show2(avcodec_opts[0], NULL,
2998                  AV_OPT_FLAG_DECODING_PARAM, 0);
2999     printf("\n");
3000     av_opt_show2(avformat_opts, NULL,
3001                  AV_OPT_FLAG_DECODING_PARAM, 0);
3002 #if !CONFIG_AVFILTER
3003     printf("\n");
3004     av_opt_show2(sws_opts, NULL,
3005                  AV_OPT_FLAG_ENCODING_PARAM, 0);
3006 #endif
3007     printf("\nWhile playing:\n"
3008            "q, ESC              quit\n"
3009            "f                   toggle full screen\n"
3010            "p, SPC              pause\n"
3011            "a                   cycle audio channel\n"
3012            "v                   cycle video channel\n"
3013            "t                   cycle subtitle channel\n"
3014            "w                   cycle audio display modes\n"
3015            "s                   activate frame-step mode\n"
3016            "left/right          seek backward/forward 10 seconds\n"
3017            "down/up             seek backward/forward 1 minute\n"
3018            "mouse click         seek to percentage in file corresponding to fraction of width\n"
3019            );
3020 }
3021
3022 /* program entry point */
3023 int main(int argc, char **argv)
3024 {
3025     int flags;
3026
3027     av_log_set_flags(AV_LOG_SKIP_REPEATED);
3028
3029     /* register all codecs, demuxers and protocols */
3030     avcodec_register_all();
3031 #if CONFIG_AVDEVICE
3032     avdevice_register_all();
3033 #endif
3034 #if CONFIG_AVFILTER
3035     avfilter_register_all();
3036 #endif
3037     av_register_all();
3038
3039     init_opts();
3040
3041     show_banner();
3042
3043     parse_options(argc, argv, options, opt_input_file);
3044
3045     if (!input_filename) {
3046         show_usage();
3047         fprintf(stderr, "An input file must be specified\n");
3048         fprintf(stderr, "Use -h to get full help or, even better, run 'man ffplay'\n");
3049         exit(1);
3050     }
3051
3052     if (display_disable) {
3053         video_disable = 1;
3054     }
3055     flags = SDL_INIT_VIDEO | SDL_INIT_AUDIO | SDL_INIT_TIMER;
3056 #if !defined(__MINGW32__) && !defined(__APPLE__)
3057     flags |= SDL_INIT_EVENTTHREAD; /* Not supported on Windows or Mac OS X */
3058 #endif
3059     if (SDL_Init (flags)) {
3060         fprintf(stderr, "Could not initialize SDL - %s\n", SDL_GetError());
3061         exit(1);
3062     }
3063
3064     if (!display_disable) {
3065 #if HAVE_SDL_VIDEO_SIZE
3066         const SDL_VideoInfo *vi = SDL_GetVideoInfo();
3067         fs_screen_width = vi->current_w;
3068         fs_screen_height = vi->current_h;
3069 #endif
3070     }
3071
3072     SDL_EventState(SDL_ACTIVEEVENT, SDL_IGNORE);
3073     SDL_EventState(SDL_SYSWMEVENT, SDL_IGNORE);
3074     SDL_EventState(SDL_USEREVENT, SDL_IGNORE);
3075
3076     av_init_packet(&flush_pkt);
3077     flush_pkt.data= "FLUSH";
3078
3079     cur_stream = stream_open(input_filename, file_iformat);
3080
3081     event_loop();
3082
3083     /* never returns */
3084
3085     return 0;
3086 }