/* Source: git.sesse.net Git mirror — ffmpeg / ffplay.c */
/* Commit message: "Add another missing file from r24799." */
1 /*
2  * FFplay : Simple Media Player based on the FFmpeg libraries
3  * Copyright (c) 2003 Fabrice Bellard
4  *
5  * This file is part of FFmpeg.
6  *
7  * FFmpeg is free software; you can redistribute it and/or
8  * modify it under the terms of the GNU Lesser General Public
9  * License as published by the Free Software Foundation; either
10  * version 2.1 of the License, or (at your option) any later version.
11  *
12  * FFmpeg is distributed in the hope that it will be useful,
13  * but WITHOUT ANY WARRANTY; without even the implied warranty of
14  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
15  * Lesser General Public License for more details.
16  *
17  * You should have received a copy of the GNU Lesser General Public
18  * License along with FFmpeg; if not, write to the Free Software
19  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
20  */
21
22 #include "config.h"
23 #include <inttypes.h>
24 #include <math.h>
25 #include <limits.h>
26 #include "libavutil/avstring.h"
27 #include "libavutil/colorspace.h"
28 #include "libavutil/pixdesc.h"
29 #include "libavcore/parseutils.h"
30 #include "libavformat/avformat.h"
31 #include "libavdevice/avdevice.h"
32 #include "libswscale/swscale.h"
33 #include "libavcodec/audioconvert.h"
34 #include "libavcodec/opt.h"
35 #include "libavcodec/avfft.h"
36
37 #if CONFIG_AVFILTER
38 # include "libavfilter/avfilter.h"
39 # include "libavfilter/avfiltergraph.h"
40 # include "libavfilter/graphparser.h"
41 #endif
42
43 #include "cmdutils.h"
44
45 #include <SDL.h>
46 #include <SDL_thread.h>
47
48 #ifdef __MINGW32__
49 #undef main /* We don't want SDL to override our main() */
50 #endif
51
52 #include <unistd.h>
53 #include <assert.h>
54
55 const char program_name[] = "FFplay";
56 const int program_birth_year = 2003;
57
58 //#define DEBUG_SYNC
59
60 #define MAX_QUEUE_SIZE (15 * 1024 * 1024)
61 #define MIN_AUDIOQ_SIZE (20 * 16 * 1024)
62 #define MIN_FRAMES 5
63
64 /* SDL audio buffer size, in samples. Should be small to have precise
65    A/V sync as SDL does not have hardware buffer fullness info. */
66 #define SDL_AUDIO_BUFFER_SIZE 1024
67
68 /* no AV sync correction is done if below the AV sync threshold */
69 #define AV_SYNC_THRESHOLD 0.01
70 /* no AV correction is done if too big error */
71 #define AV_NOSYNC_THRESHOLD 10.0
72
73 #define FRAME_SKIP_FACTOR 0.05
74
75 /* maximum audio speed change to get correct sync */
76 #define SAMPLE_CORRECTION_PERCENT_MAX 10
77
78 /* we use about AUDIO_DIFF_AVG_NB A-V differences to make the average */
79 #define AUDIO_DIFF_AVG_NB   20
80
81 /* NOTE: the size must be big enough to compensate the hardware audio buffersize size */
82 #define SAMPLE_ARRAY_SIZE (2*65536)
83
84 static int sws_flags = SWS_BICUBIC;
85
/* Thread-safe FIFO of demuxed AVPackets, shared between the demuxer
 * thread (producer) and one decoder thread (consumer). */
typedef struct PacketQueue {
    AVPacketList *first_pkt, *last_pkt; /* singly-linked list head / tail */
    int nb_packets;     /* number of packets currently queued */
    int size;           /* total of payload bytes plus list-node overhead */
    int abort_request;  /* set by packet_queue_abort() to wake and fail consumers */
    SDL_mutex *mutex;   /* protects all fields above */
    SDL_cond *cond;     /* signalled on put and on abort */
} PacketQueue;
94
95 #define VIDEO_PICTURE_QUEUE_SIZE 2
96 #define SUBPICTURE_QUEUE_SIZE 4
97
/* One entry of the decoded-picture queue: an SDL YUV overlay plus the
 * timing information needed to display it at the right moment. */
typedef struct VideoPicture {
    double pts;                                  ///<presentation time stamp for this picture
    double target_clock;                         ///<av_gettime() time at which this should be displayed ideally
    int64_t pos;                                 ///<byte position in file
    SDL_Overlay *bmp;                            /* SDL overlay holding the pixels; NULL until allocated */
    int width, height; /* source height & width */
    int allocated;                               /* nonzero once bmp has been (re)allocated for this size */
    enum PixelFormat pix_fmt;                    /* pixel format the picture was produced in */

#if CONFIG_AVFILTER
    AVFilterBufferRef *picref;                   /* filtered frame reference owning the picture data */
#endif
} VideoPicture;
111
/* One entry of the subtitle queue: a decoded subtitle and its display time. */
typedef struct SubPicture {
    double pts; /* presentation time stamp for this picture */
    AVSubtitle sub; /* decoded subtitle rectangles; freed with avsubtitle_free() */
} SubPicture;
116
/* Master clock selection: which stream (or external source) A/V sync follows. */
enum {
    AV_SYNC_AUDIO_MASTER, /* default choice */
    AV_SYNC_VIDEO_MASTER,
    AV_SYNC_EXTERNAL_CLOCK, /* synchronize to an external clock */
};
122
/* All per-file playback state: demuxer, the three stream pipelines
 * (audio, video, subtitle), their queues, clocks and display geometry.
 * One instance exists per opened input (see cur_stream). */
typedef struct VideoState {
    SDL_Thread *parse_tid;      /* demuxer (read) thread */
    SDL_Thread *video_tid;      /* video decoder thread */
    SDL_Thread *refresh_tid;    /* thread that pushes FF_REFRESH_EVENTs */
    AVInputFormat *iformat;     /* forced input format, or NULL for autodetect */
    int no_background;
    int abort_request;          /* tells all threads to quit */
    int paused;
    int last_paused;            /* previous pause state, to detect transitions */
    int seek_req;               /* a seek has been requested by the user */
    int seek_flags;             /* AVSEEK_FLAG_* for the pending seek */
    int64_t seek_pos;           /* seek target */
    int64_t seek_rel;           /* relative seek distance (for byte seeking bounds) */
    int read_pause_return;      /* result of av_read_pause(), checked on resume */
    AVFormatContext *ic;        /* demuxer context */
    int dtg_active_format;

    int audio_stream;           /* index of the audio stream, or -1 */

    int av_sync_type;           /* one of AV_SYNC_* above */
    double external_clock; /* external clock base */
    int64_t external_clock_time;

    /* audio clock and A/V drift estimation */
    double audio_clock;
    double audio_diff_cum; /* used for AV difference average computation */
    double audio_diff_avg_coef;
    double audio_diff_threshold;
    int audio_diff_avg_count;
    AVStream *audio_st;
    PacketQueue audioq;
    int audio_hw_buf_size;      /* SDL audio hardware buffer size, bytes */
    /* samples output by the codec. we reserve more space for avsync
       compensation */
    DECLARE_ALIGNED(16,uint8_t,audio_buf1)[(AVCODEC_MAX_AUDIO_FRAME_SIZE * 3) / 2];
    DECLARE_ALIGNED(16,uint8_t,audio_buf2)[(AVCODEC_MAX_AUDIO_FRAME_SIZE * 3) / 2];
    uint8_t *audio_buf;         /* points at audio_buf1 or audio_buf2 */
    unsigned int audio_buf_size; /* in bytes */
    int audio_buf_index; /* in bytes */
    AVPacket audio_pkt_temp;    /* current packet being consumed by the decoder */
    AVPacket audio_pkt;
    enum SampleFormat audio_src_fmt; /* sample format last seen from the decoder */
    AVAudioConvert *reformat_ctx;    /* converter to S16 when decoder output differs */

    int show_audio; /* if true, display audio samples */
    int16_t sample_array[SAMPLE_ARRAY_SIZE]; /* ring buffer of recent samples for visualization */
    int sample_array_index;
    int last_i_start;           /* visualization start index remembered while paused */
    RDFTContext *rdft;          /* FFT context for the spectrum display mode */
    int rdft_bits;
    FFTSample *rdft_data;
    int xpos;                   /* current column of the scrolling spectrogram */

    SDL_Thread *subtitle_tid;   /* subtitle decoder thread */
    int subtitle_stream;
    int subtitle_stream_changed;
    AVStream *subtitle_st;
    PacketQueue subtitleq;
    SubPicture subpq[SUBPICTURE_QUEUE_SIZE];
    int subpq_size, subpq_rindex, subpq_windex; /* ring buffer fill/read/write */
    SDL_mutex *subpq_mutex;
    SDL_cond *subpq_cond;

    double frame_timer;
    double frame_last_pts;
    double frame_last_delay;
    double video_clock;                          ///<pts of last decoded frame / predicted pts of next decoded frame
    int video_stream;
    AVStream *video_st;
    PacketQueue videoq;
    double video_current_pts;                    ///<current displayed pts (different from video_clock if frame fifos are used)
    double video_current_pts_drift;              ///<video_current_pts - time (av_gettime) at which we updated video_current_pts - used to have running video pts
    int64_t video_current_pos;                   ///<current displayed file pos
    VideoPicture pictq[VIDEO_PICTURE_QUEUE_SIZE];
    int pictq_size, pictq_rindex, pictq_windex;  /* picture ring buffer fill/read/write */
    SDL_mutex *pictq_mutex;
    SDL_cond *pictq_cond;
#if !CONFIG_AVFILTER
    struct SwsContext *img_convert_ctx;          /* swscale context for pixel format conversion */
#endif

    //    QETimer *video_timer;
    char filename[1024];
    int width, height, xleft, ytop;              /* current display rectangle inside the SDL surface */

    /* counters used to decide whether pts or dts timestamps are trustworthy */
    int64_t faulty_pts;
    int64_t faulty_dts;
    int64_t last_dts_for_fault_detection;
    int64_t last_pts_for_fault_detection;

#if CONFIG_AVFILTER
    AVFilterContext *out_video_filter;          ///<the last filter in the video chain
#endif

    /* frame-dropping state: skip decoding when playback is behind */
    float skip_frames;
    float skip_frames_index;
    int refresh;                                /* a refresh event is pending */
} VideoState;
220
/* forward declarations */
static void show_help(void);
static int audio_write_get_buf_size(VideoState *is);

/* options specified by the user */
static AVInputFormat *file_iformat;
static const char *input_filename;
static const char *window_title;
static int fs_screen_width;
static int fs_screen_height;
static int screen_width = 0;
static int screen_height = 0;
static int frame_width = 0;
static int frame_height = 0;
static enum PixelFormat frame_pix_fmt = PIX_FMT_NONE;
static int audio_disable;
static int video_disable;
/* per-media-type wanted stream index; -1 means auto-select */
static int wanted_stream[AVMEDIA_TYPE_NB]={
    [AVMEDIA_TYPE_AUDIO]=-1,
    [AVMEDIA_TYPE_VIDEO]=-1,
    [AVMEDIA_TYPE_SUBTITLE]=-1,
};
static int seek_by_bytes=-1;  /* -1: decide automatically from the container */
static int display_disable;
static int show_status = 1;
static int av_sync_type = AV_SYNC_AUDIO_MASTER;
static int64_t start_time = AV_NOPTS_VALUE;
static int64_t duration = AV_NOPTS_VALUE;
static int debug = 0;
static int debug_mv = 0;
static int step = 0;
static int thread_count = 1;
static int workaround_bugs = 1;
static int fast = 0;
static int genpts = 0;
static int lowres = 0;
static int idct = FF_IDCT_AUTO;
static enum AVDiscard skip_frame= AVDISCARD_DEFAULT;
static enum AVDiscard skip_idct= AVDISCARD_DEFAULT;
static enum AVDiscard skip_loop_filter= AVDISCARD_DEFAULT;
static int error_recognition = FF_ER_CAREFUL;
static int error_concealment = 3;
static int decoder_reorder_pts= -1;  /* -1: auto-detect which timestamp to trust */
static int autoexit;
static int exit_on_keydown;
static int exit_on_mousedown;
static int loop=1;
static int framedrop=1;

static int rdftspeed=20;  /* spectrum-display refresh period, ms */
#if CONFIG_AVFILTER
static char *vfilters = NULL;
#endif

/* current context */
static int is_full_screen;
static VideoState *cur_stream;
static int64_t audio_callback_time;

/* sentinel packet queued on seek to tell decoders to flush their state */
static AVPacket flush_pkt;

#define FF_ALLOC_EVENT   (SDL_USEREVENT)
#define FF_REFRESH_EVENT (SDL_USEREVENT + 1)
#define FF_QUIT_EVENT    (SDL_USEREVENT + 2)

static SDL_Surface *screen;

static int packet_queue_put(PacketQueue *q, AVPacket *pkt);
288
289 /* packet queue handling */
290 static void packet_queue_init(PacketQueue *q)
291 {
292     memset(q, 0, sizeof(PacketQueue));
293     q->mutex = SDL_CreateMutex();
294     q->cond = SDL_CreateCond();
295     packet_queue_put(q, &flush_pkt);
296 }
297
298 static void packet_queue_flush(PacketQueue *q)
299 {
300     AVPacketList *pkt, *pkt1;
301
302     SDL_LockMutex(q->mutex);
303     for(pkt = q->first_pkt; pkt != NULL; pkt = pkt1) {
304         pkt1 = pkt->next;
305         av_free_packet(&pkt->pkt);
306         av_freep(&pkt);
307     }
308     q->last_pkt = NULL;
309     q->first_pkt = NULL;
310     q->nb_packets = 0;
311     q->size = 0;
312     SDL_UnlockMutex(q->mutex);
313 }
314
315 static void packet_queue_end(PacketQueue *q)
316 {
317     packet_queue_flush(q);
318     SDL_DestroyMutex(q->mutex);
319     SDL_DestroyCond(q->cond);
320 }
321
322 static int packet_queue_put(PacketQueue *q, AVPacket *pkt)
323 {
324     AVPacketList *pkt1;
325
326     /* duplicate the packet */
327     if (pkt!=&flush_pkt && av_dup_packet(pkt) < 0)
328         return -1;
329
330     pkt1 = av_malloc(sizeof(AVPacketList));
331     if (!pkt1)
332         return -1;
333     pkt1->pkt = *pkt;
334     pkt1->next = NULL;
335
336
337     SDL_LockMutex(q->mutex);
338
339     if (!q->last_pkt)
340
341         q->first_pkt = pkt1;
342     else
343         q->last_pkt->next = pkt1;
344     q->last_pkt = pkt1;
345     q->nb_packets++;
346     q->size += pkt1->pkt.size + sizeof(*pkt1);
347     /* XXX: should duplicate packet data in DV case */
348     SDL_CondSignal(q->cond);
349
350     SDL_UnlockMutex(q->mutex);
351     return 0;
352 }
353
354 static void packet_queue_abort(PacketQueue *q)
355 {
356     SDL_LockMutex(q->mutex);
357
358     q->abort_request = 1;
359
360     SDL_CondSignal(q->cond);
361
362     SDL_UnlockMutex(q->mutex);
363 }
364
365 /* return < 0 if aborted, 0 if no packet and > 0 if packet.  */
366 static int packet_queue_get(PacketQueue *q, AVPacket *pkt, int block)
367 {
368     AVPacketList *pkt1;
369     int ret;
370
371     SDL_LockMutex(q->mutex);
372
373     for(;;) {
374         if (q->abort_request) {
375             ret = -1;
376             break;
377         }
378
379         pkt1 = q->first_pkt;
380         if (pkt1) {
381             q->first_pkt = pkt1->next;
382             if (!q->first_pkt)
383                 q->last_pkt = NULL;
384             q->nb_packets--;
385             q->size -= pkt1->pkt.size + sizeof(*pkt1);
386             *pkt = pkt1->pkt;
387             av_free(pkt1);
388             ret = 1;
389             break;
390         } else if (!block) {
391             ret = 0;
392             break;
393         } else {
394             SDL_CondWait(q->cond, q->mutex);
395         }
396     }
397     SDL_UnlockMutex(q->mutex);
398     return ret;
399 }
400
401 static inline void fill_rectangle(SDL_Surface *screen,
402                                   int x, int y, int w, int h, int color)
403 {
404     SDL_Rect rect;
405     rect.x = x;
406     rect.y = y;
407     rect.w = w;
408     rect.h = h;
409     SDL_FillRect(screen, &rect, color);
410 }
411
#if 0
/* draw only the border of a rectangle */
/* NOTE: disabled code — kept for reference; fills the four strips of the
 * display area that surround the (x, y, w, h) rectangle with `color`,
 * clamping each strip width/height to stay inside the display. */
void fill_border(VideoState *s, int x, int y, int w, int h, int color)
{
    int w1, w2, h1, h2;

    /* fill the background */
    w1 = x;                        /* left strip width */
    if (w1 < 0)
        w1 = 0;
    w2 = s->width - (x + w);       /* right strip width */
    if (w2 < 0)
        w2 = 0;
    h1 = y;                        /* top strip height */
    if (h1 < 0)
        h1 = 0;
    h2 = s->height - (y + h);      /* bottom strip height */
    if (h2 < 0)
        h2 = 0;
    fill_rectangle(screen,
                   s->xleft, s->ytop,
                   w1, s->height,
                   color);
    fill_rectangle(screen,
                   s->xleft + s->width - w2, s->ytop,
                   w2, s->height,
                   color);
    fill_rectangle(screen,
                   s->xleft + w1, s->ytop,
                   s->width - w1 - w2, h1,
                   color);
    fill_rectangle(screen,
                   s->xleft + w1, s->ytop + s->height - h2,
                   s->width - w1 - w2, h2,
                   color);
}
#endif
449
/* Blend newp over oldp with alpha a (0..255); s is a left-shift applied to
 * oldp so that accumulated multi-sample values (already scaled by 2^s) can
 * be blended without a separate division. */
#define ALPHA_BLEND(a, oldp, newp, s)\
((((oldp << s) * (255 - (a))) + (newp * (a))) / (255 << s))

/* Unpack a packed 32-bit ARGB value from s into r, g, b, a. */
#define RGBA_IN(r, g, b, a, s)\
{\
    unsigned int v = ((const uint32_t *)(s))[0];\
    a = (v >> 24) & 0xff;\
    r = (v >> 16) & 0xff;\
    g = (v >> 8) & 0xff;\
    b = v & 0xff;\
}

/* Look up the palette entry for the 8-bit index at s and unpack it as
 * packed (a<<24)|(y<<16)|(u<<8)|v into y, u, v, a. */
#define YUVA_IN(y, u, v, a, s, pal)\
{\
    unsigned int val = ((const uint32_t *)(pal))[*(const uint8_t*)(s)];\
    a = (val >> 24) & 0xff;\
    y = (val >> 16) & 0xff;\
    u = (val >> 8) & 0xff;\
    v = val & 0xff;\
}

/* Pack y, u, v, a into a 32-bit word at d (inverse of YUVA_IN). */
#define YUVA_OUT(d, y, u, v, a)\
{\
    ((uint32_t *)(d))[0] = (a << 24) | (y << 16) | (u << 8) | v;\
}

/* bytes per source pixel (palettized subtitle bitmaps: 1 byte index) */
#define BPP 1
478
479 static void blend_subrect(AVPicture *dst, const AVSubtitleRect *rect, int imgw, int imgh)
480 {
481     int wrap, wrap3, width2, skip2;
482     int y, u, v, a, u1, v1, a1, w, h;
483     uint8_t *lum, *cb, *cr;
484     const uint8_t *p;
485     const uint32_t *pal;
486     int dstx, dsty, dstw, dsth;
487
488     dstw = av_clip(rect->w, 0, imgw);
489     dsth = av_clip(rect->h, 0, imgh);
490     dstx = av_clip(rect->x, 0, imgw - dstw);
491     dsty = av_clip(rect->y, 0, imgh - dsth);
492     lum = dst->data[0] + dsty * dst->linesize[0];
493     cb = dst->data[1] + (dsty >> 1) * dst->linesize[1];
494     cr = dst->data[2] + (dsty >> 1) * dst->linesize[2];
495
496     width2 = ((dstw + 1) >> 1) + (dstx & ~dstw & 1);
497     skip2 = dstx >> 1;
498     wrap = dst->linesize[0];
499     wrap3 = rect->pict.linesize[0];
500     p = rect->pict.data[0];
501     pal = (const uint32_t *)rect->pict.data[1];  /* Now in YCrCb! */
502
503     if (dsty & 1) {
504         lum += dstx;
505         cb += skip2;
506         cr += skip2;
507
508         if (dstx & 1) {
509             YUVA_IN(y, u, v, a, p, pal);
510             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
511             cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
512             cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
513             cb++;
514             cr++;
515             lum++;
516             p += BPP;
517         }
518         for(w = dstw - (dstx & 1); w >= 2; w -= 2) {
519             YUVA_IN(y, u, v, a, p, pal);
520             u1 = u;
521             v1 = v;
522             a1 = a;
523             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
524
525             YUVA_IN(y, u, v, a, p + BPP, pal);
526             u1 += u;
527             v1 += v;
528             a1 += a;
529             lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
530             cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
531             cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
532             cb++;
533             cr++;
534             p += 2 * BPP;
535             lum += 2;
536         }
537         if (w) {
538             YUVA_IN(y, u, v, a, p, pal);
539             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
540             cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
541             cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
542             p++;
543             lum++;
544         }
545         p += wrap3 - dstw * BPP;
546         lum += wrap - dstw - dstx;
547         cb += dst->linesize[1] - width2 - skip2;
548         cr += dst->linesize[2] - width2 - skip2;
549     }
550     for(h = dsth - (dsty & 1); h >= 2; h -= 2) {
551         lum += dstx;
552         cb += skip2;
553         cr += skip2;
554
555         if (dstx & 1) {
556             YUVA_IN(y, u, v, a, p, pal);
557             u1 = u;
558             v1 = v;
559             a1 = a;
560             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
561             p += wrap3;
562             lum += wrap;
563             YUVA_IN(y, u, v, a, p, pal);
564             u1 += u;
565             v1 += v;
566             a1 += a;
567             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
568             cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
569             cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
570             cb++;
571             cr++;
572             p += -wrap3 + BPP;
573             lum += -wrap + 1;
574         }
575         for(w = dstw - (dstx & 1); w >= 2; w -= 2) {
576             YUVA_IN(y, u, v, a, p, pal);
577             u1 = u;
578             v1 = v;
579             a1 = a;
580             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
581
582             YUVA_IN(y, u, v, a, p + BPP, pal);
583             u1 += u;
584             v1 += v;
585             a1 += a;
586             lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
587             p += wrap3;
588             lum += wrap;
589
590             YUVA_IN(y, u, v, a, p, pal);
591             u1 += u;
592             v1 += v;
593             a1 += a;
594             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
595
596             YUVA_IN(y, u, v, a, p + BPP, pal);
597             u1 += u;
598             v1 += v;
599             a1 += a;
600             lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
601
602             cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 2);
603             cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 2);
604
605             cb++;
606             cr++;
607             p += -wrap3 + 2 * BPP;
608             lum += -wrap + 2;
609         }
610         if (w) {
611             YUVA_IN(y, u, v, a, p, pal);
612             u1 = u;
613             v1 = v;
614             a1 = a;
615             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
616             p += wrap3;
617             lum += wrap;
618             YUVA_IN(y, u, v, a, p, pal);
619             u1 += u;
620             v1 += v;
621             a1 += a;
622             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
623             cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
624             cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
625             cb++;
626             cr++;
627             p += -wrap3 + BPP;
628             lum += -wrap + 1;
629         }
630         p += wrap3 + (wrap3 - dstw * BPP);
631         lum += wrap + (wrap - dstw - dstx);
632         cb += dst->linesize[1] - width2 - skip2;
633         cr += dst->linesize[2] - width2 - skip2;
634     }
635     /* handle odd height */
636     if (h) {
637         lum += dstx;
638         cb += skip2;
639         cr += skip2;
640
641         if (dstx & 1) {
642             YUVA_IN(y, u, v, a, p, pal);
643             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
644             cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
645             cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
646             cb++;
647             cr++;
648             lum++;
649             p += BPP;
650         }
651         for(w = dstw - (dstx & 1); w >= 2; w -= 2) {
652             YUVA_IN(y, u, v, a, p, pal);
653             u1 = u;
654             v1 = v;
655             a1 = a;
656             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
657
658             YUVA_IN(y, u, v, a, p + BPP, pal);
659             u1 += u;
660             v1 += v;
661             a1 += a;
662             lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
663             cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u, 1);
664             cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v, 1);
665             cb++;
666             cr++;
667             p += 2 * BPP;
668             lum += 2;
669         }
670         if (w) {
671             YUVA_IN(y, u, v, a, p, pal);
672             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
673             cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
674             cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
675         }
676     }
677 }
678
/* Release the decoded subtitle owned by a SubPicture queue entry. */
static void free_subpicture(SubPicture *sp)
{
    avsubtitle_free(&sp->sub);
}
683
/* Display the picture at the read index of the picture queue:
 * determine its display aspect ratio, blend any pending subtitle onto
 * the overlay, compute a letterboxed rectangle inside the window, and
 * hand the overlay to SDL for display. Does nothing if the overlay has
 * not been allocated yet. */
static void video_image_display(VideoState *is)
{
    VideoPicture *vp;
    SubPicture *sp;
    AVPicture pict;
    float aspect_ratio;
    int width, height, x, y;
    SDL_Rect rect;
    int i;

    vp = &is->pictq[is->pictq_rindex];
    if (vp->bmp) {
#if CONFIG_AVFILTER
         /* sample (pixel) aspect ratio comes from the filtered frame */
         if (vp->picref->video->pixel_aspect.num == 0)
             aspect_ratio = 0;
         else
             aspect_ratio = av_q2d(vp->picref->video->pixel_aspect);
#else

        /* XXX: use variable in the frame */
        if (is->video_st->sample_aspect_ratio.num)
            aspect_ratio = av_q2d(is->video_st->sample_aspect_ratio);
        else if (is->video_st->codec->sample_aspect_ratio.num)
            aspect_ratio = av_q2d(is->video_st->codec->sample_aspect_ratio);
        else
            aspect_ratio = 0;
#endif
        if (aspect_ratio <= 0.0)
            aspect_ratio = 1.0;
        /* convert sample aspect ratio to display aspect ratio */
        aspect_ratio *= (float)vp->width / (float)vp->height;
        /* if an active format is indicated, then it overrides the
           mpeg format */
#if 0
        if (is->video_st->codec->dtg_active_format != is->dtg_active_format) {
            is->dtg_active_format = is->video_st->codec->dtg_active_format;
            printf("dtg_active_format=%d\n", is->dtg_active_format);
        }
#endif
#if 0
        switch(is->video_st->codec->dtg_active_format) {
        case FF_DTG_AFD_SAME:
        default:
            /* nothing to do */
            break;
        case FF_DTG_AFD_4_3:
            aspect_ratio = 4.0 / 3.0;
            break;
        case FF_DTG_AFD_16_9:
            aspect_ratio = 16.0 / 9.0;
            break;
        case FF_DTG_AFD_14_9:
            aspect_ratio = 14.0 / 9.0;
            break;
        case FF_DTG_AFD_4_3_SP_14_9:
            aspect_ratio = 14.0 / 9.0;
            break;
        case FF_DTG_AFD_16_9_SP_14_9:
            aspect_ratio = 14.0 / 9.0;
            break;
        case FF_DTG_AFD_SP_4_3:
            aspect_ratio = 4.0 / 3.0;
            break;
        }
#endif

        /* blend the current subtitle (if due) directly into the overlay */
        if (is->subtitle_st)
        {
            if (is->subpq_size > 0)
            {
                sp = &is->subpq[is->subpq_rindex];

                if (vp->pts >= sp->pts + ((float) sp->sub.start_display_time / 1000))
                {
                    SDL_LockYUVOverlay (vp->bmp);

                    /* NOTE: SDL overlay plane order is Y, V, U — hence the
                       swapped [1]/[2] indices below */
                    pict.data[0] = vp->bmp->pixels[0];
                    pict.data[1] = vp->bmp->pixels[2];
                    pict.data[2] = vp->bmp->pixels[1];

                    pict.linesize[0] = vp->bmp->pitches[0];
                    pict.linesize[1] = vp->bmp->pitches[2];
                    pict.linesize[2] = vp->bmp->pitches[1];

                    for (i = 0; i < sp->sub.num_rects; i++)
                        blend_subrect(&pict, sp->sub.rects[i],
                                      vp->bmp->w, vp->bmp->h);

                    SDL_UnlockYUVOverlay (vp->bmp);
                }
            }
        }


        /* XXX: we suppose the screen has a 1.0 pixel ratio */
        /* letterbox: fit the picture into the window keeping aspect,
           rounding width/height down to even values */
        height = is->height;
        width = ((int)rint(height * aspect_ratio)) & ~1;
        if (width > is->width) {
            width = is->width;
            height = ((int)rint(width / aspect_ratio)) & ~1;
        }
        x = (is->width - width) / 2;
        y = (is->height - height) / 2;
        if (!is->no_background) {
            /* fill the background */
            //            fill_border(is, x, y, width, height, QERGB(0x00, 0x00, 0x00));
        } else {
            is->no_background = 0;
        }
        rect.x = is->xleft + x;
        rect.y = is->ytop  + y;
        rect.w = width;
        rect.h = height;
        SDL_DisplayYUVOverlay(vp->bmp, &rect);
    } else {
#if 0
        fill_rectangle(screen,
                       is->xleft, is->ytop, is->width, is->height,
                       QERGB(0x00, 0x00, 0x00));
#endif
    }
}
805
/* Mathematical modulo: remainder of a / b adjusted to always lie in
 * [0, b) for positive b, unlike C's sign-following % operator. */
static inline int compute_mod(int a, int b)
{
    int r = a % b;
    return (r < 0) ? r + b : r;
}
814
815 static void video_audio_display(VideoState *s)
816 {
817     int i, i_start, x, y1, y, ys, delay, n, nb_display_channels;
818     int ch, channels, h, h2, bgcolor, fgcolor;
819     int16_t time_diff;
820     int rdft_bits, nb_freq;
821
822     for(rdft_bits=1; (1<<rdft_bits)<2*s->height; rdft_bits++)
823         ;
824     nb_freq= 1<<(rdft_bits-1);
825
826     /* compute display index : center on currently output samples */
827     channels = s->audio_st->codec->channels;
828     nb_display_channels = channels;
829     if (!s->paused) {
830         int data_used= s->show_audio==1 ? s->width : (2*nb_freq);
831         n = 2 * channels;
832         delay = audio_write_get_buf_size(s);
833         delay /= n;
834
835         /* to be more precise, we take into account the time spent since
836            the last buffer computation */
837         if (audio_callback_time) {
838             time_diff = av_gettime() - audio_callback_time;
839             delay -= (time_diff * s->audio_st->codec->sample_rate) / 1000000;
840         }
841
842         delay += 2*data_used;
843         if (delay < data_used)
844             delay = data_used;
845
846         i_start= x = compute_mod(s->sample_array_index - delay * channels, SAMPLE_ARRAY_SIZE);
847         if(s->show_audio==1){
848             h= INT_MIN;
849             for(i=0; i<1000; i+=channels){
850                 int idx= (SAMPLE_ARRAY_SIZE + x - i) % SAMPLE_ARRAY_SIZE;
851                 int a= s->sample_array[idx];
852                 int b= s->sample_array[(idx + 4*channels)%SAMPLE_ARRAY_SIZE];
853                 int c= s->sample_array[(idx + 5*channels)%SAMPLE_ARRAY_SIZE];
854                 int d= s->sample_array[(idx + 9*channels)%SAMPLE_ARRAY_SIZE];
855                 int score= a-d;
856                 if(h<score && (b^c)<0){
857                     h= score;
858                     i_start= idx;
859                 }
860             }
861         }
862
863         s->last_i_start = i_start;
864     } else {
865         i_start = s->last_i_start;
866     }
867
868     bgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0x00);
869     if(s->show_audio==1){
870         fill_rectangle(screen,
871                        s->xleft, s->ytop, s->width, s->height,
872                        bgcolor);
873
874         fgcolor = SDL_MapRGB(screen->format, 0xff, 0xff, 0xff);
875
876         /* total height for one channel */
877         h = s->height / nb_display_channels;
878         /* graph height / 2 */
879         h2 = (h * 9) / 20;
880         for(ch = 0;ch < nb_display_channels; ch++) {
881             i = i_start + ch;
882             y1 = s->ytop + ch * h + (h / 2); /* position of center line */
883             for(x = 0; x < s->width; x++) {
884                 y = (s->sample_array[i] * h2) >> 15;
885                 if (y < 0) {
886                     y = -y;
887                     ys = y1 - y;
888                 } else {
889                     ys = y1;
890                 }
891                 fill_rectangle(screen,
892                                s->xleft + x, ys, 1, y,
893                                fgcolor);
894                 i += channels;
895                 if (i >= SAMPLE_ARRAY_SIZE)
896                     i -= SAMPLE_ARRAY_SIZE;
897             }
898         }
899
900         fgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0xff);
901
902         for(ch = 1;ch < nb_display_channels; ch++) {
903             y = s->ytop + ch * h;
904             fill_rectangle(screen,
905                            s->xleft, y, s->width, 1,
906                            fgcolor);
907         }
908         SDL_UpdateRect(screen, s->xleft, s->ytop, s->width, s->height);
909     }else{
910         nb_display_channels= FFMIN(nb_display_channels, 2);
911         if(rdft_bits != s->rdft_bits){
912             av_rdft_end(s->rdft);
913             av_free(s->rdft_data);
914             s->rdft = av_rdft_init(rdft_bits, DFT_R2C);
915             s->rdft_bits= rdft_bits;
916             s->rdft_data= av_malloc(4*nb_freq*sizeof(*s->rdft_data));
917         }
918         {
919             FFTSample *data[2];
920             for(ch = 0;ch < nb_display_channels; ch++) {
921                 data[ch] = s->rdft_data + 2*nb_freq*ch;
922                 i = i_start + ch;
923                 for(x = 0; x < 2*nb_freq; x++) {
924                     double w= (x-nb_freq)*(1.0/nb_freq);
925                     data[ch][x]= s->sample_array[i]*(1.0-w*w);
926                     i += channels;
927                     if (i >= SAMPLE_ARRAY_SIZE)
928                         i -= SAMPLE_ARRAY_SIZE;
929                 }
930                 av_rdft_calc(s->rdft, data[ch]);
931             }
932             //least efficient way to do this, we should of course directly access it but its more than fast enough
933             for(y=0; y<s->height; y++){
934                 double w= 1/sqrt(nb_freq);
935                 int a= sqrt(w*sqrt(data[0][2*y+0]*data[0][2*y+0] + data[0][2*y+1]*data[0][2*y+1]));
936                 int b= (nb_display_channels == 2 ) ? sqrt(w*sqrt(data[1][2*y+0]*data[1][2*y+0]
937                        + data[1][2*y+1]*data[1][2*y+1])) : a;
938                 a= FFMIN(a,255);
939                 b= FFMIN(b,255);
940                 fgcolor = SDL_MapRGB(screen->format, a, b, (a+b)/2);
941
942                 fill_rectangle(screen,
943                             s->xpos, s->height-y, 1, 1,
944                             fgcolor);
945             }
946         }
947         SDL_UpdateRect(screen, s->xpos, s->ytop, 1, s->height);
948         s->xpos++;
949         if(s->xpos >= s->width)
950             s->xpos= s->xleft;
951     }
952 }
953
/* (Re)open the SDL video surface. The size is chosen from, in order:
 * fullscreen dimensions, user-forced -x/-y size, the video filter output
 * (or codec dimensions when avfilter is disabled), or a 640x480 default.
 * If a surface of the requested size already exists, nothing is done.
 * Returns 0 on success, -1 if SDL could not set the video mode. */
static int video_open(VideoState *is){
    int flags = SDL_HWSURFACE|SDL_ASYNCBLIT|SDL_HWACCEL;
    int w,h;

    if(is_full_screen) flags |= SDL_FULLSCREEN;
    else               flags |= SDL_RESIZABLE;

    if (is_full_screen && fs_screen_width) {
        w = fs_screen_width;
        h = fs_screen_height;
    } else if(!is_full_screen && screen_width){
        w = screen_width;
        h = screen_height;
#if CONFIG_AVFILTER
    }else if (is->out_video_filter && is->out_video_filter->inputs[0]){
        w = is->out_video_filter->inputs[0]->w;
        h = is->out_video_filter->inputs[0]->h;
#else
    }else if (is->video_st && is->video_st->codec->width){
        w = is->video_st->codec->width;
        h = is->video_st->codec->height;
#endif
    } else {
        w = 640;
        h = 480;
    }
    /* nothing to do if the current surface already matches */
    if(screen && is->width == screen->w && screen->w == w
       && is->height== screen->h && screen->h == h)
        return 0;

#ifndef __APPLE__
    screen = SDL_SetVideoMode(w, h, 0, flags);
#else
    /* setting bits_per_pixel = 0 or 32 causes blank video on OS X */
    screen = SDL_SetVideoMode(w, h, 24, flags);
#endif
    if (!screen) {
        fprintf(stderr, "SDL: could not set video mode - exiting\n");
        return -1;
    }
    if (!window_title)
        window_title = input_filename;
    SDL_WM_SetCaption(window_title, window_title);

    is->width = screen->w;
    is->height = screen->h;

    return 0;
}
1003
1004 /* display the current picture, if any */
1005 static void video_display(VideoState *is)
1006 {
1007     if(!screen)
1008         video_open(cur_stream);
1009     if (is->audio_st && is->show_audio)
1010         video_audio_display(is);
1011     else if (is->video_st)
1012         video_image_display(is);
1013 }
1014
1015 static int refresh_thread(void *opaque)
1016 {
1017     VideoState *is= opaque;
1018     while(!is->abort_request){
1019     SDL_Event event;
1020     event.type = FF_REFRESH_EVENT;
1021     event.user.data1 = opaque;
1022         if(!is->refresh){
1023             is->refresh=1;
1024     SDL_PushEvent(&event);
1025         }
1026         usleep(is->audio_st && is->show_audio ? rdftspeed*1000 : 5000); //FIXME ideally we should wait the correct time but SDLs event passing is so slow it would be silly
1027     }
1028     return 0;
1029 }
1030
1031 /* get the current audio clock value */
1032 static double get_audio_clock(VideoState *is)
1033 {
1034     double pts;
1035     int hw_buf_size, bytes_per_sec;
1036     pts = is->audio_clock;
1037     hw_buf_size = audio_write_get_buf_size(is);
1038     bytes_per_sec = 0;
1039     if (is->audio_st) {
1040         bytes_per_sec = is->audio_st->codec->sample_rate *
1041             2 * is->audio_st->codec->channels;
1042     }
1043     if (bytes_per_sec)
1044         pts -= (double)hw_buf_size / bytes_per_sec;
1045     return pts;
1046 }
1047
1048 /* get the current video clock value */
1049 static double get_video_clock(VideoState *is)
1050 {
1051     if (is->paused) {
1052         return is->video_current_pts;
1053     } else {
1054         return is->video_current_pts_drift + av_gettime() / 1000000.0;
1055     }
1056 }
1057
1058 /* get the current external clock value */
1059 static double get_external_clock(VideoState *is)
1060 {
1061     int64_t ti;
1062     ti = av_gettime();
1063     return is->external_clock + ((ti - is->external_clock_time) * 1e-6);
1064 }
1065
1066 /* get the current master clock value */
1067 static double get_master_clock(VideoState *is)
1068 {
1069     double val;
1070
1071     if (is->av_sync_type == AV_SYNC_VIDEO_MASTER) {
1072         if (is->video_st)
1073             val = get_video_clock(is);
1074         else
1075             val = get_audio_clock(is);
1076     } else if (is->av_sync_type == AV_SYNC_AUDIO_MASTER) {
1077         if (is->audio_st)
1078             val = get_audio_clock(is);
1079         else
1080             val = get_video_clock(is);
1081     } else {
1082         val = get_external_clock(is);
1083     }
1084     return val;
1085 }
1086
1087 /* seek in the stream */
1088 static void stream_seek(VideoState *is, int64_t pos, int64_t rel, int seek_by_bytes)
1089 {
1090     if (!is->seek_req) {
1091         is->seek_pos = pos;
1092         is->seek_rel = rel;
1093         is->seek_flags &= ~AVSEEK_FLAG_BYTE;
1094         if (seek_by_bytes)
1095             is->seek_flags |= AVSEEK_FLAG_BYTE;
1096         is->seek_req = 1;
1097     }
1098 }
1099
/* pause or resume the video; toggles is->paused.
 * On resume, the time spent paused is folded into frame_timer so the
 * next frame is not treated as hopelessly late, and the pts drift is
 * re-anchored to the current wall clock. */
static void stream_pause(VideoState *is)
{
    if (is->paused) {
        /* add the pause duration (wall clock minus frozen video clock) */
        is->frame_timer += av_gettime() / 1000000.0 + is->video_current_pts_drift - is->video_current_pts;
        if(is->read_pause_return != AVERROR(ENOSYS)){
            /* NOTE(review): presumably taken when the demuxer honoured
               av_read_pause(); re-derives the pts from the drift — confirm */
            is->video_current_pts = is->video_current_pts_drift + av_gettime() / 1000000.0;
        }
        /* re-anchor the drift to the current wall clock */
        is->video_current_pts_drift = is->video_current_pts - av_gettime() / 1000000.0;
    }
    is->paused = !is->paused;
}
1112
1113 static double compute_target_time(double frame_current_pts, VideoState *is)
1114 {
1115     double delay, sync_threshold, diff;
1116
1117     /* compute nominal delay */
1118     delay = frame_current_pts - is->frame_last_pts;
1119     if (delay <= 0 || delay >= 10.0) {
1120         /* if incorrect delay, use previous one */
1121         delay = is->frame_last_delay;
1122     } else {
1123         is->frame_last_delay = delay;
1124     }
1125     is->frame_last_pts = frame_current_pts;
1126
1127     /* update delay to follow master synchronisation source */
1128     if (((is->av_sync_type == AV_SYNC_AUDIO_MASTER && is->audio_st) ||
1129          is->av_sync_type == AV_SYNC_EXTERNAL_CLOCK)) {
1130         /* if video is slave, we try to correct big delays by
1131            duplicating or deleting a frame */
1132         diff = get_video_clock(is) - get_master_clock(is);
1133
1134         /* skip or repeat frame. We take into account the
1135            delay to compute the threshold. I still don't know
1136            if it is the best guess */
1137         sync_threshold = FFMAX(AV_SYNC_THRESHOLD, delay);
1138         if (fabs(diff) < AV_NOSYNC_THRESHOLD) {
1139             if (diff <= -sync_threshold)
1140                 delay = 0;
1141             else if (diff >= sync_threshold)
1142                 delay = 2 * delay;
1143         }
1144     }
1145     is->frame_timer += delay;
1146 #if defined(DEBUG_SYNC)
1147     printf("video: delay=%0.3f actual_delay=%0.3f pts=%0.3f A-V=%f\n",
1148             delay, actual_delay, frame_current_pts, -diff);
1149 #endif
1150
1151     return is->frame_timer;
1152 }
1153
/* called to display each frame; driven by FF_REFRESH_EVENT from
 * refresh_thread(). Dequeues and shows the next video picture once its
 * target time has arrived, applies frame dropping when running late,
 * expires subtitles, and periodically prints the status line. */
static void video_refresh_timer(void *opaque)
{
    VideoState *is = opaque;
    VideoPicture *vp;

    SubPicture *sp, *sp2;

    if (is->video_st) {
retry:
        if (is->pictq_size == 0) {
            //nothing to do, no picture to display in the que
        } else {
            double time= av_gettime()/1000000.0;
            double next_target;
            /* dequeue the picture */
            vp = &is->pictq[is->pictq_rindex];

            /* not yet time to display this frame */
            if(time < vp->target_clock)
                return;
            /* update current video pts */
            is->video_current_pts = vp->pts;
            is->video_current_pts_drift = is->video_current_pts - time;
            is->video_current_pos = vp->pos;
            /* estimate when the following frame is due, to decide on dropping */
            if(is->pictq_size > 1){
                VideoPicture *nextvp= &is->pictq[(is->pictq_rindex+1)%VIDEO_PICTURE_QUEUE_SIZE];
                assert(nextvp->target_clock >= vp->target_clock);
                next_target= nextvp->target_clock;
            }else{
                next_target= vp->target_clock + is->video_clock - vp->pts; //FIXME pass durations cleanly
            }
            /* frame dropping: if we are already past the next frame's
               target, raise the decoder-side skip rate, and drop this
               picture outright when we have a replacement queued or are
               more than half a second behind */
            if(framedrop && time > next_target){
                is->skip_frames *= 1.0 + FRAME_SKIP_FACTOR;
                if(is->pictq_size > 1 || time > next_target + 0.5){
                    /* update queue size and signal for next picture */
                    if (++is->pictq_rindex == VIDEO_PICTURE_QUEUE_SIZE)
                        is->pictq_rindex = 0;

                    SDL_LockMutex(is->pictq_mutex);
                    is->pictq_size--;
                    SDL_CondSignal(is->pictq_cond);
                    SDL_UnlockMutex(is->pictq_mutex);
                    goto retry;
                }
            }

            /* subtitle handling: flush everything on a stream change,
               otherwise free subtitles that have expired relative to the
               current video pts */
            if(is->subtitle_st) {
                if (is->subtitle_stream_changed) {
                    SDL_LockMutex(is->subpq_mutex);

                    while (is->subpq_size) {
                        free_subpicture(&is->subpq[is->subpq_rindex]);

                        /* update queue size and signal for next picture */
                        if (++is->subpq_rindex == SUBPICTURE_QUEUE_SIZE)
                            is->subpq_rindex = 0;

                        is->subpq_size--;
                    }
                    is->subtitle_stream_changed = 0;

                    SDL_CondSignal(is->subpq_cond);
                    SDL_UnlockMutex(is->subpq_mutex);
                } else {
                    if (is->subpq_size > 0) {
                        sp = &is->subpq[is->subpq_rindex];

                        if (is->subpq_size > 1)
                            sp2 = &is->subpq[(is->subpq_rindex + 1) % SUBPICTURE_QUEUE_SIZE];
                        else
                            sp2 = NULL;

                        /* drop sp when it has ended, or when the next
                           subtitle's display window has already started */
                        if ((is->video_current_pts > (sp->pts + ((float) sp->sub.end_display_time / 1000)))
                                || (sp2 && is->video_current_pts > (sp2->pts + ((float) sp2->sub.start_display_time / 1000))))
                        {
                            free_subpicture(sp);

                            /* update queue size and signal for next picture */
                            if (++is->subpq_rindex == SUBPICTURE_QUEUE_SIZE)
                                is->subpq_rindex = 0;

                            SDL_LockMutex(is->subpq_mutex);
                            is->subpq_size--;
                            SDL_CondSignal(is->subpq_cond);
                            SDL_UnlockMutex(is->subpq_mutex);
                        }
                    }
                }
            }

            /* display picture */
            video_display(is);

            /* update queue size and signal for next picture */
            if (++is->pictq_rindex == VIDEO_PICTURE_QUEUE_SIZE)
                is->pictq_rindex = 0;

            SDL_LockMutex(is->pictq_mutex);
            is->pictq_size--;
            SDL_CondSignal(is->pictq_cond);
            SDL_UnlockMutex(is->pictq_mutex);
        }
    } else if (is->audio_st) {
        /* draw the next audio frame */

        /* if only audio stream, then display the audio bars (better
           than nothing, just to test the implementation */

        /* display picture */
        video_display(is);
    }
    /* status line, refreshed at most every 30ms */
    if (show_status) {
        static int64_t last_time;
        int64_t cur_time;
        int aqsize, vqsize, sqsize;
        double av_diff;

        cur_time = av_gettime();
        if (!last_time || (cur_time - last_time) >= 30000) {
            aqsize = 0;
            vqsize = 0;
            sqsize = 0;
            if (is->audio_st)
                aqsize = is->audioq.size;
            if (is->video_st)
                vqsize = is->videoq.size;
            if (is->subtitle_st)
                sqsize = is->subtitleq.size;
            av_diff = 0;
            if (is->audio_st && is->video_st)
                av_diff = get_audio_clock(is) - get_video_clock(is);
            printf("%7.2f A-V:%7.3f s:%3.1f aq=%5dKB vq=%5dKB sq=%5dB f=%"PRId64"/%"PRId64"   \r",
                   get_master_clock(is), av_diff, FFMAX(is->skip_frames-1, 0), aqsize / 1024, vqsize / 1024, sqsize, is->faulty_dts, is->faulty_pts);
            fflush(stdout);
            last_time = cur_time;
        }
    }
}
1292
/* allocate a picture (needs to do that in main thread to avoid
   potential locking problems */
/* Runs in the main thread in response to FF_ALLOC_EVENT: (re)creates the
 * SDL YUV overlay for the picture currently at the queue write index,
 * then signals queue_picture(), which is blocked waiting on vp->allocated. */
static void alloc_picture(void *opaque)
{
    VideoState *is = opaque;
    VideoPicture *vp;

    vp = &is->pictq[is->pictq_windex];

    /* drop any previous overlay before creating one with the new size */
    if (vp->bmp)
        SDL_FreeYUVOverlay(vp->bmp);

#if CONFIG_AVFILTER
    if (vp->picref)
        avfilter_unref_buffer(vp->picref);
    vp->picref = NULL;

    /* dimensions come from the filter chain output */
    vp->width   = is->out_video_filter->inputs[0]->w;
    vp->height  = is->out_video_filter->inputs[0]->h;
    vp->pix_fmt = is->out_video_filter->inputs[0]->format;
#else
    /* dimensions come straight from the decoder */
    vp->width   = is->video_st->codec->width;
    vp->height  = is->video_st->codec->height;
    vp->pix_fmt = is->video_st->codec->pix_fmt;
#endif

    vp->bmp = SDL_CreateYUVOverlay(vp->width, vp->height,
                                   SDL_YV12_OVERLAY,
                                   screen);

    /* wake up the video thread waiting in queue_picture() */
    SDL_LockMutex(is->pictq_mutex);
    vp->allocated = 1;
    SDL_CondSignal(is->pictq_cond);
    SDL_UnlockMutex(is->pictq_mutex);
}
1328
/**
 * Copy a decoded frame into the picture queue and stamp it with its
 * target display time.
 *
 * Blocks while the queue is full; when the overlay is missing or has the
 * wrong size, delegates (re)allocation to the main thread via
 * FF_ALLOC_EVENT and waits for it.
 *
 * @param src_frame the decoded (or filtered) frame to queue
 * @param pts the dts of the pkt / pts of the frame and guessed if not known
 * @param pos byte position of the frame's packet in the input file
 * @return 0 on success, -1 if the video queue was aborted
 */
static int queue_picture(VideoState *is, AVFrame *src_frame, double pts, int64_t pos)
{
    VideoPicture *vp;
    int dst_pix_fmt;
#if CONFIG_AVFILTER
    AVPicture pict_src;
#endif
    /* wait until we have space to put a new picture */
    SDL_LockMutex(is->pictq_mutex);

    /* queue full while display is idle: relax the decoder-side skip rate */
    if(is->pictq_size>=VIDEO_PICTURE_QUEUE_SIZE && !is->refresh)
        is->skip_frames= FFMAX(1.0 - FRAME_SKIP_FACTOR, is->skip_frames * (1.0-FRAME_SKIP_FACTOR));

    while (is->pictq_size >= VIDEO_PICTURE_QUEUE_SIZE &&
           !is->videoq.abort_request) {
        SDL_CondWait(is->pictq_cond, is->pictq_mutex);
    }
    SDL_UnlockMutex(is->pictq_mutex);

    if (is->videoq.abort_request)
        return -1;

    vp = &is->pictq[is->pictq_windex];

    /* alloc or resize hardware picture buffer */
    if (!vp->bmp ||
#if CONFIG_AVFILTER
        vp->width  != is->out_video_filter->inputs[0]->w ||
        vp->height != is->out_video_filter->inputs[0]->h) {
#else
        vp->width != is->video_st->codec->width ||
        vp->height != is->video_st->codec->height) {
#endif
        SDL_Event event;

        vp->allocated = 0;

        /* the allocation must be done in the main thread to avoid
           locking problems */
        event.type = FF_ALLOC_EVENT;
        event.user.data1 = is;
        SDL_PushEvent(&event);

        /* wait until the picture is allocated */
        SDL_LockMutex(is->pictq_mutex);
        while (!vp->allocated && !is->videoq.abort_request) {
            SDL_CondWait(is->pictq_cond, is->pictq_mutex);
        }
        SDL_UnlockMutex(is->pictq_mutex);

        if (is->videoq.abort_request)
            return -1;
    }

    /* if the frame is not skipped, then display it */
    if (vp->bmp) {
        AVPicture pict;
#if CONFIG_AVFILTER
        if(vp->picref)
            avfilter_unref_buffer(vp->picref);
        /* take over the filter buffer reference stashed in frame->opaque */
        vp->picref = src_frame->opaque;
#endif

        /* get a pointer on the bitmap */
        SDL_LockYUVOverlay (vp->bmp);

        dst_pix_fmt = PIX_FMT_YUV420P;
        memset(&pict,0,sizeof(AVPicture));
        /* SDL's YV12 overlay stores V before U, hence planes 1 and 2 swapped */
        pict.data[0] = vp->bmp->pixels[0];
        pict.data[1] = vp->bmp->pixels[2];
        pict.data[2] = vp->bmp->pixels[1];

        pict.linesize[0] = vp->bmp->pitches[0];
        pict.linesize[1] = vp->bmp->pitches[2];
        pict.linesize[2] = vp->bmp->pitches[1];

#if CONFIG_AVFILTER
        pict_src.data[0] = src_frame->data[0];
        pict_src.data[1] = src_frame->data[1];
        pict_src.data[2] = src_frame->data[2];

        pict_src.linesize[0] = src_frame->linesize[0];
        pict_src.linesize[1] = src_frame->linesize[1];
        pict_src.linesize[2] = src_frame->linesize[2];

        //FIXME use direct rendering
        av_picture_copy(&pict, &pict_src,
                        vp->pix_fmt, vp->width, vp->height);
#else
        /* convert the frame to YUV420P for the SDL overlay */
        sws_flags = av_get_int(sws_opts, "sws_flags", NULL);
        is->img_convert_ctx = sws_getCachedContext(is->img_convert_ctx,
            vp->width, vp->height, vp->pix_fmt, vp->width, vp->height,
            dst_pix_fmt, sws_flags, NULL, NULL, NULL);
        if (is->img_convert_ctx == NULL) {
            fprintf(stderr, "Cannot initialize the conversion context\n");
            exit(1);
        }
        sws_scale(is->img_convert_ctx, src_frame->data, src_frame->linesize,
                  0, vp->height, pict.data, pict.linesize);
#endif
        /* update the bitmap content */
        SDL_UnlockYUVOverlay(vp->bmp);

        vp->pts = pts;
        vp->pos = pos;

        /* now we can update the picture count */
        if (++is->pictq_windex == VIDEO_PICTURE_QUEUE_SIZE)
            is->pictq_windex = 0;
        SDL_LockMutex(is->pictq_mutex);
        vp->target_clock= compute_target_time(vp->pts, is);

        is->pictq_size++;
        SDL_UnlockMutex(is->pictq_mutex);
    }
    return 0;
}
1450
1451 /**
1452  * compute the exact PTS for the picture if it is omitted in the stream
1453  * @param pts1 the dts of the pkt / pts of the frame
1454  */
1455 static int output_picture2(VideoState *is, AVFrame *src_frame, double pts1, int64_t pos)
1456 {
1457     double frame_delay, pts;
1458
1459     pts = pts1;
1460
1461     if (pts != 0) {
1462         /* update video clock with pts, if present */
1463         is->video_clock = pts;
1464     } else {
1465         pts = is->video_clock;
1466     }
1467     /* update video clock for next frame */
1468     frame_delay = av_q2d(is->video_st->codec->time_base);
1469     /* for MPEG2, the frame can be repeated, so we update the
1470        clock accordingly */
1471     frame_delay += src_frame->repeat_pict * (frame_delay * 0.5);
1472     is->video_clock += frame_delay;
1473
1474 #if defined(DEBUG_SYNC) && 0
1475     printf("frame_type=%c clock=%0.3f pts=%0.3f\n",
1476            av_get_pict_type_char(src_frame->pict_type), pts, pts1);
1477 #endif
1478     return queue_picture(is, src_frame, pts, pos);
1479 }
1480
/* Fetch the next packet from the video queue and decode it.
 * On a flush packet, flushes the decoder, clears pending frame timers,
 * waits for the picture queue to drain and resets the sync state.
 * *pts receives the best-effort presentation timestamp (in stream
 * time_base units), chosen between the frame's reordered pts and the
 * packet dts based on the fault counters and decoder_reorder_pts.
 * Returns 1 when a frame should be displayed, 0 when the frame was
 * skipped (frame dropping) or a flush was handled, -1 on abort. */
static int get_video_frame(VideoState *is, AVFrame *frame, int64_t *pts, AVPacket *pkt)
{
    int len1, got_picture, i;

        if (packet_queue_get(&is->videoq, pkt, 1) < 0)
            return -1;

        if(pkt->data == flush_pkt.data){
            avcodec_flush_buffers(is->video_st->codec);

            SDL_LockMutex(is->pictq_mutex);
            //Make sure there are no long delay timers (ideally we should just flush the que but thats harder)
            for(i=0; i<VIDEO_PICTURE_QUEUE_SIZE; i++){
                is->pictq[i].target_clock= 0;
            }
            while (is->pictq_size && !is->videoq.abort_request) {
                SDL_CondWait(is->pictq_cond, is->pictq_mutex);
            }
            is->video_current_pos= -1;
            SDL_UnlockMutex(is->pictq_mutex);

            /* reset the timestamp-fault detectors and the frame timing */
            is->last_dts_for_fault_detection=
            is->last_pts_for_fault_detection= INT64_MIN;
            is->frame_last_pts= AV_NOPTS_VALUE;
            is->frame_last_delay = 0;
            is->frame_timer = (double)av_gettime() / 1000000.0;
            is->skip_frames= 1;
            is->skip_frames_index= 0;
            return 0;
        }

        /* NOTE: ipts is the PTS of the _first_ picture beginning in
           this packet, if any */
        is->video_st->codec->reordered_opaque= pkt->pts;
        len1 = avcodec_decode_video2(is->video_st->codec,
                                    frame, &got_picture,
                                    pkt);

        /* count non-monotonic dts/pts to decide which timestamp to trust */
        if (got_picture) {
            if(pkt->dts != AV_NOPTS_VALUE){
                is->faulty_dts += pkt->dts <= is->last_dts_for_fault_detection;
                is->last_dts_for_fault_detection= pkt->dts;
            }
            if(frame->reordered_opaque != AV_NOPTS_VALUE){
                is->faulty_pts += frame->reordered_opaque <= is->last_pts_for_fault_detection;
                is->last_pts_for_fault_detection= frame->reordered_opaque;
            }
        }

        /* prefer the reordered pts when forced, when it misbehaves less
           than dts, or when no dts is available; fall back to dts, then 0 */
        if(   (   decoder_reorder_pts==1
               || (decoder_reorder_pts && is->faulty_pts<is->faulty_dts)
               || pkt->dts == AV_NOPTS_VALUE)
           && frame->reordered_opaque != AV_NOPTS_VALUE)
            *pts= frame->reordered_opaque;
        else if(pkt->dts != AV_NOPTS_VALUE)
            *pts= pkt->dts;
        else
            *pts= 0;

//            if (len1 < 0)
//                break;
    /* frame dropping: only pass through every skip_frames-th frame */
    if (got_picture){
        is->skip_frames_index += 1;
        if(is->skip_frames_index >= is->skip_frames){
            is->skip_frames_index -= FFMAX(is->skip_frames, 1.0);
            return 1;
        }

    }
    return 0;
}
1552
1553 #if CONFIG_AVFILTER
/* private state of the ffplay_input source filter */
typedef struct {
    VideoState *is;   /* the player state this source feeds from */
    AVFrame *frame;   /* scratch frame the decoder outputs into */
    int use_dr1;      /* non-zero: decoder renders directly into filter buffers */
} FilterPriv;
1559
/* get_buffer() callback installed on the decoder when DR1 is enabled:
 * hands the decoder a buffer obtained from the filter chain, so decoded
 * data lands directly in a filter buffer (no copy later).
 * Returns 0 on success, -1 if no filter buffer could be obtained. */
static int input_get_buffer(AVCodecContext *codec, AVFrame *pic)
{
    AVFilterContext *ctx = codec->opaque;
    AVFilterBufferRef  *ref;
    int perms = AV_PERM_WRITE;
    int i, w, h, stride[4];
    unsigned edge;

    /* translate the decoder's buffer hints into filter buffer permissions */
    if(pic->buffer_hints & FF_BUFFER_HINTS_VALID) {
        if(pic->buffer_hints & FF_BUFFER_HINTS_READABLE) perms |= AV_PERM_READ;
        if(pic->buffer_hints & FF_BUFFER_HINTS_PRESERVE) perms |= AV_PERM_PRESERVE;
        if(pic->buffer_hints & FF_BUFFER_HINTS_REUSABLE) perms |= AV_PERM_REUSE2;
    }
    if(pic->reference) perms |= AV_PERM_READ | AV_PERM_PRESERVE;

    /* pad the requested size for alignment and the decoder's edge region */
    w = codec->width;
    h = codec->height;
    avcodec_align_dimensions2(codec, &w, &h, stride);
    edge = codec->flags & CODEC_FLAG_EMU_EDGE ? 0 : avcodec_get_edge_width();
    w += edge << 1;
    h += edge << 1;

    if(!(ref = avfilter_get_video_buffer(ctx->outputs[0], perms, w, h)))
        return -1;

    ref->video->w = codec->width;
    ref->video->h = codec->height;
    for(i = 0; i < 4; i ++) {
        /* chroma planes are subsampled: shift the edge offsets accordingly */
        unsigned hshift = (i == 1 || i == 2) ? av_pix_fmt_descriptors[ref->format].log2_chroma_w : 0;
        unsigned vshift = (i == 1 || i == 2) ? av_pix_fmt_descriptors[ref->format].log2_chroma_h : 0;

        if (ref->data[i]) {
            /* point past the edge so the visible area starts at data[i] */
            ref->data[i]    += (edge >> hshift) + ((edge * ref->linesize[i]) >> vshift);
        }
        pic->data[i]     = ref->data[i];
        pic->linesize[i] = ref->linesize[i];
    }
    /* stash the buffer reference so release/reget and the filter can find it */
    pic->opaque = ref;
    pic->age    = INT_MAX;
    pic->type   = FF_BUFFER_TYPE_USER;
    pic->reordered_opaque = codec->reordered_opaque;
    return 0;
}
1603
/* release_buffer() counterpart of input_get_buffer(): forget the data
 * pointers and drop our reference on the underlying filter buffer */
static void input_release_buffer(AVCodecContext *codec, AVFrame *pic)
{
    memset(pic->data, 0, sizeof(pic->data));
    avfilter_unref_buffer(pic->opaque);
}
1609
1610 static int input_reget_buffer(AVCodecContext *codec, AVFrame *pic)
1611 {
1612     AVFilterBufferRef *ref = pic->opaque;
1613
1614     if (pic->data[0] == NULL) {
1615         pic->buffer_hints |= FF_BUFFER_HINTS_READABLE;
1616         return codec->get_buffer(codec, pic);
1617     }
1618
1619     if ((codec->width != ref->video->w) || (codec->height != ref->video->h) ||
1620         (codec->pix_fmt != ref->format)) {
1621         av_log(codec, AV_LOG_ERROR, "Picture properties changed.\n");
1622         return -1;
1623     }
1624
1625     pic->reordered_opaque = codec->reordered_opaque;
1626     return 0;
1627 }
1628
1629 static int input_init(AVFilterContext *ctx, const char *args, void *opaque)
1630 {
1631     FilterPriv *priv = ctx->priv;
1632     AVCodecContext *codec;
1633     if(!opaque) return -1;
1634
1635     priv->is = opaque;
1636     codec    = priv->is->video_st->codec;
1637     codec->opaque = ctx;
1638     if(codec->codec->capabilities & CODEC_CAP_DR1) {
1639         priv->use_dr1 = 1;
1640         codec->get_buffer     = input_get_buffer;
1641         codec->release_buffer = input_release_buffer;
1642         codec->reget_buffer   = input_reget_buffer;
1643     }
1644
1645     priv->frame = avcodec_alloc_frame();
1646
1647     return 0;
1648 }
1649
/* uninit callback: free the scratch frame allocated in input_init() */
static void input_uninit(AVFilterContext *ctx)
{
    FilterPriv *priv = ctx->priv;
    av_free(priv->frame);
}
1655
/* request_frame callback of the ffplay_input source filter: decode until
 * a displayable frame is produced, wrap it in a buffer reference and push
 * it down the link. Returns 0 on success, -1 on decode abort/error. */
static int input_request_frame(AVFilterLink *link)
{
    FilterPriv *priv = link->src->priv;
    AVFilterBufferRef *picref;
    int64_t pts = 0;
    AVPacket pkt;
    int ret;

    /* ret==0 means the frame was skipped: free its packet and try again */
    while (!(ret = get_video_frame(priv->is, priv->frame, &pts, &pkt)))
        av_free_packet(&pkt);
    if (ret < 0)
        return -1;

    if(priv->use_dr1) {
        /* DR1: the decoded data already lives in a filter buffer */
        picref = avfilter_ref_buffer(priv->frame->opaque, ~0);
    } else {
        /* otherwise copy the frame into a fresh filter buffer */
        picref = avfilter_get_video_buffer(link, AV_PERM_WRITE, link->w, link->h);
        av_picture_data_copy(picref->data, picref->linesize,
                             priv->frame->data, priv->frame->linesize,
                             picref->format, link->w, link->h);
    }
    /* NOTE(review): pkt.pos is read below after av_free_packet(); that
       releases only the packet's data, the pos field stays valid — confirm */
    av_free_packet(&pkt);

    picref->pts = pts;
    picref->pos = pkt.pos;
    picref->video->pixel_aspect = priv->is->video_st->codec->sample_aspect_ratio;
    avfilter_start_frame(link, picref);
    avfilter_draw_slice(link, 0, link->h, 1);
    avfilter_end_frame(link);

    return 0;
}
1688
1689 static int input_query_formats(AVFilterContext *ctx)
1690 {
1691     FilterPriv *priv = ctx->priv;
1692     enum PixelFormat pix_fmts[] = {
1693         priv->is->video_st->codec->pix_fmt, PIX_FMT_NONE
1694     };
1695
1696     avfilter_set_common_formats(ctx, avfilter_make_format_list(pix_fmts));
1697     return 0;
1698 }
1699
1700 static int input_config_props(AVFilterLink *link)
1701 {
1702     FilterPriv *priv  = link->src->priv;
1703     AVCodecContext *c = priv->is->video_st->codec;
1704
1705     link->w = c->width;
1706     link->h = c->height;
1707
1708     return 0;
1709 }
1710
/* source filter that feeds decoded frames from the player into the graph;
 * it has no inputs and a single video output pad */
static AVFilter input_filter =
{
    .name      = "ffplay_input",

    .priv_size = sizeof(FilterPriv),

    .init      = input_init,
    .uninit    = input_uninit,

    .query_formats = input_query_formats,

    .inputs    = (AVFilterPad[]) {{ .name = NULL }},
    .outputs   = (AVFilterPad[]) {{ .name = "default",
                                    .type = AVMEDIA_TYPE_VIDEO,
                                    .request_frame = input_request_frame,
                                    .config_props  = input_config_props, },
                                  { .name = NULL }},
};
1729
/* end_frame callback of the sink: intentionally empty — frames are
 * pulled out of the graph by get_filtered_video_frame() instead */
static void output_end_frame(AVFilterLink *link)
{
}
1733
1734 static int output_query_formats(AVFilterContext *ctx)
1735 {
1736     enum PixelFormat pix_fmts[] = { PIX_FMT_YUV420P, PIX_FMT_NONE };
1737
1738     avfilter_set_common_formats(ctx, avfilter_make_format_list(pix_fmts));
1739     return 0;
1740 }
1741
1742 static int get_filtered_video_frame(AVFilterContext *ctx, AVFrame *frame,
1743                                     int64_t *pts, int64_t *pos)
1744 {
1745     AVFilterBufferRef *pic;
1746
1747     if(avfilter_request_frame(ctx->inputs[0]))
1748         return -1;
1749     if(!(pic = ctx->inputs[0]->cur_buf))
1750         return -1;
1751     ctx->inputs[0]->cur_buf = NULL;
1752
1753     frame->opaque = pic;
1754     *pts          = pic->pts;
1755     *pos          = pic->pos;
1756
1757     memcpy(frame->data,     pic->data,     sizeof(frame->data));
1758     memcpy(frame->linesize, pic->linesize, sizeof(frame->linesize));
1759
1760     return 1;
1761 }
1762
/* sink filter at the end of the graph; frames are extracted from its
 * input link by get_filtered_video_frame() */
static AVFilter output_filter =
{
    .name      = "ffplay_output",

    .query_formats = output_query_formats,

    .inputs    = (AVFilterPad[]) {{ .name          = "default",
                                    .type          = AVMEDIA_TYPE_VIDEO,
                                    .end_frame     = output_end_frame,
                                    .min_perms     = AV_PERM_READ, },
                                  { .name = NULL }},
    .outputs   = (AVFilterPad[]) {{ .name = NULL }},
};
1776 #endif  /* CONFIG_AVFILTER */
1777
/* Video decoding thread: optionally builds the filter graph
 * (src -> [user filters] -> sink), then loops fetching decoded (or
 * filtered) frames and queueing them for display until aborted. */
static int video_thread(void *arg)
{
    VideoState *is = arg;
    AVFrame *frame= avcodec_alloc_frame();
    int64_t pts_int;
    double pts;
    int ret;

#if CONFIG_AVFILTER
    int64_t pos;
    char sws_flags_str[128];
    AVFilterContext *filt_src = NULL, *filt_out = NULL;
    AVFilterGraph *graph = av_mallocz(sizeof(AVFilterGraph));
    /* propagate the global scaler flags to any auto-inserted scalers */
    snprintf(sws_flags_str, sizeof(sws_flags_str), "flags=%d", sws_flags);
    graph->scale_sws_opts = av_strdup(sws_flags_str);

    if (avfilter_open(&filt_src, &input_filter,  "src") < 0) goto the_end;
    if (avfilter_open(&filt_out, &output_filter, "out") < 0) goto the_end;

    if(avfilter_init_filter(filt_src, NULL, is))             goto the_end;
    if(avfilter_init_filter(filt_out, NULL, frame))          goto the_end;


    if(vfilters) {
        /* splice the user-supplied filter description between src and out */
        AVFilterInOut *outputs = av_malloc(sizeof(AVFilterInOut));
        AVFilterInOut *inputs  = av_malloc(sizeof(AVFilterInOut));

        outputs->name    = av_strdup("in");
        outputs->filter  = filt_src;
        outputs->pad_idx = 0;
        outputs->next    = NULL;

        inputs->name    = av_strdup("out");
        inputs->filter  = filt_out;
        inputs->pad_idx = 0;
        inputs->next    = NULL;

        if (avfilter_graph_parse(graph, vfilters, inputs, outputs, NULL) < 0)
            goto the_end;
        av_freep(&vfilters);
    } else {
        /* no user filters: connect the source directly to the sink */
        if(avfilter_link(filt_src, 0, filt_out, 0) < 0)          goto the_end;
    }
    avfilter_graph_add_filter(graph, filt_src);
    avfilter_graph_add_filter(graph, filt_out);

    if(avfilter_graph_check_validity(graph, NULL))           goto the_end;
    if(avfilter_graph_config_formats(graph, NULL))           goto the_end;
    if(avfilter_graph_config_links(graph, NULL))             goto the_end;

    is->out_video_filter = filt_out;
#endif

    for(;;) {
#if !CONFIG_AVFILTER
        AVPacket pkt;
#endif
        while (is->paused && !is->videoq.abort_request)
            SDL_Delay(10);
#if CONFIG_AVFILTER
        ret = get_filtered_video_frame(filt_out, frame, &pts_int, &pos);
#else
        ret = get_video_frame(is, frame, &pts_int, &pkt);
#endif

        if (ret < 0) goto the_end;

        /* ret == 0: frame was dropped or a flush was handled */
        if (!ret)
            continue;

        /* convert the timestamp from stream time_base units to seconds */
        pts = pts_int*av_q2d(is->video_st->time_base);

#if CONFIG_AVFILTER
        ret = output_picture2(is, frame, pts, pos);
#else
        ret = output_picture2(is, frame, pts,  pkt.pos);
        av_free_packet(&pkt);
#endif
        if (ret < 0)
            goto the_end;

        /* single-step mode: pause again after each displayed frame */
        if (step)
            if (cur_stream)
                stream_pause(cur_stream);
    }
 the_end:
#if CONFIG_AVFILTER
    avfilter_graph_destroy(graph);
    av_freep(&graph);
#endif
    av_free(frame);
    return 0;
}
1871
/* Subtitle decoder thread: pulls packets from the subtitle queue, decodes
   them, and converts each bitmap rect's palette from RGBA to YUVA in place
   so the video blend code can overlay it on YUV frames.  Returns 0 on exit. */
static int subtitle_thread(void *arg)
{
    VideoState *is = arg;
    SubPicture *sp;
    AVPacket pkt1, *pkt = &pkt1;
    int len1, got_subtitle;
    double pts;
    int i, j;
    int r, g, b, y, u, v, a;

    for(;;) {
        /* spin in 10 ms steps while playback is paused */
        while (is->paused && !is->subtitleq.abort_request) {
            SDL_Delay(10);
        }
        if (packet_queue_get(&is->subtitleq, pkt, 1) < 0)
            break;

        /* a flush packet (queued after a seek) resets the decoder state */
        if(pkt->data == flush_pkt.data){
            avcodec_flush_buffers(is->subtitle_st->codec);
            continue;
        }
        /* block until there is room in the subpicture ring buffer */
        SDL_LockMutex(is->subpq_mutex);
        while (is->subpq_size >= SUBPICTURE_QUEUE_SIZE &&
               !is->subtitleq.abort_request) {
            SDL_CondWait(is->subpq_cond, is->subpq_mutex);
        }
        SDL_UnlockMutex(is->subpq_mutex);

        if (is->subtitleq.abort_request)
            goto the_end;

        sp = &is->subpq[is->subpq_windex];

       /* NOTE: ipts is the PTS of the _first_ picture beginning in
           this packet, if any */
        pts = 0;
        if (pkt->pts != AV_NOPTS_VALUE)
            pts = av_q2d(is->subtitle_st->time_base)*pkt->pts;

        /* NOTE(review): len1 is never checked; decode errors are silently
           ignored (see the commented-out check below) */
        len1 = avcodec_decode_subtitle2(is->subtitle_st->codec,
                                    &sp->sub, &got_subtitle,
                                    pkt);
//            if (len1 < 0)
//                break;
        /* format == 0 means a bitmap (paletted) subtitle */
        if (got_subtitle && sp->sub.format == 0) {
            sp->pts = pts;

            /* convert every palette entry from RGBA to YUVA in place,
               using CCIR 601 coefficients */
            for (i = 0; i < sp->sub.num_rects; i++)
            {
                for (j = 0; j < sp->sub.rects[i]->nb_colors; j++)
                {
                    RGBA_IN(r, g, b, a, (uint32_t*)sp->sub.rects[i]->pict.data[1] + j);
                    y = RGB_TO_Y_CCIR(r, g, b);
                    u = RGB_TO_U_CCIR(r, g, b, 0);
                    v = RGB_TO_V_CCIR(r, g, b, 0);
                    YUVA_OUT((uint32_t*)sp->sub.rects[i]->pict.data[1] + j, y, u, v, a);
                }
            }

            /* now we can update the picture count */
            if (++is->subpq_windex == SUBPICTURE_QUEUE_SIZE)
                is->subpq_windex = 0;
            SDL_LockMutex(is->subpq_mutex);
            is->subpq_size++;
            SDL_UnlockMutex(is->subpq_mutex);
        }
        av_free_packet(pkt);
//        if (step)
//            if (cur_stream)
//                stream_pause(cur_stream);
    }
 the_end:
    return 0;
}
1946
1947 /* copy samples for viewing in editor window */
1948 static void update_sample_display(VideoState *is, short *samples, int samples_size)
1949 {
1950     int size, len, channels;
1951
1952     channels = is->audio_st->codec->channels;
1953
1954     size = samples_size / sizeof(short);
1955     while (size > 0) {
1956         len = SAMPLE_ARRAY_SIZE - is->sample_array_index;
1957         if (len > size)
1958             len = size;
1959         memcpy(is->sample_array + is->sample_array_index, samples, len * sizeof(short));
1960         samples += len;
1961         is->sample_array_index += len;
1962         if (is->sample_array_index >= SAMPLE_ARRAY_SIZE)
1963             is->sample_array_index = 0;
1964         size -= len;
1965     }
1966 }
1967
1968 /* return the new audio buffer size (samples can be added or deleted
1969    to get better sync if video or external master clock) */
1970 static int synchronize_audio(VideoState *is, short *samples,
1971                              int samples_size1, double pts)
1972 {
1973     int n, samples_size;
1974     double ref_clock;
1975
1976     n = 2 * is->audio_st->codec->channels;
1977     samples_size = samples_size1;
1978
1979     /* if not master, then we try to remove or add samples to correct the clock */
1980     if (((is->av_sync_type == AV_SYNC_VIDEO_MASTER && is->video_st) ||
1981          is->av_sync_type == AV_SYNC_EXTERNAL_CLOCK)) {
1982         double diff, avg_diff;
1983         int wanted_size, min_size, max_size, nb_samples;
1984
1985         ref_clock = get_master_clock(is);
1986         diff = get_audio_clock(is) - ref_clock;
1987
1988         if (diff < AV_NOSYNC_THRESHOLD) {
1989             is->audio_diff_cum = diff + is->audio_diff_avg_coef * is->audio_diff_cum;
1990             if (is->audio_diff_avg_count < AUDIO_DIFF_AVG_NB) {
1991                 /* not enough measures to have a correct estimate */
1992                 is->audio_diff_avg_count++;
1993             } else {
1994                 /* estimate the A-V difference */
1995                 avg_diff = is->audio_diff_cum * (1.0 - is->audio_diff_avg_coef);
1996
1997                 if (fabs(avg_diff) >= is->audio_diff_threshold) {
1998                     wanted_size = samples_size + ((int)(diff * is->audio_st->codec->sample_rate) * n);
1999                     nb_samples = samples_size / n;
2000
2001                     min_size = ((nb_samples * (100 - SAMPLE_CORRECTION_PERCENT_MAX)) / 100) * n;
2002                     max_size = ((nb_samples * (100 + SAMPLE_CORRECTION_PERCENT_MAX)) / 100) * n;
2003                     if (wanted_size < min_size)
2004                         wanted_size = min_size;
2005                     else if (wanted_size > max_size)
2006                         wanted_size = max_size;
2007
2008                     /* add or remove samples to correction the synchro */
2009                     if (wanted_size < samples_size) {
2010                         /* remove samples */
2011                         samples_size = wanted_size;
2012                     } else if (wanted_size > samples_size) {
2013                         uint8_t *samples_end, *q;
2014                         int nb;
2015
2016                         /* add samples */
2017                         nb = (samples_size - wanted_size);
2018                         samples_end = (uint8_t *)samples + samples_size - n;
2019                         q = samples_end + n;
2020                         while (nb > 0) {
2021                             memcpy(q, samples_end, n);
2022                             q += n;
2023                             nb -= n;
2024                         }
2025                         samples_size = wanted_size;
2026                     }
2027                 }
2028 #if 0
2029                 printf("diff=%f adiff=%f sample_diff=%d apts=%0.3f vpts=%0.3f %f\n",
2030                        diff, avg_diff, samples_size - samples_size1,
2031                        is->audio_clock, is->video_clock, is->audio_diff_threshold);
2032 #endif
2033             }
2034         } else {
2035             /* too big difference : may be initial PTS errors, so
2036                reset A-V filter */
2037             is->audio_diff_avg_count = 0;
2038             is->audio_diff_cum = 0;
2039         }
2040     }
2041
2042     return samples_size;
2043 }
2044
/* Decode one audio frame and return its uncompressed size in bytes, or a
   negative value when paused or aborting.  The decoded data is left in
   is->audio_buf (converted to S16 through is->reformat_ctx if the decoder
   outputs another sample format), *pts_ptr receives its presentation time,
   and is->audio_clock is advanced by the duration of the returned data. */
static int audio_decode_frame(VideoState *is, double *pts_ptr)
{
    AVPacket *pkt_temp = &is->audio_pkt_temp;
    AVPacket *pkt = &is->audio_pkt;
    AVCodecContext *dec= is->audio_st->codec;
    int n, len1, data_size;
    double pts;

    for(;;) {
        /* NOTE: the audio packet can contain several frames */
        while (pkt_temp->size > 0) {
            data_size = sizeof(is->audio_buf1);
            len1 = avcodec_decode_audio3(dec,
                                        (int16_t *)is->audio_buf1, &data_size,
                                        pkt_temp);
            if (len1 < 0) {
                /* if error, we skip the frame */
                pkt_temp->size = 0;
                break;
            }

            /* advance past the bytes the decoder consumed */
            pkt_temp->data += len1;
            pkt_temp->size -= len1;
            if (data_size <= 0)
                continue;

            /* (re)create the format converter whenever the decoder's
               output format differs from the S16 we handed to SDL */
            if (dec->sample_fmt != is->audio_src_fmt) {
                if (is->reformat_ctx)
                    av_audio_convert_free(is->reformat_ctx);
                is->reformat_ctx= av_audio_convert_alloc(SAMPLE_FMT_S16, 1,
                                                         dec->sample_fmt, 1, NULL, 0);
                if (!is->reformat_ctx) {
                    fprintf(stderr, "Cannot convert %s sample format to %s sample format\n",
                        avcodec_get_sample_fmt_name(dec->sample_fmt),
                        avcodec_get_sample_fmt_name(SAMPLE_FMT_S16));
                        break;
                }
                is->audio_src_fmt= dec->sample_fmt;
            }

            if (is->reformat_ctx) {
                /* convert audio_buf1 -> audio_buf2 (interleaved S16) */
                const void *ibuf[6]= {is->audio_buf1};
                void *obuf[6]= {is->audio_buf2};
                int istride[6]= {av_get_bits_per_sample_format(dec->sample_fmt)/8};
                int ostride[6]= {2};
                int len= data_size/istride[0];
                if (av_audio_convert(is->reformat_ctx, obuf, ostride, ibuf, istride, len)<0) {
                    printf("av_audio_convert() failed\n");
                    break;
                }
                is->audio_buf= is->audio_buf2;
                /* FIXME: existing code assume that data_size equals framesize*channels*2
                          remove this legacy cruft */
                data_size= len*2;
            }else{
                is->audio_buf= is->audio_buf1;
            }

            /* if no pts, then compute it */
            pts = is->audio_clock;
            *pts_ptr = pts;
            n = 2 * dec->channels;
            /* advance the clock by this chunk's duration (S16 stereo math) */
            is->audio_clock += (double)data_size /
                (double)(n * dec->sample_rate);
#if defined(DEBUG_SYNC)
            {
                static double last_clock;
                printf("audio: delay=%0.3f clock=%0.3f pts=%0.3f\n",
                       is->audio_clock - last_clock,
                       is->audio_clock, pts);
                last_clock = is->audio_clock;
            }
#endif
            return data_size;
        }

        /* free the current packet */
        if (pkt->data)
            av_free_packet(pkt);

        if (is->paused || is->audioq.abort_request) {
            return -1;
        }

        /* read next packet */
        if (packet_queue_get(&is->audioq, pkt, 1) < 0)
            return -1;
        /* a flush packet (queued after a seek) resets the decoder state */
        if(pkt->data == flush_pkt.data){
            avcodec_flush_buffers(dec);
            continue;
        }

        pkt_temp->data = pkt->data;
        pkt_temp->size = pkt->size;

        /* if update the audio clock with the pts */
        if (pkt->pts != AV_NOPTS_VALUE) {
            is->audio_clock = av_q2d(is->audio_st->time_base)*pkt->pts;
        }
    }
}
2147
2148 /* get the current audio output buffer size, in samples. With SDL, we
2149    cannot have a precise information */
2150 static int audio_write_get_buf_size(VideoState *is)
2151 {
2152     return is->audio_buf_size - is->audio_buf_index;
2153 }
2154
2155
2156 /* prepare a new audio buffer */
2157 static void sdl_audio_callback(void *opaque, Uint8 *stream, int len)
2158 {
2159     VideoState *is = opaque;
2160     int audio_size, len1;
2161     double pts;
2162
2163     audio_callback_time = av_gettime();
2164
2165     while (len > 0) {
2166         if (is->audio_buf_index >= is->audio_buf_size) {
2167            audio_size = audio_decode_frame(is, &pts);
2168            if (audio_size < 0) {
2169                 /* if error, just output silence */
2170                is->audio_buf = is->audio_buf1;
2171                is->audio_buf_size = 1024;
2172                memset(is->audio_buf, 0, is->audio_buf_size);
2173            } else {
2174                if (is->show_audio)
2175                    update_sample_display(is, (int16_t *)is->audio_buf, audio_size);
2176                audio_size = synchronize_audio(is, (int16_t *)is->audio_buf, audio_size,
2177                                               pts);
2178                is->audio_buf_size = audio_size;
2179            }
2180            is->audio_buf_index = 0;
2181         }
2182         len1 = is->audio_buf_size - is->audio_buf_index;
2183         if (len1 > len)
2184             len1 = len;
2185         memcpy(stream, (uint8_t *)is->audio_buf + is->audio_buf_index, len1);
2186         len -= len1;
2187         stream += len1;
2188         is->audio_buf_index += len1;
2189     }
2190 }
2191
/* Open the stream at stream_index and start decoding it.  Returns 0 on
   success, -1 on failure.  For audio this opens the SDL audio device and
   unpauses playback; for video/subtitle it spawns the decode thread. */
static int stream_component_open(VideoState *is, int stream_index)
{
    AVFormatContext *ic = is->ic;
    AVCodecContext *avctx;
    AVCodec *codec;
    SDL_AudioSpec wanted_spec, spec;

    if (stream_index < 0 || stream_index >= ic->nb_streams)
        return -1;
    avctx = ic->streams[stream_index]->codec;

    /* prepare audio output */
    if (avctx->codec_type == AVMEDIA_TYPE_AUDIO) {
        /* ask the decoder to downmix to at most 2 channels */
        if (avctx->channels > 0) {
            avctx->request_channels = FFMIN(2, avctx->channels);
        } else {
            avctx->request_channels = 2;
        }
    }

    codec = avcodec_find_decoder(avctx->codec_id);
    /* propagate the command-line decoding options to the codec context */
    avctx->debug_mv = debug_mv;
    avctx->debug = debug;
    avctx->workaround_bugs = workaround_bugs;
    avctx->lowres = lowres;
    if(lowres) avctx->flags |= CODEC_FLAG_EMU_EDGE;
    avctx->idct_algo= idct;
    if(fast) avctx->flags2 |= CODEC_FLAG2_FAST;
    avctx->skip_frame= skip_frame;
    avctx->skip_idct= skip_idct;
    avctx->skip_loop_filter= skip_loop_filter;
    avctx->error_recognition= error_recognition;
    avctx->error_concealment= error_concealment;
    avcodec_thread_init(avctx, thread_count);

    set_context_opts(avctx, avcodec_opts[avctx->codec_type], 0);

    if (!codec ||
        avcodec_open(avctx, codec) < 0)
        return -1;

    /* prepare audio output */
    if (avctx->codec_type == AVMEDIA_TYPE_AUDIO) {
        wanted_spec.freq = avctx->sample_rate;
        wanted_spec.format = AUDIO_S16SYS;
        wanted_spec.channels = avctx->channels;
        wanted_spec.silence = 0;
        wanted_spec.samples = SDL_AUDIO_BUFFER_SIZE;
        wanted_spec.callback = sdl_audio_callback;
        wanted_spec.userdata = is;
        if (SDL_OpenAudio(&wanted_spec, &spec) < 0) {
            fprintf(stderr, "SDL_OpenAudio: %s\n", SDL_GetError());
            return -1;
        }
        is->audio_hw_buf_size = spec.size;
        is->audio_src_fmt= SAMPLE_FMT_S16;
    }

    ic->streams[stream_index]->discard = AVDISCARD_DEFAULT;
    switch(avctx->codec_type) {
    case AVMEDIA_TYPE_AUDIO:
        is->audio_stream = stream_index;
        is->audio_st = ic->streams[stream_index];
        is->audio_buf_size = 0;
        is->audio_buf_index = 0;

        /* init averaging filter */
        is->audio_diff_avg_coef = exp(log(0.01) / AUDIO_DIFF_AVG_NB);
        is->audio_diff_avg_count = 0;
        /* since we do not have a precise enough audio fifo fullness,
           we correct audio sync only if larger than this threshold */
        is->audio_diff_threshold = 2.0 * SDL_AUDIO_BUFFER_SIZE / avctx->sample_rate;

        memset(&is->audio_pkt, 0, sizeof(is->audio_pkt));
        packet_queue_init(&is->audioq);
        SDL_PauseAudio(0);
        break;
    case AVMEDIA_TYPE_VIDEO:
        is->video_stream = stream_index;
        is->video_st = ic->streams[stream_index];

//        is->video_current_pts_time = av_gettime();

        packet_queue_init(&is->videoq);
        is->video_tid = SDL_CreateThread(video_thread, is);
        break;
    case AVMEDIA_TYPE_SUBTITLE:
        is->subtitle_stream = stream_index;
        is->subtitle_st = ic->streams[stream_index];
        packet_queue_init(&is->subtitleq);

        is->subtitle_tid = SDL_CreateThread(subtitle_thread, is);
        break;
    default:
        break;
    }
    return 0;
}
2291
/* Undo stream_component_open(): abort the stream's packet queue, stop and
   join its decoder thread, free queue resources and close the codec. */
static void stream_component_close(VideoState *is, int stream_index)
{
    AVFormatContext *ic = is->ic;
    AVCodecContext *avctx;

    if (stream_index < 0 || stream_index >= ic->nb_streams)
        return;
    avctx = ic->streams[stream_index]->codec;

    switch(avctx->codec_type) {
    case AVMEDIA_TYPE_AUDIO:
        packet_queue_abort(&is->audioq);

        /* stops the SDL audio callback, so no thread join is needed */
        SDL_CloseAudio();

        packet_queue_end(&is->audioq);
        if (is->reformat_ctx)
            av_audio_convert_free(is->reformat_ctx);
        is->reformat_ctx = NULL;
        break;
    case AVMEDIA_TYPE_VIDEO:
        packet_queue_abort(&is->videoq);

        /* note: we also signal this mutex to make sure we deblock the
           video thread in all cases */
        SDL_LockMutex(is->pictq_mutex);
        SDL_CondSignal(is->pictq_cond);
        SDL_UnlockMutex(is->pictq_mutex);

        SDL_WaitThread(is->video_tid, NULL);

        packet_queue_end(&is->videoq);
        break;
    case AVMEDIA_TYPE_SUBTITLE:
        packet_queue_abort(&is->subtitleq);

        /* note: we also signal this mutex to make sure we deblock the
           subtitle thread in all cases */
        SDL_LockMutex(is->subpq_mutex);
        is->subtitle_stream_changed = 1;

        SDL_CondSignal(is->subpq_cond);
        SDL_UnlockMutex(is->subpq_mutex);

        SDL_WaitThread(is->subtitle_tid, NULL);

        packet_queue_end(&is->subtitleq);
        break;
    default:
        break;
    }

    /* stop the demuxer from queuing further packets for this stream */
    ic->streams[stream_index]->discard = AVDISCARD_ALL;
    avcodec_close(avctx);
    switch(avctx->codec_type) {
    case AVMEDIA_TYPE_AUDIO:
        is->audio_st = NULL;
        is->audio_stream = -1;
        break;
    case AVMEDIA_TYPE_VIDEO:
        is->video_st = NULL;
        is->video_stream = -1;
        break;
    case AVMEDIA_TYPE_SUBTITLE:
        is->subtitle_st = NULL;
        is->subtitle_stream = -1;
        break;
    default:
        break;
    }
}
2363
2364 /* since we have only one decoding thread, we can use a global
2365    variable instead of a thread local variable */
2366 static VideoState *global_video_state;
2367
2368 static int decode_interrupt_cb(void)
2369 {
2370     return (global_video_state && global_video_state->abort_request);
2371 }
2372
/* Demux thread: opens the input (disk or network), selects and opens the
   requested or best-populated audio/video/subtitle streams, then loops
   reading packets into the per-stream queues.  Also services seek
   requests, pause/resume, looping and end-of-file.  On fatal error it
   pushes an FF_QUIT_EVENT for the main event loop. */
static int decode_thread(void *arg)
{
    VideoState *is = arg;
    AVFormatContext *ic;
    int err, i, ret;
    int st_index[AVMEDIA_TYPE_NB];
    int st_count[AVMEDIA_TYPE_NB]={0};
    int st_best_packet_count[AVMEDIA_TYPE_NB];
    AVPacket pkt1, *pkt = &pkt1;
    AVFormatParameters params, *ap = &params;
    int eof=0;
    int pkt_in_play_range = 0;

    ic = avformat_alloc_context();

    memset(st_index, -1, sizeof(st_index));
    memset(st_best_packet_count, -1, sizeof(st_best_packet_count));
    is->video_stream = -1;
    is->audio_stream = -1;
    is->subtitle_stream = -1;

    global_video_state = is;
    url_set_interrupt_cb(decode_interrupt_cb);

    memset(ap, 0, sizeof(*ap));

    /* hints for raw/underspecified demuxers, taken from the command line */
    ap->prealloced_context = 1;
    ap->width = frame_width;
    ap->height= frame_height;
    ap->time_base= (AVRational){1, 25};
    ap->pix_fmt = frame_pix_fmt;

    set_context_opts(ic, avformat_opts, AV_OPT_FLAG_DECODING_PARAM);

    err = av_open_input_file(&ic, is->filename, is->iformat, 0, ap);
    if (err < 0) {
        print_error(is->filename, err);
        ret = -1;
        goto fail;
    }
    is->ic = ic;

    if(genpts)
        ic->flags |= AVFMT_FLAG_GENPTS;

    err = av_find_stream_info(ic);
    if (err < 0) {
        fprintf(stderr, "%s: could not find codec parameters\n", is->filename);
        ret = -1;
        goto fail;
    }
    if(ic->pb)
        ic->pb->eof_reached= 0; //FIXME hack, ffplay maybe should not use url_feof() to test for the end

    if(seek_by_bytes<0)
        seek_by_bytes= !!(ic->iformat->flags & AVFMT_TS_DISCONT);

    /* if seeking requested, we execute it */
    if (start_time != AV_NOPTS_VALUE) {
        int64_t timestamp;

        timestamp = start_time;
        /* add the stream start time */
        if (ic->start_time != AV_NOPTS_VALUE)
            timestamp += ic->start_time;
        ret = avformat_seek_file(ic, -1, INT64_MIN, timestamp, INT64_MAX, 0);
        if (ret < 0) {
            fprintf(stderr, "%s: could not seek to position %0.3f\n",
                    is->filename, (double)timestamp / AV_TIME_BASE);
        }
    }

    /* pick one stream per media type: honour wanted_stream[] when set,
       otherwise prefer the stream with the most packets seen in probing */
    for(i = 0; i < ic->nb_streams; i++) {
        AVStream *st= ic->streams[i];
        AVCodecContext *avctx = st->codec;
        ic->streams[i]->discard = AVDISCARD_ALL;
        if(avctx->codec_type >= (unsigned)AVMEDIA_TYPE_NB)
            continue;
        if(st_count[avctx->codec_type]++ != wanted_stream[avctx->codec_type] && wanted_stream[avctx->codec_type] >= 0)
            continue;

        if(st_best_packet_count[avctx->codec_type] >= st->codec_info_nb_frames)
            continue;
        st_best_packet_count[avctx->codec_type]= st->codec_info_nb_frames;

        switch(avctx->codec_type) {
        case AVMEDIA_TYPE_AUDIO:
            if (!audio_disable)
                st_index[AVMEDIA_TYPE_AUDIO] = i;
            break;
        case AVMEDIA_TYPE_VIDEO:
        case AVMEDIA_TYPE_SUBTITLE:
            if (!video_disable)
                st_index[avctx->codec_type] = i;
            break;
        default:
            break;
        }
    }
    if (show_status) {
        dump_format(ic, 0, is->filename, 0);
    }

    /* open the streams */
    if (st_index[AVMEDIA_TYPE_AUDIO] >= 0) {
        stream_component_open(is, st_index[AVMEDIA_TYPE_AUDIO]);
    }

    ret=-1;
    if (st_index[AVMEDIA_TYPE_VIDEO] >= 0) {
        ret= stream_component_open(is, st_index[AVMEDIA_TYPE_VIDEO]);
    }
    is->refresh_tid = SDL_CreateThread(refresh_thread, is);
    if(ret<0) {
        /* no video stream: show the audio waveform/spectrum instead */
        if (!display_disable)
            is->show_audio = 2;
    }

    if (st_index[AVMEDIA_TYPE_SUBTITLE] >= 0) {
        stream_component_open(is, st_index[AVMEDIA_TYPE_SUBTITLE]);
    }

    if (is->video_stream < 0 && is->audio_stream < 0) {
        fprintf(stderr, "%s: could not open codecs\n", is->filename);
        ret = -1;
        goto fail;
    }

    /* main demux loop */
    for(;;) {
        if (is->abort_request)
            break;
        if (is->paused != is->last_paused) {
            is->last_paused = is->paused;
            if (is->paused)
                is->read_pause_return= av_read_pause(ic);
            else
                av_read_play(ic);
        }
#if CONFIG_RTSP_DEMUXER
        if (is->paused && !strcmp(ic->iformat->name, "rtsp")) {
            /* wait 10 ms to avoid trying to get another packet */
            /* XXX: horrible */
            SDL_Delay(10);
            continue;
        }
#endif
        if (is->seek_req) {
            int64_t seek_target= is->seek_pos;
            int64_t seek_min= is->seek_rel > 0 ? seek_target - is->seek_rel + 2: INT64_MIN;
            int64_t seek_max= is->seek_rel < 0 ? seek_target - is->seek_rel - 2: INT64_MAX;
//FIXME the +-2 is due to rounding being not done in the correct direction in generation
//      of the seek_pos/seek_rel variables

            ret = avformat_seek_file(is->ic, -1, seek_min, seek_target, seek_max, is->seek_flags);
            if (ret < 0) {
                fprintf(stderr, "%s: error while seeking\n", is->ic->filename);
            }else{
                /* flush the queues and inject a flush packet so each
                   decoder thread resets its codec state */
                if (is->audio_stream >= 0) {
                    packet_queue_flush(&is->audioq);
                    packet_queue_put(&is->audioq, &flush_pkt);
                }
                if (is->subtitle_stream >= 0) {
                    packet_queue_flush(&is->subtitleq);
                    packet_queue_put(&is->subtitleq, &flush_pkt);
                }
                if (is->video_stream >= 0) {
                    packet_queue_flush(&is->videoq);
                    packet_queue_put(&is->videoq, &flush_pkt);
                }
            }
            is->seek_req = 0;
            eof= 0;
        }

        /* if the queue are full, no need to read more */
        if (   is->audioq.size + is->videoq.size + is->subtitleq.size > MAX_QUEUE_SIZE
            || (   (is->audioq   .size  > MIN_AUDIOQ_SIZE || is->audio_stream<0)
                && (is->videoq   .nb_packets > MIN_FRAMES || is->video_stream<0)
                && (is->subtitleq.nb_packets > MIN_FRAMES || is->subtitle_stream<0))) {
            /* wait 10 ms */
            SDL_Delay(10);
            continue;
        }
        if(url_feof(ic->pb) || eof) {
            /* feed a null packet so the video decoder drains its
               delayed frames */
            if(is->video_stream >= 0){
                av_init_packet(pkt);
                pkt->data=NULL;
                pkt->size=0;
                pkt->stream_index= is->video_stream;
                packet_queue_put(&is->videoq, pkt);
            }
            SDL_Delay(10);
            if(is->audioq.size + is->videoq.size + is->subtitleq.size ==0){
                if(loop!=1 && (!loop || --loop)){
                    stream_seek(cur_stream, start_time != AV_NOPTS_VALUE ? start_time : 0, 0, 0);
                }else if(autoexit){
                    ret=AVERROR_EOF;
                    goto fail;
                }
            }
            continue;
        }
        ret = av_read_frame(ic, pkt);
        if (ret < 0) {
            if (ret == AVERROR_EOF)
                eof=1;
            if (url_ferror(ic->pb))
                break;
            SDL_Delay(100); /* wait for user event */
            continue;
        }
        /* check if packet is in play range specified by user, then queue, otherwise discard */
        pkt_in_play_range = duration == AV_NOPTS_VALUE ||
                (pkt->pts - ic->streams[pkt->stream_index]->start_time) *
                av_q2d(ic->streams[pkt->stream_index]->time_base) -
                (double)(start_time != AV_NOPTS_VALUE ? start_time : 0)/1000000
                <= ((double)duration/1000000);
        if (pkt->stream_index == is->audio_stream && pkt_in_play_range) {
            packet_queue_put(&is->audioq, pkt);
        } else if (pkt->stream_index == is->video_stream && pkt_in_play_range) {
            packet_queue_put(&is->videoq, pkt);
        } else if (pkt->stream_index == is->subtitle_stream && pkt_in_play_range) {
            packet_queue_put(&is->subtitleq, pkt);
        } else {
            av_free_packet(pkt);
        }
    }
    /* wait until the end */
    while (!is->abort_request) {
        SDL_Delay(100);
    }

    ret = 0;
 fail:
    /* disable interrupting */
    global_video_state = NULL;

    /* close each stream */
    if (is->audio_stream >= 0)
        stream_component_close(is, is->audio_stream);
    if (is->video_stream >= 0)
        stream_component_close(is, is->video_stream);
    if (is->subtitle_stream >= 0)
        stream_component_close(is, is->subtitle_stream);
    if (is->ic) {
        av_close_input_file(is->ic);
        is->ic = NULL; /* safety */
    }
    url_set_interrupt_cb(NULL);

    if (ret != 0) {
        SDL_Event event;

        event.type = FF_QUIT_EVENT;
        event.user.data1 = is;
        SDL_PushEvent(&event);
    }
    return 0;
}
2633
2634 static VideoState *stream_open(const char *filename, AVInputFormat *iformat)
2635 {
2636     VideoState *is;
2637
2638     is = av_mallocz(sizeof(VideoState));
2639     if (!is)
2640         return NULL;
2641     av_strlcpy(is->filename, filename, sizeof(is->filename));
2642     is->iformat = iformat;
2643     is->ytop = 0;
2644     is->xleft = 0;
2645
2646     /* start video display */
2647     is->pictq_mutex = SDL_CreateMutex();
2648     is->pictq_cond = SDL_CreateCond();
2649
2650     is->subpq_mutex = SDL_CreateMutex();
2651     is->subpq_cond = SDL_CreateCond();
2652
2653     is->av_sync_type = av_sync_type;
2654     is->parse_tid = SDL_CreateThread(decode_thread, is);
2655     if (!is->parse_tid) {
2656         av_free(is);
2657         return NULL;
2658     }
2659     return is;
2660 }
2661
/* Tear down a VideoState created by stream_open(): request abort, join
   the demux and refresh threads, free queued picture resources and the
   SDL synchronisation primitives, then free the state itself. */
static void stream_close(VideoState *is)
{
    VideoPicture *vp;
    int i;
    /* XXX: use a special url_shutdown call to abort parse cleanly */
    is->abort_request = 1;
    SDL_WaitThread(is->parse_tid, NULL);
    SDL_WaitThread(is->refresh_tid, NULL);

    /* free all pictures */
    for(i=0;i<VIDEO_PICTURE_QUEUE_SIZE; i++) {
        vp = &is->pictq[i];
#if CONFIG_AVFILTER
        if (vp->picref) {
            avfilter_unref_buffer(vp->picref);
            vp->picref = NULL;
        }
#endif
        if (vp->bmp) {
            SDL_FreeYUVOverlay(vp->bmp);
            vp->bmp = NULL;
        }
    }
    SDL_DestroyMutex(is->pictq_mutex);
    SDL_DestroyCond(is->pictq_cond);
    SDL_DestroyMutex(is->subpq_mutex);
    SDL_DestroyCond(is->subpq_cond);
#if !CONFIG_AVFILTER
    /* the software scaler context is only used in the non-avfilter path */
    if (is->img_convert_ctx)
        sws_freeContext(is->img_convert_ctx);
#endif
    av_free(is);
}
2695
/* Switch to the next stream of the given media type (audio/video/subtitle),
 * wrapping around at the end of the stream list.  For subtitles the cycle
 * includes an "off" state (stream_index == -1); audio/video always stay on
 * some stream.  Closes the old component and opens the new one. */
static void stream_cycle_channel(VideoState *is, int codec_type)
{
    AVFormatContext *ic = is->ic;
    int start_index, stream_index;
    AVStream *st;

    if (codec_type == AVMEDIA_TYPE_VIDEO)
        start_index = is->video_stream;
    else if (codec_type == AVMEDIA_TYPE_AUDIO)
        start_index = is->audio_stream;
    else
        start_index = is->subtitle_stream;
    /* nothing to cycle: audio/video need an active stream; subtitles may
     * start from the disabled state (-1) */
    if (start_index < (codec_type == AVMEDIA_TYPE_SUBTITLE ? -1 : 0))
        return;
    stream_index = start_index;
    for(;;) {
        if (++stream_index >= is->ic->nb_streams)
        {
            if (codec_type == AVMEDIA_TYPE_SUBTITLE)
            {
                /* past the last stream: subtitles cycle to "off" */
                stream_index = -1;
                goto the_end;
            } else
                /* audio/video wrap around to the first stream */
                stream_index = 0;
        }
        /* came back to where we started: no alternative stream exists */
        if (stream_index == start_index)
            return;
        st = ic->streams[stream_index];
        if (st->codec->codec_type == codec_type) {
            /* check that parameters are OK */
            switch(codec_type) {
            case AVMEDIA_TYPE_AUDIO:
                /* skip audio streams with unusable parameters */
                if (st->codec->sample_rate != 0 &&
                    st->codec->channels != 0)
                    goto the_end;
                break;
            case AVMEDIA_TYPE_VIDEO:
            case AVMEDIA_TYPE_SUBTITLE:
                goto the_end;
            default:
                break;
            }
        }
    }
 the_end:
    /* NOTE(review): start_index may be -1 here (subtitle "off" state);
     * presumably stream_component_close/open tolerate that — confirm. */
    stream_component_close(is, start_index);
    stream_component_open(is, stream_index);
}
2744
2745
2746 static void toggle_full_screen(void)
2747 {
2748     is_full_screen = !is_full_screen;
2749     if (!fs_screen_width) {
2750         /* use default SDL method */
2751 //        SDL_WM_ToggleFullScreen(screen);
2752     }
2753     video_open(cur_stream);
2754 }
2755
2756 static void toggle_pause(void)
2757 {
2758     if (cur_stream)
2759         stream_pause(cur_stream);
2760     step = 0;
2761 }
2762
2763 static void step_to_next_frame(void)
2764 {
2765     if (cur_stream) {
2766         /* if the stream is paused unpause it, then step */
2767         if (cur_stream->paused)
2768             stream_pause(cur_stream);
2769     }
2770     step = 1;
2771 }
2772
2773 static void do_exit(void)
2774 {
2775     int i;
2776     if (cur_stream) {
2777         stream_close(cur_stream);
2778         cur_stream = NULL;
2779     }
2780     for (i = 0; i < AVMEDIA_TYPE_NB; i++)
2781         av_free(avcodec_opts[i]);
2782     av_free(avformat_opts);
2783     av_free(sws_opts);
2784 #if CONFIG_AVFILTER
2785     avfilter_uninit();
2786 #endif
2787     if (show_status)
2788         printf("\n");
2789     SDL_Quit();
2790     exit(0);
2791 }
2792
2793 static void toggle_audio_display(void)
2794 {
2795     if (cur_stream) {
2796         int bgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0x00);
2797         cur_stream->show_audio = (cur_stream->show_audio + 1) % 3;
2798         fill_rectangle(screen,
2799                     cur_stream->xleft, cur_stream->ytop, cur_stream->width, cur_stream->height,
2800                     bgcolor);
2801         SDL_UpdateRect(screen, cur_stream->xleft, cur_stream->ytop, cur_stream->width, cur_stream->height);
2802     }
2803 }
2804
2805 /* handle an event sent by the GUI */
2806 static void event_loop(void)
2807 {
2808     SDL_Event event;
2809     double incr, pos, frac;
2810
2811     for(;;) {
2812         double x;
2813         SDL_WaitEvent(&event);
2814         switch(event.type) {
2815         case SDL_KEYDOWN:
2816             if (exit_on_keydown) {
2817                 do_exit();
2818                 break;
2819             }
2820             switch(event.key.keysym.sym) {
2821             case SDLK_ESCAPE:
2822             case SDLK_q:
2823                 do_exit();
2824                 break;
2825             case SDLK_f:
2826                 toggle_full_screen();
2827                 break;
2828             case SDLK_p:
2829             case SDLK_SPACE:
2830                 toggle_pause();
2831                 break;
2832             case SDLK_s: //S: Step to next frame
2833                 step_to_next_frame();
2834                 break;
2835             case SDLK_a:
2836                 if (cur_stream)
2837                     stream_cycle_channel(cur_stream, AVMEDIA_TYPE_AUDIO);
2838                 break;
2839             case SDLK_v:
2840                 if (cur_stream)
2841                     stream_cycle_channel(cur_stream, AVMEDIA_TYPE_VIDEO);
2842                 break;
2843             case SDLK_t:
2844                 if (cur_stream)
2845                     stream_cycle_channel(cur_stream, AVMEDIA_TYPE_SUBTITLE);
2846                 break;
2847             case SDLK_w:
2848                 toggle_audio_display();
2849                 break;
2850             case SDLK_LEFT:
2851                 incr = -10.0;
2852                 goto do_seek;
2853             case SDLK_RIGHT:
2854                 incr = 10.0;
2855                 goto do_seek;
2856             case SDLK_UP:
2857                 incr = 60.0;
2858                 goto do_seek;
2859             case SDLK_DOWN:
2860                 incr = -60.0;
2861             do_seek:
2862                 if (cur_stream) {
2863                     if (seek_by_bytes) {
2864                         if (cur_stream->video_stream >= 0 && cur_stream->video_current_pos>=0){
2865                             pos= cur_stream->video_current_pos;
2866                         }else if(cur_stream->audio_stream >= 0 && cur_stream->audio_pkt.pos>=0){
2867                             pos= cur_stream->audio_pkt.pos;
2868                         }else
2869                             pos = url_ftell(cur_stream->ic->pb);
2870                         if (cur_stream->ic->bit_rate)
2871                             incr *= cur_stream->ic->bit_rate / 8.0;
2872                         else
2873                             incr *= 180000.0;
2874                         pos += incr;
2875                         stream_seek(cur_stream, pos, incr, 1);
2876                     } else {
2877                         pos = get_master_clock(cur_stream);
2878                         pos += incr;
2879                         stream_seek(cur_stream, (int64_t)(pos * AV_TIME_BASE), (int64_t)(incr * AV_TIME_BASE), 0);
2880                     }
2881                 }
2882                 break;
2883             default:
2884                 break;
2885             }
2886             break;
2887         case SDL_MOUSEBUTTONDOWN:
2888             if (exit_on_mousedown) {
2889                 do_exit();
2890                 break;
2891             }
2892         case SDL_MOUSEMOTION:
2893             if(event.type ==SDL_MOUSEBUTTONDOWN){
2894                 x= event.button.x;
2895             }else{
2896                 if(event.motion.state != SDL_PRESSED)
2897                     break;
2898                 x= event.motion.x;
2899             }
2900             if (cur_stream) {
2901                 if(seek_by_bytes || cur_stream->ic->duration<=0){
2902                     uint64_t size=  url_fsize(cur_stream->ic->pb);
2903                     stream_seek(cur_stream, size*x/cur_stream->width, 0, 1);
2904                 }else{
2905                     int64_t ts;
2906                     int ns, hh, mm, ss;
2907                     int tns, thh, tmm, tss;
2908                     tns = cur_stream->ic->duration/1000000LL;
2909                     thh = tns/3600;
2910                     tmm = (tns%3600)/60;
2911                     tss = (tns%60);
2912                     frac = x/cur_stream->width;
2913                     ns = frac*tns;
2914                     hh = ns/3600;
2915                     mm = (ns%3600)/60;
2916                     ss = (ns%60);
2917                     fprintf(stderr, "Seek to %2.0f%% (%2d:%02d:%02d) of total duration (%2d:%02d:%02d)       \n", frac*100,
2918                             hh, mm, ss, thh, tmm, tss);
2919                     ts = frac*cur_stream->ic->duration;
2920                     if (cur_stream->ic->start_time != AV_NOPTS_VALUE)
2921                         ts += cur_stream->ic->start_time;
2922                     stream_seek(cur_stream, ts, 0, 0);
2923                 }
2924             }
2925             break;
2926         case SDL_VIDEORESIZE:
2927             if (cur_stream) {
2928                 screen = SDL_SetVideoMode(event.resize.w, event.resize.h, 0,
2929                                           SDL_HWSURFACE|SDL_RESIZABLE|SDL_ASYNCBLIT|SDL_HWACCEL);
2930                 screen_width = cur_stream->width = event.resize.w;
2931                 screen_height= cur_stream->height= event.resize.h;
2932             }
2933             break;
2934         case SDL_QUIT:
2935         case FF_QUIT_EVENT:
2936             do_exit();
2937             break;
2938         case FF_ALLOC_EVENT:
2939             video_open(event.user.data1);
2940             alloc_picture(event.user.data1);
2941             break;
2942         case FF_REFRESH_EVENT:
2943             video_refresh_timer(event.user.data1);
2944             cur_stream->refresh=0;
2945             break;
2946         default:
2947             break;
2948         }
2949     }
2950 }
2951
2952 static void opt_frame_size(const char *arg)
2953 {
2954     if (av_parse_video_size(&frame_width, &frame_height, arg) < 0) {
2955         fprintf(stderr, "Incorrect frame size\n");
2956         exit(1);
2957     }
2958     if ((frame_width % 2) != 0 || (frame_height % 2) != 0) {
2959         fprintf(stderr, "Frame size must be a multiple of 2\n");
2960         exit(1);
2961     }
2962 }
2963
2964 static int opt_width(const char *opt, const char *arg)
2965 {
2966     screen_width = parse_number_or_die(opt, arg, OPT_INT64, 1, INT_MAX);
2967     return 0;
2968 }
2969
2970 static int opt_height(const char *opt, const char *arg)
2971 {
2972     screen_height = parse_number_or_die(opt, arg, OPT_INT64, 1, INT_MAX);
2973     return 0;
2974 }
2975
2976 static void opt_format(const char *arg)
2977 {
2978     file_iformat = av_find_input_format(arg);
2979     if (!file_iformat) {
2980         fprintf(stderr, "Unknown input format: %s\n", arg);
2981         exit(1);
2982     }
2983 }
2984
2985 static void opt_frame_pix_fmt(const char *arg)
2986 {
2987     frame_pix_fmt = av_get_pix_fmt(arg);
2988 }
2989
2990 static int opt_sync(const char *opt, const char *arg)
2991 {
2992     if (!strcmp(arg, "audio"))
2993         av_sync_type = AV_SYNC_AUDIO_MASTER;
2994     else if (!strcmp(arg, "video"))
2995         av_sync_type = AV_SYNC_VIDEO_MASTER;
2996     else if (!strcmp(arg, "ext"))
2997         av_sync_type = AV_SYNC_EXTERNAL_CLOCK;
2998     else {
2999         fprintf(stderr, "Unknown value for %s: %s\n", opt, arg);
3000         exit(1);
3001     }
3002     return 0;
3003 }
3004
3005 static int opt_seek(const char *opt, const char *arg)
3006 {
3007     start_time = parse_time_or_die(opt, arg, 1);
3008     return 0;
3009 }
3010
3011 static int opt_duration(const char *opt, const char *arg)
3012 {
3013     duration = parse_time_or_die(opt, arg, 1);
3014     return 0;
3015 }
3016
3017 static int opt_debug(const char *opt, const char *arg)
3018 {
3019     av_log_set_level(99);
3020     debug = parse_number_or_die(opt, arg, OPT_INT64, 0, INT_MAX);
3021     return 0;
3022 }
3023
3024 static int opt_vismv(const char *opt, const char *arg)
3025 {
3026     debug_mv = parse_number_or_die(opt, arg, OPT_INT64, INT_MIN, INT_MAX);
3027     return 0;
3028 }
3029
3030 static int opt_thread_count(const char *opt, const char *arg)
3031 {
3032     thread_count= parse_number_or_die(opt, arg, OPT_INT64, 0, INT_MAX);
3033 #if !HAVE_THREADS
3034     fprintf(stderr, "Warning: not compiled with thread support, using thread emulation\n");
3035 #endif
3036     return 0;
3037 }
3038
/* Command-line option table consumed by parse_options() (cmdutils).
 * Each entry: name, flag bits, handler/variable union, help text and
 * (optionally) argument name.  The common -h/-version/... options are
 * pulled in from cmdutils_common_opts.h. */
static const OptionDef options[] = {
#include "cmdutils_common_opts.h"
    /* display geometry and mode */
    { "x", HAS_ARG | OPT_FUNC2, {(void*)opt_width}, "force displayed width", "width" },
    { "y", HAS_ARG | OPT_FUNC2, {(void*)opt_height}, "force displayed height", "height" },
    { "s", HAS_ARG | OPT_VIDEO, {(void*)opt_frame_size}, "set frame size (WxH or abbreviation)", "size" },
    { "fs", OPT_BOOL, {(void*)&is_full_screen}, "force full screen" },
    /* stream selection */
    { "an", OPT_BOOL, {(void*)&audio_disable}, "disable audio" },
    { "vn", OPT_BOOL, {(void*)&video_disable}, "disable video" },
    { "ast", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&wanted_stream[AVMEDIA_TYPE_AUDIO]}, "select desired audio stream", "stream_number" },
    { "vst", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&wanted_stream[AVMEDIA_TYPE_VIDEO]}, "select desired video stream", "stream_number" },
    { "sst", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&wanted_stream[AVMEDIA_TYPE_SUBTITLE]}, "select desired subtitle stream", "stream_number" },
    /* playback range and seeking */
    { "ss", HAS_ARG | OPT_FUNC2, {(void*)&opt_seek}, "seek to a given position in seconds", "pos" },
    { "t", HAS_ARG | OPT_FUNC2, {(void*)&opt_duration}, "play  \"duration\" seconds of audio/video", "duration" },
    { "bytes", OPT_INT | HAS_ARG, {(void*)&seek_by_bytes}, "seek by bytes 0=off 1=on -1=auto", "val" },
    { "nodisp", OPT_BOOL, {(void*)&display_disable}, "disable graphical display" },
    { "f", HAS_ARG, {(void*)opt_format}, "force format", "fmt" },
    { "pix_fmt", HAS_ARG | OPT_EXPERT | OPT_VIDEO, {(void*)opt_frame_pix_fmt}, "set pixel format", "format" },
    { "stats", OPT_BOOL | OPT_EXPERT, {(void*)&show_status}, "show status", "" },
    /* decoder tuning / debugging */
    { "debug", HAS_ARG | OPT_FUNC2 | OPT_EXPERT, {(void*)opt_debug}, "print specific debug info", "" },
    { "bug", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&workaround_bugs}, "workaround bugs", "" },
    { "vismv", HAS_ARG | OPT_FUNC2 | OPT_EXPERT, {(void*)opt_vismv}, "visualize motion vectors", "" },
    { "fast", OPT_BOOL | OPT_EXPERT, {(void*)&fast}, "non spec compliant optimizations", "" },
    { "genpts", OPT_BOOL | OPT_EXPERT, {(void*)&genpts}, "generate pts", "" },
    { "drp", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&decoder_reorder_pts}, "let decoder reorder pts 0=off 1=on -1=auto", ""},
    { "lowres", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&lowres}, "", "" },
    { "skiploop", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&skip_loop_filter}, "", "" },
    { "skipframe", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&skip_frame}, "", "" },
    { "skipidct", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&skip_idct}, "", "" },
    { "idct", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&idct}, "set idct algo",  "algo" },
    { "er", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&error_recognition}, "set error detection threshold (0-4)",  "threshold" },
    { "ec", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&error_concealment}, "set error concealment options",  "bit_mask" },
    { "sync", HAS_ARG | OPT_FUNC2 | OPT_EXPERT, {(void*)opt_sync}, "set audio-video sync. type (type=audio/video/ext)", "type" },
    { "threads", HAS_ARG | OPT_FUNC2 | OPT_EXPERT, {(void*)opt_thread_count}, "thread count", "count" },
    /* behaviour of the player itself */
    { "autoexit", OPT_BOOL | OPT_EXPERT, {(void*)&autoexit}, "exit at the end", "" },
    { "exitonkeydown", OPT_BOOL | OPT_EXPERT, {(void*)&exit_on_keydown}, "exit on key down", "" },
    { "exitonmousedown", OPT_BOOL | OPT_EXPERT, {(void*)&exit_on_mousedown}, "exit on mouse down", "" },
    { "loop", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&loop}, "set number of times the playback shall be looped", "loop count" },
    { "framedrop", OPT_BOOL | OPT_EXPERT, {(void*)&framedrop}, "drop frames when cpu is too slow", "" },
    { "window_title", OPT_STRING | HAS_ARG, {(void*)&window_title}, "set window title", "window title" },
#if CONFIG_AVFILTER
    { "vf", OPT_STRING | HAS_ARG, {(void*)&vfilters}, "video filters", "filter list" },
#endif
    { "rdftspeed", OPT_INT | HAS_ARG| OPT_AUDIO | OPT_EXPERT, {(void*)&rdftspeed}, "rdft speed", "msecs" },
    { "default", OPT_FUNC2 | HAS_ARG | OPT_AUDIO | OPT_VIDEO | OPT_EXPERT, {(void*)opt_default}, "generic catch all option", "" },
    { NULL, },
};
3085
/* Print the one-line usage banner (output identical to the original
 * three printf calls). */
static void show_usage(void)
{
    printf("Simple media player\n"
           "usage: ffplay [options] input_file\n"
           "\n");
}
3092
/* Print usage, the option tables (main then expert) and the interactive
 * key bindings.  Triggered by -h via cmdutils. */
static void show_help(void)
{
    show_usage();
    show_help_options(options, "Main options:\n",
                      OPT_EXPERT, 0);
    show_help_options(options, "\nAdvanced options:\n",
                      OPT_EXPERT, OPT_EXPERT);
    printf("\nWhile playing:\n"
           "q, ESC              quit\n"
           "f                   toggle full screen\n"
           "p, SPC              pause\n"
           "a                   cycle audio channel\n"
           "v                   cycle video channel\n"
           "t                   cycle subtitle channel\n"
           "w                   show audio waves\n"
           "s                   activate frame-step mode\n"
           "left/right          seek backward/forward 10 seconds\n"
           "down/up             seek backward/forward 1 minute\n"
           "mouse click         seek to percentage in file corresponding to fraction of width\n"
           );
}
3114
3115 static void opt_input_file(const char *filename)
3116 {
3117     if (input_filename) {
3118         fprintf(stderr, "Argument '%s' provided as input filename, but '%s' was already specified.\n",
3119                 filename, input_filename);
3120         exit(1);
3121     }
3122     if (!strcmp(filename, "-"))
3123         filename = "pipe:";
3124     input_filename = filename;
3125 }
3126
3127 /* Called from the main */
3128 int main(int argc, char **argv)
3129 {
3130     int flags, i;
3131
3132     /* register all codecs, demux and protocols */
3133     avcodec_register_all();
3134 #if CONFIG_AVDEVICE
3135     avdevice_register_all();
3136 #endif
3137 #if CONFIG_AVFILTER
3138     avfilter_register_all();
3139 #endif
3140     av_register_all();
3141
3142     for(i=0; i<AVMEDIA_TYPE_NB; i++){
3143         avcodec_opts[i]= avcodec_alloc_context2(i);
3144     }
3145     avformat_opts = avformat_alloc_context();
3146 #if !CONFIG_AVFILTER
3147     sws_opts = sws_getContext(16,16,0, 16,16,0, sws_flags, NULL,NULL,NULL);
3148 #endif
3149
3150     show_banner();
3151
3152     parse_options(argc, argv, options, opt_input_file);
3153
3154     if (!input_filename) {
3155         show_usage();
3156         fprintf(stderr, "An input file must be specified\n");
3157         fprintf(stderr, "Use -h to get full help or, even better, run 'man ffplay'\n");
3158         exit(1);
3159     }
3160
3161     if (display_disable) {
3162         video_disable = 1;
3163     }
3164     flags = SDL_INIT_VIDEO | SDL_INIT_AUDIO | SDL_INIT_TIMER;
3165 #if !defined(__MINGW32__) && !defined(__APPLE__)
3166     flags |= SDL_INIT_EVENTTHREAD; /* Not supported on Windows or Mac OS X */
3167 #endif
3168     if (SDL_Init (flags)) {
3169         fprintf(stderr, "Could not initialize SDL - %s\n", SDL_GetError());
3170         exit(1);
3171     }
3172
3173     if (!display_disable) {
3174 #if HAVE_SDL_VIDEO_SIZE
3175         const SDL_VideoInfo *vi = SDL_GetVideoInfo();
3176         fs_screen_width = vi->current_w;
3177         fs_screen_height = vi->current_h;
3178 #endif
3179     }
3180
3181     SDL_EventState(SDL_ACTIVEEVENT, SDL_IGNORE);
3182     SDL_EventState(SDL_SYSWMEVENT, SDL_IGNORE);
3183     SDL_EventState(SDL_USEREVENT, SDL_IGNORE);
3184
3185     av_init_packet(&flush_pkt);
3186     flush_pkt.data= "FLUSH";
3187
3188     cur_stream = stream_open(input_filename, file_iformat);
3189
3190     event_loop();
3191
3192     /* never returns */
3193
3194     return 0;
3195 }