]> git.sesse.net Git - ffmpeg/blob - ffplay.c
Fix MMX rgb24 to yuv conversion with gcc 4.6
[ffmpeg] / ffplay.c
1 /*
2  * FFplay : Simple Media Player based on the FFmpeg libraries
3  * Copyright (c) 2003 Fabrice Bellard
4  *
5  * This file is part of FFmpeg.
6  *
7  * FFmpeg is free software; you can redistribute it and/or
8  * modify it under the terms of the GNU Lesser General Public
9  * License as published by the Free Software Foundation; either
10  * version 2.1 of the License, or (at your option) any later version.
11  *
12  * FFmpeg is distributed in the hope that it will be useful,
13  * but WITHOUT ANY WARRANTY; without even the implied warranty of
14  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
15  * Lesser General Public License for more details.
16  *
17  * You should have received a copy of the GNU Lesser General Public
18  * License along with FFmpeg; if not, write to the Free Software
19  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
20  */
21
22 #define _XOPEN_SOURCE 600
23
24 #include "config.h"
25 #include <inttypes.h>
26 #include <math.h>
27 #include <limits.h>
28 #include "libavutil/avstring.h"
29 #include "libavutil/colorspace.h"
30 #include "libavutil/pixdesc.h"
31 #include "libavcore/imgutils.h"
32 #include "libavcore/parseutils.h"
33 #include "libavcore/samplefmt.h"
34 #include "libavformat/avformat.h"
35 #include "libavdevice/avdevice.h"
36 #include "libswscale/swscale.h"
37 #include "libavcodec/audioconvert.h"
38 #include "libavcodec/opt.h"
39 #include "libavcodec/avfft.h"
40
41 #if CONFIG_AVFILTER
42 # include "libavfilter/avfilter.h"
43 # include "libavfilter/avfiltergraph.h"
44 #endif
45
46 #include "cmdutils.h"
47
48 #include <SDL.h>
49 #include <SDL_thread.h>
50
51 #ifdef __MINGW32__
52 #undef main /* We don't want SDL to override our main() */
53 #endif
54
55 #include <unistd.h>
56 #include <assert.h>
57
const char program_name[] = "FFplay";
const int program_birth_year = 2003;

//#define DEBUG
//#define DEBUG_SYNC

/* upper bound on the total bytes buffered across the packet queues */
#define MAX_QUEUE_SIZE (15 * 1024 * 1024)
/* keep reading while the audio queue holds less than this many bytes */
#define MIN_AUDIOQ_SIZE (20 * 16 * 1024)
/* minimum number of packets to keep per queue before pausing the demuxer */
#define MIN_FRAMES 5

/* SDL audio buffer size, in samples. Should be small to have precise
   A/V sync as SDL does not have hardware buffer fullness info. */
#define SDL_AUDIO_BUFFER_SIZE 1024

/* no AV sync correction is done if below the AV sync threshold */
#define AV_SYNC_THRESHOLD 0.01
/* no AV correction is done if too big error */
#define AV_NOSYNC_THRESHOLD 10.0

/* weight of each new measurement in the frame-skip moving average */
#define FRAME_SKIP_FACTOR 0.05

/* maximum audio speed change to get correct sync */
#define SAMPLE_CORRECTION_PERCENT_MAX 10

/* we use about AUDIO_DIFF_AVG_NB A-V differences to make the average */
#define AUDIO_DIFF_AVG_NB   20

/* NOTE: the size must be big enough to compensate the hardware audio buffersize size */
#define SAMPLE_ARRAY_SIZE (2*65536)

/* scaler algorithm used when software conversion is needed */
static int sws_flags = SWS_BICUBIC;
89
/* Thread-safe FIFO of demuxed packets, shared between the demuxer
   thread (producer) and the decoder threads (consumers). */
typedef struct PacketQueue {
    AVPacketList *first_pkt, *last_pkt;  /* singly-linked list head / tail */
    int nb_packets;                      /* number of queued packets */
    int size;                            /* payload bytes + per-node overhead */
    int abort_request;                   /* when set, blocked readers return -1 */
    SDL_mutex *mutex;                    /* protects all fields above */
    SDL_cond *cond;                      /* signalled on put and on abort */
} PacketQueue;

#define VIDEO_PICTURE_QUEUE_SIZE 2
#define SUBPICTURE_QUEUE_SIZE 4
101
/* One entry of the decoded-picture queue, ready for display. */
typedef struct VideoPicture {
    double pts;                                  ///<presentation time stamp for this picture
    double target_clock;                         ///<av_gettime() time at which this should be displayed ideally
    int64_t pos;                                 ///<byte position in file
    SDL_Overlay *bmp;                            ///<SDL YUV overlay holding the pixels
    int width, height; /* source height & width */
    int allocated;                               ///<true once bmp has been allocated
    enum PixelFormat pix_fmt;                    ///<pixel format of the source frame

#if CONFIG_AVFILTER
    AVFilterBufferRef *picref;                   ///<filtered frame reference (owns the data)
#endif
} VideoPicture;
115
/* One entry of the decoded-subtitle queue. */
typedef struct SubPicture {
    double pts; /* presentation time stamp for this picture */
    AVSubtitle sub;   /* decoded subtitle; released with avsubtitle_free() */
} SubPicture;

/* which clock drives A/V synchronization */
enum {
    AV_SYNC_AUDIO_MASTER, /* default choice */
    AV_SYNC_VIDEO_MASTER,
    AV_SYNC_EXTERNAL_CLOCK, /* synchronize to an external clock */
};
126
/* Global state of one playing stream: demuxer, per-media decoding state,
   display state and the threads that operate on them. */
typedef struct VideoState {
    SDL_Thread *parse_tid;      /* demuxer (read) thread */
    SDL_Thread *video_tid;      /* video decoding thread */
    SDL_Thread *refresh_tid;    /* thread pushing FF_REFRESH_EVENTs */
    AVInputFormat *iformat;     /* forced input format, if any */
    int no_background;
    int abort_request;          /* asks all threads to terminate */
    int paused;
    int last_paused;
    int seek_req;               /* a seek has been requested */
    int seek_flags;             /* AVSEEK_FLAG_* for the pending seek */
    int64_t seek_pos;
    int64_t seek_rel;
    int read_pause_return;
    AVFormatContext *ic;
    int dtg_active_format;

    int audio_stream;           /* audio stream index, or -1 */

    int av_sync_type;           /* one of the AV_SYNC_* values above */
    double external_clock; /* external clock base */
    int64_t external_clock_time;

    double audio_clock;         /* pts of the last played audio sample */
    double audio_diff_cum; /* used for AV difference average computation */
    double audio_diff_avg_coef;
    double audio_diff_threshold;
    int audio_diff_avg_count;
    AVStream *audio_st;
    PacketQueue audioq;
    int audio_hw_buf_size;
    /* samples output by the codec. we reserve more space for avsync
       compensation */
    DECLARE_ALIGNED(16,uint8_t,audio_buf1)[(AVCODEC_MAX_AUDIO_FRAME_SIZE * 3) / 2];
    DECLARE_ALIGNED(16,uint8_t,audio_buf2)[(AVCODEC_MAX_AUDIO_FRAME_SIZE * 3) / 2];
    uint8_t *audio_buf;          /* currently active output buffer */
    unsigned int audio_buf_size; /* in bytes */
    int audio_buf_index; /* in bytes */
    AVPacket audio_pkt_temp;
    AVPacket audio_pkt;
    enum AVSampleFormat audio_src_fmt;  /* sample format delivered by the decoder */
    AVAudioConvert *reformat_ctx;       /* converter to the SDL output format */

    int show_audio; /* if true, display audio samples */
    int16_t sample_array[SAMPLE_ARRAY_SIZE];  /* ring buffer of recent samples for the visualizer */
    int sample_array_index;
    int last_i_start;
    RDFTContext *rdft;          /* RDFT used by the spectrum display */
    int rdft_bits;
    FFTSample *rdft_data;
    int xpos;                   /* current column of the spectrum display */

    SDL_Thread *subtitle_tid;   /* subtitle decoding thread */
    int subtitle_stream;
    int subtitle_stream_changed;
    AVStream *subtitle_st;
    PacketQueue subtitleq;
    SubPicture subpq[SUBPICTURE_QUEUE_SIZE];
    int subpq_size, subpq_rindex, subpq_windex;
    SDL_mutex *subpq_mutex;
    SDL_cond *subpq_cond;

    double frame_timer;
    double frame_last_pts;
    double frame_last_delay;
    double video_clock;                          ///<pts of last decoded frame / predicted pts of next decoded frame
    int video_stream;
    AVStream *video_st;
    PacketQueue videoq;
    double video_current_pts;                    ///<current displayed pts (different from video_clock if frame fifos are used)
    double video_current_pts_drift;              ///<video_current_pts - time (av_gettime) at which we updated video_current_pts - used to have running video pts
    int64_t video_current_pos;                   ///<current displayed file pos
    VideoPicture pictq[VIDEO_PICTURE_QUEUE_SIZE];
    int pictq_size, pictq_rindex, pictq_windex;
    SDL_mutex *pictq_mutex;
    SDL_cond *pictq_cond;
#if !CONFIG_AVFILTER
    struct SwsContext *img_convert_ctx;
#endif

    //    QETimer *video_timer;
    char filename[1024];
    int width, height, xleft, ytop;   /* display area inside the SDL surface */

    PtsCorrectionContext pts_ctx;

#if CONFIG_AVFILTER
    AVFilterContext *out_video_filter;          ///<the last filter in the video chain
#endif

    float skip_frames;
    float skip_frames_index;
    int refresh;                /* set while a refresh event is pending */
} VideoState;
221
static void show_help(void);
static int audio_write_get_buf_size(VideoState *is);

/* options specified by the user */
static AVInputFormat *file_iformat;
static const char *input_filename;
static const char *window_title;
static int fs_screen_width;
static int fs_screen_height;
static int screen_width = 0;
static int screen_height = 0;
static int frame_width = 0;
static int frame_height = 0;
static enum PixelFormat frame_pix_fmt = PIX_FMT_NONE;
static int audio_disable;
static int video_disable;
/* per-media-type stream index forced by the user (-1 = auto-select) */
static int wanted_stream[AVMEDIA_TYPE_NB]={
    [AVMEDIA_TYPE_AUDIO]=-1,
    [AVMEDIA_TYPE_VIDEO]=-1,
    [AVMEDIA_TYPE_SUBTITLE]=-1,
};
static int seek_by_bytes=-1;   /* -1: decide per container */
static int display_disable;
static int show_status = 1;
static int av_sync_type = AV_SYNC_AUDIO_MASTER;
static int64_t start_time = AV_NOPTS_VALUE;
static int64_t duration = AV_NOPTS_VALUE;
static int debug = 0;
static int debug_mv = 0;
static int step = 0;
static int thread_count = 1;
static int workaround_bugs = 1;
static int fast = 0;
static int genpts = 0;
static int lowres = 0;
static int idct = FF_IDCT_AUTO;
static enum AVDiscard skip_frame= AVDISCARD_DEFAULT;
static enum AVDiscard skip_idct= AVDISCARD_DEFAULT;
static enum AVDiscard skip_loop_filter= AVDISCARD_DEFAULT;
static int error_recognition = FF_ER_CAREFUL;
static int error_concealment = 3;
static int decoder_reorder_pts= -1;
static int autoexit;
static int exit_on_keydown;
static int exit_on_mousedown;
static int loop=1;
static int framedrop=1;

static int rdftspeed=20;   /* spectrum refresh period, in ms (see refresh_thread) */
#if CONFIG_AVFILTER
static char *vfilters = NULL;
#endif

/* current context */
static int is_full_screen;
static VideoState *cur_stream;
static int64_t audio_callback_time;   /* av_gettime() of the last audio callback */

/* sentinel packet; queued untouched (never duplicated, see packet_queue_put) */
static AVPacket flush_pkt;

/* custom SDL user events */
#define FF_ALLOC_EVENT   (SDL_USEREVENT)
#define FF_REFRESH_EVENT (SDL_USEREVENT + 1)
#define FF_QUIT_EVENT    (SDL_USEREVENT + 2)

static SDL_Surface *screen;

static int packet_queue_put(PacketQueue *q, AVPacket *pkt);
289
290 /* packet queue handling */
291 static void packet_queue_init(PacketQueue *q)
292 {
293     memset(q, 0, sizeof(PacketQueue));
294     q->mutex = SDL_CreateMutex();
295     q->cond = SDL_CreateCond();
296     packet_queue_put(q, &flush_pkt);
297 }
298
299 static void packet_queue_flush(PacketQueue *q)
300 {
301     AVPacketList *pkt, *pkt1;
302
303     SDL_LockMutex(q->mutex);
304     for(pkt = q->first_pkt; pkt != NULL; pkt = pkt1) {
305         pkt1 = pkt->next;
306         av_free_packet(&pkt->pkt);
307         av_freep(&pkt);
308     }
309     q->last_pkt = NULL;
310     q->first_pkt = NULL;
311     q->nb_packets = 0;
312     q->size = 0;
313     SDL_UnlockMutex(q->mutex);
314 }
315
/* Destroy a queue: drop all packets, then free its mutex and condition. */
static void packet_queue_end(PacketQueue *q)
{
    packet_queue_flush(q);
    SDL_DestroyMutex(q->mutex);
    SDL_DestroyCond(q->cond);
}
322
323 static int packet_queue_put(PacketQueue *q, AVPacket *pkt)
324 {
325     AVPacketList *pkt1;
326
327     /* duplicate the packet */
328     if (pkt!=&flush_pkt && av_dup_packet(pkt) < 0)
329         return -1;
330
331     pkt1 = av_malloc(sizeof(AVPacketList));
332     if (!pkt1)
333         return -1;
334     pkt1->pkt = *pkt;
335     pkt1->next = NULL;
336
337
338     SDL_LockMutex(q->mutex);
339
340     if (!q->last_pkt)
341
342         q->first_pkt = pkt1;
343     else
344         q->last_pkt->next = pkt1;
345     q->last_pkt = pkt1;
346     q->nb_packets++;
347     q->size += pkt1->pkt.size + sizeof(*pkt1);
348     /* XXX: should duplicate packet data in DV case */
349     SDL_CondSignal(q->cond);
350
351     SDL_UnlockMutex(q->mutex);
352     return 0;
353 }
354
355 static void packet_queue_abort(PacketQueue *q)
356 {
357     SDL_LockMutex(q->mutex);
358
359     q->abort_request = 1;
360
361     SDL_CondSignal(q->cond);
362
363     SDL_UnlockMutex(q->mutex);
364 }
365
366 /* return < 0 if aborted, 0 if no packet and > 0 if packet.  */
367 static int packet_queue_get(PacketQueue *q, AVPacket *pkt, int block)
368 {
369     AVPacketList *pkt1;
370     int ret;
371
372     SDL_LockMutex(q->mutex);
373
374     for(;;) {
375         if (q->abort_request) {
376             ret = -1;
377             break;
378         }
379
380         pkt1 = q->first_pkt;
381         if (pkt1) {
382             q->first_pkt = pkt1->next;
383             if (!q->first_pkt)
384                 q->last_pkt = NULL;
385             q->nb_packets--;
386             q->size -= pkt1->pkt.size + sizeof(*pkt1);
387             *pkt = pkt1->pkt;
388             av_free(pkt1);
389             ret = 1;
390             break;
391         } else if (!block) {
392             ret = 0;
393             break;
394         } else {
395             SDL_CondWait(q->cond, q->mutex);
396         }
397     }
398     SDL_UnlockMutex(q->mutex);
399     return ret;
400 }
401
402 static inline void fill_rectangle(SDL_Surface *screen,
403                                   int x, int y, int w, int h, int color)
404 {
405     SDL_Rect rect;
406     rect.x = x;
407     rect.y = y;
408     rect.w = w;
409     rect.h = h;
410     SDL_FillRect(screen, &rect, color);
411 }
412
#if 0
/* draw only the border of a rectangle */
/* Paints the four bands of the display area that lie outside the
   (x, y, w, h) rectangle, clamped to the surface. Currently disabled. */
void fill_border(VideoState *s, int x, int y, int w, int h, int color)
{
    int w1, w2, h1, h2;

    /* fill the background: compute the band widths/heights, clamped >= 0 */
    w1 = x;
    if (w1 < 0)
        w1 = 0;
    w2 = s->width - (x + w);
    if (w2 < 0)
        w2 = 0;
    h1 = y;
    if (h1 < 0)
        h1 = 0;
    h2 = s->height - (y + h);
    if (h2 < 0)
        h2 = 0;
    /* left band */
    fill_rectangle(screen,
                   s->xleft, s->ytop,
                   w1, s->height,
                   color);
    /* right band */
    fill_rectangle(screen,
                   s->xleft + s->width - w2, s->ytop,
                   w2, s->height,
                   color);
    /* top band */
    fill_rectangle(screen,
                   s->xleft + w1, s->ytop,
                   s->width - w1 - w2, h1,
                   color);
    /* bottom band */
    fill_rectangle(screen,
                   s->xleft + w1, s->ytop + s->height - h2,
                   s->width - w1 - w2, h2,
                   color);
}
#endif
450
/* Blend newp over oldp with alpha a (0..255); s pre-shifts oldp so that
   chroma values summed over 2^s pixels keep full precision. */
#define ALPHA_BLEND(a, oldp, newp, s)\
((((oldp << s) * (255 - (a))) + (newp * (a))) / (255 << s))

/* Unpack a 32-bit ARGB pixel at s into its four components. */
#define RGBA_IN(r, g, b, a, s)\
{\
    unsigned int v = ((const uint32_t *)(s))[0];\
    a = (v >> 24) & 0xff;\
    r = (v >> 16) & 0xff;\
    g = (v >> 8) & 0xff;\
    b = v & 0xff;\
}

/* Look up the palette entry for the 8-bit index at s and unpack it as
   A/Y/U/V components (the palette entries are already in YCrCb). */
#define YUVA_IN(y, u, v, a, s, pal)\
{\
    unsigned int val = ((const uint32_t *)(pal))[*(const uint8_t*)(s)];\
    a = (val >> 24) & 0xff;\
    y = (val >> 16) & 0xff;\
    u = (val >> 8) & 0xff;\
    v = val & 0xff;\
}

/* Pack A/Y/U/V components back into the 32-bit word at d. */
#define YUVA_OUT(d, y, u, v, a)\
{\
    ((uint32_t *)(d))[0] = (a << 24) | (y << 16) | (u << 8) | v;\
}


/* bytes per pixel of the palettized subtitle bitmap */
#define BPP 1
479
480 static void blend_subrect(AVPicture *dst, const AVSubtitleRect *rect, int imgw, int imgh)
481 {
482     int wrap, wrap3, width2, skip2;
483     int y, u, v, a, u1, v1, a1, w, h;
484     uint8_t *lum, *cb, *cr;
485     const uint8_t *p;
486     const uint32_t *pal;
487     int dstx, dsty, dstw, dsth;
488
489     dstw = av_clip(rect->w, 0, imgw);
490     dsth = av_clip(rect->h, 0, imgh);
491     dstx = av_clip(rect->x, 0, imgw - dstw);
492     dsty = av_clip(rect->y, 0, imgh - dsth);
493     lum = dst->data[0] + dsty * dst->linesize[0];
494     cb = dst->data[1] + (dsty >> 1) * dst->linesize[1];
495     cr = dst->data[2] + (dsty >> 1) * dst->linesize[2];
496
497     width2 = ((dstw + 1) >> 1) + (dstx & ~dstw & 1);
498     skip2 = dstx >> 1;
499     wrap = dst->linesize[0];
500     wrap3 = rect->pict.linesize[0];
501     p = rect->pict.data[0];
502     pal = (const uint32_t *)rect->pict.data[1];  /* Now in YCrCb! */
503
504     if (dsty & 1) {
505         lum += dstx;
506         cb += skip2;
507         cr += skip2;
508
509         if (dstx & 1) {
510             YUVA_IN(y, u, v, a, p, pal);
511             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
512             cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
513             cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
514             cb++;
515             cr++;
516             lum++;
517             p += BPP;
518         }
519         for(w = dstw - (dstx & 1); w >= 2; w -= 2) {
520             YUVA_IN(y, u, v, a, p, pal);
521             u1 = u;
522             v1 = v;
523             a1 = a;
524             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
525
526             YUVA_IN(y, u, v, a, p + BPP, pal);
527             u1 += u;
528             v1 += v;
529             a1 += a;
530             lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
531             cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
532             cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
533             cb++;
534             cr++;
535             p += 2 * BPP;
536             lum += 2;
537         }
538         if (w) {
539             YUVA_IN(y, u, v, a, p, pal);
540             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
541             cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
542             cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
543             p++;
544             lum++;
545         }
546         p += wrap3 - dstw * BPP;
547         lum += wrap - dstw - dstx;
548         cb += dst->linesize[1] - width2 - skip2;
549         cr += dst->linesize[2] - width2 - skip2;
550     }
551     for(h = dsth - (dsty & 1); h >= 2; h -= 2) {
552         lum += dstx;
553         cb += skip2;
554         cr += skip2;
555
556         if (dstx & 1) {
557             YUVA_IN(y, u, v, a, p, pal);
558             u1 = u;
559             v1 = v;
560             a1 = a;
561             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
562             p += wrap3;
563             lum += wrap;
564             YUVA_IN(y, u, v, a, p, pal);
565             u1 += u;
566             v1 += v;
567             a1 += a;
568             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
569             cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
570             cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
571             cb++;
572             cr++;
573             p += -wrap3 + BPP;
574             lum += -wrap + 1;
575         }
576         for(w = dstw - (dstx & 1); w >= 2; w -= 2) {
577             YUVA_IN(y, u, v, a, p, pal);
578             u1 = u;
579             v1 = v;
580             a1 = a;
581             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
582
583             YUVA_IN(y, u, v, a, p + BPP, pal);
584             u1 += u;
585             v1 += v;
586             a1 += a;
587             lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
588             p += wrap3;
589             lum += wrap;
590
591             YUVA_IN(y, u, v, a, p, pal);
592             u1 += u;
593             v1 += v;
594             a1 += a;
595             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
596
597             YUVA_IN(y, u, v, a, p + BPP, pal);
598             u1 += u;
599             v1 += v;
600             a1 += a;
601             lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
602
603             cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 2);
604             cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 2);
605
606             cb++;
607             cr++;
608             p += -wrap3 + 2 * BPP;
609             lum += -wrap + 2;
610         }
611         if (w) {
612             YUVA_IN(y, u, v, a, p, pal);
613             u1 = u;
614             v1 = v;
615             a1 = a;
616             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
617             p += wrap3;
618             lum += wrap;
619             YUVA_IN(y, u, v, a, p, pal);
620             u1 += u;
621             v1 += v;
622             a1 += a;
623             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
624             cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
625             cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
626             cb++;
627             cr++;
628             p += -wrap3 + BPP;
629             lum += -wrap + 1;
630         }
631         p += wrap3 + (wrap3 - dstw * BPP);
632         lum += wrap + (wrap - dstw - dstx);
633         cb += dst->linesize[1] - width2 - skip2;
634         cr += dst->linesize[2] - width2 - skip2;
635     }
636     /* handle odd height */
637     if (h) {
638         lum += dstx;
639         cb += skip2;
640         cr += skip2;
641
642         if (dstx & 1) {
643             YUVA_IN(y, u, v, a, p, pal);
644             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
645             cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
646             cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
647             cb++;
648             cr++;
649             lum++;
650             p += BPP;
651         }
652         for(w = dstw - (dstx & 1); w >= 2; w -= 2) {
653             YUVA_IN(y, u, v, a, p, pal);
654             u1 = u;
655             v1 = v;
656             a1 = a;
657             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
658
659             YUVA_IN(y, u, v, a, p + BPP, pal);
660             u1 += u;
661             v1 += v;
662             a1 += a;
663             lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
664             cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u, 1);
665             cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v, 1);
666             cb++;
667             cr++;
668             p += 2 * BPP;
669             lum += 2;
670         }
671         if (w) {
672             YUVA_IN(y, u, v, a, p, pal);
673             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
674             cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
675             cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
676         }
677     }
678 }
679
/* Release all data owned by a queued subtitle entry. */
static void free_subpicture(SubPicture *sp)
{
    avsubtitle_free(&sp->sub);
}
684
/* Blit the current picture of the picture queue to the screen: compute the
   display aspect ratio, blend any due subtitle into the overlay, then
   letterbox the overlay into the window. */
static void video_image_display(VideoState *is)
{
    VideoPicture *vp;
    SubPicture *sp;
    AVPicture pict;
    float aspect_ratio;
    int width, height, x, y;
    SDL_Rect rect;
    int i;

    vp = &is->pictq[is->pictq_rindex];
    if (vp->bmp) {
        /* sample aspect ratio: filter output, else stream, else codec */
#if CONFIG_AVFILTER
         if (vp->picref->video->pixel_aspect.num == 0)
             aspect_ratio = 0;
         else
             aspect_ratio = av_q2d(vp->picref->video->pixel_aspect);
#else

        /* XXX: use variable in the frame */
        if (is->video_st->sample_aspect_ratio.num)
            aspect_ratio = av_q2d(is->video_st->sample_aspect_ratio);
        else if (is->video_st->codec->sample_aspect_ratio.num)
            aspect_ratio = av_q2d(is->video_st->codec->sample_aspect_ratio);
        else
            aspect_ratio = 0;
#endif
        if (aspect_ratio <= 0.0)
            aspect_ratio = 1.0;
        /* convert sample aspect ratio to display aspect ratio */
        aspect_ratio *= (float)vp->width / (float)vp->height;
        /* if an active format is indicated, then it overrides the
           mpeg format */
#if 0
        if (is->video_st->codec->dtg_active_format != is->dtg_active_format) {
            is->dtg_active_format = is->video_st->codec->dtg_active_format;
            printf("dtg_active_format=%d\n", is->dtg_active_format);
        }
#endif
#if 0
        switch(is->video_st->codec->dtg_active_format) {
        case FF_DTG_AFD_SAME:
        default:
            /* nothing to do */
            break;
        case FF_DTG_AFD_4_3:
            aspect_ratio = 4.0 / 3.0;
            break;
        case FF_DTG_AFD_16_9:
            aspect_ratio = 16.0 / 9.0;
            break;
        case FF_DTG_AFD_14_9:
            aspect_ratio = 14.0 / 9.0;
            break;
        case FF_DTG_AFD_4_3_SP_14_9:
            aspect_ratio = 14.0 / 9.0;
            break;
        case FF_DTG_AFD_16_9_SP_14_9:
            aspect_ratio = 14.0 / 9.0;
            break;
        case FF_DTG_AFD_SP_4_3:
            aspect_ratio = 4.0 / 3.0;
            break;
        }
#endif

        /* blend the current subtitle (if its display time has come) directly
           into the overlay pixels */
        if (is->subtitle_st)
        {
            if (is->subpq_size > 0)
            {
                sp = &is->subpq[is->subpq_rindex];

                if (vp->pts >= sp->pts + ((float) sp->sub.start_display_time / 1000))
                {
                    SDL_LockYUVOverlay (vp->bmp);

                    /* NOTE(review): planes 1 and 2 are swapped here —
                       presumably the overlay stores V before U; confirm
                       against the overlay's pixel format */
                    pict.data[0] = vp->bmp->pixels[0];
                    pict.data[1] = vp->bmp->pixels[2];
                    pict.data[2] = vp->bmp->pixels[1];

                    pict.linesize[0] = vp->bmp->pitches[0];
                    pict.linesize[1] = vp->bmp->pitches[2];
                    pict.linesize[2] = vp->bmp->pitches[1];

                    for (i = 0; i < sp->sub.num_rects; i++)
                        blend_subrect(&pict, sp->sub.rects[i],
                                      vp->bmp->w, vp->bmp->h);

                    SDL_UnlockYUVOverlay (vp->bmp);
                }
            }
        }


        /* XXX: we suppose the screen has a 1.0 pixel ratio */
        /* letterbox: largest even-sized rectangle with the right aspect */
        height = is->height;
        width = ((int)rint(height * aspect_ratio)) & ~1;
        if (width > is->width) {
            width = is->width;
            height = ((int)rint(width / aspect_ratio)) & ~1;
        }
        x = (is->width - width) / 2;
        y = (is->height - height) / 2;
        if (!is->no_background) {
            /* fill the background */
            //            fill_border(is, x, y, width, height, QERGB(0x00, 0x00, 0x00));
        } else {
            is->no_background = 0;
        }
        rect.x = is->xleft + x;
        rect.y = is->ytop  + y;
        rect.w = width;
        rect.h = height;
        SDL_DisplayYUVOverlay(vp->bmp, &rect);
    } else {
#if 0
        fill_rectangle(screen,
                       is->xleft, is->ytop, is->width, is->height,
                       QERGB(0x00, 0x00, 0x00));
#endif
    }
}
806
/* Mathematical modulo: like a % b but the result is always in [0, b)
   for positive b, even when a is negative. */
static inline int compute_mod(int a, int b)
{
    int r = a % b;
    return r < 0 ? r + b : r;
}
815
816 static void video_audio_display(VideoState *s)
817 {
818     int i, i_start, x, y1, y, ys, delay, n, nb_display_channels;
819     int ch, channels, h, h2, bgcolor, fgcolor;
820     int16_t time_diff;
821     int rdft_bits, nb_freq;
822
823     for(rdft_bits=1; (1<<rdft_bits)<2*s->height; rdft_bits++)
824         ;
825     nb_freq= 1<<(rdft_bits-1);
826
827     /* compute display index : center on currently output samples */
828     channels = s->audio_st->codec->channels;
829     nb_display_channels = channels;
830     if (!s->paused) {
831         int data_used= s->show_audio==1 ? s->width : (2*nb_freq);
832         n = 2 * channels;
833         delay = audio_write_get_buf_size(s);
834         delay /= n;
835
836         /* to be more precise, we take into account the time spent since
837            the last buffer computation */
838         if (audio_callback_time) {
839             time_diff = av_gettime() - audio_callback_time;
840             delay -= (time_diff * s->audio_st->codec->sample_rate) / 1000000;
841         }
842
843         delay += 2*data_used;
844         if (delay < data_used)
845             delay = data_used;
846
847         i_start= x = compute_mod(s->sample_array_index - delay * channels, SAMPLE_ARRAY_SIZE);
848         if(s->show_audio==1){
849             h= INT_MIN;
850             for(i=0; i<1000; i+=channels){
851                 int idx= (SAMPLE_ARRAY_SIZE + x - i) % SAMPLE_ARRAY_SIZE;
852                 int a= s->sample_array[idx];
853                 int b= s->sample_array[(idx + 4*channels)%SAMPLE_ARRAY_SIZE];
854                 int c= s->sample_array[(idx + 5*channels)%SAMPLE_ARRAY_SIZE];
855                 int d= s->sample_array[(idx + 9*channels)%SAMPLE_ARRAY_SIZE];
856                 int score= a-d;
857                 if(h<score && (b^c)<0){
858                     h= score;
859                     i_start= idx;
860                 }
861             }
862         }
863
864         s->last_i_start = i_start;
865     } else {
866         i_start = s->last_i_start;
867     }
868
869     bgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0x00);
870     if(s->show_audio==1){
871         fill_rectangle(screen,
872                        s->xleft, s->ytop, s->width, s->height,
873                        bgcolor);
874
875         fgcolor = SDL_MapRGB(screen->format, 0xff, 0xff, 0xff);
876
877         /* total height for one channel */
878         h = s->height / nb_display_channels;
879         /* graph height / 2 */
880         h2 = (h * 9) / 20;
881         for(ch = 0;ch < nb_display_channels; ch++) {
882             i = i_start + ch;
883             y1 = s->ytop + ch * h + (h / 2); /* position of center line */
884             for(x = 0; x < s->width; x++) {
885                 y = (s->sample_array[i] * h2) >> 15;
886                 if (y < 0) {
887                     y = -y;
888                     ys = y1 - y;
889                 } else {
890                     ys = y1;
891                 }
892                 fill_rectangle(screen,
893                                s->xleft + x, ys, 1, y,
894                                fgcolor);
895                 i += channels;
896                 if (i >= SAMPLE_ARRAY_SIZE)
897                     i -= SAMPLE_ARRAY_SIZE;
898             }
899         }
900
901         fgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0xff);
902
903         for(ch = 1;ch < nb_display_channels; ch++) {
904             y = s->ytop + ch * h;
905             fill_rectangle(screen,
906                            s->xleft, y, s->width, 1,
907                            fgcolor);
908         }
909         SDL_UpdateRect(screen, s->xleft, s->ytop, s->width, s->height);
910     }else{
911         nb_display_channels= FFMIN(nb_display_channels, 2);
912         if(rdft_bits != s->rdft_bits){
913             av_rdft_end(s->rdft);
914             av_free(s->rdft_data);
915             s->rdft = av_rdft_init(rdft_bits, DFT_R2C);
916             s->rdft_bits= rdft_bits;
917             s->rdft_data= av_malloc(4*nb_freq*sizeof(*s->rdft_data));
918         }
919         {
920             FFTSample *data[2];
921             for(ch = 0;ch < nb_display_channels; ch++) {
922                 data[ch] = s->rdft_data + 2*nb_freq*ch;
923                 i = i_start + ch;
924                 for(x = 0; x < 2*nb_freq; x++) {
925                     double w= (x-nb_freq)*(1.0/nb_freq);
926                     data[ch][x]= s->sample_array[i]*(1.0-w*w);
927                     i += channels;
928                     if (i >= SAMPLE_ARRAY_SIZE)
929                         i -= SAMPLE_ARRAY_SIZE;
930                 }
931                 av_rdft_calc(s->rdft, data[ch]);
932             }
933             //least efficient way to do this, we should of course directly access it but its more than fast enough
934             for(y=0; y<s->height; y++){
935                 double w= 1/sqrt(nb_freq);
936                 int a= sqrt(w*sqrt(data[0][2*y+0]*data[0][2*y+0] + data[0][2*y+1]*data[0][2*y+1]));
937                 int b= (nb_display_channels == 2 ) ? sqrt(w*sqrt(data[1][2*y+0]*data[1][2*y+0]
938                        + data[1][2*y+1]*data[1][2*y+1])) : a;
939                 a= FFMIN(a,255);
940                 b= FFMIN(b,255);
941                 fgcolor = SDL_MapRGB(screen->format, a, b, (a+b)/2);
942
943                 fill_rectangle(screen,
944                             s->xpos, s->height-y, 1, 1,
945                             fgcolor);
946             }
947         }
948         SDL_UpdateRect(screen, s->xpos, s->ytop, 1, s->height);
949         s->xpos++;
950         if(s->xpos >= s->width)
951             s->xpos= s->xleft;
952     }
953 }
954
/* (Re)create the SDL output surface, sized from the full-screen resolution,
   the user-forced geometry, the filter/codec frame size, or a 640x480
   fallback, in that order. Returns 0 on success, -1 on error. */
static int video_open(VideoState *is){
    int flags = SDL_HWSURFACE|SDL_ASYNCBLIT|SDL_HWACCEL;
    int w,h;

    if(is_full_screen) flags |= SDL_FULLSCREEN;
    else               flags |= SDL_RESIZABLE;

    if (is_full_screen && fs_screen_width) {
        w = fs_screen_width;
        h = fs_screen_height;
    } else if(!is_full_screen && screen_width){
        w = screen_width;
        h = screen_height;
#if CONFIG_AVFILTER
    }else if (is->out_video_filter && is->out_video_filter->inputs[0]){
        w = is->out_video_filter->inputs[0]->w;
        h = is->out_video_filter->inputs[0]->h;
#else
    }else if (is->video_st && is->video_st->codec->width){
        w = is->video_st->codec->width;
        h = is->video_st->codec->height;
#endif
    } else {
        w = 640;
        h = 480;
    }
    /* nothing to do if the current surface already has the wanted size */
    if(screen && is->width == screen->w && screen->w == w
       && is->height== screen->h && screen->h == h)
        return 0;

#ifndef __APPLE__
    screen = SDL_SetVideoMode(w, h, 0, flags);
#else
    /* setting bits_per_pixel = 0 or 32 causes blank video on OS X */
    screen = SDL_SetVideoMode(w, h, 24, flags);
#endif
    if (!screen) {
        fprintf(stderr, "SDL: could not set video mode - exiting\n");
        return -1;
    }
    if (!window_title)
        window_title = input_filename;
    SDL_WM_SetCaption(window_title, window_title);

    is->width = screen->w;
    is->height = screen->h;

    return 0;
}
1004
1005 /* display the current picture, if any */
1006 static void video_display(VideoState *is)
1007 {
1008     if(!screen)
1009         video_open(cur_stream);
1010     if (is->audio_st && is->show_audio)
1011         video_audio_display(is);
1012     else if (is->video_st)
1013         video_image_display(is);
1014 }
1015
1016 static int refresh_thread(void *opaque)
1017 {
1018     VideoState *is= opaque;
1019     while(!is->abort_request){
1020         SDL_Event event;
1021         event.type = FF_REFRESH_EVENT;
1022         event.user.data1 = opaque;
1023         if(!is->refresh){
1024             is->refresh=1;
1025             SDL_PushEvent(&event);
1026         }
1027         usleep(is->audio_st && is->show_audio ? rdftspeed*1000 : 5000); //FIXME ideally we should wait the correct time but SDLs event passing is so slow it would be silly
1028     }
1029     return 0;
1030 }
1031
1032 /* get the current audio clock value */
1033 static double get_audio_clock(VideoState *is)
1034 {
1035     double pts;
1036     int hw_buf_size, bytes_per_sec;
1037     pts = is->audio_clock;
1038     hw_buf_size = audio_write_get_buf_size(is);
1039     bytes_per_sec = 0;
1040     if (is->audio_st) {
1041         bytes_per_sec = is->audio_st->codec->sample_rate *
1042             2 * is->audio_st->codec->channels;
1043     }
1044     if (bytes_per_sec)
1045         pts -= (double)hw_buf_size / bytes_per_sec;
1046     return pts;
1047 }
1048
1049 /* get the current video clock value */
1050 static double get_video_clock(VideoState *is)
1051 {
1052     if (is->paused) {
1053         return is->video_current_pts;
1054     } else {
1055         return is->video_current_pts_drift + av_gettime() / 1000000.0;
1056     }
1057 }
1058
1059 /* get the current external clock value */
1060 static double get_external_clock(VideoState *is)
1061 {
1062     int64_t ti;
1063     ti = av_gettime();
1064     return is->external_clock + ((ti - is->external_clock_time) * 1e-6);
1065 }
1066
1067 /* get the current master clock value */
1068 static double get_master_clock(VideoState *is)
1069 {
1070     double val;
1071
1072     if (is->av_sync_type == AV_SYNC_VIDEO_MASTER) {
1073         if (is->video_st)
1074             val = get_video_clock(is);
1075         else
1076             val = get_audio_clock(is);
1077     } else if (is->av_sync_type == AV_SYNC_AUDIO_MASTER) {
1078         if (is->audio_st)
1079             val = get_audio_clock(is);
1080         else
1081             val = get_video_clock(is);
1082     } else {
1083         val = get_external_clock(is);
1084     }
1085     return val;
1086 }
1087
1088 /* seek in the stream */
1089 static void stream_seek(VideoState *is, int64_t pos, int64_t rel, int seek_by_bytes)
1090 {
1091     if (!is->seek_req) {
1092         is->seek_pos = pos;
1093         is->seek_rel = rel;
1094         is->seek_flags &= ~AVSEEK_FLAG_BYTE;
1095         if (seek_by_bytes)
1096             is->seek_flags |= AVSEEK_FLAG_BYTE;
1097         is->seek_req = 1;
1098     }
1099 }
1100
/* pause or resume the video */
static void stream_pause(VideoState *is)
{
    if (is->paused) {
        /* resuming: add the wall-clock time spent paused to the frame
           timer so frame scheduling continues where it left off */
        is->frame_timer += av_gettime() / 1000000.0 + is->video_current_pts_drift - is->video_current_pts;
        /* only resync the current pts if the demuxer actually supports
           pausing (av_read_pause did not return ENOSYS) */
        if(is->read_pause_return != AVERROR(ENOSYS)){
            is->video_current_pts = is->video_current_pts_drift + av_gettime() / 1000000.0;
        }
        /* re-anchor the drift (pts minus system time) at resume time */
        is->video_current_pts_drift = is->video_current_pts - av_gettime() / 1000000.0;
    }
    is->paused = !is->paused;
}
1113
1114 static double compute_target_time(double frame_current_pts, VideoState *is)
1115 {
1116     double delay, sync_threshold, diff;
1117
1118     /* compute nominal delay */
1119     delay = frame_current_pts - is->frame_last_pts;
1120     if (delay <= 0 || delay >= 10.0) {
1121         /* if incorrect delay, use previous one */
1122         delay = is->frame_last_delay;
1123     } else {
1124         is->frame_last_delay = delay;
1125     }
1126     is->frame_last_pts = frame_current_pts;
1127
1128     /* update delay to follow master synchronisation source */
1129     if (((is->av_sync_type == AV_SYNC_AUDIO_MASTER && is->audio_st) ||
1130          is->av_sync_type == AV_SYNC_EXTERNAL_CLOCK)) {
1131         /* if video is slave, we try to correct big delays by
1132            duplicating or deleting a frame */
1133         diff = get_video_clock(is) - get_master_clock(is);
1134
1135         /* skip or repeat frame. We take into account the
1136            delay to compute the threshold. I still don't know
1137            if it is the best guess */
1138         sync_threshold = FFMAX(AV_SYNC_THRESHOLD, delay);
1139         if (fabs(diff) < AV_NOSYNC_THRESHOLD) {
1140             if (diff <= -sync_threshold)
1141                 delay = 0;
1142             else if (diff >= sync_threshold)
1143                 delay = 2 * delay;
1144         }
1145     }
1146     is->frame_timer += delay;
1147 #if defined(DEBUG_SYNC)
1148     printf("video: delay=%0.3f actual_delay=%0.3f pts=%0.3f A-V=%f\n",
1149             delay, actual_delay, frame_current_pts, -diff);
1150 #endif
1151
1152     return is->frame_timer;
1153 }
1154
/* called to display each frame */
/* Runs in the main thread on FF_REFRESH_EVENT: pops due pictures off
 * the queue, handles frame dropping, releases expired subtitles, draws
 * the picture and (optionally) prints the status line. */
static void video_refresh_timer(void *opaque)
{
    VideoState *is = opaque;
    VideoPicture *vp;

    SubPicture *sp, *sp2;

    if (is->video_st) {
retry:
        if (is->pictq_size == 0) {
            //nothing to do, no picture to display in the que
        } else {
            double time= av_gettime()/1000000.0;
            double next_target;
            /* dequeue the picture */
            vp = &is->pictq[is->pictq_rindex];

            /* not yet due: keep it queued until the next refresh */
            if(time < vp->target_clock)
                return;
            /* update current video pts */
            is->video_current_pts = vp->pts;
            is->video_current_pts_drift = is->video_current_pts - time;
            is->video_current_pos = vp->pos;
            /* estimate when the *next* picture is due, either from the
               queued successor or from the running video clock */
            if(is->pictq_size > 1){
                VideoPicture *nextvp= &is->pictq[(is->pictq_rindex+1)%VIDEO_PICTURE_QUEUE_SIZE];
                assert(nextvp->target_clock >= vp->target_clock);
                next_target= nextvp->target_clock;
            }else{
                next_target= vp->target_clock + is->video_clock - vp->pts; //FIXME pass durations cleanly
            }
            /* frame dropping: if we are already late for the next frame,
               raise the skip ratio and possibly discard this picture */
            if(framedrop && time > next_target){
                is->skip_frames *= 1.0 + FRAME_SKIP_FACTOR;
                if(is->pictq_size > 1 || time > next_target + 0.5){
                    /* update queue size and signal for next picture */
                    if (++is->pictq_rindex == VIDEO_PICTURE_QUEUE_SIZE)
                        is->pictq_rindex = 0;

                    SDL_LockMutex(is->pictq_mutex);
                    is->pictq_size--;
                    SDL_CondSignal(is->pictq_cond);
                    SDL_UnlockMutex(is->pictq_mutex);
                    goto retry;
                }
            }

            if(is->subtitle_st) {
                /* after a subtitle stream switch, flush the whole
                   subpicture queue and wake the subtitle thread */
                if (is->subtitle_stream_changed) {
                    SDL_LockMutex(is->subpq_mutex);

                    while (is->subpq_size) {
                        free_subpicture(&is->subpq[is->subpq_rindex]);

                        /* update queue size and signal for next picture */
                        if (++is->subpq_rindex == SUBPICTURE_QUEUE_SIZE)
                            is->subpq_rindex = 0;

                        is->subpq_size--;
                    }
                    is->subtitle_stream_changed = 0;

                    SDL_CondSignal(is->subpq_cond);
                    SDL_UnlockMutex(is->subpq_mutex);
                } else {
                    if (is->subpq_size > 0) {
                        sp = &is->subpq[is->subpq_rindex];

                        if (is->subpq_size > 1)
                            sp2 = &is->subpq[(is->subpq_rindex + 1) % SUBPICTURE_QUEUE_SIZE];
                        else
                            sp2 = NULL;

                        /* drop the head subtitle once its display window
                           has passed or the next one has become current */
                        if ((is->video_current_pts > (sp->pts + ((float) sp->sub.end_display_time / 1000)))
                                || (sp2 && is->video_current_pts > (sp2->pts + ((float) sp2->sub.start_display_time / 1000))))
                        {
                            free_subpicture(sp);

                            /* update queue size and signal for next picture */
                            if (++is->subpq_rindex == SUBPICTURE_QUEUE_SIZE)
                                is->subpq_rindex = 0;

                            SDL_LockMutex(is->subpq_mutex);
                            is->subpq_size--;
                            SDL_CondSignal(is->subpq_cond);
                            SDL_UnlockMutex(is->subpq_mutex);
                        }
                    }
                }
            }

            /* display picture */
            if (!display_disable)
                video_display(is);

            /* update queue size and signal for next picture */
            if (++is->pictq_rindex == VIDEO_PICTURE_QUEUE_SIZE)
                is->pictq_rindex = 0;

            SDL_LockMutex(is->pictq_mutex);
            is->pictq_size--;
            SDL_CondSignal(is->pictq_cond);
            SDL_UnlockMutex(is->pictq_mutex);
        }
    } else if (is->audio_st) {
        /* draw the next audio frame */

        /* if only audio stream, then display the audio bars (better
           than nothing, just to test the implementation */

        /* display picture */
        if (!display_disable)
            video_display(is);
    }
    if (show_status) {
        static int64_t last_time;
        int64_t cur_time;
        int aqsize, vqsize, sqsize;
        double av_diff;

        cur_time = av_gettime();
        /* refresh the status line at most every 30ms */
        if (!last_time || (cur_time - last_time) >= 30000) {
            aqsize = 0;
            vqsize = 0;
            sqsize = 0;
            if (is->audio_st)
                aqsize = is->audioq.size;
            if (is->video_st)
                vqsize = is->videoq.size;
            if (is->subtitle_st)
                sqsize = is->subtitleq.size;
            av_diff = 0;
            if (is->audio_st && is->video_st)
                av_diff = get_audio_clock(is) - get_video_clock(is);
            printf("%7.2f A-V:%7.3f s:%3.1f aq=%5dKB vq=%5dKB sq=%5dB f=%"PRId64"/%"PRId64"   \r",
                   get_master_clock(is), av_diff, FFMAX(is->skip_frames-1, 0), aqsize / 1024, vqsize / 1024, sqsize, is->pts_ctx.num_faulty_dts, is->pts_ctx.num_faulty_pts);
            fflush(stdout);
            last_time = cur_time;
        }
    }
}
1295
1296 static void stream_close(VideoState *is)
1297 {
1298     VideoPicture *vp;
1299     int i;
1300     /* XXX: use a special url_shutdown call to abort parse cleanly */
1301     is->abort_request = 1;
1302     SDL_WaitThread(is->parse_tid, NULL);
1303     SDL_WaitThread(is->refresh_tid, NULL);
1304
1305     /* free all pictures */
1306     for(i=0;i<VIDEO_PICTURE_QUEUE_SIZE; i++) {
1307         vp = &is->pictq[i];
1308 #if CONFIG_AVFILTER
1309         if (vp->picref) {
1310             avfilter_unref_buffer(vp->picref);
1311             vp->picref = NULL;
1312         }
1313 #endif
1314         if (vp->bmp) {
1315             SDL_FreeYUVOverlay(vp->bmp);
1316             vp->bmp = NULL;
1317         }
1318     }
1319     SDL_DestroyMutex(is->pictq_mutex);
1320     SDL_DestroyCond(is->pictq_cond);
1321     SDL_DestroyMutex(is->subpq_mutex);
1322     SDL_DestroyCond(is->subpq_cond);
1323 #if !CONFIG_AVFILTER
1324     if (is->img_convert_ctx)
1325         sws_freeContext(is->img_convert_ctx);
1326 #endif
1327     av_free(is);
1328 }
1329
/* Tear everything down and terminate the process. Order matters: the
 * stream (and its threads) must be closed before SDL and the option /
 * filter subsystems are uninitialized. */
static void do_exit(void)
{
    if (cur_stream) {
        stream_close(cur_stream);
        cur_stream = NULL;
    }
    uninit_opts();
#if CONFIG_AVFILTER
    avfilter_uninit();
#endif
    if (show_status)
        printf("\n");
    SDL_Quit();
    /* emit an empty message at the quietest level — presumably to
       terminate the \r status line; NOTE(review): confirm intent */
    av_log(NULL, AV_LOG_QUIET, "");
    exit(0);
}
1346
/* allocate a picture (needs to do that in main thread to avoid
   potential locking problems */
static void alloc_picture(void *opaque)
{
    VideoState *is = opaque;
    VideoPicture *vp;

    vp = &is->pictq[is->pictq_windex];

    /* drop any previous overlay before creating one with the new size */
    if (vp->bmp)
        SDL_FreeYUVOverlay(vp->bmp);

#if CONFIG_AVFILTER
    if (vp->picref)
        avfilter_unref_buffer(vp->picref);
    vp->picref = NULL;

    /* picture geometry comes from the filter-chain output */
    vp->width   = is->out_video_filter->inputs[0]->w;
    vp->height  = is->out_video_filter->inputs[0]->h;
    vp->pix_fmt = is->out_video_filter->inputs[0]->format;
#else
    /* picture geometry comes straight from the decoder */
    vp->width   = is->video_st->codec->width;
    vp->height  = is->video_st->codec->height;
    vp->pix_fmt = is->video_st->codec->pix_fmt;
#endif

    vp->bmp = SDL_CreateYUVOverlay(vp->width, vp->height,
                                   SDL_YV12_OVERLAY,
                                   screen);
    if (!vp->bmp || vp->bmp->pitches[0] < vp->width) {
        /* SDL allocates a buffer smaller than requested if the video
         * overlay hardware is unable to support the requested size. */
        fprintf(stderr, "Error: the video system does not support an image\n"
                        "size of %dx%d pixels. Try using -lowres or -vf \"scale=w:h\"\n"
                        "to reduce the image size.\n", vp->width, vp->height );
        do_exit();
    }

    /* wake queue_picture(), which is blocked waiting for vp->allocated */
    SDL_LockMutex(is->pictq_mutex);
    vp->allocated = 1;
    SDL_CondSignal(is->pictq_cond);
    SDL_UnlockMutex(is->pictq_mutex);
}
1390
/**
 * Wait for a free slot in the picture queue, (re)allocate the SDL
 * overlay if the frame size changed, copy/convert the decoded frame
 * into it and append it to the queue.
 *
 * @param pts the dts of the pkt / pts of the frame and guessed if not known
 * @param pos byte position of the frame's packet in the input file
 * @return 0 on success, -1 if decoding was aborted while waiting
 */
static int queue_picture(VideoState *is, AVFrame *src_frame, double pts, int64_t pos)
{
    VideoPicture *vp;
    int dst_pix_fmt;
#if CONFIG_AVFILTER
    AVPicture pict_src;
#endif
    /* wait until we have space to put a new picture */
    SDL_LockMutex(is->pictq_mutex);

    /* queue full and no pending refresh: the display is falling behind,
       so lower the skip ratio (decode more, show fewer) */
    if(is->pictq_size>=VIDEO_PICTURE_QUEUE_SIZE && !is->refresh)
        is->skip_frames= FFMAX(1.0 - FRAME_SKIP_FACTOR, is->skip_frames * (1.0-FRAME_SKIP_FACTOR));

    while (is->pictq_size >= VIDEO_PICTURE_QUEUE_SIZE &&
           !is->videoq.abort_request) {
        SDL_CondWait(is->pictq_cond, is->pictq_mutex);
    }
    SDL_UnlockMutex(is->pictq_mutex);

    if (is->videoq.abort_request)
        return -1;

    vp = &is->pictq[is->pictq_windex];

    /* alloc or resize hardware picture buffer */
    if (!vp->bmp ||
#if CONFIG_AVFILTER
        vp->width  != is->out_video_filter->inputs[0]->w ||
        vp->height != is->out_video_filter->inputs[0]->h) {
#else
        vp->width != is->video_st->codec->width ||
        vp->height != is->video_st->codec->height) {
#endif
        SDL_Event event;

        vp->allocated = 0;

        /* the allocation must be done in the main thread to avoid
           locking problems */
        event.type = FF_ALLOC_EVENT;
        event.user.data1 = is;
        SDL_PushEvent(&event);

        /* wait until the picture is allocated */
        SDL_LockMutex(is->pictq_mutex);
        while (!vp->allocated && !is->videoq.abort_request) {
            SDL_CondWait(is->pictq_cond, is->pictq_mutex);
        }
        SDL_UnlockMutex(is->pictq_mutex);

        if (is->videoq.abort_request)
            return -1;
    }

    /* if the frame is not skipped, then display it */
    if (vp->bmp) {
        AVPicture pict;
#if CONFIG_AVFILTER
        if(vp->picref)
            avfilter_unref_buffer(vp->picref);
        vp->picref = src_frame->opaque;
#endif

        /* get a pointer on the bitmap */
        SDL_LockYUVOverlay (vp->bmp);

        dst_pix_fmt = PIX_FMT_YUV420P;
        memset(&pict,0,sizeof(AVPicture));
        /* SDL YV12 stores V before U, hence the swapped planes 1/2 */
        pict.data[0] = vp->bmp->pixels[0];
        pict.data[1] = vp->bmp->pixels[2];
        pict.data[2] = vp->bmp->pixels[1];

        pict.linesize[0] = vp->bmp->pitches[0];
        pict.linesize[1] = vp->bmp->pitches[2];
        pict.linesize[2] = vp->bmp->pitches[1];

#if CONFIG_AVFILTER
        pict_src.data[0] = src_frame->data[0];
        pict_src.data[1] = src_frame->data[1];
        pict_src.data[2] = src_frame->data[2];

        pict_src.linesize[0] = src_frame->linesize[0];
        pict_src.linesize[1] = src_frame->linesize[1];
        pict_src.linesize[2] = src_frame->linesize[2];

        //FIXME use direct rendering
        av_picture_copy(&pict, &pict_src,
                        vp->pix_fmt, vp->width, vp->height);
#else
        /* no filter chain: convert to YUV420P with swscale */
        sws_flags = av_get_int(sws_opts, "sws_flags", NULL);
        is->img_convert_ctx = sws_getCachedContext(is->img_convert_ctx,
            vp->width, vp->height, vp->pix_fmt, vp->width, vp->height,
            dst_pix_fmt, sws_flags, NULL, NULL, NULL);
        if (is->img_convert_ctx == NULL) {
            fprintf(stderr, "Cannot initialize the conversion context\n");
            exit(1);
        }
        sws_scale(is->img_convert_ctx, src_frame->data, src_frame->linesize,
                  0, vp->height, pict.data, pict.linesize);
#endif
        /* update the bitmap content */
        SDL_UnlockYUVOverlay(vp->bmp);

        vp->pts = pts;
        vp->pos = pos;

        /* now we can update the picture count */
        if (++is->pictq_windex == VIDEO_PICTURE_QUEUE_SIZE)
            is->pictq_windex = 0;
        SDL_LockMutex(is->pictq_mutex);
        vp->target_clock= compute_target_time(vp->pts, is);

        is->pictq_size++;
        SDL_UnlockMutex(is->pictq_mutex);
    }
    return 0;
}
1512
1513 /**
1514  * compute the exact PTS for the picture if it is omitted in the stream
1515  * @param pts1 the dts of the pkt / pts of the frame
1516  */
1517 static int output_picture2(VideoState *is, AVFrame *src_frame, double pts1, int64_t pos)
1518 {
1519     double frame_delay, pts;
1520
1521     pts = pts1;
1522
1523     if (pts != 0) {
1524         /* update video clock with pts, if present */
1525         is->video_clock = pts;
1526     } else {
1527         pts = is->video_clock;
1528     }
1529     /* update video clock for next frame */
1530     frame_delay = av_q2d(is->video_st->codec->time_base);
1531     /* for MPEG2, the frame can be repeated, so we update the
1532        clock accordingly */
1533     frame_delay += src_frame->repeat_pict * (frame_delay * 0.5);
1534     is->video_clock += frame_delay;
1535
1536 #if defined(DEBUG_SYNC) && 0
1537     printf("frame_type=%c clock=%0.3f pts=%0.3f\n",
1538            av_get_pict_type_char(src_frame->pict_type), pts, pts1);
1539 #endif
1540     return queue_picture(is, src_frame, pts, pos);
1541 }
1542
/* Pop the next packet off the video queue and decode it.
 * Returns 1 when a displayable frame is ready in 'frame' (with *pts set
 * in stream time_base units), 0 when nothing displayable was produced
 * (flush packet, skipped frame, or no complete picture), -1 on abort. */
static int get_video_frame(VideoState *is, AVFrame *frame, int64_t *pts, AVPacket *pkt)
{
    int len1, got_picture, i;

    if (packet_queue_get(&is->videoq, pkt, 1) < 0)
        return -1;

    /* the flush packet marks a seek: reset decoder and timing state */
    if (pkt->data == flush_pkt.data) {
        avcodec_flush_buffers(is->video_st->codec);

        SDL_LockMutex(is->pictq_mutex);
        //Make sure there are no long delay timers (ideally we should just flush the que but thats harder)
        for (i = 0; i < VIDEO_PICTURE_QUEUE_SIZE; i++) {
            is->pictq[i].target_clock= 0;
        }
        /* wait for the display thread to drain the picture queue */
        while (is->pictq_size && !is->videoq.abort_request) {
            SDL_CondWait(is->pictq_cond, is->pictq_mutex);
        }
        is->video_current_pos = -1;
        SDL_UnlockMutex(is->pictq_mutex);

        init_pts_correction(&is->pts_ctx);
        is->frame_last_pts = AV_NOPTS_VALUE;
        is->frame_last_delay = 0;
        is->frame_timer = (double)av_gettime() / 1000000.0;
        is->skip_frames = 1;
        is->skip_frames_index = 0;
        return 0;
    }

    /* NOTE(review): len1 (bytes consumed / error code) is never checked */
    len1 = avcodec_decode_video2(is->video_st->codec,
                                 frame, &got_picture,
                                 pkt);

    if (got_picture) {
        /* pick pts vs dts per the -drp option; -1 = heuristic guesser */
        if (decoder_reorder_pts == -1) {
            *pts = guess_correct_pts(&is->pts_ctx, frame->pkt_pts, frame->pkt_dts);
        } else if (decoder_reorder_pts) {
            *pts = frame->pkt_pts;
        } else {
            *pts = frame->pkt_dts;
        }

        if (*pts == AV_NOPTS_VALUE) {
            *pts = 0;
        }

        /* frame dropping: only report every skip_frames-th picture */
        is->skip_frames_index += 1;
        if(is->skip_frames_index >= is->skip_frames){
            is->skip_frames_index -= FFMAX(is->skip_frames, 1.0);
            return 1;
        }

    }
    return 0;
}
1599
1600 #if CONFIG_AVFILTER
/* private context of the ffplay_input source filter */
typedef struct {
    VideoState *is;   /* owning player state */
    AVFrame *frame;   /* scratch frame the decoder outputs into */
    int use_dr1;      /* nonzero: decoder renders directly into filter buffers */
} FilterPriv;
1606
/* get_buffer callback (direct rendering): let the decoder write
 * straight into a filter buffer, padded for the codec's edge area.
 * Returns 0 on success, -1 if no buffer could be obtained. */
static int input_get_buffer(AVCodecContext *codec, AVFrame *pic)
{
    AVFilterContext *ctx = codec->opaque;
    AVFilterBufferRef  *ref;
    int perms = AV_PERM_WRITE;
    int i, w, h, stride[4];
    unsigned edge;

    if (codec->codec->capabilities & CODEC_CAP_NEG_LINESIZES)
        perms |= AV_PERM_NEG_LINESIZES;

    /* translate the decoder's buffer hints into buffer permissions */
    if(pic->buffer_hints & FF_BUFFER_HINTS_VALID) {
        if(pic->buffer_hints & FF_BUFFER_HINTS_READABLE) perms |= AV_PERM_READ;
        if(pic->buffer_hints & FF_BUFFER_HINTS_PRESERVE) perms |= AV_PERM_PRESERVE;
        if(pic->buffer_hints & FF_BUFFER_HINTS_REUSABLE) perms |= AV_PERM_REUSE2;
    }
    if(pic->reference) perms |= AV_PERM_READ | AV_PERM_PRESERVE;

    /* request aligned dimensions plus an edge border on every side
       (unless the codec handles edges itself) */
    w = codec->width;
    h = codec->height;
    avcodec_align_dimensions2(codec, &w, &h, stride);
    edge = codec->flags & CODEC_FLAG_EMU_EDGE ? 0 : avcodec_get_edge_width();
    w += edge << 1;
    h += edge << 1;

    if(!(ref = avfilter_get_video_buffer(ctx->outputs[0], perms, w, h)))
        return -1;

    ref->video->w = codec->width;
    ref->video->h = codec->height;
    for(i = 0; i < 4; i ++) {
        unsigned hshift = (i == 1 || i == 2) ? av_pix_fmt_descriptors[ref->format].log2_chroma_w : 0;
        unsigned vshift = (i == 1 || i == 2) ? av_pix_fmt_descriptors[ref->format].log2_chroma_h : 0;

        /* point the data pointers past the edge border, scaled for
           chroma subsampling on planes 1 and 2 */
        if (ref->data[i]) {
            ref->data[i]    += (edge >> hshift) + ((edge * ref->linesize[i]) >> vshift);
        }
        pic->data[i]     = ref->data[i];
        pic->linesize[i] = ref->linesize[i];
    }
    pic->opaque = ref;
    pic->age    = INT_MAX;
    pic->type   = FF_BUFFER_TYPE_USER;
    pic->reordered_opaque = codec->reordered_opaque;
    if(codec->pkt) pic->pkt_pts = codec->pkt->pts;
    else           pic->pkt_pts = AV_NOPTS_VALUE;
    return 0;
}
1655
/* release_buffer callback: clear the frame's plane pointers and drop
 * our reference on the underlying filter buffer */
static void input_release_buffer(AVCodecContext *codec, AVFrame *pic)
{
    memset(pic->data, 0, sizeof(pic->data));
    avfilter_unref_buffer(pic->opaque);
}
1661
/* reget_buffer callback: reuse the existing buffer when the picture
 * properties are unchanged; fail (or reallocate) otherwise */
static int input_reget_buffer(AVCodecContext *codec, AVFrame *pic)
{
    AVFilterBufferRef *ref = pic->opaque;

    if (pic->data[0] == NULL) {
        /* no buffer yet: fall back to a fresh, readable allocation */
        pic->buffer_hints |= FF_BUFFER_HINTS_READABLE;
        return codec->get_buffer(codec, pic);
    }

    if ((codec->width != ref->video->w) || (codec->height != ref->video->h) ||
        (codec->pix_fmt != ref->format)) {
        av_log(codec, AV_LOG_ERROR, "Picture properties changed.\n");
        return -1;
    }

    pic->reordered_opaque = codec->reordered_opaque;
    if(codec->pkt) pic->pkt_pts = codec->pkt->pts;
    else           pic->pkt_pts = AV_NOPTS_VALUE;
    return 0;
}
1682
1683 static int input_init(AVFilterContext *ctx, const char *args, void *opaque)
1684 {
1685     FilterPriv *priv = ctx->priv;
1686     AVCodecContext *codec;
1687     if(!opaque) return -1;
1688
1689     priv->is = opaque;
1690     codec    = priv->is->video_st->codec;
1691     codec->opaque = ctx;
1692     if(codec->codec->capabilities & CODEC_CAP_DR1) {
1693         priv->use_dr1 = 1;
1694         codec->get_buffer     = input_get_buffer;
1695         codec->release_buffer = input_release_buffer;
1696         codec->reget_buffer   = input_reget_buffer;
1697         codec->thread_safe_callbacks = 1;
1698     }
1699
1700     priv->frame = avcodec_alloc_frame();
1701
1702     return 0;
1703 }
1704
/* uninit callback: free the scratch decode frame */
static void input_uninit(AVFilterContext *ctx)
{
    FilterPriv *priv = ctx->priv;
    av_free(priv->frame);
}
1710
1711 static int input_request_frame(AVFilterLink *link)
1712 {
1713     FilterPriv *priv = link->src->priv;
1714     AVFilterBufferRef *picref;
1715     int64_t pts = 0;
1716     AVPacket pkt;
1717     int ret;
1718
1719     while (!(ret = get_video_frame(priv->is, priv->frame, &pts, &pkt)))
1720         av_free_packet(&pkt);
1721     if (ret < 0)
1722         return -1;
1723
1724     if(priv->use_dr1) {
1725         picref = avfilter_ref_buffer(priv->frame->opaque, ~0);
1726     } else {
1727         picref = avfilter_get_video_buffer(link, AV_PERM_WRITE, link->w, link->h);
1728         av_image_copy(picref->data, picref->linesize,
1729                       priv->frame->data, priv->frame->linesize,
1730                       picref->format, link->w, link->h);
1731     }
1732     av_free_packet(&pkt);
1733
1734     picref->pts = pts;
1735     picref->pos = pkt.pos;
1736     picref->video->pixel_aspect = priv->is->video_st->codec->sample_aspect_ratio;
1737     avfilter_start_frame(link, picref);
1738     avfilter_draw_slice(link, 0, link->h, 1);
1739     avfilter_end_frame(link);
1740
1741     return 0;
1742 }
1743
/* restrict the filter's output format to the decoder's pixel format */
static int input_query_formats(AVFilterContext *ctx)
{
    FilterPriv *priv = ctx->priv;
    enum PixelFormat pix_fmts[] = {
        priv->is->video_st->codec->pix_fmt, PIX_FMT_NONE
    };

    avfilter_set_common_formats(ctx, avfilter_make_format_list(pix_fmts));
    return 0;
}
1754
/* propagate the decoder's geometry and time base to the output link */
static int input_config_props(AVFilterLink *link)
{
    FilterPriv *priv  = link->src->priv;
    AVCodecContext *c = priv->is->video_st->codec;

    link->w = c->width;
    link->h = c->height;
    link->time_base = priv->is->video_st->time_base;

    return 0;
}
1766
/* video source filter that feeds decoded frames into the filter graph */
static AVFilter input_filter =
{
    .name      = "ffplay_input",

    .priv_size = sizeof(FilterPriv),

    .init      = input_init,
    .uninit    = input_uninit,

    .query_formats = input_query_formats,

    /* pure source: no input pads, a single video output pad */
    .inputs    = (AVFilterPad[]) {{ .name = NULL }},
    .outputs   = (AVFilterPad[]) {{ .name = "default",
                                    .type = AVMEDIA_TYPE_VIDEO,
                                    .request_frame = input_request_frame,
                                    .config_props  = input_config_props, },
                                  { .name = NULL }},
};
1785
/* Build the video filter graph: ffplay_input -> [user chain] -> ffsink.
 * On success is->out_video_filter points at the sink filter.
 * Returns 0 on success, a negative error code otherwise. */
static int configure_video_filters(AVFilterGraph *graph, VideoState *is, const char *vfilters)
{
    char sws_flags_str[128];
    int ret;
    FFSinkContext ffsink_ctx = { .pix_fmt = PIX_FMT_YUV420P };
    AVFilterContext *filt_src = NULL, *filt_out = NULL;
    snprintf(sws_flags_str, sizeof(sws_flags_str), "flags=%d", sws_flags);
    graph->scale_sws_opts = av_strdup(sws_flags_str);

    if ((ret = avfilter_graph_create_filter(&filt_src, &input_filter, "src",
                                            NULL, is, graph)) < 0)
        goto the_end;
    if ((ret = avfilter_graph_create_filter(&filt_out, &ffsink, "out",
                                            NULL, &ffsink_ctx, graph)) < 0)
        goto the_end;

    if(vfilters) {
        /* NOTE(review): av_malloc results are not checked for NULL */
        AVFilterInOut *outputs = av_malloc(sizeof(AVFilterInOut));
        AVFilterInOut *inputs  = av_malloc(sizeof(AVFilterInOut));

        /* label "in" = our source's output pad */
        outputs->name    = av_strdup("in");
        outputs->filter_ctx = filt_src;
        outputs->pad_idx = 0;
        outputs->next    = NULL;

        /* label "out" = the sink's input pad */
        inputs->name    = av_strdup("out");
        inputs->filter_ctx = filt_out;
        inputs->pad_idx = 0;
        inputs->next    = NULL;

        if ((ret = avfilter_graph_parse(graph, vfilters, inputs, outputs, NULL)) < 0)
            goto the_end;
        /* NOTE(review): frees the caller's string through a const
           parameter pointer; only the local pointer is cleared —
           confirm the caller expects ownership transfer */
        av_freep(&vfilters);
    } else {
        /* no user filter chain: wire the source straight to the sink */
        if ((ret = avfilter_link(filt_src, 0, filt_out, 0)) < 0)
            goto the_end;
    }

    if ((ret = avfilter_graph_config(graph, NULL)) < 0)
        goto the_end;

    is->out_video_filter = filt_out;
the_end:
    return ret;
}
1831
1832 #endif  /* CONFIG_AVFILTER */
1833
/* Video decoding thread: pulls frames (directly or through the filter
 * graph), converts their timestamps to seconds and hands them to
 * output_picture2() for queuing. Returns 0 on exit. */
static int video_thread(void *arg)
{
    VideoState *is = arg;
    AVFrame *frame= avcodec_alloc_frame();
    int64_t pts_int;
    double pts;
    int ret;

#if CONFIG_AVFILTER
    AVFilterGraph *graph = avfilter_graph_alloc();
    AVFilterContext *filt_out = NULL;
    int64_t pos;

    if ((ret = configure_video_filters(graph, is, vfilters)) < 0)
        goto the_end;
    filt_out = is->out_video_filter;
#endif

    for(;;) {
#if !CONFIG_AVFILTER
        AVPacket pkt;
#else
        AVFilterBufferRef *picref;
        AVRational tb;
#endif
        while (is->paused && !is->videoq.abort_request)
            SDL_Delay(10);
#if CONFIG_AVFILTER
        ret = get_filtered_video_frame(filt_out, frame, &picref, &tb);
        if (picref) {
            pts_int = picref->pts;
            pos     = picref->pos;
            /* keep the buffer ref so queue_picture can adopt it */
            frame->opaque = picref;
        }

        /* rescale the pts from the filter's time base to the stream's */
        if (av_cmp_q(tb, is->video_st->time_base)) {
            av_unused int64_t pts1 = pts_int;
            pts_int = av_rescale_q(pts_int, tb, is->video_st->time_base);
            av_dlog(NULL, "video_thread(): "
                    "tb:%d/%d pts:%"PRId64" -> tb:%d/%d pts:%"PRId64"\n",
                    tb.num, tb.den, pts1,
                    is->video_st->time_base.num, is->video_st->time_base.den, pts_int);
        }
#else
        ret = get_video_frame(is, frame, &pts_int, &pkt);
#endif

        if (ret < 0) goto the_end;

        /* ret == 0: no displayable frame this round (flush or skip) */
        if (!ret)
            continue;

        pts = pts_int*av_q2d(is->video_st->time_base);

#if CONFIG_AVFILTER
        ret = output_picture2(is, frame, pts, pos);
#else
        ret = output_picture2(is, frame, pts,  pkt.pos);
        av_free_packet(&pkt);
#endif
        if (ret < 0)
            goto the_end;

        /* single-step mode: pause again after each displayed frame */
        if (step)
            if (cur_stream)
                stream_pause(cur_stream);
    }
 the_end:
#if CONFIG_AVFILTER
    avfilter_graph_free(&graph);
#endif
    av_free(frame);
    return 0;
}
1908
/* Subtitle decoding thread: pulls packets from subtitleq, decodes them,
 * converts palette entries from RGBA to YUVA (CCIR range) for blending,
 * and stores the result in the subpq ring buffer consumed by the video
 * display code.  Returns 0 when the queue is aborted. */
static int subtitle_thread(void *arg)
{
    VideoState *is = arg;
    SubPicture *sp;
    AVPacket pkt1, *pkt = &pkt1;
    int len1, got_subtitle;  /* len1: decoder return, currently unchecked (see below) */
    double pts;
    int i, j;
    int r, g, b, y, u, v, a;

    for(;;) {
        /* sleep in 10ms slices while playback is paused */
        while (is->paused && !is->subtitleq.abort_request) {
            SDL_Delay(10);
        }
        if (packet_queue_get(&is->subtitleq, pkt, 1) < 0)
            break;

        /* special flush packet (injected on seek): reset decoder state */
        if(pkt->data == flush_pkt.data){
            avcodec_flush_buffers(is->subtitle_st->codec);
            continue;
        }
        /* block while the subtitle picture ring buffer is full */
        SDL_LockMutex(is->subpq_mutex);
        while (is->subpq_size >= SUBPICTURE_QUEUE_SIZE &&
               !is->subtitleq.abort_request) {
            SDL_CondWait(is->subpq_cond, is->subpq_mutex);
        }
        SDL_UnlockMutex(is->subpq_mutex);

        if (is->subtitleq.abort_request)
            goto the_end;

        sp = &is->subpq[is->subpq_windex];

       /* NOTE: ipts is the PTS of the _first_ picture beginning in
           this packet, if any */
        pts = 0;
        if (pkt->pts != AV_NOPTS_VALUE)
            pts = av_q2d(is->subtitle_st->time_base)*pkt->pts;

        len1 = avcodec_decode_subtitle2(is->subtitle_st->codec,
                                    &sp->sub, &got_subtitle,
                                    pkt);
//            if (len1 < 0)
//                break;
        /* format == 0: bitmap (graphic) subtitles with a palette */
        if (got_subtitle && sp->sub.format == 0) {
            sp->pts = pts;

            /* convert each rect's palette from RGBA to YUVA in place so
               the blend step can work in YUV space */
            for (i = 0; i < sp->sub.num_rects; i++)
            {
                for (j = 0; j < sp->sub.rects[i]->nb_colors; j++)
                {
                    RGBA_IN(r, g, b, a, (uint32_t*)sp->sub.rects[i]->pict.data[1] + j);
                    y = RGB_TO_Y_CCIR(r, g, b);
                    u = RGB_TO_U_CCIR(r, g, b, 0);
                    v = RGB_TO_V_CCIR(r, g, b, 0);
                    YUVA_OUT((uint32_t*)sp->sub.rects[i]->pict.data[1] + j, y, u, v, a);
                }
            }

            /* now we can update the picture count */
            if (++is->subpq_windex == SUBPICTURE_QUEUE_SIZE)
                is->subpq_windex = 0;
            SDL_LockMutex(is->subpq_mutex);
            is->subpq_size++;
            SDL_UnlockMutex(is->subpq_mutex);
        }
        av_free_packet(pkt);
//        if (step)
//            if (cur_stream)
//                stream_pause(cur_stream);
    }
 the_end:
    return 0;
}
1983
1984 /* copy samples for viewing in editor window */
1985 static void update_sample_display(VideoState *is, short *samples, int samples_size)
1986 {
1987     int size, len, channels;
1988
1989     channels = is->audio_st->codec->channels;
1990
1991     size = samples_size / sizeof(short);
1992     while (size > 0) {
1993         len = SAMPLE_ARRAY_SIZE - is->sample_array_index;
1994         if (len > size)
1995             len = size;
1996         memcpy(is->sample_array + is->sample_array_index, samples, len * sizeof(short));
1997         samples += len;
1998         is->sample_array_index += len;
1999         if (is->sample_array_index >= SAMPLE_ARRAY_SIZE)
2000             is->sample_array_index = 0;
2001         size -= len;
2002     }
2003 }
2004
2005 /* return the new audio buffer size (samples can be added or deleted
2006    to get better sync if video or external master clock) */
2007 static int synchronize_audio(VideoState *is, short *samples,
2008                              int samples_size1, double pts)
2009 {
2010     int n, samples_size;
2011     double ref_clock;
2012
2013     n = 2 * is->audio_st->codec->channels;
2014     samples_size = samples_size1;
2015
2016     /* if not master, then we try to remove or add samples to correct the clock */
2017     if (((is->av_sync_type == AV_SYNC_VIDEO_MASTER && is->video_st) ||
2018          is->av_sync_type == AV_SYNC_EXTERNAL_CLOCK)) {
2019         double diff, avg_diff;
2020         int wanted_size, min_size, max_size, nb_samples;
2021
2022         ref_clock = get_master_clock(is);
2023         diff = get_audio_clock(is) - ref_clock;
2024
2025         if (diff < AV_NOSYNC_THRESHOLD) {
2026             is->audio_diff_cum = diff + is->audio_diff_avg_coef * is->audio_diff_cum;
2027             if (is->audio_diff_avg_count < AUDIO_DIFF_AVG_NB) {
2028                 /* not enough measures to have a correct estimate */
2029                 is->audio_diff_avg_count++;
2030             } else {
2031                 /* estimate the A-V difference */
2032                 avg_diff = is->audio_diff_cum * (1.0 - is->audio_diff_avg_coef);
2033
2034                 if (fabs(avg_diff) >= is->audio_diff_threshold) {
2035                     wanted_size = samples_size + ((int)(diff * is->audio_st->codec->sample_rate) * n);
2036                     nb_samples = samples_size / n;
2037
2038                     min_size = ((nb_samples * (100 - SAMPLE_CORRECTION_PERCENT_MAX)) / 100) * n;
2039                     max_size = ((nb_samples * (100 + SAMPLE_CORRECTION_PERCENT_MAX)) / 100) * n;
2040                     if (wanted_size < min_size)
2041                         wanted_size = min_size;
2042                     else if (wanted_size > max_size)
2043                         wanted_size = max_size;
2044
2045                     /* add or remove samples to correction the synchro */
2046                     if (wanted_size < samples_size) {
2047                         /* remove samples */
2048                         samples_size = wanted_size;
2049                     } else if (wanted_size > samples_size) {
2050                         uint8_t *samples_end, *q;
2051                         int nb;
2052
2053                         /* add samples */
2054                         nb = (samples_size - wanted_size);
2055                         samples_end = (uint8_t *)samples + samples_size - n;
2056                         q = samples_end + n;
2057                         while (nb > 0) {
2058                             memcpy(q, samples_end, n);
2059                             q += n;
2060                             nb -= n;
2061                         }
2062                         samples_size = wanted_size;
2063                     }
2064                 }
2065 #if 0
2066                 printf("diff=%f adiff=%f sample_diff=%d apts=%0.3f vpts=%0.3f %f\n",
2067                        diff, avg_diff, samples_size - samples_size1,
2068                        is->audio_clock, is->video_clock, is->audio_diff_threshold);
2069 #endif
2070             }
2071         } else {
2072             /* too big difference : may be initial PTS errors, so
2073                reset A-V filter */
2074             is->audio_diff_avg_count = 0;
2075             is->audio_diff_cum = 0;
2076         }
2077     }
2078
2079     return samples_size;
2080 }
2081
/* decode one audio frame and returns its uncompressed size */
/* Decodes from is->audio_pkt (resuming partially consumed packets via
 * audio_pkt_temp), converts to S16 through av_audio_convert when the
 * decoder's sample format differs, advances is->audio_clock, and leaves
 * the result in is->audio_buf.  Stores the frame's pts (seconds) in
 * *pts_ptr.  Returns the byte size of the decoded data, or -1 when
 * paused/aborted.  Only returns through those two paths: the outer loop
 * keeps fetching packets until one decodes. */
static int audio_decode_frame(VideoState *is, double *pts_ptr)
{
    AVPacket *pkt_temp = &is->audio_pkt_temp;
    AVPacket *pkt = &is->audio_pkt;
    AVCodecContext *dec= is->audio_st->codec;
    int n, len1, data_size;
    double pts;

    for(;;) {
        /* NOTE: the audio packet can contain several frames */
        while (pkt_temp->size > 0) {
            data_size = sizeof(is->audio_buf1);
            len1 = avcodec_decode_audio3(dec,
                                        (int16_t *)is->audio_buf1, &data_size,
                                        pkt_temp);
            if (len1 < 0) {
                /* if error, we skip the frame */
                pkt_temp->size = 0;
                break;
            }

            /* advance past the consumed portion of the packet */
            pkt_temp->data += len1;
            pkt_temp->size -= len1;
            if (data_size <= 0)
                continue;

            /* (re)create the S16 converter if the decoder's output format
               changed since the last frame */
            if (dec->sample_fmt != is->audio_src_fmt) {
                if (is->reformat_ctx)
                    av_audio_convert_free(is->reformat_ctx);
                is->reformat_ctx= av_audio_convert_alloc(AV_SAMPLE_FMT_S16, 1,
                                                         dec->sample_fmt, 1, NULL, 0);
                if (!is->reformat_ctx) {
                    fprintf(stderr, "Cannot convert %s sample format to %s sample format\n",
                        av_get_sample_fmt_name(dec->sample_fmt),
                        av_get_sample_fmt_name(AV_SAMPLE_FMT_S16));
                        break;
                }
                is->audio_src_fmt= dec->sample_fmt;
            }

            if (is->reformat_ctx) {
                /* convert audio_buf1 -> audio_buf2 to interleaved S16 */
                const void *ibuf[6]= {is->audio_buf1};
                void *obuf[6]= {is->audio_buf2};
                int istride[6]= {av_get_bits_per_sample_fmt(dec->sample_fmt)/8};
                int ostride[6]= {2};
                int len= data_size/istride[0];
                if (av_audio_convert(is->reformat_ctx, obuf, ostride, ibuf, istride, len)<0) {
                    printf("av_audio_convert() failed\n");
                    break;
                }
                is->audio_buf= is->audio_buf2;
                /* FIXME: existing code assume that data_size equals framesize*channels*2
                          remove this legacy cruft */
                data_size= len*2;
            }else{
                is->audio_buf= is->audio_buf1;
            }

            /* if no pts, then compute it */
            pts = is->audio_clock;
            *pts_ptr = pts;
            n = 2 * dec->channels;   /* bytes per S16 sample frame */
            is->audio_clock += (double)data_size /
                (double)(n * dec->sample_rate);
#if defined(DEBUG_SYNC)
            {
                static double last_clock;
                printf("audio: delay=%0.3f clock=%0.3f pts=%0.3f\n",
                       is->audio_clock - last_clock,
                       is->audio_clock, pts);
                last_clock = is->audio_clock;
            }
#endif
            return data_size;
        }

        /* free the current packet */
        if (pkt->data)
            av_free_packet(pkt);

        if (is->paused || is->audioq.abort_request) {
            return -1;
        }

        /* read next packet */
        if (packet_queue_get(&is->audioq, pkt, 1) < 0)
            return -1;
        /* flush packet (injected on seek): reset decoder state */
        if(pkt->data == flush_pkt.data){
            avcodec_flush_buffers(dec);
            continue;
        }

        pkt_temp->data = pkt->data;
        pkt_temp->size = pkt->size;

        /* if update the audio clock with the pts */
        if (pkt->pts != AV_NOPTS_VALUE) {
            is->audio_clock = av_q2d(is->audio_st->time_base)*pkt->pts;
        }
    }
}
2184
2185 /* get the current audio output buffer size, in samples. With SDL, we
2186    cannot have a precise information */
2187 static int audio_write_get_buf_size(VideoState *is)
2188 {
2189     return is->audio_buf_size - is->audio_buf_index;
2190 }
2191
2192
2193 /* prepare a new audio buffer */
2194 static void sdl_audio_callback(void *opaque, Uint8 *stream, int len)
2195 {
2196     VideoState *is = opaque;
2197     int audio_size, len1;
2198     double pts;
2199
2200     audio_callback_time = av_gettime();
2201
2202     while (len > 0) {
2203         if (is->audio_buf_index >= is->audio_buf_size) {
2204            audio_size = audio_decode_frame(is, &pts);
2205            if (audio_size < 0) {
2206                 /* if error, just output silence */
2207                is->audio_buf = is->audio_buf1;
2208                is->audio_buf_size = 1024;
2209                memset(is->audio_buf, 0, is->audio_buf_size);
2210            } else {
2211                if (is->show_audio)
2212                    update_sample_display(is, (int16_t *)is->audio_buf, audio_size);
2213                audio_size = synchronize_audio(is, (int16_t *)is->audio_buf, audio_size,
2214                                               pts);
2215                is->audio_buf_size = audio_size;
2216            }
2217            is->audio_buf_index = 0;
2218         }
2219         len1 = is->audio_buf_size - is->audio_buf_index;
2220         if (len1 > len)
2221             len1 = len;
2222         memcpy(stream, (uint8_t *)is->audio_buf + is->audio_buf_index, len1);
2223         len -= len1;
2224         stream += len1;
2225         is->audio_buf_index += len1;
2226     }
2227 }
2228
/* open a given stream. Return 0 if OK */
/* Finds and opens the decoder for ic->streams[stream_index], applies the
 * global user options (lowres, skip_*, threads, ...), and starts the
 * matching consumer: SDL audio device for audio, video_thread for video,
 * subtitle_thread for subtitles.  Returns -1 on any failure. */
static int stream_component_open(VideoState *is, int stream_index)
{
    AVFormatContext *ic = is->ic;
    AVCodecContext *avctx;
    AVCodec *codec;
    SDL_AudioSpec wanted_spec, spec;

    if (stream_index < 0 || stream_index >= ic->nb_streams)
        return -1;
    avctx = ic->streams[stream_index]->codec;

    /* ask the decoder to downmix to at most 2 channels (must be set
       before avcodec_open) */
    if (avctx->codec_type == AVMEDIA_TYPE_AUDIO) {
        if (avctx->channels > 0) {
            avctx->request_channels = FFMIN(2, avctx->channels);
        } else {
            avctx->request_channels = 2;
        }
    }

    codec = avcodec_find_decoder(avctx->codec_id);
    /* propagate the global command-line decoding options */
    avctx->debug_mv = debug_mv;
    avctx->debug = debug;
    avctx->workaround_bugs = workaround_bugs;
    avctx->lowres = lowres;
    if(lowres) avctx->flags |= CODEC_FLAG_EMU_EDGE;
    avctx->idct_algo= idct;
    if(fast) avctx->flags2 |= CODEC_FLAG2_FAST;
    avctx->skip_frame= skip_frame;
    avctx->skip_idct= skip_idct;
    avctx->skip_loop_filter= skip_loop_filter;
    avctx->error_recognition= error_recognition;
    avctx->error_concealment= error_concealment;
    avctx->thread_count= thread_count;

    set_context_opts(avctx, avcodec_opts[avctx->codec_type], 0, codec);

    if (!codec ||
        avcodec_open(avctx, codec) < 0)
        return -1;

    /* prepare audio output: open the SDL audio device with the decoded
       stream's parameters */
    if (avctx->codec_type == AVMEDIA_TYPE_AUDIO) {
        wanted_spec.freq = avctx->sample_rate;
        wanted_spec.format = AUDIO_S16SYS;
        wanted_spec.channels = avctx->channels;
        wanted_spec.silence = 0;
        wanted_spec.samples = SDL_AUDIO_BUFFER_SIZE;
        wanted_spec.callback = sdl_audio_callback;
        wanted_spec.userdata = is;
        if (SDL_OpenAudio(&wanted_spec, &spec) < 0) {
            fprintf(stderr, "SDL_OpenAudio: %s\n", SDL_GetError());
            return -1;
        }
        is->audio_hw_buf_size = spec.size;
        is->audio_src_fmt= AV_SAMPLE_FMT_S16;
    }

    ic->streams[stream_index]->discard = AVDISCARD_DEFAULT;
    switch(avctx->codec_type) {
    case AVMEDIA_TYPE_AUDIO:
        is->audio_stream = stream_index;
        is->audio_st = ic->streams[stream_index];
        is->audio_buf_size = 0;
        is->audio_buf_index = 0;

        /* init averaging filter */
        is->audio_diff_avg_coef = exp(log(0.01) / AUDIO_DIFF_AVG_NB);
        is->audio_diff_avg_count = 0;
        /* since we do not have a precise enough audio fifo fullness,
           we correct audio sync only if larger than this threshold */
        is->audio_diff_threshold = 2.0 * SDL_AUDIO_BUFFER_SIZE / avctx->sample_rate;

        memset(&is->audio_pkt, 0, sizeof(is->audio_pkt));
        packet_queue_init(&is->audioq);
        SDL_PauseAudio(0);
        break;
    case AVMEDIA_TYPE_VIDEO:
        is->video_stream = stream_index;
        is->video_st = ic->streams[stream_index];

//        is->video_current_pts_time = av_gettime();

        packet_queue_init(&is->videoq);
        is->video_tid = SDL_CreateThread(video_thread, is);
        break;
    case AVMEDIA_TYPE_SUBTITLE:
        is->subtitle_stream = stream_index;
        is->subtitle_st = ic->streams[stream_index];
        packet_queue_init(&is->subtitleq);

        is->subtitle_tid = SDL_CreateThread(subtitle_thread, is);
        break;
    default:
        break;
    }
    return 0;
}
2328
/* Tear down one open stream component: abort its packet queue, wake and
 * join its consumer thread (or close the SDL audio device), free the
 * queue, close the codec and clear the matching is->*_st/*_stream fields.
 * Safe counterpart of stream_component_open(). */
static void stream_component_close(VideoState *is, int stream_index)
{
    AVFormatContext *ic = is->ic;
    AVCodecContext *avctx;

    if (stream_index < 0 || stream_index >= ic->nb_streams)
        return;
    avctx = ic->streams[stream_index]->codec;

    switch(avctx->codec_type) {
    case AVMEDIA_TYPE_AUDIO:
        packet_queue_abort(&is->audioq);

        /* stops the SDL callback, so audio_buf is no longer touched */
        SDL_CloseAudio();

        packet_queue_end(&is->audioq);
        if (is->reformat_ctx)
            av_audio_convert_free(is->reformat_ctx);
        is->reformat_ctx = NULL;
        break;
    case AVMEDIA_TYPE_VIDEO:
        packet_queue_abort(&is->videoq);

        /* note: we also signal this mutex to make sure we deblock the
           video thread in all cases */
        SDL_LockMutex(is->pictq_mutex);
        SDL_CondSignal(is->pictq_cond);
        SDL_UnlockMutex(is->pictq_mutex);

        SDL_WaitThread(is->video_tid, NULL);

        packet_queue_end(&is->videoq);
        break;
    case AVMEDIA_TYPE_SUBTITLE:
        packet_queue_abort(&is->subtitleq);

        /* note: we also signal this mutex to make sure we deblock the
           video thread in all cases */
        SDL_LockMutex(is->subpq_mutex);
        is->subtitle_stream_changed = 1;

        SDL_CondSignal(is->subpq_cond);
        SDL_UnlockMutex(is->subpq_mutex);

        SDL_WaitThread(is->subtitle_tid, NULL);

        packet_queue_end(&is->subtitleq);
        break;
    default:
        break;
    }

    /* discard further packets for this stream and release the codec */
    ic->streams[stream_index]->discard = AVDISCARD_ALL;
    avcodec_close(avctx);
    switch(avctx->codec_type) {
    case AVMEDIA_TYPE_AUDIO:
        is->audio_st = NULL;
        is->audio_stream = -1;
        break;
    case AVMEDIA_TYPE_VIDEO:
        is->video_st = NULL;
        is->video_stream = -1;
        break;
    case AVMEDIA_TYPE_SUBTITLE:
        is->subtitle_st = NULL;
        is->subtitle_stream = -1;
        break;
    default:
        break;
    }
}
2400
2401 /* since we have only one decoding thread, we can use a global
2402    variable instead of a thread local variable */
2403 static VideoState *global_video_state;
2404
2405 static int decode_interrupt_cb(void)
2406 {
2407     return (global_video_state && global_video_state->abort_request);
2408 }
2409
/* this thread gets the stream from the disk or the network */
/* Demuxer thread: opens the input, selects and opens the best
 * audio/video/subtitle streams, then loops reading packets into the
 * per-stream queues, servicing pause, seek, EOF/looping and the
 * play-range filter.  On exit it closes all components and the input,
 * and posts FF_QUIT_EVENT on error so the main loop can terminate. */
static int decode_thread(void *arg)
{
    VideoState *is = arg;
    AVFormatContext *ic;
    int err, i, ret;
    int st_index[AVMEDIA_TYPE_NB];   /* chosen stream index per media type, -1 if none */
    AVPacket pkt1, *pkt = &pkt1;
    AVFormatParameters params, *ap = &params;
    int eof=0;
    int pkt_in_play_range = 0;

    ic = avformat_alloc_context();

    memset(st_index, -1, sizeof(st_index));
    is->video_stream = -1;
    is->audio_stream = -1;
    is->subtitle_stream = -1;

    /* let blocking I/O be interrupted by is->abort_request */
    global_video_state = is;
    url_set_interrupt_cb(decode_interrupt_cb);

    memset(ap, 0, sizeof(*ap));

    /* pass the user-forced raw-input parameters to the demuxer */
    ap->prealloced_context = 1;
    ap->width = frame_width;
    ap->height= frame_height;
    ap->time_base= (AVRational){1, 25};
    ap->pix_fmt = frame_pix_fmt;

    set_context_opts(ic, avformat_opts, AV_OPT_FLAG_DECODING_PARAM, NULL);

    err = av_open_input_file(&ic, is->filename, is->iformat, 0, ap);
    if (err < 0) {
        print_error(is->filename, err);
        ret = -1;
        goto fail;
    }
    is->ic = ic;

    if(genpts)
        ic->flags |= AVFMT_FLAG_GENPTS;

    err = av_find_stream_info(ic);
    if (err < 0) {
        fprintf(stderr, "%s: could not find codec parameters\n", is->filename);
        ret = -1;
        goto fail;
    }
    if(ic->pb)
        ic->pb->eof_reached= 0; //FIXME hack, ffplay maybe should not use url_feof() to test for the end

    /* default: seek by bytes only for formats with discontinuous timestamps */
    if(seek_by_bytes<0)
        seek_by_bytes= !!(ic->iformat->flags & AVFMT_TS_DISCONT);

    /* if seeking requested, we execute it */
    if (start_time != AV_NOPTS_VALUE) {
        int64_t timestamp;

        timestamp = start_time;
        /* add the stream start time */
        if (ic->start_time != AV_NOPTS_VALUE)
            timestamp += ic->start_time;
        ret = avformat_seek_file(ic, -1, INT64_MIN, timestamp, INT64_MAX, 0);
        if (ret < 0) {
            fprintf(stderr, "%s: could not seek to position %0.3f\n",
                    is->filename, (double)timestamp / AV_TIME_BASE);
        }
    }

    /* discard everything, then re-enable only the streams we open below */
    for (i = 0; i < ic->nb_streams; i++)
        ic->streams[i]->discard = AVDISCARD_ALL;
    if (!video_disable)
        st_index[AVMEDIA_TYPE_VIDEO] =
            av_find_best_stream(ic, AVMEDIA_TYPE_VIDEO,
                                wanted_stream[AVMEDIA_TYPE_VIDEO], -1, NULL, 0);
    if (!audio_disable)
        st_index[AVMEDIA_TYPE_AUDIO] =
            av_find_best_stream(ic, AVMEDIA_TYPE_AUDIO,
                                wanted_stream[AVMEDIA_TYPE_AUDIO],
                                st_index[AVMEDIA_TYPE_VIDEO],
                                NULL, 0);
    if (!video_disable)
        st_index[AVMEDIA_TYPE_SUBTITLE] =
            av_find_best_stream(ic, AVMEDIA_TYPE_SUBTITLE,
                                wanted_stream[AVMEDIA_TYPE_SUBTITLE],
                                (st_index[AVMEDIA_TYPE_AUDIO] >= 0 ?
                                 st_index[AVMEDIA_TYPE_AUDIO] :
                                 st_index[AVMEDIA_TYPE_VIDEO]),
                                NULL, 0);
    if (show_status) {
        dump_format(ic, 0, is->filename, 0);
    }

    /* open the streams */
    if (st_index[AVMEDIA_TYPE_AUDIO] >= 0) {
        stream_component_open(is, st_index[AVMEDIA_TYPE_AUDIO]);
    }

    ret=-1;
    if (st_index[AVMEDIA_TYPE_VIDEO] >= 0) {
        ret= stream_component_open(is, st_index[AVMEDIA_TYPE_VIDEO]);
    }
    is->refresh_tid = SDL_CreateThread(refresh_thread, is);
    /* no video: show the audio waveform/spectrum instead */
    if(ret<0) {
        if (!display_disable)
            is->show_audio = 2;
    }

    if (st_index[AVMEDIA_TYPE_SUBTITLE] >= 0) {
        stream_component_open(is, st_index[AVMEDIA_TYPE_SUBTITLE]);
    }

    if (is->video_stream < 0 && is->audio_stream < 0) {
        fprintf(stderr, "%s: could not open codecs\n", is->filename);
        ret = -1;
        goto fail;
    }

    /* main demux loop */
    for(;;) {
        if (is->abort_request)
            break;
        /* propagate pause/resume to network protocols (e.g. RTSP) */
        if (is->paused != is->last_paused) {
            is->last_paused = is->paused;
            if (is->paused)
                is->read_pause_return= av_read_pause(ic);
            else
                av_read_play(ic);
        }
#if CONFIG_RTSP_DEMUXER
        if (is->paused && !strcmp(ic->iformat->name, "rtsp")) {
            /* wait 10 ms to avoid trying to get another packet */
            /* XXX: horrible */
            SDL_Delay(10);
            continue;
        }
#endif
        if (is->seek_req) {
            int64_t seek_target= is->seek_pos;
            int64_t seek_min= is->seek_rel > 0 ? seek_target - is->seek_rel + 2: INT64_MIN;
            int64_t seek_max= is->seek_rel < 0 ? seek_target - is->seek_rel - 2: INT64_MAX;
//FIXME the +-2 is due to rounding being not done in the correct direction in generation
//      of the seek_pos/seek_rel variables

            ret = avformat_seek_file(is->ic, -1, seek_min, seek_target, seek_max, is->seek_flags);
            if (ret < 0) {
                fprintf(stderr, "%s: error while seeking\n", is->ic->filename);
            }else{
                /* flush all queues and inject flush packets so the
                   decoder threads reset their codec state */
                if (is->audio_stream >= 0) {
                    packet_queue_flush(&is->audioq);
                    packet_queue_put(&is->audioq, &flush_pkt);
                }
                if (is->subtitle_stream >= 0) {
                    packet_queue_flush(&is->subtitleq);
                    packet_queue_put(&is->subtitleq, &flush_pkt);
                }
                if (is->video_stream >= 0) {
                    packet_queue_flush(&is->videoq);
                    packet_queue_put(&is->videoq, &flush_pkt);
                }
            }
            is->seek_req = 0;
            eof= 0;
        }

        /* if the queue are full, no need to read more */
        if (   is->audioq.size + is->videoq.size + is->subtitleq.size > MAX_QUEUE_SIZE
            || (   (is->audioq   .size  > MIN_AUDIOQ_SIZE || is->audio_stream<0)
                && (is->videoq   .nb_packets > MIN_FRAMES || is->video_stream<0)
                && (is->subtitleq.nb_packets > MIN_FRAMES || is->subtitle_stream<0))) {
            /* wait 10 ms */
            SDL_Delay(10);
            continue;
        }
        if(eof) {
            /* feed a null packet so the video decoder can drain delayed
               frames */
            if(is->video_stream >= 0){
                av_init_packet(pkt);
                pkt->data=NULL;
                pkt->size=0;
                pkt->stream_index= is->video_stream;
                packet_queue_put(&is->videoq, pkt);
            }
            SDL_Delay(10);
            /* all queues drained: loop the file or exit as requested */
            if(is->audioq.size + is->videoq.size + is->subtitleq.size ==0){
                if(loop!=1 && (!loop || --loop)){
                    stream_seek(cur_stream, start_time != AV_NOPTS_VALUE ? start_time : 0, 0, 0);
                }else if(autoexit){
                    ret=AVERROR_EOF;
                    goto fail;
                }
            }
            continue;
        }
        ret = av_read_frame(ic, pkt);
        if (ret < 0) {
            if (ret == AVERROR_EOF || url_feof(ic->pb))
                eof=1;
            if (url_ferror(ic->pb))
                break;
            SDL_Delay(100); /* wait for user event */
            continue;
        }
        /* check if packet is in play range specified by user, then queue, otherwise discard */
        pkt_in_play_range = duration == AV_NOPTS_VALUE ||
                (pkt->pts - ic->streams[pkt->stream_index]->start_time) *
                av_q2d(ic->streams[pkt->stream_index]->time_base) -
                (double)(start_time != AV_NOPTS_VALUE ? start_time : 0)/1000000
                <= ((double)duration/1000000);
        if (pkt->stream_index == is->audio_stream && pkt_in_play_range) {
            packet_queue_put(&is->audioq, pkt);
        } else if (pkt->stream_index == is->video_stream && pkt_in_play_range) {
            packet_queue_put(&is->videoq, pkt);
        } else if (pkt->stream_index == is->subtitle_stream && pkt_in_play_range) {
            packet_queue_put(&is->subtitleq, pkt);
        } else {
            av_free_packet(pkt);
        }
    }
    /* wait until the end */
    while (!is->abort_request) {
        SDL_Delay(100);
    }

    ret = 0;
 fail:
    /* disable interrupting */
    global_video_state = NULL;

    /* close each stream */
    if (is->audio_stream >= 0)
        stream_component_close(is, is->audio_stream);
    if (is->video_stream >= 0)
        stream_component_close(is, is->video_stream);
    if (is->subtitle_stream >= 0)
        stream_component_close(is, is->subtitle_stream);
    if (is->ic) {
        av_close_input_file(is->ic);
        is->ic = NULL; /* safety */
    }
    url_set_interrupt_cb(NULL);

    if (ret != 0) {
        SDL_Event event;

        event.type = FF_QUIT_EVENT;
        event.user.data1 = is;
        SDL_PushEvent(&event);
    }
    return 0;
}
2660
2661 static VideoState *stream_open(const char *filename, AVInputFormat *iformat)
2662 {
2663     VideoState *is;
2664
2665     is = av_mallocz(sizeof(VideoState));
2666     if (!is)
2667         return NULL;
2668     av_strlcpy(is->filename, filename, sizeof(is->filename));
2669     is->iformat = iformat;
2670     is->ytop = 0;
2671     is->xleft = 0;
2672
2673     /* start video display */
2674     is->pictq_mutex = SDL_CreateMutex();
2675     is->pictq_cond = SDL_CreateCond();
2676
2677     is->subpq_mutex = SDL_CreateMutex();
2678     is->subpq_cond = SDL_CreateCond();
2679
2680     is->av_sync_type = av_sync_type;
2681     is->parse_tid = SDL_CreateThread(decode_thread, is);
2682     if (!is->parse_tid) {
2683         av_free(is);
2684         return NULL;
2685     }
2686     return is;
2687 }
2688
/* Switch to the next stream of the given media type, wrapping around the
 * stream list.  For subtitles, cycling past the last stream disables
 * subtitles entirely (stream_index becomes -1, which
 * stream_component_open/close treat as "none").  Audio candidates must
 * have a valid sample rate and channel count. */
static void stream_cycle_channel(VideoState *is, int codec_type)
{
    AVFormatContext *ic = is->ic;
    int start_index, stream_index;
    AVStream *st;

    if (codec_type == AVMEDIA_TYPE_VIDEO)
        start_index = is->video_stream;
    else if (codec_type == AVMEDIA_TYPE_AUDIO)
        start_index = is->audio_stream;
    else
        start_index = is->subtitle_stream;
    /* -1 is a legal start only for subtitles ("currently disabled") */
    if (start_index < (codec_type == AVMEDIA_TYPE_SUBTITLE ? -1 : 0))
        return;
    stream_index = start_index;
    for(;;) {
        if (++stream_index >= is->ic->nb_streams)
        {
            if (codec_type == AVMEDIA_TYPE_SUBTITLE)
            {
                /* wrapped past the end: turn subtitles off */
                stream_index = -1;
                goto the_end;
            } else
                stream_index = 0;
        }
        /* made a full circle without finding a usable stream */
        if (stream_index == start_index)
            return;
        st = ic->streams[stream_index];
        if (st->codec->codec_type == codec_type) {
            /* check that parameters are OK */
            switch(codec_type) {
            case AVMEDIA_TYPE_AUDIO:
                if (st->codec->sample_rate != 0 &&
                    st->codec->channels != 0)
                    goto the_end;
                break;
            case AVMEDIA_TYPE_VIDEO:
            case AVMEDIA_TYPE_SUBTITLE:
                goto the_end;
            default:
                break;
            }
        }
    }
 the_end:
    stream_component_close(is, start_index);
    stream_component_open(is, stream_index);
}
2737
2738
2739 static void toggle_full_screen(void)
2740 {
2741     is_full_screen = !is_full_screen;
2742     if (!fs_screen_width) {
2743         /* use default SDL method */
2744 //        SDL_WM_ToggleFullScreen(screen);
2745     }
2746     video_open(cur_stream);
2747 }
2748
2749 static void toggle_pause(void)
2750 {
2751     if (cur_stream)
2752         stream_pause(cur_stream);
2753     step = 0;
2754 }
2755
2756 static void step_to_next_frame(void)
2757 {
2758     if (cur_stream) {
2759         /* if the stream is paused unpause it, then step */
2760         if (cur_stream->paused)
2761             stream_pause(cur_stream);
2762     }
2763     step = 1;
2764 }
2765
2766 static void toggle_audio_display(void)
2767 {
2768     if (cur_stream) {
2769         int bgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0x00);
2770         cur_stream->show_audio = (cur_stream->show_audio + 1) % 3;
2771         fill_rectangle(screen,
2772                     cur_stream->xleft, cur_stream->ytop, cur_stream->width, cur_stream->height,
2773                     bgcolor);
2774         SDL_UpdateRect(screen, cur_stream->xleft, cur_stream->ytop, cur_stream->width, cur_stream->height);
2775     }
2776 }
2777
/* handle an event sent by the GUI */
/* Main SDL event loop: keyboard shortcuts, mouse seeking, window resize
 * and the custom FF_* events posted by the decoder/refresh threads.
 * Never returns normally; exits the process via do_exit(). */
static void event_loop(void)
{
    SDL_Event event;
    double incr, pos, frac;

    for(;;) {
        double x;
        SDL_WaitEvent(&event);
        switch(event.type) {
        case SDL_KEYDOWN:
            if (exit_on_keydown) {
                do_exit();
                break;
            }
            switch(event.key.keysym.sym) {
            case SDLK_ESCAPE:
            case SDLK_q:
                do_exit();
                break;
            case SDLK_f:
                toggle_full_screen();
                break;
            case SDLK_p:
            case SDLK_SPACE:
                toggle_pause();
                break;
            case SDLK_s: //S: Step to next frame
                step_to_next_frame();
                break;
            case SDLK_a:
                if (cur_stream)
                    stream_cycle_channel(cur_stream, AVMEDIA_TYPE_AUDIO);
                break;
            case SDLK_v:
                if (cur_stream)
                    stream_cycle_channel(cur_stream, AVMEDIA_TYPE_VIDEO);
                break;
            case SDLK_t:
                if (cur_stream)
                    stream_cycle_channel(cur_stream, AVMEDIA_TYPE_SUBTITLE);
                break;
            case SDLK_w:
                toggle_audio_display();
                break;
            /* arrow keys share the seek code below; incr is the relative
             * seek amount in seconds (converted to bytes if seek_by_bytes) */
            case SDLK_LEFT:
                incr = -10.0;
                goto do_seek;
            case SDLK_RIGHT:
                incr = 10.0;
                goto do_seek;
            case SDLK_UP:
                incr = 60.0;
                goto do_seek;
            case SDLK_DOWN:
                incr = -60.0;
            do_seek:
                if (cur_stream) {
                    if (seek_by_bytes) {
                        /* pick the best-known current byte position:
                         * video frame pos, else audio packet pos, else the
                         * raw protocol position */
                        if (cur_stream->video_stream >= 0 && cur_stream->video_current_pos>=0){
                            pos= cur_stream->video_current_pos;
                        }else if(cur_stream->audio_stream >= 0 && cur_stream->audio_pkt.pos>=0){
                            pos= cur_stream->audio_pkt.pos;
                        }else
                            pos = url_ftell(cur_stream->ic->pb);
                        /* convert seconds to bytes via the bit rate, with a
                         * 180kB/s fallback when the bit rate is unknown */
                        if (cur_stream->ic->bit_rate)
                            incr *= cur_stream->ic->bit_rate / 8.0;
                        else
                            incr *= 180000.0;
                        pos += incr;
                        stream_seek(cur_stream, pos, incr, 1);
                    } else {
                        /* time-based seek relative to the master clock */
                        pos = get_master_clock(cur_stream);
                        pos += incr;
                        stream_seek(cur_stream, (int64_t)(pos * AV_TIME_BASE), (int64_t)(incr * AV_TIME_BASE), 0);
                    }
                }
                break;
            default:
                break;
            }
            break;
        case SDL_MOUSEBUTTONDOWN:
            if (exit_on_mousedown) {
                do_exit();
                break;
            }
            /* fallthrough: a click seeks just like a drag */
        case SDL_MOUSEMOTION:
            if(event.type ==SDL_MOUSEBUTTONDOWN){
                x= event.button.x;
            }else{
                /* only seek while a button is held during motion */
                if(event.motion.state != SDL_PRESSED)
                    break;
                x= event.motion.x;
            }
            if (cur_stream) {
                if(seek_by_bytes || cur_stream->ic->duration<=0){
                    /* no usable duration: map x to a fraction of file size */
                    uint64_t size=  url_fsize(cur_stream->ic->pb);
                    stream_seek(cur_stream, size*x/cur_stream->width, 0, 1);
                }else{
                    /* map x to a fraction of total duration and report the
                     * target and total time as hh:mm:ss on stderr */
                    int64_t ts;
                    int ns, hh, mm, ss;
                    int tns, thh, tmm, tss;
                    tns = cur_stream->ic->duration/1000000LL;
                    thh = tns/3600;
                    tmm = (tns%3600)/60;
                    tss = (tns%60);
                    frac = x/cur_stream->width;
                    ns = frac*tns;
                    hh = ns/3600;
                    mm = (ns%3600)/60;
                    ss = (ns%60);
                    fprintf(stderr, "Seek to %2.0f%% (%2d:%02d:%02d) of total duration (%2d:%02d:%02d)       \n", frac*100,
                            hh, mm, ss, thh, tmm, tss);
                    ts = frac*cur_stream->ic->duration;
                    if (cur_stream->ic->start_time != AV_NOPTS_VALUE)
                        ts += cur_stream->ic->start_time;
                    stream_seek(cur_stream, ts, 0, 0);
                }
            }
            break;
        case SDL_VIDEORESIZE:
            if (cur_stream) {
                screen = SDL_SetVideoMode(event.resize.w, event.resize.h, 0,
                                          SDL_HWSURFACE|SDL_RESIZABLE|SDL_ASYNCBLIT|SDL_HWACCEL);
                screen_width = cur_stream->width = event.resize.w;
                screen_height= cur_stream->height= event.resize.h;
            }
            break;
        case SDL_QUIT:
        case FF_QUIT_EVENT:
            do_exit();
            break;
        case FF_ALLOC_EVENT:
            /* posted by the video thread: (re)create the SDL overlay on
             * the main thread, which owns the display */
            video_open(event.user.data1);
            alloc_picture(event.user.data1);
            break;
        case FF_REFRESH_EVENT:
            video_refresh_timer(event.user.data1);
            cur_stream->refresh=0;
            break;
        default:
            break;
        }
    }
}
2924
2925 static void opt_frame_size(const char *arg)
2926 {
2927     if (av_parse_video_size(&frame_width, &frame_height, arg) < 0) {
2928         fprintf(stderr, "Incorrect frame size\n");
2929         exit(1);
2930     }
2931     if ((frame_width % 2) != 0 || (frame_height % 2) != 0) {
2932         fprintf(stderr, "Frame size must be a multiple of 2\n");
2933         exit(1);
2934     }
2935 }
2936
2937 static int opt_width(const char *opt, const char *arg)
2938 {
2939     screen_width = parse_number_or_die(opt, arg, OPT_INT64, 1, INT_MAX);
2940     return 0;
2941 }
2942
2943 static int opt_height(const char *opt, const char *arg)
2944 {
2945     screen_height = parse_number_or_die(opt, arg, OPT_INT64, 1, INT_MAX);
2946     return 0;
2947 }
2948
2949 static void opt_format(const char *arg)
2950 {
2951     file_iformat = av_find_input_format(arg);
2952     if (!file_iformat) {
2953         fprintf(stderr, "Unknown input format: %s\n", arg);
2954         exit(1);
2955     }
2956 }
2957
2958 static void opt_frame_pix_fmt(const char *arg)
2959 {
2960     frame_pix_fmt = av_get_pix_fmt(arg);
2961 }
2962
2963 static int opt_sync(const char *opt, const char *arg)
2964 {
2965     if (!strcmp(arg, "audio"))
2966         av_sync_type = AV_SYNC_AUDIO_MASTER;
2967     else if (!strcmp(arg, "video"))
2968         av_sync_type = AV_SYNC_VIDEO_MASTER;
2969     else if (!strcmp(arg, "ext"))
2970         av_sync_type = AV_SYNC_EXTERNAL_CLOCK;
2971     else {
2972         fprintf(stderr, "Unknown value for %s: %s\n", opt, arg);
2973         exit(1);
2974     }
2975     return 0;
2976 }
2977
2978 static int opt_seek(const char *opt, const char *arg)
2979 {
2980     start_time = parse_time_or_die(opt, arg, 1);
2981     return 0;
2982 }
2983
2984 static int opt_duration(const char *opt, const char *arg)
2985 {
2986     duration = parse_time_or_die(opt, arg, 1);
2987     return 0;
2988 }
2989
2990 static int opt_debug(const char *opt, const char *arg)
2991 {
2992     av_log_set_level(99);
2993     debug = parse_number_or_die(opt, arg, OPT_INT64, 0, INT_MAX);
2994     return 0;
2995 }
2996
2997 static int opt_vismv(const char *opt, const char *arg)
2998 {
2999     debug_mv = parse_number_or_die(opt, arg, OPT_INT64, INT_MIN, INT_MAX);
3000     return 0;
3001 }
3002
3003 static int opt_thread_count(const char *opt, const char *arg)
3004 {
3005     thread_count= parse_number_or_die(opt, arg, OPT_INT64, 0, INT_MAX);
3006 #if !HAVE_THREADS
3007     fprintf(stderr, "Warning: not compiled with thread support, using thread emulation\n");
3008 #endif
3009     return 0;
3010 }
3011
/* Command-line option table, consumed by parse_options()/show_help().
 * Each entry: name, flag mask, handler or variable pointer, help text,
 * and optional argument name. */
static const OptionDef options[] = {
#include "cmdutils_common_opts.h"
    /* display geometry */
    { "x", HAS_ARG | OPT_FUNC2, {(void*)opt_width}, "force displayed width", "width" },
    { "y", HAS_ARG | OPT_FUNC2, {(void*)opt_height}, "force displayed height", "height" },
    { "s", HAS_ARG | OPT_VIDEO, {(void*)opt_frame_size}, "set frame size (WxH or abbreviation)", "size" },
    { "fs", OPT_BOOL, {(void*)&is_full_screen}, "force full screen" },
    /* stream selection */
    { "an", OPT_BOOL, {(void*)&audio_disable}, "disable audio" },
    { "vn", OPT_BOOL, {(void*)&video_disable}, "disable video" },
    { "ast", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&wanted_stream[AVMEDIA_TYPE_AUDIO]}, "select desired audio stream", "stream_number" },
    { "vst", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&wanted_stream[AVMEDIA_TYPE_VIDEO]}, "select desired video stream", "stream_number" },
    { "sst", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&wanted_stream[AVMEDIA_TYPE_SUBTITLE]}, "select desired subtitle stream", "stream_number" },
    /* playback range and seeking */
    { "ss", HAS_ARG | OPT_FUNC2, {(void*)&opt_seek}, "seek to a given position in seconds", "pos" },
    { "t", HAS_ARG | OPT_FUNC2, {(void*)&opt_duration}, "play  \"duration\" seconds of audio/video", "duration" },
    { "bytes", OPT_INT | HAS_ARG, {(void*)&seek_by_bytes}, "seek by bytes 0=off 1=on -1=auto", "val" },
    { "nodisp", OPT_BOOL, {(void*)&display_disable}, "disable graphical display" },
    { "f", HAS_ARG, {(void*)opt_format}, "force format", "fmt" },
    { "pix_fmt", HAS_ARG | OPT_EXPERT | OPT_VIDEO, {(void*)opt_frame_pix_fmt}, "set pixel format", "format" },
    { "stats", OPT_BOOL | OPT_EXPERT, {(void*)&show_status}, "show status", "" },
    /* debugging / expert decoder knobs */
    { "debug", HAS_ARG | OPT_FUNC2 | OPT_EXPERT, {(void*)opt_debug}, "print specific debug info", "" },
    { "bug", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&workaround_bugs}, "workaround bugs", "" },
    { "vismv", HAS_ARG | OPT_FUNC2 | OPT_EXPERT, {(void*)opt_vismv}, "visualize motion vectors", "" },
    { "fast", OPT_BOOL | OPT_EXPERT, {(void*)&fast}, "non spec compliant optimizations", "" },
    { "genpts", OPT_BOOL | OPT_EXPERT, {(void*)&genpts}, "generate pts", "" },
    { "drp", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&decoder_reorder_pts}, "let decoder reorder pts 0=off 1=on -1=auto", ""},
    { "lowres", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&lowres}, "", "" },
    { "skiploop", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&skip_loop_filter}, "", "" },
    { "skipframe", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&skip_frame}, "", "" },
    { "skipidct", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&skip_idct}, "", "" },
    { "idct", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&idct}, "set idct algo",  "algo" },
    { "er", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&error_recognition}, "set error detection threshold (0-4)",  "threshold" },
    { "ec", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&error_concealment}, "set error concealment options",  "bit_mask" },
    /* playback behaviour */
    { "sync", HAS_ARG | OPT_FUNC2 | OPT_EXPERT, {(void*)opt_sync}, "set audio-video sync. type (type=audio/video/ext)", "type" },
    { "threads", HAS_ARG | OPT_FUNC2 | OPT_EXPERT, {(void*)opt_thread_count}, "thread count", "count" },
    { "autoexit", OPT_BOOL | OPT_EXPERT, {(void*)&autoexit}, "exit at the end", "" },
    { "exitonkeydown", OPT_BOOL | OPT_EXPERT, {(void*)&exit_on_keydown}, "exit on key down", "" },
    { "exitonmousedown", OPT_BOOL | OPT_EXPERT, {(void*)&exit_on_mousedown}, "exit on mouse down", "" },
    { "loop", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&loop}, "set number of times the playback shall be looped", "loop count" },
    { "framedrop", OPT_BOOL | OPT_EXPERT, {(void*)&framedrop}, "drop frames when cpu is too slow", "" },
    { "window_title", OPT_STRING | HAS_ARG, {(void*)&window_title}, "set window title", "window title" },
#if CONFIG_AVFILTER
    { "vf", OPT_STRING | HAS_ARG, {(void*)&vfilters}, "video filters", "filter list" },
#endif
    { "rdftspeed", OPT_INT | HAS_ARG| OPT_AUDIO | OPT_EXPERT, {(void*)&rdftspeed}, "rdft speed", "msecs" },
    /* catch-all: forward unknown options to the libav* AVOption system */
    { "default", OPT_FUNC2 | HAS_ARG | OPT_AUDIO | OPT_VIDEO | OPT_EXPERT, {(void*)opt_default}, "generic catch all option", "" },
    { NULL, },
};
3058
/* Print the one-line usage synopsis to stdout. */
static void show_usage(void)
{
    /* single call, same output bytes as three separate printf()s */
    printf("Simple media player\n"
           "usage: ffplay [options] input_file\n"
           "\n");
}
3065
/* Print full help: usage line, main and expert options, the AVOption
 * tables of the codec/format (and swscale) contexts, and the interactive
 * key bindings. */
static void show_help(void)
{
    av_log_set_callback(log_callback_help);
    show_usage();
    /* entries without OPT_EXPERT first, then the expert ones */
    show_help_options(options, "Main options:\n",
                      OPT_EXPERT, 0);
    show_help_options(options, "\nAdvanced options:\n",
                      OPT_EXPERT, OPT_EXPERT);
    printf("\n");
    av_opt_show2(avcodec_opts[0], NULL,
                 AV_OPT_FLAG_DECODING_PARAM, 0);
    printf("\n");
    av_opt_show2(avformat_opts, NULL,
                 AV_OPT_FLAG_DECODING_PARAM, 0);
#if !CONFIG_AVFILTER
    /* swscale options only apply when scaling is done directly (no
     * libavfilter build) */
    printf("\n");
    av_opt_show2(sws_opts, NULL,
                 AV_OPT_FLAG_ENCODING_PARAM, 0);
#endif
    printf("\nWhile playing:\n"
           "q, ESC              quit\n"
           "f                   toggle full screen\n"
           "p, SPC              pause\n"
           "a                   cycle audio channel\n"
           "v                   cycle video channel\n"
           "t                   cycle subtitle channel\n"
           "w                   show audio waves\n"
           "s                   activate frame-step mode\n"
           "left/right          seek backward/forward 10 seconds\n"
           "down/up             seek backward/forward 1 minute\n"
           "mouse click         seek to percentage in file corresponding to fraction of width\n"
           );
}
3099
3100 static void opt_input_file(const char *filename)
3101 {
3102     if (input_filename) {
3103         fprintf(stderr, "Argument '%s' provided as input filename, but '%s' was already specified.\n",
3104                 filename, input_filename);
3105         exit(1);
3106     }
3107     if (!strcmp(filename, "-"))
3108         filename = "pipe:";
3109     input_filename = filename;
3110 }
3111
3112 /* Called from the main */
3113 int main(int argc, char **argv)
3114 {
3115     int flags;
3116
3117     av_log_set_flags(AV_LOG_SKIP_REPEATED);
3118
3119     /* register all codecs, demux and protocols */
3120     avcodec_register_all();
3121 #if CONFIG_AVDEVICE
3122     avdevice_register_all();
3123 #endif
3124 #if CONFIG_AVFILTER
3125     avfilter_register_all();
3126 #endif
3127     av_register_all();
3128
3129     init_opts();
3130
3131     show_banner();
3132
3133     parse_options(argc, argv, options, opt_input_file);
3134
3135     if (!input_filename) {
3136         show_usage();
3137         fprintf(stderr, "An input file must be specified\n");
3138         fprintf(stderr, "Use -h to get full help or, even better, run 'man ffplay'\n");
3139         exit(1);
3140     }
3141
3142     if (display_disable) {
3143         video_disable = 1;
3144     }
3145     flags = SDL_INIT_VIDEO | SDL_INIT_AUDIO | SDL_INIT_TIMER;
3146 #if !defined(__MINGW32__) && !defined(__APPLE__)
3147     flags |= SDL_INIT_EVENTTHREAD; /* Not supported on Windows or Mac OS X */
3148 #endif
3149     if (SDL_Init (flags)) {
3150         fprintf(stderr, "Could not initialize SDL - %s\n", SDL_GetError());
3151         exit(1);
3152     }
3153
3154     if (!display_disable) {
3155 #if HAVE_SDL_VIDEO_SIZE
3156         const SDL_VideoInfo *vi = SDL_GetVideoInfo();
3157         fs_screen_width = vi->current_w;
3158         fs_screen_height = vi->current_h;
3159 #endif
3160     }
3161
3162     SDL_EventState(SDL_ACTIVEEVENT, SDL_IGNORE);
3163     SDL_EventState(SDL_SYSWMEVENT, SDL_IGNORE);
3164     SDL_EventState(SDL_USEREVENT, SDL_IGNORE);
3165
3166     av_init_packet(&flush_pkt);
3167     flush_pkt.data= "FLUSH";
3168
3169     cur_stream = stream_open(input_filename, file_iformat);
3170
3171     event_loop();
3172
3173     /* never returns */
3174
3175     return 0;
3176 }