]> git.sesse.net Git - ffmpeg/blob - ffplay.c
03017abfb367031c218bcf97d3cf68770b05d36c
[ffmpeg] / ffplay.c
1 /*
2  * FFplay : Simple Media Player based on the FFmpeg libraries
3  * Copyright (c) 2003 Fabrice Bellard
4  *
5  * This file is part of FFmpeg.
6  *
7  * FFmpeg is free software; you can redistribute it and/or
8  * modify it under the terms of the GNU Lesser General Public
9  * License as published by the Free Software Foundation; either
10  * version 2.1 of the License, or (at your option) any later version.
11  *
12  * FFmpeg is distributed in the hope that it will be useful,
13  * but WITHOUT ANY WARRANTY; without even the implied warranty of
14  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
15  * Lesser General Public License for more details.
16  *
17  * You should have received a copy of the GNU Lesser General Public
18  * License along with FFmpeg; if not, write to the Free Software
19  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
20  */
21
22 #define _XOPEN_SOURCE 600
23
24 #include "config.h"
25 #include <inttypes.h>
26 #include <math.h>
27 #include <limits.h>
28 #include "libavutil/avstring.h"
29 #include "libavutil/colorspace.h"
30 #include "libavutil/pixdesc.h"
31 #include "libavcore/imgutils.h"
32 #include "libavcore/parseutils.h"
33 #include "libavformat/avformat.h"
34 #include "libavdevice/avdevice.h"
35 #include "libswscale/swscale.h"
36 #include "libavcodec/audioconvert.h"
37 #include "libavcodec/opt.h"
38 #include "libavcodec/avfft.h"
39
40 #if CONFIG_AVFILTER
41 # include "libavfilter/avfilter.h"
42 # include "libavfilter/avfiltergraph.h"
43 # include "libavfilter/graphparser.h"
44 #endif
45
46 #include "cmdutils.h"
47
48 #include <SDL.h>
49 #include <SDL_thread.h>
50
51 #ifdef __MINGW32__
52 #undef main /* We don't want SDL to override our main() */
53 #endif
54
55 #include <unistd.h>
56 #include <assert.h>
57
58 const char program_name[] = "FFplay";
59 const int program_birth_year = 2003;
60
61 //#define DEBUG_SYNC
62
63 #define MAX_QUEUE_SIZE (15 * 1024 * 1024)
64 #define MIN_AUDIOQ_SIZE (20 * 16 * 1024)
65 #define MIN_FRAMES 5
66
67 /* SDL audio buffer size, in samples. Should be small to have precise
68    A/V sync as SDL does not have hardware buffer fullness info. */
69 #define SDL_AUDIO_BUFFER_SIZE 1024
70
71 /* no AV sync correction is done if below the AV sync threshold */
72 #define AV_SYNC_THRESHOLD 0.01
73 /* no AV correction is done if too big error */
74 #define AV_NOSYNC_THRESHOLD 10.0
75
76 #define FRAME_SKIP_FACTOR 0.05
77
78 /* maximum audio speed change to get correct sync */
79 #define SAMPLE_CORRECTION_PERCENT_MAX 10
80
81 /* we use about AUDIO_DIFF_AVG_NB A-V differences to make the average */
82 #define AUDIO_DIFF_AVG_NB   20
83
84 /* NOTE: the size must be big enough to compensate the hardware audio buffersize size */
85 #define SAMPLE_ARRAY_SIZE (2*65536)
86
87 static int sws_flags = SWS_BICUBIC;
88
/* Thread-safe FIFO of demuxed packets, shared between the read thread
   (producer) and a decoder thread (consumer). */
typedef struct PacketQueue {
    AVPacketList *first_pkt, *last_pkt; /* head/tail of the singly linked list */
    int nb_packets;       /* number of packets currently queued */
    int size;             /* total bytes queued (packet data + list nodes) */
    int abort_request;    /* when set, blocked readers return immediately */
    SDL_mutex *mutex;     /* protects every field above */
    SDL_cond *cond;       /* signalled on put and on abort */
} PacketQueue;
97
98 #define VIDEO_PICTURE_QUEUE_SIZE 2
99 #define SUBPICTURE_QUEUE_SIZE 4
100
/* One decoded picture waiting in the display queue. */
typedef struct VideoPicture {
    double pts;                                  ///<presentation time stamp for this picture
    double target_clock;                         ///<av_gettime() time at which this should be displayed ideally
    int64_t pos;                                 ///<byte position in file
    SDL_Overlay *bmp;                            ///<SDL YUV overlay holding the pixel data
    int width, height; /* source height & width */
    int allocated;     /* true once bmp has been (re)allocated for this size */
    enum PixelFormat pix_fmt;

#if CONFIG_AVFILTER
    AVFilterBufferRef *picref;                   ///<filter-graph buffer backing this picture
#endif
} VideoPicture;
114
/* One decoded subtitle waiting in the subtitle queue. */
typedef struct SubPicture {
    double pts; /* presentation time stamp for this picture */
    AVSubtitle sub; /* decoded subtitle rectangles; freed with avsubtitle_free() */
} SubPicture;
119
/* Which clock acts as the master for A/V synchronization. */
enum {
    AV_SYNC_AUDIO_MASTER, /* default choice */
    AV_SYNC_VIDEO_MASTER,
    AV_SYNC_EXTERNAL_CLOCK, /* synchronize to an external clock */
};
125
/* All playback state for one open file: threads, packet/picture queues,
   clocks, and display geometry. */
typedef struct VideoState {
    SDL_Thread *parse_tid;      /* demux/read thread */
    SDL_Thread *video_tid;      /* video decode thread */
    SDL_Thread *refresh_tid;    /* thread pushing FF_REFRESH_EVENTs */
    AVInputFormat *iformat;
    int no_background;
    int abort_request;          /* set to tear everything down */
    int paused;
    int last_paused;
    int seek_req;               /* a seek has been requested */
    int seek_flags;
    int64_t seek_pos;
    int64_t seek_rel;
    int read_pause_return;
    AVFormatContext *ic;
    int dtg_active_format;

    int audio_stream;

    int av_sync_type;           /* one of the AV_SYNC_* values */
    double external_clock; /* external clock base */
    int64_t external_clock_time;

    double audio_clock;
    double audio_diff_cum; /* used for AV difference average computation */
    double audio_diff_avg_coef;
    double audio_diff_threshold;
    int audio_diff_avg_count;
    AVStream *audio_st;
    PacketQueue audioq;
    int audio_hw_buf_size;
    /* samples output by the codec. we reserve more space for avsync
       compensation */
    DECLARE_ALIGNED(16,uint8_t,audio_buf1)[(AVCODEC_MAX_AUDIO_FRAME_SIZE * 3) / 2];
    DECLARE_ALIGNED(16,uint8_t,audio_buf2)[(AVCODEC_MAX_AUDIO_FRAME_SIZE * 3) / 2];
    uint8_t *audio_buf;
    unsigned int audio_buf_size; /* in bytes */
    int audio_buf_index; /* in bytes */
    AVPacket audio_pkt_temp;
    AVPacket audio_pkt;
    enum SampleFormat audio_src_fmt;
    AVAudioConvert *reformat_ctx;

    int show_audio; /* if true, display audio samples */
    int16_t sample_array[SAMPLE_ARRAY_SIZE]; /* ring of recent samples for visualization */
    int sample_array_index;
    int last_i_start;
    RDFTContext *rdft;          /* FFT context for the spectrum display mode */
    int rdft_bits;
    FFTSample *rdft_data;
    int xpos;                   /* current x column of the scrolling spectrum */

    SDL_Thread *subtitle_tid;
    int subtitle_stream;
    int subtitle_stream_changed;
    AVStream *subtitle_st;
    PacketQueue subtitleq;
    SubPicture subpq[SUBPICTURE_QUEUE_SIZE];
    int subpq_size, subpq_rindex, subpq_windex;
    SDL_mutex *subpq_mutex;
    SDL_cond *subpq_cond;

    double frame_timer;
    double frame_last_pts;
    double frame_last_delay;
    double video_clock;                          ///<pts of last decoded frame / predicted pts of next decoded frame
    int video_stream;
    AVStream *video_st;
    PacketQueue videoq;
    double video_current_pts;                    ///<current displayed pts (different from video_clock if frame fifos are used)
    double video_current_pts_drift;              ///<video_current_pts - time (av_gettime) at which we updated video_current_pts - used to have running video pts
    int64_t video_current_pos;                   ///<current displayed file pos
    VideoPicture pictq[VIDEO_PICTURE_QUEUE_SIZE];
    int pictq_size, pictq_rindex, pictq_windex;
    SDL_mutex *pictq_mutex;
    SDL_cond *pictq_cond;
#if !CONFIG_AVFILTER
    struct SwsContext *img_convert_ctx;
#endif

    //    QETimer *video_timer;
    char filename[1024];
    int width, height, xleft, ytop;              /* current display geometry */

    int64_t faulty_pts;
    int64_t faulty_dts;
    int64_t last_dts_for_fault_detection;
    int64_t last_pts_for_fault_detection;

#if CONFIG_AVFILTER
    AVFilterContext *out_video_filter;          ///<the last filter in the video chain
#endif

    float skip_frames;
    float skip_frames_index;
    int refresh;
} VideoState;
223
/* forward declarations */
static void show_help(void);
static int audio_write_get_buf_size(VideoState *is);

/* options specified by the user */
static AVInputFormat *file_iformat;
static const char *input_filename;
static const char *window_title;
static int fs_screen_width;
static int fs_screen_height;
static int screen_width = 0;
static int screen_height = 0;
static int frame_width = 0;
static int frame_height = 0;
static enum PixelFormat frame_pix_fmt = PIX_FMT_NONE;
static int audio_disable;
static int video_disable;
/* stream index to use per media type; -1 means auto-select */
static int wanted_stream[AVMEDIA_TYPE_NB]={
    [AVMEDIA_TYPE_AUDIO]=-1,
    [AVMEDIA_TYPE_VIDEO]=-1,
    [AVMEDIA_TYPE_SUBTITLE]=-1,
};
static int seek_by_bytes=-1;
static int display_disable;
static int show_status = 1;
static int av_sync_type = AV_SYNC_AUDIO_MASTER;
static int64_t start_time = AV_NOPTS_VALUE;
static int64_t duration = AV_NOPTS_VALUE;
static int debug = 0;
static int debug_mv = 0;
static int step = 0;
static int thread_count = 1;
static int workaround_bugs = 1;
static int fast = 0;
static int genpts = 0;
static int lowres = 0;
static int idct = FF_IDCT_AUTO;
static enum AVDiscard skip_frame= AVDISCARD_DEFAULT;
static enum AVDiscard skip_idct= AVDISCARD_DEFAULT;
static enum AVDiscard skip_loop_filter= AVDISCARD_DEFAULT;
static int error_recognition = FF_ER_CAREFUL;
static int error_concealment = 3;
static int decoder_reorder_pts= -1;
static int autoexit;
static int exit_on_keydown;
static int exit_on_mousedown;
static int loop=1;
static int framedrop=1;

static int rdftspeed=20;
#if CONFIG_AVFILTER
static char *vfilters = NULL;
#endif

/* current context */
static int is_full_screen;
static VideoState *cur_stream;
static int64_t audio_callback_time;

/* sentinel packet pushed into the queues to mark a flush point after seeks */
static AVPacket flush_pkt;

/* custom SDL event types */
#define FF_ALLOC_EVENT   (SDL_USEREVENT)
#define FF_REFRESH_EVENT (SDL_USEREVENT + 1)
#define FF_QUIT_EVENT    (SDL_USEREVENT + 2)

static SDL_Surface *screen;

static int packet_queue_put(PacketQueue *q, AVPacket *pkt);
291
292 /* packet queue handling */
/* packet queue handling */
/* Zero-initialize a queue, create its lock/condvar, and prime it with
   the global flush packet. */
static void packet_queue_init(PacketQueue *q)
{
    memset(q, 0, sizeof(PacketQueue));
    q->mutex = SDL_CreateMutex();
    q->cond = SDL_CreateCond();
    packet_queue_put(q, &flush_pkt);
}
300
301 static void packet_queue_flush(PacketQueue *q)
302 {
303     AVPacketList *pkt, *pkt1;
304
305     SDL_LockMutex(q->mutex);
306     for(pkt = q->first_pkt; pkt != NULL; pkt = pkt1) {
307         pkt1 = pkt->next;
308         av_free_packet(&pkt->pkt);
309         av_freep(&pkt);
310     }
311     q->last_pkt = NULL;
312     q->first_pkt = NULL;
313     q->nb_packets = 0;
314     q->size = 0;
315     SDL_UnlockMutex(q->mutex);
316 }
317
/* Release every packet and the synchronization primitives owned by the
   queue; the queue must not be used afterwards. */
static void packet_queue_end(PacketQueue *q)
{
    packet_queue_flush(q);
    SDL_DestroyMutex(q->mutex);
    SDL_DestroyCond(q->cond);
}
324
325 static int packet_queue_put(PacketQueue *q, AVPacket *pkt)
326 {
327     AVPacketList *pkt1;
328
329     /* duplicate the packet */
330     if (pkt!=&flush_pkt && av_dup_packet(pkt) < 0)
331         return -1;
332
333     pkt1 = av_malloc(sizeof(AVPacketList));
334     if (!pkt1)
335         return -1;
336     pkt1->pkt = *pkt;
337     pkt1->next = NULL;
338
339
340     SDL_LockMutex(q->mutex);
341
342     if (!q->last_pkt)
343
344         q->first_pkt = pkt1;
345     else
346         q->last_pkt->next = pkt1;
347     q->last_pkt = pkt1;
348     q->nb_packets++;
349     q->size += pkt1->pkt.size + sizeof(*pkt1);
350     /* XXX: should duplicate packet data in DV case */
351     SDL_CondSignal(q->cond);
352
353     SDL_UnlockMutex(q->mutex);
354     return 0;
355 }
356
/* Make the queue refuse further reads and wake any thread blocked in
   packet_queue_get(). */
static void packet_queue_abort(PacketQueue *q)
{
    SDL_LockMutex(q->mutex);

    q->abort_request = 1;

    SDL_CondSignal(q->cond);

    SDL_UnlockMutex(q->mutex);
}
367
368 /* return < 0 if aborted, 0 if no packet and > 0 if packet.  */
369 static int packet_queue_get(PacketQueue *q, AVPacket *pkt, int block)
370 {
371     AVPacketList *pkt1;
372     int ret;
373
374     SDL_LockMutex(q->mutex);
375
376     for(;;) {
377         if (q->abort_request) {
378             ret = -1;
379             break;
380         }
381
382         pkt1 = q->first_pkt;
383         if (pkt1) {
384             q->first_pkt = pkt1->next;
385             if (!q->first_pkt)
386                 q->last_pkt = NULL;
387             q->nb_packets--;
388             q->size -= pkt1->pkt.size + sizeof(*pkt1);
389             *pkt = pkt1->pkt;
390             av_free(pkt1);
391             ret = 1;
392             break;
393         } else if (!block) {
394             ret = 0;
395             break;
396         } else {
397             SDL_CondWait(q->cond, q->mutex);
398         }
399     }
400     SDL_UnlockMutex(q->mutex);
401     return ret;
402 }
403
404 static inline void fill_rectangle(SDL_Surface *screen,
405                                   int x, int y, int w, int h, int color)
406 {
407     SDL_Rect rect;
408     rect.x = x;
409     rect.y = y;
410     rect.w = w;
411     rect.h = h;
412     SDL_FillRect(screen, &rect, color);
413 }
414
/* NOTE(review): dead code — disabled with #if 0, kept only as reference
   for the commented-out fill_border() call in video_image_display(). */
#if 0
/* draw only the border of a rectangle */
void fill_border(VideoState *s, int x, int y, int w, int h, int color)
{
    int w1, w2, h1, h2;

    /* fill the background */
    w1 = x;
    if (w1 < 0)
        w1 = 0;
    w2 = s->width - (x + w);
    if (w2 < 0)
        w2 = 0;
    h1 = y;
    if (h1 < 0)
        h1 = 0;
    h2 = s->height - (y + h);
    if (h2 < 0)
        h2 = 0;
    fill_rectangle(screen,
                   s->xleft, s->ytop,
                   w1, s->height,
                   color);
    fill_rectangle(screen,
                   s->xleft + s->width - w2, s->ytop,
                   w2, s->height,
                   color);
    fill_rectangle(screen,
                   s->xleft + w1, s->ytop,
                   s->width - w1 - w2, h1,
                   color);
    fill_rectangle(screen,
                   s->xleft + w1, s->ytop + s->height - h2,
                   s->width - w1 - w2, h2,
                   color);
}
#endif
452
/* Alpha-blend newp over oldp; s is an extra fixed-point shift used when
   several chroma samples have been accumulated into one value. */
#define ALPHA_BLEND(a, oldp, newp, s)\
((((oldp << s) * (255 - (a))) + (newp * (a))) / (255 << s))

/* Unpack a 32-bit ARGB word into separate components. */
#define RGBA_IN(r, g, b, a, s)\
{\
    unsigned int v = ((const uint32_t *)(s))[0];\
    a = (v >> 24) & 0xff;\
    r = (v >> 16) & 0xff;\
    g = (v >> 8) & 0xff;\
    b = v & 0xff;\
}

/* Look up the palette entry for the 8-bit pixel at s and unpack it into
   A, Y, U, V components. */
#define YUVA_IN(y, u, v, a, s, pal)\
{\
    unsigned int val = ((const uint32_t *)(pal))[*(const uint8_t*)(s)];\
    a = (val >> 24) & 0xff;\
    y = (val >> 16) & 0xff;\
    u = (val >> 8) & 0xff;\
    v = val & 0xff;\
}

/* Pack A, Y, U, V components back into a 32-bit word at d. */
#define YUVA_OUT(d, y, u, v, a)\
{\
    ((uint32_t *)(d))[0] = (a << 24) | (y << 16) | (u << 8) | v;\
}

/* bytes per pixel of the palettized subtitle bitmap */
#define BPP 1
481
482 static void blend_subrect(AVPicture *dst, const AVSubtitleRect *rect, int imgw, int imgh)
483 {
484     int wrap, wrap3, width2, skip2;
485     int y, u, v, a, u1, v1, a1, w, h;
486     uint8_t *lum, *cb, *cr;
487     const uint8_t *p;
488     const uint32_t *pal;
489     int dstx, dsty, dstw, dsth;
490
491     dstw = av_clip(rect->w, 0, imgw);
492     dsth = av_clip(rect->h, 0, imgh);
493     dstx = av_clip(rect->x, 0, imgw - dstw);
494     dsty = av_clip(rect->y, 0, imgh - dsth);
495     lum = dst->data[0] + dsty * dst->linesize[0];
496     cb = dst->data[1] + (dsty >> 1) * dst->linesize[1];
497     cr = dst->data[2] + (dsty >> 1) * dst->linesize[2];
498
499     width2 = ((dstw + 1) >> 1) + (dstx & ~dstw & 1);
500     skip2 = dstx >> 1;
501     wrap = dst->linesize[0];
502     wrap3 = rect->pict.linesize[0];
503     p = rect->pict.data[0];
504     pal = (const uint32_t *)rect->pict.data[1];  /* Now in YCrCb! */
505
506     if (dsty & 1) {
507         lum += dstx;
508         cb += skip2;
509         cr += skip2;
510
511         if (dstx & 1) {
512             YUVA_IN(y, u, v, a, p, pal);
513             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
514             cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
515             cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
516             cb++;
517             cr++;
518             lum++;
519             p += BPP;
520         }
521         for(w = dstw - (dstx & 1); w >= 2; w -= 2) {
522             YUVA_IN(y, u, v, a, p, pal);
523             u1 = u;
524             v1 = v;
525             a1 = a;
526             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
527
528             YUVA_IN(y, u, v, a, p + BPP, pal);
529             u1 += u;
530             v1 += v;
531             a1 += a;
532             lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
533             cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
534             cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
535             cb++;
536             cr++;
537             p += 2 * BPP;
538             lum += 2;
539         }
540         if (w) {
541             YUVA_IN(y, u, v, a, p, pal);
542             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
543             cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
544             cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
545             p++;
546             lum++;
547         }
548         p += wrap3 - dstw * BPP;
549         lum += wrap - dstw - dstx;
550         cb += dst->linesize[1] - width2 - skip2;
551         cr += dst->linesize[2] - width2 - skip2;
552     }
553     for(h = dsth - (dsty & 1); h >= 2; h -= 2) {
554         lum += dstx;
555         cb += skip2;
556         cr += skip2;
557
558         if (dstx & 1) {
559             YUVA_IN(y, u, v, a, p, pal);
560             u1 = u;
561             v1 = v;
562             a1 = a;
563             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
564             p += wrap3;
565             lum += wrap;
566             YUVA_IN(y, u, v, a, p, pal);
567             u1 += u;
568             v1 += v;
569             a1 += a;
570             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
571             cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
572             cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
573             cb++;
574             cr++;
575             p += -wrap3 + BPP;
576             lum += -wrap + 1;
577         }
578         for(w = dstw - (dstx & 1); w >= 2; w -= 2) {
579             YUVA_IN(y, u, v, a, p, pal);
580             u1 = u;
581             v1 = v;
582             a1 = a;
583             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
584
585             YUVA_IN(y, u, v, a, p + BPP, pal);
586             u1 += u;
587             v1 += v;
588             a1 += a;
589             lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
590             p += wrap3;
591             lum += wrap;
592
593             YUVA_IN(y, u, v, a, p, pal);
594             u1 += u;
595             v1 += v;
596             a1 += a;
597             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
598
599             YUVA_IN(y, u, v, a, p + BPP, pal);
600             u1 += u;
601             v1 += v;
602             a1 += a;
603             lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
604
605             cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 2);
606             cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 2);
607
608             cb++;
609             cr++;
610             p += -wrap3 + 2 * BPP;
611             lum += -wrap + 2;
612         }
613         if (w) {
614             YUVA_IN(y, u, v, a, p, pal);
615             u1 = u;
616             v1 = v;
617             a1 = a;
618             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
619             p += wrap3;
620             lum += wrap;
621             YUVA_IN(y, u, v, a, p, pal);
622             u1 += u;
623             v1 += v;
624             a1 += a;
625             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
626             cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
627             cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
628             cb++;
629             cr++;
630             p += -wrap3 + BPP;
631             lum += -wrap + 1;
632         }
633         p += wrap3 + (wrap3 - dstw * BPP);
634         lum += wrap + (wrap - dstw - dstx);
635         cb += dst->linesize[1] - width2 - skip2;
636         cr += dst->linesize[2] - width2 - skip2;
637     }
638     /* handle odd height */
639     if (h) {
640         lum += dstx;
641         cb += skip2;
642         cr += skip2;
643
644         if (dstx & 1) {
645             YUVA_IN(y, u, v, a, p, pal);
646             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
647             cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
648             cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
649             cb++;
650             cr++;
651             lum++;
652             p += BPP;
653         }
654         for(w = dstw - (dstx & 1); w >= 2; w -= 2) {
655             YUVA_IN(y, u, v, a, p, pal);
656             u1 = u;
657             v1 = v;
658             a1 = a;
659             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
660
661             YUVA_IN(y, u, v, a, p + BPP, pal);
662             u1 += u;
663             v1 += v;
664             a1 += a;
665             lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
666             cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u, 1);
667             cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v, 1);
668             cb++;
669             cr++;
670             p += 2 * BPP;
671             lum += 2;
672         }
673         if (w) {
674             YUVA_IN(y, u, v, a, p, pal);
675             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
676             cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
677             cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
678         }
679     }
680 }
681
/* Release the decoded subtitle data held by a queue slot. */
static void free_subpicture(SubPicture *sp)
{
    avsubtitle_free(&sp->sub);
}
686
/* Blit the current picture (with any due subtitle blended in) to the SDL
   screen, letterboxed inside the window to preserve aspect ratio. */
static void video_image_display(VideoState *is)
{
    VideoPicture *vp;
    SubPicture *sp;
    AVPicture pict;
    float aspect_ratio;
    int width, height, x, y;
    SDL_Rect rect;
    int i;

    vp = &is->pictq[is->pictq_rindex];
    if (vp->bmp) {
#if CONFIG_AVFILTER
         if (vp->picref->video->pixel_aspect.num == 0)
             aspect_ratio = 0;
         else
             aspect_ratio = av_q2d(vp->picref->video->pixel_aspect);
#else

        /* XXX: use variable in the frame */
        if (is->video_st->sample_aspect_ratio.num)
            aspect_ratio = av_q2d(is->video_st->sample_aspect_ratio);
        else if (is->video_st->codec->sample_aspect_ratio.num)
            aspect_ratio = av_q2d(is->video_st->codec->sample_aspect_ratio);
        else
            aspect_ratio = 0;
#endif
        if (aspect_ratio <= 0.0)
            aspect_ratio = 1.0;
        /* turn the sample aspect ratio into a display aspect ratio */
        aspect_ratio *= (float)vp->width / (float)vp->height;
        /* if an active format is indicated, then it overrides the
           mpeg format */
#if 0
        if (is->video_st->codec->dtg_active_format != is->dtg_active_format) {
            is->dtg_active_format = is->video_st->codec->dtg_active_format;
            printf("dtg_active_format=%d\n", is->dtg_active_format);
        }
#endif
#if 0
        switch(is->video_st->codec->dtg_active_format) {
        case FF_DTG_AFD_SAME:
        default:
            /* nothing to do */
            break;
        case FF_DTG_AFD_4_3:
            aspect_ratio = 4.0 / 3.0;
            break;
        case FF_DTG_AFD_16_9:
            aspect_ratio = 16.0 / 9.0;
            break;
        case FF_DTG_AFD_14_9:
            aspect_ratio = 14.0 / 9.0;
            break;
        case FF_DTG_AFD_4_3_SP_14_9:
            aspect_ratio = 14.0 / 9.0;
            break;
        case FF_DTG_AFD_16_9_SP_14_9:
            aspect_ratio = 14.0 / 9.0;
            break;
        case FF_DTG_AFD_SP_4_3:
            aspect_ratio = 4.0 / 3.0;
            break;
        }
#endif

        if (is->subtitle_st)
        {
            if (is->subpq_size > 0)
            {
                sp = &is->subpq[is->subpq_rindex];

                /* only blend once the subtitle's start display time is reached */
                if (vp->pts >= sp->pts + ((float) sp->sub.start_display_time / 1000))
                {
                    SDL_LockYUVOverlay (vp->bmp);

                    /* note: planes 1 and 2 are deliberately swapped between
                       the SDL overlay and the AVPicture view of it */
                    pict.data[0] = vp->bmp->pixels[0];
                    pict.data[1] = vp->bmp->pixels[2];
                    pict.data[2] = vp->bmp->pixels[1];

                    pict.linesize[0] = vp->bmp->pitches[0];
                    pict.linesize[1] = vp->bmp->pitches[2];
                    pict.linesize[2] = vp->bmp->pitches[1];

                    for (i = 0; i < sp->sub.num_rects; i++)
                        blend_subrect(&pict, sp->sub.rects[i],
                                      vp->bmp->w, vp->bmp->h);

                    SDL_UnlockYUVOverlay (vp->bmp);
                }
            }
        }


        /* XXX: we suppose the screen has a 1.0 pixel ratio */
        height = is->height;
        width = ((int)rint(height * aspect_ratio)) & ~1;
        if (width > is->width) {
            width = is->width;
            height = ((int)rint(width / aspect_ratio)) & ~1;
        }
        /* center the picture in the window */
        x = (is->width - width) / 2;
        y = (is->height - height) / 2;
        if (!is->no_background) {
            /* fill the background */
            //            fill_border(is, x, y, width, height, QERGB(0x00, 0x00, 0x00));
        } else {
            is->no_background = 0;
        }
        rect.x = is->xleft + x;
        rect.y = is->ytop  + y;
        rect.w = width;
        rect.h = height;
        SDL_DisplayYUVOverlay(vp->bmp, &rect);
    } else {
#if 0
        fill_rectangle(screen,
                       is->xleft, is->ytop, is->width, is->height,
                       QERGB(0x00, 0x00, 0x00));
#endif
    }
}
808
/* Mathematical modulo: unlike C's %, the result is in [0, b) for
   positive b even when a is negative. */
static inline int compute_mod(int a, int b)
{
    int r = a % b;

    return (r >= 0) ? r : r + b;
}
817
818 static void video_audio_display(VideoState *s)
819 {
820     int i, i_start, x, y1, y, ys, delay, n, nb_display_channels;
821     int ch, channels, h, h2, bgcolor, fgcolor;
822     int16_t time_diff;
823     int rdft_bits, nb_freq;
824
825     for(rdft_bits=1; (1<<rdft_bits)<2*s->height; rdft_bits++)
826         ;
827     nb_freq= 1<<(rdft_bits-1);
828
829     /* compute display index : center on currently output samples */
830     channels = s->audio_st->codec->channels;
831     nb_display_channels = channels;
832     if (!s->paused) {
833         int data_used= s->show_audio==1 ? s->width : (2*nb_freq);
834         n = 2 * channels;
835         delay = audio_write_get_buf_size(s);
836         delay /= n;
837
838         /* to be more precise, we take into account the time spent since
839            the last buffer computation */
840         if (audio_callback_time) {
841             time_diff = av_gettime() - audio_callback_time;
842             delay -= (time_diff * s->audio_st->codec->sample_rate) / 1000000;
843         }
844
845         delay += 2*data_used;
846         if (delay < data_used)
847             delay = data_used;
848
849         i_start= x = compute_mod(s->sample_array_index - delay * channels, SAMPLE_ARRAY_SIZE);
850         if(s->show_audio==1){
851             h= INT_MIN;
852             for(i=0; i<1000; i+=channels){
853                 int idx= (SAMPLE_ARRAY_SIZE + x - i) % SAMPLE_ARRAY_SIZE;
854                 int a= s->sample_array[idx];
855                 int b= s->sample_array[(idx + 4*channels)%SAMPLE_ARRAY_SIZE];
856                 int c= s->sample_array[(idx + 5*channels)%SAMPLE_ARRAY_SIZE];
857                 int d= s->sample_array[(idx + 9*channels)%SAMPLE_ARRAY_SIZE];
858                 int score= a-d;
859                 if(h<score && (b^c)<0){
860                     h= score;
861                     i_start= idx;
862                 }
863             }
864         }
865
866         s->last_i_start = i_start;
867     } else {
868         i_start = s->last_i_start;
869     }
870
871     bgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0x00);
872     if(s->show_audio==1){
873         fill_rectangle(screen,
874                        s->xleft, s->ytop, s->width, s->height,
875                        bgcolor);
876
877         fgcolor = SDL_MapRGB(screen->format, 0xff, 0xff, 0xff);
878
879         /* total height for one channel */
880         h = s->height / nb_display_channels;
881         /* graph height / 2 */
882         h2 = (h * 9) / 20;
883         for(ch = 0;ch < nb_display_channels; ch++) {
884             i = i_start + ch;
885             y1 = s->ytop + ch * h + (h / 2); /* position of center line */
886             for(x = 0; x < s->width; x++) {
887                 y = (s->sample_array[i] * h2) >> 15;
888                 if (y < 0) {
889                     y = -y;
890                     ys = y1 - y;
891                 } else {
892                     ys = y1;
893                 }
894                 fill_rectangle(screen,
895                                s->xleft + x, ys, 1, y,
896                                fgcolor);
897                 i += channels;
898                 if (i >= SAMPLE_ARRAY_SIZE)
899                     i -= SAMPLE_ARRAY_SIZE;
900             }
901         }
902
903         fgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0xff);
904
905         for(ch = 1;ch < nb_display_channels; ch++) {
906             y = s->ytop + ch * h;
907             fill_rectangle(screen,
908                            s->xleft, y, s->width, 1,
909                            fgcolor);
910         }
911         SDL_UpdateRect(screen, s->xleft, s->ytop, s->width, s->height);
912     }else{
913         nb_display_channels= FFMIN(nb_display_channels, 2);
914         if(rdft_bits != s->rdft_bits){
915             av_rdft_end(s->rdft);
916             av_free(s->rdft_data);
917             s->rdft = av_rdft_init(rdft_bits, DFT_R2C);
918             s->rdft_bits= rdft_bits;
919             s->rdft_data= av_malloc(4*nb_freq*sizeof(*s->rdft_data));
920         }
921         {
922             FFTSample *data[2];
923             for(ch = 0;ch < nb_display_channels; ch++) {
924                 data[ch] = s->rdft_data + 2*nb_freq*ch;
925                 i = i_start + ch;
926                 for(x = 0; x < 2*nb_freq; x++) {
927                     double w= (x-nb_freq)*(1.0/nb_freq);
928                     data[ch][x]= s->sample_array[i]*(1.0-w*w);
929                     i += channels;
930                     if (i >= SAMPLE_ARRAY_SIZE)
931                         i -= SAMPLE_ARRAY_SIZE;
932                 }
933                 av_rdft_calc(s->rdft, data[ch]);
934             }
935             //least efficient way to do this, we should of course directly access it but its more than fast enough
936             for(y=0; y<s->height; y++){
937                 double w= 1/sqrt(nb_freq);
938                 int a= sqrt(w*sqrt(data[0][2*y+0]*data[0][2*y+0] + data[0][2*y+1]*data[0][2*y+1]));
939                 int b= (nb_display_channels == 2 ) ? sqrt(w*sqrt(data[1][2*y+0]*data[1][2*y+0]
940                        + data[1][2*y+1]*data[1][2*y+1])) : a;
941                 a= FFMIN(a,255);
942                 b= FFMIN(b,255);
943                 fgcolor = SDL_MapRGB(screen->format, a, b, (a+b)/2);
944
945                 fill_rectangle(screen,
946                             s->xpos, s->height-y, 1, 1,
947                             fgcolor);
948             }
949         }
950         SDL_UpdateRect(screen, s->xpos, s->ytop, 1, s->height);
951         s->xpos++;
952         if(s->xpos >= s->width)
953             s->xpos= s->xleft;
954     }
955 }
956
/* (Re)create the SDL video surface.  Size priority: fullscreen mode size,
   user-requested size, filter/codec frame size, then a 640x480 fallback.
   Returns 0 on success (including "already the right size"), -1 on error. */
static int video_open(VideoState *is){
    int flags = SDL_HWSURFACE|SDL_ASYNCBLIT|SDL_HWACCEL;
    int w,h;

    if(is_full_screen) flags |= SDL_FULLSCREEN;
    else               flags |= SDL_RESIZABLE;

    if (is_full_screen && fs_screen_width) {
        w = fs_screen_width;
        h = fs_screen_height;
    } else if(!is_full_screen && screen_width){
        w = screen_width;
        h = screen_height;
#if CONFIG_AVFILTER
    }else if (is->out_video_filter && is->out_video_filter->inputs[0]){
        w = is->out_video_filter->inputs[0]->w;
        h = is->out_video_filter->inputs[0]->h;
#else
    }else if (is->video_st && is->video_st->codec->width){
        w = is->video_st->codec->width;
        h = is->video_st->codec->height;
#endif
    } else {
        w = 640;
        h = 480;
    }
    /* nothing to do if the existing surface already has the wanted size */
    if(screen && is->width == screen->w && screen->w == w
       && is->height== screen->h && screen->h == h)
        return 0;

#ifndef __APPLE__
    screen = SDL_SetVideoMode(w, h, 0, flags);
#else
    /* setting bits_per_pixel = 0 or 32 causes blank video on OS X */
    screen = SDL_SetVideoMode(w, h, 24, flags);
#endif
    if (!screen) {
        fprintf(stderr, "SDL: could not set video mode - exiting\n");
        return -1;
    }
    if (!window_title)
        window_title = input_filename;
    SDL_WM_SetCaption(window_title, window_title);

    is->width = screen->w;
    is->height = screen->h;

    return 0;
}
1006
1007 /* display the current picture, if any */
1008 static void video_display(VideoState *is)
1009 {
1010     if(!screen)
1011         video_open(cur_stream);
1012     if (is->audio_st && is->show_audio)
1013         video_audio_display(is);
1014     else if (is->video_st)
1015         video_image_display(is);
1016 }
1017
1018 static int refresh_thread(void *opaque)
1019 {
1020     VideoState *is= opaque;
1021     while(!is->abort_request){
1022     SDL_Event event;
1023     event.type = FF_REFRESH_EVENT;
1024     event.user.data1 = opaque;
1025         if(!is->refresh){
1026             is->refresh=1;
1027     SDL_PushEvent(&event);
1028         }
1029         usleep(is->audio_st && is->show_audio ? rdftspeed*1000 : 5000); //FIXME ideally we should wait the correct time but SDLs event passing is so slow it would be silly
1030     }
1031     return 0;
1032 }
1033
1034 /* get the current audio clock value */
1035 static double get_audio_clock(VideoState *is)
1036 {
1037     double pts;
1038     int hw_buf_size, bytes_per_sec;
1039     pts = is->audio_clock;
1040     hw_buf_size = audio_write_get_buf_size(is);
1041     bytes_per_sec = 0;
1042     if (is->audio_st) {
1043         bytes_per_sec = is->audio_st->codec->sample_rate *
1044             2 * is->audio_st->codec->channels;
1045     }
1046     if (bytes_per_sec)
1047         pts -= (double)hw_buf_size / bytes_per_sec;
1048     return pts;
1049 }
1050
1051 /* get the current video clock value */
1052 static double get_video_clock(VideoState *is)
1053 {
1054     if (is->paused) {
1055         return is->video_current_pts;
1056     } else {
1057         return is->video_current_pts_drift + av_gettime() / 1000000.0;
1058     }
1059 }
1060
1061 /* get the current external clock value */
1062 static double get_external_clock(VideoState *is)
1063 {
1064     int64_t ti;
1065     ti = av_gettime();
1066     return is->external_clock + ((ti - is->external_clock_time) * 1e-6);
1067 }
1068
1069 /* get the current master clock value */
1070 static double get_master_clock(VideoState *is)
1071 {
1072     double val;
1073
1074     if (is->av_sync_type == AV_SYNC_VIDEO_MASTER) {
1075         if (is->video_st)
1076             val = get_video_clock(is);
1077         else
1078             val = get_audio_clock(is);
1079     } else if (is->av_sync_type == AV_SYNC_AUDIO_MASTER) {
1080         if (is->audio_st)
1081             val = get_audio_clock(is);
1082         else
1083             val = get_video_clock(is);
1084     } else {
1085         val = get_external_clock(is);
1086     }
1087     return val;
1088 }
1089
1090 /* seek in the stream */
1091 static void stream_seek(VideoState *is, int64_t pos, int64_t rel, int seek_by_bytes)
1092 {
1093     if (!is->seek_req) {
1094         is->seek_pos = pos;
1095         is->seek_rel = rel;
1096         is->seek_flags &= ~AVSEEK_FLAG_BYTE;
1097         if (seek_by_bytes)
1098             is->seek_flags |= AVSEEK_FLAG_BYTE;
1099         is->seek_req = 1;
1100     }
1101 }
1102
1103 /* pause or resume the video */
/* pause or resume the video */
static void stream_pause(VideoState *is)
{
    if (is->paused) {
        /* resuming: pts_drift was frozen at pause time, so
           now + drift - pts equals the wall-clock time spent paused;
           credit it to the frame timer so scheduling picks up cleanly */
        is->frame_timer += av_gettime() / 1000000.0 + is->video_current_pts_drift - is->video_current_pts;
        if(is->read_pause_return != AVERROR(ENOSYS)){
            /* the demuxer honoured av_read_pause(): re-derive the current
               pts from the drift as if the clock had kept running */
            is->video_current_pts = is->video_current_pts_drift + av_gettime() / 1000000.0;
        }
        /* re-anchor the drift to the (possibly updated) current pts */
        is->video_current_pts_drift = is->video_current_pts - av_gettime() / 1000000.0;
    }
    is->paused = !is->paused;
}
1115
1116 static double compute_target_time(double frame_current_pts, VideoState *is)
1117 {
1118     double delay, sync_threshold, diff;
1119
1120     /* compute nominal delay */
1121     delay = frame_current_pts - is->frame_last_pts;
1122     if (delay <= 0 || delay >= 10.0) {
1123         /* if incorrect delay, use previous one */
1124         delay = is->frame_last_delay;
1125     } else {
1126         is->frame_last_delay = delay;
1127     }
1128     is->frame_last_pts = frame_current_pts;
1129
1130     /* update delay to follow master synchronisation source */
1131     if (((is->av_sync_type == AV_SYNC_AUDIO_MASTER && is->audio_st) ||
1132          is->av_sync_type == AV_SYNC_EXTERNAL_CLOCK)) {
1133         /* if video is slave, we try to correct big delays by
1134            duplicating or deleting a frame */
1135         diff = get_video_clock(is) - get_master_clock(is);
1136
1137         /* skip or repeat frame. We take into account the
1138            delay to compute the threshold. I still don't know
1139            if it is the best guess */
1140         sync_threshold = FFMAX(AV_SYNC_THRESHOLD, delay);
1141         if (fabs(diff) < AV_NOSYNC_THRESHOLD) {
1142             if (diff <= -sync_threshold)
1143                 delay = 0;
1144             else if (diff >= sync_threshold)
1145                 delay = 2 * delay;
1146         }
1147     }
1148     is->frame_timer += delay;
1149 #if defined(DEBUG_SYNC)
1150     printf("video: delay=%0.3f actual_delay=%0.3f pts=%0.3f A-V=%f\n",
1151             delay, actual_delay, frame_current_pts, -diff);
1152 #endif
1153
1154     return is->frame_timer;
1155 }
1156
1157 /* called to display each frame */
/* called to display each frame: pops due pictures off the picture queue,
   drops late ones when framedrop is enabled, retires expired subtitles,
   draws via video_display() and periodically prints the status line */
static void video_refresh_timer(void *opaque)
{
    VideoState *is = opaque;
    VideoPicture *vp;

    SubPicture *sp, *sp2;

    if (is->video_st) {
retry:
        if (is->pictq_size == 0) {
            //nothing to do, no picture to display in the que
        } else {
            double time= av_gettime()/1000000.0;
            double next_target;
            /* dequeue the picture */
            vp = &is->pictq[is->pictq_rindex];

            /* not yet time to show this picture; keep it queued */
            if(time < vp->target_clock)
                return;
            /* update current video pts */
            is->video_current_pts = vp->pts;
            is->video_current_pts_drift = is->video_current_pts - time;
            is->video_current_pos = vp->pos;
            /* estimate when the picture after this one is due */
            if(is->pictq_size > 1){
                VideoPicture *nextvp= &is->pictq[(is->pictq_rindex+1)%VIDEO_PICTURE_QUEUE_SIZE];
                assert(nextvp->target_clock >= vp->target_clock);
                next_target= nextvp->target_clock;
            }else{
                next_target= vp->target_clock + is->video_clock - vp->pts; //FIXME pass durations cleanly
            }
            /* we are already past the next deadline: raise the skip ratio
               and possibly drop this picture without displaying it */
            if(framedrop && time > next_target){
                is->skip_frames *= 1.0 + FRAME_SKIP_FACTOR;
                if(is->pictq_size > 1 || time > next_target + 0.5){
                    /* update queue size and signal for next picture */
                    if (++is->pictq_rindex == VIDEO_PICTURE_QUEUE_SIZE)
                        is->pictq_rindex = 0;

                    SDL_LockMutex(is->pictq_mutex);
                    is->pictq_size--;
                    SDL_CondSignal(is->pictq_cond);
                    SDL_UnlockMutex(is->pictq_mutex);
                    goto retry;
                }
            }

            if(is->subtitle_st) {
                if (is->subtitle_stream_changed) {
                    /* subtitle stream switched: flush every queued
                       subpicture and wake the subtitle decoder thread */
                    SDL_LockMutex(is->subpq_mutex);

                    while (is->subpq_size) {
                        free_subpicture(&is->subpq[is->subpq_rindex]);

                        /* update queue size and signal for next picture */
                        if (++is->subpq_rindex == SUBPICTURE_QUEUE_SIZE)
                            is->subpq_rindex = 0;

                        is->subpq_size--;
                    }
                    is->subtitle_stream_changed = 0;

                    SDL_CondSignal(is->subpq_cond);
                    SDL_UnlockMutex(is->subpq_mutex);
                } else {
                    if (is->subpq_size > 0) {
                        sp = &is->subpq[is->subpq_rindex];

                        if (is->subpq_size > 1)
                            sp2 = &is->subpq[(is->subpq_rindex + 1) % SUBPICTURE_QUEUE_SIZE];
                        else
                            sp2 = NULL;

                        /* retire the current subpicture once it has expired
                           or the next one is due to start */
                        if ((is->video_current_pts > (sp->pts + ((float) sp->sub.end_display_time / 1000)))
                                || (sp2 && is->video_current_pts > (sp2->pts + ((float) sp2->sub.start_display_time / 1000))))
                        {
                            free_subpicture(sp);

                            /* update queue size and signal for next picture */
                            if (++is->subpq_rindex == SUBPICTURE_QUEUE_SIZE)
                                is->subpq_rindex = 0;

                            SDL_LockMutex(is->subpq_mutex);
                            is->subpq_size--;
                            SDL_CondSignal(is->subpq_cond);
                            SDL_UnlockMutex(is->subpq_mutex);
                        }
                    }
                }
            }

            /* display picture */
            video_display(is);

            /* update queue size and signal for next picture */
            if (++is->pictq_rindex == VIDEO_PICTURE_QUEUE_SIZE)
                is->pictq_rindex = 0;

            SDL_LockMutex(is->pictq_mutex);
            is->pictq_size--;
            SDL_CondSignal(is->pictq_cond);
            SDL_UnlockMutex(is->pictq_mutex);
        }
    } else if (is->audio_st) {
        /* draw the next audio frame */

        /* if only audio stream, then display the audio bars (better
           than nothing, just to test the implementation */

        /* display picture */
        video_display(is);
    }
    if (show_status) {
        static int64_t last_time;
        int64_t cur_time;
        int aqsize, vqsize, sqsize;
        double av_diff;

        cur_time = av_gettime();
        /* print the status line at most once every 30 ms */
        if (!last_time || (cur_time - last_time) >= 30000) {
            aqsize = 0;
            vqsize = 0;
            sqsize = 0;
            if (is->audio_st)
                aqsize = is->audioq.size;
            if (is->video_st)
                vqsize = is->videoq.size;
            if (is->subtitle_st)
                sqsize = is->subtitleq.size;
            av_diff = 0;
            if (is->audio_st && is->video_st)
                av_diff = get_audio_clock(is) - get_video_clock(is);
            printf("%7.2f A-V:%7.3f s:%3.1f aq=%5dKB vq=%5dKB sq=%5dB f=%"PRId64"/%"PRId64"   \r",
                   get_master_clock(is), av_diff, FFMAX(is->skip_frames-1, 0), aqsize / 1024, vqsize / 1024, sqsize, is->faulty_dts, is->faulty_pts);
            fflush(stdout);
            last_time = cur_time;
        }
    }
}
1295
1296 static void stream_close(VideoState *is)
1297 {
1298     VideoPicture *vp;
1299     int i;
1300     /* XXX: use a special url_shutdown call to abort parse cleanly */
1301     is->abort_request = 1;
1302     SDL_WaitThread(is->parse_tid, NULL);
1303     SDL_WaitThread(is->refresh_tid, NULL);
1304
1305     /* free all pictures */
1306     for(i=0;i<VIDEO_PICTURE_QUEUE_SIZE; i++) {
1307         vp = &is->pictq[i];
1308 #if CONFIG_AVFILTER
1309         if (vp->picref) {
1310             avfilter_unref_buffer(vp->picref);
1311             vp->picref = NULL;
1312         }
1313 #endif
1314         if (vp->bmp) {
1315             SDL_FreeYUVOverlay(vp->bmp);
1316             vp->bmp = NULL;
1317         }
1318     }
1319     SDL_DestroyMutex(is->pictq_mutex);
1320     SDL_DestroyCond(is->pictq_cond);
1321     SDL_DestroyMutex(is->subpq_mutex);
1322     SDL_DestroyCond(is->subpq_cond);
1323 #if !CONFIG_AVFILTER
1324     if (is->img_convert_ctx)
1325         sws_freeContext(is->img_convert_ctx);
1326 #endif
1327     av_free(is);
1328 }
1329
/* Release all global player resources and terminate with status 0. */
static void do_exit(void)
{
    int i;
    if (cur_stream) {
        stream_close(cur_stream);
        cur_stream = NULL;
    }
    /* free the per-media-type codec option contexts */
    for (i = 0; i < AVMEDIA_TYPE_NB; i++)
        av_free(avcodec_opts[i]);
    av_free(avformat_opts);
    av_free(sws_opts);
#if CONFIG_AVFILTER
    avfilter_uninit();
#endif
    if (show_status)
        printf("\n");
    SDL_Quit();
    av_log(NULL, AV_LOG_QUIET, "");
    exit(0);
}
1350
1351 /* allocate a picture (needs to do that in main thread to avoid
1352    potential locking problems */
/* allocate a picture (needs to do that in main thread to avoid
   potential locking problems */
static void alloc_picture(void *opaque)
{
    VideoState *is = opaque;
    VideoPicture *vp;

    vp = &is->pictq[is->pictq_windex];

    /* drop any previous overlay before creating one with the new size */
    if (vp->bmp)
        SDL_FreeYUVOverlay(vp->bmp);

#if CONFIG_AVFILTER
    if (vp->picref)
        avfilter_unref_buffer(vp->picref);
    vp->picref = NULL;

    /* dimensions and format come from the filter graph output */
    vp->width   = is->out_video_filter->inputs[0]->w;
    vp->height  = is->out_video_filter->inputs[0]->h;
    vp->pix_fmt = is->out_video_filter->inputs[0]->format;
#else
    vp->width   = is->video_st->codec->width;
    vp->height  = is->video_st->codec->height;
    vp->pix_fmt = is->video_st->codec->pix_fmt;
#endif

    vp->bmp = SDL_CreateYUVOverlay(vp->width, vp->height,
                                   SDL_YV12_OVERLAY,
                                   screen);
    if (!vp->bmp || vp->bmp->pitches[0] < vp->width) {
        /* SDL allocates a buffer smaller than requested if the video
         * overlay hardware is unable to support the requested size. */
        fprintf(stderr, "Error: the video system does not support an image\n"
                        "size of %dx%d pixels. Try using -lowres or -vf \"scale=w:h\"\n"
                        "to reduce the image size.\n", vp->width, vp->height );
        do_exit();
    }

    /* wake queue_picture(), blocked on pictq_cond waiting for the buffer */
    SDL_LockMutex(is->pictq_mutex);
    vp->allocated = 1;
    SDL_CondSignal(is->pictq_cond);
    SDL_UnlockMutex(is->pictq_mutex);
}
1394
/**
 * Queue one decoded frame for display: wait for a free slot in the
 * picture queue, have the main thread (re)allocate the SDL overlay if
 * needed, copy/convert the frame into it and timestamp the entry.
 *
 * @param pts the dts of the pkt / pts of the frame and guessed if not known
 * @param pos byte position of the frame's packet in the input file
 * @return 0 on success, -1 if the video queue was aborted
 */
static int queue_picture(VideoState *is, AVFrame *src_frame, double pts, int64_t pos)
{
    VideoPicture *vp;
    int dst_pix_fmt;
#if CONFIG_AVFILTER
    AVPicture pict_src;
#endif
    /* wait until we have space to put a new picture */
    SDL_LockMutex(is->pictq_mutex);

    /* queue is full with no refresh pending: raise the frame-skip ratio */
    if(is->pictq_size>=VIDEO_PICTURE_QUEUE_SIZE && !is->refresh)
        is->skip_frames= FFMAX(1.0 - FRAME_SKIP_FACTOR, is->skip_frames * (1.0-FRAME_SKIP_FACTOR));

    while (is->pictq_size >= VIDEO_PICTURE_QUEUE_SIZE &&
           !is->videoq.abort_request) {
        SDL_CondWait(is->pictq_cond, is->pictq_mutex);
    }
    SDL_UnlockMutex(is->pictq_mutex);

    if (is->videoq.abort_request)
        return -1;

    vp = &is->pictq[is->pictq_windex];

    /* alloc or resize hardware picture buffer */
    if (!vp->bmp ||
#if CONFIG_AVFILTER
        vp->width  != is->out_video_filter->inputs[0]->w ||
        vp->height != is->out_video_filter->inputs[0]->h) {
#else
        vp->width != is->video_st->codec->width ||
        vp->height != is->video_st->codec->height) {
#endif
        SDL_Event event;

        vp->allocated = 0;

        /* the allocation must be done in the main thread to avoid
           locking problems */
        event.type = FF_ALLOC_EVENT;
        event.user.data1 = is;
        SDL_PushEvent(&event);

        /* wait until the picture is allocated */
        SDL_LockMutex(is->pictq_mutex);
        while (!vp->allocated && !is->videoq.abort_request) {
            SDL_CondWait(is->pictq_cond, is->pictq_mutex);
        }
        SDL_UnlockMutex(is->pictq_mutex);

        if (is->videoq.abort_request)
            return -1;
    }

    /* if the frame is not skipped, then display it */
    if (vp->bmp) {
        AVPicture pict;
#if CONFIG_AVFILTER
        if(vp->picref)
            avfilter_unref_buffer(vp->picref);
        vp->picref = src_frame->opaque;
#endif

        /* get a pointer on the bitmap */
        SDL_LockYUVOverlay (vp->bmp);

        dst_pix_fmt = PIX_FMT_YUV420P;
        memset(&pict,0,sizeof(AVPicture));
        /* planes 1 and 2 are swapped to map the YV12 overlay's V-then-U
           plane order onto pict's U-then-V layout */
        pict.data[0] = vp->bmp->pixels[0];
        pict.data[1] = vp->bmp->pixels[2];
        pict.data[2] = vp->bmp->pixels[1];

        pict.linesize[0] = vp->bmp->pitches[0];
        pict.linesize[1] = vp->bmp->pitches[2];
        pict.linesize[2] = vp->bmp->pitches[1];

#if CONFIG_AVFILTER
        pict_src.data[0] = src_frame->data[0];
        pict_src.data[1] = src_frame->data[1];
        pict_src.data[2] = src_frame->data[2];

        pict_src.linesize[0] = src_frame->linesize[0];
        pict_src.linesize[1] = src_frame->linesize[1];
        pict_src.linesize[2] = src_frame->linesize[2];

        //FIXME use direct rendering
        av_picture_copy(&pict, &pict_src,
                        vp->pix_fmt, vp->width, vp->height);
#else
        sws_flags = av_get_int(sws_opts, "sws_flags", NULL);
        is->img_convert_ctx = sws_getCachedContext(is->img_convert_ctx,
            vp->width, vp->height, vp->pix_fmt, vp->width, vp->height,
            dst_pix_fmt, sws_flags, NULL, NULL, NULL);
        if (is->img_convert_ctx == NULL) {
            fprintf(stderr, "Cannot initialize the conversion context\n");
            exit(1);
        }
        sws_scale(is->img_convert_ctx, src_frame->data, src_frame->linesize,
                  0, vp->height, pict.data, pict.linesize);
#endif
        /* update the bitmap content */
        SDL_UnlockYUVOverlay(vp->bmp);

        vp->pts = pts;
        vp->pos = pos;

        /* now we can update the picture count */
        if (++is->pictq_windex == VIDEO_PICTURE_QUEUE_SIZE)
            is->pictq_windex = 0;
        SDL_LockMutex(is->pictq_mutex);
        vp->target_clock= compute_target_time(vp->pts, is);

        is->pictq_size++;
        SDL_UnlockMutex(is->pictq_mutex);
    }
    return 0;
}
1516
1517 /**
1518  * compute the exact PTS for the picture if it is omitted in the stream
1519  * @param pts1 the dts of the pkt / pts of the frame
1520  */
1521 static int output_picture2(VideoState *is, AVFrame *src_frame, double pts1, int64_t pos)
1522 {
1523     double frame_delay, pts;
1524
1525     pts = pts1;
1526
1527     if (pts != 0) {
1528         /* update video clock with pts, if present */
1529         is->video_clock = pts;
1530     } else {
1531         pts = is->video_clock;
1532     }
1533     /* update video clock for next frame */
1534     frame_delay = av_q2d(is->video_st->codec->time_base);
1535     /* for MPEG2, the frame can be repeated, so we update the
1536        clock accordingly */
1537     frame_delay += src_frame->repeat_pict * (frame_delay * 0.5);
1538     is->video_clock += frame_delay;
1539
1540 #if defined(DEBUG_SYNC) && 0
1541     printf("frame_type=%c clock=%0.3f pts=%0.3f\n",
1542            av_get_pict_type_char(src_frame->pict_type), pts, pts1);
1543 #endif
1544     return queue_picture(is, src_frame, pts, pos);
1545 }
1546
/* Fetch one packet from the video queue and decode it.
 * Returns 1 when a displayable picture is in 'frame', 0 when no picture
 * should be shown (flush packet, skipped frame, decoder produced
 * nothing), -1 when the queue was aborted. *pts receives the chosen
 * presentation timestamp in stream time-base units. */
static int get_video_frame(VideoState *is, AVFrame *frame, int64_t *pts, AVPacket *pkt)
{
    int len1, got_picture, i;

        if (packet_queue_get(&is->videoq, pkt, 1) < 0)
            return -1;

        /* a flush packet marks a seek: reset decoder and timing state */
        if(pkt->data == flush_pkt.data){
            avcodec_flush_buffers(is->video_st->codec);

            SDL_LockMutex(is->pictq_mutex);
            //Make sure there are no long delay timers (ideally we should just flush the que but thats harder)
            for(i=0; i<VIDEO_PICTURE_QUEUE_SIZE; i++){
                is->pictq[i].target_clock= 0;
            }
            /* wait until the display thread has drained the picture queue */
            while (is->pictq_size && !is->videoq.abort_request) {
                SDL_CondWait(is->pictq_cond, is->pictq_mutex);
            }
            is->video_current_pos= -1;
            SDL_UnlockMutex(is->pictq_mutex);

            is->last_dts_for_fault_detection=
            is->last_pts_for_fault_detection= INT64_MIN;
            is->frame_last_pts= AV_NOPTS_VALUE;
            is->frame_last_delay = 0;
            is->frame_timer = (double)av_gettime() / 1000000.0;
            is->skip_frames= 1;
            is->skip_frames_index= 0;
            return 0;
        }

        /* NOTE: ipts is the PTS of the _first_ picture beginning in
           this packet, if any */
        is->video_st->codec->reordered_opaque= pkt->pts;
        len1 = avcodec_decode_video2(is->video_st->codec,
                                    frame, &got_picture,
                                    pkt);

        if (got_picture) {
            /* count non-monotone dts/pts values to decide below which of
               the two timestamp streams is more trustworthy */
            if(pkt->dts != AV_NOPTS_VALUE){
                is->faulty_dts += pkt->dts <= is->last_dts_for_fault_detection;
                is->last_dts_for_fault_detection= pkt->dts;
            }
            if(frame->reordered_opaque != AV_NOPTS_VALUE){
                is->faulty_pts += frame->reordered_opaque <= is->last_pts_for_fault_detection;
                is->last_pts_for_fault_detection= frame->reordered_opaque;
            }
        }

        /* prefer the reordered pts when forced, when it misbehaved less
           often than dts, or when dts is simply absent */
        if(   (   decoder_reorder_pts==1
               || (decoder_reorder_pts && is->faulty_pts<is->faulty_dts)
               || pkt->dts == AV_NOPTS_VALUE)
           && frame->reordered_opaque != AV_NOPTS_VALUE)
            *pts= frame->reordered_opaque;
        else if(pkt->dts != AV_NOPTS_VALUE)
            *pts= pkt->dts;
        else
            *pts= 0;

//            if (len1 < 0)
//                break;
    if (got_picture){
        /* honour the frame-skip ratio maintained by the display path */
        is->skip_frames_index += 1;
        if(is->skip_frames_index >= is->skip_frames){
            is->skip_frames_index -= FFMAX(is->skip_frames, 1.0);
            return 1;
        }

    }
    return 0;
}
1618
1619 #if CONFIG_AVFILTER
/* Private state of the "ffplay_input" source filter. */
typedef struct {
    VideoState *is;   /* player state supplying decoded video frames */
    AVFrame *frame;   /* scratch frame the decoder writes into */
    int use_dr1;      /* nonzero when decoder buffers come from the graph */
} FilterPriv;
1625
/* get_buffer callback: hand the decoder a buffer allocated by the filter
 * graph so decoded data can flow downstream without a copy (DR1 path). */
static int input_get_buffer(AVCodecContext *codec, AVFrame *pic)
{
    AVFilterContext *ctx = codec->opaque;
    AVFilterBufferRef  *ref;
    int perms = AV_PERM_WRITE;
    int i, w, h, stride[4];
    unsigned edge;

    /* translate the decoder's buffer hints into filter buffer permissions */
    if(pic->buffer_hints & FF_BUFFER_HINTS_VALID) {
        if(pic->buffer_hints & FF_BUFFER_HINTS_READABLE) perms |= AV_PERM_READ;
        if(pic->buffer_hints & FF_BUFFER_HINTS_PRESERVE) perms |= AV_PERM_PRESERVE;
        if(pic->buffer_hints & FF_BUFFER_HINTS_REUSABLE) perms |= AV_PERM_REUSE2;
    }
    if(pic->reference) perms |= AV_PERM_READ | AV_PERM_PRESERVE;

    /* over-allocate: aligned dimensions plus an edge border on all sides
       unless the codec emulates edges itself */
    w = codec->width;
    h = codec->height;
    avcodec_align_dimensions2(codec, &w, &h, stride);
    edge = codec->flags & CODEC_FLAG_EMU_EDGE ? 0 : avcodec_get_edge_width();
    w += edge << 1;
    h += edge << 1;

    if(!(ref = avfilter_get_video_buffer(ctx->outputs[0], perms, w, h)))
        return -1;

    /* the visible picture is the codec size, not the padded size */
    ref->video->w = codec->width;
    ref->video->h = codec->height;
    for(i = 0; i < 4; i ++) {
        unsigned hshift = (i == 1 || i == 2) ? av_pix_fmt_descriptors[ref->format].log2_chroma_w : 0;
        unsigned vshift = (i == 1 || i == 2) ? av_pix_fmt_descriptors[ref->format].log2_chroma_h : 0;

        if (ref->data[i]) {
            /* advance past the edge border so data[] points at the
               visible area of each plane */
            ref->data[i]    += (edge >> hshift) + ((edge * ref->linesize[i]) >> vshift);
        }
        pic->data[i]     = ref->data[i];
        pic->linesize[i] = ref->linesize[i];
    }
    pic->opaque = ref;
    pic->age    = INT_MAX;
    pic->type   = FF_BUFFER_TYPE_USER;
    pic->reordered_opaque = codec->reordered_opaque;
    return 0;
}
1669
1670 static void input_release_buffer(AVCodecContext *codec, AVFrame *pic)
1671 {
1672     memset(pic->data, 0, sizeof(pic->data));
1673     avfilter_unref_buffer(pic->opaque);
1674 }
1675
1676 static int input_reget_buffer(AVCodecContext *codec, AVFrame *pic)
1677 {
1678     AVFilterBufferRef *ref = pic->opaque;
1679
1680     if (pic->data[0] == NULL) {
1681         pic->buffer_hints |= FF_BUFFER_HINTS_READABLE;
1682         return codec->get_buffer(codec, pic);
1683     }
1684
1685     if ((codec->width != ref->video->w) || (codec->height != ref->video->h) ||
1686         (codec->pix_fmt != ref->format)) {
1687         av_log(codec, AV_LOG_ERROR, "Picture properties changed.\n");
1688         return -1;
1689     }
1690
1691     pic->reordered_opaque = codec->reordered_opaque;
1692     return 0;
1693 }
1694
1695 static int input_init(AVFilterContext *ctx, const char *args, void *opaque)
1696 {
1697     FilterPriv *priv = ctx->priv;
1698     AVCodecContext *codec;
1699     if(!opaque) return -1;
1700
1701     priv->is = opaque;
1702     codec    = priv->is->video_st->codec;
1703     codec->opaque = ctx;
1704     if(codec->codec->capabilities & CODEC_CAP_DR1) {
1705         priv->use_dr1 = 1;
1706         codec->get_buffer     = input_get_buffer;
1707         codec->release_buffer = input_release_buffer;
1708         codec->reget_buffer   = input_reget_buffer;
1709     }
1710
1711     priv->frame = avcodec_alloc_frame();
1712
1713     return 0;
1714 }
1715
/* uninit callback: free the scratch frame allocated in input_init() */
static void input_uninit(AVFilterContext *ctx)
{
    FilterPriv *priv = ctx->priv;
    av_free(priv->frame);
}
1721
/* request_frame callback: decode until one displayable frame is
 * available, wrap it in a buffer reference and push it downstream. */
static int input_request_frame(AVFilterLink *link)
{
    FilterPriv *priv = link->src->priv;
    AVFilterBufferRef *picref;
    int64_t pts = 0;
    AVPacket pkt;
    int ret;

    /* loop until get_video_frame() yields a picture (1) or fails (<0);
       packets that produced nothing are released immediately */
    while (!(ret = get_video_frame(priv->is, priv->frame, &pts, &pkt)))
        av_free_packet(&pkt);
    if (ret < 0)
        return -1;

    if(priv->use_dr1) {
        /* DR1: the decoder already wrote into a filter buffer; just ref it */
        picref = avfilter_ref_buffer(priv->frame->opaque, ~0);
    } else {
        /* copy the decoded planes into a freshly allocated filter buffer */
        picref = avfilter_get_video_buffer(link, AV_PERM_WRITE, link->w, link->h);
        av_image_copy(picref->data, picref->linesize,
                      priv->frame->data, priv->frame->linesize,
                      picref->format, link->w, link->h);
    }
    av_free_packet(&pkt);

    /* NOTE(review): pkt.pos is read after av_free_packet(); this relies
       on the pos field being left intact by the free — confirm */
    picref->pts = pts;
    picref->pos = pkt.pos;
    picref->video->pixel_aspect = priv->is->video_st->codec->sample_aspect_ratio;
    avfilter_start_frame(link, picref);
    avfilter_draw_slice(link, 0, link->h, 1);
    avfilter_end_frame(link);

    return 0;
}
1754
1755 static int input_query_formats(AVFilterContext *ctx)
1756 {
1757     FilterPriv *priv = ctx->priv;
1758     enum PixelFormat pix_fmts[] = {
1759         priv->is->video_st->codec->pix_fmt, PIX_FMT_NONE
1760     };
1761
1762     avfilter_set_common_formats(ctx, avfilter_make_format_list(pix_fmts));
1763     return 0;
1764 }
1765
1766 static int input_config_props(AVFilterLink *link)
1767 {
1768     FilterPriv *priv  = link->src->priv;
1769     AVCodecContext *c = priv->is->video_st->codec;
1770
1771     link->w = c->width;
1772     link->h = c->height;
1773
1774     return 0;
1775 }
1776
/* Source filter "ffplay_input": no inputs; one video output whose frames
 * come from the decoder via input_request_frame(). */
static AVFilter input_filter =
{
    .name      = "ffplay_input",

    .priv_size = sizeof(FilterPriv),

    .init      = input_init,
    .uninit    = input_uninit,

    .query_formats = input_query_formats,

    .inputs    = (AVFilterPad[]) {{ .name = NULL }},
    .outputs   = (AVFilterPad[]) {{ .name = "default",
                                    .type = AVMEDIA_TYPE_VIDEO,
                                    .request_frame = input_request_frame,
                                    .config_props  = input_config_props, },
                                  { .name = NULL }},
};
1795
/* end_frame callback of the sink: intentionally empty — the frame stays
 * on the input link's cur_buf and is collected by
 * get_filtered_video_frame(). */
static void output_end_frame(AVFilterLink *link)
{
}
1799
/* The display sink only accepts YUV420P, matching the SDL YV12 overlay
 * the pictures are rendered into. */
static int output_query_formats(AVFilterContext *ctx)
{
    enum PixelFormat pix_fmts[] = { PIX_FMT_YUV420P, PIX_FMT_NONE };

    avfilter_set_common_formats(ctx, avfilter_make_format_list(pix_fmts));
    return 0;
}
1807
1808 static int get_filtered_video_frame(AVFilterContext *ctx, AVFrame *frame,
1809                                     int64_t *pts, int64_t *pos)
1810 {
1811     AVFilterBufferRef *pic;
1812
1813     if(avfilter_request_frame(ctx->inputs[0]))
1814         return -1;
1815     if(!(pic = ctx->inputs[0]->cur_buf))
1816         return -1;
1817     ctx->inputs[0]->cur_buf = NULL;
1818
1819     frame->opaque = pic;
1820     *pts          = pic->pts;
1821     *pos          = pic->pos;
1822
1823     memcpy(frame->data,     pic->data,     sizeof(frame->data));
1824     memcpy(frame->linesize, pic->linesize, sizeof(frame->linesize));
1825
1826     return 1;
1827 }
1828
/* Sink filter "ffplay_output": one readable video input, no outputs;
 * frames are pulled off the input link by get_filtered_video_frame(). */
static AVFilter output_filter =
{
    .name      = "ffplay_output",

    .query_formats = output_query_formats,

    .inputs    = (AVFilterPad[]) {{ .name          = "default",
                                    .type          = AVMEDIA_TYPE_VIDEO,
                                    .end_frame     = output_end_frame,
                                    .min_perms     = AV_PERM_READ, },
                                  { .name = NULL }},
    .outputs   = (AVFilterPad[]) {{ .name = NULL }},
};
1842 #endif  /* CONFIG_AVFILTER */
1843
/* Video decoding thread: pulls decoded frames — optionally through a
 * libavfilter graph built here — and hands each one to the display queue
 * via output_picture2(). Runs until the video queue is aborted or an
 * error occurs. */
static int video_thread(void *arg)
{
    VideoState *is = arg;
    AVFrame *frame= avcodec_alloc_frame();
    int64_t pts_int;
    double pts;
    int ret;

#if CONFIG_AVFILTER
    int64_t pos;
    char sws_flags_str[128];
    AVFilterContext *filt_src = NULL, *filt_out = NULL;
    AVFilterGraph *graph = av_mallocz(sizeof(AVFilterGraph));
    /* forward the global sws flags to auto-inserted scale filters */
    snprintf(sws_flags_str, sizeof(sws_flags_str), "flags=%d", sws_flags);
    graph->scale_sws_opts = av_strdup(sws_flags_str);

    /* "src" feeds decoded frames in; "out" is drained by this thread */
    if (avfilter_open(&filt_src, &input_filter,  "src") < 0) goto the_end;
    if (avfilter_open(&filt_out, &output_filter, "out") < 0) goto the_end;

    if(avfilter_init_filter(filt_src, NULL, is))             goto the_end;
    if(avfilter_init_filter(filt_out, NULL, frame))          goto the_end;


    if(vfilters) {
        /* user-supplied -vf chain: parse it between src and out */
        AVFilterInOut *outputs = av_malloc(sizeof(AVFilterInOut));
        AVFilterInOut *inputs  = av_malloc(sizeof(AVFilterInOut));

        outputs->name    = av_strdup("in");
        outputs->filter  = filt_src;
        outputs->pad_idx = 0;
        outputs->next    = NULL;

        inputs->name    = av_strdup("out");
        inputs->filter  = filt_out;
        inputs->pad_idx = 0;
        inputs->next    = NULL;

        if (avfilter_graph_parse(graph, vfilters, inputs, outputs, NULL) < 0)
            goto the_end;
        av_freep(&vfilters);
    } else {
        /* no user filter chain: connect source directly to sink */
        if(avfilter_link(filt_src, 0, filt_out, 0) < 0)          goto the_end;
    }
    avfilter_graph_add_filter(graph, filt_src);
    avfilter_graph_add_filter(graph, filt_out);

    if(avfilter_graph_check_validity(graph, NULL))           goto the_end;
    if(avfilter_graph_config_formats(graph, NULL))           goto the_end;
    if(avfilter_graph_config_links(graph, NULL))             goto the_end;

    is->out_video_filter = filt_out;
#endif

    for(;;) {
#if !CONFIG_AVFILTER
        AVPacket pkt;
#endif
        /* busy-wait in 10 ms steps while playback is paused */
        while (is->paused && !is->videoq.abort_request)
            SDL_Delay(10);
#if CONFIG_AVFILTER
        ret = get_filtered_video_frame(filt_out, frame, &pts_int, &pos);
#else
        ret = get_video_frame(is, frame, &pts_int, &pkt);
#endif

        if (ret < 0) goto the_end;

        /* ret == 0: no frame produced this round, try again */
        if (!ret)
            continue;

        pts = pts_int*av_q2d(is->video_st->time_base);

#if CONFIG_AVFILTER
        ret = output_picture2(is, frame, pts, pos);
#else
        ret = output_picture2(is, frame, pts,  pkt.pos);
        av_free_packet(&pkt);
#endif
        if (ret < 0)
            goto the_end;

        /* in frame-step mode, pause again after queueing one frame */
        if (step)
            if (cur_stream)
                stream_pause(cur_stream);
    }
 the_end:
#if CONFIG_AVFILTER
    avfilter_graph_destroy(graph);
    av_freep(&graph);
#endif
    av_free(frame);
    return 0;
}
1937
/* Subtitle decoding thread: reads packets from the subtitle queue,
 * decodes them, converts each rect's palette from RGBA to YUVA for the
 * SDL overlay, and stores the result in the subpicture queue. */
static int subtitle_thread(void *arg)
{
    VideoState *is = arg;
    SubPicture *sp;
    AVPacket pkt1, *pkt = &pkt1;
    int len1, got_subtitle;
    double pts;
    int i, j;
    int r, g, b, y, u, v, a;

    for(;;) {
        /* busy-wait in 10 ms steps while playback is paused */
        while (is->paused && !is->subtitleq.abort_request) {
            SDL_Delay(10);
        }
        if (packet_queue_get(&is->subtitleq, pkt, 1) < 0)
            break;

        /* a flush packet (queued after a seek) resets the decoder */
        if(pkt->data == flush_pkt.data){
            avcodec_flush_buffers(is->subtitle_st->codec);
            continue;
        }
        /* block until there is room in the subpicture queue */
        SDL_LockMutex(is->subpq_mutex);
        while (is->subpq_size >= SUBPICTURE_QUEUE_SIZE &&
               !is->subtitleq.abort_request) {
            SDL_CondWait(is->subpq_cond, is->subpq_mutex);
        }
        SDL_UnlockMutex(is->subpq_mutex);

        if (is->subtitleq.abort_request)
            goto the_end;

        sp = &is->subpq[is->subpq_windex];

       /* NOTE: ipts is the PTS of the _first_ picture beginning in
           this packet, if any */
        pts = 0;
        if (pkt->pts != AV_NOPTS_VALUE)
            pts = av_q2d(is->subtitle_st->time_base)*pkt->pts;

        len1 = avcodec_decode_subtitle2(is->subtitle_st->codec,
                                    &sp->sub, &got_subtitle,
                                    pkt);
//            if (len1 < 0)
//                break;
        /* only bitmap subtitles (format == 0) can be blended here */
        if (got_subtitle && sp->sub.format == 0) {
            sp->pts = pts;

            /* convert each rect's RGBA palette to YUVA in place */
            for (i = 0; i < sp->sub.num_rects; i++)
            {
                for (j = 0; j < sp->sub.rects[i]->nb_colors; j++)
                {
                    RGBA_IN(r, g, b, a, (uint32_t*)sp->sub.rects[i]->pict.data[1] + j);
                    y = RGB_TO_Y_CCIR(r, g, b);
                    u = RGB_TO_U_CCIR(r, g, b, 0);
                    v = RGB_TO_V_CCIR(r, g, b, 0);
                    YUVA_OUT((uint32_t*)sp->sub.rects[i]->pict.data[1] + j, y, u, v, a);
                }
            }

            /* now we can update the picture count */
            if (++is->subpq_windex == SUBPICTURE_QUEUE_SIZE)
                is->subpq_windex = 0;
            SDL_LockMutex(is->subpq_mutex);
            is->subpq_size++;
            SDL_UnlockMutex(is->subpq_mutex);
        }
        av_free_packet(pkt);
//        if (step)
//            if (cur_stream)
//                stream_pause(cur_stream);
    }
 the_end:
    return 0;
}
2012
2013 /* copy samples for viewing in editor window */
2014 static void update_sample_display(VideoState *is, short *samples, int samples_size)
2015 {
2016     int size, len, channels;
2017
2018     channels = is->audio_st->codec->channels;
2019
2020     size = samples_size / sizeof(short);
2021     while (size > 0) {
2022         len = SAMPLE_ARRAY_SIZE - is->sample_array_index;
2023         if (len > size)
2024             len = size;
2025         memcpy(is->sample_array + is->sample_array_index, samples, len * sizeof(short));
2026         samples += len;
2027         is->sample_array_index += len;
2028         if (is->sample_array_index >= SAMPLE_ARRAY_SIZE)
2029             is->sample_array_index = 0;
2030         size -= len;
2031     }
2032 }
2033
2034 /* return the new audio buffer size (samples can be added or deleted
2035    to get better sync if video or external master clock) */
2036 static int synchronize_audio(VideoState *is, short *samples,
2037                              int samples_size1, double pts)
2038 {
2039     int n, samples_size;
2040     double ref_clock;
2041
2042     n = 2 * is->audio_st->codec->channels;
2043     samples_size = samples_size1;
2044
2045     /* if not master, then we try to remove or add samples to correct the clock */
2046     if (((is->av_sync_type == AV_SYNC_VIDEO_MASTER && is->video_st) ||
2047          is->av_sync_type == AV_SYNC_EXTERNAL_CLOCK)) {
2048         double diff, avg_diff;
2049         int wanted_size, min_size, max_size, nb_samples;
2050
2051         ref_clock = get_master_clock(is);
2052         diff = get_audio_clock(is) - ref_clock;
2053
2054         if (diff < AV_NOSYNC_THRESHOLD) {
2055             is->audio_diff_cum = diff + is->audio_diff_avg_coef * is->audio_diff_cum;
2056             if (is->audio_diff_avg_count < AUDIO_DIFF_AVG_NB) {
2057                 /* not enough measures to have a correct estimate */
2058                 is->audio_diff_avg_count++;
2059             } else {
2060                 /* estimate the A-V difference */
2061                 avg_diff = is->audio_diff_cum * (1.0 - is->audio_diff_avg_coef);
2062
2063                 if (fabs(avg_diff) >= is->audio_diff_threshold) {
2064                     wanted_size = samples_size + ((int)(diff * is->audio_st->codec->sample_rate) * n);
2065                     nb_samples = samples_size / n;
2066
2067                     min_size = ((nb_samples * (100 - SAMPLE_CORRECTION_PERCENT_MAX)) / 100) * n;
2068                     max_size = ((nb_samples * (100 + SAMPLE_CORRECTION_PERCENT_MAX)) / 100) * n;
2069                     if (wanted_size < min_size)
2070                         wanted_size = min_size;
2071                     else if (wanted_size > max_size)
2072                         wanted_size = max_size;
2073
2074                     /* add or remove samples to correction the synchro */
2075                     if (wanted_size < samples_size) {
2076                         /* remove samples */
2077                         samples_size = wanted_size;
2078                     } else if (wanted_size > samples_size) {
2079                         uint8_t *samples_end, *q;
2080                         int nb;
2081
2082                         /* add samples */
2083                         nb = (samples_size - wanted_size);
2084                         samples_end = (uint8_t *)samples + samples_size - n;
2085                         q = samples_end + n;
2086                         while (nb > 0) {
2087                             memcpy(q, samples_end, n);
2088                             q += n;
2089                             nb -= n;
2090                         }
2091                         samples_size = wanted_size;
2092                     }
2093                 }
2094 #if 0
2095                 printf("diff=%f adiff=%f sample_diff=%d apts=%0.3f vpts=%0.3f %f\n",
2096                        diff, avg_diff, samples_size - samples_size1,
2097                        is->audio_clock, is->video_clock, is->audio_diff_threshold);
2098 #endif
2099             }
2100         } else {
2101             /* too big difference : may be initial PTS errors, so
2102                reset A-V filter */
2103             is->audio_diff_avg_count = 0;
2104             is->audio_diff_cum = 0;
2105         }
2106     }
2107
2108     return samples_size;
2109 }
2110
/* Decode one audio frame and return its uncompressed size in bytes.
 *
 * The decoded data — converted to interleaved s16 if the decoder emits
 * another sample format — is left in is->audio_buf; *pts_ptr receives
 * the presentation time of the frame, and is->audio_clock is advanced
 * by the duration of the decoded data.
 *
 * Returns -1 when paused, aborted, or the packet queue is drained.
 */
static int audio_decode_frame(VideoState *is, double *pts_ptr)
{
    AVPacket *pkt_temp = &is->audio_pkt_temp;
    AVPacket *pkt = &is->audio_pkt;
    AVCodecContext *dec= is->audio_st->codec;
    int n, len1, data_size;
    double pts;

    for(;;) {
        /* NOTE: the audio packet can contain several frames */
        while (pkt_temp->size > 0) {
            data_size = sizeof(is->audio_buf1);
            len1 = avcodec_decode_audio3(dec,
                                        (int16_t *)is->audio_buf1, &data_size,
                                        pkt_temp);
            if (len1 < 0) {
                /* if error, we skip the frame */
                pkt_temp->size = 0;
                break;
            }

            /* advance past the consumed part of the packet */
            pkt_temp->data += len1;
            pkt_temp->size -= len1;
            if (data_size <= 0)
                continue;

            /* (re)create the converter when the decoder's sample format
               differs from the s16 format the SDL device was opened with */
            if (dec->sample_fmt != is->audio_src_fmt) {
                if (is->reformat_ctx)
                    av_audio_convert_free(is->reformat_ctx);
                is->reformat_ctx= av_audio_convert_alloc(SAMPLE_FMT_S16, 1,
                                                         dec->sample_fmt, 1, NULL, 0);
                if (!is->reformat_ctx) {
                    fprintf(stderr, "Cannot convert %s sample format to %s sample format\n",
                        avcodec_get_sample_fmt_name(dec->sample_fmt),
                        avcodec_get_sample_fmt_name(SAMPLE_FMT_S16));
                        break;
                }
                is->audio_src_fmt= dec->sample_fmt;
            }

            if (is->reformat_ctx) {
                /* convert audio_buf1 -> audio_buf2 (interleaved s16) */
                const void *ibuf[6]= {is->audio_buf1};
                void *obuf[6]= {is->audio_buf2};
                int istride[6]= {av_get_bits_per_sample_format(dec->sample_fmt)/8};
                int ostride[6]= {2};
                int len= data_size/istride[0];
                if (av_audio_convert(is->reformat_ctx, obuf, ostride, ibuf, istride, len)<0) {
                    printf("av_audio_convert() failed\n");
                    break;
                }
                is->audio_buf= is->audio_buf2;
                /* FIXME: existing code assume that data_size equals framesize*channels*2
                          remove this legacy cruft */
                data_size= len*2;
            }else{
                is->audio_buf= is->audio_buf1;
            }

            /* if no pts, then compute it */
            pts = is->audio_clock;
            *pts_ptr = pts;
            n = 2 * dec->channels;
            /* advance the audio clock by the duration of the decoded data */
            is->audio_clock += (double)data_size /
                (double)(n * dec->sample_rate);
#if defined(DEBUG_SYNC)
            {
                static double last_clock;
                printf("audio: delay=%0.3f clock=%0.3f pts=%0.3f\n",
                       is->audio_clock - last_clock,
                       is->audio_clock, pts);
                last_clock = is->audio_clock;
            }
#endif
            return data_size;
        }

        /* free the current packet */
        if (pkt->data)
            av_free_packet(pkt);

        if (is->paused || is->audioq.abort_request) {
            return -1;
        }

        /* read next packet */
        if (packet_queue_get(&is->audioq, pkt, 1) < 0)
            return -1;
        /* a flush packet (queued after a seek) resets the decoder */
        if(pkt->data == flush_pkt.data){
            avcodec_flush_buffers(dec);
            continue;
        }

        pkt_temp->data = pkt->data;
        pkt_temp->size = pkt->size;

        /* if update the audio clock with the pts */
        if (pkt->pts != AV_NOPTS_VALUE) {
            is->audio_clock = av_q2d(is->audio_st->time_base)*pkt->pts;
        }
    }
}
2213
/* Get the amount of decoded audio data still queued for output, in
   bytes (not samples, despite the historical comment). With SDL we
   cannot know precisely how much of the hardware buffer was played. */
static int audio_write_get_buf_size(VideoState *is)
{
    return is->audio_buf_size - is->audio_buf_index;
}
2220
2221
2222 /* prepare a new audio buffer */
2223 static void sdl_audio_callback(void *opaque, Uint8 *stream, int len)
2224 {
2225     VideoState *is = opaque;
2226     int audio_size, len1;
2227     double pts;
2228
2229     audio_callback_time = av_gettime();
2230
2231     while (len > 0) {
2232         if (is->audio_buf_index >= is->audio_buf_size) {
2233            audio_size = audio_decode_frame(is, &pts);
2234            if (audio_size < 0) {
2235                 /* if error, just output silence */
2236                is->audio_buf = is->audio_buf1;
2237                is->audio_buf_size = 1024;
2238                memset(is->audio_buf, 0, is->audio_buf_size);
2239            } else {
2240                if (is->show_audio)
2241                    update_sample_display(is, (int16_t *)is->audio_buf, audio_size);
2242                audio_size = synchronize_audio(is, (int16_t *)is->audio_buf, audio_size,
2243                                               pts);
2244                is->audio_buf_size = audio_size;
2245            }
2246            is->audio_buf_index = 0;
2247         }
2248         len1 = is->audio_buf_size - is->audio_buf_index;
2249         if (len1 > len)
2250             len1 = len;
2251         memcpy(stream, (uint8_t *)is->audio_buf + is->audio_buf_index, len1);
2252         len -= len1;
2253         stream += len1;
2254         is->audio_buf_index += len1;
2255     }
2256 }
2257
/* Open the stream component (audio/video/subtitle) at stream_index and
 * start its consumer: the SDL audio device for audio, a dedicated SDL
 * thread for video and subtitles. Return 0 if OK, -1 on failure. */
static int stream_component_open(VideoState *is, int stream_index)
{
    AVFormatContext *ic = is->ic;
    AVCodecContext *avctx;
    AVCodec *codec;
    SDL_AudioSpec wanted_spec, spec;

    if (stream_index < 0 || stream_index >= ic->nb_streams)
        return -1;
    avctx = ic->streams[stream_index]->codec;

    /* prepare audio output */
    if (avctx->codec_type == AVMEDIA_TYPE_AUDIO) {
        /* ask the decoder for at most 2 channels (SDL output) */
        if (avctx->channels > 0) {
            avctx->request_channels = FFMIN(2, avctx->channels);
        } else {
            avctx->request_channels = 2;
        }
    }

    codec = avcodec_find_decoder(avctx->codec_id);
    /* apply the command-line decoding options */
    avctx->debug_mv = debug_mv;
    avctx->debug = debug;
    avctx->workaround_bugs = workaround_bugs;
    avctx->lowres = lowres;
    if(lowres) avctx->flags |= CODEC_FLAG_EMU_EDGE;
    avctx->idct_algo= idct;
    if(fast) avctx->flags2 |= CODEC_FLAG2_FAST;
    avctx->skip_frame= skip_frame;
    avctx->skip_idct= skip_idct;
    avctx->skip_loop_filter= skip_loop_filter;
    avctx->error_recognition= error_recognition;
    avctx->error_concealment= error_concealment;
    avcodec_thread_init(avctx, thread_count);

    set_context_opts(avctx, avcodec_opts[avctx->codec_type], 0);

    if (!codec ||
        avcodec_open(avctx, codec) < 0)
        return -1;

    /* prepare audio output */
    if (avctx->codec_type == AVMEDIA_TYPE_AUDIO) {
        wanted_spec.freq = avctx->sample_rate;
        wanted_spec.format = AUDIO_S16SYS;
        wanted_spec.channels = avctx->channels;
        wanted_spec.silence = 0;
        wanted_spec.samples = SDL_AUDIO_BUFFER_SIZE;
        wanted_spec.callback = sdl_audio_callback;
        wanted_spec.userdata = is;
        if (SDL_OpenAudio(&wanted_spec, &spec) < 0) {
            fprintf(stderr, "SDL_OpenAudio: %s\n", SDL_GetError());
            return -1;
        }
        is->audio_hw_buf_size = spec.size;
        is->audio_src_fmt= SAMPLE_FMT_S16;
    }

    ic->streams[stream_index]->discard = AVDISCARD_DEFAULT;
    switch(avctx->codec_type) {
    case AVMEDIA_TYPE_AUDIO:
        is->audio_stream = stream_index;
        is->audio_st = ic->streams[stream_index];
        is->audio_buf_size = 0;
        is->audio_buf_index = 0;

        /* init averaging filter */
        is->audio_diff_avg_coef = exp(log(0.01) / AUDIO_DIFF_AVG_NB);
        is->audio_diff_avg_count = 0;
        /* since we do not have a precise anough audio fifo fullness,
           we correct audio sync only if larger than this threshold */
        is->audio_diff_threshold = 2.0 * SDL_AUDIO_BUFFER_SIZE / avctx->sample_rate;

        memset(&is->audio_pkt, 0, sizeof(is->audio_pkt));
        packet_queue_init(&is->audioq);
        SDL_PauseAudio(0);
        break;
    case AVMEDIA_TYPE_VIDEO:
        is->video_stream = stream_index;
        is->video_st = ic->streams[stream_index];

//        is->video_current_pts_time = av_gettime();

        packet_queue_init(&is->videoq);
        is->video_tid = SDL_CreateThread(video_thread, is);
        break;
    case AVMEDIA_TYPE_SUBTITLE:
        is->subtitle_stream = stream_index;
        is->subtitle_st = ic->streams[stream_index];
        packet_queue_init(&is->subtitleq);

        is->subtitle_tid = SDL_CreateThread(subtitle_thread, is);
        break;
    default:
        break;
    }
    return 0;
}
2357
/* Close the stream component at stream_index: abort its packet queue,
 * join the decoding thread (or close the SDL audio device), and release
 * the codec. Counterpart of stream_component_open(). */
static void stream_component_close(VideoState *is, int stream_index)
{
    AVFormatContext *ic = is->ic;
    AVCodecContext *avctx;

    if (stream_index < 0 || stream_index >= ic->nb_streams)
        return;
    avctx = ic->streams[stream_index]->codec;

    switch(avctx->codec_type) {
    case AVMEDIA_TYPE_AUDIO:
        packet_queue_abort(&is->audioq);

        SDL_CloseAudio();

        packet_queue_end(&is->audioq);
        if (is->reformat_ctx)
            av_audio_convert_free(is->reformat_ctx);
        is->reformat_ctx = NULL;
        break;
    case AVMEDIA_TYPE_VIDEO:
        packet_queue_abort(&is->videoq);

        /* note: we also signal this mutex to make sure we deblock the
           video thread in all cases */
        SDL_LockMutex(is->pictq_mutex);
        SDL_CondSignal(is->pictq_cond);
        SDL_UnlockMutex(is->pictq_mutex);

        SDL_WaitThread(is->video_tid, NULL);

        packet_queue_end(&is->videoq);
        break;
    case AVMEDIA_TYPE_SUBTITLE:
        packet_queue_abort(&is->subtitleq);

        /* note: we also signal this mutex to make sure we deblock the
           video thread in all cases */
        SDL_LockMutex(is->subpq_mutex);
        is->subtitle_stream_changed = 1;

        SDL_CondSignal(is->subpq_cond);
        SDL_UnlockMutex(is->subpq_mutex);

        SDL_WaitThread(is->subtitle_tid, NULL);

        packet_queue_end(&is->subtitleq);
        break;
    default:
        break;
    }

    ic->streams[stream_index]->discard = AVDISCARD_ALL;
    avcodec_close(avctx);
    /* clear the per-type stream references */
    switch(avctx->codec_type) {
    case AVMEDIA_TYPE_AUDIO:
        is->audio_st = NULL;
        is->audio_stream = -1;
        break;
    case AVMEDIA_TYPE_VIDEO:
        is->video_st = NULL;
        is->video_stream = -1;
        break;
    case AVMEDIA_TYPE_SUBTITLE:
        is->subtitle_st = NULL;
        is->subtitle_stream = -1;
        break;
    default:
        break;
    }
}
2429
2430 /* since we have only one decoding thread, we can use a global
2431    variable instead of a thread local variable */
2432 static VideoState *global_video_state;
2433
2434 static int decode_interrupt_cb(void)
2435 {
2436     return (global_video_state && global_video_state->abort_request);
2437 }
2438
/* Demuxing thread: gets the stream from the disk or the network, opens
 * the selected audio/video/subtitle streams, then loops reading packets
 * and dispatching them to the per-stream packet queues. Also executes
 * seek requests posted by the event loop. */
static int decode_thread(void *arg)
{
    VideoState *is = arg;
    AVFormatContext *ic;
    int err, i, ret;
    int st_index[AVMEDIA_TYPE_NB];
    int st_count[AVMEDIA_TYPE_NB]={0};
    int st_best_packet_count[AVMEDIA_TYPE_NB];
    AVPacket pkt1, *pkt = &pkt1;
    AVFormatParameters params, *ap = &params;
    int eof=0;
    int pkt_in_play_range = 0;

    ic = avformat_alloc_context();

    memset(st_index, -1, sizeof(st_index));
    memset(st_best_packet_count, -1, sizeof(st_best_packet_count));
    is->video_stream = -1;
    is->audio_stream = -1;
    is->subtitle_stream = -1;

    /* let blocking I/O be interrupted by a quit request */
    global_video_state = is;
    url_set_interrupt_cb(decode_interrupt_cb);

    memset(ap, 0, sizeof(*ap));

    ap->prealloced_context = 1;
    ap->width = frame_width;
    ap->height= frame_height;
    ap->time_base= (AVRational){1, 25};
    ap->pix_fmt = frame_pix_fmt;

    set_context_opts(ic, avformat_opts, AV_OPT_FLAG_DECODING_PARAM);

    err = av_open_input_file(&ic, is->filename, is->iformat, 0, ap);
    if (err < 0) {
        print_error(is->filename, err);
        ret = -1;
        goto fail;
    }
    is->ic = ic;

    if(genpts)
        ic->flags |= AVFMT_FLAG_GENPTS;

    err = av_find_stream_info(ic);
    if (err < 0) {
        fprintf(stderr, "%s: could not find codec parameters\n", is->filename);
        ret = -1;
        goto fail;
    }
    if(ic->pb)
        ic->pb->eof_reached= 0; //FIXME hack, ffplay maybe should not use url_feof() to test for the end

    if(seek_by_bytes<0)
        seek_by_bytes= !!(ic->iformat->flags & AVFMT_TS_DISCONT);

    /* if seeking requested, we execute it */
    if (start_time != AV_NOPTS_VALUE) {
        int64_t timestamp;

        timestamp = start_time;
        /* add the stream start time */
        if (ic->start_time != AV_NOPTS_VALUE)
            timestamp += ic->start_time;
        ret = avformat_seek_file(ic, -1, INT64_MIN, timestamp, INT64_MAX, 0);
        if (ret < 0) {
            fprintf(stderr, "%s: could not seek to position %0.3f\n",
                    is->filename, (double)timestamp / AV_TIME_BASE);
        }
    }

    /* per media type, pick the wanted stream index (or the stream with
       the most packets seen during probing) */
    for(i = 0; i < ic->nb_streams; i++) {
        AVStream *st= ic->streams[i];
        AVCodecContext *avctx = st->codec;
        ic->streams[i]->discard = AVDISCARD_ALL;
        if(avctx->codec_type >= (unsigned)AVMEDIA_TYPE_NB)
            continue;
        if(st_count[avctx->codec_type]++ != wanted_stream[avctx->codec_type] && wanted_stream[avctx->codec_type] >= 0)
            continue;

        if(st_best_packet_count[avctx->codec_type] >= st->codec_info_nb_frames)
            continue;
        st_best_packet_count[avctx->codec_type]= st->codec_info_nb_frames;

        switch(avctx->codec_type) {
        case AVMEDIA_TYPE_AUDIO:
            if (!audio_disable)
                st_index[AVMEDIA_TYPE_AUDIO] = i;
            break;
        case AVMEDIA_TYPE_VIDEO:
        case AVMEDIA_TYPE_SUBTITLE:
            if (!video_disable)
                st_index[avctx->codec_type] = i;
            break;
        default:
            break;
        }
    }
    if (show_status) {
        dump_format(ic, 0, is->filename, 0);
    }

    /* open the streams */
    if (st_index[AVMEDIA_TYPE_AUDIO] >= 0) {
        stream_component_open(is, st_index[AVMEDIA_TYPE_AUDIO]);
    }

    ret=-1;
    if (st_index[AVMEDIA_TYPE_VIDEO] >= 0) {
        ret= stream_component_open(is, st_index[AVMEDIA_TYPE_VIDEO]);
    }
    is->refresh_tid = SDL_CreateThread(refresh_thread, is);
    if(ret<0) {
        /* no video: show the audio waveform/spectrum instead */
        if (!display_disable)
            is->show_audio = 2;
    }

    if (st_index[AVMEDIA_TYPE_SUBTITLE] >= 0) {
        stream_component_open(is, st_index[AVMEDIA_TYPE_SUBTITLE]);
    }

    if (is->video_stream < 0 && is->audio_stream < 0) {
        fprintf(stderr, "%s: could not open codecs\n", is->filename);
        ret = -1;
        goto fail;
    }

    /* main demuxing loop */
    for(;;) {
        if (is->abort_request)
            break;
        if (is->paused != is->last_paused) {
            is->last_paused = is->paused;
            if (is->paused)
                is->read_pause_return= av_read_pause(ic);
            else
                av_read_play(ic);
        }
#if CONFIG_RTSP_DEMUXER
        if (is->paused && !strcmp(ic->iformat->name, "rtsp")) {
            /* wait 10 ms to avoid trying to get another packet */
            /* XXX: horrible */
            SDL_Delay(10);
            continue;
        }
#endif
        if (is->seek_req) {
            int64_t seek_target= is->seek_pos;
            int64_t seek_min= is->seek_rel > 0 ? seek_target - is->seek_rel + 2: INT64_MIN;
            int64_t seek_max= is->seek_rel < 0 ? seek_target - is->seek_rel - 2: INT64_MAX;
//FIXME the +-2 is due to rounding being not done in the correct direction in generation
//      of the seek_pos/seek_rel variables

            ret = avformat_seek_file(is->ic, -1, seek_min, seek_target, seek_max, is->seek_flags);
            if (ret < 0) {
                fprintf(stderr, "%s: error while seeking\n", is->ic->filename);
            }else{
                /* flush all queues and inject a flush packet so the
                   decoders reset their state */
                if (is->audio_stream >= 0) {
                    packet_queue_flush(&is->audioq);
                    packet_queue_put(&is->audioq, &flush_pkt);
                }
                if (is->subtitle_stream >= 0) {
                    packet_queue_flush(&is->subtitleq);
                    packet_queue_put(&is->subtitleq, &flush_pkt);
                }
                if (is->video_stream >= 0) {
                    packet_queue_flush(&is->videoq);
                    packet_queue_put(&is->videoq, &flush_pkt);
                }
            }
            is->seek_req = 0;
            eof= 0;
        }

        /* if the queue are full, no need to read more */
        if (   is->audioq.size + is->videoq.size + is->subtitleq.size > MAX_QUEUE_SIZE
            || (   (is->audioq   .size  > MIN_AUDIOQ_SIZE || is->audio_stream<0)
                && (is->videoq   .nb_packets > MIN_FRAMES || is->video_stream<0)
                && (is->subtitleq.nb_packets > MIN_FRAMES || is->subtitle_stream<0))) {
            /* wait 10 ms */
            SDL_Delay(10);
            continue;
        }
        if(url_feof(ic->pb) || eof) {
            if(is->video_stream >= 0){
                /* feed a null packet to drain the video decoder */
                av_init_packet(pkt);
                pkt->data=NULL;
                pkt->size=0;
                pkt->stream_index= is->video_stream;
                packet_queue_put(&is->videoq, pkt);
            }
            SDL_Delay(10);
            if(is->audioq.size + is->videoq.size + is->subtitleq.size ==0){
                /* everything played out: loop again, exit, or keep waiting */
                if(loop!=1 && (!loop || --loop)){
                    stream_seek(cur_stream, start_time != AV_NOPTS_VALUE ? start_time : 0, 0, 0);
                }else if(autoexit){
                    ret=AVERROR_EOF;
                    goto fail;
                }
            }
            continue;
        }
        ret = av_read_frame(ic, pkt);
        if (ret < 0) {
            if (ret == AVERROR_EOF)
                eof=1;
            if (url_ferror(ic->pb))
                break;
            SDL_Delay(100); /* wait for user event */
            continue;
        }
        /* check if packet is in play range specified by user, then queue, otherwise discard */
        pkt_in_play_range = duration == AV_NOPTS_VALUE ||
                (pkt->pts - ic->streams[pkt->stream_index]->start_time) *
                av_q2d(ic->streams[pkt->stream_index]->time_base) -
                (double)(start_time != AV_NOPTS_VALUE ? start_time : 0)/1000000
                <= ((double)duration/1000000);
        if (pkt->stream_index == is->audio_stream && pkt_in_play_range) {
            packet_queue_put(&is->audioq, pkt);
        } else if (pkt->stream_index == is->video_stream && pkt_in_play_range) {
            packet_queue_put(&is->videoq, pkt);
        } else if (pkt->stream_index == is->subtitle_stream && pkt_in_play_range) {
            packet_queue_put(&is->subtitleq, pkt);
        } else {
            av_free_packet(pkt);
        }
    }
    /* wait until the end */
    while (!is->abort_request) {
        SDL_Delay(100);
    }

    ret = 0;
 fail:
    /* disable interrupting */
    global_video_state = NULL;

    /* close each stream */
    if (is->audio_stream >= 0)
        stream_component_close(is, is->audio_stream);
    if (is->video_stream >= 0)
        stream_component_close(is, is->video_stream);
    if (is->subtitle_stream >= 0)
        stream_component_close(is, is->subtitle_stream);
    if (is->ic) {
        av_close_input_file(is->ic);
        is->ic = NULL; /* safety */
    }
    url_set_interrupt_cb(NULL);

    if (ret != 0) {
        /* notify the event loop that we are quitting */
        SDL_Event event;

        event.type = FF_QUIT_EVENT;
        event.user.data1 = is;
        SDL_PushEvent(&event);
    }
    return 0;
}
2699
2700 static VideoState *stream_open(const char *filename, AVInputFormat *iformat)
2701 {
2702     VideoState *is;
2703
2704     is = av_mallocz(sizeof(VideoState));
2705     if (!is)
2706         return NULL;
2707     av_strlcpy(is->filename, filename, sizeof(is->filename));
2708     is->iformat = iformat;
2709     is->ytop = 0;
2710     is->xleft = 0;
2711
2712     /* start video display */
2713     is->pictq_mutex = SDL_CreateMutex();
2714     is->pictq_cond = SDL_CreateCond();
2715
2716     is->subpq_mutex = SDL_CreateMutex();
2717     is->subpq_cond = SDL_CreateCond();
2718
2719     is->av_sync_type = av_sync_type;
2720     is->parse_tid = SDL_CreateThread(decode_thread, is);
2721     if (!is->parse_tid) {
2722         av_free(is);
2723         return NULL;
2724     }
2725     return is;
2726 }
2727
/* Switch to the next usable stream of the given media type (triggered by
 * the 'a'/'v'/'t' key handlers), closing the currently active one. */
static void stream_cycle_channel(VideoState *is, int codec_type)
{
    AVFormatContext *ic = is->ic;
    int start_index, stream_index;
    AVStream *st;

    if (codec_type == AVMEDIA_TYPE_VIDEO)
        start_index = is->video_stream;
    else if (codec_type == AVMEDIA_TYPE_AUDIO)
        start_index = is->audio_stream;
    else
        start_index = is->subtitle_stream;
    /* subtitles may legitimately be disabled (index -1), in which case
     * cycling means "enable the first subtitle stream"; for audio/video a
     * negative index means there is nothing to cycle */
    if (start_index < (codec_type == AVMEDIA_TYPE_SUBTITLE ? -1 : 0))
        return;
    stream_index = start_index;
    for(;;) {
        if (++stream_index >= is->ic->nb_streams)
        {
            if (codec_type == AVMEDIA_TYPE_SUBTITLE)
            {
                /* wrapped past the last stream: for subtitles this disables
                 * them (-1) instead of wrapping back to stream 0 */
                stream_index = -1;
                goto the_end;
            } else
                stream_index = 0;
        }
        /* completed a full cycle without finding another usable stream */
        if (stream_index == start_index)
            return;
        st = ic->streams[stream_index];
        if (st->codec->codec_type == codec_type) {
            /* check that parameters are OK */
            switch(codec_type) {
            case AVMEDIA_TYPE_AUDIO:
                /* audio needs valid sample rate and channel count */
                if (st->codec->sample_rate != 0 &&
                    st->codec->channels != 0)
                    goto the_end;
                break;
            case AVMEDIA_TYPE_VIDEO:
            case AVMEDIA_TYPE_SUBTITLE:
                goto the_end;
            default:
                break;
            }
        }
    }
 the_end:
    stream_component_close(is, start_index);
    stream_component_open(is, stream_index);
}
2776
2777
2778 static void toggle_full_screen(void)
2779 {
2780     is_full_screen = !is_full_screen;
2781     if (!fs_screen_width) {
2782         /* use default SDL method */
2783 //        SDL_WM_ToggleFullScreen(screen);
2784     }
2785     video_open(cur_stream);
2786 }
2787
2788 static void toggle_pause(void)
2789 {
2790     if (cur_stream)
2791         stream_pause(cur_stream);
2792     step = 0;
2793 }
2794
2795 static void step_to_next_frame(void)
2796 {
2797     if (cur_stream) {
2798         /* if the stream is paused unpause it, then step */
2799         if (cur_stream->paused)
2800             stream_pause(cur_stream);
2801     }
2802     step = 1;
2803 }
2804
2805 static void toggle_audio_display(void)
2806 {
2807     if (cur_stream) {
2808         int bgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0x00);
2809         cur_stream->show_audio = (cur_stream->show_audio + 1) % 3;
2810         fill_rectangle(screen,
2811                     cur_stream->xleft, cur_stream->ytop, cur_stream->width, cur_stream->height,
2812                     bgcolor);
2813         SDL_UpdateRect(screen, cur_stream->xleft, cur_stream->ytop, cur_stream->width, cur_stream->height);
2814     }
2815 }
2816
/* handle an event sent by the GUI: main SDL event dispatch loop.
 * Processes keyboard shortcuts, mouse seeking, window resize, quit
 * requests, and the FF_* user events posted by the other threads.
 * Only returns via do_exit(). */
static void event_loop(void)
{
    SDL_Event event;
    double incr, pos, frac;

    for(;;) {
        double x;
        SDL_WaitEvent(&event);
        switch(event.type) {
        case SDL_KEYDOWN:
            if (exit_on_keydown) {
                do_exit();
                break;
            }
            switch(event.key.keysym.sym) {
            case SDLK_ESCAPE:
            case SDLK_q:
                do_exit();
                break;
            case SDLK_f:
                toggle_full_screen();
                break;
            case SDLK_p:
            case SDLK_SPACE:
                toggle_pause();
                break;
            case SDLK_s: //S: Step to next frame
                step_to_next_frame();
                break;
            case SDLK_a:
                if (cur_stream)
                    stream_cycle_channel(cur_stream, AVMEDIA_TYPE_AUDIO);
                break;
            case SDLK_v:
                if (cur_stream)
                    stream_cycle_channel(cur_stream, AVMEDIA_TYPE_VIDEO);
                break;
            case SDLK_t:
                if (cur_stream)
                    stream_cycle_channel(cur_stream, AVMEDIA_TYPE_SUBTITLE);
                break;
            case SDLK_w:
                toggle_audio_display();
                break;
            /* arrow keys: relative seek; "incr" is in seconds and is
             * converted to bytes below when seeking by bytes */
            case SDLK_LEFT:
                incr = -10.0;
                goto do_seek;
            case SDLK_RIGHT:
                incr = 10.0;
                goto do_seek;
            case SDLK_UP:
                incr = 60.0;
                goto do_seek;
            case SDLK_DOWN:
                incr = -60.0;
            do_seek:
                if (cur_stream) {
                    if (seek_by_bytes) {
                        /* start from the best known current byte position:
                         * video position, else audio packet position, else
                         * raw file offset */
                        if (cur_stream->video_stream >= 0 && cur_stream->video_current_pos>=0){
                            pos= cur_stream->video_current_pos;
                        }else if(cur_stream->audio_stream >= 0 && cur_stream->audio_pkt.pos>=0){
                            pos= cur_stream->audio_pkt.pos;
                        }else
                            pos = url_ftell(cur_stream->ic->pb);
                        /* seconds -> bytes, via the container bit rate or a
                         * fallback guess when it is unknown */
                        if (cur_stream->ic->bit_rate)
                            incr *= cur_stream->ic->bit_rate / 8.0;
                        else
                            incr *= 180000.0;
                        pos += incr;
                        stream_seek(cur_stream, pos, incr, 1);
                    } else {
                        /* time-based seek relative to the master clock */
                        pos = get_master_clock(cur_stream);
                        pos += incr;
                        stream_seek(cur_stream, (int64_t)(pos * AV_TIME_BASE), (int64_t)(incr * AV_TIME_BASE), 0);
                    }
                }
                break;
            default:
                break;
            }
            break;
        case SDL_MOUSEBUTTONDOWN:
            if (exit_on_mousedown) {
                do_exit();
                break;
            }
            /* fall through: a click seeks the same way a drag does */
        case SDL_MOUSEMOTION:
            if(event.type ==SDL_MOUSEBUTTONDOWN){
                x= event.button.x;
            }else{
                /* only seek while a mouse button is held during motion */
                if(event.motion.state != SDL_PRESSED)
                    break;
                x= event.motion.x;
            }
            if (cur_stream) {
                /* horizontal position maps linearly to a fraction of the
                 * file size (byte seeking) or of its duration (time) */
                if(seek_by_bytes || cur_stream->ic->duration<=0){
                    uint64_t size=  url_fsize(cur_stream->ic->pb);
                    stream_seek(cur_stream, size*x/cur_stream->width, 0, 1);
                }else{
                    int64_t ts;
                    int ns, hh, mm, ss;
                    int tns, thh, tmm, tss;
                    tns = cur_stream->ic->duration/1000000LL;
                    thh = tns/3600;
                    tmm = (tns%3600)/60;
                    tss = (tns%60);
                    frac = x/cur_stream->width;
                    ns = frac*tns;
                    hh = ns/3600;
                    mm = (ns%3600)/60;
                    ss = (ns%60);
                    fprintf(stderr, "Seek to %2.0f%% (%2d:%02d:%02d) of total duration (%2d:%02d:%02d)       \n", frac*100,
                            hh, mm, ss, thh, tmm, tss);
                    ts = frac*cur_stream->ic->duration;
                    if (cur_stream->ic->start_time != AV_NOPTS_VALUE)
                        ts += cur_stream->ic->start_time;
                    stream_seek(cur_stream, ts, 0, 0);
                }
            }
            break;
        case SDL_VIDEORESIZE:
            if (cur_stream) {
                screen = SDL_SetVideoMode(event.resize.w, event.resize.h, 0,
                                          SDL_HWSURFACE|SDL_RESIZABLE|SDL_ASYNCBLIT|SDL_HWACCEL);
                screen_width = cur_stream->width = event.resize.w;
                screen_height= cur_stream->height= event.resize.h;
            }
            break;
        case SDL_QUIT:
        case FF_QUIT_EVENT:
            do_exit();
            break;
        /* FF_* events are posted from the decoder/refresh machinery */
        case FF_ALLOC_EVENT:
            video_open(event.user.data1);
            alloc_picture(event.user.data1);
            break;
        case FF_REFRESH_EVENT:
            video_refresh_timer(event.user.data1);
            /* NOTE(review): clears the flag on the global cur_stream rather
             * than on event.user.data1 — fine only while a single stream is
             * ever open; confirm before supporting multiple streams */
            cur_stream->refresh=0;
            break;
        default:
            break;
        }
    }
}
2963
2964 static void opt_frame_size(const char *arg)
2965 {
2966     if (av_parse_video_size(&frame_width, &frame_height, arg) < 0) {
2967         fprintf(stderr, "Incorrect frame size\n");
2968         exit(1);
2969     }
2970     if ((frame_width % 2) != 0 || (frame_height % 2) != 0) {
2971         fprintf(stderr, "Frame size must be a multiple of 2\n");
2972         exit(1);
2973     }
2974 }
2975
2976 static int opt_width(const char *opt, const char *arg)
2977 {
2978     screen_width = parse_number_or_die(opt, arg, OPT_INT64, 1, INT_MAX);
2979     return 0;
2980 }
2981
2982 static int opt_height(const char *opt, const char *arg)
2983 {
2984     screen_height = parse_number_or_die(opt, arg, OPT_INT64, 1, INT_MAX);
2985     return 0;
2986 }
2987
2988 static void opt_format(const char *arg)
2989 {
2990     file_iformat = av_find_input_format(arg);
2991     if (!file_iformat) {
2992         fprintf(stderr, "Unknown input format: %s\n", arg);
2993         exit(1);
2994     }
2995 }
2996
2997 static void opt_frame_pix_fmt(const char *arg)
2998 {
2999     frame_pix_fmt = av_get_pix_fmt(arg);
3000 }
3001
3002 static int opt_sync(const char *opt, const char *arg)
3003 {
3004     if (!strcmp(arg, "audio"))
3005         av_sync_type = AV_SYNC_AUDIO_MASTER;
3006     else if (!strcmp(arg, "video"))
3007         av_sync_type = AV_SYNC_VIDEO_MASTER;
3008     else if (!strcmp(arg, "ext"))
3009         av_sync_type = AV_SYNC_EXTERNAL_CLOCK;
3010     else {
3011         fprintf(stderr, "Unknown value for %s: %s\n", opt, arg);
3012         exit(1);
3013     }
3014     return 0;
3015 }
3016
3017 static int opt_seek(const char *opt, const char *arg)
3018 {
3019     start_time = parse_time_or_die(opt, arg, 1);
3020     return 0;
3021 }
3022
3023 static int opt_duration(const char *opt, const char *arg)
3024 {
3025     duration = parse_time_or_die(opt, arg, 1);
3026     return 0;
3027 }
3028
3029 static int opt_debug(const char *opt, const char *arg)
3030 {
3031     av_log_set_level(99);
3032     debug = parse_number_or_die(opt, arg, OPT_INT64, 0, INT_MAX);
3033     return 0;
3034 }
3035
3036 static int opt_vismv(const char *opt, const char *arg)
3037 {
3038     debug_mv = parse_number_or_die(opt, arg, OPT_INT64, INT_MIN, INT_MAX);
3039     return 0;
3040 }
3041
3042 static int opt_thread_count(const char *opt, const char *arg)
3043 {
3044     thread_count= parse_number_or_die(opt, arg, OPT_INT64, 0, INT_MAX);
3045 #if !HAVE_THREADS
3046     fprintf(stderr, "Warning: not compiled with thread support, using thread emulation\n");
3047 #endif
3048     return 0;
3049 }
3050
/* Command-line option table, consumed by cmdutils' parse_options().
 * Common options (e.g. -h, -version) are pulled in from
 * cmdutils_common_opts.h. */
static const OptionDef options[] = {
#include "cmdutils_common_opts.h"
    /* display geometry */
    { "x", HAS_ARG | OPT_FUNC2, {(void*)opt_width}, "force displayed width", "width" },
    { "y", HAS_ARG | OPT_FUNC2, {(void*)opt_height}, "force displayed height", "height" },
    { "s", HAS_ARG | OPT_VIDEO, {(void*)opt_frame_size}, "set frame size (WxH or abbreviation)", "size" },
    { "fs", OPT_BOOL, {(void*)&is_full_screen}, "force full screen" },
    /* stream enabling / selection */
    { "an", OPT_BOOL, {(void*)&audio_disable}, "disable audio" },
    { "vn", OPT_BOOL, {(void*)&video_disable}, "disable video" },
    { "ast", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&wanted_stream[AVMEDIA_TYPE_AUDIO]}, "select desired audio stream", "stream_number" },
    { "vst", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&wanted_stream[AVMEDIA_TYPE_VIDEO]}, "select desired video stream", "stream_number" },
    { "sst", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&wanted_stream[AVMEDIA_TYPE_SUBTITLE]}, "select desired subtitle stream", "stream_number" },
    /* playback range, seeking and input format */
    { "ss", HAS_ARG | OPT_FUNC2, {(void*)&opt_seek}, "seek to a given position in seconds", "pos" },
    { "t", HAS_ARG | OPT_FUNC2, {(void*)&opt_duration}, "play  \"duration\" seconds of audio/video", "duration" },
    { "bytes", OPT_INT | HAS_ARG, {(void*)&seek_by_bytes}, "seek by bytes 0=off 1=on -1=auto", "val" },
    { "nodisp", OPT_BOOL, {(void*)&display_disable}, "disable graphical display" },
    { "f", HAS_ARG, {(void*)opt_format}, "force format", "fmt" },
    { "pix_fmt", HAS_ARG | OPT_EXPERT | OPT_VIDEO, {(void*)opt_frame_pix_fmt}, "set pixel format", "format" },
    /* diagnostics */
    { "stats", OPT_BOOL | OPT_EXPERT, {(void*)&show_status}, "show status", "" },
    { "debug", HAS_ARG | OPT_FUNC2 | OPT_EXPERT, {(void*)opt_debug}, "print specific debug info", "" },
    { "bug", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&workaround_bugs}, "workaround bugs", "" },
    { "vismv", HAS_ARG | OPT_FUNC2 | OPT_EXPERT, {(void*)opt_vismv}, "visualize motion vectors", "" },
    /* decoder tuning */
    { "fast", OPT_BOOL | OPT_EXPERT, {(void*)&fast}, "non spec compliant optimizations", "" },
    { "genpts", OPT_BOOL | OPT_EXPERT, {(void*)&genpts}, "generate pts", "" },
    { "drp", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&decoder_reorder_pts}, "let decoder reorder pts 0=off 1=on -1=auto", ""},
    { "lowres", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&lowres}, "", "" },
    { "skiploop", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&skip_loop_filter}, "", "" },
    { "skipframe", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&skip_frame}, "", "" },
    { "skipidct", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&skip_idct}, "", "" },
    { "idct", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&idct}, "set idct algo",  "algo" },
    { "er", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&error_recognition}, "set error detection threshold (0-4)",  "threshold" },
    { "ec", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&error_concealment}, "set error concealment options",  "bit_mask" },
    /* synchronization, threading and player behaviour */
    { "sync", HAS_ARG | OPT_FUNC2 | OPT_EXPERT, {(void*)opt_sync}, "set audio-video sync. type (type=audio/video/ext)", "type" },
    { "threads", HAS_ARG | OPT_FUNC2 | OPT_EXPERT, {(void*)opt_thread_count}, "thread count", "count" },
    { "autoexit", OPT_BOOL | OPT_EXPERT, {(void*)&autoexit}, "exit at the end", "" },
    { "exitonkeydown", OPT_BOOL | OPT_EXPERT, {(void*)&exit_on_keydown}, "exit on key down", "" },
    { "exitonmousedown", OPT_BOOL | OPT_EXPERT, {(void*)&exit_on_mousedown}, "exit on mouse down", "" },
    { "loop", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&loop}, "set number of times the playback shall be looped", "loop count" },
    { "framedrop", OPT_BOOL | OPT_EXPERT, {(void*)&framedrop}, "drop frames when cpu is too slow", "" },
    { "window_title", OPT_STRING | HAS_ARG, {(void*)&window_title}, "set window title", "window title" },
#if CONFIG_AVFILTER
    { "vf", OPT_STRING | HAS_ARG, {(void*)&vfilters}, "video filters", "filter list" },
#endif
    { "rdftspeed", OPT_INT | HAS_ARG| OPT_AUDIO | OPT_EXPERT, {(void*)&rdftspeed}, "rdft speed", "msecs" },
    { "default", OPT_FUNC2 | HAS_ARG | OPT_AUDIO | OPT_VIDEO | OPT_EXPERT, {(void*)opt_default}, "generic catch all option", "" },
    { NULL, },
};
3097
/* Print the one-line usage summary. */
static void show_usage(void)
{
    printf("Simple media player\n"
           "usage: ffplay [options] input_file\n"
           "\n");
}
3104
/* Print the full help text: usage line, the main and advanced option
 * tables, the generic AVCodec/AVFormat (and swscale) option lists, and
 * the interactive key bindings. */
static void show_help(void)
{
    av_log_set_callback(log_callback_help);
    show_usage();
    show_help_options(options, "Main options:\n",
                      OPT_EXPERT, 0);
    show_help_options(options, "\nAdvanced options:\n",
                      OPT_EXPERT, OPT_EXPERT);
    printf("\n");
    /* generic per-library AVOption listings */
    av_opt_show2(avcodec_opts[0], NULL,
                 AV_OPT_FLAG_DECODING_PARAM, 0);
    printf("\n");
    av_opt_show2(avformat_opts, NULL,
                 AV_OPT_FLAG_DECODING_PARAM, 0);
#if !CONFIG_AVFILTER
    printf("\n");
    av_opt_show2(sws_opts, NULL,
                 AV_OPT_FLAG_ENCODING_PARAM, 0);
#endif
    printf("\nWhile playing:\n"
           "q, ESC              quit\n"
           "f                   toggle full screen\n"
           "p, SPC              pause\n"
           "a                   cycle audio channel\n"
           "v                   cycle video channel\n"
           "t                   cycle subtitle channel\n"
           "w                   show audio waves\n"
           "s                   activate frame-step mode\n"
           "left/right          seek backward/forward 10 seconds\n"
           "down/up             seek backward/forward 1 minute\n"
           "mouse click         seek to percentage in file corresponding to fraction of width\n"
           );
}
3138
3139 static void opt_input_file(const char *filename)
3140 {
3141     if (input_filename) {
3142         fprintf(stderr, "Argument '%s' provided as input filename, but '%s' was already specified.\n",
3143                 filename, input_filename);
3144         exit(1);
3145     }
3146     if (!strcmp(filename, "-"))
3147         filename = "pipe:";
3148     input_filename = filename;
3149 }
3150
3151 /* Called from the main */
3152 int main(int argc, char **argv)
3153 {
3154     int flags, i;
3155
3156     av_log_set_flags(AV_LOG_SKIP_REPEATED);
3157
3158     /* register all codecs, demux and protocols */
3159     avcodec_register_all();
3160 #if CONFIG_AVDEVICE
3161     avdevice_register_all();
3162 #endif
3163 #if CONFIG_AVFILTER
3164     avfilter_register_all();
3165 #endif
3166     av_register_all();
3167
3168     for(i=0; i<AVMEDIA_TYPE_NB; i++){
3169         avcodec_opts[i]= avcodec_alloc_context2(i);
3170     }
3171     avformat_opts = avformat_alloc_context();
3172 #if !CONFIG_AVFILTER
3173     sws_opts = sws_getContext(16,16,0, 16,16,0, sws_flags, NULL,NULL,NULL);
3174 #endif
3175
3176     show_banner();
3177
3178     parse_options(argc, argv, options, opt_input_file);
3179
3180     if (!input_filename) {
3181         show_usage();
3182         fprintf(stderr, "An input file must be specified\n");
3183         fprintf(stderr, "Use -h to get full help or, even better, run 'man ffplay'\n");
3184         exit(1);
3185     }
3186
3187     if (display_disable) {
3188         video_disable = 1;
3189     }
3190     flags = SDL_INIT_VIDEO | SDL_INIT_AUDIO | SDL_INIT_TIMER;
3191 #if !defined(__MINGW32__) && !defined(__APPLE__)
3192     flags |= SDL_INIT_EVENTTHREAD; /* Not supported on Windows or Mac OS X */
3193 #endif
3194     if (SDL_Init (flags)) {
3195         fprintf(stderr, "Could not initialize SDL - %s\n", SDL_GetError());
3196         exit(1);
3197     }
3198
3199     if (!display_disable) {
3200 #if HAVE_SDL_VIDEO_SIZE
3201         const SDL_VideoInfo *vi = SDL_GetVideoInfo();
3202         fs_screen_width = vi->current_w;
3203         fs_screen_height = vi->current_h;
3204 #endif
3205     }
3206
3207     SDL_EventState(SDL_ACTIVEEVENT, SDL_IGNORE);
3208     SDL_EventState(SDL_SYSWMEVENT, SDL_IGNORE);
3209     SDL_EventState(SDL_USEREVENT, SDL_IGNORE);
3210
3211     av_init_packet(&flush_pkt);
3212     flush_pkt.data= "FLUSH";
3213
3214     cur_stream = stream_open(input_filename, file_iformat);
3215
3216     event_loop();
3217
3218     /* never returns */
3219
3220     return 0;
3221 }