]> git.sesse.net Git - ffmpeg/blob - ffplay.c
Make url_fseek() return AVERROR_EOF rather than AVERROR(EPIPE) if end
[ffmpeg] / ffplay.c
1 /*
2  * FFplay : Simple Media Player based on the FFmpeg libraries
3  * Copyright (c) 2003 Fabrice Bellard
4  *
5  * This file is part of FFmpeg.
6  *
7  * FFmpeg is free software; you can redistribute it and/or
8  * modify it under the terms of the GNU Lesser General Public
9  * License as published by the Free Software Foundation; either
10  * version 2.1 of the License, or (at your option) any later version.
11  *
12  * FFmpeg is distributed in the hope that it will be useful,
13  * but WITHOUT ANY WARRANTY; without even the implied warranty of
14  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
15  * Lesser General Public License for more details.
16  *
17  * You should have received a copy of the GNU Lesser General Public
18  * License along with FFmpeg; if not, write to the Free Software
19  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
20  */
21
22 #include "config.h"
23 #include <inttypes.h>
24 #include <math.h>
25 #include <limits.h>
26 #include "libavutil/avstring.h"
27 #include "libavutil/pixdesc.h"
28 #include "libavformat/avformat.h"
29 #include "libavdevice/avdevice.h"
30 #include "libswscale/swscale.h"
31 #include "libavcodec/audioconvert.h"
32 #include "libavcodec/colorspace.h"
33 #include "libavcodec/opt.h"
34 #include "libavcodec/avfft.h"
35
36 #if CONFIG_AVFILTER
37 # include "libavfilter/avfilter.h"
38 # include "libavfilter/avfiltergraph.h"
39 # include "libavfilter/graphparser.h"
40 #endif
41
42 #include "cmdutils.h"
43
44 #include <SDL.h>
45 #include <SDL_thread.h>
46
47 #ifdef __MINGW32__
48 #undef main /* We don't want SDL to override our main() */
49 #endif
50
51 #include <unistd.h>
52 #include <assert.h>
53
const char program_name[] = "FFplay";
const int program_birth_year = 2003;

//#define DEBUG_SYNC

/* demux-ahead limit in bytes; presumably enforced by the read thread — not visible in this chunk */
#define MAX_QUEUE_SIZE (15 * 1024 * 1024)
/* minimum number of bytes to keep queued in the audio packet queue */
#define MIN_AUDIOQ_SIZE (20 * 16 * 1024)
/* minimum number of queued packets/frames before playback may proceed (TODO confirm against read loop) */
#define MIN_FRAMES 5

/* SDL audio buffer size, in samples. Should be small to have precise
   A/V sync as SDL does not have hardware buffer fullness info. */
#define SDL_AUDIO_BUFFER_SIZE 1024

/* no AV sync correction is done if below the AV sync threshold */
#define AV_SYNC_THRESHOLD 0.01
/* no AV correction is done if too big error */
#define AV_NOSYNC_THRESHOLD 10.0

/* weighting factor for the frame-skip decision (used outside this chunk) */
#define FRAME_SKIP_FACTOR 0.05

/* maximum audio speed change to get correct sync */
#define SAMPLE_CORRECTION_PERCENT_MAX 10

/* we use about AUDIO_DIFF_AVG_NB A-V differences to make the average */
#define AUDIO_DIFF_AVG_NB   20

/* NOTE: the size must be big enough to compensate the hardware audio buffersize size */
#define SAMPLE_ARRAY_SIZE (2*65536)

#if !CONFIG_AVFILTER
/* software scaler flags; only used when libavfilter does the scaling instead */
static int sws_flags = SWS_BICUBIC;
#endif
86
/* Thread-safe FIFO of demuxed AVPackets, shared between the demuxer
   and a decoder thread.  All fields are protected by mutex; cond is
   signalled on every put and on abort. */
typedef struct PacketQueue {
    AVPacketList *first_pkt, *last_pkt; /* singly-linked list head/tail */
    int nb_packets;      /* number of queued packets */
    int size;            /* payload bytes plus list-node overhead */
    int abort_request;   /* when set, blocked readers return -1 */
    SDL_mutex *mutex;
    SDL_cond *cond;
} PacketQueue;
95
#define VIDEO_PICTURE_QUEUE_SIZE 2
#define SUBPICTURE_QUEUE_SIZE 4

/* One decoded video frame queued for display. */
typedef struct VideoPicture {
    double pts;                                  ///<presentation time stamp for this picture
    double target_clock;                         ///<av_gettime() time at which this should be displayed ideally
    int64_t pos;                                 ///<byte position in file
    SDL_Overlay *bmp;                            ///<SDL YUV overlay holding the frame's pixels
    int width, height; /* source height & width */
    int allocated;
    enum PixelFormat pix_fmt;

#if CONFIG_AVFILTER
    AVFilterPicRef *picref;                      ///<filter-graph output picture (carries pixel_aspect)
#endif
} VideoPicture;
112
/* One decoded subtitle queued for display. */
typedef struct SubPicture {
    double pts; /* presentation time stamp for this picture */
    AVSubtitle sub;
} SubPicture;

/* Master-clock selection: which clock the other streams sync against. */
enum {
    AV_SYNC_AUDIO_MASTER, /* default choice */
    AV_SYNC_VIDEO_MASTER,
    AV_SYNC_EXTERNAL_CLOCK, /* synchronize to an external clock */
};
123
/* Global state for one open media file: demuxer context, per-stream
   decoding state, packet/picture/subtitle queues and the A/V sync
   clocks.  Shared between the parse, video, subtitle and refresh
   threads and the SDL audio callback. */
typedef struct VideoState {
    SDL_Thread *parse_tid;    /* demuxer thread */
    SDL_Thread *video_tid;    /* video decoding thread */
    SDL_Thread *refresh_tid;  /* thread that pushes FF_REFRESH_EVENTs */
    AVInputFormat *iformat;
    int no_background;        /* skip background fill once after e.g. a resize */
    int abort_request;        /* set to make all threads exit */
    int paused;
    int last_paused;
    int seek_req;             /* a seek has been requested */
    int seek_flags;
    int64_t seek_pos;
    int64_t seek_rel;
    int read_pause_return;
    AVFormatContext *ic;
    int dtg_active_format;

    int audio_stream;         /* index of the selected audio stream */

    int av_sync_type;         /* one of the AV_SYNC_* values above */
    double external_clock; /* external clock base */
    int64_t external_clock_time;

    double audio_clock;
    double audio_diff_cum; /* used for AV difference average computation */
    double audio_diff_avg_coef;
    double audio_diff_threshold;
    int audio_diff_avg_count;
    AVStream *audio_st;
    PacketQueue audioq;
    int audio_hw_buf_size;
    /* samples output by the codec. we reserve more space for avsync
       compensation */
    DECLARE_ALIGNED(16,uint8_t,audio_buf1)[(AVCODEC_MAX_AUDIO_FRAME_SIZE * 3) / 2];
    DECLARE_ALIGNED(16,uint8_t,audio_buf2)[(AVCODEC_MAX_AUDIO_FRAME_SIZE * 3) / 2];
    uint8_t *audio_buf;
    unsigned int audio_buf_size; /* in bytes */
    int audio_buf_index; /* in bytes */
    AVPacket audio_pkt_temp;
    AVPacket audio_pkt;
    enum SampleFormat audio_src_fmt;
    AVAudioConvert *reformat_ctx;

    int show_audio; /* if true, display audio samples (1: waveform, otherwise RDFT spectrum) */
    int16_t sample_array[SAMPLE_ARRAY_SIZE]; /* ring buffer of recent samples for visualization */
    int sample_array_index;
    int last_i_start;       /* display start index remembered while paused */
    RDFTContext *rdft;      /* FFT context for the spectrum display */
    int rdft_bits;
    int xpos;               /* current x column of the scrolling spectrum */

    SDL_Thread *subtitle_tid;
    int subtitle_stream;
    int subtitle_stream_changed;
    AVStream *subtitle_st;
    PacketQueue subtitleq;
    SubPicture subpq[SUBPICTURE_QUEUE_SIZE];
    int subpq_size, subpq_rindex, subpq_windex;
    SDL_mutex *subpq_mutex;
    SDL_cond *subpq_cond;

    double frame_timer;
    double frame_last_pts;
    double frame_last_delay;
    double video_clock;                          ///<pts of last decoded frame / predicted pts of next decoded frame
    int video_stream;
    AVStream *video_st;
    PacketQueue videoq;
    double video_current_pts;                    ///<current displayed pts (different from video_clock if frame fifos are used)
    double video_current_pts_drift;              ///<video_current_pts - time (av_gettime) at which we updated video_current_pts - used to have running video pts
    int64_t video_current_pos;                   ///<current displayed file pos
    VideoPicture pictq[VIDEO_PICTURE_QUEUE_SIZE];
    int pictq_size, pictq_rindex, pictq_windex;
    SDL_mutex *pictq_mutex;
    SDL_cond *pictq_cond;
#if !CONFIG_AVFILTER
    struct SwsContext *img_convert_ctx;
#endif

    //    QETimer *video_timer;
    char filename[1024];
    int width, height, xleft, ytop; /* display window geometry */

    int64_t faulty_pts;
    int64_t faulty_dts;
    int64_t last_dts_for_fault_detection;
    int64_t last_pts_for_fault_detection;

#if CONFIG_AVFILTER
    AVFilterContext *out_video_filter;          ///<the last filter in the video chain
#endif

    float skip_frames;
    float skip_frames_index;
    int refresh;            /* a refresh event is pending (set by refresh_thread) */
} VideoState;
220
static void show_help(void);
static int audio_write_get_buf_size(VideoState *is);

/* options specified by the user */
static AVInputFormat *file_iformat;
static const char *input_filename;
static const char *window_title;
static int fs_screen_width;
static int fs_screen_height;
static int screen_width = 0;
static int screen_height = 0;
static int frame_width = 0;
static int frame_height = 0;
static enum PixelFormat frame_pix_fmt = PIX_FMT_NONE;
static int audio_disable;
static int video_disable;
/* per-media-type requested stream index; -1 presumably means auto-select */
static int wanted_stream[CODEC_TYPE_NB]={
    [CODEC_TYPE_AUDIO]=-1,
    [CODEC_TYPE_VIDEO]=-1,
    [CODEC_TYPE_SUBTITLE]=-1,
};
static int seek_by_bytes=-1;
static int display_disable;
static int show_status = 1;
static int av_sync_type = AV_SYNC_AUDIO_MASTER;
static int64_t start_time = AV_NOPTS_VALUE;
static int debug = 0;
static int debug_mv = 0;
static int step = 0;
static int thread_count = 1;
static int workaround_bugs = 1;
static int fast = 0;
static int genpts = 0;
static int lowres = 0;
static int idct = FF_IDCT_AUTO;
static enum AVDiscard skip_frame= AVDISCARD_DEFAULT;
static enum AVDiscard skip_idct= AVDISCARD_DEFAULT;
static enum AVDiscard skip_loop_filter= AVDISCARD_DEFAULT;
static int error_recognition = FF_ER_CAREFUL;
static int error_concealment = 3;
static int decoder_reorder_pts= -1;
static int autoexit;
static int framedrop=1;

/* spectrum-view refresh period in ms (refresh_thread sleeps rdftspeed*1000 us) */
static int rdftspeed=20;
#if CONFIG_AVFILTER
static char *vfilters = NULL;
#endif

/* current context */
static int is_full_screen;
static VideoState *cur_stream;
static int64_t audio_callback_time; /* av_gettime() at the last audio callback */

/* sentinel packet; every queue is primed with it in packet_queue_init */
static AVPacket flush_pkt;

#define FF_ALLOC_EVENT   (SDL_USEREVENT)
#define FF_REFRESH_EVENT (SDL_USEREVENT + 1)
#define FF_QUIT_EVENT    (SDL_USEREVENT + 2)

static SDL_Surface *screen;

static int packet_queue_put(PacketQueue *q, AVPacket *pkt);
285 /* packet queue handling */
286 static void packet_queue_init(PacketQueue *q)
287 {
288     memset(q, 0, sizeof(PacketQueue));
289     q->mutex = SDL_CreateMutex();
290     q->cond = SDL_CreateCond();
291     packet_queue_put(q, &flush_pkt);
292 }
293
294 static void packet_queue_flush(PacketQueue *q)
295 {
296     AVPacketList *pkt, *pkt1;
297
298     SDL_LockMutex(q->mutex);
299     for(pkt = q->first_pkt; pkt != NULL; pkt = pkt1) {
300         pkt1 = pkt->next;
301         av_free_packet(&pkt->pkt);
302         av_freep(&pkt);
303     }
304     q->last_pkt = NULL;
305     q->first_pkt = NULL;
306     q->nb_packets = 0;
307     q->size = 0;
308     SDL_UnlockMutex(q->mutex);
309 }
310
311 static void packet_queue_end(PacketQueue *q)
312 {
313     packet_queue_flush(q);
314     SDL_DestroyMutex(q->mutex);
315     SDL_DestroyCond(q->cond);
316 }
317
318 static int packet_queue_put(PacketQueue *q, AVPacket *pkt)
319 {
320     AVPacketList *pkt1;
321
322     /* duplicate the packet */
323     if (pkt!=&flush_pkt && av_dup_packet(pkt) < 0)
324         return -1;
325
326     pkt1 = av_malloc(sizeof(AVPacketList));
327     if (!pkt1)
328         return -1;
329     pkt1->pkt = *pkt;
330     pkt1->next = NULL;
331
332
333     SDL_LockMutex(q->mutex);
334
335     if (!q->last_pkt)
336
337         q->first_pkt = pkt1;
338     else
339         q->last_pkt->next = pkt1;
340     q->last_pkt = pkt1;
341     q->nb_packets++;
342     q->size += pkt1->pkt.size + sizeof(*pkt1);
343     /* XXX: should duplicate packet data in DV case */
344     SDL_CondSignal(q->cond);
345
346     SDL_UnlockMutex(q->mutex);
347     return 0;
348 }
349
350 static void packet_queue_abort(PacketQueue *q)
351 {
352     SDL_LockMutex(q->mutex);
353
354     q->abort_request = 1;
355
356     SDL_CondSignal(q->cond);
357
358     SDL_UnlockMutex(q->mutex);
359 }
360
361 /* return < 0 if aborted, 0 if no packet and > 0 if packet.  */
362 static int packet_queue_get(PacketQueue *q, AVPacket *pkt, int block)
363 {
364     AVPacketList *pkt1;
365     int ret;
366
367     SDL_LockMutex(q->mutex);
368
369     for(;;) {
370         if (q->abort_request) {
371             ret = -1;
372             break;
373         }
374
375         pkt1 = q->first_pkt;
376         if (pkt1) {
377             q->first_pkt = pkt1->next;
378             if (!q->first_pkt)
379                 q->last_pkt = NULL;
380             q->nb_packets--;
381             q->size -= pkt1->pkt.size + sizeof(*pkt1);
382             *pkt = pkt1->pkt;
383             av_free(pkt1);
384             ret = 1;
385             break;
386         } else if (!block) {
387             ret = 0;
388             break;
389         } else {
390             SDL_CondWait(q->cond, q->mutex);
391         }
392     }
393     SDL_UnlockMutex(q->mutex);
394     return ret;
395 }
396
397 static inline void fill_rectangle(SDL_Surface *screen,
398                                   int x, int y, int w, int h, int color)
399 {
400     SDL_Rect rect;
401     rect.x = x;
402     rect.y = y;
403     rect.w = w;
404     rect.h = h;
405     SDL_FillRect(screen, &rect, color);
406 }
407
#if 0
/* draw only the border of a rectangle */
/* NOTE(review): dead code (compiled out with #if 0); fills the four
   background strips around an inner x/y/w/h rectangle. */
void fill_border(VideoState *s, int x, int y, int w, int h, int color)
{
    int w1, w2, h1, h2;

    /* fill the background */
    w1 = x;                       /* left strip width, clamped to >= 0 */
    if (w1 < 0)
        w1 = 0;
    w2 = s->width - (x + w);      /* right strip width */
    if (w2 < 0)
        w2 = 0;
    h1 = y;                       /* top strip height */
    if (h1 < 0)
        h1 = 0;
    h2 = s->height - (y + h);     /* bottom strip height */
    if (h2 < 0)
        h2 = 0;
    fill_rectangle(screen,
                   s->xleft, s->ytop,
                   w1, s->height,
                   color);
    fill_rectangle(screen,
                   s->xleft + s->width - w2, s->ytop,
                   w2, s->height,
                   color);
    fill_rectangle(screen,
                   s->xleft + w1, s->ytop,
                   s->width - w1 - w2, h1,
                   color);
    fill_rectangle(screen,
                   s->xleft + w1, s->ytop + s->height - h2,
                   s->width - w1 - w2, h2,
                   color);
}
#endif
445
/* Blend newp over oldp with alpha a; s is the number of extra precision
   bits carried by newp when it is a sum of 2 or 4 chroma samples. */
#define ALPHA_BLEND(a, oldp, newp, s)\
((((oldp << s) * (255 - (a))) + (newp * (a))) / (255 << s))

/* Unpack a 32-bit ARGB pixel pointed to by s into r/g/b/a. */
#define RGBA_IN(r, g, b, a, s)\
{\
    unsigned int v = ((const uint32_t *)(s))[0];\
    a = (v >> 24) & 0xff;\
    r = (v >> 16) & 0xff;\
    g = (v >> 8) & 0xff;\
    b = v & 0xff;\
}

/* Look up the palette entry for the 8-bit pixel at s and unpack it
   as A/Y/U/V (the palette words use the same 8:8:8:8 layout). */
#define YUVA_IN(y, u, v, a, s, pal)\
{\
    unsigned int val = ((const uint32_t *)(pal))[*(const uint8_t*)(s)];\
    a = (val >> 24) & 0xff;\
    y = (val >> 16) & 0xff;\
    u = (val >> 8) & 0xff;\
    v = val & 0xff;\
}

/* Pack A/Y/U/V components into the 32-bit word at d. */
#define YUVA_OUT(d, y, u, v, a)\
{\
    ((uint32_t *)(d))[0] = (a << 24) | (y << 16) | (u << 8) | v;\
}


/* bytes per palette-indexed subtitle pixel */
#define BPP 1
474
475 static void blend_subrect(AVPicture *dst, const AVSubtitleRect *rect, int imgw, int imgh)
476 {
477     int wrap, wrap3, width2, skip2;
478     int y, u, v, a, u1, v1, a1, w, h;
479     uint8_t *lum, *cb, *cr;
480     const uint8_t *p;
481     const uint32_t *pal;
482     int dstx, dsty, dstw, dsth;
483
484     dstw = av_clip(rect->w, 0, imgw);
485     dsth = av_clip(rect->h, 0, imgh);
486     dstx = av_clip(rect->x, 0, imgw - dstw);
487     dsty = av_clip(rect->y, 0, imgh - dsth);
488     lum = dst->data[0] + dsty * dst->linesize[0];
489     cb = dst->data[1] + (dsty >> 1) * dst->linesize[1];
490     cr = dst->data[2] + (dsty >> 1) * dst->linesize[2];
491
492     width2 = ((dstw + 1) >> 1) + (dstx & ~dstw & 1);
493     skip2 = dstx >> 1;
494     wrap = dst->linesize[0];
495     wrap3 = rect->pict.linesize[0];
496     p = rect->pict.data[0];
497     pal = (const uint32_t *)rect->pict.data[1];  /* Now in YCrCb! */
498
499     if (dsty & 1) {
500         lum += dstx;
501         cb += skip2;
502         cr += skip2;
503
504         if (dstx & 1) {
505             YUVA_IN(y, u, v, a, p, pal);
506             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
507             cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
508             cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
509             cb++;
510             cr++;
511             lum++;
512             p += BPP;
513         }
514         for(w = dstw - (dstx & 1); w >= 2; w -= 2) {
515             YUVA_IN(y, u, v, a, p, pal);
516             u1 = u;
517             v1 = v;
518             a1 = a;
519             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
520
521             YUVA_IN(y, u, v, a, p + BPP, pal);
522             u1 += u;
523             v1 += v;
524             a1 += a;
525             lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
526             cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
527             cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
528             cb++;
529             cr++;
530             p += 2 * BPP;
531             lum += 2;
532         }
533         if (w) {
534             YUVA_IN(y, u, v, a, p, pal);
535             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
536             cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
537             cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
538             p++;
539             lum++;
540         }
541         p += wrap3 - dstw * BPP;
542         lum += wrap - dstw - dstx;
543         cb += dst->linesize[1] - width2 - skip2;
544         cr += dst->linesize[2] - width2 - skip2;
545     }
546     for(h = dsth - (dsty & 1); h >= 2; h -= 2) {
547         lum += dstx;
548         cb += skip2;
549         cr += skip2;
550
551         if (dstx & 1) {
552             YUVA_IN(y, u, v, a, p, pal);
553             u1 = u;
554             v1 = v;
555             a1 = a;
556             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
557             p += wrap3;
558             lum += wrap;
559             YUVA_IN(y, u, v, a, p, pal);
560             u1 += u;
561             v1 += v;
562             a1 += a;
563             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
564             cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
565             cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
566             cb++;
567             cr++;
568             p += -wrap3 + BPP;
569             lum += -wrap + 1;
570         }
571         for(w = dstw - (dstx & 1); w >= 2; w -= 2) {
572             YUVA_IN(y, u, v, a, p, pal);
573             u1 = u;
574             v1 = v;
575             a1 = a;
576             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
577
578             YUVA_IN(y, u, v, a, p + BPP, pal);
579             u1 += u;
580             v1 += v;
581             a1 += a;
582             lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
583             p += wrap3;
584             lum += wrap;
585
586             YUVA_IN(y, u, v, a, p, pal);
587             u1 += u;
588             v1 += v;
589             a1 += a;
590             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
591
592             YUVA_IN(y, u, v, a, p + BPP, pal);
593             u1 += u;
594             v1 += v;
595             a1 += a;
596             lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
597
598             cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 2);
599             cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 2);
600
601             cb++;
602             cr++;
603             p += -wrap3 + 2 * BPP;
604             lum += -wrap + 2;
605         }
606         if (w) {
607             YUVA_IN(y, u, v, a, p, pal);
608             u1 = u;
609             v1 = v;
610             a1 = a;
611             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
612             p += wrap3;
613             lum += wrap;
614             YUVA_IN(y, u, v, a, p, pal);
615             u1 += u;
616             v1 += v;
617             a1 += a;
618             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
619             cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
620             cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
621             cb++;
622             cr++;
623             p += -wrap3 + BPP;
624             lum += -wrap + 1;
625         }
626         p += wrap3 + (wrap3 - dstw * BPP);
627         lum += wrap + (wrap - dstw - dstx);
628         cb += dst->linesize[1] - width2 - skip2;
629         cr += dst->linesize[2] - width2 - skip2;
630     }
631     /* handle odd height */
632     if (h) {
633         lum += dstx;
634         cb += skip2;
635         cr += skip2;
636
637         if (dstx & 1) {
638             YUVA_IN(y, u, v, a, p, pal);
639             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
640             cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
641             cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
642             cb++;
643             cr++;
644             lum++;
645             p += BPP;
646         }
647         for(w = dstw - (dstx & 1); w >= 2; w -= 2) {
648             YUVA_IN(y, u, v, a, p, pal);
649             u1 = u;
650             v1 = v;
651             a1 = a;
652             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
653
654             YUVA_IN(y, u, v, a, p + BPP, pal);
655             u1 += u;
656             v1 += v;
657             a1 += a;
658             lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
659             cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u, 1);
660             cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v, 1);
661             cb++;
662             cr++;
663             p += 2 * BPP;
664             lum += 2;
665         }
666         if (w) {
667             YUVA_IN(y, u, v, a, p, pal);
668             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
669             cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
670             cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
671         }
672     }
673 }
674
675 static void free_subpicture(SubPicture *sp)
676 {
677     int i;
678
679     for (i = 0; i < sp->sub.num_rects; i++)
680     {
681         av_freep(&sp->sub.rects[i]->pict.data[0]);
682         av_freep(&sp->sub.rects[i]->pict.data[1]);
683         av_freep(&sp->sub.rects[i]);
684     }
685
686     av_free(sp->sub.rects);
687
688     memset(&sp->sub, 0, sizeof(AVSubtitle));
689 }
690
/* Render the picture at the read index of the picture queue: blend any
   due subtitle rectangles into the YUV overlay, then letterbox it into
   the window preserving (sample aspect * picture) aspect ratio. */
static void video_image_display(VideoState *is)
{
    VideoPicture *vp;
    SubPicture *sp;
    AVPicture pict;
    float aspect_ratio;
    int width, height, x, y;
    SDL_Rect rect;
    int i;

    vp = &is->pictq[is->pictq_rindex];
    if (vp->bmp) {
#if CONFIG_AVFILTER
         if (vp->picref->pixel_aspect.num == 0)
             aspect_ratio = 0;
         else
             aspect_ratio = av_q2d(vp->picref->pixel_aspect);
#else

        /* XXX: use variable in the frame */
        if (is->video_st->sample_aspect_ratio.num)
            aspect_ratio = av_q2d(is->video_st->sample_aspect_ratio);
        else if (is->video_st->codec->sample_aspect_ratio.num)
            aspect_ratio = av_q2d(is->video_st->codec->sample_aspect_ratio);
        else
            aspect_ratio = 0;
#endif
        /* unknown/invalid sample aspect: assume square pixels */
        if (aspect_ratio <= 0.0)
            aspect_ratio = 1.0;
        aspect_ratio *= (float)vp->width / (float)vp->height;
        /* if an active format is indicated, then it overrides the
           mpeg format */
#if 0
        if (is->video_st->codec->dtg_active_format != is->dtg_active_format) {
            is->dtg_active_format = is->video_st->codec->dtg_active_format;
            printf("dtg_active_format=%d\n", is->dtg_active_format);
        }
#endif
#if 0
        switch(is->video_st->codec->dtg_active_format) {
        case FF_DTG_AFD_SAME:
        default:
            /* nothing to do */
            break;
        case FF_DTG_AFD_4_3:
            aspect_ratio = 4.0 / 3.0;
            break;
        case FF_DTG_AFD_16_9:
            aspect_ratio = 16.0 / 9.0;
            break;
        case FF_DTG_AFD_14_9:
            aspect_ratio = 14.0 / 9.0;
            break;
        case FF_DTG_AFD_4_3_SP_14_9:
            aspect_ratio = 14.0 / 9.0;
            break;
        case FF_DTG_AFD_16_9_SP_14_9:
            aspect_ratio = 14.0 / 9.0;
            break;
        case FF_DTG_AFD_SP_4_3:
            aspect_ratio = 4.0 / 3.0;
            break;
        }
#endif

        if (is->subtitle_st)
        {
            if (is->subpq_size > 0)
            {
                sp = &is->subpq[is->subpq_rindex];

                /* only blend once the subtitle's display time has arrived */
                if (vp->pts >= sp->pts + ((float) sp->sub.start_display_time / 1000))
                {
                    SDL_LockYUVOverlay (vp->bmp);

                    /* note the swapped chroma planes between the
                       AVPicture view and the SDL overlay (indices 1/2) */
                    pict.data[0] = vp->bmp->pixels[0];
                    pict.data[1] = vp->bmp->pixels[2];
                    pict.data[2] = vp->bmp->pixels[1];

                    pict.linesize[0] = vp->bmp->pitches[0];
                    pict.linesize[1] = vp->bmp->pitches[2];
                    pict.linesize[2] = vp->bmp->pitches[1];

                    for (i = 0; i < sp->sub.num_rects; i++)
                        blend_subrect(&pict, sp->sub.rects[i],
                                      vp->bmp->w, vp->bmp->h);

                    SDL_UnlockYUVOverlay (vp->bmp);
                }
            }
        }


        /* XXX: we suppose the screen has a 1.0 pixel ratio */
        /* fit to window height first, then shrink if too wide;
           & ~1 keeps the dimensions even */
        height = is->height;
        width = ((int)rint(height * aspect_ratio)) & ~1;
        if (width > is->width) {
            width = is->width;
            height = ((int)rint(width / aspect_ratio)) & ~1;
        }
        x = (is->width - width) / 2;
        y = (is->height - height) / 2;
        if (!is->no_background) {
            /* fill the background */
            //            fill_border(is, x, y, width, height, QERGB(0x00, 0x00, 0x00));
        } else {
            is->no_background = 0;
        }
        rect.x = is->xleft + x;
        rect.y = is->ytop  + y;
        rect.w = width;
        rect.h = height;
        SDL_DisplayYUVOverlay(vp->bmp, &rect);
    } else {
#if 0
        fill_rectangle(screen,
                       is->xleft, is->ytop, is->width, is->height,
                       QERGB(0x00, 0x00, 0x00));
#endif
    }
}
812
/* Mathematical (always non-negative) modulo: maps a into [0, b) for b > 0. */
static inline int compute_mod(int a, int b)
{
    int r = a % b;
    return r < 0 ? r + b : r;
}
821
822 static void video_audio_display(VideoState *s)
823 {
824     int i, i_start, x, y1, y, ys, delay, n, nb_display_channels;
825     int ch, channels, h, h2, bgcolor, fgcolor;
826     int16_t time_diff;
827     int rdft_bits, nb_freq;
828
829     for(rdft_bits=1; (1<<rdft_bits)<2*s->height; rdft_bits++)
830         ;
831     nb_freq= 1<<(rdft_bits-1);
832
833     /* compute display index : center on currently output samples */
834     channels = s->audio_st->codec->channels;
835     nb_display_channels = channels;
836     if (!s->paused) {
837         int data_used= s->show_audio==1 ? s->width : (2*nb_freq);
838         n = 2 * channels;
839         delay = audio_write_get_buf_size(s);
840         delay /= n;
841
842         /* to be more precise, we take into account the time spent since
843            the last buffer computation */
844         if (audio_callback_time) {
845             time_diff = av_gettime() - audio_callback_time;
846             delay -= (time_diff * s->audio_st->codec->sample_rate) / 1000000;
847         }
848
849         delay += 2*data_used;
850         if (delay < data_used)
851             delay = data_used;
852
853         i_start= x = compute_mod(s->sample_array_index - delay * channels, SAMPLE_ARRAY_SIZE);
854         if(s->show_audio==1){
855             h= INT_MIN;
856             for(i=0; i<1000; i+=channels){
857                 int idx= (SAMPLE_ARRAY_SIZE + x - i) % SAMPLE_ARRAY_SIZE;
858                 int a= s->sample_array[idx];
859                 int b= s->sample_array[(idx + 4*channels)%SAMPLE_ARRAY_SIZE];
860                 int c= s->sample_array[(idx + 5*channels)%SAMPLE_ARRAY_SIZE];
861                 int d= s->sample_array[(idx + 9*channels)%SAMPLE_ARRAY_SIZE];
862                 int score= a-d;
863                 if(h<score && (b^c)<0){
864                     h= score;
865                     i_start= idx;
866                 }
867             }
868         }
869
870         s->last_i_start = i_start;
871     } else {
872         i_start = s->last_i_start;
873     }
874
875     bgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0x00);
876     if(s->show_audio==1){
877         fill_rectangle(screen,
878                        s->xleft, s->ytop, s->width, s->height,
879                        bgcolor);
880
881         fgcolor = SDL_MapRGB(screen->format, 0xff, 0xff, 0xff);
882
883         /* total height for one channel */
884         h = s->height / nb_display_channels;
885         /* graph height / 2 */
886         h2 = (h * 9) / 20;
887         for(ch = 0;ch < nb_display_channels; ch++) {
888             i = i_start + ch;
889             y1 = s->ytop + ch * h + (h / 2); /* position of center line */
890             for(x = 0; x < s->width; x++) {
891                 y = (s->sample_array[i] * h2) >> 15;
892                 if (y < 0) {
893                     y = -y;
894                     ys = y1 - y;
895                 } else {
896                     ys = y1;
897                 }
898                 fill_rectangle(screen,
899                                s->xleft + x, ys, 1, y,
900                                fgcolor);
901                 i += channels;
902                 if (i >= SAMPLE_ARRAY_SIZE)
903                     i -= SAMPLE_ARRAY_SIZE;
904             }
905         }
906
907         fgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0xff);
908
909         for(ch = 1;ch < nb_display_channels; ch++) {
910             y = s->ytop + ch * h;
911             fill_rectangle(screen,
912                            s->xleft, y, s->width, 1,
913                            fgcolor);
914         }
915         SDL_UpdateRect(screen, s->xleft, s->ytop, s->width, s->height);
916     }else{
917         nb_display_channels= FFMIN(nb_display_channels, 2);
918         if(rdft_bits != s->rdft_bits){
919             av_rdft_end(s->rdft);
920             s->rdft = av_rdft_init(rdft_bits, DFT_R2C);
921             s->rdft_bits= rdft_bits;
922         }
923         {
924             FFTSample data[2][2*nb_freq];
925             for(ch = 0;ch < nb_display_channels; ch++) {
926                 i = i_start + ch;
927                 for(x = 0; x < 2*nb_freq; x++) {
928                     double w= (x-nb_freq)*(1.0/nb_freq);
929                     data[ch][x]= s->sample_array[i]*(1.0-w*w);
930                     i += channels;
931                     if (i >= SAMPLE_ARRAY_SIZE)
932                         i -= SAMPLE_ARRAY_SIZE;
933                 }
934                 av_rdft_calc(s->rdft, data[ch]);
935             }
936             //least efficient way to do this, we should of course directly access it but its more than fast enough
937             for(y=0; y<s->height; y++){
938                 double w= 1/sqrt(nb_freq);
939                 int a= sqrt(w*sqrt(data[0][2*y+0]*data[0][2*y+0] + data[0][2*y+1]*data[0][2*y+1]));
940                 int b= sqrt(w*sqrt(data[1][2*y+0]*data[1][2*y+0] + data[1][2*y+1]*data[1][2*y+1]));
941                 a= FFMIN(a,255);
942                 b= FFMIN(b,255);
943                 fgcolor = SDL_MapRGB(screen->format, a, b, (a+b)/2);
944
945                 fill_rectangle(screen,
946                             s->xpos, s->height-y, 1, 1,
947                             fgcolor);
948             }
949         }
950         SDL_UpdateRect(screen, s->xpos, s->ytop, 1, s->height);
951         s->xpos++;
952         if(s->xpos >= s->width)
953             s->xpos= s->xleft;
954     }
955 }
956
/* (Re)create the SDL video surface.  Picks the window size from, in
   order: fullscreen dimensions, user-requested size, the video source
   (filter-graph output or codec dimensions), or a 640x480 fallback.
   Returns 0 on success (including when the surface already matches),
   -1 if SDL cannot set the video mode. */
static int video_open(VideoState *is){
    int flags = SDL_HWSURFACE|SDL_ASYNCBLIT|SDL_HWACCEL;
    int w,h;

    if(is_full_screen) flags |= SDL_FULLSCREEN;
    else               flags |= SDL_RESIZABLE;

    if (is_full_screen && fs_screen_width) {
        w = fs_screen_width;
        h = fs_screen_height;
    } else if(!is_full_screen && screen_width){
        w = screen_width;
        h = screen_height;
#if CONFIG_AVFILTER
    }else if (is->out_video_filter && is->out_video_filter->inputs[0]){
        w = is->out_video_filter->inputs[0]->w;
        h = is->out_video_filter->inputs[0]->h;
#else
    }else if (is->video_st && is->video_st->codec->width){
        w = is->video_st->codec->width;
        h = is->video_st->codec->height;
#endif
    } else {
        w = 640;
        h = 480;
    }
    /* avoid recreating an identical surface */
    if(screen && is->width == screen->w && screen->w == w
       && is->height== screen->h && screen->h == h)
        return 0;

#ifndef __APPLE__
    screen = SDL_SetVideoMode(w, h, 0, flags);
#else
    /* setting bits_per_pixel = 0 or 32 causes blank video on OS X */
    screen = SDL_SetVideoMode(w, h, 24, flags);
#endif
    if (!screen) {
        fprintf(stderr, "SDL: could not set video mode - exiting\n");
        return -1;
    }
    if (!window_title)
        window_title = input_filename;
    SDL_WM_SetCaption(window_title, window_title);

    is->width = screen->w;
    is->height = screen->h;

    return 0;
}
1006
1007 /* display the current picture, if any */
1008 static void video_display(VideoState *is)
1009 {
1010     if(!screen)
1011         video_open(cur_stream);
1012     if (is->audio_st && is->show_audio)
1013         video_audio_display(is);
1014     else if (is->video_st)
1015         video_image_display(is);
1016 }
1017
1018 static int refresh_thread(void *opaque)
1019 {
1020     VideoState *is= opaque;
1021     while(!is->abort_request){
1022     SDL_Event event;
1023     event.type = FF_REFRESH_EVENT;
1024     event.user.data1 = opaque;
1025         if(!is->refresh){
1026             is->refresh=1;
1027     SDL_PushEvent(&event);
1028         }
1029         usleep(is->audio_st && is->show_audio ? rdftspeed*1000 : 5000); //FIXME ideally we should wait the correct time but SDLs event passing is so slow it would be silly
1030     }
1031     return 0;
1032 }
1033
1034 /* get the current audio clock value */
1035 static double get_audio_clock(VideoState *is)
1036 {
1037     double pts;
1038     int hw_buf_size, bytes_per_sec;
1039     pts = is->audio_clock;
1040     hw_buf_size = audio_write_get_buf_size(is);
1041     bytes_per_sec = 0;
1042     if (is->audio_st) {
1043         bytes_per_sec = is->audio_st->codec->sample_rate *
1044             2 * is->audio_st->codec->channels;
1045     }
1046     if (bytes_per_sec)
1047         pts -= (double)hw_buf_size / bytes_per_sec;
1048     return pts;
1049 }
1050
1051 /* get the current video clock value */
1052 static double get_video_clock(VideoState *is)
1053 {
1054     if (is->paused) {
1055         return is->video_current_pts;
1056     } else {
1057         return is->video_current_pts_drift + av_gettime() / 1000000.0;
1058     }
1059 }
1060
1061 /* get the current external clock value */
1062 static double get_external_clock(VideoState *is)
1063 {
1064     int64_t ti;
1065     ti = av_gettime();
1066     return is->external_clock + ((ti - is->external_clock_time) * 1e-6);
1067 }
1068
1069 /* get the current master clock value */
1070 static double get_master_clock(VideoState *is)
1071 {
1072     double val;
1073
1074     if (is->av_sync_type == AV_SYNC_VIDEO_MASTER) {
1075         if (is->video_st)
1076             val = get_video_clock(is);
1077         else
1078             val = get_audio_clock(is);
1079     } else if (is->av_sync_type == AV_SYNC_AUDIO_MASTER) {
1080         if (is->audio_st)
1081             val = get_audio_clock(is);
1082         else
1083             val = get_video_clock(is);
1084     } else {
1085         val = get_external_clock(is);
1086     }
1087     return val;
1088 }
1089
1090 /* seek in the stream */
1091 static void stream_seek(VideoState *is, int64_t pos, int64_t rel, int seek_by_bytes)
1092 {
1093     if (!is->seek_req) {
1094         is->seek_pos = pos;
1095         is->seek_rel = rel;
1096         is->seek_flags &= ~AVSEEK_FLAG_BYTE;
1097         if (seek_by_bytes)
1098             is->seek_flags |= AVSEEK_FLAG_BYTE;
1099         is->seek_req = 1;
1100     }
1101 }
1102
/* pause or resume the video (toggles is->paused).
 * On resume, the frame timer and the video clock drift are rebased so the
 * time spent paused does not count as elapsed playback time. */
static void stream_pause(VideoState *is)
{
    if (is->paused) {
        /* advance the frame timer by the wall-clock time spent paused */
        is->frame_timer += av_gettime() / 1000000.0 + is->video_current_pts_drift - is->video_current_pts;
        /* only rebase the pts if the demuxer actually supports pausing
           (read_pause did not return ENOSYS) */
        if(is->read_pause_return != AVERROR(ENOSYS)){
            is->video_current_pts = is->video_current_pts_drift + av_gettime() / 1000000.0;
        }
        /* recompute drift against the current wall clock */
        is->video_current_pts_drift = is->video_current_pts - av_gettime() / 1000000.0;
    }
    is->paused = !is->paused;
}
1115
1116 static double compute_target_time(double frame_current_pts, VideoState *is)
1117 {
1118     double delay, sync_threshold, diff;
1119
1120     /* compute nominal delay */
1121     delay = frame_current_pts - is->frame_last_pts;
1122     if (delay <= 0 || delay >= 10.0) {
1123         /* if incorrect delay, use previous one */
1124         delay = is->frame_last_delay;
1125     } else {
1126         is->frame_last_delay = delay;
1127     }
1128     is->frame_last_pts = frame_current_pts;
1129
1130     /* update delay to follow master synchronisation source */
1131     if (((is->av_sync_type == AV_SYNC_AUDIO_MASTER && is->audio_st) ||
1132          is->av_sync_type == AV_SYNC_EXTERNAL_CLOCK)) {
1133         /* if video is slave, we try to correct big delays by
1134            duplicating or deleting a frame */
1135         diff = get_video_clock(is) - get_master_clock(is);
1136
1137         /* skip or repeat frame. We take into account the
1138            delay to compute the threshold. I still don't know
1139            if it is the best guess */
1140         sync_threshold = FFMAX(AV_SYNC_THRESHOLD, delay);
1141         if (fabs(diff) < AV_NOSYNC_THRESHOLD) {
1142             if (diff <= -sync_threshold)
1143                 delay = 0;
1144             else if (diff >= sync_threshold)
1145                 delay = 2 * delay;
1146         }
1147     }
1148     is->frame_timer += delay;
1149 #if defined(DEBUG_SYNC)
1150     printf("video: delay=%0.3f actual_delay=%0.3f pts=%0.3f A-V=%f\n",
1151             delay, actual_delay, frame_current_pts, -diff);
1152 #endif
1153
1154     return is->frame_timer;
1155 }
1156
/* called to display each frame (from the main loop, on FF_REFRESH_EVENT).
 * Pops due pictures off the picture queue, drops late frames when
 * framedrop is enabled, expires/overlays subtitles, and repaints.
 * Also prints the periodic status line when show_status is set. */
static void video_refresh_timer(void *opaque)
{
    VideoState *is = opaque;
    VideoPicture *vp;

    SubPicture *sp, *sp2;

    if (is->video_st) {
retry:
        if (is->pictq_size == 0) {
            //nothing to do, no picture to display in the que
        } else {
            double time= av_gettime()/1000000.0;
            double next_target;
            /* dequeue the picture */
            vp = &is->pictq[is->pictq_rindex];

            /* not yet time to show this frame: keep it queued */
            if(time < vp->target_clock)
                return;
            /* update current video pts */
            is->video_current_pts = vp->pts;
            is->video_current_pts_drift = is->video_current_pts - time;
            is->video_current_pos = vp->pos;
            /* work out when the NEXT frame is due, to decide on dropping */
            if(is->pictq_size > 1){
                VideoPicture *nextvp= &is->pictq[(is->pictq_rindex+1)%VIDEO_PICTURE_QUEUE_SIZE];
                assert(nextvp->target_clock >= vp->target_clock);
                next_target= nextvp->target_clock;
            }else{
                /* estimate duration from the decoder clock */
                next_target= vp->target_clock + is->video_clock - vp->pts; //FIXME pass durations cleanly
            }
            if(framedrop && time > next_target){
                /* we are late: raise the decoder-side skip ratio, and drop
                   this picture outright if a newer one is available or we
                   are more than half a second behind */
                is->skip_frames *= 1.0 + FRAME_SKIP_FACTOR;
                if(is->pictq_size > 1 || time > next_target + 0.5){
                    /* update queue size and signal for next picture */
                    if (++is->pictq_rindex == VIDEO_PICTURE_QUEUE_SIZE)
                        is->pictq_rindex = 0;

                    SDL_LockMutex(is->pictq_mutex);
                    is->pictq_size--;
                    SDL_CondSignal(is->pictq_cond);
                    SDL_UnlockMutex(is->pictq_mutex);
                    goto retry;
                }
            }

            if(is->subtitle_st) {
                if (is->subtitle_stream_changed) {
                    /* subtitle stream switched: flush the whole queue */
                    SDL_LockMutex(is->subpq_mutex);

                    while (is->subpq_size) {
                        free_subpicture(&is->subpq[is->subpq_rindex]);

                        /* update queue size and signal for next picture */
                        if (++is->subpq_rindex == SUBPICTURE_QUEUE_SIZE)
                            is->subpq_rindex = 0;

                        is->subpq_size--;
                    }
                    is->subtitle_stream_changed = 0;

                    SDL_CondSignal(is->subpq_cond);
                    SDL_UnlockMutex(is->subpq_mutex);
                } else {
                    if (is->subpq_size > 0) {
                        sp = &is->subpq[is->subpq_rindex];

                        if (is->subpq_size > 1)
                            sp2 = &is->subpq[(is->subpq_rindex + 1) % SUBPICTURE_QUEUE_SIZE];
                        else
                            sp2 = NULL;

                        /* drop the head subtitle once it has expired or the
                           next one's display window has started */
                        if ((is->video_current_pts > (sp->pts + ((float) sp->sub.end_display_time / 1000)))
                                || (sp2 && is->video_current_pts > (sp2->pts + ((float) sp2->sub.start_display_time / 1000))))
                        {
                            free_subpicture(sp);

                            /* update queue size and signal for next picture */
                            if (++is->subpq_rindex == SUBPICTURE_QUEUE_SIZE)
                                is->subpq_rindex = 0;

                            SDL_LockMutex(is->subpq_mutex);
                            is->subpq_size--;
                            SDL_CondSignal(is->subpq_cond);
                            SDL_UnlockMutex(is->subpq_mutex);
                        }
                    }
                }
            }

            /* display picture */
            video_display(is);

            /* update queue size and signal for next picture */
            if (++is->pictq_rindex == VIDEO_PICTURE_QUEUE_SIZE)
                is->pictq_rindex = 0;

            SDL_LockMutex(is->pictq_mutex);
            is->pictq_size--;
            SDL_CondSignal(is->pictq_cond);
            SDL_UnlockMutex(is->pictq_mutex);
        }
    } else if (is->audio_st) {
        /* draw the next audio frame */

        /* if only audio stream, then display the audio bars (better
           than nothing, just to test the implementation */

        /* display picture */
        video_display(is);
    }
    if (show_status) {
        static int64_t last_time;
        int64_t cur_time;
        int aqsize, vqsize, sqsize;
        double av_diff;

        cur_time = av_gettime();
        /* rate-limit the status line to one update per 30ms */
        if (!last_time || (cur_time - last_time) >= 30000) {
            aqsize = 0;
            vqsize = 0;
            sqsize = 0;
            if (is->audio_st)
                aqsize = is->audioq.size;
            if (is->video_st)
                vqsize = is->videoq.size;
            if (is->subtitle_st)
                sqsize = is->subtitleq.size;
            av_diff = 0;
            if (is->audio_st && is->video_st)
                av_diff = get_audio_clock(is) - get_video_clock(is);
            printf("%7.2f A-V:%7.3f s:%3.1f aq=%5dKB vq=%5dKB sq=%5dB f=%"PRId64"/%"PRId64"   \r",
                   get_master_clock(is), av_diff, FFMAX(is->skip_frames-1, 0), aqsize / 1024, vqsize / 1024, sqsize, is->faulty_dts, is->faulty_pts);
            fflush(stdout);
            last_time = cur_time;
        }
    }
}
1295
/* allocate a picture (needs to do that in main thread to avoid
   potential locking problems).
   Runs on FF_ALLOC_EVENT; (re)creates the YUV overlay for the picture
   currently at the write index and wakes queue_picture(), which is
   blocked waiting for vp->allocated. */
static void alloc_picture(void *opaque)
{
    VideoState *is = opaque;
    VideoPicture *vp;

    vp = &is->pictq[is->pictq_windex];

    if (vp->bmp)
        SDL_FreeYUVOverlay(vp->bmp);

#if CONFIG_AVFILTER
    if (vp->picref)
        avfilter_unref_pic(vp->picref);
    vp->picref = NULL;

    /* dimensions come from the filter graph output, not the raw codec */
    vp->width   = is->out_video_filter->inputs[0]->w;
    vp->height  = is->out_video_filter->inputs[0]->h;
    vp->pix_fmt = is->out_video_filter->inputs[0]->format;
#else
    vp->width   = is->video_st->codec->width;
    vp->height  = is->video_st->codec->height;
    vp->pix_fmt = is->video_st->codec->pix_fmt;
#endif

    vp->bmp = SDL_CreateYUVOverlay(vp->width, vp->height,
                                   SDL_YV12_OVERLAY,
                                   screen);

    /* publish the allocation under the queue mutex and wake the decoder */
    SDL_LockMutex(is->pictq_mutex);
    vp->allocated = 1;
    SDL_CondSignal(is->pictq_cond);
    SDL_UnlockMutex(is->pictq_mutex);
}
1331
/**
 * Queue a decoded frame for display: wait for a free slot, make sure the
 * SDL overlay matches the current frame size (allocation is delegated to
 * the main thread via FF_ALLOC_EVENT), convert/copy the frame into the
 * overlay, and publish it with its target display time.
 *
 * @param pts the dts of the pkt / pts of the frame and guessed if not known
 * @param pos byte position of the frame's packet in the input file
 * @return 0 on success, -1 if the video queue was aborted
 */
static int queue_picture(VideoState *is, AVFrame *src_frame, double pts, int64_t pos)
{
    VideoPicture *vp;
    int dst_pix_fmt;
#if CONFIG_AVFILTER
    AVPicture pict_src;
#endif
    /* wait until we have space to put a new picture */
    SDL_LockMutex(is->pictq_mutex);

    /* display is keeping up: decay the skip ratio back towards 1 */
    if(is->pictq_size>=VIDEO_PICTURE_QUEUE_SIZE && !is->refresh)
        is->skip_frames= FFMAX(1.0 - FRAME_SKIP_FACTOR, is->skip_frames * (1.0-FRAME_SKIP_FACTOR));

    while (is->pictq_size >= VIDEO_PICTURE_QUEUE_SIZE &&
           !is->videoq.abort_request) {
        SDL_CondWait(is->pictq_cond, is->pictq_mutex);
    }
    SDL_UnlockMutex(is->pictq_mutex);

    if (is->videoq.abort_request)
        return -1;

    vp = &is->pictq[is->pictq_windex];

    /* alloc or resize hardware picture buffer */
    if (!vp->bmp ||
#if CONFIG_AVFILTER
        vp->width  != is->out_video_filter->inputs[0]->w ||
        vp->height != is->out_video_filter->inputs[0]->h) {
#else
        vp->width != is->video_st->codec->width ||
        vp->height != is->video_st->codec->height) {
#endif
        SDL_Event event;

        vp->allocated = 0;

        /* the allocation must be done in the main thread to avoid
           locking problems */
        event.type = FF_ALLOC_EVENT;
        event.user.data1 = is;
        SDL_PushEvent(&event);

        /* wait until the picture is allocated */
        SDL_LockMutex(is->pictq_mutex);
        while (!vp->allocated && !is->videoq.abort_request) {
            SDL_CondWait(is->pictq_cond, is->pictq_mutex);
        }
        SDL_UnlockMutex(is->pictq_mutex);

        if (is->videoq.abort_request)
            return -1;
    }

    /* if the frame is not skipped, then display it */
    if (vp->bmp) {
        AVPicture pict;
#if CONFIG_AVFILTER
        /* take ownership of the filter's picture reference */
        if(vp->picref)
            avfilter_unref_pic(vp->picref);
        vp->picref = src_frame->opaque;
#endif

        /* get a pointer on the bitmap */
        SDL_LockYUVOverlay (vp->bmp);

        dst_pix_fmt = PIX_FMT_YUV420P;
        memset(&pict,0,sizeof(AVPicture));
        /* SDL's YV12 stores V before U, hence planes 1 and 2 are swapped */
        pict.data[0] = vp->bmp->pixels[0];
        pict.data[1] = vp->bmp->pixels[2];
        pict.data[2] = vp->bmp->pixels[1];

        pict.linesize[0] = vp->bmp->pitches[0];
        pict.linesize[1] = vp->bmp->pitches[2];
        pict.linesize[2] = vp->bmp->pitches[1];

#if CONFIG_AVFILTER
        pict_src.data[0] = src_frame->data[0];
        pict_src.data[1] = src_frame->data[1];
        pict_src.data[2] = src_frame->data[2];

        pict_src.linesize[0] = src_frame->linesize[0];
        pict_src.linesize[1] = src_frame->linesize[1];
        pict_src.linesize[2] = src_frame->linesize[2];

        //FIXME use direct rendering
        av_picture_copy(&pict, &pict_src,
                        vp->pix_fmt, vp->width, vp->height);
#else
        /* convert to YUV420P for the overlay, reusing the cached context */
        sws_flags = av_get_int(sws_opts, "sws_flags", NULL);
        is->img_convert_ctx = sws_getCachedContext(is->img_convert_ctx,
            vp->width, vp->height, vp->pix_fmt, vp->width, vp->height,
            dst_pix_fmt, sws_flags, NULL, NULL, NULL);
        if (is->img_convert_ctx == NULL) {
            fprintf(stderr, "Cannot initialize the conversion context\n");
            exit(1);
        }
        sws_scale(is->img_convert_ctx, src_frame->data, src_frame->linesize,
                  0, vp->height, pict.data, pict.linesize);
#endif
        /* update the bitmap content */
        SDL_UnlockYUVOverlay(vp->bmp);

        vp->pts = pts;
        vp->pos = pos;

        /* now we can update the picture count */
        if (++is->pictq_windex == VIDEO_PICTURE_QUEUE_SIZE)
            is->pictq_windex = 0;
        SDL_LockMutex(is->pictq_mutex);
        vp->target_clock= compute_target_time(vp->pts, is);

        is->pictq_size++;
        SDL_UnlockMutex(is->pictq_mutex);
    }
    return 0;
}
1453
/**
 * compute the exact PTS for the picture if it is omitted in the stream,
 * maintain is->video_clock, and hand the frame to queue_picture().
 * @param pts1 the dts of the pkt / pts of the frame (0 means unknown)
 * @param pos byte position of the packet in the input file
 * @return queue_picture()'s result: 0 on success, -1 on abort
 */
static int output_picture2(VideoState *is, AVFrame *src_frame, double pts1, int64_t pos)
{
    double frame_delay, pts;

    pts = pts1;

    if (pts != 0) {
        /* update video clock with pts, if present */
        is->video_clock = pts;
    } else {
        /* no pts in the stream: extrapolate from the running clock */
        pts = is->video_clock;
    }
    /* update video clock for next frame */
    frame_delay = av_q2d(is->video_st->codec->time_base);
    /* for MPEG2, the frame can be repeated, so we update the
       clock accordingly */
    frame_delay += src_frame->repeat_pict * (frame_delay * 0.5);
    is->video_clock += frame_delay;

#if defined(DEBUG_SYNC) && 0
    printf("frame_type=%c clock=%0.3f pts=%0.3f\n",
           av_get_pict_type_char(src_frame->pict_type), pts, pts1);
#endif
    return queue_picture(is, src_frame, pts, pos);
}
1483
/* Pull one packet off the video queue and decode it.
 * Handles the flush packet (seek): flushes codec buffers, drains the
 * picture queue and resets the timing/fault-detection state.
 * On a decoded picture, selects *pts from the frame's reordered pts or
 * the packet dts depending on decoder_reorder_pts and the observed
 * pts/dts fault counters.
 * @return 1 when a picture is ready (and not skipped for frame-dropping),
 *         0 to call again, -1 on queue abort. Caller frees *pkt. */
static int get_video_frame(VideoState *is, AVFrame *frame, int64_t *pts, AVPacket *pkt)
{
    int len1, got_picture, i;

        if (packet_queue_get(&is->videoq, pkt, 1) < 0)
            return -1;

        if(pkt->data == flush_pkt.data){
            avcodec_flush_buffers(is->video_st->codec);

            SDL_LockMutex(is->pictq_mutex);
            //Make sure there are no long delay timers (ideally we should just flush the que but thats harder)
            for(i=0; i<VIDEO_PICTURE_QUEUE_SIZE; i++){
                is->pictq[i].target_clock= 0;
            }
            /* wait for the refresh side to consume everything queued */
            while (is->pictq_size && !is->videoq.abort_request) {
                SDL_CondWait(is->pictq_cond, is->pictq_mutex);
            }
            is->video_current_pos= -1;
            SDL_UnlockMutex(is->pictq_mutex);

            /* reset timestamp fault detection and frame timing */
            is->last_dts_for_fault_detection=
            is->last_pts_for_fault_detection= INT64_MIN;
            is->frame_last_pts= AV_NOPTS_VALUE;
            is->frame_last_delay = 0;
            is->frame_timer = (double)av_gettime() / 1000000.0;
            is->skip_frames= 1;
            is->skip_frames_index= 0;
            return 0;
        }

        /* NOTE: ipts is the PTS of the _first_ picture beginning in
           this packet, if any */
        /* smuggle the packet pts through the decoder's reordering */
        is->video_st->codec->reordered_opaque= pkt->pts;
        len1 = avcodec_decode_video2(is->video_st->codec,
                                    frame, &got_picture,
                                    pkt);

        if (got_picture) {
            /* count non-monotonic dts/pts to decide which is trustworthy */
            if(pkt->dts != AV_NOPTS_VALUE){
                is->faulty_dts += pkt->dts <= is->last_dts_for_fault_detection;
                is->last_dts_for_fault_detection= pkt->dts;
            }
            if(frame->reordered_opaque != AV_NOPTS_VALUE){
                is->faulty_pts += frame->reordered_opaque <= is->last_pts_for_fault_detection;
                is->last_pts_for_fault_detection= frame->reordered_opaque;
            }
        }

        /* prefer the reordered pts when forced, when it looks more
           reliable than dts, or when dts is simply absent */
        if(   (   decoder_reorder_pts==1
               || (decoder_reorder_pts && is->faulty_pts<is->faulty_dts)
               || pkt->dts == AV_NOPTS_VALUE)
           && frame->reordered_opaque != AV_NOPTS_VALUE)
            *pts= frame->reordered_opaque;
        else if(pkt->dts != AV_NOPTS_VALUE)
            *pts= pkt->dts;
        else
            *pts= 0;

//            if (len1 < 0)
//                break;
    if (got_picture){
        /* frame-dropping: only report every skip_frames-th picture */
        is->skip_frames_index += 1;
        if(is->skip_frames_index >= is->skip_frames){
            is->skip_frames_index -= FFMAX(is->skip_frames, 1.0);
            return 1;
        }

    }
    return 0;
}
1555
1556 #if CONFIG_AVFILTER
/* private state for the ffplay_input source filter */
typedef struct {
    VideoState *is;   /* player state the frames are pulled from */
    AVFrame *frame;   /* scratch frame filled by get_video_frame() */
} FilterPriv;
1561
/* init callback of the ffplay_input filter: opaque must be the VideoState.
 * Returns 0 on success, -1 if no opaque was supplied. */
static int input_init(AVFilterContext *ctx, const char *args, void *opaque)
{
    FilterPriv *priv = ctx->priv;
    if(!opaque) return -1;

    priv->is = opaque;
    priv->frame = avcodec_alloc_frame();

    return 0;
}
1572
/* uninit callback: release the scratch frame allocated in input_init() */
static void input_uninit(AVFilterContext *ctx)
{
    FilterPriv *priv = ctx->priv;
    av_free(priv->frame);
}
1578
/* request_frame callback: decode the next picture via get_video_frame(),
 * copy it into a new filter buffer and push it downstream.
 * Returns 0 on success, -1 when decoding fails/aborts. */
static int input_request_frame(AVFilterLink *link)
{
    FilterPriv *priv = link->src->priv;
    AVFilterPicRef *picref;
    int64_t pts = 0;
    AVPacket pkt;
    int ret;

    /* loop until a displayable picture comes out (ret==1) or error */
    while (!(ret = get_video_frame(priv->is, priv->frame, &pts, &pkt)))
        av_free_packet(&pkt);
    if (ret < 0)
        return -1;

    /* FIXME: until I figure out how to hook everything up to the codec
     * right, we're just copying the entire frame. */
    picref = avfilter_get_video_buffer(link, AV_PERM_WRITE, link->w, link->h);
    av_picture_copy((AVPicture *)&picref->data, (AVPicture *)priv->frame,
                    picref->pic->format, link->w, link->h);
    av_free_packet(&pkt);

    picref->pts = pts;
    /* NOTE(review): pkt.pos is read after av_free_packet(&pkt) above;
       this appears to rely on av_free_packet leaving pos intact — confirm,
       or move the free below this read */
    picref->pos = pkt.pos;
    picref->pixel_aspect = priv->is->video_st->codec->sample_aspect_ratio;
    avfilter_start_frame(link, avfilter_ref_pic(picref, ~0));
    avfilter_draw_slice(link, 0, link->h, 1);
    avfilter_end_frame(link);
    avfilter_unref_pic(picref);

    return 0;
}
1609
/* advertise exactly the decoder's pixel format as the only one this
 * source can produce */
static int input_query_formats(AVFilterContext *ctx)
{
    FilterPriv *priv = ctx->priv;
    enum PixelFormat pix_fmts[] = {
        priv->is->video_st->codec->pix_fmt, PIX_FMT_NONE
    };

    avfilter_set_common_formats(ctx, avfilter_make_format_list(pix_fmts));
    return 0;
}
1620
/* configure the output link with the decoder's frame dimensions */
static int input_config_props(AVFilterLink *link)
{
    FilterPriv *priv  = link->src->priv;
    AVCodecContext *c = priv->is->video_st->codec;

    link->w = c->width;
    link->h = c->height;

    return 0;
}
1631
/* source filter that injects decoded frames from the player into the
 * filter graph (no inputs, one video output) */
static AVFilter input_filter =
{
    .name      = "ffplay_input",

    .priv_size = sizeof(FilterPriv),

    .init      = input_init,
    .uninit    = input_uninit,

    .query_formats = input_query_formats,

    .inputs    = (AVFilterPad[]) {{ .name = NULL }},
    .outputs   = (AVFilterPad[]) {{ .name = "default",
                                    .type = CODEC_TYPE_VIDEO,
                                    .request_frame = input_request_frame,
                                    .config_props  = input_config_props, },
                                  { .name = NULL }},
};
1650
/* end_frame callback of the sink: intentionally a no-op — the frame is
 * consumed by get_filtered_video_frame() via cur_pic instead */
static void output_end_frame(AVFilterLink *link)
{
}
1654
/* the sink only accepts YUV420P, matching the SDL YV12 overlay */
static int output_query_formats(AVFilterContext *ctx)
{
    enum PixelFormat pix_fmts[] = { PIX_FMT_YUV420P, PIX_FMT_NONE };

    avfilter_set_common_formats(ctx, avfilter_make_format_list(pix_fmts));
    return 0;
}
1662
/* Pull one frame out of the filter graph's sink.
 * On success fills frame (borrowing the picref's planes; the ref is
 * stashed in frame->opaque for later release), *pts and *pos.
 * @return 1 on success, -1 on failure. */
static int get_filtered_video_frame(AVFilterContext *ctx, AVFrame *frame,
                                    int64_t *pts, int64_t *pos)
{
    AVFilterPicRef *pic;

    if(avfilter_request_frame(ctx->inputs[0]))
        return -1;
    if(!(pic = ctx->inputs[0]->cur_pic))
        return -1;
    /* take ownership of the picture reference */
    ctx->inputs[0]->cur_pic = NULL;

    frame->opaque = pic;
    *pts          = pic->pts;
    *pos          = pic->pos;

    memcpy(frame->data,     pic->data,     sizeof(frame->data));
    memcpy(frame->linesize, pic->linesize, sizeof(frame->linesize));

    return 1;
}
1683
/* sink filter at the end of the graph; frames are fetched from it with
 * get_filtered_video_frame() (one video input, no outputs) */
static AVFilter output_filter =
{
    .name      = "ffplay_output",

    .query_formats = output_query_formats,

    .inputs    = (AVFilterPad[]) {{ .name          = "default",
                                    .type          = CODEC_TYPE_VIDEO,
                                    .end_frame     = output_end_frame,
                                    .min_perms     = AV_PERM_READ, },
                                  { .name = NULL }},
    .outputs   = (AVFilterPad[]) {{ .name = NULL }},
};
1697 #endif  /* CONFIG_AVFILTER */
1698
/* Video decoding thread: optionally builds the avfilter graph
 * (ffplay_input -> user filters -> ffplay_output), then loops pulling
 * frames (through the graph or straight from the decoder) and queues
 * them for display via output_picture2(). Honors pause and single-step. */
static int video_thread(void *arg)
{
    VideoState *is = arg;
    AVFrame *frame= avcodec_alloc_frame();
    int64_t pts_int, pos;
    double pts;
    int ret;

#if CONFIG_AVFILTER
    AVFilterContext *filt_src = NULL, *filt_out = NULL;
    AVFilterGraph *graph = av_mallocz(sizeof(AVFilterGraph));
    graph->scale_sws_opts = av_strdup("sws_flags=bilinear");

    if(!(filt_src = avfilter_open(&input_filter,  "src")))   goto the_end;
    if(!(filt_out = avfilter_open(&output_filter, "out")))   goto the_end;

    if(avfilter_init_filter(filt_src, NULL, is))             goto the_end;
    if(avfilter_init_filter(filt_out, NULL, frame))          goto the_end;


    if(vfilters) {
        /* splice the user-specified filter chain between src and out */
        AVFilterInOut *outputs = av_malloc(sizeof(AVFilterInOut));
        AVFilterInOut *inputs  = av_malloc(sizeof(AVFilterInOut));

        outputs->name    = av_strdup("in");
        outputs->filter  = filt_src;
        outputs->pad_idx = 0;
        outputs->next    = NULL;

        inputs->name    = av_strdup("out");
        inputs->filter  = filt_out;
        inputs->pad_idx = 0;
        inputs->next    = NULL;

        if (avfilter_graph_parse(graph, vfilters, inputs, outputs, NULL) < 0)
            goto the_end;
        av_freep(&vfilters);
    } else {
        /* no user filters: connect source directly to sink */
        if(avfilter_link(filt_src, 0, filt_out, 0) < 0)          goto the_end;
    }
    avfilter_graph_add_filter(graph, filt_src);
    avfilter_graph_add_filter(graph, filt_out);

    if(avfilter_graph_check_validity(graph, NULL))           goto the_end;
    if(avfilter_graph_config_formats(graph, NULL))           goto the_end;
    if(avfilter_graph_config_links(graph, NULL))             goto the_end;

    is->out_video_filter = filt_out;
#endif

    for(;;) {
#if !CONFIG_AVFILTER
        AVPacket pkt;
#endif
        /* busy-wait (10ms steps) while paused */
        while (is->paused && !is->videoq.abort_request)
            SDL_Delay(10);
#if CONFIG_AVFILTER
        ret = get_filtered_video_frame(filt_out, frame, &pts_int, &pos);
#else
        ret = get_video_frame(is, frame, &pts_int, &pkt);
#endif

        if (ret < 0) goto the_end;

        /* ret == 0: no picture this round (flush or frame-drop) */
        if (!ret)
            continue;

        /* convert stream timebase units to seconds */
        pts = pts_int*av_q2d(is->video_st->time_base);

#if CONFIG_AVFILTER
        ret = output_picture2(is, frame, pts, pos);
#else
        ret = output_picture2(is, frame, pts,  pkt.pos);
        av_free_packet(&pkt);
#endif
        if (ret < 0)
            goto the_end;

        /* single-step mode: pause again after each displayed frame */
        if (step)
            if (cur_stream)
                stream_pause(cur_stream);
    }
 the_end:
#if CONFIG_AVFILTER
    avfilter_graph_destroy(graph);
    av_freep(&graph);
#endif
    av_free(frame);
    return 0;
}
1789
/* Subtitle decoding thread: pulls packets off the subtitle queue, decodes
 * them, converts bitmap-subtitle palettes from RGBA to YUVA (so they can
 * be blended onto the YUV overlay), and queues them in subpq. */
static int subtitle_thread(void *arg)
{
    VideoState *is = arg;
    SubPicture *sp;
    AVPacket pkt1, *pkt = &pkt1;
    int len1, got_subtitle;
    double pts;
    int i, j;
    int r, g, b, y, u, v, a;

    for(;;) {
        /* busy-wait (10ms steps) while paused */
        while (is->paused && !is->subtitleq.abort_request) {
            SDL_Delay(10);
        }
        if (packet_queue_get(&is->subtitleq, pkt, 1) < 0)
            break;

        /* seek happened: just reset the decoder */
        if(pkt->data == flush_pkt.data){
            avcodec_flush_buffers(is->subtitle_st->codec);
            continue;
        }
        /* wait for a free slot in the subtitle queue */
        SDL_LockMutex(is->subpq_mutex);
        while (is->subpq_size >= SUBPICTURE_QUEUE_SIZE &&
               !is->subtitleq.abort_request) {
            SDL_CondWait(is->subpq_cond, is->subpq_mutex);
        }
        SDL_UnlockMutex(is->subpq_mutex);

        if (is->subtitleq.abort_request)
            goto the_end;

        sp = &is->subpq[is->subpq_windex];

       /* NOTE: ipts is the PTS of the _first_ picture beginning in
           this packet, if any */
        pts = 0;
        if (pkt->pts != AV_NOPTS_VALUE)
            pts = av_q2d(is->subtitle_st->time_base)*pkt->pts;

        len1 = avcodec_decode_subtitle2(is->subtitle_st->codec,
                                    &sp->sub, &got_subtitle,
                                    pkt);
//            if (len1 < 0)
//                break;
        /* format == 0 means bitmap subtitles with a palette to convert */
        if (got_subtitle && sp->sub.format == 0) {
            sp->pts = pts;

            /* convert each rect's palette from RGBA to YUVA in place */
            for (i = 0; i < sp->sub.num_rects; i++)
            {
                for (j = 0; j < sp->sub.rects[i]->nb_colors; j++)
                {
                    RGBA_IN(r, g, b, a, (uint32_t*)sp->sub.rects[i]->pict.data[1] + j);
                    y = RGB_TO_Y_CCIR(r, g, b);
                    u = RGB_TO_U_CCIR(r, g, b, 0);
                    v = RGB_TO_V_CCIR(r, g, b, 0);
                    YUVA_OUT((uint32_t*)sp->sub.rects[i]->pict.data[1] + j, y, u, v, a);
                }
            }

            /* now we can update the picture count */
            if (++is->subpq_windex == SUBPICTURE_QUEUE_SIZE)
                is->subpq_windex = 0;
            SDL_LockMutex(is->subpq_mutex);
            is->subpq_size++;
            SDL_UnlockMutex(is->subpq_mutex);
        }
        av_free_packet(pkt);
//        if (step)
//            if (cur_stream)
//                stream_pause(cur_stream);
    }
 the_end:
    return 0;
}
1864
1865 /* copy samples for viewing in editor window */
1866 static void update_sample_display(VideoState *is, short *samples, int samples_size)
1867 {
1868     int size, len, channels;
1869
1870     channels = is->audio_st->codec->channels;
1871
1872     size = samples_size / sizeof(short);
1873     while (size > 0) {
1874         len = SAMPLE_ARRAY_SIZE - is->sample_array_index;
1875         if (len > size)
1876             len = size;
1877         memcpy(is->sample_array + is->sample_array_index, samples, len * sizeof(short));
1878         samples += len;
1879         is->sample_array_index += len;
1880         if (is->sample_array_index >= SAMPLE_ARRAY_SIZE)
1881             is->sample_array_index = 0;
1882         size -= len;
1883     }
1884 }
1885
1886 /* return the new audio buffer size (samples can be added or deleted
1887    to get better sync if video or external master clock) */
1888 static int synchronize_audio(VideoState *is, short *samples,
1889                              int samples_size1, double pts)
1890 {
1891     int n, samples_size;
1892     double ref_clock;
1893
1894     n = 2 * is->audio_st->codec->channels;
1895     samples_size = samples_size1;
1896
1897     /* if not master, then we try to remove or add samples to correct the clock */
1898     if (((is->av_sync_type == AV_SYNC_VIDEO_MASTER && is->video_st) ||
1899          is->av_sync_type == AV_SYNC_EXTERNAL_CLOCK)) {
1900         double diff, avg_diff;
1901         int wanted_size, min_size, max_size, nb_samples;
1902
1903         ref_clock = get_master_clock(is);
1904         diff = get_audio_clock(is) - ref_clock;
1905
1906         if (diff < AV_NOSYNC_THRESHOLD) {
1907             is->audio_diff_cum = diff + is->audio_diff_avg_coef * is->audio_diff_cum;
1908             if (is->audio_diff_avg_count < AUDIO_DIFF_AVG_NB) {
1909                 /* not enough measures to have a correct estimate */
1910                 is->audio_diff_avg_count++;
1911             } else {
1912                 /* estimate the A-V difference */
1913                 avg_diff = is->audio_diff_cum * (1.0 - is->audio_diff_avg_coef);
1914
1915                 if (fabs(avg_diff) >= is->audio_diff_threshold) {
1916                     wanted_size = samples_size + ((int)(diff * is->audio_st->codec->sample_rate) * n);
1917                     nb_samples = samples_size / n;
1918
1919                     min_size = ((nb_samples * (100 - SAMPLE_CORRECTION_PERCENT_MAX)) / 100) * n;
1920                     max_size = ((nb_samples * (100 + SAMPLE_CORRECTION_PERCENT_MAX)) / 100) * n;
1921                     if (wanted_size < min_size)
1922                         wanted_size = min_size;
1923                     else if (wanted_size > max_size)
1924                         wanted_size = max_size;
1925
1926                     /* add or remove samples to correction the synchro */
1927                     if (wanted_size < samples_size) {
1928                         /* remove samples */
1929                         samples_size = wanted_size;
1930                     } else if (wanted_size > samples_size) {
1931                         uint8_t *samples_end, *q;
1932                         int nb;
1933
1934                         /* add samples */
1935                         nb = (samples_size - wanted_size);
1936                         samples_end = (uint8_t *)samples + samples_size - n;
1937                         q = samples_end + n;
1938                         while (nb > 0) {
1939                             memcpy(q, samples_end, n);
1940                             q += n;
1941                             nb -= n;
1942                         }
1943                         samples_size = wanted_size;
1944                     }
1945                 }
1946 #if 0
1947                 printf("diff=%f adiff=%f sample_diff=%d apts=%0.3f vpts=%0.3f %f\n",
1948                        diff, avg_diff, samples_size - samples_size1,
1949                        is->audio_clock, is->video_clock, is->audio_diff_threshold);
1950 #endif
1951             }
1952         } else {
1953             /* too big difference : may be initial PTS errors, so
1954                reset A-V filter */
1955             is->audio_diff_avg_count = 0;
1956             is->audio_diff_cum = 0;
1957         }
1958     }
1959
1960     return samples_size;
1961 }
1962
/* decode one audio frame and returns its uncompressed size.
   Output samples (always converted to S16) are left in is->audio_buf;
   *pts_ptr receives the presentation time of the decoded data.
   Returns -1 on pause/abort or when no more packets are available. */
static int audio_decode_frame(VideoState *is, double *pts_ptr)
{
    AVPacket *pkt_temp = &is->audio_pkt_temp; /* consumption cursor into the current packet */
    AVPacket *pkt = &is->audio_pkt;
    AVCodecContext *dec= is->audio_st->codec;
    int n, len1, data_size;
    double pts;

    for(;;) {
        /* NOTE: the audio packet can contain several frames */
        while (pkt_temp->size > 0) {
            data_size = sizeof(is->audio_buf1);
            len1 = avcodec_decode_audio3(dec,
                                        (int16_t *)is->audio_buf1, &data_size,
                                        pkt_temp);
            if (len1 < 0) {
                /* if error, we skip the frame */
                pkt_temp->size = 0;
                break;
            }

            /* advance past the bytes the decoder consumed */
            pkt_temp->data += len1;
            pkt_temp->size -= len1;
            /* input consumed but no samples produced yet: keep decoding */
            if (data_size <= 0)
                continue;

            /* lazily (re)create the converter whenever the decoder's output
               sample format differs from the S16 we feed to SDL */
            if (dec->sample_fmt != is->audio_src_fmt) {
                if (is->reformat_ctx)
                    av_audio_convert_free(is->reformat_ctx);
                is->reformat_ctx= av_audio_convert_alloc(SAMPLE_FMT_S16, 1,
                                                         dec->sample_fmt, 1, NULL, 0);
                if (!is->reformat_ctx) {
                    fprintf(stderr, "Cannot convert %s sample format to %s sample format\n",
                        avcodec_get_sample_fmt_name(dec->sample_fmt),
                        avcodec_get_sample_fmt_name(SAMPLE_FMT_S16));
                        break;
                }
                is->audio_src_fmt= dec->sample_fmt;
            }

            if (is->reformat_ctx) {
                /* convert audio_buf1 -> audio_buf2 (interleaved, 2 bytes/sample out) */
                const void *ibuf[6]= {is->audio_buf1};
                void *obuf[6]= {is->audio_buf2};
                int istride[6]= {av_get_bits_per_sample_format(dec->sample_fmt)/8};
                int ostride[6]= {2};
                int len= data_size/istride[0];
                if (av_audio_convert(is->reformat_ctx, obuf, ostride, ibuf, istride, len)<0) {
                    printf("av_audio_convert() failed\n");
                    break;
                }
                is->audio_buf= is->audio_buf2;
                /* FIXME: existing code assume that data_size equals framesize*channels*2
                          remove this legacy cruft */
                data_size= len*2;
            }else{
                is->audio_buf= is->audio_buf1;
            }

            /* if no pts, then compute it */
            pts = is->audio_clock;
            *pts_ptr = pts;
            n = 2 * dec->channels;
            /* advance the audio clock by the duration of the decoded data */
            is->audio_clock += (double)data_size /
                (double)(n * dec->sample_rate);
#if defined(DEBUG_SYNC)
            {
                static double last_clock;
                printf("audio: delay=%0.3f clock=%0.3f pts=%0.3f\n",
                       is->audio_clock - last_clock,
                       is->audio_clock, pts);
                last_clock = is->audio_clock;
            }
#endif
            return data_size;
        }

        /* free the current packet */
        if (pkt->data)
            av_free_packet(pkt);

        if (is->paused || is->audioq.abort_request) {
            return -1;
        }

        /* read next packet */
        if (packet_queue_get(&is->audioq, pkt, 1) < 0)
            return -1;
        /* a flush packet signals a seek: reset the decoder's internal state */
        if(pkt->data == flush_pkt.data){
            avcodec_flush_buffers(dec);
            continue;
        }

        pkt_temp->data = pkt->data;
        pkt_temp->size = pkt->size;

        /* if update the audio clock with the pts */
        if (pkt->pts != AV_NOPTS_VALUE) {
            is->audio_clock = av_q2d(is->audio_st->time_base)*pkt->pts;
        }
    }
}
2065
2066 /* get the current audio output buffer size, in samples. With SDL, we
2067    cannot have a precise information */
2068 static int audio_write_get_buf_size(VideoState *is)
2069 {
2070     return is->audio_buf_size - is->audio_buf_index;
2071 }
2072
2073
/* prepare a new audio buffer.
   SDL audio thread callback: fills 'stream' with exactly 'len' bytes,
   decoding new frames as needed and outputting silence on decode failure. */
static void sdl_audio_callback(void *opaque, Uint8 *stream, int len)
{
    VideoState *is = opaque;
    int audio_size, len1;
    double pts;

    /* remember when SDL asked for data (used by the audio clock code) */
    audio_callback_time = av_gettime();

    while (len > 0) {
        /* refill is->audio_buf once it has been fully copied out */
        if (is->audio_buf_index >= is->audio_buf_size) {
           audio_size = audio_decode_frame(is, &pts);
           if (audio_size < 0) {
                /* if error, just output silence */
               is->audio_buf = is->audio_buf1;
               is->audio_buf_size = 1024;
               memset(is->audio_buf, 0, is->audio_buf_size);
           } else {
               if (is->show_audio)
                   update_sample_display(is, (int16_t *)is->audio_buf, audio_size);
               /* let the sync code shrink/grow the buffer to correct A-V drift */
               audio_size = synchronize_audio(is, (int16_t *)is->audio_buf, audio_size,
                                              pts);
               is->audio_buf_size = audio_size;
           }
           is->audio_buf_index = 0;
        }
        /* copy as much as fits into the remaining part of SDL's buffer */
        len1 = is->audio_buf_size - is->audio_buf_index;
        if (len1 > len)
            len1 = len;
        memcpy(stream, (uint8_t *)is->audio_buf + is->audio_buf_index, len1);
        len -= len1;
        stream += len1;
        is->audio_buf_index += len1;
    }
}
2109
/* open a given stream. Return 0 if OK.
   Finds and opens the decoder for ic->streams[stream_index], applies the
   global user options, and starts the matching consumer (SDL audio device,
   video thread or subtitle thread). */
static int stream_component_open(VideoState *is, int stream_index)
{
    AVFormatContext *ic = is->ic;
    AVCodecContext *avctx;
    AVCodec *codec;
    SDL_AudioSpec wanted_spec, spec;

    if (stream_index < 0 || stream_index >= ic->nb_streams)
        return -1;
    avctx = ic->streams[stream_index]->codec;

    /* prepare audio output */
    if (avctx->codec_type == CODEC_TYPE_AUDIO) {
        /* ask the decoder to downmix to at most 2 channels */
        if (avctx->channels > 0) {
            avctx->request_channels = FFMIN(2, avctx->channels);
        } else {
            avctx->request_channels = 2;
        }
    }

    codec = avcodec_find_decoder(avctx->codec_id);
    /* propagate the command-line decoding options to the codec context */
    avctx->debug_mv = debug_mv;
    avctx->debug = debug;
    avctx->workaround_bugs = workaround_bugs;
    avctx->lowres = lowres;
    if(lowres) avctx->flags |= CODEC_FLAG_EMU_EDGE;
    avctx->idct_algo= idct;
    if(fast) avctx->flags2 |= CODEC_FLAG2_FAST;
    avctx->skip_frame= skip_frame;
    avctx->skip_idct= skip_idct;
    avctx->skip_loop_filter= skip_loop_filter;
    avctx->error_recognition= error_recognition;
    avctx->error_concealment= error_concealment;
    avcodec_thread_init(avctx, thread_count);

    set_context_opts(avctx, avcodec_opts[avctx->codec_type], 0);

    if (!codec ||
        avcodec_open(avctx, codec) < 0)
        return -1;

    /* prepare audio output */
    if (avctx->codec_type == CODEC_TYPE_AUDIO) {
        wanted_spec.freq = avctx->sample_rate;
        wanted_spec.format = AUDIO_S16SYS;
        wanted_spec.channels = avctx->channels;
        wanted_spec.silence = 0;
        wanted_spec.samples = SDL_AUDIO_BUFFER_SIZE;
        wanted_spec.callback = sdl_audio_callback;
        wanted_spec.userdata = is;
        if (SDL_OpenAudio(&wanted_spec, &spec) < 0) {
            fprintf(stderr, "SDL_OpenAudio: %s\n", SDL_GetError());
            return -1;
        }
        is->audio_hw_buf_size = spec.size;
        /* output of the decode path is S16 until a converter is set up */
        is->audio_src_fmt= SAMPLE_FMT_S16;
    }

    ic->streams[stream_index]->discard = AVDISCARD_DEFAULT;
    switch(avctx->codec_type) {
    case CODEC_TYPE_AUDIO:
        is->audio_stream = stream_index;
        is->audio_st = ic->streams[stream_index];
        is->audio_buf_size = 0;
        is->audio_buf_index = 0;

        /* init averaging filter */
        is->audio_diff_avg_coef = exp(log(0.01) / AUDIO_DIFF_AVG_NB);
        is->audio_diff_avg_count = 0;
        /* since we do not have a precise anough audio fifo fullness,
           we correct audio sync only if larger than this threshold */
        is->audio_diff_threshold = 2.0 * SDL_AUDIO_BUFFER_SIZE / avctx->sample_rate;

        memset(&is->audio_pkt, 0, sizeof(is->audio_pkt));
        packet_queue_init(&is->audioq);
        SDL_PauseAudio(0);
        break;
    case CODEC_TYPE_VIDEO:
        is->video_stream = stream_index;
        is->video_st = ic->streams[stream_index];

//        is->video_current_pts_time = av_gettime();

        packet_queue_init(&is->videoq);
        is->video_tid = SDL_CreateThread(video_thread, is);
        break;
    case CODEC_TYPE_SUBTITLE:
        is->subtitle_stream = stream_index;
        is->subtitle_st = ic->streams[stream_index];
        packet_queue_init(&is->subtitleq);

        is->subtitle_tid = SDL_CreateThread(subtitle_thread, is);
        break;
    default:
        break;
    }
    return 0;
}
2209
/* close the decoder and consumer for one stream: abort its packet queue,
   unblock and join the worker thread (or close the SDL audio device),
   then release the codec and reset the per-stream state in 'is' */
static void stream_component_close(VideoState *is, int stream_index)
{
    AVFormatContext *ic = is->ic;
    AVCodecContext *avctx;

    if (stream_index < 0 || stream_index >= ic->nb_streams)
        return;
    avctx = ic->streams[stream_index]->codec;

    switch(avctx->codec_type) {
    case CODEC_TYPE_AUDIO:
        packet_queue_abort(&is->audioq);

        /* stops the SDL audio callback, so audio_buf is no longer touched */
        SDL_CloseAudio();

        packet_queue_end(&is->audioq);
        if (is->reformat_ctx)
            av_audio_convert_free(is->reformat_ctx);
        is->reformat_ctx = NULL;
        break;
    case CODEC_TYPE_VIDEO:
        packet_queue_abort(&is->videoq);

        /* note: we also signal this mutex to make sure we deblock the
           video thread in all cases */
        SDL_LockMutex(is->pictq_mutex);
        SDL_CondSignal(is->pictq_cond);
        SDL_UnlockMutex(is->pictq_mutex);

        SDL_WaitThread(is->video_tid, NULL);

        packet_queue_end(&is->videoq);
        break;
    case CODEC_TYPE_SUBTITLE:
        packet_queue_abort(&is->subtitleq);

        /* note: we also signal this mutex to make sure we deblock the
           video thread in all cases */
        SDL_LockMutex(is->subpq_mutex);
        is->subtitle_stream_changed = 1;

        SDL_CondSignal(is->subpq_cond);
        SDL_UnlockMutex(is->subpq_mutex);

        SDL_WaitThread(is->subtitle_tid, NULL);

        packet_queue_end(&is->subtitleq);
        break;
    default:
        break;
    }

    /* stop the demuxer from queuing more packets for this stream */
    ic->streams[stream_index]->discard = AVDISCARD_ALL;
    avcodec_close(avctx);
    switch(avctx->codec_type) {
    case CODEC_TYPE_AUDIO:
        is->audio_st = NULL;
        is->audio_stream = -1;
        break;
    case CODEC_TYPE_VIDEO:
        is->video_st = NULL;
        is->video_stream = -1;
        break;
    case CODEC_TYPE_SUBTITLE:
        is->subtitle_st = NULL;
        is->subtitle_stream = -1;
        break;
    default:
        break;
    }
}
2281
2282 /* since we have only one decoding thread, we can use a global
2283    variable instead of a thread local variable */
2284 static VideoState *global_video_state;
2285
2286 static int decode_interrupt_cb(void)
2287 {
2288     return (global_video_state && global_video_state->abort_request);
2289 }
2290
/* this thread gets the stream from the disk or the network.
   It opens the input, selects and opens the best audio/video/subtitle
   streams, then loops reading packets into the per-stream queues until
   abort is requested, EOF (with -autoexit) or a read error occurs. */
static int decode_thread(void *arg)
{
    VideoState *is = arg;
    AVFormatContext *ic;
    int err, i, ret;
    int st_index[CODEC_TYPE_NB];          /* chosen stream index per media type, -1 if none */
    int st_count[CODEC_TYPE_NB]={0};      /* how many streams of each type seen so far */
    int st_best_packet_count[CODEC_TYPE_NB];
    AVPacket pkt1, *pkt = &pkt1;
    AVFormatParameters params, *ap = &params;
    int eof=0;

    ic = avformat_alloc_context();

    memset(st_index, -1, sizeof(st_index));
    memset(st_best_packet_count, -1, sizeof(st_best_packet_count));
    is->video_stream = -1;
    is->audio_stream = -1;
    is->subtitle_stream = -1;

    /* let blocking I/O be interrupted when is->abort_request is set */
    global_video_state = is;
    url_set_interrupt_cb(decode_interrupt_cb);

    memset(ap, 0, sizeof(*ap));

    ap->prealloced_context = 1;
    ap->width = frame_width;
    ap->height= frame_height;
    ap->time_base= (AVRational){1, 25};
    ap->pix_fmt = frame_pix_fmt;

    set_context_opts(ic, avformat_opts, AV_OPT_FLAG_DECODING_PARAM);

    err = av_open_input_file(&ic, is->filename, is->iformat, 0, ap);
    if (err < 0) {
        print_error(is->filename, err);
        ret = -1;
        goto fail;
    }
    is->ic = ic;

    if(genpts)
        ic->flags |= AVFMT_FLAG_GENPTS;

    err = av_find_stream_info(ic);
    if (err < 0) {
        fprintf(stderr, "%s: could not find codec parameters\n", is->filename);
        ret = -1;
        goto fail;
    }
    if(ic->pb)
        ic->pb->eof_reached= 0; //FIXME hack, ffplay maybe should not use url_feof() to test for the end

    if(seek_by_bytes<0)
        seek_by_bytes= !!(ic->iformat->flags & AVFMT_TS_DISCONT);

    /* if seeking requested, we execute it */
    if (start_time != AV_NOPTS_VALUE) {
        int64_t timestamp;

        timestamp = start_time;
        /* add the stream start time */
        if (ic->start_time != AV_NOPTS_VALUE)
            timestamp += ic->start_time;
        ret = avformat_seek_file(ic, -1, INT64_MIN, timestamp, INT64_MAX, 0);
        if (ret < 0) {
            fprintf(stderr, "%s: could not seek to position %0.3f\n",
                    is->filename, (double)timestamp / AV_TIME_BASE);
        }
    }

    /* pick, per media type, the wanted stream (or the one with the most
       frames seen during probing); everything starts out discarded */
    for(i = 0; i < ic->nb_streams; i++) {
        AVStream *st= ic->streams[i];
        AVCodecContext *avctx = st->codec;
        ic->streams[i]->discard = AVDISCARD_ALL;
        if(avctx->codec_type >= (unsigned)CODEC_TYPE_NB)
            continue;
        if(st_count[avctx->codec_type]++ != wanted_stream[avctx->codec_type] && wanted_stream[avctx->codec_type] >= 0)
            continue;

        if(st_best_packet_count[avctx->codec_type] >= st->codec_info_nb_frames)
            continue;
        st_best_packet_count[avctx->codec_type]= st->codec_info_nb_frames;

        switch(avctx->codec_type) {
        case CODEC_TYPE_AUDIO:
            if (!audio_disable)
                st_index[CODEC_TYPE_AUDIO] = i;
            break;
        case CODEC_TYPE_VIDEO:
        case CODEC_TYPE_SUBTITLE:
            if (!video_disable)
                st_index[avctx->codec_type] = i;
            break;
        default:
            break;
        }
    }
    if (show_status) {
        dump_format(ic, 0, is->filename, 0);
    }

    /* open the streams */
    if (st_index[CODEC_TYPE_AUDIO] >= 0) {
        stream_component_open(is, st_index[CODEC_TYPE_AUDIO]);
    }

    ret=-1;
    if (st_index[CODEC_TYPE_VIDEO] >= 0) {
        ret= stream_component_open(is, st_index[CODEC_TYPE_VIDEO]);
    }
    is->refresh_tid = SDL_CreateThread(refresh_thread, is);
    if(ret<0) {
        /* no video: fall back to the audio visualization display */
        if (!display_disable)
            is->show_audio = 2;
    }

    if (st_index[CODEC_TYPE_SUBTITLE] >= 0) {
        stream_component_open(is, st_index[CODEC_TYPE_SUBTITLE]);
    }

    if (is->video_stream < 0 && is->audio_stream < 0) {
        fprintf(stderr, "%s: could not open codecs\n", is->filename);
        ret = -1;
        goto fail;
    }

    /* main demux loop */
    for(;;) {
        if (is->abort_request)
            break;
        if (is->paused != is->last_paused) {
            is->last_paused = is->paused;
            if (is->paused)
                is->read_pause_return= av_read_pause(ic);
            else
                av_read_play(ic);
        }
#if CONFIG_RTSP_DEMUXER
        if (is->paused && !strcmp(ic->iformat->name, "rtsp")) {
            /* wait 10 ms to avoid trying to get another packet */
            /* XXX: horrible */
            SDL_Delay(10);
            continue;
        }
#endif
        if (is->seek_req) {
            int64_t seek_target= is->seek_pos;
            int64_t seek_min= is->seek_rel > 0 ? seek_target - is->seek_rel + 2: INT64_MIN;
            int64_t seek_max= is->seek_rel < 0 ? seek_target - is->seek_rel - 2: INT64_MAX;
//FIXME the +-2 is due to rounding being not done in the correct direction in generation
//      of the seek_pos/seek_rel variables

            ret = avformat_seek_file(is->ic, -1, seek_min, seek_target, seek_max, is->seek_flags);
            if (ret < 0) {
                fprintf(stderr, "%s: error while seeking\n", is->ic->filename);
            }else{
                /* flush the queues and tell each decoder to reset (flush_pkt) */
                if (is->audio_stream >= 0) {
                    packet_queue_flush(&is->audioq);
                    packet_queue_put(&is->audioq, &flush_pkt);
                }
                if (is->subtitle_stream >= 0) {
                    packet_queue_flush(&is->subtitleq);
                    packet_queue_put(&is->subtitleq, &flush_pkt);
                }
                if (is->video_stream >= 0) {
                    packet_queue_flush(&is->videoq);
                    packet_queue_put(&is->videoq, &flush_pkt);
                }
            }
            is->seek_req = 0;
            eof= 0;
        }

        /* if the queue are full, no need to read more */
        if (   is->audioq.size + is->videoq.size + is->subtitleq.size > MAX_QUEUE_SIZE
            || (   (is->audioq   .size  > MIN_AUDIOQ_SIZE || is->audio_stream<0)
                && (is->videoq   .nb_packets > MIN_FRAMES || is->video_stream<0)
                && (is->subtitleq.nb_packets > MIN_FRAMES || is->subtitle_stream<0))) {
            /* wait 10 ms */
            SDL_Delay(10);
            continue;
        }
        if(url_feof(ic->pb) || eof) {
            /* queue an empty packet so the video decoder drains its
               buffered frames */
            if(is->video_stream >= 0){
                av_init_packet(pkt);
                pkt->data=NULL;
                pkt->size=0;
                pkt->stream_index= is->video_stream;
                packet_queue_put(&is->videoq, pkt);
            }
            SDL_Delay(10);
            if(autoexit && is->audioq.size + is->videoq.size + is->subtitleq.size ==0){
                ret=AVERROR_EOF;
                goto fail;
            }
            continue;
        }
        ret = av_read_frame(ic, pkt);
        if (ret < 0) {
            if (ret == AVERROR_EOF)
                eof=1;
            if (url_ferror(ic->pb))
                break;
            SDL_Delay(100); /* wait for user event */
            continue;
        }
        /* route the packet to the queue of its stream, drop the rest */
        if (pkt->stream_index == is->audio_stream) {
            packet_queue_put(&is->audioq, pkt);
        } else if (pkt->stream_index == is->video_stream) {
            packet_queue_put(&is->videoq, pkt);
        } else if (pkt->stream_index == is->subtitle_stream) {
            packet_queue_put(&is->subtitleq, pkt);
        } else {
            av_free_packet(pkt);
        }
    }
    /* wait until the end */
    while (!is->abort_request) {
        SDL_Delay(100);
    }

    ret = 0;
 fail:
    /* disable interrupting */
    global_video_state = NULL;

    /* close each stream */
    if (is->audio_stream >= 0)
        stream_component_close(is, is->audio_stream);
    if (is->video_stream >= 0)
        stream_component_close(is, is->video_stream);
    if (is->subtitle_stream >= 0)
        stream_component_close(is, is->subtitle_stream);
    if (is->ic) {
        av_close_input_file(is->ic);
        is->ic = NULL; /* safety */
    }
    url_set_interrupt_cb(NULL);

    /* on error, ask the main loop to quit */
    if (ret != 0) {
        SDL_Event event;

        event.type = FF_QUIT_EVENT;
        event.user.data1 = is;
        SDL_PushEvent(&event);
    }
    return 0;
}
2540
2541 static VideoState *stream_open(const char *filename, AVInputFormat *iformat)
2542 {
2543     VideoState *is;
2544
2545     is = av_mallocz(sizeof(VideoState));
2546     if (!is)
2547         return NULL;
2548     av_strlcpy(is->filename, filename, sizeof(is->filename));
2549     is->iformat = iformat;
2550     is->ytop = 0;
2551     is->xleft = 0;
2552
2553     /* start video display */
2554     is->pictq_mutex = SDL_CreateMutex();
2555     is->pictq_cond = SDL_CreateCond();
2556
2557     is->subpq_mutex = SDL_CreateMutex();
2558     is->subpq_cond = SDL_CreateCond();
2559
2560     is->av_sync_type = av_sync_type;
2561     is->parse_tid = SDL_CreateThread(decode_thread, is);
2562     if (!is->parse_tid) {
2563         av_free(is);
2564         return NULL;
2565     }
2566     return is;
2567 }
2568
/* stop the demux/refresh threads, free all queued pictures and the
   synchronization primitives, then free 'is' itself */
static void stream_close(VideoState *is)
{
    VideoPicture *vp;
    int i;
    /* XXX: use a special url_shutdown call to abort parse cleanly */
    is->abort_request = 1;
    SDL_WaitThread(is->parse_tid, NULL);
    SDL_WaitThread(is->refresh_tid, NULL);

    /* free all pictures */
    for(i=0;i<VIDEO_PICTURE_QUEUE_SIZE; i++) {
        vp = &is->pictq[i];
#if CONFIG_AVFILTER
        if (vp->picref) {
            avfilter_unref_pic(vp->picref);
            vp->picref = NULL;
        }
#endif
        if (vp->bmp) {
            SDL_FreeYUVOverlay(vp->bmp);
            vp->bmp = NULL;
        }
    }
    SDL_DestroyMutex(is->pictq_mutex);
    SDL_DestroyCond(is->pictq_cond);
    SDL_DestroyMutex(is->subpq_mutex);
    SDL_DestroyCond(is->subpq_cond);
#if !CONFIG_AVFILTER
    if (is->img_convert_ctx)
        sws_freeContext(is->img_convert_ctx);
#endif
    av_free(is);
}
2602
/* switch to the next usable stream of the given type, wrapping around.
   For subtitles the search may end at "no stream" (index -1), which
   simply closes the current subtitle stream. */
static void stream_cycle_channel(VideoState *is, int codec_type)
{
    AVFormatContext *ic = is->ic;
    int start_index, stream_index;
    AVStream *st;

    if (codec_type == CODEC_TYPE_VIDEO)
        start_index = is->video_stream;
    else if (codec_type == CODEC_TYPE_AUDIO)
        start_index = is->audio_stream;
    else
        start_index = is->subtitle_stream;
    /* nothing to cycle from (subtitles may legitimately start at -1) */
    if (start_index < (codec_type == CODEC_TYPE_SUBTITLE ? -1 : 0))
        return;
    stream_index = start_index;
    for(;;) {
        if (++stream_index >= is->ic->nb_streams)
        {
            if (codec_type == CODEC_TYPE_SUBTITLE)
            {
                stream_index = -1;
                goto the_end;
            } else
                stream_index = 0;
        }
        /* wrapped all the way around without finding another candidate */
        if (stream_index == start_index)
            return;
        st = ic->streams[stream_index];
        if (st->codec->codec_type == codec_type) {
            /* check that parameters are OK */
            switch(codec_type) {
            case CODEC_TYPE_AUDIO:
                if (st->codec->sample_rate != 0 &&
                    st->codec->channels != 0)
                    goto the_end;
                break;
            case CODEC_TYPE_VIDEO:
            case CODEC_TYPE_SUBTITLE:
                goto the_end;
            default:
                break;
            }
        }
    }
 the_end:
    stream_component_close(is, start_index);
    stream_component_open(is, stream_index);
}
2651
2652
2653 static void toggle_full_screen(void)
2654 {
2655     is_full_screen = !is_full_screen;
2656     if (!fs_screen_width) {
2657         /* use default SDL method */
2658 //        SDL_WM_ToggleFullScreen(screen);
2659     }
2660     video_open(cur_stream);
2661 }
2662
2663 static void toggle_pause(void)
2664 {
2665     if (cur_stream)
2666         stream_pause(cur_stream);
2667     step = 0;
2668 }
2669
2670 static void step_to_next_frame(void)
2671 {
2672     if (cur_stream) {
2673         /* if the stream is paused unpause it, then step */
2674         if (cur_stream->paused)
2675             stream_pause(cur_stream);
2676     }
2677     step = 1;
2678 }
2679
/* tear down the player and terminate the process: close the stream,
   free the global option contexts, shut down SDL and exit(0) */
static void do_exit(void)
{
    int i;
    if (cur_stream) {
        stream_close(cur_stream);
        cur_stream = NULL;
    }
    /* free the per-type codec option contexts and other global options */
    for (i = 0; i < CODEC_TYPE_NB; i++)
        av_free(avcodec_opts[i]);
    av_free(avformat_opts);
    av_free(sws_opts);
#if CONFIG_AVFILTER
    avfilter_uninit();
#endif
    if (show_status)
        printf("\n");
    SDL_Quit();
    exit(0);
}
2699
2700 static void toggle_audio_display(void)
2701 {
2702     if (cur_stream) {
2703         int bgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0x00);
2704         cur_stream->show_audio = (cur_stream->show_audio + 1) % 3;
2705         fill_rectangle(screen,
2706                     cur_stream->xleft, cur_stream->ytop, cur_stream->width, cur_stream->height,
2707                     bgcolor);
2708         SDL_UpdateRect(screen, cur_stream->xleft, cur_stream->ytop, cur_stream->width, cur_stream->height);
2709     }
2710 }
2711
/* handle an event sent by the GUI */
/* Main SDL event loop: dispatches keyboard shortcuts, mouse seeking,
 * window resizing and the application-defined FF_* events posted by the
 * decoder/refresh threads.  Runs forever; do_exit() terminates the
 * process from inside the loop. */
static void event_loop(void)
{
    SDL_Event event;
    double incr, pos, frac;

    for(;;) {
        double x;  /* mouse x coordinate; assigned only in the mouse cases below */
        SDL_WaitEvent(&event);
        switch(event.type) {
        case SDL_KEYDOWN:
            switch(event.key.keysym.sym) {
            case SDLK_ESCAPE:
            case SDLK_q:
                do_exit();
                break;
            case SDLK_f:
                toggle_full_screen();
                break;
            case SDLK_p:
            case SDLK_SPACE:
                toggle_pause();
                break;
            case SDLK_s: //S: Step to next frame
                step_to_next_frame();
                break;
            case SDLK_a:
                if (cur_stream)
                    stream_cycle_channel(cur_stream, CODEC_TYPE_AUDIO);
                break;
            case SDLK_v:
                if (cur_stream)
                    stream_cycle_channel(cur_stream, CODEC_TYPE_VIDEO);
                break;
            case SDLK_t:
                if (cur_stream)
                    stream_cycle_channel(cur_stream, CODEC_TYPE_SUBTITLE);
                break;
            case SDLK_w:
                toggle_audio_display();
                break;
            /* arrow keys: relative seek; incr is in seconds until the
             * byte-seek branch converts it to a byte offset */
            case SDLK_LEFT:
                incr = -10.0;
                goto do_seek;
            case SDLK_RIGHT:
                incr = 10.0;
                goto do_seek;
            case SDLK_UP:
                incr = 60.0;
                goto do_seek;
            case SDLK_DOWN:
                incr = -60.0;
            do_seek:
                if (cur_stream) {
                    if (seek_by_bytes) {
                        /* base position: last known video packet pos, else
                         * audio packet pos, else the raw I/O position */
                        if (cur_stream->video_stream >= 0 && cur_stream->video_current_pos>=0){
                            pos= cur_stream->video_current_pos;
                        }else if(cur_stream->audio_stream >= 0 && cur_stream->audio_pkt.pos>=0){
                            pos= cur_stream->audio_pkt.pos;
                        }else
                            pos = url_ftell(cur_stream->ic->pb);
                        /* convert seconds to bytes via the bit rate
                         * (180000 bytes/s fallback when unknown) */
                        if (cur_stream->ic->bit_rate)
                            incr *= cur_stream->ic->bit_rate / 8.0;
                        else
                            incr *= 180000.0;
                        pos += incr;
                        stream_seek(cur_stream, pos, incr, 1);
                    } else {
                        pos = get_master_clock(cur_stream);
                        pos += incr;
                        stream_seek(cur_stream, (int64_t)(pos * AV_TIME_BASE), (int64_t)(incr * AV_TIME_BASE), 0);
                    }
                }
                break;
            default:
                break;
            }
            break;
        case SDL_MOUSEBUTTONDOWN:
        case SDL_MOUSEMOTION:
            if(event.type ==SDL_MOUSEBUTTONDOWN){
                x= event.button.x;
            }else{
                /* ignore plain motion; only dragging (button held) seeks */
                if(event.motion.state != SDL_PRESSED)
                    break;
                x= event.motion.x;
            }
            if (cur_stream) {
                /* seek to the fraction of the file matching the x position */
                if(seek_by_bytes || cur_stream->ic->duration<=0){
                    uint64_t size=  url_fsize(cur_stream->ic->pb);
                    stream_seek(cur_stream, size*x/cur_stream->width, 0, 1);
                }else{
                    int64_t ts;
                    int ns, hh, mm, ss;
                    int tns, thh, tmm, tss;
                    /* total duration and target, each split into h:m:s
                     * for the status line */
                    tns = cur_stream->ic->duration/1000000LL;
                    thh = tns/3600;
                    tmm = (tns%3600)/60;
                    tss = (tns%60);
                    frac = x/cur_stream->width;
                    ns = frac*tns;
                    hh = ns/3600;
                    mm = (ns%3600)/60;
                    ss = (ns%60);
                    fprintf(stderr, "Seek to %2.0f%% (%2d:%02d:%02d) of total duration (%2d:%02d:%02d)       \n", frac*100,
                            hh, mm, ss, thh, tmm, tss);
                    ts = frac*cur_stream->ic->duration;
                    if (cur_stream->ic->start_time != AV_NOPTS_VALUE)
                        ts += cur_stream->ic->start_time;
                    stream_seek(cur_stream, ts, 0, 0);
                }
            }
            break;
        case SDL_VIDEORESIZE:
            if (cur_stream) {
                screen = SDL_SetVideoMode(event.resize.w, event.resize.h, 0,
                                          SDL_HWSURFACE|SDL_RESIZABLE|SDL_ASYNCBLIT|SDL_HWACCEL);
                screen_width = cur_stream->width = event.resize.w;
                screen_height= cur_stream->height= event.resize.h;
            }
            break;
        case SDL_QUIT:
        case FF_QUIT_EVENT:
            do_exit();
            break;
        case FF_ALLOC_EVENT:
            /* posted by the video thread: SDL surfaces must be created
             * on this (the main) thread */
            video_open(event.user.data1);
            alloc_picture(event.user.data1);
            break;
        case FF_REFRESH_EVENT:
            video_refresh_timer(event.user.data1);
            /* NOTE(review): clears the flag on the global cur_stream rather
             * than on event.user.data1 — confirm these are always the same
             * stream when a refresh event is still queued */
            cur_stream->refresh=0;
            break;
        default:
            break;
        }
    }
}
2850
2851 static void opt_frame_size(const char *arg)
2852 {
2853     if (av_parse_video_frame_size(&frame_width, &frame_height, arg) < 0) {
2854         fprintf(stderr, "Incorrect frame size\n");
2855         exit(1);
2856     }
2857     if ((frame_width % 2) != 0 || (frame_height % 2) != 0) {
2858         fprintf(stderr, "Frame size must be a multiple of 2\n");
2859         exit(1);
2860     }
2861 }
2862
2863 static int opt_width(const char *opt, const char *arg)
2864 {
2865     screen_width = parse_number_or_die(opt, arg, OPT_INT64, 1, INT_MAX);
2866     return 0;
2867 }
2868
2869 static int opt_height(const char *opt, const char *arg)
2870 {
2871     screen_height = parse_number_or_die(opt, arg, OPT_INT64, 1, INT_MAX);
2872     return 0;
2873 }
2874
2875 static void opt_format(const char *arg)
2876 {
2877     file_iformat = av_find_input_format(arg);
2878     if (!file_iformat) {
2879         fprintf(stderr, "Unknown input format: %s\n", arg);
2880         exit(1);
2881     }
2882 }
2883
2884 static void opt_frame_pix_fmt(const char *arg)
2885 {
2886     frame_pix_fmt = av_get_pix_fmt(arg);
2887 }
2888
2889 static int opt_sync(const char *opt, const char *arg)
2890 {
2891     if (!strcmp(arg, "audio"))
2892         av_sync_type = AV_SYNC_AUDIO_MASTER;
2893     else if (!strcmp(arg, "video"))
2894         av_sync_type = AV_SYNC_VIDEO_MASTER;
2895     else if (!strcmp(arg, "ext"))
2896         av_sync_type = AV_SYNC_EXTERNAL_CLOCK;
2897     else {
2898         fprintf(stderr, "Unknown value for %s: %s\n", opt, arg);
2899         exit(1);
2900     }
2901     return 0;
2902 }
2903
2904 static int opt_seek(const char *opt, const char *arg)
2905 {
2906     start_time = parse_time_or_die(opt, arg, 1);
2907     return 0;
2908 }
2909
2910 static int opt_debug(const char *opt, const char *arg)
2911 {
2912     av_log_set_level(99);
2913     debug = parse_number_or_die(opt, arg, OPT_INT64, 0, INT_MAX);
2914     return 0;
2915 }
2916
2917 static int opt_vismv(const char *opt, const char *arg)
2918 {
2919     debug_mv = parse_number_or_die(opt, arg, OPT_INT64, INT_MIN, INT_MAX);
2920     return 0;
2921 }
2922
2923 static int opt_thread_count(const char *opt, const char *arg)
2924 {
2925     thread_count= parse_number_or_die(opt, arg, OPT_INT64, 0, INT_MAX);
2926 #if !HAVE_THREADS
2927     fprintf(stderr, "Warning: not compiled with thread support, using thread emulation\n");
2928 #endif
2929     return 0;
2930 }
2931
/* Command-line option table consumed by parse_options()/show_help().
 * Entry order is user-visible (it is the order shown by -h), so entries
 * are kept in place.  OPT_FUNC2 entries dispatch to the opt_* handlers
 * above; OPT_INT/OPT_BOOL/OPT_STRING entries write the global directly. */
static const OptionDef options[] = {
#include "cmdutils_common_opts.h"
    { "x", HAS_ARG | OPT_FUNC2, {(void*)opt_width}, "force displayed width", "width" },
    { "y", HAS_ARG | OPT_FUNC2, {(void*)opt_height}, "force displayed height", "height" },
    { "s", HAS_ARG | OPT_VIDEO, {(void*)opt_frame_size}, "set frame size (WxH or abbreviation)", "size" },
    { "fs", OPT_BOOL, {(void*)&is_full_screen}, "force full screen" },
    { "an", OPT_BOOL, {(void*)&audio_disable}, "disable audio" },
    { "vn", OPT_BOOL, {(void*)&video_disable}, "disable video" },
    { "ast", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&wanted_stream[CODEC_TYPE_AUDIO]}, "select desired audio stream", "stream_number" },
    { "vst", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&wanted_stream[CODEC_TYPE_VIDEO]}, "select desired video stream", "stream_number" },
    { "sst", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&wanted_stream[CODEC_TYPE_SUBTITLE]}, "select desired subtitle stream", "stream_number" },
    { "ss", HAS_ARG | OPT_FUNC2, {(void*)&opt_seek}, "seek to a given position in seconds", "pos" },
    { "bytes", OPT_INT | HAS_ARG, {(void*)&seek_by_bytes}, "seek by bytes 0=off 1=on -1=auto", "val" },
    { "nodisp", OPT_BOOL, {(void*)&display_disable}, "disable graphical display" },
    { "f", HAS_ARG, {(void*)opt_format}, "force format", "fmt" },
    { "pix_fmt", HAS_ARG | OPT_EXPERT | OPT_VIDEO, {(void*)opt_frame_pix_fmt}, "set pixel format", "format" },
    { "stats", OPT_BOOL | OPT_EXPERT, {(void*)&show_status}, "show status", "" },
    { "debug", HAS_ARG | OPT_FUNC2 | OPT_EXPERT, {(void*)opt_debug}, "print specific debug info", "" },
    { "bug", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&workaround_bugs}, "workaround bugs", "" },
    { "vismv", HAS_ARG | OPT_FUNC2 | OPT_EXPERT, {(void*)opt_vismv}, "visualize motion vectors", "" },
    { "fast", OPT_BOOL | OPT_EXPERT, {(void*)&fast}, "non spec compliant optimizations", "" },
    { "genpts", OPT_BOOL | OPT_EXPERT, {(void*)&genpts}, "generate pts", "" },
    { "drp", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&decoder_reorder_pts}, "let decoder reorder pts 0=off 1=on -1=auto", ""},
    { "lowres", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&lowres}, "", "" },
    { "skiploop", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&skip_loop_filter}, "", "" },
    { "skipframe", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&skip_frame}, "", "" },
    { "skipidct", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&skip_idct}, "", "" },
    { "idct", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&idct}, "set idct algo",  "algo" },
    { "er", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&error_recognition}, "set error detection threshold (0-4)",  "threshold" },
    { "ec", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&error_concealment}, "set error concealment options",  "bit_mask" },
    { "sync", HAS_ARG | OPT_FUNC2 | OPT_EXPERT, {(void*)opt_sync}, "set audio-video sync. type (type=audio/video/ext)", "type" },
    { "threads", HAS_ARG | OPT_FUNC2 | OPT_EXPERT, {(void*)opt_thread_count}, "thread count", "count" },
    { "autoexit", OPT_BOOL | OPT_EXPERT, {(void*)&autoexit}, "exit at the end", "" },
    { "framedrop", OPT_BOOL | OPT_EXPERT, {(void*)&framedrop}, "drop frames when cpu is too slow", "" },
    { "window_title", OPT_STRING | HAS_ARG, {(void*)&window_title}, "set window title", "window title" },
#if CONFIG_AVFILTER
    { "vfilters", OPT_STRING | HAS_ARG, {(void*)&vfilters}, "video filters", "filter list" },
#endif
    { "rdftspeed", OPT_INT | HAS_ARG| OPT_AUDIO | OPT_EXPERT, {(void*)&rdftspeed}, "rdft speed", "msecs" },
    { "default", OPT_FUNC2 | HAS_ARG | OPT_AUDIO | OPT_VIDEO | OPT_EXPERT, {(void*)opt_default}, "generic catch all option", "" },
    { NULL, },
};
2974
/* Print the one-line program description and usage synopsis to stdout. */
static void show_usage(void)
{
    printf("Simple media player\n"
           "usage: ffplay [options] input_file\n"
           "\n");
}
2981
2982 static void show_help(void)
2983 {
2984     show_usage();
2985     show_help_options(options, "Main options:\n",
2986                       OPT_EXPERT, 0);
2987     show_help_options(options, "\nAdvanced options:\n",
2988                       OPT_EXPERT, OPT_EXPERT);
2989     printf("\nWhile playing:\n"
2990            "q, ESC              quit\n"
2991            "f                   toggle full screen\n"
2992            "p, SPC              pause\n"
2993            "a                   cycle audio channel\n"
2994            "v                   cycle video channel\n"
2995            "t                   cycle subtitle channel\n"
2996            "w                   show audio waves\n"
2997            "left/right          seek backward/forward 10 seconds\n"
2998            "down/up             seek backward/forward 1 minute\n"
2999            "mouse click         seek to percentage in file corresponding to fraction of width\n"
3000            );
3001 }
3002
3003 static void opt_input_file(const char *filename)
3004 {
3005     if (input_filename) {
3006         fprintf(stderr, "Argument '%s' provided as input filename, but '%s' was already specified.\n",
3007                 filename, input_filename);
3008         exit(1);
3009     }
3010     if (!strcmp(filename, "-"))
3011         filename = "pipe:";
3012     input_filename = filename;
3013 }
3014
/* Called from the main */
/* Program entry point: registers the FFmpeg components, parses the
 * command line, initializes SDL, opens the input stream and hands
 * control to event_loop() (which never returns). */
int main(int argc, char **argv)
{
    int flags, i;

    /* register all codecs, demux and protocols */
    avcodec_register_all();
    avdevice_register_all();
#if CONFIG_AVFILTER
    avfilter_register_all();
#endif
    av_register_all();

    /* one AVCodecContext per codec type, used to collect per-type
     * command-line options */
    for(i=0; i<CODEC_TYPE_NB; i++){
        avcodec_opts[i]= avcodec_alloc_context2(i);
    }
    avformat_opts = avformat_alloc_context();
#if !CONFIG_AVFILTER
    /* dummy 16x16 context so sws flags from the command line have a home */
    sws_opts = sws_getContext(16,16,0, 16,16,0, sws_flags, NULL,NULL,NULL);
#endif

    show_banner();

    parse_options(argc, argv, options, opt_input_file);

    if (!input_filename) {
        show_usage();
        fprintf(stderr, "An input file must be specified\n");
        fprintf(stderr, "Use -h to get full help or, even better, run 'man ffplay'\n");
        exit(1);
    }

    if (display_disable) {
        video_disable = 1;
    }
    flags = SDL_INIT_VIDEO | SDL_INIT_AUDIO | SDL_INIT_TIMER;
#if !defined(__MINGW32__) && !defined(__APPLE__)
    flags |= SDL_INIT_EVENTTHREAD; /* Not supported on Windows or Mac OS X */
#endif
    if (SDL_Init (flags)) {
        fprintf(stderr, "Could not initialize SDL - %s\n", SDL_GetError());
        exit(1);
    }

    if (!display_disable) {
#if HAVE_SDL_VIDEO_SIZE
        /* remember the desktop dimensions for full-screen toggling */
        const SDL_VideoInfo *vi = SDL_GetVideoInfo();
        fs_screen_width = vi->current_w;
        fs_screen_height = vi->current_h;
#endif
    }

    /* drop event types this player never handles, keeping the queue small */
    SDL_EventState(SDL_ACTIVEEVENT, SDL_IGNORE);
    SDL_EventState(SDL_SYSWMEVENT, SDL_IGNORE);
    SDL_EventState(SDL_USEREVENT, SDL_IGNORE);

    /* sentinel packet queued on seek so the decoders flush their state;
     * NOTE(review): assigning a string literal to packet data discards
     * const — relies on the literal never being written or freed */
    av_init_packet(&flush_pkt);
    flush_pkt.data= "FLUSH";

    cur_stream = stream_open(input_filename, file_iformat);

    event_loop();

    /* never returns */

    return 0;
}