]> git.sesse.net Git - ffmpeg/blob - ffplay.c
vp3: Use memset to clear the fragment array
[ffmpeg] / ffplay.c
1 /*
2  * FFplay : Simple Media Player based on the FFmpeg libraries
3  * Copyright (c) 2003 Fabrice Bellard
4  *
5  * This file is part of FFmpeg.
6  *
7  * FFmpeg is free software; you can redistribute it and/or
8  * modify it under the terms of the GNU Lesser General Public
9  * License as published by the Free Software Foundation; either
10  * version 2.1 of the License, or (at your option) any later version.
11  *
12  * FFmpeg is distributed in the hope that it will be useful,
13  * but WITHOUT ANY WARRANTY; without even the implied warranty of
14  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
15  * Lesser General Public License for more details.
16  *
17  * You should have received a copy of the GNU Lesser General Public
18  * License along with FFmpeg; if not, write to the Free Software
19  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
20  */
21
22 #include "config.h"
23 #include <inttypes.h>
24 #include <math.h>
25 #include <limits.h>
26 #include "libavutil/avstring.h"
27 #include "libavutil/pixdesc.h"
28 #include "libavformat/avformat.h"
29 #include "libavdevice/avdevice.h"
30 #include "libswscale/swscale.h"
31 #include "libavcodec/audioconvert.h"
32 #include "libavcodec/colorspace.h"
33 #include "libavcodec/opt.h"
34 #include "libavcodec/avfft.h"
35
36 #if CONFIG_AVFILTER
37 # include "libavfilter/avfilter.h"
38 # include "libavfilter/avfiltergraph.h"
39 # include "libavfilter/graphparser.h"
40 #endif
41
42 #include "cmdutils.h"
43
44 #include <SDL.h>
45 #include <SDL_thread.h>
46
47 #ifdef __MINGW32__
48 #undef main /* We don't want SDL to override our main() */
49 #endif
50
51 #include <unistd.h>
52 #include <assert.h>
53
54 const char program_name[] = "FFplay";
55 const int program_birth_year = 2003;
56
57 //#define DEBUG_SYNC
58
59 #define MAX_QUEUE_SIZE (15 * 1024 * 1024)
60 #define MIN_AUDIOQ_SIZE (20 * 16 * 1024)
61 #define MIN_FRAMES 5
62
63 /* SDL audio buffer size, in samples. Should be small to have precise
64    A/V sync as SDL does not have hardware buffer fullness info. */
65 #define SDL_AUDIO_BUFFER_SIZE 1024
66
67 /* no AV sync correction is done if below the AV sync threshold */
68 #define AV_SYNC_THRESHOLD 0.01
69 /* no AV correction is done if too big error */
70 #define AV_NOSYNC_THRESHOLD 10.0
71
72 #define FRAME_SKIP_FACTOR 0.05
73
74 /* maximum audio speed change to get correct sync */
75 #define SAMPLE_CORRECTION_PERCENT_MAX 10
76
77 /* we use about AUDIO_DIFF_AVG_NB A-V differences to make the average */
78 #define AUDIO_DIFF_AVG_NB   20
79
80 /* NOTE: the size must be big enough to compensate the hardware audio buffersize size */
81 #define SAMPLE_ARRAY_SIZE (2*65536)
82
83 #if !CONFIG_AVFILTER
84 static int sws_flags = SWS_BICUBIC;
85 #endif
86
/* Thread-safe FIFO of demuxed packets.  Producers append with
   packet_queue_put(), consumers pop with packet_queue_get(); access is
   serialized by 'mutex' and consumers sleep on 'cond'. */
typedef struct PacketQueue {
    AVPacketList *first_pkt, *last_pkt; /* head/tail of singly linked list */
    int nb_packets;     /* number of packets currently queued */
    int size;           /* sum of payload bytes plus list-node overhead */
    int abort_request;  /* when set, blocked readers wake up and return -1 */
    SDL_mutex *mutex;
    SDL_cond *cond;
} PacketQueue;
95
#define VIDEO_PICTURE_QUEUE_SIZE 2
#define SUBPICTURE_QUEUE_SIZE 4

/* One decoded video frame queued for display. */
typedef struct VideoPicture {
    double pts;                                  ///<presentation time stamp for this picture
    double target_clock;                         ///<av_gettime() time at which this should be displayed ideally
    int64_t pos;                                 ///<byte position in file
    SDL_Overlay *bmp;                            ///<SDL YUV overlay holding the pixel data
    int width, height; /* source height & width */
    int allocated;
    enum PixelFormat pix_fmt;

#if CONFIG_AVFILTER
    AVFilterPicRef *picref;                      ///<picture reference from the filter graph
#endif
} VideoPicture;
112
/* One decoded subtitle, paired with the pts at which it becomes visible. */
typedef struct SubPicture {
    double pts; /* presentation time stamp for this picture */
    AVSubtitle sub;
} SubPicture;
117
/* Master clock selection for A/V synchronization. */
enum {
    AV_SYNC_AUDIO_MASTER, /* default choice */
    AV_SYNC_VIDEO_MASTER,
    AV_SYNC_EXTERNAL_CLOCK, /* synchronize to an external clock */
};
123
/* All playback state for one open media file: demuxer context, per-stream
   decoding threads, packet/picture queues and the clocks used for A/V sync. */
typedef struct VideoState {
    SDL_Thread *parse_tid;      /* demuxer (read) thread */
    SDL_Thread *video_tid;      /* video decoding thread */
    SDL_Thread *refresh_tid;    /* thread pushing FF_REFRESH_EVENTs */
    AVInputFormat *iformat;
    int no_background;
    int abort_request;          /* set to ask the worker threads to exit */
    int paused;
    int last_paused;
    int seek_req;               /* set when a seek has been requested */
    int seek_flags;
    int64_t seek_pos;           /* seek target */
    int64_t seek_rel;           /* relative seek offset */
    int read_pause_return;
    AVFormatContext *ic;
    int dtg_active_format;

    int audio_stream;           /* index of the selected audio stream */

    int av_sync_type;           /* one of the AV_SYNC_* constants */
    double external_clock; /* external clock base */
    int64_t external_clock_time;

    double audio_clock;
    double audio_diff_cum; /* used for AV difference average computation */
    double audio_diff_avg_coef;
    double audio_diff_threshold;
    int audio_diff_avg_count;
    AVStream *audio_st;
    PacketQueue audioq;
    int audio_hw_buf_size;
    /* samples output by the codec. we reserve more space for avsync
       compensation */
    DECLARE_ALIGNED(16,uint8_t,audio_buf1)[(AVCODEC_MAX_AUDIO_FRAME_SIZE * 3) / 2];
    DECLARE_ALIGNED(16,uint8_t,audio_buf2)[(AVCODEC_MAX_AUDIO_FRAME_SIZE * 3) / 2];
    uint8_t *audio_buf;
    unsigned int audio_buf_size; /* in bytes */
    int audio_buf_index; /* in bytes */
    AVPacket audio_pkt_temp;
    AVPacket audio_pkt;
    enum SampleFormat audio_src_fmt;
    AVAudioConvert *reformat_ctx;

    int show_audio; /* if true, display audio samples */
    int16_t sample_array[SAMPLE_ARRAY_SIZE]; /* ring buffer of recent samples for visualization */
    int sample_array_index;
    int last_i_start;
    RDFTContext *rdft;  /* FFT context for the spectrogram display */
    int rdft_bits;
    int xpos;           /* current x column of the spectrogram */

    SDL_Thread *subtitle_tid;
    int subtitle_stream;
    int subtitle_stream_changed;
    AVStream *subtitle_st;
    PacketQueue subtitleq;
    SubPicture subpq[SUBPICTURE_QUEUE_SIZE];
    int subpq_size, subpq_rindex, subpq_windex;
    SDL_mutex *subpq_mutex;
    SDL_cond *subpq_cond;

    double frame_timer;
    double frame_last_pts;
    double frame_last_delay;
    double video_clock;                          ///<pts of last decoded frame / predicted pts of next decoded frame
    int video_stream;
    AVStream *video_st;
    PacketQueue videoq;
    double video_current_pts;                    ///<current displayed pts (different from video_clock if frame fifos are used)
    double video_current_pts_drift;              ///<video_current_pts - time (av_gettime) at which we updated video_current_pts - used to have running video pts
    int64_t video_current_pos;                   ///<current displayed file pos
    VideoPicture pictq[VIDEO_PICTURE_QUEUE_SIZE];
    int pictq_size, pictq_rindex, pictq_windex;
    SDL_mutex *pictq_mutex;
    SDL_cond *pictq_cond;
#if !CONFIG_AVFILTER
    struct SwsContext *img_convert_ctx;
#endif

    //    QETimer *video_timer;
    char filename[1024];
    int width, height, xleft, ytop; /* display window geometry */

    /* timestamp discontinuity detection state; semantics managed by the
       decoder loop elsewhere in this file */
    int64_t faulty_pts;
    int64_t faulty_dts;
    int64_t last_dts_for_fault_detection;
    int64_t last_pts_for_fault_detection;

#if CONFIG_AVFILTER
    AVFilterContext *out_video_filter;          ///<the last filter in the video chain
#endif

    float skip_frames;
    float skip_frames_index;
    int refresh;        /* set while a refresh event is pending delivery */
} VideoState;
220
static void show_help(void);
static int audio_write_get_buf_size(VideoState *is);

/* options specified by the user */
static AVInputFormat *file_iformat;
static const char *input_filename;
static int fs_screen_width;
static int fs_screen_height;
static int screen_width = 0;
static int screen_height = 0;
static int frame_width = 0;
static int frame_height = 0;
static enum PixelFormat frame_pix_fmt = PIX_FMT_NONE;
static int audio_disable;
static int video_disable;
/* user-requested stream index per media type; -1 = no explicit choice */
static int wanted_stream[CODEC_TYPE_NB]={
    [CODEC_TYPE_AUDIO]=-1,
    [CODEC_TYPE_VIDEO]=-1,
    [CODEC_TYPE_SUBTITLE]=-1,
};
static int seek_by_bytes=-1;
static int display_disable;
static int show_status = 1;
static int av_sync_type = AV_SYNC_AUDIO_MASTER;
static int64_t start_time = AV_NOPTS_VALUE;
static int debug = 0;
static int debug_mv = 0;
static int step = 0;
static int thread_count = 1;
static int workaround_bugs = 1;
static int fast = 0;
static int genpts = 0;
static int lowres = 0;
static int idct = FF_IDCT_AUTO;
static enum AVDiscard skip_frame= AVDISCARD_DEFAULT;
static enum AVDiscard skip_idct= AVDISCARD_DEFAULT;
static enum AVDiscard skip_loop_filter= AVDISCARD_DEFAULT;
static int error_recognition = FF_ER_CAREFUL;
static int error_concealment = 3;
static int decoder_reorder_pts= -1;
static int autoexit;
static int framedrop=1;

static int rdftspeed=20; /* spectrogram refresh interval, milliseconds */
#if CONFIG_AVFILTER
static char *vfilters = NULL;
#endif

/* current context */
static int is_full_screen;
static VideoState *cur_stream;
static int64_t audio_callback_time;

/* sentinel packet; packet_queue_put() recognises it by address and skips
   av_dup_packet() for it */
static AVPacket flush_pkt;

/* custom SDL user-event codes */
#define FF_ALLOC_EVENT   (SDL_USEREVENT)
#define FF_REFRESH_EVENT (SDL_USEREVENT + 1)
#define FF_QUIT_EVENT    (SDL_USEREVENT + 2)

static SDL_Surface *screen;

static int packet_queue_put(PacketQueue *q, AVPacket *pkt);
283
284 /* packet queue handling */
285 static void packet_queue_init(PacketQueue *q)
286 {
287     memset(q, 0, sizeof(PacketQueue));
288     q->mutex = SDL_CreateMutex();
289     q->cond = SDL_CreateCond();
290     packet_queue_put(q, &flush_pkt);
291 }
292
293 static void packet_queue_flush(PacketQueue *q)
294 {
295     AVPacketList *pkt, *pkt1;
296
297     SDL_LockMutex(q->mutex);
298     for(pkt = q->first_pkt; pkt != NULL; pkt = pkt1) {
299         pkt1 = pkt->next;
300         av_free_packet(&pkt->pkt);
301         av_freep(&pkt);
302     }
303     q->last_pkt = NULL;
304     q->first_pkt = NULL;
305     q->nb_packets = 0;
306     q->size = 0;
307     SDL_UnlockMutex(q->mutex);
308 }
309
310 static void packet_queue_end(PacketQueue *q)
311 {
312     packet_queue_flush(q);
313     SDL_DestroyMutex(q->mutex);
314     SDL_DestroyCond(q->cond);
315 }
316
317 static int packet_queue_put(PacketQueue *q, AVPacket *pkt)
318 {
319     AVPacketList *pkt1;
320
321     /* duplicate the packet */
322     if (pkt!=&flush_pkt && av_dup_packet(pkt) < 0)
323         return -1;
324
325     pkt1 = av_malloc(sizeof(AVPacketList));
326     if (!pkt1)
327         return -1;
328     pkt1->pkt = *pkt;
329     pkt1->next = NULL;
330
331
332     SDL_LockMutex(q->mutex);
333
334     if (!q->last_pkt)
335
336         q->first_pkt = pkt1;
337     else
338         q->last_pkt->next = pkt1;
339     q->last_pkt = pkt1;
340     q->nb_packets++;
341     q->size += pkt1->pkt.size + sizeof(*pkt1);
342     /* XXX: should duplicate packet data in DV case */
343     SDL_CondSignal(q->cond);
344
345     SDL_UnlockMutex(q->mutex);
346     return 0;
347 }
348
349 static void packet_queue_abort(PacketQueue *q)
350 {
351     SDL_LockMutex(q->mutex);
352
353     q->abort_request = 1;
354
355     SDL_CondSignal(q->cond);
356
357     SDL_UnlockMutex(q->mutex);
358 }
359
360 /* return < 0 if aborted, 0 if no packet and > 0 if packet.  */
361 static int packet_queue_get(PacketQueue *q, AVPacket *pkt, int block)
362 {
363     AVPacketList *pkt1;
364     int ret;
365
366     SDL_LockMutex(q->mutex);
367
368     for(;;) {
369         if (q->abort_request) {
370             ret = -1;
371             break;
372         }
373
374         pkt1 = q->first_pkt;
375         if (pkt1) {
376             q->first_pkt = pkt1->next;
377             if (!q->first_pkt)
378                 q->last_pkt = NULL;
379             q->nb_packets--;
380             q->size -= pkt1->pkt.size + sizeof(*pkt1);
381             *pkt = pkt1->pkt;
382             av_free(pkt1);
383             ret = 1;
384             break;
385         } else if (!block) {
386             ret = 0;
387             break;
388         } else {
389             SDL_CondWait(q->cond, q->mutex);
390         }
391     }
392     SDL_UnlockMutex(q->mutex);
393     return ret;
394 }
395
396 static inline void fill_rectangle(SDL_Surface *screen,
397                                   int x, int y, int w, int h, int color)
398 {
399     SDL_Rect rect;
400     rect.x = x;
401     rect.y = y;
402     rect.w = w;
403     rect.h = h;
404     SDL_FillRect(screen, &rect, color);
405 }
406
#if 0
/* draw only the border of a rectangle */
/* NOTE(review): dead code - compiled out via "#if 0"; kept for reference.
   Fills the four strips of the window that surround the (x, y, w, h)
   picture rectangle with the given color. */
void fill_border(VideoState *s, int x, int y, int w, int h, int color)
{
    int w1, w2, h1, h2;

    /* fill the background */
    w1 = x;
    if (w1 < 0)
        w1 = 0;
    w2 = s->width - (x + w);
    if (w2 < 0)
        w2 = 0;
    h1 = y;
    if (h1 < 0)
        h1 = 0;
    h2 = s->height - (y + h);
    if (h2 < 0)
        h2 = 0;
    fill_rectangle(screen,
                   s->xleft, s->ytop,
                   w1, s->height,
                   color);
    fill_rectangle(screen,
                   s->xleft + s->width - w2, s->ytop,
                   w2, s->height,
                   color);
    fill_rectangle(screen,
                   s->xleft + w1, s->ytop,
                   s->width - w1 - w2, h1,
                   color);
    fill_rectangle(screen,
                   s->xleft + w1, s->ytop + s->height - h2,
                   s->width - w1 - w2, h2,
                   color);
}
#endif
444
/* Blend component 'newp' over 'oldp' with alpha 'a' (0..255).  's' is a
   scale shift used when 'newp' is a sum of 1 << s samples. */
#define ALPHA_BLEND(a, oldp, newp, s)\
((((oldp << s) * (255 - (a))) + (newp * (a))) / (255 << s))

/* Unpack a 32-bit ARGB pixel pointed to by 's' into r, g, b, a. */
#define RGBA_IN(r, g, b, a, s)\
{\
    unsigned int v = ((const uint32_t *)(s))[0];\
    a = (v >> 24) & 0xff;\
    r = (v >> 16) & 0xff;\
    g = (v >> 8) & 0xff;\
    b = v & 0xff;\
}

/* Look up the palette entry for the 8-bit index at 's' and unpack its
   A, Y, U, V components. */
#define YUVA_IN(y, u, v, a, s, pal)\
{\
    unsigned int val = ((const uint32_t *)(pal))[*(const uint8_t*)(s)];\
    a = (val >> 24) & 0xff;\
    y = (val >> 16) & 0xff;\
    u = (val >> 8) & 0xff;\
    v = val & 0xff;\
}

/* Pack A, Y, U, V back into the 32-bit word at 'd'. */
#define YUVA_OUT(d, y, u, v, a)\
{\
    ((uint32_t *)(d))[0] = (a << 24) | (y << 16) | (u << 8) | v;\
}


#define BPP 1 /* bytes per source pixel (palette-indexed subtitle bitmap) */
474 static void blend_subrect(AVPicture *dst, const AVSubtitleRect *rect, int imgw, int imgh)
475 {
476     int wrap, wrap3, width2, skip2;
477     int y, u, v, a, u1, v1, a1, w, h;
478     uint8_t *lum, *cb, *cr;
479     const uint8_t *p;
480     const uint32_t *pal;
481     int dstx, dsty, dstw, dsth;
482
483     dstw = av_clip(rect->w, 0, imgw);
484     dsth = av_clip(rect->h, 0, imgh);
485     dstx = av_clip(rect->x, 0, imgw - dstw);
486     dsty = av_clip(rect->y, 0, imgh - dsth);
487     lum = dst->data[0] + dsty * dst->linesize[0];
488     cb = dst->data[1] + (dsty >> 1) * dst->linesize[1];
489     cr = dst->data[2] + (dsty >> 1) * dst->linesize[2];
490
491     width2 = ((dstw + 1) >> 1) + (dstx & ~dstw & 1);
492     skip2 = dstx >> 1;
493     wrap = dst->linesize[0];
494     wrap3 = rect->pict.linesize[0];
495     p = rect->pict.data[0];
496     pal = (const uint32_t *)rect->pict.data[1];  /* Now in YCrCb! */
497
498     if (dsty & 1) {
499         lum += dstx;
500         cb += skip2;
501         cr += skip2;
502
503         if (dstx & 1) {
504             YUVA_IN(y, u, v, a, p, pal);
505             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
506             cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
507             cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
508             cb++;
509             cr++;
510             lum++;
511             p += BPP;
512         }
513         for(w = dstw - (dstx & 1); w >= 2; w -= 2) {
514             YUVA_IN(y, u, v, a, p, pal);
515             u1 = u;
516             v1 = v;
517             a1 = a;
518             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
519
520             YUVA_IN(y, u, v, a, p + BPP, pal);
521             u1 += u;
522             v1 += v;
523             a1 += a;
524             lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
525             cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
526             cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
527             cb++;
528             cr++;
529             p += 2 * BPP;
530             lum += 2;
531         }
532         if (w) {
533             YUVA_IN(y, u, v, a, p, pal);
534             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
535             cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
536             cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
537             p++;
538             lum++;
539         }
540         p += wrap3 - dstw * BPP;
541         lum += wrap - dstw - dstx;
542         cb += dst->linesize[1] - width2 - skip2;
543         cr += dst->linesize[2] - width2 - skip2;
544     }
545     for(h = dsth - (dsty & 1); h >= 2; h -= 2) {
546         lum += dstx;
547         cb += skip2;
548         cr += skip2;
549
550         if (dstx & 1) {
551             YUVA_IN(y, u, v, a, p, pal);
552             u1 = u;
553             v1 = v;
554             a1 = a;
555             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
556             p += wrap3;
557             lum += wrap;
558             YUVA_IN(y, u, v, a, p, pal);
559             u1 += u;
560             v1 += v;
561             a1 += a;
562             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
563             cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
564             cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
565             cb++;
566             cr++;
567             p += -wrap3 + BPP;
568             lum += -wrap + 1;
569         }
570         for(w = dstw - (dstx & 1); w >= 2; w -= 2) {
571             YUVA_IN(y, u, v, a, p, pal);
572             u1 = u;
573             v1 = v;
574             a1 = a;
575             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
576
577             YUVA_IN(y, u, v, a, p + BPP, pal);
578             u1 += u;
579             v1 += v;
580             a1 += a;
581             lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
582             p += wrap3;
583             lum += wrap;
584
585             YUVA_IN(y, u, v, a, p, pal);
586             u1 += u;
587             v1 += v;
588             a1 += a;
589             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
590
591             YUVA_IN(y, u, v, a, p + BPP, pal);
592             u1 += u;
593             v1 += v;
594             a1 += a;
595             lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
596
597             cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 2);
598             cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 2);
599
600             cb++;
601             cr++;
602             p += -wrap3 + 2 * BPP;
603             lum += -wrap + 2;
604         }
605         if (w) {
606             YUVA_IN(y, u, v, a, p, pal);
607             u1 = u;
608             v1 = v;
609             a1 = a;
610             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
611             p += wrap3;
612             lum += wrap;
613             YUVA_IN(y, u, v, a, p, pal);
614             u1 += u;
615             v1 += v;
616             a1 += a;
617             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
618             cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
619             cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
620             cb++;
621             cr++;
622             p += -wrap3 + BPP;
623             lum += -wrap + 1;
624         }
625         p += wrap3 + (wrap3 - dstw * BPP);
626         lum += wrap + (wrap - dstw - dstx);
627         cb += dst->linesize[1] - width2 - skip2;
628         cr += dst->linesize[2] - width2 - skip2;
629     }
630     /* handle odd height */
631     if (h) {
632         lum += dstx;
633         cb += skip2;
634         cr += skip2;
635
636         if (dstx & 1) {
637             YUVA_IN(y, u, v, a, p, pal);
638             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
639             cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
640             cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
641             cb++;
642             cr++;
643             lum++;
644             p += BPP;
645         }
646         for(w = dstw - (dstx & 1); w >= 2; w -= 2) {
647             YUVA_IN(y, u, v, a, p, pal);
648             u1 = u;
649             v1 = v;
650             a1 = a;
651             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
652
653             YUVA_IN(y, u, v, a, p + BPP, pal);
654             u1 += u;
655             v1 += v;
656             a1 += a;
657             lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
658             cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u, 1);
659             cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v, 1);
660             cb++;
661             cr++;
662             p += 2 * BPP;
663             lum += 2;
664         }
665         if (w) {
666             YUVA_IN(y, u, v, a, p, pal);
667             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
668             cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
669             cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
670         }
671     }
672 }
673
674 static void free_subpicture(SubPicture *sp)
675 {
676     int i;
677
678     for (i = 0; i < sp->sub.num_rects; i++)
679     {
680         av_freep(&sp->sub.rects[i]->pict.data[0]);
681         av_freep(&sp->sub.rects[i]->pict.data[1]);
682         av_freep(&sp->sub.rects[i]);
683     }
684
685     av_free(sp->sub.rects);
686
687     memset(&sp->sub, 0, sizeof(AVSubtitle));
688 }
689
/* Display the picture at the read index of the picture queue: compute the
   display aspect ratio, blend any due subtitle into the overlay, letterbox
   the picture into the window and blit it.  Does nothing if the overlay
   has not been allocated yet. */
static void video_image_display(VideoState *is)
{
    VideoPicture *vp;
    SubPicture *sp;
    AVPicture pict;
    float aspect_ratio;
    int width, height, x, y;
    SDL_Rect rect;
    int i;

    vp = &is->pictq[is->pictq_rindex];
    if (vp->bmp) {
#if CONFIG_AVFILTER
         if (vp->picref->pixel_aspect.num == 0)
             aspect_ratio = 0;
         else
             aspect_ratio = av_q2d(vp->picref->pixel_aspect);
#else

        /* XXX: use variable in the frame */
        /* prefer the container's sample aspect ratio over the codec's */
        if (is->video_st->sample_aspect_ratio.num)
            aspect_ratio = av_q2d(is->video_st->sample_aspect_ratio);
        else if (is->video_st->codec->sample_aspect_ratio.num)
            aspect_ratio = av_q2d(is->video_st->codec->sample_aspect_ratio);
        else
            aspect_ratio = 0;
#endif
        if (aspect_ratio <= 0.0)
            aspect_ratio = 1.0;
        /* convert sample aspect ratio to display aspect ratio */
        aspect_ratio *= (float)vp->width / (float)vp->height;
        /* if an active format is indicated, then it overrides the
           mpeg format */
#if 0
        if (is->video_st->codec->dtg_active_format != is->dtg_active_format) {
            is->dtg_active_format = is->video_st->codec->dtg_active_format;
            printf("dtg_active_format=%d\n", is->dtg_active_format);
        }
#endif
#if 0
        switch(is->video_st->codec->dtg_active_format) {
        case FF_DTG_AFD_SAME:
        default:
            /* nothing to do */
            break;
        case FF_DTG_AFD_4_3:
            aspect_ratio = 4.0 / 3.0;
            break;
        case FF_DTG_AFD_16_9:
            aspect_ratio = 16.0 / 9.0;
            break;
        case FF_DTG_AFD_14_9:
            aspect_ratio = 14.0 / 9.0;
            break;
        case FF_DTG_AFD_4_3_SP_14_9:
            aspect_ratio = 14.0 / 9.0;
            break;
        case FF_DTG_AFD_16_9_SP_14_9:
            aspect_ratio = 14.0 / 9.0;
            break;
        case FF_DTG_AFD_SP_4_3:
            aspect_ratio = 4.0 / 3.0;
            break;
        }
#endif

        if (is->subtitle_st)
        {
            if (is->subpq_size > 0)
            {
                sp = &is->subpq[is->subpq_rindex];

                /* blend the subtitle only once its start time is reached */
                if (vp->pts >= sp->pts + ((float) sp->sub.start_display_time / 1000))
                {
                    SDL_LockYUVOverlay (vp->bmp);

                    /* planes [1] and [2] are swapped to match the plane
                       order of the SDL overlay */
                    pict.data[0] = vp->bmp->pixels[0];
                    pict.data[1] = vp->bmp->pixels[2];
                    pict.data[2] = vp->bmp->pixels[1];

                    pict.linesize[0] = vp->bmp->pitches[0];
                    pict.linesize[1] = vp->bmp->pitches[2];
                    pict.linesize[2] = vp->bmp->pitches[1];

                    for (i = 0; i < sp->sub.num_rects; i++)
                        blend_subrect(&pict, sp->sub.rects[i],
                                      vp->bmp->w, vp->bmp->h);

                    SDL_UnlockYUVOverlay (vp->bmp);
                }
            }
        }


        /* XXX: we suppose the screen has a 1.0 pixel ratio */
        /* letterbox: fit the picture into the window preserving aspect,
           rounding dimensions down to even values */
        height = is->height;
        width = ((int)rint(height * aspect_ratio)) & ~1;
        if (width > is->width) {
            width = is->width;
            height = ((int)rint(width / aspect_ratio)) & ~1;
        }
        x = (is->width - width) / 2;
        y = (is->height - height) / 2;
        if (!is->no_background) {
            /* fill the background */
            //            fill_border(is, x, y, width, height, QERGB(0x00, 0x00, 0x00));
        } else {
            is->no_background = 0;
        }
        rect.x = is->xleft + x;
        rect.y = is->ytop  + y;
        rect.w = width;
        rect.h = height;
        SDL_DisplayYUVOverlay(vp->bmp, &rect);
    } else {
#if 0
        fill_rectangle(screen,
                       is->xleft, is->ytop, is->width, is->height,
                       QERGB(0x00, 0x00, 0x00));
#endif
    }
}
811
/* Mathematical (always non-negative) modulo of a by b, unlike C's '%'
   operator whose result takes the sign of the dividend. */
static inline int compute_mod(int a, int b)
{
    int m = a % b;
    return m < 0 ? m + b : m;
}
820
821 static void video_audio_display(VideoState *s)
822 {
823     int i, i_start, x, y1, y, ys, delay, n, nb_display_channels;
824     int ch, channels, h, h2, bgcolor, fgcolor;
825     int16_t time_diff;
826     int rdft_bits, nb_freq;
827
828     for(rdft_bits=1; (1<<rdft_bits)<2*s->height; rdft_bits++)
829         ;
830     nb_freq= 1<<(rdft_bits-1);
831
832     /* compute display index : center on currently output samples */
833     channels = s->audio_st->codec->channels;
834     nb_display_channels = channels;
835     if (!s->paused) {
836         int data_used= s->show_audio==1 ? s->width : (2*nb_freq);
837         n = 2 * channels;
838         delay = audio_write_get_buf_size(s);
839         delay /= n;
840
841         /* to be more precise, we take into account the time spent since
842            the last buffer computation */
843         if (audio_callback_time) {
844             time_diff = av_gettime() - audio_callback_time;
845             delay -= (time_diff * s->audio_st->codec->sample_rate) / 1000000;
846         }
847
848         delay += 2*data_used;
849         if (delay < data_used)
850             delay = data_used;
851
852         i_start= x = compute_mod(s->sample_array_index - delay * channels, SAMPLE_ARRAY_SIZE);
853         if(s->show_audio==1){
854             h= INT_MIN;
855             for(i=0; i<1000; i+=channels){
856                 int idx= (SAMPLE_ARRAY_SIZE + x - i) % SAMPLE_ARRAY_SIZE;
857                 int a= s->sample_array[idx];
858                 int b= s->sample_array[(idx + 4*channels)%SAMPLE_ARRAY_SIZE];
859                 int c= s->sample_array[(idx + 5*channels)%SAMPLE_ARRAY_SIZE];
860                 int d= s->sample_array[(idx + 9*channels)%SAMPLE_ARRAY_SIZE];
861                 int score= a-d;
862                 if(h<score && (b^c)<0){
863                     h= score;
864                     i_start= idx;
865                 }
866             }
867         }
868
869         s->last_i_start = i_start;
870     } else {
871         i_start = s->last_i_start;
872     }
873
874     bgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0x00);
875     if(s->show_audio==1){
876         fill_rectangle(screen,
877                        s->xleft, s->ytop, s->width, s->height,
878                        bgcolor);
879
880         fgcolor = SDL_MapRGB(screen->format, 0xff, 0xff, 0xff);
881
882         /* total height for one channel */
883         h = s->height / nb_display_channels;
884         /* graph height / 2 */
885         h2 = (h * 9) / 20;
886         for(ch = 0;ch < nb_display_channels; ch++) {
887             i = i_start + ch;
888             y1 = s->ytop + ch * h + (h / 2); /* position of center line */
889             for(x = 0; x < s->width; x++) {
890                 y = (s->sample_array[i] * h2) >> 15;
891                 if (y < 0) {
892                     y = -y;
893                     ys = y1 - y;
894                 } else {
895                     ys = y1;
896                 }
897                 fill_rectangle(screen,
898                                s->xleft + x, ys, 1, y,
899                                fgcolor);
900                 i += channels;
901                 if (i >= SAMPLE_ARRAY_SIZE)
902                     i -= SAMPLE_ARRAY_SIZE;
903             }
904         }
905
906         fgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0xff);
907
908         for(ch = 1;ch < nb_display_channels; ch++) {
909             y = s->ytop + ch * h;
910             fill_rectangle(screen,
911                            s->xleft, y, s->width, 1,
912                            fgcolor);
913         }
914         SDL_UpdateRect(screen, s->xleft, s->ytop, s->width, s->height);
915     }else{
916         nb_display_channels= FFMIN(nb_display_channels, 2);
917         if(rdft_bits != s->rdft_bits){
918             av_rdft_end(s->rdft);
919             s->rdft = av_rdft_init(rdft_bits, DFT_R2C);
920             s->rdft_bits= rdft_bits;
921         }
922         {
923             FFTSample data[2][2*nb_freq];
924             for(ch = 0;ch < nb_display_channels; ch++) {
925                 i = i_start + ch;
926                 for(x = 0; x < 2*nb_freq; x++) {
927                     double w= (x-nb_freq)*(1.0/nb_freq);
928                     data[ch][x]= s->sample_array[i]*(1.0-w*w);
929                     i += channels;
930                     if (i >= SAMPLE_ARRAY_SIZE)
931                         i -= SAMPLE_ARRAY_SIZE;
932                 }
933                 av_rdft_calc(s->rdft, data[ch]);
934             }
935             //least efficient way to do this, we should of course directly access it but its more than fast enough
936             for(y=0; y<s->height; y++){
937                 double w= 1/sqrt(nb_freq);
938                 int a= sqrt(w*sqrt(data[0][2*y+0]*data[0][2*y+0] + data[0][2*y+1]*data[0][2*y+1]));
939                 int b= sqrt(w*sqrt(data[1][2*y+0]*data[1][2*y+0] + data[1][2*y+1]*data[1][2*y+1]));
940                 a= FFMIN(a,255);
941                 b= FFMIN(b,255);
942                 fgcolor = SDL_MapRGB(screen->format, a, b, (a+b)/2);
943
944                 fill_rectangle(screen,
945                             s->xpos, s->height-y, 1, 1,
946                             fgcolor);
947             }
948         }
949         SDL_UpdateRect(screen, s->xpos, s->ytop, 1, s->height);
950         s->xpos++;
951         if(s->xpos >= s->width)
952             s->xpos= s->xleft;
953     }
954 }
955
/* (Re)create the SDL video surface for the current stream.
   Size preference: explicit user/fullscreen size, then the video (or
   filter-graph output) dimensions, finally a 640x480 fallback.
   Returns 0 on success (including "already the right size"), -1 if SDL
   could not set the video mode. */
static int video_open(VideoState *is){
    int flags = SDL_HWSURFACE|SDL_ASYNCBLIT|SDL_HWACCEL;
    int w,h;

    if(is_full_screen) flags |= SDL_FULLSCREEN;
    else               flags |= SDL_RESIZABLE;

    if (is_full_screen && fs_screen_width) {
        w = fs_screen_width;
        h = fs_screen_height;
    } else if(!is_full_screen && screen_width){
        w = screen_width;
        h = screen_height;
#if CONFIG_AVFILTER
    }else if (is->out_video_filter && is->out_video_filter->inputs[0]){
        w = is->out_video_filter->inputs[0]->w;
        h = is->out_video_filter->inputs[0]->h;
#else
    }else if (is->video_st && is->video_st->codec->width){
        w = is->video_st->codec->width;
        h = is->video_st->codec->height;
#endif
    } else {
        w = 640;
        h = 480;
    }
    /* nothing to do if the current surface already has the wanted size */
    if(screen && is->width == screen->w && screen->w == w
       && is->height== screen->h && screen->h == h)
        return 0;

#ifndef __APPLE__
    screen = SDL_SetVideoMode(w, h, 0, flags);
#else
    /* setting bits_per_pixel = 0 or 32 causes blank video on OS X */
    screen = SDL_SetVideoMode(w, h, 24, flags);
#endif
    if (!screen) {
        fprintf(stderr, "SDL: could not set video mode - exiting\n");
        return -1;
    }
    SDL_WM_SetCaption("FFplay", "FFplay");

    is->width = screen->w;
    is->height = screen->h;

    return 0;
}
1003
1004 /* display the current picture, if any */
1005 static void video_display(VideoState *is)
1006 {
1007     if(!screen)
1008         video_open(cur_stream);
1009     if (is->audio_st && is->show_audio)
1010         video_audio_display(is);
1011     else if (is->video_st)
1012         video_image_display(is);
1013 }
1014
1015 static int refresh_thread(void *opaque)
1016 {
1017     VideoState *is= opaque;
1018     while(!is->abort_request){
1019     SDL_Event event;
1020     event.type = FF_REFRESH_EVENT;
1021     event.user.data1 = opaque;
1022         if(!is->refresh){
1023             is->refresh=1;
1024     SDL_PushEvent(&event);
1025         }
1026         usleep(is->audio_st && is->show_audio ? rdftspeed*1000 : 5000); //FIXME ideally we should wait the correct time but SDLs event passing is so slow it would be silly
1027     }
1028     return 0;
1029 }
1030
1031 /* get the current audio clock value */
1032 static double get_audio_clock(VideoState *is)
1033 {
1034     double pts;
1035     int hw_buf_size, bytes_per_sec;
1036     pts = is->audio_clock;
1037     hw_buf_size = audio_write_get_buf_size(is);
1038     bytes_per_sec = 0;
1039     if (is->audio_st) {
1040         bytes_per_sec = is->audio_st->codec->sample_rate *
1041             2 * is->audio_st->codec->channels;
1042     }
1043     if (bytes_per_sec)
1044         pts -= (double)hw_buf_size / bytes_per_sec;
1045     return pts;
1046 }
1047
1048 /* get the current video clock value */
1049 static double get_video_clock(VideoState *is)
1050 {
1051     if (is->paused) {
1052         return is->video_current_pts;
1053     } else {
1054         return is->video_current_pts_drift + av_gettime() / 1000000.0;
1055     }
1056 }
1057
1058 /* get the current external clock value */
1059 static double get_external_clock(VideoState *is)
1060 {
1061     int64_t ti;
1062     ti = av_gettime();
1063     return is->external_clock + ((ti - is->external_clock_time) * 1e-6);
1064 }
1065
1066 /* get the current master clock value */
1067 static double get_master_clock(VideoState *is)
1068 {
1069     double val;
1070
1071     if (is->av_sync_type == AV_SYNC_VIDEO_MASTER) {
1072         if (is->video_st)
1073             val = get_video_clock(is);
1074         else
1075             val = get_audio_clock(is);
1076     } else if (is->av_sync_type == AV_SYNC_AUDIO_MASTER) {
1077         if (is->audio_st)
1078             val = get_audio_clock(is);
1079         else
1080             val = get_video_clock(is);
1081     } else {
1082         val = get_external_clock(is);
1083     }
1084     return val;
1085 }
1086
1087 /* seek in the stream */
1088 static void stream_seek(VideoState *is, int64_t pos, int64_t rel, int seek_by_bytes)
1089 {
1090     if (!is->seek_req) {
1091         is->seek_pos = pos;
1092         is->seek_rel = rel;
1093         is->seek_flags &= ~AVSEEK_FLAG_BYTE;
1094         if (seek_by_bytes)
1095             is->seek_flags |= AVSEEK_FLAG_BYTE;
1096         is->seek_req = 1;
1097     }
1098 }
1099
/* pause or resume the video; toggles is->paused.
   When resuming, the frame timer and video clock anchors are re-based on the
   current wall clock so queued pictures are not considered late. */
static void stream_pause(VideoState *is)
{
    if (is->paused) {
        /* credit the frame timer with the wall-clock time spent paused */
        is->frame_timer += av_gettime() / 1000000.0 + is->video_current_pts_drift - is->video_current_pts;
        if(is->read_pause_return != AVERROR(ENOSYS)){
            /* the demuxer supported av_read_pause: re-derive the current pts
               from the drift recorded at pause time */
            is->video_current_pts = is->video_current_pts_drift + av_gettime() / 1000000.0;
        }
        /* re-anchor the drift to the current wall clock */
        is->video_current_pts_drift = is->video_current_pts - av_gettime() / 1000000.0;
    }
    is->paused = !is->paused;
}
1112
1113 static double compute_target_time(double frame_current_pts, VideoState *is)
1114 {
1115     double delay, sync_threshold, diff;
1116
1117     /* compute nominal delay */
1118     delay = frame_current_pts - is->frame_last_pts;
1119     if (delay <= 0 || delay >= 10.0) {
1120         /* if incorrect delay, use previous one */
1121         delay = is->frame_last_delay;
1122     } else {
1123         is->frame_last_delay = delay;
1124     }
1125     is->frame_last_pts = frame_current_pts;
1126
1127     /* update delay to follow master synchronisation source */
1128     if (((is->av_sync_type == AV_SYNC_AUDIO_MASTER && is->audio_st) ||
1129          is->av_sync_type == AV_SYNC_EXTERNAL_CLOCK)) {
1130         /* if video is slave, we try to correct big delays by
1131            duplicating or deleting a frame */
1132         diff = get_video_clock(is) - get_master_clock(is);
1133
1134         /* skip or repeat frame. We take into account the
1135            delay to compute the threshold. I still don't know
1136            if it is the best guess */
1137         sync_threshold = FFMAX(AV_SYNC_THRESHOLD, delay);
1138         if (fabs(diff) < AV_NOSYNC_THRESHOLD) {
1139             if (diff <= -sync_threshold)
1140                 delay = 0;
1141             else if (diff >= sync_threshold)
1142                 delay = 2 * delay;
1143         }
1144     }
1145     is->frame_timer += delay;
1146 #if defined(DEBUG_SYNC)
1147     printf("video: delay=%0.3f actual_delay=%0.3f pts=%0.3f A-V=%f\n",
1148             delay, actual_delay, frame_current_pts, -diff);
1149 #endif
1150
1151     return is->frame_timer;
1152 }
1153
/* called to display each frame (from the FF_REFRESH_EVENT handler).
   Dequeues a picture when its target display time has arrived, optionally
   dropping late frames, mixes in due subtitles, renders via video_display()
   and periodically prints the status line. */
static void video_refresh_timer(void *opaque)
{
    VideoState *is = opaque;
    VideoPicture *vp;

    SubPicture *sp, *sp2;

    if (is->video_st) {
retry:
        if (is->pictq_size == 0) {
            //nothing to do, no picture to display in the que
        } else {
            double time= av_gettime()/1000000.0;
            double next_target;
            /* dequeue the picture */
            vp = &is->pictq[is->pictq_rindex];

            /* not yet time to show this picture */
            if(time < vp->target_clock)
                return;
            /* update current video pts */
            is->video_current_pts = vp->pts;
            is->video_current_pts_drift = is->video_current_pts - time;
            is->video_current_pos = vp->pos;
            /* estimate when the following picture is due, to decide whether
               the current one is already too late */
            if(is->pictq_size > 1){
                VideoPicture *nextvp= &is->pictq[(is->pictq_rindex+1)%VIDEO_PICTURE_QUEUE_SIZE];
                assert(nextvp->target_clock >= vp->target_clock);
                next_target= nextvp->target_clock;
            }else{
                next_target= vp->target_clock + is->video_clock - vp->pts; //FIXME pass durations cleanly
            }
            /* frame dropping: if the next picture is already due, skip this
               one and raise the decoder-side skip ratio */
            if(framedrop && time > next_target){
                is->skip_frames *= 1.0 + FRAME_SKIP_FACTOR;
                if(is->pictq_size > 1 || time > next_target + 0.5){
                    /* update queue size and signal for next picture */
                    if (++is->pictq_rindex == VIDEO_PICTURE_QUEUE_SIZE)
                        is->pictq_rindex = 0;

                    SDL_LockMutex(is->pictq_mutex);
                    is->pictq_size--;
                    SDL_CondSignal(is->pictq_cond);
                    SDL_UnlockMutex(is->pictq_mutex);
                    goto retry;
                }
            }

            /* drop expired subtitles; on a stream change, flush them all */
            if(is->subtitle_st) {
                if (is->subtitle_stream_changed) {
                    SDL_LockMutex(is->subpq_mutex);

                    while (is->subpq_size) {
                        free_subpicture(&is->subpq[is->subpq_rindex]);

                        /* update queue size and signal for next picture */
                        if (++is->subpq_rindex == SUBPICTURE_QUEUE_SIZE)
                            is->subpq_rindex = 0;

                        is->subpq_size--;
                    }
                    is->subtitle_stream_changed = 0;

                    SDL_CondSignal(is->subpq_cond);
                    SDL_UnlockMutex(is->subpq_mutex);
                } else {
                    if (is->subpq_size > 0) {
                        sp = &is->subpq[is->subpq_rindex];

                        if (is->subpq_size > 1)
                            sp2 = &is->subpq[(is->subpq_rindex + 1) % SUBPICTURE_QUEUE_SIZE];
                        else
                            sp2 = NULL;

                        /* free the head subtitle once it expired, or once the
                           following one has become active */
                        if ((is->video_current_pts > (sp->pts + ((float) sp->sub.end_display_time / 1000)))
                                || (sp2 && is->video_current_pts > (sp2->pts + ((float) sp2->sub.start_display_time / 1000))))
                        {
                            free_subpicture(sp);

                            /* update queue size and signal for next picture */
                            if (++is->subpq_rindex == SUBPICTURE_QUEUE_SIZE)
                                is->subpq_rindex = 0;

                            SDL_LockMutex(is->subpq_mutex);
                            is->subpq_size--;
                            SDL_CondSignal(is->subpq_cond);
                            SDL_UnlockMutex(is->subpq_mutex);
                        }
                    }
                }
            }

            /* display picture */
            video_display(is);

            /* update queue size and signal for next picture */
            if (++is->pictq_rindex == VIDEO_PICTURE_QUEUE_SIZE)
                is->pictq_rindex = 0;

            SDL_LockMutex(is->pictq_mutex);
            is->pictq_size--;
            SDL_CondSignal(is->pictq_cond);
            SDL_UnlockMutex(is->pictq_mutex);
        }
    } else if (is->audio_st) {
        /* draw the next audio frame */

        /* if only audio stream, then display the audio bars (better
           than nothing, just to test the implementation */

        /* display picture */
        video_display(is);
    }
    /* status line on stdout, rate-limited to roughly every 30ms */
    if (show_status) {
        static int64_t last_time;
        int64_t cur_time;
        int aqsize, vqsize, sqsize;
        double av_diff;

        cur_time = av_gettime();
        if (!last_time || (cur_time - last_time) >= 30000) {
            aqsize = 0;
            vqsize = 0;
            sqsize = 0;
            if (is->audio_st)
                aqsize = is->audioq.size;
            if (is->video_st)
                vqsize = is->videoq.size;
            if (is->subtitle_st)
                sqsize = is->subtitleq.size;
            av_diff = 0;
            if (is->audio_st && is->video_st)
                av_diff = get_audio_clock(is) - get_video_clock(is);
            printf("%7.2f A-V:%7.3f s:%3.1f aq=%5dKB vq=%5dKB sq=%5dB f=%"PRId64"/%"PRId64"   \r",
                   get_master_clock(is), av_diff, FFMAX(is->skip_frames-1, 0), aqsize / 1024, vqsize / 1024, sqsize, is->faulty_dts, is->faulty_pts);
            fflush(stdout);
            last_time = cur_time;
        }
    }
}
1292
1293 /* allocate a picture (needs to do that in main thread to avoid
1294    potential locking problems */
1295 static void alloc_picture(void *opaque)
1296 {
1297     VideoState *is = opaque;
1298     VideoPicture *vp;
1299
1300     vp = &is->pictq[is->pictq_windex];
1301
1302     if (vp->bmp)
1303         SDL_FreeYUVOverlay(vp->bmp);
1304
1305 #if CONFIG_AVFILTER
1306     if (vp->picref)
1307         avfilter_unref_pic(vp->picref);
1308     vp->picref = NULL;
1309
1310     vp->width   = is->out_video_filter->inputs[0]->w;
1311     vp->height  = is->out_video_filter->inputs[0]->h;
1312     vp->pix_fmt = is->out_video_filter->inputs[0]->format;
1313 #else
1314     vp->width   = is->video_st->codec->width;
1315     vp->height  = is->video_st->codec->height;
1316     vp->pix_fmt = is->video_st->codec->pix_fmt;
1317 #endif
1318
1319     vp->bmp = SDL_CreateYUVOverlay(vp->width, vp->height,
1320                                    SDL_YV12_OVERLAY,
1321                                    screen);
1322
1323     SDL_LockMutex(is->pictq_mutex);
1324     vp->allocated = 1;
1325     SDL_CondSignal(is->pictq_cond);
1326     SDL_UnlockMutex(is->pictq_mutex);
1327 }
1328
1329 /**
1330  *
1331  * @param pts the dts of the pkt / pts of the frame and guessed if not known
1332  */
1333 static int queue_picture(VideoState *is, AVFrame *src_frame, double pts, int64_t pos)
1334 {
1335     VideoPicture *vp;
1336     int dst_pix_fmt;
1337 #if CONFIG_AVFILTER
1338     AVPicture pict_src;
1339 #endif
1340     /* wait until we have space to put a new picture */
1341     SDL_LockMutex(is->pictq_mutex);
1342
1343     if(is->pictq_size>=VIDEO_PICTURE_QUEUE_SIZE && !is->refresh)
1344         is->skip_frames= FFMAX(1.0 - FRAME_SKIP_FACTOR, is->skip_frames * (1.0-FRAME_SKIP_FACTOR));
1345
1346     while (is->pictq_size >= VIDEO_PICTURE_QUEUE_SIZE &&
1347            !is->videoq.abort_request) {
1348         SDL_CondWait(is->pictq_cond, is->pictq_mutex);
1349     }
1350     SDL_UnlockMutex(is->pictq_mutex);
1351
1352     if (is->videoq.abort_request)
1353         return -1;
1354
1355     vp = &is->pictq[is->pictq_windex];
1356
1357     /* alloc or resize hardware picture buffer */
1358     if (!vp->bmp ||
1359 #if CONFIG_AVFILTER
1360         vp->width  != is->out_video_filter->inputs[0]->w ||
1361         vp->height != is->out_video_filter->inputs[0]->h) {
1362 #else
1363         vp->width != is->video_st->codec->width ||
1364         vp->height != is->video_st->codec->height) {
1365 #endif
1366         SDL_Event event;
1367
1368         vp->allocated = 0;
1369
1370         /* the allocation must be done in the main thread to avoid
1371            locking problems */
1372         event.type = FF_ALLOC_EVENT;
1373         event.user.data1 = is;
1374         SDL_PushEvent(&event);
1375
1376         /* wait until the picture is allocated */
1377         SDL_LockMutex(is->pictq_mutex);
1378         while (!vp->allocated && !is->videoq.abort_request) {
1379             SDL_CondWait(is->pictq_cond, is->pictq_mutex);
1380         }
1381         SDL_UnlockMutex(is->pictq_mutex);
1382
1383         if (is->videoq.abort_request)
1384             return -1;
1385     }
1386
1387     /* if the frame is not skipped, then display it */
1388     if (vp->bmp) {
1389         AVPicture pict;
1390 #if CONFIG_AVFILTER
1391         if(vp->picref)
1392             avfilter_unref_pic(vp->picref);
1393         vp->picref = src_frame->opaque;
1394 #endif
1395
1396         /* get a pointer on the bitmap */
1397         SDL_LockYUVOverlay (vp->bmp);
1398
1399         dst_pix_fmt = PIX_FMT_YUV420P;
1400         memset(&pict,0,sizeof(AVPicture));
1401         pict.data[0] = vp->bmp->pixels[0];
1402         pict.data[1] = vp->bmp->pixels[2];
1403         pict.data[2] = vp->bmp->pixels[1];
1404
1405         pict.linesize[0] = vp->bmp->pitches[0];
1406         pict.linesize[1] = vp->bmp->pitches[2];
1407         pict.linesize[2] = vp->bmp->pitches[1];
1408
1409 #if CONFIG_AVFILTER
1410         pict_src.data[0] = src_frame->data[0];
1411         pict_src.data[1] = src_frame->data[1];
1412         pict_src.data[2] = src_frame->data[2];
1413
1414         pict_src.linesize[0] = src_frame->linesize[0];
1415         pict_src.linesize[1] = src_frame->linesize[1];
1416         pict_src.linesize[2] = src_frame->linesize[2];
1417
1418         //FIXME use direct rendering
1419         av_picture_copy(&pict, &pict_src,
1420                         vp->pix_fmt, vp->width, vp->height);
1421 #else
1422         sws_flags = av_get_int(sws_opts, "sws_flags", NULL);
1423         is->img_convert_ctx = sws_getCachedContext(is->img_convert_ctx,
1424             vp->width, vp->height, vp->pix_fmt, vp->width, vp->height,
1425             dst_pix_fmt, sws_flags, NULL, NULL, NULL);
1426         if (is->img_convert_ctx == NULL) {
1427             fprintf(stderr, "Cannot initialize the conversion context\n");
1428             exit(1);
1429         }
1430         sws_scale(is->img_convert_ctx, src_frame->data, src_frame->linesize,
1431                   0, vp->height, pict.data, pict.linesize);
1432 #endif
1433         /* update the bitmap content */
1434         SDL_UnlockYUVOverlay(vp->bmp);
1435
1436         vp->pts = pts;
1437         vp->pos = pos;
1438
1439         /* now we can update the picture count */
1440         if (++is->pictq_windex == VIDEO_PICTURE_QUEUE_SIZE)
1441             is->pictq_windex = 0;
1442         SDL_LockMutex(is->pictq_mutex);
1443         vp->target_clock= compute_target_time(vp->pts, is);
1444
1445         is->pictq_size++;
1446         SDL_UnlockMutex(is->pictq_mutex);
1447     }
1448     return 0;
1449 }
1450
1451 /**
1452  * compute the exact PTS for the picture if it is omitted in the stream
1453  * @param pts1 the dts of the pkt / pts of the frame
1454  */
1455 static int output_picture2(VideoState *is, AVFrame *src_frame, double pts1, int64_t pos)
1456 {
1457     double frame_delay, pts;
1458
1459     pts = pts1;
1460
1461     if (pts != 0) {
1462         /* update video clock with pts, if present */
1463         is->video_clock = pts;
1464     } else {
1465         pts = is->video_clock;
1466     }
1467     /* update video clock for next frame */
1468     frame_delay = av_q2d(is->video_st->codec->time_base);
1469     /* for MPEG2, the frame can be repeated, so we update the
1470        clock accordingly */
1471     frame_delay += src_frame->repeat_pict * (frame_delay * 0.5);
1472     is->video_clock += frame_delay;
1473
1474 #if defined(DEBUG_SYNC) && 0
1475     printf("frame_type=%c clock=%0.3f pts=%0.3f\n",
1476            av_get_pict_type_char(src_frame->pict_type), pts, pts1);
1477 #endif
1478     return queue_picture(is, src_frame, pts, pos);
1479 }
1480
/* Pull one packet from the video queue and decode it.
   Returns -1 on abort, 1 when a displayable picture was produced in 'frame'
   (with *pts set from reordered pts or dts, in stream time_base units),
   0 when no picture is available yet or the picture was skipped.
   A flush packet resets decoder and timing state. */
static int get_video_frame(VideoState *is, AVFrame *frame, int64_t *pts, AVPacket *pkt)
{
    int len1, got_picture, i;

        if (packet_queue_get(&is->videoq, pkt, 1) < 0)
            return -1;

        if(pkt->data == flush_pkt.data){
            avcodec_flush_buffers(is->video_st->codec);

            SDL_LockMutex(is->pictq_mutex);
            //Make sure there are no long delay timers (ideally we should just flush the que but thats harder)
            for(i=0; i<VIDEO_PICTURE_QUEUE_SIZE; i++){
                is->pictq[i].target_clock= 0;
            }
            /* wait until the display side has consumed all queued pictures */
            while (is->pictq_size && !is->videoq.abort_request) {
                SDL_CondWait(is->pictq_cond, is->pictq_mutex);
            }
            is->video_current_pos= -1;
            SDL_UnlockMutex(is->pictq_mutex);

            /* reset pts/dts fault counters and frame timing after a seek */
            is->last_dts_for_fault_detection=
            is->last_pts_for_fault_detection= INT64_MIN;
            is->frame_last_pts= AV_NOPTS_VALUE;
            is->frame_last_delay = 0;
            is->frame_timer = (double)av_gettime() / 1000000.0;
            is->skip_frames= 1;
            is->skip_frames_index= 0;
            return 0;
        }

        /* NOTE: ipts is the PTS of the _first_ picture beginning in
           this packet, if any */
        is->video_st->codec->reordered_opaque= pkt->pts;
        len1 = avcodec_decode_video2(is->video_st->codec,
                                    frame, &got_picture,
                                    pkt);

        /* count non-monotonic dts/pts values to decide later which of the
           two timestamp sources is more trustworthy */
        if (got_picture) {
            if(pkt->dts != AV_NOPTS_VALUE){
                is->faulty_dts += pkt->dts <= is->last_dts_for_fault_detection;
                is->last_dts_for_fault_detection= pkt->dts;
            }
            if(frame->reordered_opaque != AV_NOPTS_VALUE){
                is->faulty_pts += frame->reordered_opaque <= is->last_pts_for_fault_detection;
                is->last_pts_for_fault_detection= frame->reordered_opaque;
            }
        }

        /* pick the reordered pts when forced, when it misbehaves less often
           than dts, or when no dts exists; otherwise fall back to dts */
        if(   (   decoder_reorder_pts==1
               || (decoder_reorder_pts && is->faulty_pts<is->faulty_dts)
               || pkt->dts == AV_NOPTS_VALUE)
           && frame->reordered_opaque != AV_NOPTS_VALUE)
            *pts= frame->reordered_opaque;
        else if(pkt->dts != AV_NOPTS_VALUE)
            *pts= pkt->dts;
        else
            *pts= 0;

//            if (len1 < 0)
//                break;
    /* frame dropping: only report a picture when the skip counter allows */
    if (got_picture){
        is->skip_frames_index += 1;
        if(is->skip_frames_index >= is->skip_frames){
            is->skip_frames_index -= FFMAX(is->skip_frames, 1.0);
            return 1;
        }

    }
    return 0;
}
1552
1553 #if CONFIG_AVFILTER
/* private context of the ffplay source filter: the owning player state and
   a scratch frame that receives pictures pulled from the video decoder */
typedef struct {
    VideoState *is;   // player this source feeds from
    AVFrame *frame;   // allocated in input_init(), freed in input_uninit()
} FilterPriv;
1558
1559 static int input_init(AVFilterContext *ctx, const char *args, void *opaque)
1560 {
1561     FilterPriv *priv = ctx->priv;
1562     if(!opaque) return -1;
1563
1564     priv->is = opaque;
1565     priv->frame = avcodec_alloc_frame();
1566
1567     return 0;
1568 }
1569
/* uninit callback: release the scratch frame allocated in input_init() */
static void input_uninit(AVFilterContext *ctx)
{
    FilterPriv *priv = ctx->priv;
    av_free(priv->frame);
}
1575
/* request_frame on the source filter's output pad: pull decoded frames from
   the video queue until one survives frame dropping, copy it into a filter
   buffer and push it down the link. Returns 0 on success, -1 on abort. */
static int input_request_frame(AVFilterLink *link)
{
    FilterPriv *priv = link->src->priv;
    AVFilterPicRef *picref;
    int64_t pts = 0;
    AVPacket pkt;
    int ret;

    /* ret == 0 means the frame was skipped or a flush occurred: release the
       packet and try again */
    while (!(ret = get_video_frame(priv->is, priv->frame, &pts, &pkt)))
        av_free_packet(&pkt);
    if (ret < 0)
        return -1;

    /* FIXME: until I figure out how to hook everything up to the codec
     * right, we're just copying the entire frame. */
    picref = avfilter_get_video_buffer(link, AV_PERM_WRITE, link->w, link->h);
    av_picture_copy((AVPicture *)&picref->data, (AVPicture *)priv->frame,
                    picref->pic->format, link->w, link->h);
    av_free_packet(&pkt);

    picref->pts = pts;
    picref->pixel_aspect = priv->is->video_st->codec->sample_aspect_ratio;
    /* hand the picture to the graph: start frame, one full slice, end frame */
    avfilter_start_frame(link, avfilter_ref_pic(picref, ~0));
    avfilter_draw_slice(link, 0, link->h, 1);
    avfilter_end_frame(link);
    avfilter_unref_pic(picref);

    return 0;
}
1605
1606 static int input_query_formats(AVFilterContext *ctx)
1607 {
1608     FilterPriv *priv = ctx->priv;
1609     enum PixelFormat pix_fmts[] = {
1610         priv->is->video_st->codec->pix_fmt, PIX_FMT_NONE
1611     };
1612
1613     avfilter_set_common_formats(ctx, avfilter_make_format_list(pix_fmts));
1614     return 0;
1615 }
1616
1617 static int input_config_props(AVFilterLink *link)
1618 {
1619     FilterPriv *priv  = link->src->priv;
1620     AVCodecContext *c = priv->is->video_st->codec;
1621
1622     link->w = c->width;
1623     link->h = c->height;
1624
1625     return 0;
1626 }
1627
/* source filter feeding decoded player frames into the user filter graph;
   frames are pulled on demand via request_frame on its single output pad */
static AVFilter input_filter =
{
    .name      = "ffplay_input",

    .priv_size = sizeof(FilterPriv),

    .init      = input_init,
    .uninit    = input_uninit,

    .query_formats = input_query_formats,

    .inputs    = (AVFilterPad[]) {{ .name = NULL }},
    .outputs   = (AVFilterPad[]) {{ .name = "default",
                                    .type = CODEC_TYPE_VIDEO,
                                    .request_frame = input_request_frame,
                                    .config_props  = input_config_props, },
                                  { .name = NULL }},
};
1646
/* sink end_frame callback: intentionally empty — completed frames are read
   out synchronously by get_filtered_video_frame() instead */
static void output_end_frame(AVFilterLink *link)
{
}
1650
1651 static int output_query_formats(AVFilterContext *ctx)
1652 {
1653     enum PixelFormat pix_fmts[] = { PIX_FMT_YUV420P, PIX_FMT_NONE };
1654
1655     avfilter_set_common_formats(ctx, avfilter_make_format_list(pix_fmts));
1656     return 0;
1657 }
1658
1659 static int get_filtered_video_frame(AVFilterContext *ctx, AVFrame *frame,
1660                                     int64_t *pts)
1661 {
1662     AVFilterPicRef *pic;
1663
1664     if(avfilter_request_frame(ctx->inputs[0]))
1665         return -1;
1666     if(!(pic = ctx->inputs[0]->cur_pic))
1667         return -1;
1668     ctx->inputs[0]->cur_pic = NULL;
1669
1670     frame->opaque = pic;
1671     *pts          = pic->pts;
1672
1673     memcpy(frame->data,     pic->data,     sizeof(frame->data));
1674     memcpy(frame->linesize, pic->linesize, sizeof(frame->linesize));
1675
1676     return 1;
1677 }
1678
/* sink filter terminating the user graph; pictures arriving on its input
   link are fetched by get_filtered_video_frame() from the video thread */
static AVFilter output_filter =
{
    .name      = "ffplay_output",

    .query_formats = output_query_formats,

    .inputs    = (AVFilterPad[]) {{ .name          = "default",
                                    .type          = CODEC_TYPE_VIDEO,
                                    .end_frame     = output_end_frame,
                                    .min_perms     = AV_PERM_READ, },
                                  { .name = NULL }},
    .outputs   = (AVFilterPad[]) {{ .name = NULL }},
};
1692 #endif  /* CONFIG_AVFILTER */
1693
/* video decoding thread: optionally builds the avfilter graph (source ->
   user filters -> sink), then loops pulling decoded/filtered frames and
   queueing them for display via output_picture2() */
static int video_thread(void *arg)
{
    VideoState *is = arg;
    AVFrame *frame= avcodec_alloc_frame();
    int64_t pts_int;
    double pts;
    int ret;

#if CONFIG_AVFILTER
    AVFilterContext *filt_src = NULL, *filt_out = NULL;
    AVFilterGraph *graph = av_mallocz(sizeof(AVFilterGraph));
    graph->scale_sws_opts = av_strdup("sws_flags=bilinear");

    if(!(filt_src = avfilter_open(&input_filter,  "src")))   goto the_end;
    if(!(filt_out = avfilter_open(&output_filter, "out")))   goto the_end;

    if(avfilter_init_filter(filt_src, NULL, is))             goto the_end;
    if(avfilter_init_filter(filt_out, NULL, frame))          goto the_end;


    /* with a user filter description, parse it between src and out;
       otherwise link the two directly */
    if(vfilters) {
        /* NOTE(review): av_malloc results are not checked here — confirm
           acceptable or add NULL checks */
        AVFilterInOut *outputs = av_malloc(sizeof(AVFilterInOut));
        AVFilterInOut *inputs  = av_malloc(sizeof(AVFilterInOut));

        outputs->name    = av_strdup("in");
        outputs->filter  = filt_src;
        outputs->pad_idx = 0;
        outputs->next    = NULL;

        inputs->name    = av_strdup("out");
        inputs->filter  = filt_out;
        inputs->pad_idx = 0;
        inputs->next    = NULL;

        if (avfilter_graph_parse(graph, vfilters, inputs, outputs, NULL) < 0)
            goto the_end;
        av_freep(&vfilters);
    } else {
        if(avfilter_link(filt_src, 0, filt_out, 0) < 0)          goto the_end;
    }
    avfilter_graph_add_filter(graph, filt_src);
    avfilter_graph_add_filter(graph, filt_out);

    if(avfilter_graph_check_validity(graph, NULL))           goto the_end;
    if(avfilter_graph_config_formats(graph, NULL))           goto the_end;
    if(avfilter_graph_config_links(graph, NULL))             goto the_end;

    is->out_video_filter = filt_out;
#endif

    for(;;) {
#if !CONFIG_AVFILTER
        AVPacket pkt;
#endif
        while (is->paused && !is->videoq.abort_request)
            SDL_Delay(10);
#if CONFIG_AVFILTER
        ret = get_filtered_video_frame(filt_out, frame, &pts_int);
#else
        ret = get_video_frame(is, frame, &pts_int, &pkt);
#endif

        if (ret < 0) goto the_end;

        /* ret == 0: frame skipped/flushed, try again */
        if (!ret)
            continue;

        /* convert stream time_base units to seconds */
        pts = pts_int*av_q2d(is->video_st->time_base);

#if CONFIG_AVFILTER
        ret = output_picture2(is, frame, pts,  -1); /* fixme: unknown pos */
#else
        ret = output_picture2(is, frame, pts,  pkt.pos);
        av_free_packet(&pkt);
#endif
        if (ret < 0)
            goto the_end;

        /* in single-step mode, pause again after each displayed frame */
        if (step)
            if (cur_stream)
                stream_pause(cur_stream);
    }
 the_end:
#if CONFIG_AVFILTER
    avfilter_graph_destroy(graph);
    av_freep(&graph);
#endif
    av_free(frame);
    return 0;
}
1784
/* subtitle decoding thread: pulls packets from the subtitle queue, decodes
   them into the subtitle picture queue and converts their palettes from
   RGBA to YUVA for blending onto the video */
static int subtitle_thread(void *arg)
{
    VideoState *is = arg;
    SubPicture *sp;
    AVPacket pkt1, *pkt = &pkt1;
    int len1, got_subtitle;
    double pts;
    int i, j;
    int r, g, b, y, u, v, a;

    for(;;) {
        while (is->paused && !is->subtitleq.abort_request) {
            SDL_Delay(10);
        }
        if (packet_queue_get(&is->subtitleq, pkt, 1) < 0)
            break;

        /* flush packet after a seek: reset the decoder and continue */
        if(pkt->data == flush_pkt.data){
            avcodec_flush_buffers(is->subtitle_st->codec);
            continue;
        }
        /* wait for a free slot in the subtitle picture queue */
        SDL_LockMutex(is->subpq_mutex);
        while (is->subpq_size >= SUBPICTURE_QUEUE_SIZE &&
               !is->subtitleq.abort_request) {
            SDL_CondWait(is->subpq_cond, is->subpq_mutex);
        }
        SDL_UnlockMutex(is->subpq_mutex);

        if (is->subtitleq.abort_request)
            goto the_end;

        sp = &is->subpq[is->subpq_windex];

       /* NOTE: ipts is the PTS of the _first_ picture beginning in
           this packet, if any */
        pts = 0;
        if (pkt->pts != AV_NOPTS_VALUE)
            pts = av_q2d(is->subtitle_st->time_base)*pkt->pts;

        len1 = avcodec_decode_subtitle2(is->subtitle_st->codec,
                                    &sp->sub, &got_subtitle,
                                    pkt);
//            if (len1 < 0)
//                break;
        if (got_subtitle && sp->sub.format == 0) {
            sp->pts = pts;

            /* convert each rect's palette in place from RGBA to YUVA */
            for (i = 0; i < sp->sub.num_rects; i++)
            {
                for (j = 0; j < sp->sub.rects[i]->nb_colors; j++)
                {
                    RGBA_IN(r, g, b, a, (uint32_t*)sp->sub.rects[i]->pict.data[1] + j);
                    y = RGB_TO_Y_CCIR(r, g, b);
                    u = RGB_TO_U_CCIR(r, g, b, 0);
                    v = RGB_TO_V_CCIR(r, g, b, 0);
                    YUVA_OUT((uint32_t*)sp->sub.rects[i]->pict.data[1] + j, y, u, v, a);
                }
            }

            /* now we can update the picture count */
            if (++is->subpq_windex == SUBPICTURE_QUEUE_SIZE)
                is->subpq_windex = 0;
            SDL_LockMutex(is->subpq_mutex);
            is->subpq_size++;
            SDL_UnlockMutex(is->subpq_mutex);
        }
        av_free_packet(pkt);
//        if (step)
//            if (cur_stream)
//                stream_pause(cur_stream);
    }
 the_end:
    return 0;
}
1859
1860 /* copy samples for viewing in editor window */
1861 static void update_sample_display(VideoState *is, short *samples, int samples_size)
1862 {
1863     int size, len, channels;
1864
1865     channels = is->audio_st->codec->channels;
1866
1867     size = samples_size / sizeof(short);
1868     while (size > 0) {
1869         len = SAMPLE_ARRAY_SIZE - is->sample_array_index;
1870         if (len > size)
1871             len = size;
1872         memcpy(is->sample_array + is->sample_array_index, samples, len * sizeof(short));
1873         samples += len;
1874         is->sample_array_index += len;
1875         if (is->sample_array_index >= SAMPLE_ARRAY_SIZE)
1876             is->sample_array_index = 0;
1877         size -= len;
1878     }
1879 }
1880
1881 /* return the new audio buffer size (samples can be added or deleted
1882    to get better sync if video or external master clock) */
1883 static int synchronize_audio(VideoState *is, short *samples,
1884                              int samples_size1, double pts)
1885 {
1886     int n, samples_size;
1887     double ref_clock;
1888
1889     n = 2 * is->audio_st->codec->channels;
1890     samples_size = samples_size1;
1891
1892     /* if not master, then we try to remove or add samples to correct the clock */
1893     if (((is->av_sync_type == AV_SYNC_VIDEO_MASTER && is->video_st) ||
1894          is->av_sync_type == AV_SYNC_EXTERNAL_CLOCK)) {
1895         double diff, avg_diff;
1896         int wanted_size, min_size, max_size, nb_samples;
1897
1898         ref_clock = get_master_clock(is);
1899         diff = get_audio_clock(is) - ref_clock;
1900
1901         if (diff < AV_NOSYNC_THRESHOLD) {
1902             is->audio_diff_cum = diff + is->audio_diff_avg_coef * is->audio_diff_cum;
1903             if (is->audio_diff_avg_count < AUDIO_DIFF_AVG_NB) {
1904                 /* not enough measures to have a correct estimate */
1905                 is->audio_diff_avg_count++;
1906             } else {
1907                 /* estimate the A-V difference */
1908                 avg_diff = is->audio_diff_cum * (1.0 - is->audio_diff_avg_coef);
1909
1910                 if (fabs(avg_diff) >= is->audio_diff_threshold) {
1911                     wanted_size = samples_size + ((int)(diff * is->audio_st->codec->sample_rate) * n);
1912                     nb_samples = samples_size / n;
1913
1914                     min_size = ((nb_samples * (100 - SAMPLE_CORRECTION_PERCENT_MAX)) / 100) * n;
1915                     max_size = ((nb_samples * (100 + SAMPLE_CORRECTION_PERCENT_MAX)) / 100) * n;
1916                     if (wanted_size < min_size)
1917                         wanted_size = min_size;
1918                     else if (wanted_size > max_size)
1919                         wanted_size = max_size;
1920
1921                     /* add or remove samples to correction the synchro */
1922                     if (wanted_size < samples_size) {
1923                         /* remove samples */
1924                         samples_size = wanted_size;
1925                     } else if (wanted_size > samples_size) {
1926                         uint8_t *samples_end, *q;
1927                         int nb;
1928
1929                         /* add samples */
1930                         nb = (samples_size - wanted_size);
1931                         samples_end = (uint8_t *)samples + samples_size - n;
1932                         q = samples_end + n;
1933                         while (nb > 0) {
1934                             memcpy(q, samples_end, n);
1935                             q += n;
1936                             nb -= n;
1937                         }
1938                         samples_size = wanted_size;
1939                     }
1940                 }
1941 #if 0
1942                 printf("diff=%f adiff=%f sample_diff=%d apts=%0.3f vpts=%0.3f %f\n",
1943                        diff, avg_diff, samples_size - samples_size1,
1944                        is->audio_clock, is->video_clock, is->audio_diff_threshold);
1945 #endif
1946             }
1947         } else {
1948             /* too big difference : may be initial PTS errors, so
1949                reset A-V filter */
1950             is->audio_diff_avg_count = 0;
1951             is->audio_diff_cum = 0;
1952         }
1953     }
1954
1955     return samples_size;
1956 }
1957
/* decode one audio frame and returns its uncompressed size */
/* Decodes packets from is->audioq until one frame of samples is
   available, converts it to S16 if needed, and points is->audio_buf at
   the result.  *pts_ptr receives the presentation time of the returned
   samples.  Returns the data size in bytes, or -1 when the stream is
   paused or the queue was aborted. */
static int audio_decode_frame(VideoState *is, double *pts_ptr)
{
    AVPacket *pkt_temp = &is->audio_pkt_temp;
    AVPacket *pkt = &is->audio_pkt;
    AVCodecContext *dec= is->audio_st->codec;
    int n, len1, data_size;
    double pts;

    for(;;) {
        /* NOTE: the audio packet can contain several frames */
        while (pkt_temp->size > 0) {
            data_size = sizeof(is->audio_buf1);
            len1 = avcodec_decode_audio3(dec,
                                        (int16_t *)is->audio_buf1, &data_size,
                                        pkt_temp);
            if (len1 < 0) {
                /* if error, we skip the frame */
                pkt_temp->size = 0;
                break;
            }

            /* advance past the consumed bytes; any remainder of the
               packet is decoded on the next loop iteration */
            pkt_temp->data += len1;
            pkt_temp->size -= len1;
            if (data_size <= 0)
                continue;

            /* (re)create the sample-format converter whenever the
               decoder output format differs from the last one seen */
            if (dec->sample_fmt != is->audio_src_fmt) {
                if (is->reformat_ctx)
                    av_audio_convert_free(is->reformat_ctx);
                is->reformat_ctx= av_audio_convert_alloc(SAMPLE_FMT_S16, 1,
                                                         dec->sample_fmt, 1, NULL, 0);
                if (!is->reformat_ctx) {
                    fprintf(stderr, "Cannot convert %s sample format to %s sample format\n",
                        avcodec_get_sample_fmt_name(dec->sample_fmt),
                        avcodec_get_sample_fmt_name(SAMPLE_FMT_S16));
                        break;
                }
                is->audio_src_fmt= dec->sample_fmt;
            }

            if (is->reformat_ctx) {
                /* interleaved conversion: stride is bytes per sample for
                   input, 2 bytes (S16) for output */
                const void *ibuf[6]= {is->audio_buf1};
                void *obuf[6]= {is->audio_buf2};
                int istride[6]= {av_get_bits_per_sample_format(dec->sample_fmt)/8};
                int ostride[6]= {2};
                int len= data_size/istride[0];
                if (av_audio_convert(is->reformat_ctx, obuf, ostride, ibuf, istride, len)<0) {
                    printf("av_audio_convert() failed\n");
                    break;
                }
                is->audio_buf= is->audio_buf2;
                /* FIXME: existing code assume that data_size equals framesize*channels*2
                          remove this legacy cruft */
                data_size= len*2;
            }else{
                is->audio_buf= is->audio_buf1;
            }

            /* if no pts, then compute it */
            pts = is->audio_clock;
            *pts_ptr = pts;
            /* advance the audio clock by the duration of the decoded
               data: 2 bytes per sample per channel at sample_rate */
            n = 2 * dec->channels;
            is->audio_clock += (double)data_size /
                (double)(n * dec->sample_rate);
#if defined(DEBUG_SYNC)
            {
                static double last_clock;
                printf("audio: delay=%0.3f clock=%0.3f pts=%0.3f\n",
                       is->audio_clock - last_clock,
                       is->audio_clock, pts);
                last_clock = is->audio_clock;
            }
#endif
            return data_size;
        }

        /* free the current packet */
        if (pkt->data)
            av_free_packet(pkt);

        if (is->paused || is->audioq.abort_request) {
            return -1;
        }

        /* read next packet */
        if (packet_queue_get(&is->audioq, pkt, 1) < 0)
            return -1;
        /* a flush packet marks a seek: reset the decoder state */
        if(pkt->data == flush_pkt.data){
            avcodec_flush_buffers(dec);
            continue;
        }

        pkt_temp->data = pkt->data;
        pkt_temp->size = pkt->size;

        /* if update the audio clock with the pts */
        if (pkt->pts != AV_NOPTS_VALUE) {
            is->audio_clock = av_q2d(is->audio_st->time_base)*pkt->pts;
        }
    }
}
2060
2061 /* get the current audio output buffer size, in samples. With SDL, we
2062    cannot have a precise information */
2063 static int audio_write_get_buf_size(VideoState *is)
2064 {
2065     return is->audio_buf_size - is->audio_buf_index;
2066 }
2067
2068
2069 /* prepare a new audio buffer */
2070 static void sdl_audio_callback(void *opaque, Uint8 *stream, int len)
2071 {
2072     VideoState *is = opaque;
2073     int audio_size, len1;
2074     double pts;
2075
2076     audio_callback_time = av_gettime();
2077
2078     while (len > 0) {
2079         if (is->audio_buf_index >= is->audio_buf_size) {
2080            audio_size = audio_decode_frame(is, &pts);
2081            if (audio_size < 0) {
2082                 /* if error, just output silence */
2083                is->audio_buf = is->audio_buf1;
2084                is->audio_buf_size = 1024;
2085                memset(is->audio_buf, 0, is->audio_buf_size);
2086            } else {
2087                if (is->show_audio)
2088                    update_sample_display(is, (int16_t *)is->audio_buf, audio_size);
2089                audio_size = synchronize_audio(is, (int16_t *)is->audio_buf, audio_size,
2090                                               pts);
2091                is->audio_buf_size = audio_size;
2092            }
2093            is->audio_buf_index = 0;
2094         }
2095         len1 = is->audio_buf_size - is->audio_buf_index;
2096         if (len1 > len)
2097             len1 = len;
2098         memcpy(stream, (uint8_t *)is->audio_buf + is->audio_buf_index, len1);
2099         len -= len1;
2100         stream += len1;
2101         is->audio_buf_index += len1;
2102     }
2103 }
2104
/* open a given stream. Return 0 if OK */
/* Configures the decoder for ic->streams[stream_index] from the global
   command-line options, opens it, and starts the matching consumer:
   the SDL audio device, the video thread, or the subtitle thread.
   Returns -1 on any failure. */
static int stream_component_open(VideoState *is, int stream_index)
{
    AVFormatContext *ic = is->ic;
    AVCodecContext *avctx;
    AVCodec *codec;
    SDL_AudioSpec wanted_spec, spec;

    if (stream_index < 0 || stream_index >= ic->nb_streams)
        return -1;
    avctx = ic->streams[stream_index]->codec;

    /* prepare audio output */
    /* ask the decoder for at most 2 channels (stereo downmix) */
    if (avctx->codec_type == CODEC_TYPE_AUDIO) {
        if (avctx->channels > 0) {
            avctx->request_channels = FFMIN(2, avctx->channels);
        } else {
            avctx->request_channels = 2;
        }
    }

    /* apply the global debug/speed/error-resilience options */
    codec = avcodec_find_decoder(avctx->codec_id);
    avctx->debug_mv = debug_mv;
    avctx->debug = debug;
    avctx->workaround_bugs = workaround_bugs;
    avctx->lowres = lowres;
    if(lowres) avctx->flags |= CODEC_FLAG_EMU_EDGE;
    avctx->idct_algo= idct;
    if(fast) avctx->flags2 |= CODEC_FLAG2_FAST;
    avctx->skip_frame= skip_frame;
    avctx->skip_idct= skip_idct;
    avctx->skip_loop_filter= skip_loop_filter;
    avctx->error_recognition= error_recognition;
    avctx->error_concealment= error_concealment;
    avcodec_thread_init(avctx, thread_count);

    set_context_opts(avctx, avcodec_opts[avctx->codec_type], 0);

    if (!codec ||
        avcodec_open(avctx, codec) < 0)
        return -1;

    /* prepare audio output */
    /* the SDL device is opened here, but playback only starts after the
       queue is initialised below (SDL_PauseAudio(0)) */
    if (avctx->codec_type == CODEC_TYPE_AUDIO) {
        wanted_spec.freq = avctx->sample_rate;
        wanted_spec.format = AUDIO_S16SYS;
        wanted_spec.channels = avctx->channels;
        wanted_spec.silence = 0;
        wanted_spec.samples = SDL_AUDIO_BUFFER_SIZE;
        wanted_spec.callback = sdl_audio_callback;
        wanted_spec.userdata = is;
        if (SDL_OpenAudio(&wanted_spec, &spec) < 0) {
            fprintf(stderr, "SDL_OpenAudio: %s\n", SDL_GetError());
            return -1;
        }
        is->audio_hw_buf_size = spec.size;
        is->audio_src_fmt= SAMPLE_FMT_S16;
    }

    ic->streams[stream_index]->discard = AVDISCARD_DEFAULT;
    switch(avctx->codec_type) {
    case CODEC_TYPE_AUDIO:
        is->audio_stream = stream_index;
        is->audio_st = ic->streams[stream_index];
        is->audio_buf_size = 0;
        is->audio_buf_index = 0;

        /* init averaging filter */
        is->audio_diff_avg_coef = exp(log(0.01) / AUDIO_DIFF_AVG_NB);
        is->audio_diff_avg_count = 0;
        /* since we do not have a precise enough audio fifo fullness,
           we correct audio sync only if larger than this threshold */
        is->audio_diff_threshold = 2.0 * SDL_AUDIO_BUFFER_SIZE / avctx->sample_rate;

        memset(&is->audio_pkt, 0, sizeof(is->audio_pkt));
        packet_queue_init(&is->audioq);
        SDL_PauseAudio(0);
        break;
    case CODEC_TYPE_VIDEO:
        is->video_stream = stream_index;
        is->video_st = ic->streams[stream_index];

//        is->video_current_pts_time = av_gettime();

        packet_queue_init(&is->videoq);
        is->video_tid = SDL_CreateThread(video_thread, is);
        break;
    case CODEC_TYPE_SUBTITLE:
        is->subtitle_stream = stream_index;
        is->subtitle_st = ic->streams[stream_index];
        packet_queue_init(&is->subtitleq);

        is->subtitle_tid = SDL_CreateThread(subtitle_thread, is);
        break;
    default:
        break;
    }
    return 0;
}
2204
/* Undo stream_component_open(): abort the stream's packet queue, join
   the consumer thread (or close the SDL audio device), and clear the
   per-stream state in the VideoState. */
static void stream_component_close(VideoState *is, int stream_index)
{
    AVFormatContext *ic = is->ic;
    AVCodecContext *avctx;

    if (stream_index < 0 || stream_index >= ic->nb_streams)
        return;
    avctx = ic->streams[stream_index]->codec;

    switch(avctx->codec_type) {
    case CODEC_TYPE_AUDIO:
        /* abort first so the audio callback stops asking for data,
           then close the device before tearing down the queue */
        packet_queue_abort(&is->audioq);

        SDL_CloseAudio();

        packet_queue_end(&is->audioq);
        if (is->reformat_ctx)
            av_audio_convert_free(is->reformat_ctx);
        is->reformat_ctx = NULL;
        break;
    case CODEC_TYPE_VIDEO:
        packet_queue_abort(&is->videoq);

        /* note: we also signal this mutex to make sure we deblock the
           video thread in all cases */
        SDL_LockMutex(is->pictq_mutex);
        SDL_CondSignal(is->pictq_cond);
        SDL_UnlockMutex(is->pictq_mutex);

        SDL_WaitThread(is->video_tid, NULL);

        packet_queue_end(&is->videoq);
        break;
    case CODEC_TYPE_SUBTITLE:
        packet_queue_abort(&is->subtitleq);

        /* note: we also signal this mutex to make sure we deblock the
           video thread in all cases */
        SDL_LockMutex(is->subpq_mutex);
        is->subtitle_stream_changed = 1;

        SDL_CondSignal(is->subpq_cond);
        SDL_UnlockMutex(is->subpq_mutex);

        SDL_WaitThread(is->subtitle_tid, NULL);

        packet_queue_end(&is->subtitleq);
        break;
    default:
        break;
    }

    /* stop the demuxer from queuing more packets for this stream */
    ic->streams[stream_index]->discard = AVDISCARD_ALL;
    avcodec_close(avctx);
    switch(avctx->codec_type) {
    case CODEC_TYPE_AUDIO:
        is->audio_st = NULL;
        is->audio_stream = -1;
        break;
    case CODEC_TYPE_VIDEO:
        is->video_st = NULL;
        is->video_stream = -1;
        break;
    case CODEC_TYPE_SUBTITLE:
        is->subtitle_st = NULL;
        is->subtitle_stream = -1;
        break;
    default:
        break;
    }
}
2276
/* since we have only one decoding thread, we can use a global
   variable instead of a thread local variable */
static VideoState *global_video_state;

/* Interrupt callback installed via url_set_interrupt_cb(): a non-zero
   return aborts blocking I/O inside libavformat once an abort has been
   requested for the active VideoState. */
static int decode_interrupt_cb(void)
{
    return (global_video_state && global_video_state->abort_request);
}
2285
/* this thread gets the stream from the disk or the network */
/* Demuxing thread: opens the input, selects and opens the best
   audio/video/subtitle streams, then loops reading packets and
   dispatching them to the per-stream queues until abort_request is
   set.  Also services seek requests and end-of-file handling. */
static int decode_thread(void *arg)
{
    VideoState *is = arg;
    AVFormatContext *ic;
    int err, i, ret;
    int st_index[CODEC_TYPE_NB];
    int st_count[CODEC_TYPE_NB]={0};
    int st_best_packet_count[CODEC_TYPE_NB];
    AVPacket pkt1, *pkt = &pkt1;
    AVFormatParameters params, *ap = &params;
    int eof=0;

    ic = avformat_alloc_context();

    /* -1 marks "no stream selected yet" for every media type */
    memset(st_index, -1, sizeof(st_index));
    memset(st_best_packet_count, -1, sizeof(st_best_packet_count));
    is->video_stream = -1;
    is->audio_stream = -1;
    is->subtitle_stream = -1;

    /* allow blocking demuxer I/O to be interrupted via abort_request */
    global_video_state = is;
    url_set_interrupt_cb(decode_interrupt_cb);

    memset(ap, 0, sizeof(*ap));

    /* hints for formats that cannot probe these parameters themselves */
    ap->prealloced_context = 1;
    ap->width = frame_width;
    ap->height= frame_height;
    ap->time_base= (AVRational){1, 25};
    ap->pix_fmt = frame_pix_fmt;

    set_context_opts(ic, avformat_opts, AV_OPT_FLAG_DECODING_PARAM);

    err = av_open_input_file(&ic, is->filename, is->iformat, 0, ap);
    if (err < 0) {
        print_error(is->filename, err);
        ret = -1;
        goto fail;
    }
    is->ic = ic;

    if(genpts)
        ic->flags |= AVFMT_FLAG_GENPTS;

    err = av_find_stream_info(ic);
    if (err < 0) {
        fprintf(stderr, "%s: could not find codec parameters\n", is->filename);
        ret = -1;
        goto fail;
    }
    if(ic->pb)
        ic->pb->eof_reached= 0; //FIXME hack, ffplay maybe should not use url_feof() to test for the end

    if(seek_by_bytes<0)
        seek_by_bytes= !!(ic->iformat->flags & AVFMT_TS_DISCONT);

    /* if seeking requested, we execute it */
    if (start_time != AV_NOPTS_VALUE) {
        int64_t timestamp;

        timestamp = start_time;
        /* add the stream start time */
        if (ic->start_time != AV_NOPTS_VALUE)
            timestamp += ic->start_time;
        ret = avformat_seek_file(ic, -1, INT64_MIN, timestamp, INT64_MAX, 0);
        if (ret < 0) {
            fprintf(stderr, "%s: could not seek to position %0.3f\n",
                    is->filename, (double)timestamp / AV_TIME_BASE);
        }
    }

    /* pick, per media type, the stream requested on the command line or
       otherwise the one with the most frames seen during probing */
    for(i = 0; i < ic->nb_streams; i++) {
        AVStream *st= ic->streams[i];
        AVCodecContext *avctx = st->codec;
        ic->streams[i]->discard = AVDISCARD_ALL;
        if(avctx->codec_type >= (unsigned)CODEC_TYPE_NB)
            continue;
        if(st_count[avctx->codec_type]++ != wanted_stream[avctx->codec_type] && wanted_stream[avctx->codec_type] >= 0)
            continue;

        if(st_best_packet_count[avctx->codec_type] >= st->codec_info_nb_frames)
            continue;
        st_best_packet_count[avctx->codec_type]= st->codec_info_nb_frames;

        switch(avctx->codec_type) {
        case CODEC_TYPE_AUDIO:
            if (!audio_disable)
                st_index[CODEC_TYPE_AUDIO] = i;
            break;
        case CODEC_TYPE_VIDEO:
        case CODEC_TYPE_SUBTITLE:
            if (!video_disable)
                st_index[avctx->codec_type] = i;
            break;
        default:
            break;
        }
    }
    if (show_status) {
        dump_format(ic, 0, is->filename, 0);
    }

    /* open the streams */
    if (st_index[CODEC_TYPE_AUDIO] >= 0) {
        stream_component_open(is, st_index[CODEC_TYPE_AUDIO]);
    }

    /* fall back to the audio visualization when no video opened */
    ret=-1;
    if (st_index[CODEC_TYPE_VIDEO] >= 0) {
        ret= stream_component_open(is, st_index[CODEC_TYPE_VIDEO]);
    }
    is->refresh_tid = SDL_CreateThread(refresh_thread, is);
    if(ret<0) {
        if (!display_disable)
            is->show_audio = 2;
    }

    if (st_index[CODEC_TYPE_SUBTITLE] >= 0) {
        stream_component_open(is, st_index[CODEC_TYPE_SUBTITLE]);
    }

    if (is->video_stream < 0 && is->audio_stream < 0) {
        fprintf(stderr, "%s: could not open codecs\n", is->filename);
        ret = -1;
        goto fail;
    }

    /* main demuxing loop */
    for(;;) {
        if (is->abort_request)
            break;
        if (is->paused != is->last_paused) {
            is->last_paused = is->paused;
            if (is->paused)
                is->read_pause_return= av_read_pause(ic);
            else
                av_read_play(ic);
        }
#if CONFIG_RTSP_DEMUXER
        if (is->paused && !strcmp(ic->iformat->name, "rtsp")) {
            /* wait 10 ms to avoid trying to get another packet */
            /* XXX: horrible */
            SDL_Delay(10);
            continue;
        }
#endif
        if (is->seek_req) {
            int64_t seek_target= is->seek_pos;
            int64_t seek_min= is->seek_rel > 0 ? seek_target - is->seek_rel + 2: INT64_MIN;
            int64_t seek_max= is->seek_rel < 0 ? seek_target - is->seek_rel - 2: INT64_MAX;
//FIXME the +-2 is due to rounding being not done in the correct direction in generation
//      of the seek_pos/seek_rel variables

            ret = avformat_seek_file(is->ic, -1, seek_min, seek_target, seek_max, is->seek_flags);
            if (ret < 0) {
                fprintf(stderr, "%s: error while seeking\n", is->ic->filename);
            }else{
                /* flush each open queue and enqueue a flush packet so
                   the decoders reset their state */
                if (is->audio_stream >= 0) {
                    packet_queue_flush(&is->audioq);
                    packet_queue_put(&is->audioq, &flush_pkt);
                }
                if (is->subtitle_stream >= 0) {
                    packet_queue_flush(&is->subtitleq);
                    packet_queue_put(&is->subtitleq, &flush_pkt);
                }
                if (is->video_stream >= 0) {
                    packet_queue_flush(&is->videoq);
                    packet_queue_put(&is->videoq, &flush_pkt);
                }
            }
            is->seek_req = 0;
            eof= 0;
        }

        /* if the queue are full, no need to read more */
        if (   is->audioq.size + is->videoq.size + is->subtitleq.size > MAX_QUEUE_SIZE
            || (   (is->audioq   .size  > MIN_AUDIOQ_SIZE || is->audio_stream<0)
                && (is->videoq   .nb_packets > MIN_FRAMES || is->video_stream<0)
                && (is->subtitleq.nb_packets > MIN_FRAMES || is->subtitle_stream<0))) {
            /* wait 10 ms */
            SDL_Delay(10);
            continue;
        }
        if(url_feof(ic->pb) || eof) {
            /* feed an empty packet so the video decoder can flush out
               any delayed frames */
            if(is->video_stream >= 0){
                av_init_packet(pkt);
                pkt->data=NULL;
                pkt->size=0;
                pkt->stream_index= is->video_stream;
                packet_queue_put(&is->videoq, pkt);
            }
            SDL_Delay(10);
            if(autoexit && is->audioq.size + is->videoq.size + is->subtitleq.size ==0){
                ret=AVERROR_EOF;
                goto fail;
            }
            continue;
        }
        ret = av_read_frame(ic, pkt);
        if (ret < 0) {
            if (ret == AVERROR_EOF)
                eof=1;
            if (url_ferror(ic->pb))
                break;
            SDL_Delay(100); /* wait for user event */
            continue;
        }
        /* route the packet to the queue of its stream, or drop it */
        if (pkt->stream_index == is->audio_stream) {
            packet_queue_put(&is->audioq, pkt);
        } else if (pkt->stream_index == is->video_stream) {
            packet_queue_put(&is->videoq, pkt);
        } else if (pkt->stream_index == is->subtitle_stream) {
            packet_queue_put(&is->subtitleq, pkt);
        } else {
            av_free_packet(pkt);
        }
    }
    /* wait until the end */
    while (!is->abort_request) {
        SDL_Delay(100);
    }

    ret = 0;
 fail:
    /* disable interrupting */
    global_video_state = NULL;

    /* close each stream */
    if (is->audio_stream >= 0)
        stream_component_close(is, is->audio_stream);
    if (is->video_stream >= 0)
        stream_component_close(is, is->video_stream);
    if (is->subtitle_stream >= 0)
        stream_component_close(is, is->subtitle_stream);
    if (is->ic) {
        av_close_input_file(is->ic);
        is->ic = NULL; /* safety */
    }
    url_set_interrupt_cb(NULL);

    /* tell the event loop we are bailing out on error */
    if (ret != 0) {
        SDL_Event event;

        event.type = FF_QUIT_EVENT;
        event.user.data1 = is;
        SDL_PushEvent(&event);
    }
    return 0;
}
2535
2536 static VideoState *stream_open(const char *filename, AVInputFormat *iformat)
2537 {
2538     VideoState *is;
2539
2540     is = av_mallocz(sizeof(VideoState));
2541     if (!is)
2542         return NULL;
2543     av_strlcpy(is->filename, filename, sizeof(is->filename));
2544     is->iformat = iformat;
2545     is->ytop = 0;
2546     is->xleft = 0;
2547
2548     /* start video display */
2549     is->pictq_mutex = SDL_CreateMutex();
2550     is->pictq_cond = SDL_CreateCond();
2551
2552     is->subpq_mutex = SDL_CreateMutex();
2553     is->subpq_cond = SDL_CreateCond();
2554
2555     is->av_sync_type = av_sync_type;
2556     is->parse_tid = SDL_CreateThread(decode_thread, is);
2557     if (!is->parse_tid) {
2558         av_free(is);
2559         return NULL;
2560     }
2561     return is;
2562 }
2563
/* Stop the demuxing and refresh threads, free all queued pictures and
   SDL synchronization primitives, and release the VideoState itself. */
static void stream_close(VideoState *is)
{
    VideoPicture *vp;
    int i;
    /* XXX: use a special url_shutdown call to abort parse cleanly */
    is->abort_request = 1;
    SDL_WaitThread(is->parse_tid, NULL);
    SDL_WaitThread(is->refresh_tid, NULL);

    /* free all pictures */
    for(i=0;i<VIDEO_PICTURE_QUEUE_SIZE; i++) {
        vp = &is->pictq[i];
#if CONFIG_AVFILTER
        if (vp->picref) {
            avfilter_unref_pic(vp->picref);
            vp->picref = NULL;
        }
#endif
        if (vp->bmp) {
            SDL_FreeYUVOverlay(vp->bmp);
            vp->bmp = NULL;
        }
    }
    SDL_DestroyMutex(is->pictq_mutex);
    SDL_DestroyCond(is->pictq_cond);
    SDL_DestroyMutex(is->subpq_mutex);
    SDL_DestroyCond(is->subpq_cond);
#if !CONFIG_AVFILTER
    /* the scaler context only exists in the non-avfilter code path */
    if (is->img_convert_ctx)
        sws_freeContext(is->img_convert_ctx);
#endif
    av_free(is);
}
2597
/* Switch to the next usable stream of the given type.  For subtitles
   the cycle includes "no stream" (index -1); for audio/video a stream
   of the requested type must currently be open. */
static void stream_cycle_channel(VideoState *is, int codec_type)
{
    AVFormatContext *ic = is->ic;
    int start_index, stream_index;
    AVStream *st;

    if (codec_type == CODEC_TYPE_VIDEO)
        start_index = is->video_stream;
    else if (codec_type == CODEC_TYPE_AUDIO)
        start_index = is->audio_stream;
    else
        start_index = is->subtitle_stream;
    /* nothing to cycle from (subtitles may legitimately start at -1) */
    if (start_index < (codec_type == CODEC_TYPE_SUBTITLE ? -1 : 0))
        return;
    stream_index = start_index;
    for(;;) {
        if (++stream_index >= is->ic->nb_streams)
        {
            if (codec_type == CODEC_TYPE_SUBTITLE)
            {
                /* wrap to "subtitles off" instead of the first stream */
                stream_index = -1;
                goto the_end;
            } else
                stream_index = 0;
        }
        /* came full circle without finding another candidate */
        if (stream_index == start_index)
            return;
        st = ic->streams[stream_index];
        if (st->codec->codec_type == codec_type) {
            /* check that parameters are OK */
            switch(codec_type) {
            case CODEC_TYPE_AUDIO:
                if (st->codec->sample_rate != 0 &&
                    st->codec->channels != 0)
                    goto the_end;
                break;
            case CODEC_TYPE_VIDEO:
            case CODEC_TYPE_SUBTITLE:
                goto the_end;
            default:
                break;
            }
        }
    }
 the_end:
    stream_component_close(is, start_index);
    stream_component_open(is, stream_index);
}
2646
2647
2648 static void toggle_full_screen(void)
2649 {
2650     is_full_screen = !is_full_screen;
2651     if (!fs_screen_width) {
2652         /* use default SDL method */
2653 //        SDL_WM_ToggleFullScreen(screen);
2654     }
2655     video_open(cur_stream);
2656 }
2657
2658 static void toggle_pause(void)
2659 {
2660     if (cur_stream)
2661         stream_pause(cur_stream);
2662     step = 0;
2663 }
2664
2665 static void step_to_next_frame(void)
2666 {
2667     if (cur_stream) {
2668         /* if the stream is paused unpause it, then step */
2669         if (cur_stream->paused)
2670             stream_pause(cur_stream);
2671     }
2672     step = 1;
2673 }
2674
/* Tear down the player completely and terminate the process:
   close the active stream, free the global option contexts, shut down
   SDL, and exit(0).  This function does not return. */
static void do_exit(void)
{
    int i;
    if (cur_stream) {
        stream_close(cur_stream);
        cur_stream = NULL;
    }
    /* release the per-type codec option contexts allocated at startup */
    for (i = 0; i < CODEC_TYPE_NB; i++)
        av_free(avcodec_opts[i]);
    av_free(avformat_opts);
    av_free(sws_opts);
#if CONFIG_AVFILTER
    avfilter_uninit();
#endif
    if (show_status)
        printf("\n");
    SDL_Quit();
    exit(0);
}
2694
2695 static void toggle_audio_display(void)
2696 {
2697     if (cur_stream) {
2698         int bgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0x00);
2699         cur_stream->show_audio = (cur_stream->show_audio + 1) % 3;
2700         fill_rectangle(screen,
2701                     cur_stream->xleft, cur_stream->ytop, cur_stream->width, cur_stream->height,
2702                     bgcolor);
2703         SDL_UpdateRect(screen, cur_stream->xleft, cur_stream->ytop, cur_stream->width, cur_stream->height);
2704     }
2705 }
2706
/* handle an event sent by the GUI */
/* Main UI loop: blocks on SDL events and dispatches key presses,
 * mouse seeks, window resizes and the custom FF_* user events.
 * Runs forever; exit happens via do_exit() inside the loop. */
static void event_loop(void)
{
    SDL_Event event;
    double incr, pos, frac;

    for(;;) {
        double x;
        SDL_WaitEvent(&event);
        switch(event.type) {
        case SDL_KEYDOWN:
            switch(event.key.keysym.sym) {
            case SDLK_ESCAPE:
            case SDLK_q:
                do_exit();
                break;
            case SDLK_f:
                toggle_full_screen();
                break;
            case SDLK_p:
            case SDLK_SPACE:
                toggle_pause();
                break;
            case SDLK_s: //S: Step to next frame
                step_to_next_frame();
                break;
            case SDLK_a:
                if (cur_stream)
                    stream_cycle_channel(cur_stream, CODEC_TYPE_AUDIO);
                break;
            case SDLK_v:
                if (cur_stream)
                    stream_cycle_channel(cur_stream, CODEC_TYPE_VIDEO);
                break;
            case SDLK_t:
                if (cur_stream)
                    stream_cycle_channel(cur_stream, CODEC_TYPE_SUBTITLE);
                break;
            case SDLK_w:
                toggle_audio_display();
                break;
            /* Arrow keys: relative seek. incr is in seconds; when seeking
             * by bytes it is converted to a byte offset below. */
            case SDLK_LEFT:
                incr = -10.0;
                goto do_seek;
            case SDLK_RIGHT:
                incr = 10.0;
                goto do_seek;
            case SDLK_UP:
                incr = 60.0;
                goto do_seek;
            case SDLK_DOWN:
                incr = -60.0;
            do_seek:
                if (cur_stream) {
                    if (seek_by_bytes) {
                        /* Best available current byte position: video pos,
                         * then audio packet pos, then raw file offset. */
                        if (cur_stream->video_stream >= 0 && cur_stream->video_current_pos>=0){
                            pos= cur_stream->video_current_pos;
                        }else if(cur_stream->audio_stream >= 0 && cur_stream->audio_pkt.pos>=0){
                            pos= cur_stream->audio_pkt.pos;
                        }else
                            pos = url_ftell(cur_stream->ic->pb);
                        /* Convert seconds to bytes via the bit rate, or a
                         * fixed fallback rate when none is known. */
                        if (cur_stream->ic->bit_rate)
                            incr *= cur_stream->ic->bit_rate / 8.0;
                        else
                            incr *= 180000.0;
                        pos += incr;
                        stream_seek(cur_stream, pos, incr, 1);
                    } else {
                        /* Time-based seek relative to the master clock. */
                        pos = get_master_clock(cur_stream);
                        pos += incr;
                        stream_seek(cur_stream, (int64_t)(pos * AV_TIME_BASE), (int64_t)(incr * AV_TIME_BASE), 0);
                    }
                }
                break;
            default:
                break;
            }
            break;
        /* Click or drag: seek to the fraction of the window width. */
        case SDL_MOUSEBUTTONDOWN:
        case SDL_MOUSEMOTION:
            if(event.type ==SDL_MOUSEBUTTONDOWN){
                x= event.button.x;
            }else{
                if(event.motion.state != SDL_PRESSED)
                    break;
                x= event.motion.x;
            }
            if (cur_stream) {
                if(seek_by_bytes || cur_stream->ic->duration<=0){
                    uint64_t size=  url_fsize(cur_stream->ic->pb);
                    stream_seek(cur_stream, size*x/cur_stream->width, 0, 1);
                }else{
                    int64_t ts;
                    int ns, hh, mm, ss;
                    int tns, thh, tmm, tss;
                    tns = cur_stream->ic->duration/1000000LL;
                    thh = tns/3600;
                    tmm = (tns%3600)/60;
                    tss = (tns%60);
                    frac = x/cur_stream->width;
                    ns = frac*tns;
                    hh = ns/3600;
                    mm = (ns%3600)/60;
                    ss = (ns%60);
                    fprintf(stderr, "Seek to %2.0f%% (%2d:%02d:%02d) of total duration (%2d:%02d:%02d)       \n", frac*100,
                            hh, mm, ss, thh, tmm, tss);
                    ts = frac*cur_stream->ic->duration;
                    /* duration is relative to start_time; rebase to an
                     * absolute timestamp before seeking. */
                    if (cur_stream->ic->start_time != AV_NOPTS_VALUE)
                        ts += cur_stream->ic->start_time;
                    stream_seek(cur_stream, ts, 0, 0);
                }
            }
            break;
        case SDL_VIDEORESIZE:
            if (cur_stream) {
                screen = SDL_SetVideoMode(event.resize.w, event.resize.h, 0,
                                          SDL_HWSURFACE|SDL_RESIZABLE|SDL_ASYNCBLIT|SDL_HWACCEL);
                screen_width = cur_stream->width = event.resize.w;
                screen_height= cur_stream->height= event.resize.h;
            }
            break;
        case SDL_QUIT:
        case FF_QUIT_EVENT:
            do_exit();
            break;
        case FF_ALLOC_EVENT:
            /* data1 is the VideoState that requested a picture buffer. */
            video_open(event.user.data1);
            alloc_picture(event.user.data1);
            break;
        case FF_REFRESH_EVENT:
            video_refresh_timer(event.user.data1);
            /* NOTE(review): cur_stream is dereferenced without a NULL
             * check here, unlike the other cases — confirm a refresh
             * event cannot arrive after stream_close(). */
            cur_stream->refresh=0;
            break;
        default:
            break;
        }
    }
}
2845
2846 static void opt_frame_size(const char *arg)
2847 {
2848     if (av_parse_video_frame_size(&frame_width, &frame_height, arg) < 0) {
2849         fprintf(stderr, "Incorrect frame size\n");
2850         exit(1);
2851     }
2852     if ((frame_width % 2) != 0 || (frame_height % 2) != 0) {
2853         fprintf(stderr, "Frame size must be a multiple of 2\n");
2854         exit(1);
2855     }
2856 }
2857
/* -x option: force the displayed width (1..INT_MAX; dies on bad input). */
static int opt_width(const char *opt, const char *arg)
{
    screen_width = parse_number_or_die(opt, arg, OPT_INT64, 1, INT_MAX);
    return 0;
}
2863
/* -y option: force the displayed height (1..INT_MAX; dies on bad input). */
static int opt_height(const char *opt, const char *arg)
{
    screen_height = parse_number_or_die(opt, arg, OPT_INT64, 1, INT_MAX);
    return 0;
}
2869
2870 static void opt_format(const char *arg)
2871 {
2872     file_iformat = av_find_input_format(arg);
2873     if (!file_iformat) {
2874         fprintf(stderr, "Unknown input format: %s\n", arg);
2875         exit(1);
2876     }
2877 }
2878
/* -pix_fmt option: set the forced pixel format from its name.
 * NOTE(review): an unrecognized name is not rejected here — presumably
 * av_get_pix_fmt returns a "none" value; confirm downstream handling. */
static void opt_frame_pix_fmt(const char *arg)
{
    frame_pix_fmt = av_get_pix_fmt(arg);
}
2883
2884 static int opt_sync(const char *opt, const char *arg)
2885 {
2886     if (!strcmp(arg, "audio"))
2887         av_sync_type = AV_SYNC_AUDIO_MASTER;
2888     else if (!strcmp(arg, "video"))
2889         av_sync_type = AV_SYNC_VIDEO_MASTER;
2890     else if (!strcmp(arg, "ext"))
2891         av_sync_type = AV_SYNC_EXTERNAL_CLOCK;
2892     else {
2893         fprintf(stderr, "Unknown value for %s: %s\n", opt, arg);
2894         exit(1);
2895     }
2896     return 0;
2897 }
2898
/* -ss option: parse the start position (time string; dies on bad input). */
static int opt_seek(const char *opt, const char *arg)
{
    start_time = parse_time_or_die(opt, arg, 1);
    return 0;
}
2904
/* -debug option: raise the log level to maximum and set the codec
 * debug flag bitmask. */
static int opt_debug(const char *opt, const char *arg)
{
    av_log_set_level(99);
    debug = parse_number_or_die(opt, arg, OPT_INT64, 0, INT_MAX);
    return 0;
}
2911
/* -vismv option: set the motion-vector visualization flag bitmask. */
static int opt_vismv(const char *opt, const char *arg)
{
    debug_mv = parse_number_or_die(opt, arg, OPT_INT64, INT_MIN, INT_MAX);
    return 0;
}
2917
/* -threads option: set the decoder thread count; warns when the build
 * has no real thread support. */
static int opt_thread_count(const char *opt, const char *arg)
{
    thread_count= parse_number_or_die(opt, arg, OPT_INT64, 0, INT_MAX);
#if !HAVE_THREADS
    fprintf(stderr, "Warning: not compiled with thread support, using thread emulation\n");
#endif
    return 0;
}
2926
/* Command line option table, consumed by parse_options()/show_help(). */
static const OptionDef options[] = {
#include "cmdutils_common_opts.h"
    /* display geometry */
    { "x", HAS_ARG | OPT_FUNC2, {(void*)opt_width}, "force displayed width", "width" },
    { "y", HAS_ARG | OPT_FUNC2, {(void*)opt_height}, "force displayed height", "height" },
    { "s", HAS_ARG | OPT_VIDEO, {(void*)opt_frame_size}, "set frame size (WxH or abbreviation)", "size" },
    { "fs", OPT_BOOL, {(void*)&is_full_screen}, "force full screen" },
    /* stream selection */
    { "an", OPT_BOOL, {(void*)&audio_disable}, "disable audio" },
    { "vn", OPT_BOOL, {(void*)&video_disable}, "disable video" },
    { "ast", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&wanted_stream[CODEC_TYPE_AUDIO]}, "select desired audio stream", "stream_number" },
    { "vst", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&wanted_stream[CODEC_TYPE_VIDEO]}, "select desired video stream", "stream_number" },
    { "sst", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&wanted_stream[CODEC_TYPE_SUBTITLE]}, "select desired subtitle stream", "stream_number" },
    /* seeking and input */
    { "ss", HAS_ARG | OPT_FUNC2, {(void*)&opt_seek}, "seek to a given position in seconds", "pos" },
    { "bytes", OPT_INT | HAS_ARG, {(void*)&seek_by_bytes}, "seek by bytes 0=off 1=on -1=auto", "val" },
    { "nodisp", OPT_BOOL, {(void*)&display_disable}, "disable graphical display" },
    { "f", HAS_ARG, {(void*)opt_format}, "force format", "fmt" },
    { "pix_fmt", HAS_ARG | OPT_EXPERT | OPT_VIDEO, {(void*)opt_frame_pix_fmt}, "set pixel format", "format" },
    { "stats", OPT_BOOL | OPT_EXPERT, {(void*)&show_status}, "show status", "" },
    /* decoder tuning (expert) */
    { "debug", HAS_ARG | OPT_FUNC2 | OPT_EXPERT, {(void*)opt_debug}, "print specific debug info", "" },
    { "bug", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&workaround_bugs}, "workaround bugs", "" },
    { "vismv", HAS_ARG | OPT_FUNC2 | OPT_EXPERT, {(void*)opt_vismv}, "visualize motion vectors", "" },
    { "fast", OPT_BOOL | OPT_EXPERT, {(void*)&fast}, "non spec compliant optimizations", "" },
    { "genpts", OPT_BOOL | OPT_EXPERT, {(void*)&genpts}, "generate pts", "" },
    { "drp", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&decoder_reorder_pts}, "let decoder reorder pts 0=off 1=on -1=auto", ""},
    { "lowres", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&lowres}, "", "" },
    { "skiploop", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&skip_loop_filter}, "", "" },
    { "skipframe", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&skip_frame}, "", "" },
    { "skipidct", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&skip_idct}, "", "" },
    { "idct", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&idct}, "set idct algo",  "algo" },
    { "er", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&error_recognition}, "set error detection threshold (0-4)",  "threshold" },
    { "ec", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&error_concealment}, "set error concealment options",  "bit_mask" },
    /* playback behaviour */
    { "sync", HAS_ARG | OPT_FUNC2 | OPT_EXPERT, {(void*)opt_sync}, "set audio-video sync. type (type=audio/video/ext)", "type" },
    { "threads", HAS_ARG | OPT_FUNC2 | OPT_EXPERT, {(void*)opt_thread_count}, "thread count", "count" },
    { "autoexit", OPT_BOOL | OPT_EXPERT, {(void*)&autoexit}, "exit at the end", "" },
    { "framedrop", OPT_BOOL | OPT_EXPERT, {(void*)&framedrop}, "drop frames when cpu is too slow", "" },
#if CONFIG_AVFILTER
    { "vfilters", OPT_STRING | HAS_ARG, {(void*)&vfilters}, "video filters", "filter list" },
#endif
    { "rdftspeed", OPT_INT | HAS_ARG| OPT_AUDIO | OPT_EXPERT, {(void*)&rdftspeed}, "rdft speed", "msecs" },
    { "default", OPT_FUNC2 | HAS_ARG | OPT_AUDIO | OPT_VIDEO | OPT_EXPERT, {(void*)opt_default}, "generic catch all option", "" },
    { NULL, },
};
2968
/* Print the one-line synopsis shown before the help text and when the
 * program is invoked without an input file. */
static void show_usage(void)
{
    fputs("Simple media player\n", stdout);
    fputs("usage: ffplay [options] input_file\n", stdout);
    fputs("\n", stdout);
}
2975
/* -h option: print usage, the option tables (non-expert then expert)
 * and the interactive key bindings. */
static void show_help(void)
{
    show_usage();
    /* mask=OPT_EXPERT, value=0: list only non-expert options first. */
    show_help_options(options, "Main options:\n",
                      OPT_EXPERT, 0);
    /* value=OPT_EXPERT: now list only the expert options. */
    show_help_options(options, "\nAdvanced options:\n",
                      OPT_EXPERT, OPT_EXPERT);
    printf("\nWhile playing:\n"
           "q, ESC              quit\n"
           "f                   toggle full screen\n"
           "p, SPC              pause\n"
           "a                   cycle audio channel\n"
           "v                   cycle video channel\n"
           "t                   cycle subtitle channel\n"
           "w                   show audio waves\n"
           "left/right          seek backward/forward 10 seconds\n"
           "down/up             seek backward/forward 1 minute\n"
           "mouse click         seek to percentage in file corresponding to fraction of width\n"
           );
}
2996
2997 static void opt_input_file(const char *filename)
2998 {
2999     if (input_filename) {
3000         fprintf(stderr, "Argument '%s' provided as input filename, but '%s' was already specified.\n",
3001                 filename, input_filename);
3002         exit(1);
3003     }
3004     if (!strcmp(filename, "-"))
3005         filename = "pipe:";
3006     input_filename = filename;
3007 }
3008
3009 /* Called from the main */
3010 int main(int argc, char **argv)
3011 {
3012     int flags, i;
3013
3014     /* register all codecs, demux and protocols */
3015     avcodec_register_all();
3016     avdevice_register_all();
3017 #if CONFIG_AVFILTER
3018     avfilter_register_all();
3019 #endif
3020     av_register_all();
3021
3022     for(i=0; i<CODEC_TYPE_NB; i++){
3023         avcodec_opts[i]= avcodec_alloc_context2(i);
3024     }
3025     avformat_opts = avformat_alloc_context();
3026 #if !CONFIG_AVFILTER
3027     sws_opts = sws_getContext(16,16,0, 16,16,0, sws_flags, NULL,NULL,NULL);
3028 #endif
3029
3030     show_banner();
3031
3032     parse_options(argc, argv, options, opt_input_file);
3033
3034     if (!input_filename) {
3035         show_usage();
3036         fprintf(stderr, "An input file must be specified\n");
3037         fprintf(stderr, "Use -h to get full help or, even better, run 'man ffplay'\n");
3038         exit(1);
3039     }
3040
3041     if (display_disable) {
3042         video_disable = 1;
3043     }
3044     flags = SDL_INIT_VIDEO | SDL_INIT_AUDIO | SDL_INIT_TIMER;
3045 #if !defined(__MINGW32__) && !defined(__APPLE__)
3046     flags |= SDL_INIT_EVENTTHREAD; /* Not supported on Windows or Mac OS X */
3047 #endif
3048     if (SDL_Init (flags)) {
3049         fprintf(stderr, "Could not initialize SDL - %s\n", SDL_GetError());
3050         exit(1);
3051     }
3052
3053     if (!display_disable) {
3054 #if HAVE_SDL_VIDEO_SIZE
3055         const SDL_VideoInfo *vi = SDL_GetVideoInfo();
3056         fs_screen_width = vi->current_w;
3057         fs_screen_height = vi->current_h;
3058 #endif
3059     }
3060
3061     SDL_EventState(SDL_ACTIVEEVENT, SDL_IGNORE);
3062     SDL_EventState(SDL_SYSWMEVENT, SDL_IGNORE);
3063     SDL_EventState(SDL_USEREVENT, SDL_IGNORE);
3064
3065     av_init_packet(&flush_pkt);
3066     flush_pkt.data= "FLUSH";
3067
3068     cur_stream = stream_open(input_filename, file_iformat);
3069
3070     event_loop();
3071
3072     /* never returns */
3073
3074     return 0;
3075 }