1 /*
2  * avplay : Simple Media Player based on the Libav libraries
3  * Copyright (c) 2003 Fabrice Bellard
4  *
5  * This file is part of Libav.
6  *
7  * Libav is free software; you can redistribute it and/or
8  * modify it under the terms of the GNU Lesser General Public
9  * License as published by the Free Software Foundation; either
10  * version 2.1 of the License, or (at your option) any later version.
11  *
12  * Libav is distributed in the hope that it will be useful,
13  * but WITHOUT ANY WARRANTY; without even the implied warranty of
14  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
15  * Lesser General Public License for more details.
16  *
17  * You should have received a copy of the GNU Lesser General Public
18  * License along with Libav; if not, write to the Free Software
19  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
20  */
21
22 #include "config.h"
23 #include <inttypes.h>
24 #include <math.h>
25 #include <limits.h>
26 #include "libavutil/avstring.h"
27 #include "libavutil/colorspace.h"
28 #include "libavutil/mathematics.h"
29 #include "libavutil/pixdesc.h"
30 #include "libavutil/imgutils.h"
31 #include "libavutil/dict.h"
32 #include "libavutil/parseutils.h"
33 #include "libavutil/samplefmt.h"
34 #include "libavformat/avformat.h"
35 #include "libavdevice/avdevice.h"
36 #include "libswscale/swscale.h"
37 #include "libavcodec/audioconvert.h"
38 #include "libavutil/opt.h"
39 #include "libavcodec/avfft.h"
40
41 #if CONFIG_AVFILTER
42 # include "libavfilter/avfilter.h"
43 # include "libavfilter/avfiltergraph.h"
44 #endif
45
46 #include "cmdutils.h"
47
48 #include <SDL.h>
49 #include <SDL_thread.h>
50
51 #ifdef __MINGW32__
52 #undef main /* We don't want SDL to override our main() */
53 #endif
54
55 #include <unistd.h>
56 #include <assert.h>
57
58 const char program_name[] = "avplay";
59 const int program_birth_year = 2003;
60
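/* Buffering limits used by the demuxer thread (in the read loop further down
   in this file): stop reading once the combined packet queues exceed
   MAX_QUEUE_SIZE, and treat a stream as sufficiently buffered once its queue
   holds roughly MIN_AUDIOQ_SIZE bytes / MIN_FRAMES packets. */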
61 #define MAX_QUEUE_SIZE (15 * 1024 * 1024)
62 #define MIN_AUDIOQ_SIZE (20 * 16 * 1024)
63 #define MIN_FRAMES 5
64
65 /* SDL audio buffer size, in samples. Should be small to have precise
66    A/V sync as SDL does not have hardware buffer fullness info. */
67 #define SDL_AUDIO_BUFFER_SIZE 1024
68
69 /* no AV sync correction is done if below the AV sync threshold */
70 #define AV_SYNC_THRESHOLD 0.01
71 /* no AV correction is done if too big error */
72 #define AV_NOSYNC_THRESHOLD 10.0
73
74 #define FRAME_SKIP_FACTOR 0.05
75
76 /* maximum audio speed change to get correct sync */
77 #define SAMPLE_CORRECTION_PERCENT_MAX 10
78
79 /* we use about AUDIO_DIFF_AVG_NB A-V differences to make the average */
80 #define AUDIO_DIFF_AVG_NB   20
81
82 /* NOTE: the size must be big enough to compensate for the hardware audio buffer size */
83 #define SAMPLE_ARRAY_SIZE (2*65536)
84
85 static int sws_flags = SWS_BICUBIC;
86
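/* A mutex-protected FIFO of demuxed packets, shared between the demuxer
   thread (producer) and a decoder thread (consumer). A reader blocks on
   'cond' while the queue is empty; 'abort_request' lets it bail out. */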
87 typedef struct PacketQueue {
88     AVPacketList *first_pkt, *last_pkt;
89     int nb_packets;
90     int size;
91     int abort_request;
92     SDL_mutex *mutex;
93     SDL_cond *cond;
94 } PacketQueue;
95
96 #define VIDEO_PICTURE_QUEUE_SIZE 2
97 #define SUBPICTURE_QUEUE_SIZE 4
98
99 typedef struct VideoPicture {
100     double pts;                                  ///<presentation time stamp for this picture
101     double target_clock;                         ///<av_gettime() time at which this should be displayed ideally
102     int64_t pos;                                 ///<byte position in file
103     SDL_Overlay *bmp;
104     int width, height; /* source height & width */
105     int allocated;
106     enum PixelFormat pix_fmt;
107
108 #if CONFIG_AVFILTER
109     AVFilterBufferRef *picref;
110 #endif
111 } VideoPicture;
112
113 typedef struct SubPicture {
114     double pts; /* presentation time stamp for this picture */
115     AVSubtitle sub;
116 } SubPicture;
117
118 enum {
119     AV_SYNC_AUDIO_MASTER, /* default choice */
120     AV_SYNC_VIDEO_MASTER,
121     AV_SYNC_EXTERNAL_CLOCK, /* synchronize to an external clock */
122 };
123
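/* All state for one playback session: the demuxer context, per-stream
   decoding state, packet/picture/subtitle queues, and the clocks used for
   A/V synchronization. Shared between the main (SDL) thread and the
   demuxer, decoder and refresh threads. */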
124 typedef struct VideoState {
125     SDL_Thread *parse_tid;
126     SDL_Thread *video_tid;
127     SDL_Thread *refresh_tid;
128     AVInputFormat *iformat;
129     int no_background;
130     int abort_request;
131     int paused;
132     int last_paused;
133     int seek_req;
134     int seek_flags;
135     int64_t seek_pos;
136     int64_t seek_rel;
137     int read_pause_return;
138     AVFormatContext *ic;
139     int dtg_active_format;
140
141     int audio_stream;
142
143     int av_sync_type;
144     double external_clock; /* external clock base */
145     int64_t external_clock_time;
146
147     double audio_clock;
148     double audio_diff_cum; /* used for AV difference average computation */
149     double audio_diff_avg_coef;
150     double audio_diff_threshold;
151     int audio_diff_avg_count;
152     AVStream *audio_st;
153     PacketQueue audioq;
154     int audio_hw_buf_size;
155     /* samples output by the codec. we reserve more space for avsync
156        compensation */
157     DECLARE_ALIGNED(16,uint8_t,audio_buf1)[(AVCODEC_MAX_AUDIO_FRAME_SIZE * 3) / 2];
158     DECLARE_ALIGNED(16,uint8_t,audio_buf2)[(AVCODEC_MAX_AUDIO_FRAME_SIZE * 3) / 2];
159     uint8_t *audio_buf;
160     unsigned int audio_buf_size; /* in bytes */
161     int audio_buf_index; /* in bytes */
162     AVPacket audio_pkt_temp;
163     AVPacket audio_pkt;
164     enum AVSampleFormat audio_src_fmt;
165     AVAudioConvert *reformat_ctx;
166
167     int show_audio; /* if true, display audio samples */
168     int16_t sample_array[SAMPLE_ARRAY_SIZE];
169     int sample_array_index;
170     int last_i_start;
171     RDFTContext *rdft;
172     int rdft_bits;
173     FFTSample *rdft_data;
174     int xpos;
175
176     SDL_Thread *subtitle_tid;
177     int subtitle_stream;
178     int subtitle_stream_changed;
179     AVStream *subtitle_st;
180     PacketQueue subtitleq;
181     SubPicture subpq[SUBPICTURE_QUEUE_SIZE];
182     int subpq_size, subpq_rindex, subpq_windex;
183     SDL_mutex *subpq_mutex;
184     SDL_cond *subpq_cond;
185
186     double frame_timer;
187     double frame_last_pts;
188     double frame_last_delay;
189     double video_clock;                          ///<pts of last decoded frame / predicted pts of next decoded frame
190     int video_stream;
191     AVStream *video_st;
192     PacketQueue videoq;
193     double video_current_pts;                    ///<current displayed pts (different from video_clock if frame fifos are used)
194     double video_current_pts_drift;              ///<video_current_pts - time (av_gettime) at which we updated video_current_pts - used to have running video pts
195     int64_t video_current_pos;                   ///<current displayed file pos
196     VideoPicture pictq[VIDEO_PICTURE_QUEUE_SIZE];
197     int pictq_size, pictq_rindex, pictq_windex;
198     SDL_mutex *pictq_mutex;
199     SDL_cond *pictq_cond;
200 #if !CONFIG_AVFILTER
201     struct SwsContext *img_convert_ctx;
202 #endif
203
204     //    QETimer *video_timer;
205     char filename[1024];
206     int width, height, xleft, ytop;
207
208     PtsCorrectionContext pts_ctx;
209
210 #if CONFIG_AVFILTER
211     AVFilterContext *out_video_filter;          ///<the last filter in the video chain
212 #endif
213
214     float skip_frames;
215     float skip_frames_index;
216     int refresh;
217 } VideoState;
218
219 static void show_help(void);
220
221 /* options specified by the user */
222 static AVInputFormat *file_iformat;
223 static const char *input_filename;
224 static const char *window_title;
225 static int fs_screen_width;
226 static int fs_screen_height;
227 static int screen_width = 0;
228 static int screen_height = 0;
229 static int audio_disable;
230 static int video_disable;
231 static int wanted_stream[AVMEDIA_TYPE_NB]={
232     [AVMEDIA_TYPE_AUDIO]=-1,
233     [AVMEDIA_TYPE_VIDEO]=-1,
234     [AVMEDIA_TYPE_SUBTITLE]=-1,
235 };
236 static int seek_by_bytes=-1;
237 static int display_disable;
238 static int show_status = 1;
239 static int av_sync_type = AV_SYNC_AUDIO_MASTER;
240 static int64_t start_time = AV_NOPTS_VALUE;
241 static int64_t duration = AV_NOPTS_VALUE;
242 static int debug = 0;
243 static int debug_mv = 0;
244 static int step = 0;
245 static int thread_count = 1;
246 static int workaround_bugs = 1;
247 static int fast = 0;
248 static int genpts = 0;
249 static int lowres = 0;
250 static int idct = FF_IDCT_AUTO;
251 static enum AVDiscard skip_frame= AVDISCARD_DEFAULT;
252 static enum AVDiscard skip_idct= AVDISCARD_DEFAULT;
253 static enum AVDiscard skip_loop_filter= AVDISCARD_DEFAULT;
254 static int error_recognition = FF_ER_CAREFUL;
255 static int error_concealment = 3;
256 static int decoder_reorder_pts= -1;
257 static int autoexit;
258 static int exit_on_keydown;
259 static int exit_on_mousedown;
260 static int loop=1;
261 static int framedrop=1;
262
263 static int rdftspeed=20;
264 #if CONFIG_AVFILTER
265 static char *vfilters = NULL;
266 #endif
267
268 /* current context */
269 static int is_full_screen;
270 static VideoState *cur_stream;
271 static int64_t audio_callback_time;
272
273 static AVPacket flush_pkt;
274
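/* Custom SDL user events used to hand work back to the main event loop:
   overlay (re)allocation, display refresh, and quit. */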
275 #define FF_ALLOC_EVENT   (SDL_USEREVENT)
276 #define FF_REFRESH_EVENT (SDL_USEREVENT + 1)
277 #define FF_QUIT_EVENT    (SDL_USEREVENT + 2)
278
279 static SDL_Surface *screen;
280
281 void exit_program(int ret)
282 {
283     exit(ret);
284 }
285
286 static int packet_queue_put(PacketQueue *q, AVPacket *pkt);
287
288 /* packet queue handling */
289 static void packet_queue_init(PacketQueue *q)
290 {
291     memset(q, 0, sizeof(PacketQueue));
292     q->mutex = SDL_CreateMutex();
293     q->cond = SDL_CreateCond();
294     packet_queue_put(q, &flush_pkt);
295 }
296
297 static void packet_queue_flush(PacketQueue *q)
298 {
299     AVPacketList *pkt, *pkt1;
300
301     SDL_LockMutex(q->mutex);
302     for(pkt = q->first_pkt; pkt != NULL; pkt = pkt1) {
303         pkt1 = pkt->next;
304         av_free_packet(&pkt->pkt);
305         av_freep(&pkt);
306     }
307     q->last_pkt = NULL;
308     q->first_pkt = NULL;
309     q->nb_packets = 0;
310     q->size = 0;
311     SDL_UnlockMutex(q->mutex);
312 }
313
314 static void packet_queue_end(PacketQueue *q)
315 {
316     packet_queue_flush(q);
317     SDL_DestroyMutex(q->mutex);
318     SDL_DestroyCond(q->cond);
319 }
320
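/* Append a packet to the queue and wake up any blocked reader. Except for
   the special flush packet, av_dup_packet() ensures the queued packet owns
   its own data. */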
321 static int packet_queue_put(PacketQueue *q, AVPacket *pkt)
322 {
323     AVPacketList *pkt1;
324
325     /* duplicate the packet */
326     if (pkt!=&flush_pkt && av_dup_packet(pkt) < 0)
327         return -1;
328
329     pkt1 = av_malloc(sizeof(AVPacketList));
330     if (!pkt1)
331         return -1;
332     pkt1->pkt = *pkt;
333     pkt1->next = NULL;
334
335
336     SDL_LockMutex(q->mutex);
337
338     if (!q->last_pkt)
339
340         q->first_pkt = pkt1;
341     else
342         q->last_pkt->next = pkt1;
343     q->last_pkt = pkt1;
344     q->nb_packets++;
345     q->size += pkt1->pkt.size + sizeof(*pkt1);
346     /* XXX: should duplicate packet data in DV case */
347     SDL_CondSignal(q->cond);
348
349     SDL_UnlockMutex(q->mutex);
350     return 0;
351 }
352
353 static void packet_queue_abort(PacketQueue *q)
354 {
355     SDL_LockMutex(q->mutex);
356
357     q->abort_request = 1;
358
359     SDL_CondSignal(q->cond);
360
361     SDL_UnlockMutex(q->mutex);
362 }
363
364 /* return < 0 if aborted, 0 if no packet and > 0 if packet.  */
365 static int packet_queue_get(PacketQueue *q, AVPacket *pkt, int block)
366 {
367     AVPacketList *pkt1;
368     int ret;
369
370     SDL_LockMutex(q->mutex);
371
372     for(;;) {
373         if (q->abort_request) {
374             ret = -1;
375             break;
376         }
377
378         pkt1 = q->first_pkt;
379         if (pkt1) {
380             q->first_pkt = pkt1->next;
381             if (!q->first_pkt)
382                 q->last_pkt = NULL;
383             q->nb_packets--;
384             q->size -= pkt1->pkt.size + sizeof(*pkt1);
385             *pkt = pkt1->pkt;
386             av_free(pkt1);
387             ret = 1;
388             break;
389         } else if (!block) {
390             ret = 0;
391             break;
392         } else {
393             SDL_CondWait(q->cond, q->mutex);
394         }
395     }
396     SDL_UnlockMutex(q->mutex);
397     return ret;
398 }
399
400 static inline void fill_rectangle(SDL_Surface *screen,
401                                   int x, int y, int w, int h, int color)
402 {
403     SDL_Rect rect;
404     rect.x = x;
405     rect.y = y;
406     rect.w = w;
407     rect.h = h;
408     SDL_FillRect(screen, &rect, color);
409 }
410
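/* Helpers for blending palettized subtitle pixels onto a YUV420P picture:
   YUVA_IN fetches one pixel through the 32-bit AYUV palette, and ALPHA_BLEND
   mixes it into the destination with the given alpha ('s' compensates for
   the number of summed chroma samples). */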
411 #define ALPHA_BLEND(a, oldp, newp, s)\
412 ((((oldp << s) * (255 - (a))) + (newp * (a))) / (255 << s))
413
414 #define RGBA_IN(r, g, b, a, s)\
415 {\
416     unsigned int v = ((const uint32_t *)(s))[0];\
417     a = (v >> 24) & 0xff;\
418     r = (v >> 16) & 0xff;\
419     g = (v >> 8) & 0xff;\
420     b = v & 0xff;\
421 }
422
423 #define YUVA_IN(y, u, v, a, s, pal)\
424 {\
425     unsigned int val = ((const uint32_t *)(pal))[*(const uint8_t*)(s)];\
426     a = (val >> 24) & 0xff;\
427     y = (val >> 16) & 0xff;\
428     u = (val >> 8) & 0xff;\
429     v = val & 0xff;\
430 }
431
432 #define YUVA_OUT(d, y, u, v, a)\
433 {\
434     ((uint32_t *)(d))[0] = (a << 24) | (y << 16) | (u << 8) | v;\
435 }
436
437
438 #define BPP 1
439
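/* Alpha-blend one palettized subtitle rectangle onto the YUV420P destination
   picture, clipped to imgw x imgh. The chroma planes are subsampled 2x2,
   hence the separate handling of odd first/last rows and columns. */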
440 static void blend_subrect(AVPicture *dst, const AVSubtitleRect *rect, int imgw, int imgh)
441 {
442     int wrap, wrap3, width2, skip2;
443     int y, u, v, a, u1, v1, a1, w, h;
444     uint8_t *lum, *cb, *cr;
445     const uint8_t *p;
446     const uint32_t *pal;
447     int dstx, dsty, dstw, dsth;
448
449     dstw = av_clip(rect->w, 0, imgw);
450     dsth = av_clip(rect->h, 0, imgh);
451     dstx = av_clip(rect->x, 0, imgw - dstw);
452     dsty = av_clip(rect->y, 0, imgh - dsth);
453     lum = dst->data[0] + dsty * dst->linesize[0];
454     cb = dst->data[1] + (dsty >> 1) * dst->linesize[1];
455     cr = dst->data[2] + (dsty >> 1) * dst->linesize[2];
456
457     width2 = ((dstw + 1) >> 1) + (dstx & ~dstw & 1);
458     skip2 = dstx >> 1;
459     wrap = dst->linesize[0];
460     wrap3 = rect->pict.linesize[0];
461     p = rect->pict.data[0];
462     pal = (const uint32_t *)rect->pict.data[1];  /* Now in YCrCb! */
463
464     if (dsty & 1) {
465         lum += dstx;
466         cb += skip2;
467         cr += skip2;
468
469         if (dstx & 1) {
470             YUVA_IN(y, u, v, a, p, pal);
471             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
472             cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
473             cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
474             cb++;
475             cr++;
476             lum++;
477             p += BPP;
478         }
479         for(w = dstw - (dstx & 1); w >= 2; w -= 2) {
480             YUVA_IN(y, u, v, a, p, pal);
481             u1 = u;
482             v1 = v;
483             a1 = a;
484             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
485
486             YUVA_IN(y, u, v, a, p + BPP, pal);
487             u1 += u;
488             v1 += v;
489             a1 += a;
490             lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
491             cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
492             cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
493             cb++;
494             cr++;
495             p += 2 * BPP;
496             lum += 2;
497         }
498         if (w) {
499             YUVA_IN(y, u, v, a, p, pal);
500             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
501             cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
502             cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
503             p++;
504             lum++;
505         }
506         p += wrap3 - dstw * BPP;
507         lum += wrap - dstw - dstx;
508         cb += dst->linesize[1] - width2 - skip2;
509         cr += dst->linesize[2] - width2 - skip2;
510     }
511     for(h = dsth - (dsty & 1); h >= 2; h -= 2) {
512         lum += dstx;
513         cb += skip2;
514         cr += skip2;
515
516         if (dstx & 1) {
517             YUVA_IN(y, u, v, a, p, pal);
518             u1 = u;
519             v1 = v;
520             a1 = a;
521             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
522             p += wrap3;
523             lum += wrap;
524             YUVA_IN(y, u, v, a, p, pal);
525             u1 += u;
526             v1 += v;
527             a1 += a;
528             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
529             cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
530             cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
531             cb++;
532             cr++;
533             p += -wrap3 + BPP;
534             lum += -wrap + 1;
535         }
536         for(w = dstw - (dstx & 1); w >= 2; w -= 2) {
537             YUVA_IN(y, u, v, a, p, pal);
538             u1 = u;
539             v1 = v;
540             a1 = a;
541             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
542
543             YUVA_IN(y, u, v, a, p + BPP, pal);
544             u1 += u;
545             v1 += v;
546             a1 += a;
547             lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
548             p += wrap3;
549             lum += wrap;
550
551             YUVA_IN(y, u, v, a, p, pal);
552             u1 += u;
553             v1 += v;
554             a1 += a;
555             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
556
557             YUVA_IN(y, u, v, a, p + BPP, pal);
558             u1 += u;
559             v1 += v;
560             a1 += a;
561             lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
562
563             cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 2);
564             cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 2);
565
566             cb++;
567             cr++;
568             p += -wrap3 + 2 * BPP;
569             lum += -wrap + 2;
570         }
571         if (w) {
572             YUVA_IN(y, u, v, a, p, pal);
573             u1 = u;
574             v1 = v;
575             a1 = a;
576             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
577             p += wrap3;
578             lum += wrap;
579             YUVA_IN(y, u, v, a, p, pal);
580             u1 += u;
581             v1 += v;
582             a1 += a;
583             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
584             cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
585             cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
586             cb++;
587             cr++;
588             p += -wrap3 + BPP;
589             lum += -wrap + 1;
590         }
591         p += wrap3 + (wrap3 - dstw * BPP);
592         lum += wrap + (wrap - dstw - dstx);
593         cb += dst->linesize[1] - width2 - skip2;
594         cr += dst->linesize[2] - width2 - skip2;
595     }
596     /* handle odd height */
597     if (h) {
598         lum += dstx;
599         cb += skip2;
600         cr += skip2;
601
602         if (dstx & 1) {
603             YUVA_IN(y, u, v, a, p, pal);
604             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
605             cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
606             cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
607             cb++;
608             cr++;
609             lum++;
610             p += BPP;
611         }
612         for(w = dstw - (dstx & 1); w >= 2; w -= 2) {
613             YUVA_IN(y, u, v, a, p, pal);
614             u1 = u;
615             v1 = v;
616             a1 = a;
617             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
618
619             YUVA_IN(y, u, v, a, p + BPP, pal);
620             u1 += u;
621             v1 += v;
622             a1 += a;
623             lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
624             cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
625             cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
626             cb++;
627             cr++;
628             p += 2 * BPP;
629             lum += 2;
630         }
631         if (w) {
632             YUVA_IN(y, u, v, a, p, pal);
633             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
634             cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
635             cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
636         }
637     }
638 }
639
640 static void free_subpicture(SubPicture *sp)
641 {
642     avsubtitle_free(&sp->sub);
643 }
644
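/* Show the picture at the read index of the queue: blend any pending
   subtitle into the overlay, then display it centered and letterboxed so
   that the aspect ratio is preserved. */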
645 static void video_image_display(VideoState *is)
646 {
647     VideoPicture *vp;
648     SubPicture *sp;
649     AVPicture pict;
650     float aspect_ratio;
651     int width, height, x, y;
652     SDL_Rect rect;
653     int i;
654
655     vp = &is->pictq[is->pictq_rindex];
656     if (vp->bmp) {
657 #if CONFIG_AVFILTER
658          if (vp->picref->video->pixel_aspect.num == 0)
659              aspect_ratio = 0;
660          else
661              aspect_ratio = av_q2d(vp->picref->video->pixel_aspect);
662 #else
663
664         /* XXX: use variable in the frame */
665         if (is->video_st->sample_aspect_ratio.num)
666             aspect_ratio = av_q2d(is->video_st->sample_aspect_ratio);
667         else if (is->video_st->codec->sample_aspect_ratio.num)
668             aspect_ratio = av_q2d(is->video_st->codec->sample_aspect_ratio);
669         else
670             aspect_ratio = 0;
671 #endif
672         if (aspect_ratio <= 0.0)
673             aspect_ratio = 1.0;
674         aspect_ratio *= (float)vp->width / (float)vp->height;
675
676         if (is->subtitle_st)
677         {
678             if (is->subpq_size > 0)
679             {
680                 sp = &is->subpq[is->subpq_rindex];
681
682                 if (vp->pts >= sp->pts + ((float) sp->sub.start_display_time / 1000))
683                 {
684                     SDL_LockYUVOverlay (vp->bmp);
685
686                     pict.data[0] = vp->bmp->pixels[0];
687                     pict.data[1] = vp->bmp->pixels[2];
688                     pict.data[2] = vp->bmp->pixels[1];
689
690                     pict.linesize[0] = vp->bmp->pitches[0];
691                     pict.linesize[1] = vp->bmp->pitches[2];
692                     pict.linesize[2] = vp->bmp->pitches[1];
693
694                     for (i = 0; i < sp->sub.num_rects; i++)
695                         blend_subrect(&pict, sp->sub.rects[i],
696                                       vp->bmp->w, vp->bmp->h);
697
698                     SDL_UnlockYUVOverlay (vp->bmp);
699                 }
700             }
701         }
702
703
704         /* XXX: we assume the screen has a 1.0 pixel aspect ratio */
705         height = is->height;
706         width = ((int)rint(height * aspect_ratio)) & ~1;
707         if (width > is->width) {
708             width = is->width;
709             height = ((int)rint(width / aspect_ratio)) & ~1;
710         }
711         x = (is->width - width) / 2;
712         y = (is->height - height) / 2;
713         is->no_background = 0;
714         rect.x = is->xleft + x;
715         rect.y = is->ytop  + y;
716         rect.w = width;
717         rect.h = height;
718         SDL_DisplayYUVOverlay(vp->bmp, &rect);
719     }
720 }
721
722 /* get the current audio output buffer size, in samples. With SDL, we
723    cannot have precise information */
724 static int audio_write_get_buf_size(VideoState *is)
725 {
726     return is->audio_buf_size - is->audio_buf_index;
727 }
728
729 static inline int compute_mod(int a, int b)
730 {
731     a = a % b;
732     if (a >= 0)
733         return a;
734     else
735         return a + b;
736 }
737
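/* Audio visualization: in mode 1 draw the waveform of each channel,
   otherwise compute an RDFT of the recent samples and draw one spectrum
   column per call at the current x position. */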
738 static void video_audio_display(VideoState *s)
739 {
740     int i, i_start, x, y1, y, ys, delay, n, nb_display_channels;
741     int ch, channels, h, h2, bgcolor, fgcolor;
742     int64_t time_diff; /* microseconds since the last audio callback */
743     int rdft_bits, nb_freq;
744
745     for(rdft_bits=1; (1<<rdft_bits)<2*s->height; rdft_bits++)
746         ;
747     nb_freq= 1<<(rdft_bits-1);
748
749     /* compute display index : center on currently output samples */
750     channels = s->audio_st->codec->channels;
751     nb_display_channels = channels;
752     if (!s->paused) {
753         int data_used= s->show_audio==1 ? s->width : (2*nb_freq);
754         n = 2 * channels;
755         delay = audio_write_get_buf_size(s);
756         delay /= n;
757
758         /* to be more precise, we take into account the time spent since
759            the last buffer computation */
760         if (audio_callback_time) {
761             time_diff = av_gettime() - audio_callback_time;
762             delay -= (time_diff * s->audio_st->codec->sample_rate) / 1000000;
763         }
764
765         delay += 2*data_used;
766         if (delay < data_used)
767             delay = data_used;
768
769         i_start= x = compute_mod(s->sample_array_index - delay * channels, SAMPLE_ARRAY_SIZE);
770         if(s->show_audio==1){
771             h= INT_MIN;
772             for(i=0; i<1000; i+=channels){
773                 int idx= (SAMPLE_ARRAY_SIZE + x - i) % SAMPLE_ARRAY_SIZE;
774                 int a= s->sample_array[idx];
775                 int b= s->sample_array[(idx + 4*channels)%SAMPLE_ARRAY_SIZE];
776                 int c= s->sample_array[(idx + 5*channels)%SAMPLE_ARRAY_SIZE];
777                 int d= s->sample_array[(idx + 9*channels)%SAMPLE_ARRAY_SIZE];
778                 int score= a-d;
779                 if(h<score && (b^c)<0){
780                     h= score;
781                     i_start= idx;
782                 }
783             }
784         }
785
786         s->last_i_start = i_start;
787     } else {
788         i_start = s->last_i_start;
789     }
790
791     bgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0x00);
792     if(s->show_audio==1){
793         fill_rectangle(screen,
794                        s->xleft, s->ytop, s->width, s->height,
795                        bgcolor);
796
797         fgcolor = SDL_MapRGB(screen->format, 0xff, 0xff, 0xff);
798
799         /* total height for one channel */
800         h = s->height / nb_display_channels;
801         /* graph height / 2 */
802         h2 = (h * 9) / 20;
803         for(ch = 0;ch < nb_display_channels; ch++) {
804             i = i_start + ch;
805             y1 = s->ytop + ch * h + (h / 2); /* position of center line */
806             for(x = 0; x < s->width; x++) {
807                 y = (s->sample_array[i] * h2) >> 15;
808                 if (y < 0) {
809                     y = -y;
810                     ys = y1 - y;
811                 } else {
812                     ys = y1;
813                 }
814                 fill_rectangle(screen,
815                                s->xleft + x, ys, 1, y,
816                                fgcolor);
817                 i += channels;
818                 if (i >= SAMPLE_ARRAY_SIZE)
819                     i -= SAMPLE_ARRAY_SIZE;
820             }
821         }
822
823         fgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0xff);
824
825         for(ch = 1;ch < nb_display_channels; ch++) {
826             y = s->ytop + ch * h;
827             fill_rectangle(screen,
828                            s->xleft, y, s->width, 1,
829                            fgcolor);
830         }
831         SDL_UpdateRect(screen, s->xleft, s->ytop, s->width, s->height);
832     }else{
833         nb_display_channels= FFMIN(nb_display_channels, 2);
834         if(rdft_bits != s->rdft_bits){
835             av_rdft_end(s->rdft);
836             av_free(s->rdft_data);
837             s->rdft = av_rdft_init(rdft_bits, DFT_R2C);
838             s->rdft_bits= rdft_bits;
839             s->rdft_data= av_malloc(4*nb_freq*sizeof(*s->rdft_data));
840         }
841         {
842             FFTSample *data[2];
843             for(ch = 0;ch < nb_display_channels; ch++) {
844                 data[ch] = s->rdft_data + 2*nb_freq*ch;
845                 i = i_start + ch;
846                 for(x = 0; x < 2*nb_freq; x++) {
847                     double w= (x-nb_freq)*(1.0/nb_freq);
848                     data[ch][x]= s->sample_array[i]*(1.0-w*w);
849                     i += channels;
850                     if (i >= SAMPLE_ARRAY_SIZE)
851                         i -= SAMPLE_ARRAY_SIZE;
852                 }
853                 av_rdft_calc(s->rdft, data[ch]);
854             }
855             // Least efficient way to do this; ideally we would access the data directly, but it is more than fast enough
856             for(y=0; y<s->height; y++){
857                 double w= 1/sqrt(nb_freq);
858                 int a= sqrt(w*sqrt(data[0][2*y+0]*data[0][2*y+0] + data[0][2*y+1]*data[0][2*y+1]));
859                 int b= (nb_display_channels == 2 ) ? sqrt(w*sqrt(data[1][2*y+0]*data[1][2*y+0]
860                        + data[1][2*y+1]*data[1][2*y+1])) : a;
861                 a= FFMIN(a,255);
862                 b= FFMIN(b,255);
863                 fgcolor = SDL_MapRGB(screen->format, a, b, (a+b)/2);
864
865                 fill_rectangle(screen,
866                             s->xpos, s->height-y, 1, 1,
867                             fgcolor);
868             }
869         }
870         SDL_UpdateRect(screen, s->xpos, s->ytop, 1, s->height);
871         s->xpos++;
872         if(s->xpos >= s->width)
873             s->xpos= s->xleft;
874     }
875 }
876
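/* (Re)open the SDL output surface, sized from the fullscreen resolution,
   the user-requested size, or the (possibly filtered) video dimensions,
   falling back to 640x480. */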
877 static int video_open(VideoState *is){
878     int flags = SDL_HWSURFACE|SDL_ASYNCBLIT|SDL_HWACCEL;
879     int w,h;
880
881     if(is_full_screen) flags |= SDL_FULLSCREEN;
882     else               flags |= SDL_RESIZABLE;
883
884     if (is_full_screen && fs_screen_width) {
885         w = fs_screen_width;
886         h = fs_screen_height;
887     } else if(!is_full_screen && screen_width){
888         w = screen_width;
889         h = screen_height;
890 #if CONFIG_AVFILTER
891     }else if (is->out_video_filter && is->out_video_filter->inputs[0]){
892         w = is->out_video_filter->inputs[0]->w;
893         h = is->out_video_filter->inputs[0]->h;
894 #else
895     }else if (is->video_st && is->video_st->codec->width){
896         w = is->video_st->codec->width;
897         h = is->video_st->codec->height;
898 #endif
899     } else {
900         w = 640;
901         h = 480;
902     }
903     if(screen && is->width == screen->w && screen->w == w
904        && is->height== screen->h && screen->h == h)
905         return 0;
906
907 #if defined(__APPLE__) && !SDL_VERSION_ATLEAST(1, 2, 14)
908     /* setting bits_per_pixel = 0 or 32 causes blank video on OS X and older SDL */
909     screen = SDL_SetVideoMode(w, h, 24, flags);
910 #else
911     screen = SDL_SetVideoMode(w, h, 0, flags);
912 #endif
913     if (!screen) {
914         fprintf(stderr, "SDL: could not set video mode - exiting\n");
915         return -1;
916     }
917     if (!window_title)
918         window_title = input_filename;
919     SDL_WM_SetCaption(window_title, window_title);
920
921     is->width = screen->w;
922     is->height = screen->h;
923
924     return 0;
925 }
926
927 /* display the current picture, if any */
928 static void video_display(VideoState *is)
929 {
930     if(!screen)
931         video_open(cur_stream);
932     if (is->audio_st && is->show_audio)
933         video_audio_display(is);
934     else if (is->video_st)
935         video_image_display(is);
936 }
937
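/* Periodically push FF_REFRESH_EVENT to the main loop so that it redraws:
   every rdftspeed ms for the audio visualization, every 5 ms otherwise. */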
938 static int refresh_thread(void *opaque)
939 {
940     VideoState *is= opaque;
941     while(!is->abort_request){
942         SDL_Event event;
943         event.type = FF_REFRESH_EVENT;
944         event.user.data1 = opaque;
945         if(!is->refresh){
946             is->refresh=1;
947             SDL_PushEvent(&event);
948         }
949         usleep(is->audio_st && is->show_audio ? rdftspeed*1000 : 5000); //FIXME ideally we should wait for the correct time, but SDL's event passing is so slow it would be silly
950     }
951     return 0;
952 }
953
954 /* get the current audio clock value */
955 static double get_audio_clock(VideoState *is)
956 {
957     double pts;
958     int hw_buf_size, bytes_per_sec;
959     pts = is->audio_clock;
960     hw_buf_size = audio_write_get_buf_size(is);
961     bytes_per_sec = 0;
962     if (is->audio_st) {
963         bytes_per_sec = is->audio_st->codec->sample_rate *
964             2 * is->audio_st->codec->channels;
965     }
966     if (bytes_per_sec)
967         pts -= (double)hw_buf_size / bytes_per_sec;
968     return pts;
969 }
970
971 /* get the current video clock value */
972 static double get_video_clock(VideoState *is)
973 {
974     if (is->paused) {
975         return is->video_current_pts;
976     } else {
977         return is->video_current_pts_drift + av_gettime() / 1000000.0;
978     }
979 }
980
981 /* get the current external clock value */
982 static double get_external_clock(VideoState *is)
983 {
984     int64_t ti;
985     ti = av_gettime();
986     return is->external_clock + ((ti - is->external_clock_time) * 1e-6);
987 }
988
989 /* get the current master clock value */
990 static double get_master_clock(VideoState *is)
991 {
992     double val;
993
994     if (is->av_sync_type == AV_SYNC_VIDEO_MASTER) {
995         if (is->video_st)
996             val = get_video_clock(is);
997         else
998             val = get_audio_clock(is);
999     } else if (is->av_sync_type == AV_SYNC_AUDIO_MASTER) {
1000         if (is->audio_st)
1001             val = get_audio_clock(is);
1002         else
1003             val = get_video_clock(is);
1004     } else {
1005         val = get_external_clock(is);
1006     }
1007     return val;
1008 }
1009
1010 /* seek in the stream */
1011 static void stream_seek(VideoState *is, int64_t pos, int64_t rel, int seek_by_bytes)
1012 {
1013     if (!is->seek_req) {
1014         is->seek_pos = pos;
1015         is->seek_rel = rel;
1016         is->seek_flags &= ~AVSEEK_FLAG_BYTE;
1017         if (seek_by_bytes)
1018             is->seek_flags |= AVSEEK_FLAG_BYTE;
1019         is->seek_req = 1;
1020     }
1021 }
1022
1023 /* pause or resume the video */
1024 static void stream_pause(VideoState *is)
1025 {
1026     if (is->paused) {
1027         is->frame_timer += av_gettime() / 1000000.0 + is->video_current_pts_drift - is->video_current_pts;
1028         if(is->read_pause_return != AVERROR(ENOSYS)){
1029             is->video_current_pts = is->video_current_pts_drift + av_gettime() / 1000000.0;
1030         }
1031         is->video_current_pts_drift = is->video_current_pts - av_gettime() / 1000000.0;
1032     }
1033     is->paused = !is->paused;
1034 }
1035
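/* Compute the absolute time (on the frame_timer clock) at which the frame
   with the given pts should be displayed. When video is not the master
   clock, the nominal delay is reduced to zero or doubled to resynchronize. */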
1036 static double compute_target_time(double frame_current_pts, VideoState *is)
1037 {
1038     double delay, sync_threshold, diff;
1039
1040     /* compute nominal delay */
1041     delay = frame_current_pts - is->frame_last_pts;
1042     if (delay <= 0 || delay >= 10.0) {
1043         /* if incorrect delay, use previous one */
1044         delay = is->frame_last_delay;
1045     } else {
1046         is->frame_last_delay = delay;
1047     }
1048     is->frame_last_pts = frame_current_pts;
1049
1050     /* update delay to follow master synchronisation source */
1051     if (((is->av_sync_type == AV_SYNC_AUDIO_MASTER && is->audio_st) ||
1052          is->av_sync_type == AV_SYNC_EXTERNAL_CLOCK)) {
1053         /* if video is slave, we try to correct big delays by
1054            duplicating or deleting a frame */
1055         diff = get_video_clock(is) - get_master_clock(is);
1056
1057         /* skip or repeat frame. We take into account the
1058            delay to compute the threshold. I still don't know
1059            if it is the best guess */
1060         sync_threshold = FFMAX(AV_SYNC_THRESHOLD, delay);
1061         if (fabs(diff) < AV_NOSYNC_THRESHOLD) {
1062             if (diff <= -sync_threshold)
1063                 delay = 0;
1064             else if (diff >= sync_threshold)
1065                 delay = 2 * delay;
1066         }
1067     }
1068     is->frame_timer += delay;
1069
1070     av_dlog(NULL, "video: delay=%0.3f pts=%0.3f A-V=%f\n",
1071             delay, frame_current_pts, -diff);
1072
1073     return is->frame_timer;
1074 }
1075
1076 /* called to display each frame */
1077 static void video_refresh_timer(void *opaque)
1078 {
1079     VideoState *is = opaque;
1080     VideoPicture *vp;
1081
1082     SubPicture *sp, *sp2;
1083
1084     if (is->video_st) {
1085 retry:
1086         if (is->pictq_size == 0) {
1087             //nothing to do, no picture to display in the queue
1088         } else {
1089             double time= av_gettime()/1000000.0;
1090             double next_target;
1091             /* dequeue the picture */
1092             vp = &is->pictq[is->pictq_rindex];
1093
1094             if(time < vp->target_clock)
1095                 return;
1096             /* update current video pts */
1097             is->video_current_pts = vp->pts;
1098             is->video_current_pts_drift = is->video_current_pts - time;
1099             is->video_current_pos = vp->pos;
1100             if(is->pictq_size > 1){
1101                 VideoPicture *nextvp= &is->pictq[(is->pictq_rindex+1)%VIDEO_PICTURE_QUEUE_SIZE];
1102                 assert(nextvp->target_clock >= vp->target_clock);
1103                 next_target= nextvp->target_clock;
1104             }else{
1105                 next_target= vp->target_clock + is->video_clock - vp->pts; //FIXME pass durations cleanly
1106             }
1107             if(framedrop && time > next_target){
1108                 is->skip_frames *= 1.0 + FRAME_SKIP_FACTOR;
1109                 if(is->pictq_size > 1 || time > next_target + 0.5){
1110                     /* update queue size and signal for next picture */
1111                     if (++is->pictq_rindex == VIDEO_PICTURE_QUEUE_SIZE)
1112                         is->pictq_rindex = 0;
1113
1114                     SDL_LockMutex(is->pictq_mutex);
1115                     is->pictq_size--;
1116                     SDL_CondSignal(is->pictq_cond);
1117                     SDL_UnlockMutex(is->pictq_mutex);
1118                     goto retry;
1119                 }
1120             }
1121
1122             if(is->subtitle_st) {
1123                 if (is->subtitle_stream_changed) {
1124                     SDL_LockMutex(is->subpq_mutex);
1125
1126                     while (is->subpq_size) {
1127                         free_subpicture(&is->subpq[is->subpq_rindex]);
1128
1129                         /* update queue size and signal for next picture */
1130                         if (++is->subpq_rindex == SUBPICTURE_QUEUE_SIZE)
1131                             is->subpq_rindex = 0;
1132
1133                         is->subpq_size--;
1134                     }
1135                     is->subtitle_stream_changed = 0;
1136
1137                     SDL_CondSignal(is->subpq_cond);
1138                     SDL_UnlockMutex(is->subpq_mutex);
1139                 } else {
1140                     if (is->subpq_size > 0) {
1141                         sp = &is->subpq[is->subpq_rindex];
1142
1143                         if (is->subpq_size > 1)
1144                             sp2 = &is->subpq[(is->subpq_rindex + 1) % SUBPICTURE_QUEUE_SIZE];
1145                         else
1146                             sp2 = NULL;
1147
1148                         if ((is->video_current_pts > (sp->pts + ((float) sp->sub.end_display_time / 1000)))
1149                                 || (sp2 && is->video_current_pts > (sp2->pts + ((float) sp2->sub.start_display_time / 1000))))
1150                         {
1151                             free_subpicture(sp);
1152
1153                             /* update queue size and signal for next picture */
1154                             if (++is->subpq_rindex == SUBPICTURE_QUEUE_SIZE)
1155                                 is->subpq_rindex = 0;
1156
1157                             SDL_LockMutex(is->subpq_mutex);
1158                             is->subpq_size--;
1159                             SDL_CondSignal(is->subpq_cond);
1160                             SDL_UnlockMutex(is->subpq_mutex);
1161                         }
1162                     }
1163                 }
1164             }
1165
1166             /* display picture */
1167             if (!display_disable)
1168                 video_display(is);
1169
1170             /* update queue size and signal for next picture */
1171             if (++is->pictq_rindex == VIDEO_PICTURE_QUEUE_SIZE)
1172                 is->pictq_rindex = 0;
1173
1174             SDL_LockMutex(is->pictq_mutex);
1175             is->pictq_size--;
1176             SDL_CondSignal(is->pictq_cond);
1177             SDL_UnlockMutex(is->pictq_mutex);
1178         }
1179     } else if (is->audio_st) {
1180         /* draw the next audio frame */
1181
1182         /* if there is only an audio stream, then display the audio bars (better
1183            than nothing, just to test the implementation) */
1184
1185         /* display picture */
1186         if (!display_disable)
1187             video_display(is);
1188     }
1189     if (show_status) {
1190         static int64_t last_time;
1191         int64_t cur_time;
1192         int aqsize, vqsize, sqsize;
1193         double av_diff;
1194
1195         cur_time = av_gettime();
1196         if (!last_time || (cur_time - last_time) >= 30000) {
1197             aqsize = 0;
1198             vqsize = 0;
1199             sqsize = 0;
1200             if (is->audio_st)
1201                 aqsize = is->audioq.size;
1202             if (is->video_st)
1203                 vqsize = is->videoq.size;
1204             if (is->subtitle_st)
1205                 sqsize = is->subtitleq.size;
1206             av_diff = 0;
1207             if (is->audio_st && is->video_st)
1208                 av_diff = get_audio_clock(is) - get_video_clock(is);
1209             printf("%7.2f A-V:%7.3f s:%3.1f aq=%5dKB vq=%5dKB sq=%5dB f=%"PRId64"/%"PRId64"   \r",
1210                    get_master_clock(is), av_diff, FFMAX(is->skip_frames-1, 0), aqsize / 1024, vqsize / 1024, sqsize, is->pts_ctx.num_faulty_dts, is->pts_ctx.num_faulty_pts);
1211             fflush(stdout);
1212             last_time = cur_time;
1213         }
1214     }
1215 }
1216
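/* Tear down a VideoState: request abort, join the demuxer and refresh
   threads, then free the queued pictures and the synchronization
   primitives. */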
1217 static void stream_close(VideoState *is)
1218 {
1219     VideoPicture *vp;
1220     int i;
1221     /* XXX: use a special url_shutdown call to abort parse cleanly */
1222     is->abort_request = 1;
1223     SDL_WaitThread(is->parse_tid, NULL);
1224     SDL_WaitThread(is->refresh_tid, NULL);
1225
1226     /* free all pictures */
1227     for(i=0;i<VIDEO_PICTURE_QUEUE_SIZE; i++) {
1228         vp = &is->pictq[i];
1229 #if CONFIG_AVFILTER
1230         if (vp->picref) {
1231             avfilter_unref_buffer(vp->picref);
1232             vp->picref = NULL;
1233         }
1234 #endif
1235         if (vp->bmp) {
1236             SDL_FreeYUVOverlay(vp->bmp);
1237             vp->bmp = NULL;
1238         }
1239     }
1240     SDL_DestroyMutex(is->pictq_mutex);
1241     SDL_DestroyCond(is->pictq_cond);
1242     SDL_DestroyMutex(is->subpq_mutex);
1243     SDL_DestroyCond(is->subpq_cond);
1244 #if !CONFIG_AVFILTER
1245     if (is->img_convert_ctx)
1246         sws_freeContext(is->img_convert_ctx);
1247 #endif
1248     av_free(is);
1249 }
1250
1251 static void do_exit(void)
1252 {
1253     if (cur_stream) {
1254         stream_close(cur_stream);
1255         cur_stream = NULL;
1256     }
1257     uninit_opts();
1258 #if CONFIG_AVFILTER
1259     avfilter_uninit();
1260 #endif
1261     if (show_status)
1262         printf("\n");
1263     SDL_Quit();
1264     av_log(NULL, AV_LOG_QUIET, "");
1265     exit(0);
1266 }
1267
1268 /* allocate a picture (this needs to be done in the main thread to avoid
1269    potential locking problems) */
1270 static void alloc_picture(void *opaque)
1271 {
1272     VideoState *is = opaque;
1273     VideoPicture *vp;
1274
1275     vp = &is->pictq[is->pictq_windex];
1276
1277     if (vp->bmp)
1278         SDL_FreeYUVOverlay(vp->bmp);
1279
1280 #if CONFIG_AVFILTER
1281     if (vp->picref)
1282         avfilter_unref_buffer(vp->picref);
1283     vp->picref = NULL;
1284
1285     vp->width   = is->out_video_filter->inputs[0]->w;
1286     vp->height  = is->out_video_filter->inputs[0]->h;
1287     vp->pix_fmt = is->out_video_filter->inputs[0]->format;
1288 #else
1289     vp->width   = is->video_st->codec->width;
1290     vp->height  = is->video_st->codec->height;
1291     vp->pix_fmt = is->video_st->codec->pix_fmt;
1292 #endif
1293
1294     vp->bmp = SDL_CreateYUVOverlay(vp->width, vp->height,
1295                                    SDL_YV12_OVERLAY,
1296                                    screen);
1297     if (!vp->bmp || vp->bmp->pitches[0] < vp->width) {
1298         /* SDL allocates a buffer smaller than requested if the video
1299          * overlay hardware is unable to support the requested size. */
1300         fprintf(stderr, "Error: the video system does not support an image\n"
1301                         "size of %dx%d pixels. Try using -lowres or -vf \"scale=w:h\"\n"
1302                         "to reduce the image size.\n", vp->width, vp->height );
1303         do_exit();
1304     }
1305
1306     SDL_LockMutex(is->pictq_mutex);
1307     vp->allocated = 1;
1308     SDL_CondSignal(is->pictq_cond);
1309     SDL_UnlockMutex(is->pictq_mutex);
1310 }
1311
1312 /**
1313  * Queue a decoded picture for display.
1314  * @param pts the pts of the frame (or the dts of the packet), guessed if not known
1315  */
1316 static int queue_picture(VideoState *is, AVFrame *src_frame, double pts, int64_t pos)
1317 {
1318     VideoPicture *vp;
1319 #if CONFIG_AVFILTER
1320     AVPicture pict_src;
1321 #else
1322     int dst_pix_fmt = PIX_FMT_YUV420P;
1323 #endif
1324     /* wait until we have space to put a new picture */
1325     SDL_LockMutex(is->pictq_mutex);
1326
1327     if(is->pictq_size>=VIDEO_PICTURE_QUEUE_SIZE && !is->refresh)
1328         is->skip_frames= FFMAX(1.0 - FRAME_SKIP_FACTOR, is->skip_frames * (1.0-FRAME_SKIP_FACTOR));
1329
1330     while (is->pictq_size >= VIDEO_PICTURE_QUEUE_SIZE &&
1331            !is->videoq.abort_request) {
1332         SDL_CondWait(is->pictq_cond, is->pictq_mutex);
1333     }
1334     SDL_UnlockMutex(is->pictq_mutex);
1335
1336     if (is->videoq.abort_request)
1337         return -1;
1338
1339     vp = &is->pictq[is->pictq_windex];
1340
1341     /* alloc or resize hardware picture buffer */
1342     if (!vp->bmp ||
1343 #if CONFIG_AVFILTER
1344         vp->width  != is->out_video_filter->inputs[0]->w ||
1345         vp->height != is->out_video_filter->inputs[0]->h) {
1346 #else
1347         vp->width != is->video_st->codec->width ||
1348         vp->height != is->video_st->codec->height) {
1349 #endif
1350         SDL_Event event;
1351
1352         vp->allocated = 0;
1353
1354         /* the allocation must be done in the main thread to avoid
1355            locking problems */
1356         event.type = FF_ALLOC_EVENT;
1357         event.user.data1 = is;
1358         SDL_PushEvent(&event);
1359
1360         /* wait until the picture is allocated */
1361         SDL_LockMutex(is->pictq_mutex);
1362         while (!vp->allocated && !is->videoq.abort_request) {
1363             SDL_CondWait(is->pictq_cond, is->pictq_mutex);
1364         }
1365         SDL_UnlockMutex(is->pictq_mutex);
1366
1367         if (is->videoq.abort_request)
1368             return -1;
1369     }
1370
1371     /* if the frame is not skipped, then display it */
1372     if (vp->bmp) {
1373         AVPicture pict;
1374 #if CONFIG_AVFILTER
1375         if(vp->picref)
1376             avfilter_unref_buffer(vp->picref);
1377         vp->picref = src_frame->opaque;
1378 #endif
1379
1380         /* get a pointer on the bitmap */
1381         SDL_LockYUVOverlay (vp->bmp);
1382
1383         memset(&pict,0,sizeof(AVPicture));
1384         pict.data[0] = vp->bmp->pixels[0];
1385         pict.data[1] = vp->bmp->pixels[2];
1386         pict.data[2] = vp->bmp->pixels[1];
1387
1388         pict.linesize[0] = vp->bmp->pitches[0];
1389         pict.linesize[1] = vp->bmp->pitches[2];
1390         pict.linesize[2] = vp->bmp->pitches[1];
1391
1392 #if CONFIG_AVFILTER
1393         pict_src.data[0] = src_frame->data[0];
1394         pict_src.data[1] = src_frame->data[1];
1395         pict_src.data[2] = src_frame->data[2];
1396
1397         pict_src.linesize[0] = src_frame->linesize[0];
1398         pict_src.linesize[1] = src_frame->linesize[1];
1399         pict_src.linesize[2] = src_frame->linesize[2];
1400
1401         //FIXME use direct rendering
1402         av_picture_copy(&pict, &pict_src,
1403                         vp->pix_fmt, vp->width, vp->height);
1404 #else
1405         sws_flags = av_get_int(sws_opts, "sws_flags", NULL);
1406         is->img_convert_ctx = sws_getCachedContext(is->img_convert_ctx,
1407             vp->width, vp->height, vp->pix_fmt, vp->width, vp->height,
1408             dst_pix_fmt, sws_flags, NULL, NULL, NULL);
1409         if (is->img_convert_ctx == NULL) {
1410             fprintf(stderr, "Cannot initialize the conversion context\n");
1411             exit(1);
1412         }
1413         sws_scale(is->img_convert_ctx, src_frame->data, src_frame->linesize,
1414                   0, vp->height, pict.data, pict.linesize);
1415 #endif
1416         /* update the bitmap content */
1417         SDL_UnlockYUVOverlay(vp->bmp);
1418
1419         vp->pts = pts;
1420         vp->pos = pos;
1421
1422         /* now we can update the picture count */
1423         if (++is->pictq_windex == VIDEO_PICTURE_QUEUE_SIZE)
1424             is->pictq_windex = 0;
1425         SDL_LockMutex(is->pictq_mutex);
1426         vp->target_clock= compute_target_time(vp->pts, is);
1427
1428         is->pictq_size++;
1429         SDL_UnlockMutex(is->pictq_mutex);
1430     }
1431     return 0;
1432 }
1433
1434 /**
1435  * compute the exact PTS for the picture if it is omitted in the stream
1436  * @param pts1 the dts of the pkt / pts of the frame
1437  */
1438 static int output_picture2(VideoState *is, AVFrame *src_frame, double pts1, int64_t pos)
1439 {
1440     double frame_delay, pts;
1441
1442     pts = pts1;
1443
1444     if (pts != 0) {
1445         /* update video clock with pts, if present */
1446         is->video_clock = pts;
1447     } else {
1448         pts = is->video_clock;
1449     }
1450     /* update video clock for next frame */
1451     frame_delay = av_q2d(is->video_st->codec->time_base);
1452     /* for MPEG2, the frame can be repeated, so we update the
1453        clock accordingly */
1454     frame_delay += src_frame->repeat_pict * (frame_delay * 0.5);
1455     is->video_clock += frame_delay;
1456
1457     return queue_picture(is, src_frame, pts, pos);
1458 }
1459
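/* Fetch the next packet from the video queue and decode it. A flush packet
   resets the decoder and the pts/frame-drop state. Returns 1 when a decoded
   frame should be displayed, 0 otherwise (no frame yet, frame skipped, or
   flush), and a negative value on abort. */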
1460 static int get_video_frame(VideoState *is, AVFrame *frame, int64_t *pts, AVPacket *pkt)
1461 {
1462     int got_picture, i;
1463
1464     if (packet_queue_get(&is->videoq, pkt, 1) < 0)
1465         return -1;
1466
1467     if (pkt->data == flush_pkt.data) {
1468         avcodec_flush_buffers(is->video_st->codec);
1469
1470         SDL_LockMutex(is->pictq_mutex);
1471         //Make sure there are no long-delay timers (ideally we should just flush the queue, but that's harder)
1472         for (i = 0; i < VIDEO_PICTURE_QUEUE_SIZE; i++) {
1473             is->pictq[i].target_clock= 0;
1474         }
1475         while (is->pictq_size && !is->videoq.abort_request) {
1476             SDL_CondWait(is->pictq_cond, is->pictq_mutex);
1477         }
1478         is->video_current_pos = -1;
1479         SDL_UnlockMutex(is->pictq_mutex);
1480
1481         init_pts_correction(&is->pts_ctx);
1482         is->frame_last_pts = AV_NOPTS_VALUE;
1483         is->frame_last_delay = 0;
1484         is->frame_timer = (double)av_gettime() / 1000000.0;
1485         is->skip_frames = 1;
1486         is->skip_frames_index = 0;
1487         return 0;
1488     }
1489
1490     avcodec_decode_video2(is->video_st->codec, frame, &got_picture, pkt);
1491
1492     if (got_picture) {
1493         if (decoder_reorder_pts == -1) {
1494             *pts = guess_correct_pts(&is->pts_ctx, frame->pkt_pts, frame->pkt_dts);
1495         } else if (decoder_reorder_pts) {
1496             *pts = frame->pkt_pts;
1497         } else {
1498             *pts = frame->pkt_dts;
1499         }
1500
1501         if (*pts == AV_NOPTS_VALUE) {
1502             *pts = 0;
1503         }
1504
1505         is->skip_frames_index += 1;
1506         if(is->skip_frames_index >= is->skip_frames){
1507             is->skip_frames_index -= FFMAX(is->skip_frames, 1.0);
1508             return 1;
1509         }
1510
1511     }
1512     return 0;
1513 }
1514
1515 #if CONFIG_AVFILTER
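/* Video source filter feeding decoded frames into the libavfilter graph.
   When the decoder supports direct rendering (CODEC_CAP_DR1), frames are
   decoded straight into filter buffers via the callbacks below. */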
1516 typedef struct {
1517     VideoState *is;
1518     AVFrame *frame;
1519     int use_dr1;
1520 } FilterPriv;
1521
1522 static int input_get_buffer(AVCodecContext *codec, AVFrame *pic)
1523 {
1524     AVFilterContext *ctx = codec->opaque;
1525     AVFilterBufferRef  *ref;
1526     int perms = AV_PERM_WRITE;
1527     int i, w, h, stride[4];
1528     unsigned edge;
1529     int pixel_size;
1530
1531     if (codec->codec->capabilities & CODEC_CAP_NEG_LINESIZES)
1532         perms |= AV_PERM_NEG_LINESIZES;
1533
1534     if(pic->buffer_hints & FF_BUFFER_HINTS_VALID) {
1535         if(pic->buffer_hints & FF_BUFFER_HINTS_READABLE) perms |= AV_PERM_READ;
1536         if(pic->buffer_hints & FF_BUFFER_HINTS_PRESERVE) perms |= AV_PERM_PRESERVE;
1537         if(pic->buffer_hints & FF_BUFFER_HINTS_REUSABLE) perms |= AV_PERM_REUSE2;
1538     }
1539     if(pic->reference) perms |= AV_PERM_READ | AV_PERM_PRESERVE;
1540
1541     w = codec->width;
1542     h = codec->height;
1543     avcodec_align_dimensions2(codec, &w, &h, stride);
1544     edge = codec->flags & CODEC_FLAG_EMU_EDGE ? 0 : avcodec_get_edge_width();
1545     w += edge << 1;
1546     h += edge << 1;
1547
1548     if(!(ref = avfilter_get_video_buffer(ctx->outputs[0], perms, w, h)))
1549         return -1;
1550
1551     pixel_size = av_pix_fmt_descriptors[ref->format].comp[0].step_minus1+1;
1552     ref->video->w = codec->width;
1553     ref->video->h = codec->height;
1554     for(i = 0; i < 4; i ++) {
1555         unsigned hshift = (i == 1 || i == 2) ? av_pix_fmt_descriptors[ref->format].log2_chroma_w : 0;
1556         unsigned vshift = (i == 1 || i == 2) ? av_pix_fmt_descriptors[ref->format].log2_chroma_h : 0;
1557
1558         if (ref->data[i]) {
1559             ref->data[i]    += ((edge * pixel_size) >> hshift) + ((edge * ref->linesize[i]) >> vshift);
1560         }
1561         pic->data[i]     = ref->data[i];
1562         pic->linesize[i] = ref->linesize[i];
1563     }
1564     pic->opaque = ref;
1565     pic->age    = INT_MAX;
1566     pic->type   = FF_BUFFER_TYPE_USER;
1567     pic->reordered_opaque = codec->reordered_opaque;
1568     if(codec->pkt) pic->pkt_pts = codec->pkt->pts;
1569     else           pic->pkt_pts = AV_NOPTS_VALUE;
1570     return 0;
1571 }
1572
1573 static void input_release_buffer(AVCodecContext *codec, AVFrame *pic)
1574 {
1575     memset(pic->data, 0, sizeof(pic->data));
1576     avfilter_unref_buffer(pic->opaque);
1577 }
1578
1579 static int input_reget_buffer(AVCodecContext *codec, AVFrame *pic)
1580 {
1581     AVFilterBufferRef *ref = pic->opaque;
1582
1583     if (pic->data[0] == NULL) {
1584         pic->buffer_hints |= FF_BUFFER_HINTS_READABLE;
1585         return codec->get_buffer(codec, pic);
1586     }
1587
1588     if ((codec->width != ref->video->w) || (codec->height != ref->video->h) ||
1589         (codec->pix_fmt != ref->format)) {
1590         av_log(codec, AV_LOG_ERROR, "Picture properties changed.\n");
1591         return -1;
1592     }
1593
1594     pic->reordered_opaque = codec->reordered_opaque;
1595     if(codec->pkt) pic->pkt_pts = codec->pkt->pts;
1596     else           pic->pkt_pts = AV_NOPTS_VALUE;
1597     return 0;
1598 }
1599
1600 static int input_init(AVFilterContext *ctx, const char *args, void *opaque)
1601 {
1602     FilterPriv *priv = ctx->priv;
1603     AVCodecContext *codec;
1604     if(!opaque) return -1;
1605
1606     priv->is = opaque;
1607     codec    = priv->is->video_st->codec;
1608     codec->opaque = ctx;
1609     if(codec->codec->capabilities & CODEC_CAP_DR1) {
1610         priv->use_dr1 = 1;
1611         codec->get_buffer     = input_get_buffer;
1612         codec->release_buffer = input_release_buffer;
1613         codec->reget_buffer   = input_reget_buffer;
1614         codec->thread_safe_callbacks = 1;
1615     }
1616
1617     priv->frame = avcodec_alloc_frame();
1618
1619     return 0;
1620 }
1621
1622 static void input_uninit(AVFilterContext *ctx)
1623 {
1624     FilterPriv *priv = ctx->priv;
1625     av_free(priv->frame);
1626 }
1627
1628 static int input_request_frame(AVFilterLink *link)
1629 {
1630     FilterPriv *priv = link->src->priv;
1631     AVFilterBufferRef *picref;
1632     int64_t pts = 0;
1633     AVPacket pkt;
1634     int ret;
1635
1636     while (!(ret = get_video_frame(priv->is, priv->frame, &pts, &pkt)))
1637         av_free_packet(&pkt);
1638     if (ret < 0)
1639         return -1;
1640
1641     if(priv->use_dr1) {
1642         picref = avfilter_ref_buffer(priv->frame->opaque, ~0);
1643     } else {
1644         picref = avfilter_get_video_buffer(link, AV_PERM_WRITE, link->w, link->h);
1645         av_image_copy(picref->data, picref->linesize,
1646                       priv->frame->data, priv->frame->linesize,
1647                       picref->format, link->w, link->h);
1648     }
1649     av_free_packet(&pkt);
1650
1651     picref->pts = pts;
1652     picref->pos = pkt.pos;
1653     picref->video->pixel_aspect = priv->is->video_st->codec->sample_aspect_ratio;
1654     avfilter_start_frame(link, picref);
1655     avfilter_draw_slice(link, 0, link->h, 1);
1656     avfilter_end_frame(link);
1657
1658     return 0;
1659 }
1660
1661 static int input_query_formats(AVFilterContext *ctx)
1662 {
1663     FilterPriv *priv = ctx->priv;
1664     enum PixelFormat pix_fmts[] = {
1665         priv->is->video_st->codec->pix_fmt, PIX_FMT_NONE
1666     };
1667
1668     avfilter_set_common_formats(ctx, avfilter_make_format_list(pix_fmts));
1669     return 0;
1670 }
1671
1672 static int input_config_props(AVFilterLink *link)
1673 {
1674     FilterPriv *priv  = link->src->priv;
1675     AVCodecContext *c = priv->is->video_st->codec;
1676
1677     link->w = c->width;
1678     link->h = c->height;
1679     link->time_base = priv->is->video_st->time_base;
1680
1681     return 0;
1682 }
1683
1684 static AVFilter input_filter =
1685 {
1686     .name      = "avplay_input",
1687
1688     .priv_size = sizeof(FilterPriv),
1689
1690     .init      = input_init,
1691     .uninit    = input_uninit,
1692
1693     .query_formats = input_query_formats,
1694
1695     .inputs    = (AVFilterPad[]) {{ .name = NULL }},
1696     .outputs   = (AVFilterPad[]) {{ .name = "default",
1697                                     .type = AVMEDIA_TYPE_VIDEO,
1698                                     .request_frame = input_request_frame,
1699                                     .config_props  = input_config_props, },
1700                                   { .name = NULL }},
1701 };
1702
1703 static int configure_video_filters(AVFilterGraph *graph, VideoState *is, const char *vfilters)
1704 {
1705     char sws_flags_str[128];
1706     int ret;
1707     FFSinkContext ffsink_ctx = { .pix_fmt = PIX_FMT_YUV420P };
1708     AVFilterContext *filt_src = NULL, *filt_out = NULL;
1709     snprintf(sws_flags_str, sizeof(sws_flags_str), "flags=%d", sws_flags);
1710     graph->scale_sws_opts = av_strdup(sws_flags_str);
1711
1712     if ((ret = avfilter_graph_create_filter(&filt_src, &input_filter, "src",
1713                                             NULL, is, graph)) < 0)
1714         return ret;
1715     if ((ret = avfilter_graph_create_filter(&filt_out, &ffsink, "out",
1716                                             NULL, &ffsink_ctx, graph)) < 0)
1717         return ret;
1718
1719     if(vfilters) {
1720         AVFilterInOut *outputs = av_malloc(sizeof(AVFilterInOut));
1721         AVFilterInOut *inputs  = av_malloc(sizeof(AVFilterInOut));
1722
1723         outputs->name    = av_strdup("in");
1724         outputs->filter_ctx = filt_src;
1725         outputs->pad_idx = 0;
1726         outputs->next    = NULL;
1727
1728         inputs->name    = av_strdup("out");
1729         inputs->filter_ctx = filt_out;
1730         inputs->pad_idx = 0;
1731         inputs->next    = NULL;
1732
1733         if ((ret = avfilter_graph_parse(graph, vfilters, inputs, outputs, NULL)) < 0)
1734             return ret;
1735         av_freep(&vfilters);
1736     } else {
1737         if ((ret = avfilter_link(filt_src, 0, filt_out, 0)) < 0)
1738             return ret;
1739     }
1740
1741     if ((ret = avfilter_graph_config(graph, NULL)) < 0)
1742         return ret;
1743
1744     is->out_video_filter = filt_out;
1745
1746     return ret;
1747 }
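/*
 * Example (illustrative only, not part of the original source): with the graph
 * built above, a user filter string passed via -vf is parsed between the "in"
 * label (attached to the avplay_input source) and the "out" label (attached to
 * the ffsink), e.g.
 *
 *     avplay -vf "scale=640:360,hflip" input.mkv
 *
 * links avplay_input -> scale -> hflip -> ffsink. Without -vf the source is
 * linked directly to the sink.
 */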
1748
1749 #endif  /* CONFIG_AVFILTER */
1750
1751 static int video_thread(void *arg)
1752 {
1753     VideoState *is = arg;
1754     AVFrame *frame= avcodec_alloc_frame();
1755     int64_t pts_int;
1756     double pts;
1757     int ret;
1758
1759 #if CONFIG_AVFILTER
1760     AVFilterGraph *graph = avfilter_graph_alloc();
1761     AVFilterContext *filt_out = NULL;
1762     int64_t pos;
1763     int last_w = is->video_st->codec->width;
1764     int last_h = is->video_st->codec->height;
1765
1766     if ((ret = configure_video_filters(graph, is, vfilters)) < 0)
1767         goto the_end;
1768     filt_out = is->out_video_filter;
1769 #endif
1770
1771     for(;;) {
1772 #if !CONFIG_AVFILTER
1773         AVPacket pkt;
1774 #else
1775         AVFilterBufferRef *picref;
1776         AVRational tb;
1777 #endif
1778         while (is->paused && !is->videoq.abort_request)
1779             SDL_Delay(10);
1780 #if CONFIG_AVFILTER
1781         if (   last_w != is->video_st->codec->width
1782             || last_h != is->video_st->codec->height) {
1783             av_dlog(NULL, "Changing size %dx%d -> %dx%d\n", last_w, last_h,
1784                     is->video_st->codec->width, is->video_st->codec->height);
1785             avfilter_graph_free(&graph);
1786             graph = avfilter_graph_alloc();
1787             if ((ret = configure_video_filters(graph, is, vfilters)) < 0)
1788                 goto the_end;
1789             filt_out = is->out_video_filter;
1790             last_w = is->video_st->codec->width;
1791             last_h = is->video_st->codec->height;
1792         }
1793         ret = get_filtered_video_frame(filt_out, frame, &picref, &tb);
1794         if (picref) {
1795             pts_int = picref->pts;
1796             pos     = picref->pos;
1797             frame->opaque = picref;
1798         }
1799
1800         if (av_cmp_q(tb, is->video_st->time_base)) {
1801             av_unused int64_t pts1 = pts_int;
1802             pts_int = av_rescale_q(pts_int, tb, is->video_st->time_base);
1803             av_dlog(NULL, "video_thread(): "
1804                     "tb:%d/%d pts:%"PRId64" -> tb:%d/%d pts:%"PRId64"\n",
1805                     tb.num, tb.den, pts1,
1806                     is->video_st->time_base.num, is->video_st->time_base.den, pts_int);
1807         }
1808 #else
1809         ret = get_video_frame(is, frame, &pts_int, &pkt);
1810 #endif
1811
1812         if (ret < 0) goto the_end;
1813
1814         if (!ret)
1815             continue;
1816
1817         pts = pts_int*av_q2d(is->video_st->time_base);
1818
1819 #if CONFIG_AVFILTER
1820         ret = output_picture2(is, frame, pts, pos);
1821 #else
1822         ret = output_picture2(is, frame, pts,  pkt.pos);
1823         av_free_packet(&pkt);
1824 #endif
1825         if (ret < 0)
1826             goto the_end;
1827
1828         if (step)
1829             if (cur_stream)
1830                 stream_pause(cur_stream);
1831     }
1832  the_end:
1833 #if CONFIG_AVFILTER
1834     avfilter_graph_free(&graph);
1835 #endif
1836     av_free(frame);
1837     return 0;
1838 }
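/*
 * Worked example (illustrative): the pts handed to output_picture2() above is
 * the integer timestamp scaled by the stream time base,
 * pts = pts_int * av_q2d(time_base). For an MPEG-TS stream with
 * time_base = 1/90000 and pts_int = 180000 this gives pts = 2.0 seconds.
 */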
1839
1840 static int subtitle_thread(void *arg)
1841 {
1842     VideoState *is = arg;
1843     SubPicture *sp;
1844     AVPacket pkt1, *pkt = &pkt1;
1845     int got_subtitle;
1846     double pts;
1847     int i, j;
1848     int r, g, b, y, u, v, a;
1849
1850     for(;;) {
1851         while (is->paused && !is->subtitleq.abort_request) {
1852             SDL_Delay(10);
1853         }
1854         if (packet_queue_get(&is->subtitleq, pkt, 1) < 0)
1855             break;
1856
1857         if(pkt->data == flush_pkt.data){
1858             avcodec_flush_buffers(is->subtitle_st->codec);
1859             continue;
1860         }
1861         SDL_LockMutex(is->subpq_mutex);
1862         while (is->subpq_size >= SUBPICTURE_QUEUE_SIZE &&
1863                !is->subtitleq.abort_request) {
1864             SDL_CondWait(is->subpq_cond, is->subpq_mutex);
1865         }
1866         SDL_UnlockMutex(is->subpq_mutex);
1867
1868         if (is->subtitleq.abort_request)
1869             return 0;
1870
1871         sp = &is->subpq[is->subpq_windex];
1872
1873         /* NOTE: pts is the PTS of the _first_ subtitle beginning in
1874            this packet, if any */
1875         pts = 0;
1876         if (pkt->pts != AV_NOPTS_VALUE)
1877             pts = av_q2d(is->subtitle_st->time_base)*pkt->pts;
1878
1879         avcodec_decode_subtitle2(is->subtitle_st->codec, &sp->sub,
1880                                  &got_subtitle, pkt);
1881
1882         if (got_subtitle && sp->sub.format == 0) {
1883             sp->pts = pts;
1884
1885             for (i = 0; i < sp->sub.num_rects; i++)
1886             {
1887                 for (j = 0; j < sp->sub.rects[i]->nb_colors; j++)
1888                 {
1889                     RGBA_IN(r, g, b, a, (uint32_t*)sp->sub.rects[i]->pict.data[1] + j);
1890                     y = RGB_TO_Y_CCIR(r, g, b);
1891                     u = RGB_TO_U_CCIR(r, g, b, 0);
1892                     v = RGB_TO_V_CCIR(r, g, b, 0);
1893                     YUVA_OUT((uint32_t*)sp->sub.rects[i]->pict.data[1] + j, y, u, v, a);
1894                 }
1895             }
1896
1897             /* now we can update the picture count */
1898             if (++is->subpq_windex == SUBPICTURE_QUEUE_SIZE)
1899                 is->subpq_windex = 0;
1900             SDL_LockMutex(is->subpq_mutex);
1901             is->subpq_size++;
1902             SDL_UnlockMutex(is->subpq_mutex);
1903         }
1904         av_free_packet(pkt);
1905     }
1906     return 0;
1907 }
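/*
 * Note (illustrative): the loop above rewrites each RGBA palette entry of a
 * bitmap subtitle in place as YUVA, using the studio-range CCIR 601 macros.
 * For example, opaque white (r = g = b = 255) maps to approximately Y = 235,
 * U = V = 128, the limited-range white point expected by the YUV overlay.
 */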
1908
1909 /* copy samples for the audio (waveform/spectrum) display */
1910 static void update_sample_display(VideoState *is, short *samples, int samples_size)
1911 {
1912     int size, len;
1913
1914     size = samples_size / sizeof(short);
1915     while (size > 0) {
1916         len = SAMPLE_ARRAY_SIZE - is->sample_array_index;
1917         if (len > size)
1918             len = size;
1919         memcpy(is->sample_array + is->sample_array_index, samples, len * sizeof(short));
1920         samples += len;
1921         is->sample_array_index += len;
1922         if (is->sample_array_index >= SAMPLE_ARRAY_SIZE)
1923             is->sample_array_index = 0;
1924         size -= len;
1925     }
1926 }
1927
1928 /* return the new audio buffer size (samples can be added or removed
1929    to improve sync when video or an external clock is the master) */
1930 static int synchronize_audio(VideoState *is, short *samples,
1931                              int samples_size1, double pts)
1932 {
1933     int n, samples_size;
1934     double ref_clock;
1935
1936     n = 2 * is->audio_st->codec->channels;
1937     samples_size = samples_size1;
1938
1939     /* if audio is not the master clock, try to remove or add samples to correct it */
1940     if (((is->av_sync_type == AV_SYNC_VIDEO_MASTER && is->video_st) ||
1941          is->av_sync_type == AV_SYNC_EXTERNAL_CLOCK)) {
1942         double diff, avg_diff;
1943         int wanted_size, min_size, max_size, nb_samples;
1944
1945         ref_clock = get_master_clock(is);
1946         diff = get_audio_clock(is) - ref_clock;
1947
1948         if (diff < AV_NOSYNC_THRESHOLD) {
1949             is->audio_diff_cum = diff + is->audio_diff_avg_coef * is->audio_diff_cum;
1950             if (is->audio_diff_avg_count < AUDIO_DIFF_AVG_NB) {
1951                 /* not enough measurements to have a correct estimate */
1952                 is->audio_diff_avg_count++;
1953             } else {
1954                 /* estimate the A-V difference */
1955                 avg_diff = is->audio_diff_cum * (1.0 - is->audio_diff_avg_coef);
1956
1957                 if (fabs(avg_diff) >= is->audio_diff_threshold) {
1958                     wanted_size = samples_size + ((int)(diff * is->audio_st->codec->sample_rate) * n);
1959                     nb_samples = samples_size / n;
1960
1961                     min_size = ((nb_samples * (100 - SAMPLE_CORRECTION_PERCENT_MAX)) / 100) * n;
1962                     max_size = ((nb_samples * (100 + SAMPLE_CORRECTION_PERCENT_MAX)) / 100) * n;
1963                     if (wanted_size < min_size)
1964                         wanted_size = min_size;
1965                     else if (wanted_size > max_size)
1966                         wanted_size = max_size;
1967
1968                     /* add or remove samples to correct the sync */
1969                     if (wanted_size < samples_size) {
1970                         /* remove samples */
1971                         samples_size = wanted_size;
1972                     } else if (wanted_size > samples_size) {
1973                         uint8_t *samples_end, *q;
1974                         int nb;
1975
1976                         /* add samples */
1977                         nb = (wanted_size - samples_size);
1978                         samples_end = (uint8_t *)samples + samples_size - n;
1979                         q = samples_end + n;
1980                         while (nb > 0) {
1981                             memcpy(q, samples_end, n);
1982                             q += n;
1983                             nb -= n;
1984                         }
1985                         samples_size = wanted_size;
1986                     }
1987                 }
1988                 av_dlog(NULL, "diff=%f adiff=%f sample_diff=%d apts=%0.3f vpts=%0.3f %f\n",
1989                         diff, avg_diff, samples_size - samples_size1,
1990                         is->audio_clock, is->video_clock, is->audio_diff_threshold);
1991             }
1992         } else {
1993             /* the difference is too big: it may be due to initial PTS errors,
1994                so reset the A-V filter */
1995             is->audio_diff_avg_count = 0;
1996             is->audio_diff_cum = 0;
1997         }
1998     }
1999
2000     return samples_size;
2001 }
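/*
 * Worked example (illustrative): with stereo S16 audio, n = 2 * 2 = 4 bytes per
 * sample frame. If the averaged drift exceeds the threshold and the current
 * diff is +0.02 s at a 48 kHz sample rate, then
 *
 *     wanted_size = samples_size + (int)(0.02 * 48000) * 4
 *                 = samples_size + 3840 bytes
 *
 * i.e. 960 extra sample frames, and the change is then clamped to at most
 * SAMPLE_CORRECTION_PERCENT_MAX (10%) of the original buffer size.
 */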
2002
2003 /* decode one audio frame and return its uncompressed size */
2004 static int audio_decode_frame(VideoState *is, double *pts_ptr)
2005 {
2006     AVPacket *pkt_temp = &is->audio_pkt_temp;
2007     AVPacket *pkt = &is->audio_pkt;
2008     AVCodecContext *dec= is->audio_st->codec;
2009     int n, len1, data_size;
2010     double pts;
2011     int new_packet = 0;
2012     int flush_complete = 0;
2013
2014     for(;;) {
2015         /* NOTE: the audio packet can contain several frames */
2016         while (pkt_temp->size > 0 || (!pkt_temp->data && new_packet)) {
2017             if (flush_complete)
2018                 break;
2019             new_packet = 0;
2020             data_size = sizeof(is->audio_buf1);
2021             len1 = avcodec_decode_audio3(dec,
2022                                         (int16_t *)is->audio_buf1, &data_size,
2023                                         pkt_temp);
2024             if (len1 < 0) {
2025                 /* if error, we skip the frame */
2026                 pkt_temp->size = 0;
2027                 break;
2028             }
2029
2030             pkt_temp->data += len1;
2031             pkt_temp->size -= len1;
2032
2033             if (data_size <= 0) {
2034                 /* stop sending empty packets if the decoder is finished */
2035                 if (!pkt_temp->data && dec->codec->capabilities & CODEC_CAP_DELAY)
2036                     flush_complete = 1;
2037                 continue;
2038             }
2039
2040             if (dec->sample_fmt != is->audio_src_fmt) {
2041                 if (is->reformat_ctx)
2042                     av_audio_convert_free(is->reformat_ctx);
2043                 is->reformat_ctx= av_audio_convert_alloc(AV_SAMPLE_FMT_S16, 1,
2044                                                          dec->sample_fmt, 1, NULL, 0);
2045                 if (!is->reformat_ctx) {
2046                     fprintf(stderr, "Cannot convert %s sample format to %s sample format\n",
2047                         av_get_sample_fmt_name(dec->sample_fmt),
2048                         av_get_sample_fmt_name(AV_SAMPLE_FMT_S16));
2049                         break;
2050                 }
2051                 is->audio_src_fmt= dec->sample_fmt;
2052             }
2053
2054             if (is->reformat_ctx) {
2055                 const void *ibuf[6]= {is->audio_buf1};
2056                 void *obuf[6]= {is->audio_buf2};
2057                 int istride[6]= {av_get_bytes_per_sample(dec->sample_fmt)};
2058                 int ostride[6]= {2};
2059                 int len= data_size/istride[0];
2060                 if (av_audio_convert(is->reformat_ctx, obuf, ostride, ibuf, istride, len)<0) {
2061                     printf("av_audio_convert() failed\n");
2062                     break;
2063                 }
2064                 is->audio_buf= is->audio_buf2;
2065                 /* FIXME: existing code assumes that data_size equals frame_size*channels*2;
2066                           remove this legacy cruft */
2067                 data_size= len*2;
2068             }else{
2069                 is->audio_buf= is->audio_buf1;
2070             }
2071
2072             /* use the running audio clock as the pts and advance it by the decoded duration */
2073             pts = is->audio_clock;
2074             *pts_ptr = pts;
2075             n = 2 * dec->channels;
2076             is->audio_clock += (double)data_size /
2077                 (double)(n * dec->sample_rate);
2078 #ifdef DEBUG
2079             {
2080                 static double last_clock;
2081                 printf("audio: delay=%0.3f clock=%0.3f pts=%0.3f\n",
2082                        is->audio_clock - last_clock,
2083                        is->audio_clock, pts);
2084                 last_clock = is->audio_clock;
2085             }
2086 #endif
2087             return data_size;
2088         }
2089
2090         /* free the current packet */
2091         if (pkt->data)
2092             av_free_packet(pkt);
2093
2094         if (is->paused || is->audioq.abort_request) {
2095             return -1;
2096         }
2097
2098         /* read next packet */
2099         if ((new_packet = packet_queue_get(&is->audioq, pkt, 1)) < 0)
2100             return -1;
2101
2102         if (pkt->data == flush_pkt.data)
2103             avcodec_flush_buffers(dec);
2104
2105         pkt_temp->data = pkt->data;
2106         pkt_temp->size = pkt->size;
2107
2108         /* update the audio clock with the pts, if available */
2109         if (pkt->pts != AV_NOPTS_VALUE) {
2110             is->audio_clock = av_q2d(is->audio_st->time_base)*pkt->pts;
2111         }
2112     }
2113 }
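/*
 * Worked example (illustrative): the audio clock above advances by
 * data_size / (2 * channels * sample_rate) seconds per decoded chunk.
 * 4096 bytes of stereo 44.1 kHz S16 audio therefore advance the clock by
 * 4096 / (4 * 44100) ~= 23.2 ms.
 */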
2114
2115 /* prepare a new audio buffer */
2116 static void sdl_audio_callback(void *opaque, Uint8 *stream, int len)
2117 {
2118     VideoState *is = opaque;
2119     int audio_size, len1;
2120     double pts;
2121
2122     audio_callback_time = av_gettime();
2123
2124     while (len > 0) {
2125         if (is->audio_buf_index >= is->audio_buf_size) {
2126            audio_size = audio_decode_frame(is, &pts);
2127            if (audio_size < 0) {
2128                 /* if error, just output silence */
2129                is->audio_buf = is->audio_buf1;
2130                is->audio_buf_size = 1024;
2131                memset(is->audio_buf, 0, is->audio_buf_size);
2132            } else {
2133                if (is->show_audio)
2134                    update_sample_display(is, (int16_t *)is->audio_buf, audio_size);
2135                audio_size = synchronize_audio(is, (int16_t *)is->audio_buf, audio_size,
2136                                               pts);
2137                is->audio_buf_size = audio_size;
2138            }
2139            is->audio_buf_index = 0;
2140         }
2141         len1 = is->audio_buf_size - is->audio_buf_index;
2142         if (len1 > len)
2143             len1 = len;
2144         memcpy(stream, (uint8_t *)is->audio_buf + is->audio_buf_index, len1);
2145         len -= len1;
2146         stream += len1;
2147         is->audio_buf_index += len1;
2148     }
2149 }
2150
2151 /* open a given stream. Return 0 if OK */
2152 static int stream_component_open(VideoState *is, int stream_index)
2153 {
2154     AVFormatContext *ic = is->ic;
2155     AVCodecContext *avctx;
2156     AVCodec *codec;
2157     SDL_AudioSpec wanted_spec, spec;
2158     AVDictionary *opts;
2159     AVDictionaryEntry *t = NULL;
2160
2161     if (stream_index < 0 || stream_index >= ic->nb_streams)
2162         return -1;
2163     avctx = ic->streams[stream_index]->codec;
2164
2165     opts = filter_codec_opts(codec_opts, avctx->codec_id, ic, ic->streams[stream_index]);
2166
2167     /* limit the number of requested audio channels before opening the decoder */
2168     if (avctx->codec_type == AVMEDIA_TYPE_AUDIO) {
2169         if (avctx->channels > 0) {
2170             avctx->request_channels = FFMIN(2, avctx->channels);
2171         } else {
2172             avctx->request_channels = 2;
2173         }
2174     }
2175
2176     codec = avcodec_find_decoder(avctx->codec_id);
2177     avctx->debug_mv = debug_mv;
2178     avctx->debug = debug;
2179     avctx->workaround_bugs = workaround_bugs;
2180     avctx->lowres = lowres;
2181     if(lowres) avctx->flags |= CODEC_FLAG_EMU_EDGE;
2182     avctx->idct_algo= idct;
2183     if(fast) avctx->flags2 |= CODEC_FLAG2_FAST;
2184     avctx->skip_frame= skip_frame;
2185     avctx->skip_idct= skip_idct;
2186     avctx->skip_loop_filter= skip_loop_filter;
2187     avctx->error_recognition= error_recognition;
2188     avctx->error_concealment= error_concealment;
2189     avctx->thread_count= thread_count;
2190
2191     if (!codec ||
2192         avcodec_open2(avctx, codec, &opts) < 0)
2193         return -1;
2194     if ((t = av_dict_get(opts, "", NULL, AV_DICT_IGNORE_SUFFIX))) {
2195         av_log(NULL, AV_LOG_ERROR, "Option %s not found.\n", t->key);
2196         return AVERROR_OPTION_NOT_FOUND;
2197     }
2198
2199     /* prepare audio output */
2200     if (avctx->codec_type == AVMEDIA_TYPE_AUDIO) {
2201         wanted_spec.freq = avctx->sample_rate;
2202         wanted_spec.format = AUDIO_S16SYS;
2203         wanted_spec.channels = avctx->channels;
2204         wanted_spec.silence = 0;
2205         wanted_spec.samples = SDL_AUDIO_BUFFER_SIZE;
2206         wanted_spec.callback = sdl_audio_callback;
2207         wanted_spec.userdata = is;
2208         if (SDL_OpenAudio(&wanted_spec, &spec) < 0) {
2209             fprintf(stderr, "SDL_OpenAudio: %s\n", SDL_GetError());
2210             return -1;
2211         }
2212         is->audio_hw_buf_size = spec.size;
2213         is->audio_src_fmt= AV_SAMPLE_FMT_S16;
2214     }
2215
2216     ic->streams[stream_index]->discard = AVDISCARD_DEFAULT;
2217     switch(avctx->codec_type) {
2218     case AVMEDIA_TYPE_AUDIO:
2219         is->audio_stream = stream_index;
2220         is->audio_st = ic->streams[stream_index];
2221         is->audio_buf_size = 0;
2222         is->audio_buf_index = 0;
2223
2224         /* init averaging filter */
2225         is->audio_diff_avg_coef = exp(log(0.01) / AUDIO_DIFF_AVG_NB);
2226         is->audio_diff_avg_count = 0;
2227         /* since we do not have precise enough audio FIFO fullness information,
2228            we only correct audio sync when the error is larger than this threshold */
2229         is->audio_diff_threshold = 2.0 * SDL_AUDIO_BUFFER_SIZE / avctx->sample_rate;
2230
2231         memset(&is->audio_pkt, 0, sizeof(is->audio_pkt));
2232         packet_queue_init(&is->audioq);
2233         SDL_PauseAudio(0);
2234         break;
2235     case AVMEDIA_TYPE_VIDEO:
2236         is->video_stream = stream_index;
2237         is->video_st = ic->streams[stream_index];
2238
2239         packet_queue_init(&is->videoq);
2240         is->video_tid = SDL_CreateThread(video_thread, is);
2241         break;
2242     case AVMEDIA_TYPE_SUBTITLE:
2243         is->subtitle_stream = stream_index;
2244         is->subtitle_st = ic->streams[stream_index];
2245         packet_queue_init(&is->subtitleq);
2246
2247         is->subtitle_tid = SDL_CreateThread(subtitle_thread, is);
2248         break;
2249     default:
2250         break;
2251     }
2252     return 0;
2253 }
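/*
 * Note (illustrative): with the default SDL_AUDIO_BUFFER_SIZE of 1024 samples
 * and a 44.1 kHz stream, audio_diff_threshold above evaluates to
 * 2.0 * 1024 / 44100 ~= 46 ms, so audio sync corrections only kick in once the
 * averaged A-V drift exceeds roughly two SDL audio buffers' worth of time.
 */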
2254
2255 static void stream_component_close(VideoState *is, int stream_index)
2256 {
2257     AVFormatContext *ic = is->ic;
2258     AVCodecContext *avctx;
2259
2260     if (stream_index < 0 || stream_index >= ic->nb_streams)
2261         return;
2262     avctx = ic->streams[stream_index]->codec;
2263
2264     switch(avctx->codec_type) {
2265     case AVMEDIA_TYPE_AUDIO:
2266         packet_queue_abort(&is->audioq);
2267
2268         SDL_CloseAudio();
2269
2270         packet_queue_end(&is->audioq);
2271         av_free_packet(&is->audio_pkt);
2272         if (is->reformat_ctx)
2273             av_audio_convert_free(is->reformat_ctx);
2274         is->reformat_ctx = NULL;
2275
2276         if (is->rdft) {
2277             av_rdft_end(is->rdft);
2278             av_freep(&is->rdft_data);
2279         }
2280         break;
2281     case AVMEDIA_TYPE_VIDEO:
2282         packet_queue_abort(&is->videoq);
2283
2284         /* note: we also signal this condition to make sure we unblock the
2285            video thread in all cases */
2286         SDL_LockMutex(is->pictq_mutex);
2287         SDL_CondSignal(is->pictq_cond);
2288         SDL_UnlockMutex(is->pictq_mutex);
2289
2290         SDL_WaitThread(is->video_tid, NULL);
2291
2292         packet_queue_end(&is->videoq);
2293         break;
2294     case AVMEDIA_TYPE_SUBTITLE:
2295         packet_queue_abort(&is->subtitleq);
2296
2297         /* note: we also signal this condition to make sure we unblock the
2298            subtitle thread in all cases */
2299         SDL_LockMutex(is->subpq_mutex);
2300         is->subtitle_stream_changed = 1;
2301
2302         SDL_CondSignal(is->subpq_cond);
2303         SDL_UnlockMutex(is->subpq_mutex);
2304
2305         SDL_WaitThread(is->subtitle_tid, NULL);
2306
2307         packet_queue_end(&is->subtitleq);
2308         break;
2309     default:
2310         break;
2311     }
2312
2313     ic->streams[stream_index]->discard = AVDISCARD_ALL;
2314     avcodec_close(avctx);
2315     switch(avctx->codec_type) {
2316     case AVMEDIA_TYPE_AUDIO:
2317         is->audio_st = NULL;
2318         is->audio_stream = -1;
2319         break;
2320     case AVMEDIA_TYPE_VIDEO:
2321         is->video_st = NULL;
2322         is->video_stream = -1;
2323         break;
2324     case AVMEDIA_TYPE_SUBTITLE:
2325         is->subtitle_st = NULL;
2326         is->subtitle_stream = -1;
2327         break;
2328     default:
2329         break;
2330     }
2331 }
2332
2333 /* since we have only one decoding thread, we can use a global
2334    variable instead of a thread local variable */
2335 static VideoState *global_video_state;
2336
2337 static int decode_interrupt_cb(void)
2338 {
2339     return (global_video_state && global_video_state->abort_request);
2340 }
2341
2342 /* this thread reads the stream from disk or the network */
2343 static int decode_thread(void *arg)
2344 {
2345     VideoState *is = arg;
2346     AVFormatContext *ic = NULL;
2347     int err, i, ret;
2348     int st_index[AVMEDIA_TYPE_NB];
2349     AVPacket pkt1, *pkt = &pkt1;
2350     int eof=0;
2351     int pkt_in_play_range = 0;
2352     AVDictionaryEntry *t;
2353     AVDictionary **opts;
2354     int orig_nb_streams;
2355
2356     memset(st_index, -1, sizeof(st_index));
2357     is->video_stream = -1;
2358     is->audio_stream = -1;
2359     is->subtitle_stream = -1;
2360
2361     global_video_state = is;
2362     avio_set_interrupt_cb(decode_interrupt_cb);
2363
2364     err = avformat_open_input(&ic, is->filename, is->iformat, &format_opts);
2365     if (err < 0) {
2366         print_error(is->filename, err);
2367         ret = -1;
2368         goto fail;
2369     }
2370     if ((t = av_dict_get(format_opts, "", NULL, AV_DICT_IGNORE_SUFFIX))) {
2371         av_log(NULL, AV_LOG_ERROR, "Option %s not found.\n", t->key);
2372         ret = AVERROR_OPTION_NOT_FOUND;
2373         goto fail;
2374     }
2375     is->ic = ic;
2376
2377     if(genpts)
2378         ic->flags |= AVFMT_FLAG_GENPTS;
2379
2380     opts = setup_find_stream_info_opts(ic, codec_opts);
2381     orig_nb_streams = ic->nb_streams;
2382
2383     err = avformat_find_stream_info(ic, opts);
2384     if (err < 0) {
2385         fprintf(stderr, "%s: could not find codec parameters\n", is->filename);
2386         ret = -1;
2387         goto fail;
2388     }
2389     for (i = 0; i < orig_nb_streams; i++)
2390         av_dict_free(&opts[i]);
2391     av_freep(&opts);
2392
2393     if(ic->pb)
2394         ic->pb->eof_reached= 0; //FIXME hack, avplay maybe should not use url_feof() to test for the end
2395
2396     if(seek_by_bytes<0)
2397         seek_by_bytes= !!(ic->iformat->flags & AVFMT_TS_DISCONT);
2398
2399     /* if seeking was requested, execute it */
2400     if (start_time != AV_NOPTS_VALUE) {
2401         int64_t timestamp;
2402
2403         timestamp = start_time;
2404         /* add the stream start time */
2405         if (ic->start_time != AV_NOPTS_VALUE)
2406             timestamp += ic->start_time;
2407         ret = avformat_seek_file(ic, -1, INT64_MIN, timestamp, INT64_MAX, 0);
2408         if (ret < 0) {
2409             fprintf(stderr, "%s: could not seek to position %0.3f\n",
2410                     is->filename, (double)timestamp / AV_TIME_BASE);
2411         }
2412     }
2413
2414     for (i = 0; i < ic->nb_streams; i++)
2415         ic->streams[i]->discard = AVDISCARD_ALL;
2416     if (!video_disable)
2417         st_index[AVMEDIA_TYPE_VIDEO] =
2418             av_find_best_stream(ic, AVMEDIA_TYPE_VIDEO,
2419                                 wanted_stream[AVMEDIA_TYPE_VIDEO], -1, NULL, 0);
2420     if (!audio_disable)
2421         st_index[AVMEDIA_TYPE_AUDIO] =
2422             av_find_best_stream(ic, AVMEDIA_TYPE_AUDIO,
2423                                 wanted_stream[AVMEDIA_TYPE_AUDIO],
2424                                 st_index[AVMEDIA_TYPE_VIDEO],
2425                                 NULL, 0);
2426     if (!video_disable)
2427         st_index[AVMEDIA_TYPE_SUBTITLE] =
2428             av_find_best_stream(ic, AVMEDIA_TYPE_SUBTITLE,
2429                                 wanted_stream[AVMEDIA_TYPE_SUBTITLE],
2430                                 (st_index[AVMEDIA_TYPE_AUDIO] >= 0 ?
2431                                  st_index[AVMEDIA_TYPE_AUDIO] :
2432                                  st_index[AVMEDIA_TYPE_VIDEO]),
2433                                 NULL, 0);
2434     if (show_status) {
2435         av_dump_format(ic, 0, is->filename, 0);
2436     }
2437
2438     /* open the streams */
2439     if (st_index[AVMEDIA_TYPE_AUDIO] >= 0) {
2440         stream_component_open(is, st_index[AVMEDIA_TYPE_AUDIO]);
2441     }
2442
2443     ret=-1;
2444     if (st_index[AVMEDIA_TYPE_VIDEO] >= 0) {
2445         ret= stream_component_open(is, st_index[AVMEDIA_TYPE_VIDEO]);
2446     }
2447     is->refresh_tid = SDL_CreateThread(refresh_thread, is);
2448     if(ret<0) {
2449         if (!display_disable)
2450             is->show_audio = 2;
2451     }
2452
2453     if (st_index[AVMEDIA_TYPE_SUBTITLE] >= 0) {
2454         stream_component_open(is, st_index[AVMEDIA_TYPE_SUBTITLE]);
2455     }
2456
2457     if (is->video_stream < 0 && is->audio_stream < 0) {
2458         fprintf(stderr, "%s: could not open codecs\n", is->filename);
2459         ret = -1;
2460         goto fail;
2461     }
2462
2463     for(;;) {
2464         if (is->abort_request)
2465             break;
2466         if (is->paused != is->last_paused) {
2467             is->last_paused = is->paused;
2468             if (is->paused)
2469                 is->read_pause_return= av_read_pause(ic);
2470             else
2471                 av_read_play(ic);
2472         }
2473 #if CONFIG_RTSP_DEMUXER
2474         if (is->paused && !strcmp(ic->iformat->name, "rtsp")) {
2475             /* wait 10 ms to avoid trying to get another packet */
2476             /* XXX: horrible */
2477             SDL_Delay(10);
2478             continue;
2479         }
2480 #endif
2481         if (is->seek_req) {
2482             int64_t seek_target= is->seek_pos;
2483             int64_t seek_min= is->seek_rel > 0 ? seek_target - is->seek_rel + 2: INT64_MIN;
2484             int64_t seek_max= is->seek_rel < 0 ? seek_target - is->seek_rel - 2: INT64_MAX;
2485 //FIXME the +-2 is needed because rounding is not done in the correct direction when
2486 //      the seek_pos/seek_rel variables are generated
2487
2488             ret = avformat_seek_file(is->ic, -1, seek_min, seek_target, seek_max, is->seek_flags);
2489             if (ret < 0) {
2490                 fprintf(stderr, "%s: error while seeking\n", is->ic->filename);
2491             }else{
2492                 if (is->audio_stream >= 0) {
2493                     packet_queue_flush(&is->audioq);
2494                     packet_queue_put(&is->audioq, &flush_pkt);
2495                 }
2496                 if (is->subtitle_stream >= 0) {
2497                     packet_queue_flush(&is->subtitleq);
2498                     packet_queue_put(&is->subtitleq, &flush_pkt);
2499                 }
2500                 if (is->video_stream >= 0) {
2501                     packet_queue_flush(&is->videoq);
2502                     packet_queue_put(&is->videoq, &flush_pkt);
2503                 }
2504             }
2505             is->seek_req = 0;
2506             eof= 0;
2507         }
2508
2509         /* if the queues are full, no need to read more */
2510         if (   is->audioq.size + is->videoq.size + is->subtitleq.size > MAX_QUEUE_SIZE
2511             || (   (is->audioq   .size  > MIN_AUDIOQ_SIZE || is->audio_stream<0)
2512                 && (is->videoq   .nb_packets > MIN_FRAMES || is->video_stream<0)
2513                 && (is->subtitleq.nb_packets > MIN_FRAMES || is->subtitle_stream<0))) {
2514             /* wait 10 ms */
2515             SDL_Delay(10);
2516             continue;
2517         }
2518         if(eof) {
2519             if(is->video_stream >= 0){
2520                 av_init_packet(pkt);
2521                 pkt->data=NULL;
2522                 pkt->size=0;
2523                 pkt->stream_index= is->video_stream;
2524                 packet_queue_put(&is->videoq, pkt);
2525             }
2526             if (is->audio_stream >= 0 &&
2527                 is->audio_st->codec->codec->capabilities & CODEC_CAP_DELAY) {
2528                 av_init_packet(pkt);
2529                 pkt->data = NULL;
2530                 pkt->size = 0;
2531                 pkt->stream_index = is->audio_stream;
2532                 packet_queue_put(&is->audioq, pkt);
2533             }
2534             SDL_Delay(10);
2535             if(is->audioq.size + is->videoq.size + is->subtitleq.size ==0){
2536                 if(loop!=1 && (!loop || --loop)){
2537                     stream_seek(cur_stream, start_time != AV_NOPTS_VALUE ? start_time : 0, 0, 0);
2538                 }else if(autoexit){
2539                     ret=AVERROR_EOF;
2540                     goto fail;
2541                 }
2542             }
2543             continue;
2544         }
2545         ret = av_read_frame(ic, pkt);
2546         if (ret < 0) {
2547             if (ret == AVERROR_EOF || (ic->pb && ic->pb->eof_reached))
2548                 eof=1;
2549             if (ic->pb && ic->pb->error)
2550                 break;
2551             SDL_Delay(100); /* wait for user event */
2552             continue;
2553         }
2554         /* check if the packet is in the play range specified by the user, then queue it, otherwise discard it */
2555         pkt_in_play_range = duration == AV_NOPTS_VALUE ||
2556                 (pkt->pts - ic->streams[pkt->stream_index]->start_time) *
2557                 av_q2d(ic->streams[pkt->stream_index]->time_base) -
2558                 (double)(start_time != AV_NOPTS_VALUE ? start_time : 0)/1000000
2559                 <= ((double)duration/1000000);
2560         if (pkt->stream_index == is->audio_stream && pkt_in_play_range) {
2561             packet_queue_put(&is->audioq, pkt);
2562         } else if (pkt->stream_index == is->video_stream && pkt_in_play_range) {
2563             packet_queue_put(&is->videoq, pkt);
2564         } else if (pkt->stream_index == is->subtitle_stream && pkt_in_play_range) {
2565             packet_queue_put(&is->subtitleq, pkt);
2566         } else {
2567             av_free_packet(pkt);
2568         }
2569     }
2570     /* wait until the end */
2571     while (!is->abort_request) {
2572         SDL_Delay(100);
2573     }
2574
2575     ret = 0;
2576  fail:
2577     /* disable interrupting */
2578     global_video_state = NULL;
2579
2580     /* close each stream */
2581     if (is->audio_stream >= 0)
2582         stream_component_close(is, is->audio_stream);
2583     if (is->video_stream >= 0)
2584         stream_component_close(is, is->video_stream);
2585     if (is->subtitle_stream >= 0)
2586         stream_component_close(is, is->subtitle_stream);
2587     if (is->ic) {
2588         av_close_input_file(is->ic);
2589         is->ic = NULL; /* safety */
2590     }
2591     avio_set_interrupt_cb(NULL);
2592
2593     if (ret != 0) {
2594         SDL_Event event;
2595
2596         event.type = FF_QUIT_EVENT;
2597         event.user.data1 = is;
2598         SDL_PushEvent(&event);
2599     }
2600     return 0;
2601 }
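/*
 * Worked example (illustrative): the pkt_in_play_range test above keeps a
 * packet when its pts, converted to seconds relative to the stream start and
 * measured from the requested start position, does not exceed the requested
 * duration. With -ss 30 -t 10 (start_time = 30e6, duration = 10e6 microseconds),
 * a packet 35.0 s into the stream passes because 35.0 - 30.0 <= 10.0, while one
 * at 41.0 s is discarded.
 */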
2602
2603 static VideoState *stream_open(const char *filename, AVInputFormat *iformat)
2604 {
2605     VideoState *is;
2606
2607     is = av_mallocz(sizeof(VideoState));
2608     if (!is)
2609         return NULL;
2610     av_strlcpy(is->filename, filename, sizeof(is->filename));
2611     is->iformat = iformat;
2612     is->ytop = 0;
2613     is->xleft = 0;
2614
2615     /* start video display */
2616     is->pictq_mutex = SDL_CreateMutex();
2617     is->pictq_cond = SDL_CreateCond();
2618
2619     is->subpq_mutex = SDL_CreateMutex();
2620     is->subpq_cond = SDL_CreateCond();
2621
2622     is->av_sync_type = av_sync_type;
2623     is->parse_tid = SDL_CreateThread(decode_thread, is);
2624     if (!is->parse_tid) {
2625         av_free(is);
2626         return NULL;
2627     }
2628     return is;
2629 }
2630
2631 static void stream_cycle_channel(VideoState *is, int codec_type)
2632 {
2633     AVFormatContext *ic = is->ic;
2634     int start_index, stream_index;
2635     AVStream *st;
2636
2637     if (codec_type == AVMEDIA_TYPE_VIDEO)
2638         start_index = is->video_stream;
2639     else if (codec_type == AVMEDIA_TYPE_AUDIO)
2640         start_index = is->audio_stream;
2641     else
2642         start_index = is->subtitle_stream;
2643     if (start_index < (codec_type == AVMEDIA_TYPE_SUBTITLE ? -1 : 0))
2644         return;
2645     stream_index = start_index;
2646     for(;;) {
2647         if (++stream_index >= is->ic->nb_streams)
2648         {
2649             if (codec_type == AVMEDIA_TYPE_SUBTITLE)
2650             {
2651                 stream_index = -1;
2652                 goto the_end;
2653             } else
2654                 stream_index = 0;
2655         }
2656         if (stream_index == start_index)
2657             return;
2658         st = ic->streams[stream_index];
2659         if (st->codec->codec_type == codec_type) {
2660             /* check that parameters are OK */
2661             switch(codec_type) {
2662             case AVMEDIA_TYPE_AUDIO:
2663                 if (st->codec->sample_rate != 0 &&
2664                     st->codec->channels != 0)
2665                     goto the_end;
2666                 break;
2667             case AVMEDIA_TYPE_VIDEO:
2668             case AVMEDIA_TYPE_SUBTITLE:
2669                 goto the_end;
2670             default:
2671                 break;
2672             }
2673         }
2674     }
2675  the_end:
2676     stream_component_close(is, start_index);
2677     stream_component_open(is, stream_index);
2678 }
2679
2680
2681 static void toggle_full_screen(void)
2682 {
2683     is_full_screen = !is_full_screen;
2684     video_open(cur_stream);
2685 }
2686
2687 static void toggle_pause(void)
2688 {
2689     if (cur_stream)
2690         stream_pause(cur_stream);
2691     step = 0;
2692 }
2693
2694 static void step_to_next_frame(void)
2695 {
2696     if (cur_stream) {
2697         /* if the stream is paused, unpause it, then step */
2698         if (cur_stream->paused)
2699             stream_pause(cur_stream);
2700     }
2701     step = 1;
2702 }
2703
2704 static void toggle_audio_display(void)
2705 {
2706     if (cur_stream) {
2707         int bgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0x00);
2708         cur_stream->show_audio = (cur_stream->show_audio + 1) % 3;
2709         fill_rectangle(screen,
2710                     cur_stream->xleft, cur_stream->ytop, cur_stream->width, cur_stream->height,
2711                     bgcolor);
2712         SDL_UpdateRect(screen, cur_stream->xleft, cur_stream->ytop, cur_stream->width, cur_stream->height);
2713     }
2714 }
2715
2716 /* handle an event sent by the GUI */
2717 static void event_loop(void)
2718 {
2719     SDL_Event event;
2720     double incr, pos, frac;
2721
2722     for(;;) {
2723         double x;
2724         SDL_WaitEvent(&event);
2725         switch(event.type) {
2726         case SDL_KEYDOWN:
2727             if (exit_on_keydown) {
2728                 do_exit();
2729                 break;
2730             }
2731             switch(event.key.keysym.sym) {
2732             case SDLK_ESCAPE:
2733             case SDLK_q:
2734                 do_exit();
2735                 break;
2736             case SDLK_f:
2737                 toggle_full_screen();
2738                 break;
2739             case SDLK_p:
2740             case SDLK_SPACE:
2741                 toggle_pause();
2742                 break;
2743             case SDLK_s: //S: Step to next frame
2744                 step_to_next_frame();
2745                 break;
2746             case SDLK_a:
2747                 if (cur_stream)
2748                     stream_cycle_channel(cur_stream, AVMEDIA_TYPE_AUDIO);
2749                 break;
2750             case SDLK_v:
2751                 if (cur_stream)
2752                     stream_cycle_channel(cur_stream, AVMEDIA_TYPE_VIDEO);
2753                 break;
2754             case SDLK_t:
2755                 if (cur_stream)
2756                     stream_cycle_channel(cur_stream, AVMEDIA_TYPE_SUBTITLE);
2757                 break;
2758             case SDLK_w:
2759                 toggle_audio_display();
2760                 break;
2761             case SDLK_LEFT:
2762                 incr = -10.0;
2763                 goto do_seek;
2764             case SDLK_RIGHT:
2765                 incr = 10.0;
2766                 goto do_seek;
2767             case SDLK_UP:
2768                 incr = 60.0;
2769                 goto do_seek;
2770             case SDLK_DOWN:
2771                 incr = -60.0;
2772             do_seek:
2773                 if (cur_stream) {
2774                     if (seek_by_bytes) {
2775                         if (cur_stream->video_stream >= 0 && cur_stream->video_current_pos>=0){
2776                             pos= cur_stream->video_current_pos;
2777                         }else if(cur_stream->audio_stream >= 0 && cur_stream->audio_pkt.pos>=0){
2778                             pos= cur_stream->audio_pkt.pos;
2779                         }else
2780                             pos = avio_tell(cur_stream->ic->pb);
2781                         if (cur_stream->ic->bit_rate)
2782                             incr *= cur_stream->ic->bit_rate / 8.0;
2783                         else
2784                             incr *= 180000.0;
2785                         pos += incr;
2786                         stream_seek(cur_stream, pos, incr, 1);
2787                     } else {
2788                         pos = get_master_clock(cur_stream);
2789                         pos += incr;
2790                         stream_seek(cur_stream, (int64_t)(pos * AV_TIME_BASE), (int64_t)(incr * AV_TIME_BASE), 0);
2791                     }
2792                 }
2793                 break;
2794             default:
2795                 break;
2796             }
2797             break;
2798         case SDL_MOUSEBUTTONDOWN:
2799             if (exit_on_mousedown) {
2800                 do_exit();
2801                 break;
2802             }
2803         case SDL_MOUSEMOTION:
2804             if(event.type ==SDL_MOUSEBUTTONDOWN){
2805                 x= event.button.x;
2806             }else{
2807                 if(event.motion.state != SDL_PRESSED)
2808                     break;
2809                 x= event.motion.x;
2810             }
2811             if (cur_stream) {
2812                 if(seek_by_bytes || cur_stream->ic->duration<=0){
2813                     uint64_t size=  avio_size(cur_stream->ic->pb);
2814                     stream_seek(cur_stream, size*x/cur_stream->width, 0, 1);
2815                 }else{
2816                     int64_t ts;
2817                     int ns, hh, mm, ss;
2818                     int tns, thh, tmm, tss;
2819                     tns = cur_stream->ic->duration/1000000LL;
2820                     thh = tns/3600;
2821                     tmm = (tns%3600)/60;
2822                     tss = (tns%60);
2823                     frac = x/cur_stream->width;
2824                     ns = frac*tns;
2825                     hh = ns/3600;
2826                     mm = (ns%3600)/60;
2827                     ss = (ns%60);
2828                     fprintf(stderr, "Seek to %2.0f%% (%2d:%02d:%02d) of total duration (%2d:%02d:%02d)       \n", frac*100,
2829                             hh, mm, ss, thh, tmm, tss);
2830                     ts = frac*cur_stream->ic->duration;
2831                     if (cur_stream->ic->start_time != AV_NOPTS_VALUE)
2832                         ts += cur_stream->ic->start_time;
2833                     stream_seek(cur_stream, ts, 0, 0);
2834                 }
2835             }
2836             break;
2837         case SDL_VIDEORESIZE:
2838             if (cur_stream) {
2839                 screen = SDL_SetVideoMode(event.resize.w, event.resize.h, 0,
2840                                           SDL_HWSURFACE|SDL_RESIZABLE|SDL_ASYNCBLIT|SDL_HWACCEL);
2841                 screen_width = cur_stream->width = event.resize.w;
2842                 screen_height= cur_stream->height= event.resize.h;
2843             }
2844             break;
2845         case SDL_QUIT:
2846         case FF_QUIT_EVENT:
2847             do_exit();
2848             break;
2849         case FF_ALLOC_EVENT:
2850             video_open(event.user.data1);
2851             alloc_picture(event.user.data1);
2852             break;
2853         case FF_REFRESH_EVENT:
2854             video_refresh_timer(event.user.data1);
2855             cur_stream->refresh=0;
2856             break;
2857         default:
2858             break;
2859         }
2860     }
2861 }
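/*
 * Note (illustrative): in byte-seek mode the arrow-key increment (in seconds)
 * is converted to a byte offset using the container bit rate, with a fallback
 * of 180000 bytes/s (~1.44 Mbit/s) when the bit rate is unknown. At 8 Mbit/s a
 * 10 s right-arrow press therefore advances the read position by about 10 MB.
 */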
2862
2863 static int opt_frame_size(const char *opt, const char *arg)
2864 {
2865     av_log(NULL, AV_LOG_ERROR,
2866            "Option '%s' has been removed, use private format options instead\n", opt);
2867     return AVERROR(EINVAL);
2868 }
2869
2870 static int opt_width(const char *opt, const char *arg)
2871 {
2872     screen_width = parse_number_or_die(opt, arg, OPT_INT64, 1, INT_MAX);
2873     return 0;
2874 }
2875
2876 static int opt_height(const char *opt, const char *arg)
2877 {
2878     screen_height = parse_number_or_die(opt, arg, OPT_INT64, 1, INT_MAX);
2879     return 0;
2880 }
2881
2882 static int opt_format(const char *opt, const char *arg)
2883 {
2884     file_iformat = av_find_input_format(arg);
2885     if (!file_iformat) {
2886         fprintf(stderr, "Unknown input format: %s\n", arg);
2887         return AVERROR(EINVAL);
2888     }
2889     return 0;
2890 }
2891
2892 static int opt_frame_pix_fmt(const char *opt, const char *arg)
2893 {
2894     av_log(NULL, AV_LOG_ERROR,
2895            "Option '%s' has been removed, use private format options instead\n", opt);
2896     return AVERROR(EINVAL);
2897 }
2898
2899 static int opt_sync(const char *opt, const char *arg)
2900 {
2901     if (!strcmp(arg, "audio"))
2902         av_sync_type = AV_SYNC_AUDIO_MASTER;
2903     else if (!strcmp(arg, "video"))
2904         av_sync_type = AV_SYNC_VIDEO_MASTER;
2905     else if (!strcmp(arg, "ext"))
2906         av_sync_type = AV_SYNC_EXTERNAL_CLOCK;
2907     else {
2908         fprintf(stderr, "Unknown value for %s: %s\n", opt, arg);
2909         exit(1);
2910     }
2911     return 0;
2912 }
2913
2914 static int opt_seek(const char *opt, const char *arg)
2915 {
2916     start_time = parse_time_or_die(opt, arg, 1);
2917     return 0;
2918 }
2919
2920 static int opt_duration(const char *opt, const char *arg)
2921 {
2922     duration = parse_time_or_die(opt, arg, 1);
2923     return 0;
2924 }
2925
2926 static int opt_debug(const char *opt, const char *arg)
2927 {
2928     av_log_set_level(99);
2929     debug = parse_number_or_die(opt, arg, OPT_INT64, 0, INT_MAX);
2930     return 0;
2931 }
2932
2933 static int opt_vismv(const char *opt, const char *arg)
2934 {
2935     debug_mv = parse_number_or_die(opt, arg, OPT_INT64, INT_MIN, INT_MAX);
2936     return 0;
2937 }
2938
2939 static int opt_thread_count(const char *opt, const char *arg)
2940 {
2941     thread_count= parse_number_or_die(opt, arg, OPT_INT64, 0, INT_MAX);
2942 #if !HAVE_THREADS
2943     fprintf(stderr, "Warning: not compiled with thread support, using thread emulation\n");
2944 #endif
2945     return 0;
2946 }
2947
2948 static const OptionDef options[] = {
2949 #include "cmdutils_common_opts.h"
2950     { "x", HAS_ARG, {(void*)opt_width}, "force displayed width", "width" },
2951     { "y", HAS_ARG, {(void*)opt_height}, "force displayed height", "height" },
2952     { "s", HAS_ARG | OPT_VIDEO, {(void*)opt_frame_size}, "set frame size (WxH or abbreviation)", "size" },
2953     { "fs", OPT_BOOL, {(void*)&is_full_screen}, "force full screen" },
2954     { "an", OPT_BOOL, {(void*)&audio_disable}, "disable audio" },
2955     { "vn", OPT_BOOL, {(void*)&video_disable}, "disable video" },
2956     { "ast", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&wanted_stream[AVMEDIA_TYPE_AUDIO]}, "select desired audio stream", "stream_number" },
2957     { "vst", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&wanted_stream[AVMEDIA_TYPE_VIDEO]}, "select desired video stream", "stream_number" },
2958     { "sst", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&wanted_stream[AVMEDIA_TYPE_SUBTITLE]}, "select desired subtitle stream", "stream_number" },
2959     { "ss", HAS_ARG, {(void*)&opt_seek}, "seek to a given position in seconds", "pos" },
2960     { "t", HAS_ARG, {(void*)&opt_duration}, "play \"duration\" seconds of audio/video", "duration" },
2961     { "bytes", OPT_INT | HAS_ARG, {(void*)&seek_by_bytes}, "seek by bytes 0=off 1=on -1=auto", "val" },
2962     { "nodisp", OPT_BOOL, {(void*)&display_disable}, "disable graphical display" },
2963     { "f", HAS_ARG, {(void*)opt_format}, "force format", "fmt" },
2964     { "pix_fmt", HAS_ARG | OPT_EXPERT | OPT_VIDEO, {(void*)opt_frame_pix_fmt}, "set pixel format", "format" },
2965     { "stats", OPT_BOOL | OPT_EXPERT, {(void*)&show_status}, "show status", "" },
2966     { "debug", HAS_ARG | OPT_EXPERT, {(void*)opt_debug}, "print specific debug info", "" },
2967     { "bug", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&workaround_bugs}, "workaround bugs", "" },
2968     { "vismv", HAS_ARG | OPT_EXPERT, {(void*)opt_vismv}, "visualize motion vectors", "" },
2969     { "fast", OPT_BOOL | OPT_EXPERT, {(void*)&fast}, "non spec compliant optimizations", "" },
2970     { "genpts", OPT_BOOL | OPT_EXPERT, {(void*)&genpts}, "generate pts", "" },
2971     { "drp", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&decoder_reorder_pts}, "let decoder reorder pts 0=off 1=on -1=auto", ""},
2972     { "lowres", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&lowres}, "", "" },
2973     { "skiploop", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&skip_loop_filter}, "", "" },
2974     { "skipframe", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&skip_frame}, "", "" },
2975     { "skipidct", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&skip_idct}, "", "" },
2976     { "idct", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&idct}, "set idct algo",  "algo" },
2977     { "er", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&error_recognition}, "set error detection threshold (0-4)",  "threshold" },
2978     { "ec", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&error_concealment}, "set error concealment options",  "bit_mask" },
2979     { "sync", HAS_ARG | OPT_EXPERT, {(void*)opt_sync}, "set audio-video sync. type (type=audio/video/ext)", "type" },
2980     { "threads", HAS_ARG | OPT_EXPERT, {(void*)opt_thread_count}, "thread count", "count" },
2981     { "autoexit", OPT_BOOL | OPT_EXPERT, {(void*)&autoexit}, "exit at the end", "" },
2982     { "exitonkeydown", OPT_BOOL | OPT_EXPERT, {(void*)&exit_on_keydown}, "exit on key down", "" },
2983     { "exitonmousedown", OPT_BOOL | OPT_EXPERT, {(void*)&exit_on_mousedown}, "exit on mouse down", "" },
2984     { "loop", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&loop}, "set number of times the playback shall be looped", "loop count" },
2985     { "framedrop", OPT_BOOL | OPT_EXPERT, {(void*)&framedrop}, "drop frames when cpu is too slow", "" },
2986     { "window_title", OPT_STRING | HAS_ARG, {(void*)&window_title}, "set window title", "window title" },
2987 #if CONFIG_AVFILTER
2988     { "vf", OPT_STRING | HAS_ARG, {(void*)&vfilters}, "video filters", "filter list" },
2989 #endif
2990     { "rdftspeed", OPT_INT | HAS_ARG| OPT_AUDIO | OPT_EXPERT, {(void*)&rdftspeed}, "rdft speed", "msecs" },
2991     { "default", HAS_ARG | OPT_AUDIO | OPT_VIDEO | OPT_EXPERT, {(void*)opt_default}, "generic catch all option", "" },
2992     { "i", 0, {NULL}, "ffmpeg compatibility dummy option", ""},
2993     { NULL, },
2994 };
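/*
 * Usage examples (illustrative, using only options defined in the table above):
 *
 *     avplay -ss 00:01:00 -t 30 -sync audio input.mkv
 *         play 30 seconds starting one minute in, with the audio clock as master
 *
 *     avplay -vf "scale=640:-1" -autoexit input.mp4
 *         scale the video to 640 pixels wide and exit at the end
 *         (-vf is only available when built with CONFIG_AVFILTER)
 */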
2995
2996 static void show_usage(void)
2997 {
2998     printf("Simple media player\n");
2999     printf("usage: %s [options] input_file\n", program_name);
3000     printf("\n");
3001 }
3002
3003 static void show_help(void)
3004 {
3005     av_log_set_callback(log_callback_help);
3006     show_usage();
3007     show_help_options(options, "Main options:\n",
3008                       OPT_EXPERT, 0);
3009     show_help_options(options, "\nAdvanced options:\n",
3010                       OPT_EXPERT, OPT_EXPERT);
3011     printf("\n");
3012     show_help_children(avcodec_get_class(), AV_OPT_FLAG_DECODING_PARAM);
3013     show_help_children(avformat_get_class(), AV_OPT_FLAG_DECODING_PARAM);
3014 #if !CONFIG_AVFILTER
3015     show_help_children(sws_get_class(), AV_OPT_FLAG_ENCODING_PARAM);
3016 #endif
3017     printf("\nWhile playing:\n"
3018            "q, ESC              quit\n"
3019            "f                   toggle full screen\n"
3020            "p, SPC              pause\n"
3021            "a                   cycle audio channel\n"
3022            "v                   cycle video channel\n"
3023            "t                   cycle subtitle channel\n"
3024            "w                   show audio waves\n"
3025            "s                   activate frame-step mode\n"
3026            "left/right          seek backward/forward 10 seconds\n"
3027            "down/up             seek backward/forward 1 minute\n"
3028            "mouse click         seek to percentage in file corresponding to fraction of width\n"
3029            );
3030 }
3031
3032 static void opt_input_file(void *optctx, const char *filename)
3033 {
3034     if (input_filename) {
3035         fprintf(stderr, "Argument '%s' provided as input filename, but '%s' was already specified.\n",
3036                 filename, input_filename);
3037         exit(1);
3038     }
3039     if (!strcmp(filename, "-"))
3040         filename = "pipe:";
3041     input_filename = filename;
3042 }
3043
3044 /* Called from the main */
3045 int main(int argc, char **argv)
3046 {
3047     int flags;
3048
3049     av_log_set_flags(AV_LOG_SKIP_REPEATED);
3050     parse_loglevel(argc, argv, options);
3051
3052     /* register all codecs, demuxers and protocols */
3053     avcodec_register_all();
3054 #if CONFIG_AVDEVICE
3055     avdevice_register_all();
3056 #endif
3057 #if CONFIG_AVFILTER
3058     avfilter_register_all();
3059 #endif
3060     av_register_all();
3061
3062     init_opts();
3063
3064     show_banner();
3065
3066     parse_options(NULL, argc, argv, options, opt_input_file);
3067
3068     if (!input_filename) {
3069         show_usage();
3070         fprintf(stderr, "An input file must be specified\n");
3071         fprintf(stderr, "Use -h to get full help or, even better, run 'man %s'\n", program_name);
3072         exit(1);
3073     }
3074
3075     if (display_disable) {
3076         video_disable = 1;
3077     }
3078     flags = SDL_INIT_VIDEO | SDL_INIT_AUDIO | SDL_INIT_TIMER;
3079 #if !defined(__MINGW32__) && !defined(__APPLE__)
3080     flags |= SDL_INIT_EVENTTHREAD; /* Not supported on Windows or Mac OS X */
3081 #endif
3082     if (SDL_Init (flags)) {
3083         fprintf(stderr, "Could not initialize SDL - %s\n", SDL_GetError());
3084         exit(1);
3085     }
3086
3087     if (!display_disable) {
3088 #if HAVE_SDL_VIDEO_SIZE
3089         const SDL_VideoInfo *vi = SDL_GetVideoInfo();
3090         fs_screen_width = vi->current_w;
3091         fs_screen_height = vi->current_h;
3092 #endif
3093     }
3094
3095     SDL_EventState(SDL_ACTIVEEVENT, SDL_IGNORE);
3096     SDL_EventState(SDL_SYSWMEVENT, SDL_IGNORE);
3097     SDL_EventState(SDL_USEREVENT, SDL_IGNORE);
3098
3099     av_init_packet(&flush_pkt);
3100     flush_pkt.data= "FLUSH";
3101
3102     cur_stream = stream_open(input_filename, file_iformat);
3103
3104     event_loop();
3105
3106     /* never returns */
3107
3108     return 0;
3109 }