1 /*
2  * avplay : Simple Media Player based on the Libav libraries
3  * Copyright (c) 2003 Fabrice Bellard
4  *
5  * This file is part of Libav.
6  *
7  * Libav is free software; you can redistribute it and/or
8  * modify it under the terms of the GNU Lesser General Public
9  * License as published by the Free Software Foundation; either
10  * version 2.1 of the License, or (at your option) any later version.
11  *
12  * Libav is distributed in the hope that it will be useful,
13  * but WITHOUT ANY WARRANTY; without even the implied warranty of
14  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
15  * Lesser General Public License for more details.
16  *
17  * You should have received a copy of the GNU Lesser General Public
18  * License along with Libav; if not, write to the Free Software
19  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
20  */
21
22 #include "config.h"
23 #include <inttypes.h>
24 #include <math.h>
25 #include <limits.h>
26 #include "libavutil/avstring.h"
27 #include "libavutil/colorspace.h"
28 #include "libavutil/mathematics.h"
29 #include "libavutil/pixdesc.h"
30 #include "libavutil/imgutils.h"
31 #include "libavutil/dict.h"
32 #include "libavutil/parseutils.h"
33 #include "libavutil/samplefmt.h"
34 #include "libavutil/time.h"
35 #include "libavformat/avformat.h"
36 #include "libavdevice/avdevice.h"
37 #include "libswscale/swscale.h"
38 #include "libavresample/avresample.h"
39 #include "libavutil/opt.h"
40 #include "libavcodec/avfft.h"
41
42 #if CONFIG_AVFILTER
43 # include "libavfilter/avfilter.h"
44 # include "libavfilter/avfiltergraph.h"
45 # include "libavfilter/buffersink.h"
46 # include "libavfilter/buffersrc.h"
47 #endif
48
49 #include "cmdutils.h"
50
51 #include <SDL.h>
52 #include <SDL_thread.h>
53
54 #ifdef __MINGW32__
55 #undef main /* We don't want SDL to override our main() */
56 #endif
57
58 #include <assert.h>
59
60 const char program_name[] = "avplay";
61 const int program_birth_year = 2003;
62
63 #define MAX_QUEUE_SIZE (15 * 1024 * 1024)
64 #define MIN_AUDIOQ_SIZE (20 * 16 * 1024)
65 #define MIN_FRAMES 5
66
67 /* SDL audio buffer size, in samples. Should be small to have precise
68    A/V sync as SDL does not have hardware buffer fullness info. */
69 #define SDL_AUDIO_BUFFER_SIZE 1024
70
71 /* no AV sync correction is done if below the AV sync threshold */
72 #define AV_SYNC_THRESHOLD 0.01
73 /* no AV correction is done if the error is too big */
74 #define AV_NOSYNC_THRESHOLD 10.0
75
76 #define FRAME_SKIP_FACTOR 0.05
77
78 /* maximum audio speed change to get correct sync */
79 #define SAMPLE_CORRECTION_PERCENT_MAX 10
80
81 /* about AUDIO_DIFF_AVG_NB A-V differences are used to compute the average */
82 #define AUDIO_DIFF_AVG_NB   20
83
84 /* NOTE: the size must be big enough to compensate for the hardware audio buffer size */
85 #define SAMPLE_ARRAY_SIZE (2 * 65536)
86
87 static int sws_flags = SWS_BICUBIC;
88
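/* Thread-safe FIFO of demuxed packets: packet_queue_put() appends under the
   mutex and signals 'cond', while packet_queue_get() blocks on 'cond' until a
   packet arrives or abort_request is set.  'size' tracks the queued payload
   bytes plus the per-node list overhead. */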
89 typedef struct PacketQueue {
90     AVPacketList *first_pkt, *last_pkt;
91     int nb_packets;
92     int size;
93     int abort_request;
94     SDL_mutex *mutex;
95     SDL_cond *cond;
96 } PacketQueue;
97
98 #define VIDEO_PICTURE_QUEUE_SIZE 2
99 #define SUBPICTURE_QUEUE_SIZE 4
100
101 typedef struct VideoPicture {
102     double pts;             // presentation timestamp for this picture
103     double target_clock;    // av_gettime() time at which this should be displayed ideally
104     int64_t pos;            // byte position in file
105     SDL_Overlay *bmp;
106     int width, height; /* source height & width */
107     int allocated;
108     int reallocate;
109     enum AVPixelFormat pix_fmt;
110
111 #if CONFIG_AVFILTER
112     AVFilterBufferRef *picref;
113 #endif
114 } VideoPicture;
115
116 typedef struct SubPicture {
117     double pts; /* presentation time stamp for this picture */
118     AVSubtitle sub;
119 } SubPicture;
120
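/* Master clock selection for A/V sync (see get_master_clock()): the chosen
   clock runs free and the other streams are slaved to it.  Audio is the
   default master; the external clock advances with system time from the
   stored external_clock / external_clock_time base. */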
121 enum {
122     AV_SYNC_AUDIO_MASTER, /* default choice */
123     AV_SYNC_VIDEO_MASTER,
124     AV_SYNC_EXTERNAL_CLOCK, /* synchronize to an external clock */
125 };
126
127 typedef struct VideoState {
128     SDL_Thread *parse_tid;
129     SDL_Thread *video_tid;
130     SDL_Thread *refresh_tid;
131     AVInputFormat *iformat;
132     int no_background;
133     int abort_request;
134     int paused;
135     int last_paused;
136     int seek_req;
137     int seek_flags;
138     int64_t seek_pos;
139     int64_t seek_rel;
140     int read_pause_return;
141     AVFormatContext *ic;
142
143     int audio_stream;
144
145     int av_sync_type;
146     double external_clock; /* external clock base */
147     int64_t external_clock_time;
148
149     double audio_clock;
150     double audio_diff_cum; /* used for AV difference average computation */
151     double audio_diff_avg_coef;
152     double audio_diff_threshold;
153     int audio_diff_avg_count;
154     AVStream *audio_st;
155     PacketQueue audioq;
156     int audio_hw_buf_size;
157     uint8_t silence_buf[SDL_AUDIO_BUFFER_SIZE];
158     uint8_t *audio_buf;
159     uint8_t *audio_buf1;
160     unsigned int audio_buf_size; /* in bytes */
161     int audio_buf_index; /* in bytes */
162     AVPacket audio_pkt_temp;
163     AVPacket audio_pkt;
164     enum AVSampleFormat sdl_sample_fmt;
165     uint64_t sdl_channel_layout;
166     int sdl_channels;
167     int sdl_sample_rate;
168     enum AVSampleFormat resample_sample_fmt;
169     uint64_t resample_channel_layout;
170     int resample_sample_rate;
171     AVAudioResampleContext *avr;
172     AVFrame *frame;
173
174     int show_audio; /* if true, display audio samples */
175     int16_t sample_array[SAMPLE_ARRAY_SIZE];
176     int sample_array_index;
177     int last_i_start;
178     RDFTContext *rdft;
179     int rdft_bits;
180     FFTSample *rdft_data;
181     int xpos;
182
183     SDL_Thread *subtitle_tid;
184     int subtitle_stream;
185     int subtitle_stream_changed;
186     AVStream *subtitle_st;
187     PacketQueue subtitleq;
188     SubPicture subpq[SUBPICTURE_QUEUE_SIZE];
189     int subpq_size, subpq_rindex, subpq_windex;
190     SDL_mutex *subpq_mutex;
191     SDL_cond *subpq_cond;
192
193     double frame_timer;
194     double frame_last_pts;
195     double frame_last_delay;
196     double video_clock;             // pts of last decoded frame / predicted pts of next decoded frame
197     int video_stream;
198     AVStream *video_st;
199     PacketQueue videoq;
200     double video_current_pts;       // current displayed pts (different from video_clock if frame fifos are used)
201     double video_current_pts_drift; // video_current_pts - time (av_gettime) at which we updated video_current_pts - used to have running video pts
202     int64_t video_current_pos;      // current displayed file pos
203     VideoPicture pictq[VIDEO_PICTURE_QUEUE_SIZE];
204     int pictq_size, pictq_rindex, pictq_windex;
205     SDL_mutex *pictq_mutex;
206     SDL_cond *pictq_cond;
207 #if !CONFIG_AVFILTER
208     struct SwsContext *img_convert_ctx;
209 #endif
210
211     //    QETimer *video_timer;
212     char filename[1024];
213     int width, height, xleft, ytop;
214
215     PtsCorrectionContext pts_ctx;
216
217 #if CONFIG_AVFILTER
218     AVFilterContext *in_video_filter;   // the first filter in the video chain
219     AVFilterContext *out_video_filter;  // the last filter in the video chain
220     int use_dr1;
221     FrameBuffer *buffer_pool;
222 #endif
223
224     float skip_frames;
225     float skip_frames_index;
226     int refresh;
227 } VideoState;
228
229 /* options specified by the user */
230 static AVInputFormat *file_iformat;
231 static const char *input_filename;
232 static const char *window_title;
233 static int fs_screen_width;
234 static int fs_screen_height;
235 static int screen_width  = 0;
236 static int screen_height = 0;
237 static int audio_disable;
238 static int video_disable;
239 static int wanted_stream[AVMEDIA_TYPE_NB] = {
240     [AVMEDIA_TYPE_AUDIO]    = -1,
241     [AVMEDIA_TYPE_VIDEO]    = -1,
242     [AVMEDIA_TYPE_SUBTITLE] = -1,
243 };
244 static int seek_by_bytes = -1;
245 static int display_disable;
246 static int show_status = 1;
247 static int av_sync_type = AV_SYNC_AUDIO_MASTER;
248 static int64_t start_time = AV_NOPTS_VALUE;
249 static int64_t duration = AV_NOPTS_VALUE;
250 static int debug = 0;
251 static int debug_mv = 0;
252 static int step = 0;
253 static int workaround_bugs = 1;
254 static int fast = 0;
255 static int genpts = 0;
256 static int idct = FF_IDCT_AUTO;
257 static enum AVDiscard skip_frame       = AVDISCARD_DEFAULT;
258 static enum AVDiscard skip_idct        = AVDISCARD_DEFAULT;
259 static enum AVDiscard skip_loop_filter = AVDISCARD_DEFAULT;
260 static int error_concealment = 3;
261 static int decoder_reorder_pts = -1;
262 static int autoexit;
263 static int exit_on_keydown;
264 static int exit_on_mousedown;
265 static int loop = 1;
266 static int framedrop = 1;
267 static int infinite_buffer = 0;
268
269 static int rdftspeed = 20;
270 #if CONFIG_AVFILTER
271 static char *vfilters = NULL;
272 #endif
273
274 /* current context */
275 static int is_full_screen;
276 static VideoState *cur_stream;
277 static int64_t audio_callback_time;
278
279 static AVPacket flush_pkt;
280
281 #define FF_ALLOC_EVENT   (SDL_USEREVENT)
282 #define FF_REFRESH_EVENT (SDL_USEREVENT + 1)
283 #define FF_QUIT_EVENT    (SDL_USEREVENT + 2)
284
285 static SDL_Surface *screen;
286
287 static int packet_queue_put(PacketQueue *q, AVPacket *pkt);
288
289 /* packet queue handling */
290 static void packet_queue_init(PacketQueue *q)
291 {
292     memset(q, 0, sizeof(PacketQueue));
293     q->mutex = SDL_CreateMutex();
294     q->cond = SDL_CreateCond();
295     packet_queue_put(q, &flush_pkt);
296 }
297
298 static void packet_queue_flush(PacketQueue *q)
299 {
300     AVPacketList *pkt, *pkt1;
301
302     SDL_LockMutex(q->mutex);
303     for (pkt = q->first_pkt; pkt != NULL; pkt = pkt1) {
304         pkt1 = pkt->next;
305         av_free_packet(&pkt->pkt);
306         av_freep(&pkt);
307     }
308     q->last_pkt = NULL;
309     q->first_pkt = NULL;
310     q->nb_packets = 0;
311     q->size = 0;
312     SDL_UnlockMutex(q->mutex);
313 }
314
315 static void packet_queue_end(PacketQueue *q)
316 {
317     packet_queue_flush(q);
318     SDL_DestroyMutex(q->mutex);
319     SDL_DestroyCond(q->cond);
320 }
321
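/* Append a packet to the queue.  The packet data is duplicated with
   av_dup_packet() (except for the special flush packet) so the queue owns its
   own copy; returns 0 on success, -1 on failure. */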
322 static int packet_queue_put(PacketQueue *q, AVPacket *pkt)
323 {
324     AVPacketList *pkt1;
325
326     /* duplicate the packet */
327     if (pkt != &flush_pkt && av_dup_packet(pkt) < 0)
328         return -1;
329
330     pkt1 = av_malloc(sizeof(AVPacketList));
331     if (!pkt1)
332         return -1;
333     pkt1->pkt = *pkt;
334     pkt1->next = NULL;
335
336
337     SDL_LockMutex(q->mutex);
338
339     if (!q->last_pkt)
340
341         q->first_pkt = pkt1;
342     else
343         q->last_pkt->next = pkt1;
344     q->last_pkt = pkt1;
345     q->nb_packets++;
346     q->size += pkt1->pkt.size + sizeof(*pkt1);
347     /* XXX: should duplicate packet data in DV case */
348     SDL_CondSignal(q->cond);
349
350     SDL_UnlockMutex(q->mutex);
351     return 0;
352 }
353
354 static void packet_queue_abort(PacketQueue *q)
355 {
356     SDL_LockMutex(q->mutex);
357
358     q->abort_request = 1;
359
360     SDL_CondSignal(q->cond);
361
362     SDL_UnlockMutex(q->mutex);
363 }
364
365 /* return < 0 if aborted, 0 if no packet and > 0 if packet.  */
366 static int packet_queue_get(PacketQueue *q, AVPacket *pkt, int block)
367 {
368     AVPacketList *pkt1;
369     int ret;
370
371     SDL_LockMutex(q->mutex);
372
373     for (;;) {
374         if (q->abort_request) {
375             ret = -1;
376             break;
377         }
378
379         pkt1 = q->first_pkt;
380         if (pkt1) {
381             q->first_pkt = pkt1->next;
382             if (!q->first_pkt)
383                 q->last_pkt = NULL;
384             q->nb_packets--;
385             q->size -= pkt1->pkt.size + sizeof(*pkt1);
386             *pkt = pkt1->pkt;
387             av_free(pkt1);
388             ret = 1;
389             break;
390         } else if (!block) {
391             ret = 0;
392             break;
393         } else {
394             SDL_CondWait(q->cond, q->mutex);
395         }
396     }
397     SDL_UnlockMutex(q->mutex);
398     return ret;
399 }
400
401 static inline void fill_rectangle(SDL_Surface *screen,
402                                   int x, int y, int w, int h, int color)
403 {
404     SDL_Rect rect;
405     rect.x = x;
406     rect.y = y;
407     rect.w = w;
408     rect.h = h;
409     SDL_FillRect(screen, &rect, color);
410 }
411
412 #define ALPHA_BLEND(a, oldp, newp, s)\
413 ((((oldp << s) * (255 - (a))) + (newp * (a))) / (255 << s))
414
415 #define RGBA_IN(r, g, b, a, s)\
416 {\
417     unsigned int v = ((const uint32_t *)(s))[0];\
418     a = (v >> 24) & 0xff;\
419     r = (v >> 16) & 0xff;\
420     g = (v >> 8) & 0xff;\
421     b = v & 0xff;\
422 }
423
424 #define YUVA_IN(y, u, v, a, s, pal)\
425 {\
426     unsigned int val = ((const uint32_t *)(pal))[*(const uint8_t*)(s)];\
427     a = (val >> 24) & 0xff;\
428     y = (val >> 16) & 0xff;\
429     u = (val >> 8) & 0xff;\
430     v = val & 0xff;\
431 }
432
433 #define YUVA_OUT(d, y, u, v, a)\
434 {\
435     ((uint32_t *)(d))[0] = (a << 24) | (y << 16) | (u << 8) | v;\
436 }
437
438
439 #define BPP 1
440
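/* Blend one palettized subtitle rectangle onto a YUV420 destination picture.
   rect->pict.data[0] holds 8-bit palette indices and rect->pict.data[1] the
   32-bit palette (already converted to YUV, hence the 'Now in YCrCb!' note
   below).  Luma is blended per pixel; chroma is blended at half resolution,
   which is why odd start rows/columns and odd widths/heights are special-cased. */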
441 static void blend_subrect(AVPicture *dst, const AVSubtitleRect *rect, int imgw, int imgh)
442 {
443     int wrap, wrap3, width2, skip2;
444     int y, u, v, a, u1, v1, a1, w, h;
445     uint8_t *lum, *cb, *cr;
446     const uint8_t *p;
447     const uint32_t *pal;
448     int dstx, dsty, dstw, dsth;
449
450     dstw = av_clip(rect->w, 0, imgw);
451     dsth = av_clip(rect->h, 0, imgh);
452     dstx = av_clip(rect->x, 0, imgw - dstw);
453     dsty = av_clip(rect->y, 0, imgh - dsth);
454     lum = dst->data[0] + dsty * dst->linesize[0];
455     cb  = dst->data[1] + (dsty >> 1) * dst->linesize[1];
456     cr  = dst->data[2] + (dsty >> 1) * dst->linesize[2];
457
458     width2 = ((dstw + 1) >> 1) + (dstx & ~dstw & 1);
459     skip2 = dstx >> 1;
460     wrap = dst->linesize[0];
461     wrap3 = rect->pict.linesize[0];
462     p = rect->pict.data[0];
463     pal = (const uint32_t *)rect->pict.data[1];  /* Now in YCrCb! */
464
465     if (dsty & 1) {
466         lum += dstx;
467         cb += skip2;
468         cr += skip2;
469
470         if (dstx & 1) {
471             YUVA_IN(y, u, v, a, p, pal);
472             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
473             cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
474             cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
475             cb++;
476             cr++;
477             lum++;
478             p += BPP;
479         }
480         for (w = dstw - (dstx & 1); w >= 2; w -= 2) {
481             YUVA_IN(y, u, v, a, p, pal);
482             u1 = u;
483             v1 = v;
484             a1 = a;
485             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
486
487             YUVA_IN(y, u, v, a, p + BPP, pal);
488             u1 += u;
489             v1 += v;
490             a1 += a;
491             lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
492             cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
493             cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
494             cb++;
495             cr++;
496             p += 2 * BPP;
497             lum += 2;
498         }
499         if (w) {
500             YUVA_IN(y, u, v, a, p, pal);
501             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
502             cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
503             cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
504             p++;
505             lum++;
506         }
507         p += wrap3 - dstw * BPP;
508         lum += wrap - dstw - dstx;
509         cb += dst->linesize[1] - width2 - skip2;
510         cr += dst->linesize[2] - width2 - skip2;
511     }
512     for (h = dsth - (dsty & 1); h >= 2; h -= 2) {
513         lum += dstx;
514         cb += skip2;
515         cr += skip2;
516
517         if (dstx & 1) {
518             YUVA_IN(y, u, v, a, p, pal);
519             u1 = u;
520             v1 = v;
521             a1 = a;
522             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
523             p += wrap3;
524             lum += wrap;
525             YUVA_IN(y, u, v, a, p, pal);
526             u1 += u;
527             v1 += v;
528             a1 += a;
529             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
530             cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
531             cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
532             cb++;
533             cr++;
534             p += -wrap3 + BPP;
535             lum += -wrap + 1;
536         }
537         for (w = dstw - (dstx & 1); w >= 2; w -= 2) {
538             YUVA_IN(y, u, v, a, p, pal);
539             u1 = u;
540             v1 = v;
541             a1 = a;
542             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
543
544             YUVA_IN(y, u, v, a, p + BPP, pal);
545             u1 += u;
546             v1 += v;
547             a1 += a;
548             lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
549             p += wrap3;
550             lum += wrap;
551
552             YUVA_IN(y, u, v, a, p, pal);
553             u1 += u;
554             v1 += v;
555             a1 += a;
556             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
557
558             YUVA_IN(y, u, v, a, p + BPP, pal);
559             u1 += u;
560             v1 += v;
561             a1 += a;
562             lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
563
564             cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 2);
565             cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 2);
566
567             cb++;
568             cr++;
569             p += -wrap3 + 2 * BPP;
570             lum += -wrap + 2;
571         }
572         if (w) {
573             YUVA_IN(y, u, v, a, p, pal);
574             u1 = u;
575             v1 = v;
576             a1 = a;
577             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
578             p += wrap3;
579             lum += wrap;
580             YUVA_IN(y, u, v, a, p, pal);
581             u1 += u;
582             v1 += v;
583             a1 += a;
584             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
585             cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
586             cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
587             cb++;
588             cr++;
589             p += -wrap3 + BPP;
590             lum += -wrap + 1;
591         }
592         p += wrap3 + (wrap3 - dstw * BPP);
593         lum += wrap + (wrap - dstw - dstx);
594         cb += dst->linesize[1] - width2 - skip2;
595         cr += dst->linesize[2] - width2 - skip2;
596     }
597     /* handle odd height */
598     if (h) {
599         lum += dstx;
600         cb += skip2;
601         cr += skip2;
602
603         if (dstx & 1) {
604             YUVA_IN(y, u, v, a, p, pal);
605             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
606             cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
607             cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
608             cb++;
609             cr++;
610             lum++;
611             p += BPP;
612         }
613         for (w = dstw - (dstx & 1); w >= 2; w -= 2) {
614             YUVA_IN(y, u, v, a, p, pal);
615             u1 = u;
616             v1 = v;
617             a1 = a;
618             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
619
620             YUVA_IN(y, u, v, a, p + BPP, pal);
621             u1 += u;
622             v1 += v;
623             a1 += a;
624             lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
625             cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u, 1);
626             cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v, 1);
627             cb++;
628             cr++;
629             p += 2 * BPP;
630             lum += 2;
631         }
632         if (w) {
633             YUVA_IN(y, u, v, a, p, pal);
634             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
635             cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
636             cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
637         }
638     }
639 }
640
641 static void free_subpicture(SubPicture *sp)
642 {
643     avsubtitle_free(&sp->sub);
644 }
645
646 static void video_image_display(VideoState *is)
647 {
648     VideoPicture *vp;
649     SubPicture *sp;
650     AVPicture pict;
651     float aspect_ratio;
652     int width, height, x, y;
653     SDL_Rect rect;
654     int i;
655
656     vp = &is->pictq[is->pictq_rindex];
657     if (vp->bmp) {
658 #if CONFIG_AVFILTER
659          if (vp->picref->video->pixel_aspect.num == 0)
660              aspect_ratio = 0;
661          else
662              aspect_ratio = av_q2d(vp->picref->video->pixel_aspect);
663 #else
664
665         /* XXX: use variable in the frame */
666         if (is->video_st->sample_aspect_ratio.num)
667             aspect_ratio = av_q2d(is->video_st->sample_aspect_ratio);
668         else if (is->video_st->codec->sample_aspect_ratio.num)
669             aspect_ratio = av_q2d(is->video_st->codec->sample_aspect_ratio);
670         else
671             aspect_ratio = 0;
672 #endif
673         if (aspect_ratio <= 0.0)
674             aspect_ratio = 1.0;
675         aspect_ratio *= (float)vp->width / (float)vp->height;
676
677         if (is->subtitle_st)
678         {
679             if (is->subpq_size > 0)
680             {
681                 sp = &is->subpq[is->subpq_rindex];
682
683                 if (vp->pts >= sp->pts + ((float) sp->sub.start_display_time / 1000))
684                 {
685                     SDL_LockYUVOverlay (vp->bmp);
686
687                     pict.data[0] = vp->bmp->pixels[0];
688                     pict.data[1] = vp->bmp->pixels[2];
689                     pict.data[2] = vp->bmp->pixels[1];
690
691                     pict.linesize[0] = vp->bmp->pitches[0];
692                     pict.linesize[1] = vp->bmp->pitches[2];
693                     pict.linesize[2] = vp->bmp->pitches[1];
694
695                     for (i = 0; i < sp->sub.num_rects; i++)
696                         blend_subrect(&pict, sp->sub.rects[i],
697                                       vp->bmp->w, vp->bmp->h);
698
699                     SDL_UnlockYUVOverlay (vp->bmp);
700                 }
701             }
702         }
703
704
705         /* XXX: we assume the screen has a 1:1 pixel aspect ratio */
706         height = is->height;
707         width = ((int)rint(height * aspect_ratio)) & ~1;
708         if (width > is->width) {
709             width = is->width;
710             height = ((int)rint(width / aspect_ratio)) & ~1;
711         }
712         x = (is->width - width) / 2;
713         y = (is->height - height) / 2;
714         is->no_background = 0;
715         rect.x = is->xleft + x;
716         rect.y = is->ytop  + y;
717         rect.w = width;
718         rect.h = height;
719         SDL_DisplayYUVOverlay(vp->bmp, &rect);
720     }
721 }
722
723 /* get the current audio output buffer fill level, in bytes. With SDL we
724    cannot get precise hardware buffer information */
725 static int audio_write_get_buf_size(VideoState *is)
726 {
727     return is->audio_buf_size - is->audio_buf_index;
728 }
729
730 static inline int compute_mod(int a, int b)
731 {
732     a = a % b;
733     if (a >= 0)
734         return a;
735     else
736         return a + b;
737 }
738
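/* Audio visualization: in mode 1 draw one waveform per channel, otherwise
   draw an RDFT-based spectrogram one column per refresh at s->xpos.  i_start
   centers the display on the samples currently being played by subtracting
   the data still queued in the audio buffer (plus the time elapsed since the
   last audio callback) from the write position in sample_array. */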
739 static void video_audio_display(VideoState *s)
740 {
741     int i, i_start, x, y1, y, ys, delay, n, nb_display_channels;
742     int ch, channels, h, h2, bgcolor, fgcolor;
743     int64_t time_diff;
744     int rdft_bits, nb_freq;
745
746     for (rdft_bits = 1; (1 << rdft_bits) < 2 * s->height; rdft_bits++)
747         ;
748     nb_freq = 1 << (rdft_bits - 1);
749
750     /* compute display index: center on currently output samples */
751     channels = s->sdl_channels;
752     nb_display_channels = channels;
753     if (!s->paused) {
754         int data_used = s->show_audio == 1 ? s->width : (2 * nb_freq);
755         n = 2 * channels;
756         delay = audio_write_get_buf_size(s);
757         delay /= n;
758
759         /* to be more precise, we take into account the time spent since
760            the last buffer computation */
761         if (audio_callback_time) {
762             time_diff = av_gettime() - audio_callback_time;
763             delay -= (time_diff * s->sdl_sample_rate) / 1000000;
764         }
765
766         delay += 2 * data_used;
767         if (delay < data_used)
768             delay = data_used;
769
770         i_start = x = compute_mod(s->sample_array_index - delay * channels, SAMPLE_ARRAY_SIZE);
771         if (s->show_audio == 1) {
772             h = INT_MIN;
773             for (i = 0; i < 1000; i += channels) {
774                 int idx = (SAMPLE_ARRAY_SIZE + x - i) % SAMPLE_ARRAY_SIZE;
775                 int a = s->sample_array[idx];
776                 int b = s->sample_array[(idx + 4 * channels) % SAMPLE_ARRAY_SIZE];
777                 int c = s->sample_array[(idx + 5 * channels) % SAMPLE_ARRAY_SIZE];
778                 int d = s->sample_array[(idx + 9 * channels) % SAMPLE_ARRAY_SIZE];
779                 int score = a - d;
780                 if (h < score && (b ^ c) < 0) {
781                     h = score;
782                     i_start = idx;
783                 }
784             }
785         }
786
787         s->last_i_start = i_start;
788     } else {
789         i_start = s->last_i_start;
790     }
791
792     bgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0x00);
793     if (s->show_audio == 1) {
794         fill_rectangle(screen,
795                        s->xleft, s->ytop, s->width, s->height,
796                        bgcolor);
797
798         fgcolor = SDL_MapRGB(screen->format, 0xff, 0xff, 0xff);
799
800         /* total height for one channel */
801         h = s->height / nb_display_channels;
802         /* graph height / 2 */
803         h2 = (h * 9) / 20;
804         for (ch = 0; ch < nb_display_channels; ch++) {
805             i = i_start + ch;
806             y1 = s->ytop + ch * h + (h / 2); /* position of center line */
807             for (x = 0; x < s->width; x++) {
808                 y = (s->sample_array[i] * h2) >> 15;
809                 if (y < 0) {
810                     y = -y;
811                     ys = y1 - y;
812                 } else {
813                     ys = y1;
814                 }
815                 fill_rectangle(screen,
816                                s->xleft + x, ys, 1, y,
817                                fgcolor);
818                 i += channels;
819                 if (i >= SAMPLE_ARRAY_SIZE)
820                     i -= SAMPLE_ARRAY_SIZE;
821             }
822         }
823
824         fgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0xff);
825
826         for (ch = 1; ch < nb_display_channels; ch++) {
827             y = s->ytop + ch * h;
828             fill_rectangle(screen,
829                            s->xleft, y, s->width, 1,
830                            fgcolor);
831         }
832         SDL_UpdateRect(screen, s->xleft, s->ytop, s->width, s->height);
833     } else {
834         nb_display_channels = FFMIN(nb_display_channels, 2);
835         if (rdft_bits != s->rdft_bits) {
836             av_rdft_end(s->rdft);
837             av_free(s->rdft_data);
838             s->rdft = av_rdft_init(rdft_bits, DFT_R2C);
839             s->rdft_bits = rdft_bits;
840             s->rdft_data = av_malloc(4 * nb_freq * sizeof(*s->rdft_data));
841         }
842         {
843             FFTSample *data[2];
844             for (ch = 0; ch < nb_display_channels; ch++) {
845                 data[ch] = s->rdft_data + 2 * nb_freq * ch;
846                 i = i_start + ch;
847                 for (x = 0; x < 2 * nb_freq; x++) {
848                     double w = (x-nb_freq) * (1.0 / nb_freq);
849                     data[ch][x] = s->sample_array[i] * (1.0 - w * w);
850                     i += channels;
851                     if (i >= SAMPLE_ARRAY_SIZE)
852                         i -= SAMPLE_ARRAY_SIZE;
853                 }
854                 av_rdft_calc(s->rdft, data[ch]);
855             }
856             // Least efficient way to do this; we should access the data directly, but it is more than fast enough
857             for (y = 0; y < s->height; y++) {
858                 double w = 1 / sqrt(nb_freq);
859                 int a = sqrt(w * sqrt(data[0][2 * y + 0] * data[0][2 * y + 0] + data[0][2 * y + 1] * data[0][2 * y + 1]));
860                 int b = (nb_display_channels == 2 ) ? sqrt(w * sqrt(data[1][2 * y + 0] * data[1][2 * y + 0]
861                        + data[1][2 * y + 1] * data[1][2 * y + 1])) : a;
862                 a = FFMIN(a, 255);
863                 b = FFMIN(b, 255);
864                 fgcolor = SDL_MapRGB(screen->format, a, b, (a + b) / 2);
865
866                 fill_rectangle(screen,
867                             s->xpos, s->height-y, 1, 1,
868                             fgcolor);
869             }
870         }
871         SDL_UpdateRect(screen, s->xpos, s->ytop, 1, s->height);
872         s->xpos++;
873         if (s->xpos >= s->width)
874             s->xpos = s->xleft;
875     }
876 }
877
878 static int video_open(VideoState *is)
879 {
880     int flags = SDL_HWSURFACE | SDL_ASYNCBLIT | SDL_HWACCEL;
881     int w,h;
882
883     if (is_full_screen) flags |= SDL_FULLSCREEN;
884     else                flags |= SDL_RESIZABLE;
885
886     if (is_full_screen && fs_screen_width) {
887         w = fs_screen_width;
888         h = fs_screen_height;
889     } else if (!is_full_screen && screen_width) {
890         w = screen_width;
891         h = screen_height;
892 #if CONFIG_AVFILTER
893     } else if (is->out_video_filter && is->out_video_filter->inputs[0]) {
894         w = is->out_video_filter->inputs[0]->w;
895         h = is->out_video_filter->inputs[0]->h;
896 #else
897     } else if (is->video_st && is->video_st->codec->width) {
898         w = is->video_st->codec->width;
899         h = is->video_st->codec->height;
900 #endif
901     } else {
902         w = 640;
903         h = 480;
904     }
905     if (screen && is->width == screen->w && screen->w == w
906        && is->height == screen->h && screen->h == h)
907         return 0;
908
909 #if defined(__APPLE__) && !SDL_VERSION_ATLEAST(1, 2, 14)
910     /* setting bits_per_pixel = 0 or 32 causes blank video on OS X and older SDL */
911     screen = SDL_SetVideoMode(w, h, 24, flags);
912 #else
913     screen = SDL_SetVideoMode(w, h, 0, flags);
914 #endif
915     if (!screen) {
916         fprintf(stderr, "SDL: could not set video mode - exiting\n");
917         return -1;
918     }
919     if (!window_title)
920         window_title = input_filename;
921     SDL_WM_SetCaption(window_title, window_title);
922
923     is->width  = screen->w;
924     is->height = screen->h;
925
926     return 0;
927 }
928
929 /* display the current picture, if any */
930 static void video_display(VideoState *is)
931 {
932     if (!screen)
933         video_open(cur_stream);
934     if (is->audio_st && is->show_audio)
935         video_audio_display(is);
936     else if (is->video_st)
937         video_image_display(is);
938 }
939
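/* Periodically wake the main loop with FF_REFRESH_EVENT so it redraws the
   picture: every rdftspeed ms while the audio visualization is shown,
   otherwise every 5 ms.  is->refresh keeps at most one refresh event pending
   at a time. */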
940 static int refresh_thread(void *opaque)
941 {
942     VideoState *is = opaque;
943     while (!is->abort_request) {
944         SDL_Event event;
945         event.type = FF_REFRESH_EVENT;
946         event.user.data1 = opaque;
947         if (!is->refresh) {
948             is->refresh = 1;
949             SDL_PushEvent(&event);
950         }
951         av_usleep(is->audio_st && is->show_audio ? rdftspeed * 1000 : 5000); // FIXME: ideally we should wait for the correct time, but SDL's event passing is too slow for that
952     }
953     return 0;
954 }
955
956 /* get the current audio clock value */
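/* The value is is->audio_clock minus the playback time represented by the
   bytes still sitting in the output buffer, i.e. roughly the PTS of the
   sample being heard right now. */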
957 static double get_audio_clock(VideoState *is)
958 {
959     double pts;
960     int hw_buf_size, bytes_per_sec;
961     pts = is->audio_clock;
962     hw_buf_size = audio_write_get_buf_size(is);
963     bytes_per_sec = 0;
964     if (is->audio_st) {
965         bytes_per_sec = is->sdl_sample_rate * is->sdl_channels *
966                         av_get_bytes_per_sample(is->sdl_sample_fmt);
967     }
968     if (bytes_per_sec)
969         pts -= (double)hw_buf_size / bytes_per_sec;
970     return pts;
971 }
972
973 /* get the current video clock value */
974 static double get_video_clock(VideoState *is)
975 {
976     if (is->paused) {
977         return is->video_current_pts;
978     } else {
979         return is->video_current_pts_drift + av_gettime() / 1000000.0;
980     }
981 }
982
983 /* get the current external clock value */
984 static double get_external_clock(VideoState *is)
985 {
986     int64_t ti;
987     ti = av_gettime();
988     return is->external_clock + ((ti - is->external_clock_time) * 1e-6);
989 }
990
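/* Pick the clock all other streams are synchronized against: the configured
   master if its stream exists, otherwise fall back to the other stream's
   clock, or to the external clock. */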
991 /* get the current master clock value */
992 static double get_master_clock(VideoState *is)
993 {
994     double val;
995
996     if (is->av_sync_type == AV_SYNC_VIDEO_MASTER) {
997         if (is->video_st)
998             val = get_video_clock(is);
999         else
1000             val = get_audio_clock(is);
1001     } else if (is->av_sync_type == AV_SYNC_AUDIO_MASTER) {
1002         if (is->audio_st)
1003             val = get_audio_clock(is);
1004         else
1005             val = get_video_clock(is);
1006     } else {
1007         val = get_external_clock(is);
1008     }
1009     return val;
1010 }
1011
1012 /* seek in the stream */
1013 static void stream_seek(VideoState *is, int64_t pos, int64_t rel, int seek_by_bytes)
1014 {
1015     if (!is->seek_req) {
1016         is->seek_pos = pos;
1017         is->seek_rel = rel;
1018         is->seek_flags &= ~AVSEEK_FLAG_BYTE;
1019         if (seek_by_bytes)
1020             is->seek_flags |= AVSEEK_FLAG_BYTE;
1021         is->seek_req = 1;
1022     }
1023 }
1024
1025 /* pause or resume the video */
1026 static void stream_pause(VideoState *is)
1027 {
1028     if (is->paused) {
1029         is->frame_timer += av_gettime() / 1000000.0 + is->video_current_pts_drift - is->video_current_pts;
1030         if (is->read_pause_return != AVERROR(ENOSYS)) {
1031             is->video_current_pts = is->video_current_pts_drift + av_gettime() / 1000000.0;
1032         }
1033         is->video_current_pts_drift = is->video_current_pts - av_gettime() / 1000000.0;
1034     }
1035     is->paused = !is->paused;
1036 }
1037
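/* Compute the absolute time at which the frame with this PTS should be shown.
   The nominal delay is the PTS step from the previous frame; when audio (or
   the external clock) is master, the delay is dropped to 0 to catch up, or
   doubled to slow down, whenever the video clock drifts past the sync
   threshold.  The result is accumulated into frame_timer. */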
1038 static double compute_target_time(double frame_current_pts, VideoState *is)
1039 {
1040     double delay, sync_threshold, diff;
1041
1042     /* compute nominal delay */
1043     delay = frame_current_pts - is->frame_last_pts;
1044     if (delay <= 0 || delay >= 10.0) {
1045         /* if incorrect delay, use previous one */
1046         delay = is->frame_last_delay;
1047     } else {
1048         is->frame_last_delay = delay;
1049     }
1050     is->frame_last_pts = frame_current_pts;
1051
1052     /* update delay to follow master synchronisation source */
1053     if (((is->av_sync_type == AV_SYNC_AUDIO_MASTER && is->audio_st) ||
1054          is->av_sync_type == AV_SYNC_EXTERNAL_CLOCK)) {
1055         /* if video is slave, we try to correct big delays by
1056            duplicating or deleting a frame */
1057         diff = get_video_clock(is) - get_master_clock(is);
1058
1059         /* skip or repeat frame. We take into account the
1060            delay to compute the threshold. I still don't know
1061            if it is the best guess */
1062         sync_threshold = FFMAX(AV_SYNC_THRESHOLD, delay);
1063         if (fabs(diff) < AV_NOSYNC_THRESHOLD) {
1064             if (diff <= -sync_threshold)
1065                 delay = 0;
1066             else if (diff >= sync_threshold)
1067                 delay = 2 * delay;
1068         }
1069     }
1070     is->frame_timer += delay;
1071
1072     av_dlog(NULL, "video: delay=%0.3f pts=%0.3f A-V=%f\n",
1073             delay, frame_current_pts, -diff);
1074
1075     return is->frame_timer;
1076 }
1077
1078 /* called to display each frame */
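/* Pops pictures from pictq no earlier than their target_clock, drops late
   frames when framedrop is enabled, expires finished subtitles, and prints
   the periodic status line. */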
1079 static void video_refresh_timer(void *opaque)
1080 {
1081     VideoState *is = opaque;
1082     VideoPicture *vp;
1083
1084     SubPicture *sp, *sp2;
1085
1086     if (is->video_st) {
1087 retry:
1088         if (is->pictq_size == 0) {
1089             // nothing to do, no picture to display in the queue
1090         } else {
1091             double time = av_gettime() / 1000000.0;
1092             double next_target;
1093             /* dequeue the picture */
1094             vp = &is->pictq[is->pictq_rindex];
1095
1096             if (time < vp->target_clock)
1097                 return;
1098             /* update current video pts */
1099             is->video_current_pts = vp->pts;
1100             is->video_current_pts_drift = is->video_current_pts - time;
1101             is->video_current_pos = vp->pos;
1102             if (is->pictq_size > 1) {
1103                 VideoPicture *nextvp = &is->pictq[(is->pictq_rindex + 1) % VIDEO_PICTURE_QUEUE_SIZE];
1104                 assert(nextvp->target_clock >= vp->target_clock);
1105                 next_target = nextvp->target_clock;
1106             } else {
1107                 next_target = vp->target_clock + is->video_clock - vp->pts; // FIXME pass durations cleanly
1108             }
1109             if (framedrop && time > next_target) {
1110                 is->skip_frames *= 1.0 + FRAME_SKIP_FACTOR;
1111                 if (is->pictq_size > 1 || time > next_target + 0.5) {
1112                     /* update queue size and signal for next picture */
1113                     if (++is->pictq_rindex == VIDEO_PICTURE_QUEUE_SIZE)
1114                         is->pictq_rindex = 0;
1115
1116                     SDL_LockMutex(is->pictq_mutex);
1117                     is->pictq_size--;
1118                     SDL_CondSignal(is->pictq_cond);
1119                     SDL_UnlockMutex(is->pictq_mutex);
1120                     goto retry;
1121                 }
1122             }
1123
1124             if (is->subtitle_st) {
1125                 if (is->subtitle_stream_changed) {
1126                     SDL_LockMutex(is->subpq_mutex);
1127
1128                     while (is->subpq_size) {
1129                         free_subpicture(&is->subpq[is->subpq_rindex]);
1130
1131                         /* update queue size and signal for next picture */
1132                         if (++is->subpq_rindex == SUBPICTURE_QUEUE_SIZE)
1133                             is->subpq_rindex = 0;
1134
1135                         is->subpq_size--;
1136                     }
1137                     is->subtitle_stream_changed = 0;
1138
1139                     SDL_CondSignal(is->subpq_cond);
1140                     SDL_UnlockMutex(is->subpq_mutex);
1141                 } else {
1142                     if (is->subpq_size > 0) {
1143                         sp = &is->subpq[is->subpq_rindex];
1144
1145                         if (is->subpq_size > 1)
1146                             sp2 = &is->subpq[(is->subpq_rindex + 1) % SUBPICTURE_QUEUE_SIZE];
1147                         else
1148                             sp2 = NULL;
1149
1150                         if ((is->video_current_pts > (sp->pts + ((float) sp->sub.end_display_time / 1000)))
1151                                 || (sp2 && is->video_current_pts > (sp2->pts + ((float) sp2->sub.start_display_time / 1000))))
1152                         {
1153                             free_subpicture(sp);
1154
1155                             /* update queue size and signal for next picture */
1156                             if (++is->subpq_rindex == SUBPICTURE_QUEUE_SIZE)
1157                                 is->subpq_rindex = 0;
1158
1159                             SDL_LockMutex(is->subpq_mutex);
1160                             is->subpq_size--;
1161                             SDL_CondSignal(is->subpq_cond);
1162                             SDL_UnlockMutex(is->subpq_mutex);
1163                         }
1164                     }
1165                 }
1166             }
1167
1168             /* display picture */
1169             if (!display_disable)
1170                 video_display(is);
1171
1172             /* update queue size and signal for next picture */
1173             if (++is->pictq_rindex == VIDEO_PICTURE_QUEUE_SIZE)
1174                 is->pictq_rindex = 0;
1175
1176             SDL_LockMutex(is->pictq_mutex);
1177             is->pictq_size--;
1178             SDL_CondSignal(is->pictq_cond);
1179             SDL_UnlockMutex(is->pictq_mutex);
1180         }
1181     } else if (is->audio_st) {
1182         /* draw the next audio frame */
1183
1184         /* if only audio stream, then display the audio bars (better
1185            than nothing, just to test the implementation) */
1186
1187         /* display picture */
1188         if (!display_disable)
1189             video_display(is);
1190     }
1191     if (show_status) {
1192         static int64_t last_time;
1193         int64_t cur_time;
1194         int aqsize, vqsize, sqsize;
1195         double av_diff;
1196
1197         cur_time = av_gettime();
1198         if (!last_time || (cur_time - last_time) >= 30000) {
1199             aqsize = 0;
1200             vqsize = 0;
1201             sqsize = 0;
1202             if (is->audio_st)
1203                 aqsize = is->audioq.size;
1204             if (is->video_st)
1205                 vqsize = is->videoq.size;
1206             if (is->subtitle_st)
1207                 sqsize = is->subtitleq.size;
1208             av_diff = 0;
1209             if (is->audio_st && is->video_st)
1210                 av_diff = get_audio_clock(is) - get_video_clock(is);
1211             printf("%7.2f A-V:%7.3f s:%3.1f aq=%5dKB vq=%5dKB sq=%5dB f=%"PRId64"/%"PRId64"   \r",
1212                    get_master_clock(is), av_diff, FFMAX(is->skip_frames - 1, 0), aqsize / 1024,
1213                    vqsize / 1024, sqsize, is->pts_ctx.num_faulty_dts, is->pts_ctx.num_faulty_pts);
1214             fflush(stdout);
1215             last_time = cur_time;
1216         }
1217     }
1218 }
1219
1220 static void stream_close(VideoState *is)
1221 {
1222     VideoPicture *vp;
1223     int i;
1224     /* XXX: use a special url_shutdown call to abort parse cleanly */
1225     is->abort_request = 1;
1226     SDL_WaitThread(is->parse_tid, NULL);
1227     SDL_WaitThread(is->refresh_tid, NULL);
1228
1229     /* free all pictures */
1230     for (i = 0; i < VIDEO_PICTURE_QUEUE_SIZE; i++) {
1231         vp = &is->pictq[i];
1232 #if CONFIG_AVFILTER
1233         avfilter_unref_bufferp(&vp->picref);
1234 #endif
1235         if (vp->bmp) {
1236             SDL_FreeYUVOverlay(vp->bmp);
1237             vp->bmp = NULL;
1238         }
1239     }
1240     SDL_DestroyMutex(is->pictq_mutex);
1241     SDL_DestroyCond(is->pictq_cond);
1242     SDL_DestroyMutex(is->subpq_mutex);
1243     SDL_DestroyCond(is->subpq_cond);
1244 #if !CONFIG_AVFILTER
1245     if (is->img_convert_ctx)
1246         sws_freeContext(is->img_convert_ctx);
1247 #endif
1248     av_free(is);
1249 }
1250
1251 static void do_exit(void)
1252 {
1253     if (cur_stream) {
1254         stream_close(cur_stream);
1255         cur_stream = NULL;
1256     }
1257     uninit_opts();
1258 #if CONFIG_AVFILTER
1259     avfilter_uninit();
1260 #endif
1261     avformat_network_deinit();
1262     if (show_status)
1263         printf("\n");
1264     SDL_Quit();
1265     av_log(NULL, AV_LOG_QUIET, "");
1266     exit(0);
1267 }
1268
1269 /* allocate a picture (this must be done in the main thread to avoid
1270    potential locking problems) */
1271 static void alloc_picture(void *opaque)
1272 {
1273     VideoState *is = opaque;
1274     VideoPicture *vp;
1275
1276     vp = &is->pictq[is->pictq_windex];
1277
1278     if (vp->bmp)
1279         SDL_FreeYUVOverlay(vp->bmp);
1280
1281 #if CONFIG_AVFILTER
1282     avfilter_unref_bufferp(&vp->picref);
1283
1284     vp->width   = is->out_video_filter->inputs[0]->w;
1285     vp->height  = is->out_video_filter->inputs[0]->h;
1286     vp->pix_fmt = is->out_video_filter->inputs[0]->format;
1287 #else
1288     vp->width   = is->video_st->codec->width;
1289     vp->height  = is->video_st->codec->height;
1290     vp->pix_fmt = is->video_st->codec->pix_fmt;
1291 #endif
1292
1293     vp->bmp = SDL_CreateYUVOverlay(vp->width, vp->height,
1294                                    SDL_YV12_OVERLAY,
1295                                    screen);
1296     if (!vp->bmp || vp->bmp->pitches[0] < vp->width) {
1297         /* SDL allocates a buffer smaller than requested if the video
1298          * overlay hardware is unable to support the requested size. */
1299         fprintf(stderr, "Error: the video system does not support an image\n"
1300                         "size of %dx%d pixels. Try using -vf \"scale=w:h\"\n"
1301                         "to reduce the image size.\n", vp->width, vp->height );
1302         do_exit();
1303     }
1304
1305     SDL_LockMutex(is->pictq_mutex);
1306     vp->allocated = 1;
1307     SDL_CondSignal(is->pictq_cond);
1308     SDL_UnlockMutex(is->pictq_mutex);
1309 }
1310
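/* Wait for a free slot in pictq, ask the main thread (via FF_ALLOC_EVENT) to
   (re)create the SDL overlay if the frame size changed, then copy or convert
   the decoded frame into it.  Note the U/V pointer swap: SDL's YV12 overlay
   stores the V plane before the U plane. */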
1311 /* The 'pts' parameter is the dts of the packet / pts of the frame and
1312  * guessed if not known. */
1313 static int queue_picture(VideoState *is, AVFrame *src_frame, double pts, int64_t pos)
1314 {
1315     VideoPicture *vp;
1316 #if CONFIG_AVFILTER
1317     AVPicture pict_src;
1318 #else
1319     int dst_pix_fmt = AV_PIX_FMT_YUV420P;
1320 #endif
1321     /* wait until we have space to put a new picture */
1322     SDL_LockMutex(is->pictq_mutex);
1323
1324     if (is->pictq_size >= VIDEO_PICTURE_QUEUE_SIZE && !is->refresh)
1325         is->skip_frames = FFMAX(1.0 - FRAME_SKIP_FACTOR, is->skip_frames * (1.0 - FRAME_SKIP_FACTOR));
1326
1327     while (is->pictq_size >= VIDEO_PICTURE_QUEUE_SIZE &&
1328            !is->videoq.abort_request) {
1329         SDL_CondWait(is->pictq_cond, is->pictq_mutex);
1330     }
1331     SDL_UnlockMutex(is->pictq_mutex);
1332
1333     if (is->videoq.abort_request)
1334         return -1;
1335
1336     vp = &is->pictq[is->pictq_windex];
1337
1338     /* alloc or resize hardware picture buffer */
1339     if (!vp->bmp || vp->reallocate ||
1340 #if CONFIG_AVFILTER
1341         vp->width  != is->out_video_filter->inputs[0]->w ||
1342         vp->height != is->out_video_filter->inputs[0]->h) {
1343 #else
1344         vp->width != is->video_st->codec->width ||
1345         vp->height != is->video_st->codec->height) {
1346 #endif
1347         SDL_Event event;
1348
1349         vp->allocated  = 0;
1350         vp->reallocate = 0;
1351
1352         /* the allocation must be done in the main thread to avoid
1353            locking problems */
1354         event.type = FF_ALLOC_EVENT;
1355         event.user.data1 = is;
1356         SDL_PushEvent(&event);
1357
1358         /* wait until the picture is allocated */
1359         SDL_LockMutex(is->pictq_mutex);
1360         while (!vp->allocated && !is->videoq.abort_request) {
1361             SDL_CondWait(is->pictq_cond, is->pictq_mutex);
1362         }
1363         SDL_UnlockMutex(is->pictq_mutex);
1364
1365         if (is->videoq.abort_request)
1366             return -1;
1367     }
1368
1369     /* if the frame is not skipped, then display it */
1370     if (vp->bmp) {
1371         AVPicture pict = { { 0 } };
1372 #if CONFIG_AVFILTER
1373         avfilter_unref_bufferp(&vp->picref);
1374         vp->picref = src_frame->opaque;
1375 #endif
1376
1377         /* get a pointer on the bitmap */
1378         SDL_LockYUVOverlay (vp->bmp);
1379
1380         pict.data[0] = vp->bmp->pixels[0];
1381         pict.data[1] = vp->bmp->pixels[2];
1382         pict.data[2] = vp->bmp->pixels[1];
1383
1384         pict.linesize[0] = vp->bmp->pitches[0];
1385         pict.linesize[1] = vp->bmp->pitches[2];
1386         pict.linesize[2] = vp->bmp->pitches[1];
1387
1388 #if CONFIG_AVFILTER
1389         pict_src.data[0] = src_frame->data[0];
1390         pict_src.data[1] = src_frame->data[1];
1391         pict_src.data[2] = src_frame->data[2];
1392
1393         pict_src.linesize[0] = src_frame->linesize[0];
1394         pict_src.linesize[1] = src_frame->linesize[1];
1395         pict_src.linesize[2] = src_frame->linesize[2];
1396
1397         // FIXME use direct rendering
1398         av_picture_copy(&pict, &pict_src,
1399                         vp->pix_fmt, vp->width, vp->height);
1400 #else
1401         sws_flags = av_get_int(sws_opts, "sws_flags", NULL);
1402         is->img_convert_ctx = sws_getCachedContext(is->img_convert_ctx,
1403             vp->width, vp->height, vp->pix_fmt, vp->width, vp->height,
1404             dst_pix_fmt, sws_flags, NULL, NULL, NULL);
1405         if (is->img_convert_ctx == NULL) {
1406             fprintf(stderr, "Cannot initialize the conversion context\n");
1407             exit(1);
1408         }
1409         sws_scale(is->img_convert_ctx, src_frame->data, src_frame->linesize,
1410                   0, vp->height, pict.data, pict.linesize);
1411 #endif
1412         /* update the bitmap content */
1413         SDL_UnlockYUVOverlay(vp->bmp);
1414
1415         vp->pts = pts;
1416         vp->pos = pos;
1417
1418         /* now we can update the picture count */
1419         if (++is->pictq_windex == VIDEO_PICTURE_QUEUE_SIZE)
1420             is->pictq_windex = 0;
1421         SDL_LockMutex(is->pictq_mutex);
1422         vp->target_clock = compute_target_time(vp->pts, is);
1423
1424         is->pictq_size++;
1425         SDL_UnlockMutex(is->pictq_mutex);
1426     }
1427     return 0;
1428 }
1429
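/* The video clock advances by one nominal frame duration (the codec time
   base) per picture; each repeated field (repeat_pict) adds another half
   duration, e.g. with a 1/25 s time base and repeat_pict == 1 the clock
   advances by 0.04 * 1.5 = 0.06 s. */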
1430 /* Compute the exact PTS for the picture if it is omitted in the stream.
1431  * The 'pts1' parameter is the dts of the packet / pts of the frame. */
1432 static int output_picture2(VideoState *is, AVFrame *src_frame, double pts1, int64_t pos)
1433 {
1434     double frame_delay, pts;
1435
1436     pts = pts1;
1437
1438     if (pts != 0) {
1439         /* update video clock with pts, if present */
1440         is->video_clock = pts;
1441     } else {
1442         pts = is->video_clock;
1443     }
1444     /* update video clock for next frame */
1445     frame_delay = av_q2d(is->video_st->codec->time_base);
1446     /* for MPEG2, the frame can be repeated, so we update the
1447        clock accordingly */
1448     frame_delay += src_frame->repeat_pict * (frame_delay * 0.5);
1449     is->video_clock += frame_delay;
1450
1451     return queue_picture(is, src_frame, pts, pos);
1452 }
1453
1454 static int get_video_frame(VideoState *is, AVFrame *frame, int64_t *pts, AVPacket *pkt)
1455 {
1456     int got_picture, i;
1457
1458     if (packet_queue_get(&is->videoq, pkt, 1) < 0)
1459         return -1;
1460
1461     if (pkt->data == flush_pkt.data) {
1462         avcodec_flush_buffers(is->video_st->codec);
1463
1464         SDL_LockMutex(is->pictq_mutex);
1465         // Make sure there are no long-delay timers (ideally we should just flush the queue, but that is harder)
1466         for (i = 0; i < VIDEO_PICTURE_QUEUE_SIZE; i++) {
1467             is->pictq[i].target_clock = 0;
1468         }
1469         while (is->pictq_size && !is->videoq.abort_request) {
1470             SDL_CondWait(is->pictq_cond, is->pictq_mutex);
1471         }
1472         is->video_current_pos = -1;
1473         SDL_UnlockMutex(is->pictq_mutex);
1474
1475         init_pts_correction(&is->pts_ctx);
1476         is->frame_last_pts = AV_NOPTS_VALUE;
1477         is->frame_last_delay = 0;
1478         is->frame_timer = (double)av_gettime() / 1000000.0;
1479         is->skip_frames = 1;
1480         is->skip_frames_index = 0;
1481         return 0;
1482     }
1483
1484     avcodec_decode_video2(is->video_st->codec, frame, &got_picture, pkt);
1485
1486     if (got_picture) {
1487         if (decoder_reorder_pts == -1) {
1488             *pts = guess_correct_pts(&is->pts_ctx, frame->pkt_pts, frame->pkt_dts);
1489         } else if (decoder_reorder_pts) {
1490             *pts = frame->pkt_pts;
1491         } else {
1492             *pts = frame->pkt_dts;
1493         }
1494
1495         if (*pts == AV_NOPTS_VALUE) {
1496             *pts = 0;
1497         }
1498
1499         is->skip_frames_index += 1;
1500         if (is->skip_frames_index >= is->skip_frames) {
1501             is->skip_frames_index -= FFMAX(is->skip_frames, 1.0);
1502             return 1;
1503         }
1504
1505     }
1506     return 0;
1507 }
1508
1509 #if CONFIG_AVFILTER
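/* Build the filter graph: buffer source -> [user vfilters, if given] ->
   format=yuv420p -> buffersink.  The buffer source is configured with the
   decoder's width, height and pixel format plus the stream time base and
   sample aspect ratio; forcing yuv420p keeps the frames in a layout the SDL
   YV12 overlay can take directly. */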
1510 static int configure_video_filters(AVFilterGraph *graph, VideoState *is, const char *vfilters)
1511 {
1512     char sws_flags_str[128];
1513     char buffersrc_args[256];
1514     int ret;
1515     AVFilterContext *filt_src = NULL, *filt_out = NULL, *filt_format;
1516     AVCodecContext *codec = is->video_st->codec;
1517
1518     snprintf(sws_flags_str, sizeof(sws_flags_str), "flags=%d", sws_flags);
1519     graph->scale_sws_opts = av_strdup(sws_flags_str);
1520
1521     snprintf(buffersrc_args, sizeof(buffersrc_args), "%d:%d:%d:%d:%d:%d:%d",
1522              codec->width, codec->height, codec->pix_fmt,
1523              is->video_st->time_base.num, is->video_st->time_base.den,
1524              codec->sample_aspect_ratio.num, codec->sample_aspect_ratio.den);
1525
1526
1527     if ((ret = avfilter_graph_create_filter(&filt_src,
1528                                             avfilter_get_by_name("buffer"),
1529                                             "src", buffersrc_args, NULL,
1530                                             graph)) < 0)
1531         return ret;
1532     if ((ret = avfilter_graph_create_filter(&filt_out,
1533                                             avfilter_get_by_name("buffersink"),
1534                                             "out", NULL, NULL, graph)) < 0)
1535         return ret;
1536
1537     if ((ret = avfilter_graph_create_filter(&filt_format,
1538                                             avfilter_get_by_name("format"),
1539                                             "format", "yuv420p", NULL, graph)) < 0)
1540         return ret;
1541     if ((ret = avfilter_link(filt_format, 0, filt_out, 0)) < 0)
1542         return ret;
1543
1544
1545     if (vfilters) {
1546         AVFilterInOut *outputs = avfilter_inout_alloc();
1547         AVFilterInOut *inputs  = avfilter_inout_alloc();
1548
1549         outputs->name    = av_strdup("in");
1550         outputs->filter_ctx = filt_src;
1551         outputs->pad_idx = 0;
1552         outputs->next    = NULL;
1553
1554         inputs->name    = av_strdup("out");
1555         inputs->filter_ctx = filt_format;
1556         inputs->pad_idx = 0;
1557         inputs->next    = NULL;
1558
1559         if ((ret = avfilter_graph_parse(graph, vfilters, inputs, outputs, NULL)) < 0)
1560             return ret;
1561     } else {
1562         if ((ret = avfilter_link(filt_src, 0, filt_format, 0)) < 0)
1563             return ret;
1564     }
1565
1566     if ((ret = avfilter_graph_config(graph, NULL)) < 0)
1567         return ret;
1568
1569     is->in_video_filter  = filt_src;
1570     is->out_video_filter = filt_out;
1571
1572     if (codec->codec->capabilities & CODEC_CAP_DR1) {
1573         is->use_dr1 = 1;
1574         codec->get_buffer     = codec_get_buffer;
1575         codec->release_buffer = codec_release_buffer;
1576         codec->opaque         = &is->buffer_pool;
1577     }
1578
1579     return ret;
1580 }
1581
1582 #endif  /* CONFIG_AVFILTER */
1583
1584 static int video_thread(void *arg)
1585 {
1586     AVPacket pkt = { 0 };
1587     VideoState *is = arg;
1588     AVFrame *frame = avcodec_alloc_frame();
1589     int64_t pts_int;
1590     double pts;
1591     int ret;
1592
1593 #if CONFIG_AVFILTER
1594     AVFilterGraph *graph = avfilter_graph_alloc();
1595     AVFilterContext *filt_out = NULL, *filt_in = NULL;
1596     int64_t pos;
1597     int last_w = is->video_st->codec->width;
1598     int last_h = is->video_st->codec->height;
1599
1600     if ((ret = configure_video_filters(graph, is, vfilters)) < 0)
1601         goto the_end;
1602     filt_in  = is->in_video_filter;
1603     filt_out = is->out_video_filter;
1604 #endif
1605
1606     for (;;) {
1607 #if CONFIG_AVFILTER
1608         AVFilterBufferRef *picref;
1609         AVRational tb;
1610 #endif
1611         while (is->paused && !is->videoq.abort_request)
1612             SDL_Delay(10);
1613
1614         av_free_packet(&pkt);
1615
1616         ret = get_video_frame(is, frame, &pts_int, &pkt);
1617         if (ret < 0)
1618             goto the_end;
1619
1620         if (!ret)
1621             continue;
1622
1623 #if CONFIG_AVFILTER
1624         if (   last_w != is->video_st->codec->width
1625             || last_h != is->video_st->codec->height) {
1626             av_dlog(NULL, "Changing size %dx%d -> %dx%d\n", last_w, last_h,
1627                     is->video_st->codec->width, is->video_st->codec->height);
1628             avfilter_graph_free(&graph);
1629             graph = avfilter_graph_alloc();
1630             if ((ret = configure_video_filters(graph, is, vfilters)) < 0)
1631                 goto the_end;
1632             filt_in  = is->in_video_filter;
1633             filt_out = is->out_video_filter;
1634             last_w = is->video_st->codec->width;
1635             last_h = is->video_st->codec->height;
1636         }
1637
1638         frame->pts = pts_int;
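             /* Hand the decoded frame to the filter source.  With direct rendering
              * (use_dr1) the frame data already lives in one of our pool buffers,
              * so it is wrapped in an AVFilterBufferRef pointing at the same planes
              * and pushed with av_buffersrc_buffer() without a copy; the pool
              * buffer's refcount is bumped and filter_release_buffer() returns it
              * once the graph is done with it.  Otherwise av_buffersrc_write_frame()
              * copies the frame data into the graph. */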
1639         if (is->use_dr1) {
1640             FrameBuffer      *buf = frame->opaque;
1641             AVFilterBufferRef *fb = avfilter_get_video_buffer_ref_from_arrays(
1642                                         frame->data, frame->linesize,
1643                                         AV_PERM_READ | AV_PERM_PRESERVE,
1644                                         frame->width, frame->height,
1645                                         frame->format);
1646
1647             avfilter_copy_frame_props(fb, frame);
1648             fb->buf->priv           = buf;
1649             fb->buf->free           = filter_release_buffer;
1650
1651             buf->refcount++;
1652             av_buffersrc_buffer(filt_in, fb);
1653
1654         } else
1655             av_buffersrc_write_frame(filt_in, frame);
1656
1657         while (ret >= 0) {
1658             ret = av_buffersink_read(filt_out, &picref);
1659             if (ret < 0) {
1660                 ret = 0;
1661                 break;
1662             }
1663
1664             avfilter_copy_buf_props(frame, picref);
1665
1666             pts_int = picref->pts;
1667             tb      = filt_out->inputs[0]->time_base;
1668             pos     = picref->pos;
1669             frame->opaque = picref;
1670
1671             if (av_cmp_q(tb, is->video_st->time_base)) {
1672                 av_unused int64_t pts1 = pts_int;
1673                 pts_int = av_rescale_q(pts_int, tb, is->video_st->time_base);
1674                 av_dlog(NULL, "video_thread(): "
1675                         "tb:%d/%d pts:%"PRId64" -> tb:%d/%d pts:%"PRId64"\n",
1676                         tb.num, tb.den, pts1,
1677                         is->video_st->time_base.num, is->video_st->time_base.den, pts_int);
1678             }
1679             pts = pts_int * av_q2d(is->video_st->time_base);
1680             ret = output_picture2(is, frame, pts, pos);
1681         }
1682 #else
1683         pts = pts_int * av_q2d(is->video_st->time_base);
1684         ret = output_picture2(is, frame, pts, pkt.pos);
1685 #endif
1686
1687         if (ret < 0)
1688             goto the_end;
1689
1690         if (step)
1691             if (cur_stream)
1692                 stream_pause(cur_stream);
1693     }
1694  the_end:
1695 #if CONFIG_AVFILTER
1696     av_freep(&vfilters);
1697     avfilter_graph_free(&graph);
1698 #endif
1699     av_free_packet(&pkt);
1700     avcodec_free_frame(&frame);
1701     return 0;
1702 }
1703
1704 static int subtitle_thread(void *arg)
1705 {
1706     VideoState *is = arg;
1707     SubPicture *sp;
1708     AVPacket pkt1, *pkt = &pkt1;
1709     int got_subtitle;
1710     double pts;
1711     int i, j;
1712     int r, g, b, y, u, v, a;
1713
1714     for (;;) {
1715         while (is->paused && !is->subtitleq.abort_request) {
1716             SDL_Delay(10);
1717         }
1718         if (packet_queue_get(&is->subtitleq, pkt, 1) < 0)
1719             break;
1720
1721         if (pkt->data == flush_pkt.data) {
1722             avcodec_flush_buffers(is->subtitle_st->codec);
1723             continue;
1724         }
1725         SDL_LockMutex(is->subpq_mutex);
1726         while (is->subpq_size >= SUBPICTURE_QUEUE_SIZE &&
1727                !is->subtitleq.abort_request) {
1728             SDL_CondWait(is->subpq_cond, is->subpq_mutex);
1729         }
1730         SDL_UnlockMutex(is->subpq_mutex);
1731
1732         if (is->subtitleq.abort_request)
1733             return 0;
1734
1735         sp = &is->subpq[is->subpq_windex];
1736
1737         /* NOTE: pts is the PTS of the _first_ subpicture beginning in
1738            this packet, if any */
1739         pts = 0;
1740         if (pkt->pts != AV_NOPTS_VALUE)
1741             pts = av_q2d(is->subtitle_st->time_base) * pkt->pts;
1742
1743         avcodec_decode_subtitle2(is->subtitle_st->codec, &sp->sub,
1744                                  &got_subtitle, pkt);
1745
1746         if (got_subtitle && sp->sub.format == 0) {
1747             sp->pts = pts;
1748
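                 /* Bitmap subtitles (sub.format == 0) carry an RGBA palette in
                  * pict.data[1]; convert each palette entry to CCIR YUVA in place
                  * so that the subpicture can later be blended directly onto the
                  * YUV video picture. */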
1749             for (i = 0; i < sp->sub.num_rects; i++)
1750             {
1751                 for (j = 0; j < sp->sub.rects[i]->nb_colors; j++)
1752                 {
1753                     RGBA_IN(r, g, b, a, (uint32_t*)sp->sub.rects[i]->pict.data[1] + j);
1754                     y = RGB_TO_Y_CCIR(r, g, b);
1755                     u = RGB_TO_U_CCIR(r, g, b, 0);
1756                     v = RGB_TO_V_CCIR(r, g, b, 0);
1757                     YUVA_OUT((uint32_t*)sp->sub.rects[i]->pict.data[1] + j, y, u, v, a);
1758                 }
1759             }
1760
1761             /* now we can update the picture count */
1762             if (++is->subpq_windex == SUBPICTURE_QUEUE_SIZE)
1763                 is->subpq_windex = 0;
1764             SDL_LockMutex(is->subpq_mutex);
1765             is->subpq_size++;
1766             SDL_UnlockMutex(is->subpq_mutex);
1767         }
1768         av_free_packet(pkt);
1769     }
1770     return 0;
1771 }
1772
1773 /* copy decoded samples into the circular sample_array used by the audio visualization display */
1774 static void update_sample_display(VideoState *is, short *samples, int samples_size)
1775 {
1776     int size, len;
1777
1778     size = samples_size / sizeof(short);
1779     while (size > 0) {
1780         len = SAMPLE_ARRAY_SIZE - is->sample_array_index;
1781         if (len > size)
1782             len = size;
1783         memcpy(is->sample_array + is->sample_array_index, samples, len * sizeof(short));
1784         samples += len;
1785         is->sample_array_index += len;
1786         if (is->sample_array_index >= SAMPLE_ARRAY_SIZE)
1787             is->sample_array_index = 0;
1788         size -= len;
1789     }
1790 }
1791
1792 /* return the new audio buffer size (samples can be added or removed
1793    to improve sync when video or an external clock is the master) */
1794 static int synchronize_audio(VideoState *is, short *samples,
1795                              int samples_size1, double pts)
1796 {
1797     int n, samples_size;
1798     double ref_clock;
1799
1800     n = is->sdl_channels * av_get_bytes_per_sample(is->sdl_sample_fmt);
1801     samples_size = samples_size1;
1802
1803     /* if not master, then we try to remove or add samples to correct the clock */
1804     if (((is->av_sync_type == AV_SYNC_VIDEO_MASTER && is->video_st) ||
1805          is->av_sync_type == AV_SYNC_EXTERNAL_CLOCK)) {
1806         double diff, avg_diff;
1807         int wanted_size, min_size, max_size, nb_samples;
1808
1809         ref_clock = get_master_clock(is);
1810         diff = get_audio_clock(is) - ref_clock;
1811
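             /* The A-V difference is smoothed with an exponential moving average:
              *     audio_diff_cum = diff + audio_diff_avg_coef * audio_diff_cum
              *     avg_diff       = audio_diff_cum * (1 - audio_diff_avg_coef)
              * With audio_diff_avg_coef = exp(log(0.01) / AUDIO_DIFF_AVG_NB), the
              * last AUDIO_DIFF_AVG_NB differences carry about 99% of the total
              * weight, so a single noisy measurement should not trigger a
              * correction on its own. */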
1812         if (fabs(diff) < AV_NOSYNC_THRESHOLD) {
1813             is->audio_diff_cum = diff + is->audio_diff_avg_coef * is->audio_diff_cum;
1814             if (is->audio_diff_avg_count < AUDIO_DIFF_AVG_NB) {
1815                 /* not enough measurements yet for a reliable estimate */
1816                 is->audio_diff_avg_count++;
1817             } else {
1818                 /* estimate the A-V difference */
1819                 avg_diff = is->audio_diff_cum * (1.0 - is->audio_diff_avg_coef);
1820
1821                 if (fabs(avg_diff) >= is->audio_diff_threshold) {
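                         /* diff seconds of drift correspond to
                          * diff * sdl_sample_rate sample frames, i.e.
                          * diff * sdl_sample_rate * n bytes; the result is then
                          * clamped to within +/- SAMPLE_CORRECTION_PERCENT_MAX
                          * percent of the original size so the playback speed is
                          * only nudged slightly. */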
1822                     wanted_size = samples_size + ((int)(diff * is->sdl_sample_rate) * n);
1823                     nb_samples = samples_size / n;
1824
1825                     min_size = ((nb_samples * (100 - SAMPLE_CORRECTION_PERCENT_MAX)) / 100) * n;
1826                     max_size = ((nb_samples * (100 + SAMPLE_CORRECTION_PERCENT_MAX)) / 100) * n;
1827                     if (wanted_size < min_size)
1828                         wanted_size = min_size;
1829                     else if (wanted_size > max_size)
1830                         wanted_size = max_size;
1831
1832                     /* add or remove samples to correct the sync */
1833                     if (wanted_size < samples_size) {
1834                         /* remove samples */
1835                         samples_size = wanted_size;
1836                     } else if (wanted_size > samples_size) {
1837                         uint8_t *samples_end, *q;
1838                         int nb;
1839
1840                         /* add samples */
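                              /* FIXME: as written nb is negative here (this branch
                               * has wanted_size > samples_size), so the duplication
                               * loop below never executes and samples_size is grown
                               * without the extra samples being written */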
1841                         nb = (samples_size - wanted_size);
1842                         samples_end = (uint8_t *)samples + samples_size - n;
1843                         q = samples_end + n;
1844                         while (nb > 0) {
1845                             memcpy(q, samples_end, n);
1846                             q += n;
1847                             nb -= n;
1848                         }
1849                         samples_size = wanted_size;
1850                     }
1851                 }
1852                 av_dlog(NULL, "diff=%f adiff=%f sample_diff=%d apts=%0.3f vpts=%0.3f %f\n",
1853                         diff, avg_diff, samples_size - samples_size1,
1854                         is->audio_clock, is->video_clock, is->audio_diff_threshold);
1855             }
1856         } else {
1857             /* the difference is too big: it may be caused by initial PTS
1858                errors, so reset the A-V filter */
1859             is->audio_diff_avg_count = 0;
1860             is->audio_diff_cum       = 0;
1861         }
1862     }
1863
1864     return samples_size;
1865 }
1866
1867 /* decode one audio frame and return its uncompressed size */
1868 static int audio_decode_frame(VideoState *is, double *pts_ptr)
1869 {
1870     AVPacket *pkt_temp = &is->audio_pkt_temp;
1871     AVPacket *pkt = &is->audio_pkt;
1872     AVCodecContext *dec = is->audio_st->codec;
1873     int n, len1, data_size, got_frame;
1874     double pts;
1875     int new_packet = 0;
1876     int flush_complete = 0;
1877
1878     for (;;) {
1879         /* NOTE: the audio packet can contain several frames */
1880         while (pkt_temp->size > 0 || (!pkt_temp->data && new_packet)) {
1881             int resample_changed, audio_resample;
1882
1883             if (!is->frame) {
1884                 if (!(is->frame = avcodec_alloc_frame()))
1885                     return AVERROR(ENOMEM);
1886             } else
1887                 avcodec_get_frame_defaults(is->frame);
1888
1889             if (flush_complete)
1890                 break;
1891             new_packet = 0;
1892             len1 = avcodec_decode_audio4(dec, is->frame, &got_frame, pkt_temp);
1893             if (len1 < 0) {
1894                 /* if error, we skip the frame */
1895                 pkt_temp->size = 0;
1896                 break;
1897             }
1898
1899             pkt_temp->data += len1;
1900             pkt_temp->size -= len1;
1901
1902             if (!got_frame) {
1903                 /* stop sending empty packets if the decoder is finished */
1904                 if (!pkt_temp->data && dec->codec->capabilities & CODEC_CAP_DELAY)
1905                     flush_complete = 1;
1906                 continue;
1907             }
1908             data_size = av_samples_get_buffer_size(NULL, dec->channels,
1909                                                    is->frame->nb_samples,
1910                                                    is->frame->format, 1);
1911
1912             audio_resample = is->frame->format         != is->sdl_sample_fmt     ||
1913                              is->frame->channel_layout != is->sdl_channel_layout ||
1914                              is->frame->sample_rate    != is->sdl_sample_rate;
1915
1916             resample_changed = is->frame->format         != is->resample_sample_fmt     ||
1917                                is->frame->channel_layout != is->resample_channel_layout ||
1918                                is->frame->sample_rate    != is->resample_sample_rate;
1919
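                 /* (re)configure the resampler lazily: only when the decoded format
                  * does not match the SDL output format (audio_resample), or when
                  * the source format changed mid-stream (resample_changed) */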
1920             if ((!is->avr && audio_resample) || resample_changed) {
1921                 int ret;
1922                 if (is->avr)
1923                     avresample_close(is->avr);
1924                 else if (audio_resample) {
1925                     is->avr = avresample_alloc_context();
1926                     if (!is->avr) {
1927                         fprintf(stderr, "error allocating AVAudioResampleContext\n");
1928                         break;
1929                     }
1930                 }
1931                 if (audio_resample) {
1932                     av_opt_set_int(is->avr, "in_channel_layout",  is->frame->channel_layout, 0);
1933                     av_opt_set_int(is->avr, "in_sample_fmt",      is->frame->format,         0);
1934                     av_opt_set_int(is->avr, "in_sample_rate",     is->frame->sample_rate,    0);
1935                     av_opt_set_int(is->avr, "out_channel_layout", is->sdl_channel_layout,    0);
1936                     av_opt_set_int(is->avr, "out_sample_fmt",     is->sdl_sample_fmt,        0);
1937                     av_opt_set_int(is->avr, "out_sample_rate",    is->sdl_sample_rate,       0);
1938
1939                     if ((ret = avresample_open(is->avr)) < 0) {
1940                         fprintf(stderr, "error initializing libavresample\n");
1941                         break;
1942                     }
1943                 }
1944                 is->resample_sample_fmt     = is->frame->format;
1945                 is->resample_channel_layout = is->frame->channel_layout;
1946                 is->resample_sample_rate    = is->frame->sample_rate;
1947             }
1948
1949             if (audio_resample) {
1950                 void *tmp_out;
1951                 int out_samples, out_size, out_linesize;
1952                 int osize      = av_get_bytes_per_sample(is->sdl_sample_fmt);
1953                 int nb_samples = is->frame->nb_samples;
1954
1955                 out_size = av_samples_get_buffer_size(&out_linesize,
1956                                                       is->sdl_channels,
1957                                                       nb_samples,
1958                                                       is->sdl_sample_fmt, 0);
1959                 tmp_out = av_realloc(is->audio_buf1, out_size);
1960                 if (!tmp_out)
1961                     return AVERROR(ENOMEM);
1962                 is->audio_buf1 = tmp_out;
1963
1964                 out_samples = avresample_convert(is->avr,
1965                                                  &is->audio_buf1,
1966                                                  out_linesize, nb_samples,
1967                                                  is->frame->data,
1968                                                  is->frame->linesize[0],
1969                                                  is->frame->nb_samples);
1970                 if (out_samples < 0) {
1971                     fprintf(stderr, "avresample_convert() failed\n");
1972                     break;
1973                 }
1974                 is->audio_buf = is->audio_buf1;
1975                 data_size = out_samples * osize * is->sdl_channels;
1976             } else {
1977                 is->audio_buf = is->frame->data[0];
1978             }
1979
1980             /* derive the frame pts from the running audio clock */
1981             pts = is->audio_clock;
1982             *pts_ptr = pts;
1983             n = is->sdl_channels * av_get_bytes_per_sample(is->sdl_sample_fmt);
1984             is->audio_clock += (double)data_size /
1985                 (double)(n * is->sdl_sample_rate);
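                 /* data_size bytes at n bytes per sample frame and sdl_sample_rate
                  * frames per second advance the clock by
                  * data_size / (n * sdl_sample_rate) seconds; e.g. assuming stereo
                  * S16 at 48 kHz (n = 4), 4096 bytes advance it by
                  * 4096 / (4 * 48000) ~= 21 ms. */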
1986 #ifdef DEBUG
1987             {
1988                 static double last_clock;
1989                 printf("audio: delay=%0.3f clock=%0.3f pts=%0.3f\n",
1990                        is->audio_clock - last_clock,
1991                        is->audio_clock, pts);
1992                 last_clock = is->audio_clock;
1993             }
1994 #endif
1995             return data_size;
1996         }
1997
1998         /* free the current packet */
1999         if (pkt->data)
2000             av_free_packet(pkt);
2001         memset(pkt_temp, 0, sizeof(*pkt_temp));
2002
2003         if (is->paused || is->audioq.abort_request) {
2004             return -1;
2005         }
2006
2007         /* read next packet */
2008         if ((new_packet = packet_queue_get(&is->audioq, pkt, 1)) < 0)
2009             return -1;
2010
2011         if (pkt->data == flush_pkt.data) {
2012             avcodec_flush_buffers(dec);
2013             flush_complete = 0;
2014         }
2015
2016         *pkt_temp = *pkt;
2017
2018         /* update the audio clock with the packet pts, if available */
2019         if (pkt->pts != AV_NOPTS_VALUE) {
2020             is->audio_clock = av_q2d(is->audio_st->time_base)*pkt->pts;
2021         }
2022     }
2023 }
2024
2025 /* prepare a new audio buffer */
2026 static void sdl_audio_callback(void *opaque, Uint8 *stream, int len)
2027 {
2028     VideoState *is = opaque;
2029     int audio_size, len1;
2030     double pts;
2031
2032     audio_callback_time = av_gettime();
2033
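         /* SDL requests exactly len bytes: keep refilling audio_buf via
          * audio_decode_frame()/synchronize_audio() and copying slices of it into
          * the SDL stream until the request is satisfied */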
2034     while (len > 0) {
2035         if (is->audio_buf_index >= is->audio_buf_size) {
2036             audio_size = audio_decode_frame(is, &pts);
2037             if (audio_size < 0) {
2038                 /* if error, just output silence */
2039                 is->audio_buf      = is->silence_buf;
2040                 is->audio_buf_size = sizeof(is->silence_buf);
2041             } else {
2042                 if (is->show_audio)
2043                     update_sample_display(is, (int16_t *)is->audio_buf, audio_size);
2044                 audio_size = synchronize_audio(is, (int16_t *)is->audio_buf, audio_size,
2045                                                pts);
2046                 is->audio_buf_size = audio_size;
2047             }
2048             is->audio_buf_index = 0;
2049         }
2050         len1 = is->audio_buf_size - is->audio_buf_index;
2051         if (len1 > len)
2052             len1 = len;
2053         memcpy(stream, (uint8_t *)is->audio_buf + is->audio_buf_index, len1);
2054         len -= len1;
2055         stream += len1;
2056         is->audio_buf_index += len1;
2057     }
2058 }
2059
2060 /* open a given stream. Return 0 if OK */
2061 static int stream_component_open(VideoState *is, int stream_index)
2062 {
2063     AVFormatContext *ic = is->ic;
2064     AVCodecContext *avctx;
2065     AVCodec *codec;
2066     SDL_AudioSpec wanted_spec, spec;
2067     AVDictionary *opts;
2068     AVDictionaryEntry *t = NULL;
2069
2070     if (stream_index < 0 || stream_index >= ic->nb_streams)
2071         return -1;
2072     avctx = ic->streams[stream_index]->codec;
2073
2074     opts = filter_codec_opts(codec_opts, avctx->codec_id, ic, ic->streams[stream_index], NULL);
2075
2076     codec = avcodec_find_decoder(avctx->codec_id);
2077     avctx->debug_mv          = debug_mv;
2078     avctx->debug             = debug;
2079     avctx->workaround_bugs   = workaround_bugs;
2080     avctx->idct_algo         = idct;
2081     avctx->skip_frame        = skip_frame;
2082     avctx->skip_idct         = skip_idct;
2083     avctx->skip_loop_filter  = skip_loop_filter;
2084     avctx->error_concealment = error_concealment;
2085
2086     if (fast)   avctx->flags2 |= CODEC_FLAG2_FAST;
2087
2088     if (!av_dict_get(opts, "threads", NULL, 0))
2089         av_dict_set(&opts, "threads", "auto", 0);
2090     if (!codec ||
2091         avcodec_open2(avctx, codec, &opts) < 0)
2092         return -1;
2093     if ((t = av_dict_get(opts, "", NULL, AV_DICT_IGNORE_SUFFIX))) {
2094         av_log(NULL, AV_LOG_ERROR, "Option %s not found.\n", t->key);
2095         return AVERROR_OPTION_NOT_FOUND;
2096     }
2097
2098     /* prepare audio output */
2099     if (avctx->codec_type == AVMEDIA_TYPE_AUDIO) {
2100         is->sdl_sample_rate = avctx->sample_rate;
2101
2102         if (!avctx->channel_layout)
2103             avctx->channel_layout = av_get_default_channel_layout(avctx->channels);
2104         if (!avctx->channel_layout) {
2105             fprintf(stderr, "unable to guess channel layout\n");
2106             return -1;
2107         }
2108         if (avctx->channels == 1)
2109             is->sdl_channel_layout = AV_CH_LAYOUT_MONO;
2110         else
2111             is->sdl_channel_layout = AV_CH_LAYOUT_STEREO;
2112         is->sdl_channels = av_get_channel_layout_nb_channels(is->sdl_channel_layout);
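             /* the SDL output side is limited to mono or stereo here; any other
              * source channel layout is converted to stereo by libavresample
              * during playback */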
2113
2114         wanted_spec.format = AUDIO_S16SYS;
2115         wanted_spec.freq = is->sdl_sample_rate;
2116         wanted_spec.channels = is->sdl_channels;
2117         wanted_spec.silence = 0;
2118         wanted_spec.samples = SDL_AUDIO_BUFFER_SIZE;
2119         wanted_spec.callback = sdl_audio_callback;
2120         wanted_spec.userdata = is;
2121         if (SDL_OpenAudio(&wanted_spec, &spec) < 0) {
2122             fprintf(stderr, "SDL_OpenAudio: %s\n", SDL_GetError());
2123             return -1;
2124         }
2125         is->audio_hw_buf_size = spec.size;
2126         is->sdl_sample_fmt          = AV_SAMPLE_FMT_S16;
2127         is->resample_sample_fmt     = is->sdl_sample_fmt;
2128         is->resample_channel_layout = avctx->channel_layout;
2129         is->resample_sample_rate    = avctx->sample_rate;
2130     }
2131
2132     ic->streams[stream_index]->discard = AVDISCARD_DEFAULT;
2133     switch (avctx->codec_type) {
2134     case AVMEDIA_TYPE_AUDIO:
2135         is->audio_stream = stream_index;
2136         is->audio_st = ic->streams[stream_index];
2137         is->audio_buf_size  = 0;
2138         is->audio_buf_index = 0;
2139
2140         /* init averaging filter */
2141         is->audio_diff_avg_coef  = exp(log(0.01) / AUDIO_DIFF_AVG_NB);
2142         is->audio_diff_avg_count = 0;
2143         /* since we do not have precise enough audio FIFO fullness information,
2144            we only correct audio sync if the error is larger than this threshold */
2145         is->audio_diff_threshold = 2.0 * SDL_AUDIO_BUFFER_SIZE / avctx->sample_rate;
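             /* i.e. twice the duration of one SDL audio buffer: with the 1024-sample
              * SDL_AUDIO_BUFFER_SIZE and e.g. 44100 Hz audio this is roughly
              * 2 * 1024 / 44100 ~= 46 ms */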
2146
2147         memset(&is->audio_pkt, 0, sizeof(is->audio_pkt));
2148         packet_queue_init(&is->audioq);
2149         SDL_PauseAudio(0);
2150         break;
2151     case AVMEDIA_TYPE_VIDEO:
2152         is->video_stream = stream_index;
2153         is->video_st = ic->streams[stream_index];
2154
2155         packet_queue_init(&is->videoq);
2156         is->video_tid = SDL_CreateThread(video_thread, is);
2157         break;
2158     case AVMEDIA_TYPE_SUBTITLE:
2159         is->subtitle_stream = stream_index;
2160         is->subtitle_st = ic->streams[stream_index];
2161         packet_queue_init(&is->subtitleq);
2162
2163         is->subtitle_tid = SDL_CreateThread(subtitle_thread, is);
2164         break;
2165     default:
2166         break;
2167     }
2168     return 0;
2169 }
2170
2171 static void stream_component_close(VideoState *is, int stream_index)
2172 {
2173     AVFormatContext *ic = is->ic;
2174     AVCodecContext *avctx;
2175
2176     if (stream_index < 0 || stream_index >= ic->nb_streams)
2177         return;
2178     avctx = ic->streams[stream_index]->codec;
2179
2180     switch (avctx->codec_type) {
2181     case AVMEDIA_TYPE_AUDIO:
2182         packet_queue_abort(&is->audioq);
2183
2184         SDL_CloseAudio();
2185
2186         packet_queue_end(&is->audioq);
2187         av_free_packet(&is->audio_pkt);
2188         if (is->avr)
2189             avresample_free(&is->avr);
2190         av_freep(&is->audio_buf1);
2191         is->audio_buf = NULL;
2192         avcodec_free_frame(&is->frame);
2193
2194         if (is->rdft) {
2195             av_rdft_end(is->rdft);
2196             av_freep(&is->rdft_data);
2197             is->rdft = NULL;
2198             is->rdft_bits = 0;
2199         }
2200         break;
2201     case AVMEDIA_TYPE_VIDEO:
2202         packet_queue_abort(&is->videoq);
2203
2204         /* note: we also signal this mutex to make sure we deblock the
2205            video thread in all cases */
2206         SDL_LockMutex(is->pictq_mutex);
2207         SDL_CondSignal(is->pictq_cond);
2208         SDL_UnlockMutex(is->pictq_mutex);
2209
2210         SDL_WaitThread(is->video_tid, NULL);
2211
2212         packet_queue_end(&is->videoq);
2213         break;
2214     case AVMEDIA_TYPE_SUBTITLE:
2215         packet_queue_abort(&is->subtitleq);
2216
2217         /* note: we also signal this mutex to make sure we unblock the
2218            subtitle thread in all cases */
2219         SDL_LockMutex(is->subpq_mutex);
2220         is->subtitle_stream_changed = 1;
2221
2222         SDL_CondSignal(is->subpq_cond);
2223         SDL_UnlockMutex(is->subpq_mutex);
2224
2225         SDL_WaitThread(is->subtitle_tid, NULL);
2226
2227         packet_queue_end(&is->subtitleq);
2228         break;
2229     default:
2230         break;
2231     }
2232
2233     ic->streams[stream_index]->discard = AVDISCARD_ALL;
2234     avcodec_close(avctx);
2235 #if CONFIG_AVFILTER
2236     free_buffer_pool(&is->buffer_pool);
2237 #endif
2238     switch (avctx->codec_type) {
2239     case AVMEDIA_TYPE_AUDIO:
2240         is->audio_st = NULL;
2241         is->audio_stream = -1;
2242         break;
2243     case AVMEDIA_TYPE_VIDEO:
2244         is->video_st = NULL;
2245         is->video_stream = -1;
2246         break;
2247     case AVMEDIA_TYPE_SUBTITLE:
2248         is->subtitle_st = NULL;
2249         is->subtitle_stream = -1;
2250         break;
2251     default:
2252         break;
2253     }
2254 }
2255
2256 /* since we have only one decoding thread, we can use a global
2257    variable instead of a thread local variable */
2258 static VideoState *global_video_state;
2259
2260 static int decode_interrupt_cb(void *ctx)
2261 {
2262     return global_video_state && global_video_state->abort_request;
2263 }
2264
2265 /* this thread gets the stream from the disk or the network */
2266 static int decode_thread(void *arg)
2267 {
2268     VideoState *is = arg;
2269     AVFormatContext *ic = NULL;
2270     int err, i, ret;
2271     int st_index[AVMEDIA_TYPE_NB];
2272     AVPacket pkt1, *pkt = &pkt1;
2273     int eof = 0;
2274     int pkt_in_play_range = 0;
2275     AVDictionaryEntry *t;
2276     AVDictionary **opts;
2277     int orig_nb_streams;
2278
2279     memset(st_index, -1, sizeof(st_index));
2280     is->video_stream = -1;
2281     is->audio_stream = -1;
2282     is->subtitle_stream = -1;
2283
2284     global_video_state = is;
2285
2286     ic = avformat_alloc_context();
2287     ic->interrupt_callback.callback = decode_interrupt_cb;
2288     err = avformat_open_input(&ic, is->filename, is->iformat, &format_opts);
2289     if (err < 0) {
2290         print_error(is->filename, err);
2291         ret = -1;
2292         goto fail;
2293     }
2294     if ((t = av_dict_get(format_opts, "", NULL, AV_DICT_IGNORE_SUFFIX))) {
2295         av_log(NULL, AV_LOG_ERROR, "Option %s not found.\n", t->key);
2296         ret = AVERROR_OPTION_NOT_FOUND;
2297         goto fail;
2298     }
2299     is->ic = ic;
2300
2301     if (genpts)
2302         ic->flags |= AVFMT_FLAG_GENPTS;
2303
2304     opts = setup_find_stream_info_opts(ic, codec_opts);
2305     orig_nb_streams = ic->nb_streams;
2306
2307     err = avformat_find_stream_info(ic, opts);
2308     if (err < 0) {
2309         fprintf(stderr, "%s: could not find codec parameters\n", is->filename);
2310         ret = -1;
2311         goto fail;
2312     }
2313     for (i = 0; i < orig_nb_streams; i++)
2314         av_dict_free(&opts[i]);
2315     av_freep(&opts);
2316
2317     if (ic->pb)
2318         ic->pb->eof_reached = 0; // FIXME hack, avplay maybe should not use url_feof() to test for the end
2319
2320     if (seek_by_bytes < 0)
2321         seek_by_bytes = !!(ic->iformat->flags & AVFMT_TS_DISCONT);
2322
2323     /* if seeking was requested, execute it now */
2324     if (start_time != AV_NOPTS_VALUE) {
2325         int64_t timestamp;
2326
2327         timestamp = start_time;
2328         /* add the stream start time */
2329         if (ic->start_time != AV_NOPTS_VALUE)
2330             timestamp += ic->start_time;
2331         ret = avformat_seek_file(ic, -1, INT64_MIN, timestamp, INT64_MAX, 0);
2332         if (ret < 0) {
2333             fprintf(stderr, "%s: could not seek to position %0.3f\n",
2334                     is->filename, (double)timestamp / AV_TIME_BASE);
2335         }
2336     }
2337
2338     for (i = 0; i < ic->nb_streams; i++)
2339         ic->streams[i]->discard = AVDISCARD_ALL;
2340     if (!video_disable)
2341         st_index[AVMEDIA_TYPE_VIDEO] =
2342             av_find_best_stream(ic, AVMEDIA_TYPE_VIDEO,
2343                                 wanted_stream[AVMEDIA_TYPE_VIDEO], -1, NULL, 0);
2344     if (!audio_disable)
2345         st_index[AVMEDIA_TYPE_AUDIO] =
2346             av_find_best_stream(ic, AVMEDIA_TYPE_AUDIO,
2347                                 wanted_stream[AVMEDIA_TYPE_AUDIO],
2348                                 st_index[AVMEDIA_TYPE_VIDEO],
2349                                 NULL, 0);
2350     if (!video_disable)
2351         st_index[AVMEDIA_TYPE_SUBTITLE] =
2352             av_find_best_stream(ic, AVMEDIA_TYPE_SUBTITLE,
2353                                 wanted_stream[AVMEDIA_TYPE_SUBTITLE],
2354                                 (st_index[AVMEDIA_TYPE_AUDIO] >= 0 ?
2355                                  st_index[AVMEDIA_TYPE_AUDIO] :
2356                                  st_index[AVMEDIA_TYPE_VIDEO]),
2357                                 NULL, 0);
2358     if (show_status) {
2359         av_dump_format(ic, 0, is->filename, 0);
2360     }
2361
2362     /* open the streams */
2363     if (st_index[AVMEDIA_TYPE_AUDIO] >= 0) {
2364         stream_component_open(is, st_index[AVMEDIA_TYPE_AUDIO]);
2365     }
2366
2367     ret = -1;
2368     if (st_index[AVMEDIA_TYPE_VIDEO] >= 0) {
2369         ret = stream_component_open(is, st_index[AVMEDIA_TYPE_VIDEO]);
2370     }
2371     is->refresh_tid = SDL_CreateThread(refresh_thread, is);
2372     if (ret < 0) {
2373         if (!display_disable)
2374             is->show_audio = 2;
2375     }
2376
2377     if (st_index[AVMEDIA_TYPE_SUBTITLE] >= 0) {
2378         stream_component_open(is, st_index[AVMEDIA_TYPE_SUBTITLE]);
2379     }
2380
2381     if (is->video_stream < 0 && is->audio_stream < 0) {
2382         fprintf(stderr, "%s: could not open codecs\n", is->filename);
2383         ret = -1;
2384         goto fail;
2385     }
2386
2387     for (;;) {
2388         if (is->abort_request)
2389             break;
2390         if (is->paused != is->last_paused) {
2391             is->last_paused = is->paused;
2392             if (is->paused)
2393                 is->read_pause_return = av_read_pause(ic);
2394             else
2395                 av_read_play(ic);
2396         }
2397 #if CONFIG_RTSP_DEMUXER
2398         if (is->paused && !strcmp(ic->iformat->name, "rtsp")) {
2399             /* wait 10 ms to avoid trying to get another packet */
2400             /* XXX: horrible */
2401             SDL_Delay(10);
2402             continue;
2403         }
2404 #endif
2405         if (is->seek_req) {
2406             int64_t seek_target = is->seek_pos;
2407             int64_t seek_min    = is->seek_rel > 0 ? seek_target - is->seek_rel + 2: INT64_MIN;
2408             int64_t seek_max    = is->seek_rel < 0 ? seek_target - is->seek_rel - 2: INT64_MAX;
2409 // FIXME the +-2 is needed because rounding is not done in the correct direction
2410 //       when the seek_pos/seek_rel variables are generated
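                 /* for a relative seek the [seek_min, seek_max] window excludes the
                  * previous position (seek_target - seek_rel), so a forward seek can
                  * only land after it and a backward seek only before it */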
2411
2412             ret = avformat_seek_file(is->ic, -1, seek_min, seek_target, seek_max, is->seek_flags);
2413             if (ret < 0) {
2414                 fprintf(stderr, "%s: error while seeking\n", is->ic->filename);
2415             } else {
2416                 if (is->audio_stream >= 0) {
2417                     packet_queue_flush(&is->audioq);
2418                     packet_queue_put(&is->audioq, &flush_pkt);
2419                 }
2420                 if (is->subtitle_stream >= 0) {
2421                     packet_queue_flush(&is->subtitleq);
2422                     packet_queue_put(&is->subtitleq, &flush_pkt);
2423                 }
2424                 if (is->video_stream >= 0) {
2425                     packet_queue_flush(&is->videoq);
2426                     packet_queue_put(&is->videoq, &flush_pkt);
2427                 }
2428             }
2429             is->seek_req = 0;
2430             eof = 0;
2431         }
2432
2433         /* if the queues are full, no need to read more */
2434         if (!infinite_buffer &&
2435               (is->audioq.size + is->videoq.size + is->subtitleq.size > MAX_QUEUE_SIZE
2436             || (   (is->audioq   .size  > MIN_AUDIOQ_SIZE || is->audio_stream < 0)
2437                 && (is->videoq   .nb_packets > MIN_FRAMES || is->video_stream < 0)
2438                 && (is->subtitleq.nb_packets > MIN_FRAMES || is->subtitle_stream < 0)))) {
2439             /* wait 10 ms */
2440             SDL_Delay(10);
2441             continue;
2442         }
2443         if (eof) {
2444             if (is->video_stream >= 0) {
2445                 av_init_packet(pkt);
2446                 pkt->data = NULL;
2447                 pkt->size = 0;
2448                 pkt->stream_index = is->video_stream;
2449                 packet_queue_put(&is->videoq, pkt);
2450             }
2451             if (is->audio_stream >= 0 &&
2452                 is->audio_st->codec->codec->capabilities & CODEC_CAP_DELAY) {
2453                 av_init_packet(pkt);
2454                 pkt->data = NULL;
2455                 pkt->size = 0;
2456                 pkt->stream_index = is->audio_stream;
2457                 packet_queue_put(&is->audioq, pkt);
2458             }
2459             SDL_Delay(10);
2460             if (is->audioq.size + is->videoq.size + is->subtitleq.size == 0) {
2461                 if (loop != 1 && (!loop || --loop)) {
2462                     stream_seek(cur_stream, start_time != AV_NOPTS_VALUE ? start_time : 0, 0, 0);
2463                 } else if (autoexit) {
2464                     ret = AVERROR_EOF;
2465                     goto fail;
2466                 }
2467             }
2468             continue;
2469         }
2470         ret = av_read_frame(ic, pkt);
2471         if (ret < 0) {
2472             if (ret == AVERROR_EOF || (ic->pb && ic->pb->eof_reached))
2473                 eof = 1;
2474             if (ic->pb && ic->pb->error)
2475                 break;
2476             SDL_Delay(100); /* wait for user event */
2477             continue;
2478         }
2479         /* check if packet is in play range specified by user, then queue, otherwise discard */
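             /* the comparison is done in seconds: pkt->pts is scaled by the stream
              * time base, while start_time and duration are in AV_TIME_BASE
              * (microsecond) units, hence the divisions by 1000000 */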
2480         pkt_in_play_range = duration == AV_NOPTS_VALUE ||
2481                 (pkt->pts - ic->streams[pkt->stream_index]->start_time) *
2482                 av_q2d(ic->streams[pkt->stream_index]->time_base) -
2483                 (double)(start_time != AV_NOPTS_VALUE ? start_time : 0) / 1000000
2484                 <= ((double)duration / 1000000);
2485         if (pkt->stream_index == is->audio_stream && pkt_in_play_range) {
2486             packet_queue_put(&is->audioq, pkt);
2487         } else if (pkt->stream_index == is->video_stream && pkt_in_play_range) {
2488             packet_queue_put(&is->videoq, pkt);
2489         } else if (pkt->stream_index == is->subtitle_stream && pkt_in_play_range) {
2490             packet_queue_put(&is->subtitleq, pkt);
2491         } else {
2492             av_free_packet(pkt);
2493         }
2494     }
2495     /* wait until the end */
2496     while (!is->abort_request) {
2497         SDL_Delay(100);
2498     }
2499
2500     ret = 0;
2501  fail:
2502     /* disable interrupting */
2503     global_video_state = NULL;
2504
2505     /* close each stream */
2506     if (is->audio_stream >= 0)
2507         stream_component_close(is, is->audio_stream);
2508     if (is->video_stream >= 0)
2509         stream_component_close(is, is->video_stream);
2510     if (is->subtitle_stream >= 0)
2511         stream_component_close(is, is->subtitle_stream);
2512     if (is->ic) {
2513         avformat_close_input(&is->ic);
2514     }
2515
2516     if (ret != 0) {
2517         SDL_Event event;
2518
2519         event.type = FF_QUIT_EVENT;
2520         event.user.data1 = is;
2521         SDL_PushEvent(&event);
2522     }
2523     return 0;
2524 }
2525
2526 static VideoState *stream_open(const char *filename, AVInputFormat *iformat)
2527 {
2528     VideoState *is;
2529
2530     is = av_mallocz(sizeof(VideoState));
2531     if (!is)
2532         return NULL;
2533     av_strlcpy(is->filename, filename, sizeof(is->filename));
2534     is->iformat = iformat;
2535     is->ytop    = 0;
2536     is->xleft   = 0;
2537
2538     /* start video display */
2539     is->pictq_mutex = SDL_CreateMutex();
2540     is->pictq_cond  = SDL_CreateCond();
2541
2542     is->subpq_mutex = SDL_CreateMutex();
2543     is->subpq_cond  = SDL_CreateCond();
2544
2545     is->av_sync_type = av_sync_type;
2546     is->parse_tid    = SDL_CreateThread(decode_thread, is);
2547     if (!is->parse_tid) {
2548         av_free(is);
2549         return NULL;
2550     }
2551     return is;
2552 }
2553
2554 static void stream_cycle_channel(VideoState *is, int codec_type)
2555 {
2556     AVFormatContext *ic = is->ic;
2557     int start_index, stream_index;
2558     AVStream *st;
2559
2560     if (codec_type == AVMEDIA_TYPE_VIDEO)
2561         start_index = is->video_stream;
2562     else if (codec_type == AVMEDIA_TYPE_AUDIO)
2563         start_index = is->audio_stream;
2564     else
2565         start_index = is->subtitle_stream;
2566     if (start_index < (codec_type == AVMEDIA_TYPE_SUBTITLE ? -1 : 0))
2567         return;
2568     stream_index = start_index;
2569     for (;;) {
2570         if (++stream_index >= is->ic->nb_streams)
2571         {
2572             if (codec_type == AVMEDIA_TYPE_SUBTITLE)
2573             {
2574                 stream_index = -1;
2575                 goto the_end;
2576             } else
2577                 stream_index = 0;
2578         }
2579         if (stream_index == start_index)
2580             return;
2581         st = ic->streams[stream_index];
2582         if (st->codec->codec_type == codec_type) {
2583             /* check that parameters are OK */
2584             switch (codec_type) {
2585             case AVMEDIA_TYPE_AUDIO:
2586                 if (st->codec->sample_rate != 0 &&
2587                     st->codec->channels != 0)
2588                     goto the_end;
2589                 break;
2590             case AVMEDIA_TYPE_VIDEO:
2591             case AVMEDIA_TYPE_SUBTITLE:
2592                 goto the_end;
2593             default:
2594                 break;
2595             }
2596         }
2597     }
2598  the_end:
2599     stream_component_close(is, start_index);
2600     stream_component_open(is, stream_index);
2601 }
2602
2603
2604 static void toggle_full_screen(void)
2605 {
2606 #if defined(__APPLE__) && SDL_VERSION_ATLEAST(1, 2, 14)
2607     /* OS X needs to empty the picture_queue */
2608     int i;
2609     for (i = 0; i < VIDEO_PICTURE_QUEUE_SIZE; i++)
2610         cur_stream->pictq[i].reallocate = 1;
2611 #endif
2612     is_full_screen = !is_full_screen;
2613     video_open(cur_stream);
2614 }
2615
2616 static void toggle_pause(void)
2617 {
2618     if (cur_stream)
2619         stream_pause(cur_stream);
2620     step = 0;
2621 }
2622
2623 static void step_to_next_frame(void)
2624 {
2625     if (cur_stream) {
2626         /* if the stream is paused, unpause it, then step */
2627         if (cur_stream->paused)
2628             stream_pause(cur_stream);
2629     }
2630     step = 1;
2631 }
2632
2633 static void toggle_audio_display(void)
2634 {
2635     if (cur_stream) {
2636         int bgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0x00);
2637         cur_stream->show_audio = (cur_stream->show_audio + 1) % 3;
2638         fill_rectangle(screen,
2639                        cur_stream->xleft, cur_stream->ytop, cur_stream->width, cur_stream->height,
2640                        bgcolor);
2641         SDL_UpdateRect(screen, cur_stream->xleft, cur_stream->ytop, cur_stream->width, cur_stream->height);
2642     }
2643 }
2644
2645 /* handle an event sent by the GUI */
2646 static void event_loop(void)
2647 {
2648     SDL_Event event;
2649     double incr, pos, frac;
2650
2651     for (;;) {
2652         double x;
2653         SDL_WaitEvent(&event);
2654         switch (event.type) {
2655         case SDL_KEYDOWN:
2656             if (exit_on_keydown) {
2657                 do_exit();
2658                 break;
2659             }
2660             switch (event.key.keysym.sym) {
2661             case SDLK_ESCAPE:
2662             case SDLK_q:
2663                 do_exit();
2664                 break;
2665             case SDLK_f:
2666                 toggle_full_screen();
2667                 break;
2668             case SDLK_p:
2669             case SDLK_SPACE:
2670                 toggle_pause();
2671                 break;
2672             case SDLK_s: // S: Step to next frame
2673                 step_to_next_frame();
2674                 break;
2675             case SDLK_a:
2676                 if (cur_stream)
2677                     stream_cycle_channel(cur_stream, AVMEDIA_TYPE_AUDIO);
2678                 break;
2679             case SDLK_v:
2680                 if (cur_stream)
2681                     stream_cycle_channel(cur_stream, AVMEDIA_TYPE_VIDEO);
2682                 break;
2683             case SDLK_t:
2684                 if (cur_stream)
2685                     stream_cycle_channel(cur_stream, AVMEDIA_TYPE_SUBTITLE);
2686                 break;
2687             case SDLK_w:
2688                 toggle_audio_display();
2689                 break;
2690             case SDLK_LEFT:
2691                 incr = -10.0;
2692                 goto do_seek;
2693             case SDLK_RIGHT:
2694                 incr = 10.0;
2695                 goto do_seek;
2696             case SDLK_UP:
2697                 incr = 60.0;
2698                 goto do_seek;
2699             case SDLK_DOWN:
2700                 incr = -60.0;
2701             do_seek:
2702                 if (cur_stream) {
2703                     if (seek_by_bytes) {
2704                         if (cur_stream->video_stream >= 0 && cur_stream->video_current_pos >= 0) {
2705                             pos = cur_stream->video_current_pos;
2706                         } else if (cur_stream->audio_stream >= 0 && cur_stream->audio_pkt.pos >= 0) {
2707                             pos = cur_stream->audio_pkt.pos;
2708                         } else
2709                             pos = avio_tell(cur_stream->ic->pb);
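                              /* convert the seek step from seconds to a byte offset
                               * using the container bit rate (bits/s -> bytes/s);
                               * the 180000 bytes/s fallback corresponds to an
                               * assumed ~1.44 Mbit/s stream */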
2710                         if (cur_stream->ic->bit_rate)
2711                             incr *= cur_stream->ic->bit_rate / 8.0;
2712                         else
2713                             incr *= 180000.0;
2714                         pos += incr;
2715                         stream_seek(cur_stream, pos, incr, 1);
2716                     } else {
2717                         pos = get_master_clock(cur_stream);
2718                         pos += incr;
2719                         stream_seek(cur_stream, (int64_t)(pos * AV_TIME_BASE), (int64_t)(incr * AV_TIME_BASE), 0);
2720                     }
2721                 }
2722                 break;
2723             default:
2724                 break;
2725             }
2726             break;
2727         case SDL_MOUSEBUTTONDOWN:
2728             if (exit_on_mousedown) {
2729                 do_exit();
2730                 break;
2731             }
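                 /* fall through: a mouse click is handled like a mouse motion
                  * event in the SDL_MOUSEMOTION case below */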
2732         case SDL_MOUSEMOTION:
2733             if (event.type == SDL_MOUSEBUTTONDOWN) {
2734                 x = event.button.x;
2735             } else {
2736                 if (event.motion.state != SDL_PRESSED)
2737                     break;
2738                 x = event.motion.x;
2739             }
2740             if (cur_stream) {
2741                 if (seek_by_bytes || cur_stream->ic->duration <= 0) {
2742                     uint64_t size = avio_size(cur_stream->ic->pb);
2743                     stream_seek(cur_stream, size*x/cur_stream->width, 0, 1);
2744                 } else {
2745                     int64_t ts;
2746                     int ns, hh, mm, ss;
2747                     int tns, thh, tmm, tss;
2748                     tns  = cur_stream->ic->duration / 1000000LL;
2749                     thh  = tns / 3600;
2750                     tmm  = (tns % 3600) / 60;
2751                     tss  = (tns % 60);
2752                     frac = x / cur_stream->width;
2753                     ns   = frac * tns;
2754                     hh   = ns / 3600;
2755                     mm   = (ns % 3600) / 60;
2756                     ss   = (ns % 60);
2757                     fprintf(stderr, "Seek to %2.0f%% (%2d:%02d:%02d) of total duration (%2d:%02d:%02d)       \n", frac*100,
2758                             hh, mm, ss, thh, tmm, tss);
2759                     ts = frac * cur_stream->ic->duration;
2760                     if (cur_stream->ic->start_time != AV_NOPTS_VALUE)
2761                         ts += cur_stream->ic->start_time;
2762                     stream_seek(cur_stream, ts, 0, 0);
2763                 }
2764             }
2765             break;
2766         case SDL_VIDEORESIZE:
2767             if (cur_stream) {
2768                 screen = SDL_SetVideoMode(event.resize.w, event.resize.h, 0,
2769                                           SDL_HWSURFACE|SDL_RESIZABLE|SDL_ASYNCBLIT|SDL_HWACCEL);
2770                 screen_width  = cur_stream->width  = event.resize.w;
2771                 screen_height = cur_stream->height = event.resize.h;
2772             }
2773             break;
2774         case SDL_QUIT:
2775         case FF_QUIT_EVENT:
2776             do_exit();
2777             break;
2778         case FF_ALLOC_EVENT:
2779             video_open(event.user.data1);
2780             alloc_picture(event.user.data1);
2781             break;
2782         case FF_REFRESH_EVENT:
2783             video_refresh_timer(event.user.data1);
2784             cur_stream->refresh = 0;
2785             break;
2786         default:
2787             break;
2788         }
2789     }
2790 }
2791
2792 static int opt_frame_size(void *optctx, const char *opt, const char *arg)
2793 {
2794     av_log(NULL, AV_LOG_ERROR,
2795            "Option '%s' has been removed, use private format options instead\n", opt);
2796     return AVERROR(EINVAL);
2797 }
2798
2799 static int opt_width(void *optctx, const char *opt, const char *arg)
2800 {
2801     screen_width = parse_number_or_die(opt, arg, OPT_INT64, 1, INT_MAX);
2802     return 0;
2803 }
2804
2805 static int opt_height(void *optctx, const char *opt, const char *arg)
2806 {
2807     screen_height = parse_number_or_die(opt, arg, OPT_INT64, 1, INT_MAX);
2808     return 0;
2809 }
2810
2811 static int opt_format(void *optctx, const char *opt, const char *arg)
2812 {
2813     file_iformat = av_find_input_format(arg);
2814     if (!file_iformat) {
2815         fprintf(stderr, "Unknown input format: %s\n", arg);
2816         return AVERROR(EINVAL);
2817     }
2818     return 0;
2819 }
2820
2821 static int opt_frame_pix_fmt(void *optctx, const char *opt, const char *arg)
2822 {
2823     av_log(NULL, AV_LOG_ERROR,
2824            "Option '%s' has been removed, use private format options instead\n", opt);
2825     return AVERROR(EINVAL);
2826 }
2827
2828 static int opt_sync(void *optctx, const char *opt, const char *arg)
2829 {
2830     if (!strcmp(arg, "audio"))
2831         av_sync_type = AV_SYNC_AUDIO_MASTER;
2832     else if (!strcmp(arg, "video"))
2833         av_sync_type = AV_SYNC_VIDEO_MASTER;
2834     else if (!strcmp(arg, "ext"))
2835         av_sync_type = AV_SYNC_EXTERNAL_CLOCK;
2836     else {
2837         fprintf(stderr, "Unknown value for %s: %s\n", opt, arg);
2838         exit(1);
2839     }
2840     return 0;
2841 }
2842
2843 static int opt_seek(void *optctx, const char *opt, const char *arg)
2844 {
2845     start_time = parse_time_or_die(opt, arg, 1);
2846     return 0;
2847 }
2848
2849 static int opt_duration(void *optctx, const char *opt, const char *arg)
2850 {
2851     duration = parse_time_or_die(opt, arg, 1);
2852     return 0;
2853 }
2854
2855 static int opt_debug(void *optctx, const char *opt, const char *arg)
2856 {
2857     av_log_set_level(99);
2858     debug = parse_number_or_die(opt, arg, OPT_INT64, 0, INT_MAX);
2859     return 0;
2860 }
2861
2862 static int opt_vismv(void *optctx, const char *opt, const char *arg)
2863 {
2864     debug_mv = parse_number_or_die(opt, arg, OPT_INT64, INT_MIN, INT_MAX);
2865     return 0;
2866 }
2867
2868 static const OptionDef options[] = {
2869 #include "cmdutils_common_opts.h"
2870     { "x", HAS_ARG, { .func_arg = opt_width }, "force displayed width", "width" },
2871     { "y", HAS_ARG, { .func_arg = opt_height }, "force displayed height", "height" },
2872     { "s", HAS_ARG | OPT_VIDEO, { .func_arg = opt_frame_size }, "set frame size (WxH or abbreviation)", "size" },
2873     { "fs", OPT_BOOL, { &is_full_screen }, "force full screen" },
2874     { "an", OPT_BOOL, { &audio_disable }, "disable audio" },
2875     { "vn", OPT_BOOL, { &video_disable }, "disable video" },
2876     { "ast", OPT_INT | HAS_ARG | OPT_EXPERT, { &wanted_stream[AVMEDIA_TYPE_AUDIO] }, "select desired audio stream", "stream_number" },
2877     { "vst", OPT_INT | HAS_ARG | OPT_EXPERT, { &wanted_stream[AVMEDIA_TYPE_VIDEO] }, "select desired video stream", "stream_number" },
2878     { "sst", OPT_INT | HAS_ARG | OPT_EXPERT, { &wanted_stream[AVMEDIA_TYPE_SUBTITLE] }, "select desired subtitle stream", "stream_number" },
2879     { "ss", HAS_ARG, { .func_arg = opt_seek }, "seek to a given position in seconds", "pos" },
2880     { "t", HAS_ARG, { .func_arg = opt_duration }, "play \"duration\" seconds of audio/video", "duration" },
2881     { "bytes", OPT_INT | HAS_ARG, { &seek_by_bytes }, "seek by bytes 0=off 1=on -1=auto", "val" },
2882     { "nodisp", OPT_BOOL, { &display_disable }, "disable graphical display" },
2883     { "f", HAS_ARG, { .func_arg = opt_format }, "force format", "fmt" },
2884     { "pix_fmt", HAS_ARG | OPT_EXPERT | OPT_VIDEO, { .func_arg = opt_frame_pix_fmt }, "set pixel format", "format" },
2885     { "stats", OPT_BOOL | OPT_EXPERT, { &show_status }, "show status", "" },
2886     { "debug", HAS_ARG | OPT_EXPERT, { .func_arg = opt_debug }, "print specific debug info", "" },
2887     { "bug", OPT_INT | HAS_ARG | OPT_EXPERT, { &workaround_bugs }, "workaround bugs", "" },
2888     { "vismv", HAS_ARG | OPT_EXPERT, { .func_arg = opt_vismv }, "visualize motion vectors", "" },
2889     { "fast", OPT_BOOL | OPT_EXPERT, { &fast }, "non spec compliant optimizations", "" },
2890     { "genpts", OPT_BOOL | OPT_EXPERT, { &genpts }, "generate pts", "" },
2891     { "drp", OPT_INT | HAS_ARG | OPT_EXPERT, { &decoder_reorder_pts }, "let decoder reorder pts 0=off 1=on -1=auto", ""},
2892     { "skiploop", OPT_INT | HAS_ARG | OPT_EXPERT, { &skip_loop_filter }, "", "" },
2893     { "skipframe", OPT_INT | HAS_ARG | OPT_EXPERT, { &skip_frame }, "", "" },
2894     { "skipidct", OPT_INT | HAS_ARG | OPT_EXPERT, { &skip_idct }, "", "" },
2895     { "idct", OPT_INT | HAS_ARG | OPT_EXPERT, { &idct }, "set idct algo",  "algo" },
2896     { "ec", OPT_INT | HAS_ARG | OPT_EXPERT, { &error_concealment }, "set error concealment options",  "bit_mask" },
2897     { "sync", HAS_ARG | OPT_EXPERT, { .func_arg = opt_sync }, "set audio-video sync. type (type=audio/video/ext)", "type" },
2898     { "autoexit", OPT_BOOL | OPT_EXPERT, { &autoexit }, "exit at the end", "" },
2899     { "exitonkeydown", OPT_BOOL | OPT_EXPERT, { &exit_on_keydown }, "exit on key down", "" },
2900     { "exitonmousedown", OPT_BOOL | OPT_EXPERT, { &exit_on_mousedown }, "exit on mouse down", "" },
2901     { "loop", OPT_INT | HAS_ARG | OPT_EXPERT, { &loop }, "set number of times the playback shall be looped", "loop count" },
2902     { "framedrop", OPT_BOOL | OPT_EXPERT, { &framedrop }, "drop frames when cpu is too slow", "" },
2903     { "infbuf", OPT_BOOL | OPT_EXPERT, { &infinite_buffer }, "don't limit the input buffer size (useful with realtime streams)", "" },
2904     { "window_title", OPT_STRING | HAS_ARG, { &window_title }, "set window title", "window title" },
2905 #if CONFIG_AVFILTER
2906     { "vf", OPT_STRING | HAS_ARG, { &vfilters }, "video filters", "filter list" },
2907 #endif
2908     { "rdftspeed", OPT_INT | HAS_ARG| OPT_AUDIO | OPT_EXPERT, { &rdftspeed }, "rdft speed", "msecs" },
2909     { "default", HAS_ARG | OPT_AUDIO | OPT_VIDEO | OPT_EXPERT, { opt_default }, "generic catch all option", "" },
2910     { "i", 0, { NULL }, "avconv compatibility dummy option", ""},
2911     { NULL, },
2912 };
2913
2914 static void show_usage(void)
2915 {
2916     printf("Simple media player\n");
2917     printf("usage: %s [options] input_file\n", program_name);
2918     printf("\n");
2919 }
2920
2921 void show_help_default(const char *opt, const char *arg)
2922 {
2923     av_log_set_callback(log_callback_help);
2924     show_usage();
2925     show_help_options(options, "Main options:", 0, OPT_EXPERT, 0);
2926     show_help_options(options, "Advanced options:", OPT_EXPERT, 0, 0);
2927     printf("\n");
2928     show_help_children(avcodec_get_class(), AV_OPT_FLAG_DECODING_PARAM);
2929     show_help_children(avformat_get_class(), AV_OPT_FLAG_DECODING_PARAM);
2930 #if !CONFIG_AVFILTER
2931     show_help_children(sws_get_class(), AV_OPT_FLAG_ENCODING_PARAM);
2932 #endif
2933     printf("\nWhile playing:\n"
2934            "q, ESC              quit\n"
2935            "f                   toggle full screen\n"
2936            "p, SPC              pause\n"
2937            "a                   cycle audio channel\n"
2938            "v                   cycle video channel\n"
2939            "t                   cycle subtitle channel\n"
2940            "w                   show audio waves\n"
2941            "s                   activate frame-step mode\n"
2942            "left/right          seek backward/forward 10 seconds\n"
2943            "down/up             seek backward/forward 1 minute\n"
2944            "mouse click         seek to percentage in file corresponding to fraction of width\n"
2945            );
2946 }
2947
2948 static void opt_input_file(void *optctx, const char *filename)
2949 {
2950     if (input_filename) {
2951         fprintf(stderr, "Argument '%s' provided as input filename, but '%s' was already specified.\n",
2952                 filename, input_filename);
2953         exit(1);
2954     }
2955     if (!strcmp(filename, "-"))
2956         filename = "pipe:";
2957     input_filename = filename;
2958 }
2959
2960 /* program entry point */
2961 int main(int argc, char **argv)
2962 {
2963     int flags;
2964
2965     av_log_set_flags(AV_LOG_SKIP_REPEATED);
2966     parse_loglevel(argc, argv, options);
2967
2968     /* register all codecs, demuxers and protocols */
2969     avcodec_register_all();
2970 #if CONFIG_AVDEVICE
2971     avdevice_register_all();
2972 #endif
2973 #if CONFIG_AVFILTER
2974     avfilter_register_all();
2975 #endif
2976     av_register_all();
2977     avformat_network_init();
2978
2979     init_opts();
2980
2981     show_banner();
2982
2983     parse_options(NULL, argc, argv, options, opt_input_file);
2984
2985     if (!input_filename) {
2986         show_usage();
2987         fprintf(stderr, "An input file must be specified\n");
2988         fprintf(stderr, "Use -h to get full help or, even better, run 'man %s'\n", program_name);
2989         exit(1);
2990     }
2991
2992     if (display_disable) {
2993         video_disable = 1;
2994     }
2995     flags = SDL_INIT_VIDEO | SDL_INIT_AUDIO | SDL_INIT_TIMER;
2996 #if !defined(__MINGW32__) && !defined(__APPLE__)
2997     flags |= SDL_INIT_EVENTTHREAD; /* Not supported on Windows or Mac OS X */
2998 #endif
2999     if (SDL_Init (flags)) {
3000         fprintf(stderr, "Could not initialize SDL - %s\n", SDL_GetError());
3001         exit(1);
3002     }
3003
3004     if (!display_disable) {
3005 #if HAVE_SDL_VIDEO_SIZE
3006         const SDL_VideoInfo *vi = SDL_GetVideoInfo();
3007         fs_screen_width = vi->current_w;
3008         fs_screen_height = vi->current_h;
3009 #endif
3010     }
3011
3012     SDL_EventState(SDL_ACTIVEEVENT, SDL_IGNORE);
3013     SDL_EventState(SDL_SYSWMEVENT, SDL_IGNORE);
3014     SDL_EventState(SDL_USEREVENT, SDL_IGNORE);
3015
3016     av_init_packet(&flush_pkt);
3017     flush_pkt.data = (uint8_t *)"FLUSH";
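         /* flush_pkt is a sentinel packet: after a seek it is pushed into every
          * packet queue, and the decoding threads compare pkt->data against
          * flush_pkt.data to know when to call avcodec_flush_buffers() */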
3018
3019     cur_stream = stream_open(input_filename, file_iformat);
3020
3021     event_loop();
3022
3023     /* not reached: event_loop() never returns */
3024
3025     return 0;
3026 }