1 /*
2  * avplay : Simple Media Player based on the Libav libraries
3  * Copyright (c) 2003 Fabrice Bellard
4  *
5  * This file is part of Libav.
6  *
7  * Libav is free software; you can redistribute it and/or
8  * modify it under the terms of the GNU Lesser General Public
9  * License as published by the Free Software Foundation; either
10  * version 2.1 of the License, or (at your option) any later version.
11  *
12  * Libav is distributed in the hope that it will be useful,
13  * but WITHOUT ANY WARRANTY; without even the implied warranty of
14  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
15  * Lesser General Public License for more details.
16  *
17  * You should have received a copy of the GNU Lesser General Public
18  * License along with Libav; if not, write to the Free Software
19  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
20  */
21
22 #include "config.h"
23 #include <inttypes.h>
24 #include <math.h>
25 #include <limits.h>
26 #include "libavutil/avstring.h"
27 #include "libavutil/colorspace.h"
28 #include "libavutil/mathematics.h"
29 #include "libavutil/pixdesc.h"
30 #include "libavutil/imgutils.h"
31 #include "libavutil/dict.h"
32 #include "libavutil/parseutils.h"
33 #include "libavutil/samplefmt.h"
34 #include "libavformat/avformat.h"
35 #include "libavdevice/avdevice.h"
36 #include "libswscale/swscale.h"
37 #include "libavresample/avresample.h"
38 #include "libavutil/opt.h"
39 #include "libavcodec/avfft.h"
40
41 #if CONFIG_AVFILTER
42 # include "libavfilter/avfilter.h"
43 # include "libavfilter/avfiltergraph.h"
44 # include "libavfilter/buffersink.h"
45 #endif
46
47 #include "cmdutils.h"
48
49 #include <SDL.h>
50 #include <SDL_thread.h>
51
52 #ifdef __MINGW32__
53 #undef main /* We don't want SDL to override our main() */
54 #endif
55
56 #include <unistd.h>
57 #include <assert.h>
58
59 const char program_name[] = "avplay";
60 const int program_birth_year = 2003;
61
62 #define MAX_QUEUE_SIZE (15 * 1024 * 1024)
63 #define MIN_AUDIOQ_SIZE (20 * 16 * 1024)
64 #define MIN_FRAMES 5
65
66 /* SDL audio buffer size, in samples. Should be small to have precise
67    A/V sync as SDL does not have hardware buffer fullness info. */
68 #define SDL_AUDIO_BUFFER_SIZE 1024
69
70 /* no AV sync correction is done if below the AV sync threshold */
71 #define AV_SYNC_THRESHOLD 0.01
72 /* no AV correction is done if the error is too big */
73 #define AV_NOSYNC_THRESHOLD 10.0
74
75 #define FRAME_SKIP_FACTOR 0.05
76
77 /* maximum audio speed change to get correct sync */
78 #define SAMPLE_CORRECTION_PERCENT_MAX 10
79
80 /* we use about AUDIO_DIFF_AVG_NB A-V differences to make the average */
81 #define AUDIO_DIFF_AVG_NB   20
82
83 /* NOTE: the size must be big enough to compensate for the hardware audio buffer size */
84 #define SAMPLE_ARRAY_SIZE (2 * 65536)
85
86 static int sws_flags = SWS_BICUBIC;
87
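/* thread-safe FIFO of demuxed packets, filled by the read thread and drained by the decoder threads */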
88 typedef struct PacketQueue {
89     AVPacketList *first_pkt, *last_pkt;
90     int nb_packets;
91     int size;
92     int abort_request;
93     SDL_mutex *mutex;
94     SDL_cond *cond;
95 } PacketQueue;
96
97 #define VIDEO_PICTURE_QUEUE_SIZE 2
98 #define SUBPICTURE_QUEUE_SIZE 4
99
100 typedef struct VideoPicture {
101     double pts;                                  ///< presentation time stamp for this picture
102     double target_clock;                         ///< av_gettime() time at which this should be displayed ideally
103     int64_t pos;                                 ///< byte position in file
104     SDL_Overlay *bmp;
105     int width, height; /* source height & width */
106     int allocated;
107     int reallocate;
108     enum PixelFormat pix_fmt;
109
110 #if CONFIG_AVFILTER
111     AVFilterBufferRef *picref;
112 #endif
113 } VideoPicture;
114
115 typedef struct SubPicture {
116     double pts; /* presentation time stamp for this picture */
117     AVSubtitle sub;
118 } SubPicture;
119
120 enum {
121     AV_SYNC_AUDIO_MASTER, /* default choice */
122     AV_SYNC_VIDEO_MASTER,
123     AV_SYNC_EXTERNAL_CLOCK, /* synchronize to an external clock */
124 };
125
126 typedef struct VideoState {
127     SDL_Thread *parse_tid;
128     SDL_Thread *video_tid;
129     SDL_Thread *refresh_tid;
130     AVInputFormat *iformat;
131     int no_background;
132     int abort_request;
133     int paused;
134     int last_paused;
135     int seek_req;
136     int seek_flags;
137     int64_t seek_pos;
138     int64_t seek_rel;
139     int read_pause_return;
140     AVFormatContext *ic;
141
142     int audio_stream;
143
144     int av_sync_type;
145     double external_clock; /* external clock base */
146     int64_t external_clock_time;
147
148     double audio_clock;
149     double audio_diff_cum; /* used for AV difference average computation */
150     double audio_diff_avg_coef;
151     double audio_diff_threshold;
152     int audio_diff_avg_count;
153     AVStream *audio_st;
154     PacketQueue audioq;
155     int audio_hw_buf_size;
156     uint8_t silence_buf[SDL_AUDIO_BUFFER_SIZE];
157     uint8_t *audio_buf;
158     uint8_t *audio_buf1;
159     unsigned int audio_buf_size; /* in bytes */
160     int audio_buf_index; /* in bytes */
161     AVPacket audio_pkt_temp;
162     AVPacket audio_pkt;
163     enum AVSampleFormat sdl_sample_fmt;
164     uint64_t sdl_channel_layout;
165     int sdl_channels;
166     enum AVSampleFormat resample_sample_fmt;
167     uint64_t resample_channel_layout;
168     AVAudioResampleContext *avr;
169     AVFrame *frame;
170
171     int show_audio; /* if true, display audio samples */
172     int16_t sample_array[SAMPLE_ARRAY_SIZE];
173     int sample_array_index;
174     int last_i_start;
175     RDFTContext *rdft;
176     int rdft_bits;
177     FFTSample *rdft_data;
178     int xpos;
179
180     SDL_Thread *subtitle_tid;
181     int subtitle_stream;
182     int subtitle_stream_changed;
183     AVStream *subtitle_st;
184     PacketQueue subtitleq;
185     SubPicture subpq[SUBPICTURE_QUEUE_SIZE];
186     int subpq_size, subpq_rindex, subpq_windex;
187     SDL_mutex *subpq_mutex;
188     SDL_cond *subpq_cond;
189
190     double frame_timer;
191     double frame_last_pts;
192     double frame_last_delay;
193     double video_clock;                          ///< pts of last decoded frame / predicted pts of next decoded frame
194     int video_stream;
195     AVStream *video_st;
196     PacketQueue videoq;
197     double video_current_pts;                    ///< current displayed pts (different from video_clock if frame fifos are used)
198     double video_current_pts_drift;              ///< video_current_pts - time (av_gettime) at which we updated video_current_pts - used to have running video pts
199     int64_t video_current_pos;                   ///< current displayed file pos
200     VideoPicture pictq[VIDEO_PICTURE_QUEUE_SIZE];
201     int pictq_size, pictq_rindex, pictq_windex;
202     SDL_mutex *pictq_mutex;
203     SDL_cond *pictq_cond;
204 #if !CONFIG_AVFILTER
205     struct SwsContext *img_convert_ctx;
206 #endif
207
208     //    QETimer *video_timer;
209     char filename[1024];
210     int width, height, xleft, ytop;
211
212     PtsCorrectionContext pts_ctx;
213
214 #if CONFIG_AVFILTER
215     AVFilterContext *out_video_filter;          ///< the last filter in the video chain
216 #endif
217
218     float skip_frames;
219     float skip_frames_index;
220     int refresh;
221 } VideoState;
222
223 static void show_help(void);
224
225 /* options specified by the user */
226 static AVInputFormat *file_iformat;
227 static const char *input_filename;
228 static const char *window_title;
229 static int fs_screen_width;
230 static int fs_screen_height;
231 static int screen_width  = 0;
232 static int screen_height = 0;
233 static int audio_disable;
234 static int video_disable;
235 static int wanted_stream[AVMEDIA_TYPE_NB] = {
236     [AVMEDIA_TYPE_AUDIO]    = -1,
237     [AVMEDIA_TYPE_VIDEO]    = -1,
238     [AVMEDIA_TYPE_SUBTITLE] = -1,
239 };
240 static int seek_by_bytes = -1;
241 static int display_disable;
242 static int show_status = 1;
243 static int av_sync_type = AV_SYNC_AUDIO_MASTER;
244 static int64_t start_time = AV_NOPTS_VALUE;
245 static int64_t duration = AV_NOPTS_VALUE;
246 static int debug = 0;
247 static int debug_mv = 0;
248 static int step = 0;
249 static int workaround_bugs = 1;
250 static int fast = 0;
251 static int genpts = 0;
252 static int idct = FF_IDCT_AUTO;
253 static enum AVDiscard skip_frame       = AVDISCARD_DEFAULT;
254 static enum AVDiscard skip_idct        = AVDISCARD_DEFAULT;
255 static enum AVDiscard skip_loop_filter = AVDISCARD_DEFAULT;
256 static int error_concealment = 3;
257 static int decoder_reorder_pts = -1;
258 static int autoexit;
259 static int exit_on_keydown;
260 static int exit_on_mousedown;
261 static int loop = 1;
262 static int framedrop = 1;
263
264 static int rdftspeed = 20;
265 #if CONFIG_AVFILTER
266 static char *vfilters = NULL;
267 #endif
268
269 /* current context */
270 static int is_full_screen;
271 static VideoState *cur_stream;
272 static int64_t audio_callback_time;
273
274 static AVPacket flush_pkt;
275
276 #define FF_ALLOC_EVENT   (SDL_USEREVENT)
277 #define FF_REFRESH_EVENT (SDL_USEREVENT + 1)
278 #define FF_QUIT_EVENT    (SDL_USEREVENT + 2)
279
280 static SDL_Surface *screen;
281
282 void exit_program(int ret)
283 {
284     exit(ret);
285 }
286
287 static int packet_queue_put(PacketQueue *q, AVPacket *pkt);
288
289 /* packet queue handling */
290 static void packet_queue_init(PacketQueue *q)
291 {
292     memset(q, 0, sizeof(PacketQueue));
293     q->mutex = SDL_CreateMutex();
294     q->cond = SDL_CreateCond();
295     packet_queue_put(q, &flush_pkt);
296 }
297
298 static void packet_queue_flush(PacketQueue *q)
299 {
300     AVPacketList *pkt, *pkt1;
301
302     SDL_LockMutex(q->mutex);
303     for (pkt = q->first_pkt; pkt != NULL; pkt = pkt1) {
304         pkt1 = pkt->next;
305         av_free_packet(&pkt->pkt);
306         av_freep(&pkt);
307     }
308     q->last_pkt = NULL;
309     q->first_pkt = NULL;
310     q->nb_packets = 0;
311     q->size = 0;
312     SDL_UnlockMutex(q->mutex);
313 }
314
315 static void packet_queue_end(PacketQueue *q)
316 {
317     packet_queue_flush(q);
318     SDL_DestroyMutex(q->mutex);
319     SDL_DestroyCond(q->cond);
320 }
321
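/* append a packet to the queue; returns 0 on success, -1 on failure */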
322 static int packet_queue_put(PacketQueue *q, AVPacket *pkt)
323 {
324     AVPacketList *pkt1;
325
326     /* duplicate the packet */
327     if (pkt != &flush_pkt && av_dup_packet(pkt) < 0)
328         return -1;
329
330     pkt1 = av_malloc(sizeof(AVPacketList));
331     if (!pkt1)
332         return -1;
333     pkt1->pkt = *pkt;
334     pkt1->next = NULL;
335
336
337     SDL_LockMutex(q->mutex);
338
339     if (!q->last_pkt)
341         q->first_pkt = pkt1;
342     else
343         q->last_pkt->next = pkt1;
344     q->last_pkt = pkt1;
345     q->nb_packets++;
346     q->size += pkt1->pkt.size + sizeof(*pkt1);
347     /* XXX: should duplicate packet data in DV case */
348     SDL_CondSignal(q->cond);
349
350     SDL_UnlockMutex(q->mutex);
351     return 0;
352 }
353
354 static void packet_queue_abort(PacketQueue *q)
355 {
356     SDL_LockMutex(q->mutex);
357
358     q->abort_request = 1;
359
360     SDL_CondSignal(q->cond);
361
362     SDL_UnlockMutex(q->mutex);
363 }
364
365 /* return < 0 if aborted, 0 if no packet and > 0 if packet.  */
366 static int packet_queue_get(PacketQueue *q, AVPacket *pkt, int block)
367 {
368     AVPacketList *pkt1;
369     int ret;
370
371     SDL_LockMutex(q->mutex);
372
373     for (;;) {
374         if (q->abort_request) {
375             ret = -1;
376             break;
377         }
378
379         pkt1 = q->first_pkt;
380         if (pkt1) {
381             q->first_pkt = pkt1->next;
382             if (!q->first_pkt)
383                 q->last_pkt = NULL;
384             q->nb_packets--;
385             q->size -= pkt1->pkt.size + sizeof(*pkt1);
386             *pkt = pkt1->pkt;
387             av_free(pkt1);
388             ret = 1;
389             break;
390         } else if (!block) {
391             ret = 0;
392             break;
393         } else {
394             SDL_CondWait(q->cond, q->mutex);
395         }
396     }
397     SDL_UnlockMutex(q->mutex);
398     return ret;
399 }
400
401 static inline void fill_rectangle(SDL_Surface *screen,
402                                   int x, int y, int w, int h, int color)
403 {
404     SDL_Rect rect;
405     rect.x = x;
406     rect.y = y;
407     rect.w = w;
408     rect.h = h;
409     SDL_FillRect(screen, &rect, color);
410 }
411
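/* fixed-point alpha blend of newp into oldp; the shift s lets newp hold a sum of 2^s samples when chroma is averaged */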
412 #define ALPHA_BLEND(a, oldp, newp, s)\
413 ((((oldp << s) * (255 - (a))) + (newp * (a))) / (255 << s))
414
415 #define RGBA_IN(r, g, b, a, s)\
416 {\
417     unsigned int v = ((const uint32_t *)(s))[0];\
418     a = (v >> 24) & 0xff;\
419     r = (v >> 16) & 0xff;\
420     g = (v >> 8) & 0xff;\
421     b = v & 0xff;\
422 }
423
424 #define YUVA_IN(y, u, v, a, s, pal)\
425 {\
426     unsigned int val = ((const uint32_t *)(pal))[*(const uint8_t*)(s)];\
427     a = (val >> 24) & 0xff;\
428     y = (val >> 16) & 0xff;\
429     u = (val >> 8) & 0xff;\
430     v = val & 0xff;\
431 }
432
433 #define YUVA_OUT(d, y, u, v, a)\
434 {\
435     ((uint32_t *)(d))[0] = (a << 24) | (y << 16) | (u << 8) | v;\
436 }
437
438
439 #define BPP 1
440
441 static void blend_subrect(AVPicture *dst, const AVSubtitleRect *rect, int imgw, int imgh)
442 {
443     int wrap, wrap3, width2, skip2;
444     int y, u, v, a, u1, v1, a1, w, h;
445     uint8_t *lum, *cb, *cr;
446     const uint8_t *p;
447     const uint32_t *pal;
448     int dstx, dsty, dstw, dsth;
449
450     dstw = av_clip(rect->w, 0, imgw);
451     dsth = av_clip(rect->h, 0, imgh);
452     dstx = av_clip(rect->x, 0, imgw - dstw);
453     dsty = av_clip(rect->y, 0, imgh - dsth);
454     lum = dst->data[0] + dsty * dst->linesize[0];
455     cb  = dst->data[1] + (dsty >> 1) * dst->linesize[1];
456     cr  = dst->data[2] + (dsty >> 1) * dst->linesize[2];
457
458     width2 = ((dstw + 1) >> 1) + (dstx & ~dstw & 1);
459     skip2 = dstx >> 1;
460     wrap = dst->linesize[0];
461     wrap3 = rect->pict.linesize[0];
462     p = rect->pict.data[0];
463     pal = (const uint32_t *)rect->pict.data[1];  /* Now in YCrCb! */
464
465     if (dsty & 1) {
466         lum += dstx;
467         cb += skip2;
468         cr += skip2;
469
470         if (dstx & 1) {
471             YUVA_IN(y, u, v, a, p, pal);
472             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
473             cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
474             cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
475             cb++;
476             cr++;
477             lum++;
478             p += BPP;
479         }
480         for (w = dstw - (dstx & 1); w >= 2; w -= 2) {
481             YUVA_IN(y, u, v, a, p, pal);
482             u1 = u;
483             v1 = v;
484             a1 = a;
485             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
486
487             YUVA_IN(y, u, v, a, p + BPP, pal);
488             u1 += u;
489             v1 += v;
490             a1 += a;
491             lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
492             cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
493             cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
494             cb++;
495             cr++;
496             p += 2 * BPP;
497             lum += 2;
498         }
499         if (w) {
500             YUVA_IN(y, u, v, a, p, pal);
501             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
502             cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
503             cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
504             p++;
505             lum++;
506         }
507         p += wrap3 - dstw * BPP;
508         lum += wrap - dstw - dstx;
509         cb += dst->linesize[1] - width2 - skip2;
510         cr += dst->linesize[2] - width2 - skip2;
511     }
512     for (h = dsth - (dsty & 1); h >= 2; h -= 2) {
513         lum += dstx;
514         cb += skip2;
515         cr += skip2;
516
517         if (dstx & 1) {
518             YUVA_IN(y, u, v, a, p, pal);
519             u1 = u;
520             v1 = v;
521             a1 = a;
522             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
523             p += wrap3;
524             lum += wrap;
525             YUVA_IN(y, u, v, a, p, pal);
526             u1 += u;
527             v1 += v;
528             a1 += a;
529             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
530             cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
531             cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
532             cb++;
533             cr++;
534             p += -wrap3 + BPP;
535             lum += -wrap + 1;
536         }
537         for (w = dstw - (dstx & 1); w >= 2; w -= 2) {
538             YUVA_IN(y, u, v, a, p, pal);
539             u1 = u;
540             v1 = v;
541             a1 = a;
542             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
543
544             YUVA_IN(y, u, v, a, p + BPP, pal);
545             u1 += u;
546             v1 += v;
547             a1 += a;
548             lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
549             p += wrap3;
550             lum += wrap;
551
552             YUVA_IN(y, u, v, a, p, pal);
553             u1 += u;
554             v1 += v;
555             a1 += a;
556             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
557
558             YUVA_IN(y, u, v, a, p + BPP, pal);
559             u1 += u;
560             v1 += v;
561             a1 += a;
562             lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
563
564             cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 2);
565             cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 2);
566
567             cb++;
568             cr++;
569             p += -wrap3 + 2 * BPP;
570             lum += -wrap + 2;
571         }
572         if (w) {
573             YUVA_IN(y, u, v, a, p, pal);
574             u1 = u;
575             v1 = v;
576             a1 = a;
577             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
578             p += wrap3;
579             lum += wrap;
580             YUVA_IN(y, u, v, a, p, pal);
581             u1 += u;
582             v1 += v;
583             a1 += a;
584             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
585             cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
586             cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
587             cb++;
588             cr++;
589             p += -wrap3 + BPP;
590             lum += -wrap + 1;
591         }
592         p += wrap3 + (wrap3 - dstw * BPP);
593         lum += wrap + (wrap - dstw - dstx);
594         cb += dst->linesize[1] - width2 - skip2;
595         cr += dst->linesize[2] - width2 - skip2;
596     }
597     /* handle odd height */
598     if (h) {
599         lum += dstx;
600         cb += skip2;
601         cr += skip2;
602
603         if (dstx & 1) {
604             YUVA_IN(y, u, v, a, p, pal);
605             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
606             cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
607             cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
608             cb++;
609             cr++;
610             lum++;
611             p += BPP;
612         }
613         for (w = dstw - (dstx & 1); w >= 2; w -= 2) {
614             YUVA_IN(y, u, v, a, p, pal);
615             u1 = u;
616             v1 = v;
617             a1 = a;
618             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
619
620             YUVA_IN(y, u, v, a, p + BPP, pal);
621             u1 += u;
622             v1 += v;
623             a1 += a;
624             lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
625             cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u, 1);
626             cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v, 1);
627             cb++;
628             cr++;
629             p += 2 * BPP;
630             lum += 2;
631         }
632         if (w) {
633             YUVA_IN(y, u, v, a, p, pal);
634             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
635             cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
636             cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
637         }
638     }
639 }
640
641 static void free_subpicture(SubPicture *sp)
642 {
643     avsubtitle_free(&sp->sub);
644 }
645
646 static void video_image_display(VideoState *is)
647 {
648     VideoPicture *vp;
649     SubPicture *sp;
650     AVPicture pict;
651     float aspect_ratio;
652     int width, height, x, y;
653     SDL_Rect rect;
654     int i;
655
656     vp = &is->pictq[is->pictq_rindex];
657     if (vp->bmp) {
658 #if CONFIG_AVFILTER
659          if (vp->picref->video->pixel_aspect.num == 0)
660              aspect_ratio = 0;
661          else
662              aspect_ratio = av_q2d(vp->picref->video->pixel_aspect);
663 #else
664
665         /* XXX: use the sample aspect ratio stored in the frame */
666         if (is->video_st->sample_aspect_ratio.num)
667             aspect_ratio = av_q2d(is->video_st->sample_aspect_ratio);
668         else if (is->video_st->codec->sample_aspect_ratio.num)
669             aspect_ratio = av_q2d(is->video_st->codec->sample_aspect_ratio);
670         else
671             aspect_ratio = 0;
672 #endif
673         if (aspect_ratio <= 0.0)
674             aspect_ratio = 1.0;
675         aspect_ratio *= (float)vp->width / (float)vp->height;
676
677         if (is->subtitle_st)
678         {
679             if (is->subpq_size > 0)
680             {
681                 sp = &is->subpq[is->subpq_rindex];
682
683                 if (vp->pts >= sp->pts + ((float) sp->sub.start_display_time / 1000))
684                 {
685                     SDL_LockYUVOverlay (vp->bmp);
686
687                     pict.data[0] = vp->bmp->pixels[0];
688                     pict.data[1] = vp->bmp->pixels[2];
689                     pict.data[2] = vp->bmp->pixels[1];
690
691                     pict.linesize[0] = vp->bmp->pitches[0];
692                     pict.linesize[1] = vp->bmp->pitches[2];
693                     pict.linesize[2] = vp->bmp->pitches[1];
694
695                     for (i = 0; i < sp->sub.num_rects; i++)
696                         blend_subrect(&pict, sp->sub.rects[i],
697                                       vp->bmp->w, vp->bmp->h);
698
699                     SDL_UnlockYUVOverlay (vp->bmp);
700                 }
701             }
702         }
703
704
705         /* XXX: we assume the screen has a 1:1 pixel aspect ratio */
706         height = is->height;
707         width = ((int)rint(height * aspect_ratio)) & ~1;
708         if (width > is->width) {
709             width = is->width;
710             height = ((int)rint(width / aspect_ratio)) & ~1;
711         }
712         x = (is->width - width) / 2;
713         y = (is->height - height) / 2;
714         is->no_background = 0;
715         rect.x = is->xleft + x;
716         rect.y = is->ytop  + y;
717         rect.w = width;
718         rect.h = height;
719         SDL_DisplayYUVOverlay(vp->bmp, &rect);
720     }
721 }
722
723 /* get the current audio output buffer size, in bytes. With SDL, we
724    cannot have precise information */
725 static int audio_write_get_buf_size(VideoState *is)
726 {
727     return is->audio_buf_size - is->audio_buf_index;
728 }
729
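/* mathematical modulo: the result is always in [0, b), even when a is negative */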
730 static inline int compute_mod(int a, int b)
731 {
732     a = a % b;
733     if (a >= 0)
734         return a;
735     else
736         return a + b;
737 }
738
739 static void video_audio_display(VideoState *s)
740 {
741     int i, i_start, x, y1, y, ys, delay, n, nb_display_channels;
742     int ch, channels, h, h2, bgcolor, fgcolor;
744     int64_t time_diff;
744     int rdft_bits, nb_freq;
745
746     for (rdft_bits = 1; (1 << rdft_bits) < 2 * s->height; rdft_bits++)
747         ;
748     nb_freq = 1 << (rdft_bits - 1);
749
750     /* compute display index: center on currently output samples */
751     channels = s->sdl_channels;
752     nb_display_channels = channels;
753     if (!s->paused) {
754         int data_used = s->show_audio == 1 ? s->width : (2 * nb_freq);
755         n = 2 * channels;
756         delay = audio_write_get_buf_size(s);
757         delay /= n;
758
759         /* to be more precise, we take into account the time spent since
760            the last buffer computation */
761         if (audio_callback_time) {
762             time_diff = av_gettime() - audio_callback_time;
763             delay -= (time_diff * s->audio_st->codec->sample_rate) / 1000000;
764         }
765
766         delay += 2 * data_used;
767         if (delay < data_used)
768             delay = data_used;
769
770         i_start= x = compute_mod(s->sample_array_index - delay * channels, SAMPLE_ARRAY_SIZE);
771         if (s->show_audio == 1) {
772             h = INT_MIN;
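            /* heuristic: anchor the display on a nearby zero crossing with a large swing,
               so the waveform stays roughly aligned between refreshes */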
773             for (i = 0; i < 1000; i += channels) {
774                 int idx = (SAMPLE_ARRAY_SIZE + x - i) % SAMPLE_ARRAY_SIZE;
775                 int a = s->sample_array[idx];
776                 int b = s->sample_array[(idx + 4 * channels) % SAMPLE_ARRAY_SIZE];
777                 int c = s->sample_array[(idx + 5 * channels) % SAMPLE_ARRAY_SIZE];
778                 int d = s->sample_array[(idx + 9 * channels) % SAMPLE_ARRAY_SIZE];
779                 int score = a - d;
780                 if (h < score && (b ^ c) < 0) {
781                     h = score;
782                     i_start = idx;
783                 }
784             }
785         }
786
787         s->last_i_start = i_start;
788     } else {
789         i_start = s->last_i_start;
790     }
791
792     bgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0x00);
793     if (s->show_audio == 1) {
794         fill_rectangle(screen,
795                        s->xleft, s->ytop, s->width, s->height,
796                        bgcolor);
797
798         fgcolor = SDL_MapRGB(screen->format, 0xff, 0xff, 0xff);
799
800         /* total height for one channel */
801         h = s->height / nb_display_channels;
802         /* graph height / 2 */
803         h2 = (h * 9) / 20;
804         for (ch = 0; ch < nb_display_channels; ch++) {
805             i = i_start + ch;
806             y1 = s->ytop + ch * h + (h / 2); /* position of center line */
807             for (x = 0; x < s->width; x++) {
808                 y = (s->sample_array[i] * h2) >> 15;
809                 if (y < 0) {
810                     y = -y;
811                     ys = y1 - y;
812                 } else {
813                     ys = y1;
814                 }
815                 fill_rectangle(screen,
816                                s->xleft + x, ys, 1, y,
817                                fgcolor);
818                 i += channels;
819                 if (i >= SAMPLE_ARRAY_SIZE)
820                     i -= SAMPLE_ARRAY_SIZE;
821             }
822         }
823
824         fgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0xff);
825
826         for (ch = 1; ch < nb_display_channels; ch++) {
827             y = s->ytop + ch * h;
828             fill_rectangle(screen,
829                            s->xleft, y, s->width, 1,
830                            fgcolor);
831         }
832         SDL_UpdateRect(screen, s->xleft, s->ytop, s->width, s->height);
833     } else {
834         nb_display_channels= FFMIN(nb_display_channels, 2);
835         if (rdft_bits != s->rdft_bits) {
836             av_rdft_end(s->rdft);
837             av_free(s->rdft_data);
838             s->rdft = av_rdft_init(rdft_bits, DFT_R2C);
839             s->rdft_bits = rdft_bits;
840             s->rdft_data = av_malloc(4 * nb_freq * sizeof(*s->rdft_data));
841         }
842         {
843             FFTSample *data[2];
844             for (ch = 0; ch < nb_display_channels; ch++) {
845                 data[ch] = s->rdft_data + 2 * nb_freq * ch;
846                 i = i_start + ch;
847                 for (x = 0; x < 2 * nb_freq; x++) {
848                     double w = (x-nb_freq) * (1.0 / nb_freq);
849                     data[ch][x] = s->sample_array[i] * (1.0 - w * w);
850                     i += channels;
851                     if (i >= SAMPLE_ARRAY_SIZE)
852                         i -= SAMPLE_ARRAY_SIZE;
853                 }
854                 av_rdft_calc(s->rdft, data[ch]);
855             }
856             // not the most efficient way to do this; we could access the data directly, but this is more than fast enough
857             for (y = 0; y < s->height; y++) {
858                 double w = 1 / sqrt(nb_freq);
859                 int a = sqrt(w * sqrt(data[0][2 * y + 0] * data[0][2 * y + 0] + data[0][2 * y + 1] * data[0][2 * y + 1]));
860                 int b = (nb_display_channels == 2 ) ? sqrt(w * sqrt(data[1][2 * y + 0] * data[1][2 * y + 0]
861                        + data[1][2 * y + 1] * data[1][2 * y + 1])) : a;
862                 a = FFMIN(a, 255);
863                 b = FFMIN(b, 255);
864                 fgcolor = SDL_MapRGB(screen->format, a, b, (a + b) / 2);
865
866                 fill_rectangle(screen,
867                             s->xpos, s->height-y, 1, 1,
868                             fgcolor);
869             }
870         }
871         SDL_UpdateRect(screen, s->xpos, s->ytop, 1, s->height);
872         s->xpos++;
873         if (s->xpos >= s->width)
874             s->xpos= s->xleft;
875     }
876 }
877
878 static int video_open(VideoState *is)
879 {
880     int flags = SDL_HWSURFACE | SDL_ASYNCBLIT | SDL_HWACCEL;
881     int w,h;
882
883     if (is_full_screen) flags |= SDL_FULLSCREEN;
884     else                flags |= SDL_RESIZABLE;
885
886     if (is_full_screen && fs_screen_width) {
887         w = fs_screen_width;
888         h = fs_screen_height;
889     } else if (!is_full_screen && screen_width) {
890         w = screen_width;
891         h = screen_height;
892 #if CONFIG_AVFILTER
893     } else if (is->out_video_filter && is->out_video_filter->inputs[0]) {
894         w = is->out_video_filter->inputs[0]->w;
895         h = is->out_video_filter->inputs[0]->h;
896 #else
897     } else if (is->video_st && is->video_st->codec->width) {
898         w = is->video_st->codec->width;
899         h = is->video_st->codec->height;
900 #endif
901     } else {
902         w = 640;
903         h = 480;
904     }
905     if (screen && is->width == screen->w && screen->w == w
906        && is->height== screen->h && screen->h == h)
907         return 0;
908
909 #if defined(__APPLE__) && !SDL_VERSION_ATLEAST(1, 2, 14)
910     /* setting bits_per_pixel = 0 or 32 causes blank video on OS X and older SDL */
911     screen = SDL_SetVideoMode(w, h, 24, flags);
912 #else
913     screen = SDL_SetVideoMode(w, h, 0, flags);
914 #endif
915     if (!screen) {
916         fprintf(stderr, "SDL: could not set video mode - exiting\n");
917         return -1;
918     }
919     if (!window_title)
920         window_title = input_filename;
921     SDL_WM_SetCaption(window_title, window_title);
922
923     is->width  = screen->w;
924     is->height = screen->h;
925
926     return 0;
927 }
928
929 /* display the current picture, if any */
930 static void video_display(VideoState *is)
931 {
932     if (!screen)
933         video_open(cur_stream);
934     if (is->audio_st && is->show_audio)
935         video_audio_display(is);
936     else if (is->video_st)
937         video_image_display(is);
938 }
939
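/* periodically push FF_REFRESH_EVENT so the event loop redraws the video or the audio visualization */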
940 static int refresh_thread(void *opaque)
941 {
942     VideoState *is= opaque;
943     while (!is->abort_request) {
944         SDL_Event event;
945         event.type = FF_REFRESH_EVENT;
946         event.user.data1 = opaque;
947         if (!is->refresh) {
948             is->refresh = 1;
949             SDL_PushEvent(&event);
950         }
951         usleep(is->audio_st && is->show_audio ? rdftspeed * 1000 : 5000); // FIXME ideally we should wait for the correct time, but SDL's event passing is so slow it would be silly
952     }
953     return 0;
954 }
955
956 /* get the current audio clock value */
957 static double get_audio_clock(VideoState *is)
958 {
959     double pts;
960     int hw_buf_size, bytes_per_sec;
961     pts = is->audio_clock;
962     hw_buf_size = audio_write_get_buf_size(is);
963     bytes_per_sec = 0;
964     if (is->audio_st) {
965         bytes_per_sec = is->audio_st->codec->sample_rate * is->sdl_channels *
966                         av_get_bytes_per_sample(is->sdl_sample_fmt);
967     }
968     if (bytes_per_sec)
969         pts -= (double)hw_buf_size / bytes_per_sec;
970     return pts;
971 }
972
973 /* get the current video clock value */
974 static double get_video_clock(VideoState *is)
975 {
976     if (is->paused) {
977         return is->video_current_pts;
978     } else {
979         return is->video_current_pts_drift + av_gettime() / 1000000.0;
980     }
981 }
982
983 /* get the current external clock value */
984 static double get_external_clock(VideoState *is)
985 {
986     int64_t ti;
987     ti = av_gettime();
988     return is->external_clock + ((ti - is->external_clock_time) * 1e-6);
989 }
990
991 /* get the current master clock value */
992 static double get_master_clock(VideoState *is)
993 {
994     double val;
995
996     if (is->av_sync_type == AV_SYNC_VIDEO_MASTER) {
997         if (is->video_st)
998             val = get_video_clock(is);
999         else
1000             val = get_audio_clock(is);
1001     } else if (is->av_sync_type == AV_SYNC_AUDIO_MASTER) {
1002         if (is->audio_st)
1003             val = get_audio_clock(is);
1004         else
1005             val = get_video_clock(is);
1006     } else {
1007         val = get_external_clock(is);
1008     }
1009     return val;
1010 }
1011
1012 /* seek in the stream */
1013 static void stream_seek(VideoState *is, int64_t pos, int64_t rel, int seek_by_bytes)
1014 {
1015     if (!is->seek_req) {
1016         is->seek_pos = pos;
1017         is->seek_rel = rel;
1018         is->seek_flags &= ~AVSEEK_FLAG_BYTE;
1019         if (seek_by_bytes)
1020             is->seek_flags |= AVSEEK_FLAG_BYTE;
1021         is->seek_req = 1;
1022     }
1023 }
1024
1025 /* pause or resume the video */
1026 static void stream_pause(VideoState *is)
1027 {
1028     if (is->paused) {
1029         is->frame_timer += av_gettime() / 1000000.0 + is->video_current_pts_drift - is->video_current_pts;
1030         if (is->read_pause_return != AVERROR(ENOSYS)) {
1031             is->video_current_pts = is->video_current_pts_drift + av_gettime() / 1000000.0;
1032         }
1033         is->video_current_pts_drift = is->video_current_pts - av_gettime() / 1000000.0;
1034     }
1035     is->paused = !is->paused;
1036 }
1037
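/* compute the time at which the given frame should be displayed; when video is not
   the master clock, the delay is adjusted to stay in sync with it */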
1038 static double compute_target_time(double frame_current_pts, VideoState *is)
1039 {
1040     double delay, sync_threshold, diff;
1041
1042     /* compute nominal delay */
1043     delay = frame_current_pts - is->frame_last_pts;
1044     if (delay <= 0 || delay >= 10.0) {
1045         /* if incorrect delay, use previous one */
1046         delay = is->frame_last_delay;
1047     } else {
1048         is->frame_last_delay = delay;
1049     }
1050     is->frame_last_pts = frame_current_pts;
1051
1052     /* update delay to follow master synchronisation source */
1053     if (((is->av_sync_type == AV_SYNC_AUDIO_MASTER && is->audio_st) ||
1054          is->av_sync_type == AV_SYNC_EXTERNAL_CLOCK)) {
1055         /* if video is slave, we try to correct big delays by
1056            duplicating or deleting a frame */
1057         diff = get_video_clock(is) - get_master_clock(is);
1058
1059         /* skip or repeat frame. We take into account the
1060            delay to compute the threshold. I still don't know
1061            if it is the best guess */
1062         sync_threshold = FFMAX(AV_SYNC_THRESHOLD, delay);
1063         if (fabs(diff) < AV_NOSYNC_THRESHOLD) {
1064             if (diff <= -sync_threshold)
1065                 delay = 0;
1066             else if (diff >= sync_threshold)
1067                 delay = 2 * delay;
1068         }
1069     }
1070     is->frame_timer += delay;
1071
1072     av_dlog(NULL, "video: delay=%0.3f pts=%0.3f A-V=%f\n",
1073             delay, frame_current_pts, -diff);
1074
1075     return is->frame_timer;
1076 }
1077
1078 /* called to display each frame */
1079 static void video_refresh_timer(void *opaque)
1080 {
1081     VideoState *is = opaque;
1082     VideoPicture *vp;
1083
1084     SubPicture *sp, *sp2;
1085
1086     if (is->video_st) {
1087 retry:
1088         if (is->pictq_size == 0) {
1089             // nothing to do, no picture to display in the queue
1090         } else {
1091             double time = av_gettime() / 1000000.0;
1092             double next_target;
1093             /* dequeue the picture */
1094             vp = &is->pictq[is->pictq_rindex];
1095
1096             if (time < vp->target_clock)
1097                 return;
1098             /* update current video pts */
1099             is->video_current_pts = vp->pts;
1100             is->video_current_pts_drift = is->video_current_pts - time;
1101             is->video_current_pos = vp->pos;
1102             if (is->pictq_size > 1) {
1103                 VideoPicture *nextvp = &is->pictq[(is->pictq_rindex + 1) % VIDEO_PICTURE_QUEUE_SIZE];
1104                 assert(nextvp->target_clock >= vp->target_clock);
1105                 next_target= nextvp->target_clock;
1106             } else {
1107                 next_target = vp->target_clock + is->video_clock - vp->pts; // FIXME pass durations cleanly
1108             }
1109             if (framedrop && time > next_target) {
1110                 is->skip_frames *= 1.0 + FRAME_SKIP_FACTOR;
1111                 if (is->pictq_size > 1 || time > next_target + 0.5) {
1112                     /* update queue size and signal for next picture */
1113                     if (++is->pictq_rindex == VIDEO_PICTURE_QUEUE_SIZE)
1114                         is->pictq_rindex = 0;
1115
1116                     SDL_LockMutex(is->pictq_mutex);
1117                     is->pictq_size--;
1118                     SDL_CondSignal(is->pictq_cond);
1119                     SDL_UnlockMutex(is->pictq_mutex);
1120                     goto retry;
1121                 }
1122             }
1123
1124             if (is->subtitle_st) {
1125                 if (is->subtitle_stream_changed) {
1126                     SDL_LockMutex(is->subpq_mutex);
1127
1128                     while (is->subpq_size) {
1129                         free_subpicture(&is->subpq[is->subpq_rindex]);
1130
1131                         /* update queue size and signal for next picture */
1132                         if (++is->subpq_rindex == SUBPICTURE_QUEUE_SIZE)
1133                             is->subpq_rindex = 0;
1134
1135                         is->subpq_size--;
1136                     }
1137                     is->subtitle_stream_changed = 0;
1138
1139                     SDL_CondSignal(is->subpq_cond);
1140                     SDL_UnlockMutex(is->subpq_mutex);
1141                 } else {
1142                     if (is->subpq_size > 0) {
1143                         sp = &is->subpq[is->subpq_rindex];
1144
1145                         if (is->subpq_size > 1)
1146                             sp2 = &is->subpq[(is->subpq_rindex + 1) % SUBPICTURE_QUEUE_SIZE];
1147                         else
1148                             sp2 = NULL;
1149
1150                         if ((is->video_current_pts > (sp->pts + ((float) sp->sub.end_display_time / 1000)))
1151                                 || (sp2 && is->video_current_pts > (sp2->pts + ((float) sp2->sub.start_display_time / 1000))))
1152                         {
1153                             free_subpicture(sp);
1154
1155                             /* update queue size and signal for next picture */
1156                             if (++is->subpq_rindex == SUBPICTURE_QUEUE_SIZE)
1157                                 is->subpq_rindex = 0;
1158
1159                             SDL_LockMutex(is->subpq_mutex);
1160                             is->subpq_size--;
1161                             SDL_CondSignal(is->subpq_cond);
1162                             SDL_UnlockMutex(is->subpq_mutex);
1163                         }
1164                     }
1165                 }
1166             }
1167
1168             /* display picture */
1169             if (!display_disable)
1170                 video_display(is);
1171
1172             /* update queue size and signal for next picture */
1173             if (++is->pictq_rindex == VIDEO_PICTURE_QUEUE_SIZE)
1174                 is->pictq_rindex = 0;
1175
1176             SDL_LockMutex(is->pictq_mutex);
1177             is->pictq_size--;
1178             SDL_CondSignal(is->pictq_cond);
1179             SDL_UnlockMutex(is->pictq_mutex);
1180         }
1181     } else if (is->audio_st) {
1182         /* draw the next audio frame */
1183
1184         /* if only audio stream, then display the audio bars (better
1185            than nothing, just to test the implementation) */
1186
1187         /* display picture */
1188         if (!display_disable)
1189             video_display(is);
1190     }
1191     if (show_status) {
1192         static int64_t last_time;
1193         int64_t cur_time;
1194         int aqsize, vqsize, sqsize;
1195         double av_diff;
1196
1197         cur_time = av_gettime();
1198         if (!last_time || (cur_time - last_time) >= 30000) {
1199             aqsize = 0;
1200             vqsize = 0;
1201             sqsize = 0;
1202             if (is->audio_st)
1203                 aqsize = is->audioq.size;
1204             if (is->video_st)
1205                 vqsize = is->videoq.size;
1206             if (is->subtitle_st)
1207                 sqsize = is->subtitleq.size;
1208             av_diff = 0;
1209             if (is->audio_st && is->video_st)
1210                 av_diff = get_audio_clock(is) - get_video_clock(is);
1211             printf("%7.2f A-V:%7.3f s:%3.1f aq=%5dKB vq=%5dKB sq=%5dB f=%"PRId64"/%"PRId64"   \r",
1212                    get_master_clock(is), av_diff, FFMAX(is->skip_frames - 1, 0), aqsize / 1024,
1213                    vqsize / 1024, sqsize, is->pts_ctx.num_faulty_dts, is->pts_ctx.num_faulty_pts);
1214             fflush(stdout);
1215             last_time = cur_time;
1216         }
1217     }
1218 }
1219
1220 static void stream_close(VideoState *is)
1221 {
1222     VideoPicture *vp;
1223     int i;
1224     /* XXX: use a special url_shutdown call to abort parse cleanly */
1225     is->abort_request = 1;
1226     SDL_WaitThread(is->parse_tid, NULL);
1227     SDL_WaitThread(is->refresh_tid, NULL);
1228
1229     /* free all pictures */
1230     for (i = 0; i < VIDEO_PICTURE_QUEUE_SIZE; i++) {
1231         vp = &is->pictq[i];
1232 #if CONFIG_AVFILTER
1233         if (vp->picref) {
1234             avfilter_unref_buffer(vp->picref);
1235             vp->picref = NULL;
1236         }
1237 #endif
1238         if (vp->bmp) {
1239             SDL_FreeYUVOverlay(vp->bmp);
1240             vp->bmp = NULL;
1241         }
1242     }
1243     SDL_DestroyMutex(is->pictq_mutex);
1244     SDL_DestroyCond(is->pictq_cond);
1245     SDL_DestroyMutex(is->subpq_mutex);
1246     SDL_DestroyCond(is->subpq_cond);
1247 #if !CONFIG_AVFILTER
1248     if (is->img_convert_ctx)
1249         sws_freeContext(is->img_convert_ctx);
1250 #endif
1251     av_free(is);
1252 }
1253
1254 static void do_exit(void)
1255 {
1256     if (cur_stream) {
1257         stream_close(cur_stream);
1258         cur_stream = NULL;
1259     }
1260     uninit_opts();
1261 #if CONFIG_AVFILTER
1262     avfilter_uninit();
1263 #endif
1264     avformat_network_deinit();
1265     if (show_status)
1266         printf("\n");
1267     SDL_Quit();
1268     av_log(NULL, AV_LOG_QUIET, "");
1269     exit(0);
1270 }
1271
1272 /* allocate a picture (this needs to be done in the main thread to avoid
1273    potential locking problems) */
1274 static void alloc_picture(void *opaque)
1275 {
1276     VideoState *is = opaque;
1277     VideoPicture *vp;
1278
1279     vp = &is->pictq[is->pictq_windex];
1280
1281     if (vp->bmp)
1282         SDL_FreeYUVOverlay(vp->bmp);
1283
1284 #if CONFIG_AVFILTER
1285     if (vp->picref)
1286         avfilter_unref_buffer(vp->picref);
1287     vp->picref = NULL;
1288
1289     vp->width   = is->out_video_filter->inputs[0]->w;
1290     vp->height  = is->out_video_filter->inputs[0]->h;
1291     vp->pix_fmt = is->out_video_filter->inputs[0]->format;
1292 #else
1293     vp->width   = is->video_st->codec->width;
1294     vp->height  = is->video_st->codec->height;
1295     vp->pix_fmt = is->video_st->codec->pix_fmt;
1296 #endif
1297
1298     vp->bmp = SDL_CreateYUVOverlay(vp->width, vp->height,
1299                                    SDL_YV12_OVERLAY,
1300                                    screen);
1301     if (!vp->bmp || vp->bmp->pitches[0] < vp->width) {
1302         /* SDL allocates a buffer smaller than requested if the video
1303          * overlay hardware is unable to support the requested size. */
1304         fprintf(stderr, "Error: the video system does not support an image\n"
1305                         "size of %dx%d pixels. Try using -vf \"scale=w:h\"\n"
1306                         "to reduce the image size.\n", vp->width, vp->height );
1307         do_exit();
1308     }
1309
1310     SDL_LockMutex(is->pictq_mutex);
1311     vp->allocated = 1;
1312     SDL_CondSignal(is->pictq_cond);
1313     SDL_UnlockMutex(is->pictq_mutex);
1314 }
1315
1316 /**
1317  * Queue a decoded frame for display.
1318  * @param pts the dts of the pkt / pts of the frame, guessed if not known
1319  */
1320 static int queue_picture(VideoState *is, AVFrame *src_frame, double pts, int64_t pos)
1321 {
1322     VideoPicture *vp;
1323 #if CONFIG_AVFILTER
1324     AVPicture pict_src;
1325 #else
1326     int dst_pix_fmt = PIX_FMT_YUV420P;
1327 #endif
1328     /* wait until we have space to put a new picture */
1329     SDL_LockMutex(is->pictq_mutex);
1330
1331     if (is->pictq_size >= VIDEO_PICTURE_QUEUE_SIZE && !is->refresh)
1332         is->skip_frames = FFMAX(1.0 - FRAME_SKIP_FACTOR, is->skip_frames * (1.0 - FRAME_SKIP_FACTOR));
1333
1334     while (is->pictq_size >= VIDEO_PICTURE_QUEUE_SIZE &&
1335            !is->videoq.abort_request) {
1336         SDL_CondWait(is->pictq_cond, is->pictq_mutex);
1337     }
1338     SDL_UnlockMutex(is->pictq_mutex);
1339
1340     if (is->videoq.abort_request)
1341         return -1;
1342
1343     vp = &is->pictq[is->pictq_windex];
1344
1345     /* alloc or resize hardware picture buffer */
1346     if (!vp->bmp || vp->reallocate ||
1347 #if CONFIG_AVFILTER
1348         vp->width  != is->out_video_filter->inputs[0]->w ||
1349         vp->height != is->out_video_filter->inputs[0]->h) {
1350 #else
1351         vp->width != is->video_st->codec->width ||
1352         vp->height != is->video_st->codec->height) {
1353 #endif
1354         SDL_Event event;
1355
1356         vp->allocated  = 0;
1357         vp->reallocate = 0;
1358
1359         /* the allocation must be done in the main thread to avoid
1360            locking problems */
1361         event.type = FF_ALLOC_EVENT;
1362         event.user.data1 = is;
1363         SDL_PushEvent(&event);
1364
1365         /* wait until the picture is allocated */
1366         SDL_LockMutex(is->pictq_mutex);
1367         while (!vp->allocated && !is->videoq.abort_request) {
1368             SDL_CondWait(is->pictq_cond, is->pictq_mutex);
1369         }
1370         SDL_UnlockMutex(is->pictq_mutex);
1371
1372         if (is->videoq.abort_request)
1373             return -1;
1374     }
1375
1376     /* if the frame is not skipped, then display it */
1377     if (vp->bmp) {
1378         AVPicture pict = { { 0 } };
1379 #if CONFIG_AVFILTER
1380         if (vp->picref)
1381             avfilter_unref_buffer(vp->picref);
1382         vp->picref = src_frame->opaque;
1383 #endif
1384
1385         /* get a pointer on the bitmap */
1386         SDL_LockYUVOverlay (vp->bmp);
1387
1388         pict.data[0] = vp->bmp->pixels[0];
1389         pict.data[1] = vp->bmp->pixels[2];
1390         pict.data[2] = vp->bmp->pixels[1];
1391
1392         pict.linesize[0] = vp->bmp->pitches[0];
1393         pict.linesize[1] = vp->bmp->pitches[2];
1394         pict.linesize[2] = vp->bmp->pitches[1];
1395
1396 #if CONFIG_AVFILTER
1397         pict_src.data[0] = src_frame->data[0];
1398         pict_src.data[1] = src_frame->data[1];
1399         pict_src.data[2] = src_frame->data[2];
1400
1401         pict_src.linesize[0] = src_frame->linesize[0];
1402         pict_src.linesize[1] = src_frame->linesize[1];
1403         pict_src.linesize[2] = src_frame->linesize[2];
1404
1405         // FIXME use direct rendering
1406         av_picture_copy(&pict, &pict_src,
1407                         vp->pix_fmt, vp->width, vp->height);
1408 #else
1409         sws_flags = av_get_int(sws_opts, "sws_flags", NULL);
1410         is->img_convert_ctx = sws_getCachedContext(is->img_convert_ctx,
1411             vp->width, vp->height, vp->pix_fmt, vp->width, vp->height,
1412             dst_pix_fmt, sws_flags, NULL, NULL, NULL);
1413         if (is->img_convert_ctx == NULL) {
1414             fprintf(stderr, "Cannot initialize the conversion context\n");
1415             exit(1);
1416         }
1417         sws_scale(is->img_convert_ctx, src_frame->data, src_frame->linesize,
1418                   0, vp->height, pict.data, pict.linesize);
1419 #endif
1420         /* update the bitmap content */
1421         SDL_UnlockYUVOverlay(vp->bmp);
1422
1423         vp->pts = pts;
1424         vp->pos = pos;
1425
1426         /* now we can update the picture count */
1427         if (++is->pictq_windex == VIDEO_PICTURE_QUEUE_SIZE)
1428             is->pictq_windex = 0;
1429         SDL_LockMutex(is->pictq_mutex);
1430         vp->target_clock = compute_target_time(vp->pts, is);
1431
1432         is->pictq_size++;
1433         SDL_UnlockMutex(is->pictq_mutex);
1434     }
1435     return 0;
1436 }
1437
1438 /**
1439  * compute the exact PTS for the picture if it is omitted in the stream
1440  * @param pts1 the dts of the pkt / pts of the frame
1441  */
1442 static int output_picture2(VideoState *is, AVFrame *src_frame, double pts1, int64_t pos)
1443 {
1444     double frame_delay, pts;
1445
1446     pts = pts1;
1447
1448     if (pts != 0) {
1449         /* update video clock with pts, if present */
1450         is->video_clock = pts;
1451     } else {
1452         pts = is->video_clock;
1453     }
1454     /* update video clock for next frame */
1455     frame_delay = av_q2d(is->video_st->codec->time_base);
1456     /* for MPEG2, the frame can be repeated, so we update the
1457        clock accordingly */
1458     frame_delay += src_frame->repeat_pict * (frame_delay * 0.5);
1459     is->video_clock += frame_delay;
1460
1461     return queue_picture(is, src_frame, pts, pos);
1462 }
1463
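/* get the next decoded video frame; returns 1 if a frame is ready, 0 if none was
   produced (or it was skipped), and -1 on abort */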
1464 static int get_video_frame(VideoState *is, AVFrame *frame, int64_t *pts, AVPacket *pkt)
1465 {
1466     int got_picture, i;
1467
1468     if (packet_queue_get(&is->videoq, pkt, 1) < 0)
1469         return -1;
1470
1471     if (pkt->data == flush_pkt.data) {
1472         avcodec_flush_buffers(is->video_st->codec);
1473
1474         SDL_LockMutex(is->pictq_mutex);
1475         // Make sure there are no long delay timers (ideally we should just flush the queue, but that's harder)
1476         for (i = 0; i < VIDEO_PICTURE_QUEUE_SIZE; i++) {
1477             is->pictq[i].target_clock= 0;
1478         }
1479         while (is->pictq_size && !is->videoq.abort_request) {
1480             SDL_CondWait(is->pictq_cond, is->pictq_mutex);
1481         }
1482         is->video_current_pos = -1;
1483         SDL_UnlockMutex(is->pictq_mutex);
1484
1485         init_pts_correction(&is->pts_ctx);
1486         is->frame_last_pts = AV_NOPTS_VALUE;
1487         is->frame_last_delay = 0;
1488         is->frame_timer = (double)av_gettime() / 1000000.0;
1489         is->skip_frames = 1;
1490         is->skip_frames_index = 0;
1491         return 0;
1492     }
1493
1494     avcodec_decode_video2(is->video_st->codec, frame, &got_picture, pkt);
1495
1496     if (got_picture) {
1497         if (decoder_reorder_pts == -1) {
1498             *pts = guess_correct_pts(&is->pts_ctx, frame->pkt_pts, frame->pkt_dts);
1499         } else if (decoder_reorder_pts) {
1500             *pts = frame->pkt_pts;
1501         } else {
1502             *pts = frame->pkt_dts;
1503         }
1504
1505         if (*pts == AV_NOPTS_VALUE) {
1506             *pts = 0;
1507         }
1508
1509         is->skip_frames_index += 1;
1510         if (is->skip_frames_index >= is->skip_frames) {
1511             is->skip_frames_index -= FFMAX(is->skip_frames, 1.0);
1512             return 1;
1513         }
1514
1515     }
1516     return 0;
1517 }
1518
1519 #if CONFIG_AVFILTER
1520 typedef struct {
1521     VideoState *is;
1522     AVFrame *frame;
1523     int use_dr1;
1524 } FilterPriv;
1525
1526 static int input_get_buffer(AVCodecContext *codec, AVFrame *pic)
1527 {
1528     AVFilterContext *ctx = codec->opaque;
1529     AVFilterBufferRef  *ref;
1530     int perms = AV_PERM_WRITE;
1531     int i, w, h, stride[AV_NUM_DATA_POINTERS];
1532     unsigned edge;
1533     int pixel_size;
1534
1535     if (codec->codec->capabilities & CODEC_CAP_NEG_LINESIZES)
1536         perms |= AV_PERM_NEG_LINESIZES;
1537
1538     if (pic->buffer_hints & FF_BUFFER_HINTS_VALID) {
1539         if (pic->buffer_hints & FF_BUFFER_HINTS_READABLE) perms |= AV_PERM_READ;
1540         if (pic->buffer_hints & FF_BUFFER_HINTS_PRESERVE) perms |= AV_PERM_PRESERVE;
1541         if (pic->buffer_hints & FF_BUFFER_HINTS_REUSABLE) perms |= AV_PERM_REUSE2;
1542     }
1543     if (pic->reference) perms |= AV_PERM_READ | AV_PERM_PRESERVE;
1544
1545     w = codec->width;
1546     h = codec->height;
1547     avcodec_align_dimensions2(codec, &w, &h, stride);
1548     edge = codec->flags & CODEC_FLAG_EMU_EDGE ? 0 : avcodec_get_edge_width();
1549     w += edge << 1;
1550     h += edge << 1;
1551
1552     if (!(ref = avfilter_get_video_buffer(ctx->outputs[0], perms, w, h)))
1553         return -1;
1554
1555     pixel_size = av_pix_fmt_descriptors[ref->format].comp[0].step_minus1 + 1;
1556     ref->video->w = codec->width;
1557     ref->video->h = codec->height;
1558     for (i = 0; i < 4; i ++) {
1559         unsigned hshift = (i == 1 || i == 2) ? av_pix_fmt_descriptors[ref->format].log2_chroma_w : 0;
1560         unsigned vshift = (i == 1 || i == 2) ? av_pix_fmt_descriptors[ref->format].log2_chroma_h : 0;
1561
1562         if (ref->data[i]) {
1563             ref->data[i]    += ((edge * pixel_size) >> hshift) + ((edge * ref->linesize[i]) >> vshift);
1564         }
1565         pic->data[i]     = ref->data[i];
1566         pic->linesize[i] = ref->linesize[i];
1567     }
1568     pic->opaque = ref;
1569     pic->type   = FF_BUFFER_TYPE_USER;
1570     pic->reordered_opaque = codec->reordered_opaque;
1571     pic->width               = codec->width;
1572     pic->height              = codec->height;
1573     pic->format              = codec->pix_fmt;
1574     pic->sample_aspect_ratio = codec->sample_aspect_ratio;
1575     if (codec->pkt) pic->pkt_pts = codec->pkt->pts;
1576     else            pic->pkt_pts = AV_NOPTS_VALUE;
1577     return 0;
1578 }
1579
1580 static void input_release_buffer(AVCodecContext *codec, AVFrame *pic)
1581 {
1582     memset(pic->data, 0, sizeof(pic->data));
1583     avfilter_unref_buffer(pic->opaque);
1584 }
1585
1586 static int input_reget_buffer(AVCodecContext *codec, AVFrame *pic)
1587 {
1588     AVFilterBufferRef *ref = pic->opaque;
1589
1590     if (pic->data[0] == NULL) {
1591         pic->buffer_hints |= FF_BUFFER_HINTS_READABLE;
1592         return codec->get_buffer(codec, pic);
1593     }
1594
1595     if ((codec->width != ref->video->w) || (codec->height != ref->video->h) ||
1596         (codec->pix_fmt != ref->format)) {
1597         av_log(codec, AV_LOG_ERROR, "Picture properties changed.\n");
1598         return -1;
1599     }
1600
1601     pic->reordered_opaque = codec->reordered_opaque;
1602     if (codec->pkt) pic->pkt_pts = codec->pkt->pts;
1603     else            pic->pkt_pts = AV_NOPTS_VALUE;
1604     return 0;
1605 }
1606
1607 static int input_init(AVFilterContext *ctx, const char *args, void *opaque)
1608 {
1609     FilterPriv *priv = ctx->priv;
1610     AVCodecContext *codec;
1611     if (!opaque) return -1;
1612
1613     priv->is = opaque;
1614     codec    = priv->is->video_st->codec;
1615     codec->opaque = ctx;
1616     if (codec->codec->capabilities & CODEC_CAP_DR1) {
1617         priv->use_dr1 = 1;
1618         codec->get_buffer     = input_get_buffer;
1619         codec->release_buffer = input_release_buffer;
1620         codec->reget_buffer   = input_reget_buffer;
1621         codec->thread_safe_callbacks = 1;
1622     }
1623
1624     priv->frame = avcodec_alloc_frame();
1625
1626     return 0;
1627 }
1628
1629 static void input_uninit(AVFilterContext *ctx)
1630 {
1631     FilterPriv *priv = ctx->priv;
1632     av_free(priv->frame);
1633 }
1634
1635 static int input_request_frame(AVFilterLink *link)
1636 {
1637     FilterPriv *priv = link->src->priv;
1638     AVFilterBufferRef *picref;
1639     int64_t pts = 0;
1640     AVPacket pkt;
1641     int ret;
1642
1643     while (!(ret = get_video_frame(priv->is, priv->frame, &pts, &pkt)))
1644         av_free_packet(&pkt);
1645     if (ret < 0)
1646         return -1;
1647
1648     if (priv->use_dr1) {
1649         picref = avfilter_ref_buffer(priv->frame->opaque, ~0);
1650     } else {
1651         picref = avfilter_get_video_buffer(link, AV_PERM_WRITE, link->w, link->h);
1652         av_image_copy(picref->data, picref->linesize,
1653                       priv->frame->data, priv->frame->linesize,
1654                       picref->format, link->w, link->h);
1655     }
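         /* in the DR1 case frame->opaque already holds the AVFilterBufferRef
            filled by input_get_buffer(), so only another reference is taken;
            otherwise the decoded picture is copied into a fresh buffer */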
1656     av_free_packet(&pkt);
1657
1658     avfilter_copy_frame_props(picref, priv->frame);
1659     picref->pts = pts;
1660
1661     avfilter_start_frame(link, picref);
1662     avfilter_draw_slice(link, 0, link->h, 1);
1663     avfilter_end_frame(link);
1664
1665     return 0;
1666 }
1667
1668 static int input_query_formats(AVFilterContext *ctx)
1669 {
1670     FilterPriv *priv = ctx->priv;
1671     enum PixelFormat pix_fmts[] = {
1672         priv->is->video_st->codec->pix_fmt, PIX_FMT_NONE
1673     };
1674
1675     avfilter_set_common_formats(ctx, avfilter_make_format_list(pix_fmts));
1676     return 0;
1677 }
1678
1679 static int input_config_props(AVFilterLink *link)
1680 {
1681     FilterPriv *priv  = link->src->priv;
1682     AVCodecContext *c = priv->is->video_st->codec;
1683
1684     link->w = c->width;
1685     link->h = c->height;
1686     link->time_base = priv->is->video_st->time_base;
1687
1688     return 0;
1689 }
1690
1691 static AVFilter input_filter =
1692 {
1693     .name      = "avplay_input",
1694
1695     .priv_size = sizeof(FilterPriv),
1696
1697     .init      = input_init,
1698     .uninit    = input_uninit,
1699
1700     .query_formats = input_query_formats,
1701
1702     .inputs    = (AVFilterPad[]) {{ .name = NULL }},
1703     .outputs   = (AVFilterPad[]) {{ .name = "default",
1704                                     .type = AVMEDIA_TYPE_VIDEO,
1705                                     .request_frame = input_request_frame,
1706                                     .config_props  = input_config_props, },
1707                                   { .name = NULL }},
1708 };
1709
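     /* Build the video filter graph:
      *   avplay_input -> [user chain from -vf, if any] -> format=yuv420p -> buffersink
      * The format filter pins the pixel format expected by the display code. */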
1710 static int configure_video_filters(AVFilterGraph *graph, VideoState *is, const char *vfilters)
1711 {
1712     char sws_flags_str[128];
1713     int ret;
1714     AVFilterContext *filt_src = NULL, *filt_out = NULL, *filt_format;
1715     snprintf(sws_flags_str, sizeof(sws_flags_str), "flags=%d", sws_flags);
1716     graph->scale_sws_opts = av_strdup(sws_flags_str);
1717
1718     if ((ret = avfilter_graph_create_filter(&filt_src, &input_filter, "src",
1719                                             NULL, is, graph)) < 0)
1720         return ret;
1721     if ((ret = avfilter_graph_create_filter(&filt_out,
1722                                             avfilter_get_by_name("buffersink"),
1723                                             "out", NULL, NULL, graph)) < 0)
1724         return ret;
1725
1726     if ((ret = avfilter_graph_create_filter(&filt_format,
1727                                             avfilter_get_by_name("format"),
1728                                             "format", "yuv420p", NULL, graph)) < 0)
1729         return ret;
1730     if ((ret = avfilter_link(filt_format, 0, filt_out, 0)) < 0)
1731         return ret;
1732
1733
1734     if (vfilters) {
1735         AVFilterInOut *outputs = avfilter_inout_alloc();
1736         AVFilterInOut *inputs  = avfilter_inout_alloc();
1737
1738         outputs->name    = av_strdup("in");
1739         outputs->filter_ctx = filt_src;
1740         outputs->pad_idx = 0;
1741         outputs->next    = NULL;
1742
1743         inputs->name    = av_strdup("out");
1744         inputs->filter_ctx = filt_format;
1745         inputs->pad_idx = 0;
1746         inputs->next    = NULL;
1747
1748         if ((ret = avfilter_graph_parse(graph, vfilters, inputs, outputs, NULL)) < 0)
1749             return ret;
1750     } else {
1751         if ((ret = avfilter_link(filt_src, 0, filt_format, 0)) < 0)
1752             return ret;
1753     }
1754
1755     if ((ret = avfilter_graph_config(graph, NULL)) < 0)
1756         return ret;
1757
1758     is->out_video_filter = filt_out;
1759
1760     return ret;
1761 }
1762
1763 #endif  /* CONFIG_AVFILTER */
1764
1765 static int video_thread(void *arg)
1766 {
1767     VideoState *is = arg;
1768     AVFrame *frame = avcodec_alloc_frame();
1769     int64_t pts_int;
1770     double pts;
1771     int ret;
1772
1773 #if CONFIG_AVFILTER
1774     AVFilterGraph *graph = avfilter_graph_alloc();
1775     AVFilterContext *filt_out = NULL;
1776     int64_t pos;
1777     int last_w = is->video_st->codec->width;
1778     int last_h = is->video_st->codec->height;
1779
1780     if ((ret = configure_video_filters(graph, is, vfilters)) < 0)
1781         goto the_end;
1782     filt_out = is->out_video_filter;
1783 #endif
1784
1785     for (;;) {
1786 #if !CONFIG_AVFILTER
1787         AVPacket pkt;
1788 #else
1789         AVFilterBufferRef *picref;
1790         AVRational tb;
1791 #endif
1792         while (is->paused && !is->videoq.abort_request)
1793             SDL_Delay(10);
1794 #if CONFIG_AVFILTER
1795         if (   last_w != is->video_st->codec->width
1796             || last_h != is->video_st->codec->height) {
1797             av_dlog(NULL, "Changing size %dx%d -> %dx%d\n", last_w, last_h,
1798                     is->video_st->codec->width, is->video_st->codec->height);
1799             avfilter_graph_free(&graph);
1800             graph = avfilter_graph_alloc();
1801             if ((ret = configure_video_filters(graph, is, vfilters)) < 0)
1802                 goto the_end;
1803             filt_out = is->out_video_filter;
1804             last_w = is->video_st->codec->width;
1805             last_h = is->video_st->codec->height;
1806         }
1807         ret = av_buffersink_read(filt_out, &picref);
1808         if (picref) {
1809             avfilter_copy_buf_props(frame, picref);
1810
1811             pts_int = picref->pts;
1812             tb      = filt_out->inputs[0]->time_base;
1813             pos     = picref->pos;
1814             frame->opaque = picref;
1815
1816             ret = 1;
1817         }
1818
1819         if (ret >= 0 && av_cmp_q(tb, is->video_st->time_base)) {
1820             av_unused int64_t pts1 = pts_int;
1821             pts_int = av_rescale_q(pts_int, tb, is->video_st->time_base);
1822             av_dlog(NULL, "video_thread(): "
1823                     "tb:%d/%d pts:%"PRId64" -> tb:%d/%d pts:%"PRId64"\n",
1824                     tb.num, tb.den, pts1,
1825                     is->video_st->time_base.num, is->video_st->time_base.den, pts_int);
1826         }
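             /* picref->pts is expressed in the filter link's time base; the
                rescale above converts it to the stream time base.  For
                example, with tb = 1/25 and a stream time base of 1/90000,
                a pts of 100 (4 seconds) becomes 100 * 90000 / 25 = 360000. */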
1827 #else
1828         ret = get_video_frame(is, frame, &pts_int, &pkt);
1829 #endif
1830
1831         if (ret < 0)
1832             goto the_end;
1833
1834         if (!ret)
1835             continue;
1836
1837         pts = pts_int * av_q2d(is->video_st->time_base);
1838
1839 #if CONFIG_AVFILTER
1840         ret = output_picture2(is, frame, pts, pos);
1841 #else
1842         ret = output_picture2(is, frame, pts,  pkt.pos);
1843         av_free_packet(&pkt);
1844 #endif
1845         if (ret < 0)
1846             goto the_end;
1847
1848         if (step)
1849             if (cur_stream)
1850                 stream_pause(cur_stream);
1851     }
1852  the_end:
1853 #if CONFIG_AVFILTER
1854     av_freep(&vfilters);
1855     avfilter_graph_free(&graph);
1856 #endif
1857     av_free(frame);
1858     return 0;
1859 }
1860
1861 static int subtitle_thread(void *arg)
1862 {
1863     VideoState *is = arg;
1864     SubPicture *sp;
1865     AVPacket pkt1, *pkt = &pkt1;
1866     int got_subtitle;
1867     double pts;
1868     int i, j;
1869     int r, g, b, y, u, v, a;
1870
1871     for (;;) {
1872         while (is->paused && !is->subtitleq.abort_request) {
1873             SDL_Delay(10);
1874         }
1875         if (packet_queue_get(&is->subtitleq, pkt, 1) < 0)
1876             break;
1877
1878         if (pkt->data == flush_pkt.data) {
1879             avcodec_flush_buffers(is->subtitle_st->codec);
1880             continue;
1881         }
1882         SDL_LockMutex(is->subpq_mutex);
1883         while (is->subpq_size >= SUBPICTURE_QUEUE_SIZE &&
1884                !is->subtitleq.abort_request) {
1885             SDL_CondWait(is->subpq_cond, is->subpq_mutex);
1886         }
1887         SDL_UnlockMutex(is->subpq_mutex);
1888
1889         if (is->subtitleq.abort_request)
1890             return 0;
1891
1892         sp = &is->subpq[is->subpq_windex];
1893
1894         /* NOTE: pts is the PTS of the _first_ subtitle beginning in
1895            this packet, if any */
1896         pts = 0;
1897         if (pkt->pts != AV_NOPTS_VALUE)
1898             pts = av_q2d(is->subtitle_st->time_base) * pkt->pts;
1899
1900         avcodec_decode_subtitle2(is->subtitle_st->codec, &sp->sub,
1901                                  &got_subtitle, pkt);
1902
1903         if (got_subtitle && sp->sub.format == 0) {
1904             sp->pts = pts;
1905
1906             for (i = 0; i < sp->sub.num_rects; i++)
1907             {
1908                 for (j = 0; j < sp->sub.rects[i]->nb_colors; j++)
1909                 {
1910                     RGBA_IN(r, g, b, a, (uint32_t*)sp->sub.rects[i]->pict.data[1] + j);
1911                     y = RGB_TO_Y_CCIR(r, g, b);
1912                     u = RGB_TO_U_CCIR(r, g, b, 0);
1913                     v = RGB_TO_V_CCIR(r, g, b, 0);
1914                     YUVA_OUT((uint32_t*)sp->sub.rects[i]->pict.data[1] + j, y, u, v, a);
1915                 }
1916             }
1917
1918             /* now we can update the subpicture count */
1919             if (++is->subpq_windex == SUBPICTURE_QUEUE_SIZE)
1920                 is->subpq_windex = 0;
1921             SDL_LockMutex(is->subpq_mutex);
1922             is->subpq_size++;
1923             SDL_UnlockMutex(is->subpq_mutex);
1924         }
1925         av_free_packet(pkt);
1926     }
1927     return 0;
1928 }
1929
1930 /* copy samples into the circular array used by the audio display (waves/spectrum) */
1931 static void update_sample_display(VideoState *is, short *samples, int samples_size)
1932 {
1933     int size, len;
1934
1935     size = samples_size / sizeof(short);
1936     while (size > 0) {
1937         len = SAMPLE_ARRAY_SIZE - is->sample_array_index;
1938         if (len > size)
1939             len = size;
1940         memcpy(is->sample_array + is->sample_array_index, samples, len * sizeof(short));
1941         samples += len;
1942         is->sample_array_index += len;
1943         if (is->sample_array_index >= SAMPLE_ARRAY_SIZE)
1944             is->sample_array_index = 0;
1945         size -= len;
1946     }
1947 }
1948
1949 /* return the new audio buffer size (samples can be added or removed
1950    to get better sync when video or the external clock is the master) */
1951 static int synchronize_audio(VideoState *is, short *samples,
1952                              int samples_size1, double pts)
1953 {
1954     int n, samples_size;
1955     double ref_clock;
1956
1957     n = is->sdl_channels * av_get_bytes_per_sample(is->sdl_sample_fmt);
1958     samples_size = samples_size1;
1959
1960     /* if not master, then we try to remove or add samples to correct the clock */
1961     if (((is->av_sync_type == AV_SYNC_VIDEO_MASTER && is->video_st) ||
1962          is->av_sync_type == AV_SYNC_EXTERNAL_CLOCK)) {
1963         double diff, avg_diff;
1964         int wanted_size, min_size, max_size, nb_samples;
1965
1966         ref_clock = get_master_clock(is);
1967         diff = get_audio_clock(is) - ref_clock;
1968
1969         if (diff < AV_NOSYNC_THRESHOLD) {
1970             is->audio_diff_cum = diff + is->audio_diff_avg_coef * is->audio_diff_cum;
1971             if (is->audio_diff_avg_count < AUDIO_DIFF_AVG_NB) {
1972                 /* not enough measurements yet for a reliable estimate */
1973                 is->audio_diff_avg_count++;
1974             } else {
1975                 /* estimate the A-V difference */
1976                 avg_diff = is->audio_diff_cum * (1.0 - is->audio_diff_avg_coef);
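                     /* audio_diff_cum is a geometrically weighted sum of past
                        diffs (cum = diff + coef * cum); multiplying by
                        (1 - coef) normalizes it, yielding an exponentially
                        weighted moving average dominated by roughly the last
                        AUDIO_DIFF_AVG_NB measurements */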
1977
1978                 if (fabs(avg_diff) >= is->audio_diff_threshold) {
1979                     wanted_size = samples_size + ((int)(diff * is->audio_st->codec->sample_rate) * n);
1980                     nb_samples = samples_size / n;
1981
1982                     min_size = ((nb_samples * (100 - SAMPLE_CORRECTION_PERCENT_MAX)) / 100) * n;
1983                     max_size = ((nb_samples * (100 + SAMPLE_CORRECTION_PERCENT_MAX)) / 100) * n;
1984                     if (wanted_size < min_size)
1985                         wanted_size = min_size;
1986                     else if (wanted_size > max_size)
1987                         wanted_size = max_size;
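                         /* example: at 44100 Hz stereo S16 (n = 4), a diff of
                            0.010 s shifts wanted_size by about 441 sample
                            frames (1764 bytes) before the
                            SAMPLE_CORRECTION_PERCENT_MAX clamp above */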
1988
1989                     /* add or remove samples to correct the sync */
1990                     if (wanted_size < samples_size) {
1991                         /* remove samples */
1992                         samples_size = wanted_size;
1993                     } else if (wanted_size > samples_size) {
1994                         uint8_t *samples_end, *q;
1995                         int nb;
1996
1997                         /* add samples */
1998                         nb = wanted_size - samples_size; /* bytes to duplicate */
1999                         samples_end = (uint8_t *)samples + samples_size - n;
2000                         q = samples_end + n;
2001                         while (nb > 0) {
2002                             memcpy(q, samples_end, n);
2003                             q += n;
2004                             nb -= n;
2005                         }
2006                         samples_size = wanted_size;
2007                     }
2008                 }
2009                 av_dlog(NULL, "diff=%f adiff=%f sample_diff=%d apts=%0.3f vpts=%0.3f %f\n",
2010                         diff, avg_diff, samples_size - samples_size1,
2011                         is->audio_clock, is->video_clock, is->audio_diff_threshold);
2012             }
2013         } else {
2014             /* the difference is too large: probably initial PTS errors,
2015                so reset the A-V filter */
2016             is->audio_diff_avg_count = 0;
2017             is->audio_diff_cum       = 0;
2018         }
2019     }
2020
2021     return samples_size;
2022 }
2023
2024 /* decode one audio frame and return its uncompressed size in bytes */
2025 static int audio_decode_frame(VideoState *is, double *pts_ptr)
2026 {
2027     AVPacket *pkt_temp = &is->audio_pkt_temp;
2028     AVPacket *pkt = &is->audio_pkt;
2029     AVCodecContext *dec = is->audio_st->codec;
2030     int n, len1, data_size, got_frame;
2031     double pts;
2032     int new_packet = 0;
2033     int flush_complete = 0;
2034
2035     for (;;) {
2036         /* NOTE: the audio packet can contain several frames */
2037         while (pkt_temp->size > 0 || (!pkt_temp->data && new_packet)) {
2038             int resample_changed, audio_resample;
2039
2040             if (!is->frame) {
2041                 if (!(is->frame = avcodec_alloc_frame()))
2042                     return AVERROR(ENOMEM);
2043             } else
2044                 avcodec_get_frame_defaults(is->frame);
2045
2046             if (flush_complete)
2047                 break;
2048             new_packet = 0;
2049             len1 = avcodec_decode_audio4(dec, is->frame, &got_frame, pkt_temp);
2050             if (len1 < 0) {
2051                 /* if error, we skip the frame */
2052                 pkt_temp->size = 0;
2053                 break;
2054             }
2055
2056             pkt_temp->data += len1;
2057             pkt_temp->size -= len1;
2058
2059             if (!got_frame) {
2060                 /* stop sending empty packets if the decoder is finished */
2061                 if (!pkt_temp->data && dec->codec->capabilities & CODEC_CAP_DELAY)
2062                     flush_complete = 1;
2063                 continue;
2064             }
2065             data_size = av_samples_get_buffer_size(NULL, dec->channels,
2066                                                    is->frame->nb_samples,
2067                                                    dec->sample_fmt, 1);
2068
2069             audio_resample = dec->sample_fmt     != is->sdl_sample_fmt ||
2070                              dec->channel_layout != is->sdl_channel_layout;
2071
2072             resample_changed = dec->sample_fmt     != is->resample_sample_fmt ||
2073                                dec->channel_layout != is->resample_channel_layout;
2074
2075             if ((!is->avr && audio_resample) || resample_changed) {
2076                 int ret;
2077                 if (is->avr)
2078                     avresample_close(is->avr);
2079                 else if (audio_resample) {
2080                     is->avr = avresample_alloc_context();
2081                     if (!is->avr) {
2082                         fprintf(stderr, "error allocating AVAudioResampleContext\n");
2083                         break;
2084                     }
2085                 }
2086                 if (audio_resample) {
2087                     av_opt_set_int(is->avr, "in_channel_layout",  dec->channel_layout,    0);
2088                     av_opt_set_int(is->avr, "in_sample_fmt",      dec->sample_fmt,        0);
2089                     av_opt_set_int(is->avr, "in_sample_rate",     dec->sample_rate,       0);
2090                     av_opt_set_int(is->avr, "out_channel_layout", is->sdl_channel_layout, 0);
2091                     av_opt_set_int(is->avr, "out_sample_fmt",     is->sdl_sample_fmt,     0);
2092                     av_opt_set_int(is->avr, "out_sample_rate",    dec->sample_rate,       0);
2093                     if (av_get_bytes_per_sample(dec->sample_fmt) <= 2)
2094                         av_opt_set_int(is->avr, "internal_sample_fmt", AV_SAMPLE_FMT_S16P, 0);
2095
2096                     if ((ret = avresample_open(is->avr)) < 0) {
2097                         fprintf(stderr, "error initializing libavresample\n");
2098                         break;
2099                     }
2100                 }
2101                 is->resample_sample_fmt     = dec->sample_fmt;
2102                 is->resample_channel_layout = dec->channel_layout;
2103             }
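                 /* at this point is->avr (when needed) is configured to
                    convert the decoder output to interleaved S16 at the SDL
                    channel layout; the sample rate is left unchanged */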
2104
2105             if (audio_resample) {
2106                 void *tmp_out;
2107                 int out_samples, out_size, out_linesize;
2108                 int osize      = av_get_bytes_per_sample(is->sdl_sample_fmt);
2109                 int nb_samples = is->frame->nb_samples;
2110
2111                 out_size = av_samples_get_buffer_size(&out_linesize,
2112                                                       is->sdl_channels,
2113                                                       nb_samples,
2114                                                       is->sdl_sample_fmt, 0);
2115                 tmp_out = av_realloc(is->audio_buf1, out_size);
2116                 if (!tmp_out)
2117                     return AVERROR(ENOMEM);
2118                 is->audio_buf1 = tmp_out;
2119
2120                 out_samples = avresample_convert(is->avr,
2121                                                  (void **)&is->audio_buf1,
2122                                                  out_linesize, nb_samples,
2123                                                  (void **)is->frame->data,
2124                                                  is->frame->linesize[0],
2125                                                  is->frame->nb_samples);
2126                 if (out_samples < 0) {
2127                     fprintf(stderr, "avresample_convert() failed\n");
2128                     break;
2129                 }
2130                 is->audio_buf = is->audio_buf1;
2131                 data_size = out_samples * osize * is->sdl_channels;
2132             } else {
2133                 is->audio_buf = is->frame->data[0];
2134             }
2135
2136             /* use the running audio clock as pts and advance it below */
2137             pts = is->audio_clock;
2138             *pts_ptr = pts;
2139             n = is->sdl_channels * av_get_bytes_per_sample(is->sdl_sample_fmt);
2140             is->audio_clock += (double)data_size /
2141                 (double)(n * dec->sample_rate);
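                 /* the clock advances by data_size / (channels *
                    bytes_per_sample * sample_rate) seconds; e.g. 4096 bytes
                    of 44100 Hz stereo S16 advance it by roughly 23 ms */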
2142 #ifdef DEBUG
2143             {
2144                 static double last_clock;
2145                 printf("audio: delay=%0.3f clock=%0.3f pts=%0.3f\n",
2146                        is->audio_clock - last_clock,
2147                        is->audio_clock, pts);
2148                 last_clock = is->audio_clock;
2149             }
2150 #endif
2151             return data_size;
2152         }
2153
2154         /* free the current packet */
2155         if (pkt->data)
2156             av_free_packet(pkt);
2157         memset(pkt_temp, 0, sizeof(*pkt_temp));
2158
2159         if (is->paused || is->audioq.abort_request) {
2160             return -1;
2161         }
2162
2163         /* read next packet */
2164         if ((new_packet = packet_queue_get(&is->audioq, pkt, 1)) < 0)
2165             return -1;
2166
2167         if (pkt->data == flush_pkt.data) {
2168             avcodec_flush_buffers(dec);
2169             flush_complete = 0;
2170         }
2171
2172         *pkt_temp = *pkt;
2173
2174         /* update the audio clock with the packet pts, if available */
2175         if (pkt->pts != AV_NOPTS_VALUE) {
2176             is->audio_clock = av_q2d(is->audio_st->time_base)*pkt->pts;
2177         }
2178     }
2179 }
2180
2181 /* prepare a new audio buffer */
2182 static void sdl_audio_callback(void *opaque, Uint8 *stream, int len)
2183 {
2184     VideoState *is = opaque;
2185     int audio_size, len1;
2186     double pts;
2187
2188     audio_callback_time = av_gettime();
2189
2190     while (len > 0) {
2191         if (is->audio_buf_index >= is->audio_buf_size) {
2192            audio_size = audio_decode_frame(is, &pts);
2193            if (audio_size < 0) {
2194                 /* if error, just output silence */
2195                is->audio_buf      = is->silence_buf;
2196                is->audio_buf_size = sizeof(is->silence_buf);
2197            } else {
2198                if (is->show_audio)
2199                    update_sample_display(is, (int16_t *)is->audio_buf, audio_size);
2200                audio_size = synchronize_audio(is, (int16_t *)is->audio_buf, audio_size,
2201                                               pts);
2202                is->audio_buf_size = audio_size;
2203            }
2204            is->audio_buf_index = 0;
2205         }
2206         len1 = is->audio_buf_size - is->audio_buf_index;
2207         if (len1 > len)
2208             len1 = len;
2209         memcpy(stream, (uint8_t *)is->audio_buf + is->audio_buf_index, len1);
2210         len -= len1;
2211         stream += len1;
2212         is->audio_buf_index += len1;
2213     }
2214 }
2215
2216 /* open a given stream. Return 0 if OK */
2217 static int stream_component_open(VideoState *is, int stream_index)
2218 {
2219     AVFormatContext *ic = is->ic;
2220     AVCodecContext *avctx;
2221     AVCodec *codec;
2222     SDL_AudioSpec wanted_spec, spec;
2223     AVDictionary *opts;
2224     AVDictionaryEntry *t = NULL;
2225
2226     if (stream_index < 0 || stream_index >= ic->nb_streams)
2227         return -1;
2228     avctx = ic->streams[stream_index]->codec;
2229
2230     opts = filter_codec_opts(codec_opts, avctx->codec_id, ic, ic->streams[stream_index]);
2231
2232     codec = avcodec_find_decoder(avctx->codec_id);
2233     avctx->debug_mv          = debug_mv;
2234     avctx->debug             = debug;
2235     avctx->workaround_bugs   = workaround_bugs;
2236     avctx->idct_algo         = idct;
2237     avctx->skip_frame        = skip_frame;
2238     avctx->skip_idct         = skip_idct;
2239     avctx->skip_loop_filter  = skip_loop_filter;
2240     avctx->error_concealment = error_concealment;
2241
2242     if (fast)   avctx->flags2 |= CODEC_FLAG2_FAST;
2243
2244     if (!av_dict_get(opts, "threads", NULL, 0))
2245         av_dict_set(&opts, "threads", "auto", 0);
2246     if (!codec ||
2247         avcodec_open2(avctx, codec, &opts) < 0)
2248         return -1;
2249     if ((t = av_dict_get(opts, "", NULL, AV_DICT_IGNORE_SUFFIX))) {
2250         av_log(NULL, AV_LOG_ERROR, "Option %s not found.\n", t->key);
2251         return AVERROR_OPTION_NOT_FOUND;
2252     }
2253
2254     /* prepare audio output */
2255     if (avctx->codec_type == AVMEDIA_TYPE_AUDIO) {
2256         wanted_spec.freq = avctx->sample_rate;
2257         wanted_spec.format = AUDIO_S16SYS;
2258
2259         if (!avctx->channel_layout)
2260             avctx->channel_layout = av_get_default_channel_layout(avctx->channels);
2261         if (!avctx->channel_layout) {
2262             fprintf(stderr, "unable to guess channel layout\n");
2263             return -1;
2264         }
2265         if (avctx->channels == 1)
2266             is->sdl_channel_layout = AV_CH_LAYOUT_MONO;
2267         else
2268             is->sdl_channel_layout = AV_CH_LAYOUT_STEREO;
2269         is->sdl_channels = av_get_channel_layout_nb_channels(is->sdl_channel_layout);
2270
2271         wanted_spec.channels = is->sdl_channels;
2272         wanted_spec.silence = 0;
2273         wanted_spec.samples = SDL_AUDIO_BUFFER_SIZE;
2274         wanted_spec.callback = sdl_audio_callback;
2275         wanted_spec.userdata = is;
2276         if (SDL_OpenAudio(&wanted_spec, &spec) < 0) {
2277             fprintf(stderr, "SDL_OpenAudio: %s\n", SDL_GetError());
2278             return -1;
2279         }
2280         is->audio_hw_buf_size = spec.size;
2281         is->sdl_sample_fmt          = AV_SAMPLE_FMT_S16;
2282         is->resample_sample_fmt     = is->sdl_sample_fmt;
2283         is->resample_channel_layout = is->sdl_channel_layout;
2284     }
2285
2286     ic->streams[stream_index]->discard = AVDISCARD_DEFAULT;
2287     switch (avctx->codec_type) {
2288     case AVMEDIA_TYPE_AUDIO:
2289         is->audio_stream = stream_index;
2290         is->audio_st = ic->streams[stream_index];
2291         is->audio_buf_size  = 0;
2292         is->audio_buf_index = 0;
2293
2294         /* init averaging filter */
2295         is->audio_diff_avg_coef  = exp(log(0.01) / AUDIO_DIFF_AVG_NB);
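             /* with AUDIO_DIFF_AVG_NB == 20 this gives coef = 0.01^(1/20),
                roughly 0.794, so a measurement's weight decays to about 1%
                after 20 further measurements */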
2296         is->audio_diff_avg_count = 0;
2297         /* since we do not have a precise enough measure of the audio
2298            FIFO fullness, correct audio sync only above this threshold */
2299         is->audio_diff_threshold = 2.0 * SDL_AUDIO_BUFFER_SIZE / avctx->sample_rate;
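             /* e.g. with the 1024-sample SDL buffer and a 44100 Hz stream
                this threshold is about 46 ms */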
2300
2301         memset(&is->audio_pkt, 0, sizeof(is->audio_pkt));
2302         packet_queue_init(&is->audioq);
2303         SDL_PauseAudio(0);
2304         break;
2305     case AVMEDIA_TYPE_VIDEO:
2306         is->video_stream = stream_index;
2307         is->video_st = ic->streams[stream_index];
2308
2309         packet_queue_init(&is->videoq);
2310         is->video_tid = SDL_CreateThread(video_thread, is);
2311         break;
2312     case AVMEDIA_TYPE_SUBTITLE:
2313         is->subtitle_stream = stream_index;
2314         is->subtitle_st = ic->streams[stream_index];
2315         packet_queue_init(&is->subtitleq);
2316
2317         is->subtitle_tid = SDL_CreateThread(subtitle_thread, is);
2318         break;
2319     default:
2320         break;
2321     }
2322     return 0;
2323 }
2324
2325 static void stream_component_close(VideoState *is, int stream_index)
2326 {
2327     AVFormatContext *ic = is->ic;
2328     AVCodecContext *avctx;
2329
2330     if (stream_index < 0 || stream_index >= ic->nb_streams)
2331         return;
2332     avctx = ic->streams[stream_index]->codec;
2333
2334     switch (avctx->codec_type) {
2335     case AVMEDIA_TYPE_AUDIO:
2336         packet_queue_abort(&is->audioq);
2337
2338         SDL_CloseAudio();
2339
2340         packet_queue_end(&is->audioq);
2341         av_free_packet(&is->audio_pkt);
2342         if (is->avr)
2343             avresample_free(&is->avr);
2344         av_freep(&is->audio_buf1);
2345         is->audio_buf = NULL;
2346         av_freep(&is->frame);
2347
2348         if (is->rdft) {
2349             av_rdft_end(is->rdft);
2350             av_freep(&is->rdft_data);
2351             is->rdft = NULL;
2352             is->rdft_bits = 0;
2353         }
2354         break;
2355     case AVMEDIA_TYPE_VIDEO:
2356         packet_queue_abort(&is->videoq);
2357
2358         /* note: we also signal this mutex to make sure we unblock the
2359            video thread in all cases */
2360         SDL_LockMutex(is->pictq_mutex);
2361         SDL_CondSignal(is->pictq_cond);
2362         SDL_UnlockMutex(is->pictq_mutex);
2363
2364         SDL_WaitThread(is->video_tid, NULL);
2365
2366         packet_queue_end(&is->videoq);
2367         break;
2368     case AVMEDIA_TYPE_SUBTITLE:
2369         packet_queue_abort(&is->subtitleq);
2370
2371         /* note: we also signal this mutex to make sure we unblock the
2372            subtitle thread in all cases */
2373         SDL_LockMutex(is->subpq_mutex);
2374         is->subtitle_stream_changed = 1;
2375
2376         SDL_CondSignal(is->subpq_cond);
2377         SDL_UnlockMutex(is->subpq_mutex);
2378
2379         SDL_WaitThread(is->subtitle_tid, NULL);
2380
2381         packet_queue_end(&is->subtitleq);
2382         break;
2383     default:
2384         break;
2385     }
2386
2387     ic->streams[stream_index]->discard = AVDISCARD_ALL;
2388     avcodec_close(avctx);
2389     switch (avctx->codec_type) {
2390     case AVMEDIA_TYPE_AUDIO:
2391         is->audio_st = NULL;
2392         is->audio_stream = -1;
2393         break;
2394     case AVMEDIA_TYPE_VIDEO:
2395         is->video_st = NULL;
2396         is->video_stream = -1;
2397         break;
2398     case AVMEDIA_TYPE_SUBTITLE:
2399         is->subtitle_st = NULL;
2400         is->subtitle_stream = -1;
2401         break;
2402     default:
2403         break;
2404     }
2405 }
2406
2407 /* since we have only one decoding thread, we can use a global
2408    variable instead of a thread local variable */
2409 static VideoState *global_video_state;
2410
2411 static int decode_interrupt_cb(void *ctx)
2412 {
2413     return global_video_state && global_video_state->abort_request;
2414 }
2415
2416 /* this thread gets the stream from the disk or the network */
2417 static int decode_thread(void *arg)
2418 {
2419     VideoState *is = arg;
2420     AVFormatContext *ic = NULL;
2421     int err, i, ret;
2422     int st_index[AVMEDIA_TYPE_NB];
2423     AVPacket pkt1, *pkt = &pkt1;
2424     int eof = 0;
2425     int pkt_in_play_range = 0;
2426     AVDictionaryEntry *t;
2427     AVDictionary **opts;
2428     int orig_nb_streams;
2429
2430     memset(st_index, -1, sizeof(st_index));
2431     is->video_stream = -1;
2432     is->audio_stream = -1;
2433     is->subtitle_stream = -1;
2434
2435     global_video_state = is;
2436
2437     ic = avformat_alloc_context();
2438     ic->interrupt_callback.callback = decode_interrupt_cb;
2439     err = avformat_open_input(&ic, is->filename, is->iformat, &format_opts);
2440     if (err < 0) {
2441         print_error(is->filename, err);
2442         ret = -1;
2443         goto fail;
2444     }
2445     if ((t = av_dict_get(format_opts, "", NULL, AV_DICT_IGNORE_SUFFIX))) {
2446         av_log(NULL, AV_LOG_ERROR, "Option %s not found.\n", t->key);
2447         ret = AVERROR_OPTION_NOT_FOUND;
2448         goto fail;
2449     }
2450     is->ic = ic;
2451
2452     if (genpts)
2453         ic->flags |= AVFMT_FLAG_GENPTS;
2454
2455     opts = setup_find_stream_info_opts(ic, codec_opts);
2456     orig_nb_streams = ic->nb_streams;
2457
2458     err = avformat_find_stream_info(ic, opts);
2459     if (err < 0) {
2460         fprintf(stderr, "%s: could not find codec parameters\n", is->filename);
2461         ret = -1;
2462         goto fail;
2463     }
2464     for (i = 0; i < orig_nb_streams; i++)
2465         av_dict_free(&opts[i]);
2466     av_freep(&opts);
2467
2468     if (ic->pb)
2469         ic->pb->eof_reached = 0; // FIXME hack, avplay maybe should not use url_feof() to test for the end
2470
2471     if (seek_by_bytes < 0)
2472         seek_by_bytes = !!(ic->iformat->flags & AVFMT_TS_DISCONT);
2473
2474     /* if a seek was requested, execute it */
2475     if (start_time != AV_NOPTS_VALUE) {
2476         int64_t timestamp;
2477
2478         timestamp = start_time;
2479         /* add the stream start time */
2480         if (ic->start_time != AV_NOPTS_VALUE)
2481             timestamp += ic->start_time;
2482         ret = avformat_seek_file(ic, -1, INT64_MIN, timestamp, INT64_MAX, 0);
2483         if (ret < 0) {
2484             fprintf(stderr, "%s: could not seek to position %0.3f\n",
2485                     is->filename, (double)timestamp / AV_TIME_BASE);
2486         }
2487     }
2488
2489     for (i = 0; i < ic->nb_streams; i++)
2490         ic->streams[i]->discard = AVDISCARD_ALL;
2491     if (!video_disable)
2492         st_index[AVMEDIA_TYPE_VIDEO] =
2493             av_find_best_stream(ic, AVMEDIA_TYPE_VIDEO,
2494                                 wanted_stream[AVMEDIA_TYPE_VIDEO], -1, NULL, 0);
2495     if (!audio_disable)
2496         st_index[AVMEDIA_TYPE_AUDIO] =
2497             av_find_best_stream(ic, AVMEDIA_TYPE_AUDIO,
2498                                 wanted_stream[AVMEDIA_TYPE_AUDIO],
2499                                 st_index[AVMEDIA_TYPE_VIDEO],
2500                                 NULL, 0);
2501     if (!video_disable)
2502         st_index[AVMEDIA_TYPE_SUBTITLE] =
2503             av_find_best_stream(ic, AVMEDIA_TYPE_SUBTITLE,
2504                                 wanted_stream[AVMEDIA_TYPE_SUBTITLE],
2505                                 (st_index[AVMEDIA_TYPE_AUDIO] >= 0 ?
2506                                  st_index[AVMEDIA_TYPE_AUDIO] :
2507                                  st_index[AVMEDIA_TYPE_VIDEO]),
2508                                 NULL, 0);
2509     if (show_status) {
2510         av_dump_format(ic, 0, is->filename, 0);
2511     }
2512
2513     /* open the streams */
2514     if (st_index[AVMEDIA_TYPE_AUDIO] >= 0) {
2515         stream_component_open(is, st_index[AVMEDIA_TYPE_AUDIO]);
2516     }
2517
2518     ret = -1;
2519     if (st_index[AVMEDIA_TYPE_VIDEO] >= 0) {
2520         ret = stream_component_open(is, st_index[AVMEDIA_TYPE_VIDEO]);
2521     }
2522     is->refresh_tid = SDL_CreateThread(refresh_thread, is);
2523     if (ret < 0) {
2524         if (!display_disable)
2525             is->show_audio = 2;
2526     }
2527
2528     if (st_index[AVMEDIA_TYPE_SUBTITLE] >= 0) {
2529         stream_component_open(is, st_index[AVMEDIA_TYPE_SUBTITLE]);
2530     }
2531
2532     if (is->video_stream < 0 && is->audio_stream < 0) {
2533         fprintf(stderr, "%s: could not open codecs\n", is->filename);
2534         ret = -1;
2535         goto fail;
2536     }
2537
2538     for (;;) {
2539         if (is->abort_request)
2540             break;
2541         if (is->paused != is->last_paused) {
2542             is->last_paused = is->paused;
2543             if (is->paused)
2544                 is->read_pause_return = av_read_pause(ic);
2545             else
2546                 av_read_play(ic);
2547         }
2548 #if CONFIG_RTSP_DEMUXER
2549         if (is->paused && !strcmp(ic->iformat->name, "rtsp")) {
2550             /* wait 10 ms to avoid trying to get another packet */
2551             /* XXX: horrible */
2552             SDL_Delay(10);
2553             continue;
2554         }
2555 #endif
2556         if (is->seek_req) {
2557             int64_t seek_target = is->seek_pos;
2558             int64_t seek_min    = is->seek_rel > 0 ? seek_target - is->seek_rel + 2: INT64_MIN;
2559             int64_t seek_max    = is->seek_rel < 0 ? seek_target - is->seek_rel - 2: INT64_MAX;
2560 // FIXME the +-2 is due to rounding being not done in the correct direction in generation
2561 //      of the seek_pos/seek_rel variables
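                 /* seek_min/seek_max bound the timestamp passed to
                    avformat_seek_file() so that a forward relative seek lands
                    after, and a backward one before, the current position */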
2562
2563             ret = avformat_seek_file(is->ic, -1, seek_min, seek_target, seek_max, is->seek_flags);
2564             if (ret < 0) {
2565                 fprintf(stderr, "%s: error while seeking\n", is->ic->filename);
2566             } else {
2567                 if (is->audio_stream >= 0) {
2568                     packet_queue_flush(&is->audioq);
2569                     packet_queue_put(&is->audioq, &flush_pkt);
2570                 }
2571                 if (is->subtitle_stream >= 0) {
2572                     packet_queue_flush(&is->subtitleq);
2573                     packet_queue_put(&is->subtitleq, &flush_pkt);
2574                 }
2575                 if (is->video_stream >= 0) {
2576                     packet_queue_flush(&is->videoq);
2577                     packet_queue_put(&is->videoq, &flush_pkt);
2578                 }
2579             }
2580             is->seek_req = 0;
2581             eof = 0;
2582         }
2583
2584         /* if the queues are full, no need to read more */
2585         if (   is->audioq.size + is->videoq.size + is->subtitleq.size > MAX_QUEUE_SIZE
2586             || (   (is->audioq   .size  > MIN_AUDIOQ_SIZE || is->audio_stream < 0)
2587                 && (is->videoq   .nb_packets > MIN_FRAMES || is->video_stream < 0)
2588                 && (is->subtitleq.nb_packets > MIN_FRAMES || is->subtitle_stream < 0))) {
2589             /* wait 10 ms */
2590             SDL_Delay(10);
2591             continue;
2592         }
2593         if (eof) {
2594             if (is->video_stream >= 0) {
2595                 av_init_packet(pkt);
2596                 pkt->data = NULL;
2597                 pkt->size = 0;
2598                 pkt->stream_index = is->video_stream;
2599                 packet_queue_put(&is->videoq, pkt);
2600             }
2601             if (is->audio_stream >= 0 &&
2602                 is->audio_st->codec->codec->capabilities & CODEC_CAP_DELAY) {
2603                 av_init_packet(pkt);
2604                 pkt->data = NULL;
2605                 pkt->size = 0;
2606                 pkt->stream_index = is->audio_stream;
2607                 packet_queue_put(&is->audioq, pkt);
2608             }
2609             SDL_Delay(10);
2610             if (is->audioq.size + is->videoq.size + is->subtitleq.size == 0) {
2611                 if (loop != 1 && (!loop || --loop)) {
2612                     stream_seek(cur_stream, start_time != AV_NOPTS_VALUE ? start_time : 0, 0, 0);
2613                 } else if (autoexit) {
2614                     ret = AVERROR_EOF;
2615                     goto fail;
2616                 }
2617             }
2618             continue;
2619         }
2620         ret = av_read_frame(ic, pkt);
2621         if (ret < 0) {
2622             if (ret == AVERROR_EOF || (ic->pb && ic->pb->eof_reached))
2623                 eof = 1;
2624             if (ic->pb && ic->pb->error)
2625                 break;
2626             SDL_Delay(100); /* wait for user event */
2627             continue;
2628         }
2629         /* check if the packet is in the play range specified by the user; queue it if so, otherwise discard it */
2630         pkt_in_play_range = duration == AV_NOPTS_VALUE ||
2631                 (pkt->pts - ic->streams[pkt->stream_index]->start_time) *
2632                 av_q2d(ic->streams[pkt->stream_index]->time_base) -
2633                 (double)(start_time != AV_NOPTS_VALUE ? start_time : 0) / 1000000
2634                 <= ((double)duration / 1000000);
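             /* start_time (-ss) and duration (-t) are in AV_TIME_BASE units
                (microseconds); e.g. with -ss 10 -t 5 a packet at 14 s of
                stream time is queued while one at 16 s is discarded */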
2635         if (pkt->stream_index == is->audio_stream && pkt_in_play_range) {
2636             packet_queue_put(&is->audioq, pkt);
2637         } else if (pkt->stream_index == is->video_stream && pkt_in_play_range) {
2638             packet_queue_put(&is->videoq, pkt);
2639         } else if (pkt->stream_index == is->subtitle_stream && pkt_in_play_range) {
2640             packet_queue_put(&is->subtitleq, pkt);
2641         } else {
2642             av_free_packet(pkt);
2643         }
2644     }
2645     /* wait until the end */
2646     while (!is->abort_request) {
2647         SDL_Delay(100);
2648     }
2649
2650     ret = 0;
2651  fail:
2652     /* disable interrupting */
2653     global_video_state = NULL;
2654
2655     /* close each stream */
2656     if (is->audio_stream >= 0)
2657         stream_component_close(is, is->audio_stream);
2658     if (is->video_stream >= 0)
2659         stream_component_close(is, is->video_stream);
2660     if (is->subtitle_stream >= 0)
2661         stream_component_close(is, is->subtitle_stream);
2662     if (is->ic) {
2663         avformat_close_input(&is->ic);
2664     }
2665
2666     if (ret != 0) {
2667         SDL_Event event;
2668
2669         event.type = FF_QUIT_EVENT;
2670         event.user.data1 = is;
2671         SDL_PushEvent(&event);
2672     }
2673     return 0;
2674 }
2675
2676 static VideoState *stream_open(const char *filename, AVInputFormat *iformat)
2677 {
2678     VideoState *is;
2679
2680     is = av_mallocz(sizeof(VideoState));
2681     if (!is)
2682         return NULL;
2683     av_strlcpy(is->filename, filename, sizeof(is->filename));
2684     is->iformat = iformat;
2685     is->ytop    = 0;
2686     is->xleft   = 0;
2687
2688     /* start video display */
2689     is->pictq_mutex = SDL_CreateMutex();
2690     is->pictq_cond  = SDL_CreateCond();
2691
2692     is->subpq_mutex = SDL_CreateMutex();
2693     is->subpq_cond  = SDL_CreateCond();
2694
2695     is->av_sync_type = av_sync_type;
2696     is->parse_tid    = SDL_CreateThread(decode_thread, is);
2697     if (!is->parse_tid) {
2698         av_free(is);
2699         return NULL;
2700     }
2701     return is;
2702 }
2703
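     /* switch to the next stream of the given type, wrapping around; for
        subtitles the cycle also includes "no subtitle stream" (index -1) */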
2704 static void stream_cycle_channel(VideoState *is, int codec_type)
2705 {
2706     AVFormatContext *ic = is->ic;
2707     int start_index, stream_index;
2708     AVStream *st;
2709
2710     if (codec_type == AVMEDIA_TYPE_VIDEO)
2711         start_index = is->video_stream;
2712     else if (codec_type == AVMEDIA_TYPE_AUDIO)
2713         start_index = is->audio_stream;
2714     else
2715         start_index = is->subtitle_stream;
2716     if (start_index < (codec_type == AVMEDIA_TYPE_SUBTITLE ? -1 : 0))
2717         return;
2718     stream_index = start_index;
2719     for (;;) {
2720         if (++stream_index >= is->ic->nb_streams)
2721         {
2722             if (codec_type == AVMEDIA_TYPE_SUBTITLE)
2723             {
2724                 stream_index = -1;
2725                 goto the_end;
2726             } else
2727                 stream_index = 0;
2728         }
2729         if (stream_index == start_index)
2730             return;
2731         st = ic->streams[stream_index];
2732         if (st->codec->codec_type == codec_type) {
2733             /* check that parameters are OK */
2734             switch (codec_type) {
2735             case AVMEDIA_TYPE_AUDIO:
2736                 if (st->codec->sample_rate != 0 &&
2737                     st->codec->channels != 0)
2738                     goto the_end;
2739                 break;
2740             case AVMEDIA_TYPE_VIDEO:
2741             case AVMEDIA_TYPE_SUBTITLE:
2742                 goto the_end;
2743             default:
2744                 break;
2745             }
2746         }
2747     }
2748  the_end:
2749     stream_component_close(is, start_index);
2750     stream_component_open(is, stream_index);
2751 }
2752
2753
2754 static void toggle_full_screen(void)
2755 {
2756     is_full_screen = !is_full_screen;
2757 #if defined(__APPLE__) && SDL_VERSION_ATLEAST(1, 2, 14)
2758     /* OS X needs to empty the picture_queue */
2759     for (int i = 0; i < VIDEO_PICTURE_QUEUE_SIZE; i++) {
2760         cur_stream->pictq[i].reallocate = 1;
2761     }
2762 #endif
2763     video_open(cur_stream);
2764 }
2765
2766 static void toggle_pause(void)
2767 {
2768     if (cur_stream)
2769         stream_pause(cur_stream);
2770     step = 0;
2771 }
2772
2773 static void step_to_next_frame(void)
2774 {
2775     if (cur_stream) {
2776         /* if the stream is paused, unpause it, then step */
2777         if (cur_stream->paused)
2778             stream_pause(cur_stream);
2779     }
2780     step = 1;
2781 }
2782
2783 static void toggle_audio_display(void)
2784 {
2785     if (cur_stream) {
2786         int bgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0x00);
2787         cur_stream->show_audio = (cur_stream->show_audio + 1) % 3;
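             /* show_audio cycles through 0 = video, 1 = audio waveform,
                2 = RDFT spectrum */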
2788         fill_rectangle(screen,
2789                        cur_stream->xleft, cur_stream->ytop, cur_stream->width, cur_stream->height,
2790                        bgcolor);
2791         SDL_UpdateRect(screen, cur_stream->xleft, cur_stream->ytop, cur_stream->width, cur_stream->height);
2792     }
2793 }
2794
2795 /* handle an event sent by the GUI */
2796 static void event_loop(void)
2797 {
2798     SDL_Event event;
2799     double incr, pos, frac;
2800
2801     for (;;) {
2802         double x;
2803         SDL_WaitEvent(&event);
2804         switch (event.type) {
2805         case SDL_KEYDOWN:
2806             if (exit_on_keydown) {
2807                 do_exit();
2808                 break;
2809             }
2810             switch (event.key.keysym.sym) {
2811             case SDLK_ESCAPE:
2812             case SDLK_q:
2813                 do_exit();
2814                 break;
2815             case SDLK_f:
2816                 toggle_full_screen();
2817                 break;
2818             case SDLK_p:
2819             case SDLK_SPACE:
2820                 toggle_pause();
2821                 break;
2822             case SDLK_s: // S: Step to next frame
2823                 step_to_next_frame();
2824                 break;
2825             case SDLK_a:
2826                 if (cur_stream)
2827                     stream_cycle_channel(cur_stream, AVMEDIA_TYPE_AUDIO);
2828                 break;
2829             case SDLK_v:
2830                 if (cur_stream)
2831                     stream_cycle_channel(cur_stream, AVMEDIA_TYPE_VIDEO);
2832                 break;
2833             case SDLK_t:
2834                 if (cur_stream)
2835                     stream_cycle_channel(cur_stream, AVMEDIA_TYPE_SUBTITLE);
2836                 break;
2837             case SDLK_w:
2838                 toggle_audio_display();
2839                 break;
2840             case SDLK_LEFT:
2841                 incr = -10.0;
2842                 goto do_seek;
2843             case SDLK_RIGHT:
2844                 incr = 10.0;
2845                 goto do_seek;
2846             case SDLK_UP:
2847                 incr = 60.0;
2848                 goto do_seek;
2849             case SDLK_DOWN:
2850                 incr = -60.0;
2851             do_seek:
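                     /* byte-based seeking converts the +/- seconds increment
                        into a byte offset using the container bit rate (or a
                        180 kB/s fallback); time-based seeking offsets the
                        master clock directly */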
2852                 if (cur_stream) {
2853                     if (seek_by_bytes) {
2854                         if (cur_stream->video_stream >= 0 && cur_stream->video_current_pos >= 0) {
2855                             pos = cur_stream->video_current_pos;
2856                         } else if (cur_stream->audio_stream >= 0 && cur_stream->audio_pkt.pos >= 0) {
2857                             pos = cur_stream->audio_pkt.pos;
2858                         } else
2859                             pos = avio_tell(cur_stream->ic->pb);
2860                         if (cur_stream->ic->bit_rate)
2861                             incr *= cur_stream->ic->bit_rate / 8.0;
2862                         else
2863                             incr *= 180000.0;
2864                         pos += incr;
2865                         stream_seek(cur_stream, pos, incr, 1);
2866                     } else {
2867                         pos = get_master_clock(cur_stream);
2868                         pos += incr;
2869                         stream_seek(cur_stream, (int64_t)(pos * AV_TIME_BASE), (int64_t)(incr * AV_TIME_BASE), 0);
2870                     }
2871                 }
2872                 break;
2873             default:
2874                 break;
2875             }
2876             break;
2877         case SDL_MOUSEBUTTONDOWN:
2878             if (exit_on_mousedown) {
2879                 do_exit();
2880                 break;
2881             }
2882         case SDL_MOUSEMOTION:
2883             if (event.type == SDL_MOUSEBUTTONDOWN) {
2884                 x = event.button.x;
2885             } else {
2886                 if (event.motion.state != SDL_PRESSED)
2887                     break;
2888                 x = event.motion.x;
2889             }
2890             if (cur_stream) {
2891                 if (seek_by_bytes || cur_stream->ic->duration <= 0) {
2892                     uint64_t size =  avio_size(cur_stream->ic->pb);
2893                     stream_seek(cur_stream, size*x/cur_stream->width, 0, 1);
2894                 } else {
2895                     int64_t ts;
2896                     int ns, hh, mm, ss;
2897                     int tns, thh, tmm, tss;
2898                     tns  = cur_stream->ic->duration / 1000000LL;
2899                     thh  = tns / 3600;
2900                     tmm  = (tns % 3600) / 60;
2901                     tss  = (tns % 60);
2902                     frac = x / cur_stream->width;
2903                     ns   = frac * tns;
2904                     hh   = ns / 3600;
2905                     mm   = (ns % 3600) / 60;
2906                     ss   = (ns % 60);
2907                     fprintf(stderr, "Seek to %2.0f%% (%2d:%02d:%02d) of total duration (%2d:%02d:%02d)       \n", frac*100,
2908                             hh, mm, ss, thh, tmm, tss);
2909                     ts = frac * cur_stream->ic->duration;
2910                     if (cur_stream->ic->start_time != AV_NOPTS_VALUE)
2911                         ts += cur_stream->ic->start_time;
2912                     stream_seek(cur_stream, ts, 0, 0);
2913                 }
2914             }
2915             break;
2916         case SDL_VIDEORESIZE:
2917             if (cur_stream) {
2918                 screen = SDL_SetVideoMode(event.resize.w, event.resize.h, 0,
2919                                           SDL_HWSURFACE|SDL_RESIZABLE|SDL_ASYNCBLIT|SDL_HWACCEL);
2920                 screen_width  = cur_stream->width  = event.resize.w;
2921                 screen_height = cur_stream->height = event.resize.h;
2922             }
2923             break;
2924         case SDL_QUIT:
2925         case FF_QUIT_EVENT:
2926             do_exit();
2927             break;
2928         case FF_ALLOC_EVENT:
2929             video_open(event.user.data1);
2930             alloc_picture(event.user.data1);
2931             break;
2932         case FF_REFRESH_EVENT:
2933             video_refresh_timer(event.user.data1);
2934             cur_stream->refresh = 0;
2935             break;
2936         default:
2937             break;
2938         }
2939     }
2940 }
2941
2942 static int opt_frame_size(const char *opt, const char *arg)
2943 {
2944     av_log(NULL, AV_LOG_ERROR,
2945            "Option '%s' has been removed, use private format options instead\n", opt);
2946     return AVERROR(EINVAL);
2947 }
2948
2949 static int opt_width(const char *opt, const char *arg)
2950 {
2951     screen_width = parse_number_or_die(opt, arg, OPT_INT64, 1, INT_MAX);
2952     return 0;
2953 }
2954
2955 static int opt_height(const char *opt, const char *arg)
2956 {
2957     screen_height = parse_number_or_die(opt, arg, OPT_INT64, 1, INT_MAX);
2958     return 0;
2959 }
2960
2961 static int opt_format(const char *opt, const char *arg)
2962 {
2963     file_iformat = av_find_input_format(arg);
2964     if (!file_iformat) {
2965         fprintf(stderr, "Unknown input format: %s\n", arg);
2966         return AVERROR(EINVAL);
2967     }
2968     return 0;
2969 }
2970
2971 static int opt_frame_pix_fmt(const char *opt, const char *arg)
2972 {
2973     av_log(NULL, AV_LOG_ERROR,
2974            "Option '%s' has been removed, use private format options instead\n", opt);
2975     return AVERROR(EINVAL);
2976 }
2977
2978 static int opt_sync(const char *opt, const char *arg)
2979 {
2980     if (!strcmp(arg, "audio"))
2981         av_sync_type = AV_SYNC_AUDIO_MASTER;
2982     else if (!strcmp(arg, "video"))
2983         av_sync_type = AV_SYNC_VIDEO_MASTER;
2984     else if (!strcmp(arg, "ext"))
2985         av_sync_type = AV_SYNC_EXTERNAL_CLOCK;
2986     else {
2987         fprintf(stderr, "Unknown value for %s: %s\n", opt, arg);
2988         exit(1);
2989     }
2990     return 0;
2991 }
2992
2993 static int opt_seek(const char *opt, const char *arg)
2994 {
2995     start_time = parse_time_or_die(opt, arg, 1);
2996     return 0;
2997 }
2998
2999 static int opt_duration(const char *opt, const char *arg)
3000 {
3001     duration = parse_time_or_die(opt, arg, 1);
3002     return 0;
3003 }
3004
3005 static int opt_debug(const char *opt, const char *arg)
3006 {
3007     av_log_set_level(99);
3008     debug = parse_number_or_die(opt, arg, OPT_INT64, 0, INT_MAX);
3009     return 0;
3010 }
3011
3012 static int opt_vismv(const char *opt, const char *arg)
3013 {
3014     debug_mv = parse_number_or_die(opt, arg, OPT_INT64, INT_MIN, INT_MAX);
3015     return 0;
3016 }
3017
3018 static const OptionDef options[] = {
3019 #include "cmdutils_common_opts.h"
3020     { "x", HAS_ARG, { (void*)opt_width }, "force displayed width", "width" },
3021     { "y", HAS_ARG, { (void*)opt_height }, "force displayed height", "height" },
3022     { "s", HAS_ARG | OPT_VIDEO, { (void*)opt_frame_size }, "set frame size (WxH or abbreviation)", "size" },
3023     { "fs", OPT_BOOL, { (void*)&is_full_screen }, "force full screen" },
3024     { "an", OPT_BOOL, { (void*)&audio_disable }, "disable audio" },
3025     { "vn", OPT_BOOL, { (void*)&video_disable }, "disable video" },
3026     { "ast", OPT_INT | HAS_ARG | OPT_EXPERT, { (void*)&wanted_stream[AVMEDIA_TYPE_AUDIO] }, "select desired audio stream", "stream_number" },
3027     { "vst", OPT_INT | HAS_ARG | OPT_EXPERT, { (void*)&wanted_stream[AVMEDIA_TYPE_VIDEO] }, "select desired video stream", "stream_number" },
3028     { "sst", OPT_INT | HAS_ARG | OPT_EXPERT, { (void*)&wanted_stream[AVMEDIA_TYPE_SUBTITLE] }, "select desired subtitle stream", "stream_number" },
3029     { "ss", HAS_ARG, { (void*)&opt_seek }, "seek to a given position in seconds", "pos" },
3030     { "t", HAS_ARG, { (void*)&opt_duration }, "play  \"duration\" seconds of audio/video", "duration" },
3031     { "bytes", OPT_INT | HAS_ARG, { (void*)&seek_by_bytes }, "seek by bytes 0=off 1=on -1=auto", "val" },
3032     { "nodisp", OPT_BOOL, { (void*)&display_disable }, "disable graphical display" },
3033     { "f", HAS_ARG, { (void*)opt_format }, "force format", "fmt" },
3034     { "pix_fmt", HAS_ARG | OPT_EXPERT | OPT_VIDEO, { (void*)opt_frame_pix_fmt }, "set pixel format", "format" },
3035     { "stats", OPT_BOOL | OPT_EXPERT, { (void*)&show_status }, "show status", "" },
3036     { "debug", HAS_ARG | OPT_EXPERT, { (void*)opt_debug }, "print specific debug info", "" },
3037     { "bug", OPT_INT | HAS_ARG | OPT_EXPERT, { (void*)&workaround_bugs }, "workaround bugs", "" },
3038     { "vismv", HAS_ARG | OPT_EXPERT, { (void*)opt_vismv }, "visualize motion vectors", "" },
3039     { "fast", OPT_BOOL | OPT_EXPERT, { (void*)&fast }, "non spec compliant optimizations", "" },
3040     { "genpts", OPT_BOOL | OPT_EXPERT, { (void*)&genpts }, "generate pts", "" },
3041     { "drp", OPT_INT | HAS_ARG | OPT_EXPERT, { (void*)&decoder_reorder_pts }, "let decoder reorder pts 0=off 1=on -1=auto", ""},
3042     { "skiploop", OPT_INT | HAS_ARG | OPT_EXPERT, { (void*)&skip_loop_filter }, "", "" },
3043     { "skipframe", OPT_INT | HAS_ARG | OPT_EXPERT, { (void*)&skip_frame }, "", "" },
3044     { "skipidct", OPT_INT | HAS_ARG | OPT_EXPERT, { (void*)&skip_idct }, "", "" },
3045     { "idct", OPT_INT | HAS_ARG | OPT_EXPERT, { (void*)&idct }, "set idct algo",  "algo" },
3046     { "ec", OPT_INT | HAS_ARG | OPT_EXPERT, { (void*)&error_concealment }, "set error concealment options",  "bit_mask" },
3047     { "sync", HAS_ARG | OPT_EXPERT, { (void*)opt_sync }, "set audio-video sync. type (type=audio/video/ext)", "type" },
3048     { "autoexit", OPT_BOOL | OPT_EXPERT, { (void*)&autoexit }, "exit at the end", "" },
3049     { "exitonkeydown", OPT_BOOL | OPT_EXPERT, { (void*)&exit_on_keydown }, "exit on key down", "" },
3050     { "exitonmousedown", OPT_BOOL | OPT_EXPERT, { (void*)&exit_on_mousedown }, "exit on mouse down", "" },
3051     { "loop", OPT_INT | HAS_ARG | OPT_EXPERT, { (void*)&loop }, "set number of times the playback shall be looped", "loop count" },
3052     { "framedrop", OPT_BOOL | OPT_EXPERT, { (void*)&framedrop }, "drop frames when cpu is too slow", "" },
3053     { "window_title", OPT_STRING | HAS_ARG, { (void*)&window_title }, "set window title", "window title" },
3054 #if CONFIG_AVFILTER
3055     { "vf", OPT_STRING | HAS_ARG, { (void*)&vfilters }, "video filters", "filter list" },
3056 #endif
3057     { "rdftspeed", OPT_INT | HAS_ARG| OPT_AUDIO | OPT_EXPERT, { (void*)&rdftspeed }, "rdft speed", "msecs" },
3058     { "default", HAS_ARG | OPT_AUDIO | OPT_VIDEO | OPT_EXPERT, { (void*)opt_default }, "generic catch all option", "" },
3059     { "i", 0, { NULL }, "avconv compatibility dummy option", ""},
3060     { NULL, },
3061 };
3062
3063 static void show_usage(void)
3064 {
3065     printf("Simple media player\n");
3066     printf("usage: %s [options] input_file\n", program_name);
3067     printf("\n");
3068 }
3069
3070 static void show_help(void)
3071 {
3072     av_log_set_callback(log_callback_help);
3073     show_usage();
3074     show_help_options(options, "Main options:\n",
3075                       OPT_EXPERT, 0);
3076     show_help_options(options, "\nAdvanced options:\n",
3077                       OPT_EXPERT, OPT_EXPERT);
3078     printf("\n");
3079     show_help_children(avcodec_get_class(), AV_OPT_FLAG_DECODING_PARAM);
3080     show_help_children(avformat_get_class(), AV_OPT_FLAG_DECODING_PARAM);
3081 #if !CONFIG_AVFILTER
3082     show_help_children(sws_get_class(), AV_OPT_FLAG_ENCODING_PARAM);
3083 #endif
3084     printf("\nWhile playing:\n"
3085            "q, ESC              quit\n"
3086            "f                   toggle full screen\n"
3087            "p, SPC              pause\n"
3088            "a                   cycle audio channel\n"
3089            "v                   cycle video channel\n"
3090            "t                   cycle subtitle channel\n"
3091            "w                   show audio waves\n"
3092            "s                   activate frame-step mode\n"
3093            "left/right          seek backward/forward 10 seconds\n"
3094            "down/up             seek backward/forward 1 minute\n"
3095            "mouse click         seek to percentage in file corresponding to fraction of width\n"
3096            );
3097 }
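
/* Example invocations (illustrative only; file names are placeholders,
 * options as declared in the options[] table above):
 *   avplay -fs movie.mkv              play in full-screen mode
 *   avplay -ss 60 -t 30 clip.mp4      seek to 1:00 and play 30 seconds
 *   avplay -an -stats stream.ts       disable audio, keep the status line
 */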
3098
3099 static void opt_input_file(void *optctx, const char *filename)
3100 {
3101     if (input_filename) {
3102         fprintf(stderr, "Argument '%s' provided as input filename, but '%s' was already specified.\n",
3103                 filename, input_filename);
3104         exit(1);
3105     }
3106     if (!strcmp(filename, "-"))
3107         filename = "pipe:";
3108     input_filename = filename;
3109 }
3110
3111 /* program entry point */
3112 int main(int argc, char **argv)
3113 {
3114     int flags;
3115
3116     av_log_set_flags(AV_LOG_SKIP_REPEATED);
3117     parse_loglevel(argc, argv, options);
3118
3119     /* register all codecs, demuxers and protocols */
3120     avcodec_register_all();
3121 #if CONFIG_AVDEVICE
3122     avdevice_register_all();
3123 #endif
3124 #if CONFIG_AVFILTER
3125     avfilter_register_all();
3126 #endif
3127     av_register_all();
3128     avformat_network_init();
3129
3130     init_opts();
3131
3132     show_banner();
3133
3134     parse_options(NULL, argc, argv, options, opt_input_file);
3135
3136     if (!input_filename) {
3137         show_usage();
3138         fprintf(stderr, "An input file must be specified\n");
3139         fprintf(stderr, "Use -h to get full help or, even better, run 'man %s'\n", program_name);
3140         exit(1);
3141     }
3142
3143     if (display_disable) {
3144         video_disable = 1;
3145     }
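    /* select the SDL subsystems needed for playback */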
3146     flags = SDL_INIT_VIDEO | SDL_INIT_AUDIO | SDL_INIT_TIMER;
3147 #if !defined(__MINGW32__) && !defined(__APPLE__)
3148     flags |= SDL_INIT_EVENTTHREAD; /* Not supported on Windows or Mac OS X */
3149 #endif
3150     if (SDL_Init(flags)) {
3151         fprintf(stderr, "Could not initialize SDL - %s\n", SDL_GetError());
3152         exit(1);
3153     }
3154
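    /* remember the desktop resolution so that full-screen mode can match it */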
3155     if (!display_disable) {
3156 #if HAVE_SDL_VIDEO_SIZE
3157         const SDL_VideoInfo *vi = SDL_GetVideoInfo();
3158         fs_screen_width = vi->current_w;
3159         fs_screen_height = vi->current_h;
3160 #endif
3161     }
3162
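    /* ignore events we do not handle so they never pile up in the SDL event queue */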
3163     SDL_EventState(SDL_ACTIVEEVENT, SDL_IGNORE);
3164     SDL_EventState(SDL_SYSWMEVENT, SDL_IGNORE);
3165     SDL_EventState(SDL_USEREVENT, SDL_IGNORE);
3166
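    /* flush_pkt is a sentinel packet queued on seeks; the decoder threads
       flush their buffers when they dequeue it */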
3167     av_init_packet(&flush_pkt);
3168     flush_pkt.data = (uint8_t *)"FLUSH";
3169
3170     cur_stream = stream_open(input_filename, file_iformat);
3171
3172     event_loop();
3173
3174     /* never reached: event_loop() does not return */
3175
3176     return 0;
3177 }