1 /*
2  * avplay : Simple Media Player based on the Libav libraries
3  * Copyright (c) 2003 Fabrice Bellard
4  *
5  * This file is part of Libav.
6  *
7  * Libav is free software; you can redistribute it and/or
8  * modify it under the terms of the GNU Lesser General Public
9  * License as published by the Free Software Foundation; either
10  * version 2.1 of the License, or (at your option) any later version.
11  *
12  * Libav is distributed in the hope that it will be useful,
13  * but WITHOUT ANY WARRANTY; without even the implied warranty of
14  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
15  * Lesser General Public License for more details.
16  *
17  * You should have received a copy of the GNU Lesser General Public
18  * License along with Libav; if not, write to the Free Software
19  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
20  */
21
22 #include "config.h"
23 #include <inttypes.h>
24 #include <math.h>
25 #include <limits.h>
26 #include <stdint.h>
27
28 #include "libavutil/avstring.h"
29 #include "libavutil/colorspace.h"
30 #include "libavutil/display.h"
31 #include "libavutil/mathematics.h"
32 #include "libavutil/pixdesc.h"
33 #include "libavutil/imgutils.h"
34 #include "libavutil/dict.h"
35 #include "libavutil/parseutils.h"
36 #include "libavutil/samplefmt.h"
37 #include "libavutil/time.h"
38 #include "libavformat/avformat.h"
39 #include "libavdevice/avdevice.h"
40 #include "libswscale/swscale.h"
41 #include "libavresample/avresample.h"
42 #include "libavutil/opt.h"
43 #include "libavcodec/avfft.h"
44
45 #if CONFIG_AVFILTER
46 # include "libavfilter/avfilter.h"
47 # include "libavfilter/buffersink.h"
48 # include "libavfilter/buffersrc.h"
49 #endif
50
51 #include "cmdutils.h"
52
53 #include <SDL.h>
54 #include <SDL_thread.h>
55
56 #ifdef __MINGW32__
57 #undef main /* We don't want SDL to override our main() */
58 #endif
59
60 #include <assert.h>
61
62 const char program_name[] = "avplay";
63 const int program_birth_year = 2003;
64
65 #define MAX_QUEUE_SIZE (15 * 1024 * 1024)
66 #define MIN_AUDIOQ_SIZE (20 * 16 * 1024)
67 #define MIN_FRAMES 5
68
69 /* SDL audio buffer size, in samples. Should be small to have precise
70    A/V sync as SDL does not have hardware buffer fullness info. */
71 #define SDL_AUDIO_BUFFER_SIZE 1024
72
73 /* no AV sync correction is done if below the AV sync threshold */
74 #define AV_SYNC_THRESHOLD 0.01
75 /* no AV correction is done if too big error */
76 #define AV_NOSYNC_THRESHOLD 10.0
77
78 #define FRAME_SKIP_FACTOR 0.05
79
80 /* maximum audio speed change to get correct sync */
81 #define SAMPLE_CORRECTION_PERCENT_MAX 10
82
83 /* we use about AUDIO_DIFF_AVG_NB A-V differences to make the average */
84 #define AUDIO_DIFF_AVG_NB   20
85
86 /* NOTE: the size must be big enough to compensate for the hardware audio buffer size */
87 #define SAMPLE_ARRAY_SIZE (2 * 65536)
88
89 static int64_t sws_flags = SWS_BICUBIC;
90
91 typedef struct PacketQueue {
92     AVPacketList *first_pkt, *last_pkt;
93     int nb_packets;
94     int size;
95     int abort_request;
96     SDL_mutex *mutex;
97     SDL_cond *cond;
98 } PacketQueue;
99
100 #define VIDEO_PICTURE_QUEUE_SIZE 2
101 #define SUBPICTURE_QUEUE_SIZE 4
102
103 typedef struct VideoPicture {
104     double pts;             // presentation timestamp for this picture
105     double target_clock;    // av_gettime_relative() time at which this should be displayed ideally
106     int64_t pos;            // byte position in file
107     SDL_Overlay *bmp;
108     int width, height; /* source height & width */
109     int allocated;
110     int reallocate;
111     enum AVPixelFormat pix_fmt;
112
113     AVRational sar;
114 } VideoPicture;
115
116 typedef struct SubPicture {
117     double pts; /* presentation time stamp for this picture */
118     AVSubtitle sub;
119 } SubPicture;
120
121 enum {
122     AV_SYNC_AUDIO_MASTER, /* default choice */
123     AV_SYNC_VIDEO_MASTER,
124     AV_SYNC_EXTERNAL_CLOCK, /* synchronize to an external clock */
125 };
126
127 typedef struct VideoState {
128     SDL_Thread *parse_tid;
129     SDL_Thread *video_tid;
130     SDL_Thread *refresh_tid;
131     AVInputFormat *iformat;
132     int no_background;
133     int abort_request;
134     int paused;
135     int last_paused;
136     int seek_req;
137     int seek_flags;
138     int64_t seek_pos;
139     int64_t seek_rel;
140     int read_pause_return;
141     AVFormatContext *ic;
142
143     int audio_stream;
144
145     int av_sync_type;
146     double external_clock; /* external clock base */
147     int64_t external_clock_time;
148
149     double audio_clock;
150     double audio_diff_cum; /* used for AV difference average computation */
151     double audio_diff_avg_coef;
152     double audio_diff_threshold;
153     int audio_diff_avg_count;
154     AVStream *audio_st;
155     PacketQueue audioq;
156     int audio_hw_buf_size;
157     uint8_t silence_buf[SDL_AUDIO_BUFFER_SIZE];
158     uint8_t *audio_buf;
159     uint8_t *audio_buf1;
160     unsigned int audio_buf_size; /* in bytes */
161     int audio_buf_index; /* in bytes */
162     AVPacket audio_pkt_temp;
163     AVPacket audio_pkt;
164     enum AVSampleFormat sdl_sample_fmt;
165     uint64_t sdl_channel_layout;
166     int sdl_channels;
167     int sdl_sample_rate;
168     enum AVSampleFormat resample_sample_fmt;
169     uint64_t resample_channel_layout;
170     int resample_sample_rate;
171     AVAudioResampleContext *avr;
172     AVFrame *frame;
173
174     int show_audio; /* if true, display audio samples */
175     int16_t sample_array[SAMPLE_ARRAY_SIZE];
176     int sample_array_index;
177     int last_i_start;
178     RDFTContext *rdft;
179     int rdft_bits;
180     FFTSample *rdft_data;
181     int xpos;
182
183     SDL_Thread *subtitle_tid;
184     int subtitle_stream;
185     int subtitle_stream_changed;
186     AVStream *subtitle_st;
187     PacketQueue subtitleq;
188     SubPicture subpq[SUBPICTURE_QUEUE_SIZE];
189     int subpq_size, subpq_rindex, subpq_windex;
190     SDL_mutex *subpq_mutex;
191     SDL_cond *subpq_cond;
192
193     double frame_timer;
194     double frame_last_pts;
195     double frame_last_delay;
196     double video_clock;             // pts of last decoded frame / predicted pts of next decoded frame
197     int video_stream;
198     AVStream *video_st;
199     PacketQueue videoq;
200     double video_current_pts;       // current displayed pts (different from video_clock if frame fifos are used)
201     double video_current_pts_drift; // video_current_pts - time (av_gettime_relative) at which we updated video_current_pts - used to have running video pts
202     int64_t video_current_pos;      // current displayed file pos
203     VideoPicture pictq[VIDEO_PICTURE_QUEUE_SIZE];
204     int pictq_size, pictq_rindex, pictq_windex;
205     SDL_mutex *pictq_mutex;
206     SDL_cond *pictq_cond;
207 #if !CONFIG_AVFILTER
208     struct SwsContext *img_convert_ctx;
209 #endif
210
211     //    QETimer *video_timer;
212     char filename[1024];
213     int width, height, xleft, ytop;
214
215     PtsCorrectionContext pts_ctx;
216
217 #if CONFIG_AVFILTER
218     AVFilterContext *in_video_filter;   // the first filter in the video chain
219     AVFilterContext *out_video_filter;  // the last filter in the video chain
220 #endif
221
222     float skip_frames;
223     float skip_frames_index;
224     int refresh;
225 } VideoState;
226
227 /* options specified by the user */
228 static AVInputFormat *file_iformat;
229 static const char *input_filename;
230 static const char *window_title;
231 static int fs_screen_width;
232 static int fs_screen_height;
233 static int screen_width  = 0;
234 static int screen_height = 0;
235 static int audio_disable;
236 static int video_disable;
237 static int wanted_stream[AVMEDIA_TYPE_NB] = {
238     [AVMEDIA_TYPE_AUDIO]    = -1,
239     [AVMEDIA_TYPE_VIDEO]    = -1,
240     [AVMEDIA_TYPE_SUBTITLE] = -1,
241 };
242 static int seek_by_bytes = -1;
243 static int display_disable;
244 static int show_status = 1;
245 static int av_sync_type = AV_SYNC_AUDIO_MASTER;
246 static int64_t start_time = AV_NOPTS_VALUE;
247 static int64_t duration = AV_NOPTS_VALUE;
248 static int step = 0;
249 static int workaround_bugs = 1;
250 static int fast = 0;
251 static int genpts = 0;
252 static int idct = FF_IDCT_AUTO;
253 static enum AVDiscard skip_frame       = AVDISCARD_DEFAULT;
254 static enum AVDiscard skip_idct        = AVDISCARD_DEFAULT;
255 static enum AVDiscard skip_loop_filter = AVDISCARD_DEFAULT;
256 static int error_concealment = 3;
257 static int decoder_reorder_pts = -1;
258 static int noautoexit;
259 static int exit_on_keydown;
260 static int exit_on_mousedown;
261 static int loop = 1;
262 static int framedrop = 1;
263 static int infinite_buffer = 0;
264
265 static int rdftspeed = 20;
266 #if CONFIG_AVFILTER
267 static char *vfilters = NULL;
268 #endif
269 static int autorotate = 1;
270
271 /* current context */
272 static int is_full_screen;
273 static VideoState *cur_stream;
274 static int64_t audio_callback_time;
275
276 static AVPacket flush_pkt;
277
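/* Custom SDL user events used to hand work back to the main event loop:
 * overlay (re)allocation, display refresh, and quit. */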
278 #define FF_ALLOC_EVENT   (SDL_USEREVENT)
279 #define FF_REFRESH_EVENT (SDL_USEREVENT + 1)
280 #define FF_QUIT_EVENT    (SDL_USEREVENT + 2)
281
282 static SDL_Surface *screen;
283
284 static int packet_queue_put(PacketQueue *q, AVPacket *pkt);
285
286 /* packet queue handling */
287 static void packet_queue_init(PacketQueue *q)
288 {
289     memset(q, 0, sizeof(PacketQueue));
290     q->mutex = SDL_CreateMutex();
291     q->cond = SDL_CreateCond();
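    /* Seed the queue with the global flush packet so the consuming thread
     * starts from a clean decoding state. */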
292     packet_queue_put(q, &flush_pkt);
293 }
294
295 static void packet_queue_flush(PacketQueue *q)
296 {
297     AVPacketList *pkt, *pkt1;
298
299     SDL_LockMutex(q->mutex);
300     for (pkt = q->first_pkt; pkt != NULL; pkt = pkt1) {
301         pkt1 = pkt->next;
302         av_free_packet(&pkt->pkt);
303         av_freep(&pkt);
304     }
305     q->last_pkt = NULL;
306     q->first_pkt = NULL;
307     q->nb_packets = 0;
308     q->size = 0;
309     SDL_UnlockMutex(q->mutex);
310 }
311
312 static void packet_queue_end(PacketQueue *q)
313 {
314     packet_queue_flush(q);
315     SDL_DestroyMutex(q->mutex);
316     SDL_DestroyCond(q->cond);
317 }
318
319 static int packet_queue_put(PacketQueue *q, AVPacket *pkt)
320 {
321     AVPacketList *pkt1;
322
323     /* duplicate the packet */
324     if (pkt != &flush_pkt && av_dup_packet(pkt) < 0)
325         return -1;
326
327     pkt1 = av_malloc(sizeof(AVPacketList));
328     if (!pkt1)
329         return -1;
330     pkt1->pkt = *pkt;
331     pkt1->next = NULL;
332
333
334     SDL_LockMutex(q->mutex);
335
336     if (!q->last_pkt)
337
338         q->first_pkt = pkt1;
339     else
340         q->last_pkt->next = pkt1;
341     q->last_pkt = pkt1;
342     q->nb_packets++;
343     q->size += pkt1->pkt.size + sizeof(*pkt1);
344     /* XXX: should duplicate packet data in DV case */
345     SDL_CondSignal(q->cond);
346
347     SDL_UnlockMutex(q->mutex);
348     return 0;
349 }
350
351 static void packet_queue_abort(PacketQueue *q)
352 {
353     SDL_LockMutex(q->mutex);
354
355     q->abort_request = 1;
356
357     SDL_CondSignal(q->cond);
358
359     SDL_UnlockMutex(q->mutex);
360 }
361
362 /* return < 0 if aborted, 0 if no packet and > 0 if packet.  */
363 static int packet_queue_get(PacketQueue *q, AVPacket *pkt, int block)
364 {
365     AVPacketList *pkt1;
366     int ret;
367
368     SDL_LockMutex(q->mutex);
369
370     for (;;) {
371         if (q->abort_request) {
372             ret = -1;
373             break;
374         }
375
376         pkt1 = q->first_pkt;
377         if (pkt1) {
378             q->first_pkt = pkt1->next;
379             if (!q->first_pkt)
380                 q->last_pkt = NULL;
381             q->nb_packets--;
382             q->size -= pkt1->pkt.size + sizeof(*pkt1);
383             *pkt = pkt1->pkt;
384             av_free(pkt1);
385             ret = 1;
386             break;
387         } else if (!block) {
388             ret = 0;
389             break;
390         } else {
391             SDL_CondWait(q->cond, q->mutex);
392         }
393     }
394     SDL_UnlockMutex(q->mutex);
395     return ret;
396 }
397
398 static inline void fill_rectangle(SDL_Surface *screen,
399                                   int x, int y, int w, int h, int color)
400 {
401     SDL_Rect rect;
402     rect.x = x;
403     rect.y = y;
404     rect.w = w;
405     rect.h = h;
406     SDL_FillRect(screen, &rect, color);
407 }
408
409 #define ALPHA_BLEND(a, oldp, newp, s)\
410 ((((oldp << s) * (255 - (a))) + (newp * (a))) / (255 << s))
411
412 #define RGBA_IN(r, g, b, a, s)\
413 {\
414     unsigned int v = ((const uint32_t *)(s))[0];\
415     a = (v >> 24) & 0xff;\
416     r = (v >> 16) & 0xff;\
417     g = (v >> 8) & 0xff;\
418     b = v & 0xff;\
419 }
420
421 #define YUVA_IN(y, u, v, a, s, pal)\
422 {\
423     unsigned int val = ((const uint32_t *)(pal))[*(const uint8_t*)(s)];\
424     a = (val >> 24) & 0xff;\
425     y = (val >> 16) & 0xff;\
426     u = (val >> 8) & 0xff;\
427     v = val & 0xff;\
428 }
429
430 #define YUVA_OUT(d, y, u, v, a)\
431 {\
432     ((uint32_t *)(d))[0] = (a << 24) | (y << 16) | (u << 8) | v;\
433 }
434
435
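/* BPP: bytes per pixel in the subtitle bitmap; the bitmap is palettized,
 * with one palette index per pixel. */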
436 #define BPP 1
437
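/* Alpha-blend a palettized subtitle rectangle onto a YUV420P picture. Because
 * chroma is subsampled 2x2, odd leading rows/columns and a trailing odd row
 * are handled separately from the main two-row loop. */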
438 static void blend_subrect(AVPicture *dst, const AVSubtitleRect *rect, int imgw, int imgh)
439 {
440     int wrap, wrap3, width2, skip2;
441     int y, u, v, a, u1, v1, a1, w, h;
442     uint8_t *lum, *cb, *cr;
443     const uint8_t *p;
444     const uint32_t *pal;
445     int dstx, dsty, dstw, dsth;
446
447     dstw = av_clip(rect->w, 0, imgw);
448     dsth = av_clip(rect->h, 0, imgh);
449     dstx = av_clip(rect->x, 0, imgw - dstw);
450     dsty = av_clip(rect->y, 0, imgh - dsth);
451     lum = dst->data[0] + dsty * dst->linesize[0];
452     cb  = dst->data[1] + (dsty >> 1) * dst->linesize[1];
453     cr  = dst->data[2] + (dsty >> 1) * dst->linesize[2];
454
455     width2 = ((dstw + 1) >> 1) + (dstx & ~dstw & 1);
456     skip2 = dstx >> 1;
457     wrap = dst->linesize[0];
458     wrap3 = rect->pict.linesize[0];
459     p = rect->pict.data[0];
460     pal = (const uint32_t *)rect->pict.data[1];  /* Now in YCrCb! */
461
462     if (dsty & 1) {
463         lum += dstx;
464         cb += skip2;
465         cr += skip2;
466
467         if (dstx & 1) {
468             YUVA_IN(y, u, v, a, p, pal);
469             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
470             cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
471             cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
472             cb++;
473             cr++;
474             lum++;
475             p += BPP;
476         }
477         for (w = dstw - (dstx & 1); w >= 2; w -= 2) {
478             YUVA_IN(y, u, v, a, p, pal);
479             u1 = u;
480             v1 = v;
481             a1 = a;
482             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
483
484             YUVA_IN(y, u, v, a, p + BPP, pal);
485             u1 += u;
486             v1 += v;
487             a1 += a;
488             lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
489             cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
490             cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
491             cb++;
492             cr++;
493             p += 2 * BPP;
494             lum += 2;
495         }
496         if (w) {
497             YUVA_IN(y, u, v, a, p, pal);
498             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
499             cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
500             cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
501             p++;
502             lum++;
503         }
504         p += wrap3 - dstw * BPP;
505         lum += wrap - dstw - dstx;
506         cb += dst->linesize[1] - width2 - skip2;
507         cr += dst->linesize[2] - width2 - skip2;
508     }
509     for (h = dsth - (dsty & 1); h >= 2; h -= 2) {
510         lum += dstx;
511         cb += skip2;
512         cr += skip2;
513
514         if (dstx & 1) {
515             YUVA_IN(y, u, v, a, p, pal);
516             u1 = u;
517             v1 = v;
518             a1 = a;
519             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
520             p += wrap3;
521             lum += wrap;
522             YUVA_IN(y, u, v, a, p, pal);
523             u1 += u;
524             v1 += v;
525             a1 += a;
526             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
527             cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
528             cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
529             cb++;
530             cr++;
531             p += -wrap3 + BPP;
532             lum += -wrap + 1;
533         }
534         for (w = dstw - (dstx & 1); w >= 2; w -= 2) {
535             YUVA_IN(y, u, v, a, p, pal);
536             u1 = u;
537             v1 = v;
538             a1 = a;
539             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
540
541             YUVA_IN(y, u, v, a, p + BPP, pal);
542             u1 += u;
543             v1 += v;
544             a1 += a;
545             lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
546             p += wrap3;
547             lum += wrap;
548
549             YUVA_IN(y, u, v, a, p, pal);
550             u1 += u;
551             v1 += v;
552             a1 += a;
553             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
554
555             YUVA_IN(y, u, v, a, p + BPP, pal);
556             u1 += u;
557             v1 += v;
558             a1 += a;
559             lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
560
561             cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 2);
562             cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 2);
563
564             cb++;
565             cr++;
566             p += -wrap3 + 2 * BPP;
567             lum += -wrap + 2;
568         }
569         if (w) {
570             YUVA_IN(y, u, v, a, p, pal);
571             u1 = u;
572             v1 = v;
573             a1 = a;
574             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
575             p += wrap3;
576             lum += wrap;
577             YUVA_IN(y, u, v, a, p, pal);
578             u1 += u;
579             v1 += v;
580             a1 += a;
581             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
582             cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
583             cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
584             cb++;
585             cr++;
586             p += -wrap3 + BPP;
587             lum += -wrap + 1;
588         }
589         p += wrap3 + (wrap3 - dstw * BPP);
590         lum += wrap + (wrap - dstw - dstx);
591         cb += dst->linesize[1] - width2 - skip2;
592         cr += dst->linesize[2] - width2 - skip2;
593     }
594     /* handle odd height */
595     if (h) {
596         lum += dstx;
597         cb += skip2;
598         cr += skip2;
599
600         if (dstx & 1) {
601             YUVA_IN(y, u, v, a, p, pal);
602             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
603             cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
604             cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
605             cb++;
606             cr++;
607             lum++;
608             p += BPP;
609         }
610         for (w = dstw - (dstx & 1); w >= 2; w -= 2) {
611             YUVA_IN(y, u, v, a, p, pal);
612             u1 = u;
613             v1 = v;
614             a1 = a;
615             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
616
617             YUVA_IN(y, u, v, a, p + BPP, pal);
618             u1 += u;
619             v1 += v;
620             a1 += a;
621             lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
622             cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
623             cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
624             cb++;
625             cr++;
626             p += 2 * BPP;
627             lum += 2;
628         }
629         if (w) {
630             YUVA_IN(y, u, v, a, p, pal);
631             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
632             cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
633             cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
634         }
635     }
636 }
637
638 static void free_subpicture(SubPicture *sp)
639 {
640     avsubtitle_free(&sp->sub);
641 }
642
643 static void video_image_display(VideoState *is)
644 {
645     VideoPicture *vp;
646     SubPicture *sp;
647     AVPicture pict;
648     float aspect_ratio;
649     int width, height, x, y;
650     SDL_Rect rect;
651     int i;
652
653     vp = &is->pictq[is->pictq_rindex];
654     if (vp->bmp) {
655 #if CONFIG_AVFILTER
656          if (!vp->sar.num)
657              aspect_ratio = 0;
658          else
659              aspect_ratio = av_q2d(vp->sar);
660 #else
661
662         /* XXX: use variable in the frame */
663         if (is->video_st->sample_aspect_ratio.num)
664             aspect_ratio = av_q2d(is->video_st->sample_aspect_ratio);
665         else if (is->video_st->codec->sample_aspect_ratio.num)
666             aspect_ratio = av_q2d(is->video_st->codec->sample_aspect_ratio);
667         else
668             aspect_ratio = 0;
669 #endif
670         if (aspect_ratio <= 0.0)
671             aspect_ratio = 1.0;
672         aspect_ratio *= (float)vp->width / (float)vp->height;
673
674         if (is->subtitle_st)
675         {
676             if (is->subpq_size > 0)
677             {
678                 sp = &is->subpq[is->subpq_rindex];
679
680                 if (vp->pts >= sp->pts + ((float) sp->sub.start_display_time / 1000))
681                 {
682                     SDL_LockYUVOverlay (vp->bmp);
683
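                    /* SDL's YV12 overlay stores its planes in Y, V, U order, so
                     * the chroma plane pointers are swapped relative to YUV420P. */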
684                     pict.data[0] = vp->bmp->pixels[0];
685                     pict.data[1] = vp->bmp->pixels[2];
686                     pict.data[2] = vp->bmp->pixels[1];
687
688                     pict.linesize[0] = vp->bmp->pitches[0];
689                     pict.linesize[1] = vp->bmp->pitches[2];
690                     pict.linesize[2] = vp->bmp->pitches[1];
691
692                     for (i = 0; i < sp->sub.num_rects; i++)
693                         blend_subrect(&pict, sp->sub.rects[i],
694                                       vp->bmp->w, vp->bmp->h);
695
696                     SDL_UnlockYUVOverlay (vp->bmp);
697                 }
698             }
699         }
700
701
702         /* XXX: we assume the screen has a 1:1 pixel aspect ratio */
703         height = is->height;
704         width = ((int)rint(height * aspect_ratio)) & ~1;
705         if (width > is->width) {
706             width = is->width;
707             height = ((int)rint(width / aspect_ratio)) & ~1;
708         }
709         x = (is->width - width) / 2;
710         y = (is->height - height) / 2;
711         is->no_background = 0;
712         rect.x = is->xleft + x;
713         rect.y = is->ytop  + y;
714         rect.w = width;
715         rect.h = height;
716         SDL_DisplayYUVOverlay(vp->bmp, &rect);
717     }
718 }
719
720 /* get the amount of data remaining in the audio output buffer, in bytes. With
721    SDL, we cannot have precise buffer fullness information. */
722 static int audio_write_get_buf_size(VideoState *is)
723 {
724     return is->audio_buf_size - is->audio_buf_index;
725 }
726
727 static inline int compute_mod(int a, int b)
728 {
729     a = a % b;
730     if (a >= 0)
731         return a;
732     else
733         return a + b;
734 }
735
736 static void video_audio_display(VideoState *s)
737 {
738     int i, i_start, x, y1, y, ys, delay, n, nb_display_channels;
739     int ch, channels, h, h2, bgcolor, fgcolor;
740     int64_t time_diff;
741     int rdft_bits, nb_freq;
742
743     for (rdft_bits = 1; (1 << rdft_bits) < 2 * s->height; rdft_bits++)
744         ;
745     nb_freq = 1 << (rdft_bits - 1);
746
747     /* compute display index: center on currently output samples */
748     channels = s->sdl_channels;
749     nb_display_channels = channels;
750     if (!s->paused) {
751         int data_used = s->show_audio == 1 ? s->width : (2 * nb_freq);
752         n = 2 * channels;
753         delay = audio_write_get_buf_size(s);
754         delay /= n;
755
756         /* to be more precise, we take into account the time spent since
757            the last buffer computation */
758         if (audio_callback_time) {
759             time_diff = av_gettime_relative() - audio_callback_time;
760             delay -= (time_diff * s->sdl_sample_rate) / 1000000;
761         }
762
763         delay += 2 * data_used;
764         if (delay < data_used)
765             delay = data_used;
766
767         i_start= x = compute_mod(s->sample_array_index - delay * channels, SAMPLE_ARRAY_SIZE);
768         if (s->show_audio == 1) {
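            /* Oscilloscope-style trigger: among the recent zero crossings, pick
             * the one with the largest downward swing so the drawn waveform stays
             * stable from one refresh to the next. */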
769             h = INT_MIN;
770             for (i = 0; i < 1000; i += channels) {
771                 int idx = (SAMPLE_ARRAY_SIZE + x - i) % SAMPLE_ARRAY_SIZE;
772                 int a = s->sample_array[idx];
773                 int b = s->sample_array[(idx + 4 * channels) % SAMPLE_ARRAY_SIZE];
774                 int c = s->sample_array[(idx + 5 * channels) % SAMPLE_ARRAY_SIZE];
775                 int d = s->sample_array[(idx + 9 * channels) % SAMPLE_ARRAY_SIZE];
776                 int score = a - d;
777                 if (h < score && (b ^ c) < 0) {
778                     h = score;
779                     i_start = idx;
780                 }
781             }
782         }
783
784         s->last_i_start = i_start;
785     } else {
786         i_start = s->last_i_start;
787     }
788
789     bgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0x00);
790     if (s->show_audio == 1) {
791         fill_rectangle(screen,
792                        s->xleft, s->ytop, s->width, s->height,
793                        bgcolor);
794
795         fgcolor = SDL_MapRGB(screen->format, 0xff, 0xff, 0xff);
796
797         /* total height for one channel */
798         h = s->height / nb_display_channels;
799         /* graph height / 2 */
800         h2 = (h * 9) / 20;
801         for (ch = 0; ch < nb_display_channels; ch++) {
802             i = i_start + ch;
803             y1 = s->ytop + ch * h + (h / 2); /* position of center line */
804             for (x = 0; x < s->width; x++) {
805                 y = (s->sample_array[i] * h2) >> 15;
806                 if (y < 0) {
807                     y = -y;
808                     ys = y1 - y;
809                 } else {
810                     ys = y1;
811                 }
812                 fill_rectangle(screen,
813                                s->xleft + x, ys, 1, y,
814                                fgcolor);
815                 i += channels;
816                 if (i >= SAMPLE_ARRAY_SIZE)
817                     i -= SAMPLE_ARRAY_SIZE;
818             }
819         }
820
821         fgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0xff);
822
823         for (ch = 1; ch < nb_display_channels; ch++) {
824             y = s->ytop + ch * h;
825             fill_rectangle(screen,
826                            s->xleft, y, s->width, 1,
827                            fgcolor);
828         }
829         SDL_UpdateRect(screen, s->xleft, s->ytop, s->width, s->height);
830     } else {
831         nb_display_channels= FFMIN(nb_display_channels, 2);
832         if (rdft_bits != s->rdft_bits) {
833             av_rdft_end(s->rdft);
834             av_free(s->rdft_data);
835             s->rdft = av_rdft_init(rdft_bits, DFT_R2C);
836             s->rdft_bits = rdft_bits;
837             s->rdft_data = av_malloc(4 * nb_freq * sizeof(*s->rdft_data));
838         }
839         {
840             FFTSample *data[2];
841             for (ch = 0; ch < nb_display_channels; ch++) {
842                 data[ch] = s->rdft_data + 2 * nb_freq * ch;
843                 i = i_start + ch;
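                /* Apply a Welch (parabolic) window, 1 - w^2, before the real FFT
                 * to reduce spectral leakage. */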
844                 for (x = 0; x < 2 * nb_freq; x++) {
845                     double w = (x-nb_freq) * (1.0 / nb_freq);
846                     data[ch][x] = s->sample_array[i] * (1.0 - w * w);
847                     i += channels;
848                     if (i >= SAMPLE_ARRAY_SIZE)
849                         i -= SAMPLE_ARRAY_SIZE;
850                 }
851                 av_rdft_calc(s->rdft, data[ch]);
852             }
853             /* This is the least efficient way to do it; we should of course
854              * access the data directly, but it is more than fast enough. */
855             for (y = 0; y < s->height; y++) {
856                 double w = 1 / sqrt(nb_freq);
857                 int a = sqrt(w * sqrt(data[0][2 * y + 0] * data[0][2 * y + 0] + data[0][2 * y + 1] * data[0][2 * y + 1]));
858                 int b = (nb_display_channels == 2 ) ? sqrt(w * sqrt(data[1][2 * y + 0] * data[1][2 * y + 0]
859                        + data[1][2 * y + 1] * data[1][2 * y + 1])) : a;
860                 a = FFMIN(a, 255);
861                 b = FFMIN(b, 255);
862                 fgcolor = SDL_MapRGB(screen->format, a, b, (a + b) / 2);
863
864                 fill_rectangle(screen,
865                             s->xpos, s->height-y, 1, 1,
866                             fgcolor);
867             }
868         }
869         SDL_UpdateRect(screen, s->xpos, s->ytop, 1, s->height);
870         s->xpos++;
871         if (s->xpos >= s->width)
872             s->xpos= s->xleft;
873     }
874 }
875
876 static int video_open(VideoState *is)
877 {
878     int flags = SDL_HWSURFACE | SDL_ASYNCBLIT | SDL_HWACCEL;
879     int w,h;
880
881     if (is_full_screen) flags |= SDL_FULLSCREEN;
882     else                flags |= SDL_RESIZABLE;
883
884     if (is_full_screen && fs_screen_width) {
885         w = fs_screen_width;
886         h = fs_screen_height;
887     } else if (!is_full_screen && screen_width) {
888         w = screen_width;
889         h = screen_height;
890 #if CONFIG_AVFILTER
891     } else if (is->out_video_filter && is->out_video_filter->inputs[0]) {
892         w = is->out_video_filter->inputs[0]->w;
893         h = is->out_video_filter->inputs[0]->h;
894 #else
895     } else if (is->video_st && is->video_st->codec->width) {
896         w = is->video_st->codec->width;
897         h = is->video_st->codec->height;
898 #endif
899     } else {
900         w = 640;
901         h = 480;
902     }
903     if (screen && is->width == screen->w && screen->w == w
904        && is->height== screen->h && screen->h == h)
905         return 0;
906
907 #if defined(__APPLE__) && !SDL_VERSION_ATLEAST(1, 2, 14)
908     /* setting bits_per_pixel = 0 or 32 causes blank video on OS X and older SDL */
909     screen = SDL_SetVideoMode(w, h, 24, flags);
910 #else
911     screen = SDL_SetVideoMode(w, h, 0, flags);
912 #endif
913     if (!screen) {
914         fprintf(stderr, "SDL: could not set video mode - exiting\n");
915         return -1;
916     }
917     if (!window_title)
918         window_title = input_filename;
919     SDL_WM_SetCaption(window_title, window_title);
920
921     is->width  = screen->w;
922     is->height = screen->h;
923
924     return 0;
925 }
926
927 /* display the current picture, if any */
928 static void video_display(VideoState *is)
929 {
930     if (!screen)
931         video_open(cur_stream);
932     if (is->audio_st && is->show_audio)
933         video_audio_display(is);
934     else if (is->video_st)
935         video_image_display(is);
936 }
937
938 static int refresh_thread(void *opaque)
939 {
940     VideoState *is= opaque;
941     while (!is->abort_request) {
942         SDL_Event event;
943         event.type = FF_REFRESH_EVENT;
944         event.user.data1 = opaque;
945         if (!is->refresh) {
946             is->refresh = 1;
947             SDL_PushEvent(&event);
948         }
949         av_usleep(is->audio_st && is->show_audio ? rdftspeed * 1000 : 5000); // FIXME: ideally we should wait for the correct time, but SDL's event passing is so slow it would be silly
950     }
951     return 0;
952 }
953
954 /* get the current audio clock value */
955 static double get_audio_clock(VideoState *is)
956 {
957     double pts;
958     int hw_buf_size, bytes_per_sec;
959     pts = is->audio_clock;
960     hw_buf_size = audio_write_get_buf_size(is);
961     bytes_per_sec = 0;
962     if (is->audio_st) {
963         bytes_per_sec = is->sdl_sample_rate * is->sdl_channels *
964                         av_get_bytes_per_sample(is->sdl_sample_fmt);
965     }
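    /* audio_clock marks the end of the last decoded chunk; subtract the time
     * represented by the data that has not been handed to the device yet. */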
966     if (bytes_per_sec)
967         pts -= (double)hw_buf_size / bytes_per_sec;
968     return pts;
969 }
970
971 /* get the current video clock value */
972 static double get_video_clock(VideoState *is)
973 {
974     if (is->paused) {
975         return is->video_current_pts;
976     } else {
977         return is->video_current_pts_drift + av_gettime_relative() / 1000000.0;
978     }
979 }
980
981 /* get the current external clock value */
982 static double get_external_clock(VideoState *is)
983 {
984     int64_t ti;
985     ti = av_gettime_relative();
986     return is->external_clock + ((ti - is->external_clock_time) * 1e-6);
987 }
988
989 /* get the current master clock value */
990 static double get_master_clock(VideoState *is)
991 {
992     double val;
993
994     if (is->av_sync_type == AV_SYNC_VIDEO_MASTER) {
995         if (is->video_st)
996             val = get_video_clock(is);
997         else
998             val = get_audio_clock(is);
999     } else if (is->av_sync_type == AV_SYNC_AUDIO_MASTER) {
1000         if (is->audio_st)
1001             val = get_audio_clock(is);
1002         else
1003             val = get_video_clock(is);
1004     } else {
1005         val = get_external_clock(is);
1006     }
1007     return val;
1008 }
1009
1010 /* seek in the stream */
1011 static void stream_seek(VideoState *is, int64_t pos, int64_t rel, int seek_by_bytes)
1012 {
1013     if (!is->seek_req) {
1014         is->seek_pos = pos;
1015         is->seek_rel = rel;
1016         is->seek_flags &= ~AVSEEK_FLAG_BYTE;
1017         if (seek_by_bytes)
1018             is->seek_flags |= AVSEEK_FLAG_BYTE;
1019         is->seek_req = 1;
1020     }
1021 }
1022
1023 /* pause or resume the video */
1024 static void stream_pause(VideoState *is)
1025 {
1026     if (is->paused) {
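        /* Resuming: advance frame_timer by the time spent paused so upcoming
         * frames are not considered late. */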
1027         is->frame_timer += av_gettime_relative() / 1000000.0 + is->video_current_pts_drift - is->video_current_pts;
1028         if (is->read_pause_return != AVERROR(ENOSYS)) {
1029             is->video_current_pts = is->video_current_pts_drift + av_gettime_relative() / 1000000.0;
1030         }
1031         is->video_current_pts_drift = is->video_current_pts - av_gettime_relative() / 1000000.0;
1032     }
1033     is->paused = !is->paused;
1034 }
1035
1036 static double compute_target_time(double frame_current_pts, VideoState *is)
1037 {
1038     double delay, sync_threshold, diff = 0;
1039
1040     /* compute nominal delay */
1041     delay = frame_current_pts - is->frame_last_pts;
1042     if (delay <= 0 || delay >= 10.0) {
1043         /* if incorrect delay, use previous one */
1044         delay = is->frame_last_delay;
1045     } else {
1046         is->frame_last_delay = delay;
1047     }
1048     is->frame_last_pts = frame_current_pts;
1049
1050     /* update delay to follow master synchronisation source */
1051     if (((is->av_sync_type == AV_SYNC_AUDIO_MASTER && is->audio_st) ||
1052          is->av_sync_type == AV_SYNC_EXTERNAL_CLOCK)) {
1053         /* if video is slave, we try to correct big delays by
1054            duplicating or deleting a frame */
1055         diff = get_video_clock(is) - get_master_clock(is);
1056
1057         /* skip or repeat frame. We take into account the
1058            delay to compute the threshold. I still don't know
1059            if it is the best guess */
1060         sync_threshold = FFMAX(AV_SYNC_THRESHOLD, delay);
1061         if (fabs(diff) < AV_NOSYNC_THRESHOLD) {
1062             if (diff <= -sync_threshold)
1063                 delay = 0;
1064             else if (diff >= sync_threshold)
1065                 delay = 2 * delay;
1066         }
1067     }
1068     is->frame_timer += delay;
1069
1070     av_log(NULL, AV_LOG_TRACE, "video: delay=%0.3f pts=%0.3f A-V=%f\n",
1071             delay, frame_current_pts, -diff);
1072
1073     return is->frame_timer;
1074 }
1075
1076 /* called to display each frame */
1077 static void video_refresh_timer(void *opaque)
1078 {
1079     VideoState *is = opaque;
1080     VideoPicture *vp;
1081
1082     SubPicture *sp, *sp2;
1083
1084     if (is->video_st) {
1085 retry:
1086         if (is->pictq_size == 0) {
1087             // nothing to do, no picture to display in the queue
1088         } else {
1089             double time = av_gettime_relative() / 1000000.0;
1090             double next_target;
1091             /* dequeue the picture */
1092             vp = &is->pictq[is->pictq_rindex];
1093
1094             if (time < vp->target_clock)
1095                 return;
1096             /* update current video pts */
1097             is->video_current_pts = vp->pts;
1098             is->video_current_pts_drift = is->video_current_pts - time;
1099             is->video_current_pos = vp->pos;
1100             if (is->pictq_size > 1) {
1101                 VideoPicture *nextvp = &is->pictq[(is->pictq_rindex + 1) % VIDEO_PICTURE_QUEUE_SIZE];
1102                 assert(nextvp->target_clock >= vp->target_clock);
1103                 next_target= nextvp->target_clock;
1104             } else {
1105                 next_target = vp->target_clock + is->video_clock - vp->pts; // FIXME pass durations cleanly
1106             }
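            /* Already past the next frame's target time: raise the frame-skip
             * ratio and, if we are far behind, drop this picture entirely. */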
1107             if (framedrop && time > next_target) {
1108                 is->skip_frames *= 1.0 + FRAME_SKIP_FACTOR;
1109                 if (is->pictq_size > 1 || time > next_target + 0.5) {
1110                     /* update queue size and signal for next picture */
1111                     if (++is->pictq_rindex == VIDEO_PICTURE_QUEUE_SIZE)
1112                         is->pictq_rindex = 0;
1113
1114                     SDL_LockMutex(is->pictq_mutex);
1115                     is->pictq_size--;
1116                     SDL_CondSignal(is->pictq_cond);
1117                     SDL_UnlockMutex(is->pictq_mutex);
1118                     goto retry;
1119                 }
1120             }
1121
1122             if (is->subtitle_st) {
1123                 if (is->subtitle_stream_changed) {
1124                     SDL_LockMutex(is->subpq_mutex);
1125
1126                     while (is->subpq_size) {
1127                         free_subpicture(&is->subpq[is->subpq_rindex]);
1128
1129                         /* update queue size and signal for next picture */
1130                         if (++is->subpq_rindex == SUBPICTURE_QUEUE_SIZE)
1131                             is->subpq_rindex = 0;
1132
1133                         is->subpq_size--;
1134                     }
1135                     is->subtitle_stream_changed = 0;
1136
1137                     SDL_CondSignal(is->subpq_cond);
1138                     SDL_UnlockMutex(is->subpq_mutex);
1139                 } else {
1140                     if (is->subpq_size > 0) {
1141                         sp = &is->subpq[is->subpq_rindex];
1142
1143                         if (is->subpq_size > 1)
1144                             sp2 = &is->subpq[(is->subpq_rindex + 1) % SUBPICTURE_QUEUE_SIZE];
1145                         else
1146                             sp2 = NULL;
1147
1148                         if ((is->video_current_pts > (sp->pts + ((float) sp->sub.end_display_time / 1000)))
1149                                 || (sp2 && is->video_current_pts > (sp2->pts + ((float) sp2->sub.start_display_time / 1000))))
1150                         {
1151                             free_subpicture(sp);
1152
1153                             /* update queue size and signal for next picture */
1154                             if (++is->subpq_rindex == SUBPICTURE_QUEUE_SIZE)
1155                                 is->subpq_rindex = 0;
1156
1157                             SDL_LockMutex(is->subpq_mutex);
1158                             is->subpq_size--;
1159                             SDL_CondSignal(is->subpq_cond);
1160                             SDL_UnlockMutex(is->subpq_mutex);
1161                         }
1162                     }
1163                 }
1164             }
1165
1166             /* display picture */
1167             if (!display_disable)
1168                 video_display(is);
1169
1170             /* update queue size and signal for next picture */
1171             if (++is->pictq_rindex == VIDEO_PICTURE_QUEUE_SIZE)
1172                 is->pictq_rindex = 0;
1173
1174             SDL_LockMutex(is->pictq_mutex);
1175             is->pictq_size--;
1176             SDL_CondSignal(is->pictq_cond);
1177             SDL_UnlockMutex(is->pictq_mutex);
1178         }
1179     } else if (is->audio_st) {
1180         /* draw the next audio frame */
1181
1182         /* if there is only an audio stream, display the audio bars (better
1183            than nothing, just to test the implementation) */
1184
1185         /* display picture */
1186         if (!display_disable)
1187             video_display(is);
1188     }
1189     if (show_status) {
1190         static int64_t last_time;
1191         int64_t cur_time;
1192         int aqsize, vqsize, sqsize;
1193         double av_diff;
1194
1195         cur_time = av_gettime_relative();
1196         if (!last_time || (cur_time - last_time) >= 30000) {
1197             aqsize = 0;
1198             vqsize = 0;
1199             sqsize = 0;
1200             if (is->audio_st)
1201                 aqsize = is->audioq.size;
1202             if (is->video_st)
1203                 vqsize = is->videoq.size;
1204             if (is->subtitle_st)
1205                 sqsize = is->subtitleq.size;
1206             av_diff = 0;
1207             if (is->audio_st && is->video_st)
1208                 av_diff = get_audio_clock(is) - get_video_clock(is);
1209             printf("%7.2f A-V:%7.3f s:%3.1f aq=%5dKB vq=%5dKB sq=%5dB f=%"PRId64"/%"PRId64"   \r",
1210                    get_master_clock(is), av_diff, FFMAX(is->skip_frames - 1, 0), aqsize / 1024,
1211                    vqsize / 1024, sqsize, is->pts_ctx.num_faulty_dts, is->pts_ctx.num_faulty_pts);
1212             fflush(stdout);
1213             last_time = cur_time;
1214         }
1215     }
1216 }
1217
1218 static void stream_close(VideoState *is)
1219 {
1220     VideoPicture *vp;
1221     int i;
1222     /* XXX: use a special url_shutdown call to abort parse cleanly */
1223     is->abort_request = 1;
1224     SDL_WaitThread(is->parse_tid, NULL);
1225     SDL_WaitThread(is->refresh_tid, NULL);
1226
1227     /* free all pictures */
1228     for (i = 0; i < VIDEO_PICTURE_QUEUE_SIZE; i++) {
1229         vp = &is->pictq[i];
1230         if (vp->bmp) {
1231             SDL_FreeYUVOverlay(vp->bmp);
1232             vp->bmp = NULL;
1233         }
1234     }
1235     SDL_DestroyMutex(is->pictq_mutex);
1236     SDL_DestroyCond(is->pictq_cond);
1237     SDL_DestroyMutex(is->subpq_mutex);
1238     SDL_DestroyCond(is->subpq_cond);
1239 #if !CONFIG_AVFILTER
1240     if (is->img_convert_ctx)
1241         sws_freeContext(is->img_convert_ctx);
1242 #endif
1243     av_free(is);
1244 }
1245
1246 static void do_exit(void)
1247 {
1248     if (cur_stream) {
1249         stream_close(cur_stream);
1250         cur_stream = NULL;
1251     }
1252     uninit_opts();
1253     avformat_network_deinit();
1254     if (show_status)
1255         printf("\n");
1256     SDL_Quit();
1257     av_log(NULL, AV_LOG_QUIET, "");
1258     exit(0);
1259 }
1260
1261 /* allocate a picture (this needs to be done in the main thread to avoid
1262    potential locking problems) */
1263 static void alloc_picture(void *opaque)
1264 {
1265     VideoState *is = opaque;
1266     VideoPicture *vp;
1267
1268     vp = &is->pictq[is->pictq_windex];
1269
1270     if (vp->bmp)
1271         SDL_FreeYUVOverlay(vp->bmp);
1272
1273 #if CONFIG_AVFILTER
1274     vp->width   = is->out_video_filter->inputs[0]->w;
1275     vp->height  = is->out_video_filter->inputs[0]->h;
1276     vp->pix_fmt = is->out_video_filter->inputs[0]->format;
1277 #else
1278     vp->width   = is->video_st->codec->width;
1279     vp->height  = is->video_st->codec->height;
1280     vp->pix_fmt = is->video_st->codec->pix_fmt;
1281 #endif
1282
1283     vp->bmp = SDL_CreateYUVOverlay(vp->width, vp->height,
1284                                    SDL_YV12_OVERLAY,
1285                                    screen);
1286     if (!vp->bmp || vp->bmp->pitches[0] < vp->width) {
1287         /* SDL allocates a buffer smaller than requested if the video
1288          * overlay hardware is unable to support the requested size. */
1289         fprintf(stderr, "Error: the video system does not support an image\n"
1290                         "size of %dx%d pixels. Try using -vf \"scale=w:h\"\n"
1291                         "to reduce the image size.\n", vp->width, vp->height );
1292         do_exit();
1293     }
1294
1295     SDL_LockMutex(is->pictq_mutex);
1296     vp->allocated = 1;
1297     SDL_CondSignal(is->pictq_cond);
1298     SDL_UnlockMutex(is->pictq_mutex);
1299 }
1300
1301 /* The 'pts' parameter is the dts of the packet / pts of the frame and
1302  * guessed if not known. */
1303 static int queue_picture(VideoState *is, AVFrame *src_frame, double pts, int64_t pos)
1304 {
1305     VideoPicture *vp;
1306 #if CONFIG_AVFILTER
1307     AVPicture pict_src;
1308 #else
1309     int dst_pix_fmt = AV_PIX_FMT_YUV420P;
1310 #endif
1311     /* wait until we have space to put a new picture */
1312     SDL_LockMutex(is->pictq_mutex);
1313
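    /* The picture queue is full while no refresh is pending, i.e. we are ahead
     * of the display, so decay the frame-skip ratio. */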
1314     if (is->pictq_size >= VIDEO_PICTURE_QUEUE_SIZE && !is->refresh)
1315         is->skip_frames = FFMAX(1.0 - FRAME_SKIP_FACTOR, is->skip_frames * (1.0 - FRAME_SKIP_FACTOR));
1316
1317     while (is->pictq_size >= VIDEO_PICTURE_QUEUE_SIZE &&
1318            !is->videoq.abort_request) {
1319         SDL_CondWait(is->pictq_cond, is->pictq_mutex);
1320     }
1321     SDL_UnlockMutex(is->pictq_mutex);
1322
1323     if (is->videoq.abort_request)
1324         return -1;
1325
1326     vp = &is->pictq[is->pictq_windex];
1327
1328     vp->sar = src_frame->sample_aspect_ratio;
1329
1330     /* alloc or resize hardware picture buffer */
1331     if (!vp->bmp || vp->reallocate ||
1332 #if CONFIG_AVFILTER
1333         vp->width  != is->out_video_filter->inputs[0]->w ||
1334         vp->height != is->out_video_filter->inputs[0]->h) {
1335 #else
1336         vp->width != is->video_st->codec->width ||
1337         vp->height != is->video_st->codec->height) {
1338 #endif
1339         SDL_Event event;
1340
1341         vp->allocated  = 0;
1342         vp->reallocate = 0;
1343
1344         /* the allocation must be done in the main thread to avoid
1345            locking problems */
1346         event.type = FF_ALLOC_EVENT;
1347         event.user.data1 = is;
1348         SDL_PushEvent(&event);
1349
1350         /* wait until the picture is allocated */
1351         SDL_LockMutex(is->pictq_mutex);
1352         while (!vp->allocated && !is->videoq.abort_request) {
1353             SDL_CondWait(is->pictq_cond, is->pictq_mutex);
1354         }
1355         SDL_UnlockMutex(is->pictq_mutex);
1356
1357         if (is->videoq.abort_request)
1358             return -1;
1359     }
1360
1361     /* if the frame is not skipped, then display it */
1362     if (vp->bmp) {
1363         AVPicture pict = { { 0 } };
1364
1365         /* get a pointer to the bitmap */
1366         SDL_LockYUVOverlay (vp->bmp);
1367
1368         pict.data[0] = vp->bmp->pixels[0];
1369         pict.data[1] = vp->bmp->pixels[2];
1370         pict.data[2] = vp->bmp->pixels[1];
1371
1372         pict.linesize[0] = vp->bmp->pitches[0];
1373         pict.linesize[1] = vp->bmp->pitches[2];
1374         pict.linesize[2] = vp->bmp->pitches[1];
1375
1376 #if CONFIG_AVFILTER
1377         pict_src.data[0] = src_frame->data[0];
1378         pict_src.data[1] = src_frame->data[1];
1379         pict_src.data[2] = src_frame->data[2];
1380
1381         pict_src.linesize[0] = src_frame->linesize[0];
1382         pict_src.linesize[1] = src_frame->linesize[1];
1383         pict_src.linesize[2] = src_frame->linesize[2];
1384
1385         // FIXME use direct rendering
1386         av_picture_copy(&pict, &pict_src,
1387                         vp->pix_fmt, vp->width, vp->height);
1388 #else
1389         av_opt_get_int(sws_opts, "sws_flags", 0, &sws_flags);
1390         is->img_convert_ctx = sws_getCachedContext(is->img_convert_ctx,
1391             vp->width, vp->height, vp->pix_fmt, vp->width, vp->height,
1392             dst_pix_fmt, sws_flags, NULL, NULL, NULL);
1393         if (!is->img_convert_ctx) {
1394             fprintf(stderr, "Cannot initialize the conversion context\n");
1395             exit(1);
1396         }
1397         sws_scale(is->img_convert_ctx, src_frame->data, src_frame->linesize,
1398                   0, vp->height, pict.data, pict.linesize);
1399 #endif
1400         /* update the bitmap content */
1401         SDL_UnlockYUVOverlay(vp->bmp);
1402
1403         vp->pts = pts;
1404         vp->pos = pos;
1405
1406         /* now we can update the picture count */
1407         if (++is->pictq_windex == VIDEO_PICTURE_QUEUE_SIZE)
1408             is->pictq_windex = 0;
1409         SDL_LockMutex(is->pictq_mutex);
1410         vp->target_clock = compute_target_time(vp->pts, is);
1411
1412         is->pictq_size++;
1413         SDL_UnlockMutex(is->pictq_mutex);
1414     }
1415     return 0;
1416 }
1417
1418 /* Compute the exact PTS for the picture if it is omitted in the stream.
1419  * The 'pts1' parameter is the dts of the packet / pts of the frame. */
1420 static int output_picture2(VideoState *is, AVFrame *src_frame, double pts1, int64_t pos)
1421 {
1422     double frame_delay, pts;
1423     int ret;
1424
1425     pts = pts1;
1426
1427     if (pts != 0) {
1428         /* update video clock with pts, if present */
1429         is->video_clock = pts;
1430     } else {
1431         pts = is->video_clock;
1432     }
1433     /* update video clock for next frame */
1434     frame_delay = av_q2d(is->video_st->codec->time_base);
1435     /* for MPEG2, the frame can be repeated, so we update the
1436        clock accordingly */
1437     frame_delay += src_frame->repeat_pict * (frame_delay * 0.5);
1438     is->video_clock += frame_delay;
1439
1440     ret = queue_picture(is, src_frame, pts, pos);
1441     av_frame_unref(src_frame);
1442     return ret;
1443 }
1444
1445 static int get_video_frame(VideoState *is, AVFrame *frame, int64_t *pts, AVPacket *pkt)
1446 {
1447     int got_picture, i;
1448
1449     if (packet_queue_get(&is->videoq, pkt, 1) < 0)
1450         return -1;
1451
1452     if (pkt->data == flush_pkt.data) {
1453         avcodec_flush_buffers(is->video_st->codec);
1454
1455         SDL_LockMutex(is->pictq_mutex);
1456         // Make sure there are no long delay timers (ideally we should just flush the queue, but that's harder)
1457         for (i = 0; i < VIDEO_PICTURE_QUEUE_SIZE; i++) {
1458             is->pictq[i].target_clock= 0;
1459         }
1460         while (is->pictq_size && !is->videoq.abort_request) {
1461             SDL_CondWait(is->pictq_cond, is->pictq_mutex);
1462         }
1463         is->video_current_pos = -1;
1464         SDL_UnlockMutex(is->pictq_mutex);
1465
1466         init_pts_correction(&is->pts_ctx);
1467         is->frame_last_pts = AV_NOPTS_VALUE;
1468         is->frame_last_delay = 0;
1469         is->frame_timer = (double)av_gettime_relative() / 1000000.0;
1470         is->skip_frames = 1;
1471         is->skip_frames_index = 0;
1472         return 0;
1473     }
1474
1475     avcodec_decode_video2(is->video_st->codec, frame, &got_picture, pkt);
1476
1477     if (got_picture) {
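        /* decoder_reorder_pts: -1 lets guess_correct_pts() choose between the
         * reordered pts and the dts, 1 trusts the reordered pts, 0 uses the dts. */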
1478         if (decoder_reorder_pts == -1) {
1479             *pts = guess_correct_pts(&is->pts_ctx, frame->pkt_pts, frame->pkt_dts);
1480         } else if (decoder_reorder_pts) {
1481             *pts = frame->pkt_pts;
1482         } else {
1483             *pts = frame->pkt_dts;
1484         }
1485
1486         if (*pts == AV_NOPTS_VALUE) {
1487             *pts = 0;
1488         }
1489         if (is->video_st->sample_aspect_ratio.num) {
1490             frame->sample_aspect_ratio = is->video_st->sample_aspect_ratio;
1491         }
1492
1493         is->skip_frames_index += 1;
1494         if (is->skip_frames_index >= is->skip_frames) {
1495             is->skip_frames_index -= FFMAX(is->skip_frames, 1.0);
1496             return 1;
1497         }
1498         av_frame_unref(frame);
1499     }
1500     return 0;
1501 }
1502
1503 #if CONFIG_AVFILTER
1504 static int configure_video_filters(AVFilterGraph *graph, VideoState *is, const char *vfilters)
1505 {
1506     char sws_flags_str[128];
1507     char buffersrc_args[256];
1508     int ret;
1509     AVFilterContext *filt_src = NULL, *filt_out = NULL, *last_filter;
1510     AVCodecContext *codec = is->video_st->codec;
1511
1512     snprintf(sws_flags_str, sizeof(sws_flags_str), "flags=%"PRId64, sws_flags);
1513     graph->scale_sws_opts = av_strdup(sws_flags_str);
1514
1515     snprintf(buffersrc_args, sizeof(buffersrc_args), "%d:%d:%d:%d:%d:%d:%d",
1516              codec->width, codec->height, codec->pix_fmt,
1517              is->video_st->time_base.num, is->video_st->time_base.den,
1518              codec->sample_aspect_ratio.num, codec->sample_aspect_ratio.den);
1519
1520
1521     if ((ret = avfilter_graph_create_filter(&filt_src,
1522                                             avfilter_get_by_name("buffer"),
1523                                             "src", buffersrc_args, NULL,
1524                                             graph)) < 0)
1525         return ret;
1526     if ((ret = avfilter_graph_create_filter(&filt_out,
1527                                             avfilter_get_by_name("buffersink"),
1528                                             "out", NULL, NULL, graph)) < 0)
1529         return ret;
1530
1531     last_filter = filt_out;
1532
1533 /* Note: this macro adds a filter before the last added filter, so the
1534  * processing order of the filters is reversed. */
1535 #define INSERT_FILT(name, arg) do {                                          \
1536     AVFilterContext *filt_ctx;                                               \
1537                                                                              \
1538     ret = avfilter_graph_create_filter(&filt_ctx,                            \
1539                                        avfilter_get_by_name(name),           \
1540                                        "avplay_" name, arg, NULL, graph);    \
1541     if (ret < 0)                                                             \
1542         return ret;                                                          \
1543                                                                              \
1544     ret = avfilter_link(filt_ctx, 0, last_filter, 0);                        \
1545     if (ret < 0)                                                             \
1546         return ret;                                                          \
1547                                                                              \
1548     last_filter = filt_ctx;                                                  \
1549 } while (0)
1550
1551     INSERT_FILT("format", "yuv420p");
1552
1553     if (autorotate) {
1554         uint8_t* displaymatrix = av_stream_get_side_data(is->video_st,
1555                                                          AV_PKT_DATA_DISPLAYMATRIX, NULL);
1556         if (displaymatrix) {
1557             double rot = av_display_rotation_get((int32_t*) displaymatrix);
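            /* Map the rotation angle to filters: ~180 degrees becomes vflip+hflip,
             * ~90 degrees becomes a clockwise or counter-clockwise transpose. */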
1558             if (rot < -135 || rot > 135) {
1559                 INSERT_FILT("vflip", NULL);
1560                 INSERT_FILT("hflip", NULL);
1561             } else if (rot < -45) {
1562                 INSERT_FILT("transpose", "dir=clock");
1563             } else if (rot > 45) {
1564                 INSERT_FILT("transpose", "dir=cclock");
1565             }
1566         }
1567     }
1568
1569     if (vfilters) {
1570         AVFilterInOut *outputs = avfilter_inout_alloc();
1571         AVFilterInOut *inputs  = avfilter_inout_alloc();
1572
1573         outputs->name    = av_strdup("in");
1574         outputs->filter_ctx = filt_src;
1575         outputs->pad_idx = 0;
1576         outputs->next    = NULL;
1577
1578         inputs->name    = av_strdup("out");
1579         inputs->filter_ctx = last_filter;
1580         inputs->pad_idx = 0;
1581         inputs->next    = NULL;
1582
1583         if ((ret = avfilter_graph_parse(graph, vfilters, inputs, outputs, NULL)) < 0)
1584             return ret;
1585     } else {
1586         if ((ret = avfilter_link(filt_src, 0, last_filter, 0)) < 0)
1587             return ret;
1588     }
1589
1590     if ((ret = avfilter_graph_config(graph, NULL)) < 0)
1591         return ret;
1592
1593     is->in_video_filter  = filt_src;
1594     is->out_video_filter = filt_out;
1595
1596     return ret;
1597 }
1598
1599 #endif  /* CONFIG_AVFILTER */
1600
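     /* video decoding thread: pulls packets from the video queue, decodes
      * them and (when libavfilter is enabled) pushes each frame through the
      * filter graph before handing it to the picture queue via
      * output_picture2() */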
1601 static int video_thread(void *arg)
1602 {
1603     AVPacket pkt = { 0 };
1604     VideoState *is = arg;
1605     AVFrame *frame = av_frame_alloc();
1606     int64_t pts_int;
1607     double pts;
1608     int ret;
1609
1610 #if CONFIG_AVFILTER
1611     AVFilterGraph *graph = avfilter_graph_alloc();
1612     AVFilterContext *filt_out = NULL, *filt_in = NULL;
1613     int last_w = is->video_st->codec->width;
1614     int last_h = is->video_st->codec->height;
1615     if (!graph) {
1616         av_frame_free(&frame);
1617         return AVERROR(ENOMEM);
1618     }
1619
1620     if ((ret = configure_video_filters(graph, is, vfilters)) < 0)
1621         goto the_end;
1622     filt_in  = is->in_video_filter;
1623     filt_out = is->out_video_filter;
1624 #endif
1625
1626     if (!frame) {
1627 #if CONFIG_AVFILTER
1628         avfilter_graph_free(&graph);
1629 #endif
1630         return AVERROR(ENOMEM);
1631     }
1632
1633     for (;;) {
1634 #if CONFIG_AVFILTER
1635         AVRational tb;
1636 #endif
1637         while (is->paused && !is->videoq.abort_request)
1638             SDL_Delay(10);
1639
1640         av_free_packet(&pkt);
1641
1642         ret = get_video_frame(is, frame, &pts_int, &pkt);
1643         if (ret < 0)
1644             goto the_end;
1645
1646         if (!ret)
1647             continue;
1648
1649 #if CONFIG_AVFILTER
1650         if (   last_w != is->video_st->codec->width
1651             || last_h != is->video_st->codec->height) {
1652             av_log(NULL, AV_LOG_TRACE, "Changing size %dx%d -> %dx%d\n", last_w, last_h,
1653                     is->video_st->codec->width, is->video_st->codec->height);
1654             avfilter_graph_free(&graph);
1655             graph = avfilter_graph_alloc();
1656             if ((ret = configure_video_filters(graph, is, vfilters)) < 0)
1657                 goto the_end;
1658             filt_in  = is->in_video_filter;
1659             filt_out = is->out_video_filter;
1660             last_w = is->video_st->codec->width;
1661             last_h = is->video_st->codec->height;
1662         }
1663
1664         frame->pts = pts_int;
1665         ret = av_buffersrc_add_frame(filt_in, frame);
1666         if (ret < 0)
1667             goto the_end;
1668
1669         while (ret >= 0) {
1670             ret = av_buffersink_get_frame(filt_out, frame);
1671             if (ret < 0) {
1672                 ret = 0;
1673                 break;
1674             }
1675
1676             pts_int = frame->pts;
1677             tb      = filt_out->inputs[0]->time_base;
1678             if (av_cmp_q(tb, is->video_st->time_base)) {
1679                 av_unused int64_t pts1 = pts_int;
1680                 pts_int = av_rescale_q(pts_int, tb, is->video_st->time_base);
1681                 av_log(NULL, AV_LOG_TRACE, "video_thread(): "
1682                         "tb:%d/%d pts:%"PRId64" -> tb:%d/%d pts:%"PRId64"\n",
1683                         tb.num, tb.den, pts1,
1684                         is->video_st->time_base.num, is->video_st->time_base.den, pts_int);
1685             }
1686             pts = pts_int * av_q2d(is->video_st->time_base);
1687             ret = output_picture2(is, frame, pts, 0);
1688         }
1689 #else
1690         pts = pts_int * av_q2d(is->video_st->time_base);
1691         ret = output_picture2(is, frame, pts,  pkt.pos);
1692 #endif
1693
1694         if (ret < 0)
1695             goto the_end;
1696
1697
1698         if (step)
1699             if (cur_stream)
1700                 stream_pause(cur_stream);
1701     }
1702  the_end:
1703 #if CONFIG_AVFILTER
1704     av_freep(&vfilters);
1705     avfilter_graph_free(&graph);
1706 #endif
1707     av_free_packet(&pkt);
1708     av_frame_free(&frame);
1709     return 0;
1710 }
1711
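     /* subtitle decoding thread: decodes packets from the subtitle queue,
      * converts the palette of bitmap subtitles from RGBA to YUVA and stores
      * the result in the subpicture queue */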
1712 static int subtitle_thread(void *arg)
1713 {
1714     VideoState *is = arg;
1715     SubPicture *sp;
1716     AVPacket pkt1, *pkt = &pkt1;
1717     int got_subtitle;
1718     double pts;
1719     int i, j;
1720     int r, g, b, y, u, v, a;
1721
1722     for (;;) {
1723         while (is->paused && !is->subtitleq.abort_request) {
1724             SDL_Delay(10);
1725         }
1726         if (packet_queue_get(&is->subtitleq, pkt, 1) < 0)
1727             break;
1728
1729         if (pkt->data == flush_pkt.data) {
1730             avcodec_flush_buffers(is->subtitle_st->codec);
1731             continue;
1732         }
1733         SDL_LockMutex(is->subpq_mutex);
1734         while (is->subpq_size >= SUBPICTURE_QUEUE_SIZE &&
1735                !is->subtitleq.abort_request) {
1736             SDL_CondWait(is->subpq_cond, is->subpq_mutex);
1737         }
1738         SDL_UnlockMutex(is->subpq_mutex);
1739
1740         if (is->subtitleq.abort_request)
1741             return 0;
1742
1743         sp = &is->subpq[is->subpq_windex];
1744
1745         /* NOTE: pts is the PTS of the _first_ subtitle picture beginning
1746            in this packet, if any */
1747         pts = 0;
1748         if (pkt->pts != AV_NOPTS_VALUE)
1749             pts = av_q2d(is->subtitle_st->time_base) * pkt->pts;
1750
1751         avcodec_decode_subtitle2(is->subtitle_st->codec, &sp->sub,
1752                                  &got_subtitle, pkt);
1753
1754         if (got_subtitle && sp->sub.format == 0) {
1755             sp->pts = pts;
1756
1757             for (i = 0; i < sp->sub.num_rects; i++)
1758             {
1759                 for (j = 0; j < sp->sub.rects[i]->nb_colors; j++)
1760                 {
1761                     RGBA_IN(r, g, b, a, (uint32_t*)sp->sub.rects[i]->pict.data[1] + j);
1762                     y = RGB_TO_Y_CCIR(r, g, b);
1763                     u = RGB_TO_U_CCIR(r, g, b, 0);
1764                     v = RGB_TO_V_CCIR(r, g, b, 0);
1765                     YUVA_OUT((uint32_t*)sp->sub.rects[i]->pict.data[1] + j, y, u, v, a);
1766                 }
1767             }
1768
1769             /* now we can update the picture count */
1770             if (++is->subpq_windex == SUBPICTURE_QUEUE_SIZE)
1771                 is->subpq_windex = 0;
1772             SDL_LockMutex(is->subpq_mutex);
1773             is->subpq_size++;
1774             SDL_UnlockMutex(is->subpq_mutex);
1775         }
1776         av_free_packet(pkt);
1777     }
1778     return 0;
1779 }
1780
1781 /* copy samples into the sample array used by the audio waveform display */
1782 static void update_sample_display(VideoState *is, short *samples, int samples_size)
1783 {
1784     int size, len;
1785
1786     size = samples_size / sizeof(short);
1787     while (size > 0) {
1788         len = SAMPLE_ARRAY_SIZE - is->sample_array_index;
1789         if (len > size)
1790             len = size;
1791         memcpy(is->sample_array + is->sample_array_index, samples, len * sizeof(short));
1792         samples += len;
1793         is->sample_array_index += len;
1794         if (is->sample_array_index >= SAMPLE_ARRAY_SIZE)
1795             is->sample_array_index = 0;
1796         size -= len;
1797     }
1798 }
1799
1800 /* return the new audio buffer size (samples can be added or deleted
1801    to get better sync if video or an external clock is the master) */
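     /* The A-V difference is smoothed with an exponential average
      * (audio_diff_avg_coef = exp(log(0.01) / AUDIO_DIFF_AVG_NB), so roughly
      * the last AUDIO_DIFF_AVG_NB measurements carry the weight).  A
      * correction is applied only once |avg_diff| exceeds
      * audio_diff_threshold, and the change is clamped to
      * +/- SAMPLE_CORRECTION_PERCENT_MAX percent of the buffer size. */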
1802 static int synchronize_audio(VideoState *is, short *samples,
1803                              int samples_size1, double pts)
1804 {
1805     int n, samples_size;
1806     double ref_clock;
1807
1808     n = is->sdl_channels * av_get_bytes_per_sample(is->sdl_sample_fmt);
1809     samples_size = samples_size1;
1810
1811     /* if not master, then we try to remove or add samples to correct the clock */
1812     if (((is->av_sync_type == AV_SYNC_VIDEO_MASTER && is->video_st) ||
1813          is->av_sync_type == AV_SYNC_EXTERNAL_CLOCK)) {
1814         double diff, avg_diff;
1815         int wanted_size, min_size, max_size, nb_samples;
1816
1817         ref_clock = get_master_clock(is);
1818         diff = get_audio_clock(is) - ref_clock;
1819
1820         if (diff < AV_NOSYNC_THRESHOLD) {
1821             is->audio_diff_cum = diff + is->audio_diff_avg_coef * is->audio_diff_cum;
1822             if (is->audio_diff_avg_count < AUDIO_DIFF_AVG_NB) {
1823                 /* not enough measurements yet for a reliable estimate */
1824                 is->audio_diff_avg_count++;
1825             } else {
1826                 /* estimate the A-V difference */
1827                 avg_diff = is->audio_diff_cum * (1.0 - is->audio_diff_avg_coef);
1828
1829                 if (fabs(avg_diff) >= is->audio_diff_threshold) {
1830                     wanted_size = samples_size + ((int)(diff * is->sdl_sample_rate) * n);
1831                     nb_samples = samples_size / n;
1832
1833                     min_size = ((nb_samples * (100 - SAMPLE_CORRECTION_PERCENT_MAX)) / 100) * n;
1834                     max_size = ((nb_samples * (100 + SAMPLE_CORRECTION_PERCENT_MAX)) / 100) * n;
1835                     if (wanted_size < min_size)
1836                         wanted_size = min_size;
1837                     else if (wanted_size > max_size)
1838                         wanted_size = max_size;
1839
1840                     /* add or remove samples to correct the sync */
1841                     if (wanted_size < samples_size) {
1842                         /* remove samples */
1843                         samples_size = wanted_size;
1844                     } else if (wanted_size > samples_size) {
1845                         uint8_t *samples_end, *q;
1846                         int nb;
1847
1848                         /* add samples by duplicating the last one */
1849                         nb = (wanted_size - samples_size);
1850                         samples_end = (uint8_t *)samples + samples_size - n;
1851                         q = samples_end + n;
1852                         while (nb > 0) {
1853                             memcpy(q, samples_end, n);
1854                             q += n;
1855                             nb -= n;
1856                         }
1857                         samples_size = wanted_size;
1858                     }
1859                 }
1860                 av_log(NULL, AV_LOG_TRACE, "diff=%f adiff=%f sample_diff=%d apts=%0.3f vpts=%0.3f %f\n",
1861                         diff, avg_diff, samples_size - samples_size1,
1862                         is->audio_clock, is->video_clock, is->audio_diff_threshold);
1863             }
1864         } else {
1865             /* the difference is too big: it may be due to initial PTS
1866                errors, so reset the A-V filter */
1867             is->audio_diff_avg_count = 0;
1868             is->audio_diff_cum       = 0;
1869         }
1870     }
1871
1872     return samples_size;
1873 }
1874
1875 /* decode one audio frame and return its uncompressed size in bytes */
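     /* An audio packet may contain several frames, so the packet is consumed
      * incrementally.  Whenever the decoded frame's sample format, channel
      * layout or sample rate differs from what SDL was opened with, the frame
      * is converted with libavresample, and the resampler is reconfigured
      * when the input parameters change.  The audio clock is advanced by the
      * duration of the data that is returned. */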
1876 static int audio_decode_frame(VideoState *is, double *pts_ptr)
1877 {
1878     AVPacket *pkt_temp = &is->audio_pkt_temp;
1879     AVPacket *pkt = &is->audio_pkt;
1880     AVCodecContext *dec = is->audio_st->codec;
1881     int n, len1, data_size, got_frame;
1882     double pts;
1883     int new_packet = 0;
1884     int flush_complete = 0;
1885
1886     for (;;) {
1887         /* NOTE: the audio packet can contain several frames */
1888         while (pkt_temp->size > 0 || (!pkt_temp->data && new_packet)) {
1889             int resample_changed, audio_resample;
1890
1891             if (!is->frame) {
1892                 if (!(is->frame = av_frame_alloc()))
1893                     return AVERROR(ENOMEM);
1894             }
1895
1896             if (flush_complete)
1897                 break;
1898             new_packet = 0;
1899             len1 = avcodec_decode_audio4(dec, is->frame, &got_frame, pkt_temp);
1900             if (len1 < 0) {
1901                 /* if error, we skip the frame */
1902                 pkt_temp->size = 0;
1903                 break;
1904             }
1905
1906             pkt_temp->data += len1;
1907             pkt_temp->size -= len1;
1908
1909             if (!got_frame) {
1910                 /* stop sending empty packets if the decoder is finished */
1911                 if (!pkt_temp->data && dec->codec->capabilities & CODEC_CAP_DELAY)
1912                     flush_complete = 1;
1913                 continue;
1914             }
1915             data_size = av_samples_get_buffer_size(NULL, dec->channels,
1916                                                    is->frame->nb_samples,
1917                                                    is->frame->format, 1);
1918
1919             audio_resample = is->frame->format         != is->sdl_sample_fmt     ||
1920                              is->frame->channel_layout != is->sdl_channel_layout ||
1921                              is->frame->sample_rate    != is->sdl_sample_rate;
1922
1923             resample_changed = is->frame->format         != is->resample_sample_fmt     ||
1924                                is->frame->channel_layout != is->resample_channel_layout ||
1925                                is->frame->sample_rate    != is->resample_sample_rate;
1926
1927             if ((!is->avr && audio_resample) || resample_changed) {
1928                 int ret;
1929                 if (is->avr)
1930                     avresample_close(is->avr);
1931                 else if (audio_resample) {
1932                     is->avr = avresample_alloc_context();
1933                     if (!is->avr) {
1934                         fprintf(stderr, "error allocating AVAudioResampleContext\n");
1935                         break;
1936                     }
1937                 }
1938                 if (audio_resample) {
1939                     av_opt_set_int(is->avr, "in_channel_layout",  is->frame->channel_layout, 0);
1940                     av_opt_set_int(is->avr, "in_sample_fmt",      is->frame->format,         0);
1941                     av_opt_set_int(is->avr, "in_sample_rate",     is->frame->sample_rate,    0);
1942                     av_opt_set_int(is->avr, "out_channel_layout", is->sdl_channel_layout,    0);
1943                     av_opt_set_int(is->avr, "out_sample_fmt",     is->sdl_sample_fmt,        0);
1944                     av_opt_set_int(is->avr, "out_sample_rate",    is->sdl_sample_rate,       0);
1945
1946                     if ((ret = avresample_open(is->avr)) < 0) {
1947                         fprintf(stderr, "error initializing libavresample\n");
1948                         break;
1949                     }
1950                 }
1951                 is->resample_sample_fmt     = is->frame->format;
1952                 is->resample_channel_layout = is->frame->channel_layout;
1953                 is->resample_sample_rate    = is->frame->sample_rate;
1954             }
1955
1956             if (audio_resample) {
1957                 void *tmp_out;
1958                 int out_samples, out_size, out_linesize;
1959                 int osize      = av_get_bytes_per_sample(is->sdl_sample_fmt);
1960                 int nb_samples = is->frame->nb_samples;
1961
1962                 out_size = av_samples_get_buffer_size(&out_linesize,
1963                                                       is->sdl_channels,
1964                                                       nb_samples,
1965                                                       is->sdl_sample_fmt, 0);
1966                 tmp_out = av_realloc(is->audio_buf1, out_size);
1967                 if (!tmp_out)
1968                     return AVERROR(ENOMEM);
1969                 is->audio_buf1 = tmp_out;
1970
1971                 out_samples = avresample_convert(is->avr,
1972                                                  &is->audio_buf1,
1973                                                  out_linesize, nb_samples,
1974                                                  is->frame->data,
1975                                                  is->frame->linesize[0],
1976                                                  is->frame->nb_samples);
1977                 if (out_samples < 0) {
1978                     fprintf(stderr, "avresample_convert() failed\n");
1979                     break;
1980                 }
1981                 is->audio_buf = is->audio_buf1;
1982                 data_size = out_samples * osize * is->sdl_channels;
1983             } else {
1984                 is->audio_buf = is->frame->data[0];
1985             }
1986
1987             /* if no pts, then compute it */
1988             pts = is->audio_clock;
1989             *pts_ptr = pts;
1990             n = is->sdl_channels * av_get_bytes_per_sample(is->sdl_sample_fmt);
1991             is->audio_clock += (double)data_size /
1992                 (double)(n * is->sdl_sample_rate);
1993 #ifdef DEBUG
1994             {
1995                 static double last_clock;
1996                 printf("audio: delay=%0.3f clock=%0.3f pts=%0.3f\n",
1997                        is->audio_clock - last_clock,
1998                        is->audio_clock, pts);
1999                 last_clock = is->audio_clock;
2000             }
2001 #endif
2002             return data_size;
2003         }
2004
2005         /* free the current packet */
2006         if (pkt->data)
2007             av_free_packet(pkt);
2008         memset(pkt_temp, 0, sizeof(*pkt_temp));
2009
2010         if (is->paused || is->audioq.abort_request) {
2011             return -1;
2012         }
2013
2014         /* read next packet */
2015         if ((new_packet = packet_queue_get(&is->audioq, pkt, 1)) < 0)
2016             return -1;
2017
2018         if (pkt->data == flush_pkt.data) {
2019             avcodec_flush_buffers(dec);
2020             flush_complete = 0;
2021         }
2022
2023         *pkt_temp = *pkt;
2024
2025         /* if a pts is available, update the audio clock with it */
2026         if (pkt->pts != AV_NOPTS_VALUE) {
2027             is->audio_clock = av_q2d(is->audio_st->time_base)*pkt->pts;
2028         }
2029     }
2030 }
2031
2032 /* prepare a new audio buffer */
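     /* Called by SDL from its own audio thread; it must fill exactly 'len'
      * bytes of 'stream'.  If decoding fails, silence is output so the
      * playback timing is preserved. */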
2033 static void sdl_audio_callback(void *opaque, Uint8 *stream, int len)
2034 {
2035     VideoState *is = opaque;
2036     int audio_size, len1;
2037     double pts;
2038
2039     audio_callback_time = av_gettime_relative();
2040
2041     while (len > 0) {
2042         if (is->audio_buf_index >= is->audio_buf_size) {
2043             audio_size = audio_decode_frame(is, &pts);
2044             if (audio_size < 0) {
2045                 /* if error, just output silence */
2046                 is->audio_buf      = is->silence_buf;
2047                 is->audio_buf_size = sizeof(is->silence_buf);
2048             } else {
2049                 if (is->show_audio)
2050                     update_sample_display(is, (int16_t *)is->audio_buf, audio_size);
2051                 audio_size = synchronize_audio(is, (int16_t *)is->audio_buf, audio_size,
2052                                                pts);
2053                 is->audio_buf_size = audio_size;
2054             }
2055             is->audio_buf_index = 0;
2056         }
2057         len1 = is->audio_buf_size - is->audio_buf_index;
2058         if (len1 > len)
2059             len1 = len;
2060         memcpy(stream, (uint8_t *)is->audio_buf + is->audio_buf_index, len1);
2061         len -= len1;
2062         stream += len1;
2063         is->audio_buf_index += len1;
2064     }
2065 }
2066
2067 /* open a given stream. Return 0 if OK */
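     /* Opens the decoder for the given stream, applies the per-stream codec
      * options, sets up SDL audio output for audio streams and spawns the
      * video or subtitle decoding thread for those stream types. */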
2068 static int stream_component_open(VideoState *is, int stream_index)
2069 {
2070     AVFormatContext *ic = is->ic;
2071     AVCodecContext *avctx;
2072     AVCodec *codec;
2073     SDL_AudioSpec wanted_spec, spec;
2074     AVDictionary *opts;
2075     AVDictionaryEntry *t = NULL;
2076     int ret = 0;
2077
2078     if (stream_index < 0 || stream_index >= ic->nb_streams)
2079         return -1;
2080     avctx = ic->streams[stream_index]->codec;
2081
2082     opts = filter_codec_opts(codec_opts, avctx->codec_id, ic, ic->streams[stream_index], NULL);
2083
2084     codec = avcodec_find_decoder(avctx->codec_id);
2085     avctx->workaround_bugs   = workaround_bugs;
2086     avctx->idct_algo         = idct;
2087     avctx->skip_frame        = skip_frame;
2088     avctx->skip_idct         = skip_idct;
2089     avctx->skip_loop_filter  = skip_loop_filter;
2090     avctx->error_concealment = error_concealment;
2091
2092     if (fast)   avctx->flags2 |= CODEC_FLAG2_FAST;
2093
2094     if (!av_dict_get(opts, "threads", NULL, 0))
2095         av_dict_set(&opts, "threads", "auto", 0);
2096     if (avctx->codec_type == AVMEDIA_TYPE_VIDEO)
2097         av_dict_set(&opts, "refcounted_frames", "1", 0);
2098     if (!codec ||
2099         (ret = avcodec_open2(avctx, codec, &opts)) < 0) {
2100         goto fail;
2101     }
2102     if ((t = av_dict_get(opts, "", NULL, AV_DICT_IGNORE_SUFFIX))) {
2103         av_log(NULL, AV_LOG_ERROR, "Option %s not found.\n", t->key);
2104         ret =  AVERROR_OPTION_NOT_FOUND;
2105         goto fail;
2106     }
2107
2108     /* prepare audio output */
2109     if (avctx->codec_type == AVMEDIA_TYPE_AUDIO) {
2110         is->sdl_sample_rate = avctx->sample_rate;
2111
2112         if (!avctx->channel_layout)
2113             avctx->channel_layout = av_get_default_channel_layout(avctx->channels);
2114         if (!avctx->channel_layout) {
2115             fprintf(stderr, "unable to guess channel layout\n");
2116             ret = AVERROR_INVALIDDATA;
2117             goto fail;
2118         }
2119         if (avctx->channels == 1)
2120             is->sdl_channel_layout = AV_CH_LAYOUT_MONO;
2121         else
2122             is->sdl_channel_layout = AV_CH_LAYOUT_STEREO;
2123         is->sdl_channels = av_get_channel_layout_nb_channels(is->sdl_channel_layout);
2124
2125         wanted_spec.format = AUDIO_S16SYS;
2126         wanted_spec.freq = is->sdl_sample_rate;
2127         wanted_spec.channels = is->sdl_channels;
2128         wanted_spec.silence = 0;
2129         wanted_spec.samples = SDL_AUDIO_BUFFER_SIZE;
2130         wanted_spec.callback = sdl_audio_callback;
2131         wanted_spec.userdata = is;
2132         if (SDL_OpenAudio(&wanted_spec, &spec) < 0) {
2133             fprintf(stderr, "SDL_OpenAudio: %s\n", SDL_GetError());
2134             ret = AVERROR_UNKNOWN;
2135             goto fail;
2136         }
2137         is->audio_hw_buf_size = spec.size;
2138         is->sdl_sample_fmt          = AV_SAMPLE_FMT_S16;
2139         is->resample_sample_fmt     = is->sdl_sample_fmt;
2140         is->resample_channel_layout = avctx->channel_layout;
2141         is->resample_sample_rate    = avctx->sample_rate;
2142     }
2143
2144     ic->streams[stream_index]->discard = AVDISCARD_DEFAULT;
2145     switch (avctx->codec_type) {
2146     case AVMEDIA_TYPE_AUDIO:
2147         is->audio_stream = stream_index;
2148         is->audio_st = ic->streams[stream_index];
2149         is->audio_buf_size  = 0;
2150         is->audio_buf_index = 0;
2151
2152         /* init averaging filter */
2153         is->audio_diff_avg_coef  = exp(log(0.01) / AUDIO_DIFF_AVG_NB);
2154         is->audio_diff_avg_count = 0;
2155         /* since we do not have a precise enough audio FIFO fullness measure,
2156            we correct audio sync only if the error is larger than this threshold */
2157         is->audio_diff_threshold = 2.0 * SDL_AUDIO_BUFFER_SIZE / avctx->sample_rate;
2158
2159         memset(&is->audio_pkt, 0, sizeof(is->audio_pkt));
2160         packet_queue_init(&is->audioq);
2161         SDL_PauseAudio(0);
2162         break;
2163     case AVMEDIA_TYPE_VIDEO:
2164         is->video_stream = stream_index;
2165         is->video_st = ic->streams[stream_index];
2166
2167         packet_queue_init(&is->videoq);
2168         is->video_tid = SDL_CreateThread(video_thread, is);
2169         break;
2170     case AVMEDIA_TYPE_SUBTITLE:
2171         is->subtitle_stream = stream_index;
2172         is->subtitle_st = ic->streams[stream_index];
2173         packet_queue_init(&is->subtitleq);
2174
2175         is->subtitle_tid = SDL_CreateThread(subtitle_thread, is);
2176         break;
2177     default:
2178         break;
2179     }
2180
2181 fail:
2182     av_dict_free(&opts);
2183
2184     return ret;
2185 }
2186
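     /* undo stream_component_open(): abort the stream's packet queue, unblock
      * and join the corresponding decoding thread (or close the SDL audio
      * device), then free the remaining per-stream state and close the codec */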
2187 static void stream_component_close(VideoState *is, int stream_index)
2188 {
2189     AVFormatContext *ic = is->ic;
2190     AVCodecContext *avctx;
2191
2192     if (stream_index < 0 || stream_index >= ic->nb_streams)
2193         return;
2194     avctx = ic->streams[stream_index]->codec;
2195
2196     switch (avctx->codec_type) {
2197     case AVMEDIA_TYPE_AUDIO:
2198         packet_queue_abort(&is->audioq);
2199
2200         SDL_CloseAudio();
2201
2202         packet_queue_end(&is->audioq);
2203         av_free_packet(&is->audio_pkt);
2204         if (is->avr)
2205             avresample_free(&is->avr);
2206         av_freep(&is->audio_buf1);
2207         is->audio_buf = NULL;
2208         av_frame_free(&is->frame);
2209
2210         if (is->rdft) {
2211             av_rdft_end(is->rdft);
2212             av_freep(&is->rdft_data);
2213             is->rdft = NULL;
2214             is->rdft_bits = 0;
2215         }
2216         break;
2217     case AVMEDIA_TYPE_VIDEO:
2218         packet_queue_abort(&is->videoq);
2219
2220         /* note: we also signal this condition to make sure we unblock the
2221            video thread in all cases */
2222         SDL_LockMutex(is->pictq_mutex);
2223         SDL_CondSignal(is->pictq_cond);
2224         SDL_UnlockMutex(is->pictq_mutex);
2225
2226         SDL_WaitThread(is->video_tid, NULL);
2227
2228         packet_queue_end(&is->videoq);
2229         break;
2230     case AVMEDIA_TYPE_SUBTITLE:
2231         packet_queue_abort(&is->subtitleq);
2232
2233         /* note: we also signal this condition to make sure we unblock the
2234            subtitle thread in all cases */
2235         SDL_LockMutex(is->subpq_mutex);
2236         is->subtitle_stream_changed = 1;
2237
2238         SDL_CondSignal(is->subpq_cond);
2239         SDL_UnlockMutex(is->subpq_mutex);
2240
2241         SDL_WaitThread(is->subtitle_tid, NULL);
2242
2243         packet_queue_end(&is->subtitleq);
2244         break;
2245     default:
2246         break;
2247     }
2248
2249     ic->streams[stream_index]->discard = AVDISCARD_ALL;
2250     avcodec_close(avctx);
2251     switch (avctx->codec_type) {
2252     case AVMEDIA_TYPE_AUDIO:
2253         is->audio_st = NULL;
2254         is->audio_stream = -1;
2255         break;
2256     case AVMEDIA_TYPE_VIDEO:
2257         is->video_st = NULL;
2258         is->video_stream = -1;
2259         break;
2260     case AVMEDIA_TYPE_SUBTITLE:
2261         is->subtitle_st = NULL;
2262         is->subtitle_stream = -1;
2263         break;
2264     default:
2265         break;
2266     }
2267 }
2268
2269 /* since we have only one decoding thread, we can use a global
2270    variable instead of a thread local variable */
2271 static VideoState *global_video_state;
2272
2273 static int decode_interrupt_cb(void *ctx)
2274 {
2275     return global_video_state && global_video_state->abort_request;
2276 }
2277
2278 /* this thread gets the stream from the disk or the network */
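     /* It opens the input, probes the stream parameters, selects the best
      * audio/video/subtitle streams and then loops: honouring pause and seek
      * requests, throttling when the packet queues are full, and demuxing
      * packets into the per-stream queues until EOF or an abort request. */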
2279 static int decode_thread(void *arg)
2280 {
2281     VideoState *is = arg;
2282     AVFormatContext *ic = NULL;
2283     int err, i, ret;
2284     int st_index[AVMEDIA_TYPE_NB];
2285     AVPacket pkt1, *pkt = &pkt1;
2286     int eof = 0;
2287     int pkt_in_play_range = 0;
2288     AVDictionaryEntry *t;
2289     AVDictionary **opts;
2290     int orig_nb_streams;
2291
2292     memset(st_index, -1, sizeof(st_index));
2293     is->video_stream = -1;
2294     is->audio_stream = -1;
2295     is->subtitle_stream = -1;
2296
2297     global_video_state = is;
2298
2299     ic = avformat_alloc_context();
2300     if (!ic) {
2301         av_log(NULL, AV_LOG_FATAL, "Could not allocate context.\n");
2302         ret = AVERROR(ENOMEM);
2303         goto fail;
2304     }
2305     ic->interrupt_callback.callback = decode_interrupt_cb;
2306     err = avformat_open_input(&ic, is->filename, is->iformat, &format_opts);
2307     if (err < 0) {
2308         print_error(is->filename, err);
2309         ret = -1;
2310         goto fail;
2311     }
2312     if ((t = av_dict_get(format_opts, "", NULL, AV_DICT_IGNORE_SUFFIX))) {
2313         av_log(NULL, AV_LOG_ERROR, "Option %s not found.\n", t->key);
2314         ret = AVERROR_OPTION_NOT_FOUND;
2315         goto fail;
2316     }
2317     is->ic = ic;
2318
2319     if (genpts)
2320         ic->flags |= AVFMT_FLAG_GENPTS;
2321
2322     opts = setup_find_stream_info_opts(ic, codec_opts);
2323     orig_nb_streams = ic->nb_streams;
2324
2325     err = avformat_find_stream_info(ic, opts);
2326
2327     for (i = 0; i < orig_nb_streams; i++)
2328         av_dict_free(&opts[i]);
2329     av_freep(&opts);
2330
2331     if (err < 0) {
2332         fprintf(stderr, "%s: could not find codec parameters\n", is->filename);
2333         ret = -1;
2334         goto fail;
2335     }
2336
2337     if (ic->pb)
2338         ic->pb->eof_reached = 0; // FIXME: hack, avplay should probably not use url_feof() to test for the end
2339
2340     if (seek_by_bytes < 0)
2341         seek_by_bytes = !!(ic->iformat->flags & AVFMT_TS_DISCONT);
2342
2343     /* if seeking requested, we execute it */
2344     if (start_time != AV_NOPTS_VALUE) {
2345         int64_t timestamp;
2346
2347         timestamp = start_time;
2348         /* add the stream start time */
2349         if (ic->start_time != AV_NOPTS_VALUE)
2350             timestamp += ic->start_time;
2351         ret = avformat_seek_file(ic, -1, INT64_MIN, timestamp, INT64_MAX, 0);
2352         if (ret < 0) {
2353             fprintf(stderr, "%s: could not seek to position %0.3f\n",
2354                     is->filename, (double)timestamp / AV_TIME_BASE);
2355         }
2356     }
2357
2358     for (i = 0; i < ic->nb_streams; i++)
2359         ic->streams[i]->discard = AVDISCARD_ALL;
2360     if (!video_disable)
2361         st_index[AVMEDIA_TYPE_VIDEO] =
2362             av_find_best_stream(ic, AVMEDIA_TYPE_VIDEO,
2363                                 wanted_stream[AVMEDIA_TYPE_VIDEO], -1, NULL, 0);
2364     if (!audio_disable)
2365         st_index[AVMEDIA_TYPE_AUDIO] =
2366             av_find_best_stream(ic, AVMEDIA_TYPE_AUDIO,
2367                                 wanted_stream[AVMEDIA_TYPE_AUDIO],
2368                                 st_index[AVMEDIA_TYPE_VIDEO],
2369                                 NULL, 0);
2370     if (!video_disable)
2371         st_index[AVMEDIA_TYPE_SUBTITLE] =
2372             av_find_best_stream(ic, AVMEDIA_TYPE_SUBTITLE,
2373                                 wanted_stream[AVMEDIA_TYPE_SUBTITLE],
2374                                 (st_index[AVMEDIA_TYPE_AUDIO] >= 0 ?
2375                                  st_index[AVMEDIA_TYPE_AUDIO] :
2376                                  st_index[AVMEDIA_TYPE_VIDEO]),
2377                                 NULL, 0);
2378     if (show_status) {
2379         av_dump_format(ic, 0, is->filename, 0);
2380     }
2381
2382     /* open the streams */
2383     if (st_index[AVMEDIA_TYPE_AUDIO] >= 0) {
2384         stream_component_open(is, st_index[AVMEDIA_TYPE_AUDIO]);
2385     }
2386
2387     ret = -1;
2388     if (st_index[AVMEDIA_TYPE_VIDEO] >= 0) {
2389         ret = stream_component_open(is, st_index[AVMEDIA_TYPE_VIDEO]);
2390     }
2391     is->refresh_tid = SDL_CreateThread(refresh_thread, is);
2392     if (ret < 0) {
2393         if (!display_disable)
2394             is->show_audio = 2;
2395     }
2396
2397     if (st_index[AVMEDIA_TYPE_SUBTITLE] >= 0) {
2398         stream_component_open(is, st_index[AVMEDIA_TYPE_SUBTITLE]);
2399     }
2400
2401     if (is->video_stream < 0 && is->audio_stream < 0) {
2402         fprintf(stderr, "%s: could not open codecs\n", is->filename);
2403         ret = -1;
2404         goto fail;
2405     }
2406
2407     for (;;) {
2408         if (is->abort_request)
2409             break;
2410         if (is->paused != is->last_paused) {
2411             is->last_paused = is->paused;
2412             if (is->paused)
2413                 is->read_pause_return = av_read_pause(ic);
2414             else
2415                 av_read_play(ic);
2416         }
2417 #if CONFIG_RTSP_DEMUXER
2418         if (is->paused && !strcmp(ic->iformat->name, "rtsp")) {
2419             /* wait 10 ms to avoid trying to get another packet */
2420             /* XXX: horrible */
2421             SDL_Delay(10);
2422             continue;
2423         }
2424 #endif
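             /* handle a pending seek request: seek in the demuxer, then flush
              * every packet queue and push flush_pkt so each decoder flushes
              * its internal buffers */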
2425         if (is->seek_req) {
2426             int64_t seek_target = is->seek_pos;
2427             int64_t seek_min    = is->seek_rel > 0 ? seek_target - is->seek_rel + 2: INT64_MIN;
2428             int64_t seek_max    = is->seek_rel < 0 ? seek_target - is->seek_rel - 2: INT64_MAX;
2429 // FIXME the +-2 is due to rounding being not done in the correct direction in generation
2430 //      of the seek_pos/seek_rel variables
2431
2432             ret = avformat_seek_file(is->ic, -1, seek_min, seek_target, seek_max, is->seek_flags);
2433             if (ret < 0) {
2434                 fprintf(stderr, "%s: error while seeking\n", is->ic->filename);
2435             } else {
2436                 if (is->audio_stream >= 0) {
2437                     packet_queue_flush(&is->audioq);
2438                     packet_queue_put(&is->audioq, &flush_pkt);
2439                 }
2440                 if (is->subtitle_stream >= 0) {
2441                     packet_queue_flush(&is->subtitleq);
2442                     packet_queue_put(&is->subtitleq, &flush_pkt);
2443                 }
2444                 if (is->video_stream >= 0) {
2445                     packet_queue_flush(&is->videoq);
2446                     packet_queue_put(&is->videoq, &flush_pkt);
2447                 }
2448             }
2449             is->seek_req = 0;
2450             eof = 0;
2451         }
2452
2453         /* if the queues are full, no need to read more */
2454         if (!infinite_buffer &&
2455               (is->audioq.size + is->videoq.size + is->subtitleq.size > MAX_QUEUE_SIZE
2456             || (   (is->audioq   .size  > MIN_AUDIOQ_SIZE || is->audio_stream < 0)
2457                 && (is->videoq   .nb_packets > MIN_FRAMES || is->video_stream < 0)
2458                 && (is->subtitleq.nb_packets > MIN_FRAMES || is->subtitle_stream < 0)))) {
2459             /* wait 10 ms */
2460             SDL_Delay(10);
2461             continue;
2462         }
2463         if (eof) {
2464             if (is->video_stream >= 0) {
2465                 av_init_packet(pkt);
2466                 pkt->data = NULL;
2467                 pkt->size = 0;
2468                 pkt->stream_index = is->video_stream;
2469                 packet_queue_put(&is->videoq, pkt);
2470             }
2471             if (is->audio_stream >= 0 &&
2472                 is->audio_st->codec->codec->capabilities & CODEC_CAP_DELAY) {
2473                 av_init_packet(pkt);
2474                 pkt->data = NULL;
2475                 pkt->size = 0;
2476                 pkt->stream_index = is->audio_stream;
2477                 packet_queue_put(&is->audioq, pkt);
2478             }
2479             SDL_Delay(10);
2480             if (is->audioq.size + is->videoq.size + is->subtitleq.size == 0) {
2481                 if (loop != 1 && (!loop || --loop)) {
2482                     stream_seek(cur_stream, start_time != AV_NOPTS_VALUE ? start_time : 0, 0, 0);
2483                 } else if (!noautoexit) {
2484                     ret = AVERROR_EOF;
2485                     goto fail;
2486                 }
2487             }
2488             continue;
2489         }
2490         ret = av_read_frame(ic, pkt);
2491         if (ret < 0) {
2492             if (ret == AVERROR_EOF || (ic->pb && ic->pb->eof_reached))
2493                 eof = 1;
2494             if (ic->pb && ic->pb->error)
2495                 break;
2496             SDL_Delay(100); /* wait for user event */
2497             continue;
2498         }
2499         /* check if the packet is in the play range specified by the user, then queue it, otherwise discard it */
2500         pkt_in_play_range = duration == AV_NOPTS_VALUE ||
2501                 (pkt->pts - ic->streams[pkt->stream_index]->start_time) *
2502                 av_q2d(ic->streams[pkt->stream_index]->time_base) -
2503                 (double)(start_time != AV_NOPTS_VALUE ? start_time : 0) / 1000000
2504                 <= ((double)duration / 1000000);
2505         if (pkt->stream_index == is->audio_stream && pkt_in_play_range) {
2506             packet_queue_put(&is->audioq, pkt);
2507         } else if (pkt->stream_index == is->video_stream && pkt_in_play_range) {
2508             packet_queue_put(&is->videoq, pkt);
2509         } else if (pkt->stream_index == is->subtitle_stream && pkt_in_play_range) {
2510             packet_queue_put(&is->subtitleq, pkt);
2511         } else {
2512             av_free_packet(pkt);
2513         }
2514     }
2515     /* wait until the end */
2516     while (!is->abort_request) {
2517         SDL_Delay(100);
2518     }
2519
2520     ret = 0;
2521  fail:
2522     /* disable interrupting */
2523     global_video_state = NULL;
2524
2525     /* close each stream */
2526     if (is->audio_stream >= 0)
2527         stream_component_close(is, is->audio_stream);
2528     if (is->video_stream >= 0)
2529         stream_component_close(is, is->video_stream);
2530     if (is->subtitle_stream >= 0)
2531         stream_component_close(is, is->subtitle_stream);
2532     if (is->ic) {
2533         avformat_close_input(&is->ic);
2534     }
2535
2536     if (ret != 0) {
2537         SDL_Event event;
2538
2539         event.type = FF_QUIT_EVENT;
2540         event.user.data1 = is;
2541         SDL_PushEvent(&event);
2542     }
2543     return 0;
2544 }
2545
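     /* allocate a VideoState for the given input, create the picture and
      * subpicture queue primitives and spawn the demuxing (decode) thread;
      * returns NULL on failure */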
2546 static VideoState *stream_open(const char *filename, AVInputFormat *iformat)
2547 {
2548     VideoState *is;
2549
2550     is = av_mallocz(sizeof(VideoState));
2551     if (!is)
2552         return NULL;
2553     av_strlcpy(is->filename, filename, sizeof(is->filename));
2554     is->iformat = iformat;
2555     is->ytop    = 0;
2556     is->xleft   = 0;
2557
2558     /* start video display */
2559     is->pictq_mutex = SDL_CreateMutex();
2560     is->pictq_cond  = SDL_CreateCond();
2561
2562     is->subpq_mutex = SDL_CreateMutex();
2563     is->subpq_cond  = SDL_CreateCond();
2564
2565     is->av_sync_type = av_sync_type;
2566     is->parse_tid    = SDL_CreateThread(decode_thread, is);
2567     if (!is->parse_tid) {
2568         av_free(is);
2569         return NULL;
2570     }
2571     return is;
2572 }
2573
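     /* switch to the next stream of the given type, wrapping around; for
      * subtitles, wrapping past the last stream disables them */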
2574 static void stream_cycle_channel(VideoState *is, int codec_type)
2575 {
2576     AVFormatContext *ic = is->ic;
2577     int start_index, stream_index;
2578     AVStream *st;
2579
2580     if (codec_type == AVMEDIA_TYPE_VIDEO)
2581         start_index = is->video_stream;
2582     else if (codec_type == AVMEDIA_TYPE_AUDIO)
2583         start_index = is->audio_stream;
2584     else
2585         start_index = is->subtitle_stream;
2586     if (start_index < (codec_type == AVMEDIA_TYPE_SUBTITLE ? -1 : 0))
2587         return;
2588     stream_index = start_index;
2589     for (;;) {
2590         if (++stream_index >= is->ic->nb_streams)
2591         {
2592             if (codec_type == AVMEDIA_TYPE_SUBTITLE)
2593             {
2594                 stream_index = -1;
2595                 goto the_end;
2596             } else
2597                 stream_index = 0;
2598         }
2599         if (stream_index == start_index)
2600             return;
2601         st = ic->streams[stream_index];
2602         if (st->codec->codec_type == codec_type) {
2603             /* check that parameters are OK */
2604             switch (codec_type) {
2605             case AVMEDIA_TYPE_AUDIO:
2606                 if (st->codec->sample_rate != 0 &&
2607                     st->codec->channels != 0)
2608                     goto the_end;
2609                 break;
2610             case AVMEDIA_TYPE_VIDEO:
2611             case AVMEDIA_TYPE_SUBTITLE:
2612                 goto the_end;
2613             default:
2614                 break;
2615             }
2616         }
2617     }
2618  the_end:
2619     stream_component_close(is, start_index);
2620     stream_component_open(is, stream_index);
2621 }
2622
2623
2624 static void toggle_full_screen(void)
2625 {
2626 #if defined(__APPLE__) && SDL_VERSION_ATLEAST(1, 2, 14)
2627     /* OS X needs to empty the picture_queue */
2628     int i;
2629     for (i = 0; i < VIDEO_PICTURE_QUEUE_SIZE; i++)
2630         cur_stream->pictq[i].reallocate = 1;
2631 #endif
2632     is_full_screen = !is_full_screen;
2633     video_open(cur_stream);
2634 }
2635
2636 static void toggle_pause(void)
2637 {
2638     if (cur_stream)
2639         stream_pause(cur_stream);
2640     step = 0;
2641 }
2642
2643 static void step_to_next_frame(void)
2644 {
2645     if (cur_stream) {
2646         /* if the stream is paused, unpause it, then step */
2647         if (cur_stream->paused)
2648             stream_pause(cur_stream);
2649     }
2650     step = 1;
2651 }
2652
2653 static void toggle_audio_display(void)
2654 {
2655     if (cur_stream) {
2656         int bgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0x00);
2657         cur_stream->show_audio = (cur_stream->show_audio + 1) % 3;
2658         fill_rectangle(screen,
2659                        cur_stream->xleft, cur_stream->ytop, cur_stream->width, cur_stream->height,
2660                        bgcolor);
2661         SDL_UpdateRect(screen, cur_stream->xleft, cur_stream->ytop, cur_stream->width, cur_stream->height);
2662     }
2663 }
2664
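     /* find the chapter that contains the current master clock position and
      * seek 'incr' chapters forward or backward from it */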
2665 static void seek_chapter(VideoState *is, int incr)
2666 {
2667     int64_t pos = get_master_clock(is) * AV_TIME_BASE;
2668     int i;
2669
2670     if (!is->ic->nb_chapters)
2671         return;
2672
2673     /* find the current chapter */
2674     for (i = 0; i < is->ic->nb_chapters; i++) {
2675         AVChapter *ch = is->ic->chapters[i];
2676         if (av_compare_ts(pos, AV_TIME_BASE_Q, ch->start, ch->time_base) < 0) {
2677             i--;
2678             break;
2679         }
2680     }
2681
2682     i += incr;
2683     i = FFMAX(i, 0);
2684     if (i >= is->ic->nb_chapters)
2685         return;
2686
2687     av_log(NULL, AV_LOG_VERBOSE, "Seeking to chapter %d.\n", i);
2688     stream_seek(is, av_rescale_q(is->ic->chapters[i]->start, is->ic->chapters[i]->time_base,
2689                                  AV_TIME_BASE_Q), 0, 0);
2690 }
2691
2692 /* handle an event sent by the GUI */
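     /* Keyboard seeks jump by +/-10 seconds (left/right) or +/-60 seconds
      * (up/down); a mouse click seeks to the clicked fraction of the file,
      * by bytes when byte seeking is enabled or the duration is unknown. */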
2693 static void event_loop(void)
2694 {
2695     SDL_Event event;
2696     double incr, pos, frac;
2697
2698     for (;;) {
2699         double x;
2700         SDL_WaitEvent(&event);
2701         switch (event.type) {
2702         case SDL_KEYDOWN:
2703             if (exit_on_keydown) {
2704                 do_exit();
2705                 break;
2706             }
2707             switch (event.key.keysym.sym) {
2708             case SDLK_ESCAPE:
2709             case SDLK_q:
2710                 do_exit();
2711                 break;
2712             case SDLK_f:
2713                 toggle_full_screen();
2714                 break;
2715             case SDLK_p:
2716             case SDLK_SPACE:
2717                 toggle_pause();
2718                 break;
2719             case SDLK_s: // S: Step to next frame
2720                 step_to_next_frame();
2721                 break;
2722             case SDLK_a:
2723                 if (cur_stream)
2724                     stream_cycle_channel(cur_stream, AVMEDIA_TYPE_AUDIO);
2725                 break;
2726             case SDLK_v:
2727                 if (cur_stream)
2728                     stream_cycle_channel(cur_stream, AVMEDIA_TYPE_VIDEO);
2729                 break;
2730             case SDLK_t:
2731                 if (cur_stream)
2732                     stream_cycle_channel(cur_stream, AVMEDIA_TYPE_SUBTITLE);
2733                 break;
2734             case SDLK_w:
2735                 toggle_audio_display();
2736                 break;
2737             case SDLK_PAGEUP:
2738                 seek_chapter(cur_stream, 1);
2739                 break;
2740             case SDLK_PAGEDOWN:
2741                 seek_chapter(cur_stream, -1);
2742                 break;
2743             case SDLK_LEFT:
2744                 incr = -10.0;
2745                 goto do_seek;
2746             case SDLK_RIGHT:
2747                 incr = 10.0;
2748                 goto do_seek;
2749             case SDLK_UP:
2750                 incr = 60.0;
2751                 goto do_seek;
2752             case SDLK_DOWN:
2753                 incr = -60.0;
2754             do_seek:
2755                 if (cur_stream) {
2756                     if (seek_by_bytes) {
2757                         if (cur_stream->video_stream >= 0 && cur_stream->video_current_pos >= 0) {
2758                             pos = cur_stream->video_current_pos;
2759                         } else if (cur_stream->audio_stream >= 0 && cur_stream->audio_pkt.pos >= 0) {
2760                             pos = cur_stream->audio_pkt.pos;
2761                         } else
2762                             pos = avio_tell(cur_stream->ic->pb);
2763                         if (cur_stream->ic->bit_rate)
2764                             incr *= cur_stream->ic->bit_rate / 8.0;
2765                         else
2766                             incr *= 180000.0;
2767                         pos += incr;
2768                         stream_seek(cur_stream, pos, incr, 1);
2769                     } else {
2770                         pos = get_master_clock(cur_stream);
2771                         pos += incr;
2772                         stream_seek(cur_stream, (int64_t)(pos * AV_TIME_BASE), (int64_t)(incr * AV_TIME_BASE), 0);
2773                     }
2774                 }
2775                 break;
2776             default:
2777                 break;
2778             }
2779             break;
2780         case SDL_MOUSEBUTTONDOWN:
2781             if (exit_on_mousedown) {
2782                 do_exit();
2783                 break;
2784             }
2785         case SDL_MOUSEMOTION:
2786             if (event.type == SDL_MOUSEBUTTONDOWN) {
2787                 x = event.button.x;
2788             } else {
2789                 if (event.motion.state != SDL_PRESSED)
2790                     break;
2791                 x = event.motion.x;
2792             }
2793             if (cur_stream) {
2794                 if (seek_by_bytes || cur_stream->ic->duration <= 0) {
2795                     uint64_t size =  avio_size(cur_stream->ic->pb);
2796                     stream_seek(cur_stream, size*x/cur_stream->width, 0, 1);
2797                 } else {
2798                     int64_t ts;
2799                     int ns, hh, mm, ss;
2800                     int tns, thh, tmm, tss;
2801                     tns  = cur_stream->ic->duration / 1000000LL;
2802                     thh  = tns / 3600;
2803                     tmm  = (tns % 3600) / 60;
2804                     tss  = (tns % 60);
2805                     frac = x / cur_stream->width;
2806                     ns   = frac * tns;
2807                     hh   = ns / 3600;
2808                     mm   = (ns % 3600) / 60;
2809                     ss   = (ns % 60);
2810                     fprintf(stderr, "Seek to %2.0f%% (%2d:%02d:%02d) of total duration (%2d:%02d:%02d)       \n", frac*100,
2811                             hh, mm, ss, thh, tmm, tss);
2812                     ts = frac * cur_stream->ic->duration;
2813                     if (cur_stream->ic->start_time != AV_NOPTS_VALUE)
2814                         ts += cur_stream->ic->start_time;
2815                     stream_seek(cur_stream, ts, 0, 0);
2816                 }
2817             }
2818             break;
2819         case SDL_VIDEORESIZE:
2820             if (cur_stream) {
2821                 screen = SDL_SetVideoMode(event.resize.w, event.resize.h, 0,
2822                                           SDL_HWSURFACE|SDL_RESIZABLE|SDL_ASYNCBLIT|SDL_HWACCEL);
2823                 screen_width  = cur_stream->width  = event.resize.w;
2824                 screen_height = cur_stream->height = event.resize.h;
2825             }
2826             break;
2827         case SDL_QUIT:
2828         case FF_QUIT_EVENT:
2829             do_exit();
2830             break;
2831         case FF_ALLOC_EVENT:
2832             video_open(event.user.data1);
2833             alloc_picture(event.user.data1);
2834             break;
2835         case FF_REFRESH_EVENT:
2836             video_refresh_timer(event.user.data1);
2837             cur_stream->refresh = 0;
2838             break;
2839         default:
2840             break;
2841         }
2842     }
2843 }
2844
2845 static int opt_frame_size(void *optctx, const char *opt, const char *arg)
2846 {
2847     av_log(NULL, AV_LOG_ERROR,
2848            "Option '%s' has been removed, use private format options instead\n", opt);
2849     return AVERROR(EINVAL);
2850 }
2851
2852 static int opt_width(void *optctx, const char *opt, const char *arg)
2853 {
2854     screen_width = parse_number_or_die(opt, arg, OPT_INT64, 1, INT_MAX);
2855     return 0;
2856 }
2857
2858 static int opt_height(void *optctx, const char *opt, const char *arg)
2859 {
2860     screen_height = parse_number_or_die(opt, arg, OPT_INT64, 1, INT_MAX);
2861     return 0;
2862 }
2863
2864 static int opt_format(void *optctx, const char *opt, const char *arg)
2865 {
2866     file_iformat = av_find_input_format(arg);
2867     if (!file_iformat) {
2868         fprintf(stderr, "Unknown input format: %s\n", arg);
2869         return AVERROR(EINVAL);
2870     }
2871     return 0;
2872 }
2873
2874 static int opt_frame_pix_fmt(void *optctx, const char *opt, const char *arg)
2875 {
2876     av_log(NULL, AV_LOG_ERROR,
2877            "Option '%s' has been removed, use private format options instead\n", opt);
2878     return AVERROR(EINVAL);
2879 }
2880
2881 static int opt_sync(void *optctx, const char *opt, const char *arg)
2882 {
2883     if (!strcmp(arg, "audio"))
2884         av_sync_type = AV_SYNC_AUDIO_MASTER;
2885     else if (!strcmp(arg, "video"))
2886         av_sync_type = AV_SYNC_VIDEO_MASTER;
2887     else if (!strcmp(arg, "ext"))
2888         av_sync_type = AV_SYNC_EXTERNAL_CLOCK;
2889     else {
2890         fprintf(stderr, "Unknown value for %s: %s\n", opt, arg);
2891         exit(1);
2892     }
2893     return 0;
2894 }
2895
2896 static int opt_seek(void *optctx, const char *opt, const char *arg)
2897 {
2898     start_time = parse_time_or_die(opt, arg, 1);
2899     return 0;
2900 }
2901
2902 static int opt_duration(void *optctx, const char *opt, const char *arg)
2903 {
2904     duration = parse_time_or_die(opt, arg, 1);
2905     return 0;
2906 }
2907
2908 static const OptionDef options[] = {
2909 #include "cmdutils_common_opts.h"
2910     { "x", HAS_ARG, { .func_arg = opt_width }, "force displayed width", "width" },
2911     { "y", HAS_ARG, { .func_arg = opt_height }, "force displayed height", "height" },
2912     { "s", HAS_ARG | OPT_VIDEO, { .func_arg = opt_frame_size }, "set frame size (WxH or abbreviation)", "size" },
2913     { "fs", OPT_BOOL, { &is_full_screen }, "force full screen" },
2914     { "an", OPT_BOOL, { &audio_disable }, "disable audio" },
2915     { "vn", OPT_BOOL, { &video_disable }, "disable video" },
2916     { "ast", OPT_INT | HAS_ARG | OPT_EXPERT, { &wanted_stream[AVMEDIA_TYPE_AUDIO] }, "select desired audio stream", "stream_number" },
2917     { "vst", OPT_INT | HAS_ARG | OPT_EXPERT, { &wanted_stream[AVMEDIA_TYPE_VIDEO] }, "select desired video stream", "stream_number" },
2918     { "sst", OPT_INT | HAS_ARG | OPT_EXPERT, { &wanted_stream[AVMEDIA_TYPE_SUBTITLE] }, "select desired subtitle stream", "stream_number" },
2919     { "ss", HAS_ARG, { .func_arg = opt_seek }, "seek to a given position in seconds", "pos" },
2920     { "t", HAS_ARG, { .func_arg = opt_duration }, "play \"duration\" seconds of audio/video", "duration" },
2921     { "bytes", OPT_INT | HAS_ARG, { &seek_by_bytes }, "seek by bytes 0=off 1=on -1=auto", "val" },
2922     { "nodisp", OPT_BOOL, { &display_disable }, "disable graphical display" },
2923     { "f", HAS_ARG, { .func_arg = opt_format }, "force format", "fmt" },
2924     { "pix_fmt", HAS_ARG | OPT_EXPERT | OPT_VIDEO, { .func_arg = opt_frame_pix_fmt }, "set pixel format", "format" },
2925     { "stats", OPT_BOOL | OPT_EXPERT, { &show_status }, "show status", "" },
2926     { "bug", OPT_INT | HAS_ARG | OPT_EXPERT, { &workaround_bugs }, "workaround bugs", "" },
2927     { "fast", OPT_BOOL | OPT_EXPERT, { &fast }, "non spec compliant optimizations", "" },
2928     { "genpts", OPT_BOOL | OPT_EXPERT, { &genpts }, "generate pts", "" },
2929     { "drp", OPT_INT | HAS_ARG | OPT_EXPERT, { &decoder_reorder_pts }, "let decoder reorder pts 0=off 1=on -1=auto", ""},
2930     { "skiploop", OPT_INT | HAS_ARG | OPT_EXPERT, { &skip_loop_filter }, "", "" },
2931     { "skipframe", OPT_INT | HAS_ARG | OPT_EXPERT, { &skip_frame }, "", "" },
2932     { "skipidct", OPT_INT | HAS_ARG | OPT_EXPERT, { &skip_idct }, "", "" },
2933     { "idct", OPT_INT | HAS_ARG | OPT_EXPERT, { &idct }, "set idct algo",  "algo" },
2934     { "ec", OPT_INT | HAS_ARG | OPT_EXPERT, { &error_concealment }, "set error concealment options",  "bit_mask" },
2935     { "sync", HAS_ARG | OPT_EXPERT, { .func_arg = opt_sync }, "set audio-video sync. type (type=audio/video/ext)", "type" },
2936     { "noautoexit", OPT_BOOL | OPT_EXPERT, { &noautoexit }, "Do not exit at the end of playback", "" },
2937     { "exitonkeydown", OPT_BOOL | OPT_EXPERT, { &exit_on_keydown }, "exit on key down", "" },
2938     { "exitonmousedown", OPT_BOOL | OPT_EXPERT, { &exit_on_mousedown }, "exit on mouse down", "" },
2939     { "loop", OPT_INT | HAS_ARG | OPT_EXPERT, { &loop }, "set number of times the playback shall be looped", "loop count" },
2940     { "framedrop", OPT_BOOL | OPT_EXPERT, { &framedrop }, "drop frames when cpu is too slow", "" },
2941     { "infbuf", OPT_BOOL | OPT_EXPERT, { &infinite_buffer }, "don't limit the input buffer size (useful with realtime streams)", "" },
2942     { "window_title", OPT_STRING | HAS_ARG, { &window_title }, "set window title", "window title" },
2943 #if CONFIG_AVFILTER
2944     { "vf", OPT_STRING | HAS_ARG, { &vfilters }, "video filters", "filter list" },
2945 #endif
2946     { "rdftspeed", OPT_INT | HAS_ARG| OPT_AUDIO | OPT_EXPERT, { &rdftspeed }, "rdft speed", "msecs" },
2947     { "default", HAS_ARG | OPT_AUDIO | OPT_VIDEO | OPT_EXPERT, { opt_default }, "generic catch all option", "" },
2948     { "i", 0, { NULL }, "avconv compatibility dummy option", ""},
2949     { "autorotate", OPT_BOOL, { &autorotate }, "automatically rotate video", "" },
2950     { NULL, },
2951 };
2952
2953 static void show_usage(void)
2954 {
2955     printf("Simple media player\n");
2956     printf("usage: %s [options] input_file\n", program_name);
2957     printf("\n");
2958 }
2959
2960 void show_help_default(const char *opt, const char *arg)
2961 {
2962     av_log_set_callback(log_callback_help);
2963     show_usage();
2964     show_help_options(options, "Main options:", 0, OPT_EXPERT, 0);
2965     show_help_options(options, "Advanced options:", OPT_EXPERT, 0, 0);
2966     printf("\n");
2967     show_help_children(avcodec_get_class(), AV_OPT_FLAG_DECODING_PARAM);
2968     show_help_children(avformat_get_class(), AV_OPT_FLAG_DECODING_PARAM);
2969 #if !CONFIG_AVFILTER
2970     show_help_children(sws_get_class(), AV_OPT_FLAG_ENCODING_PARAM);
2971 #endif
2972     printf("\nWhile playing:\n"
2973            "q, ESC              quit\n"
2974            "f                   toggle full screen\n"
2975            "p, SPC              pause\n"
2976            "a                   cycle audio channel\n"
2977            "v                   cycle video channel\n"
2978            "t                   cycle subtitle channel\n"
2979            "w                   show audio waves\n"
2980            "s                   activate frame-step mode\n"
2981            "left/right          seek backward/forward 10 seconds\n"
2982            "down/up             seek backward/forward 1 minute\n"
2983            "mouse click         seek to percentage in file corresponding to fraction of width\n"
2984            );
2985 }
2986
2987 static void opt_input_file(void *optctx, const char *filename)
2988 {
2989     if (input_filename) {
2990         fprintf(stderr, "Argument '%s' provided as input filename, but '%s' was already specified.\n",
2991                 filename, input_filename);
2992         exit(1);
2993     }
2994     if (!strcmp(filename, "-"))
2995         filename = "pipe:";
2996     input_filename = filename;
2997 }
2998
2999 /* program entry point */
3000 int main(int argc, char **argv)
3001 {
3002     int flags;
3003
3004     av_log_set_flags(AV_LOG_SKIP_REPEATED);
3005     parse_loglevel(argc, argv, options);
3006
3007     /* register all codecs, demux and protocols */
3008     avcodec_register_all();
3009 #if CONFIG_AVDEVICE
3010     avdevice_register_all();
3011 #endif
3012 #if CONFIG_AVFILTER
3013     avfilter_register_all();
3014 #endif
3015     av_register_all();
3016     avformat_network_init();
3017
3018     init_opts();
3019
3020     show_banner();
3021
3022     parse_options(NULL, argc, argv, options, opt_input_file);
3023
3024     if (!input_filename) {
3025         show_usage();
3026         fprintf(stderr, "An input file must be specified\n");
3027         fprintf(stderr, "Use -h to get full help or, even better, run 'man %s'\n", program_name);
3028         exit(1);
3029     }
3030
3031     if (display_disable) {
3032         video_disable = 1;
3033     }
3034     flags = SDL_INIT_VIDEO | SDL_INIT_AUDIO | SDL_INIT_TIMER;
3035 #if !defined(__MINGW32__) && !defined(__APPLE__)
3036     flags |= SDL_INIT_EVENTTHREAD; /* Not supported on Windows or Mac OS X */
3037 #endif
3038     if (SDL_Init (flags)) {
3039         fprintf(stderr, "Could not initialize SDL - %s\n", SDL_GetError());
3040         exit(1);
3041     }
3042
3043     if (!display_disable) {
3044         const SDL_VideoInfo *vi = SDL_GetVideoInfo();
3045         fs_screen_width = vi->current_w;
3046         fs_screen_height = vi->current_h;
3047     }
3048
3049     SDL_EventState(SDL_ACTIVEEVENT, SDL_IGNORE);
3050     SDL_EventState(SDL_SYSWMEVENT, SDL_IGNORE);
3051     SDL_EventState(SDL_USEREVENT, SDL_IGNORE);
3052
3053     av_init_packet(&flush_pkt);
3054     flush_pkt.data = (uint8_t *)&flush_pkt;
3055
3056     cur_stream = stream_open(input_filename, file_iformat);
3057
3058     event_loop();
3059
3060     /* never returns */
3061
3062     return 0;
3063 }