1 /*
2  * Copyright (c) 2003 Fabrice Bellard
3  *
4  * This file is part of FFmpeg.
5  *
6  * FFmpeg is free software; you can redistribute it and/or
7  * modify it under the terms of the GNU Lesser General Public
8  * License as published by the Free Software Foundation; either
9  * version 2.1 of the License, or (at your option) any later version.
10  *
11  * FFmpeg is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
14  * Lesser General Public License for more details.
15  *
16  * You should have received a copy of the GNU Lesser General Public
17  * License along with FFmpeg; if not, write to the Free Software
18  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
19  */
20
21 /**
22  * @file
23  * simple media player based on the FFmpeg libraries
24  */
25
26 #include "config.h"
27 #include <inttypes.h>
28 #include <math.h>
29 #include <limits.h>
30 #include <signal.h>
31 #include "libavutil/avstring.h"
32 #include "libavutil/colorspace.h"
33 #include "libavutil/mathematics.h"
34 #include "libavutil/pixdesc.h"
35 #include "libavutil/imgutils.h"
36 #include "libavutil/dict.h"
37 #include "libavutil/parseutils.h"
38 #include "libavutil/samplefmt.h"
39 #include "libavutil/avassert.h"
40 #include "libavformat/avformat.h"
41 #include "libavdevice/avdevice.h"
42 #include "libswscale/swscale.h"
43 #include "libavutil/opt.h"
44 #include "libavcodec/avfft.h"
45 #include "libswresample/swresample.h"
46
47 #if CONFIG_AVFILTER
48 # include "libavfilter/avcodec.h"
49 # include "libavfilter/avfilter.h"
50 # include "libavfilter/avfiltergraph.h"
51 # include "libavfilter/buffersink.h"
52 #endif
53
54 #include <SDL.h>
55 #include <SDL_thread.h>
56
57 #include "cmdutils.h"
58
59 #include <unistd.h>
60 #include <assert.h>
61
62 const char program_name[] = "ffplay";
63 const int program_birth_year = 2003;
64
65 #define MAX_QUEUE_SIZE (15 * 1024 * 1024)
66 #define MIN_FRAMES 5
67
68 /* SDL audio buffer size, in samples. Should be small to have precise
69    A/V sync as SDL does not have hardware buffer fullness info. */
70 #define SDL_AUDIO_BUFFER_SIZE 1024
71
72 /* no AV sync correction is done if below the AV sync threshold */
73 #define AV_SYNC_THRESHOLD 0.01
74 /* no AV correction is done if the error is too big */
75 #define AV_NOSYNC_THRESHOLD 10.0
76
77 /* maximum audio speed change to get correct sync */
78 #define SAMPLE_CORRECTION_PERCENT_MAX 10
79
80 /* we use about AUDIO_DIFF_AVG_NB A-V differences to make the average */
81 #define AUDIO_DIFF_AVG_NB   20
82
83 /* NOTE: the size must be big enough to compensate for the hardware audio buffer size */
84 #define SAMPLE_ARRAY_SIZE (2 * 65536)
85
86 static int sws_flags = SWS_BICUBIC;
87
88 typedef struct PacketQueue {
89     AVPacketList *first_pkt, *last_pkt;
90     int nb_packets;
91     int size;
92     int abort_request;
93     SDL_mutex *mutex;
94     SDL_cond *cond;
95 } PacketQueue;
96
97 #define VIDEO_PICTURE_QUEUE_SIZE 2
98 #define SUBPICTURE_QUEUE_SIZE 4
99
100 typedef struct VideoPicture {
101     double pts;                                  ///< presentation time stamp for this picture
102     int64_t pos;                                 ///< byte position in file
103     int skip;
104     SDL_Overlay *bmp;
105     int width, height; /* source height & width */
106     AVRational sample_aspect_ratio;
107     int allocated;
108     int reallocate;
109
110 #if CONFIG_AVFILTER
111     AVFilterBufferRef *picref;
112 #endif
113 } VideoPicture;
114
115 typedef struct SubPicture {
116     double pts; /* presentation time stamp for this picture */
117     AVSubtitle sub;
118 } SubPicture;
119
120 typedef struct AudioParams {
121     int freq;
122     int channels;
123     int channel_layout;
124     enum AVSampleFormat fmt;
125 } AudioParams;
126
127 enum {
128     AV_SYNC_AUDIO_MASTER, /* default choice */
129     AV_SYNC_VIDEO_MASTER,
130     AV_SYNC_EXTERNAL_CLOCK, /* synchronize to an external clock */
131 };
132
133 typedef struct VideoState {
134     SDL_Thread *read_tid;
135     SDL_Thread *video_tid;
136     SDL_Thread *refresh_tid;
137     AVInputFormat *iformat;
138     int no_background;
139     int abort_request;
140     int force_refresh;
141     int paused;
142     int last_paused;
143     int seek_req;
144     int seek_flags;
145     int64_t seek_pos;
146     int64_t seek_rel;
147     int read_pause_return;
148     AVFormatContext *ic;
149
150     int audio_stream;
151
152     int av_sync_type;
153     double external_clock; /* external clock base */
154     int64_t external_clock_time;
155
156     double audio_clock;
157     double audio_diff_cum; /* used for AV difference average computation */
158     double audio_diff_avg_coef;
159     double audio_diff_threshold;
160     int audio_diff_avg_count;
161     AVStream *audio_st;
162     PacketQueue audioq;
163     int audio_hw_buf_size;
164     DECLARE_ALIGNED(16,uint8_t,audio_buf2)[AVCODEC_MAX_AUDIO_FRAME_SIZE * 4];
165     uint8_t silence_buf[SDL_AUDIO_BUFFER_SIZE];
166     uint8_t *audio_buf;
167     uint8_t *audio_buf1;
168     unsigned int audio_buf_size; /* in bytes */
169     int audio_buf_index; /* in bytes */
170     int audio_write_buf_size;
171     AVPacket audio_pkt_temp;
172     AVPacket audio_pkt;
173     struct AudioParams audio_src;
174     struct AudioParams audio_tgt;
175     struct SwrContext *swr_ctx;
176     double audio_current_pts;
177     double audio_current_pts_drift;
178     int frame_drops_early;
179     int frame_drops_late;
180     AVFrame *frame;
181
182     enum ShowMode {
183         SHOW_MODE_NONE = -1, SHOW_MODE_VIDEO = 0, SHOW_MODE_WAVES, SHOW_MODE_RDFT, SHOW_MODE_NB
184     } show_mode;
185     int16_t sample_array[SAMPLE_ARRAY_SIZE];
186     int sample_array_index;
187     int last_i_start;
188     RDFTContext *rdft;
189     int rdft_bits;
190     FFTSample *rdft_data;
191     int xpos;
192
193     SDL_Thread *subtitle_tid;
194     int subtitle_stream;
195     int subtitle_stream_changed;
196     AVStream *subtitle_st;
197     PacketQueue subtitleq;
198     SubPicture subpq[SUBPICTURE_QUEUE_SIZE];
199     int subpq_size, subpq_rindex, subpq_windex;
200     SDL_mutex *subpq_mutex;
201     SDL_cond *subpq_cond;
202
203     double frame_timer;
204     double frame_last_pts;
205     double frame_last_duration;
206     double frame_last_dropped_pts;
207     double frame_last_returned_time;
208     double frame_last_filter_delay;
209     int64_t frame_last_dropped_pos;
210     double video_clock;                          ///< pts of last decoded frame / predicted pts of next decoded frame
211     int video_stream;
212     AVStream *video_st;
213     PacketQueue videoq;
214     double video_current_pts;                    ///< current displayed pts (different from video_clock if frame fifos are used)
215     double video_current_pts_drift;              ///< video_current_pts - time (av_gettime) at which we updated video_current_pts - used to have running video pts
216     int64_t video_current_pos;                   ///< current displayed file pos
217     VideoPicture pictq[VIDEO_PICTURE_QUEUE_SIZE];
218     int pictq_size, pictq_rindex, pictq_windex;
219     SDL_mutex *pictq_mutex;
220     SDL_cond *pictq_cond;
221 #if !CONFIG_AVFILTER
222     struct SwsContext *img_convert_ctx;
223 #endif
224
225     char filename[1024];
226     int width, height, xleft, ytop;
227     int step;
228
229 #if CONFIG_AVFILTER
230     AVFilterContext *out_video_filter;          ///< the last filter in the video chain
231 #endif
232
233     int refresh;
234     int last_video_stream, last_audio_stream, last_subtitle_stream;
235 } VideoState;
236
237 typedef struct AllocEventProps {
238     VideoState *is;
239     AVFrame *frame;
240 } AllocEventProps;
241
242 static int opt_help(const char *opt, const char *arg);
243
244 /* options specified by the user */
245 static AVInputFormat *file_iformat;
246 static const char *input_filename;
247 static const char *window_title;
248 static int fs_screen_width;
249 static int fs_screen_height;
250 static int screen_width  = 0;
251 static int screen_height = 0;
252 static int audio_disable;
253 static int video_disable;
254 static int wanted_stream[AVMEDIA_TYPE_NB] = {
255     [AVMEDIA_TYPE_AUDIO]    = -1,
256     [AVMEDIA_TYPE_VIDEO]    = -1,
257     [AVMEDIA_TYPE_SUBTITLE] = -1,
258 };
259 static int seek_by_bytes = -1;
260 static int display_disable;
261 static int show_status = 1;
262 static int av_sync_type = AV_SYNC_AUDIO_MASTER;
263 static int64_t start_time = AV_NOPTS_VALUE;
264 static int64_t duration = AV_NOPTS_VALUE;
265 static int workaround_bugs = 1;
266 static int fast = 0;
267 static int genpts = 0;
268 static int lowres = 0;
269 static int idct = FF_IDCT_AUTO;
270 static enum AVDiscard skip_frame       = AVDISCARD_DEFAULT;
271 static enum AVDiscard skip_idct        = AVDISCARD_DEFAULT;
272 static enum AVDiscard skip_loop_filter = AVDISCARD_DEFAULT;
273 static int error_concealment = 3;
274 static int decoder_reorder_pts = -1;
275 static int autoexit;
276 static int exit_on_keydown;
277 static int exit_on_mousedown;
278 static int loop = 1;
279 static int framedrop = -1;
280 static enum ShowMode show_mode = SHOW_MODE_NONE;
281 static const char *audio_codec_name;
282 static const char *subtitle_codec_name;
283 static const char *video_codec_name;
284 static int rdftspeed = 20;
285 #if CONFIG_AVFILTER
286 static char *vfilters = NULL;
287 #endif
288
289 /* current context */
290 static int is_full_screen;
291 static int64_t audio_callback_time;
292
293 static AVPacket flush_pkt;
294
295 #define FF_ALLOC_EVENT   (SDL_USEREVENT)
296 #define FF_REFRESH_EVENT (SDL_USEREVENT + 1)
297 #define FF_QUIT_EVENT    (SDL_USEREVENT + 2)
298
299 static SDL_Surface *screen;
300
301 void av_noreturn exit_program(int ret)
302 {
303     exit(ret);
304 }
305
306 static int packet_queue_put_private(PacketQueue *q, AVPacket *pkt)
307 {
308     AVPacketList *pkt1;
309
310     if (q->abort_request)
311        return -1;
312
313     pkt1 = av_malloc(sizeof(AVPacketList));
314     if (!pkt1)
315         return -1;
316     pkt1->pkt = *pkt;
317     pkt1->next = NULL;
318
319     if (!q->last_pkt)
320         q->first_pkt = pkt1;
321     else
322         q->last_pkt->next = pkt1;
323     q->last_pkt = pkt1;
324     q->nb_packets++;
325     q->size += pkt1->pkt.size + sizeof(*pkt1);
326     /* XXX: should duplicate packet data in DV case */
327     SDL_CondSignal(q->cond);
328     return 0;
329 }
330
331 static int packet_queue_put(PacketQueue *q, AVPacket *pkt)
332 {
333     int ret;
334
335     /* duplicate the packet */
336     if (pkt != &flush_pkt && av_dup_packet(pkt) < 0)
337         return -1;
338
339     SDL_LockMutex(q->mutex);
340     ret = packet_queue_put_private(q, pkt);
341     SDL_UnlockMutex(q->mutex);
342
343     if (pkt != &flush_pkt && ret < 0)
344         av_free_packet(pkt);
345
346     return ret;
347 }
348
349 /* packet queue handling */
350 static void packet_queue_init(PacketQueue *q)
351 {
352     memset(q, 0, sizeof(PacketQueue));
353     q->mutex = SDL_CreateMutex();
354     q->cond = SDL_CreateCond();
355     q->abort_request = 1;
356 }
357
358 static void packet_queue_flush(PacketQueue *q)
359 {
360     AVPacketList *pkt, *pkt1;
361
362     SDL_LockMutex(q->mutex);
363     for (pkt = q->first_pkt; pkt != NULL; pkt = pkt1) {
364         pkt1 = pkt->next;
365         av_free_packet(&pkt->pkt);
366         av_freep(&pkt);
367     }
368     q->last_pkt = NULL;
369     q->first_pkt = NULL;
370     q->nb_packets = 0;
371     q->size = 0;
372     SDL_UnlockMutex(q->mutex);
373 }
374
375 static void packet_queue_destroy(PacketQueue *q)
376 {
377     packet_queue_flush(q);
378     SDL_DestroyMutex(q->mutex);
379     SDL_DestroyCond(q->cond);
380 }
381
382 static void packet_queue_abort(PacketQueue *q)
383 {
384     SDL_LockMutex(q->mutex);
385
386     q->abort_request = 1;
387
388     SDL_CondSignal(q->cond);
389
390     SDL_UnlockMutex(q->mutex);
391 }
392
393 static void packet_queue_start(PacketQueue *q)
394 {
395     SDL_LockMutex(q->mutex);
396     q->abort_request = 0;
397     packet_queue_put_private(q, &flush_pkt);
398     SDL_UnlockMutex(q->mutex);
399 }
400
401 /* return < 0 if aborted, 0 if no packet and > 0 if packet.  */
402 static int packet_queue_get(PacketQueue *q, AVPacket *pkt, int block)
403 {
404     AVPacketList *pkt1;
405     int ret;
406
407     SDL_LockMutex(q->mutex);
408
409     for (;;) {
410         if (q->abort_request) {
411             ret = -1;
412             break;
413         }
414
415         pkt1 = q->first_pkt;
416         if (pkt1) {
417             q->first_pkt = pkt1->next;
418             if (!q->first_pkt)
419                 q->last_pkt = NULL;
420             q->nb_packets--;
421             q->size -= pkt1->pkt.size + sizeof(*pkt1);
422             *pkt = pkt1->pkt;
423             av_free(pkt1);
424             ret = 1;
425             break;
426         } else if (!block) {
427             ret = 0;
428             break;
429         } else {
430             SDL_CondWait(q->cond, q->mutex);
431         }
432     }
433     SDL_UnlockMutex(q->mutex);
434     return ret;
435 }
436
437 static inline void fill_rectangle(SDL_Surface *screen,
438                                   int x, int y, int w, int h, int color)
439 {
440     SDL_Rect rect;
441     rect.x = x;
442     rect.y = y;
443     rect.w = w;
444     rect.h = h;
445     SDL_FillRect(screen, &rect, color);
446 }
447
448 #define ALPHA_BLEND(a, oldp, newp, s)\
449 ((((oldp << s) * (255 - (a))) + (newp * (a))) / (255 << s))
450
451 #define RGBA_IN(r, g, b, a, s)\
452 {\
453     unsigned int v = ((const uint32_t *)(s))[0];\
454     a = (v >> 24) & 0xff;\
455     r = (v >> 16) & 0xff;\
456     g = (v >> 8) & 0xff;\
457     b = v & 0xff;\
458 }
459
460 #define YUVA_IN(y, u, v, a, s, pal)\
461 {\
462     unsigned int val = ((const uint32_t *)(pal))[*(const uint8_t*)(s)];\
463     a = (val >> 24) & 0xff;\
464     y = (val >> 16) & 0xff;\
465     u = (val >> 8) & 0xff;\
466     v = val & 0xff;\
467 }
468
469 #define YUVA_OUT(d, y, u, v, a)\
470 {\
471     ((uint32_t *)(d))[0] = (a << 24) | (y << 16) | (u << 8) | v;\
472 }
473
474
475 #define BPP 1
476
477 static void blend_subrect(AVPicture *dst, const AVSubtitleRect *rect, int imgw, int imgh)
478 {
479     int wrap, wrap3, width2, skip2;
480     int y, u, v, a, u1, v1, a1, w, h;
481     uint8_t *lum, *cb, *cr;
482     const uint8_t *p;
483     const uint32_t *pal;
484     int dstx, dsty, dstw, dsth;
485
486     dstw = av_clip(rect->w, 0, imgw);
487     dsth = av_clip(rect->h, 0, imgh);
488     dstx = av_clip(rect->x, 0, imgw - dstw);
489     dsty = av_clip(rect->y, 0, imgh - dsth);
490     lum = dst->data[0] + dsty * dst->linesize[0];
491     cb  = dst->data[1] + (dsty >> 1) * dst->linesize[1];
492     cr  = dst->data[2] + (dsty >> 1) * dst->linesize[2];
493
494     width2 = ((dstw + 1) >> 1) + (dstx & ~dstw & 1);
495     skip2 = dstx >> 1;
496     wrap = dst->linesize[0];
497     wrap3 = rect->pict.linesize[0];
498     p = rect->pict.data[0];
499     pal = (const uint32_t *)rect->pict.data[1];  /* Now in YCrCb! */
500
501     if (dsty & 1) {
502         lum += dstx;
503         cb += skip2;
504         cr += skip2;
505
506         if (dstx & 1) {
507             YUVA_IN(y, u, v, a, p, pal);
508             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
509             cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
510             cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
511             cb++;
512             cr++;
513             lum++;
514             p += BPP;
515         }
516         for (w = dstw - (dstx & 1); w >= 2; w -= 2) {
517             YUVA_IN(y, u, v, a, p, pal);
518             u1 = u;
519             v1 = v;
520             a1 = a;
521             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
522
523             YUVA_IN(y, u, v, a, p + BPP, pal);
524             u1 += u;
525             v1 += v;
526             a1 += a;
527             lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
528             cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
529             cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
530             cb++;
531             cr++;
532             p += 2 * BPP;
533             lum += 2;
534         }
535         if (w) {
536             YUVA_IN(y, u, v, a, p, pal);
537             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
538             cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
539             cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
540             p++;
541             lum++;
542         }
543         p += wrap3 - dstw * BPP;
544         lum += wrap - dstw - dstx;
545         cb += dst->linesize[1] - width2 - skip2;
546         cr += dst->linesize[2] - width2 - skip2;
547     }
548     for (h = dsth - (dsty & 1); h >= 2; h -= 2) {
549         lum += dstx;
550         cb += skip2;
551         cr += skip2;
552
553         if (dstx & 1) {
554             YUVA_IN(y, u, v, a, p, pal);
555             u1 = u;
556             v1 = v;
557             a1 = a;
558             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
559             p += wrap3;
560             lum += wrap;
561             YUVA_IN(y, u, v, a, p, pal);
562             u1 += u;
563             v1 += v;
564             a1 += a;
565             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
566             cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
567             cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
568             cb++;
569             cr++;
570             p += -wrap3 + BPP;
571             lum += -wrap + 1;
572         }
573         for (w = dstw - (dstx & 1); w >= 2; w -= 2) {
574             YUVA_IN(y, u, v, a, p, pal);
575             u1 = u;
576             v1 = v;
577             a1 = a;
578             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
579
580             YUVA_IN(y, u, v, a, p + BPP, pal);
581             u1 += u;
582             v1 += v;
583             a1 += a;
584             lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
585             p += wrap3;
586             lum += wrap;
587
588             YUVA_IN(y, u, v, a, p, pal);
589             u1 += u;
590             v1 += v;
591             a1 += a;
592             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
593
594             YUVA_IN(y, u, v, a, p + BPP, pal);
595             u1 += u;
596             v1 += v;
597             a1 += a;
598             lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
599
600             cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 2);
601             cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 2);
602
603             cb++;
604             cr++;
605             p += -wrap3 + 2 * BPP;
606             lum += -wrap + 2;
607         }
608         if (w) {
609             YUVA_IN(y, u, v, a, p, pal);
610             u1 = u;
611             v1 = v;
612             a1 = a;
613             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
614             p += wrap3;
615             lum += wrap;
616             YUVA_IN(y, u, v, a, p, pal);
617             u1 += u;
618             v1 += v;
619             a1 += a;
620             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
621             cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
622             cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
623             cb++;
624             cr++;
625             p += -wrap3 + BPP;
626             lum += -wrap + 1;
627         }
628         p += wrap3 + (wrap3 - dstw * BPP);
629         lum += wrap + (wrap - dstw - dstx);
630         cb += dst->linesize[1] - width2 - skip2;
631         cr += dst->linesize[2] - width2 - skip2;
632     }
633     /* handle odd height */
634     if (h) {
635         lum += dstx;
636         cb += skip2;
637         cr += skip2;
638
639         if (dstx & 1) {
640             YUVA_IN(y, u, v, a, p, pal);
641             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
642             cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
643             cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
644             cb++;
645             cr++;
646             lum++;
647             p += BPP;
648         }
649         for (w = dstw - (dstx & 1); w >= 2; w -= 2) {
650             YUVA_IN(y, u, v, a, p, pal);
651             u1 = u;
652             v1 = v;
653             a1 = a;
654             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
655
656             YUVA_IN(y, u, v, a, p + BPP, pal);
657             u1 += u;
658             v1 += v;
659             a1 += a;
660             lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
661             cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u, 1);
662             cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v, 1);
663             cb++;
664             cr++;
665             p += 2 * BPP;
666             lum += 2;
667         }
668         if (w) {
669             YUVA_IN(y, u, v, a, p, pal);
670             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
671             cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
672             cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
673         }
674     }
675 }
676
677 static void free_subpicture(SubPicture *sp)
678 {
679     avsubtitle_free(&sp->sub);
680 }
681
682 static void video_image_display(VideoState *is)
683 {
684     VideoPicture *vp;
685     SubPicture *sp;
686     AVPicture pict;
687     float aspect_ratio;
688     int width, height, x, y;
689     SDL_Rect rect;
690     int i;
691
692     vp = &is->pictq[is->pictq_rindex];
693     if (vp->bmp) {
694         if (vp->sample_aspect_ratio.num == 0)
695             aspect_ratio = 0;
696         else
697             aspect_ratio = av_q2d(vp->sample_aspect_ratio);
698
699         if (aspect_ratio <= 0.0)
700             aspect_ratio = 1.0;
701         aspect_ratio *= (float)vp->width / (float)vp->height;
702
703         if (is->subtitle_st) {
704             if (is->subpq_size > 0) {
705                 sp = &is->subpq[is->subpq_rindex];
706
707                 if (vp->pts >= sp->pts + ((float) sp->sub.start_display_time / 1000)) {
708                     SDL_LockYUVOverlay (vp->bmp);
709
710                     pict.data[0] = vp->bmp->pixels[0];
711                     pict.data[1] = vp->bmp->pixels[2];
712                     pict.data[2] = vp->bmp->pixels[1];
713
714                     pict.linesize[0] = vp->bmp->pitches[0];
715                     pict.linesize[1] = vp->bmp->pitches[2];
716                     pict.linesize[2] = vp->bmp->pitches[1];
717
718                     for (i = 0; i < sp->sub.num_rects; i++)
719                         blend_subrect(&pict, sp->sub.rects[i],
720                                       vp->bmp->w, vp->bmp->h);
721
722                     SDL_UnlockYUVOverlay (vp->bmp);
723                 }
724             }
725         }
726
727
728         /* XXX: we assume the screen has a 1.0 pixel ratio */
729         height = is->height;
730         width = ((int)rint(height * aspect_ratio)) & ~1;
731         if (width > is->width) {
732             width = is->width;
733             height = ((int)rint(width / aspect_ratio)) & ~1;
734         }
735         x = (is->width - width) / 2;
736         y = (is->height - height) / 2;
737         is->no_background = 0;
738         rect.x = is->xleft + x;
739         rect.y = is->ytop  + y;
740         rect.w = FFMAX(width,  1);
741         rect.h = FFMAX(height, 1);
742         SDL_DisplayYUVOverlay(vp->bmp, &rect);
743     }
744 }
745
746 static inline int compute_mod(int a, int b)
747 {
748     return a < 0 ? a%b + b : a%b;
749 }
750
751 static void video_audio_display(VideoState *s)
752 {
753     int i, i_start, x, y1, y, ys, delay, n, nb_display_channels;
754     int ch, channels, h, h2, bgcolor, fgcolor;
755     int64_t time_diff;
756     int rdft_bits, nb_freq;
757
758     for (rdft_bits = 1; (1 << rdft_bits) < 2 * s->height; rdft_bits++)
759         ;
760     nb_freq = 1 << (rdft_bits - 1);
761
762     /* compute display index : center on currently output samples */
763     channels = s->audio_tgt.channels;
764     nb_display_channels = channels;
765     if (!s->paused) {
766         int data_used= s->show_mode == SHOW_MODE_WAVES ? s->width : (2*nb_freq);
767         n = 2 * channels;
768         delay = s->audio_write_buf_size;
769         delay /= n;
770
771         /* to be more precise, we take into account the time spent since
772            the last buffer computation */
773         if (audio_callback_time) {
774             time_diff = av_gettime() - audio_callback_time;
775             delay -= (time_diff * s->audio_tgt.freq) / 1000000;
776         }
777
778         delay += 2 * data_used;
779         if (delay < data_used)
780             delay = data_used;
781
782         i_start= x = compute_mod(s->sample_array_index - delay * channels, SAMPLE_ARRAY_SIZE);
783         if (s->show_mode == SHOW_MODE_WAVES) {
784             h = INT_MIN;
785             for (i = 0; i < 1000; i += channels) {
786                 int idx = (SAMPLE_ARRAY_SIZE + x - i) % SAMPLE_ARRAY_SIZE;
787                 int a = s->sample_array[idx];
788                 int b = s->sample_array[(idx + 4 * channels) % SAMPLE_ARRAY_SIZE];
789                 int c = s->sample_array[(idx + 5 * channels) % SAMPLE_ARRAY_SIZE];
790                 int d = s->sample_array[(idx + 9 * channels) % SAMPLE_ARRAY_SIZE];
791                 int score = a - d;
792                 if (h < score && (b ^ c) < 0) {
793                     h = score;
794                     i_start = idx;
795                 }
796             }
797         }
798
799         s->last_i_start = i_start;
800     } else {
801         i_start = s->last_i_start;
802     }
803
804     bgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0x00);
805     if (s->show_mode == SHOW_MODE_WAVES) {
806         fill_rectangle(screen,
807                        s->xleft, s->ytop, s->width, s->height,
808                        bgcolor);
809
810         fgcolor = SDL_MapRGB(screen->format, 0xff, 0xff, 0xff);
811
812         /* total height for one channel */
813         h = s->height / nb_display_channels;
814         /* graph height / 2 */
815         h2 = (h * 9) / 20;
816         for (ch = 0; ch < nb_display_channels; ch++) {
817             i = i_start + ch;
818             y1 = s->ytop + ch * h + (h / 2); /* position of center line */
819             for (x = 0; x < s->width; x++) {
820                 y = (s->sample_array[i] * h2) >> 15;
821                 if (y < 0) {
822                     y = -y;
823                     ys = y1 - y;
824                 } else {
825                     ys = y1;
826                 }
827                 fill_rectangle(screen,
828                                s->xleft + x, ys, 1, y,
829                                fgcolor);
830                 i += channels;
831                 if (i >= SAMPLE_ARRAY_SIZE)
832                     i -= SAMPLE_ARRAY_SIZE;
833             }
834         }
835
836         fgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0xff);
837
838         for (ch = 1; ch < nb_display_channels; ch++) {
839             y = s->ytop + ch * h;
840             fill_rectangle(screen,
841                            s->xleft, y, s->width, 1,
842                            fgcolor);
843         }
844         SDL_UpdateRect(screen, s->xleft, s->ytop, s->width, s->height);
845     } else {
846         nb_display_channels= FFMIN(nb_display_channels, 2);
847         if (rdft_bits != s->rdft_bits) {
848             av_rdft_end(s->rdft);
849             av_free(s->rdft_data);
850             s->rdft = av_rdft_init(rdft_bits, DFT_R2C);
851             s->rdft_bits = rdft_bits;
852             s->rdft_data = av_malloc(4 * nb_freq * sizeof(*s->rdft_data));
853         }
854         {
855             FFTSample *data[2];
856             for (ch = 0; ch < nb_display_channels; ch++) {
857                 data[ch] = s->rdft_data + 2 * nb_freq * ch;
858                 i = i_start + ch;
859                 for (x = 0; x < 2 * nb_freq; x++) {
860                     double w = (x-nb_freq) * (1.0 / nb_freq);
861                     data[ch][x] = s->sample_array[i] * (1.0 - w * w);
862                     i += channels;
863                     if (i >= SAMPLE_ARRAY_SIZE)
864                         i -= SAMPLE_ARRAY_SIZE;
865                 }
866                 av_rdft_calc(s->rdft, data[ch]);
867             }
868             // least efficient way to do this; we should of course access it directly, but it's more than fast enough
869             for (y = 0; y < s->height; y++) {
870                 double w = 1 / sqrt(nb_freq);
871                 int a = sqrt(w * sqrt(data[0][2 * y + 0] * data[0][2 * y + 0] + data[0][2 * y + 1] * data[0][2 * y + 1]));
872                 int b = (nb_display_channels == 2 ) ? sqrt(w * sqrt(data[1][2 * y + 0] * data[1][2 * y + 0]
873                        + data[1][2 * y + 1] * data[1][2 * y + 1])) : a;
874                 a = FFMIN(a, 255);
875                 b = FFMIN(b, 255);
876                 fgcolor = SDL_MapRGB(screen->format, a, b, (a + b) / 2);
877
878                 fill_rectangle(screen,
879                             s->xpos, s->height-y, 1, 1,
880                             fgcolor);
881             }
882         }
883         SDL_UpdateRect(screen, s->xpos, s->ytop, 1, s->height);
884         if (!s->paused)
885             s->xpos++;
886         if (s->xpos >= s->width)
887             s->xpos= s->xleft;
888     }
889 }
890
891 static void stream_close(VideoState *is)
892 {
893     VideoPicture *vp;
894     int i;
895     /* XXX: use a special url_shutdown call to abort parse cleanly */
896     is->abort_request = 1;
897     SDL_WaitThread(is->read_tid, NULL);
898     SDL_WaitThread(is->refresh_tid, NULL);
899     packet_queue_destroy(&is->videoq);
900     packet_queue_destroy(&is->audioq);
901     packet_queue_destroy(&is->subtitleq);
902
903     /* free all pictures */
904     for (i = 0; i < VIDEO_PICTURE_QUEUE_SIZE; i++) {
905         vp = &is->pictq[i];
906 #if CONFIG_AVFILTER
907         if (vp->picref) {
908             avfilter_unref_buffer(vp->picref);
909             vp->picref = NULL;
910         }
911 #endif
912         if (vp->bmp) {
913             SDL_FreeYUVOverlay(vp->bmp);
914             vp->bmp = NULL;
915         }
916     }
917     SDL_DestroyMutex(is->pictq_mutex);
918     SDL_DestroyCond(is->pictq_cond);
919     SDL_DestroyMutex(is->subpq_mutex);
920     SDL_DestroyCond(is->subpq_cond);
921 #if !CONFIG_AVFILTER
922     if (is->img_convert_ctx)
923         sws_freeContext(is->img_convert_ctx);
924 #endif
925     av_free(is);
926 }
927
928 static void do_exit(VideoState *is)
929 {
930     if (is) {
931         stream_close(is);
932     }
933     av_lockmgr_register(NULL);
934     uninit_opts();
935 #if CONFIG_AVFILTER
936     avfilter_uninit();
937 #endif
938     avformat_network_deinit();
939     if (show_status)
940         printf("\n");
941     SDL_Quit();
942     av_log(NULL, AV_LOG_QUIET, "%s", "");
943     exit(0);
944 }
945
946 static void sigterm_handler(int sig)
947 {
948     exit(123);
949 }
950
951 static int video_open(VideoState *is, int force_set_video_mode)
952 {
953     int flags = SDL_HWSURFACE | SDL_ASYNCBLIT | SDL_HWACCEL;
954     int w,h;
955     VideoPicture *vp = &is->pictq[is->pictq_rindex];
956
957     if (is_full_screen) flags |= SDL_FULLSCREEN;
958     else                flags |= SDL_RESIZABLE;
959
960     if (is_full_screen && fs_screen_width) {
961         w = fs_screen_width;
962         h = fs_screen_height;
963     } else if (!is_full_screen && screen_width) {
964         w = screen_width;
965         h = screen_height;
966     } else if (vp->width) {
967         w = vp->width;
968         h = vp->height;
969     } else {
970         w = 640;
971         h = 480;
972     }
973     if (screen && is->width == screen->w && screen->w == w
974        && is->height== screen->h && screen->h == h && !force_set_video_mode)
975         return 0;
976     screen = SDL_SetVideoMode(w, h, 0, flags);
977     if (!screen) {
978         fprintf(stderr, "SDL: could not set video mode - exiting\n");
979         do_exit(is);
980     }
981     if (!window_title)
982         window_title = input_filename;
983     SDL_WM_SetCaption(window_title, window_title);
984
985     is->width  = screen->w;
986     is->height = screen->h;
987
988     return 0;
989 }
990
991 /* display the current picture, if any */
992 static void video_display(VideoState *is)
993 {
994     if (!screen)
995         video_open(is, 0);
996     if (is->audio_st && is->show_mode != SHOW_MODE_VIDEO)
997         video_audio_display(is);
998     else if (is->video_st)
999         video_image_display(is);
1000 }
1001
1002 static int refresh_thread(void *opaque)
1003 {
1004     VideoState *is= opaque;
1005     while (!is->abort_request) {
1006         SDL_Event event;
1007         event.type = FF_REFRESH_EVENT;
1008         event.user.data1 = opaque;
1009         if (!is->refresh && (!is->paused || is->force_refresh)) {
1010             is->refresh = 1;
1011             SDL_PushEvent(&event);
1012         }
1013         //FIXME ideally we should wait the correct time, but SDL's event passing is so slow that it would be silly
1014         usleep(is->audio_st && is->show_mode != SHOW_MODE_VIDEO ? rdftspeed*1000 : 5000);
1015     }
1016     return 0;
1017 }
1018
1019 /* get the current audio clock value */
1020 static double get_audio_clock(VideoState *is)
1021 {
1022     if (is->paused) {
1023         return is->audio_current_pts;
1024     } else {
1025         return is->audio_current_pts_drift + av_gettime() / 1000000.0;
1026     }
1027 }
1028
1029 /* get the current video clock value */
1030 static double get_video_clock(VideoState *is)
1031 {
1032     if (is->paused) {
1033         return is->video_current_pts;
1034     } else {
1035         return is->video_current_pts_drift + av_gettime() / 1000000.0;
1036     }
1037 }
1038
1039 /* get the current external clock value */
1040 static double get_external_clock(VideoState *is)
1041 {
1042     int64_t ti;
1043     ti = av_gettime();
1044     return is->external_clock + ((ti - is->external_clock_time) * 1e-6);
1045 }
1046
1047 /* get the current master clock value */
1048 static double get_master_clock(VideoState *is)
1049 {
1050     double val;
1051
1052     if (is->av_sync_type == AV_SYNC_VIDEO_MASTER) {
1053         if (is->video_st)
1054             val = get_video_clock(is);
1055         else
1056             val = get_audio_clock(is);
1057     } else if (is->av_sync_type == AV_SYNC_AUDIO_MASTER) {
1058         if (is->audio_st)
1059             val = get_audio_clock(is);
1060         else
1061             val = get_video_clock(is);
1062     } else {
1063         val = get_external_clock(is);
1064     }
1065     return val;
1066 }
1067
1068 /* seek in the stream */
1069 static void stream_seek(VideoState *is, int64_t pos, int64_t rel, int seek_by_bytes)
1070 {
1071     if (!is->seek_req) {
1072         is->seek_pos = pos;
1073         is->seek_rel = rel;
1074         is->seek_flags &= ~AVSEEK_FLAG_BYTE;
1075         if (seek_by_bytes)
1076             is->seek_flags |= AVSEEK_FLAG_BYTE;
1077         is->seek_req = 1;
1078     }
1079 }
1080
1081 /* pause or resume the video */
1082 static void stream_toggle_pause(VideoState *is)
1083 {
1084     if (is->paused) {
1085         is->frame_timer += av_gettime() / 1000000.0 + is->video_current_pts_drift - is->video_current_pts;
1086         if (is->read_pause_return != AVERROR(ENOSYS)) {
1087             is->video_current_pts = is->video_current_pts_drift + av_gettime() / 1000000.0;
1088         }
1089         is->video_current_pts_drift = is->video_current_pts - av_gettime() / 1000000.0;
1090     }
1091     is->paused = !is->paused;
1092 }
1093
1094 static double compute_target_delay(double delay, VideoState *is)
1095 {
1096     double sync_threshold, diff;
1097
1098     /* update delay to follow master synchronisation source */
1099     if (((is->av_sync_type == AV_SYNC_AUDIO_MASTER && is->audio_st) ||
1100          is->av_sync_type == AV_SYNC_EXTERNAL_CLOCK)) {
1101         /* if video is slave, we try to correct big delays by
1102            duplicating or deleting a frame */
1103         diff = get_video_clock(is) - get_master_clock(is);
1104
1105         /* skip or repeat frame. We take into account the
1106            delay to compute the threshold. I still don't know
1107            if it is the best guess */
1108         sync_threshold = FFMAX(AV_SYNC_THRESHOLD, delay);
1109         if (fabs(diff) < AV_NOSYNC_THRESHOLD) {
1110             if (diff <= -sync_threshold)
1111                 delay = 0;
1112             else if (diff >= sync_threshold)
1113                 delay = 2 * delay;
1114         }
1115     }
1116
1117     av_dlog(NULL, "video: delay=%0.3f A-V=%f\n",
1118             delay, -diff);
1119
1120     return delay;
1121 }
1122
1123 static void pictq_next_picture(VideoState *is) {
1124     /* update queue size and signal for next picture */
1125     if (++is->pictq_rindex == VIDEO_PICTURE_QUEUE_SIZE)
1126         is->pictq_rindex = 0;
1127
1128     SDL_LockMutex(is->pictq_mutex);
1129     is->pictq_size--;
1130     SDL_CondSignal(is->pictq_cond);
1131     SDL_UnlockMutex(is->pictq_mutex);
1132 }
1133
1134 static void update_video_pts(VideoState *is, double pts, int64_t pos) {
1135     double time = av_gettime() / 1000000.0;
1136     /* update current video pts */
1137     is->video_current_pts = pts;
1138     is->video_current_pts_drift = is->video_current_pts - time;
1139     is->video_current_pos = pos;
1140     is->frame_last_pts = pts;
1141 }
1142
1143 /* called to display each frame */
1144 static void video_refresh(void *opaque)
1145 {
1146     VideoState *is = opaque;
1147     VideoPicture *vp;
1148     double time;
1149
1150     SubPicture *sp, *sp2;
1151
1152     if (is->video_st) {
1153 retry:
1154         if (is->pictq_size == 0) {
1155             SDL_LockMutex(is->pictq_mutex);
1156             if (is->frame_last_dropped_pts != AV_NOPTS_VALUE && is->frame_last_dropped_pts > is->frame_last_pts) {
1157                 update_video_pts(is, is->frame_last_dropped_pts, is->frame_last_dropped_pos);
1158                 is->frame_last_dropped_pts = AV_NOPTS_VALUE;
1159             }
1160             SDL_UnlockMutex(is->pictq_mutex);
1161             // nothing to do, no picture to display in the queue
1162         } else {
1163             double last_duration, duration, delay;
1164             /* dequeue the picture */
1165             vp = &is->pictq[is->pictq_rindex];
1166
1167             if (vp->skip) {
1168                 pictq_next_picture(is);
1169                 goto retry;
1170             }
1171
1172             if (is->paused)
1173                 goto display;
1174
1175             /* compute nominal last_duration */
1176             last_duration = vp->pts - is->frame_last_pts;
1177             if (last_duration > 0 && last_duration < 10.0) {
1178                 /* if duration of the last frame was sane, update last_duration in video state */
1179                 is->frame_last_duration = last_duration;
1180             }
1181             delay = compute_target_delay(is->frame_last_duration, is);
1182
1183             time= av_gettime()/1000000.0;
1184             if (time < is->frame_timer + delay)
1185                 return;
1186
1187             if (delay > 0)
1188                 is->frame_timer += delay * FFMAX(1, floor((time-is->frame_timer) / delay));
1189
1190             SDL_LockMutex(is->pictq_mutex);
1191             update_video_pts(is, vp->pts, vp->pos);
1192             SDL_UnlockMutex(is->pictq_mutex);
1193
1194             if (is->pictq_size > 1) {
1195                 VideoPicture *nextvp = &is->pictq[(is->pictq_rindex + 1) % VIDEO_PICTURE_QUEUE_SIZE];
1196                 duration = nextvp->pts - vp->pts;
1197                 if((framedrop>0 || (framedrop && is->audio_st)) && time > is->frame_timer + duration){
1198                     is->frame_drops_late++;
1199                     pictq_next_picture(is);
1200                     goto retry;
1201                 }
1202             }
1203
1204             if (is->subtitle_st) {
1205                 if (is->subtitle_stream_changed) {
1206                     SDL_LockMutex(is->subpq_mutex);
1207
1208                     while (is->subpq_size) {
1209                         free_subpicture(&is->subpq[is->subpq_rindex]);
1210
1211                         /* update queue size and signal for next picture */
1212                         if (++is->subpq_rindex == SUBPICTURE_QUEUE_SIZE)
1213                             is->subpq_rindex = 0;
1214
1215                         is->subpq_size--;
1216                     }
1217                     is->subtitle_stream_changed = 0;
1218
1219                     SDL_CondSignal(is->subpq_cond);
1220                     SDL_UnlockMutex(is->subpq_mutex);
1221                 } else {
1222                     if (is->subpq_size > 0) {
1223                         sp = &is->subpq[is->subpq_rindex];
1224
1225                         if (is->subpq_size > 1)
1226                             sp2 = &is->subpq[(is->subpq_rindex + 1) % SUBPICTURE_QUEUE_SIZE];
1227                         else
1228                             sp2 = NULL;
1229
1230                         if ((is->video_current_pts > (sp->pts + ((float) sp->sub.end_display_time / 1000)))
1231                                 || (sp2 && is->video_current_pts > (sp2->pts + ((float) sp2->sub.start_display_time / 1000))))
1232                         {
1233                             free_subpicture(sp);
1234
1235                             /* update queue size and signal for next picture */
1236                             if (++is->subpq_rindex == SUBPICTURE_QUEUE_SIZE)
1237                                 is->subpq_rindex = 0;
1238
1239                             SDL_LockMutex(is->subpq_mutex);
1240                             is->subpq_size--;
1241                             SDL_CondSignal(is->subpq_cond);
1242                             SDL_UnlockMutex(is->subpq_mutex);
1243                         }
1244                     }
1245                 }
1246             }
1247
1248 display:
1249             /* display picture */
1250             if (!display_disable)
1251                 video_display(is);
1252
1253             if (!is->paused)
1254                 pictq_next_picture(is);
1255         }
1256     } else if (is->audio_st) {
1257         /* draw the next audio frame */
1258
1259         /* if only audio stream, then display the audio bars (better
1260            than nothing, just to test the implementation) */
1261
1262         /* display picture */
1263         if (!display_disable)
1264             video_display(is);
1265     }
1266     is->force_refresh = 0;
1267     if (show_status) {
1268         static int64_t last_time;
1269         int64_t cur_time;
1270         int aqsize, vqsize, sqsize;
1271         double av_diff;
1272
1273         cur_time = av_gettime();
1274         if (!last_time || (cur_time - last_time) >= 30000) {
1275             aqsize = 0;
1276             vqsize = 0;
1277             sqsize = 0;
1278             if (is->audio_st)
1279                 aqsize = is->audioq.size;
1280             if (is->video_st)
1281                 vqsize = is->videoq.size;
1282             if (is->subtitle_st)
1283                 sqsize = is->subtitleq.size;
1284             av_diff = 0;
1285             if (is->audio_st && is->video_st)
1286                 av_diff = get_audio_clock(is) - get_video_clock(is);
1287             printf("%7.2f A-V:%7.3f fd=%4d aq=%5dKB vq=%5dKB sq=%5dB f=%"PRId64"/%"PRId64"   \r",
1288                    get_master_clock(is),
1289                    av_diff,
1290                    is->frame_drops_early + is->frame_drops_late,
1291                    aqsize / 1024,
1292                    vqsize / 1024,
1293                    sqsize,
1294                    is->video_st ? is->video_st->codec->pts_correction_num_faulty_dts : 0,
1295                    is->video_st ? is->video_st->codec->pts_correction_num_faulty_pts : 0);
1296             fflush(stdout);
1297             last_time = cur_time;
1298         }
1299     }
1300 }
1301
1302 /* allocate a picture (needs to be done in the main thread to avoid
1303    potential locking problems) */
1304 static void alloc_picture(AllocEventProps *event_props)
1305 {
1306     VideoState *is = event_props->is;
1307     AVFrame *frame = event_props->frame;
1308     VideoPicture *vp;
1309
1310     vp = &is->pictq[is->pictq_windex];
1311
1312     if (vp->bmp)
1313         SDL_FreeYUVOverlay(vp->bmp);
1314
1315 #if CONFIG_AVFILTER
1316     if (vp->picref)
1317         avfilter_unref_buffer(vp->picref);
1318     vp->picref = NULL;
1319 #endif
1320
1321     vp->width   = frame->width;
1322     vp->height  = frame->height;
1323
1324     video_open(event_props->is, 0);
1325
1326     vp->bmp = SDL_CreateYUVOverlay(vp->width, vp->height,
1327                                    SDL_YV12_OVERLAY,
1328                                    screen);
1329     if (!vp->bmp || vp->bmp->pitches[0] < vp->width) {
1330         /* SDL allocates a buffer smaller than requested if the video
1331          * overlay hardware is unable to support the requested size. */
1332         fprintf(stderr, "Error: the video system does not support an image\n"
1333                         "size of %dx%d pixels. Try using -lowres or -vf \"scale=w:h\"\n"
1334                         "to reduce the image size.\n", vp->width, vp->height );
1335         do_exit(is);
1336     }
1337
1338     SDL_LockMutex(is->pictq_mutex);
1339     vp->allocated = 1;
1340     SDL_CondSignal(is->pictq_cond);
1341     SDL_UnlockMutex(is->pictq_mutex);
1342 }
1343
1344 static int queue_picture(VideoState *is, AVFrame *src_frame, double pts1, int64_t pos)
1345 {
1346     VideoPicture *vp;
1347     double frame_delay, pts = pts1;
1348
1349     /* compute the exact PTS for the picture if it is omitted in the stream
1350      * pts1 is the dts of the pkt / pts of the frame */
1351     if (pts != 0) {
1352         /* update video clock with pts, if present */
1353         is->video_clock = pts;
1354     } else {
1355         pts = is->video_clock;
1356     }
1357     /* update video clock for next frame */
1358     frame_delay = av_q2d(is->video_st->codec->time_base);
1359     /* for MPEG2, the frame can be repeated, so we update the
1360        clock accordingly */
1361     frame_delay += src_frame->repeat_pict * (frame_delay * 0.5);
1362     is->video_clock += frame_delay;
1363
1364 #if defined(DEBUG_SYNC) && 0
1365     printf("frame_type=%c clock=%0.3f pts=%0.3f\n",
1366            av_get_picture_type_char(src_frame->pict_type), pts, pts1);
1367 #endif
1368
1369     /* wait until we have space to put a new picture */
1370     SDL_LockMutex(is->pictq_mutex);
1371
1372     while (is->pictq_size >= VIDEO_PICTURE_QUEUE_SIZE &&
1373            !is->videoq.abort_request) {
1374         SDL_CondWait(is->pictq_cond, is->pictq_mutex);
1375     }
1376     SDL_UnlockMutex(is->pictq_mutex);
1377
1378     if (is->videoq.abort_request)
1379         return -1;
1380
1381     vp = &is->pictq[is->pictq_windex];
1382
1383     /* alloc or resize hardware picture buffer */
1384     if (!vp->bmp || vp->reallocate ||
1385         vp->width  != src_frame->width ||
1386         vp->height != src_frame->height) {
1387         SDL_Event event;
1388         AllocEventProps event_props;
1389
1390         event_props.frame = src_frame;
1391         event_props.is = is;
1392
1393         vp->allocated  = 0;
1394         vp->reallocate = 0;
1395
1396         /* the allocation must be done in the main thread to avoid
1397            locking problems. We wait in this block for the event to complete,
1398            so we can pass a pointer to event_props to it. */
1399         event.type = FF_ALLOC_EVENT;
1400         event.user.data1 = &event_props;
1401         SDL_PushEvent(&event);
1402
1403         /* wait until the picture is allocated */
1404         SDL_LockMutex(is->pictq_mutex);
1405         while (!vp->allocated && !is->videoq.abort_request) {
1406             SDL_CondWait(is->pictq_cond, is->pictq_mutex);
1407         }
1408         /* if the queue is aborted, we have to pop the pending ALLOC event or wait for the allocation to complete */
1409         if (is->videoq.abort_request && SDL_PeepEvents(&event, 1, SDL_GETEVENT, SDL_EVENTMASK(FF_ALLOC_EVENT)) != 1) {
1410             while (!vp->allocated) {
1411                 SDL_CondWait(is->pictq_cond, is->pictq_mutex);
1412             }
1413         }
1414         SDL_UnlockMutex(is->pictq_mutex);
1415
1416         if (is->videoq.abort_request)
1417             return -1;
1418     }
1419
1420     /* if the frame is not skipped, then display it */
1421     if (vp->bmp) {
1422         AVPicture pict = { { 0 } };
1423 #if CONFIG_AVFILTER
1424         if (vp->picref)
1425             avfilter_unref_buffer(vp->picref);
1426         vp->picref = src_frame->opaque;
1427 #endif
1428
1429         /* get a pointer on the bitmap */
1430         SDL_LockYUVOverlay (vp->bmp);
1431
1432         pict.data[0] = vp->bmp->pixels[0];
1433         pict.data[1] = vp->bmp->pixels[2];
1434         pict.data[2] = vp->bmp->pixels[1];
1435
1436         pict.linesize[0] = vp->bmp->pitches[0];
1437         pict.linesize[1] = vp->bmp->pitches[2];
1438         pict.linesize[2] = vp->bmp->pitches[1];
1439
1440 #if CONFIG_AVFILTER
1441         // FIXME use direct rendering
1442         av_picture_copy(&pict, (AVPicture *)src_frame,
1443                         src_frame->format, vp->width, vp->height);
1444         vp->sample_aspect_ratio = vp->picref->video->sample_aspect_ratio;
1445 #else
1446         sws_flags = av_get_int(sws_opts, "sws_flags", NULL);
1447         is->img_convert_ctx = sws_getCachedContext(is->img_convert_ctx,
1448             vp->width, vp->height, src_frame->format, vp->width, vp->height,
1449             PIX_FMT_YUV420P, sws_flags, NULL, NULL, NULL);
1450         if (is->img_convert_ctx == NULL) {
1451             fprintf(stderr, "Cannot initialize the conversion context\n");
1452             exit(1);
1453         }
1454         sws_scale(is->img_convert_ctx, src_frame->data, src_frame->linesize,
1455                   0, vp->height, pict.data, pict.linesize);
1456         vp->sample_aspect_ratio = av_guess_sample_aspect_ratio(is->ic, is->video_st, src_frame);
1457 #endif
1458         /* update the bitmap content */
1459         SDL_UnlockYUVOverlay(vp->bmp);
1460
1461         vp->pts = pts;
1462         vp->pos = pos;
1463         vp->skip = 0;
1464
1465         /* now we can update the picture count */
1466         if (++is->pictq_windex == VIDEO_PICTURE_QUEUE_SIZE)
1467             is->pictq_windex = 0;
1468         SDL_LockMutex(is->pictq_mutex);
1469         is->pictq_size++;
1470         SDL_UnlockMutex(is->pictq_mutex);
1471     }
1472     return 0;
1473 }
1474
1475 static int get_video_frame(VideoState *is, AVFrame *frame, int64_t *pts, AVPacket *pkt)
1476 {
1477     int got_picture, i;
1478
1479     if (packet_queue_get(&is->videoq, pkt, 1) < 0)
1480         return -1;
1481
1482     if (pkt->data == flush_pkt.data) {
1483         avcodec_flush_buffers(is->video_st->codec);
1484
1485         SDL_LockMutex(is->pictq_mutex);
1486         // Make sure there are no long delay timers (ideally we should just flush the queue, but that's harder)
1487         for (i = 0; i < VIDEO_PICTURE_QUEUE_SIZE; i++) {
1488             is->pictq[i].skip = 1;
1489         }
1490         while (is->pictq_size && !is->videoq.abort_request) {
1491             SDL_CondWait(is->pictq_cond, is->pictq_mutex);
1492         }
1493         is->video_current_pos = -1;
1494         is->frame_last_pts = AV_NOPTS_VALUE;
1495         is->frame_last_duration = 0;
1496         is->frame_timer = (double)av_gettime() / 1000000.0;
1497         is->frame_last_dropped_pts = AV_NOPTS_VALUE;
1498         SDL_UnlockMutex(is->pictq_mutex);
1499
1500         return 0;
1501     }
1502
1503     avcodec_decode_video2(is->video_st->codec, frame, &got_picture, pkt);
1504
1505     if (got_picture) {
1506         int ret = 1;
1507
1508         if (decoder_reorder_pts == -1) {
1509             *pts = av_frame_get_best_effort_timestamp(frame);
1510         } else if (decoder_reorder_pts) {
1511             *pts = frame->pkt_pts;
1512         } else {
1513             *pts = frame->pkt_dts;
1514         }
1515
1516         if (*pts == AV_NOPTS_VALUE) {
1517             *pts = 0;
1518         }
1519
1520         if (((is->av_sync_type == AV_SYNC_AUDIO_MASTER && is->audio_st) || is->av_sync_type == AV_SYNC_EXTERNAL_CLOCK) &&
1521              (framedrop>0 || (framedrop && is->audio_st))) {
1522             SDL_LockMutex(is->pictq_mutex);
1523             if (is->frame_last_pts != AV_NOPTS_VALUE && *pts) {
1524                 double clockdiff = get_video_clock(is) - get_master_clock(is);
1525                 double dpts = av_q2d(is->video_st->time_base) * *pts;
1526                 double ptsdiff = dpts - is->frame_last_pts;
1527                 if (fabs(clockdiff) < AV_NOSYNC_THRESHOLD &&
1528                      ptsdiff > 0 && ptsdiff < AV_NOSYNC_THRESHOLD &&
1529                      clockdiff + ptsdiff - is->frame_last_filter_delay < 0) {
1530                     is->frame_last_dropped_pos = pkt->pos;
1531                     is->frame_last_dropped_pts = dpts;
1532                     is->frame_drops_early++;
1533                     ret = 0;
1534                 }
1535             }
1536             SDL_UnlockMutex(is->pictq_mutex);
1537         }
1538
1539         if (ret)
1540             is->frame_last_returned_time = av_gettime() / 1000000.0;
1541
1542         return ret;
1543     }
1544     return 0;
1545 }
1546
1547 #if CONFIG_AVFILTER
1548 typedef struct {
1549     VideoState *is;
1550     AVFrame *frame;
1551     int use_dr1;
1552 } FilterPriv;
1553
1554 static int input_get_buffer(AVCodecContext *codec, AVFrame *pic)
1555 {
1556     AVFilterContext *ctx = codec->opaque;
1557     AVFilterBufferRef  *ref;
1558     int perms = AV_PERM_WRITE;
1559     int i, w, h, stride[AV_NUM_DATA_POINTERS];
1560     unsigned edge;
1561     int pixel_size;
1562
1563     av_assert0(codec->flags & CODEC_FLAG_EMU_EDGE);
1564
1565     if (codec->codec->capabilities & CODEC_CAP_NEG_LINESIZES)
1566         perms |= AV_PERM_NEG_LINESIZES;
1567
1568     if (pic->buffer_hints & FF_BUFFER_HINTS_VALID) {
1569         if (pic->buffer_hints & FF_BUFFER_HINTS_READABLE) perms |= AV_PERM_READ;
1570         if (pic->buffer_hints & FF_BUFFER_HINTS_PRESERVE) perms |= AV_PERM_PRESERVE;
1571         if (pic->buffer_hints & FF_BUFFER_HINTS_REUSABLE) perms |= AV_PERM_REUSE2;
1572     }
1573     if (pic->reference) perms |= AV_PERM_READ | AV_PERM_PRESERVE;
1574
1575     w = codec->width;
1576     h = codec->height;
1577
1578     if(av_image_check_size(w, h, 0, codec) || codec->pix_fmt<0)
1579         return -1;
1580
1581     avcodec_align_dimensions2(codec, &w, &h, stride);
1582     edge = codec->flags & CODEC_FLAG_EMU_EDGE ? 0 : avcodec_get_edge_width();
1583     w += edge << 1;
1584     h += edge << 1;
1585     if (codec->pix_fmt != ctx->outputs[0]->format) {
1586         av_log(codec, AV_LOG_ERROR, "Pixel format mismatches %d %d\n", codec->pix_fmt, ctx->outputs[0]->format);
1587         return -1;
1588     }
1589     if (!(ref = avfilter_get_video_buffer(ctx->outputs[0], perms, w, h)))
1590         return -1;
1591
1592     pixel_size = av_pix_fmt_descriptors[ref->format].comp[0].step_minus1 + 1;
1593     ref->video->w = codec->width;
1594     ref->video->h = codec->height;
1595     for (i = 0; i < 4; i ++) {
1596         unsigned hshift = (i == 1 || i == 2) ? av_pix_fmt_descriptors[ref->format].log2_chroma_w : 0;
1597         unsigned vshift = (i == 1 || i == 2) ? av_pix_fmt_descriptors[ref->format].log2_chroma_h : 0;
1598
1599         pic->base[i]     = ref->data[i];
1600         if (ref->data[i]) {
1601             ref->data[i]    += ((edge * pixel_size) >> hshift) + ((edge * ref->linesize[i]) >> vshift);
1602         }
1603         pic->data[i]     = ref->data[i];
1604         pic->linesize[i] = ref->linesize[i];
1605     }
1606     pic->opaque = ref;
1607     pic->type   = FF_BUFFER_TYPE_USER;
1608     pic->reordered_opaque = codec->reordered_opaque;
1609     pic->width               = codec->width;
1610     pic->height              = codec->height;
1611     pic->format              = codec->pix_fmt;
1612     pic->sample_aspect_ratio = codec->sample_aspect_ratio;
1613     if (codec->pkt) pic->pkt_pts = codec->pkt->pts;
1614     else            pic->pkt_pts = AV_NOPTS_VALUE;
1615     return 0;
1616 }
1617
1618 static void input_release_buffer(AVCodecContext *codec, AVFrame *pic)
1619 {
1620     memset(pic->data, 0, sizeof(pic->data));
1621     avfilter_unref_buffer(pic->opaque);
1622 }
1623
1624 static int input_reget_buffer(AVCodecContext *codec, AVFrame *pic)
1625 {
1626     AVFilterBufferRef *ref = pic->opaque;
1627
1628     if (pic->data[0] == NULL) {
1629         pic->buffer_hints |= FF_BUFFER_HINTS_READABLE;
1630         return codec->get_buffer(codec, pic);
1631     }
1632
1633     if ((codec->width != ref->video->w) || (codec->height != ref->video->h) ||
1634         (codec->pix_fmt != ref->format)) {
1635         av_log(codec, AV_LOG_ERROR, "Picture properties changed.\n");
1636         return -1;
1637     }
1638
1639     pic->reordered_opaque = codec->reordered_opaque;
1640     if (codec->pkt) pic->pkt_pts = codec->pkt->pts;
1641     else            pic->pkt_pts = AV_NOPTS_VALUE;
1642     return 0;
1643 }
1644
1645 static int input_init(AVFilterContext *ctx, const char *args, void *opaque)
1646 {
1647     FilterPriv *priv = ctx->priv;
1648     AVCodecContext *codec;
1649     if (!opaque) return -1;
1650
1651     priv->is = opaque;
1652     codec    = priv->is->video_st->codec;
1653     codec->opaque = ctx;
1654     if (codec->codec->capabilities & CODEC_CAP_DR1) {
1655         av_assert0(codec->flags & CODEC_FLAG_EMU_EDGE);
1656         priv->use_dr1 = 1;
1657         codec->get_buffer     = input_get_buffer;
1658         codec->release_buffer = input_release_buffer;
1659         codec->reget_buffer   = input_reget_buffer;
1660         codec->thread_safe_callbacks = 1;
1661     }
1662
1663     priv->frame = avcodec_alloc_frame();
1664
1665     return 0;
1666 }
1667
1668 static void input_uninit(AVFilterContext *ctx)
1669 {
1670     FilterPriv *priv = ctx->priv;
1671     av_free(priv->frame);
1672 }
1673
1674 static int input_request_frame(AVFilterLink *link)
1675 {
1676     FilterPriv *priv = link->src->priv;
1677     AVFilterBufferRef *picref;
1678     int64_t pts = 0;
1679     AVPacket pkt;
1680     int ret;
1681
1682     while (!(ret = get_video_frame(priv->is, priv->frame, &pts, &pkt)))
1683         av_free_packet(&pkt);
1684     if (ret < 0)
1685         return -1;
1686
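         /* DR1 path: the decoded frame already lives in a filter buffer set up
            by input_get_buffer(), so just take a new reference to it; otherwise
            allocate a buffer on the output link and copy the frame data into it. */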
1687     if (priv->use_dr1 && priv->frame->opaque) {
1688         picref = avfilter_ref_buffer(priv->frame->opaque, ~0);
1689     } else {
1690         picref = avfilter_get_video_buffer(link, AV_PERM_WRITE, priv->frame->width, priv->frame->height);
1691         av_image_copy(picref->data, picref->linesize,
1692                       (const uint8_t **)(void **)priv->frame->data, priv->frame->linesize,
1693                       picref->format, priv->frame->width, priv->frame->height);
1694     }
1695     av_free_packet(&pkt);
1696
1697     avfilter_copy_frame_props(picref, priv->frame);
1698     picref->video->sample_aspect_ratio = av_guess_sample_aspect_ratio(priv->is->ic, priv->is->video_st, priv->frame);
1699     picref->pts = pts;
1700
1701     avfilter_start_frame(link, picref);
1702     avfilter_draw_slice(link, 0, picref->video->h, 1);
1703     avfilter_end_frame(link);
1704
1705     return 0;
1706 }
1707
1708 static int input_query_formats(AVFilterContext *ctx)
1709 {
1710     FilterPriv *priv = ctx->priv;
1711     enum PixelFormat pix_fmts[] = {
1712         priv->is->video_st->codec->pix_fmt, PIX_FMT_NONE
1713     };
1714
1715     avfilter_set_common_pixel_formats(ctx, avfilter_make_format_list(pix_fmts));
1716     return 0;
1717 }
1718
1719 static int input_config_props(AVFilterLink *link)
1720 {
1721     FilterPriv *priv  = link->src->priv;
1722     AVStream *s = priv->is->video_st;
1723
1724     link->w = s->codec->width;
1725     link->h = s->codec->height;
1726     link->sample_aspect_ratio = s->sample_aspect_ratio.num ?
1727         s->sample_aspect_ratio : s->codec->sample_aspect_ratio;
1728     link->time_base = s->time_base;
1729
1730     return 0;
1731 }
1732
1733 static AVFilter input_filter =
1734 {
1735     .name      = "ffplay_input",
1736
1737     .priv_size = sizeof(FilterPriv),
1738
1739     .init      = input_init,
1740     .uninit    = input_uninit,
1741
1742     .query_formats = input_query_formats,
1743
1744     .inputs    = (AVFilterPad[]) {{ .name = NULL }},
1745     .outputs   = (AVFilterPad[]) {{ .name = "default",
1746                                     .type = AVMEDIA_TYPE_VIDEO,
1747                                     .request_frame = input_request_frame,
1748                                     .config_props  = input_config_props, },
1749                                   { .name = NULL }},
1750 };
1751
1752 static int configure_video_filters(AVFilterGraph *graph, VideoState *is, const char *vfilters)
1753 {
1754     static const enum PixelFormat pix_fmts[] = { PIX_FMT_YUV420P, PIX_FMT_NONE };
1755     char sws_flags_str[128];
1756     int ret;
1757     AVBufferSinkParams *buffersink_params = av_buffersink_params_alloc();
1758     AVFilterContext *filt_src = NULL, *filt_out = NULL, *filt_format;
1759     snprintf(sws_flags_str, sizeof(sws_flags_str), "flags=%d", sws_flags);
1760     graph->scale_sws_opts = av_strdup(sws_flags_str);
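         /* The graph built below is:
            ffplay_input (src) -> [optional user vfilters chain] -> format=yuv420p -> buffersink (out);
            the format filter and the sink's pixel format list restrict the output to YUV420P. */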
1761
1762     if ((ret = avfilter_graph_create_filter(&filt_src, &input_filter, "src",
1763                                             NULL, is, graph)) < 0)
1764         return ret;
1765
1766 #if FF_API_OLD_VSINK_API
1767     ret = avfilter_graph_create_filter(&filt_out,
1768                                        avfilter_get_by_name("buffersink"),
1769                                        "out", NULL, pix_fmts, graph);
1770 #else
1771     buffersink_params->pixel_fmts = pix_fmts;
1772     ret = avfilter_graph_create_filter(&filt_out,
1773                                        avfilter_get_by_name("buffersink"),
1774                                        "out", NULL, buffersink_params, graph);
1775 #endif
1776     av_freep(&buffersink_params);
1777     if (ret < 0)
1778         return ret;
1779
1780     if ((ret = avfilter_graph_create_filter(&filt_format,
1781                                             avfilter_get_by_name("format"),
1782                                             "format", "yuv420p", NULL, graph)) < 0)
1783         return ret;
1784     if ((ret = avfilter_link(filt_format, 0, filt_out, 0)) < 0)
1785         return ret;
1786
1787
1788     if (vfilters) {
1789         AVFilterInOut *outputs = avfilter_inout_alloc();
1790         AVFilterInOut *inputs  = avfilter_inout_alloc();
1791
1792         outputs->name    = av_strdup("in");
1793         outputs->filter_ctx = filt_src;
1794         outputs->pad_idx = 0;
1795         outputs->next    = NULL;
1796
1797         inputs->name    = av_strdup("out");
1798         inputs->filter_ctx = filt_format;
1799         inputs->pad_idx = 0;
1800         inputs->next    = NULL;
1801
1802         if ((ret = avfilter_graph_parse(graph, vfilters, &inputs, &outputs, NULL)) < 0)
1803             return ret;
1804     } else {
1805         if ((ret = avfilter_link(filt_src, 0, filt_format, 0)) < 0)
1806             return ret;
1807     }
1808
1809     if ((ret = avfilter_graph_config(graph, NULL)) < 0)
1810         return ret;
1811
1812     is->out_video_filter = filt_out;
1813
1814     return ret;
1815 }
1816
1817 #endif  /* CONFIG_AVFILTER */
1818
1819 static int video_thread(void *arg)
1820 {
1821     VideoState *is = arg;
1822     AVFrame *frame = avcodec_alloc_frame();
1823     int64_t pts_int = AV_NOPTS_VALUE, pos = -1;
1824     double pts;
1825     int ret;
1826
1827 #if CONFIG_AVFILTER
1828     AVFilterGraph *graph = avfilter_graph_alloc();
1829     AVFilterContext *filt_out = NULL;
1830     int last_w = is->video_st->codec->width;
1831     int last_h = is->video_st->codec->height;
1832
1833     if ((ret = configure_video_filters(graph, is, vfilters)) < 0) {
1834         SDL_Event event;
1835         event.type = FF_QUIT_EVENT;
1836         event.user.data1 = is;
1837         SDL_PushEvent(&event);
1838         goto the_end;
1839     }
1840     filt_out = is->out_video_filter;
1841 #endif
1842
1843     for (;;) {
1844 #if !CONFIG_AVFILTER
1845         AVPacket pkt;
1846 #else
1847         AVFilterBufferRef *picref;
1848         AVRational tb = filt_out->inputs[0]->time_base;
1849 #endif
1850         while (is->paused && !is->videoq.abort_request)
1851             SDL_Delay(10);
1852 #if CONFIG_AVFILTER
1853         if (   last_w != is->video_st->codec->width
1854             || last_h != is->video_st->codec->height) {
1855             av_log(NULL, AV_LOG_INFO, "Frame changed from size:%dx%d to size:%dx%d\n",
1856                    last_w, last_h, is->video_st->codec->width, is->video_st->codec->height);
1857             avfilter_graph_free(&graph);
1858             graph = avfilter_graph_alloc();
1859             if ((ret = configure_video_filters(graph, is, vfilters)) < 0)
1860                 goto the_end;
1861             filt_out = is->out_video_filter;
1862             last_w = is->video_st->codec->width;
1863             last_h = is->video_st->codec->height;
1864         }
1865         ret = av_buffersink_get_buffer_ref(filt_out, &picref, 0);
1866         if (picref) {
1867             avfilter_fill_frame_from_video_buffer_ref(frame, picref);
1868             pts_int = picref->pts;
1869             tb      = filt_out->inputs[0]->time_base;
1870             pos     = picref->pos;
1871             frame->opaque = picref;
1872
1873             ret = 1;
1874         }
1875
1876         if (ret >= 0 && av_cmp_q(tb, is->video_st->time_base)) {
1877             av_unused int64_t pts1 = pts_int;
1878             pts_int = av_rescale_q(pts_int, tb, is->video_st->time_base);
1879             av_dlog(NULL, "video_thread(): "
1880                     "tb:%d/%d pts:%"PRId64" -> tb:%d/%d pts:%"PRId64"\n",
1881                     tb.num, tb.den, pts1,
1882                     is->video_st->time_base.num, is->video_st->time_base.den, pts_int);
1883         }
1884 #else
1885         ret = get_video_frame(is, frame, &pts_int, &pkt);
1886         pos = pkt.pos;
1887         av_free_packet(&pkt);
1888         if (ret == 0)
1889             continue;
1890 #endif
1891
1892         if (ret < 0)
1893             goto the_end;
1894
1895         is->frame_last_filter_delay = av_gettime() / 1000000.0 - is->frame_last_returned_time;
1896         if (fabs(is->frame_last_filter_delay) > AV_NOSYNC_THRESHOLD / 10.0)
1897             is->frame_last_filter_delay = 0;
1898
1899 #if CONFIG_AVFILTER
1900         if (!picref)
1901             continue;
1902 #endif
1903
1904         pts = pts_int * av_q2d(is->video_st->time_base);
1905
1906         ret = queue_picture(is, frame, pts, pos);
1907
1908         if (ret < 0)
1909             goto the_end;
1910
1911         if (is->step)
1912             stream_toggle_pause(is);
1913     }
1914  the_end:
1915     avcodec_flush_buffers(is->video_st->codec);
1916 #if CONFIG_AVFILTER
1917     av_freep(&vfilters);
1918     avfilter_graph_free(&graph);
1919 #endif
1920     av_free(frame);
1921     return 0;
1922 }
1923
1924 static int subtitle_thread(void *arg)
1925 {
1926     VideoState *is = arg;
1927     SubPicture *sp;
1928     AVPacket pkt1, *pkt = &pkt1;
1929     int got_subtitle;
1930     double pts;
1931     int i, j;
1932     int r, g, b, y, u, v, a;
1933
1934     for (;;) {
1935         while (is->paused && !is->subtitleq.abort_request) {
1936             SDL_Delay(10);
1937         }
1938         if (packet_queue_get(&is->subtitleq, pkt, 1) < 0)
1939             break;
1940
1941         if (pkt->data == flush_pkt.data) {
1942             avcodec_flush_buffers(is->subtitle_st->codec);
1943             continue;
1944         }
1945         SDL_LockMutex(is->subpq_mutex);
1946         while (is->subpq_size >= SUBPICTURE_QUEUE_SIZE &&
1947                !is->subtitleq.abort_request) {
1948             SDL_CondWait(is->subpq_cond, is->subpq_mutex);
1949         }
1950         SDL_UnlockMutex(is->subpq_mutex);
1951
1952         if (is->subtitleq.abort_request)
1953             return 0;
1954
1955         sp = &is->subpq[is->subpq_windex];
1956
1957         /* NOTE: pts is the PTS of the _first_ subtitle displayed from
1958            this packet, if any */
1959         pts = 0;
1960         if (pkt->pts != AV_NOPTS_VALUE)
1961             pts = av_q2d(is->subtitle_st->time_base) * pkt->pts;
1962
1963         avcodec_decode_subtitle2(is->subtitle_st->codec, &sp->sub,
1964                                  &got_subtitle, pkt);
1965
1966         if (got_subtitle && sp->sub.format == 0) {
1967             sp->pts = pts;
1968
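                 /* bitmap subtitles (format == 0) carry an RGBA palette; convert
                    each palette entry to YUVA with CCIR coefficients so the
                    rectangles can later be blended onto the YUV video overlay. */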
1969             for (i = 0; i < sp->sub.num_rects; i++)
1970             {
1971                 for (j = 0; j < sp->sub.rects[i]->nb_colors; j++)
1972                 {
1973                     RGBA_IN(r, g, b, a, (uint32_t*)sp->sub.rects[i]->pict.data[1] + j);
1974                     y = RGB_TO_Y_CCIR(r, g, b);
1975                     u = RGB_TO_U_CCIR(r, g, b, 0);
1976                     v = RGB_TO_V_CCIR(r, g, b, 0);
1977                     YUVA_OUT((uint32_t*)sp->sub.rects[i]->pict.data[1] + j, y, u, v, a);
1978                 }
1979             }
1980
1981             /* now we can update the picture count */
1982             if (++is->subpq_windex == SUBPICTURE_QUEUE_SIZE)
1983                 is->subpq_windex = 0;
1984             SDL_LockMutex(is->subpq_mutex);
1985             is->subpq_size++;
1986             SDL_UnlockMutex(is->subpq_mutex);
1987         }
1988         av_free_packet(pkt);
1989     }
1990     return 0;
1991 }
1992
1993 /* copy samples for viewing in editor window */
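     /* sample_array is used as a fixed-size circular buffer; the write index
        wraps around when it reaches SAMPLE_ARRAY_SIZE */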
1994 static void update_sample_display(VideoState *is, short *samples, int samples_size)
1995 {
1996     int size, len;
1997
1998     size = samples_size / sizeof(short);
1999     while (size > 0) {
2000         len = SAMPLE_ARRAY_SIZE - is->sample_array_index;
2001         if (len > size)
2002             len = size;
2003         memcpy(is->sample_array + is->sample_array_index, samples, len * sizeof(short));
2004         samples += len;
2005         is->sample_array_index += len;
2006         if (is->sample_array_index >= SAMPLE_ARRAY_SIZE)
2007             is->sample_array_index = 0;
2008         size -= len;
2009     }
2010 }
2011
2012 /* return the wanted number of samples to get better sync if sync_type is video
2013  * or external master clock */
2014 static int synchronize_audio(VideoState *is, int nb_samples)
2015 {
2016     int wanted_nb_samples = nb_samples;
2017
2018     /* if not master, then we try to remove or add samples to correct the clock */
2019     if (((is->av_sync_type == AV_SYNC_VIDEO_MASTER && is->video_st) ||
2020          is->av_sync_type == AV_SYNC_EXTERNAL_CLOCK)) {
2021         double diff, avg_diff;
2022         int min_nb_samples, max_nb_samples;
2023
2024         diff = get_audio_clock(is) - get_master_clock(is);
2025
2026         if (fabs(diff) < AV_NOSYNC_THRESHOLD) {
2027             is->audio_diff_cum = diff + is->audio_diff_avg_coef * is->audio_diff_cum;
2028             if (is->audio_diff_avg_count < AUDIO_DIFF_AVG_NB) {
2029                 /* not enough measures to have a correct estimate */
2030                 is->audio_diff_avg_count++;
2031             } else {
2032                 /* estimate the A-V difference */
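                     /* audio_diff_cum is the geometric series
                        diff_n + coef*diff_{n-1} + coef^2*diff_{n-2} + ...;
                        multiplying by (1 - coef) normalizes the weights (they sum
                        to 1/(1 - coef)), so avg_diff is an exponentially weighted
                        moving average of the A-V difference. */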
2033                 avg_diff = is->audio_diff_cum * (1.0 - is->audio_diff_avg_coef);
2034
2035                 if (fabs(avg_diff) >= is->audio_diff_threshold) {
2036                     wanted_nb_samples = nb_samples + (int)(diff * is->audio_src.freq);
2037                     min_nb_samples = ((nb_samples * (100 - SAMPLE_CORRECTION_PERCENT_MAX) / 100));
2038                     max_nb_samples = ((nb_samples * (100 + SAMPLE_CORRECTION_PERCENT_MAX) / 100));
2039                     wanted_nb_samples = FFMIN(FFMAX(wanted_nb_samples, min_nb_samples), max_nb_samples);
2040                 }
2041                 av_dlog(NULL, "diff=%f adiff=%f sample_diff=%d apts=%0.3f vpts=%0.3f %f\n",
2042                         diff, avg_diff, wanted_nb_samples - nb_samples,
2043                         is->audio_clock, is->video_clock, is->audio_diff_threshold);
2044             }
2045         } else {
2046             /* too big difference: may be initial PTS errors, so
2047                reset A-V filter */
2048             is->audio_diff_avg_count = 0;
2049             is->audio_diff_cum       = 0;
2050         }
2051     }
2052
2053     return wanted_nb_samples;
2054 }
2055
2056 /* decode one audio frame and return its uncompressed size */
2057 static int audio_decode_frame(VideoState *is, double *pts_ptr)
2058 {
2059     AVPacket *pkt_temp = &is->audio_pkt_temp;
2060     AVPacket *pkt = &is->audio_pkt;
2061     AVCodecContext *dec = is->audio_st->codec;
2062     int len1, len2, data_size, resampled_data_size;
2063     int64_t dec_channel_layout;
2064     int got_frame;
2065     double pts;
2066     int new_packet = 0;
2067     int flush_complete = 0;
2068     int wanted_nb_samples;
2069
2070     for (;;) {
2071         /* NOTE: the audio packet can contain several frames */
2072         while (pkt_temp->size > 0 || (!pkt_temp->data && new_packet)) {
2073             if (!is->frame) {
2074                 if (!(is->frame = avcodec_alloc_frame()))
2075                     return AVERROR(ENOMEM);
2076             } else
2077                 avcodec_get_frame_defaults(is->frame);
2078
2079             if (is->paused)
2080                 return -1;
2081
2082             if (flush_complete)
2083                 break;
2084             new_packet = 0;
2085             len1 = avcodec_decode_audio4(dec, is->frame, &got_frame, pkt_temp);
2086             if (len1 < 0) {
2087                 /* if error, we skip the frame */
2088                 pkt_temp->size = 0;
2089                 break;
2090             }
2091
2092             pkt_temp->data += len1;
2093             pkt_temp->size -= len1;
2094
2095             if (!got_frame) {
2096                 /* stop sending empty packets if the decoder is finished */
2097                 if (!pkt_temp->data && dec->codec->capabilities & CODEC_CAP_DELAY)
2098                     flush_complete = 1;
2099                 continue;
2100             }
2101             data_size = av_samples_get_buffer_size(NULL, dec->channels,
2102                                                    is->frame->nb_samples,
2103                                                    dec->sample_fmt, 1);
2104
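                 /* only trust the decoder's channel_layout if it matches its
                    channel count; otherwise fall back to the default layout for
                    that count */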
2105             dec_channel_layout = (dec->channel_layout && dec->channels == av_get_channel_layout_nb_channels(dec->channel_layout)) ? dec->channel_layout : av_get_default_channel_layout(dec->channels);
2106             wanted_nb_samples = synchronize_audio(is, is->frame->nb_samples);
2107
2108             if (dec->sample_fmt != is->audio_src.fmt ||
2109                 dec_channel_layout != is->audio_src.channel_layout ||
2110                 dec->sample_rate != is->audio_src.freq ||
2111                 (wanted_nb_samples != is->frame->nb_samples && !is->swr_ctx)) {
2112                 if (is->swr_ctx)
2113                     swr_free(&is->swr_ctx);
2114                 is->swr_ctx = swr_alloc_set_opts(NULL,
2115                                                  is->audio_tgt.channel_layout, is->audio_tgt.fmt, is->audio_tgt.freq,
2116                                                  dec_channel_layout,           dec->sample_fmt,   dec->sample_rate,
2117                                                  0, NULL);
2118                 if (!is->swr_ctx || swr_init(is->swr_ctx) < 0) {
2119                     fprintf(stderr, "Cannot create sample rate converter for conversion of %d Hz %s %d channels to %d Hz %s %d channels!\n",
2120                         dec->sample_rate,
2121                         av_get_sample_fmt_name(dec->sample_fmt),
2122                         dec->channels,
2123                         is->audio_tgt.freq,
2124                         av_get_sample_fmt_name(is->audio_tgt.fmt),
2125                         is->audio_tgt.channels);
2126                     break;
2127                 }
2128                 is->audio_src.channel_layout = dec_channel_layout;
2129                 is->audio_src.channels = dec->channels;
2130                 is->audio_src.freq = dec->sample_rate;
2131                 is->audio_src.fmt = dec->sample_fmt;
2132             }
2133
2134             resampled_data_size = data_size;
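                 /* resampling path: if sync correction asked for a different
                    number of samples, swr_set_compensation() stretches or
                    squeezes the frame by (wanted - actual) samples spread over
                    the frame, then swr_convert() writes the result into audio_buf2 */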
2135             if (is->swr_ctx) {
2136                 const uint8_t *in[] = { is->frame->data[0] };
2137                 uint8_t *out[] = {is->audio_buf2};
2138                 if (wanted_nb_samples != is->frame->nb_samples) {
2139                     if (swr_set_compensation(is->swr_ctx, (wanted_nb_samples - is->frame->nb_samples) * is->audio_tgt.freq / dec->sample_rate,
2140                                                 wanted_nb_samples * is->audio_tgt.freq / dec->sample_rate) < 0) {
2141                         fprintf(stderr, "swr_set_compensation() failed\n");
2142                         break;
2143                     }
2144                 }
2145                 len2 = swr_convert(is->swr_ctx, out, sizeof(is->audio_buf2) / is->audio_tgt.channels / av_get_bytes_per_sample(is->audio_tgt.fmt),
2146                                                 in, is->frame->nb_samples);
2147                 if (len2 < 0) {
2148                     fprintf(stderr, "audio_resample() failed\n");
2149                     break;
2150                 }
2151                 if (len2 == sizeof(is->audio_buf2) / is->audio_tgt.channels / av_get_bytes_per_sample(is->audio_tgt.fmt)) {
2152                     fprintf(stderr, "warning: audio buffer is probably too small\n");
2153                     swr_init(is->swr_ctx);
2154                 }
2155                 is->audio_buf = is->audio_buf2;
2156                 resampled_data_size = len2 * is->audio_tgt.channels * av_get_bytes_per_sample(is->audio_tgt.fmt);
2157             } else {
2158                 is->audio_buf = is->frame->data[0];
2159             }
2160
2161             /* take the pts from the running audio clock, then advance the clock by the decoded data */
2162             pts = is->audio_clock;
2163             *pts_ptr = pts;
2164             is->audio_clock += (double)data_size /
2165                 (dec->channels * dec->sample_rate * av_get_bytes_per_sample(dec->sample_fmt));
2166 #ifdef DEBUG
2167             {
2168                 static double last_clock;
2169                 printf("audio: delay=%0.3f clock=%0.3f pts=%0.3f\n",
2170                        is->audio_clock - last_clock,
2171                        is->audio_clock, pts);
2172                 last_clock = is->audio_clock;
2173             }
2174 #endif
2175             return resampled_data_size;
2176         }
2177
2178         /* free the current packet */
2179         if (pkt->data)
2180             av_free_packet(pkt);
2181         memset(pkt_temp, 0, sizeof(*pkt_temp));
2182
2183         if (is->paused || is->audioq.abort_request) {
2184             return -1;
2185         }
2186
2187         /* read next packet */
2188         if ((new_packet = packet_queue_get(&is->audioq, pkt, 1)) < 0)
2189             return -1;
2190
2191         if (pkt->data == flush_pkt.data) {
2192             avcodec_flush_buffers(dec);
2193             flush_complete = 0;
2194         }
2195
2196         *pkt_temp = *pkt;
2197
2198         /* update the audio clock with the packet pts, if available */
2199         if (pkt->pts != AV_NOPTS_VALUE) {
2200             is->audio_clock = av_q2d(is->audio_st->time_base)*pkt->pts;
2201         }
2202     }
2203 }
2204
2205 /* prepare a new audio buffer */
2206 static void sdl_audio_callback(void *opaque, Uint8 *stream, int len)
2207 {
2208     VideoState *is = opaque;
2209     int audio_size, len1;
2210     int bytes_per_sec;
2211     int frame_size = av_samples_get_buffer_size(NULL, is->audio_tgt.channels, 1, is->audio_tgt.fmt, 1);
2212     double pts;
2213
2214     audio_callback_time = av_gettime();
2215
2216     while (len > 0) {
2217         if (is->audio_buf_index >= is->audio_buf_size) {
2218            audio_size = audio_decode_frame(is, &pts);
2219            if (audio_size < 0) {
2220                 /* if error, just output silence */
2221                is->audio_buf      = is->silence_buf;
2222                is->audio_buf_size = sizeof(is->silence_buf) / frame_size * frame_size;
2223            } else {
2224                if (is->show_mode != SHOW_MODE_VIDEO)
2225                    update_sample_display(is, (int16_t *)is->audio_buf, audio_size);
2226                is->audio_buf_size = audio_size;
2227            }
2228            is->audio_buf_index = 0;
2229         }
2230         len1 = is->audio_buf_size - is->audio_buf_index;
2231         if (len1 > len)
2232             len1 = len;
2233         memcpy(stream, (uint8_t *)is->audio_buf + is->audio_buf_index, len1);
2234         len -= len1;
2235         stream += len1;
2236         is->audio_buf_index += len1;
2237     }
2238     bytes_per_sec = is->audio_tgt.freq * is->audio_tgt.channels * av_get_bytes_per_sample(is->audio_tgt.fmt);
2239     is->audio_write_buf_size = is->audio_buf_size - is->audio_buf_index;
2240     /* Let's assume the audio driver that is used by SDL has two periods. */
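         /* The clock therefore lags the last decoded sample by the data still
            queued: two hardware buffers plus whatever of audio_buf has not been
            copied out yet, converted to seconds via bytes_per_sec. */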
2241     is->audio_current_pts = is->audio_clock - (double)(2 * is->audio_hw_buf_size + is->audio_write_buf_size) / bytes_per_sec;
2242     is->audio_current_pts_drift = is->audio_current_pts - audio_callback_time / 1000000.0;
2243 }
2244
2245 static int audio_open(void *opaque, int64_t channel_layout, int channels, int sample_rate, struct AudioParams *audio)
2246 {
2247     SDL_AudioSpec wanted_spec, spec;
2248     const char *env;
2249     int64_t wanted_channel_layout = 0;
2250     int wanted_nb_channels;
2251
2252     env = SDL_getenv("SDL_AUDIO_CHANNELS");
2253     if (env)
2254         wanted_channel_layout = av_get_default_channel_layout(SDL_atoi(env));
2255     if (!wanted_channel_layout) {
2256         wanted_channel_layout = (channel_layout && channels == av_get_channel_layout_nb_channels(channel_layout)) ? channel_layout : av_get_default_channel_layout(channels);
2257         wanted_channel_layout &= ~AV_CH_LAYOUT_STEREO_DOWNMIX;
2258         wanted_nb_channels = av_get_channel_layout_nb_channels(wanted_channel_layout);
2259         /* SDL only supports 1, 2, 4 or 6 channels at the moment, so we have to make sure not to request anything else. */
2260         while (wanted_nb_channels > 0 && (wanted_nb_channels == 3 || wanted_nb_channels == 5 || wanted_nb_channels > (SDL_VERSION_ATLEAST(1, 2, 8) ? 6 : 2))) {
2261             wanted_nb_channels--;
2262             wanted_channel_layout = av_get_default_channel_layout(wanted_nb_channels);
2263         }
2264     }
2265     wanted_spec.channels = av_get_channel_layout_nb_channels(wanted_channel_layout);
2266     wanted_spec.freq = sample_rate;
2267     if (wanted_spec.freq <= 0 || wanted_spec.channels <= 0) {
2268         fprintf(stderr, "Invalid sample rate or channel count!\n");
2269         return -1;
2270     }
2271     wanted_spec.format = AUDIO_S16SYS;
2272     wanted_spec.silence = 0;
2273     wanted_spec.samples = SDL_AUDIO_BUFFER_SIZE;
2274     wanted_spec.callback = sdl_audio_callback;
2275     wanted_spec.userdata = opaque;
2276     if (SDL_OpenAudio(&wanted_spec, &spec) < 0) {
2277         fprintf(stderr, "SDL_OpenAudio: %s\n", SDL_GetError());
2278         return -1;
2279     }
2280     if (spec.format != AUDIO_S16SYS) {
2281         fprintf(stderr, "SDL advised audio format %d is not supported!\n", spec.format);
2282         return -1;
2283     }
2284     if (spec.channels != wanted_spec.channels) {
2285         wanted_channel_layout = av_get_default_channel_layout(spec.channels);
2286         if (!wanted_channel_layout) {
2287             fprintf(stderr, "SDL advised channel count %d is not supported!\n", spec.channels);
2288             return -1;
2289         }
2290     }
2291
2292     audio->fmt = AV_SAMPLE_FMT_S16;
2293     audio->freq = spec.freq;
2294     audio->channel_layout = wanted_channel_layout;
2295     audio->channels =  spec.channels;
2296     return spec.size;
2297 }
2298
2299 /* open a given stream. Return 0 if OK */
2300 static int stream_component_open(VideoState *is, int stream_index)
2301 {
2302     AVFormatContext *ic = is->ic;
2303     AVCodecContext *avctx;
2304     AVCodec *codec;
2305     AVDictionary *opts;
2306     AVDictionaryEntry *t = NULL;
2307
2308     if (stream_index < 0 || stream_index >= ic->nb_streams)
2309         return -1;
2310     avctx = ic->streams[stream_index]->codec;
2311
2312     codec = avcodec_find_decoder(avctx->codec_id);
2313     opts = filter_codec_opts(codec_opts, codec, ic, ic->streams[stream_index]);
2314
2315     switch(avctx->codec_type){
2316         case AVMEDIA_TYPE_AUDIO   : is->last_audio_stream    = stream_index; if(audio_codec_name   ) codec= avcodec_find_decoder_by_name(   audio_codec_name); break;
2317         case AVMEDIA_TYPE_SUBTITLE: is->last_subtitle_stream = stream_index; if(subtitle_codec_name) codec= avcodec_find_decoder_by_name(subtitle_codec_name); break;
2318         case AVMEDIA_TYPE_VIDEO   : is->last_video_stream    = stream_index; if(video_codec_name   ) codec= avcodec_find_decoder_by_name(   video_codec_name); break;
2319     }
2320     if (!codec)
2321         return -1;
2322
2323     avctx->workaround_bugs   = workaround_bugs;
2324     avctx->lowres            = lowres;
2325     if(avctx->lowres > codec->max_lowres){
2326         av_log(avctx, AV_LOG_WARNING, "The maximum value for lowres supported by the decoder is %d\n",
2327                 codec->max_lowres);
2328         avctx->lowres= codec->max_lowres;
2329     }
2330     avctx->idct_algo         = idct;
2331     avctx->skip_frame        = skip_frame;
2332     avctx->skip_idct         = skip_idct;
2333     avctx->skip_loop_filter  = skip_loop_filter;
2334     avctx->error_concealment = error_concealment;
2335
2336     if(avctx->lowres) avctx->flags |= CODEC_FLAG_EMU_EDGE;
2337     if (fast)   avctx->flags2 |= CODEC_FLAG2_FAST;
2338     if(codec->capabilities & CODEC_CAP_DR1)
2339         avctx->flags |= CODEC_FLAG_EMU_EDGE;
2340
2341     if (!av_dict_get(opts, "threads", NULL, 0))
2342         av_dict_set(&opts, "threads", "auto", 0);
2343     if (!codec ||
2344         avcodec_open2(avctx, codec, &opts) < 0)
2345         return -1;
2346     if ((t = av_dict_get(opts, "", NULL, AV_DICT_IGNORE_SUFFIX))) {
2347         av_log(NULL, AV_LOG_ERROR, "Option %s not found.\n", t->key);
2348         return AVERROR_OPTION_NOT_FOUND;
2349     }
2350
2351     /* prepare audio output */
2352     if (avctx->codec_type == AVMEDIA_TYPE_AUDIO) {
2353         int audio_hw_buf_size = audio_open(is, avctx->channel_layout, avctx->channels, avctx->sample_rate, &is->audio_src);
2354         if (audio_hw_buf_size < 0)
2355             return -1;
2356         is->audio_hw_buf_size = audio_hw_buf_size;
2357         is->audio_tgt = is->audio_src;
2358     }
2359
2360     ic->streams[stream_index]->discard = AVDISCARD_DEFAULT;
2361     switch (avctx->codec_type) {
2362     case AVMEDIA_TYPE_AUDIO:
2363         is->audio_stream = stream_index;
2364         is->audio_st = ic->streams[stream_index];
2365         is->audio_buf_size  = 0;
2366         is->audio_buf_index = 0;
2367
2368         /* init averaging filter */
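             /* choose the coefficient so that coef^AUDIO_DIFF_AVG_NB == 0.01,
                i.e. a measurement older than AUDIO_DIFF_AVG_NB callbacks
                contributes less than 1% to the average used in synchronize_audio() */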
2369         is->audio_diff_avg_coef  = exp(log(0.01) / AUDIO_DIFF_AVG_NB);
2370         is->audio_diff_avg_count = 0;
2371         /* since we do not have a precise enough measure of the audio FIFO
2372            fullness, we correct audio sync only if the error is larger than this threshold */
2373         is->audio_diff_threshold = 2.0 * SDL_AUDIO_BUFFER_SIZE / avctx->sample_rate;
2374
2375         memset(&is->audio_pkt, 0, sizeof(is->audio_pkt));
2376         memset(&is->audio_pkt_temp, 0, sizeof(is->audio_pkt_temp));
2377         packet_queue_start(&is->audioq);
2378         SDL_PauseAudio(0);
2379         break;
2380     case AVMEDIA_TYPE_VIDEO:
2381         is->video_stream = stream_index;
2382         is->video_st = ic->streams[stream_index];
2383
2384         packet_queue_start(&is->videoq);
2385         is->video_tid = SDL_CreateThread(video_thread, is);
2386         break;
2387     case AVMEDIA_TYPE_SUBTITLE:
2388         is->subtitle_stream = stream_index;
2389         is->subtitle_st = ic->streams[stream_index];
2390         packet_queue_start(&is->subtitleq);
2391
2392         is->subtitle_tid = SDL_CreateThread(subtitle_thread, is);
2393         break;
2394     default:
2395         break;
2396     }
2397     return 0;
2398 }
2399
2400 static void stream_component_close(VideoState *is, int stream_index)
2401 {
2402     AVFormatContext *ic = is->ic;
2403     AVCodecContext *avctx;
2404
2405     if (stream_index < 0 || stream_index >= ic->nb_streams)
2406         return;
2407     avctx = ic->streams[stream_index]->codec;
2408
2409     switch (avctx->codec_type) {
2410     case AVMEDIA_TYPE_AUDIO:
2411         packet_queue_abort(&is->audioq);
2412
2413         SDL_CloseAudio();
2414
2415         packet_queue_flush(&is->audioq);
2416         av_free_packet(&is->audio_pkt);
2417         if (is->swr_ctx)
2418             swr_free(&is->swr_ctx);
2419         av_freep(&is->audio_buf1);
2420         is->audio_buf = NULL;
2421         av_freep(&is->frame);
2422
2423         if (is->rdft) {
2424             av_rdft_end(is->rdft);
2425             av_freep(&is->rdft_data);
2426             is->rdft = NULL;
2427             is->rdft_bits = 0;
2428         }
2429         break;
2430     case AVMEDIA_TYPE_VIDEO:
2431         packet_queue_abort(&is->videoq);
2432
2433         /* note: we also signal this mutex to make sure we deblock the
2434            video thread in all cases */
2435         SDL_LockMutex(is->pictq_mutex);
2436         SDL_CondSignal(is->pictq_cond);
2437         SDL_UnlockMutex(is->pictq_mutex);
2438
2439         SDL_WaitThread(is->video_tid, NULL);
2440
2441         packet_queue_flush(&is->videoq);
2442         break;
2443     case AVMEDIA_TYPE_SUBTITLE:
2444         packet_queue_abort(&is->subtitleq);
2445
2446         /* note: we also signal this mutex to make sure we deblock the
2447            subtitle thread in all cases */
2448         SDL_LockMutex(is->subpq_mutex);
2449         is->subtitle_stream_changed = 1;
2450
2451         SDL_CondSignal(is->subpq_cond);
2452         SDL_UnlockMutex(is->subpq_mutex);
2453
2454         SDL_WaitThread(is->subtitle_tid, NULL);
2455
2456         packet_queue_flush(&is->subtitleq);
2457         break;
2458     default:
2459         break;
2460     }
2461
2462     ic->streams[stream_index]->discard = AVDISCARD_ALL;
2463     avcodec_close(avctx);
2464     switch (avctx->codec_type) {
2465     case AVMEDIA_TYPE_AUDIO:
2466         is->audio_st = NULL;
2467         is->audio_stream = -1;
2468         break;
2469     case AVMEDIA_TYPE_VIDEO:
2470         is->video_st = NULL;
2471         is->video_stream = -1;
2472         break;
2473     case AVMEDIA_TYPE_SUBTITLE:
2474         is->subtitle_st = NULL;
2475         is->subtitle_stream = -1;
2476         break;
2477     default:
2478         break;
2479     }
2480 }
2481
2482 static int decode_interrupt_cb(void *ctx)
2483 {
2484     VideoState *is = ctx;
2485     return is->abort_request;
2486 }
2487
2488 /* this thread gets the stream from the disk or the network */
2489 static int read_thread(void *arg)
2490 {
2491     VideoState *is = arg;
2492     AVFormatContext *ic = NULL;
2493     int err, i, ret;
2494     int st_index[AVMEDIA_TYPE_NB];
2495     AVPacket pkt1, *pkt = &pkt1;
2496     int eof = 0;
2497     int pkt_in_play_range = 0;
2498     AVDictionaryEntry *t;
2499     AVDictionary **opts;
2500     int orig_nb_streams;
2501
2502     memset(st_index, -1, sizeof(st_index));
2503     is->last_video_stream = is->video_stream = -1;
2504     is->last_audio_stream = is->audio_stream = -1;
2505     is->last_subtitle_stream = is->subtitle_stream = -1;
2506
2507     ic = avformat_alloc_context();
2508     ic->interrupt_callback.callback = decode_interrupt_cb;
2509     ic->interrupt_callback.opaque = is;
2510     err = avformat_open_input(&ic, is->filename, is->iformat, &format_opts);
2511     if (err < 0) {
2512         print_error(is->filename, err);
2513         ret = -1;
2514         goto fail;
2515     }
2516     if ((t = av_dict_get(format_opts, "", NULL, AV_DICT_IGNORE_SUFFIX))) {
2517         av_log(NULL, AV_LOG_ERROR, "Option %s not found.\n", t->key);
2518         ret = AVERROR_OPTION_NOT_FOUND;
2519         goto fail;
2520     }
2521     is->ic = ic;
2522
2523     if (genpts)
2524         ic->flags |= AVFMT_FLAG_GENPTS;
2525
2526     opts = setup_find_stream_info_opts(ic, codec_opts);
2527     orig_nb_streams = ic->nb_streams;
2528
2529     err = avformat_find_stream_info(ic, opts);
2530     if (err < 0) {
2531         fprintf(stderr, "%s: could not find codec parameters\n", is->filename);
2532         ret = -1;
2533         goto fail;
2534     }
2535     for (i = 0; i < orig_nb_streams; i++)
2536         av_dict_free(&opts[i]);
2537     av_freep(&opts);
2538
2539     if (ic->pb)
2540         ic->pb->eof_reached = 0; // FIXME hack, ffplay maybe should not use url_feof() to test for the end
2541
2542     if (seek_by_bytes < 0)
2543         seek_by_bytes = !!(ic->iformat->flags & AVFMT_TS_DISCONT);
2544
2545     /* if seeking requested, we execute it */
2546     if (start_time != AV_NOPTS_VALUE) {
2547         int64_t timestamp;
2548
2549         timestamp = start_time;
2550         /* add the stream start time */
2551         if (ic->start_time != AV_NOPTS_VALUE)
2552             timestamp += ic->start_time;
2553         ret = avformat_seek_file(ic, -1, INT64_MIN, timestamp, INT64_MAX, 0);
2554         if (ret < 0) {
2555             fprintf(stderr, "%s: could not seek to position %0.3f\n",
2556                     is->filename, (double)timestamp / AV_TIME_BASE);
2557         }
2558     }
2559
2560     for (i = 0; i < ic->nb_streams; i++)
2561         ic->streams[i]->discard = AVDISCARD_ALL;
2562     if (!video_disable)
2563         st_index[AVMEDIA_TYPE_VIDEO] =
2564             av_find_best_stream(ic, AVMEDIA_TYPE_VIDEO,
2565                                 wanted_stream[AVMEDIA_TYPE_VIDEO], -1, NULL, 0);
2566     if (!audio_disable)
2567         st_index[AVMEDIA_TYPE_AUDIO] =
2568             av_find_best_stream(ic, AVMEDIA_TYPE_AUDIO,
2569                                 wanted_stream[AVMEDIA_TYPE_AUDIO],
2570                                 st_index[AVMEDIA_TYPE_VIDEO],
2571                                 NULL, 0);
2572     if (!video_disable)
2573         st_index[AVMEDIA_TYPE_SUBTITLE] =
2574             av_find_best_stream(ic, AVMEDIA_TYPE_SUBTITLE,
2575                                 wanted_stream[AVMEDIA_TYPE_SUBTITLE],
2576                                 (st_index[AVMEDIA_TYPE_AUDIO] >= 0 ?
2577                                  st_index[AVMEDIA_TYPE_AUDIO] :
2578                                  st_index[AVMEDIA_TYPE_VIDEO]),
2579                                 NULL, 0);
2580     if (show_status) {
2581         av_dump_format(ic, 0, is->filename, 0);
2582     }
2583
2584     is->show_mode = show_mode;
2585
2586     /* open the streams */
2587     if (st_index[AVMEDIA_TYPE_AUDIO] >= 0) {
2588         stream_component_open(is, st_index[AVMEDIA_TYPE_AUDIO]);
2589     }
2590
2591     ret = -1;
2592     if (st_index[AVMEDIA_TYPE_VIDEO] >= 0) {
2593         ret = stream_component_open(is, st_index[AVMEDIA_TYPE_VIDEO]);
2594     }
2595     is->refresh_tid = SDL_CreateThread(refresh_thread, is);
2596     if (is->show_mode == SHOW_MODE_NONE)
2597         is->show_mode = ret >= 0 ? SHOW_MODE_VIDEO : SHOW_MODE_RDFT;
2598
2599     if (st_index[AVMEDIA_TYPE_SUBTITLE] >= 0) {
2600         stream_component_open(is, st_index[AVMEDIA_TYPE_SUBTITLE]);
2601     }
2602
2603     if (is->video_stream < 0 && is->audio_stream < 0) {
2604         fprintf(stderr, "%s: could not open codecs\n", is->filename);
2605         ret = -1;
2606         goto fail;
2607     }
2608
2609     for (;;) {
2610         if (is->abort_request)
2611             break;
2612         if (is->paused != is->last_paused) {
2613             is->last_paused = is->paused;
2614             if (is->paused)
2615                 is->read_pause_return = av_read_pause(ic);
2616             else
2617                 av_read_play(ic);
2618         }
2619 #if CONFIG_RTSP_DEMUXER || CONFIG_MMSH_PROTOCOL
2620         if (is->paused &&
2621                 (!strcmp(ic->iformat->name, "rtsp") ||
2622                  (ic->pb && !strncmp(input_filename, "mmsh:", 5)))) {
2623             /* wait 10 ms to avoid trying to get another packet */
2624             /* XXX: horrible */
2625             SDL_Delay(10);
2626             continue;
2627         }
2628 #endif
2629         if (is->seek_req) {
2630             int64_t seek_target = is->seek_pos;
2631             int64_t seek_min    = is->seek_rel > 0 ? seek_target - is->seek_rel + 2: INT64_MIN;
2632             int64_t seek_max    = is->seek_rel < 0 ? seek_target - is->seek_rel - 2: INT64_MAX;
2633 // FIXME the +-2 is due to rounding being not done in the correct direction in generation
2634 //      of the seek_pos/seek_rel variables
2635
2636             ret = avformat_seek_file(is->ic, -1, seek_min, seek_target, seek_max, is->seek_flags);
2637             if (ret < 0) {
2638                 fprintf(stderr, "%s: error while seeking\n", is->ic->filename);
2639             } else {
2640                 if (is->audio_stream >= 0) {
2641                     packet_queue_flush(&is->audioq);
2642                     packet_queue_put(&is->audioq, &flush_pkt);
2643                 }
2644                 if (is->subtitle_stream >= 0) {
2645                     packet_queue_flush(&is->subtitleq);
2646                     packet_queue_put(&is->subtitleq, &flush_pkt);
2647                 }
2648                 if (is->video_stream >= 0) {
2649                     packet_queue_flush(&is->videoq);
2650                     packet_queue_put(&is->videoq, &flush_pkt);
2651                 }
2652             }
2653             is->seek_req = 0;
2654             eof = 0;
2655         }
2656
2657         /* if the queues are full, no need to read more */
2658         if (   is->audioq.size + is->videoq.size + is->subtitleq.size > MAX_QUEUE_SIZE
2659             || (   (is->audioq   .nb_packets > MIN_FRAMES || is->audio_stream < 0 || is->audioq.abort_request)
2660                 && (is->videoq   .nb_packets > MIN_FRAMES || is->video_stream < 0 || is->videoq.abort_request)
2661                 && (is->subtitleq.nb_packets > MIN_FRAMES || is->subtitle_stream < 0 || is->subtitleq.abort_request))) {
2662             /* wait 10 ms */
2663             SDL_Delay(10);
2664             continue;
2665         }
2666         if (eof) {
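                 /* at EOF, queue empty packets so the decoders can flush any
                    frames they still buffer; once all queues drain, either loop
                    from the start or exit if -autoexit was given */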
2667             if (is->video_stream >= 0) {
2668                 av_init_packet(pkt);
2669                 pkt->data = NULL;
2670                 pkt->size = 0;
2671                 pkt->stream_index = is->video_stream;
2672                 packet_queue_put(&is->videoq, pkt);
2673             }
2674             if (is->audio_stream >= 0 &&
2675                 is->audio_st->codec->codec->capabilities & CODEC_CAP_DELAY) {
2676                 av_init_packet(pkt);
2677                 pkt->data = NULL;
2678                 pkt->size = 0;
2679                 pkt->stream_index = is->audio_stream;
2680                 packet_queue_put(&is->audioq, pkt);
2681             }
2682             SDL_Delay(10);
2683             if (is->audioq.size + is->videoq.size + is->subtitleq.size == 0) {
2684                 if (loop != 1 && (!loop || --loop)) {
2685                     stream_seek(is, start_time != AV_NOPTS_VALUE ? start_time : 0, 0, 0);
2686                 } else if (autoexit) {
2687                     ret = AVERROR_EOF;
2688                     goto fail;
2689                 }
2690             }
2691             eof=0;
2692             continue;
2693         }
2694         ret = av_read_frame(ic, pkt);
2695         if (ret < 0) {
2696             if (ret == AVERROR_EOF || url_feof(ic->pb))
2697                 eof = 1;
2698             if (ic->pb && ic->pb->error)
2699                 break;
2700             SDL_Delay(100); /* wait for user event */
2701             continue;
2702         }
2703         /* check if packet is in play range specified by user, then queue, otherwise discard */
2704         pkt_in_play_range = duration == AV_NOPTS_VALUE ||
2705                 (pkt->pts - ic->streams[pkt->stream_index]->start_time) *
2706                 av_q2d(ic->streams[pkt->stream_index]->time_base) -
2707                 (double)(start_time != AV_NOPTS_VALUE ? start_time : 0) / 1000000
2708                 <= ((double)duration / 1000000);
2709         if (pkt->stream_index == is->audio_stream && pkt_in_play_range) {
2710             packet_queue_put(&is->audioq, pkt);
2711         } else if (pkt->stream_index == is->video_stream && pkt_in_play_range) {
2712             packet_queue_put(&is->videoq, pkt);
2713         } else if (pkt->stream_index == is->subtitle_stream && pkt_in_play_range) {
2714             packet_queue_put(&is->subtitleq, pkt);
2715         } else {
2716             av_free_packet(pkt);
2717         }
2718     }
2719     /* wait until the end */
2720     while (!is->abort_request) {
2721         SDL_Delay(100);
2722     }
2723
2724     ret = 0;
2725  fail:
2726     /* close each stream */
2727     if (is->audio_stream >= 0)
2728         stream_component_close(is, is->audio_stream);
2729     if (is->video_stream >= 0)
2730         stream_component_close(is, is->video_stream);
2731     if (is->subtitle_stream >= 0)
2732         stream_component_close(is, is->subtitle_stream);
2733     if (is->ic) {
2734         avformat_close_input(&is->ic);
2735     }
2736
2737     if (ret != 0) {
2738         SDL_Event event;
2739
2740         event.type = FF_QUIT_EVENT;
2741         event.user.data1 = is;
2742         SDL_PushEvent(&event);
2743     }
2744     return 0;
2745 }
2746
2747 static VideoState *stream_open(const char *filename, AVInputFormat *iformat)
2748 {
2749     VideoState *is;
2750
2751     is = av_mallocz(sizeof(VideoState));
2752     if (!is)
2753         return NULL;
2754     av_strlcpy(is->filename, filename, sizeof(is->filename));
2755     is->iformat = iformat;
2756     is->ytop    = 0;
2757     is->xleft   = 0;
2758
2759     /* start video display */
2760     is->pictq_mutex = SDL_CreateMutex();
2761     is->pictq_cond  = SDL_CreateCond();
2762
2763     is->subpq_mutex = SDL_CreateMutex();
2764     is->subpq_cond  = SDL_CreateCond();
2765
2766     packet_queue_init(&is->videoq);
2767     packet_queue_init(&is->audioq);
2768     packet_queue_init(&is->subtitleq);
2769
2770     is->av_sync_type = av_sync_type;
2771     is->read_tid     = SDL_CreateThread(read_thread, is);
2772     if (!is->read_tid) {
2773         av_free(is);
2774         return NULL;
2775     }
2776     return is;
2777 }
2778
2779 static void stream_cycle_channel(VideoState *is, int codec_type)
2780 {
2781     AVFormatContext *ic = is->ic;
2782     int start_index, stream_index;
2783     int old_index;
2784     AVStream *st;
2785
2786     if (codec_type == AVMEDIA_TYPE_VIDEO) {
2787         start_index = is->last_video_stream;
2788         old_index = is->video_stream;
2789     } else if (codec_type == AVMEDIA_TYPE_AUDIO) {
2790         start_index = is->last_audio_stream;
2791         old_index = is->audio_stream;
2792     } else {
2793         start_index = is->last_subtitle_stream;
2794         old_index = is->subtitle_stream;
2795     }
2796     stream_index = start_index;
2797     for (;;) {
2798         if (++stream_index >= is->ic->nb_streams)
2799         {
2800             if (codec_type == AVMEDIA_TYPE_SUBTITLE)
2801             {
2802                 stream_index = -1;
2803                 is->last_subtitle_stream = -1;
2804                 goto the_end;
2805             }
2806             if (start_index == -1)
2807                 return;
2808             stream_index = 0;
2809         }
2810         if (stream_index == start_index)
2811             return;
2812         st = ic->streams[stream_index];
2813         if (st->codec->codec_type == codec_type) {
2814             /* check that parameters are OK */
2815             switch (codec_type) {
2816             case AVMEDIA_TYPE_AUDIO:
2817                 if (st->codec->sample_rate != 0 &&
2818                     st->codec->channels != 0)
2819                     goto the_end;
2820                 break;
2821             case AVMEDIA_TYPE_VIDEO:
2822             case AVMEDIA_TYPE_SUBTITLE:
2823                 goto the_end;
2824             default:
2825                 break;
2826             }
2827         }
2828     }
2829  the_end:
2830     stream_component_close(is, old_index);
2831     stream_component_open(is, stream_index);
2832 }
2833
2834
2835 static void toggle_full_screen(VideoState *is)
2836 {
2837     av_unused int i;
2838     is_full_screen = !is_full_screen;
2839 #if defined(__APPLE__) && SDL_VERSION_ATLEAST(1, 2, 14)
2840     /* OS X needs to reallocate the SDL overlays */
2841     for (i = 0; i < VIDEO_PICTURE_QUEUE_SIZE; i++) {
2842         is->pictq[i].reallocate = 1;
2843     }
2844 #endif
2845     video_open(is, 1);
2846 }
2847
2848 static void toggle_pause(VideoState *is)
2849 {
2850     stream_toggle_pause(is);
2851     is->step = 0;
2852 }
2853
2854 static void step_to_next_frame(VideoState *is)
2855 {
2856     /* if the stream is paused, unpause it, then step */
2857     if (is->paused)
2858         stream_toggle_pause(is);
2859     is->step = 1;
2860 }
2861
2862 static void toggle_audio_display(VideoState *is)
2863 {
2864     int bgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0x00);
2865     is->show_mode = (is->show_mode + 1) % SHOW_MODE_NB;
2866     fill_rectangle(screen,
2867                 is->xleft, is->ytop, is->width, is->height,
2868                 bgcolor);
2869     SDL_UpdateRect(screen, is->xleft, is->ytop, is->width, is->height);
2870 }
2871
2872 /* handle an event sent by the GUI */
2873 static void event_loop(VideoState *cur_stream)
2874 {
2875     SDL_Event event;
2876     double incr, pos, frac;
2877
2878     for (;;) {
2879         double x;
2880         SDL_WaitEvent(&event);
2881         switch (event.type) {
2882         case SDL_KEYDOWN:
2883             if (exit_on_keydown) {
2884                 do_exit(cur_stream);
2885                 break;
2886             }
2887             switch (event.key.keysym.sym) {
2888             case SDLK_ESCAPE:
2889             case SDLK_q:
2890                 do_exit(cur_stream);
2891                 break;
2892             case SDLK_f:
2893                 toggle_full_screen(cur_stream);
2894                 cur_stream->force_refresh = 1;
2895                 break;
2896             case SDLK_p:
2897             case SDLK_SPACE:
2898                 toggle_pause(cur_stream);
2899                 break;
2900             case SDLK_s: // S: Step to next frame
2901                 step_to_next_frame(cur_stream);
2902                 break;
2903             case SDLK_a:
2904                 stream_cycle_channel(cur_stream, AVMEDIA_TYPE_AUDIO);
2905                 break;
2906             case SDLK_v:
2907                 stream_cycle_channel(cur_stream, AVMEDIA_TYPE_VIDEO);
2908                 break;
2909             case SDLK_t:
2910                 stream_cycle_channel(cur_stream, AVMEDIA_TYPE_SUBTITLE);
2911                 break;
2912             case SDLK_w:
2913                 toggle_audio_display(cur_stream);
2914                 cur_stream->force_refresh = 1;
2915                 break;
2916             case SDLK_PAGEUP:
2917                 incr = 600.0;
2918                 goto do_seek;
2919             case SDLK_PAGEDOWN:
2920                 incr = -600.0;
2921                 goto do_seek;
2922             case SDLK_LEFT:
2923                 incr = -10.0;
2924                 goto do_seek;
2925             case SDLK_RIGHT:
2926                 incr = 10.0;
2927                 goto do_seek;
2928             case SDLK_UP:
2929                 incr = 60.0;
2930                 goto do_seek;
2931             case SDLK_DOWN:
2932                 incr = -60.0;
2933             do_seek:
2934                     if (seek_by_bytes) {
2935                         if (cur_stream->video_stream >= 0 && cur_stream->video_current_pos >= 0) {
2936                             pos = cur_stream->video_current_pos;
2937                         } else if (cur_stream->audio_stream >= 0 && cur_stream->audio_pkt.pos >= 0) {
2938                             pos = cur_stream->audio_pkt.pos;
2939                         } else
2940                             pos = avio_tell(cur_stream->ic->pb);
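                             /* convert the increment from seconds to bytes using
                                the container bit rate, or a rough 180000 bytes/s
                                guess when the bit rate is unknown */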
2941                         if (cur_stream->ic->bit_rate)
2942                             incr *= cur_stream->ic->bit_rate / 8.0;
2943                         else
2944                             incr *= 180000.0;
2945                         pos += incr;
2946                         stream_seek(cur_stream, pos, incr, 1);
2947                     } else {
2948                         pos = get_master_clock(cur_stream);
2949                         pos += incr;
2950                         stream_seek(cur_stream, (int64_t)(pos * AV_TIME_BASE), (int64_t)(incr * AV_TIME_BASE), 0);
2951                     }
2952                 break;
2953             default:
2954                 break;
2955             }
2956             break;
2957         case SDL_VIDEOEXPOSE:
2958             cur_stream->force_refresh = 1;
2959             break;
2960         case SDL_MOUSEBUTTONDOWN:
2961             if (exit_on_mousedown) {
2962                 do_exit(cur_stream);
2963                 break;
2964             }
2965         case SDL_MOUSEMOTION:
2966             if (event.type == SDL_MOUSEBUTTONDOWN) {
2967                 x = event.button.x;
2968             } else {
2969                 if (event.motion.state != SDL_PRESSED)
2970                     break;
2971                 x = event.motion.x;
2972             }
2973                 if (seek_by_bytes || cur_stream->ic->duration <= 0) {
2974                     uint64_t size =  avio_size(cur_stream->ic->pb);
2975                     stream_seek(cur_stream, size*x/cur_stream->width, 0, 1);
2976                 } else {
2977                     int64_t ts;
2978                     int ns, hh, mm, ss;
2979                     int tns, thh, tmm, tss;
2980                     tns  = cur_stream->ic->duration / 1000000LL;
2981                     thh  = tns / 3600;
2982                     tmm  = (tns % 3600) / 60;
2983                     tss  = (tns % 60);
2984                     frac = x / cur_stream->width;
2985                     ns   = frac * tns;
2986                     hh   = ns / 3600;
2987                     mm   = (ns % 3600) / 60;
2988                     ss   = (ns % 60);
2989                     fprintf(stderr, "Seek to %2.0f%% (%2d:%02d:%02d) of total duration (%2d:%02d:%02d)       \n", frac*100,
2990                             hh, mm, ss, thh, tmm, tss);
2991                     ts = frac * cur_stream->ic->duration;
2992                     if (cur_stream->ic->start_time != AV_NOPTS_VALUE)
2993                         ts += cur_stream->ic->start_time;
2994                     stream_seek(cur_stream, ts, 0, 0);
2995                 }
2996             break;
2997         case SDL_VIDEORESIZE:
2998                 screen = SDL_SetVideoMode(event.resize.w, event.resize.h, 0,
2999                                           SDL_HWSURFACE|SDL_RESIZABLE|SDL_ASYNCBLIT|SDL_HWACCEL);
3000                 screen_width  = cur_stream->width  = event.resize.w;
3001                 screen_height = cur_stream->height = event.resize.h;
3002                 cur_stream->force_refresh = 1;
3003             break;
3004         case SDL_QUIT:
3005         case FF_QUIT_EVENT:
3006             do_exit(cur_stream);
3007             break;
3008         case FF_ALLOC_EVENT:
3009             alloc_picture(event.user.data1);
3010             break;
3011         case FF_REFRESH_EVENT:
3012             video_refresh(event.user.data1);
3013             cur_stream->refresh = 0;
3014             break;
3015         default:
3016             break;
3017         }
3018     }
3019 }
3020
3021 static int opt_frame_size(const char *opt, const char *arg)
3022 {
3023     av_log(NULL, AV_LOG_WARNING, "Option -s is deprecated, use -video_size.\n");
3024     return opt_default("video_size", arg);
3025 }
3026
3027 static int opt_width(const char *opt, const char *arg)
3028 {
3029     screen_width = parse_number_or_die(opt, arg, OPT_INT64, 1, INT_MAX);
3030     return 0;
3031 }
3032
3033 static int opt_height(const char *opt, const char *arg)
3034 {
3035     screen_height = parse_number_or_die(opt, arg, OPT_INT64, 1, INT_MAX);
3036     return 0;
3037 }
3038
3039 static int opt_format(const char *opt, const char *arg)
3040 {
3041     file_iformat = av_find_input_format(arg);
3042     if (!file_iformat) {
3043         fprintf(stderr, "Unknown input format: %s\n", arg);
3044         return AVERROR(EINVAL);
3045     }
3046     return 0;
3047 }
3048
3049 static int opt_frame_pix_fmt(const char *opt, const char *arg)
3050 {
3051     av_log(NULL, AV_LOG_WARNING, "Option -pix_fmt is deprecated, use -pixel_format.\n");
3052     return opt_default("pixel_format", arg);
3053 }
3054
3055 static int opt_sync(const char *opt, const char *arg)
3056 {
3057     if (!strcmp(arg, "audio"))
3058         av_sync_type = AV_SYNC_AUDIO_MASTER;
3059     else if (!strcmp(arg, "video"))
3060         av_sync_type = AV_SYNC_VIDEO_MASTER;
3061     else if (!strcmp(arg, "ext"))
3062         av_sync_type = AV_SYNC_EXTERNAL_CLOCK;
3063     else {
3064         fprintf(stderr, "Unknown value for %s: %s\n", opt, arg);
3065         exit(1);
3066     }
3067     return 0;
3068 }
3069
3070 static int opt_seek(const char *opt, const char *arg)
3071 {
3072     start_time = parse_time_or_die(opt, arg, 1);
3073     return 0;
3074 }
3075
3076 static int opt_duration(const char *opt, const char *arg)
3077 {
3078     duration = parse_time_or_die(opt, arg, 1);
3079     return 0;
3080 }
3081
3082 static int opt_show_mode(const char *opt, const char *arg)
3083 {
3084     show_mode = !strcmp(arg, "video") ? SHOW_MODE_VIDEO :
3085                 !strcmp(arg, "waves") ? SHOW_MODE_WAVES :
3086                 !strcmp(arg, "rdft" ) ? SHOW_MODE_RDFT  :
3087                 parse_number_or_die(opt, arg, OPT_INT, 0, SHOW_MODE_NB-1);
3088     return 0;
3089 }
3090
3091 static void opt_input_file(void *optctx, const char *filename)
3092 {
3093     if (input_filename) {
3094         fprintf(stderr, "Argument '%s' provided as input filename, but '%s' was already specified.\n",
3095                 filename, input_filename);
3096         exit_program(1);
3097     }
3098     if (!strcmp(filename, "-"))
3099         filename = "pipe:";
3100     input_filename = filename;
3101 }
3102
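/* the forced decoder is given as -codec:a, -codec:v or -codec:s; the trailing
   stream specifier selects which decoder name to set */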
3103 static int opt_codec(void *o, const char *opt, const char *arg)
3104 {
3105     switch(opt[strlen(opt)-1]){
3106     case 'a' :    audio_codec_name = arg; break;
3107     case 's' : subtitle_codec_name = arg; break;
3108     case 'v' :    video_codec_name = arg; break;
3109     }
3110     return 0;
3111 }
3112
3113 static int dummy;
3114
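/* command line options understood by ffplay, in addition to the common options
   pulled in from cmdutils_common_opts.h */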
3115 static const OptionDef options[] = {
3116 #include "cmdutils_common_opts.h"
3117     { "x", HAS_ARG, { (void*)opt_width }, "force displayed width", "width" },
3118     { "y", HAS_ARG, { (void*)opt_height }, "force displayed height", "height" },
3119     { "s", HAS_ARG | OPT_VIDEO, { (void*)opt_frame_size }, "set frame size (WxH or abbreviation)", "size" },
3120     { "fs", OPT_BOOL, { (void*)&is_full_screen }, "force full screen" },
3121     { "an", OPT_BOOL, { (void*)&audio_disable }, "disable audio" },
3122     { "vn", OPT_BOOL, { (void*)&video_disable }, "disable video" },
3123     { "ast", OPT_INT | HAS_ARG | OPT_EXPERT, { (void*)&wanted_stream[AVMEDIA_TYPE_AUDIO] }, "select desired audio stream", "stream_number" },
3124     { "vst", OPT_INT | HAS_ARG | OPT_EXPERT, { (void*)&wanted_stream[AVMEDIA_TYPE_VIDEO] }, "select desired video stream", "stream_number" },
3125     { "sst", OPT_INT | HAS_ARG | OPT_EXPERT, { (void*)&wanted_stream[AVMEDIA_TYPE_SUBTITLE] }, "select desired subtitle stream", "stream_number" },
3126     { "ss", HAS_ARG, { (void*)&opt_seek }, "seek to a given position in seconds", "pos" },
3127     { "t", HAS_ARG, { (void*)&opt_duration }, "play \"duration\" seconds of audio/video", "duration" },
3128     { "bytes", OPT_INT | HAS_ARG, { (void*)&seek_by_bytes }, "seek by bytes 0=off 1=on -1=auto", "val" },
3129     { "nodisp", OPT_BOOL, { (void*)&display_disable }, "disable graphical display" },
3130     { "f", HAS_ARG, { (void*)opt_format }, "force format", "fmt" },
3131     { "pix_fmt", HAS_ARG | OPT_EXPERT | OPT_VIDEO, { (void*)opt_frame_pix_fmt }, "set pixel format", "format" },
3132     { "stats", OPT_BOOL | OPT_EXPERT, { (void*)&show_status }, "show status", "" },
3133     { "bug", OPT_INT | HAS_ARG | OPT_EXPERT, { (void*)&workaround_bugs }, "work around bugs", "" },
3134     { "fast", OPT_BOOL | OPT_EXPERT, { (void*)&fast }, "non-spec-compliant optimizations", "" },
3135     { "genpts", OPT_BOOL | OPT_EXPERT, { (void*)&genpts }, "generate pts", "" },
3136     { "drp", OPT_INT | HAS_ARG | OPT_EXPERT, { (void*)&decoder_reorder_pts }, "let decoder reorder pts 0=off 1=on -1=auto", ""},
3137     { "lowres", OPT_INT | HAS_ARG | OPT_EXPERT, { (void*)&lowres }, "force low resolution decoding", "level" },
3138     { "skiploop", OPT_INT | HAS_ARG | OPT_EXPERT, { (void*)&skip_loop_filter }, "set loop filter skipping", "mode" },
3139     { "skipframe", OPT_INT | HAS_ARG | OPT_EXPERT, { (void*)&skip_frame }, "set frame skipping", "mode" },
3140     { "skipidct", OPT_INT | HAS_ARG | OPT_EXPERT, { (void*)&skip_idct }, "set IDCT skipping", "mode" },
3141     { "idct", OPT_INT | HAS_ARG | OPT_EXPERT, { (void*)&idct }, "set idct algo",  "algo" },
3142     { "ec", OPT_INT | HAS_ARG | OPT_EXPERT, { (void*)&error_concealment }, "set error concealment options",  "bit_mask" },
3143     { "sync", HAS_ARG | OPT_EXPERT, { (void*)opt_sync }, "set audio-video sync. type (type=audio/video/ext)", "type" },
3144     { "autoexit", OPT_BOOL | OPT_EXPERT, { (void*)&autoexit }, "exit at the end", "" },
3145     { "exitonkeydown", OPT_BOOL | OPT_EXPERT, { (void*)&exit_on_keydown }, "exit on key down", "" },
3146     { "exitonmousedown", OPT_BOOL | OPT_EXPERT, { (void*)&exit_on_mousedown }, "exit on mouse down", "" },
3147     { "loop", OPT_INT | HAS_ARG | OPT_EXPERT, { (void*)&loop }, "set number of times the playback shall be looped", "loop count" },
3148     { "framedrop", OPT_BOOL | OPT_EXPERT, { (void*)&framedrop }, "drop frames when cpu is too slow", "" },
3149     { "window_title", OPT_STRING | HAS_ARG, { (void*)&window_title }, "set window title", "window title" },
3150 #if CONFIG_AVFILTER
3151     { "vf", OPT_STRING | HAS_ARG, { (void*)&vfilters }, "video filters", "filter list" },
3152 #endif
3153     { "rdftspeed", OPT_INT | HAS_ARG | OPT_AUDIO | OPT_EXPERT, { (void*)&rdftspeed }, "rdft speed", "msecs" },
3154     { "showmode", HAS_ARG, { (void*)opt_show_mode }, "select show mode (0 = video, 1 = waves, 2 = RDFT)", "mode" },
3155     { "default", HAS_ARG | OPT_AUDIO | OPT_VIDEO | OPT_EXPERT, { (void*)opt_default }, "generic catch-all option", "" },
3156     { "i", OPT_BOOL, { (void*)&dummy }, "read specified file", "input_file" },
3157     { "codec", HAS_ARG | OPT_FUNC2, { (void*)opt_codec }, "force decoder", "decoder" },
3158     { NULL, },
3159 };
3160
3161 static void show_usage(void)
3162 {
3163     av_log(NULL, AV_LOG_INFO, "Simple media player\n");
3164     av_log(NULL, AV_LOG_INFO, "usage: %s [options] input_file\n", program_name);
3165     av_log(NULL, AV_LOG_INFO, "\n");
3166 }
3167
3168 static int opt_help(const char *opt, const char *arg)
3169 {
3170     av_log_set_callback(log_callback_help);
3171     show_usage();
3172     show_help_options(options, "Main options:\n",
3173                       OPT_EXPERT, 0);
3174     show_help_options(options, "\nAdvanced options:\n",
3175                       OPT_EXPERT, OPT_EXPERT);
3176     printf("\n");
3177     show_help_children(avcodec_get_class(), AV_OPT_FLAG_DECODING_PARAM);
3178     show_help_children(avformat_get_class(), AV_OPT_FLAG_DECODING_PARAM);
3179 #if !CONFIG_AVFILTER
3180     show_help_children(sws_get_class(), AV_OPT_FLAG_ENCODING_PARAM);
3181 #endif
3182     printf("\nWhile playing:\n"
3183            "q, ESC              quit\n"
3184            "f                   toggle full screen\n"
3185            "p, SPC              pause\n"
3186            "a                   cycle audio channel\n"
3187            "v                   cycle video channel\n"
3188            "t                   cycle subtitle channel\n"
3189            "w                   show audio waves\n"
3190            "s                   activate frame-step mode\n"
3191            "left/right          seek backward/forward 10 seconds\n"
3192            "down/up             seek backward/forward 1 minute\n"
3193            "page down/page up   seek backward/forward 10 minutes\n"
3194            "mouse click         seek to percentage in file corresponding to fraction of width\n"
3195            );
3196     return 0;
3197 }
3198
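/* lock manager callback registered with av_lockmgr_register(); libavcodec uses it to
   serialize access to its non-thread-safe parts. Returns 0 on success, nonzero on error. */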
3199 static int lockmgr(void **mtx, enum AVLockOp op)
3200 {
3201     switch (op) {
3202     case AV_LOCK_CREATE:
3203         *mtx = SDL_CreateMutex();
3204         if (!*mtx)
3205             return 1;
3206         return 0;
3207     case AV_LOCK_OBTAIN:
3208         return !!SDL_LockMutex(*mtx);
3209     case AV_LOCK_RELEASE:
3210         return !!SDL_UnlockMutex(*mtx);
3211     case AV_LOCK_DESTROY:
3212         SDL_DestroyMutex(*mtx);
3213         return 0;
3214     }
3215     return 1;
3216 }
3217
3218 /* program entry point */
3219 int main(int argc, char **argv)
3220 {
3221     int flags;
3222     VideoState *is;
3223
3224     av_log_set_flags(AV_LOG_SKIP_REPEATED);
3225     parse_loglevel(argc, argv, options);
3226
3227     /* register all codecs, demuxers and protocols */
3228     avcodec_register_all();
3229 #if CONFIG_AVDEVICE
3230     avdevice_register_all();
3231 #endif
3232 #if CONFIG_AVFILTER
3233     avfilter_register_all();
3234 #endif
3235     av_register_all();
3236     avformat_network_init();
3237
3238     init_opts();
3239
3240     signal(SIGINT , sigterm_handler); /* Interrupt (ANSI).    */
3241     signal(SIGTERM, sigterm_handler); /* Termination (ANSI).  */
3242
3243     show_banner(argc, argv, options);
3244
3245     parse_options(NULL, argc, argv, options, opt_input_file);
3246
3247     if (!input_filename) {
3248         show_usage();
3249         fprintf(stderr, "An input file must be specified\n");
3250         fprintf(stderr, "Use -h to get full help or, even better, run 'man %s'\n", program_name);
3251         exit(1);
3252     }
3253
3254     if (display_disable) {
3255         video_disable = 1;
3256     }
3257     flags = SDL_INIT_VIDEO | SDL_INIT_AUDIO | SDL_INIT_TIMER;
3258     if (audio_disable)
3259         flags &= ~SDL_INIT_AUDIO;
3260 #if !defined(__MINGW32__) && !defined(__APPLE__)
3261     flags |= SDL_INIT_EVENTTHREAD; /* Not supported on Windows or Mac OS X */
3262 #endif
3263     if (SDL_Init (flags)) {
3264         fprintf(stderr, "Could not initialize SDL - %s\n", SDL_GetError());
3265         fprintf(stderr, "(Did you set the DISPLAY variable?)\n");
3266         exit(1);
3267     }
3268
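    /* remember the desktop resolution so that toggling full screen can use it */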
3269     if (!display_disable) {
3270 #if HAVE_SDL_VIDEO_SIZE
3271         const SDL_VideoInfo *vi = SDL_GetVideoInfo();
3272         fs_screen_width = vi->current_w;
3273         fs_screen_height = vi->current_h;
3274 #endif
3275     }
3276
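    /* ignore event classes ffplay never reads so they cannot fill up the SDL event queue */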
3277     SDL_EventState(SDL_ACTIVEEVENT, SDL_IGNORE);
3278     SDL_EventState(SDL_SYSWMEVENT, SDL_IGNORE);
3279     SDL_EventState(SDL_USEREVENT, SDL_IGNORE);
3280
3281     if (av_lockmgr_register(lockmgr)) {
3282         fprintf(stderr, "Could not initialize lock manager!\n");
3283         do_exit(NULL);
3284     }
3285
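    /* flush_pkt is a sentinel packet: the demuxer pushes it into the packet queues when
       seeking, and the decoders flush their codec buffers when they dequeue it */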
3286     av_init_packet(&flush_pkt);
3287     flush_pkt.data = (uint8_t *)(intptr_t)"FLUSH";
3288
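    /* stream_open() spawns the demuxer (read) thread; from here on, this thread only
       runs the SDL event loop */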
3289     is = stream_open(input_filename, file_iformat);
3290     if (!is) {
3291         fprintf(stderr, "Failed to initialize VideoState!\n");
3292         do_exit(NULL);
3293     }
3294
3295     event_loop(is);
3296
3297     /* not reached: event_loop() only exits through do_exit(), which calls exit() */
3298
3299     return 0;
3300 }