2 * Copyright (c) 2003 Fabrice Bellard
4 * This file is part of FFmpeg.
6 * FFmpeg is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2.1 of the License, or (at your option) any later version.
11 * FFmpeg is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with FFmpeg; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
23 * simple media player based on the FFmpeg libraries
31 #include "libavutil/avstring.h"
32 #include "libavutil/colorspace.h"
33 #include "libavutil/mathematics.h"
34 #include "libavutil/pixdesc.h"
35 #include "libavutil/imgutils.h"
36 #include "libavutil/dict.h"
37 #include "libavutil/parseutils.h"
38 #include "libavutil/samplefmt.h"
39 #include "libavutil/avassert.h"
40 #include "libavutil/time.h"
41 #include "libavformat/avformat.h"
42 #include "libavdevice/avdevice.h"
43 #include "libswscale/swscale.h"
44 #include "libavutil/opt.h"
45 #include "libavcodec/avfft.h"
46 #include "libswresample/swresample.h"
49 # include "libavfilter/avcodec.h"
50 # include "libavfilter/avfilter.h"
51 # include "libavfilter/buffersink.h"
52 # include "libavfilter/buffersrc.h"
56 #include <SDL_thread.h>
62 const char program_name[] = "ffplay";
63 const int program_birth_year = 2003;
65 #define MAX_QUEUE_SIZE (15 * 1024 * 1024)
68 /* SDL audio buffer size, in samples. Should be small to allow precise
69 A/V sync, as SDL does not report hardware buffer fullness. */
70 #define SDL_AUDIO_BUFFER_SIZE 1024
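/* 1024 sample frames correspond to roughly 23 ms of audio at 44.1 kHz */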
72 /* no AV sync correction is done if below the minimum AV sync threshold */
73 #define AV_SYNC_THRESHOLD_MIN 0.01
74 /* AV sync correction is done if above the maximum AV sync threshold */
75 #define AV_SYNC_THRESHOLD_MAX 0.1
76 /* If a frame duration is longer than this, it will not be duplicated to compensate for AV sync */
77 #define AV_SYNC_FRAMEDUP_THRESHOLD 0.1
78 /* no AV correction is done if the error is too large */
79 #define AV_NOSYNC_THRESHOLD 10.0
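/* differences larger than AV_NOSYNC_THRESHOLD are treated as hopeless:
   synchronize_audio() stops adjusting the sample count and
   sync_clock_to_slave() snaps the slaved clock instead of drifting it */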
81 /* maximum audio speed change to get correct sync */
82 #define SAMPLE_CORRECTION_PERCENT_MAX 10
84 /* external clock speed adjustment constants for realtime sources based on buffer fullness */
85 #define EXTERNAL_CLOCK_SPEED_MIN 0.900
86 #define EXTERNAL_CLOCK_SPEED_MAX 1.010
87 #define EXTERNAL_CLOCK_SPEED_STEP 0.001
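/* check_external_clock_speed() moves the external clock speed by one step at a
   time, within [EXTERNAL_CLOCK_SPEED_MIN, EXTERNAL_CLOCK_SPEED_MAX], depending
   on how full the packet queues are */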
89 /* we use about AUDIO_DIFF_AVG_NB A-V differences to compute the average */
90 #define AUDIO_DIFF_AVG_NB 20
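/* the differences are accumulated in an exponential moving average, so roughly
   the last AUDIO_DIFF_AVG_NB measurements dominate the estimate */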
92 /* poll for a possibly required screen refresh at least this often; should be less than 1/fps */
93 #define REFRESH_RATE 0.01
95 /* NOTE: the size must be big enough to compensate for the hardware audio buffer size */
96 /* TODO: We assume that a decoded and resampled frame fits into this buffer */
97 #define SAMPLE_ARRAY_SIZE (8 * 65536)
99 #define CURSOR_HIDE_DELAY 1000000
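/* microseconds of mouse inactivity, compared against av_gettime() */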
101 static int64_t sws_flags = SWS_BICUBIC;
103 typedef struct MyAVPacketList {
105 struct MyAVPacketList *next;
109 typedef struct PacketQueue {
110 MyAVPacketList *first_pkt, *last_pkt;
119 #define VIDEO_PICTURE_QUEUE_SIZE 3
120 #define SUBPICTURE_QUEUE_SIZE 4
122 typedef struct VideoPicture {
123 double pts; // presentation timestamp for this picture
124 int64_t pos; // byte position in file
126 int width, height; /* source height & width */
134 typedef struct SubPicture {
135 double pts; /* presentation time stamp for this picture */
139 typedef struct AudioParams {
142 int64_t channel_layout;
143 enum AVSampleFormat fmt;
146 typedef struct Clock {
147 double pts; /* clock base */
148 double pts_drift; /* clock base minus time at which we updated the clock */
151 int serial; /* clock is based on a packet with this serial */
153 int *queue_serial; /* pointer to the current packet queue serial, used for obsolete clock detection */
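/* when *queue_serial no longer matches serial (e.g. after a seek), get_clock()
   reports NAN so the stale value is ignored */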
157 AV_SYNC_AUDIO_MASTER, /* default choice */
158 AV_SYNC_VIDEO_MASTER,
159 AV_SYNC_EXTERNAL_CLOCK, /* synchronize to an external clock */
162 typedef struct VideoState {
163 SDL_Thread *read_tid;
164 SDL_Thread *video_tid;
165 AVInputFormat *iformat;
171 int queue_attachments_req;
176 int read_pause_return;
189 int audio_clock_serial;
190 double audio_diff_cum; /* used for AV difference average computation */
191 double audio_diff_avg_coef;
192 double audio_diff_threshold;
193 int audio_diff_avg_count;
196 int audio_hw_buf_size;
197 uint8_t silence_buf[SDL_AUDIO_BUFFER_SIZE];
200 unsigned int audio_buf_size; /* in bytes */
201 unsigned int audio_buf1_size;
202 int audio_buf_index; /* in bytes */
203 int audio_write_buf_size;
204 int audio_buf_frames_pending;
205 AVPacket audio_pkt_temp;
207 int audio_pkt_temp_serial;
208 int audio_last_serial;
209 struct AudioParams audio_src;
211 struct AudioParams audio_filter_src;
213 struct AudioParams audio_tgt;
214 struct SwrContext *swr_ctx;
215 double audio_current_pts;
216 double audio_current_pts_drift;
217 int frame_drops_early;
218 int frame_drops_late;
222 SHOW_MODE_NONE = -1, SHOW_MODE_VIDEO = 0, SHOW_MODE_WAVES, SHOW_MODE_RDFT, SHOW_MODE_NB
224 int16_t sample_array[SAMPLE_ARRAY_SIZE];
225 int sample_array_index;
229 FFTSample *rdft_data;
231 double last_vis_time;
233 SDL_Thread *subtitle_tid;
235 int subtitle_stream_changed;
236 AVStream *subtitle_st;
237 PacketQueue subtitleq;
238 SubPicture subpq[SUBPICTURE_QUEUE_SIZE];
239 int subpq_size, subpq_rindex, subpq_windex;
240 SDL_mutex *subpq_mutex;
241 SDL_cond *subpq_cond;
244 double frame_last_pts;
245 double frame_last_duration;
246 double frame_last_dropped_pts;
247 double frame_last_returned_time;
248 double frame_last_filter_delay;
249 int64_t frame_last_dropped_pos;
250 int frame_last_dropped_serial;
254 double video_current_pts; // current displayed pts
255 double video_current_pts_drift; // video_current_pts minus the time (av_gettime) at which it was last updated; used to derive a running video pts
256 int64_t video_current_pos; // current displayed file pos
257 double max_frame_duration; // maximum duration of a frame - above this, we consider the jump a timestamp discontinuity
258 VideoPicture pictq[VIDEO_PICTURE_QUEUE_SIZE];
259 int pictq_size, pictq_rindex, pictq_windex;
260 SDL_mutex *pictq_mutex;
261 SDL_cond *pictq_cond;
263 struct SwsContext *img_convert_ctx;
265 SDL_Rect last_display_rect;
268 int width, height, xleft, ytop;
272 AVFilterContext *in_video_filter; // the first filter in the video chain
273 AVFilterContext *out_video_filter; // the last filter in the video chain
274 AVFilterContext *in_audio_filter; // the first filter in the audio chain
275 AVFilterContext *out_audio_filter; // the last filter in the audio chain
276 AVFilterGraph *agraph; // audio filter graph
279 int last_video_stream, last_audio_stream, last_subtitle_stream;
281 SDL_cond *continue_read_thread;
284 /* options specified by the user */
285 static AVInputFormat *file_iformat;
286 static const char *input_filename;
287 static const char *window_title;
288 static int fs_screen_width;
289 static int fs_screen_height;
290 static int default_width = 640;
291 static int default_height = 480;
292 static int screen_width = 0;
293 static int screen_height = 0;
294 static int audio_disable;
295 static int video_disable;
296 static int subtitle_disable;
297 static int wanted_stream[AVMEDIA_TYPE_NB] = {
298 [AVMEDIA_TYPE_AUDIO] = -1,
299 [AVMEDIA_TYPE_VIDEO] = -1,
300 [AVMEDIA_TYPE_SUBTITLE] = -1,
302 static int seek_by_bytes = -1;
303 static int display_disable;
304 static int show_status = 1;
305 static int av_sync_type = AV_SYNC_AUDIO_MASTER;
306 static int64_t start_time = AV_NOPTS_VALUE;
307 static int64_t duration = AV_NOPTS_VALUE;
308 static int workaround_bugs = 1;
310 static int genpts = 0;
311 static int lowres = 0;
312 static int idct = FF_IDCT_AUTO;
313 static int error_concealment = 3;
314 static int decoder_reorder_pts = -1;
316 static int exit_on_keydown;
317 static int exit_on_mousedown;
319 static int framedrop = -1;
320 static int infinite_buffer = -1;
321 static enum ShowMode show_mode = SHOW_MODE_NONE;
322 static const char *audio_codec_name;
323 static const char *subtitle_codec_name;
324 static const char *video_codec_name;
325 double rdftspeed = 0.02;
326 static int64_t cursor_last_shown;
327 static int cursor_hidden = 0;
329 static char *vfilters = NULL;
330 static char *afilters = NULL;
333 /* current context */
334 static int is_full_screen;
335 static int64_t audio_callback_time;
337 static AVPacket flush_pkt;
339 #define FF_ALLOC_EVENT (SDL_USEREVENT)
340 #define FF_QUIT_EVENT (SDL_USEREVENT + 2)
342 static SDL_Surface *screen;
345 int cmp_audio_fmts(enum AVSampleFormat fmt1, int64_t channel_count1,
346 enum AVSampleFormat fmt2, int64_t channel_count2)
348 /* If channel count == 1, planar and non-planar formats are the same */
349 if (channel_count1 == 1 && channel_count2 == 1)
350 return av_get_packed_sample_fmt(fmt1) != av_get_packed_sample_fmt(fmt2);
352 return channel_count1 != channel_count2 || fmt1 != fmt2;
356 int64_t get_valid_channel_layout(int64_t channel_layout, int channels)
358 if (channel_layout && av_get_channel_layout_nb_channels(channel_layout) == channels)
359 return channel_layout;
364 static int packet_queue_put(PacketQueue *q, AVPacket *pkt);
366 static int packet_queue_put_private(PacketQueue *q, AVPacket *pkt)
368 MyAVPacketList *pkt1;
370 if (q->abort_request)
373 pkt1 = av_malloc(sizeof(MyAVPacketList));
378 if (pkt == &flush_pkt)
380 pkt1->serial = q->serial;
385 q->last_pkt->next = pkt1;
388 q->size += pkt1->pkt.size + sizeof(*pkt1);
389 /* XXX: should duplicate packet data in DV case */
390 SDL_CondSignal(q->cond);
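/* every queued packet is stamped with the queue's serial; queueing the flush
   packet on a seek bumps that serial, so consumers can detect and drop data
   belonging to the previous playback position */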
394 static int packet_queue_put(PacketQueue *q, AVPacket *pkt)
398 /* duplicate the packet */
399 if (pkt != &flush_pkt && av_dup_packet(pkt) < 0)
402 SDL_LockMutex(q->mutex);
403 ret = packet_queue_put_private(q, pkt);
404 SDL_UnlockMutex(q->mutex);
406 if (pkt != &flush_pkt && ret < 0)
412 /* packet queue handling */
413 static void packet_queue_init(PacketQueue *q)
415 memset(q, 0, sizeof(PacketQueue));
416 q->mutex = SDL_CreateMutex();
417 q->cond = SDL_CreateCond();
418 q->abort_request = 1;
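/* the queue starts out aborted; packet_queue_start() clears abort_request and
   queues a flush packet before the queue is used */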
421 static void packet_queue_flush(PacketQueue *q)
423 MyAVPacketList *pkt, *pkt1;
425 SDL_LockMutex(q->mutex);
426 for (pkt = q->first_pkt; pkt != NULL; pkt = pkt1) {
428 av_free_packet(&pkt->pkt);
435 SDL_UnlockMutex(q->mutex);
438 static void packet_queue_destroy(PacketQueue *q)
440 packet_queue_flush(q);
441 SDL_DestroyMutex(q->mutex);
442 SDL_DestroyCond(q->cond);
445 static void packet_queue_abort(PacketQueue *q)
447 SDL_LockMutex(q->mutex);
449 q->abort_request = 1;
451 SDL_CondSignal(q->cond);
453 SDL_UnlockMutex(q->mutex);
456 static void packet_queue_start(PacketQueue *q)
458 SDL_LockMutex(q->mutex);
459 q->abort_request = 0;
460 packet_queue_put_private(q, &flush_pkt);
461 SDL_UnlockMutex(q->mutex);
464 /* return < 0 if aborted, 0 if no packet and > 0 if packet. */
465 static int packet_queue_get(PacketQueue *q, AVPacket *pkt, int block, int *serial)
467 MyAVPacketList *pkt1;
470 SDL_LockMutex(q->mutex);
473 if (q->abort_request) {
480 q->first_pkt = pkt1->next;
484 q->size -= pkt1->pkt.size + sizeof(*pkt1);
487 *serial = pkt1->serial;
495 SDL_CondWait(q->cond, q->mutex);
498 SDL_UnlockMutex(q->mutex);
502 static inline void fill_rectangle(SDL_Surface *screen,
503 int x, int y, int w, int h, int color, int update)
510 SDL_FillRect(screen, &rect, color);
511 if (update && w > 0 && h > 0)
512 SDL_UpdateRect(screen, x, y, w, h);
515 /* draw only the border of a rectangle */
516 static void fill_border(int xleft, int ytop, int width, int height, int x, int y, int w, int h, int color, int update)
520 /* fill the background */
524 w2 = width - (x + w);
530 h2 = height - (y + h);
533 fill_rectangle(screen,
537 fill_rectangle(screen,
538 xleft + width - w2, ytop,
541 fill_rectangle(screen,
545 fill_rectangle(screen,
546 xleft + w1, ytop + height - h2,
551 #define ALPHA_BLEND(a, oldp, newp, s)\
552 ((((oldp << s) * (255 - (a))) + (newp * (a))) / (255 << s))
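/* blends newp over oldp with alpha a; the shift s compensates for newp being a
   sum of 2^s subsampled values, as used for the chroma planes below */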
554 #define RGBA_IN(r, g, b, a, s)\
556 unsigned int v = ((const uint32_t *)(s))[0];\
557 a = (v >> 24) & 0xff;\
558 r = (v >> 16) & 0xff;\
559 g = (v >> 8) & 0xff;\
563 #define YUVA_IN(y, u, v, a, s, pal)\
565 unsigned int val = ((const uint32_t *)(pal))[*(const uint8_t*)(s)];\
566 a = (val >> 24) & 0xff;\
567 y = (val >> 16) & 0xff;\
568 u = (val >> 8) & 0xff;\
572 #define YUVA_OUT(d, y, u, v, a)\
574 ((uint32_t *)(d))[0] = (a << 24) | (y << 16) | (u << 8) | v;\
580 static void blend_subrect(AVPicture *dst, const AVSubtitleRect *rect, int imgw, int imgh)
582 int wrap, wrap3, width2, skip2;
583 int y, u, v, a, u1, v1, a1, w, h;
584 uint8_t *lum, *cb, *cr;
587 int dstx, dsty, dstw, dsth;
589 dstw = av_clip(rect->w, 0, imgw);
590 dsth = av_clip(rect->h, 0, imgh);
591 dstx = av_clip(rect->x, 0, imgw - dstw);
592 dsty = av_clip(rect->y, 0, imgh - dsth);
593 lum = dst->data[0] + dsty * dst->linesize[0];
594 cb = dst->data[1] + (dsty >> 1) * dst->linesize[1];
595 cr = dst->data[2] + (dsty >> 1) * dst->linesize[2];
597 width2 = ((dstw + 1) >> 1) + (dstx & ~dstw & 1);
599 wrap = dst->linesize[0];
600 wrap3 = rect->pict.linesize[0];
601 p = rect->pict.data[0];
602 pal = (const uint32_t *)rect->pict.data[1]; /* Now in YCrCb! */
610 YUVA_IN(y, u, v, a, p, pal);
611 lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
612 cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
613 cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
619 for (w = dstw - (dstx & 1); w >= 2; w -= 2) {
620 YUVA_IN(y, u, v, a, p, pal);
624 lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
626 YUVA_IN(y, u, v, a, p + BPP, pal);
630 lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
631 cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
632 cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
639 YUVA_IN(y, u, v, a, p, pal);
640 lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
641 cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
642 cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
646 p += wrap3 - dstw * BPP;
647 lum += wrap - dstw - dstx;
648 cb += dst->linesize[1] - width2 - skip2;
649 cr += dst->linesize[2] - width2 - skip2;
651 for (h = dsth - (dsty & 1); h >= 2; h -= 2) {
657 YUVA_IN(y, u, v, a, p, pal);
661 lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
664 YUVA_IN(y, u, v, a, p, pal);
668 lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
669 cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
670 cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
676 for (w = dstw - (dstx & 1); w >= 2; w -= 2) {
677 YUVA_IN(y, u, v, a, p, pal);
681 lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
683 YUVA_IN(y, u, v, a, p + BPP, pal);
687 lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
691 YUVA_IN(y, u, v, a, p, pal);
695 lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
697 YUVA_IN(y, u, v, a, p + BPP, pal);
701 lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
703 cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 2);
704 cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 2);
708 p += -wrap3 + 2 * BPP;
712 YUVA_IN(y, u, v, a, p, pal);
716 lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
719 YUVA_IN(y, u, v, a, p, pal);
723 lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
724 cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
725 cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
731 p += wrap3 + (wrap3 - dstw * BPP);
732 lum += wrap + (wrap - dstw - dstx);
733 cb += dst->linesize[1] - width2 - skip2;
734 cr += dst->linesize[2] - width2 - skip2;
736 /* handle odd height */
743 YUVA_IN(y, u, v, a, p, pal);
744 lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
745 cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
746 cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
752 for (w = dstw - (dstx & 1); w >= 2; w -= 2) {
753 YUVA_IN(y, u, v, a, p, pal);
757 lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
759 YUVA_IN(y, u, v, a, p + BPP, pal);
763 lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
764 cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u, 1);
765 cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v, 1);
772 YUVA_IN(y, u, v, a, p, pal);
773 lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
774 cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
775 cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
780 static void free_subpicture(SubPicture *sp)
782 avsubtitle_free(&sp->sub);
785 static void calculate_display_rect(SDL_Rect *rect, int scr_xleft, int scr_ytop, int scr_width, int scr_height, VideoPicture *vp)
788 int width, height, x, y;
790 if (vp->sar.num == 0)
793 aspect_ratio = av_q2d(vp->sar);
795 if (aspect_ratio <= 0.0)
797 aspect_ratio *= (float)vp->width / (float)vp->height;
799 /* XXX: we assume the screen has a 1:1 pixel aspect ratio */
801 width = ((int)rint(height * aspect_ratio)) & ~1;
802 if (width > scr_width) {
804 height = ((int)rint(width / aspect_ratio)) & ~1;
806 x = (scr_width - width) / 2;
807 y = (scr_height - height) / 2;
808 rect->x = scr_xleft + x;
809 rect->y = scr_ytop + y;
810 rect->w = FFMAX(width, 1);
811 rect->h = FFMAX(height, 1);
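/* clamp to at least 1x1 so callers never see a degenerate rectangle */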
814 static void video_image_display(VideoState *is)
822 vp = &is->pictq[is->pictq_rindex];
824 if (is->subtitle_st) {
825 if (is->subpq_size > 0) {
826 sp = &is->subpq[is->subpq_rindex];
828 if (vp->pts >= sp->pts + ((float) sp->sub.start_display_time / 1000)) {
829 SDL_LockYUVOverlay (vp->bmp);
831 pict.data[0] = vp->bmp->pixels[0];
832 pict.data[1] = vp->bmp->pixels[2];
833 pict.data[2] = vp->bmp->pixels[1];
835 pict.linesize[0] = vp->bmp->pitches[0];
836 pict.linesize[1] = vp->bmp->pitches[2];
837 pict.linesize[2] = vp->bmp->pitches[1];
839 for (i = 0; i < sp->sub.num_rects; i++)
840 blend_subrect(&pict, sp->sub.rects[i],
841 vp->bmp->w, vp->bmp->h);
843 SDL_UnlockYUVOverlay (vp->bmp);
848 calculate_display_rect(&rect, is->xleft, is->ytop, is->width, is->height, vp);
850 SDL_DisplayYUVOverlay(vp->bmp, &rect);
852 if (rect.x != is->last_display_rect.x || rect.y != is->last_display_rect.y || rect.w != is->last_display_rect.w || rect.h != is->last_display_rect.h || is->force_refresh) {
853 int bgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0x00);
854 fill_border(is->xleft, is->ytop, is->width, is->height, rect.x, rect.y, rect.w, rect.h, bgcolor, 1);
855 is->last_display_rect = rect;
860 static inline int compute_mod(int a, int b)
862 return a < 0 ? a%b + b : a%b; /* result is always in [0, b), even for negative a */
865 static void video_audio_display(VideoState *s)
867 int i, i_start, x, y1, y, ys, delay, n, nb_display_channels;
868 int ch, channels, h, h2, bgcolor, fgcolor;
870 int rdft_bits, nb_freq;
872 for (rdft_bits = 1; (1 << rdft_bits) < 2 * s->height; rdft_bits++)
874 nb_freq = 1 << (rdft_bits - 1);
876 /* compute display index: center on the currently output samples */
877 channels = s->audio_tgt.channels;
878 nb_display_channels = channels;
880 int data_used= s->show_mode == SHOW_MODE_WAVES ? s->width : (2*nb_freq);
882 delay = s->audio_write_buf_size;
885 /* to be more precise, we take into account the time spent since
886 the last buffer computation */
887 if (audio_callback_time) {
888 time_diff = av_gettime() - audio_callback_time;
889 delay -= (time_diff * s->audio_tgt.freq) / 1000000;
892 delay += 2 * data_used;
893 if (delay < data_used)
896 i_start= x = compute_mod(s->sample_array_index - delay * channels, SAMPLE_ARRAY_SIZE);
897 if (s->show_mode == SHOW_MODE_WAVES) {
899 for (i = 0; i < 1000; i += channels) {
900 int idx = (SAMPLE_ARRAY_SIZE + x - i) % SAMPLE_ARRAY_SIZE;
901 int a = s->sample_array[idx];
902 int b = s->sample_array[(idx + 4 * channels) % SAMPLE_ARRAY_SIZE];
903 int c = s->sample_array[(idx + 5 * channels) % SAMPLE_ARRAY_SIZE];
904 int d = s->sample_array[(idx + 9 * channels) % SAMPLE_ARRAY_SIZE];
906 if (h < score && (b ^ c) < 0) {
913 s->last_i_start = i_start;
915 i_start = s->last_i_start;
918 bgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0x00);
919 if (s->show_mode == SHOW_MODE_WAVES) {
920 fill_rectangle(screen,
921 s->xleft, s->ytop, s->width, s->height,
924 fgcolor = SDL_MapRGB(screen->format, 0xff, 0xff, 0xff);
926 /* total height for one channel */
927 h = s->height / nb_display_channels;
928 /* graph height / 2 */
930 for (ch = 0; ch < nb_display_channels; ch++) {
932 y1 = s->ytop + ch * h + (h / 2); /* position of center line */
933 for (x = 0; x < s->width; x++) {
934 y = (s->sample_array[i] * h2) >> 15;
941 fill_rectangle(screen,
942 s->xleft + x, ys, 1, y,
945 if (i >= SAMPLE_ARRAY_SIZE)
946 i -= SAMPLE_ARRAY_SIZE;
950 fgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0xff);
952 for (ch = 1; ch < nb_display_channels; ch++) {
953 y = s->ytop + ch * h;
954 fill_rectangle(screen,
955 s->xleft, y, s->width, 1,
958 SDL_UpdateRect(screen, s->xleft, s->ytop, s->width, s->height);
960 nb_display_channels= FFMIN(nb_display_channels, 2);
961 if (rdft_bits != s->rdft_bits) {
962 av_rdft_end(s->rdft);
963 av_free(s->rdft_data);
964 s->rdft = av_rdft_init(rdft_bits, DFT_R2C);
965 s->rdft_bits = rdft_bits;
966 s->rdft_data = av_malloc(4 * nb_freq * sizeof(*s->rdft_data));
970 for (ch = 0; ch < nb_display_channels; ch++) {
971 data[ch] = s->rdft_data + 2 * nb_freq * ch;
973 for (x = 0; x < 2 * nb_freq; x++) {
974 double w = (x-nb_freq) * (1.0 / nb_freq);
975 data[ch][x] = s->sample_array[i] * (1.0 - w * w);
977 if (i >= SAMPLE_ARRAY_SIZE)
978 i -= SAMPLE_ARRAY_SIZE;
980 av_rdft_calc(s->rdft, data[ch]);
982 // not the most efficient way to do this; we could access the data directly, but it is more than fast enough
983 for (y = 0; y < s->height; y++) {
984 double w = 1 / sqrt(nb_freq);
985 int a = sqrt(w * sqrt(data[0][2 * y + 0] * data[0][2 * y + 0] + data[0][2 * y + 1] * data[0][2 * y + 1]));
986 int b = (nb_display_channels == 2 ) ? sqrt(w * sqrt(data[1][2 * y + 0] * data[1][2 * y + 0]
987 + data[1][2 * y + 1] * data[1][2 * y + 1])) : a;
990 fgcolor = SDL_MapRGB(screen->format, a, b, (a + b) / 2);
992 fill_rectangle(screen,
993 s->xpos, s->height-y, 1, 1,
997 SDL_UpdateRect(screen, s->xpos, s->ytop, 1, s->height);
1000 if (s->xpos >= s->width)
1005 static void stream_close(VideoState *is)
1009 /* XXX: use a special url_shutdown call to abort parse cleanly */
1010 is->abort_request = 1;
1011 SDL_WaitThread(is->read_tid, NULL);
1012 packet_queue_destroy(&is->videoq);
1013 packet_queue_destroy(&is->audioq);
1014 packet_queue_destroy(&is->subtitleq);
1016 /* free all pictures */
1017 for (i = 0; i < VIDEO_PICTURE_QUEUE_SIZE; i++) {
1020 SDL_FreeYUVOverlay(vp->bmp);
1024 SDL_DestroyMutex(is->pictq_mutex);
1025 SDL_DestroyCond(is->pictq_cond);
1026 SDL_DestroyMutex(is->subpq_mutex);
1027 SDL_DestroyCond(is->subpq_cond);
1028 SDL_DestroyCond(is->continue_read_thread);
1029 #if !CONFIG_AVFILTER
1030 sws_freeContext(is->img_convert_ctx);
1035 static void do_exit(VideoState *is)
1040 av_lockmgr_register(NULL);
1043 av_freep(&vfilters);
1045 avformat_network_deinit();
1049 av_log(NULL, AV_LOG_QUIET, "%s", "");
1053 static void sigterm_handler(int sig)
1058 static int video_open(VideoState *is, int force_set_video_mode, VideoPicture *vp)
1060 int flags = SDL_HWSURFACE | SDL_ASYNCBLIT | SDL_HWACCEL;
1064 if (is_full_screen) flags |= SDL_FULLSCREEN;
1065 else flags |= SDL_RESIZABLE;
1067 if (vp && vp->width) {
1068 calculate_display_rect(&rect, 0, 0, INT_MAX, vp->height, vp);
1069 default_width = rect.w;
1070 default_height = rect.h;
1073 if (is_full_screen && fs_screen_width) {
1074 w = fs_screen_width;
1075 h = fs_screen_height;
1076 } else if (!is_full_screen && screen_width) {
1083 w = FFMIN(16383, w);
1084 if (screen && is->width == screen->w && screen->w == w
1085 && is->height== screen->h && screen->h == h && !force_set_video_mode)
1087 screen = SDL_SetVideoMode(w, h, 0, flags);
1089 av_log(NULL, AV_LOG_FATAL, "SDL: could not set video mode - exiting\n");
1093 window_title = input_filename;
1094 SDL_WM_SetCaption(window_title, window_title);
1096 is->width = screen->w;
1097 is->height = screen->h;
1102 /* display the current picture, if any */
1103 static void video_display(VideoState *is)
1106 video_open(is, 0, NULL);
1107 if (is->audio_st && is->show_mode != SHOW_MODE_VIDEO)
1108 video_audio_display(is);
1109 else if (is->video_st)
1110 video_image_display(is);
1113 static double get_clock(Clock *c)
1115 if (*c->queue_serial != c->serial)
1120 double time = av_gettime() / 1000000.0;
1121 return c->pts_drift + time - (time - c->last_updated) * (1.0 - c->speed);
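/* equivalent to pts + (time - last_updated) * speed: the clock is extrapolated
   from its last update, with the elapsed time scaled by the clock speed */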
1125 static void set_clock_at(Clock *c, double pts, int serial, double time)
1128 c->last_updated = time;
1129 c->pts_drift = c->pts - time;
1133 static void set_clock(Clock *c, double pts, int serial)
1135 double time = av_gettime() / 1000000.0;
1136 set_clock_at(c, pts, serial, time);
1139 static void set_clock_speed(Clock *c, double speed)
1141 set_clock(c, get_clock(c), c->serial);
1145 static void init_clock(Clock *c, int *queue_serial)
1149 c->queue_serial = queue_serial;
1150 set_clock(c, NAN, -1);
1153 static void sync_clock_to_slave(Clock *c, Clock *slave)
1155 double clock = get_clock(c);
1156 double slave_clock = get_clock(slave);
1157 if (!isnan(slave_clock) && (isnan(clock) || fabs(clock - slave_clock) > AV_NOSYNC_THRESHOLD))
1158 set_clock(c, slave_clock, slave->serial);
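/* i.e. the clock snaps to the slave only when it has no valid value yet or has
   drifted more than AV_NOSYNC_THRESHOLD away from it */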
1161 static int get_master_sync_type(VideoState *is) {
1162 if (is->av_sync_type == AV_SYNC_VIDEO_MASTER) {
1164 return AV_SYNC_VIDEO_MASTER;
1166 return AV_SYNC_AUDIO_MASTER;
1167 } else if (is->av_sync_type == AV_SYNC_AUDIO_MASTER) {
1169 return AV_SYNC_AUDIO_MASTER;
1171 return AV_SYNC_EXTERNAL_CLOCK;
1173 return AV_SYNC_EXTERNAL_CLOCK;
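/* if the stream requested as master is not present, fall back to another clock
   so there is always a usable sync reference */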
1177 /* get the current master clock value */
1178 static double get_master_clock(VideoState *is)
1182 switch (get_master_sync_type(is)) {
1183 case AV_SYNC_VIDEO_MASTER:
1184 val = get_clock(&is->vidclk);
1186 case AV_SYNC_AUDIO_MASTER:
1187 val = get_clock(&is->audclk);
1190 val = get_clock(&is->extclk);
1196 static void check_external_clock_speed(VideoState *is) {
1197 if (is->video_stream >= 0 && is->videoq.nb_packets <= MIN_FRAMES / 2 ||
1198 is->audio_stream >= 0 && is->audioq.nb_packets <= MIN_FRAMES / 2) {
1199 set_clock_speed(&is->extclk, FFMAX(EXTERNAL_CLOCK_SPEED_MIN, is->extclk.speed - EXTERNAL_CLOCK_SPEED_STEP));
1200 } else if ((is->video_stream < 0 || is->videoq.nb_packets > MIN_FRAMES * 2) &&
1201 (is->audio_stream < 0 || is->audioq.nb_packets > MIN_FRAMES * 2)) {
1202 set_clock_speed(&is->extclk, FFMIN(EXTERNAL_CLOCK_SPEED_MAX, is->extclk.speed + EXTERNAL_CLOCK_SPEED_STEP));
1204 double speed = is->extclk.speed;
1206 set_clock_speed(&is->extclk, speed + EXTERNAL_CLOCK_SPEED_STEP * (1.0 - speed) / fabs(1.0 - speed));
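/* this branch runs when the queues are neither starving nor clearly full:
   the speed is stepped back towards 1.0 */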
1210 /* seek in the stream */
1211 static void stream_seek(VideoState *is, int64_t pos, int64_t rel, int seek_by_bytes)
1213 if (!is->seek_req) {
1216 is->seek_flags &= ~AVSEEK_FLAG_BYTE;
1218 is->seek_flags |= AVSEEK_FLAG_BYTE;
1220 SDL_CondSignal(is->continue_read_thread);
1224 /* pause or resume the video */
1225 static void stream_toggle_pause(VideoState *is)
1228 is->frame_timer += av_gettime() / 1000000.0 + is->vidclk.pts_drift - is->vidclk.pts;
1229 if (is->read_pause_return != AVERROR(ENOSYS)) {
1230 is->vidclk.paused = 0;
1232 set_clock(&is->vidclk, get_clock(&is->vidclk), is->vidclk.serial);
1234 set_clock(&is->extclk, get_clock(&is->extclk), is->extclk.serial);
1235 is->paused = is->audclk.paused = is->vidclk.paused = is->extclk.paused = !is->paused;
1238 static void toggle_pause(VideoState *is)
1240 stream_toggle_pause(is);
1244 static void step_to_next_frame(VideoState *is)
1246 /* if the stream is paused, unpause it, then step */
1248 stream_toggle_pause(is);
1252 static double compute_target_delay(double delay, VideoState *is)
1254 double sync_threshold, diff;
1256 /* update delay to follow master synchronisation source */
1257 if (get_master_sync_type(is) != AV_SYNC_VIDEO_MASTER) {
1258 /* if video is slave, we try to correct big delays by
1259 duplicating or deleting a frame */
1260 diff = get_clock(&is->vidclk) - get_master_clock(is);
1262 /* skip or repeat the frame. We take the delay into account
1263 when computing the threshold; it is not clear whether this
1264 is the best heuristic */
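/* concretely: if video lags behind the master by more than the threshold,
   the delay is shortened (never below 0); if video is ahead, the delay is
   lengthened, either by the full difference for long frames or by doubling it */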
1265 sync_threshold = FFMAX(AV_SYNC_THRESHOLD_MIN, FFMIN(AV_SYNC_THRESHOLD_MAX, delay));
1266 if (!isnan(diff) && fabs(diff) < is->max_frame_duration) {
1267 if (diff <= -sync_threshold)
1268 delay = FFMAX(0, delay + diff);
1269 else if (diff >= sync_threshold && delay > AV_SYNC_FRAMEDUP_THRESHOLD)
1270 delay = delay + diff;
1271 else if (diff >= sync_threshold)
1276 av_dlog(NULL, "video: delay=%0.3f A-V=%f\n",
1282 static void pictq_next_picture(VideoState *is) {
1283 /* update queue size and signal for next picture */
1284 if (++is->pictq_rindex == VIDEO_PICTURE_QUEUE_SIZE)
1285 is->pictq_rindex = 0;
1287 SDL_LockMutex(is->pictq_mutex);
1289 SDL_CondSignal(is->pictq_cond);
1290 SDL_UnlockMutex(is->pictq_mutex);
1293 static int pictq_prev_picture(VideoState *is) {
1294 VideoPicture *prevvp;
1296 /* update queue size and signal for the previous picture */
1297 prevvp = &is->pictq[(is->pictq_rindex + VIDEO_PICTURE_QUEUE_SIZE - 1) % VIDEO_PICTURE_QUEUE_SIZE];
1298 if (prevvp->allocated && prevvp->serial == is->videoq.serial) {
1299 SDL_LockMutex(is->pictq_mutex);
1300 if (is->pictq_size < VIDEO_PICTURE_QUEUE_SIZE) {
1301 if (--is->pictq_rindex == -1)
1302 is->pictq_rindex = VIDEO_PICTURE_QUEUE_SIZE - 1;
1306 SDL_CondSignal(is->pictq_cond);
1307 SDL_UnlockMutex(is->pictq_mutex);
1312 static void update_video_pts(VideoState *is, double pts, int64_t pos, int serial) {
1313 /* update current video pts */
1314 set_clock(&is->vidclk, pts, serial);
1315 sync_clock_to_slave(&is->extclk, &is->vidclk);
1316 is->video_current_pos = pos;
1317 is->frame_last_pts = pts;
1320 /* called to display each frame */
1321 static void video_refresh(void *opaque, double *remaining_time)
1323 VideoState *is = opaque;
1327 SubPicture *sp, *sp2;
1329 if (!is->paused && get_master_sync_type(is) == AV_SYNC_EXTERNAL_CLOCK && is->realtime)
1330 check_external_clock_speed(is);
1332 if (!display_disable && is->show_mode != SHOW_MODE_VIDEO && is->audio_st) {
1333 time = av_gettime() / 1000000.0;
1334 if (is->force_refresh || is->last_vis_time + rdftspeed < time) {
1336 is->last_vis_time = time;
1338 *remaining_time = FFMIN(*remaining_time, is->last_vis_time + rdftspeed - time);
1343 if (is->force_refresh)
1344 redisplay = pictq_prev_picture(is);
1346 if (is->pictq_size == 0) {
1347 SDL_LockMutex(is->pictq_mutex);
1348 if (is->frame_last_dropped_pts != AV_NOPTS_VALUE && is->frame_last_dropped_pts > is->frame_last_pts) {
1349 update_video_pts(is, is->frame_last_dropped_pts, is->frame_last_dropped_pos, is->frame_last_dropped_serial);
1350 is->frame_last_dropped_pts = AV_NOPTS_VALUE;
1352 SDL_UnlockMutex(is->pictq_mutex);
1353 // nothing to do, no picture to display in the queue
1355 double last_duration, duration, delay;
1356 /* dequeue the picture */
1357 vp = &is->pictq[is->pictq_rindex];
1359 if (vp->serial != is->videoq.serial) {
1360 pictq_next_picture(is);
1368 /* compute nominal last_duration */
1369 last_duration = vp->pts - is->frame_last_pts;
1370 if (!isnan(last_duration) && last_duration > 0 && last_duration < is->max_frame_duration) {
1371 /* if duration of the last frame was sane, update last_duration in video state */
1372 is->frame_last_duration = last_duration;
1377 delay = compute_target_delay(is->frame_last_duration, is);
1379 time= av_gettime()/1000000.0;
1380 if (time < is->frame_timer + delay && !redisplay) {
1381 *remaining_time = FFMIN(is->frame_timer + delay - time, *remaining_time);
1385 is->frame_timer += delay;
1386 if (delay > 0 && time - is->frame_timer > AV_SYNC_THRESHOLD_MAX)
1387 is->frame_timer = time;
1389 SDL_LockMutex(is->pictq_mutex);
1390 if (!redisplay && !isnan(vp->pts))
1391 update_video_pts(is, vp->pts, vp->pos, vp->serial);
1392 SDL_UnlockMutex(is->pictq_mutex);
1394 if (is->pictq_size > 1) {
1395 VideoPicture *nextvp = &is->pictq[(is->pictq_rindex + 1) % VIDEO_PICTURE_QUEUE_SIZE];
1396 duration = nextvp->pts - vp->pts;
1397 if(!is->step && (redisplay || framedrop>0 || (framedrop && get_master_sync_type(is) != AV_SYNC_VIDEO_MASTER)) && time > is->frame_timer + duration){
1399 is->frame_drops_late++;
1400 pictq_next_picture(is);
1406 if (is->subtitle_st) {
1407 if (is->subtitle_stream_changed) {
1408 SDL_LockMutex(is->subpq_mutex);
1410 while (is->subpq_size) {
1411 free_subpicture(&is->subpq[is->subpq_rindex]);
1413 /* update queue size and signal for next picture */
1414 if (++is->subpq_rindex == SUBPICTURE_QUEUE_SIZE)
1415 is->subpq_rindex = 0;
1419 is->subtitle_stream_changed = 0;
1421 SDL_CondSignal(is->subpq_cond);
1422 SDL_UnlockMutex(is->subpq_mutex);
1424 if (is->subpq_size > 0) {
1425 sp = &is->subpq[is->subpq_rindex];
1427 if (is->subpq_size > 1)
1428 sp2 = &is->subpq[(is->subpq_rindex + 1) % SUBPICTURE_QUEUE_SIZE];
1432 if ((is->video_current_pts > (sp->pts + ((float) sp->sub.end_display_time / 1000)))
1433 || (sp2 && is->video_current_pts > (sp2->pts + ((float) sp2->sub.start_display_time / 1000))))
1435 free_subpicture(sp);
1437 /* update queue size and signal for next picture */
1438 if (++is->subpq_rindex == SUBPICTURE_QUEUE_SIZE)
1439 is->subpq_rindex = 0;
1441 SDL_LockMutex(is->subpq_mutex);
1443 SDL_CondSignal(is->subpq_cond);
1444 SDL_UnlockMutex(is->subpq_mutex);
1451 /* display picture */
1452 if (!display_disable && is->show_mode == SHOW_MODE_VIDEO)
1455 pictq_next_picture(is);
1457 if (is->step && !is->paused)
1458 stream_toggle_pause(is);
1461 is->force_refresh = 0;
1463 static int64_t last_time;
1465 int aqsize, vqsize, sqsize;
1468 cur_time = av_gettime();
1469 if (!last_time || (cur_time - last_time) >= 30000) {
1474 aqsize = is->audioq.size;
1476 vqsize = is->videoq.size;
1477 if (is->subtitle_st)
1478 sqsize = is->subtitleq.size;
1480 if (is->audio_st && is->video_st)
1481 av_diff = get_clock(&is->audclk) - get_clock(&is->vidclk);
1482 else if (is->video_st)
1483 av_diff = get_master_clock(is) - get_clock(&is->vidclk);
1484 else if (is->audio_st)
1485 av_diff = get_master_clock(is) - get_clock(&is->audclk);
1486 av_log(NULL, AV_LOG_INFO,
1487 "%7.2f %s:%7.3f fd=%4d aq=%5dKB vq=%5dKB sq=%5dB f=%"PRId64"/%"PRId64" \r",
1488 get_master_clock(is),
1489 (is->audio_st && is->video_st) ? "A-V" : (is->video_st ? "M-V" : (is->audio_st ? "M-A" : " ")),
1491 is->frame_drops_early + is->frame_drops_late,
1495 is->video_st ? is->video_st->codec->pts_correction_num_faulty_dts : 0,
1496 is->video_st ? is->video_st->codec->pts_correction_num_faulty_pts : 0);
1498 last_time = cur_time;
1503 /* allocate a picture (this must be done in the main thread to avoid
1504 potential locking problems) */
1505 static void alloc_picture(VideoState *is)
1510 vp = &is->pictq[is->pictq_windex];
1513 SDL_FreeYUVOverlay(vp->bmp);
1515 video_open(is, 0, vp);
1517 vp->bmp = SDL_CreateYUVOverlay(vp->width, vp->height,
1520 bufferdiff = vp->bmp ? FFMAX(vp->bmp->pixels[0], vp->bmp->pixels[1]) - FFMIN(vp->bmp->pixels[0], vp->bmp->pixels[1]) : 0;
1521 if (!vp->bmp || vp->bmp->pitches[0] < vp->width || bufferdiff < vp->height * vp->bmp->pitches[0]) {
1522 /* SDL allocates a buffer smaller than requested if the video
1523 * overlay hardware is unable to support the requested size. */
1524 av_log(NULL, AV_LOG_FATAL,
1525 "Error: the video system does not support an image\n"
1526 "size of %dx%d pixels. Try using -lowres or -vf \"scale=w:h\"\n"
1527 "to reduce the image size.\n", vp->width, vp->height );
1531 SDL_LockMutex(is->pictq_mutex);
1533 SDL_CondSignal(is->pictq_cond);
1534 SDL_UnlockMutex(is->pictq_mutex);
1537 static void duplicate_right_border_pixels(SDL_Overlay *bmp) {
1538 int i, width, height;
1540 for (i = 0; i < 3; i++) {
1547 if (bmp->pitches[i] > width) {
1548 maxp = bmp->pixels[i] + bmp->pitches[i] * height - 1;
1549 for (p = bmp->pixels[i] + width - 1; p < maxp; p += bmp->pitches[i])
1555 static int queue_picture(VideoState *is, AVFrame *src_frame, double pts, int64_t pos, int serial)
1559 #if defined(DEBUG_SYNC) && 0
1560 printf("frame_type=%c pts=%0.3f\n",
1561 av_get_picture_type_char(src_frame->pict_type), pts);
1564 /* wait until we have space to put a new picture */
1565 SDL_LockMutex(is->pictq_mutex);
1567 /* keep the last already displayed picture in the queue */
1568 while (is->pictq_size >= VIDEO_PICTURE_QUEUE_SIZE - 1 &&
1569 !is->videoq.abort_request) {
1570 SDL_CondWait(is->pictq_cond, is->pictq_mutex);
1572 SDL_UnlockMutex(is->pictq_mutex);
1574 if (is->videoq.abort_request)
1577 vp = &is->pictq[is->pictq_windex];
1579 vp->sar = src_frame->sample_aspect_ratio;
1581 /* alloc or resize hardware picture buffer */
1582 if (!vp->bmp || vp->reallocate || !vp->allocated ||
1583 vp->width != src_frame->width ||
1584 vp->height != src_frame->height) {
1589 vp->width = src_frame->width;
1590 vp->height = src_frame->height;
1592 /* the allocation must be done in the main thread to avoid
1593 locking problems. */
1594 event.type = FF_ALLOC_EVENT;
1595 event.user.data1 = is;
1596 SDL_PushEvent(&event);
1598 /* wait until the picture is allocated */
1599 SDL_LockMutex(is->pictq_mutex);
1600 while (!vp->allocated && !is->videoq.abort_request) {
1601 SDL_CondWait(is->pictq_cond, is->pictq_mutex);
1603 /* if the queue is aborted, we have to pop the pending ALLOC event or wait for the allocation to complete */
1604 if (is->videoq.abort_request && SDL_PeepEvents(&event, 1, SDL_GETEVENT, SDL_EVENTMASK(FF_ALLOC_EVENT)) != 1) {
1605 while (!vp->allocated) {
1606 SDL_CondWait(is->pictq_cond, is->pictq_mutex);
1609 SDL_UnlockMutex(is->pictq_mutex);
1611 if (is->videoq.abort_request)
1615 /* if the frame is not skipped, then display it */
1617 AVPicture pict = { { 0 } };
1619 /* get a pointer to the bitmap */
1620 SDL_LockYUVOverlay (vp->bmp);
1622 pict.data[0] = vp->bmp->pixels[0];
1623 pict.data[1] = vp->bmp->pixels[2];
1624 pict.data[2] = vp->bmp->pixels[1];
1626 pict.linesize[0] = vp->bmp->pitches[0];
1627 pict.linesize[1] = vp->bmp->pitches[2];
1628 pict.linesize[2] = vp->bmp->pitches[1];
1631 // FIXME use direct rendering
1632 av_picture_copy(&pict, (AVPicture *)src_frame,
1633 src_frame->format, vp->width, vp->height);
1635 av_opt_get_int(sws_opts, "sws_flags", 0, &sws_flags);
1636 is->img_convert_ctx = sws_getCachedContext(is->img_convert_ctx,
1637 vp->width, vp->height, src_frame->format, vp->width, vp->height,
1638 AV_PIX_FMT_YUV420P, sws_flags, NULL, NULL, NULL);
1639 if (is->img_convert_ctx == NULL) {
1640 av_log(NULL, AV_LOG_FATAL, "Cannot initialize the conversion context\n");
1643 sws_scale(is->img_convert_ctx, src_frame->data, src_frame->linesize,
1644 0, vp->height, pict.data, pict.linesize);
1646 /* work around SDL's PITCH_WORKAROUND */
1647 duplicate_right_border_pixels(vp->bmp);
1648 /* update the bitmap content */
1649 SDL_UnlockYUVOverlay(vp->bmp);
1653 vp->serial = serial;
1655 /* now we can update the picture count */
1656 if (++is->pictq_windex == VIDEO_PICTURE_QUEUE_SIZE)
1657 is->pictq_windex = 0;
1658 SDL_LockMutex(is->pictq_mutex);
1660 SDL_UnlockMutex(is->pictq_mutex);
1665 static int get_video_frame(VideoState *is, AVFrame *frame, AVPacket *pkt, int *serial)
1669 if (packet_queue_get(&is->videoq, pkt, 1, serial) < 0)
1672 if (pkt->data == flush_pkt.data) {
1673 avcodec_flush_buffers(is->video_st->codec);
1675 SDL_LockMutex(is->pictq_mutex);
1676 // Make sure there are no long delay timers (ideally we should just flush the queue but that's harder)
1677 while (is->pictq_size && !is->videoq.abort_request) {
1678 SDL_CondWait(is->pictq_cond, is->pictq_mutex);
1680 is->video_current_pos = -1;
1681 is->frame_last_pts = AV_NOPTS_VALUE;
1682 is->frame_last_duration = 0;
1683 is->frame_timer = (double)av_gettime() / 1000000.0;
1684 is->frame_last_dropped_pts = AV_NOPTS_VALUE;
1685 SDL_UnlockMutex(is->pictq_mutex);
1689 if(avcodec_decode_video2(is->video_st->codec, frame, &got_picture, pkt) < 0)
1696 if (decoder_reorder_pts == -1) {
1697 frame->pts = av_frame_get_best_effort_timestamp(frame);
1698 } else if (decoder_reorder_pts) {
1699 frame->pts = frame->pkt_pts;
1701 frame->pts = frame->pkt_dts;
1704 if (frame->pts != AV_NOPTS_VALUE)
1705 dpts = av_q2d(is->video_st->time_base) * frame->pts;
1707 frame->sample_aspect_ratio = av_guess_sample_aspect_ratio(is->ic, is->video_st, frame);
1709 if (framedrop>0 || (framedrop && get_master_sync_type(is) != AV_SYNC_VIDEO_MASTER)) {
1710 SDL_LockMutex(is->pictq_mutex);
1711 if (is->frame_last_pts != AV_NOPTS_VALUE && frame->pts != AV_NOPTS_VALUE) {
1712 double clockdiff = get_clock(&is->vidclk) - get_master_clock(is);
1713 double ptsdiff = dpts - is->frame_last_pts;
1714 if (!isnan(clockdiff) && fabs(clockdiff) < AV_NOSYNC_THRESHOLD &&
1715 !isnan(ptsdiff) && ptsdiff > 0 && ptsdiff < AV_NOSYNC_THRESHOLD &&
1716 clockdiff + ptsdiff - is->frame_last_filter_delay < 0 &&
1717 is->videoq.nb_packets) {
1718 is->frame_last_dropped_pos = pkt->pos;
1719 is->frame_last_dropped_pts = dpts;
1720 is->frame_last_dropped_serial = *serial;
1721 is->frame_drops_early++;
1722 av_frame_unref(frame);
1726 SDL_UnlockMutex(is->pictq_mutex);
1735 static int configure_filtergraph(AVFilterGraph *graph, const char *filtergraph,
1736 AVFilterContext *source_ctx, AVFilterContext *sink_ctx)
1739 AVFilterInOut *outputs = NULL, *inputs = NULL;
1742 outputs = avfilter_inout_alloc();
1743 inputs = avfilter_inout_alloc();
1744 if (!outputs || !inputs) {
1745 ret = AVERROR(ENOMEM);
1749 outputs->name = av_strdup("in");
1750 outputs->filter_ctx = source_ctx;
1751 outputs->pad_idx = 0;
1752 outputs->next = NULL;
1754 inputs->name = av_strdup("out");
1755 inputs->filter_ctx = sink_ctx;
1756 inputs->pad_idx = 0;
1757 inputs->next = NULL;
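/* note the naming inversion: "outputs" describes the open output pad of the
   source filter (labelled "in" from the filtergraph's point of view) and
   "inputs" the open input pad of the sink (labelled "out") */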
1759 if ((ret = avfilter_graph_parse(graph, filtergraph, &inputs, &outputs, NULL)) < 0)
1762 if ((ret = avfilter_link(source_ctx, 0, sink_ctx, 0)) < 0)
1766 ret = avfilter_graph_config(graph, NULL);
1768 avfilter_inout_free(&outputs);
1769 avfilter_inout_free(&inputs);
1773 static int configure_video_filters(AVFilterGraph *graph, VideoState *is, const char *vfilters, AVFrame *frame)
1775 static const enum AVPixelFormat pix_fmts[] = { AV_PIX_FMT_YUV420P, AV_PIX_FMT_NONE };
1776 char sws_flags_str[128];
1777 char buffersrc_args[256];
1779 AVFilterContext *filt_src = NULL, *filt_out = NULL, *filt_crop;
1780 AVCodecContext *codec = is->video_st->codec;
1781 AVRational fr = av_guess_frame_rate(is->ic, is->video_st, NULL);
1783 av_opt_get_int(sws_opts, "sws_flags", 0, &sws_flags);
1784 snprintf(sws_flags_str, sizeof(sws_flags_str), "flags=%"PRId64, sws_flags);
1785 graph->scale_sws_opts = av_strdup(sws_flags_str);
1787 snprintf(buffersrc_args, sizeof(buffersrc_args),
1788 "video_size=%dx%d:pix_fmt=%d:time_base=%d/%d:pixel_aspect=%d/%d",
1789 frame->width, frame->height, frame->format,
1790 is->video_st->time_base.num, is->video_st->time_base.den,
1791 codec->sample_aspect_ratio.num, FFMAX(codec->sample_aspect_ratio.den, 1));
1792 if (fr.num && fr.den)
1793 av_strlcatf(buffersrc_args, sizeof(buffersrc_args), ":frame_rate=%d/%d", fr.num, fr.den);
1795 if ((ret = avfilter_graph_create_filter(&filt_src,
1796 avfilter_get_by_name("buffer"),
1797 "ffplay_buffer", buffersrc_args, NULL,
1801 ret = avfilter_graph_create_filter(&filt_out,
1802 avfilter_get_by_name("buffersink"),
1803 "ffplay_buffersink", NULL, NULL, graph);
1807 if ((ret = av_opt_set_int_list(filt_out, "pix_fmts", pix_fmts, AV_PIX_FMT_NONE, AV_OPT_SEARCH_CHILDREN)) < 0)
1810 /* The SDL YUV code does not handle odd width/height for some driver
1811 * combinations, therefore we crop the picture to an even width/height. */
1812 if ((ret = avfilter_graph_create_filter(&filt_crop,
1813 avfilter_get_by_name("crop"),
1814 "ffplay_crop", "floor(in_w/2)*2:floor(in_h/2)*2", NULL, graph)) < 0)
1816 if ((ret = avfilter_link(filt_crop, 0, filt_out, 0)) < 0)
1819 if ((ret = configure_filtergraph(graph, vfilters, filt_src, filt_crop)) < 0)
1822 is->in_video_filter = filt_src;
1823 is->out_video_filter = filt_out;
1829 static int configure_audio_filters(VideoState *is, const char *afilters, int force_output_format)
1831 static const enum AVSampleFormat sample_fmts[] = { AV_SAMPLE_FMT_S16, AV_SAMPLE_FMT_NONE };
1832 int sample_rates[2] = { 0, -1 };
1833 int64_t channel_layouts[2] = { 0, -1 };
1834 int channels[2] = { 0, -1 };
1835 AVFilterContext *filt_asrc = NULL, *filt_asink = NULL;
1836 char asrc_args[256];
1839 avfilter_graph_free(&is->agraph);
1840 if (!(is->agraph = avfilter_graph_alloc()))
1841 return AVERROR(ENOMEM);
1843 ret = snprintf(asrc_args, sizeof(asrc_args),
1844 "sample_rate=%d:sample_fmt=%s:channels=%d:time_base=%d/%d",
1845 is->audio_filter_src.freq, av_get_sample_fmt_name(is->audio_filter_src.fmt),
1846 is->audio_filter_src.channels,
1847 1, is->audio_filter_src.freq);
1848 if (is->audio_filter_src.channel_layout)
1849 snprintf(asrc_args + ret, sizeof(asrc_args) - ret,
1850 ":channel_layout=0x%"PRIx64, is->audio_filter_src.channel_layout);
1852 ret = avfilter_graph_create_filter(&filt_asrc,
1853 avfilter_get_by_name("abuffer"), "ffplay_abuffer",
1854 asrc_args, NULL, is->agraph);
1859 ret = avfilter_graph_create_filter(&filt_asink,
1860 avfilter_get_by_name("abuffersink"), "ffplay_abuffersink",
1861 NULL, NULL, is->agraph);
1865 if ((ret = av_opt_set_int_list(filt_asink, "sample_fmts", sample_fmts, AV_SAMPLE_FMT_NONE, AV_OPT_SEARCH_CHILDREN)) < 0)
1867 if ((ret = av_opt_set_int(filt_asink, "all_channel_counts", 1, AV_OPT_SEARCH_CHILDREN)) < 0)
1870 if (force_output_format) {
1871 channel_layouts[0] = is->audio_tgt.channel_layout;
1872 channels [0] = is->audio_tgt.channels;
1873 sample_rates [0] = is->audio_tgt.freq;
1874 if ((ret = av_opt_set_int(filt_asink, "all_channel_counts", 0, AV_OPT_SEARCH_CHILDREN)) < 0)
1876 if ((ret = av_opt_set_int_list(filt_asink, "channel_layouts", channel_layouts, -1, AV_OPT_SEARCH_CHILDREN)) < 0)
1878 if ((ret = av_opt_set_int_list(filt_asink, "channel_counts" , channels , -1, AV_OPT_SEARCH_CHILDREN)) < 0)
1880 if ((ret = av_opt_set_int_list(filt_asink, "sample_rates" , sample_rates , -1, AV_OPT_SEARCH_CHILDREN)) < 0)
1885 if ((ret = configure_filtergraph(is->agraph, afilters, filt_asrc, filt_asink)) < 0)
1888 is->in_audio_filter = filt_asrc;
1889 is->out_audio_filter = filt_asink;
1893 avfilter_graph_free(&is->agraph);
1896 #endif /* CONFIG_AVFILTER */
1898 static int video_thread(void *arg)
1900 AVPacket pkt = { 0 };
1901 VideoState *is = arg;
1902 AVFrame *frame = av_frame_alloc();
1908 AVFilterGraph *graph = avfilter_graph_alloc();
1909 AVFilterContext *filt_out = NULL, *filt_in = NULL;
1912 enum AVPixelFormat last_format = -2;
1913 int last_serial = -1;
1917 while (is->paused && !is->videoq.abort_request)
1920 avcodec_get_frame_defaults(frame);
1921 av_free_packet(&pkt);
1923 ret = get_video_frame(is, frame, &pkt, &serial);
1930 if ( last_w != frame->width
1931 || last_h != frame->height
1932 || last_format != frame->format
1933 || last_serial != serial) {
1934 av_log(NULL, AV_LOG_DEBUG,
1935 "Video frame changed from size:%dx%d format:%s serial:%d to size:%dx%d format:%s serial:%d\n",
1937 (const char *)av_x_if_null(av_get_pix_fmt_name(last_format), "none"), last_serial,
1938 frame->width, frame->height,
1939 (const char *)av_x_if_null(av_get_pix_fmt_name(frame->format), "none"), serial);
1940 avfilter_graph_free(&graph);
1941 graph = avfilter_graph_alloc();
1942 if ((ret = configure_video_filters(graph, is, vfilters, frame)) < 0) {
1944 event.type = FF_QUIT_EVENT;
1945 event.user.data1 = is;
1946 SDL_PushEvent(&event);
1947 av_free_packet(&pkt);
1950 filt_in = is->in_video_filter;
1951 filt_out = is->out_video_filter;
1952 last_w = frame->width;
1953 last_h = frame->height;
1954 last_format = frame->format;
1955 last_serial = serial;
1958 ret = av_buffersrc_add_frame(filt_in, frame);
1961 av_frame_unref(frame);
1962 avcodec_get_frame_defaults(frame);
1963 av_free_packet(&pkt);
1966 is->frame_last_returned_time = av_gettime() / 1000000.0;
1968 ret = av_buffersink_get_frame_flags(filt_out, frame, 0);
1974 is->frame_last_filter_delay = av_gettime() / 1000000.0 - is->frame_last_returned_time;
1975 if (fabs(is->frame_last_filter_delay) > AV_NOSYNC_THRESHOLD / 10.0)
1976 is->frame_last_filter_delay = 0;
1978 pts = (frame->pts == AV_NOPTS_VALUE) ? NAN : frame->pts * av_q2d(filt_out->inputs[0]->time_base);
1979 ret = queue_picture(is, frame, pts, av_frame_get_pkt_pos(frame), serial);
1980 av_frame_unref(frame);
1983 pts = (frame->pts == AV_NOPTS_VALUE) ? NAN : frame->pts * av_q2d(is->video_st->time_base);
1984 ret = queue_picture(is, frame, pts, pkt.pos, serial);
1985 av_frame_unref(frame);
1992 avcodec_flush_buffers(is->video_st->codec);
1994 avfilter_graph_free(&graph);
1996 av_free_packet(&pkt);
1997 av_frame_free(&frame);
2001 static int subtitle_thread(void *arg)
2003 VideoState *is = arg;
2005 AVPacket pkt1, *pkt = &pkt1;
2009 int r, g, b, y, u, v, a;
2012 while (is->paused && !is->subtitleq.abort_request) {
2015 if (packet_queue_get(&is->subtitleq, pkt, 1, NULL) < 0)
2018 if (pkt->data == flush_pkt.data) {
2019 avcodec_flush_buffers(is->subtitle_st->codec);
2022 SDL_LockMutex(is->subpq_mutex);
2023 while (is->subpq_size >= SUBPICTURE_QUEUE_SIZE &&
2024 !is->subtitleq.abort_request) {
2025 SDL_CondWait(is->subpq_cond, is->subpq_mutex);
2027 SDL_UnlockMutex(is->subpq_mutex);
2029 if (is->subtitleq.abort_request)
2032 sp = &is->subpq[is->subpq_windex];
2034 /* NOTE: ipts is the PTS of the _first_ picture beginning in
2035 this packet, if any */
2037 if (pkt->pts != AV_NOPTS_VALUE)
2038 pts = av_q2d(is->subtitle_st->time_base) * pkt->pts;
2040 avcodec_decode_subtitle2(is->subtitle_st->codec, &sp->sub,
2041 &got_subtitle, pkt);
2042 if (got_subtitle && sp->sub.format == 0) {
2043 if (sp->sub.pts != AV_NOPTS_VALUE)
2044 pts = sp->sub.pts / (double)AV_TIME_BASE;
2047 for (i = 0; i < sp->sub.num_rects; i++)
2049 for (j = 0; j < sp->sub.rects[i]->nb_colors; j++)
2051 RGBA_IN(r, g, b, a, (uint32_t*)sp->sub.rects[i]->pict.data[1] + j);
2052 y = RGB_TO_Y_CCIR(r, g, b);
2053 u = RGB_TO_U_CCIR(r, g, b, 0);
2054 v = RGB_TO_V_CCIR(r, g, b, 0);
2055 YUVA_OUT((uint32_t*)sp->sub.rects[i]->pict.data[1] + j, y, u, v, a);
2059 /* now we can update the picture count */
2060 if (++is->subpq_windex == SUBPICTURE_QUEUE_SIZE)
2061 is->subpq_windex = 0;
2062 SDL_LockMutex(is->subpq_mutex);
2064 SDL_UnlockMutex(is->subpq_mutex);
2066 av_free_packet(pkt);
2071 /* copy samples into the sample array used by the audio visualization */
2072 static void update_sample_display(VideoState *is, short *samples, int samples_size)
2076 size = samples_size / sizeof(short);
2078 len = SAMPLE_ARRAY_SIZE - is->sample_array_index;
2081 memcpy(is->sample_array + is->sample_array_index, samples, len * sizeof(short));
2083 is->sample_array_index += len;
2084 if (is->sample_array_index >= SAMPLE_ARRAY_SIZE)
2085 is->sample_array_index = 0;
2090 /* return the wanted number of samples to get better sync if sync_type is video
2091 * or external master clock */
2092 static int synchronize_audio(VideoState *is, int nb_samples)
2094 int wanted_nb_samples = nb_samples;
2096 /* if not master, then we try to remove or add samples to correct the clock */
2097 if (get_master_sync_type(is) != AV_SYNC_AUDIO_MASTER) {
2098 double diff, avg_diff;
2099 int min_nb_samples, max_nb_samples;
2101 diff = get_clock(&is->audclk) - get_master_clock(is);
2103 if (!isnan(diff) && fabs(diff) < AV_NOSYNC_THRESHOLD) {
2104 is->audio_diff_cum = diff + is->audio_diff_avg_coef * is->audio_diff_cum;
2105 if (is->audio_diff_avg_count < AUDIO_DIFF_AVG_NB) {
2106 /* not enough measurements to have a reliable estimate */
2107 is->audio_diff_avg_count++;
2109 /* estimate the A-V difference */
2110 avg_diff = is->audio_diff_cum * (1.0 - is->audio_diff_avg_coef);
2112 if (fabs(avg_diff) >= is->audio_diff_threshold) {
2113 wanted_nb_samples = nb_samples + (int)(diff * is->audio_src.freq);
2114 min_nb_samples = ((nb_samples * (100 - SAMPLE_CORRECTION_PERCENT_MAX) / 100));
2115 max_nb_samples = ((nb_samples * (100 + SAMPLE_CORRECTION_PERCENT_MAX) / 100));
2116 wanted_nb_samples = FFMIN(FFMAX(wanted_nb_samples, min_nb_samples), max_nb_samples);
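/* the adjustment is clamped to +/-SAMPLE_CORRECTION_PERCENT_MAX percent so the
   resulting tempo change stays essentially inaudible */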
2118 av_dlog(NULL, "diff=%f adiff=%f sample_diff=%d apts=%0.3f %f\n",
2119 diff, avg_diff, wanted_nb_samples - nb_samples,
2120 is->audio_clock, is->audio_diff_threshold);
2123 /* too big a difference: may be due to initial PTS errors, so
2125 is->audio_diff_avg_count = 0;
2126 is->audio_diff_cum = 0;
2130 return wanted_nb_samples;
2134 * Decode one audio frame and return its uncompressed size.
2136 * The processed audio frame is decoded, converted if required, and
2137 * stored in is->audio_buf, with size in bytes given by the return
2140 static int audio_decode_frame(VideoState *is)
2142 AVPacket *pkt_temp = &is->audio_pkt_temp;
2143 AVPacket *pkt = &is->audio_pkt;
2144 AVCodecContext *dec = is->audio_st->codec;
2145 int len1, data_size, resampled_data_size;
2146 int64_t dec_channel_layout;
2148 av_unused double audio_clock0;
2150 int flush_complete = 0;
2151 int wanted_nb_samples;
2157 /* NOTE: the audio packet can contain several frames */
2158 while (pkt_temp->size > 0 || (!pkt_temp->data && new_packet) || is->audio_buf_frames_pending) {
2160 if (!(is->frame = avcodec_alloc_frame()))
2161 return AVERROR(ENOMEM);
2163 av_frame_unref(is->frame);
2164 avcodec_get_frame_defaults(is->frame);
2167 if (is->audioq.serial != is->audio_pkt_temp_serial)
2173 if (!is->audio_buf_frames_pending) {
2177 len1 = avcodec_decode_audio4(dec, is->frame, &got_frame, pkt_temp);
2179 /* if error, we skip the frame */
2184 pkt_temp->data += len1;
2185 pkt_temp->size -= len1;
2188 /* stop sending empty packets if the decoder is finished */
2189 if (!pkt_temp->data && dec->codec->capabilities & CODEC_CAP_DELAY)
2194 tb = (AVRational){1, is->frame->sample_rate};
2195 if (is->frame->pts != AV_NOPTS_VALUE)
2196 is->frame->pts = av_rescale_q(is->frame->pts, dec->time_base, tb);
2197 if (is->frame->pts == AV_NOPTS_VALUE && pkt_temp->pts != AV_NOPTS_VALUE)
2198 is->frame->pts = av_rescale_q(pkt_temp->pts, is->audio_st->time_base, tb);
2199 if (pkt_temp->pts != AV_NOPTS_VALUE)
2200 pkt_temp->pts += (double) is->frame->nb_samples / is->frame->sample_rate / av_q2d(is->audio_st->time_base);
2203 dec_channel_layout = get_valid_channel_layout(is->frame->channel_layout, av_frame_get_channels(is->frame));
2206 cmp_audio_fmts(is->audio_filter_src.fmt, is->audio_filter_src.channels,
2207 is->frame->format, av_frame_get_channels(is->frame)) ||
2208 is->audio_filter_src.channel_layout != dec_channel_layout ||
2209 is->audio_filter_src.freq != is->frame->sample_rate ||
2210 is->audio_pkt_temp_serial != is->audio_last_serial;
2213 char buf1[1024], buf2[1024];
2214 av_get_channel_layout_string(buf1, sizeof(buf1), -1, is->audio_filter_src.channel_layout);
2215 av_get_channel_layout_string(buf2, sizeof(buf2), -1, dec_channel_layout);
2216 av_log(NULL, AV_LOG_DEBUG,
2217 "Audio frame changed from rate:%d ch:%d fmt:%s layout:%s serial:%d to rate:%d ch:%d fmt:%s layout:%s serial:%d\n",
2218 is->audio_filter_src.freq, is->audio_filter_src.channels, av_get_sample_fmt_name(is->audio_filter_src.fmt), buf1, is->audio_last_serial,
2219 is->frame->sample_rate, av_frame_get_channels(is->frame), av_get_sample_fmt_name(is->frame->format), buf2, is->audio_pkt_temp_serial);
2221 is->audio_filter_src.fmt = is->frame->format;
2222 is->audio_filter_src.channels = av_frame_get_channels(is->frame);
2223 is->audio_filter_src.channel_layout = dec_channel_layout;
2224 is->audio_filter_src.freq = is->frame->sample_rate;
2225 is->audio_last_serial = is->audio_pkt_temp_serial;
2227 if ((ret = configure_audio_filters(is, afilters, 1)) < 0)
2231 if ((ret = av_buffersrc_add_frame(is->in_audio_filter, is->frame)) < 0)
2233 av_frame_unref(is->frame);
2237 if ((ret = av_buffersink_get_frame_flags(is->out_audio_filter, is->frame, 0)) < 0) {
2238 if (ret == AVERROR(EAGAIN)) {
2239 is->audio_buf_frames_pending = 0;
2244 is->audio_buf_frames_pending = 1;
2245 tb = is->out_audio_filter->inputs[0]->time_base;
2248 data_size = av_samples_get_buffer_size(NULL, av_frame_get_channels(is->frame),
2249 is->frame->nb_samples,
2250 is->frame->format, 1);
2252 dec_channel_layout =
2253 (is->frame->channel_layout && av_frame_get_channels(is->frame) == av_get_channel_layout_nb_channels(is->frame->channel_layout)) ?
2254 is->frame->channel_layout : av_get_default_channel_layout(av_frame_get_channels(is->frame));
2255 wanted_nb_samples = synchronize_audio(is, is->frame->nb_samples);
2257 if (is->frame->format != is->audio_src.fmt ||
2258 dec_channel_layout != is->audio_src.channel_layout ||
2259 is->frame->sample_rate != is->audio_src.freq ||
2260 (wanted_nb_samples != is->frame->nb_samples && !is->swr_ctx)) {
2261 swr_free(&is->swr_ctx);
2262 is->swr_ctx = swr_alloc_set_opts(NULL,
2263 is->audio_tgt.channel_layout, is->audio_tgt.fmt, is->audio_tgt.freq,
2264 dec_channel_layout, is->frame->format, is->frame->sample_rate,
2266 if (!is->swr_ctx || swr_init(is->swr_ctx) < 0) {
2267 av_log(NULL, AV_LOG_ERROR,
2268 "Cannot create sample rate converter for conversion of %d Hz %s %d channels to %d Hz %s %d channels!\n",
2269 is->frame->sample_rate, av_get_sample_fmt_name(is->frame->format), av_frame_get_channels(is->frame),
2270 is->audio_tgt.freq, av_get_sample_fmt_name(is->audio_tgt.fmt), is->audio_tgt.channels);
2273 is->audio_src.channel_layout = dec_channel_layout;
2274 is->audio_src.channels = av_frame_get_channels(is->frame);
2275 is->audio_src.freq = is->frame->sample_rate;
2276 is->audio_src.fmt = is->frame->format;
2280 const uint8_t **in = (const uint8_t **)is->frame->extended_data;
2281 uint8_t **out = &is->audio_buf1;
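/* upper bound for the converted output: the input sample count rescaled to the target
   rate, plus 256 samples of headroom for resampler delay and sync compensation */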
2282 int out_count = (int64_t)wanted_nb_samples * is->audio_tgt.freq / is->frame->sample_rate + 256;
2283 int out_size = av_samples_get_buffer_size(NULL, is->audio_tgt.channels, out_count, is->audio_tgt.fmt, 0);
2286 av_log(NULL, AV_LOG_ERROR, "av_samples_get_buffer_size() failed\n");
2289 if (wanted_nb_samples != is->frame->nb_samples) {
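/* distribute the (wanted - actual) sample difference, converted to output samples,
   over the duration of this frame so the resampler stretches or shrinks the audio smoothly */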
2290 if (swr_set_compensation(is->swr_ctx, (wanted_nb_samples - is->frame->nb_samples) * is->audio_tgt.freq / is->frame->sample_rate,
2291 wanted_nb_samples * is->audio_tgt.freq / is->frame->sample_rate) < 0) {
2292 av_log(NULL, AV_LOG_ERROR, "swr_set_compensation() failed\n");
2296 av_fast_malloc(&is->audio_buf1, &is->audio_buf1_size, out_size);
2297 if (!is->audio_buf1)
2298 return AVERROR(ENOMEM);
2299 len2 = swr_convert(is->swr_ctx, out, out_count, in, is->frame->nb_samples);
2301 av_log(NULL, AV_LOG_ERROR, "swr_convert() failed\n");
2304 if (len2 == out_count) {
2305 av_log(NULL, AV_LOG_WARNING, "audio buffer is probably too small\n");
2306 swr_init(is->swr_ctx);
2308 is->audio_buf = is->audio_buf1;
2309 resampled_data_size = len2 * is->audio_tgt.channels * av_get_bytes_per_sample(is->audio_tgt.fmt);
2311 is->audio_buf = is->frame->data[0];
2312 resampled_data_size = data_size;
2315 audio_clock0 = is->audio_clock;
2316 /* update the audio clock with the pts */
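/* the clock is set to the pts of the *end* of this frame (pts + nb_samples / sample_rate);
   the SDL audio callback later subtracts whatever is still buffered */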
2317 if (is->frame->pts != AV_NOPTS_VALUE) {
2318 is->audio_clock = is->frame->pts * av_q2d(tb) + (double) is->frame->nb_samples / is->frame->sample_rate;
2319 is->audio_clock_serial = is->audio_pkt_temp_serial;
2323 static double last_clock;
2324 printf("audio: delay=%0.3f clock=%0.3f clock0=%0.3f\n",
2325 is->audio_clock - last_clock,
2326 is->audio_clock, audio_clock0);
2327 last_clock = is->audio_clock;
2330 return resampled_data_size;
2333 /* free the current packet */
2335 av_free_packet(pkt);
2336 memset(pkt_temp, 0, sizeof(*pkt_temp));
2338 if (is->audioq.abort_request) {
2342 if (is->audioq.nb_packets == 0)
2343 SDL_CondSignal(is->continue_read_thread);
2345 /* read next packet */
2346 if ((new_packet = packet_queue_get(&is->audioq, pkt, 1, &is->audio_pkt_temp_serial)) < 0)
2349 if (pkt->data == flush_pkt.data) {
2350 avcodec_flush_buffers(dec);
2352 is->audio_buf_frames_pending = 0;
2359 /* prepare a new audio buffer */
2360 static void sdl_audio_callback(void *opaque, Uint8 *stream, int len)
2362 VideoState *is = opaque;
2363 int audio_size, len1;
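/* frame_size is the size in bytes of one sample for every channel; the silence
   buffer below is truncated to a whole number of such frames */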
2365 int frame_size = av_samples_get_buffer_size(NULL, is->audio_tgt.channels, 1, is->audio_tgt.fmt, 1);
2367 audio_callback_time = av_gettime();
2370 if (is->audio_buf_index >= is->audio_buf_size) {
2371 audio_size = audio_decode_frame(is);
2372 if (audio_size < 0) {
2373 /* if error, just output silence */
2374 is->audio_buf = is->silence_buf;
2375 is->audio_buf_size = sizeof(is->silence_buf) / frame_size * frame_size;
2377 if (is->show_mode != SHOW_MODE_VIDEO)
2378 update_sample_display(is, (int16_t *)is->audio_buf, audio_size);
2379 is->audio_buf_size = audio_size;
2381 is->audio_buf_index = 0;
2383 len1 = is->audio_buf_size - is->audio_buf_index;
2386 memcpy(stream, (uint8_t *)is->audio_buf + is->audio_buf_index, len1);
2389 is->audio_buf_index += len1;
2391 bytes_per_sec = is->audio_tgt.freq * is->audio_tgt.channels * av_get_bytes_per_sample(is->audio_tgt.fmt);
2392 is->audio_write_buf_size = is->audio_buf_size - is->audio_buf_index;
2393 /* Let's assume the audio driver used by SDL has two periods. */
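/* the reported pts therefore lags is->audio_clock by two hardware buffers plus
   the part of audio_buf not yet handed to SDL, converted to seconds */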
2394 set_clock_at(&is->audclk, is->audio_clock - (double)(2 * is->audio_hw_buf_size + is->audio_write_buf_size) / bytes_per_sec, is->audio_clock_serial, audio_callback_time / 1000000.0);
2395 sync_clock_to_slave(&is->extclk, &is->audclk);
2398 static int audio_open(void *opaque, int64_t wanted_channel_layout, int wanted_nb_channels, int wanted_sample_rate, struct AudioParams *audio_hw_params)
2400 SDL_AudioSpec wanted_spec, spec;
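/* fallback channel counts to try when SDL_OpenAudio() rejects the requested one,
   indexed by the count that failed (e.g. 6 -> 4 -> 2 -> 1); 0 means give up */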
2402 const int next_nb_channels[] = {0, 0, 1, 6, 2, 6, 4, 6};
2404 env = SDL_getenv("SDL_AUDIO_CHANNELS");
2406 wanted_nb_channels = atoi(env);
2407 wanted_channel_layout = av_get_default_channel_layout(wanted_nb_channels);
2409 if (!wanted_channel_layout || wanted_nb_channels != av_get_channel_layout_nb_channels(wanted_channel_layout)) {
2410 wanted_channel_layout = av_get_default_channel_layout(wanted_nb_channels);
2411 wanted_channel_layout &= ~AV_CH_LAYOUT_STEREO_DOWNMIX;
2413 wanted_spec.channels = av_get_channel_layout_nb_channels(wanted_channel_layout);
2414 wanted_spec.freq = wanted_sample_rate;
2415 if (wanted_spec.freq <= 0 || wanted_spec.channels <= 0) {
2416 av_log(NULL, AV_LOG_ERROR, "Invalid sample rate or channel count!\n");
2419 wanted_spec.format = AUDIO_S16SYS;
2420 wanted_spec.silence = 0;
2421 wanted_spec.samples = SDL_AUDIO_BUFFER_SIZE;
2422 wanted_spec.callback = sdl_audio_callback;
2423 wanted_spec.userdata = opaque;
2424 while (SDL_OpenAudio(&wanted_spec, &spec) < 0) {
2425 av_log(NULL, AV_LOG_WARNING, "SDL_OpenAudio (%d channels): %s\n", wanted_spec.channels, SDL_GetError());
2426 wanted_spec.channels = next_nb_channels[FFMIN(7, wanted_spec.channels)];
2427 if (!wanted_spec.channels) {
2428 av_log(NULL, AV_LOG_ERROR,
2429 "No more channel combinations to try, audio open failed\n");
2432 wanted_channel_layout = av_get_default_channel_layout(wanted_spec.channels);
2434 if (spec.format != AUDIO_S16SYS) {
2435 av_log(NULL, AV_LOG_ERROR,
2436 "SDL advised audio format %d is not supported!\n", spec.format);
2439 if (spec.channels != wanted_spec.channels) {
2440 wanted_channel_layout = av_get_default_channel_layout(spec.channels);
2441 if (!wanted_channel_layout) {
2442 av_log(NULL, AV_LOG_ERROR,
2443 "SDL advised channel count %d is not supported!\n", spec.channels);
2448 audio_hw_params->fmt = AV_SAMPLE_FMT_S16;
2449 audio_hw_params->freq = spec.freq;
2450 audio_hw_params->channel_layout = wanted_channel_layout;
2451 audio_hw_params->channels = spec.channels;
2455 /* open a given stream. Return 0 if OK */
2456 static int stream_component_open(VideoState *is, int stream_index)
2458 AVFormatContext *ic = is->ic;
2459 AVCodecContext *avctx;
2461 const char *forced_codec_name = NULL;
2463 AVDictionaryEntry *t = NULL;
2464 int sample_rate, nb_channels;
2465 int64_t channel_layout;
2468 if (stream_index < 0 || stream_index >= ic->nb_streams)
2470 avctx = ic->streams[stream_index]->codec;
2472 codec = avcodec_find_decoder(avctx->codec_id);
2474 switch(avctx->codec_type){
2475 case AVMEDIA_TYPE_AUDIO : is->last_audio_stream = stream_index; forced_codec_name = audio_codec_name; break;
2476 case AVMEDIA_TYPE_SUBTITLE: is->last_subtitle_stream = stream_index; forced_codec_name = subtitle_codec_name; break;
2477 case AVMEDIA_TYPE_VIDEO : is->last_video_stream = stream_index; forced_codec_name = video_codec_name; break;
2479 if (forced_codec_name)
2480 codec = avcodec_find_decoder_by_name(forced_codec_name);
2482 if (forced_codec_name) av_log(NULL, AV_LOG_WARNING,
2483 "No codec could be found with name '%s'\n", forced_codec_name);
2484 else av_log(NULL, AV_LOG_WARNING,
2485 "No codec could be found with id %d\n", avctx->codec_id);
2489 avctx->codec_id = codec->id;
2490 avctx->workaround_bugs = workaround_bugs;
2491 avctx->lowres = lowres;
2492 if(avctx->lowres > codec->max_lowres){
2493 av_log(avctx, AV_LOG_WARNING, "The maximum value for lowres supported by the decoder is %d\n",
2495 avctx->lowres= codec->max_lowres;
2497 avctx->idct_algo = idct;
2498 avctx->error_concealment = error_concealment;
2500 if(avctx->lowres) avctx->flags |= CODEC_FLAG_EMU_EDGE;
2501 if (fast) avctx->flags2 |= CODEC_FLAG2_FAST;
2502 if(codec->capabilities & CODEC_CAP_DR1)
2503 avctx->flags |= CODEC_FLAG_EMU_EDGE;
2505 opts = filter_codec_opts(codec_opts, avctx->codec_id, ic, ic->streams[stream_index], codec);
2506 if (!av_dict_get(opts, "threads", NULL, 0))
2507 av_dict_set(&opts, "threads", "auto", 0);
2509 av_dict_set(&opts, "lowres", av_asprintf("%d", avctx->lowres), AV_DICT_DONT_STRDUP_VAL);
2510 if (avctx->codec_type == AVMEDIA_TYPE_VIDEO || avctx->codec_type == AVMEDIA_TYPE_AUDIO)
2511 av_dict_set(&opts, "refcounted_frames", "1", 0);
2512 if (avcodec_open2(avctx, codec, &opts) < 0)
2514 if ((t = av_dict_get(opts, "", NULL, AV_DICT_IGNORE_SUFFIX))) {
2515 av_log(NULL, AV_LOG_ERROR, "Option %s not found.\n", t->key);
2516 return AVERROR_OPTION_NOT_FOUND;
2519 ic->streams[stream_index]->discard = AVDISCARD_DEFAULT;
2520 switch (avctx->codec_type) {
2521 case AVMEDIA_TYPE_AUDIO:
2526 is->audio_filter_src.freq = avctx->sample_rate;
2527 is->audio_filter_src.channels = avctx->channels;
2528 is->audio_filter_src.channel_layout = get_valid_channel_layout(avctx->channel_layout, avctx->channels);
2529 is->audio_filter_src.fmt = avctx->sample_fmt;
2530 if ((ret = configure_audio_filters(is, afilters, 0)) < 0)
2532 link = is->out_audio_filter->inputs[0];
2533 sample_rate = link->sample_rate;
2534 nb_channels = link->channels;
2535 channel_layout = link->channel_layout;
2538 sample_rate = avctx->sample_rate;
2539 nb_channels = avctx->channels;
2540 channel_layout = avctx->channel_layout;
2543 /* prepare audio output */
2544 if ((ret = audio_open(is, channel_layout, nb_channels, sample_rate, &is->audio_tgt)) < 0)
2546 is->audio_hw_buf_size = ret;
2547 is->audio_src = is->audio_tgt;
2548 is->audio_buf_size = 0;
2549 is->audio_buf_index = 0;
2551 /* init averaging filter */
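/* exponential moving average: the coefficient is chosen so that a measurement's weight
   decays to 1% after AUDIO_DIFF_AVG_NB (20) further samples, i.e. coef^20 == 0.01 */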
2552 is->audio_diff_avg_coef = exp(log(0.01) / AUDIO_DIFF_AVG_NB);
2553 is->audio_diff_avg_count = 0;
2554 /* since we do not have a precise enough audio fifo fullness,
2555 we only correct audio sync if the error is larger than this threshold */
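/* the threshold below is expressed in seconds: two hardware audio buffers' worth
   of data divided by the byte rate of the output format */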
2556 is->audio_diff_threshold = 2.0 * is->audio_hw_buf_size / av_samples_get_buffer_size(NULL, is->audio_tgt.channels, is->audio_tgt.freq, is->audio_tgt.fmt, 1);
2558 memset(&is->audio_pkt, 0, sizeof(is->audio_pkt));
2559 memset(&is->audio_pkt_temp, 0, sizeof(is->audio_pkt_temp));
2561 is->audio_stream = stream_index;
2562 is->audio_st = ic->streams[stream_index];
2564 packet_queue_start(&is->audioq);
2567 case AVMEDIA_TYPE_VIDEO:
2568 is->video_stream = stream_index;
2569 is->video_st = ic->streams[stream_index];
2571 packet_queue_start(&is->videoq);
2572 is->video_tid = SDL_CreateThread(video_thread, is);
2573 is->queue_attachments_req = 1;
2575 case AVMEDIA_TYPE_SUBTITLE:
2576 is->subtitle_stream = stream_index;
2577 is->subtitle_st = ic->streams[stream_index];
2578 packet_queue_start(&is->subtitleq);
2580 is->subtitle_tid = SDL_CreateThread(subtitle_thread, is);
2588 static void stream_component_close(VideoState *is, int stream_index)
2590 AVFormatContext *ic = is->ic;
2591 AVCodecContext *avctx;
2593 if (stream_index < 0 || stream_index >= ic->nb_streams)
2595 avctx = ic->streams[stream_index]->codec;
2597 switch (avctx->codec_type) {
2598 case AVMEDIA_TYPE_AUDIO:
2599 packet_queue_abort(&is->audioq);
2603 packet_queue_flush(&is->audioq);
2604 av_free_packet(&is->audio_pkt);
2605 swr_free(&is->swr_ctx);
2606 av_freep(&is->audio_buf1);
2607 is->audio_buf1_size = 0;
2608 is->audio_buf = NULL;
2609 av_frame_free(&is->frame);
2612 av_rdft_end(is->rdft);
2613 av_freep(&is->rdft_data);
2618 avfilter_graph_free(&is->agraph);
2621 case AVMEDIA_TYPE_VIDEO:
2622 packet_queue_abort(&is->videoq);
2624 /* note: we also signal this mutex to make sure we unblock the
2625 video thread in all cases */
2626 SDL_LockMutex(is->pictq_mutex);
2627 SDL_CondSignal(is->pictq_cond);
2628 SDL_UnlockMutex(is->pictq_mutex);
2630 SDL_WaitThread(is->video_tid, NULL);
2632 packet_queue_flush(&is->videoq);
2634 case AVMEDIA_TYPE_SUBTITLE:
2635 packet_queue_abort(&is->subtitleq);
2637 /* note: we also signal this mutex to make sure we unblock the
2638 subtitle thread in all cases */
2639 SDL_LockMutex(is->subpq_mutex);
2640 is->subtitle_stream_changed = 1;
2642 SDL_CondSignal(is->subpq_cond);
2643 SDL_UnlockMutex(is->subpq_mutex);
2645 SDL_WaitThread(is->subtitle_tid, NULL);
2647 packet_queue_flush(&is->subtitleq);
2653 ic->streams[stream_index]->discard = AVDISCARD_ALL;
2654 avcodec_close(avctx);
2655 switch (avctx->codec_type) {
2656 case AVMEDIA_TYPE_AUDIO:
2657 is->audio_st = NULL;
2658 is->audio_stream = -1;
2660 case AVMEDIA_TYPE_VIDEO:
2661 is->video_st = NULL;
2662 is->video_stream = -1;
2664 case AVMEDIA_TYPE_SUBTITLE:
2665 is->subtitle_st = NULL;
2666 is->subtitle_stream = -1;
2673 static int decode_interrupt_cb(void *ctx)
2675 VideoState *is = ctx;
2676 return is->abort_request;
2679 static int is_realtime(AVFormatContext *s)
2681 if( !strcmp(s->iformat->name, "rtp")
2682 || !strcmp(s->iformat->name, "rtsp")
2683 || !strcmp(s->iformat->name, "sdp")
2687 if(s->pb && ( !strncmp(s->filename, "rtp:", 4)
2688 || !strncmp(s->filename, "udp:", 4)
2695 /* this thread gets the stream from the disk or the network */
2696 static int read_thread(void *arg)
2698 VideoState *is = arg;
2699 AVFormatContext *ic = NULL;
2701 int st_index[AVMEDIA_TYPE_NB];
2702 AVPacket pkt1, *pkt = &pkt1;
2704 int pkt_in_play_range = 0;
2705 AVDictionaryEntry *t;
2706 AVDictionary **opts;
2707 int orig_nb_streams;
2708 SDL_mutex *wait_mutex = SDL_CreateMutex();
2710 memset(st_index, -1, sizeof(st_index));
2711 is->last_video_stream = is->video_stream = -1;
2712 is->last_audio_stream = is->audio_stream = -1;
2713 is->last_subtitle_stream = is->subtitle_stream = -1;
2715 ic = avformat_alloc_context();
2716 ic->interrupt_callback.callback = decode_interrupt_cb;
2717 ic->interrupt_callback.opaque = is;
2718 err = avformat_open_input(&ic, is->filename, is->iformat, &format_opts);
2720 print_error(is->filename, err);
2724 if ((t = av_dict_get(format_opts, "", NULL, AV_DICT_IGNORE_SUFFIX))) {
2725 av_log(NULL, AV_LOG_ERROR, "Option %s not found.\n", t->key);
2726 ret = AVERROR_OPTION_NOT_FOUND;
2732 ic->flags |= AVFMT_FLAG_GENPTS;
2734 opts = setup_find_stream_info_opts(ic, codec_opts);
2735 orig_nb_streams = ic->nb_streams;
2737 err = avformat_find_stream_info(ic, opts);
2739 av_log(NULL, AV_LOG_WARNING,
2740 "%s: could not find codec parameters\n", is->filename);
2744 for (i = 0; i < orig_nb_streams; i++)
2745 av_dict_free(&opts[i]);
2749 ic->pb->eof_reached = 0; // FIXME hack, ffplay maybe should not use url_feof() to test for the end
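/* -bytes left at its default of -1: enable byte seeking only for formats with
   timestamp discontinuities (AVFMT_TS_DISCONT), with Ogg explicitly excluded */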
2751 if (seek_by_bytes < 0)
2752 seek_by_bytes = !!(ic->iformat->flags & AVFMT_TS_DISCONT) && strcmp("ogg", ic->iformat->name);
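/* maximum plausible frame duration: a larger pts jump between frames is treated as a
   timestamp discontinuity; AVFMT_TS_DISCONT formats (e.g. MPEG-TS) get 10 s, everything
   else 3600 s */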
2754 is->max_frame_duration = (ic->iformat->flags & AVFMT_TS_DISCONT) ? 10.0 : 3600.0;
2756 if (!window_title && (t = av_dict_get(ic->metadata, "title", NULL, 0)))
2757 window_title = av_asprintf("%s - %s", t->value, input_filename);
2759 /* if seeking was requested, execute it now */
2760 if (start_time != AV_NOPTS_VALUE) {
2763 timestamp = start_time;
2764 /* add the stream start time */
2765 if (ic->start_time != AV_NOPTS_VALUE)
2766 timestamp += ic->start_time;
2767 ret = avformat_seek_file(ic, -1, INT64_MIN, timestamp, INT64_MAX, 0);
2769 av_log(NULL, AV_LOG_WARNING, "%s: could not seek to position %0.3f\n",
2770 is->filename, (double)timestamp / AV_TIME_BASE);
2774 is->realtime = is_realtime(ic);
2776 for (i = 0; i < ic->nb_streams; i++)
2777 ic->streams[i]->discard = AVDISCARD_ALL;
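/* pick the "best" stream of each type: audio is selected relative to the chosen
   video stream, subtitles relative to audio (or video if there is no audio) */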
2779 st_index[AVMEDIA_TYPE_VIDEO] =
2780 av_find_best_stream(ic, AVMEDIA_TYPE_VIDEO,
2781 wanted_stream[AVMEDIA_TYPE_VIDEO], -1, NULL, 0);
2783 st_index[AVMEDIA_TYPE_AUDIO] =
2784 av_find_best_stream(ic, AVMEDIA_TYPE_AUDIO,
2785 wanted_stream[AVMEDIA_TYPE_AUDIO],
2786 st_index[AVMEDIA_TYPE_VIDEO],
2788 if (!video_disable && !subtitle_disable)
2789 st_index[AVMEDIA_TYPE_SUBTITLE] =
2790 av_find_best_stream(ic, AVMEDIA_TYPE_SUBTITLE,
2791 wanted_stream[AVMEDIA_TYPE_SUBTITLE],
2792 (st_index[AVMEDIA_TYPE_AUDIO] >= 0 ?
2793 st_index[AVMEDIA_TYPE_AUDIO] :
2794 st_index[AVMEDIA_TYPE_VIDEO]),
2797 av_dump_format(ic, 0, is->filename, 0);
2800 is->show_mode = show_mode;
2802 /* open the streams */
2803 if (st_index[AVMEDIA_TYPE_AUDIO] >= 0) {
2804 stream_component_open(is, st_index[AVMEDIA_TYPE_AUDIO]);
2808 if (st_index[AVMEDIA_TYPE_VIDEO] >= 0) {
2809 ret = stream_component_open(is, st_index[AVMEDIA_TYPE_VIDEO]);
2811 if (is->show_mode == SHOW_MODE_NONE)
2812 is->show_mode = ret >= 0 ? SHOW_MODE_VIDEO : SHOW_MODE_RDFT;
2814 if (st_index[AVMEDIA_TYPE_SUBTITLE] >= 0) {
2815 stream_component_open(is, st_index[AVMEDIA_TYPE_SUBTITLE]);
2818 if (is->video_stream < 0 && is->audio_stream < 0) {
2819 av_log(NULL, AV_LOG_FATAL, "%s: could not open codecs\n", is->filename);
2824 if (infinite_buffer < 0 && is->realtime)
2825 infinite_buffer = 1;
2828 if (is->abort_request)
2830 if (is->paused != is->last_paused) {
2831 is->last_paused = is->paused;
2833 is->read_pause_return = av_read_pause(ic);
2837 #if CONFIG_RTSP_DEMUXER || CONFIG_MMSH_PROTOCOL
2839 (!strcmp(ic->iformat->name, "rtsp") ||
2840 (ic->pb && !strncmp(input_filename, "mmsh:", 5)))) {
2841 /* wait 10 ms to avoid trying to get another packet */
2848 int64_t seek_target = is->seek_pos;
2849 int64_t seek_min = is->seek_rel > 0 ? seek_target - is->seek_rel + 2: INT64_MIN;
2850 int64_t seek_max = is->seek_rel < 0 ? seek_target - is->seek_rel - 2: INT64_MAX;
2851 // FIXME the +-2 compensates for rounding not being done in the correct direction
2852 // when the seek_pos/seek_rel variables are generated
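/* seek_min/seek_max bound the result so that a relative seek can never end up
   on the wrong side of the current position */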
2854 ret = avformat_seek_file(is->ic, -1, seek_min, seek_target, seek_max, is->seek_flags);
2856 av_log(NULL, AV_LOG_ERROR,
2857 "%s: error while seeking\n", is->ic->filename);
2859 if (is->audio_stream >= 0) {
2860 packet_queue_flush(&is->audioq);
2861 packet_queue_put(&is->audioq, &flush_pkt);
2863 if (is->subtitle_stream >= 0) {
2864 packet_queue_flush(&is->subtitleq);
2865 packet_queue_put(&is->subtitleq, &flush_pkt);
2867 if (is->video_stream >= 0) {
2868 packet_queue_flush(&is->videoq);
2869 packet_queue_put(&is->videoq, &flush_pkt);
2871 if (is->seek_flags & AVSEEK_FLAG_BYTE) {
2872 set_clock(&is->extclk, NAN, 0);
2874 set_clock(&is->extclk, seek_target / (double)AV_TIME_BASE, 0);
2878 is->queue_attachments_req = 1;
2881 step_to_next_frame(is);
2883 if (is->queue_attachments_req) {
2884 if (is->video_st && is->video_st->disposition & AV_DISPOSITION_ATTACHED_PIC) {
2886 if ((ret = av_copy_packet(&copy, &is->video_st->attached_pic)) < 0
2888 packet_queue_put(&is->videoq, &copy);
2890 is->queue_attachments_req = 0;
2893 /* if the queues are full, no need to read more */
2894 if (infinite_buffer<1 &&
2895 (is->audioq.size + is->videoq.size + is->subtitleq.size > MAX_QUEUE_SIZE
2896 || ( (is->audioq .nb_packets > MIN_FRAMES || is->audio_stream < 0 || is->audioq.abort_request)
2897 && (is->videoq .nb_packets > MIN_FRAMES || is->video_stream < 0 || is->videoq.abort_request
2898 || (is->video_st->disposition & AV_DISPOSITION_ATTACHED_PIC))
2899 && (is->subtitleq.nb_packets > MIN_FRAMES || is->subtitle_stream < 0 || is->subtitleq.abort_request)))) {
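/* enough data is queued: sleep until a decoder drains its queue and signals
   continue_read_thread, or at most 10 ms */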
2901 SDL_LockMutex(wait_mutex);
2902 SDL_CondWaitTimeout(is->continue_read_thread, wait_mutex, 10);
2903 SDL_UnlockMutex(wait_mutex);
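/* at end of file, one empty (NULL data) packet is queued per stream so that
   decoders which buffer frames (CODEC_CAP_DELAY) flush their remaining output */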
2907 if (is->video_stream >= 0) {
2908 av_init_packet(pkt);
2911 pkt->stream_index = is->video_stream;
2912 packet_queue_put(&is->videoq, pkt);
2914 if (is->audio_stream >= 0 &&
2915 is->audio_st->codec->codec->capabilities & CODEC_CAP_DELAY) {
2916 av_init_packet(pkt);
2919 pkt->stream_index = is->audio_stream;
2920 packet_queue_put(&is->audioq, pkt);
2923 if (is->audioq.size + is->videoq.size + is->subtitleq.size == 0) {
2924 if (loop != 1 && (!loop || --loop)) {
2925 stream_seek(is, start_time != AV_NOPTS_VALUE ? start_time : 0, 0, 0);
2926 } else if (autoexit) {
2934 ret = av_read_frame(ic, pkt);
2936 if (ret == AVERROR_EOF || url_feof(ic->pb))
2938 if (ic->pb && ic->pb->error)
2940 SDL_LockMutex(wait_mutex);
2941 SDL_CondWaitTimeout(is->continue_read_thread, wait_mutex, 10);
2942 SDL_UnlockMutex(wait_mutex);
2945 /* check if packet is in play range specified by user, then queue, otherwise discard */
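/* a packet is in range when its pts, in seconds relative to the stream start time and
   the -ss offset, does not exceed the -t duration; an unset duration keeps everything */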
2946 pkt_in_play_range = duration == AV_NOPTS_VALUE ||
2947 (pkt->pts - ic->streams[pkt->stream_index]->start_time) *
2948 av_q2d(ic->streams[pkt->stream_index]->time_base) -
2949 (double)(start_time != AV_NOPTS_VALUE ? start_time : 0) / 1000000
2950 <= ((double)duration / 1000000);
2951 if (pkt->stream_index == is->audio_stream && pkt_in_play_range) {
2952 packet_queue_put(&is->audioq, pkt);
2953 } else if (pkt->stream_index == is->video_stream && pkt_in_play_range
2954 && !(is->video_st->disposition & AV_DISPOSITION_ATTACHED_PIC)) {
2955 packet_queue_put(&is->videoq, pkt);
2956 } else if (pkt->stream_index == is->subtitle_stream && pkt_in_play_range) {
2957 packet_queue_put(&is->subtitleq, pkt);
2959 av_free_packet(pkt);
2962 /* wait until the end */
2963 while (!is->abort_request) {
2969 /* close each stream */
2970 if (is->audio_stream >= 0)
2971 stream_component_close(is, is->audio_stream);
2972 if (is->video_stream >= 0)
2973 stream_component_close(is, is->video_stream);
2974 if (is->subtitle_stream >= 0)
2975 stream_component_close(is, is->subtitle_stream);
2977 avformat_close_input(&is->ic);
2983 event.type = FF_QUIT_EVENT;
2984 event.user.data1 = is;
2985 SDL_PushEvent(&event);
2987 SDL_DestroyMutex(wait_mutex);
2991 static VideoState *stream_open(const char *filename, AVInputFormat *iformat)
2995 is = av_mallocz(sizeof(VideoState));
2998 av_strlcpy(is->filename, filename, sizeof(is->filename));
2999 is->iformat = iformat;
3003 /* start video display */
3004 is->pictq_mutex = SDL_CreateMutex();
3005 is->pictq_cond = SDL_CreateCond();
3007 is->subpq_mutex = SDL_CreateMutex();
3008 is->subpq_cond = SDL_CreateCond();
3010 packet_queue_init(&is->videoq);
3011 packet_queue_init(&is->audioq);
3012 packet_queue_init(&is->subtitleq);
3014 is->continue_read_thread = SDL_CreateCond();
3016 init_clock(&is->vidclk, &is->videoq.serial);
3017 init_clock(&is->audclk, &is->audioq.serial);
3018 init_clock(&is->extclk, &is->extclk.serial);
3019 is->audio_clock_serial = -1;
3020 is->audio_last_serial = -1;
3021 is->av_sync_type = av_sync_type;
3022 is->read_tid = SDL_CreateThread(read_thread, is);
3023 if (!is->read_tid) {
3030 static void stream_cycle_channel(VideoState *is, int codec_type)
3032 AVFormatContext *ic = is->ic;
3033 int start_index, stream_index;
3037 if (codec_type == AVMEDIA_TYPE_VIDEO) {
3038 start_index = is->last_video_stream;
3039 old_index = is->video_stream;
3040 } else if (codec_type == AVMEDIA_TYPE_AUDIO) {
3041 start_index = is->last_audio_stream;
3042 old_index = is->audio_stream;
3044 start_index = is->last_subtitle_stream;
3045 old_index = is->subtitle_stream;
3047 stream_index = start_index;
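/* walk the stream list starting just after the current stream, wrapping around,
   until another stream of the requested type with usable parameters is found */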
3049 if (++stream_index >= is->ic->nb_streams)
3051 if (codec_type == AVMEDIA_TYPE_SUBTITLE)
3054 is->last_subtitle_stream = -1;
3057 if (start_index == -1)
3061 if (stream_index == start_index)
3063 st = ic->streams[stream_index];
3064 if (st->codec->codec_type == codec_type) {
3065 /* check that parameters are OK */
3066 switch (codec_type) {
3067 case AVMEDIA_TYPE_AUDIO:
3068 if (st->codec->sample_rate != 0 &&
3069 st->codec->channels != 0)
3072 case AVMEDIA_TYPE_VIDEO:
3073 case AVMEDIA_TYPE_SUBTITLE:
3081 stream_component_close(is, old_index);
3082 stream_component_open(is, stream_index);
3086 static void toggle_full_screen(VideoState *is)
3088 #if defined(__APPLE__) && SDL_VERSION_ATLEAST(1, 2, 14)
3089 /* OS X needs to reallocate the SDL overlays */
3091 for (i = 0; i < VIDEO_PICTURE_QUEUE_SIZE; i++)
3092 is->pictq[i].reallocate = 1;
3094 is_full_screen = !is_full_screen;
3095 video_open(is, 1, NULL);
3098 static void toggle_audio_display(VideoState *is)
3100 int bgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0x00);
3101 int next = is->show_mode;
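/* advance to the next display mode that can actually be shown: video mode needs
   a video stream, the waves/RDFT modes need audio */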
3103 next = (next + 1) % SHOW_MODE_NB;
3104 } while (next != is->show_mode && (next == SHOW_MODE_VIDEO && !is->video_st || next != SHOW_MODE_VIDEO && !is->audio_st));
3105 if (is->show_mode != next) {
3106 fill_rectangle(screen,
3107 is->xleft, is->ytop, is->width, is->height,
3109 is->force_refresh = 1;
3110 is->show_mode = next;
3114 static void refresh_loop_wait_event(VideoState *is, SDL_Event *event) {
3115 double remaining_time = 0.0;
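/* poll for SDL events; between polls sleep for remaining_time, which video_refresh()
   lowers to the delay until the next frame is due (never more than REFRESH_RATE) */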
3117 while (!SDL_PeepEvents(event, 1, SDL_GETEVENT, SDL_ALLEVENTS)) {
3118 if (!cursor_hidden && av_gettime() - cursor_last_shown > CURSOR_HIDE_DELAY) {
3122 if (remaining_time > 0.0)
3123 av_usleep((int64_t)(remaining_time * 1000000.0));
3124 remaining_time = REFRESH_RATE;
3125 if (is->show_mode != SHOW_MODE_NONE && (!is->paused || is->force_refresh))
3126 video_refresh(is, &remaining_time);
3131 /* handle an event sent by the GUI */
3132 static void event_loop(VideoState *cur_stream)
3135 double incr, pos, frac;
3139 refresh_loop_wait_event(cur_stream, &event);
3140 switch (event.type) {
3142 if (exit_on_keydown) {
3143 do_exit(cur_stream);
3146 switch (event.key.keysym.sym) {
3149 do_exit(cur_stream);
3152 toggle_full_screen(cur_stream);
3153 cur_stream->force_refresh = 1;
3157 toggle_pause(cur_stream);
3159 case SDLK_s: // S: Step to next frame
3160 step_to_next_frame(cur_stream);
3163 stream_cycle_channel(cur_stream, AVMEDIA_TYPE_AUDIO);
3166 stream_cycle_channel(cur_stream, AVMEDIA_TYPE_VIDEO);
3169 stream_cycle_channel(cur_stream, AVMEDIA_TYPE_SUBTITLE);
3172 toggle_audio_display(cur_stream);
3192 if (seek_by_bytes) {
3193 if (cur_stream->video_stream >= 0 && cur_stream->video_current_pos >= 0) {
3194 pos = cur_stream->video_current_pos;
3195 } else if (cur_stream->audio_stream >= 0 && cur_stream->audio_pkt.pos >= 0) {
3196 pos = cur_stream->audio_pkt.pos;
3198 pos = avio_tell(cur_stream->ic->pb);
3199 if (cur_stream->ic->bit_rate)
3200 incr *= cur_stream->ic->bit_rate / 8.0;
3204 stream_seek(cur_stream, pos, incr, 1);
3206 pos = get_master_clock(cur_stream);
3208 pos = (double)cur_stream->seek_pos / AV_TIME_BASE;
3210 if (cur_stream->ic->start_time != AV_NOPTS_VALUE && pos < cur_stream->ic->start_time / (double)AV_TIME_BASE)
3211 pos = cur_stream->ic->start_time / (double)AV_TIME_BASE;
3212 stream_seek(cur_stream, (int64_t)(pos * AV_TIME_BASE), (int64_t)(incr * AV_TIME_BASE), 0);
3219 case SDL_VIDEOEXPOSE:
3220 cur_stream->force_refresh = 1;
3222 case SDL_MOUSEBUTTONDOWN:
3223 if (exit_on_mousedown) {
3224 do_exit(cur_stream);
3227 case SDL_MOUSEMOTION:
3228 if (cursor_hidden) {
3232 cursor_last_shown = av_gettime();
3233 if (event.type == SDL_MOUSEBUTTONDOWN) {
3236 if (event.motion.state != SDL_PRESSED)
3240 if (seek_by_bytes || cur_stream->ic->duration <= 0) {
3241 uint64_t size = avio_size(cur_stream->ic->pb);
3242 stream_seek(cur_stream, size*x/cur_stream->width, 0, 1);
3246 int tns, thh, tmm, tss;
3247 tns = cur_stream->ic->duration / 1000000LL;
3249 tmm = (tns % 3600) / 60;
3251 frac = x / cur_stream->width;
3254 mm = (ns % 3600) / 60;
3256 av_log(NULL, AV_LOG_INFO,
3257 "Seek to %2.0f%% (%2d:%02d:%02d) of total duration (%2d:%02d:%02d) \n", frac*100,
3258 hh, mm, ss, thh, tmm, tss);
3259 ts = frac * cur_stream->ic->duration;
3260 if (cur_stream->ic->start_time != AV_NOPTS_VALUE)
3261 ts += cur_stream->ic->start_time;
3262 stream_seek(cur_stream, ts, 0, 0);
3265 case SDL_VIDEORESIZE:
3266 screen = SDL_SetVideoMode(FFMIN(16383, event.resize.w), event.resize.h, 0,
3267 SDL_HWSURFACE|SDL_RESIZABLE|SDL_ASYNCBLIT|SDL_HWACCEL);
3269 av_log(NULL, AV_LOG_FATAL, "Failed to set video mode\n");
3270 do_exit(cur_stream);
3272 screen_width = cur_stream->width = screen->w;
3273 screen_height = cur_stream->height = screen->h;
3274 cur_stream->force_refresh = 1;
3278 do_exit(cur_stream);
3280 case FF_ALLOC_EVENT:
3281 alloc_picture(event.user.data1);
3289 static int opt_frame_size(void *optctx, const char *opt, const char *arg)
3291 av_log(NULL, AV_LOG_WARNING, "Option -s is deprecated, use -video_size.\n");
3292 return opt_default(NULL, "video_size", arg);
3295 static int opt_width(void *optctx, const char *opt, const char *arg)
3297 screen_width = parse_number_or_die(opt, arg, OPT_INT64, 1, INT_MAX);
3301 static int opt_height(void *optctx, const char *opt, const char *arg)
3303 screen_height = parse_number_or_die(opt, arg, OPT_INT64, 1, INT_MAX);
3307 static int opt_format(void *optctx, const char *opt, const char *arg)
3309 file_iformat = av_find_input_format(arg);
3310 if (!file_iformat) {
3311 av_log(NULL, AV_LOG_FATAL, "Unknown input format: %s\n", arg);
3312 return AVERROR(EINVAL);
3317 static int opt_frame_pix_fmt(void *optctx, const char *opt, const char *arg)
3319 av_log(NULL, AV_LOG_WARNING, "Option -pix_fmt is deprecated, use -pixel_format.\n");
3320 return opt_default(NULL, "pixel_format", arg);
3323 static int opt_sync(void *optctx, const char *opt, const char *arg)
3325 if (!strcmp(arg, "audio"))
3326 av_sync_type = AV_SYNC_AUDIO_MASTER;
3327 else if (!strcmp(arg, "video"))
3328 av_sync_type = AV_SYNC_VIDEO_MASTER;
3329 else if (!strcmp(arg, "ext"))
3330 av_sync_type = AV_SYNC_EXTERNAL_CLOCK;
3332 av_log(NULL, AV_LOG_ERROR, "Unknown value for %s: %s\n", opt, arg);
3338 static int opt_seek(void *optctx, const char *opt, const char *arg)
3340 start_time = parse_time_or_die(opt, arg, 1);
3344 static int opt_duration(void *optctx, const char *opt, const char *arg)
3346 duration = parse_time_or_die(opt, arg, 1);
3350 static int opt_show_mode(void *optctx, const char *opt, const char *arg)
3352 show_mode = !strcmp(arg, "video") ? SHOW_MODE_VIDEO :
3353 !strcmp(arg, "waves") ? SHOW_MODE_WAVES :
3354 !strcmp(arg, "rdft" ) ? SHOW_MODE_RDFT :
3355 parse_number_or_die(opt, arg, OPT_INT, 0, SHOW_MODE_NB-1);
3359 static void opt_input_file(void *optctx, const char *filename)
3361 if (input_filename) {
3362 av_log(NULL, AV_LOG_FATAL,
3363 "Argument '%s' provided as input filename, but '%s' was already specified.\n",
3364 filename, input_filename);
3367 if (!strcmp(filename, "-"))
3369 input_filename = filename;
3372 static int opt_codec(void *optctx, const char *opt, const char *arg)
3374 const char *spec = strchr(opt, ':');
3376 av_log(NULL, AV_LOG_ERROR,
3377 "No media specifier was specified in '%s' in option '%s'\n",
3379 return AVERROR(EINVAL);
3383 case 'a' : audio_codec_name = arg; break;
3384 case 's' : subtitle_codec_name = arg; break;
3385 case 'v' : video_codec_name = arg; break;
3387 av_log(NULL, AV_LOG_ERROR,
3388 "Invalid media specifier '%s' in option '%s'\n", spec, opt);
3389 return AVERROR(EINVAL);
3396 static const OptionDef options[] = {
3397 #include "cmdutils_common_opts.h"
3398 { "x", HAS_ARG, { .func_arg = opt_width }, "force displayed width", "width" },
3399 { "y", HAS_ARG, { .func_arg = opt_height }, "force displayed height", "height" },
3400 { "s", HAS_ARG | OPT_VIDEO, { .func_arg = opt_frame_size }, "set frame size (WxH or abbreviation)", "size" },
3401 { "fs", OPT_BOOL, { &is_full_screen }, "force full screen" },
3402 { "an", OPT_BOOL, { &audio_disable }, "disable audio" },
3403 { "vn", OPT_BOOL, { &video_disable }, "disable video" },
3404 { "sn", OPT_BOOL, { &subtitle_disable }, "disable subtitling" },
3405 { "ast", OPT_INT | HAS_ARG | OPT_EXPERT, { &wanted_stream[AVMEDIA_TYPE_AUDIO] }, "select desired audio stream", "stream_number" },
3406 { "vst", OPT_INT | HAS_ARG | OPT_EXPERT, { &wanted_stream[AVMEDIA_TYPE_VIDEO] }, "select desired video stream", "stream_number" },
3407 { "sst", OPT_INT | HAS_ARG | OPT_EXPERT, { &wanted_stream[AVMEDIA_TYPE_SUBTITLE] }, "select desired subtitle stream", "stream_number" },
3408 { "ss", HAS_ARG, { .func_arg = opt_seek }, "seek to a given position in seconds", "pos" },
3409 { "t", HAS_ARG, { .func_arg = opt_duration }, "play \"duration\" seconds of audio/video", "duration" },
3410 { "bytes", OPT_INT | HAS_ARG, { &seek_by_bytes }, "seek by bytes 0=off 1=on -1=auto", "val" },
3411 { "nodisp", OPT_BOOL, { &display_disable }, "disable graphical display" },
3412 { "f", HAS_ARG, { .func_arg = opt_format }, "force format", "fmt" },
3413 { "pix_fmt", HAS_ARG | OPT_EXPERT | OPT_VIDEO, { .func_arg = opt_frame_pix_fmt }, "set pixel format", "format" },
3414 { "stats", OPT_BOOL | OPT_EXPERT, { &show_status }, "show status", "" },
3415 { "bug", OPT_INT | HAS_ARG | OPT_EXPERT, { &workaround_bugs }, "workaround bugs", "" },
3416 { "fast", OPT_BOOL | OPT_EXPERT, { &fast }, "non spec compliant optimizations", "" },
3417 { "genpts", OPT_BOOL | OPT_EXPERT, { &genpts }, "generate pts", "" },
3418 { "drp", OPT_INT | HAS_ARG | OPT_EXPERT, { &decoder_reorder_pts }, "let decoder reorder pts 0=off 1=on -1=auto", ""},
3419 { "lowres", OPT_INT | HAS_ARG | OPT_EXPERT, { &lowres }, "", "" },
3420 { "idct", OPT_INT | HAS_ARG | OPT_EXPERT, { &idct }, "set idct algo", "algo" },
3421 { "ec", OPT_INT | HAS_ARG | OPT_EXPERT, { &error_concealment }, "set error concealment options", "bit_mask" },
3422 { "sync", HAS_ARG | OPT_EXPERT, { .func_arg = opt_sync }, "set audio-video sync. type (type=audio/video/ext)", "type" },
3423 { "autoexit", OPT_BOOL | OPT_EXPERT, { &autoexit }, "exit at the end", "" },
3424 { "exitonkeydown", OPT_BOOL | OPT_EXPERT, { &exit_on_keydown }, "exit on key down", "" },
3425 { "exitonmousedown", OPT_BOOL | OPT_EXPERT, { &exit_on_mousedown }, "exit on mouse down", "" },
3426 { "loop", OPT_INT | HAS_ARG | OPT_EXPERT, { &loop }, "set number of times the playback shall be looped", "loop count" },
3427 { "framedrop", OPT_BOOL | OPT_EXPERT, { &framedrop }, "drop frames when cpu is too slow", "" },
3428 { "infbuf", OPT_BOOL | OPT_EXPERT, { &infinite_buffer }, "don't limit the input buffer size (useful with realtime streams)", "" },
3429 { "window_title", OPT_STRING | HAS_ARG, { &window_title }, "set window title", "window title" },
3431 { "vf", OPT_STRING | HAS_ARG, { &vfilters }, "set video filters", "filter_graph" },
3432 { "af", OPT_STRING | HAS_ARG, { &afilters }, "set audio filters", "filter_graph" },
3434 { "rdftspeed", OPT_INT | HAS_ARG| OPT_AUDIO | OPT_EXPERT, { &rdftspeed }, "rdft speed", "msecs" },
3435 { "showmode", HAS_ARG, { .func_arg = opt_show_mode}, "select show mode (0 = video, 1 = waves, 2 = RDFT)", "mode" },
3436 { "default", HAS_ARG | OPT_AUDIO | OPT_VIDEO | OPT_EXPERT, { .func_arg = opt_default }, "generic catch all option", "" },
3437 { "i", OPT_BOOL, { &dummy}, "read specified file", "input_file"},
3438 { "codec", HAS_ARG, { .func_arg = opt_codec}, "force decoder", "decoder_name" },
3439 { "acodec", HAS_ARG | OPT_STRING | OPT_EXPERT, { &audio_codec_name }, "force audio decoder", "decoder_name" },
3440 { "scodec", HAS_ARG | OPT_STRING | OPT_EXPERT, { &subtitle_codec_name }, "force subtitle decoder", "decoder_name" },
3441 { "vcodec", HAS_ARG | OPT_STRING | OPT_EXPERT, { &video_codec_name }, "force video decoder", "decoder_name" },
3445 static void show_usage(void)
3447 av_log(NULL, AV_LOG_INFO, "Simple media player\n");
3448 av_log(NULL, AV_LOG_INFO, "usage: %s [options] input_file\n", program_name);
3449 av_log(NULL, AV_LOG_INFO, "\n");
3452 void show_help_default(const char *opt, const char *arg)
3454 av_log_set_callback(log_callback_help);
3456 show_help_options(options, "Main options:", 0, OPT_EXPERT, 0);
3457 show_help_options(options, "Advanced options:", OPT_EXPERT, 0, 0);
3459 show_help_children(avcodec_get_class(), AV_OPT_FLAG_DECODING_PARAM);
3460 show_help_children(avformat_get_class(), AV_OPT_FLAG_DECODING_PARAM);
3461 #if !CONFIG_AVFILTER
3462 show_help_children(sws_get_class(), AV_OPT_FLAG_ENCODING_PARAM);
3464 show_help_children(avfilter_get_class(), AV_OPT_FLAG_FILTERING_PARAM);
3466 printf("\nWhile playing:\n"
3468 "f toggle full screen\n"
3470 "a cycle audio channel\n"
3471 "v cycle video channel\n"
3472 "t cycle subtitle channel\n"
3473 "w show audio waves\n"
3474 "s activate frame-step mode\n"
3475 "left/right seek backward/forward 10 seconds\n"
3476 "down/up seek backward/forward 1 minute\n"
3477 "page down/page up seek backward/forward 10 minutes\n"
3478 "mouse click seek to percentage in file corresponding to fraction of width\n"
3482 static int lockmgr(void **mtx, enum AVLockOp op)
3485 case AV_LOCK_CREATE:
3486 *mtx = SDL_CreateMutex();
3490 case AV_LOCK_OBTAIN:
3491 return !!SDL_LockMutex(*mtx);
3492 case AV_LOCK_RELEASE:
3493 return !!SDL_UnlockMutex(*mtx);
3494 case AV_LOCK_DESTROY:
3495 SDL_DestroyMutex(*mtx);
3501 /* Called from the main thread */
3502 int main(int argc, char **argv)
3506 char dummy_videodriver[] = "SDL_VIDEODRIVER=dummy";
3508 av_log_set_flags(AV_LOG_SKIP_REPEATED);
3509 parse_loglevel(argc, argv, options);
3511 /* register all codecs, demuxers and protocols */
3512 avcodec_register_all();
3514 avdevice_register_all();
3517 avfilter_register_all();
3520 avformat_network_init();
3524 signal(SIGINT , sigterm_handler); /* Interrupt (ANSI). */
3525 signal(SIGTERM, sigterm_handler); /* Termination (ANSI). */
3527 show_banner(argc, argv, options);
3529 parse_options(NULL, argc, argv, options, opt_input_file);
3531 if (!input_filename) {
3533 av_log(NULL, AV_LOG_FATAL, "An input file must be specified\n");
3534 av_log(NULL, AV_LOG_FATAL,
3535 "Use -h to get full help or, even better, run 'man %s'\n", program_name);
3539 if (display_disable) {
3542 flags = SDL_INIT_VIDEO | SDL_INIT_AUDIO | SDL_INIT_TIMER;
3544 flags &= ~SDL_INIT_AUDIO;
3545 if (display_disable)
3546 SDL_putenv(dummy_videodriver); /* For the event queue, we always need a video driver. */
3547 #if !defined(__MINGW32__) && !defined(__APPLE__)
3548 flags |= SDL_INIT_EVENTTHREAD; /* Not supported on Windows or Mac OS X */
3550 if (SDL_Init (flags)) {
3551 av_log(NULL, AV_LOG_FATAL, "Could not initialize SDL - %s\n", SDL_GetError());
3552 av_log(NULL, AV_LOG_FATAL, "(Did you set the DISPLAY variable?)\n");
3556 if (!display_disable) {
3557 const SDL_VideoInfo *vi = SDL_GetVideoInfo();
3558 fs_screen_width = vi->current_w;
3559 fs_screen_height = vi->current_h;
3562 SDL_EventState(SDL_ACTIVEEVENT, SDL_IGNORE);
3563 SDL_EventState(SDL_SYSWMEVENT, SDL_IGNORE);
3564 SDL_EventState(SDL_USEREVENT, SDL_IGNORE);
3566 if (av_lockmgr_register(lockmgr)) {
3567 av_log(NULL, AV_LOG_FATAL, "Could not initialize lock manager!\n");
3571 av_init_packet(&flush_pkt);
3572 flush_pkt.data = (uint8_t *)&flush_pkt;
3574 is = stream_open(input_filename, file_iformat);
3576 av_log(NULL, AV_LOG_FATAL, "Failed to initialize VideoState!\n");