2 * Copyright (c) 2003 Fabrice Bellard
4 * This file is part of FFmpeg.
6 * FFmpeg is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2.1 of the License, or (at your option) any later version.
11 * FFmpeg is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with FFmpeg; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
23 * simple media player based on the FFmpeg libraries
33 #include "libavutil/avstring.h"
34 #include "libavutil/colorspace.h"
35 #include "libavutil/mathematics.h"
36 #include "libavutil/pixdesc.h"
37 #include "libavutil/imgutils.h"
38 #include "libavutil/dict.h"
39 #include "libavutil/parseutils.h"
40 #include "libavutil/samplefmt.h"
41 #include "libavutil/avassert.h"
42 #include "libavutil/time.h"
43 #include "libavformat/avformat.h"
44 #include "libavdevice/avdevice.h"
45 #include "libswscale/swscale.h"
46 #include "libavutil/opt.h"
47 #include "libavcodec/avfft.h"
48 #include "libswresample/swresample.h"
51 # include "libavfilter/avcodec.h"
52 # include "libavfilter/avfilter.h"
53 # include "libavfilter/buffersink.h"
54 # include "libavfilter/buffersrc.h"
58 #include <SDL_thread.h>
64 const char program_name[] = "ffplay";
65 const int program_birth_year = 2003;
67 #define MAX_QUEUE_SIZE (15 * 1024 * 1024)
70 /* Minimum SDL audio buffer size, in samples. */
71 #define SDL_AUDIO_MIN_BUFFER_SIZE 512
72 /* Calculate the actual buffer size, keeping in mind that we must not cause too frequent audio callbacks */
73 #define SDL_AUDIO_MAX_CALLBACKS_PER_SEC 30
75 /* no AV sync correction is done if below the minimum AV sync threshold */
76 #define AV_SYNC_THRESHOLD_MIN 0.04
77 /* AV sync correction is done if above the maximum AV sync threshold */
78 #define AV_SYNC_THRESHOLD_MAX 0.1
79 /* If a frame's duration is longer than this, it will not be duplicated to compensate for AV sync */
80 #define AV_SYNC_FRAMEDUP_THRESHOLD 0.1
81 /* no AV correction is done if the error is too big */
82 #define AV_NOSYNC_THRESHOLD 10.0
84 /* maximum audio speed change to get correct sync */
85 #define SAMPLE_CORRECTION_PERCENT_MAX 10
87 /* external clock speed adjustment constants for realtime sources based on buffer fullness */
88 #define EXTERNAL_CLOCK_SPEED_MIN 0.900
89 #define EXTERNAL_CLOCK_SPEED_MAX 1.010
90 #define EXTERNAL_CLOCK_SPEED_STEP 0.001
92 /* we use about AUDIO_DIFF_AVG_NB A-V differences to make the average */
93 #define AUDIO_DIFF_AVG_NB 20
95 /* poll for a possible required screen refresh at least this often; should be less than 1/fps */
96 #define REFRESH_RATE 0.01
98 /* NOTE: the size must be big enough to compensate for the hardware audio buffer size */
99 /* TODO: We assume that a decoded and resampled frame fits into this buffer */
100 #define SAMPLE_ARRAY_SIZE (8 * 65536)
102 #define CURSOR_HIDE_DELAY 1000000
104 static int64_t sws_flags = SWS_BICUBIC;
106 typedef struct MyAVPacketList {
108 struct MyAVPacketList *next;
112 typedef struct PacketQueue {
113 MyAVPacketList *first_pkt, *last_pkt;
122 #define VIDEO_PICTURE_QUEUE_SIZE 3
123 #define SUBPICTURE_QUEUE_SIZE 4
125 typedef struct VideoPicture {
126 double pts; // presentation timestamp for this picture
127 double duration; // estimated duration based on frame rate
128 int64_t pos; // byte position in file
130 int width, height; /* source width & height */
138 typedef struct SubPicture {
139 double pts; /* presentation time stamp for this picture */
144 typedef struct AudioParams {
147 int64_t channel_layout;
148 enum AVSampleFormat fmt;
153 typedef struct Clock {
154 double pts; /* clock base */
155 double pts_drift; /* clock base minus time at which we updated the clock */
158 int serial; /* clock is based on a packet with this serial */
160 int *queue_serial; /* pointer to the current packet queue serial, used for obsolete clock detection */
164 AV_SYNC_AUDIO_MASTER, /* default choice */
165 AV_SYNC_VIDEO_MASTER,
166 AV_SYNC_EXTERNAL_CLOCK, /* synchronize to an external clock */
169 typedef struct VideoState {
170 SDL_Thread *read_tid;
171 SDL_Thread *video_tid;
172 AVInputFormat *iformat;
178 int queue_attachments_req;
183 int read_pause_return;
198 int audio_clock_serial;
199 double audio_diff_cum; /* used for AV difference average computation */
200 double audio_diff_avg_coef;
201 double audio_diff_threshold;
202 int audio_diff_avg_count;
205 int audio_hw_buf_size;
206 uint8_t silence_buf[SDL_AUDIO_MIN_BUFFER_SIZE];
209 unsigned int audio_buf_size; /* in bytes */
210 unsigned int audio_buf1_size;
211 int audio_buf_index; /* in bytes */
212 int audio_write_buf_size;
213 int audio_buf_frames_pending;
214 AVPacket audio_pkt_temp;
216 int audio_pkt_temp_serial;
217 int audio_last_serial;
218 struct AudioParams audio_src;
220 struct AudioParams audio_filter_src;
222 struct AudioParams audio_tgt;
223 struct SwrContext *swr_ctx;
224 int frame_drops_early;
225 int frame_drops_late;
227 int64_t audio_frame_next_pts;
230 SHOW_MODE_NONE = -1, SHOW_MODE_VIDEO = 0, SHOW_MODE_WAVES, SHOW_MODE_RDFT, SHOW_MODE_NB
232 int16_t sample_array[SAMPLE_ARRAY_SIZE];
233 int sample_array_index;
237 FFTSample *rdft_data;
239 double last_vis_time;
241 SDL_Thread *subtitle_tid;
243 AVStream *subtitle_st;
244 PacketQueue subtitleq;
245 SubPicture subpq[SUBPICTURE_QUEUE_SIZE];
246 int subpq_size, subpq_rindex, subpq_windex;
247 SDL_mutex *subpq_mutex;
248 SDL_cond *subpq_cond;
251 double frame_last_returned_time;
252 double frame_last_filter_delay;
256 int64_t video_current_pos; // current displayed file pos
257 double max_frame_duration; // maximum duration of a frame - above this, we consider the jump a timestamp discontinuity
258 VideoPicture pictq[VIDEO_PICTURE_QUEUE_SIZE];
259 int pictq_size, pictq_rindex, pictq_windex, pictq_rindex_shown;
260 SDL_mutex *pictq_mutex;
261 SDL_cond *pictq_cond;
263 struct SwsContext *img_convert_ctx;
265 SDL_Rect last_display_rect;
268 int width, height, xleft, ytop;
273 AVFilterContext *in_video_filter; // the first filter in the video chain
274 AVFilterContext *out_video_filter; // the last filter in the video chain
275 AVFilterContext *in_audio_filter; // the first filter in the audio chain
276 AVFilterContext *out_audio_filter; // the last filter in the audio chain
277 AVFilterGraph *agraph; // audio filter graph
280 int last_video_stream, last_audio_stream, last_subtitle_stream;
282 SDL_cond *continue_read_thread;
285 /* options specified by the user */
286 static AVInputFormat *file_iformat;
287 static const char *input_filename;
288 static const char *window_title;
289 static int fs_screen_width;
290 static int fs_screen_height;
291 static int default_width = 640;
292 static int default_height = 480;
293 static int screen_width = 0;
294 static int screen_height = 0;
295 static int audio_disable;
296 static int video_disable;
297 static int subtitle_disable;
298 static int wanted_stream[AVMEDIA_TYPE_NB] = {
299 [AVMEDIA_TYPE_AUDIO] = -1,
300 [AVMEDIA_TYPE_VIDEO] = -1,
301 [AVMEDIA_TYPE_SUBTITLE] = -1,
303 static int seek_by_bytes = -1;
304 static int display_disable;
305 static int show_status = 1;
306 static int av_sync_type = AV_SYNC_AUDIO_MASTER;
307 static int64_t start_time = AV_NOPTS_VALUE;
308 static int64_t duration = AV_NOPTS_VALUE;
309 static int workaround_bugs = 1;
311 static int genpts = 0;
312 static int lowres = 0;
313 static int decoder_reorder_pts = -1;
315 static int exit_on_keydown;
316 static int exit_on_mousedown;
318 static int framedrop = -1;
319 static int infinite_buffer = -1;
320 static enum ShowMode show_mode = SHOW_MODE_NONE;
321 static const char *audio_codec_name;
322 static const char *subtitle_codec_name;
323 static const char *video_codec_name;
324 double rdftspeed = 0.02;
325 static int64_t cursor_last_shown;
326 static int cursor_hidden = 0;
328 static const char **vfilters_list = NULL;
329 static int nb_vfilters = 0;
330 static char *afilters = NULL;
332 static int autorotate = 1;
334 /* current context */
335 static int is_full_screen;
336 static int64_t audio_callback_time;
338 static AVPacket flush_pkt;
340 #define FF_ALLOC_EVENT (SDL_USEREVENT)
341 #define FF_QUIT_EVENT (SDL_USEREVENT + 2)
343 static SDL_Surface *screen;
346 static int opt_add_vfilter(void *optctx, const char *opt, const char *arg)
348 GROW_ARRAY(vfilters_list, nb_vfilters);
349 vfilters_list[nb_vfilters - 1] = arg;
355 int cmp_audio_fmts(enum AVSampleFormat fmt1, int64_t channel_count1,
356 enum AVSampleFormat fmt2, int64_t channel_count2)
358 /* If channel count == 1, planar and non-planar formats are the same */
359 if (channel_count1 == 1 && channel_count2 == 1)
360 return av_get_packed_sample_fmt(fmt1) != av_get_packed_sample_fmt(fmt2);
362 return channel_count1 != channel_count2 || fmt1 != fmt2;
366 int64_t get_valid_channel_layout(int64_t channel_layout, int channels)
368 if (channel_layout && av_get_channel_layout_nb_channels(channel_layout) == channels)
369 return channel_layout;
374 static int packet_queue_put_private(PacketQueue *q, AVPacket *pkt)
376 MyAVPacketList *pkt1;
378 if (q->abort_request)
381 pkt1 = av_malloc(sizeof(MyAVPacketList));
386 if (pkt == &flush_pkt)
388 pkt1->serial = q->serial;
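    /* Each queued packet is stamped with the queue's current serial; a flush
     * packet bumps that serial, so consumers can tell packets queued before
     * the last flush/seek apart from fresh ones. */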
393 q->last_pkt->next = pkt1;
396 q->size += pkt1->pkt.size + sizeof(*pkt1);
397 /* XXX: should duplicate packet data in DV case */
398 SDL_CondSignal(q->cond);
402 static int packet_queue_put(PacketQueue *q, AVPacket *pkt)
406 /* duplicate the packet */
407 if (pkt != &flush_pkt && av_dup_packet(pkt) < 0)
410 SDL_LockMutex(q->mutex);
411 ret = packet_queue_put_private(q, pkt);
412 SDL_UnlockMutex(q->mutex);
414 if (pkt != &flush_pkt && ret < 0)
420 static int packet_queue_put_nullpacket(PacketQueue *q, int stream_index)
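    /* A null packet (NULL data, zero size) is queued at end of stream so the
     * decoder gets drained of any frames it is still buffering. */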
422 AVPacket pkt1, *pkt = &pkt1;
426 pkt->stream_index = stream_index;
427 return packet_queue_put(q, pkt);
430 /* packet queue handling */
431 static void packet_queue_init(PacketQueue *q)
433 memset(q, 0, sizeof(PacketQueue));
434 q->mutex = SDL_CreateMutex();
435 q->cond = SDL_CreateCond();
436 q->abort_request = 1;
439 static void packet_queue_flush(PacketQueue *q)
441 MyAVPacketList *pkt, *pkt1;
443 SDL_LockMutex(q->mutex);
444 for (pkt = q->first_pkt; pkt != NULL; pkt = pkt1) {
446 av_free_packet(&pkt->pkt);
453 SDL_UnlockMutex(q->mutex);
456 static void packet_queue_destroy(PacketQueue *q)
458 packet_queue_flush(q);
459 SDL_DestroyMutex(q->mutex);
460 SDL_DestroyCond(q->cond);
463 static void packet_queue_abort(PacketQueue *q)
465 SDL_LockMutex(q->mutex);
467 q->abort_request = 1;
469 SDL_CondSignal(q->cond);
471 SDL_UnlockMutex(q->mutex);
474 static void packet_queue_start(PacketQueue *q)
476 SDL_LockMutex(q->mutex);
477 q->abort_request = 0;
478 packet_queue_put_private(q, &flush_pkt);
479 SDL_UnlockMutex(q->mutex);
482 /* return < 0 if aborted, 0 if no packet and > 0 if packet. */
483 static int packet_queue_get(PacketQueue *q, AVPacket *pkt, int block, int *serial)
485 MyAVPacketList *pkt1;
488 SDL_LockMutex(q->mutex);
491 if (q->abort_request) {
498 q->first_pkt = pkt1->next;
502 q->size -= pkt1->pkt.size + sizeof(*pkt1);
505 *serial = pkt1->serial;
513 SDL_CondWait(q->cond, q->mutex);
516 SDL_UnlockMutex(q->mutex);
520 static inline void fill_rectangle(SDL_Surface *screen,
521 int x, int y, int w, int h, int color, int update)
528 SDL_FillRect(screen, &rect, color);
529 if (update && w > 0 && h > 0)
530 SDL_UpdateRect(screen, x, y, w, h);
533 /* draw only the border of a rectangle */
534 static void fill_border(int xleft, int ytop, int width, int height, int x, int y, int w, int h, int color, int update)
538 /* fill the background */
542 w2 = width - (x + w);
548 h2 = height - (y + h);
551 fill_rectangle(screen,
555 fill_rectangle(screen,
556 xleft + width - w2, ytop,
559 fill_rectangle(screen,
563 fill_rectangle(screen,
564 xleft + w1, ytop + height - h2,
569 #define ALPHA_BLEND(a, oldp, newp, s)\
570 ((((oldp << s) * (255 - (a))) + (newp * (a))) / (255 << s))
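/* Integer alpha blend: old and new values are mixed with weight a/255. The
 * shift s scales the old value so it can be blended against an accumulated
 * sum of 2^s new samples, as used for the subsampled chroma below. */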
572 #define RGBA_IN(r, g, b, a, s)\
574 unsigned int v = ((const uint32_t *)(s))[0];\
575 a = (v >> 24) & 0xff;\
576 r = (v >> 16) & 0xff;\
577 g = (v >> 8) & 0xff;\
581 #define YUVA_IN(y, u, v, a, s, pal)\
583 unsigned int val = ((const uint32_t *)(pal))[*(const uint8_t*)(s)];\
584 a = (val >> 24) & 0xff;\
585 y = (val >> 16) & 0xff;\
586 u = (val >> 8) & 0xff;\
590 #define YUVA_OUT(d, y, u, v, a)\
592 ((uint32_t *)(d))[0] = (a << 24) | (y << 16) | (u << 8) | v;\
598 static void blend_subrect(AVPicture *dst, const AVSubtitleRect *rect, int imgw, int imgh)
600 int wrap, wrap3, width2, skip2;
601 int y, u, v, a, u1, v1, a1, w, h;
602 uint8_t *lum, *cb, *cr;
605 int dstx, dsty, dstw, dsth;
607 dstw = av_clip(rect->w, 0, imgw);
608 dsth = av_clip(rect->h, 0, imgh);
609 dstx = av_clip(rect->x, 0, imgw - dstw);
610 dsty = av_clip(rect->y, 0, imgh - dsth);
611 lum = dst->data[0] + dsty * dst->linesize[0];
612 cb = dst->data[1] + (dsty >> 1) * dst->linesize[1];
613 cr = dst->data[2] + (dsty >> 1) * dst->linesize[2];
615 width2 = ((dstw + 1) >> 1) + (dstx & ~dstw & 1);
617 wrap = dst->linesize[0];
618 wrap3 = rect->pict.linesize[0];
619 p = rect->pict.data[0];
620 pal = (const uint32_t *)rect->pict.data[1]; /* Now in YCrCb! */
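    /* The subtitle rectangle is a paletted bitmap whose palette has been
     * converted to YUV; each pixel is alpha-blended onto the YUV420P picture.
     * Luma is blended per pixel, chroma per 2x2 block, which is why rows and
     * columns at odd x/y offsets are handled separately below. */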
628 YUVA_IN(y, u, v, a, p, pal);
629 lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
630 cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
631 cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
637 for (w = dstw - (dstx & 1); w >= 2; w -= 2) {
638 YUVA_IN(y, u, v, a, p, pal);
642 lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
644 YUVA_IN(y, u, v, a, p + BPP, pal);
648 lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
649 cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
650 cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
657 YUVA_IN(y, u, v, a, p, pal);
658 lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
659 cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
660 cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
664 p += wrap3 - dstw * BPP;
665 lum += wrap - dstw - dstx;
666 cb += dst->linesize[1] - width2 - skip2;
667 cr += dst->linesize[2] - width2 - skip2;
669 for (h = dsth - (dsty & 1); h >= 2; h -= 2) {
675 YUVA_IN(y, u, v, a, p, pal);
679 lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
682 YUVA_IN(y, u, v, a, p, pal);
686 lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
687 cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
688 cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
694 for (w = dstw - (dstx & 1); w >= 2; w -= 2) {
695 YUVA_IN(y, u, v, a, p, pal);
699 lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
701 YUVA_IN(y, u, v, a, p + BPP, pal);
705 lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
709 YUVA_IN(y, u, v, a, p, pal);
713 lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
715 YUVA_IN(y, u, v, a, p + BPP, pal);
719 lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
721 cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 2);
722 cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 2);
726 p += -wrap3 + 2 * BPP;
730 YUVA_IN(y, u, v, a, p, pal);
734 lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
737 YUVA_IN(y, u, v, a, p, pal);
741 lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
742 cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
743 cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
749 p += wrap3 + (wrap3 - dstw * BPP);
750 lum += wrap + (wrap - dstw - dstx);
751 cb += dst->linesize[1] - width2 - skip2;
752 cr += dst->linesize[2] - width2 - skip2;
754 /* handle odd height */
761 YUVA_IN(y, u, v, a, p, pal);
762 lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
763 cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
764 cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
770 for (w = dstw - (dstx & 1); w >= 2; w -= 2) {
771 YUVA_IN(y, u, v, a, p, pal);
775 lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
777 YUVA_IN(y, u, v, a, p + BPP, pal);
781 lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
782 cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u, 1);
783 cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v, 1);
790 YUVA_IN(y, u, v, a, p, pal);
791 lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
792 cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
793 cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
798 static void free_picture(VideoPicture *vp)
801 SDL_FreeYUVOverlay(vp->bmp);
806 static void free_subpicture(SubPicture *sp)
808 avsubtitle_free(&sp->sub);
811 static void calculate_display_rect(SDL_Rect *rect,
812 int scr_xleft, int scr_ytop, int scr_width, int scr_height,
813 int pic_width, int pic_height, AVRational pic_sar)
816 int width, height, x, y;
818 if (pic_sar.num == 0)
821 aspect_ratio = av_q2d(pic_sar);
823 if (aspect_ratio <= 0.0)
825 aspect_ratio *= (float)pic_width / (float)pic_height;
827 /* XXX: we assume the screen has a 1.0 pixel aspect ratio */
829 width = ((int)rint(height * aspect_ratio)) & ~1;
830 if (width > scr_width) {
832 height = ((int)rint(width / aspect_ratio)) & ~1;
834 x = (scr_width - width) / 2;
835 y = (scr_height - height) / 2;
836 rect->x = scr_xleft + x;
837 rect->y = scr_ytop + y;
838 rect->w = FFMAX(width, 1);
839 rect->h = FFMAX(height, 1);
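    /* The picture is fitted into the largest even-sized rectangle with the
     * correct display aspect ratio that fits the given screen area, then
     * centered within it. */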
842 static void video_image_display(VideoState *is)
850 vp = &is->pictq[(is->pictq_rindex + is->pictq_rindex_shown) % VIDEO_PICTURE_QUEUE_SIZE];
852 if (is->subtitle_st) {
853 if (is->subpq_size > 0) {
854 sp = &is->subpq[is->subpq_rindex];
856 if (vp->pts >= sp->pts + ((float) sp->sub.start_display_time / 1000)) {
857 SDL_LockYUVOverlay (vp->bmp);
859 pict.data[0] = vp->bmp->pixels[0];
860 pict.data[1] = vp->bmp->pixels[2];
861 pict.data[2] = vp->bmp->pixels[1];
863 pict.linesize[0] = vp->bmp->pitches[0];
864 pict.linesize[1] = vp->bmp->pitches[2];
865 pict.linesize[2] = vp->bmp->pitches[1];
867 for (i = 0; i < sp->sub.num_rects; i++)
868 blend_subrect(&pict, sp->sub.rects[i],
869 vp->bmp->w, vp->bmp->h);
871 SDL_UnlockYUVOverlay (vp->bmp);
876 calculate_display_rect(&rect, is->xleft, is->ytop, is->width, is->height, vp->width, vp->height, vp->sar);
878 SDL_DisplayYUVOverlay(vp->bmp, &rect);
880 if (rect.x != is->last_display_rect.x || rect.y != is->last_display_rect.y || rect.w != is->last_display_rect.w || rect.h != is->last_display_rect.h || is->force_refresh) {
881 int bgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0x00);
882 fill_border(is->xleft, is->ytop, is->width, is->height, rect.x, rect.y, rect.w, rect.h, bgcolor, 1);
883 is->last_display_rect = rect;
888 static inline int compute_mod(int a, int b)
890 return a < 0 ? a%b + b : a%b;
893 static void video_audio_display(VideoState *s)
895 int i, i_start, x, y1, y, ys, delay, n, nb_display_channels;
896 int ch, channels, h, h2, bgcolor, fgcolor;
898 int rdft_bits, nb_freq;
900 for (rdft_bits = 1; (1 << rdft_bits) < 2 * s->height; rdft_bits++)
902 nb_freq = 1 << (rdft_bits - 1);
904 /* compute display index: center on the currently output samples */
905 channels = s->audio_tgt.channels;
906 nb_display_channels = channels;
908 int data_used= s->show_mode == SHOW_MODE_WAVES ? s->width : (2*nb_freq);
910 delay = s->audio_write_buf_size;
913 /* to be more precise, we take into account the time spent since
914 the last buffer computation */
915 if (audio_callback_time) {
916 time_diff = av_gettime_relative() - audio_callback_time;
917 delay -= (time_diff * s->audio_tgt.freq) / 1000000;
920 delay += 2 * data_used;
921 if (delay < data_used)
924 i_start= x = compute_mod(s->sample_array_index - delay * channels, SAMPLE_ARRAY_SIZE);
925 if (s->show_mode == SHOW_MODE_WAVES) {
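        /* In waveform mode, scan the most recent samples for a strong
         * downward zero crossing and use it as a trigger point, so the
         * displayed wave stays roughly phase-aligned between refreshes. */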
927 for (i = 0; i < 1000; i += channels) {
928 int idx = (SAMPLE_ARRAY_SIZE + x - i) % SAMPLE_ARRAY_SIZE;
929 int a = s->sample_array[idx];
930 int b = s->sample_array[(idx + 4 * channels) % SAMPLE_ARRAY_SIZE];
931 int c = s->sample_array[(idx + 5 * channels) % SAMPLE_ARRAY_SIZE];
932 int d = s->sample_array[(idx + 9 * channels) % SAMPLE_ARRAY_SIZE];
934 if (h < score && (b ^ c) < 0) {
941 s->last_i_start = i_start;
943 i_start = s->last_i_start;
946 bgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0x00);
947 if (s->show_mode == SHOW_MODE_WAVES) {
948 fill_rectangle(screen,
949 s->xleft, s->ytop, s->width, s->height,
952 fgcolor = SDL_MapRGB(screen->format, 0xff, 0xff, 0xff);
954 /* total height for one channel */
955 h = s->height / nb_display_channels;
956 /* graph height / 2 */
958 for (ch = 0; ch < nb_display_channels; ch++) {
960 y1 = s->ytop + ch * h + (h / 2); /* position of center line */
961 for (x = 0; x < s->width; x++) {
962 y = (s->sample_array[i] * h2) >> 15;
969 fill_rectangle(screen,
970 s->xleft + x, ys, 1, y,
973 if (i >= SAMPLE_ARRAY_SIZE)
974 i -= SAMPLE_ARRAY_SIZE;
978 fgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0xff);
980 for (ch = 1; ch < nb_display_channels; ch++) {
981 y = s->ytop + ch * h;
982 fill_rectangle(screen,
983 s->xleft, y, s->width, 1,
986 SDL_UpdateRect(screen, s->xleft, s->ytop, s->width, s->height);
988 nb_display_channels= FFMIN(nb_display_channels, 2);
989 if (rdft_bits != s->rdft_bits) {
990 av_rdft_end(s->rdft);
991 av_free(s->rdft_data);
992 s->rdft = av_rdft_init(rdft_bits, DFT_R2C);
993 s->rdft_bits = rdft_bits;
994 s->rdft_data = av_malloc(4 * nb_freq * sizeof(*s->rdft_data));
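        /* One column of the spectrogram is drawn per refresh: the recent
         * samples are windowed with (1 - w*w), run through a real FFT, and
         * the per-channel magnitudes drive the column's pixel colors; xpos
         * advances the column across the screen. */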
998 for (ch = 0; ch < nb_display_channels; ch++) {
999 data[ch] = s->rdft_data + 2 * nb_freq * ch;
1001 for (x = 0; x < 2 * nb_freq; x++) {
1002 double w = (x-nb_freq) * (1.0 / nb_freq);
1003 data[ch][x] = s->sample_array[i] * (1.0 - w * w);
1005 if (i >= SAMPLE_ARRAY_SIZE)
1006 i -= SAMPLE_ARRAY_SIZE;
1008 av_rdft_calc(s->rdft, data[ch]);
1010 /* Least efficient way to do this; we should of course
1011 * access the data directly, but this is more than fast enough. */
1012 for (y = 0; y < s->height; y++) {
1013 double w = 1 / sqrt(nb_freq);
1014 int a = sqrt(w * sqrt(data[0][2 * y + 0] * data[0][2 * y + 0] + data[0][2 * y + 1] * data[0][2 * y + 1]));
1015 int b = (nb_display_channels == 2 ) ? sqrt(w * sqrt(data[1][2 * y + 0] * data[1][2 * y + 0]
1016 + data[1][2 * y + 1] * data[1][2 * y + 1])) : a;
1019 fgcolor = SDL_MapRGB(screen->format, a, b, (a + b) / 2);
1021 fill_rectangle(screen,
1022 s->xpos, s->height-y, 1, 1,
1026 SDL_UpdateRect(screen, s->xpos, s->ytop, 1, s->height);
1029 if (s->xpos >= s->width)
1034 static void stream_close(VideoState *is)
1037 /* XXX: use a special url_shutdown call to abort parse cleanly */
1038 is->abort_request = 1;
1039 SDL_WaitThread(is->read_tid, NULL);
1040 packet_queue_destroy(&is->videoq);
1041 packet_queue_destroy(&is->audioq);
1042 packet_queue_destroy(&is->subtitleq);
1044 /* free all pictures */
1045 for (i = 0; i < VIDEO_PICTURE_QUEUE_SIZE; i++)
1046 free_picture(&is->pictq[i]);
1047 for (i = 0; i < SUBPICTURE_QUEUE_SIZE; i++)
1048 free_subpicture(&is->subpq[i]);
1049 SDL_DestroyMutex(is->pictq_mutex);
1050 SDL_DestroyCond(is->pictq_cond);
1051 SDL_DestroyMutex(is->subpq_mutex);
1052 SDL_DestroyCond(is->subpq_cond);
1053 SDL_DestroyCond(is->continue_read_thread);
1054 #if !CONFIG_AVFILTER
1055 sws_freeContext(is->img_convert_ctx);
1060 static void do_exit(VideoState *is)
1065 av_lockmgr_register(NULL);
1068 av_freep(&vfilters_list);
1070 avformat_network_deinit();
1074 av_log(NULL, AV_LOG_QUIET, "%s", "");
1078 static void sigterm_handler(int sig)
1083 static void set_default_window_size(int width, int height, AVRational sar)
1086 calculate_display_rect(&rect, 0, 0, INT_MAX, height, width, height, sar);
1087 default_width = rect.w;
1088 default_height = rect.h;
1091 static int video_open(VideoState *is, int force_set_video_mode, VideoPicture *vp)
1093 int flags = SDL_HWSURFACE | SDL_ASYNCBLIT | SDL_HWACCEL;
1096 if (is_full_screen) flags |= SDL_FULLSCREEN;
1097 else flags |= SDL_RESIZABLE;
1099 if (vp && vp->width)
1100 set_default_window_size(vp->width, vp->height, vp->sar);
1102 if (is_full_screen && fs_screen_width) {
1103 w = fs_screen_width;
1104 h = fs_screen_height;
1105 } else if (!is_full_screen && screen_width) {
1112 w = FFMIN(16383, w);
1113 if (screen && is->width == screen->w && screen->w == w
1114 && is->height== screen->h && screen->h == h && !force_set_video_mode)
1116 screen = SDL_SetVideoMode(w, h, 0, flags);
1118 av_log(NULL, AV_LOG_FATAL, "SDL: could not set video mode - exiting\n");
1122 window_title = input_filename;
1123 SDL_WM_SetCaption(window_title, window_title);
1125 is->width = screen->w;
1126 is->height = screen->h;
1131 /* display the current picture, if any */
1132 static void video_display(VideoState *is)
1135 video_open(is, 0, NULL);
1136 if (is->audio_st && is->show_mode != SHOW_MODE_VIDEO)
1137 video_audio_display(is);
1138 else if (is->video_st)
1139 video_image_display(is);
1142 static double get_clock(Clock *c)
1144 if (*c->queue_serial != c->serial)
1149 double time = av_gettime_relative() / 1000000.0;
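        /* pts_drift is pts minus the system time at the last update, so
         * pts_drift + time equals pts plus the elapsed real time; the last
         * term rescales that elapsed time when the clock speed is not 1.0. */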
1150 return c->pts_drift + time - (time - c->last_updated) * (1.0 - c->speed);
1154 static void set_clock_at(Clock *c, double pts, int serial, double time)
1157 c->last_updated = time;
1158 c->pts_drift = c->pts - time;
1162 static void set_clock(Clock *c, double pts, int serial)
1164 double time = av_gettime_relative() / 1000000.0;
1165 set_clock_at(c, pts, serial, time);
1168 static void set_clock_speed(Clock *c, double speed)
1170 set_clock(c, get_clock(c), c->serial);
1174 static void init_clock(Clock *c, int *queue_serial)
1178 c->queue_serial = queue_serial;
1179 set_clock(c, NAN, -1);
1182 static void sync_clock_to_slave(Clock *c, Clock *slave)
1184 double clock = get_clock(c);
1185 double slave_clock = get_clock(slave);
1186 if (!isnan(slave_clock) && (isnan(clock) || fabs(clock - slave_clock) > AV_NOSYNC_THRESHOLD))
1187 set_clock(c, slave_clock, slave->serial);
1190 static int get_master_sync_type(VideoState *is) {
1191 if (is->av_sync_type == AV_SYNC_VIDEO_MASTER) {
1193 return AV_SYNC_VIDEO_MASTER;
1195 return AV_SYNC_AUDIO_MASTER;
1196 } else if (is->av_sync_type == AV_SYNC_AUDIO_MASTER) {
1198 return AV_SYNC_AUDIO_MASTER;
1200 return AV_SYNC_EXTERNAL_CLOCK;
1202 return AV_SYNC_EXTERNAL_CLOCK;
1206 /* get the current master clock value */
1207 static double get_master_clock(VideoState *is)
1211 switch (get_master_sync_type(is)) {
1212 case AV_SYNC_VIDEO_MASTER:
1213 val = get_clock(&is->vidclk);
1215 case AV_SYNC_AUDIO_MASTER:
1216 val = get_clock(&is->audclk);
1219 val = get_clock(&is->extclk);
1225 static void check_external_clock_speed(VideoState *is) {
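    /* For realtime sources slaved to the external clock: slow the clock down
     * slightly when the packet queues run low (so they can refill), speed it
     * up when they are comfortably full, and otherwise ease the speed back
     * toward 1.0. */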
1226 if (is->video_stream >= 0 && is->videoq.nb_packets <= MIN_FRAMES / 2 ||
1227 is->audio_stream >= 0 && is->audioq.nb_packets <= MIN_FRAMES / 2) {
1228 set_clock_speed(&is->extclk, FFMAX(EXTERNAL_CLOCK_SPEED_MIN, is->extclk.speed - EXTERNAL_CLOCK_SPEED_STEP));
1229 } else if ((is->video_stream < 0 || is->videoq.nb_packets > MIN_FRAMES * 2) &&
1230 (is->audio_stream < 0 || is->audioq.nb_packets > MIN_FRAMES * 2)) {
1231 set_clock_speed(&is->extclk, FFMIN(EXTERNAL_CLOCK_SPEED_MAX, is->extclk.speed + EXTERNAL_CLOCK_SPEED_STEP));
1233 double speed = is->extclk.speed;
1235 set_clock_speed(&is->extclk, speed + EXTERNAL_CLOCK_SPEED_STEP * (1.0 - speed) / fabs(1.0 - speed));
1239 /* seek in the stream */
1240 static void stream_seek(VideoState *is, int64_t pos, int64_t rel, int seek_by_bytes)
1242 if (!is->seek_req) {
1245 is->seek_flags &= ~AVSEEK_FLAG_BYTE;
1247 is->seek_flags |= AVSEEK_FLAG_BYTE;
1249 SDL_CondSignal(is->continue_read_thread);
1253 /* pause or resume the video */
1254 static void stream_toggle_pause(VideoState *is)
1257 is->frame_timer += av_gettime_relative() / 1000000.0 + is->vidclk.pts_drift - is->vidclk.pts;
1258 if (is->read_pause_return != AVERROR(ENOSYS)) {
1259 is->vidclk.paused = 0;
1261 set_clock(&is->vidclk, get_clock(&is->vidclk), is->vidclk.serial);
1263 set_clock(&is->extclk, get_clock(&is->extclk), is->extclk.serial);
1264 is->paused = is->audclk.paused = is->vidclk.paused = is->extclk.paused = !is->paused;
1267 static void toggle_pause(VideoState *is)
1269 stream_toggle_pause(is);
1273 static void step_to_next_frame(VideoState *is)
1275 /* if the stream is paused, unpause it, then step */
1277 stream_toggle_pause(is);
1281 static double compute_target_delay(double delay, VideoState *is)
1283 double sync_threshold, diff;
1285 /* update delay to follow master synchronisation source */
1286 if (get_master_sync_type(is) != AV_SYNC_VIDEO_MASTER) {
1287 /* if video is slave, we try to correct big delays by
1288 duplicating or deleting a frame */
1289 diff = get_clock(&is->vidclk) - get_master_clock(is);
1291 /* skip or repeat frame. We take into account the
1292 delay to compute the threshold. I still don't know
1293 if it is the best guess */
1294 sync_threshold = FFMAX(AV_SYNC_THRESHOLD_MIN, FFMIN(AV_SYNC_THRESHOLD_MAX, delay));
1295 if (!isnan(diff) && fabs(diff) < is->max_frame_duration) {
1296 if (diff <= -sync_threshold)
1297 delay = FFMAX(0, delay + diff);
1298 else if (diff >= sync_threshold && delay > AV_SYNC_FRAMEDUP_THRESHOLD)
1299 delay = delay + diff;
1300 else if (diff >= sync_threshold)
1305 av_dlog(NULL, "video: delay=%0.3f A-V=%f\n",
1311 static double vp_duration(VideoState *is, VideoPicture *vp, VideoPicture *nextvp) {
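    /* The pts distance to the next picture is only meaningful when both
     * pictures belong to the same serial (no seek/flush in between); when it
     * is invalid, fall back to the frame's own estimated duration. */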
1312 if (vp->serial == nextvp->serial) {
1313 double duration = nextvp->pts - vp->pts;
1314 if (isnan(duration) || duration <= 0 || duration > is->max_frame_duration)
1315 return vp->duration;
1323 /* return the number of undisplayed pictures in the queue */
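/* (The picture at rindex stays in the queue after being displayed, flagged by
 * pictq_rindex_shown, so a forced refresh can show it again; once shown it no
 * longer counts as remaining.) */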
1324 static int pictq_nb_remaining(VideoState *is) {
1325 return is->pictq_size - is->pictq_rindex_shown;
1328 /* jump back to the previous picture if available by resetting rindex_shown */
1329 static int pictq_prev_picture(VideoState *is) {
1330 int ret = is->pictq_rindex_shown;
1331 is->pictq_rindex_shown = 0;
1335 static void pictq_next_picture(VideoState *is) {
1336 if (!is->pictq_rindex_shown) {
1337 is->pictq_rindex_shown = 1;
1340 /* update queue size and signal for next picture */
1341 if (++is->pictq_rindex == VIDEO_PICTURE_QUEUE_SIZE)
1342 is->pictq_rindex = 0;
1344 SDL_LockMutex(is->pictq_mutex);
1346 SDL_CondSignal(is->pictq_cond);
1347 SDL_UnlockMutex(is->pictq_mutex);
1350 static void update_video_pts(VideoState *is, double pts, int64_t pos, int serial) {
1351 /* update current video pts */
1352 set_clock(&is->vidclk, pts, serial);
1353 sync_clock_to_slave(&is->extclk, &is->vidclk);
1354 is->video_current_pos = pos;
1357 /* called to display each frame */
1358 static void video_refresh(void *opaque, double *remaining_time)
1360 VideoState *is = opaque;
1363 SubPicture *sp, *sp2;
1365 if (!is->paused && get_master_sync_type(is) == AV_SYNC_EXTERNAL_CLOCK && is->realtime)
1366 check_external_clock_speed(is);
1368 if (!display_disable && is->show_mode != SHOW_MODE_VIDEO && is->audio_st) {
1369 time = av_gettime_relative() / 1000000.0;
1370 if (is->force_refresh || is->last_vis_time + rdftspeed < time) {
1372 is->last_vis_time = time;
1374 *remaining_time = FFMIN(*remaining_time, is->last_vis_time + rdftspeed - time);
1379 if (is->force_refresh)
1380 redisplay = pictq_prev_picture(is);
1382 if (pictq_nb_remaining(is) == 0) {
1383 // nothing to do, no picture to display in the queue
1385 double last_duration, duration, delay;
1386 VideoPicture *vp, *lastvp;
1388 /* dequeue the picture */
1389 lastvp = &is->pictq[is->pictq_rindex];
1390 vp = &is->pictq[(is->pictq_rindex + is->pictq_rindex_shown) % VIDEO_PICTURE_QUEUE_SIZE];
1392 if (vp->serial != is->videoq.serial) {
1393 pictq_next_picture(is);
1394 is->video_current_pos = -1;
1399 if (lastvp->serial != vp->serial && !redisplay)
1400 is->frame_timer = av_gettime_relative() / 1000000.0;
1405 /* compute nominal last_duration */
1406 last_duration = vp_duration(is, lastvp, vp);
1410 delay = compute_target_delay(last_duration, is);
1412 time= av_gettime_relative()/1000000.0;
1413 if (time < is->frame_timer + delay && !redisplay) {
1414 *remaining_time = FFMIN(is->frame_timer + delay - time, *remaining_time);
1418 is->frame_timer += delay;
1419 if (delay > 0 && time - is->frame_timer > AV_SYNC_THRESHOLD_MAX)
1420 is->frame_timer = time;
1422 SDL_LockMutex(is->pictq_mutex);
1423 if (!redisplay && !isnan(vp->pts))
1424 update_video_pts(is, vp->pts, vp->pos, vp->serial);
1425 SDL_UnlockMutex(is->pictq_mutex);
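            /* Late frame dropping: if the following picture is already due by
             * the time this one would be shown, drop the current picture
             * instead of falling further behind (unless stepping frame by
             * frame or video is the master clock). */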
1427 if (pictq_nb_remaining(is) > 1) {
1428 VideoPicture *nextvp = &is->pictq[(is->pictq_rindex + is->pictq_rindex_shown + 1) % VIDEO_PICTURE_QUEUE_SIZE];
1429 duration = vp_duration(is, vp, nextvp);
1430 if(!is->step && (redisplay || framedrop>0 || (framedrop && get_master_sync_type(is) != AV_SYNC_VIDEO_MASTER)) && time > is->frame_timer + duration){
1432 is->frame_drops_late++;
1433 pictq_next_picture(is);
1439 if (is->subtitle_st) {
1440 while (is->subpq_size > 0) {
1441 sp = &is->subpq[is->subpq_rindex];
1443 if (is->subpq_size > 1)
1444 sp2 = &is->subpq[(is->subpq_rindex + 1) % SUBPICTURE_QUEUE_SIZE];
1448 if (sp->serial != is->subtitleq.serial
1449 || (is->vidclk.pts > (sp->pts + ((float) sp->sub.end_display_time / 1000)))
1450 || (sp2 && is->vidclk.pts > (sp2->pts + ((float) sp2->sub.start_display_time / 1000))))
1452 free_subpicture(sp);
1454 /* update queue size and signal for next picture */
1455 if (++is->subpq_rindex == SUBPICTURE_QUEUE_SIZE)
1456 is->subpq_rindex = 0;
1458 SDL_LockMutex(is->subpq_mutex);
1460 SDL_CondSignal(is->subpq_cond);
1461 SDL_UnlockMutex(is->subpq_mutex);
1469 /* display picture */
1470 if (!display_disable && is->show_mode == SHOW_MODE_VIDEO)
1473 pictq_next_picture(is);
1475 if (is->step && !is->paused)
1476 stream_toggle_pause(is);
1479 is->force_refresh = 0;
1481 static int64_t last_time;
1483 int aqsize, vqsize, sqsize;
1486 cur_time = av_gettime_relative();
1487 if (!last_time || (cur_time - last_time) >= 30000) {
1492 aqsize = is->audioq.size;
1494 vqsize = is->videoq.size;
1495 if (is->subtitle_st)
1496 sqsize = is->subtitleq.size;
1498 if (is->audio_st && is->video_st)
1499 av_diff = get_clock(&is->audclk) - get_clock(&is->vidclk);
1500 else if (is->video_st)
1501 av_diff = get_master_clock(is) - get_clock(&is->vidclk);
1502 else if (is->audio_st)
1503 av_diff = get_master_clock(is) - get_clock(&is->audclk);
1504 av_log(NULL, AV_LOG_INFO,
1505 "%7.2f %s:%7.3f fd=%4d aq=%5dKB vq=%5dKB sq=%5dB f=%"PRId64"/%"PRId64" \r",
1506 get_master_clock(is),
1507 (is->audio_st && is->video_st) ? "A-V" : (is->video_st ? "M-V" : (is->audio_st ? "M-A" : " ")),
1509 is->frame_drops_early + is->frame_drops_late,
1513 is->video_st ? is->video_st->codec->pts_correction_num_faulty_dts : 0,
1514 is->video_st ? is->video_st->codec->pts_correction_num_faulty_pts : 0);
1516 last_time = cur_time;
1521 /* allocate a picture (this needs to be done in the main thread to avoid
1522 potential locking problems) */
1523 static void alloc_picture(VideoState *is)
1528 vp = &is->pictq[is->pictq_windex];
1532 video_open(is, 0, vp);
1534 vp->bmp = SDL_CreateYUVOverlay(vp->width, vp->height,
1537 bufferdiff = vp->bmp ? FFMAX(vp->bmp->pixels[0], vp->bmp->pixels[1]) - FFMIN(vp->bmp->pixels[0], vp->bmp->pixels[1]) : 0;
1538 if (!vp->bmp || vp->bmp->pitches[0] < vp->width || bufferdiff < (int64_t)vp->height * vp->bmp->pitches[0]) {
1539 /* SDL allocates a buffer smaller than requested if the video
1540 * overlay hardware is unable to support the requested size. */
1541 av_log(NULL, AV_LOG_FATAL,
1542 "Error: the video system does not support an image\n"
1543 "size of %dx%d pixels. Try using -lowres or -vf \"scale=w:h\"\n"
1544 "to reduce the image size.\n", vp->width, vp->height );
1548 SDL_LockMutex(is->pictq_mutex);
1550 SDL_CondSignal(is->pictq_cond);
1551 SDL_UnlockMutex(is->pictq_mutex);
1554 static void duplicate_right_border_pixels(SDL_Overlay *bmp) {
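    /* SDL may allocate overlay lines with a pitch wider than the picture;
     * copy each line's last pixel into that padding so horizontal scaling
     * does not read uninitialized bytes at the right border. */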
1555 int i, width, height;
1557 for (i = 0; i < 3; i++) {
1564 if (bmp->pitches[i] > width) {
1565 maxp = bmp->pixels[i] + bmp->pitches[i] * height - 1;
1566 for (p = bmp->pixels[i] + width - 1; p < maxp; p += bmp->pitches[i])
1572 static int queue_picture(VideoState *is, AVFrame *src_frame, double pts, double duration, int64_t pos, int serial)
1576 #if defined(DEBUG_SYNC) && 0
1577 printf("frame_type=%c pts=%0.3f\n",
1578 av_get_picture_type_char(src_frame->pict_type), pts);
1581 /* wait until we have space to put a new picture */
1582 SDL_LockMutex(is->pictq_mutex);
1584 while (is->pictq_size >= VIDEO_PICTURE_QUEUE_SIZE &&
1585 !is->videoq.abort_request) {
1586 SDL_CondWait(is->pictq_cond, is->pictq_mutex);
1588 SDL_UnlockMutex(is->pictq_mutex);
1590 if (is->videoq.abort_request)
1593 vp = &is->pictq[is->pictq_windex];
1595 vp->sar = src_frame->sample_aspect_ratio;
1597 /* alloc or resize hardware picture buffer */
1598 if (!vp->bmp || vp->reallocate || !vp->allocated ||
1599 vp->width != src_frame->width ||
1600 vp->height != src_frame->height) {
1605 vp->width = src_frame->width;
1606 vp->height = src_frame->height;
1608 /* the allocation must be done in the main thread to avoid
1609 locking problems. */
1610 event.type = FF_ALLOC_EVENT;
1611 event.user.data1 = is;
1612 SDL_PushEvent(&event);
1614 /* wait until the picture is allocated */
1615 SDL_LockMutex(is->pictq_mutex);
1616 while (!vp->allocated && !is->videoq.abort_request) {
1617 SDL_CondWait(is->pictq_cond, is->pictq_mutex);
1619 /* if the queue is aborted, we have to pop the pending ALLOC event or wait for the allocation to complete */
1620 if (is->videoq.abort_request && SDL_PeepEvents(&event, 1, SDL_GETEVENT, SDL_EVENTMASK(FF_ALLOC_EVENT)) != 1) {
1621 while (!vp->allocated && !is->abort_request) {
1622 SDL_CondWait(is->pictq_cond, is->pictq_mutex);
1625 SDL_UnlockMutex(is->pictq_mutex);
1627 if (is->videoq.abort_request)
1631 /* if the frame is not skipped, then display it */
1633 AVPicture pict = { { 0 } };
1635 /* get a pointer to the bitmap */
1636 SDL_LockYUVOverlay (vp->bmp);
1638 pict.data[0] = vp->bmp->pixels[0];
1639 pict.data[1] = vp->bmp->pixels[2];
1640 pict.data[2] = vp->bmp->pixels[1];
1642 pict.linesize[0] = vp->bmp->pitches[0];
1643 pict.linesize[1] = vp->bmp->pitches[2];
1644 pict.linesize[2] = vp->bmp->pitches[1];
1647 // FIXME use direct rendering
1648 av_picture_copy(&pict, (AVPicture *)src_frame,
1649 src_frame->format, vp->width, vp->height);
1651 av_opt_get_int(sws_opts, "sws_flags", 0, &sws_flags);
1652 is->img_convert_ctx = sws_getCachedContext(is->img_convert_ctx,
1653 vp->width, vp->height, src_frame->format, vp->width, vp->height,
1654 AV_PIX_FMT_YUV420P, sws_flags, NULL, NULL, NULL);
1655 if (is->img_convert_ctx == NULL) {
1656 av_log(NULL, AV_LOG_FATAL, "Cannot initialize the conversion context\n");
1659 sws_scale(is->img_convert_ctx, src_frame->data, src_frame->linesize,
1660 0, vp->height, pict.data, pict.linesize);
1662 /* workaround SDL PITCH_WORKAROUND */
1663 duplicate_right_border_pixels(vp->bmp);
1664 /* update the bitmap content */
1665 SDL_UnlockYUVOverlay(vp->bmp);
1668 vp->duration = duration;
1670 vp->serial = serial;
1672 /* now we can update the picture count */
1673 if (++is->pictq_windex == VIDEO_PICTURE_QUEUE_SIZE)
1674 is->pictq_windex = 0;
1675 SDL_LockMutex(is->pictq_mutex);
1677 SDL_UnlockMutex(is->pictq_mutex);
1682 static int get_video_frame(VideoState *is, AVFrame *frame, AVPacket *pkt, int *serial)
1686 if (packet_queue_get(&is->videoq, pkt, 1, serial) < 0)
1689 if (pkt->data == flush_pkt.data) {
1690 avcodec_flush_buffers(is->video_st->codec);
1694 if(avcodec_decode_video2(is->video_st->codec, frame, &got_picture, pkt) < 0)
1697 if (!got_picture && !pkt->data)
1698 is->video_finished = *serial;
1704 if (decoder_reorder_pts == -1) {
1705 frame->pts = av_frame_get_best_effort_timestamp(frame);
1706 } else if (decoder_reorder_pts) {
1707 frame->pts = frame->pkt_pts;
1709 frame->pts = frame->pkt_dts;
1712 if (frame->pts != AV_NOPTS_VALUE)
1713 dpts = av_q2d(is->video_st->time_base) * frame->pts;
1715 frame->sample_aspect_ratio = av_guess_sample_aspect_ratio(is->ic, is->video_st, frame);
1717 if (framedrop>0 || (framedrop && get_master_sync_type(is) != AV_SYNC_VIDEO_MASTER)) {
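        /* Early frame dropping: discard the freshly decoded frame before it
         * is filtered and queued when it is already behind the master clock
         * (allowing for the measured filter delay) and more packets are
         * waiting, so decoding can catch up. */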
1718 if (frame->pts != AV_NOPTS_VALUE) {
1719 double diff = dpts - get_master_clock(is);
1720 if (!isnan(diff) && fabs(diff) < AV_NOSYNC_THRESHOLD &&
1721 diff - is->frame_last_filter_delay < 0 &&
1722 *serial == is->vidclk.serial &&
1723 is->videoq.nb_packets) {
1724 is->frame_drops_early++;
1725 av_frame_unref(frame);
1737 static int configure_filtergraph(AVFilterGraph *graph, const char *filtergraph,
1738 AVFilterContext *source_ctx, AVFilterContext *sink_ctx)
1741 int nb_filters = graph->nb_filters;
1742 AVFilterInOut *outputs = NULL, *inputs = NULL;
1745 outputs = avfilter_inout_alloc();
1746 inputs = avfilter_inout_alloc();
1747 if (!outputs || !inputs) {
1748 ret = AVERROR(ENOMEM);
1752 outputs->name = av_strdup("in");
1753 outputs->filter_ctx = source_ctx;
1754 outputs->pad_idx = 0;
1755 outputs->next = NULL;
1757 inputs->name = av_strdup("out");
1758 inputs->filter_ctx = sink_ctx;
1759 inputs->pad_idx = 0;
1760 inputs->next = NULL;
1762 if ((ret = avfilter_graph_parse_ptr(graph, filtergraph, &inputs, &outputs, NULL)) < 0)
1765 if ((ret = avfilter_link(source_ctx, 0, sink_ctx, 0)) < 0)
1769 /* Reorder the filters to ensure that inputs of the custom filters are merged first */
1770 for (i = 0; i < graph->nb_filters - nb_filters; i++)
1771 FFSWAP(AVFilterContext*, graph->filters[i], graph->filters[i + nb_filters]);
1773 ret = avfilter_graph_config(graph, NULL);
1775 avfilter_inout_free(&outputs);
1776 avfilter_inout_free(&inputs);
1780 static int configure_video_filters(AVFilterGraph *graph, VideoState *is, const char *vfilters, AVFrame *frame)
1782 static const enum AVPixelFormat pix_fmts[] = { AV_PIX_FMT_YUV420P, AV_PIX_FMT_NONE };
1783 char sws_flags_str[128];
1784 char buffersrc_args[256];
1786 AVFilterContext *filt_src = NULL, *filt_out = NULL, *last_filter = NULL;
1787 AVCodecContext *codec = is->video_st->codec;
1788 AVRational fr = av_guess_frame_rate(is->ic, is->video_st, NULL);
1790 av_opt_get_int(sws_opts, "sws_flags", 0, &sws_flags);
1791 snprintf(sws_flags_str, sizeof(sws_flags_str), "flags=%"PRId64, sws_flags);
1792 graph->scale_sws_opts = av_strdup(sws_flags_str);
1794 snprintf(buffersrc_args, sizeof(buffersrc_args),
1795 "video_size=%dx%d:pix_fmt=%d:time_base=%d/%d:pixel_aspect=%d/%d",
1796 frame->width, frame->height, frame->format,
1797 is->video_st->time_base.num, is->video_st->time_base.den,
1798 codec->sample_aspect_ratio.num, FFMAX(codec->sample_aspect_ratio.den, 1));
1799 if (fr.num && fr.den)
1800 av_strlcatf(buffersrc_args, sizeof(buffersrc_args), ":frame_rate=%d/%d", fr.num, fr.den);
1802 if ((ret = avfilter_graph_create_filter(&filt_src,
1803 avfilter_get_by_name("buffer"),
1804 "ffplay_buffer", buffersrc_args, NULL,
1808 ret = avfilter_graph_create_filter(&filt_out,
1809 avfilter_get_by_name("buffersink"),
1810 "ffplay_buffersink", NULL, NULL, graph);
1814 if ((ret = av_opt_set_int_list(filt_out, "pix_fmts", pix_fmts, AV_PIX_FMT_NONE, AV_OPT_SEARCH_CHILDREN)) < 0)
1817 last_filter = filt_out;
1819 /* Note: this macro adds a filter before the last added filter, so the
1820 * processing order of the filters is reversed */
1821 #define INSERT_FILT(name, arg) do { \
1822 AVFilterContext *filt_ctx; \
1824 ret = avfilter_graph_create_filter(&filt_ctx, \
1825 avfilter_get_by_name(name), \
1826 "ffplay_" name, arg, NULL, graph); \
1830 ret = avfilter_link(filt_ctx, 0, last_filter, 0); \
1834 last_filter = filt_ctx; \
1837 /* SDL YUV code does not handle odd width/height for some driver
1838 * combinations, therefore we crop the picture to an even width/height. */
1839 INSERT_FILT("crop", "floor(in_w/2)*2:floor(in_h/2)*2");
1842 AVDictionaryEntry *rotate_tag = av_dict_get(is->video_st->metadata, "rotate", NULL, 0);
1843 if (rotate_tag && *rotate_tag->value && strcmp(rotate_tag->value, "0")) {
1844 if (!strcmp(rotate_tag->value, "90")) {
1845 INSERT_FILT("transpose", "clock");
1846 } else if (!strcmp(rotate_tag->value, "180")) {
1847 INSERT_FILT("hflip", NULL);
1848 INSERT_FILT("vflip", NULL);
1849 } else if (!strcmp(rotate_tag->value, "270")) {
1850 INSERT_FILT("transpose", "cclock");
1852 char rotate_buf[64];
1853 snprintf(rotate_buf, sizeof(rotate_buf), "%s*PI/180", rotate_tag->value);
1854 INSERT_FILT("rotate", rotate_buf);
1859 if ((ret = configure_filtergraph(graph, vfilters, filt_src, last_filter)) < 0)
1862 is->in_video_filter = filt_src;
1863 is->out_video_filter = filt_out;
1869 static int configure_audio_filters(VideoState *is, const char *afilters, int force_output_format)
1871 static const enum AVSampleFormat sample_fmts[] = { AV_SAMPLE_FMT_S16, AV_SAMPLE_FMT_NONE };
1872 int sample_rates[2] = { 0, -1 };
1873 int64_t channel_layouts[2] = { 0, -1 };
1874 int channels[2] = { 0, -1 };
1875 AVFilterContext *filt_asrc = NULL, *filt_asink = NULL;
1876 char aresample_swr_opts[512] = "";
1877 AVDictionaryEntry *e = NULL;
1878 char asrc_args[256];
1881 avfilter_graph_free(&is->agraph);
1882 if (!(is->agraph = avfilter_graph_alloc()))
1883 return AVERROR(ENOMEM);
1885 while ((e = av_dict_get(swr_opts, "", e, AV_DICT_IGNORE_SUFFIX)))
1886 av_strlcatf(aresample_swr_opts, sizeof(aresample_swr_opts), "%s=%s:", e->key, e->value);
1887 if (strlen(aresample_swr_opts))
1888 aresample_swr_opts[strlen(aresample_swr_opts)-1] = '\0';
1889 av_opt_set(is->agraph, "aresample_swr_opts", aresample_swr_opts, 0);
1891 ret = snprintf(asrc_args, sizeof(asrc_args),
1892 "sample_rate=%d:sample_fmt=%s:channels=%d:time_base=%d/%d",
1893 is->audio_filter_src.freq, av_get_sample_fmt_name(is->audio_filter_src.fmt),
1894 is->audio_filter_src.channels,
1895 1, is->audio_filter_src.freq);
1896 if (is->audio_filter_src.channel_layout)
1897 snprintf(asrc_args + ret, sizeof(asrc_args) - ret,
1898 ":channel_layout=0x%"PRIx64, is->audio_filter_src.channel_layout);
1900 ret = avfilter_graph_create_filter(&filt_asrc,
1901 avfilter_get_by_name("abuffer"), "ffplay_abuffer",
1902 asrc_args, NULL, is->agraph);
1907 ret = avfilter_graph_create_filter(&filt_asink,
1908 avfilter_get_by_name("abuffersink"), "ffplay_abuffersink",
1909 NULL, NULL, is->agraph);
1913 if ((ret = av_opt_set_int_list(filt_asink, "sample_fmts", sample_fmts, AV_SAMPLE_FMT_NONE, AV_OPT_SEARCH_CHILDREN)) < 0)
1915 if ((ret = av_opt_set_int(filt_asink, "all_channel_counts", 1, AV_OPT_SEARCH_CHILDREN)) < 0)
1918 if (force_output_format) {
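        /* When the output format is forced, the sink is constrained to the
         * current audio_tgt parameters, so the filter graph itself performs
         * any required resampling and channel remixing. */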
1919 channel_layouts[0] = is->audio_tgt.channel_layout;
1920 channels [0] = is->audio_tgt.channels;
1921 sample_rates [0] = is->audio_tgt.freq;
1922 if ((ret = av_opt_set_int(filt_asink, "all_channel_counts", 0, AV_OPT_SEARCH_CHILDREN)) < 0)
1924 if ((ret = av_opt_set_int_list(filt_asink, "channel_layouts", channel_layouts, -1, AV_OPT_SEARCH_CHILDREN)) < 0)
1926 if ((ret = av_opt_set_int_list(filt_asink, "channel_counts" , channels , -1, AV_OPT_SEARCH_CHILDREN)) < 0)
1928 if ((ret = av_opt_set_int_list(filt_asink, "sample_rates" , sample_rates , -1, AV_OPT_SEARCH_CHILDREN)) < 0)
1933 if ((ret = configure_filtergraph(is->agraph, afilters, filt_asrc, filt_asink)) < 0)
1936 is->in_audio_filter = filt_asrc;
1937 is->out_audio_filter = filt_asink;
1941 avfilter_graph_free(&is->agraph);
1944 #endif /* CONFIG_AVFILTER */
1946 static int video_thread(void *arg)
1948 AVPacket pkt = { 0 };
1949 VideoState *is = arg;
1950 AVFrame *frame = av_frame_alloc();
1955 AVRational tb = is->video_st->time_base;
1956 AVRational frame_rate = av_guess_frame_rate(is->ic, is->video_st, NULL);
1959 AVFilterGraph *graph = avfilter_graph_alloc();
1960 AVFilterContext *filt_out = NULL, *filt_in = NULL;
1963 enum AVPixelFormat last_format = -2;
1964 int last_serial = -1;
1965 int last_vfilter_idx = 0;
1969 while (is->paused && !is->videoq.abort_request)
1972 av_free_packet(&pkt);
1974 ret = get_video_frame(is, frame, &pkt, &serial);
1981 if ( last_w != frame->width
1982 || last_h != frame->height
1983 || last_format != frame->format
1984 || last_serial != serial
1985 || last_vfilter_idx != is->vfilter_idx) {
1986 av_log(NULL, AV_LOG_DEBUG,
1987 "Video frame changed from size:%dx%d format:%s serial:%d to size:%dx%d format:%s serial:%d\n",
1989 (const char *)av_x_if_null(av_get_pix_fmt_name(last_format), "none"), last_serial,
1990 frame->width, frame->height,
1991 (const char *)av_x_if_null(av_get_pix_fmt_name(frame->format), "none"), serial);
1992 avfilter_graph_free(&graph);
1993 graph = avfilter_graph_alloc();
1994 if ((ret = configure_video_filters(graph, is, vfilters_list ? vfilters_list[is->vfilter_idx] : NULL, frame)) < 0) {
1996 event.type = FF_QUIT_EVENT;
1997 event.user.data1 = is;
1998 SDL_PushEvent(&event);
2001 filt_in = is->in_video_filter;
2002 filt_out = is->out_video_filter;
2003 last_w = frame->width;
2004 last_h = frame->height;
2005 last_format = frame->format;
2006 last_serial = serial;
2007 last_vfilter_idx = is->vfilter_idx;
2008 frame_rate = filt_out->inputs[0]->frame_rate;
2011 ret = av_buffersrc_add_frame(filt_in, frame);
2016 is->frame_last_returned_time = av_gettime_relative() / 1000000.0;
2018 ret = av_buffersink_get_frame_flags(filt_out, frame, 0);
2020 if (ret == AVERROR_EOF)
2021 is->video_finished = serial;
2026 is->frame_last_filter_delay = av_gettime_relative() / 1000000.0 - is->frame_last_returned_time;
2027 if (fabs(is->frame_last_filter_delay) > AV_NOSYNC_THRESHOLD / 10.0)
2028 is->frame_last_filter_delay = 0;
2029 tb = filt_out->inputs[0]->time_base;
2031 duration = (frame_rate.num && frame_rate.den ? av_q2d((AVRational){frame_rate.den, frame_rate.num}) : 0);
2032 pts = (frame->pts == AV_NOPTS_VALUE) ? NAN : frame->pts * av_q2d(tb);
2033 ret = queue_picture(is, frame, pts, duration, av_frame_get_pkt_pos(frame), serial);
2034 av_frame_unref(frame);
2044 avfilter_graph_free(&graph);
2046 av_free_packet(&pkt);
2047 av_frame_free(&frame);
2051 static int subtitle_thread(void *arg)
2053 VideoState *is = arg;
2055 AVPacket pkt1, *pkt = &pkt1;
2060 int r, g, b, y, u, v, a;
2063 while (is->paused && !is->subtitleq.abort_request) {
2066 if (packet_queue_get(&is->subtitleq, pkt, 1, &serial) < 0)
2069 if (pkt->data == flush_pkt.data) {
2070 avcodec_flush_buffers(is->subtitle_st->codec);
2073 SDL_LockMutex(is->subpq_mutex);
2074 while (is->subpq_size >= SUBPICTURE_QUEUE_SIZE &&
2075 !is->subtitleq.abort_request) {
2076 SDL_CondWait(is->subpq_cond, is->subpq_mutex);
2078 SDL_UnlockMutex(is->subpq_mutex);
2080 if (is->subtitleq.abort_request)
2083 sp = &is->subpq[is->subpq_windex];
2085 /* NOTE: ipts is the PTS of the _first_ picture beginning in
2086 this packet, if any */
2088 if (pkt->pts != AV_NOPTS_VALUE)
2089 pts = av_q2d(is->subtitle_st->time_base) * pkt->pts;
2091 avcodec_decode_subtitle2(is->subtitle_st->codec, &sp->sub,
2092 &got_subtitle, pkt);
2093 if (got_subtitle && sp->sub.format == 0) {
2094 if (sp->sub.pts != AV_NOPTS_VALUE)
2095 pts = sp->sub.pts / (double)AV_TIME_BASE;
2097 sp->serial = serial;
2099 for (i = 0; i < sp->sub.num_rects; i++)
2101 for (j = 0; j < sp->sub.rects[i]->nb_colors; j++)
2103 RGBA_IN(r, g, b, a, (uint32_t*)sp->sub.rects[i]->pict.data[1] + j);
2104 y = RGB_TO_Y_CCIR(r, g, b);
2105 u = RGB_TO_U_CCIR(r, g, b, 0);
2106 v = RGB_TO_V_CCIR(r, g, b, 0);
2107 YUVA_OUT((uint32_t*)sp->sub.rects[i]->pict.data[1] + j, y, u, v, a);
2111 /* now we can update the picture count */
2112 if (++is->subpq_windex == SUBPICTURE_QUEUE_SIZE)
2113 is->subpq_windex = 0;
2114 SDL_LockMutex(is->subpq_mutex);
2116 SDL_UnlockMutex(is->subpq_mutex);
2117 } else if (got_subtitle) {
2118 avsubtitle_free(&sp->sub);
2120 av_free_packet(pkt);
2125 /* copy samples for viewing in editor window */
2126 static void update_sample_display(VideoState *is, short *samples, int samples_size)
2130 size = samples_size / sizeof(short);
2132 len = SAMPLE_ARRAY_SIZE - is->sample_array_index;
2135 memcpy(is->sample_array + is->sample_array_index, samples, len * sizeof(short));
2137 is->sample_array_index += len;
2138 if (is->sample_array_index >= SAMPLE_ARRAY_SIZE)
2139 is->sample_array_index = 0;
2144 /* return the wanted number of samples to get better sync if sync_type is video
2145 * or external master clock */
2146 static int synchronize_audio(VideoState *is, int nb_samples)
2148 int wanted_nb_samples = nb_samples;
2150 /* if not master, then we try to remove or add samples to correct the clock */
2151 if (get_master_sync_type(is) != AV_SYNC_AUDIO_MASTER) {
2152 double diff, avg_diff;
2153 int min_nb_samples, max_nb_samples;
2155 diff = get_clock(&is->audclk) - get_master_clock(is);
2157 if (!isnan(diff) && fabs(diff) < AV_NOSYNC_THRESHOLD) {
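            /* Keep an exponentially weighted sum of recent A-V differences;
             * scaling it by (1 - audio_diff_avg_coef) below turns it into the
             * average actually used for the correction. */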
2158 is->audio_diff_cum = diff + is->audio_diff_avg_coef * is->audio_diff_cum;
2159 if (is->audio_diff_avg_count < AUDIO_DIFF_AVG_NB) {
2160 /* not enough measurements to have a correct estimate */
2161 is->audio_diff_avg_count++;
2163 /* estimate the A-V difference */
2164 avg_diff = is->audio_diff_cum * (1.0 - is->audio_diff_avg_coef);
2166 if (fabs(avg_diff) >= is->audio_diff_threshold) {
2167 wanted_nb_samples = nb_samples + (int)(diff * is->audio_src.freq);
2168 min_nb_samples = ((nb_samples * (100 - SAMPLE_CORRECTION_PERCENT_MAX) / 100));
2169 max_nb_samples = ((nb_samples * (100 + SAMPLE_CORRECTION_PERCENT_MAX) / 100));
2170 wanted_nb_samples = FFMIN(FFMAX(wanted_nb_samples, min_nb_samples), max_nb_samples);
2172 av_dlog(NULL, "diff=%f adiff=%f sample_diff=%d apts=%0.3f %f\n",
2173 diff, avg_diff, wanted_nb_samples - nb_samples,
2174 is->audio_clock, is->audio_diff_threshold);
2177 /* too big a difference: probably initial PTS errors, so
2179 is->audio_diff_avg_count = 0;
2180 is->audio_diff_cum = 0;
2184 return wanted_nb_samples;
2188 * Decode one audio frame and return its uncompressed size.
2190 * The processed audio frame is decoded, converted if required, and
2191 * stored in is->audio_buf, with size in bytes given by the return value.
2194 static int audio_decode_frame(VideoState *is)
2196 AVPacket *pkt_temp = &is->audio_pkt_temp;
2197 AVPacket *pkt = &is->audio_pkt;
2198 AVCodecContext *dec = is->audio_st->codec;
2199 int len1, data_size, resampled_data_size;
2200 int64_t dec_channel_layout;
2202 av_unused double audio_clock0;
2203 int wanted_nb_samples;
2209 /* NOTE: the audio packet can contain several frames */
2210 while (pkt_temp->stream_index != -1 || is->audio_buf_frames_pending) {
2212 if (!(is->frame = av_frame_alloc()))
2213 return AVERROR(ENOMEM);
2215 av_frame_unref(is->frame);
2218 if (is->audioq.serial != is->audio_pkt_temp_serial)
2224 if (!is->audio_buf_frames_pending) {
2225 len1 = avcodec_decode_audio4(dec, is->frame, &got_frame, pkt_temp);
2227 /* on error, we skip the frame */
2233 pkt_temp->pts = AV_NOPTS_VALUE;
2234 pkt_temp->data += len1;
2235 pkt_temp->size -= len1;
2236 if (pkt_temp->data && pkt_temp->size <= 0 || !pkt_temp->data && !got_frame)
2237 pkt_temp->stream_index = -1;
2238 if (!pkt_temp->data && !got_frame)
2239 is->audio_finished = is->audio_pkt_temp_serial;
2244 tb = (AVRational){1, is->frame->sample_rate};
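                /* Pick the best available timestamp for this frame: the
                 * decoder pts, else the packet pts, else extrapolate from the
                 * end of the previous frame (audio_frame_next_pts); each is
                 * rescaled to the 1/sample_rate time base. */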
2245 if (is->frame->pts != AV_NOPTS_VALUE)
2246 is->frame->pts = av_rescale_q(is->frame->pts, dec->time_base, tb);
2247 else if (is->frame->pkt_pts != AV_NOPTS_VALUE)
2248 is->frame->pts = av_rescale_q(is->frame->pkt_pts, is->audio_st->time_base, tb);
2249 else if (is->audio_frame_next_pts != AV_NOPTS_VALUE)
2251 is->frame->pts = av_rescale_q(is->audio_frame_next_pts, (AVRational){1, is->audio_filter_src.freq}, tb);
2253 is->frame->pts = av_rescale_q(is->audio_frame_next_pts, (AVRational){1, is->audio_src.freq}, tb);
2256 if (is->frame->pts != AV_NOPTS_VALUE)
2257 is->audio_frame_next_pts = is->frame->pts + is->frame->nb_samples;
2260 dec_channel_layout = get_valid_channel_layout(is->frame->channel_layout, av_frame_get_channels(is->frame));
2263 cmp_audio_fmts(is->audio_filter_src.fmt, is->audio_filter_src.channels,
2264 is->frame->format, av_frame_get_channels(is->frame)) ||
2265 is->audio_filter_src.channel_layout != dec_channel_layout ||
2266 is->audio_filter_src.freq != is->frame->sample_rate ||
2267 is->audio_pkt_temp_serial != is->audio_last_serial;
2270 char buf1[1024], buf2[1024];
2271 av_get_channel_layout_string(buf1, sizeof(buf1), -1, is->audio_filter_src.channel_layout);
2272 av_get_channel_layout_string(buf2, sizeof(buf2), -1, dec_channel_layout);
2273 av_log(NULL, AV_LOG_DEBUG,
2274 "Audio frame changed from rate:%d ch:%d fmt:%s layout:%s serial:%d to rate:%d ch:%d fmt:%s layout:%s serial:%d\n",
2275 is->audio_filter_src.freq, is->audio_filter_src.channels, av_get_sample_fmt_name(is->audio_filter_src.fmt), buf1, is->audio_last_serial,
2276 is->frame->sample_rate, av_frame_get_channels(is->frame), av_get_sample_fmt_name(is->frame->format), buf2, is->audio_pkt_temp_serial);
2278 is->audio_filter_src.fmt = is->frame->format;
2279 is->audio_filter_src.channels = av_frame_get_channels(is->frame);
2280 is->audio_filter_src.channel_layout = dec_channel_layout;
2281 is->audio_filter_src.freq = is->frame->sample_rate;
2282 is->audio_last_serial = is->audio_pkt_temp_serial;
2284 if ((ret = configure_audio_filters(is, afilters, 1)) < 0)
2288 if ((ret = av_buffersrc_add_frame(is->in_audio_filter, is->frame)) < 0)
2293 if ((ret = av_buffersink_get_frame_flags(is->out_audio_filter, is->frame, 0)) < 0) {
2294 if (ret == AVERROR(EAGAIN)) {
2295 is->audio_buf_frames_pending = 0;
2298 if (ret == AVERROR_EOF)
2299 is->audio_finished = is->audio_pkt_temp_serial;
2302 is->audio_buf_frames_pending = 1;
2303 tb = is->out_audio_filter->inputs[0]->time_base;
2306 data_size = av_samples_get_buffer_size(NULL, av_frame_get_channels(is->frame),
2307 is->frame->nb_samples,
2308 is->frame->format, 1);
2310 dec_channel_layout =
2311 (is->frame->channel_layout && av_frame_get_channels(is->frame) == av_get_channel_layout_nb_channels(is->frame->channel_layout)) ?
2312 is->frame->channel_layout : av_get_default_channel_layout(av_frame_get_channels(is->frame));
2313 wanted_nb_samples = synchronize_audio(is, is->frame->nb_samples);
2315 if (is->frame->format != is->audio_src.fmt ||
2316 dec_channel_layout != is->audio_src.channel_layout ||
2317 is->frame->sample_rate != is->audio_src.freq ||
2318 (wanted_nb_samples != is->frame->nb_samples && !is->swr_ctx)) {
2319 swr_free(&is->swr_ctx);
2320 is->swr_ctx = swr_alloc_set_opts(NULL,
2321 is->audio_tgt.channel_layout, is->audio_tgt.fmt, is->audio_tgt.freq,
2322 dec_channel_layout, is->frame->format, is->frame->sample_rate,
2324 if (!is->swr_ctx || swr_init(is->swr_ctx) < 0) {
2325 av_log(NULL, AV_LOG_ERROR,
2326 "Cannot create sample rate converter for conversion of %d Hz %s %d channels to %d Hz %s %d channels!\n",
2327 is->frame->sample_rate, av_get_sample_fmt_name(is->frame->format), av_frame_get_channels(is->frame),
2328 is->audio_tgt.freq, av_get_sample_fmt_name(is->audio_tgt.fmt), is->audio_tgt.channels);
2331 is->audio_src.channel_layout = dec_channel_layout;
2332 is->audio_src.channels = av_frame_get_channels(is->frame);
2333 is->audio_src.freq = is->frame->sample_rate;
2334 is->audio_src.fmt = is->frame->format;
2338 const uint8_t **in = (const uint8_t **)is->frame->extended_data;
2339 uint8_t **out = &is->audio_buf1;
2340 int out_count = (int64_t)wanted_nb_samples * is->audio_tgt.freq / is->frame->sample_rate + 256;
2341 int out_size = av_samples_get_buffer_size(NULL, is->audio_tgt.channels, out_count, is->audio_tgt.fmt, 0);
2344 av_log(NULL, AV_LOG_ERROR, "av_samples_get_buffer_size() failed\n");
2347 if (wanted_nb_samples != is->frame->nb_samples) {
2348 if (swr_set_compensation(is->swr_ctx, (wanted_nb_samples - is->frame->nb_samples) * is->audio_tgt.freq / is->frame->sample_rate,
2349 wanted_nb_samples * is->audio_tgt.freq / is->frame->sample_rate) < 0) {
2350 av_log(NULL, AV_LOG_ERROR, "swr_set_compensation() failed\n");
2354 av_fast_malloc(&is->audio_buf1, &is->audio_buf1_size, out_size);
2355 if (!is->audio_buf1)
2356 return AVERROR(ENOMEM);
2357 len2 = swr_convert(is->swr_ctx, out, out_count, in, is->frame->nb_samples);
2359 av_log(NULL, AV_LOG_ERROR, "swr_convert() failed\n");
2362 if (len2 == out_count) {
2363 av_log(NULL, AV_LOG_WARNING, "audio buffer is probably too small\n");
2364 swr_init(is->swr_ctx);
2366 is->audio_buf = is->audio_buf1;
2367 resampled_data_size = len2 * is->audio_tgt.channels * av_get_bytes_per_sample(is->audio_tgt.fmt);
2369 is->audio_buf = is->frame->data[0];
2370 resampled_data_size = data_size;
2373 audio_clock0 = is->audio_clock;
2374 /* update the audio clock with the pts */
2375 if (is->frame->pts != AV_NOPTS_VALUE)
2376 is->audio_clock = is->frame->pts * av_q2d(tb) + (double) is->frame->nb_samples / is->frame->sample_rate;
2378 is->audio_clock = NAN;
2379 is->audio_clock_serial = is->audio_pkt_temp_serial;
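            /*
             * Worked example (assumed numbers, for illustration): a frame with
             * pts 44100 in a 1/44100 time base and 1024 samples at 44.1 kHz
             * sets audio_clock to 1.0 + 1024/44100 ~= 1.0232 s, i.e. the clock
             * points at the end of the data just decoded, not at its start.
             */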
2382 static double last_clock;
2383 printf("audio: delay=%0.3f clock=%0.3f clock0=%0.3f\n",
2384 is->audio_clock - last_clock,
2385 is->audio_clock, audio_clock0);
2386 last_clock = is->audio_clock;
2389 return resampled_data_size;
2392 /* free the current packet */
2394 av_free_packet(pkt);
2395 memset(pkt_temp, 0, sizeof(*pkt_temp));
2396 pkt_temp->stream_index = -1;
2398 if (is->audioq.abort_request) {
2402 if (is->audioq.nb_packets == 0)
2403 SDL_CondSignal(is->continue_read_thread);
2405 /* read next packet */
2406 if ((packet_queue_get(&is->audioq, pkt, 1, &is->audio_pkt_temp_serial)) < 0)
2409 if (pkt->data == flush_pkt.data) {
2410 avcodec_flush_buffers(dec);
2411 is->audio_buf_frames_pending = 0;
2412 is->audio_frame_next_pts = AV_NOPTS_VALUE;
2413 if ((is->ic->iformat->flags & (AVFMT_NOBINSEARCH | AVFMT_NOGENSEARCH | AVFMT_NO_BYTE_SEEK)) && !is->ic->iformat->read_seek)
2414 is->audio_frame_next_pts = is->audio_st->start_time;
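/*
 * Illustrative sketch (not part of ffplay): the resampling branch above boils
 * down to the libswresample pattern below. The helper name, the fixed
 * stereo/S16/48 kHz target and the assumption that in->channel_layout is set
 * are choices made for this example only; on success the caller owns *out_buf
 * and releases it with av_freep().
 */
static av_unused int example_resample_to_s16(AVFrame *in, uint8_t **out_buf, int *out_samples)
{
    struct SwrContext *swr;
    int max_out, n;

    swr = swr_alloc_set_opts(NULL,
                             AV_CH_LAYOUT_STEREO, AV_SAMPLE_FMT_S16, 48000,   /* assumed target */
                             in->channel_layout, (enum AVSampleFormat)in->format,
                             in->sample_rate,                                 /* source frame */
                             0, NULL);
    if (!swr || swr_init(swr) < 0) {
        swr_free(&swr);
        return AVERROR(EINVAL);
    }
    /* worst-case output count: rescaled input length plus some headroom */
    max_out = (int)av_rescale_rnd(in->nb_samples, 48000, in->sample_rate, AV_ROUND_UP) + 256;
    if (av_samples_alloc(out_buf, NULL, 2, max_out, AV_SAMPLE_FMT_S16, 0) < 0) {
        swr_free(&swr);
        return AVERROR(ENOMEM);
    }
    n = swr_convert(swr, out_buf, max_out,
                    (const uint8_t **)in->extended_data, in->nb_samples);
    swr_free(&swr);
    if (n < 0)
        return n;
    *out_samples = n;
    return 0;
}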
2421 /* prepare a new audio buffer */
2422 static void sdl_audio_callback(void *opaque, Uint8 *stream, int len)
2424 VideoState *is = opaque;
2425 int audio_size, len1;
2427 audio_callback_time = av_gettime_relative();
2430 if (is->audio_buf_index >= is->audio_buf_size) {
2431 audio_size = audio_decode_frame(is);
2432 if (audio_size < 0) {
2433 /* if error, just output silence */
2434 is->audio_buf = is->silence_buf;
2435 is->audio_buf_size = sizeof(is->silence_buf) / is->audio_tgt.frame_size * is->audio_tgt.frame_size;
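            /* The fallback silence chunk is rounded down to a whole number of
               frames: e.g. an assumed 1024-byte silence_buf with a 6-byte
               frame size yields 1024 / 6 * 6 = 1020 bytes. */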
2437 if (is->show_mode != SHOW_MODE_VIDEO)
2438 update_sample_display(is, (int16_t *)is->audio_buf, audio_size);
2439 is->audio_buf_size = audio_size;
2441 is->audio_buf_index = 0;
2443 len1 = is->audio_buf_size - is->audio_buf_index;
2446 memcpy(stream, (uint8_t *)is->audio_buf + is->audio_buf_index, len1);
2449 is->audio_buf_index += len1;
2451 is->audio_write_buf_size = is->audio_buf_size - is->audio_buf_index;
2452 /* Let's assume the audio driver that is used by SDL has two periods. */
2453 if (!isnan(is->audio_clock)) {
2454 set_clock_at(&is->audclk, is->audio_clock - (double)(2 * is->audio_hw_buf_size + is->audio_write_buf_size) / is->audio_tgt.bytes_per_sec, is->audio_clock_serial, audio_callback_time / 1000000.0);
2455 sync_clock_to_slave(&is->extclk, &is->audclk);
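/*
 * Sketch (illustration only) of the latency estimate applied in
 * sdl_audio_callback() above: with the SDL driver assumed to buffer two
 * hardware periods plus whatever is still unread in our own buffer, the audio
 * currently being heard lags is->audio_clock by roughly this many seconds.
 */
static av_unused double example_audio_latency(int hw_buf_size, int write_buf_size, int bytes_per_sec)
{
    /* e.g. (2 * 8192 + 4096) bytes at 176400 bytes/s (44.1 kHz stereo S16) ~= 0.116 s */
    return (double)(2 * hw_buf_size + write_buf_size) / bytes_per_sec;
}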
2459 static int audio_open(void *opaque, int64_t wanted_channel_layout, int wanted_nb_channels, int wanted_sample_rate, struct AudioParams *audio_hw_params)
2461 SDL_AudioSpec wanted_spec, spec;
2463 static const int next_nb_channels[] = {0, 0, 1, 6, 2, 6, 4, 6};
2464 static const int next_sample_rates[] = {0, 44100, 48000, 96000, 192000};
2465 int next_sample_rate_idx = FF_ARRAY_ELEMS(next_sample_rates) - 1;
2467 env = SDL_getenv("SDL_AUDIO_CHANNELS");
2469 wanted_nb_channels = atoi(env);
2470 wanted_channel_layout = av_get_default_channel_layout(wanted_nb_channels);
2472 if (!wanted_channel_layout || wanted_nb_channels != av_get_channel_layout_nb_channels(wanted_channel_layout)) {
2473 wanted_channel_layout = av_get_default_channel_layout(wanted_nb_channels);
2474 wanted_channel_layout &= ~AV_CH_LAYOUT_STEREO_DOWNMIX;
2476 wanted_nb_channels = av_get_channel_layout_nb_channels(wanted_channel_layout);
2477 wanted_spec.channels = wanted_nb_channels;
2478 wanted_spec.freq = wanted_sample_rate;
2479 if (wanted_spec.freq <= 0 || wanted_spec.channels <= 0) {
2480 av_log(NULL, AV_LOG_ERROR, "Invalid sample rate or channel count!\n");
2483 while (next_sample_rate_idx && next_sample_rates[next_sample_rate_idx] >= wanted_spec.freq)
2484 next_sample_rate_idx--;
2485 wanted_spec.format = AUDIO_S16SYS;
2486 wanted_spec.silence = 0;
2487 wanted_spec.samples = FFMAX(SDL_AUDIO_MIN_BUFFER_SIZE, 2 << av_log2(wanted_spec.freq / SDL_AUDIO_MAX_CALLBACKS_PER_SEC));
2488 wanted_spec.callback = sdl_audio_callback;
2489 wanted_spec.userdata = opaque;
2490 while (SDL_OpenAudio(&wanted_spec, &spec) < 0) {
2491 av_log(NULL, AV_LOG_WARNING, "SDL_OpenAudio (%d channels, %d Hz): %s\n",
2492 wanted_spec.channels, wanted_spec.freq, SDL_GetError());
2493 wanted_spec.channels = next_nb_channels[FFMIN(7, wanted_spec.channels)];
2494 if (!wanted_spec.channels) {
2495 wanted_spec.freq = next_sample_rates[next_sample_rate_idx--];
2496 wanted_spec.channels = wanted_nb_channels;
2497 if (!wanted_spec.freq) {
2498 av_log(NULL, AV_LOG_ERROR,
2499 "No more combinations to try, audio open failed\n");
2503 wanted_channel_layout = av_get_default_channel_layout(wanted_spec.channels);
2505 if (spec.format != AUDIO_S16SYS) {
2506 av_log(NULL, AV_LOG_ERROR,
2507 "SDL advised audio format %d is not supported!\n", spec.format);
2510 if (spec.channels != wanted_spec.channels) {
2511 wanted_channel_layout = av_get_default_channel_layout(spec.channels);
2512 if (!wanted_channel_layout) {
2513 av_log(NULL, AV_LOG_ERROR,
2514 "SDL advised channel count %d is not supported!\n", spec.channels);
2519 audio_hw_params->fmt = AV_SAMPLE_FMT_S16;
2520 audio_hw_params->freq = spec.freq;
2521 audio_hw_params->channel_layout = wanted_channel_layout;
2522 audio_hw_params->channels = spec.channels;
2523 audio_hw_params->frame_size = av_samples_get_buffer_size(NULL, audio_hw_params->channels, 1, audio_hw_params->fmt, 1);
2524 audio_hw_params->bytes_per_sec = av_samples_get_buffer_size(NULL, audio_hw_params->channels, audio_hw_params->freq, audio_hw_params->fmt, 1);
2525 if (audio_hw_params->bytes_per_sec <= 0 || audio_hw_params->frame_size <= 0) {
2526 av_log(NULL, AV_LOG_ERROR, "av_samples_get_buffer_size failed\n");
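/*
 * Sketch (illustration only): how wanted_spec.samples is derived in
 * audio_open() above. The expression picks a power of two large enough to
 * hold at least freq / SDL_AUDIO_MAX_CALLBACKS_PER_SEC samples, so the SDL
 * callback fires at most ~30 times per second. For a 44100 Hz device:
 * 44100 / 30 = 1470, av_log2(1470) = 10, 2 << 10 = 2048 samples.
 */
static av_unused int example_sdl_buffer_samples(int freq)
{
    return FFMAX(SDL_AUDIO_MIN_BUFFER_SIZE,
                 2 << av_log2(freq / SDL_AUDIO_MAX_CALLBACKS_PER_SEC));
}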
2532 /* open a given stream. Return 0 if OK */
2533 static int stream_component_open(VideoState *is, int stream_index)
2535 AVFormatContext *ic = is->ic;
2536 AVCodecContext *avctx;
2538 const char *forced_codec_name = NULL;
2540 AVDictionaryEntry *t = NULL;
2541 int sample_rate, nb_channels;
2542 int64_t channel_layout;
2544 int stream_lowres = lowres;
2546 if (stream_index < 0 || stream_index >= ic->nb_streams)
2548 avctx = ic->streams[stream_index]->codec;
2550 codec = avcodec_find_decoder(avctx->codec_id);
2552 switch(avctx->codec_type){
2553 case AVMEDIA_TYPE_AUDIO : is->last_audio_stream = stream_index; forced_codec_name = audio_codec_name; break;
2554 case AVMEDIA_TYPE_SUBTITLE: is->last_subtitle_stream = stream_index; forced_codec_name = subtitle_codec_name; break;
2555 case AVMEDIA_TYPE_VIDEO : is->last_video_stream = stream_index; forced_codec_name = video_codec_name; break;
2557 if (forced_codec_name)
2558 codec = avcodec_find_decoder_by_name(forced_codec_name);
2560 if (forced_codec_name) av_log(NULL, AV_LOG_WARNING,
2561 "No codec could be found with name '%s'\n", forced_codec_name);
2562 else av_log(NULL, AV_LOG_WARNING,
2563 "No codec could be found with id %d\n", avctx->codec_id);
2567 avctx->codec_id = codec->id;
2568 avctx->workaround_bugs = workaround_bugs;
2569 if (stream_lowres > av_codec_get_max_lowres(codec)) {
2570 av_log(avctx, AV_LOG_WARNING, "The maximum value for lowres supported by the decoder is %d\n",
2571 av_codec_get_max_lowres(codec));
2572 stream_lowres = av_codec_get_max_lowres(codec);
2574 av_codec_set_lowres(avctx, stream_lowres);
2576 if (stream_lowres) avctx->flags |= CODEC_FLAG_EMU_EDGE;
2577 if (fast) avctx->flags2 |= CODEC_FLAG2_FAST;
2578 if (codec->capabilities & CODEC_CAP_DR1)
2579 avctx->flags |= CODEC_FLAG_EMU_EDGE;
2581 opts = filter_codec_opts(codec_opts, avctx->codec_id, ic, ic->streams[stream_index], codec);
2582 if (!av_dict_get(opts, "threads", NULL, 0))
2583 av_dict_set(&opts, "threads", "auto", 0);
2585 av_dict_set(&opts, "lowres", av_asprintf("%d", stream_lowres), AV_DICT_DONT_STRDUP_VAL);
2586 if (avctx->codec_type == AVMEDIA_TYPE_VIDEO || avctx->codec_type == AVMEDIA_TYPE_AUDIO)
2587 av_dict_set(&opts, "refcounted_frames", "1", 0);
2588 if (avcodec_open2(avctx, codec, &opts) < 0)
2590 if ((t = av_dict_get(opts, "", NULL, AV_DICT_IGNORE_SUFFIX))) {
2591 av_log(NULL, AV_LOG_ERROR, "Option %s not found.\n", t->key);
2592 return AVERROR_OPTION_NOT_FOUND;
2595 ic->streams[stream_index]->discard = AVDISCARD_DEFAULT;
2596 switch (avctx->codec_type) {
2597 case AVMEDIA_TYPE_AUDIO:
2602 is->audio_filter_src.freq = avctx->sample_rate;
2603 is->audio_filter_src.channels = avctx->channels;
2604 is->audio_filter_src.channel_layout = get_valid_channel_layout(avctx->channel_layout, avctx->channels);
2605 is->audio_filter_src.fmt = avctx->sample_fmt;
2606 if ((ret = configure_audio_filters(is, afilters, 0)) < 0)
2608 link = is->out_audio_filter->inputs[0];
2609 sample_rate = link->sample_rate;
2610 nb_channels = link->channels;
2611 channel_layout = link->channel_layout;
2614 sample_rate = avctx->sample_rate;
2615 nb_channels = avctx->channels;
2616 channel_layout = avctx->channel_layout;
2619 /* prepare audio output */
2620 if ((ret = audio_open(is, channel_layout, nb_channels, sample_rate, &is->audio_tgt)) < 0)
2622 is->audio_hw_buf_size = ret;
2623 is->audio_src = is->audio_tgt;
2624 is->audio_buf_size = 0;
2625 is->audio_buf_index = 0;
2627 /* init averaging filter */
2628 is->audio_diff_avg_coef = exp(log(0.01) / AUDIO_DIFF_AVG_NB);
2629 is->audio_diff_avg_count = 0;
2630 /* since we do not have a precise enough measure of the audio FIFO fullness,
2631 we correct audio sync only if the error is larger than this threshold */
2632 is->audio_diff_threshold = (double)(is->audio_hw_buf_size) / is->audio_tgt.bytes_per_sec;
2634 memset(&is->audio_pkt, 0, sizeof(is->audio_pkt));
2635 memset(&is->audio_pkt_temp, 0, sizeof(is->audio_pkt_temp));
2636 is->audio_pkt_temp.stream_index = -1;
2638 is->audio_stream = stream_index;
2639 is->audio_st = ic->streams[stream_index];
2641 packet_queue_start(&is->audioq);
2644 case AVMEDIA_TYPE_VIDEO:
2645 is->video_stream = stream_index;
2646 is->video_st = ic->streams[stream_index];
2648 packet_queue_start(&is->videoq);
2649 is->video_tid = SDL_CreateThread(video_thread, is);
2650 is->queue_attachments_req = 1;
2652 case AVMEDIA_TYPE_SUBTITLE:
2653 is->subtitle_stream = stream_index;
2654 is->subtitle_st = ic->streams[stream_index];
2655 packet_queue_start(&is->subtitleq);
2657 is->subtitle_tid = SDL_CreateThread(subtitle_thread, is);
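/*
 * Sketch (illustration only) of the two audio-sync values initialized above:
 * audio_diff_avg_coef is chosen so that coef^AUDIO_DIFF_AVG_NB == 0.01, i.e.
 * in the running average kept by synchronize_audio() a difference measured 20
 * updates ago weighs about 1% of the newest one, and the correction threshold
 * is simply the duration of audio held in the hardware buffer.
 */
static av_unused double example_audio_diff_threshold(int hw_buf_size, int bytes_per_sec)
{
    /* e.g. 8192 bytes at 176400 bytes/s (44.1 kHz stereo S16) ~= 0.046 s */
    return (double)hw_buf_size / bytes_per_sec;
}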
2665 static void stream_component_close(VideoState *is, int stream_index)
2667 AVFormatContext *ic = is->ic;
2668 AVCodecContext *avctx;
2670 if (stream_index < 0 || stream_index >= ic->nb_streams)
2672 avctx = ic->streams[stream_index]->codec;
2674 switch (avctx->codec_type) {
2675 case AVMEDIA_TYPE_AUDIO:
2676 packet_queue_abort(&is->audioq);
2680 packet_queue_flush(&is->audioq);
2681 av_free_packet(&is->audio_pkt);
2682 swr_free(&is->swr_ctx);
2683 av_freep(&is->audio_buf1);
2684 is->audio_buf1_size = 0;
2685 is->audio_buf = NULL;
2686 av_frame_free(&is->frame);
2689 av_rdft_end(is->rdft);
2690 av_freep(&is->rdft_data);
2695 avfilter_graph_free(&is->agraph);
2698 case AVMEDIA_TYPE_VIDEO:
2699 packet_queue_abort(&is->videoq);
2701 /* note: we also signal this condition variable to make sure we unblock the
2702 video thread in all cases */
2703 SDL_LockMutex(is->pictq_mutex);
2704 SDL_CondSignal(is->pictq_cond);
2705 SDL_UnlockMutex(is->pictq_mutex);
2707 SDL_WaitThread(is->video_tid, NULL);
2709 packet_queue_flush(&is->videoq);
2711 case AVMEDIA_TYPE_SUBTITLE:
2712 packet_queue_abort(&is->subtitleq);
2714 /* note: we also signal this condition variable to make sure we unblock the
2715 subtitle thread in all cases */
2716 SDL_LockMutex(is->subpq_mutex);
2717 SDL_CondSignal(is->subpq_cond);
2718 SDL_UnlockMutex(is->subpq_mutex);
2720 SDL_WaitThread(is->subtitle_tid, NULL);
2722 packet_queue_flush(&is->subtitleq);
2728 ic->streams[stream_index]->discard = AVDISCARD_ALL;
2729 avcodec_close(avctx);
2730 switch (avctx->codec_type) {
2731 case AVMEDIA_TYPE_AUDIO:
2732 is->audio_st = NULL;
2733 is->audio_stream = -1;
2735 case AVMEDIA_TYPE_VIDEO:
2736 is->video_st = NULL;
2737 is->video_stream = -1;
2739 case AVMEDIA_TYPE_SUBTITLE:
2740 is->subtitle_st = NULL;
2741 is->subtitle_stream = -1;
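/*
 * Sketch (assumption, SDL 1.2 API) of the shutdown handshake used in
 * stream_component_close() above for the video and subtitle threads: wake any
 * consumer blocked on the condition variable, then join the thread before the
 * corresponding packet queue is flushed.
 */
static av_unused void example_stop_thread(SDL_Thread *tid, SDL_mutex *mutex, SDL_cond *cond)
{
    SDL_LockMutex(mutex);
    SDL_CondSignal(cond);    /* unblock a thread waiting in SDL_CondWait() */
    SDL_UnlockMutex(mutex);
    SDL_WaitThread(tid, NULL);
}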
2748 static int decode_interrupt_cb(void *ctx)
2750 VideoState *is = ctx;
2751 return is->abort_request;
2754 static int is_realtime(AVFormatContext *s)
2756 if (!strcmp(s->iformat->name, "rtp")
2757 || !strcmp(s->iformat->name, "rtsp")
2758 || !strcmp(s->iformat->name, "sdp")
2762 if (s->pb && (!strncmp(s->filename, "rtp:", 4)
2763 || !strncmp(s->filename, "udp:", 4)
2770 /* this thread gets the stream from the disk or the network */
2771 static int read_thread(void *arg)
2773 VideoState *is = arg;
2774 AVFormatContext *ic = NULL;
2776 int st_index[AVMEDIA_TYPE_NB];
2777 AVPacket pkt1, *pkt = &pkt1;
2779 int64_t stream_start_time;
2780 int pkt_in_play_range = 0;
2781 AVDictionaryEntry *t;
2782 AVDictionary **opts;
2783 int orig_nb_streams;
2784 SDL_mutex *wait_mutex = SDL_CreateMutex();
2786 memset(st_index, -1, sizeof(st_index));
2787 is->last_video_stream = is->video_stream = -1;
2788 is->last_audio_stream = is->audio_stream = -1;
2789 is->last_subtitle_stream = is->subtitle_stream = -1;
2791 ic = avformat_alloc_context();
2792 ic->interrupt_callback.callback = decode_interrupt_cb;
2793 ic->interrupt_callback.opaque = is;
2794 err = avformat_open_input(&ic, is->filename, is->iformat, &format_opts);
2796 print_error(is->filename, err);
2800 if ((t = av_dict_get(format_opts, "", NULL, AV_DICT_IGNORE_SUFFIX))) {
2801 av_log(NULL, AV_LOG_ERROR, "Option %s not found.\n", t->key);
2802 ret = AVERROR_OPTION_NOT_FOUND;
2808 ic->flags |= AVFMT_FLAG_GENPTS;
2810 av_format_inject_global_side_data(ic);
2812 opts = setup_find_stream_info_opts(ic, codec_opts);
2813 orig_nb_streams = ic->nb_streams;
2815 err = avformat_find_stream_info(ic, opts);
2817 av_log(NULL, AV_LOG_WARNING,
2818 "%s: could not find codec parameters\n", is->filename);
2822 for (i = 0; i < orig_nb_streams; i++)
2823 av_dict_free(&opts[i]);
2827 ic->pb->eof_reached = 0; // FIXME hack, ffplay maybe should not use url_feof() to test for the end
2829 if (seek_by_bytes < 0)
2830 seek_by_bytes = !!(ic->iformat->flags & AVFMT_TS_DISCONT) && strcmp("ogg", ic->iformat->name);
2832 is->max_frame_duration = (ic->iformat->flags & AVFMT_TS_DISCONT) ? 10.0 : 3600.0;
2834 if (!window_title && (t = av_dict_get(ic->metadata, "title", NULL, 0)))
2835 window_title = av_asprintf("%s - %s", t->value, input_filename);
2837 /* if seeking was requested, execute it */
2838 if (start_time != AV_NOPTS_VALUE) {
2841 timestamp = start_time;
2842 /* add the stream start time */
2843 if (ic->start_time != AV_NOPTS_VALUE)
2844 timestamp += ic->start_time;
2845 ret = avformat_seek_file(ic, -1, INT64_MIN, timestamp, INT64_MAX, 0);
2847 av_log(NULL, AV_LOG_WARNING, "%s: could not seek to position %0.3f\n",
2848 is->filename, (double)timestamp / AV_TIME_BASE);
2852 is->realtime = is_realtime(ic);
2854 for (i = 0; i < ic->nb_streams; i++)
2855 ic->streams[i]->discard = AVDISCARD_ALL;
2857 st_index[AVMEDIA_TYPE_VIDEO] =
2858 av_find_best_stream(ic, AVMEDIA_TYPE_VIDEO,
2859 wanted_stream[AVMEDIA_TYPE_VIDEO], -1, NULL, 0);
2861 st_index[AVMEDIA_TYPE_AUDIO] =
2862 av_find_best_stream(ic, AVMEDIA_TYPE_AUDIO,
2863 wanted_stream[AVMEDIA_TYPE_AUDIO],
2864 st_index[AVMEDIA_TYPE_VIDEO],
2866 if (!video_disable && !subtitle_disable)
2867 st_index[AVMEDIA_TYPE_SUBTITLE] =
2868 av_find_best_stream(ic, AVMEDIA_TYPE_SUBTITLE,
2869 wanted_stream[AVMEDIA_TYPE_SUBTITLE],
2870 (st_index[AVMEDIA_TYPE_AUDIO] >= 0 ?
2871 st_index[AVMEDIA_TYPE_AUDIO] :
2872 st_index[AVMEDIA_TYPE_VIDEO]),
2875 av_dump_format(ic, 0, is->filename, 0);
2878 is->show_mode = show_mode;
2879 if (st_index[AVMEDIA_TYPE_VIDEO] >= 0) {
2880 AVStream *st = ic->streams[st_index[AVMEDIA_TYPE_VIDEO]];
2881 AVCodecContext *avctx = st->codec;
2882 AVRational sar = av_guess_sample_aspect_ratio(ic, st, NULL);
2884 set_default_window_size(avctx->width, avctx->height, sar);
2887 /* open the streams */
2888 if (st_index[AVMEDIA_TYPE_AUDIO] >= 0) {
2889 stream_component_open(is, st_index[AVMEDIA_TYPE_AUDIO]);
2893 if (st_index[AVMEDIA_TYPE_VIDEO] >= 0) {
2894 ret = stream_component_open(is, st_index[AVMEDIA_TYPE_VIDEO]);
2896 if (is->show_mode == SHOW_MODE_NONE)
2897 is->show_mode = ret >= 0 ? SHOW_MODE_VIDEO : SHOW_MODE_RDFT;
2899 if (st_index[AVMEDIA_TYPE_SUBTITLE] >= 0) {
2900 stream_component_open(is, st_index[AVMEDIA_TYPE_SUBTITLE]);
2903 if (is->video_stream < 0 && is->audio_stream < 0) {
2904 av_log(NULL, AV_LOG_FATAL, "Failed to open file '%s' or configure filtergraph\n",
2910 if (infinite_buffer < 0 && is->realtime)
2911 infinite_buffer = 1;
2914 if (is->abort_request)
2916 if (is->paused != is->last_paused) {
2917 is->last_paused = is->paused;
2919 is->read_pause_return = av_read_pause(ic);
2923 #if CONFIG_RTSP_DEMUXER || CONFIG_MMSH_PROTOCOL
2925 (!strcmp(ic->iformat->name, "rtsp") ||
2926 (ic->pb && !strncmp(input_filename, "mmsh:", 5)))) {
2927 /* wait 10 ms to avoid trying to get another packet */
2934 int64_t seek_target = is->seek_pos;
2935 int64_t seek_min = is->seek_rel > 0 ? seek_target - is->seek_rel + 2: INT64_MIN;
2936 int64_t seek_max = is->seek_rel < 0 ? seek_target - is->seek_rel - 2: INT64_MAX;
2937 // FIXME the +-2 compensates for rounding not being done in the correct direction
2938 // when the seek_pos/seek_rel variables are generated
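            /* The window handed to avformat_seek_file() below means: land on
               any seekable point in [seek_min, seek_max]. For a relative
               backward seek the upper bound stays below the old position, and
               for a forward seek the lower bound stays above it, so the
               demuxer cannot snap back to where we started; the +-2 leaves
               margin for the rounding noted above. */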
2940 ret = avformat_seek_file(is->ic, -1, seek_min, seek_target, seek_max, is->seek_flags);
2942 av_log(NULL, AV_LOG_ERROR,
2943 "%s: error while seeking\n", is->ic->filename);
2945 if (is->audio_stream >= 0) {
2946 packet_queue_flush(&is->audioq);
2947 packet_queue_put(&is->audioq, &flush_pkt);
2949 if (is->subtitle_stream >= 0) {
2950 packet_queue_flush(&is->subtitleq);
2951 packet_queue_put(&is->subtitleq, &flush_pkt);
2953 if (is->video_stream >= 0) {
2954 packet_queue_flush(&is->videoq);
2955 packet_queue_put(&is->videoq, &flush_pkt);
2957 if (is->seek_flags & AVSEEK_FLAG_BYTE) {
2958 set_clock(&is->extclk, NAN, 0);
2960 set_clock(&is->extclk, seek_target / (double)AV_TIME_BASE, 0);
2964 is->queue_attachments_req = 1;
2967 step_to_next_frame(is);
2969 if (is->queue_attachments_req) {
2970 if (is->video_st && is->video_st->disposition & AV_DISPOSITION_ATTACHED_PIC) {
2972 if ((ret = av_copy_packet(&copy, &is->video_st->attached_pic)) < 0)
2974 packet_queue_put(&is->videoq, &copy);
2975 packet_queue_put_nullpacket(&is->videoq, is->video_stream);
2977 is->queue_attachments_req = 0;
2980 /* if the queues are full, no need to read more */
2981 if (infinite_buffer < 1 &&
2982 (is->audioq.size + is->videoq.size + is->subtitleq.size > MAX_QUEUE_SIZE
2983 || ( (is->audioq .nb_packets > MIN_FRAMES || is->audio_stream < 0 || is->audioq.abort_request)
2984 && (is->videoq .nb_packets > MIN_FRAMES || is->video_stream < 0 || is->videoq.abort_request
2985 || (is->video_st->disposition & AV_DISPOSITION_ATTACHED_PIC))
2986 && (is->subtitleq.nb_packets > MIN_FRAMES || is->subtitle_stream < 0 || is->subtitleq.abort_request)))) {
2988 SDL_LockMutex(wait_mutex);
2989 SDL_CondWaitTimeout(is->continue_read_thread, wait_mutex, 10);
2990 SDL_UnlockMutex(wait_mutex);
2994 (!is->audio_st || is->audio_finished == is->audioq.serial) &&
2995 (!is->video_st || (is->video_finished == is->videoq.serial && pictq_nb_remaining(is) == 0))) {
2996 if (loop != 1 && (!loop || --loop)) {
2997 stream_seek(is, start_time != AV_NOPTS_VALUE ? start_time : 0, 0, 0);
2998 } else if (autoexit) {
3004 if (is->video_stream >= 0)
3005 packet_queue_put_nullpacket(&is->videoq, is->video_stream);
3006 if (is->audio_stream >= 0)
3007 packet_queue_put_nullpacket(&is->audioq, is->audio_stream);
3008 if (is->subtitle_stream >= 0)
3009 packet_queue_put_nullpacket(&is->subtitleq, is->subtitle_stream);
3014 ret = av_read_frame(ic, pkt);
3016 if (ret == AVERROR_EOF || url_feof(ic->pb))
3018 if (ic->pb && ic->pb->error)
3020 SDL_LockMutex(wait_mutex);
3021 SDL_CondWaitTimeout(is->continue_read_thread, wait_mutex, 10);
3022 SDL_UnlockMutex(wait_mutex);
3025 /* check if the packet is in the play range specified by the user; queue it if so, otherwise discard it */
3026 stream_start_time = ic->streams[pkt->stream_index]->start_time;
3027 pkt_in_play_range = duration == AV_NOPTS_VALUE ||
3028 (pkt->pts - (stream_start_time != AV_NOPTS_VALUE ? stream_start_time : 0)) *
3029 av_q2d(ic->streams[pkt->stream_index]->time_base) -
3030 (double)(start_time != AV_NOPTS_VALUE ? start_time : 0) / 1000000
3031 <= ((double)duration / 1000000);
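            /* Worked example (assumed numbers): with -ss 10 -t 5, a packet whose
               pts converts to 14.2 s of stream time gives 14.2 - 10.0 = 4.2 <= 5.0
               and is queued; a packet at 15.3 s exceeds the requested duration
               and is discarded. */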
3032 if (pkt->stream_index == is->audio_stream && pkt_in_play_range) {
3033 packet_queue_put(&is->audioq, pkt);
3034 } else if (pkt->stream_index == is->video_stream && pkt_in_play_range
3035 && !(is->video_st->disposition & AV_DISPOSITION_ATTACHED_PIC)) {
3036 packet_queue_put(&is->videoq, pkt);
3037 } else if (pkt->stream_index == is->subtitle_stream && pkt_in_play_range) {
3038 packet_queue_put(&is->subtitleq, pkt);
3040 av_free_packet(pkt);
3043 /* wait until the end */
3044 while (!is->abort_request) {
3050 /* close each stream */
3051 if (is->audio_stream >= 0)
3052 stream_component_close(is, is->audio_stream);
3053 if (is->video_stream >= 0)
3054 stream_component_close(is, is->video_stream);
3055 if (is->subtitle_stream >= 0)
3056 stream_component_close(is, is->subtitle_stream);
3058 avformat_close_input(&is->ic);
3064 event.type = FF_QUIT_EVENT;
3065 event.user.data1 = is;
3066 SDL_PushEvent(&event);
3068 SDL_DestroyMutex(wait_mutex);
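/*
 * Sketch (simplified, illustration only) of the per-stream half of the
 * buffering heuristic in read_thread() above. The real check additionally
 * caps the total queued bytes at MAX_QUEUE_SIZE and treats a video stream
 * consisting only of an attached picture as always having enough data.
 */
static av_unused int example_stream_has_enough_packets(int stream_id, int nb_packets, int abort_request)
{
    return stream_id < 0 || abort_request || nb_packets > MIN_FRAMES;
}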
3072 static VideoState *stream_open(const char *filename, AVInputFormat *iformat)
3076 is = av_mallocz(sizeof(VideoState));
3079 av_strlcpy(is->filename, filename, sizeof(is->filename));
3080 is->iformat = iformat;
3084 /* start video display */
3085 is->pictq_mutex = SDL_CreateMutex();
3086 is->pictq_cond = SDL_CreateCond();
3088 is->subpq_mutex = SDL_CreateMutex();
3089 is->subpq_cond = SDL_CreateCond();
3091 packet_queue_init(&is->videoq);
3092 packet_queue_init(&is->audioq);
3093 packet_queue_init(&is->subtitleq);
3095 is->continue_read_thread = SDL_CreateCond();
3097 init_clock(&is->vidclk, &is->videoq.serial);
3098 init_clock(&is->audclk, &is->audioq.serial);
3099 init_clock(&is->extclk, &is->extclk.serial);
3100 is->audio_clock_serial = -1;
3101 is->audio_last_serial = -1;
3102 is->av_sync_type = av_sync_type;
3103 is->read_tid = SDL_CreateThread(read_thread, is);
3104 if (!is->read_tid) {
3111 static void stream_cycle_channel(VideoState *is, int codec_type)
3113 AVFormatContext *ic = is->ic;
3114 int start_index, stream_index;
3117 AVProgram *p = NULL;
3118 int nb_streams = is->ic->nb_streams;
3120 if (codec_type == AVMEDIA_TYPE_VIDEO) {
3121 start_index = is->last_video_stream;
3122 old_index = is->video_stream;
3123 } else if (codec_type == AVMEDIA_TYPE_AUDIO) {
3124 start_index = is->last_audio_stream;
3125 old_index = is->audio_stream;
3127 start_index = is->last_subtitle_stream;
3128 old_index = is->subtitle_stream;
3130 stream_index = start_index;
3132 if (codec_type != AVMEDIA_TYPE_VIDEO && is->video_stream != -1) {
3133 p = av_find_program_from_stream(ic, NULL, is->video_stream);
3135 nb_streams = p->nb_stream_indexes;
3136 for (start_index = 0; start_index < nb_streams; start_index++)
3137 if (p->stream_index[start_index] == stream_index)
3139 if (start_index == nb_streams)
3141 stream_index = start_index;
3146 if (++stream_index >= nb_streams)
3148 if (codec_type == AVMEDIA_TYPE_SUBTITLE)
3151 is->last_subtitle_stream = -1;
3154 if (start_index == -1)
3158 if (stream_index == start_index)
3160 st = is->ic->streams[p ? p->stream_index[stream_index] : stream_index];
3161 if (st->codec->codec_type == codec_type) {
3162 /* check that parameters are OK */
3163 switch (codec_type) {
3164 case AVMEDIA_TYPE_AUDIO:
3165 if (st->codec->sample_rate != 0 &&
3166 st->codec->channels != 0)
3169 case AVMEDIA_TYPE_VIDEO:
3170 case AVMEDIA_TYPE_SUBTITLE:
3178 if (p && stream_index != -1)
3179 stream_index = p->stream_index[stream_index];
3180 av_log(NULL, AV_LOG_INFO, "Switch %s stream from #%d to #%d\n",
3181 av_get_media_type_string(codec_type),
3185 stream_component_close(is, old_index);
3186 stream_component_open(is, stream_index);
3190 static void toggle_full_screen(VideoState *is)
3192 #if defined(__APPLE__) && SDL_VERSION_ATLEAST(1, 2, 14)
3193 /* OS X needs to reallocate the SDL overlays */
3195 for (i = 0; i < VIDEO_PICTURE_QUEUE_SIZE; i++)
3196 is->pictq[i].reallocate = 1;
3198 is_full_screen = !is_full_screen;
3199 video_open(is, 1, NULL);
3202 static void toggle_audio_display(VideoState *is)
3204 int bgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0x00);
3205 int next = is->show_mode;
3207 next = (next + 1) % SHOW_MODE_NB;
3208 } while (next != is->show_mode && ((next == SHOW_MODE_VIDEO && !is->video_st) || (next != SHOW_MODE_VIDEO && !is->audio_st)));
3209 if (is->show_mode != next) {
3210 fill_rectangle(screen,
3211 is->xleft, is->ytop, is->width, is->height,
3213 is->force_refresh = 1;
3214 is->show_mode = next;
3218 static void refresh_loop_wait_event(VideoState *is, SDL_Event *event) {
3219 double remaining_time = 0.0;
3221 while (!SDL_PeepEvents(event, 1, SDL_GETEVENT, SDL_ALLEVENTS)) {
3222 if (!cursor_hidden && av_gettime_relative() - cursor_last_shown > CURSOR_HIDE_DELAY) {
3226 if (remaining_time > 0.0)
3227 av_usleep((int64_t)(remaining_time * 1000000.0));
3228 remaining_time = REFRESH_RATE;
3229 if (is->show_mode != SHOW_MODE_NONE && (!is->paused || is->force_refresh))
3230 video_refresh(is, &remaining_time);
3235 static void seek_chapter(VideoState *is, int incr)
3237 int64_t pos = get_master_clock(is) * AV_TIME_BASE;
3240 if (!is->ic->nb_chapters)
3243 /* find the current chapter */
3244 for (i = 0; i < is->ic->nb_chapters; i++) {
3245 AVChapter *ch = is->ic->chapters[i];
3246 if (av_compare_ts(pos, AV_TIME_BASE_Q, ch->start, ch->time_base) < 0) {
3254 if (i >= is->ic->nb_chapters)
3257 av_log(NULL, AV_LOG_VERBOSE, "Seeking to chapter %d.\n", i);
3258 stream_seek(is, av_rescale_q(is->ic->chapters[i]->start, is->ic->chapters[i]->time_base,
3259 AV_TIME_BASE_Q), 0, 0);
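/*
 * Sketch (illustration only) of the timestamp conversion used in
 * seek_chapter() above: av_rescale_q(a, bq, cq) computes a * bq / cq without
 * intermediate overflow, turning a chapter start expressed in the chapter's
 * own time_base into the AV_TIME_BASE_Q (microsecond) units that
 * stream_seek() expects.
 */
static av_unused int64_t example_chapter_start_us(int64_t start, AVRational chapter_tb)
{
    /* e.g. start = 90000 in a 1/90000 time base -> 1000000 us (one second) */
    return av_rescale_q(start, chapter_tb, AV_TIME_BASE_Q);
}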
3262 /* handle an event sent by the GUI */
3263 static void event_loop(VideoState *cur_stream)
3266 double incr, pos, frac;
3270 refresh_loop_wait_event(cur_stream, &event);
3271 switch (event.type) {
3273 if (exit_on_keydown) {
3274 do_exit(cur_stream);
3277 switch (event.key.keysym.sym) {
3280 do_exit(cur_stream);
3283 toggle_full_screen(cur_stream);
3284 cur_stream->force_refresh = 1;
3288 toggle_pause(cur_stream);
3290 case SDLK_s: // S: Step to next frame
3291 step_to_next_frame(cur_stream);
3294 stream_cycle_channel(cur_stream, AVMEDIA_TYPE_AUDIO);
3297 stream_cycle_channel(cur_stream, AVMEDIA_TYPE_VIDEO);
3300 stream_cycle_channel(cur_stream, AVMEDIA_TYPE_VIDEO);
3301 stream_cycle_channel(cur_stream, AVMEDIA_TYPE_AUDIO);
3302 stream_cycle_channel(cur_stream, AVMEDIA_TYPE_SUBTITLE);
3305 stream_cycle_channel(cur_stream, AVMEDIA_TYPE_SUBTITLE);
3309 if (cur_stream->show_mode == SHOW_MODE_VIDEO && cur_stream->vfilter_idx < nb_vfilters - 1) {
3310 if (++cur_stream->vfilter_idx >= nb_vfilters)
3311 cur_stream->vfilter_idx = 0;
3313 cur_stream->vfilter_idx = 0;
3314 toggle_audio_display(cur_stream);
3317 toggle_audio_display(cur_stream);
3321 if (cur_stream->ic->nb_chapters <= 1) {
3325 seek_chapter(cur_stream, 1);
3328 if (cur_stream->ic->nb_chapters <= 1) {
3332 seek_chapter(cur_stream, -1);
3346 if (seek_by_bytes) {
3347 if (cur_stream->video_stream >= 0 && cur_stream->video_current_pos >= 0) {
3348 pos = cur_stream->video_current_pos;
3349 } else if (cur_stream->audio_stream >= 0 && cur_stream->audio_pkt.pos >= 0) {
3350 pos = cur_stream->audio_pkt.pos;
3352 pos = avio_tell(cur_stream->ic->pb);
3353 if (cur_stream->ic->bit_rate)
3354 incr *= cur_stream->ic->bit_rate / 8.0;
3358 stream_seek(cur_stream, pos, incr, 1);
3360 pos = get_master_clock(cur_stream);
3362 pos = (double)cur_stream->seek_pos / AV_TIME_BASE;
3364 if (cur_stream->ic->start_time != AV_NOPTS_VALUE && pos < cur_stream->ic->start_time / (double)AV_TIME_BASE)
3365 pos = cur_stream->ic->start_time / (double)AV_TIME_BASE;
3366 stream_seek(cur_stream, (int64_t)(pos * AV_TIME_BASE), (int64_t)(incr * AV_TIME_BASE), 0);
3373 case SDL_VIDEOEXPOSE:
3374 cur_stream->force_refresh = 1;
3376 case SDL_MOUSEBUTTONDOWN:
3377 if (exit_on_mousedown) {
3378 do_exit(cur_stream);
3381 case SDL_MOUSEMOTION:
3382 if (cursor_hidden) {
3386 cursor_last_shown = av_gettime_relative();
3387 if (event.type == SDL_MOUSEBUTTONDOWN) {
3390 if (event.motion.state != SDL_PRESSED)
3394 if (seek_by_bytes || cur_stream->ic->duration <= 0) {
3395 uint64_t size = avio_size(cur_stream->ic->pb);
3396 stream_seek(cur_stream, size*x/cur_stream->width, 0, 1);
3400 int tns, thh, tmm, tss;
3401 tns = cur_stream->ic->duration / 1000000LL;
3403 tmm = (tns % 3600) / 60;
3405 frac = x / cur_stream->width;
3408 mm = (ns % 3600) / 60;
3410 av_log(NULL, AV_LOG_INFO,
3411 "Seek to %2.0f%% (%2d:%02d:%02d) of total duration (%2d:%02d:%02d) \n", frac*100,
3412 hh, mm, ss, thh, tmm, tss);
3413 ts = frac * cur_stream->ic->duration;
3414 if (cur_stream->ic->start_time != AV_NOPTS_VALUE)
3415 ts += cur_stream->ic->start_time;
3416 stream_seek(cur_stream, ts, 0, 0);
3419 case SDL_VIDEORESIZE:
3420 screen = SDL_SetVideoMode(FFMIN(16383, event.resize.w), event.resize.h, 0,
3421 SDL_HWSURFACE|SDL_RESIZABLE|SDL_ASYNCBLIT|SDL_HWACCEL);
3423 av_log(NULL, AV_LOG_FATAL, "Failed to set video mode\n");
3424 do_exit(cur_stream);
3426 screen_width = cur_stream->width = screen->w;
3427 screen_height = cur_stream->height = screen->h;
3428 cur_stream->force_refresh = 1;
3432 do_exit(cur_stream);
3434 case FF_ALLOC_EVENT:
3435 alloc_picture(event.user.data1);
3443 static int opt_frame_size(void *optctx, const char *opt, const char *arg)
3445 av_log(NULL, AV_LOG_WARNING, "Option -s is deprecated, use -video_size.\n");
3446 return opt_default(NULL, "video_size", arg);
3449 static int opt_width(void *optctx, const char *opt, const char *arg)
3451 screen_width = parse_number_or_die(opt, arg, OPT_INT64, 1, INT_MAX);
3455 static int opt_height(void *optctx, const char *opt, const char *arg)
3457 screen_height = parse_number_or_die(opt, arg, OPT_INT64, 1, INT_MAX);
3461 static int opt_format(void *optctx, const char *opt, const char *arg)
3463 file_iformat = av_find_input_format(arg);
3464 if (!file_iformat) {
3465 av_log(NULL, AV_LOG_FATAL, "Unknown input format: %s\n", arg);
3466 return AVERROR(EINVAL);
3471 static int opt_frame_pix_fmt(void *optctx, const char *opt, const char *arg)
3473 av_log(NULL, AV_LOG_WARNING, "Option -pix_fmt is deprecated, use -pixel_format.\n");
3474 return opt_default(NULL, "pixel_format", arg);
3477 static int opt_sync(void *optctx, const char *opt, const char *arg)
3479 if (!strcmp(arg, "audio"))
3480 av_sync_type = AV_SYNC_AUDIO_MASTER;
3481 else if (!strcmp(arg, "video"))
3482 av_sync_type = AV_SYNC_VIDEO_MASTER;
3483 else if (!strcmp(arg, "ext"))
3484 av_sync_type = AV_SYNC_EXTERNAL_CLOCK;
3486 av_log(NULL, AV_LOG_ERROR, "Unknown value for %s: %s\n", opt, arg);
3492 static int opt_seek(void *optctx, const char *opt, const char *arg)
3494 start_time = parse_time_or_die(opt, arg, 1);
3498 static int opt_duration(void *optctx, const char *opt, const char *arg)
3500 duration = parse_time_or_die(opt, arg, 1);
3504 static int opt_show_mode(void *optctx, const char *opt, const char *arg)
3506 show_mode = !strcmp(arg, "video") ? SHOW_MODE_VIDEO :
3507 !strcmp(arg, "waves") ? SHOW_MODE_WAVES :
3508 !strcmp(arg, "rdft" ) ? SHOW_MODE_RDFT :
3509 parse_number_or_die(opt, arg, OPT_INT, 0, SHOW_MODE_NB-1);
3513 static void opt_input_file(void *optctx, const char *filename)
3515 if (input_filename) {
3516 av_log(NULL, AV_LOG_FATAL,
3517 "Argument '%s' provided as input filename, but '%s' was already specified.\n",
3518 filename, input_filename);
3521 if (!strcmp(filename, "-"))
3523 input_filename = filename;
3526 static int opt_codec(void *optctx, const char *opt, const char *arg)
3528 const char *spec = strchr(opt, ':');
3530 av_log(NULL, AV_LOG_ERROR,
3531 "No media specifier was specified in '%s' in option '%s'\n",
3533 return AVERROR(EINVAL);
3537 case 'a' : audio_codec_name = arg; break;
3538 case 's' : subtitle_codec_name = arg; break;
3539 case 'v' : video_codec_name = arg; break;
3541 av_log(NULL, AV_LOG_ERROR,
3542 "Invalid media specifier '%s' in option '%s'\n", spec, opt);
3543 return AVERROR(EINVAL);
3550 static const OptionDef options[] = {
3551 #include "cmdutils_common_opts.h"
3552 { "x", HAS_ARG, { .func_arg = opt_width }, "force displayed width", "width" },
3553 { "y", HAS_ARG, { .func_arg = opt_height }, "force displayed height", "height" },
3554 { "s", HAS_ARG | OPT_VIDEO, { .func_arg = opt_frame_size }, "set frame size (WxH or abbreviation)", "size" },
3555 { "fs", OPT_BOOL, { &is_full_screen }, "force full screen" },
3556 { "an", OPT_BOOL, { &audio_disable }, "disable audio" },
3557 { "vn", OPT_BOOL, { &video_disable }, "disable video" },
3558 { "sn", OPT_BOOL, { &subtitle_disable }, "disable subtitling" },
3559 { "ast", OPT_INT | HAS_ARG | OPT_EXPERT, { &wanted_stream[AVMEDIA_TYPE_AUDIO] }, "select desired audio stream", "stream_number" },
3560 { "vst", OPT_INT | HAS_ARG | OPT_EXPERT, { &wanted_stream[AVMEDIA_TYPE_VIDEO] }, "select desired video stream", "stream_number" },
3561 { "sst", OPT_INT | HAS_ARG | OPT_EXPERT, { &wanted_stream[AVMEDIA_TYPE_SUBTITLE] }, "select desired subtitle stream", "stream_number" },
3562 { "ss", HAS_ARG, { .func_arg = opt_seek }, "seek to a given position in seconds", "pos" },
3563 { "t", HAS_ARG, { .func_arg = opt_duration }, "play \"duration\" seconds of audio/video", "duration" },
3564 { "bytes", OPT_INT | HAS_ARG, { &seek_by_bytes }, "seek by bytes 0=off 1=on -1=auto", "val" },
3565 { "nodisp", OPT_BOOL, { &display_disable }, "disable graphical display" },
3566 { "f", HAS_ARG, { .func_arg = opt_format }, "force format", "fmt" },
3567 { "pix_fmt", HAS_ARG | OPT_EXPERT | OPT_VIDEO, { .func_arg = opt_frame_pix_fmt }, "set pixel format", "format" },
3568 { "stats", OPT_BOOL | OPT_EXPERT, { &show_status }, "show status", "" },
3569 { "bug", OPT_INT | HAS_ARG | OPT_EXPERT, { &workaround_bugs }, "workaround bugs", "" },
3570 { "fast", OPT_BOOL | OPT_EXPERT, { &fast }, "non spec compliant optimizations", "" },
3571 { "genpts", OPT_BOOL | OPT_EXPERT, { &genpts }, "generate pts", "" },
3572 { "drp", OPT_INT | HAS_ARG | OPT_EXPERT, { &decoder_reorder_pts }, "let decoder reorder pts 0=off 1=on -1=auto", ""},
3573 { "lowres", OPT_INT | HAS_ARG | OPT_EXPERT, { &lowres }, "", "" },
3574 { "sync", HAS_ARG | OPT_EXPERT, { .func_arg = opt_sync }, "set audio-video sync. type (type=audio/video/ext)", "type" },
3575 { "autoexit", OPT_BOOL | OPT_EXPERT, { &autoexit }, "exit at the end", "" },
3576 { "exitonkeydown", OPT_BOOL | OPT_EXPERT, { &exit_on_keydown }, "exit on key down", "" },
3577 { "exitonmousedown", OPT_BOOL | OPT_EXPERT, { &exit_on_mousedown }, "exit on mouse down", "" },
3578 { "loop", OPT_INT | HAS_ARG | OPT_EXPERT, { &loop }, "set number of times the playback shall be looped", "loop count" },
3579 { "framedrop", OPT_BOOL | OPT_EXPERT, { &framedrop }, "drop frames when cpu is too slow", "" },
3580 { "infbuf", OPT_BOOL | OPT_EXPERT, { &infinite_buffer }, "don't limit the input buffer size (useful with realtime streams)", "" },
3581 { "window_title", OPT_STRING | HAS_ARG, { &window_title }, "set window title", "window title" },
3583 { "vf", OPT_EXPERT | HAS_ARG, { .func_arg = opt_add_vfilter }, "set video filters", "filter_graph" },
3584 { "af", OPT_STRING | HAS_ARG, { &afilters }, "set audio filters", "filter_graph" },
3586 { "rdftspeed", OPT_INT | HAS_ARG| OPT_AUDIO | OPT_EXPERT, { &rdftspeed }, "rdft speed", "msecs" },
3587 { "showmode", HAS_ARG, { .func_arg = opt_show_mode}, "select show mode (0 = video, 1 = waves, 2 = RDFT)", "mode" },
3588 { "default", HAS_ARG | OPT_AUDIO | OPT_VIDEO | OPT_EXPERT, { .func_arg = opt_default }, "generic catch all option", "" },
3589 { "i", OPT_BOOL, { &dummy}, "read specified file", "input_file"},
3590 { "codec", HAS_ARG, { .func_arg = opt_codec}, "force decoder", "decoder_name" },
3591 { "acodec", HAS_ARG | OPT_STRING | OPT_EXPERT, { &audio_codec_name }, "force audio decoder", "decoder_name" },
3592 { "scodec", HAS_ARG | OPT_STRING | OPT_EXPERT, { &subtitle_codec_name }, "force subtitle decoder", "decoder_name" },
3593 { "vcodec", HAS_ARG | OPT_STRING | OPT_EXPERT, { &video_codec_name }, "force video decoder", "decoder_name" },
3594 { "autorotate", OPT_BOOL, { &autorotate }, "automatically rotate video", "" },
3598 static void show_usage(void)
3600 av_log(NULL, AV_LOG_INFO, "Simple media player\n");
3601 av_log(NULL, AV_LOG_INFO, "usage: %s [options] input_file\n", program_name);
3602 av_log(NULL, AV_LOG_INFO, "\n");
3605 void show_help_default(const char *opt, const char *arg)
3607 av_log_set_callback(log_callback_help);
3609 show_help_options(options, "Main options:", 0, OPT_EXPERT, 0);
3610 show_help_options(options, "Advanced options:", OPT_EXPERT, 0, 0);
3612 show_help_children(avcodec_get_class(), AV_OPT_FLAG_DECODING_PARAM);
3613 show_help_children(avformat_get_class(), AV_OPT_FLAG_DECODING_PARAM);
3614 #if !CONFIG_AVFILTER
3615 show_help_children(sws_get_class(), AV_OPT_FLAG_ENCODING_PARAM);
3617 show_help_children(avfilter_get_class(), AV_OPT_FLAG_FILTERING_PARAM);
3619 printf("\nWhile playing:\n"
3621 "f toggle full screen\n"
3623 "a cycle audio channel in the current program\n"
3624 "v cycle video channel\n"
3625 "t cycle subtitle channel in the current program\n"
3627 "w cycle video filters or show modes\n"
3628 "s activate frame-step mode\n"
3629 "left/right seek backward/forward 10 seconds\n"
3630 "down/up seek backward/forward 1 minute\n"
3631 "page down/page up seek backward/forward 10 minutes\n"
3632 "mouse click seek to percentage in file corresponding to fraction of width\n"
3636 static int lockmgr(void **mtx, enum AVLockOp op)
3639 case AV_LOCK_CREATE:
3640 *mtx = SDL_CreateMutex();
3644 case AV_LOCK_OBTAIN:
3645 return !!SDL_LockMutex(*mtx);
3646 case AV_LOCK_RELEASE:
3647 return !!SDL_UnlockMutex(*mtx);
3648 case AV_LOCK_DESTROY:
3649 SDL_DestroyMutex(*mtx);
3655 /* program entry point */
3656 int main(int argc, char **argv)
3660 char dummy_videodriver[] = "SDL_VIDEODRIVER=dummy";
3662 av_log_set_flags(AV_LOG_SKIP_REPEATED);
3663 parse_loglevel(argc, argv, options);
3665 /* register all codecs, demuxers and protocols */
3667 avdevice_register_all();
3670 avfilter_register_all();
3673 avformat_network_init();
3677 signal(SIGINT , sigterm_handler); /* Interrupt (ANSI). */
3678 signal(SIGTERM, sigterm_handler); /* Termination (ANSI). */
3680 show_banner(argc, argv, options);
3682 parse_options(NULL, argc, argv, options, opt_input_file);
3684 if (!input_filename) {
3686 av_log(NULL, AV_LOG_FATAL, "An input file must be specified\n");
3687 av_log(NULL, AV_LOG_FATAL,
3688 "Use -h to get full help or, even better, run 'man %s'\n", program_name);
3692 if (display_disable) {
3695 flags = SDL_INIT_VIDEO | SDL_INIT_AUDIO | SDL_INIT_TIMER;
3697 flags &= ~SDL_INIT_AUDIO;
3698 if (display_disable)
3699 SDL_putenv(dummy_videodriver); /* For the event queue, we always need a video driver. */
3700 #if !defined(_WIN32) && !defined(__APPLE__)
3701 flags |= SDL_INIT_EVENTTHREAD; /* Not supported on Windows or Mac OS X */
3703 if (SDL_Init (flags)) {
3704 av_log(NULL, AV_LOG_FATAL, "Could not initialize SDL - %s\n", SDL_GetError());
3705 av_log(NULL, AV_LOG_FATAL, "(Did you set the DISPLAY variable?)\n");
3709 if (!display_disable) {
3710 const SDL_VideoInfo *vi = SDL_GetVideoInfo();
3711 fs_screen_width = vi->current_w;
3712 fs_screen_height = vi->current_h;
3715 SDL_EventState(SDL_ACTIVEEVENT, SDL_IGNORE);
3716 SDL_EventState(SDL_SYSWMEVENT, SDL_IGNORE);
3717 SDL_EventState(SDL_USEREVENT, SDL_IGNORE);
3719 if (av_lockmgr_register(lockmgr)) {
3720 av_log(NULL, AV_LOG_FATAL, "Could not initialize lock manager!\n");
3724 av_init_packet(&flush_pkt);
3725 flush_pkt.data = (uint8_t *)&flush_pkt;
3727 is = stream_open(input_filename, file_iformat);
3729 av_log(NULL, AV_LOG_FATAL, "Failed to initialize VideoState!\n");