/* we use about AUDIO_DIFF_AVG_NB A-V differences to make the average */
#define AUDIO_DIFF_AVG_NB 20
+/* poll for a possibly required screen refresh at least this often; should be less than 1/fps */
+#define REFRESH_RATE 0.01
+
/* NOTE: the size must be big enough to compensate the hardware audio buffersize size */
/* TODO: We assume that a decoded and resampled frame fits into this buffer */
#define SAMPLE_ARRAY_SIZE (8 * 65536)
typedef struct AudioParams {
int freq;
int channels;
- int channel_layout;
+ int64_t channel_layout;
enum AVSampleFormat fmt;
} AudioParams;
typedef struct VideoState {
SDL_Thread *read_tid;
SDL_Thread *video_tid;
- SDL_Thread *refresh_tid;
AVInputFormat *iformat;
int no_background;
int abort_request;
AVStream *audio_st;
PacketQueue audioq;
int audio_hw_buf_size;
- DECLARE_ALIGNED(16,uint8_t,audio_buf2)[AVCODEC_MAX_AUDIO_FRAME_SIZE * 4];
uint8_t silence_buf[SDL_AUDIO_BUFFER_SIZE];
uint8_t *audio_buf;
uint8_t *audio_buf1;
unsigned int audio_buf_size; /* in bytes */
+ unsigned int audio_buf1_size;
int audio_buf_index; /* in bytes */
int audio_write_buf_size;
AVPacket audio_pkt_temp;
int rdft_bits;
FFTSample *rdft_data;
int xpos;
+ double last_vis_time;
SDL_Thread *subtitle_tid;
int subtitle_stream;
double frame_last_returned_time;
double frame_last_filter_delay;
int64_t frame_last_dropped_pos;
- double video_clock; // pts of last decoded frame / predicted pts of next decoded frame
int video_stream;
AVStream *video_st;
PacketQueue videoq;
- double video_current_pts; // current displayed pts (different from video_clock if frame fifos are used)
+ double video_current_pts; // current displayed pts
double video_current_pts_drift; // video_current_pts - time (av_gettime) at which we updated video_current_pts - used to have running video pts
int64_t video_current_pos; // current displayed file pos
double max_frame_duration; // maximum duration of a frame - above this, we consider the jump a timestamp discontinuity
FrameBuffer *buffer_pool;
#endif
- int refresh;
int last_video_stream, last_audio_stream, last_subtitle_stream;
SDL_cond *continue_read_thread;
static const char *window_title;
static int fs_screen_width;
static int fs_screen_height;
+static int default_width = 640;
+static int default_height = 480;
static int screen_width = 0;
static int screen_height = 0;
static int audio_disable;
static const char *audio_codec_name;
static const char *subtitle_codec_name;
static const char *video_codec_name;
-static int rdftspeed = 20;
+double rdftspeed = 0.02;
static int64_t cursor_last_shown;
static int cursor_hidden = 0;
#if CONFIG_AVFILTER
static AVPacket flush_pkt;
#define FF_ALLOC_EVENT (SDL_USEREVENT)
-#define FF_REFRESH_EVENT (SDL_USEREVENT + 1)
#define FF_QUIT_EVENT (SDL_USEREVENT + 2)
static SDL_Surface *screen;
/* XXX: use a special url_shutdown call to abort parse cleanly */
is->abort_request = 1;
SDL_WaitThread(is->read_tid, NULL);
- SDL_WaitThread(is->refresh_tid, NULL);
packet_queue_destroy(&is->videoq);
packet_queue_destroy(&is->audioq);
packet_queue_destroy(&is->subtitleq);
SDL_DestroyCond(is->subpq_cond);
SDL_DestroyCond(is->continue_read_thread);
#if !CONFIG_AVFILTER
- if (is->img_convert_ctx)
- sws_freeContext(is->img_convert_ctx);
+ sws_freeContext(is->img_convert_ctx);
#endif
av_free(is);
}
exit(123);
}
-static int video_open(VideoState *is, int force_set_video_mode)
+static int video_open(VideoState *is, int force_set_video_mode, VideoPicture *vp)
{
int flags = SDL_HWSURFACE | SDL_ASYNCBLIT | SDL_HWACCEL;
int w,h;
- VideoPicture *vp = &is->pictq[is->pictq_rindex];
SDL_Rect rect;
if (is_full_screen) flags |= SDL_FULLSCREEN;
else flags |= SDL_RESIZABLE;
+ if (vp && vp->width) {
+ calculate_display_rect(&rect, 0, 0, INT_MAX, vp->height, vp);
+ default_width = rect.w;
+ default_height = rect.h;
+ }
+
if (is_full_screen && fs_screen_width) {
w = fs_screen_width;
h = fs_screen_height;
} else if (!is_full_screen && screen_width) {
w = screen_width;
h = screen_height;
- } else if (vp->width) {
- calculate_display_rect(&rect, 0, 0, INT_MAX, vp->height, vp);
- w = rect.w;
- h = rect.h;
} else {
- w = 640;
- h = 480;
+ w = default_width;
+ h = default_height;
}
if (screen && is->width == screen->w && screen->w == w
&& is->height== screen->h && screen->h == h && !force_set_video_mode)
static void video_display(VideoState *is)
{
if (!screen)
- video_open(is, 0);
+ video_open(is, 0, NULL);
if (is->audio_st && is->show_mode != SHOW_MODE_VIDEO)
video_audio_display(is);
else if (is->video_st)
video_image_display(is);
}
-static int refresh_thread(void *opaque)
-{
- VideoState *is= opaque;
- while (!is->abort_request) {
- SDL_Event event;
- event.type = FF_REFRESH_EVENT;
- event.user.data1 = opaque;
- if (!is->refresh && (!is->paused || is->force_refresh)) {
- is->refresh = 1;
- SDL_PushEvent(&event);
- }
- //FIXME ideally we should wait the correct time but SDLs event passing is so slow it would be silly
- av_usleep(is->audio_st && is->show_mode != SHOW_MODE_VIDEO ? rdftspeed*1000 : 5000);
- }
- return 0;
-}
-
/* get the current audio clock value */
static double get_audio_clock(VideoState *is)
{
is->paused = !is->paused;
}
+static void toggle_pause(VideoState *is)
+{
+ stream_toggle_pause(is);
+ is->step = 0;
+}
+
+static void step_to_next_frame(VideoState *is)
+{
+ /* if the stream is paused, unpause it, then step */
+ if (is->paused)
+ stream_toggle_pause(is);
+ is->step = 1;
+}
+
static double compute_target_delay(double delay, VideoState *is)
{
double sync_threshold, diff;
}
/* called to display each frame */
-static void video_refresh(void *opaque)
+static void video_refresh(void *opaque, double *remaining_time)
{
VideoState *is = opaque;
VideoPicture *vp;
if (!is->paused && get_master_sync_type(is) == AV_SYNC_EXTERNAL_CLOCK && is->realtime)
check_external_clock_speed(is);
- if (!display_disable && is->show_mode != SHOW_MODE_VIDEO && is->audio_st)
- video_display(is);
+ if (!display_disable && is->show_mode != SHOW_MODE_VIDEO && is->audio_st) {
+ time = av_gettime() / 1000000.0;
+ if (is->force_refresh || is->last_vis_time + rdftspeed < time) {
+ video_display(is);
+ is->last_vis_time = time;
+ }
+ *remaining_time = FFMIN(*remaining_time, is->last_vis_time + rdftspeed - time);
+ }
if (is->video_st) {
if (is->force_refresh)
delay = compute_target_delay(is->frame_last_duration, is);
time= av_gettime()/1000000.0;
- if (time < is->frame_timer + delay)
+ if (time < is->frame_timer + delay) {
+ *remaining_time = FFMIN(is->frame_timer + delay - time, *remaining_time);
return;
+ }
if (delay > 0)
is->frame_timer += delay * FFMAX(1, floor((time-is->frame_timer) / delay));
avfilter_unref_bufferp(&vp->picref);
#endif
- video_open(is, 0);
+ video_open(is, 0, vp);
vp->bmp = SDL_CreateYUVOverlay(vp->width, vp->height,
SDL_YV12_OVERLAY,
}
}
-static int queue_picture(VideoState *is, AVFrame *src_frame, double pts1, int64_t pos, int serial)
+static int queue_picture(VideoState *is, AVFrame *src_frame, double pts, int64_t pos, int serial)
{
VideoPicture *vp;
- double frame_delay, pts = pts1;
-
- /* compute the exact PTS for the picture if it is omitted in the stream
- * pts1 is the dts of the pkt / pts of the frame */
- if (pts != 0) {
- /* update video clock with pts, if present */
- is->video_clock = pts;
- } else {
- pts = is->video_clock;
- }
- /* update video clock for next frame */
- frame_delay = av_q2d(is->video_st->codec->time_base);
- /* for MPEG2, the frame can be repeated, so we update the
- clock accordingly */
- frame_delay += src_frame->repeat_pict * (frame_delay * 0.5);
- is->video_clock += frame_delay;
#if defined(DEBUG_SYNC) && 0
- printf("frame_type=%c clock=%0.3f pts=%0.3f\n",
- av_get_picture_type_char(src_frame->pict_type), pts, pts1);
+ printf("frame_type=%c pts=%0.3f\n",
+ av_get_picture_type_char(src_frame->pict_type), pts);
#endif
/* wait until we have space to put a new picture */
|| last_h != frame->height
|| last_format != frame->format
|| last_serial != serial) {
- av_log(NULL, AV_LOG_INFO, "Frame changed from size:%dx%d to size:%dx%d\n",
- last_w, last_h, frame->width, frame->height);
+ av_log(NULL, AV_LOG_DEBUG,
+ "Video frame changed from size:%dx%d format:%s serial:%d to size:%dx%d format:%s serial:%d\n",
+ last_w, last_h,
+ (const char *)av_x_if_null(av_get_pix_fmt_name(last_format), "none"), last_serial,
+ frame->width, frame->height,
+ (const char *)av_x_if_null(av_get_pix_fmt_name(frame->format), "none"), serial);
avfilter_graph_free(&graph);
graph = avfilter_graph_alloc();
if ((ret = configure_video_filters(graph, is, vfilters, frame)) < 0) {
max_nb_samples = ((nb_samples * (100 + SAMPLE_CORRECTION_PERCENT_MAX) / 100));
wanted_nb_samples = FFMIN(FFMAX(wanted_nb_samples, min_nb_samples), max_nb_samples);
}
- av_dlog(NULL, "diff=%f adiff=%f sample_diff=%d apts=%0.3f vpts=%0.3f %f\n",
+ av_dlog(NULL, "diff=%f adiff=%f sample_diff=%d apts=%0.3f %f\n",
diff, avg_diff, wanted_nb_samples - nb_samples,
- is->audio_clock, is->video_clock, is->audio_diff_threshold);
+ is->audio_clock, is->audio_diff_threshold);
}
} else {
/* too big difference : may be initial PTS errors, so
return wanted_nb_samples;
}
-/* decode one audio frame and returns its uncompressed size */
-static int audio_decode_frame(VideoState *is, double *pts_ptr)
+/**
+ * Decode one audio frame and return its uncompressed size.
+ *
+ * The processed audio frame is decoded, converted if required, and
+ * stored in is->audio_buf; its size in bytes is given by the return
+ * value.
+ */
+static int audio_decode_frame(VideoState *is)
{
AVPacket *pkt_temp = &is->audio_pkt_temp;
AVPacket *pkt = &is->audio_pkt;
int len1, len2, data_size, resampled_data_size;
int64_t dec_channel_layout;
int got_frame;
- double pts;
+ av_unused double audio_clock0;
int new_packet = 0;
int flush_complete = 0;
int wanted_nb_samples;
0, NULL);
if (!is->swr_ctx || swr_init(is->swr_ctx) < 0) {
fprintf(stderr, "Cannot create sample rate converter for conversion of %d Hz %s %d channels to %d Hz %s %d channels!\n",
- is->frame->sample_rate, av_get_sample_fmt_name(is->frame->format), (int)is->frame->channels,
- is->audio_tgt.freq, av_get_sample_fmt_name(is->audio_tgt.fmt), is->audio_tgt.channels);
+ is->frame->sample_rate, av_get_sample_fmt_name(is->frame->format), is->frame->channels,
+ is->audio_tgt.freq, av_get_sample_fmt_name(is->audio_tgt.fmt), is->audio_tgt.channels);
break;
}
is->audio_src.channel_layout = dec_channel_layout;
if (is->swr_ctx) {
const uint8_t **in = (const uint8_t **)is->frame->extended_data;
- uint8_t *out[] = {is->audio_buf2};
- int out_count = sizeof(is->audio_buf2) / is->audio_tgt.channels / av_get_bytes_per_sample(is->audio_tgt.fmt);
+ uint8_t **out = &is->audio_buf1;
+ int out_count = (int64_t)wanted_nb_samples * is->audio_tgt.freq / is->frame->sample_rate + 256;
+ int out_size = av_samples_get_buffer_size(NULL, is->audio_tgt.channels, out_count, is->audio_tgt.fmt, 0);
if (wanted_nb_samples != is->frame->nb_samples) {
if (swr_set_compensation(is->swr_ctx, (wanted_nb_samples - is->frame->nb_samples) * is->audio_tgt.freq / is->frame->sample_rate,
wanted_nb_samples * is->audio_tgt.freq / is->frame->sample_rate) < 0) {
break;
}
}
+ av_fast_malloc(&is->audio_buf1, &is->audio_buf1_size, out_size);
+ if (!is->audio_buf1)
+ return AVERROR(ENOMEM);
len2 = swr_convert(is->swr_ctx, out, out_count, in, is->frame->nb_samples);
if (len2 < 0) {
fprintf(stderr, "swr_convert() failed\n");
fprintf(stderr, "warning: audio buffer is probably too small\n");
swr_init(is->swr_ctx);
}
- is->audio_buf = is->audio_buf2;
+ is->audio_buf = is->audio_buf1;
resampled_data_size = len2 * is->audio_tgt.channels * av_get_bytes_per_sample(is->audio_tgt.fmt);
} else {
is->audio_buf = is->frame->data[0];
resampled_data_size = data_size;
}
- /* if no pts, then compute it */
- pts = is->audio_clock;
- *pts_ptr = pts;
+ audio_clock0 = is->audio_clock;
is->audio_clock += (double)data_size /
(is->frame->channels * is->frame->sample_rate * av_get_bytes_per_sample(is->frame->format));
#ifdef DEBUG
{
static double last_clock;
- printf("audio: delay=%0.3f clock=%0.3f pts=%0.3f\n",
+ printf("audio: delay=%0.3f clock=%0.3f clock0=%0.3f\n",
is->audio_clock - last_clock,
- is->audio_clock, pts);
+ is->audio_clock, audio_clock0);
last_clock = is->audio_clock;
}
#endif
int audio_size, len1;
int bytes_per_sec;
int frame_size = av_samples_get_buffer_size(NULL, is->audio_tgt.channels, 1, is->audio_tgt.fmt, 1);
- double pts;
audio_callback_time = av_gettime();
while (len > 0) {
if (is->audio_buf_index >= is->audio_buf_size) {
- audio_size = audio_decode_frame(is, &pts);
+ audio_size = audio_decode_frame(is);
if (audio_size < 0) {
/* if error, just output silence */
is->audio_buf = is->silence_buf;
av_free_packet(&is->audio_pkt);
swr_free(&is->swr_ctx);
av_freep(&is->audio_buf1);
+ is->audio_buf1_size = 0;
is->audio_buf = NULL;
avcodec_free_frame(&is->frame);
if (st_index[AVMEDIA_TYPE_VIDEO] >= 0) {
ret = stream_component_open(is, st_index[AVMEDIA_TYPE_VIDEO]);
}
- is->refresh_tid = SDL_CreateThread(refresh_thread, is);
if (is->show_mode == SHOW_MODE_NONE)
is->show_mode = ret >= 0 ? SHOW_MODE_VIDEO : SHOW_MODE_RDFT;
}
is->seek_req = 0;
eof = 0;
+ if (is->paused)
+ step_to_next_frame(is);
}
if (is->queue_attachments_req) {
avformat_queue_attached_pictures(ic);
is->pictq[i].reallocate = 1;
#endif
is_full_screen = !is_full_screen;
- video_open(is, 1);
-}
-
-static void toggle_pause(VideoState *is)
-{
- stream_toggle_pause(is);
- is->step = 0;
-}
-
-static void step_to_next_frame(VideoState *is)
-{
- /* if the stream is paused unpause it, then step */
- if (is->paused)
- stream_toggle_pause(is);
- is->step = 1;
+ video_open(is, 1, NULL);
}
static void toggle_audio_display(VideoState *is)
bgcolor, 1);
}
+static void refresh_loop_wait_event(VideoState *is, SDL_Event *event) {
+ double remaining_time = 0.0;
+ SDL_PumpEvents();
+ while (!SDL_PeepEvents(event, 1, SDL_GETEVENT, SDL_ALLEVENTS)) {
+ if (!cursor_hidden && av_gettime() - cursor_last_shown > CURSOR_HIDE_DELAY) {
+ SDL_ShowCursor(0);
+ cursor_hidden = 1;
+ }
+ if (remaining_time > 0.0)
+ av_usleep((int64_t)(remaining_time * 1000000.0));
+ remaining_time = REFRESH_RATE;
+ if (is->show_mode != SHOW_MODE_NONE && (!is->paused || is->force_refresh))
+ video_refresh(is, &remaining_time);
+ SDL_PumpEvents();
+ }
+}
+
/* handle an event sent by the GUI */
static void event_loop(VideoState *cur_stream)
{
for (;;) {
double x;
- SDL_WaitEvent(&event);
+ refresh_loop_wait_event(cur_stream, &event);
switch (event.type) {
case SDL_KEYDOWN:
if (exit_on_keydown) {
case FF_ALLOC_EVENT:
alloc_picture(event.user.data1);
break;
- case FF_REFRESH_EVENT:
- if (!cursor_hidden && av_gettime() - cursor_last_shown > CURSOR_HIDE_DELAY) {
- SDL_ShowCursor(0);
- cursor_hidden = 1;
- }
- video_refresh(event.user.data1);
- cur_stream->refresh = 0;
- break;
default:
break;
}