AVStream *audio_st;
PacketQueue audioq;
int audio_hw_buf_size;
- DECLARE_ALIGNED(16,uint8_t,audio_buf2)[AVCODEC_MAX_AUDIO_FRAME_SIZE * 4];
uint8_t silence_buf[SDL_AUDIO_BUFFER_SIZE];
uint8_t *audio_buf;
uint8_t *audio_buf1;
unsigned int audio_buf_size; /* in bytes */
+ unsigned int audio_buf1_size;
int audio_buf_index; /* in bytes */
int audio_write_buf_size;
AVPacket audio_pkt_temp;
static const char *window_title;
static int fs_screen_width;
static int fs_screen_height;
+static int default_width = 640;
+static int default_height = 480;
static int screen_width = 0;
static int screen_height = 0;
static int audio_disable;
SDL_DestroyCond(is->subpq_cond);
SDL_DestroyCond(is->continue_read_thread);
#if !CONFIG_AVFILTER
- if (is->img_convert_ctx)
- sws_freeContext(is->img_convert_ctx);
+ sws_freeContext(is->img_convert_ctx);
#endif
av_free(is);
}
exit(123);
}
-static int video_open(VideoState *is, int force_set_video_mode)
+static int video_open(VideoState *is, int force_set_video_mode, VideoPicture *vp)
{
int flags = SDL_HWSURFACE | SDL_ASYNCBLIT | SDL_HWACCEL;
int w,h;
- VideoPicture *vp = &is->pictq[is->pictq_rindex];
SDL_Rect rect;
if (is_full_screen) flags |= SDL_FULLSCREEN;
else flags |= SDL_RESIZABLE;
+ if (vp && vp->width) {
+ calculate_display_rect(&rect, 0, 0, INT_MAX, vp->height, vp);
+ default_width = rect.w;
+ default_height = rect.h;
+ }
+
if (is_full_screen && fs_screen_width) {
w = fs_screen_width;
h = fs_screen_height;
} else if (!is_full_screen && screen_width) {
w = screen_width;
h = screen_height;
- } else if (vp->width) {
- calculate_display_rect(&rect, 0, 0, INT_MAX, vp->height, vp);
- w = rect.w;
- h = rect.h;
} else {
- w = 640;
- h = 480;
+ w = default_width;
+ h = default_height;
}
if (screen && is->width == screen->w && screen->w == w
&& is->height== screen->h && screen->h == h && !force_set_video_mode)
static void video_display(VideoState *is)
{
if (!screen)
- video_open(is, 0);
+ video_open(is, 0, NULL);
if (is->audio_st && is->show_mode != SHOW_MODE_VIDEO)
video_audio_display(is);
else if (is->video_st)
is->paused = !is->paused;
}
+/* Toggle the pause state and cancel any pending single-frame step
+ * request, so an explicit pause/unpause always wins over stepping. */
+static void toggle_pause(VideoState *is)
+{
+ stream_toggle_pause(is);
+ is->step = 0;
+}
+
+/* Request advancing playback by a single frame: is->step = 1 is the
+ * step flag consumed by the playback loop (not visible in this hunk,
+ * presumably it re-pauses after one frame — confirm against ffplay.c). */
+static void step_to_next_frame(VideoState *is)
+{
+ /* if the stream is paused unpause it, then step */
+ if (is->paused)
+ stream_toggle_pause(is);
+ is->step = 1;
+}
+
static double compute_target_delay(double delay, VideoState *is)
{
double sync_threshold, diff;
avfilter_unref_bufferp(&vp->picref);
#endif
- video_open(is, 0);
+ video_open(is, 0, vp);
vp->bmp = SDL_CreateYUVOverlay(vp->width, vp->height,
SDL_YV12_OVERLAY,
|| last_h != frame->height
|| last_format != frame->format
|| last_serial != serial) {
- av_log(NULL, AV_LOG_INFO, "Frame changed from size:%dx%d to size:%dx%d\n",
- last_w, last_h, frame->width, frame->height);
+ av_log(NULL, AV_LOG_DEBUG,
+ "Video frame changed from size:%dx%d format:%s serial:%d to size:%dx%d format:%s serial:%d\n",
+ last_w, last_h,
+ (const char *)av_x_if_null(av_get_pix_fmt_name(last_format), "none"), last_serial,
+ frame->width, frame->height,
+ (const char *)av_x_if_null(av_get_pix_fmt_name(frame->format), "none"), serial);
avfilter_graph_free(&graph);
graph = avfilter_graph_alloc();
if ((ret = configure_video_filters(graph, is, vfilters, frame)) < 0) {
return wanted_nb_samples;
}
-/* decode one audio frame and returns its uncompressed size */
-static int audio_decode_frame(VideoState *is, double *pts_ptr)
+/**
+ * Decode one audio frame and return its uncompressed size.
+ *
+ * The processed audio frame is decoded, converted if required, and
+ * stored in is->audio_buf, with size in bytes given by the return
+ * value.
+ */
+static int audio_decode_frame(VideoState *is)
{
AVPacket *pkt_temp = &is->audio_pkt_temp;
AVPacket *pkt = &is->audio_pkt;
int len1, len2, data_size, resampled_data_size;
int64_t dec_channel_layout;
int got_frame;
- double pts;
+ av_unused double audio_clock0;
int new_packet = 0;
int flush_complete = 0;
int wanted_nb_samples;
if (is->swr_ctx) {
const uint8_t **in = (const uint8_t **)is->frame->extended_data;
- uint8_t *out[] = {is->audio_buf2};
- int out_count = sizeof(is->audio_buf2) / is->audio_tgt.channels / av_get_bytes_per_sample(is->audio_tgt.fmt);
+ uint8_t **out = &is->audio_buf1;
+ int out_count = (int64_t)wanted_nb_samples * is->audio_tgt.freq / is->frame->sample_rate + 256;
+ int out_size = av_samples_get_buffer_size(NULL, is->audio_tgt.channels, out_count, is->audio_tgt.fmt, 0);
if (wanted_nb_samples != is->frame->nb_samples) {
if (swr_set_compensation(is->swr_ctx, (wanted_nb_samples - is->frame->nb_samples) * is->audio_tgt.freq / is->frame->sample_rate,
wanted_nb_samples * is->audio_tgt.freq / is->frame->sample_rate) < 0) {
break;
}
}
+ av_fast_malloc(&is->audio_buf1, &is->audio_buf1_size, out_size);
+ if (!is->audio_buf1)
+ return AVERROR(ENOMEM);
len2 = swr_convert(is->swr_ctx, out, out_count, in, is->frame->nb_samples);
if (len2 < 0) {
fprintf(stderr, "swr_convert() failed\n");
fprintf(stderr, "warning: audio buffer is probably too small\n");
swr_init(is->swr_ctx);
}
- is->audio_buf = is->audio_buf2;
+ is->audio_buf = is->audio_buf1;
resampled_data_size = len2 * is->audio_tgt.channels * av_get_bytes_per_sample(is->audio_tgt.fmt);
} else {
is->audio_buf = is->frame->data[0];
resampled_data_size = data_size;
}
- /* if no pts, then compute it */
- pts = is->audio_clock;
- *pts_ptr = pts;
+ audio_clock0 = is->audio_clock;
is->audio_clock += (double)data_size /
(is->frame->channels * is->frame->sample_rate * av_get_bytes_per_sample(is->frame->format));
#ifdef DEBUG
{
static double last_clock;
- printf("audio: delay=%0.3f clock=%0.3f pts=%0.3f\n",
+ printf("audio: delay=%0.3f clock=%0.3f clock0=%0.3f\n",
is->audio_clock - last_clock,
- is->audio_clock, pts);
+ is->audio_clock, audio_clock0);
last_clock = is->audio_clock;
}
#endif
int audio_size, len1;
int bytes_per_sec;
int frame_size = av_samples_get_buffer_size(NULL, is->audio_tgt.channels, 1, is->audio_tgt.fmt, 1);
- double pts;
audio_callback_time = av_gettime();
while (len > 0) {
if (is->audio_buf_index >= is->audio_buf_size) {
- audio_size = audio_decode_frame(is, &pts);
+ audio_size = audio_decode_frame(is);
if (audio_size < 0) {
/* if error, just output silence */
is->audio_buf = is->silence_buf;
av_free_packet(&is->audio_pkt);
swr_free(&is->swr_ctx);
av_freep(&is->audio_buf1);
+ is->audio_buf1_size = 0;
is->audio_buf = NULL;
avcodec_free_frame(&is->frame);
if (st_index[AVMEDIA_TYPE_VIDEO] >= 0) {
ret = stream_component_open(is, st_index[AVMEDIA_TYPE_VIDEO]);
}
- is->refresh_tid = SDL_CreateThread(refresh_thread, is);
if (is->show_mode == SHOW_MODE_NONE)
is->show_mode = ret >= 0 ? SHOW_MODE_VIDEO : SHOW_MODE_RDFT;
+ is->refresh_tid = SDL_CreateThread(refresh_thread, is);
+
if (st_index[AVMEDIA_TYPE_SUBTITLE] >= 0) {
stream_component_open(is, st_index[AVMEDIA_TYPE_SUBTITLE]);
}
}
is->seek_req = 0;
eof = 0;
+ if (is->paused)
+ step_to_next_frame(is);
}
if (is->queue_attachments_req) {
avformat_queue_attached_pictures(ic);
is->pictq[i].reallocate = 1;
#endif
is_full_screen = !is_full_screen;
- video_open(is, 1);
-}
-
-static void toggle_pause(VideoState *is)
-{
- stream_toggle_pause(is);
- is->step = 0;
-}
-
-static void step_to_next_frame(VideoState *is)
-{
- /* if the stream is paused unpause it, then step */
- if (is->paused)
- stream_toggle_pause(is);
- is->step = 1;
+ video_open(is, 1, NULL);
}
static void toggle_audio_display(VideoState *is)