static char *afilters = NULL;
#endif
static int autorotate = 1;
+static int find_stream_info = 1;
/* current context */
static int is_full_screen;
static SDL_Window *window;
static SDL_Renderer *renderer;
+/* Mapping between FFmpeg pixel formats and the SDL texture formats that can
+ * hold them without software conversion.  Terminated by an AV_PIX_FMT_NONE /
+ * SDL_PIXELFORMAT_UNKNOWN sentinel entry so lookup loops can iterate over
+ * FF_ARRAY_ELEMS(...) - 1 entries and fall back to UNKNOWN when no match. */
+static const struct TextureFormatEntry {
+ enum AVPixelFormat format; /* FFmpeg source pixel format */
+ int texture_fmt; /* corresponding SDL_PIXELFORMAT_* value */
+} sdl_texture_format_map[] = {
+ { AV_PIX_FMT_RGB8, SDL_PIXELFORMAT_RGB332 },
+ { AV_PIX_FMT_RGB444, SDL_PIXELFORMAT_RGB444 },
+ { AV_PIX_FMT_RGB555, SDL_PIXELFORMAT_RGB555 },
+ { AV_PIX_FMT_BGR555, SDL_PIXELFORMAT_BGR555 },
+ { AV_PIX_FMT_RGB565, SDL_PIXELFORMAT_RGB565 },
+ { AV_PIX_FMT_BGR565, SDL_PIXELFORMAT_BGR565 },
+ { AV_PIX_FMT_RGB24, SDL_PIXELFORMAT_RGB24 },
+ { AV_PIX_FMT_BGR24, SDL_PIXELFORMAT_BGR24 },
+ { AV_PIX_FMT_0RGB32, SDL_PIXELFORMAT_RGB888 },
+ { AV_PIX_FMT_0BGR32, SDL_PIXELFORMAT_BGR888 },
+ { AV_PIX_FMT_NE(RGB0, 0BGR), SDL_PIXELFORMAT_RGBX8888 },
+ { AV_PIX_FMT_NE(BGR0, 0RGB), SDL_PIXELFORMAT_BGRX8888 },
+ { AV_PIX_FMT_RGB32, SDL_PIXELFORMAT_ARGB8888 },
+ { AV_PIX_FMT_RGB32_1, SDL_PIXELFORMAT_RGBA8888 },
+ { AV_PIX_FMT_BGR32, SDL_PIXELFORMAT_ABGR8888 },
+ { AV_PIX_FMT_BGR32_1, SDL_PIXELFORMAT_BGRA8888 },
+ { AV_PIX_FMT_YUV420P, SDL_PIXELFORMAT_IYUV },
+ { AV_PIX_FMT_YUYV422, SDL_PIXELFORMAT_YUY2 },
+ { AV_PIX_FMT_UYVY422, SDL_PIXELFORMAT_UYVY },
+ /* sentinel: must remain the last entry */
+ { AV_PIX_FMT_NONE, SDL_PIXELFORMAT_UNKNOWN },
+};
+
#if CONFIG_AVFILTER
static int opt_add_vfilter(void *optctx, const char *opt, const char *arg)
{
ret = avcodec_receive_frame(d->avctx, frame);
if (ret >= 0) {
if (decoder_reorder_pts == -1) {
- frame->pts = av_frame_get_best_effort_timestamp(frame);
+ frame->pts = frame->best_effort_timestamp;
} else if (!decoder_reorder_pts) {
frame->pts = frame->pkt_dts;
}
memset(pixels, 0, pitch * new_height);
SDL_UnlockTexture(*texture);
}
+ av_log(NULL, AV_LOG_VERBOSE, "Created %dx%d texture with %s.\n", new_width, new_height, SDL_GetPixelFormatName(new_format));
}
return 0;
}
rect->h = FFMAX(height, 1);
}
-static int upload_texture(SDL_Texture *tex, AVFrame *frame, struct SwsContext **img_convert_ctx) {
+/* Map an FFmpeg pixel format to the SDL pixel format and blend mode to use
+ * for the display texture.  The packed 32-bit formats that carry an alpha
+ * channel (RGB32/RGB32_1/BGR32/BGR32_1 map to SDL A*8888 formats in
+ * sdl_texture_format_map) get SDL_BLENDMODE_BLEND; all others use
+ * SDL_BLENDMODE_NONE.  *sdl_pix_fmt is left as SDL_PIXELFORMAT_UNKNOWN
+ * when the format has no direct SDL equivalent, signalling the caller to
+ * convert the frame with swscale instead. */
+static void get_sdl_pix_fmt_and_blendmode(int format, Uint32 *sdl_pix_fmt, SDL_BlendMode *sdl_blendmode)
+{
+ int i;
+ *sdl_blendmode = SDL_BLENDMODE_NONE;
+ *sdl_pix_fmt = SDL_PIXELFORMAT_UNKNOWN;
+ if (format == AV_PIX_FMT_RGB32 ||
+ format == AV_PIX_FMT_RGB32_1 ||
+ format == AV_PIX_FMT_BGR32 ||
+ format == AV_PIX_FMT_BGR32_1)
+ *sdl_blendmode = SDL_BLENDMODE_BLEND;
+ /* "- 1" skips the AV_PIX_FMT_NONE sentinel at the end of the table */
+ for (i = 0; i < FF_ARRAY_ELEMS(sdl_texture_format_map) - 1; i++) {
+ if (format == sdl_texture_format_map[i].format) {
+ *sdl_pix_fmt = sdl_texture_format_map[i].texture_fmt;
+ return;
+ }
+ }
+}
+
+static int upload_texture(SDL_Texture **tex, AVFrame *frame, struct SwsContext **img_convert_ctx) {
int ret = 0;
- switch (frame->format) {
- case AV_PIX_FMT_YUV420P:
- if (frame->linesize[0] < 0 || frame->linesize[1] < 0 || frame->linesize[2] < 0) {
- av_log(NULL, AV_LOG_ERROR, "Negative linesize is not supported for YUV.\n");
- return -1;
- }
- ret = SDL_UpdateYUVTexture(tex, NULL, frame->data[0], frame->linesize[0],
- frame->data[1], frame->linesize[1],
- frame->data[2], frame->linesize[2]);
- break;
- case AV_PIX_FMT_BGRA:
- if (frame->linesize[0] < 0) {
- ret = SDL_UpdateTexture(tex, NULL, frame->data[0] + frame->linesize[0] * (frame->height - 1), -frame->linesize[0]);
- } else {
- ret = SDL_UpdateTexture(tex, NULL, frame->data[0], frame->linesize[0]);
- }
- break;
- default:
+ Uint32 sdl_pix_fmt;
+ SDL_BlendMode sdl_blendmode;
+ get_sdl_pix_fmt_and_blendmode(frame->format, &sdl_pix_fmt, &sdl_blendmode);
+ if (realloc_texture(tex, sdl_pix_fmt == SDL_PIXELFORMAT_UNKNOWN ? SDL_PIXELFORMAT_ARGB8888 : sdl_pix_fmt, frame->width, frame->height, sdl_blendmode, 0) < 0)
+ return -1;
+ switch (sdl_pix_fmt) {
+ case SDL_PIXELFORMAT_UNKNOWN:
/* This should only happen if we are not using avfilter... */
*img_convert_ctx = sws_getCachedContext(*img_convert_ctx,
frame->width, frame->height, frame->format, frame->width, frame->height,
if (*img_convert_ctx != NULL) {
uint8_t *pixels[4];
int pitch[4];
- if (!SDL_LockTexture(tex, NULL, (void **)pixels, pitch)) {
+ if (!SDL_LockTexture(*tex, NULL, (void **)pixels, pitch)) {
sws_scale(*img_convert_ctx, (const uint8_t * const *)frame->data, frame->linesize,
0, frame->height, pixels, pitch);
- SDL_UnlockTexture(tex);
+ SDL_UnlockTexture(*tex);
}
} else {
av_log(NULL, AV_LOG_FATAL, "Cannot initialize the conversion context\n");
ret = -1;
}
break;
+ case SDL_PIXELFORMAT_IYUV:
+ if (frame->linesize[0] > 0 && frame->linesize[1] > 0 && frame->linesize[2] > 0) {
+ ret = SDL_UpdateYUVTexture(*tex, NULL, frame->data[0], frame->linesize[0],
+ frame->data[1], frame->linesize[1],
+ frame->data[2], frame->linesize[2]);
+ } else if (frame->linesize[0] < 0 && frame->linesize[1] < 0 && frame->linesize[2] < 0) {
+ ret = SDL_UpdateYUVTexture(*tex, NULL, frame->data[0] + frame->linesize[0] * (frame->height - 1), -frame->linesize[0],
+ frame->data[1] + frame->linesize[1] * (AV_CEIL_RSHIFT(frame->height, 1) - 1), -frame->linesize[1],
+ frame->data[2] + frame->linesize[2] * (AV_CEIL_RSHIFT(frame->height, 1) - 1), -frame->linesize[2]);
+ } else {
+ av_log(NULL, AV_LOG_ERROR, "Mixed negative and positive linesizes are not supported.\n");
+ return -1;
+ }
+ break;
+ default:
+ if (frame->linesize[0] < 0) {
+ ret = SDL_UpdateTexture(*tex, NULL, frame->data[0] + frame->linesize[0] * (frame->height - 1), -frame->linesize[0]);
+ } else {
+ ret = SDL_UpdateTexture(*tex, NULL, frame->data[0], frame->linesize[0]);
+ }
+ break;
}
return ret;
}
calculate_display_rect(&rect, is->xleft, is->ytop, is->width, is->height, vp->width, vp->height, vp->sar);
if (!vp->uploaded) {
- int sdl_pix_fmt = vp->frame->format == AV_PIX_FMT_YUV420P ? SDL_PIXELFORMAT_YV12 : SDL_PIXELFORMAT_ARGB8888;
- if (realloc_texture(&is->vid_texture, sdl_pix_fmt, vp->frame->width, vp->frame->height, SDL_BLENDMODE_NONE, 0) < 0)
- return;
- if (upload_texture(is->vid_texture, vp->frame, &is->img_convert_ctx) < 0)
+ if (upload_texture(&is->vid_texture, vp->frame, &is->img_convert_ctx) < 0)
return;
vp->uploaded = 1;
vp->flip_v = vp->frame->linesize[0] < 0;
static int configure_video_filters(AVFilterGraph *graph, VideoState *is, const char *vfilters, AVFrame *frame)
{
- static const enum AVPixelFormat pix_fmts[] = { AV_PIX_FMT_YUV420P, AV_PIX_FMT_BGRA, AV_PIX_FMT_NONE };
+ enum AVPixelFormat pix_fmts[FF_ARRAY_ELEMS(sdl_texture_format_map)];
char sws_flags_str[512] = "";
char buffersrc_args[256];
int ret;
AVCodecParameters *codecpar = is->video_st->codecpar;
AVRational fr = av_guess_frame_rate(is->ic, is->video_st, NULL);
AVDictionaryEntry *e = NULL;
+ int i;
+
+ for (i = 0; i < FF_ARRAY_ELEMS(pix_fmts); i++)
+ pix_fmts[i] = sdl_texture_format_map[i].format;
while ((e = av_dict_get(sws_dict, "", e, AV_DICT_IGNORE_SUFFIX))) {
if (!strcmp(e->key, "sws_flags")) {
tb = (AVRational){1, frame->sample_rate};
#if CONFIG_AVFILTER
- dec_channel_layout = get_valid_channel_layout(frame->channel_layout, av_frame_get_channels(frame));
+ dec_channel_layout = get_valid_channel_layout(frame->channel_layout, frame->channels);
reconfigure =
cmp_audio_fmts(is->audio_filter_src.fmt, is->audio_filter_src.channels,
- frame->format, av_frame_get_channels(frame)) ||
+ frame->format, frame->channels) ||
is->audio_filter_src.channel_layout != dec_channel_layout ||
is->audio_filter_src.freq != frame->sample_rate ||
is->auddec.pkt_serial != last_serial;
av_log(NULL, AV_LOG_DEBUG,
"Audio frame changed from rate:%d ch:%d fmt:%s layout:%s serial:%d to rate:%d ch:%d fmt:%s layout:%s serial:%d\n",
is->audio_filter_src.freq, is->audio_filter_src.channels, av_get_sample_fmt_name(is->audio_filter_src.fmt), buf1, last_serial,
- frame->sample_rate, av_frame_get_channels(frame), av_get_sample_fmt_name(frame->format), buf2, is->auddec.pkt_serial);
+ frame->sample_rate, frame->channels, av_get_sample_fmt_name(frame->format), buf2, is->auddec.pkt_serial);
is->audio_filter_src.fmt = frame->format;
- is->audio_filter_src.channels = av_frame_get_channels(frame);
+ is->audio_filter_src.channels = frame->channels;
is->audio_filter_src.channel_layout = dec_channel_layout;
is->audio_filter_src.freq = frame->sample_rate;
last_serial = is->auddec.pkt_serial;
goto the_end;
af->pts = (frame->pts == AV_NOPTS_VALUE) ? NAN : frame->pts * av_q2d(tb);
- af->pos = av_frame_get_pkt_pos(frame);
+ af->pos = frame->pkt_pos;
af->serial = is->auddec.pkt_serial;
af->duration = av_q2d((AVRational){frame->nb_samples, frame->sample_rate});
#endif
duration = (frame_rate.num && frame_rate.den ? av_q2d((AVRational){frame_rate.den, frame_rate.num}) : 0);
pts = (frame->pts == AV_NOPTS_VALUE) ? NAN : frame->pts * av_q2d(tb);
- ret = queue_picture(is, frame, pts, duration, av_frame_get_pkt_pos(frame), is->viddec.pkt_serial);
+ ret = queue_picture(is, frame, pts, duration, frame->pkt_pos, is->viddec.pkt_serial);
av_frame_unref(frame);
#if CONFIG_AVFILTER
}
frame_queue_next(&is->sampq);
} while (af->serial != is->audioq.serial);
- data_size = av_samples_get_buffer_size(NULL, av_frame_get_channels(af->frame),
+ data_size = av_samples_get_buffer_size(NULL, af->frame->channels,
af->frame->nb_samples,
af->frame->format, 1);
dec_channel_layout =
- (af->frame->channel_layout && av_frame_get_channels(af->frame) == av_get_channel_layout_nb_channels(af->frame->channel_layout)) ?
- af->frame->channel_layout : av_get_default_channel_layout(av_frame_get_channels(af->frame));
+ (af->frame->channel_layout && af->frame->channels == av_get_channel_layout_nb_channels(af->frame->channel_layout)) ?
+ af->frame->channel_layout : av_get_default_channel_layout(af->frame->channels);
wanted_nb_samples = synchronize_audio(is, af->frame->nb_samples);
if (af->frame->format != is->audio_src.fmt ||
if (!is->swr_ctx || swr_init(is->swr_ctx) < 0) {
av_log(NULL, AV_LOG_ERROR,
"Cannot create sample rate converter for conversion of %d Hz %s %d channels to %d Hz %s %d channels!\n",
- af->frame->sample_rate, av_get_sample_fmt_name(af->frame->format), av_frame_get_channels(af->frame),
+ af->frame->sample_rate, av_get_sample_fmt_name(af->frame->format), af->frame->channels,
is->audio_tgt.freq, av_get_sample_fmt_name(is->audio_tgt.fmt), is->audio_tgt.channels);
swr_free(&is->swr_ctx);
return -1;
}
is->audio_src.channel_layout = dec_channel_layout;
- is->audio_src.channels = av_frame_get_channels(af->frame);
+ is->audio_src.channels = af->frame->channels;
is->audio_src.freq = af->frame->sample_rate;
is->audio_src.fmt = af->frame->format;
}
int64_t stream_start_time;
int pkt_in_play_range = 0;
AVDictionaryEntry *t;
- AVDictionary **opts;
- int orig_nb_streams;
SDL_mutex *wait_mutex = SDL_CreateMutex();
int scan_all_pmts_set = 0;
int64_t pkt_ts;
av_format_inject_global_side_data(ic);
- opts = setup_find_stream_info_opts(ic, codec_opts);
- orig_nb_streams = ic->nb_streams;
+ if (find_stream_info) {
+ AVDictionary **opts = setup_find_stream_info_opts(ic, codec_opts);
+ int orig_nb_streams = ic->nb_streams;
- err = avformat_find_stream_info(ic, opts);
+ err = avformat_find_stream_info(ic, opts);
- for (i = 0; i < orig_nb_streams; i++)
- av_dict_free(&opts[i]);
- av_freep(&opts);
+ for (i = 0; i < orig_nb_streams; i++)
+ av_dict_free(&opts[i]);
+ av_freep(&opts);
- if (err < 0) {
- av_log(NULL, AV_LOG_WARNING,
- "%s: could not find codec parameters\n", is->filename);
- ret = -1;
- goto fail;
+ if (err < 0) {
+ av_log(NULL, AV_LOG_WARNING,
+ "%s: could not find codec parameters\n", is->filename);
+ ret = -1;
+ goto fail;
+ }
}
if (ic->pb)
}
if (is->queue_attachments_req) {
if (is->video_st && is->video_st->disposition & AV_DISPOSITION_ATTACHED_PIC) {
- AVPacket copy;
- if ((ret = av_copy_packet(&copy, &is->video_st->attached_pic)) < 0)
+ AVPacket copy = { 0 };
+ if ((ret = av_packet_ref(&copy, &is->video_st->attached_pic)) < 0)
goto fail;
packet_queue_put(&is->videoq, &copy);
packet_queue_put_nullpacket(&is->videoq, is->video_stream);
static int dummy;
static const OptionDef options[] = {
-#include "cmdutils_common_opts.h"
+ CMDUTILS_COMMON_OPTIONS
{ "x", HAS_ARG, { .func_arg = opt_width }, "force displayed width", "width" },
{ "y", HAS_ARG, { .func_arg = opt_height }, "force displayed height", "height" },
{ "s", HAS_ARG | OPT_VIDEO, { .func_arg = opt_frame_size }, "set frame size (WxH or abbreviation)", "size" },
{ "scodec", HAS_ARG | OPT_STRING | OPT_EXPERT, { &subtitle_codec_name }, "force subtitle decoder", "decoder_name" },
{ "vcodec", HAS_ARG | OPT_STRING | OPT_EXPERT, { &video_codec_name }, "force video decoder", "decoder_name" },
{ "autorotate", OPT_BOOL, { &autorotate }, "automatically rotate video", "" },
+ { "find_stream_info", OPT_BOOL | OPT_INPUT | OPT_EXPERT, { &find_stream_info },
+ "read and decode the streams to fill missing information with heuristics" },
{ NULL, },
};