X-Git-Url: https://git.sesse.net/?a=blobdiff_plain;f=libavfilter%2Faf_resample.c;h=f82a970bb3d30bb31be728e200bd76523337fcbb;hb=3d8c80b611aa1e2f800dd9c8d8f350407f95c042;hp=eb2d2f9e21839899c7b45a38c0439ffcb78077de;hpb=1d9c2dc89a2302076a68b4a3ae3639abbb1a502d;p=ffmpeg

diff --git a/libavfilter/af_resample.c b/libavfilter/af_resample.c
index eb2d2f9e218..f82a970bb3d 100644
--- a/libavfilter/af_resample.c
+++ b/libavfilter/af_resample.c
@@ -25,6 +25,7 @@
 #include "libavutil/avassert.h"
 #include "libavutil/avstring.h"
 #include "libavutil/common.h"
+#include "libavutil/dict.h"
 #include "libavutil/mathematics.h"
 #include "libavutil/opt.h"
 
@@ -37,13 +38,37 @@
 
 typedef struct ResampleContext {
     AVAudioResampleContext *avr;
+    AVDictionary *options;
 
     int64_t next_pts;
 
-    /* set by filter_samples() to signal an output frame to request_frame() */
+    /* set by filter_frame() to signal an output frame to request_frame() */
     int got_output;
 } ResampleContext;
 
+static av_cold int init(AVFilterContext *ctx, const char *args)
+{
+    ResampleContext *s = ctx->priv;
+
+    if (args) {
+        int ret = av_dict_parse_string(&s->options, args, "=", ":", 0);
+        if (ret < 0) {
+            av_log(ctx, AV_LOG_ERROR, "error setting option string: %s\n", args);
+            return ret;
+        }
+
+        /* do not allow the user to override basic format options */
+        av_dict_set(&s->options, "in_channel_layout",  NULL, 0);
+        av_dict_set(&s->options, "out_channel_layout", NULL, 0);
+        av_dict_set(&s->options, "in_sample_fmt",      NULL, 0);
+        av_dict_set(&s->options, "out_sample_fmt",     NULL, 0);
+        av_dict_set(&s->options, "in_sample_rate",     NULL, 0);
+        av_dict_set(&s->options, "out_sample_rate",    NULL, 0);
+    }
+
+    return 0;
+}
+
 static av_cold void uninit(AVFilterContext *ctx)
 {
     ResampleContext *s = ctx->priv;
@@ -52,6 +77,7 @@ static av_cold void uninit(AVFilterContext *ctx)
         avresample_close(s->avr);
         avresample_free(&s->avr);
     }
+    av_dict_free(&s->options);
 }
 
 static int query_formats(AVFilterContext *ctx)
@@ -93,12 +119,24 @@
 
     if (inlink->channel_layout == outlink->channel_layout &&
         inlink->sample_rate    == outlink->sample_rate    &&
-        inlink->format         == outlink->format)
+        (inlink->format == outlink->format ||
+        (av_get_channel_layout_nb_channels(inlink->channel_layout)  == 1 &&
+         av_get_channel_layout_nb_channels(outlink->channel_layout) == 1 &&
+         av_get_planar_sample_fmt(inlink->format) ==
+         av_get_planar_sample_fmt(outlink->format))))
         return 0;
 
     if (!(s->avr = avresample_alloc_context()))
         return AVERROR(ENOMEM);
 
+    if (s->options) {
+        AVDictionaryEntry *e = NULL;
+        while ((e = av_dict_get(s->options, "", e, AV_DICT_IGNORE_SUFFIX)))
+            av_log(ctx, AV_LOG_VERBOSE, "lavr option: %s=%s\n", e->key, e->value);
+
+        av_opt_set_dict(s->avr, &s->options);
+    }
+
     av_opt_set_int(s->avr,  "in_channel_layout", inlink ->channel_layout, 0);
     av_opt_set_int(s->avr, "out_channel_layout", outlink->channel_layout, 0);
     av_opt_set_int(s->avr,  "in_sample_fmt",     inlink ->format,         0);
@@ -136,7 +174,7 @@
 
     /* flush the lavr delay buffer */
     if (ret == AVERROR_EOF && s->avr) {
-        AVFilterBufferRef *buf;
+        AVFrame *frame;
         int nb_samples = av_rescale_rnd(avresample_get_delay(s->avr),
                                         outlink->sample_rate,
                                         ctx->inputs[0]->sample_rate,
@@ -145,25 +183,25 @@
         if (!nb_samples)
             return ret;
 
-        buf = ff_get_audio_buffer(outlink, AV_PERM_WRITE, nb_samples);
-        if (!buf)
+        frame = ff_get_audio_buffer(outlink, nb_samples);
+        if (!frame)
             return AVERROR(ENOMEM);
 
-        ret = avresample_convert(s->avr, (void**)buf->extended_data,
-                                 buf->linesize[0], nb_samples,
+        ret = avresample_convert(s->avr, frame->extended_data,
+                                 frame->linesize[0], nb_samples,
                                  NULL, 0, 0);
         if (ret <= 0) {
-            avfilter_unref_buffer(buf);
+            av_frame_free(&frame);
             return (ret == 0) ? AVERROR_EOF : ret;
         }
 
-        buf->pts = s->next_pts;
-        return ff_filter_samples(outlink, buf);
+        frame->pts = s->next_pts;
+        return ff_filter_frame(outlink, frame);
     }
 
     return ret;
 }
 
-static int filter_samples(AVFilterLink *inlink, AVFilterBufferRef *buf)
+static int filter_frame(AVFilterLink *inlink, AVFrame *in)
 {
     AVFilterContext *ctx = inlink->dst;
     ResampleContext   *s = ctx->priv;
@@ -171,84 +209,97 @@
     int ret;
 
     if (s->avr) {
-        AVFilterBufferRef *buf_out;
+        AVFrame *out;
         int delay, nb_samples;
 
         /* maximum possible samples lavr can output */
         delay      = avresample_get_delay(s->avr);
-        nb_samples = av_rescale_rnd(buf->audio->nb_samples + delay,
+        nb_samples = av_rescale_rnd(in->nb_samples + delay,
                                     outlink->sample_rate, inlink->sample_rate,
                                     AV_ROUND_UP);
 
-        buf_out = ff_get_audio_buffer(outlink, AV_PERM_WRITE, nb_samples);
-        if (!buf_out) {
+        out = ff_get_audio_buffer(outlink, nb_samples);
+        if (!out) {
            ret = AVERROR(ENOMEM);
            goto fail;
        }
 
-        ret = avresample_convert(s->avr, (void**)buf_out->extended_data,
-                                 buf_out->linesize[0], nb_samples,
-                                 (void**)buf->extended_data, buf->linesize[0],
-                                 buf->audio->nb_samples);
-        if (ret < 0) {
-            avfilter_unref_buffer(buf_out);
-            goto fail;
+        ret = avresample_convert(s->avr, out->extended_data, out->linesize[0],
+                                 nb_samples, in->extended_data, in->linesize[0],
+                                 in->nb_samples);
+        if (ret <= 0) {
+            av_frame_free(&out);
+            if (ret < 0)
+                goto fail;
         }
 
         av_assert0(!avresample_available(s->avr));
 
         if (s->next_pts == AV_NOPTS_VALUE) {
-            if (buf->pts == AV_NOPTS_VALUE) {
+            if (in->pts == AV_NOPTS_VALUE) {
                 av_log(ctx, AV_LOG_WARNING, "First timestamp is missing, "
                        "assuming 0.\n");
                 s->next_pts = 0;
             } else
-                s->next_pts = av_rescale_q(buf->pts, inlink->time_base,
+                s->next_pts = av_rescale_q(in->pts, inlink->time_base,
                                            outlink->time_base);
         }
 
         if (ret > 0) {
-            buf_out->audio->nb_samples = ret;
-            if (buf->pts != AV_NOPTS_VALUE) {
-                buf_out->pts = av_rescale_q(buf->pts, inlink->time_base,
+            out->nb_samples = ret;
+            if (in->pts != AV_NOPTS_VALUE) {
+                out->pts = av_rescale_q(in->pts, inlink->time_base,
                                             outlink->time_base) -
                               av_rescale(delay, outlink->sample_rate,
                                          inlink->sample_rate);
             } else
-                buf_out->pts = s->next_pts;
+                out->pts = s->next_pts;
 
-            s->next_pts = buf_out->pts + buf_out->audio->nb_samples;
+            s->next_pts = out->pts + out->nb_samples;
 
-            ret = ff_filter_samples(outlink, buf_out);
+            ret = ff_filter_frame(outlink, out);
             s->got_output = 1;
         }
 
 fail:
-        avfilter_unref_buffer(buf);
+        av_frame_free(&in);
     } else {
-        ret = ff_filter_samples(outlink, buf);
+        in->format = outlink->format;
+        ret = ff_filter_frame(outlink, in);
         s->got_output = 1;
     }
 
     return ret;
 }
 
+static const AVFilterPad avfilter_af_resample_inputs[] = {
+    {
+        .name         = "default",
+        .type         = AVMEDIA_TYPE_AUDIO,
+        .filter_frame = filter_frame,
+    },
+    { NULL }
+};
+
+static const AVFilterPad avfilter_af_resample_outputs[] = {
+    {
+        .name          = "default",
+        .type          = AVMEDIA_TYPE_AUDIO,
+        .config_props  = config_output,
+        .request_frame = request_frame
+    },
+    { NULL }
+};
+
 AVFilter avfilter_af_resample = {
     .name          = "resample",
     .description   = NULL_IF_CONFIG_SMALL("Audio resampling and conversion."),
     .priv_size     = sizeof(ResampleContext),
+    .init          = init,
     .uninit        = uninit,
     .query_formats = query_formats,
-    .inputs        = (const AVFilterPad[]) {{ .name            = "default",
-                                              .type            = AVMEDIA_TYPE_AUDIO,
-                                              .filter_samples  = filter_samples,
-                                              .min_perms       = AV_PERM_READ },
-                                            { .name = NULL}},
-    .outputs       = (const AVFilterPad[]) {{ .name            = "default",
-                                              .type            = AVMEDIA_TYPE_AUDIO,
-                                              .config_props    = config_output,
-                                              .request_frame   = request_frame },
-                                            { .name = NULL}},
+    .inputs        = avfilter_af_resample_inputs,
+    .outputs       = avfilter_af_resample_outputs,
 };
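The snippet below is not part of the patch; it is a minimal standalone sketch of the option handling that the new init()/config_output() code introduces: the filter's argument string is parsed into an AVDictionary with '=' and ':' as separators, the format-related keys that the filter reserves for itself are deleted again, and whatever survives is what config_output() hands to lavr via av_opt_set_dict(). The option names in the sample string (filter_size, linear_interp) are only illustrative libavresample options.

/* opts_sketch.c -- illustrative only, not part of af_resample.c */
#include <stdio.h>
#include "libavutil/dict.h"

int main(void)
{
    AVDictionary      *opts = NULL;
    AVDictionaryEntry *e    = NULL;

    /* same separators as the av_dict_parse_string() call in init() */
    int ret = av_dict_parse_string(&opts,
                                   "filter_size=32:linear_interp=1:in_sample_rate=96000",
                                   "=", ":", 0);
    if (ret < 0)
        return 1;

    /* init() deletes the basic format keys so the user cannot override them */
    av_dict_set(&opts, "in_sample_rate", NULL, 0);

    /* config_output() logs the remaining entries before av_opt_set_dict() */
    while ((e = av_dict_get(opts, "", e, AV_DICT_IGNORE_SUFFIX)))
        printf("lavr option: %s=%s\n", e->key, e->value);
    /* prints filter_size=32 and linear_interp=1; in_sample_rate was dropped */

    av_dict_free(&opts);
    return 0;
}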
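Also not part of the patch: a small sketch of the sample-count and timestamp arithmetic used in filter_frame(). The numbers are made up, and it assumes, as the filter effectively does for audio links, that the output time base is 1/output_sample_rate, so one pts tick equals one output sample; av_rescale_rnd() gives the worst-case number of output samples, and the output pts is the rescaled input pts pulled back by the lavr delay expressed in output samples.

/* pts_sketch.c -- illustrative only, not part of af_resample.c */
#include <inttypes.h>
#include <stdio.h>
#include "libavutil/mathematics.h"
#include "libavutil/rational.h"

int main(void)
{
    int in_rate       = 44100, out_rate = 48000;
    int in_nb_samples = 1024;           /* samples in the incoming frame       */
    int delay         = 16;             /* hypothetical avresample_get_delay() */
    AVRational in_tb  = { 1, in_rate };
    AVRational out_tb = { 1, out_rate };
    int64_t in_pts    = 44100;          /* one second into the stream */

    /* upper bound on what avresample_convert() may produce, rounded up */
    int64_t max_out = av_rescale_rnd(in_nb_samples + delay, out_rate, in_rate,
                                     AV_ROUND_UP);

    /* rescale the input pts to the output time base, then step back by the
     * buffered delay converted to output samples, as filter_frame() does */
    int64_t out_pts = av_rescale_q(in_pts, in_tb, out_tb) -
                      av_rescale(delay, out_rate, in_rate);

    printf("max_out=%"PRId64" out_pts=%"PRId64"\n", max_out, out_pts);
    return 0;
}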