#include "libavutil/avassert.h"
#include "libavutil/avstring.h"
+#include "libavutil/common.h"
+#include "libavutil/dict.h"
#include "libavutil/mathematics.h"
#include "libavutil/opt.h"
#include "internal.h"
typedef struct ResampleContext {
+ const AVClass *class;
AVAudioResampleContext *avr;
+ AVDictionary *options;
int64_t next_pts;
+
+ /* set by filter_frame() to signal an output frame to request_frame() */
+ int got_output;
} ResampleContext;
+static av_cold int init(AVFilterContext *ctx, AVDictionary **opts)
+{
+ ResampleContext *s = ctx->priv;
+ const AVClass *avr_class = avresample_get_class();
+ AVDictionaryEntry *e = NULL;
+
+ while ((e = av_dict_get(*opts, "", e, AV_DICT_IGNORE_SUFFIX))) {
+ if (av_opt_find(&avr_class, e->key, NULL, 0,
+ AV_OPT_SEARCH_FAKE_OBJ | AV_OPT_SEARCH_CHILDREN))
+ av_dict_set(&s->options, e->key, e->value, 0);
+ }
+
+ e = NULL;
+ while ((e = av_dict_get(s->options, "", e, AV_DICT_IGNORE_SUFFIX)))
+ av_dict_set(opts, e->key, NULL, 0);
+
+ /* do not allow the user to override basic format options */
+ av_dict_set(&s->options, "in_channel_layout", NULL, 0);
+ av_dict_set(&s->options, "out_channel_layout", NULL, 0);
+ av_dict_set(&s->options, "in_sample_fmt", NULL, 0);
+ av_dict_set(&s->options, "out_sample_fmt", NULL, 0);
+ av_dict_set(&s->options, "in_sample_rate", NULL, 0);
+ av_dict_set(&s->options, "out_sample_rate", NULL, 0);
+
+ return 0;
+}
+
static av_cold void uninit(AVFilterContext *ctx)
{
    ResampleContext *s = ctx->priv;

    if (s->avr) {
        avresample_close(s->avr);
        avresample_free(&s->avr);
    }
+    av_dict_free(&s->options);
}
static int query_formats(AVFilterContext *ctx)
if (inlink->channel_layout == outlink->channel_layout &&
inlink->sample_rate == outlink->sample_rate &&
- inlink->format == outlink->format)
+ (inlink->format == outlink->format ||
+ (av_get_channel_layout_nb_channels(inlink->channel_layout) == 1 &&
+ av_get_channel_layout_nb_channels(outlink->channel_layout) == 1 &&
+ av_get_planar_sample_fmt(inlink->format) ==
+ av_get_planar_sample_fmt(outlink->format))))
return 0;
if (!(s->avr = avresample_alloc_context()))
return AVERROR(ENOMEM);
+ if (s->options) {
+ AVDictionaryEntry *e = NULL;
+ while ((e = av_dict_get(s->options, "", e, AV_DICT_IGNORE_SUFFIX)))
+ av_log(ctx, AV_LOG_VERBOSE, "lavr option: %s=%s\n", e->key, e->value);
+
+ av_opt_set_dict(s->avr, &s->options);
+ }
+
av_opt_set_int(s->avr, "in_channel_layout", inlink ->channel_layout, 0);
av_opt_set_int(s->avr, "out_channel_layout", outlink->channel_layout, 0);
av_opt_set_int(s->avr, "in_sample_fmt", inlink ->format, 0);
{
AVFilterContext *ctx = outlink->src;
ResampleContext *s = ctx->priv;
- int ret = ff_request_frame(ctx->inputs[0]);
+ int ret = 0;
+
+ s->got_output = 0;
+ while (ret >= 0 && !s->got_output)
+ ret = ff_request_frame(ctx->inputs[0]);
/* flush the lavr delay buffer */
if (ret == AVERROR_EOF && s->avr) {
- AVFilterBufferRef *buf;
+ AVFrame *frame;
int nb_samples = av_rescale_rnd(avresample_get_delay(s->avr),
outlink->sample_rate,
ctx->inputs[0]->sample_rate,
if (!nb_samples)
return ret;
- buf = ff_get_audio_buffer(outlink, AV_PERM_WRITE, nb_samples);
- if (!buf)
+ frame = ff_get_audio_buffer(outlink, nb_samples);
+ if (!frame)
return AVERROR(ENOMEM);
- ret = avresample_convert(s->avr, (void**)buf->extended_data,
- buf->linesize[0], nb_samples,
+ ret = avresample_convert(s->avr, frame->extended_data,
+ frame->linesize[0], nb_samples,
NULL, 0, 0);
if (ret <= 0) {
- avfilter_unref_buffer(buf);
+ av_frame_free(&frame);
return (ret == 0) ? AVERROR_EOF : ret;
}
- buf->pts = s->next_pts;
- ff_filter_samples(outlink, buf);
- return 0;
+ frame->pts = s->next_pts;
+ return ff_filter_frame(outlink, frame);
}
return ret;
}
-static void filter_samples(AVFilterLink *inlink, AVFilterBufferRef *buf)
+static int filter_frame(AVFilterLink *inlink, AVFrame *in)
{
AVFilterContext *ctx = inlink->dst;
ResampleContext *s = ctx->priv;
AVFilterLink *outlink = ctx->outputs[0];
+ int ret;
if (s->avr) {
- AVFilterBufferRef *buf_out;
- int delay, nb_samples, ret;
+ AVFrame *out;
+ int delay, nb_samples;
/* maximum possible samples lavr can output */
delay = avresample_get_delay(s->avr);
- nb_samples = av_rescale_rnd(buf->audio->nb_samples + delay,
+ nb_samples = av_rescale_rnd(in->nb_samples + delay,
outlink->sample_rate, inlink->sample_rate,
AV_ROUND_UP);
- buf_out = ff_get_audio_buffer(outlink, AV_PERM_WRITE, nb_samples);
- ret = avresample_convert(s->avr, (void**)buf_out->extended_data,
- buf_out->linesize[0], nb_samples,
- (void**)buf->extended_data, buf->linesize[0],
- buf->audio->nb_samples);
+ out = ff_get_audio_buffer(outlink, nb_samples);
+ if (!out) {
+ ret = AVERROR(ENOMEM);
+ goto fail;
+ }
+
+ ret = avresample_convert(s->avr, out->extended_data, out->linesize[0],
+ nb_samples, in->extended_data, in->linesize[0],
+ in->nb_samples);
+ if (ret <= 0) {
+ av_frame_free(&out);
+ if (ret < 0)
+ goto fail;
+ }
av_assert0(!avresample_available(s->avr));
if (s->next_pts == AV_NOPTS_VALUE) {
- if (buf->pts == AV_NOPTS_VALUE) {
+ if (in->pts == AV_NOPTS_VALUE) {
av_log(ctx, AV_LOG_WARNING, "First timestamp is missing, "
"assuming 0.\n");
s->next_pts = 0;
} else
- s->next_pts = av_rescale_q(buf->pts, inlink->time_base,
+ s->next_pts = av_rescale_q(in->pts, inlink->time_base,
outlink->time_base);
}
if (ret > 0) {
- buf_out->audio->nb_samples = ret;
- if (buf->pts != AV_NOPTS_VALUE) {
- buf_out->pts = av_rescale_q(buf->pts, inlink->time_base,
+ out->nb_samples = ret;
+ if (in->pts != AV_NOPTS_VALUE) {
+ out->pts = av_rescale_q(in->pts, inlink->time_base,
outlink->time_base) -
av_rescale(delay, outlink->sample_rate,
inlink->sample_rate);
} else
- buf_out->pts = s->next_pts;
+ out->pts = s->next_pts;
- s->next_pts = buf_out->pts + buf_out->audio->nb_samples;
+ s->next_pts = out->pts + out->nb_samples;
- ff_filter_samples(outlink, buf_out);
+ ret = ff_filter_frame(outlink, out);
+ s->got_output = 1;
}
- avfilter_unref_buffer(buf);
- } else
- ff_filter_samples(outlink, buf);
+
+fail:
+ av_frame_free(&in);
+ } else {
+ in->format = outlink->format;
+ ret = ff_filter_frame(outlink, in);
+ s->got_output = 1;
+ }
+
+ return ret;
+}
+
+static const AVClass *resample_child_class_next(const AVClass *prev)
+{
+ return prev ? NULL : avresample_get_class();
+}
-AVFilter avfilter_af_resample = {
+static void *resample_child_next(void *obj, void *prev)
+{
+ ResampleContext *s = obj;
+ return prev ? NULL : s->avr;
+}
+
+static const AVClass resample_class = {
+ .class_name = "resample",
+ .item_name = av_default_item_name,
+ .version = LIBAVUTIL_VERSION_INT,
+ .child_class_next = resample_child_class_next,
+ .child_next = resample_child_next,
+};
+
+static const AVFilterPad avfilter_af_resample_inputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_AUDIO,
+ .filter_frame = filter_frame,
+ },
+ { NULL }
+};
+
+static const AVFilterPad avfilter_af_resample_outputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_AUDIO,
+ .config_props = config_output,
+ .request_frame = request_frame
+ },
+ { NULL }
+};
+
+AVFilter ff_af_resample = {
.name = "resample",
.description = NULL_IF_CONFIG_SMALL("Audio resampling and conversion."),
.priv_size = sizeof(ResampleContext),
+ .priv_class = &resample_class,
+ .init_dict = init,
.uninit = uninit,
.query_formats = query_formats,
- .inputs = (const AVFilterPad[]) {{ .name = "default",
- .type = AVMEDIA_TYPE_AUDIO,
- .filter_samples = filter_samples,
- .min_perms = AV_PERM_READ },
- { .name = NULL}},
- .outputs = (const AVFilterPad[]) {{ .name = "default",
- .type = AVMEDIA_TYPE_AUDIO,
- .config_props = config_output,
- .request_frame = request_frame },
- { .name = NULL}},
+ .inputs = avfilter_af_resample_inputs,
+ .outputs = avfilter_af_resample_outputs,
};