diff --git a/libavfilter/af_resample.c b/libavfilter/af_resample.c
index c51f9d243b33570ac5fcf53a7ab85eed7546fa1d..b125d25d20b0342d06c46c86ef0a0ad4b44cff47 100644
--- a/libavfilter/af_resample.c
+++ b/libavfilter/af_resample.c
@@ -25,6 +25,7 @@
 #include "libavutil/avassert.h"
 #include "libavutil/avstring.h"
 #include "libavutil/common.h"
+#include "libavutil/dict.h"
 #include "libavutil/mathematics.h"
 #include "libavutil/opt.h"
 
 #include "internal.h"
 
 typedef struct ResampleContext {
+    const AVClass *class;
     AVAudioResampleContext *avr;
+    AVDictionary *options;
 
     int64_t next_pts;
+    int64_t next_in_pts;
 
-    /* set by filter_samples() to signal an output frame to request_frame() */
+    /* set by filter_frame() to signal an output frame to request_frame() */
     int got_output;
 } ResampleContext;
 
+static av_cold int init(AVFilterContext *ctx, AVDictionary **opts)
+{
+    ResampleContext *s = ctx->priv;
+    const AVClass *avr_class = avresample_get_class();
+    AVDictionaryEntry *e = NULL;
+
+    while ((e = av_dict_get(*opts, "", e, AV_DICT_IGNORE_SUFFIX))) {
+        if (av_opt_find(&avr_class, e->key, NULL, 0,
+                        AV_OPT_SEARCH_FAKE_OBJ | AV_OPT_SEARCH_CHILDREN))
+            av_dict_set(&s->options, e->key, e->value, 0);
+    }
+
+    e = NULL;
+    while ((e = av_dict_get(s->options, "", e, AV_DICT_IGNORE_SUFFIX)))
+        av_dict_set(opts, e->key, NULL, 0);
+
+    /* do not allow the user to override basic format options */
+    av_dict_set(&s->options,  "in_channel_layout", NULL, 0);
+    av_dict_set(&s->options, "out_channel_layout", NULL, 0);
+    av_dict_set(&s->options,  "in_sample_fmt",     NULL, 0);
+    av_dict_set(&s->options, "out_sample_fmt",     NULL, 0);
+    av_dict_set(&s->options,  "in_sample_rate",    NULL, 0);
+    av_dict_set(&s->options, "out_sample_rate",    NULL, 0);
+
+    return 0;
+}
+
 static av_cold void uninit(AVFilterContext *ctx)
 {
     ResampleContext *s = ctx->priv;
@@ -52,6 +83,7 @@ static av_cold void uninit(AVFilterContext *ctx)
         avresample_close(s->avr);
         avresample_free(&s->avr);
     }
+    av_dict_free(&s->options);
 }
 
 static int query_formats(AVFilterContext *ctx)
@@ -93,12 +125,27 @@ static int config_output(AVFilterLink *outlink)
 
     if (inlink->channel_layout == outlink->channel_layout &&
         inlink->sample_rate    == outlink->sample_rate    &&
-        inlink->format         == outlink->format)
+        (inlink->format        == outlink->format ||
+        (av_get_channel_layout_nb_channels(inlink->channel_layout)  == 1 &&
+         av_get_channel_layout_nb_channels(outlink->channel_layout) == 1 &&
+         av_get_planar_sample_fmt(inlink->format) ==
+         av_get_planar_sample_fmt(outlink->format))))
         return 0;
 
     if (!(s->avr = avresample_alloc_context()))
         return AVERROR(ENOMEM);
 
+    if (s->options) {
+        int ret;
+        AVDictionaryEntry *e = NULL;
+        while ((e = av_dict_get(s->options, "", e, AV_DICT_IGNORE_SUFFIX)))
+            av_log(ctx, AV_LOG_VERBOSE, "lavr option: %s=%s\n", e->key, e->value);
+
+        ret = av_opt_set_dict(s->avr, &s->options);
+        if (ret < 0)
+            return ret;
+    }
+
     av_opt_set_int(s->avr,  "in_channel_layout", inlink ->channel_layout, 0);
     av_opt_set_int(s->avr, "out_channel_layout", outlink->channel_layout, 0);
     av_opt_set_int(s->avr,  "in_sample_fmt",     inlink ->format,         0);
@@ -111,6 +158,7 @@ static int config_output(AVFilterLink *outlink)
 
     outlink->time_base = (AVRational){ 1, outlink->sample_rate };
     s->next_pts        = AV_NOPTS_VALUE;
+    s->next_in_pts     = AV_NOPTS_VALUE;
 
     av_get_channel_layout_string(buf1, sizeof(buf1),
                                  -1, inlink ->channel_layout);
@@ -136,34 +184,32 @@ static int request_frame(AVFilterLink *outlink)
 
     /* flush the lavr delay buffer */
     if (ret == AVERROR_EOF && s->avr) {
-        AVFilterBufferRef *buf;
-        int nb_samples = av_rescale_rnd(avresample_get_delay(s->avr),
-                                        outlink->sample_rate,
-                                        ctx->inputs[0]->sample_rate,
-                                        AV_ROUND_UP);
+        AVFrame *frame;
+        int nb_samples = avresample_get_out_samples(s->avr, 0);
 
         if (!nb_samples)
             return ret;
 
-        buf = ff_get_audio_buffer(outlink, AV_PERM_WRITE, nb_samples);
-        if (!buf)
+        frame = ff_get_audio_buffer(outlink, nb_samples);
+        if (!frame)
             return AVERROR(ENOMEM);
 
-        ret = avresample_convert(s->avr, buf->extended_data,
-                                 buf->linesize[0], nb_samples,
+        ret = avresample_convert(s->avr, frame->extended_data,
+                                 frame->linesize[0], nb_samples,
                                  NULL, 0, 0);
         if (ret <= 0) {
-            avfilter_unref_buffer(buf);
+            av_frame_free(&frame);
             return (ret == 0) ? AVERROR_EOF : ret;
         }
 
-        buf->pts = s->next_pts;
-        return ff_filter_samples(outlink, buf);
+        frame->nb_samples = ret;
+        frame->pts = s->next_pts;
+        return ff_filter_frame(outlink, frame);
     }
     return ret;
 }
 
-static int filter_samples(AVFilterLink *inlink, AVFilterBufferRef *buf)
+static int filter_frame(AVFilterLink *inlink, AVFrame *in)
 {
     AVFilterContext  *ctx = inlink->dst;
     ResampleContext    *s = ctx->priv;
@@ -171,84 +217,129 @@ static int filter_samples(AVFilterLink *inlink, AVFilterBufferRef *buf)
     int ret;
 
     if (s->avr) {
-        AVFilterBufferRef *buf_out;
+        AVFrame *out;
         int delay, nb_samples;
 
         /* maximum possible samples lavr can output */
         delay      = avresample_get_delay(s->avr);
-        nb_samples = av_rescale_rnd(buf->audio->nb_samples + delay,
-                                    outlink->sample_rate, inlink->sample_rate,
-                                    AV_ROUND_UP);
+        nb_samples = avresample_get_out_samples(s->avr, in->nb_samples);
 
-        buf_out = ff_get_audio_buffer(outlink, AV_PERM_WRITE, nb_samples);
-        if (!buf_out) {
+        out = ff_get_audio_buffer(outlink, nb_samples);
+        if (!out) {
             ret = AVERROR(ENOMEM);
             goto fail;
         }
 
-        ret     = avresample_convert(s->avr, buf_out->extended_data,
-                                     buf_out->linesize[0], nb_samples,
-                                     buf->extended_data, buf->linesize[0],
-                                     buf->audio->nb_samples);
-        if (ret < 0) {
-            avfilter_unref_buffer(buf_out);
-            goto fail;
+        ret = avresample_convert(s->avr, out->extended_data, out->linesize[0],
+                                 nb_samples, in->extended_data, in->linesize[0],
+                                 in->nb_samples);
+        if (ret <= 0) {
+            av_frame_free(&out);
+            if (ret < 0)
+                goto fail;
         }
 
         av_assert0(!avresample_available(s->avr));
 
         if (s->next_pts == AV_NOPTS_VALUE) {
-            if (buf->pts == AV_NOPTS_VALUE) {
+            if (in->pts == AV_NOPTS_VALUE) {
                 av_log(ctx, AV_LOG_WARNING, "First timestamp is missing, "
                        "assuming 0.\n");
                 s->next_pts = 0;
             } else
-                s->next_pts = av_rescale_q(buf->pts, inlink->time_base,
+                s->next_pts = av_rescale_q(in->pts, inlink->time_base,
                                            outlink->time_base);
         }
 
         if (ret > 0) {
-            buf_out->audio->nb_samples = ret;
-            if (buf->pts != AV_NOPTS_VALUE) {
-                buf_out->pts = av_rescale_q(buf->pts, inlink->time_base,
+            out->nb_samples = ret;
+
+            ret = av_frame_copy_props(out, in);
+            if (ret < 0) {
+                av_frame_free(&out);
+                goto fail;
+            }
+
+            out->sample_rate = outlink->sample_rate;
+            /* Only convert in->pts if there is a discontinuous jump.
+               This ensures that out->pts tracks the number of samples actually
+               output by the resampler in the absence of such a jump.
+               Otherwise, the rounding in av_rescale_q() and av_rescale()
+               causes off-by-1 errors. */
+            if (in->pts != AV_NOPTS_VALUE && in->pts != s->next_in_pts) {
+                out->pts = av_rescale_q(in->pts, inlink->time_base,
                                             outlink->time_base) -
                                av_rescale(delay, outlink->sample_rate,
                                           inlink->sample_rate);
             } else
-                buf_out->pts = s->next_pts;
+                out->pts = s->next_pts;
 
-            s->next_pts = buf_out->pts + buf_out->audio->nb_samples;
+            s->next_pts = out->pts + out->nb_samples;
+            s->next_in_pts = in->pts + in->nb_samples;
 
-            ret = ff_filter_samples(outlink, buf_out);
+            ret = ff_filter_frame(outlink, out);
             s->got_output = 1;
         }
 
 fail:
-        avfilter_unref_buffer(buf);
+        av_frame_free(&in);
     } else {
-        ret = ff_filter_samples(outlink, buf);
+        in->format = outlink->format;
+        ret = ff_filter_frame(outlink, in);
         s->got_output = 1;
     }
 
     return ret;
 }
 
-AVFilter avfilter_af_resample = {
+static const AVClass *resample_child_class_next(const AVClass *prev)
+{
+    return prev ? NULL : avresample_get_class();
+}
+
+static void *resample_child_next(void *obj, void *prev)
+{
+    ResampleContext *s = obj;
+    return prev ? NULL : s->avr;
+}
+
+static const AVClass resample_class = {
+    .class_name       = "resample",
+    .item_name        = av_default_item_name,
+    .version          = LIBAVUTIL_VERSION_INT,
+    .child_class_next = resample_child_class_next,
+    .child_next       = resample_child_next,
+};
+
+static const AVFilterPad avfilter_af_resample_inputs[] = {
+    {
+        .name           = "default",
+        .type           = AVMEDIA_TYPE_AUDIO,
+        .filter_frame   = filter_frame,
+    },
+    { NULL }
+};
+
+static const AVFilterPad avfilter_af_resample_outputs[] = {
+    {
+        .name          = "default",
+        .type          = AVMEDIA_TYPE_AUDIO,
+        .config_props  = config_output,
+        .request_frame = request_frame
+    },
+    { NULL }
+};
+
+AVFilter ff_af_resample = {
     .name          = "resample",
     .description   = NULL_IF_CONFIG_SMALL("Audio resampling and conversion."),
     .priv_size     = sizeof(ResampleContext),
+    .priv_class    = &resample_class,
 
+    .init_dict      = init,
     .uninit         = uninit,
     .query_formats  = query_formats,
 
-    .inputs    = (const AVFilterPad[]) {{ .name            = "default",
-                                          .type            = AVMEDIA_TYPE_AUDIO,
-                                          .filter_samples  = filter_samples,
-                                          .min_perms       = AV_PERM_READ },
-                                        { .name = NULL}},
-    .outputs   = (const AVFilterPad[]) {{ .name          = "default",
-                                          .type          = AVMEDIA_TYPE_AUDIO,
-                                          .config_props  = config_output,
-                                          .request_frame = request_frame },
-                                        { .name = NULL}},
+    .inputs    = avfilter_af_resample_inputs,
+    .outputs   = avfilter_af_resample_outputs,
 };