diff --git a/libavfilter/af_join.c b/libavfilter/af_join.c
index bf939feeaa8d6e8cde6857513b997d175e607684..e3dc5a56188978b4bce10e7b5cd4d58683a9d1da 100644
--- a/libavfilter/af_join.c
+++ b/libavfilter/af_join.c
@@ -25,8 +25,8 @@
  * a single output
  */
 
-#include "libavutil/audioconvert.h"
 #include "libavutil/avassert.h"
+#include "libavutil/channel_layout.h"
 #include "libavutil/common.h"
 #include "libavutil/opt.h"
 
@@ -56,24 +56,14 @@ typedef struct JoinContext {
     /**
      * Temporary storage for input frames, until we get one on each input.
      */
-    AVFilterBufferRef **input_frames;
+    AVFrame **input_frames;
 
     /**
-     *  Temporary storage for data pointers, for assembling the output buffer.
+     *  Temporary storage for buffer references, for assembling the output frame.
      */
-    uint8_t **data;
+    AVBufferRef **buffers;
 } JoinContext;
 
-/**
- * To avoid copying the data from input buffers, this filter creates
- * a custom output buffer that stores references to all inputs and
- * unrefs them on free.
- */
-typedef struct JoinBufferPriv {
-    AVFilterBufferRef **in_buffers;
-    int              nb_in_buffers;
-} JoinBufferPriv;
-
 #define OFFSET(x) offsetof(JoinContext, x)
 #define A AV_OPT_FLAG_AUDIO_PARAM
 static const AVOption join_options[] = {
@@ -93,7 +83,7 @@ static const AVClass join_class = {
     .version    = LIBAVUTIL_VERSION_INT,
 };
 
-static int filter_samples(AVFilterLink *link, AVFilterBufferRef *buf)
+static int filter_frame(AVFilterLink *link, AVFrame *frame)
 {
     AVFilterContext *ctx = link->dst;
     JoinContext       *s = ctx->priv;
@@ -104,7 +94,7 @@ static int filter_samples(AVFilterLink *link, AVFilterBufferRef *buf)
             break;
     av_assert0(i < ctx->nb_inputs);
     av_assert0(!s->input_frames[i]);
-    s->input_frames[i] = buf;
+    s->input_frames[i] = frame;
 
     return 0;
 }
@@ -112,14 +102,23 @@ static int filter_samples(AVFilterLink *link, AVFilterBufferRef *buf)
 static int parse_maps(AVFilterContext *ctx)
 {
     JoinContext *s = ctx->priv;
+    char separator = '|';
     char *cur      = s->map;
 
+#if FF_API_OLD_FILTER_OPTS
+    if (cur && strchr(cur, ',')) {
+        av_log(ctx, AV_LOG_WARNING, "This syntax is deprecated, use '|' to "
+               "separate the mappings.\n");
+        separator = ',';
+    }
+#endif
+
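+    /* each mapping has the form input_idx.in_channel-out_channel, e.g.
+     * "0.0-FL|1.0-FR" sends the first channel of each of two inputs to
+     * the front-left and front-right output channels */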
     while (cur && *cur) {
         char *sep, *next, *p;
         uint64_t in_channel = 0, out_channel = 0;
         int input_idx, out_ch_idx, in_ch_idx;
 
-        next = strchr(cur, ',');
+        next = strchr(cur, separator);
         if (next)
             *next++ = 0;
 
@@ -187,18 +186,11 @@ static int parse_maps(AVFilterContext *ctx)
     return 0;
 }
 
-static int join_init(AVFilterContext *ctx, const char *args)
+static av_cold int join_init(AVFilterContext *ctx)
 {
     JoinContext *s = ctx->priv;
     int ret, i;
 
-    s->class = &join_class;
-    av_opt_set_defaults(s);
-    if ((ret = av_set_options_string(s, args, "=", ":")) < 0) {
-        av_log(ctx, AV_LOG_ERROR, "Error parsing options string '%s'.\n", args);
-        return ret;
-    }
-
     if (!(s->channel_layout = av_get_channel_layout(s->channel_layout_str))) {
         av_log(ctx, AV_LOG_ERROR, "Error parsing channel layout '%s'.\n",
                s->channel_layout_str);
@@ -208,9 +200,9 @@ static int join_init(AVFilterContext *ctx, const char *args)
 
     s->nb_channels  = av_get_channel_layout_nb_channels(s->channel_layout);
     s->channels     = av_mallocz(sizeof(*s->channels) * s->nb_channels);
-    s->data         = av_mallocz(sizeof(*s->data)     * s->nb_channels);
+    s->buffers      = av_mallocz(sizeof(*s->buffers)  * s->nb_channels);
     s->input_frames = av_mallocz(sizeof(*s->input_frames) * s->inputs);
-    if (!s->channels || !s->data || !s->input_frames) {
+    if (!s->channels || !s->buffers || !s->input_frames) {
         ret = AVERROR(ENOMEM);
         goto fail;
     }
@@ -230,7 +222,7 @@ static int join_init(AVFilterContext *ctx, const char *args)
         snprintf(name, sizeof(name), "input%d", i);
         pad.type           = AVMEDIA_TYPE_AUDIO;
         pad.name           = av_strdup(name);
-        pad.filter_samples = filter_samples;
+        pad.filter_frame   = filter_frame;
 
         pad.needs_fifo = 1;
 
@@ -242,18 +234,18 @@ fail:
     return ret;
 }
 
-static void join_uninit(AVFilterContext *ctx)
+static av_cold void join_uninit(AVFilterContext *ctx)
 {
     JoinContext *s = ctx->priv;
     int i;
 
     for (i = 0; i < ctx->nb_inputs; i++) {
         av_freep(&ctx->input_pads[i].name);
-        avfilter_unref_bufferp(&s->input_frames[i]);
+        av_frame_free(&s->input_frames[i]);
     }
 
     av_freep(&s->channels);
-    av_freep(&s->data);
+    av_freep(&s->buffers);
     av_freep(&s->input_frames);
 }
 
@@ -395,34 +387,14 @@ fail:
     return ret;
 }
 
-static void join_free_buffer(AVFilterBuffer *buf)
-{
-    JoinBufferPriv *priv = buf->priv;
-
-    if (priv) {
-        int i;
-
-        for (i = 0; i < priv->nb_in_buffers; i++)
-            avfilter_unref_bufferp(&priv->in_buffers[i]);
-
-        av_freep(&priv->in_buffers);
-        av_freep(&buf->priv);
-    }
-
-    if (buf->extended_data != buf->data)
-        av_freep(&buf->extended_data);
-    av_freep(&buf);
-}
-
 static int join_request_frame(AVFilterLink *outlink)
 {
     AVFilterContext *ctx = outlink->src;
     JoinContext *s       = ctx->priv;
-    AVFilterBufferRef *buf;
-    JoinBufferPriv *priv;
+    AVFrame *frame;
     int linesize   = INT_MAX;
-    int perms      = ~0;
     int nb_samples = 0;
+    int nb_buffers = 0;
     int i, j, ret;
 
     /* get a frame on each input */
@@ -435,70 +407,121 @@ static int join_request_frame(AVFilterLink *outlink)
 
         /* request the same number of samples on all inputs */
         if (i == 0) {
-            nb_samples = s->input_frames[0]->audio->nb_samples;
+            nb_samples = s->input_frames[0]->nb_samples;
 
             for (j = 1; !i && j < ctx->nb_inputs; j++)
                 ctx->inputs[j]->request_samples = nb_samples;
         }
     }
 
+    /* setup the output frame */
+    frame = av_frame_alloc();
+    if (!frame)
+        return AVERROR(ENOMEM);
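+    /* layouts with more channels than AVFrame.data can hold need a
+     * separately allocated extended_data array for the plane pointers */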
+    if (s->nb_channels > FF_ARRAY_ELEMS(frame->data)) {
+        frame->extended_data = av_mallocz(s->nb_channels *
+                                          sizeof(*frame->extended_data));
+        if (!frame->extended_data) {
+            ret = AVERROR(ENOMEM);
+            goto fail;
+        }
+    }
+
+    /* copy the data pointers */
     for (i = 0; i < s->nb_channels; i++) {
         ChannelMap *ch = &s->channels[i];
-        AVFilterBufferRef *cur_buf = s->input_frames[ch->input];
+        AVFrame *cur   = s->input_frames[ch->input];
+        AVBufferRef *buf;
 
-        s->data[i] = cur_buf->extended_data[ch->in_channel_idx];
-        linesize   = FFMIN(linesize, cur_buf->linesize[0]);
-        perms     &= cur_buf->perms;
+        frame->extended_data[i] = cur->extended_data[ch->in_channel_idx];
+        linesize = FFMIN(linesize, cur->linesize[0]);
+
+        /* add the buffer where this plane is stored to the list if it's
+         * not already there */
+        buf = av_frame_get_plane_buffer(cur, ch->in_channel_idx);
+        if (!buf) {
+            ret = AVERROR(EINVAL);
+            goto fail;
+        }
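+        /* compare the underlying AVBuffer rather than the AVBufferRef,
+         * since several planes of one input frame may share a buffer
+         * through different references */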
+        for (j = 0; j < nb_buffers; j++)
+            if (s->buffers[j]->buffer == buf->buffer)
+                break;
+        if (j == nb_buffers)
+            s->buffers[nb_buffers++] = buf;
     }
 
-    av_assert0(nb_samples > 0);
-    buf = avfilter_get_audio_buffer_ref_from_arrays(s->data, linesize, perms,
-                                                    nb_samples, outlink->format,
-                                                    outlink->channel_layout);
-    if (!buf)
-        return AVERROR(ENOMEM);
+    /* create references to the buffers we copied to output */
+    if (nb_buffers > FF_ARRAY_ELEMS(frame->buf)) {
+        frame->nb_extended_buf = nb_buffers - FF_ARRAY_ELEMS(frame->buf);
+        frame->extended_buf = av_mallocz(sizeof(*frame->extended_buf) *
+                                         frame->nb_extended_buf);
+        if (!frame->extended_buf) {
+            frame->nb_extended_buf = 0;
+            ret = AVERROR(ENOMEM);
+            goto fail;
+        }
+    }
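+    /* the first FF_ARRAY_ELEMS(frame->buf) references go into frame->buf[],
+     * any remaining ones into extended_buf; av_frame_unref() releases both */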
+    for (i = 0; i < FFMIN(FF_ARRAY_ELEMS(frame->buf), nb_buffers); i++) {
+        frame->buf[i] = av_buffer_ref(s->buffers[i]);
+        if (!frame->buf[i]) {
+            ret = AVERROR(ENOMEM);
+            goto fail;
+        }
+    }
+    for (i = 0; i < frame->nb_extended_buf; i++) {
+        frame->extended_buf[i] = av_buffer_ref(s->buffers[i +
+                                               FF_ARRAY_ELEMS(frame->buf)]);
+        if (!frame->extended_buf[i]) {
+            ret = AVERROR(ENOMEM);
+            goto fail;
+        }
+    }
 
-    buf->buf->free = join_free_buffer;
-    buf->pts       = s->input_frames[0]->pts;
+    frame->nb_samples     = nb_samples;
+    frame->channel_layout = outlink->channel_layout;
+    frame->sample_rate    = outlink->sample_rate;
+    frame->pts            = s->input_frames[0]->pts;
+    frame->linesize[0]    = linesize;
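+    /* keep frame->data in sync with a separately allocated extended_data,
+     * for callers that only look at the first data pointers */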
+    if (frame->data != frame->extended_data) {
+        memcpy(frame->data, frame->extended_data, sizeof(*frame->data) *
+               FFMIN(FF_ARRAY_ELEMS(frame->data), s->nb_channels));
+    }
 
-    if (!(priv = av_mallocz(sizeof(*priv))))
-        goto fail;
-    if (!(priv->in_buffers = av_mallocz(sizeof(*priv->in_buffers) * ctx->nb_inputs)))
-        goto fail;
+    ret = ff_filter_frame(outlink, frame);
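+    /* the frame handed downstream keeps its own references to the plane
+     * buffers, so the input frames can be freed regardless of ret */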
 
     for (i = 0; i < ctx->nb_inputs; i++)
-        priv->in_buffers[i] = s->input_frames[i];
-    priv->nb_in_buffers = ctx->nb_inputs;
-    buf->buf->priv      = priv;
-
-    ret = ff_filter_samples(outlink, buf);
-
-    memset(s->input_frames, 0, sizeof(*s->input_frames) * ctx->nb_inputs);
+        av_frame_free(&s->input_frames[i]);
 
     return ret;
 
 fail:
-    avfilter_unref_buffer(buf);
-    if (priv)
-        av_freep(&priv->in_buffers);
-    av_freep(&priv);
-    return AVERROR(ENOMEM);
+    av_frame_free(&frame);
+    return ret;
 }
 
+static const AVFilterPad avfilter_af_join_outputs[] = {
+    {
+        .name          = "default",
+        .type          = AVMEDIA_TYPE_AUDIO,
+        .config_props  = join_config_output,
+        .request_frame = join_request_frame,
+    },
+    { NULL }
+};
+
 AVFilter avfilter_af_join = {
     .name           = "join",
     .description    = NULL_IF_CONFIG_SMALL("Join multiple audio streams into "
                                            "multi-channel output"),
     .priv_size      = sizeof(JoinContext),
+    .priv_class     = &join_class,
 
     .init           = join_init,
     .uninit         = join_uninit,
     .query_formats  = join_query_formats,
 
-    .inputs  = (const AVFilterPad[]){{ NULL }},
-    .outputs = (const AVFilterPad[]){{ .name          = "default",
-                                       .type          = AVMEDIA_TYPE_AUDIO,
-                                       .config_props  = join_config_output,
-                                       .request_frame = join_request_frame, },
-                                     { NULL }},
+    .inputs  = NULL,
+    .outputs = avfilter_af_join_outputs,
+
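+    /* input pads are not declared statically; join_init() creates them
+     * according to the "inputs" option */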
+    .flags   = AVFILTER_FLAG_DYNAMIC_INPUTS,
 };