diff --git a/libavfilter/buffersrc.c b/libavfilter/buffersrc.c
index 30e34b75fa3cc3ebedcd156dc4ad000ea2e71d38..fd058d6c19bb7508630a9cc2d1e5e69d66e6583b 100644
--- a/libavfilter/buffersrc.c
+++ b/libavfilter/buffersrc.c
  * memory buffer source filter
  */
 
+#include <float.h>
+
+#include "libavutil/channel_layout.h"
+#include "libavutil/common.h"
+#include "libavutil/fifo.h"
+#include "libavutil/frame.h"
+#include "libavutil/imgutils.h"
+#include "libavutil/internal.h"
+#include "libavutil/opt.h"
+#include "libavutil/samplefmt.h"
 #include "audio.h"
 #include "avfilter.h"
 #include "buffersrc.h"
 #include "formats.h"
 #include "internal.h"
 #include "video.h"
-#include "vsrc_buffer.h"
-
-#include "libavutil/audioconvert.h"
-#include "libavutil/fifo.h"
-#include "libavutil/imgutils.h"
-#include "libavutil/opt.h"
-#include "libavutil/samplefmt.h"
 
 typedef struct {
     const AVClass    *class;
@@ -44,7 +47,8 @@ typedef struct {
 
     /* video only */
     int               h, w;
-    enum PixelFormat  pix_fmt;
+    enum AVPixelFormat  pix_fmt;
+    char               *pix_fmt_str;
     AVRational        pixel_aspect;
 
     /* audio only */
@@ -70,143 +74,240 @@ typedef struct {
         return AVERROR(EINVAL);\
     }
 
-#if FF_API_VSRC_BUFFER_ADD_FRAME
-int av_vsrc_buffer_add_frame(AVFilterContext *buffer_filter, AVFrame *frame,
-                             int64_t pts, AVRational pixel_aspect)
+int attribute_align_arg av_buffersrc_write_frame(AVFilterContext *ctx, const AVFrame *frame)
 {
-    int64_t orig_pts = frame->pts;
-    AVRational orig_sar = frame->sample_aspect_ratio;
-    int ret;
+    AVFrame *copy;
+    int ret = 0;
 
-    frame->pts = pts;
-    frame->sample_aspect_ratio = pixel_aspect;
-    if ((ret = av_buffersrc_write_frame(buffer_filter, frame)) < 0)
-        return ret;
-    frame->pts = orig_pts;
-    frame->sample_aspect_ratio = orig_sar;
+    if (!(copy = av_frame_alloc()))
+        return AVERROR(ENOMEM);
+    ret = av_frame_ref(copy, frame);
+    if (ret >= 0)
+        ret = av_buffersrc_add_frame(ctx, copy);
 
-    return 0;
+    av_frame_free(&copy);
+    return ret;
 }
-#endif
 
-int av_buffersrc_write_frame(AVFilterContext *buffer_filter, AVFrame *frame)
+int attribute_align_arg av_buffersrc_add_frame(AVFilterContext *ctx,
+                                               AVFrame *frame)
 {
-    BufferSourceContext *c = buffer_filter->priv;
-    AVFilterBufferRef *buf;
-    int ret;
+    BufferSourceContext *s = ctx->priv;
+    AVFrame *copy;
+    int refcounted, ret;
 
     if (!frame) {
-        c->eof = 1;
+        s->eof = 1;
         return 0;
-    } else if (c->eof)
+    } else if (s->eof)
         return AVERROR(EINVAL);
 
-    if (!av_fifo_space(c->fifo) &&
-        (ret = av_fifo_realloc2(c->fifo, av_fifo_size(c->fifo) +
-                                         sizeof(buf))) < 0)
-        return ret;
+    refcounted = !!frame->buf[0];
 
-    switch (buffer_filter->outputs[0]->type) {
+    switch (ctx->outputs[0]->type) {
     case AVMEDIA_TYPE_VIDEO:
-        CHECK_VIDEO_PARAM_CHANGE(buffer_filter, c, frame->width, frame->height,
+        CHECK_VIDEO_PARAM_CHANGE(ctx, s, frame->width, frame->height,
                                  frame->format);
-        buf = ff_get_video_buffer(buffer_filter->outputs[0], AV_PERM_WRITE,
-                                  c->w, c->h);
-        av_image_copy(buf->data, buf->linesize, frame->data, frame->linesize,
-                      c->pix_fmt, c->w, c->h);
         break;
     case AVMEDIA_TYPE_AUDIO:
-        CHECK_AUDIO_PARAM_CHANGE(buffer_filter, c, frame->sample_rate, frame->channel_layout,
+        CHECK_AUDIO_PARAM_CHANGE(ctx, s, frame->sample_rate, frame->channel_layout,
                                  frame->format);
-        buf = ff_get_audio_buffer(buffer_filter->outputs[0], AV_PERM_WRITE,
-                                  frame->nb_samples);
-        av_samples_copy(buf->extended_data, frame->extended_data,
-                        0, 0, frame->nb_samples,
-                        av_get_channel_layout_nb_channels(frame->channel_layout),
-                        frame->format);
         break;
     default:
         return AVERROR(EINVAL);
     }
 
-    avfilter_copy_frame_props(buf, frame);
+    if (!av_fifo_space(s->fifo) &&
+        (ret = av_fifo_realloc2(s->fifo, av_fifo_size(s->fifo) +
+                                         sizeof(copy))) < 0)
+        return ret;
+
+    if (!(copy = av_frame_alloc()))
+        return AVERROR(ENOMEM);
+
+    if (refcounted) {
+        av_frame_move_ref(copy, frame);
+    } else {
+        ret = av_frame_ref(copy, frame);
+        if (ret < 0) {
+            av_frame_free(&copy);
+            return ret;
+        }
+    }
 
-    if ((ret = av_fifo_generic_write(c->fifo, &buf, sizeof(buf), NULL)) < 0) {
-        avfilter_unref_buffer(buf);
+    if ((ret = av_fifo_generic_write(s->fifo, &copy, sizeof(copy), NULL)) < 0) {
+        if (refcounted)
+            av_frame_move_ref(frame, copy);
+        av_frame_free(&copy);
         return ret;
     }
 
     return 0;
 }
 
-int av_buffersrc_buffer(AVFilterContext *s, AVFilterBufferRef *buf)
+#if FF_API_AVFILTERBUFFER
+FF_DISABLE_DEPRECATION_WARNINGS
+static void compat_free_buffer(void *opaque, uint8_t *data)
 {
-    BufferSourceContext *c = s->priv;
-    int ret;
+    AVFilterBufferRef *buf = opaque;
+    avfilter_unref_buffer(buf);
+}
+
+static void compat_unref_buffer(void *opaque, uint8_t *data)
+{
+    AVBufferRef *buf = opaque;
+    av_buffer_unref(&buf);
+}
+
+int av_buffersrc_buffer(AVFilterContext *ctx, AVFilterBufferRef *buf)
+{
+    BufferSourceContext *s = ctx->priv;
+    AVFrame *frame = NULL;
+    AVBufferRef *dummy_buf = NULL;
+    int ret = 0, planes, i;
 
     if (!buf) {
-        c->eof = 1;
+        s->eof = 1;
         return 0;
-    } else if (c->eof)
+    } else if (s->eof)
         return AVERROR(EINVAL);
 
-    if (!av_fifo_space(c->fifo) &&
-        (ret = av_fifo_realloc2(c->fifo, av_fifo_size(c->fifo) +
-                                         sizeof(buf))) < 0)
-        return ret;
+    frame = av_frame_alloc();
+    if (!frame)
+        return AVERROR(ENOMEM);
 
-    switch (s->outputs[0]->type) {
-    case AVMEDIA_TYPE_VIDEO:
-        CHECK_VIDEO_PARAM_CHANGE(s, c, buf->video->w, buf->video->h, buf->format);
-        break;
-    case AVMEDIA_TYPE_AUDIO:
-        CHECK_AUDIO_PARAM_CHANGE(s, c, buf->audio->sample_rate, buf->audio->channel_layout,
-                                 buf->format);
-        break;
-    default:
-        return AVERROR(EINVAL);
+    dummy_buf = av_buffer_create(NULL, 0, compat_free_buffer, buf, 0);
+    if (!dummy_buf) {
+        ret = AVERROR(ENOMEM);
+        goto fail;
     }
 
-    if ((ret = av_fifo_generic_write(c->fifo, &buf, sizeof(buf), NULL)) < 0)
-        return ret;
+    if ((ret = avfilter_copy_buf_props(frame, buf)) < 0)
+        goto fail;
 
-    return 0;
+#define WRAP_PLANE(ref_out, data, data_size)                            \
+do {                                                                    \
+    AVBufferRef *dummy_ref = av_buffer_ref(dummy_buf);                  \
+    if (!dummy_ref) {                                                   \
+        ret = AVERROR(ENOMEM);                                          \
+        goto fail;                                                      \
+    }                                                                   \
+    ref_out = av_buffer_create(data, data_size, compat_unref_buffer,    \
+                               dummy_ref, 0);                           \
+    if (!ref_out) {                                                     \
+        av_frame_unref(frame);                                          \
+        ret = AVERROR(ENOMEM);                                          \
+        goto fail;                                                      \
+    }                                                                   \
+} while (0)
+
+    if (ctx->outputs[0]->type  == AVMEDIA_TYPE_VIDEO) {
+        const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(frame->format);
+
+        planes = av_pix_fmt_count_planes(frame->format);
+        if (!desc || planes <= 0) {
+            ret = AVERROR(EINVAL);
+            goto fail;
+        }
+
+        for (i = 0; i < planes; i++) {
+            int v_shift    = (i == 1 || i == 2) ? desc->log2_chroma_h : 0;
+            int plane_size = (frame->height >> v_shift) * frame->linesize[i];
+
+            WRAP_PLANE(frame->buf[i], frame->data[i], plane_size);
+        }
+    } else {
+        int planar = av_sample_fmt_is_planar(frame->format);
+        int channels = av_get_channel_layout_nb_channels(frame->channel_layout);
+
+        planes = planar ? channels : 1;
+
+        if (planes > FF_ARRAY_ELEMS(frame->buf)) {
+            frame->nb_extended_buf = planes - FF_ARRAY_ELEMS(frame->buf);
+            frame->extended_buf = av_mallocz(sizeof(*frame->extended_buf) *
+                                             frame->nb_extended_buf);
+            if (!frame->extended_buf) {
+                ret = AVERROR(ENOMEM);
+                goto fail;
+            }
+        }
+
+        for (i = 0; i < FFMIN(planes, FF_ARRAY_ELEMS(frame->buf)); i++)
+            WRAP_PLANE(frame->buf[i], frame->extended_data[i], frame->linesize[0]);
+
+        for (i = 0; i < planes - FF_ARRAY_ELEMS(frame->buf); i++)
+            WRAP_PLANE(frame->extended_buf[i],
+                       frame->extended_data[i + FF_ARRAY_ELEMS(frame->buf)],
+                       frame->linesize[0]);
+    }
+
+    ret = av_buffersrc_add_frame(ctx, frame);
+
+fail:
+    av_buffer_unref(&dummy_buf);
+    av_frame_free(&frame);
+
+    return ret;
 }
+FF_ENABLE_DEPRECATION_WARNINGS
+#endif
 
-static av_cold int init_video(AVFilterContext *ctx, const char *args)
+static av_cold int init_video(AVFilterContext *ctx)
 {
     BufferSourceContext *c = ctx->priv;
-    char pix_fmt_str[128];
-    int n = 0;
-
-    if (!args ||
-        (n = sscanf(args, "%d:%d:%127[^:]:%d:%d:%d:%d", &c->w, &c->h, pix_fmt_str,
-                    &c->time_base.num, &c->time_base.den,
-                    &c->pixel_aspect.num, &c->pixel_aspect.den)) != 7) {
-        av_log(ctx, AV_LOG_ERROR, "Expected 7 arguments, but %d found in '%s'\n", n, args);
+
+    if (!c->pix_fmt_str || !c->w || !c->h || av_q2d(c->time_base) <= 0) {
+        av_log(ctx, AV_LOG_ERROR, "Invalid parameters provided.\n");
         return AVERROR(EINVAL);
     }
-    if ((c->pix_fmt = av_get_pix_fmt(pix_fmt_str)) == PIX_FMT_NONE) {
+
+    if ((c->pix_fmt = av_get_pix_fmt(c->pix_fmt_str)) == AV_PIX_FMT_NONE) {
         char *tail;
-        c->pix_fmt = strtol(pix_fmt_str, &tail, 10);
-        if (*tail || c->pix_fmt < 0 || c->pix_fmt >= PIX_FMT_NB) {
-            av_log(ctx, AV_LOG_ERROR, "Invalid pixel format string '%s'\n", pix_fmt_str);
+        c->pix_fmt = strtol(c->pix_fmt_str, &tail, 10);
+        if (*tail || c->pix_fmt < 0 || c->pix_fmt >= AV_PIX_FMT_NB) {
+            av_log(ctx, AV_LOG_ERROR, "Invalid pixel format string '%s'\n", c->pix_fmt_str);
             return AVERROR(EINVAL);
         }
     }
 
-    if (!(c->fifo = av_fifo_alloc(sizeof(AVFilterBufferRef*))))
+    if (!(c->fifo = av_fifo_alloc(sizeof(AVFrame*))))
         return AVERROR(ENOMEM);
 
-    av_log(ctx, AV_LOG_INFO, "w:%d h:%d pixfmt:%s\n", c->w, c->h, av_pix_fmt_descriptors[c->pix_fmt].name);
+    av_log(ctx, AV_LOG_VERBOSE, "w:%d h:%d pixfmt:%s\n", c->w, c->h, av_get_pix_fmt_name(c->pix_fmt));
     return 0;
 }
 
 #define OFFSET(x) offsetof(BufferSourceContext, x)
 #define A AV_OPT_FLAG_AUDIO_PARAM
+#define V AV_OPT_FLAG_VIDEO_PARAM
+
+static const AVOption video_options[] = {
+    { "width",         NULL,                     OFFSET(w),                AV_OPT_TYPE_INT,      { .i64 = 0 }, 0, INT_MAX, V },
+    { "height",        NULL,                     OFFSET(h),                AV_OPT_TYPE_INT,      { .i64 = 0 }, 0, INT_MAX, V },
+    { "pix_fmt",       NULL,                     OFFSET(pix_fmt_str),      AV_OPT_TYPE_STRING,                    .flags = V },
+#if FF_API_OLD_FILTER_OPTS
+    /* those 4 are for compatibility with the old option passing system where each filter
+     * did its own parsing */
+    { "time_base_num", "deprecated, do not use", OFFSET(time_base.num),    AV_OPT_TYPE_INT,      { .i64 = 0 }, 0, INT_MAX, V },
+    { "time_base_den", "deprecated, do not use", OFFSET(time_base.den),    AV_OPT_TYPE_INT,      { .i64 = 0 }, 0, INT_MAX, V },
+    { "sar_num",       "deprecated, do not use", OFFSET(pixel_aspect.num), AV_OPT_TYPE_INT,      { .i64 = 0 }, 0, INT_MAX, V },
+    { "sar_den",       "deprecated, do not use", OFFSET(pixel_aspect.den), AV_OPT_TYPE_INT,      { .i64 = 0 }, 0, INT_MAX, V },
+#endif
+    { "sar",           "sample aspect ratio",    OFFSET(pixel_aspect),     AV_OPT_TYPE_RATIONAL, { .dbl = 1 }, 0, DBL_MAX, V },
+    { "time_base",     NULL,                     OFFSET(time_base),        AV_OPT_TYPE_RATIONAL, { .dbl = 0 }, 0, DBL_MAX, V },
+    { NULL },
+};
+
+static const AVClass buffer_class = {
+    .class_name = "buffer source",
+    .item_name  = av_default_item_name,
+    .option     = video_options,
+    .version    = LIBAVUTIL_VERSION_INT,
+};
+
 static const AVOption audio_options[] = {
-    { "time_base",      NULL, OFFSET(time_base),           AV_OPT_TYPE_RATIONAL, { 0 }, 0, INT_MAX, A },
-    { "sample_rate",    NULL, OFFSET(sample_rate),         AV_OPT_TYPE_INT,      { 0 }, 0, INT_MAX, A },
+    { "time_base",      NULL, OFFSET(time_base),           AV_OPT_TYPE_RATIONAL, { .dbl = 0 }, 0, INT_MAX, A },
+    { "sample_rate",    NULL, OFFSET(sample_rate),         AV_OPT_TYPE_INT,      { .i64 = 0 }, 0, INT_MAX, A },
     { "sample_fmt",     NULL, OFFSET(sample_fmt_str),      AV_OPT_TYPE_STRING,             .flags = A },
     { "channel_layout", NULL, OFFSET(channel_layout_str),  AV_OPT_TYPE_STRING,             .flags = A },
     { NULL },
@@ -219,39 +320,27 @@ static const AVClass abuffer_class = {
     .version    = LIBAVUTIL_VERSION_INT,
 };
 
-static av_cold int init_audio(AVFilterContext *ctx, const char *args)
+static av_cold int init_audio(AVFilterContext *ctx)
 {
     BufferSourceContext *s = ctx->priv;
     int ret = 0;
 
-    s->class = &abuffer_class;
-    av_opt_set_defaults(s);
-
-    if ((ret = av_set_options_string(s, args, "=", ":")) < 0) {
-        av_log(ctx, AV_LOG_ERROR, "Error parsing options string: %s.\n", args);
-        goto fail;
-    }
-
     s->sample_fmt = av_get_sample_fmt(s->sample_fmt_str);
     if (s->sample_fmt == AV_SAMPLE_FMT_NONE) {
         av_log(ctx, AV_LOG_ERROR, "Invalid sample format %s.\n",
                s->sample_fmt_str);
-        ret = AVERROR(EINVAL);
-        goto fail;
+        return AVERROR(EINVAL);
     }
 
     s->channel_layout = av_get_channel_layout(s->channel_layout_str);
     if (!s->channel_layout) {
         av_log(ctx, AV_LOG_ERROR, "Invalid channel layout %s.\n",
                s->channel_layout_str);
-        ret = AVERROR(EINVAL);
-        goto fail;
+        return AVERROR(EINVAL);
     }
 
-    if (!(s->fifo = av_fifo_alloc(sizeof(AVFilterBufferRef*)))) {
-        ret = AVERROR(ENOMEM);
-        goto fail;
-    }
+    if (!(s->fifo = av_fifo_alloc(sizeof(AVFrame*))))
+        return AVERROR(ENOMEM);
 
     if (!s->time_base.num)
         s->time_base = (AVRational){1, s->sample_rate};
@@ -260,8 +349,6 @@ static av_cold int init_audio(AVFilterContext *ctx, const char *args)
            "ch layout:%s\n", s->time_base.num, s->time_base.den, s->sample_fmt_str,
            s->sample_rate, s->channel_layout_str);
 
-fail:
-    av_opt_free(s);
     return ret;
 }
 
@@ -269,9 +356,9 @@ static av_cold void uninit(AVFilterContext *ctx)
 {
     BufferSourceContext *s = ctx->priv;
     while (s->fifo && av_fifo_size(s->fifo)) {
-        AVFilterBufferRef *buf;
-        av_fifo_generic_read(s->fifo, &buf, sizeof(buf), NULL);
-        avfilter_unref_buffer(buf);
+        AVFrame *frame;
+        av_fifo_generic_read(s->fifo, &frame, sizeof(frame), NULL);
+        av_frame_free(&frame);
     }
     av_fifo_free(s->fifo);
     s->fifo = NULL;
@@ -331,31 +418,19 @@ static int config_props(AVFilterLink *link)
 static int request_frame(AVFilterLink *link)
 {
     BufferSourceContext *c = link->src->priv;
-    AVFilterBufferRef *buf;
+    AVFrame *frame;
+    int ret = 0;
 
     if (!av_fifo_size(c->fifo)) {
         if (c->eof)
             return AVERROR_EOF;
         return AVERROR(EAGAIN);
     }
-    av_fifo_generic_read(c->fifo, &buf, sizeof(buf), NULL);
+    av_fifo_generic_read(c->fifo, &frame, sizeof(frame), NULL);
 
-    switch (link->type) {
-    case AVMEDIA_TYPE_VIDEO:
-        ff_start_frame(link, avfilter_ref_buffer(buf, ~0));
-        ff_draw_slice(link, 0, link->h, 1);
-        ff_end_frame(link);
-        break;
-    case AVMEDIA_TYPE_AUDIO:
-        ff_filter_samples(link, avfilter_ref_buffer(buf, ~0));
-        break;
-    default:
-        return AVERROR(EINVAL);
-    }
-
-    avfilter_unref_buffer(buf);
+    ff_filter_frame(link, frame);
 
-    return 0;
+    return ret;
 }
 
 static int poll_frame(AVFilterLink *link)
@@ -364,41 +439,55 @@ static int poll_frame(AVFilterLink *link)
     int size = av_fifo_size(c->fifo);
     if (!size && c->eof)
         return AVERROR_EOF;
-    return size/sizeof(AVFilterBufferRef*);
+    return size/sizeof(AVFrame*);
 }
 
-AVFilter avfilter_vsrc_buffer = {
+static const AVFilterPad avfilter_vsrc_buffer_outputs[] = {
+    {
+        .name          = "default",
+        .type          = AVMEDIA_TYPE_VIDEO,
+        .request_frame = request_frame,
+        .poll_frame    = poll_frame,
+        .config_props  = config_props,
+    },
+    { NULL }
+};
+
+AVFilter ff_vsrc_buffer = {
     .name      = "buffer",
     .description = NULL_IF_CONFIG_SMALL("Buffer video frames, and make them accessible to the filterchain."),
     .priv_size = sizeof(BufferSourceContext),
+    .priv_class = &buffer_class,
     .query_formats = query_formats,
 
     .init      = init_video,
     .uninit    = uninit,
 
-    .inputs    = (AVFilterPad[]) {{ .name = NULL }},
-    .outputs   = (AVFilterPad[]) {{ .name            = "default",
-                                    .type            = AVMEDIA_TYPE_VIDEO,
-                                    .request_frame   = request_frame,
-                                    .poll_frame      = poll_frame,
-                                    .config_props    = config_props, },
-                                  { .name = NULL}},
+    .inputs    = NULL,
+    .outputs   = avfilter_vsrc_buffer_outputs,
+};
+
+static const AVFilterPad avfilter_asrc_abuffer_outputs[] = {
+    {
+        .name          = "default",
+        .type          = AVMEDIA_TYPE_AUDIO,
+        .request_frame = request_frame,
+        .poll_frame    = poll_frame,
+        .config_props  = config_props,
+    },
+    { NULL }
 };
 
-AVFilter avfilter_asrc_abuffer = {
+AVFilter ff_asrc_abuffer = {
     .name          = "abuffer",
     .description   = NULL_IF_CONFIG_SMALL("Buffer audio frames, and make them accessible to the filterchain."),
     .priv_size     = sizeof(BufferSourceContext),
+    .priv_class    = &abuffer_class,
     .query_formats = query_formats,
 
     .init      = init_audio,
     .uninit    = uninit,
 
-    .inputs    = (AVFilterPad[]) {{ .name = NULL }},
-    .outputs   = (AVFilterPad[]) {{ .name            = "default",
-                                    .type            = AVMEDIA_TYPE_AUDIO,
-                                    .request_frame   = request_frame,
-                                    .poll_frame      = poll_frame,
-                                    .config_props    = config_props, },
-                                  { .name = NULL}},
+    .inputs    = NULL,
+    .outputs   = avfilter_asrc_abuffer_outputs,
 };
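
Caller-side sketch (not part of the diff above) showing how the AVFrame-based API changed here is typically used: av_buffersrc_write_frame() keeps its own reference to the frame, while passing NULL to av_buffersrc_add_frame() marks end of stream, as the code above shows. The filter context buffersrc_ctx is assumed to be an already created and configured buffer/abuffer source.

    #include <libavfilter/avfilter.h>
    #include <libavfilter/buffersrc.h>
    #include <libavutil/frame.h>

    static int push_frame(AVFilterContext *buffersrc_ctx, AVFrame *frame)
    {
        /* The source makes its own reference to 'frame'; the caller keeps
         * ownership and may reuse or free the frame afterwards. */
        return av_buffersrc_write_frame(buffersrc_ctx, frame);
    }

    static int push_eof(AVFilterContext *buffersrc_ctx)
    {
        /* A NULL frame signals EOF on this source. */
        return av_buffersrc_add_frame(buffersrc_ctx, NULL);
    }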