diff --git a/libavfilter/buffersink.c b/libavfilter/buffersink.c
index 6f7529172cda410487545ccb06f8e4d08345720b..3b4d285ffd6e90ef75a17e0a5200b694ec8e99d6 100644
--- a/libavfilter/buffersink.c
+++ b/libavfilter/buffersink.c
@@ -27,6 +27,7 @@
 #include "libavutil/avassert.h"
 #include "libavutil/channel_layout.h"
 #include "libavutil/common.h"
+#include "libavutil/internal.h"
 #include "libavutil/mathematics.h"
 
 #include "audio.h"
@@ -34,9 +35,9 @@
 #include "buffersink.h"
 #include "internal.h"
 
-typedef struct {
+typedef struct BufferSinkContext {
     AVFrame *cur_frame;          ///< last frame delivered on the sink
-    AVAudioFifo  *audio_fifo;    ///< FIFO for audio samples
+    AVAudioFifo *audio_fifo;     ///< FIFO for audio samples
     int64_t next_pts;            ///< interpolating audio pts
 } BufferSinkContext;
 
@@ -58,7 +59,8 @@ static int filter_frame(AVFilterLink *link, AVFrame *frame)
     return 0;
 }
 
-int av_buffersink_get_frame(AVFilterContext *ctx, AVFrame *frame)
+int attribute_align_arg av_buffersink_get_frame(AVFilterContext *ctx,
+                                                AVFrame *frame)
 {
     BufferSinkContext *s    = ctx->priv;
     AVFilterLink      *link = ctx->inputs[0];
@@ -95,10 +97,10 @@ static int read_from_fifo(AVFilterContext *ctx, AVFrame *frame,
     av_frame_free(&tmp);
 
     return 0;
-
 }
 
-int av_buffersink_get_samples(AVFilterContext *ctx, AVFrame *frame, int nb_samples)
+int attribute_align_arg av_buffersink_get_samples(AVFilterContext *ctx,
+                                                  AVFrame *frame, int nb_samples)
 {
     BufferSinkContext *s = ctx->priv;
     AVFilterLink   *link = ctx->inputs[0];
@@ -133,116 +135,44 @@ int av_buffersink_get_samples(AVFilterContext *ctx, AVFrame *frame, int nb_sampl
     }
 
     return ret;
-
-}
-
-#if FF_API_AVFILTERBUFFER
-static void compat_free_buffer(AVFilterBuffer *buf)
-{
-    AVFrame *frame = buf->priv;
-    av_frame_free(&frame);
-    av_free(buf);
-}
-
-static int compat_read(AVFilterContext *ctx, AVFilterBufferRef **pbuf, int nb_samples)
-{
-    AVFilterBufferRef *buf;
-    AVFrame *frame;
-    int ret;
-
-    if (!pbuf)
-        return ff_poll_frame(ctx->inputs[0]);
-
-    frame = av_frame_alloc();
-    if (!frame)
-        return AVERROR(ENOMEM);
-
-    if (!nb_samples)
-        ret = av_buffersink_get_frame(ctx, frame);
-    else
-        ret = av_buffersink_get_samples(ctx, frame, nb_samples);
-
-    if (ret < 0)
-        goto fail;
-
-    if (ctx->inputs[0]->type == AVMEDIA_TYPE_VIDEO) {
-        buf = avfilter_get_video_buffer_ref_from_arrays(frame->data, frame->linesize,
-                                                        AV_PERM_READ,
-                                                        frame->width, frame->height,
-                                                        frame->format);
-    } else {
-        buf = avfilter_get_audio_buffer_ref_from_arrays(frame->extended_data,
-                                                        frame->linesize[0], AV_PERM_READ,
-                                                        frame->nb_samples,
-                                                        frame->format,
-                                                        frame->channel_layout);
-    }
-    if (!buf) {
-        ret = AVERROR(ENOMEM);
-        goto fail;
-    }
-
-    avfilter_copy_frame_props(buf, frame);
-
-    buf->buf->priv = frame;
-    buf->buf->free = compat_free_buffer;
-
-    *pbuf = buf;
-
-    return 0;
-fail:
-    av_frame_free(&frame);
-    return ret;
 }
 
-int av_buffersink_read(AVFilterContext *ctx, AVFilterBufferRef **buf)
-{
-    return compat_read(ctx, buf, 0);
-}
-
-int av_buffersink_read_samples(AVFilterContext *ctx, AVFilterBufferRef **buf,
-                               int nb_samples)
-{
-    return compat_read(ctx, buf, nb_samples);
-}
-#endif
-
 static const AVFilterPad avfilter_vsink_buffer_inputs[] = {
     {
-        .name        = "default",
-        .type        = AVMEDIA_TYPE_VIDEO,
+        .name         = "default",
+        .type         = AVMEDIA_TYPE_VIDEO,
         .filter_frame = filter_frame,
-        .needs_fifo  = 1
+        .needs_fifo   = 1
     },
     { NULL }
 };
 
-AVFilter avfilter_vsink_buffer = {
-    .name      = "buffersink",
+AVFilter ff_vsink_buffer = {
+    .name        = "buffersink",
     .description = NULL_IF_CONFIG_SMALL("Buffer video frames, and make them available to the end of the filter graph."),
-    .priv_size = sizeof(BufferSinkContext),
-    .uninit    = uninit,
+    .priv_size   = sizeof(BufferSinkContext),
+    .uninit      = uninit,
 
-    .inputs    = avfilter_vsink_buffer_inputs,
-    .outputs   = NULL,
+    .inputs      = avfilter_vsink_buffer_inputs,
+    .outputs     = NULL,
 };
 
 static const AVFilterPad avfilter_asink_abuffer_inputs[] = {
     {
-        .name           = "default",
-        .type           = AVMEDIA_TYPE_AUDIO,
-        .filter_frame   = filter_frame,
-        .needs_fifo     = 1
+        .name         = "default",
+        .type         = AVMEDIA_TYPE_AUDIO,
+        .filter_frame = filter_frame,
+        .needs_fifo   = 1
     },
     { NULL }
 };
 
-AVFilter avfilter_asink_abuffer = {
-    .name      = "abuffersink",
+AVFilter ff_asink_abuffer = {
+    .name        = "abuffersink",
     .description = NULL_IF_CONFIG_SMALL("Buffer audio frames, and make them available to the end of the filter graph."),
-    .priv_size = sizeof(BufferSinkContext),
-    .uninit    = uninit,
+    .priv_size   = sizeof(BufferSinkContext),
+    .uninit      = uninit,
 
-    .inputs    = avfilter_asink_abuffer_inputs,
-    .outputs   = NULL,
+    .inputs      = avfilter_asink_abuffer_inputs,
+    .outputs     = NULL,
 };
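
For reference, a minimal caller sketch (not part of this patch; the helper name drain_sink and the sink_ctx parameter are made up here). It drains frames from an already-configured buffersink/abuffersink instance through av_buffersink_get_frame(), the public entry point this change marks with attribute_align_arg.

#include <libavfilter/buffersink.h>
#include <libavutil/error.h>
#include <libavutil/frame.h>

/* Hypothetical helper: pull every pending frame out of a buffer sink.
 * "sink_ctx" is assumed to be the AVFilterContext of a "buffersink"
 * (or "abuffersink") filter in a graph that has already been configured
 * and fed via the corresponding buffer source. */
static int drain_sink(AVFilterContext *sink_ctx)
{
    AVFrame *frame = av_frame_alloc();
    int ret;

    if (!frame)
        return AVERROR(ENOMEM);

    while ((ret = av_buffersink_get_frame(sink_ctx, frame)) >= 0) {
        /* ... consume the frame here (encode, display, ...) ... */
        av_frame_unref(frame);
    }

    av_frame_free(&frame);
    /* AVERROR(EAGAIN) (nothing buffered yet) and AVERROR_EOF end the
     * loop normally; any other return code is a real error. */
    return (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF) ? 0 : ret;
}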