Merge remote-tracking branch 'cus/stable'
diff --git a/libavfilter/avf_showwaves.c b/libavfilter/avf_showwaves.c
index 2e3bfba458d867da9423c57836364e9d99063333..765e998694b95922666fd46a404e3616d8321b2d 100644
--- a/libavfilter/avf_showwaves.c
+++ b/libavfilter/avf_showwaves.c
@@ -23,7 +23,7 @@
  * audio to video multimedia filter
  */
 
-#include "libavutil/audioconvert.h"
+#include "libavutil/channel_layout.h"
 #include "libavutil/opt.h"
 #include "libavutil/parseutils.h"
 #include "avfilter.h"
@@ -87,8 +87,8 @@ static int query_formats(AVFilterContext *ctx)
     AVFilterChannelLayouts *layouts = NULL;
     AVFilterLink *inlink = ctx->inputs[0];
     AVFilterLink *outlink = ctx->outputs[0];
-    static const enum AVSampleFormat sample_fmts[] = { AV_SAMPLE_FMT_S16, -1 };
-    static const enum AVPixelFormat pix_fmts[] = { AV_PIX_FMT_GRAY8, -1 };
+    static const enum AVSampleFormat sample_fmts[] = { AV_SAMPLE_FMT_S16, AV_SAMPLE_FMT_NONE };
+    static const enum AVPixelFormat pix_fmts[] = { AV_PIX_FMT_GRAY8, AV_PIX_FMT_NONE };
 
     /* set input audio formats */
     formats = ff_make_format_list(sample_fmts);
@@ -179,7 +179,7 @@ static int request_frame(AVFilterLink *outlink)
 
 #define MAX_INT16 ((1<<15) -1)
 
-static int filter_samples(AVFilterLink *inlink, AVFilterBufferRef *insamples)
+static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *insamples)
 {
     AVFilterContext *ctx = inlink->dst;
     AVFilterLink *outlink = ctx->outputs[0];
@@ -195,10 +195,12 @@ static int filter_samples(AVFilterLink *inlink, AVFilterBufferRef *insamples)
 
     /* draw data in the buffer */
     for (i = 0; i < nb_samples; i++) {
-        if (showwaves->buf_idx == 0 && showwaves->sample_count_mod == 0) {
+        if (!outpicref) {
             showwaves->outpicref = outpicref =
                 ff_get_video_buffer(outlink, AV_PERM_WRITE|AV_PERM_ALIGN,
                                     outlink->w, outlink->h);
+            if (!outpicref)
+                return AVERROR(ENOMEM);
             outpicref->video->w = outlink->w;
             outpicref->video->h = outlink->h;
             outpicref->pts = insamples->pts +
@@ -226,6 +228,26 @@ static int filter_samples(AVFilterLink *inlink, AVFilterBufferRef *insamples)
     return 0;
 }
 
+static const AVFilterPad showwaves_inputs[] = {
+    {
+        .name         = "default",
+        .type         = AVMEDIA_TYPE_AUDIO,
+        .filter_frame = filter_frame,
+        .min_perms    = AV_PERM_READ,
+    },
+    { NULL }
+};
+
+static const AVFilterPad showwaves_outputs[] = {
+    {
+        .name          = "default",
+        .type          = AVMEDIA_TYPE_VIDEO,
+        .config_props  = config_output,
+        .request_frame = request_frame,
+    },
+    { NULL }
+};
+
 AVFilter avfilter_avf_showwaves = {
     .name           = "showwaves",
     .description    = NULL_IF_CONFIG_SMALL("Convert input audio to a video output."),
@@ -233,26 +255,7 @@ AVFilter avfilter_avf_showwaves = {
     .uninit         = uninit,
     .query_formats  = query_formats,
     .priv_size      = sizeof(ShowWavesContext),
-
-    .inputs  = (const AVFilterPad[]) {
-        {
-            .name           = "default",
-            .type           = AVMEDIA_TYPE_AUDIO,
-            .filter_samples = filter_samples,
-            .min_perms      = AV_PERM_READ,
-        },
-        { .name = NULL }
-    },
-
-    .outputs = (const AVFilterPad[]) {
-        {
-            .name           = "default",
-            .type           = AVMEDIA_TYPE_VIDEO,
-            .config_props   = config_output,
-            .request_frame  = request_frame,
-        },
-        { .name = NULL }
-    },
-
-    .priv_class = &showwaves_class,
+    .inputs         = showwaves_inputs,
+    .outputs        = showwaves_outputs,
+    .priv_class     = &showwaves_class,
 };