diff --git a/libavfilter/af_rubberband.c b/libavfilter/af_rubberband.c
index ea6f4ff2c9c85bdcc69b77a6060f5944e87af851..ef937f44157840cf1a8c57f359dcd61e70714b0a 100644
--- a/libavfilter/af_rubberband.c
+++ b/libavfilter/af_rubberband.c
@@ -24,6 +24,7 @@
 
 #include "audio.h"
 #include "avfilter.h"
+#include "filters.h"
 #include "formats.h"
 #include "internal.h"
 
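The new "filters.h" include pulls in the activate-mode helpers used further down. Roughly (prototypes paraphrased from libavfilter/filters.h, so treat them as approximate), the pieces this patch relies on are:

    /* Take between min and max samples off the input link's FIFO; returns >0
     * and sets *rframe when a frame was consumed, 0 when not enough data is
     * queued yet, <0 on error. */
    int ff_inlink_consume_samples(AVFilterLink *link, unsigned min, unsigned max,
                                  AVFrame **rframe);

    /* Status already set on a link: 0, or an AVERROR code such as AVERROR_EOF. */
    int ff_outlink_get_status(AVFilterLink *link);

    /* Plus the forwarding macros used in activate() below
     * (FF_FILTER_FORWARD_STATUS_BACK, FF_FILTER_FORWARD_STATUS,
     * FF_FILTER_FORWARD_WANTED) and the FFERROR_NOT_READY return code. */
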
@@ -36,7 +37,8 @@ typedef struct RubberBandContext {
         smoothing, formant, opitch, channels;
     int64_t nb_samples_out;
     int64_t nb_samples_in;
-    int flushed;
+    int64_t first_pts;
+    int nb_samples;
 } RubberBandContext;
 
 #define OFFSET(x) offsetof(RubberBandContext, x)
@@ -123,7 +125,10 @@ static int filter_frame(AVFilterLink *inlink, AVFrame *in)
     AVFrame *out;
     int ret = 0, nb_samples;
 
-    rubberband_process(s->rbs, (const float *const *)in->data, in->nb_samples, 0);
+    if (s->first_pts == AV_NOPTS_VALUE)
+        s->first_pts = in->pts;
+
+    rubberband_process(s->rbs, (const float *const *)in->data, in->nb_samples, ff_outlink_get_status(inlink));
     s->nb_samples_in += in->nb_samples;
 
     nb_samples = rubberband_available(s->rbs);
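For context, librubberband's C API is a small push/pull pipeline: feed planar float blocks with rubberband_process() (passing a non-zero "final" flag with the last block, which the patch now derives from the input link's status), then drain with rubberband_available() and rubberband_retrieve(). A minimal standalone sketch against rubberband/rubberband-c.h, with made-up sizes (48 kHz, 2 channels, one 512-sample block), just to show the calling order the filter follows:

    #include <rubberband/rubberband-c.h>
    #include <stdio.h>

    #define CHANNELS 2
    #define BLOCK    512

    int main(void)
    {
        /* Stretch to 1.25x length, keep pitch; the RealTime option selects the
         * streaming (process/available/retrieve) mode this filter uses. */
        RubberBandState rbs = rubberband_new(48000, CHANNELS,
                                             RubberBandOptionProcessRealTime,
                                             1.25, 1.0);
        float inbuf[CHANNELS][BLOCK] = {{ 0 }};
        float outbuf[CHANNELS][BLOCK * 2];
        const float *in[CHANNELS]  = { inbuf[0],  inbuf[1]  };
        float       *out[CHANNELS] = { outbuf[0], outbuf[1] };
        int avail;

        /* Feed one (silent) block; the last argument is the "final" flag. */
        rubberband_process(rbs, in, BLOCK, 1);

        /* Drain whatever the stretcher has produced. */
        avail = rubberband_available(rbs);
        if (avail > BLOCK * 2)
            avail = BLOCK * 2;
        if (avail > 0)
            printf("retrieved %u samples\n",
                   rubberband_retrieve(rbs, out, avail));

        rubberband_delete(rbs);
        return 0;
    }
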
@@ -133,7 +138,7 @@ static int filter_frame(AVFilterLink *inlink, AVFrame *in)
             av_frame_free(&in);
             return AVERROR(ENOMEM);
         }
-        out->pts = av_rescale_q(s->nb_samples_out,
+        out->pts = s->first_pts + av_rescale_q(s->nb_samples_out,
                      (AVRational){ 1, outlink->sample_rate },
                      outlink->time_base);
         nb_samples = rubberband_retrieve(s->rbs, (float *const *)out->data, nb_samples);
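The only change to the timestamp code is the s->first_pts offset, so output pts now starts at the first input pts instead of at zero; the rescaling itself is unchanged. A tiny standalone illustration of the arithmetic, with assumed values (48 kHz output, 1/48000 time base, 4800 samples already emitted):

    #include <inttypes.h>
    #include <stdio.h>
    #include <libavutil/mathematics.h>

    int main(void)
    {
        int        sample_rate    = 48000;
        AVRational time_base      = { 1, 48000 };  /* outlink->time_base       */
        int64_t    first_pts      = 1000;          /* pts of first input frame */
        int64_t    nb_samples_out = 4800;          /* samples already emitted  */

        /* Same expression as in filter_frame(): rescale the running output
         * sample count into the output time base, then add the input origin. */
        int64_t pts = first_pts + av_rescale_q(nb_samples_out,
                                               (AVRational){ 1, sample_rate },
                                               time_base);
        printf("out->pts = %"PRId64"\n", pts);     /* 1000 + 4800 = 5800 */
        return 0;
    }
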
@@ -143,7 +148,7 @@ static int filter_frame(AVFilterLink *inlink, AVFrame *in)
     }
 
     av_frame_free(&in);
-    return ret;
+    return ret < 0 ? ret : nb_samples;
 }
 
 static int config_input(AVFilterLink *inlink)
@@ -157,54 +162,38 @@ static int config_input(AVFilterLink *inlink)
     if (s->rbs)
         rubberband_delete(s->rbs);
     s->rbs = rubberband_new(inlink->sample_rate, inlink->channels, opts, 1. / s->tempo, s->pitch);
+    if (!s->rbs)
+        return AVERROR(ENOMEM);
 
-    inlink->partial_buf_size =
-    inlink->min_samples =
-    inlink->max_samples = rubberband_get_samples_required(s->rbs);
+    s->nb_samples = rubberband_get_samples_required(s->rbs);
+    s->first_pts = AV_NOPTS_VALUE;
 
     return 0;
 }
 
-static int request_frame(AVFilterLink *outlink)
+static int activate(AVFilterContext *ctx)
 {
-    AVFilterContext *ctx = outlink->src;
-    RubberBandContext *s = ctx->priv;
     AVFilterLink *inlink = ctx->inputs[0];
-    int ret = 0;
-
-    ret = ff_request_frame(ctx->inputs[0]);
-
-    if (ret == AVERROR_EOF && !s->flushed) {
-        if (rubberband_available(s->rbs) > 0) {
-            AVFrame *out = ff_get_audio_buffer(inlink, 1);
-            int nb_samples;
-
-            if (!out)
-                return AVERROR(ENOMEM);
-
-            rubberband_process(s->rbs, (const float *const *)out->data, 1, 1);
-            av_frame_free(&out);
-            nb_samples = rubberband_available(s->rbs);
-
-            if (nb_samples > 0) {
-                out = ff_get_audio_buffer(outlink, nb_samples);
-                if (!out)
-                    return AVERROR(ENOMEM);
-                out->pts = av_rescale_q(s->nb_samples_out,
-                             (AVRational){ 1, outlink->sample_rate },
-                             outlink->time_base);
-                nb_samples = rubberband_retrieve(s->rbs, (float *const *)out->data, nb_samples);
-                out->nb_samples = nb_samples;
-                ret = ff_filter_frame(outlink, out);
-                s->nb_samples_out += nb_samples;
-            }
-        }
-        s->flushed = 1;
-        av_log(ctx, AV_LOG_DEBUG, "nb_samples_in %"PRId64" nb_samples_out %"PRId64"\n",
-                                   s->nb_samples_in, s->nb_samples_out);
+    AVFilterLink *outlink = ctx->outputs[0];
+    RubberBandContext *s = ctx->priv;
+    AVFrame *in = NULL;
+    int ret;
+
+    FF_FILTER_FORWARD_STATUS_BACK(outlink, inlink);
+
+    ret = ff_inlink_consume_samples(inlink, s->nb_samples, s->nb_samples, &in);
+    if (ret < 0)
+        return ret;
+    if (ret > 0) {
+        ret = filter_frame(inlink, in);
+        if (ret != 0)
+            return ret;
     }
 
-    return ret;
+    FF_FILTER_FORWARD_STATUS(inlink, outlink);
+    FF_FILTER_FORWARD_WANTED(outlink, inlink);
+
+    return FFERROR_NOT_READY;
 }
 
 static int process_command(AVFilterContext *ctx, const char *cmd, const char *args,
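Taken together, activate() here follows the standard shape of an activate-based audio filter: check the output status, try to consume a fixed-size chunk, otherwise forward status or request more input. A commented sketch of that generic pattern (not part of this patch; CHUNK_SIZE and process_chunk() are hypothetical placeholders), assuming libavfilter's private filters.h:

    static int activate_sketch(AVFilterContext *ctx)
    {
        AVFilterLink *inlink  = ctx->inputs[0];
        AVFilterLink *outlink = ctx->outputs[0];
        AVFrame *in = NULL;
        int ret;

        /* 1. If downstream already closed the output, propagate that status
         *    back to the input (the macro returns from this function itself). */
        FF_FILTER_FORWARD_STATUS_BACK(outlink, inlink);

        /* 2. Try to take a fixed-size chunk off the input FIFO; min == max
         *    gives the same fixed-chunk behaviour as the removed
         *    min_samples/max_samples setup in config_input(). */
        ret = ff_inlink_consume_samples(inlink, CHUNK_SIZE, CHUNK_SIZE, &in);
        if (ret < 0)
            return ret;
        if (ret > 0) {
            ret = process_chunk(ctx, in);     /* hypothetical helper */
            if (ret != 0)
                return ret;                   /* error, or samples were output */
        }

        /* 3. Nothing consumed: forward an input EOF downstream, or ask
         *    upstream for more data on behalf of the output (again, these
         *    macros return early when they act). */
        FF_FILTER_FORWARD_STATUS(inlink, outlink);
        FF_FILTER_FORWARD_WANTED(outlink, inlink);

        /* 4. No progress could be made on this invocation. */
        return FFERROR_NOT_READY;
    }

In this patch the chunk size is s->nb_samples, i.e. rubberband_get_samples_required(), so the framework now accumulates input until a full chunk is available, replacing the old partial_buf_size/min_samples/max_samples configuration.
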
@@ -244,7 +233,6 @@ static const AVFilterPad rubberband_inputs[] = {
         .name          = "default",
         .type          = AVMEDIA_TYPE_AUDIO,
         .config_props  = config_input,
-        .filter_frame  = filter_frame,
     },
     { NULL }
 };
@@ -253,7 +241,6 @@ static const AVFilterPad rubberband_outputs[] = {
     {
         .name          = "default",
         .type          = AVMEDIA_TYPE_AUDIO,
-        .request_frame = request_frame,
     },
     { NULL }
 };
@@ -265,6 +252,7 @@ AVFilter ff_af_rubberband = {
     .priv_size     = sizeof(RubberBandContext),
     .priv_class    = &rubberband_class,
     .uninit        = uninit,
+    .activate      = activate,
     .inputs        = rubberband_inputs,
     .outputs       = rubberband_outputs,
     .process_command = process_command,