#include "libavutil/avassert.h"
#include "libavutil/opt.h"
-#define FF_BUFQUEUE_SIZE 302
+#define MIN_FILTER_SIZE 3
+#define MAX_FILTER_SIZE 301
+
+#define FF_BUFQUEUE_SIZE (MAX_FILTER_SIZE + 1)
#include "libavfilter/bufferqueue.h"
#include "audio.h"
#include "avfilter.h"
+#include "filters.h"
#include "internal.h"
/* Result of analyzing one frame of one channel (or all channels when coupled):
 * the gain to apply and whether the frame was loud enough to count. */
typedef struct local_gain {
    double max_gain;  /* bounded amplification factor for this frame */
    double threshold; /* boolean stored as double: 1.0 if the frame's peak
                       * magnitude exceeded the user threshold, else 0.0 */
} local_gain;
+
/* Simple FIFO of doubles. Storage is a flat array of max_size slots;
 * element 0 is always the front (dequeue shifts the array down). */
typedef struct cqueue {
    double *elements;  /* backing array, max_size entries allocated */
    int size;          /* current target/logical size of the queue */
    int max_size;      /* allocated capacity; size may never exceed this */
    int nb_elements;   /* number of valid entries currently stored */
} cqueue;
typedef struct DynamicAudioNormalizerContext {
double max_amplification;
double target_rms;
double compress_factor;
+ double threshold;
double *prev_amplification_factor;
double *dc_correction_value;
double *compress_threshold;
- double *fade_factors[2];
double *weights;
int channels;
- int delay;
+ int eof;
+ int64_t pts;
cqueue **gain_history_original;
cqueue **gain_history_minimum;
cqueue **gain_history_smoothed;
+ cqueue **threshold_history;
+
+ cqueue *is_enabled;
} DynamicAudioNormalizerContext;
#define OFFSET(x) offsetof(DynamicAudioNormalizerContext, x)
-#define FLAGS AV_OPT_FLAG_AUDIO_PARAM|AV_OPT_FLAG_FILTERING_PARAM
+#define FLAGS AV_OPT_FLAG_AUDIO_PARAM|AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_RUNTIME_PARAM
/* Filter options. Each parameter is registered twice: once under a
 * descriptive long name and once under its historical single-letter
 * alias; both entries write to the same context field. */
static const AVOption dynaudnorm_options[] = {
    { "framelen",    "set the frame length in msec",     OFFSET(frame_len_msec),    AV_OPT_TYPE_INT,    {.i64 = 500},   10,  8000, FLAGS },
    { "f",           "set the frame length in msec",     OFFSET(frame_len_msec),    AV_OPT_TYPE_INT,    {.i64 = 500},   10,  8000, FLAGS },
    { "gausssize",   "set the filter size",              OFFSET(filter_size),       AV_OPT_TYPE_INT,    {.i64 = 31},     3,   301, FLAGS },
    { "g",           "set the filter size",              OFFSET(filter_size),       AV_OPT_TYPE_INT,    {.i64 = 31},     3,   301, FLAGS },
    { "peak",        "set the peak value",               OFFSET(peak_value),        AV_OPT_TYPE_DOUBLE, {.dbl = 0.95}, 0.0,   1.0, FLAGS },
    { "p",           "set the peak value",               OFFSET(peak_value),        AV_OPT_TYPE_DOUBLE, {.dbl = 0.95}, 0.0,   1.0, FLAGS },
    { "maxgain",     "set the max amplification",        OFFSET(max_amplification), AV_OPT_TYPE_DOUBLE, {.dbl = 10.0}, 1.0, 100.0, FLAGS },
    { "m",           "set the max amplification",        OFFSET(max_amplification), AV_OPT_TYPE_DOUBLE, {.dbl = 10.0}, 1.0, 100.0, FLAGS },
    { "targetrms",   "set the target RMS",               OFFSET(target_rms),        AV_OPT_TYPE_DOUBLE, {.dbl = 0.0},  0.0,   1.0, FLAGS },
    { "r",           "set the target RMS",               OFFSET(target_rms),        AV_OPT_TYPE_DOUBLE, {.dbl = 0.0},  0.0,   1.0, FLAGS },
    { "coupling",    "set channel coupling",             OFFSET(channels_coupled),  AV_OPT_TYPE_BOOL,   {.i64 = 1},      0,     1, FLAGS },
    { "n",           "set channel coupling",             OFFSET(channels_coupled),  AV_OPT_TYPE_BOOL,   {.i64 = 1},      0,     1, FLAGS },
    { "correctdc",   "set DC correction",                OFFSET(dc_correction),     AV_OPT_TYPE_BOOL,   {.i64 = 0},      0,     1, FLAGS },
    { "c",           "set DC correction",                OFFSET(dc_correction),     AV_OPT_TYPE_BOOL,   {.i64 = 0},      0,     1, FLAGS },
    { "altboundary", "set alternative boundary mode",    OFFSET(alt_boundary_mode), AV_OPT_TYPE_BOOL,   {.i64 = 0},      0,     1, FLAGS },
    { "b",           "set alternative boundary mode",    OFFSET(alt_boundary_mode), AV_OPT_TYPE_BOOL,   {.i64 = 0},      0,     1, FLAGS },
    { "compress",    "set the compress factor",          OFFSET(compress_factor),   AV_OPT_TYPE_DOUBLE, {.dbl = 0.0},  0.0,  30.0, FLAGS },
    { "s",           "set the compress factor",          OFFSET(compress_factor),   AV_OPT_TYPE_DOUBLE, {.dbl = 0.0},  0.0,  30.0, FLAGS },
    { "threshold",   "set the threshold value",          OFFSET(threshold),         AV_OPT_TYPE_DOUBLE, {.dbl = 0.0},  0.0,   1.0, FLAGS },
    { "t",           "set the threshold value",          OFFSET(threshold),         AV_OPT_TYPE_DOUBLE, {.dbl = 0.0},  0.0,   1.0, FLAGS },
    { NULL }
};
DynamicAudioNormalizerContext *s = ctx->priv;
if (!(s->filter_size & 1)) {
- av_log(ctx, AV_LOG_ERROR, "filter size %d is invalid. Must be an odd value.\n", s->filter_size);
- return AVERROR(EINVAL);
+ av_log(ctx, AV_LOG_WARNING, "filter size %d is invalid. Changing to an odd value.\n", s->filter_size);
+ s->filter_size |= 1;
}
return 0;
return frame_size + (frame_size % 2);
}
-static void precalculate_fade_factors(double *fade_factors[2], int frame_len)
-{
- const double step_size = 1.0 / frame_len;
- int pos;
-
- for (pos = 0; pos < frame_len; pos++) {
- fade_factors[0][pos] = 1.0 - (step_size * (pos + 1.0));
- fade_factors[1][pos] = 1.0 - fade_factors[0][pos];
- }
-}
-
-static cqueue *cqueue_create(int size)
+static cqueue *cqueue_create(int size, int max_size)
{
cqueue *q;
+ if (max_size < size)
+ return NULL;
+
q = av_malloc(sizeof(cqueue));
if (!q)
return NULL;
+ q->max_size = max_size;
q->size = size;
q->nb_elements = 0;
- q->first = 0;
- q->elements = av_malloc_array(size, sizeof(double));
+ q->elements = av_malloc_array(max_size, sizeof(double));
if (!q->elements) {
av_free(q);
return NULL;
/* Returns non-zero when the queue holds no elements
 * (negative counts are defensively treated as empty). */
static int cqueue_empty(cqueue *q)
{
    return !(q->nb_elements > 0);
}
static int cqueue_enqueue(cqueue *q, double element)
{
- int i;
-
- av_assert2(q->nb_elements != q->size);
+ av_assert2(q->nb_elements < q->max_size);
- i = (q->first + q->nb_elements) % q->size;
- q->elements[i] = element;
+ q->elements[q->nb_elements] = element;
q->nb_elements++;
return 0;
/* Reads element `index` without removing it; the front of the
 * queue is always stored at slot 0. */
static double cqueue_peek(cqueue *q, int index)
{
    double value;

    av_assert2(index < q->nb_elements);
    value = q->elements[index];

    return value;
}
static int cqueue_dequeue(cqueue *q, double *element)
{
av_assert2(!cqueue_empty(q));
- *element = q->elements[q->first];
- q->first = (q->first + 1) % q->size;
+ *element = q->elements[0];
+ memmove(&q->elements[0], &q->elements[1], (q->nb_elements - 1) * sizeof(double));
q->nb_elements--;
return 0;
{
av_assert2(!cqueue_empty(q));
- q->first = (q->first + 1) % q->size;
+ memmove(&q->elements[0], &q->elements[1], (q->nb_elements - 1) * sizeof(double));
q->nb_elements--;
return 0;
}
/* Resizes the queue's logical size in place (capacity is fixed at max_size).
 * Used when the `gausssize` option is changed at runtime. */
static void cqueue_resize(cqueue *q, int new_size)
{
    av_assert2(q->max_size >= new_size);
    av_assert2(MIN_FILTER_SIZE <= new_size);

    if (new_size > q->nb_elements) {
        /* Growing: shift existing history toward the middle and replicate
         * the oldest value into the freed front slots, so the filter window
         * stays centered on the same data. */
        const int side = (new_size - q->nb_elements) / 2;

        memmove(q->elements + side, q->elements, sizeof(double) * q->nb_elements);
        for (int i = 0; i < side; i++)
            q->elements[i] = q->elements[side];
        /* NOTE(review): this sets the count to one less than the padded total
         * (new_size - 1 - side rather than nb_elements + side) — presumably to
         * leave room for the next enqueue; confirm against update_gain_history. */
        q->nb_elements = new_size - 1 - side;
    } else {
        /* Shrinking: drop roughly half the excess from the front; the rest
         * drains naturally as new values are enqueued. */
        int count = (q->size - new_size + 1) / 2;

        while (count-- > 0)
            cqueue_pop(q);
    }

    q->size = new_size;
}
+
static void init_gaussian_filter(DynamicAudioNormalizerContext *s)
{
double total_weight = 0.0;
av_freep(&s->prev_amplification_factor);
av_freep(&s->dc_correction_value);
av_freep(&s->compress_threshold);
- av_freep(&s->fade_factors[0]);
- av_freep(&s->fade_factors[1]);
for (c = 0; c < s->channels; c++) {
if (s->gain_history_original)
cqueue_free(s->gain_history_minimum[c]);
if (s->gain_history_smoothed)
cqueue_free(s->gain_history_smoothed[c]);
+ if (s->threshold_history)
+ cqueue_free(s->threshold_history[c]);
}
av_freep(&s->gain_history_original);
av_freep(&s->gain_history_minimum);
av_freep(&s->gain_history_smoothed);
+ av_freep(&s->threshold_history);
+
+ cqueue_free(s->is_enabled);
+ s->is_enabled = NULL;
av_freep(&s->weights);
uninit(ctx);
- s->frame_len =
- inlink->min_samples =
- inlink->max_samples =
- inlink->partial_buf_size = frame_size(inlink->sample_rate, s->frame_len_msec);
+ s->channels = inlink->channels;
+ s->frame_len = frame_size(inlink->sample_rate, s->frame_len_msec);
av_log(ctx, AV_LOG_DEBUG, "frame len %d\n", s->frame_len);
- s->fade_factors[0] = av_malloc_array(s->frame_len, sizeof(*s->fade_factors[0]));
- s->fade_factors[1] = av_malloc_array(s->frame_len, sizeof(*s->fade_factors[1]));
-
s->prev_amplification_factor = av_malloc_array(inlink->channels, sizeof(*s->prev_amplification_factor));
s->dc_correction_value = av_calloc(inlink->channels, sizeof(*s->dc_correction_value));
s->compress_threshold = av_calloc(inlink->channels, sizeof(*s->compress_threshold));
s->gain_history_original = av_calloc(inlink->channels, sizeof(*s->gain_history_original));
s->gain_history_minimum = av_calloc(inlink->channels, sizeof(*s->gain_history_minimum));
s->gain_history_smoothed = av_calloc(inlink->channels, sizeof(*s->gain_history_smoothed));
- s->weights = av_malloc_array(s->filter_size, sizeof(*s->weights));
+ s->threshold_history = av_calloc(inlink->channels, sizeof(*s->threshold_history));
+ s->weights = av_malloc_array(MAX_FILTER_SIZE, sizeof(*s->weights));
+ s->is_enabled = cqueue_create(s->filter_size, MAX_FILTER_SIZE);
if (!s->prev_amplification_factor || !s->dc_correction_value ||
- !s->compress_threshold || !s->fade_factors[0] || !s->fade_factors[1] ||
+ !s->compress_threshold ||
!s->gain_history_original || !s->gain_history_minimum ||
- !s->gain_history_smoothed || !s->weights)
+ !s->gain_history_smoothed || !s->threshold_history ||
+ !s->is_enabled || !s->weights)
return AVERROR(ENOMEM);
for (c = 0; c < inlink->channels; c++) {
s->prev_amplification_factor[c] = 1.0;
- s->gain_history_original[c] = cqueue_create(s->filter_size);
- s->gain_history_minimum[c] = cqueue_create(s->filter_size);
- s->gain_history_smoothed[c] = cqueue_create(s->filter_size);
+ s->gain_history_original[c] = cqueue_create(s->filter_size, MAX_FILTER_SIZE);
+ s->gain_history_minimum[c] = cqueue_create(s->filter_size, MAX_FILTER_SIZE);
+ s->gain_history_smoothed[c] = cqueue_create(s->filter_size, MAX_FILTER_SIZE);
+ s->threshold_history[c] = cqueue_create(s->filter_size, MAX_FILTER_SIZE);
if (!s->gain_history_original[c] || !s->gain_history_minimum[c] ||
- !s->gain_history_smoothed[c])
+ !s->gain_history_smoothed[c] || !s->threshold_history[c])
return AVERROR(ENOMEM);
}
- precalculate_fade_factors(s->fade_factors, s->frame_len);
init_gaussian_filter(s);
- s->channels = inlink->channels;
- s->delay = s->filter_size;
-
return 0;
}
/* Linear cross-fade between two values across a window of `length` samples.
 * The weight of `prev` decreases from just under 1.0 down to 0.0 as `pos`
 * advances, so the last sample of the window uses `next` exclusively. */
static inline double fade(double prev, double next, int pos, int length)
{
    const double step   = 1.0 / length;
    const double w_prev = 1.0 - (step * (pos + 1.0));
    const double w_next = 1.0 - w_prev;

    return w_prev * prev + w_next * next;
}
static inline double pow_2(const double value)
return FFMAX(sqrt(rms_value), DBL_EPSILON);
}
-static double get_max_local_gain(DynamicAudioNormalizerContext *s, AVFrame *frame,
- int channel)
+static local_gain get_max_local_gain(DynamicAudioNormalizerContext *s, AVFrame *frame,
+ int channel)
{
- const double maximum_gain = s->peak_value / find_peak_magnitude(frame, channel);
+ const double peak_magnitude = find_peak_magnitude(frame, channel);
+ const double maximum_gain = s->peak_value / peak_magnitude;
const double rms_gain = s->target_rms > DBL_EPSILON ? (s->target_rms / compute_frame_rms(frame, channel)) : DBL_MAX;
- return bound(s->max_amplification, FFMIN(maximum_gain, rms_gain));
+ local_gain gain;
+
+ gain.threshold = peak_magnitude > s->threshold;
+ gain.max_gain = bound(s->max_amplification, FFMIN(maximum_gain, rms_gain));
+
+ return gain;
}
static double minimum_filter(cqueue *q)
return min;
}
-static double gaussian_filter(DynamicAudioNormalizerContext *s, cqueue *q)
+static double gaussian_filter(DynamicAudioNormalizerContext *s, cqueue *q, cqueue *tq)
{
- double result = 0.0;
+ double result = 0.0, tsum = 0.0;
int i;
for (i = 0; i < cqueue_size(q); i++) {
- result += cqueue_peek(q, i) * s->weights[i];
+ tsum += cqueue_peek(tq, i) * s->weights[i];
+ result += cqueue_peek(q, i) * s->weights[i] * cqueue_peek(tq, i);
}
+ if (tsum == 0.0)
+ result = 1.0;
+
return result;
}
static void update_gain_history(DynamicAudioNormalizerContext *s, int channel,
- double current_gain_factor)
+ local_gain gain)
{
- if (cqueue_empty(s->gain_history_original[channel]) ||
- cqueue_empty(s->gain_history_minimum[channel])) {
+ if (cqueue_empty(s->gain_history_original[channel])) {
const int pre_fill_size = s->filter_size / 2;
- const double initial_value = s->alt_boundary_mode ? current_gain_factor : 1.0;
+ const double initial_value = s->alt_boundary_mode ? gain.max_gain : s->peak_value;
s->prev_amplification_factor[channel] = initial_value;
while (cqueue_size(s->gain_history_original[channel]) < pre_fill_size) {
cqueue_enqueue(s->gain_history_original[channel], initial_value);
+ cqueue_enqueue(s->threshold_history[channel], gain.threshold);
}
}
- cqueue_enqueue(s->gain_history_original[channel], current_gain_factor);
+ cqueue_enqueue(s->gain_history_original[channel], gain.max_gain);
while (cqueue_size(s->gain_history_original[channel]) >= s->filter_size) {
double minimum;
- av_assert0(cqueue_size(s->gain_history_original[channel]) == s->filter_size);
if (cqueue_empty(s->gain_history_minimum[channel])) {
const int pre_fill_size = s->filter_size / 2;
cqueue_enqueue(s->gain_history_minimum[channel], minimum);
+ cqueue_enqueue(s->threshold_history[channel], gain.threshold);
+
cqueue_pop(s->gain_history_original[channel]);
}
while (cqueue_size(s->gain_history_minimum[channel]) >= s->filter_size) {
- double smoothed;
- av_assert0(cqueue_size(s->gain_history_minimum[channel]) == s->filter_size);
- smoothed = gaussian_filter(s, s->gain_history_minimum[channel]);
+ double smoothed, limit;
+
+ smoothed = gaussian_filter(s, s->gain_history_minimum[channel], s->threshold_history[channel]);
+ limit = cqueue_peek(s->gain_history_original[channel], 0);
+ smoothed = FFMIN(smoothed, limit);
cqueue_enqueue(s->gain_history_smoothed[channel], smoothed);
cqueue_pop(s->gain_history_minimum[channel]);
+ cqueue_pop(s->threshold_history[channel]);
}
}
s->dc_correction_value[c] = is_first_frame ? current_average_value : update_value(current_average_value, s->dc_correction_value[c], 0.1);
for (i = 0; i < frame->nb_samples; i++) {
- dst_ptr[i] -= fade(prev_value, s->dc_correction_value[c], i, s->fade_factors);
+ dst_ptr[i] -= fade(prev_value, s->dc_correction_value[c], i, frame->nb_samples);
}
}
}
for (c = 0; c < s->channels; c++) {
double *const dst_ptr = (double *)frame->extended_data[c];
for (i = 0; i < frame->nb_samples; i++) {
- const double localThresh = fade(prev_actual_thresh, curr_actual_thresh, i, s->fade_factors);
+ const double localThresh = fade(prev_actual_thresh, curr_actual_thresh, i, frame->nb_samples);
dst_ptr[i] = copysign(bound(localThresh, fabs(dst_ptr[i])), dst_ptr[i]);
}
}
dst_ptr = (double *)frame->extended_data[c];
for (i = 0; i < frame->nb_samples; i++) {
- const double localThresh = fade(prev_actual_thresh, curr_actual_thresh, i, s->fade_factors);
+ const double localThresh = fade(prev_actual_thresh, curr_actual_thresh, i, frame->nb_samples);
dst_ptr[i] = copysign(bound(localThresh, fabs(dst_ptr[i])), dst_ptr[i]);
}
}
}
if (s->channels_coupled) {
- const double current_gain_factor = get_max_local_gain(s, frame, -1);
+ const local_gain gain = get_max_local_gain(s, frame, -1);
int c;
for (c = 0; c < s->channels; c++)
- update_gain_history(s, c, current_gain_factor);
+ update_gain_history(s, c, gain);
} else {
int c;
}
}
-static void amplify_frame(DynamicAudioNormalizerContext *s, AVFrame *frame)
+static void amplify_frame(DynamicAudioNormalizerContext *s, AVFrame *frame, int enabled)
{
int c, i;
cqueue_dequeue(s->gain_history_smoothed[c], ¤t_amplification_factor);
- for (i = 0; i < frame->nb_samples; i++) {
+ for (i = 0; i < frame->nb_samples && enabled; i++) {
const double amplification_factor = fade(s->prev_amplification_factor[c],
current_amplification_factor, i,
- s->fade_factors);
+ frame->nb_samples);
dst_ptr[i] *= amplification_factor;
-
- if (fabs(dst_ptr[i]) > s->peak_value)
- dst_ptr[i] = copysign(s->peak_value, dst_ptr[i]);
}
s->prev_amplification_factor[c] = current_amplification_factor;
{
AVFilterContext *ctx = inlink->dst;
DynamicAudioNormalizerContext *s = ctx->priv;
- AVFilterLink *outlink = inlink->dst->outputs[0];
- int ret = 0;
+ AVFilterLink *outlink = ctx->outputs[0];
+ int ret = 1;
- if (!cqueue_empty(s->gain_history_smoothed[0])) {
+ while (((s->queue.available >= s->filter_size) ||
+ (s->eof && s->queue.available)) &&
+ !cqueue_empty(s->gain_history_smoothed[0])) {
AVFrame *out = ff_bufqueue_get(&s->queue);
+ double is_enabled;
+
+ cqueue_dequeue(s->is_enabled, &is_enabled);
- amplify_frame(s, out);
+ amplify_frame(s, out, is_enabled > 0.);
+ s->pts = out->pts + out->nb_samples;
ret = ff_filter_frame(outlink, out);
}
+ av_frame_make_writable(in);
analyze_frame(s, in);
- ff_bufqueue_add(ctx, &s->queue, in);
+ if (!s->eof) {
+ ff_bufqueue_add(ctx, &s->queue, in);
+ cqueue_enqueue(s->is_enabled, !ctx->is_disabled);
+ } else {
+ av_frame_free(&in);
+ }
return ret;
}
}
}
- s->delay--;
return filter_frame(inlink, out);
}
-static int request_frame(AVFilterLink *outlink)
+static int flush(AVFilterLink *outlink)
{
AVFilterContext *ctx = outlink->src;
DynamicAudioNormalizerContext *s = ctx->priv;
int ret = 0;
- ret = ff_request_frame(ctx->inputs[0]);
+ if (!cqueue_empty(s->gain_history_smoothed[0])) {
+ ret = flush_buffer(s, ctx->inputs[0], outlink);
+ } else if (s->queue.available) {
+ AVFrame *out = ff_bufqueue_get(&s->queue);
+
+ s->pts = out->pts + out->nb_samples;
+ ret = ff_filter_frame(outlink, out);
+ }
+
+ return ret;
+}
- if (ret == AVERROR_EOF && !ctx->is_disabled && s->delay) {
- if (!cqueue_empty(s->gain_history_smoothed[0])) {
- ret = flush_buffer(s, ctx->inputs[0], outlink);
- } else if (s->queue.available) {
- AVFrame *out = ff_bufqueue_get(&s->queue);
/* activate() callback: pulls fixed-size chunks of frame_len samples from the
 * input, feeds them to filter_frame(), and manages EOF draining. Replaces the
 * older filter_frame/request_frame pad callbacks. */
static int activate(AVFilterContext *ctx)
{
    AVFilterLink *inlink = ctx->inputs[0];
    AVFilterLink *outlink = ctx->outputs[0];
    DynamicAudioNormalizerContext *s = ctx->priv;
    AVFrame *in = NULL;
    int ret = 0, status;
    int64_t pts;

    /* Propagate a downstream close back to our input first. */
    FF_FILTER_FORWARD_STATUS_BACK(outlink, inlink);

    if (!s->eof) {
        /* Consume exactly frame_len samples (min == max == frame_len),
         * so analysis always operates on uniform frame sizes. */
        ret = ff_inlink_consume_samples(inlink, s->frame_len, s->frame_len, &in);
        if (ret < 0)
            return ret;
        if (ret > 0) {
            ret = filter_frame(inlink, in);
            if (ret <= 0)
                return ret;
        }

        /* Another full frame is already buffered: ask to be re-run soon
         * instead of waiting for new input. */
        if (ff_inlink_check_available_samples(inlink, s->frame_len) > 0) {
            ff_filter_set_ready(ctx, 10);
            return 0;
        }
    }

    /* Latch EOF from the input status once all buffered samples are used. */
    if (!s->eof && ff_inlink_acknowledge_status(inlink, &status, &pts)) {
        if (status == AVERROR_EOF)
            s->eof = 1;
    }

    /* At EOF, drain the delayed frames one activation at a time. */
    if (s->eof && s->queue.available)
        return flush(outlink);

    if (s->eof && !s->queue.available) {
        ff_outlink_set_status(outlink, AVERROR_EOF, s->pts);
        return 0;
    }

    if (!s->eof)
        FF_FILTER_FORWARD_WANTED(outlink, inlink);

    return FFERROR_NOT_READY;
}
+
/* Runtime command handler (options carry AV_OPT_FLAG_RUNTIME_PARAM):
 * applies the generic option update, then rebuilds the Gaussian kernel and
 * resizes the per-channel histories if the filter size changed, and
 * recomputes the frame length in samples. */
static int process_command(AVFilterContext *ctx, const char *cmd, const char *args,
                           char *res, int res_len, int flags)
{
    DynamicAudioNormalizerContext *s = ctx->priv;
    AVFilterLink *inlink = ctx->inputs[0];
    int prev_filter_size = s->filter_size;
    int ret;

    ret = ff_filter_process_command(ctx, cmd, args, res, res_len, flags);
    if (ret < 0)
        return ret;

    /* The Gaussian filter needs an odd tap count; round up silently. */
    s->filter_size |= 1;
    if (prev_filter_size != s->filter_size) {
        init_gaussian_filter(s);

        for (int c = 0; c < s->channels; c++) {
            cqueue_resize(s->gain_history_original[c], s->filter_size);
            cqueue_resize(s->gain_history_minimum[c], s->filter_size);
            cqueue_resize(s->threshold_history[c], s->filter_size);
        }
        /* NOTE(review): gain_history_smoothed is not resized here —
         * presumably fine because it is drained as frames are emitted;
         * confirm against update_gain_history/amplify_frame. */
    }

    s->frame_len = frame_size(inlink->sample_rate, s->frame_len_msec);

    return 0;
}
/* Input pad: no .filter_frame callback — frames are consumed through the
 * filter's activate() function, which makes them writable itself. */
static const AVFilterPad avfilter_af_dynaudnorm_inputs[] = {
    {
        .name         = "default",
        .type         = AVMEDIA_TYPE_AUDIO,
        .config_props = config_input,
    },
    { NULL }
};
{
.name = "default",
.type = AVMEDIA_TYPE_AUDIO,
- .request_frame = request_frame,
},
{ NULL }
};
/* Filter registration. Timeline support is INTERNAL: enable/disable state is
 * sampled per input frame (queued in s->is_enabled) rather than handled by
 * the framework, so the delayed output lines up with the analysis window. */
const AVFilter ff_af_dynaudnorm = {
    .name            = "dynaudnorm",
    .description     = NULL_IF_CONFIG_SMALL("Dynamic Audio Normalizer."),
    .query_formats   = query_formats,
    .priv_size       = sizeof(DynamicAudioNormalizerContext),
    .init            = init,
    .uninit          = uninit,
    .activate        = activate,
    .inputs          = avfilter_af_dynaudnorm_inputs,
    .outputs         = avfilter_af_dynaudnorm_outputs,
    .priv_class      = &dynaudnorm_class,
    .flags           = AVFILTER_FLAG_SUPPORT_TIMELINE_INTERNAL,
    .process_command = process_command,
};