/*
 * Copyright (c) 2020 Paul B Mahol
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */
#include <float.h>

#include "libavutil/avassert.h"
#include "libavutil/opt.h"

#define FF_BUFQUEUE_SIZE (1024)
#include "bufferqueue.h"

#include "audio.h"
#include "avfilter.h"
#include "filters.h"
#include "internal.h"

#define MAX_ITEMS  882000
#define MIN_PEAK (1. / 32768.)

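/* A PeriodItem records one run of same-sign samples (half of a waveform
 * period between two zero crossings) and its absolute peak; each channel
 * keeps these in a fixed-size ring buffer. type != 0 marks an item whose
 * analysis is complete, so a gain may be derived from its peak. */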
typedef struct PeriodItem {
    int size;
    int type;
    double max_peak;
} PeriodItem;

typedef struct ChannelContext {
    int state;
    int bypass;
    PeriodItem pi[MAX_ITEMS];
    double gain_state;
    double pi_max_peak;
    int pi_start;
    int pi_end;
    int pi_size;
} ChannelContext;

typedef struct SpeechNormalizerContext {
    const AVClass *class;

    double peak_value;
    double max_expansion;
    double max_compression;
    double threshold_value;
    double raise_amount;
    double fall_amount;
    uint64_t channels;
    int invert;
    int link;

    ChannelContext *cc;
    double prev_gain;

    int max_period;
    int eof;
    int64_t pts;

    struct FFBufQueue queue;

    void (*analyze_channel)(AVFilterContext *ctx, ChannelContext *cc,
                            const uint8_t *srcp, int nb_samples);
    void (*filter_channels[2])(AVFilterContext *ctx,
                               AVFrame *in, int nb_samples);
} SpeechNormalizerContext;

#define OFFSET(x) offsetof(SpeechNormalizerContext, x)
#define FLAGS AV_OPT_FLAG_AUDIO_PARAM|AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_RUNTIME_PARAM

static const AVOption speechnorm_options[] = {
    { "peak", "set the peak value", OFFSET(peak_value), AV_OPT_TYPE_DOUBLE, {.dbl=0.95}, 0.0, 1.0, FLAGS },
    { "p", "set the peak value", OFFSET(peak_value), AV_OPT_TYPE_DOUBLE, {.dbl=0.95}, 0.0, 1.0, FLAGS },
    { "expansion", "set the max expansion factor", OFFSET(max_expansion), AV_OPT_TYPE_DOUBLE, {.dbl=2.0}, 1.0, 50.0, FLAGS },
    { "e", "set the max expansion factor", OFFSET(max_expansion), AV_OPT_TYPE_DOUBLE, {.dbl=2.0}, 1.0, 50.0, FLAGS },
    { "compression", "set the max compression factor", OFFSET(max_compression), AV_OPT_TYPE_DOUBLE, {.dbl=2.0}, 1.0, 50.0, FLAGS },
    { "c", "set the max compression factor", OFFSET(max_compression), AV_OPT_TYPE_DOUBLE, {.dbl=2.0}, 1.0, 50.0, FLAGS },
    { "threshold", "set the threshold value", OFFSET(threshold_value), AV_OPT_TYPE_DOUBLE, {.dbl=0}, 0.0, 1.0, FLAGS },
    { "t", "set the threshold value", OFFSET(threshold_value), AV_OPT_TYPE_DOUBLE, {.dbl=0}, 0.0, 1.0, FLAGS },
    { "raise", "set the expansion raising amount", OFFSET(raise_amount), AV_OPT_TYPE_DOUBLE, {.dbl=0.001}, 0.0, 1.0, FLAGS },
    { "r", "set the expansion raising amount", OFFSET(raise_amount), AV_OPT_TYPE_DOUBLE, {.dbl=0.001}, 0.0, 1.0, FLAGS },
    { "fall", "set the compression raising amount", OFFSET(fall_amount), AV_OPT_TYPE_DOUBLE, {.dbl=0.001}, 0.0, 1.0, FLAGS },
    { "f", "set the compression raising amount", OFFSET(fall_amount), AV_OPT_TYPE_DOUBLE, {.dbl=0.001}, 0.0, 1.0, FLAGS },
    { "channels", "set channels to filter", OFFSET(channels), AV_OPT_TYPE_CHANNEL_LAYOUT, {.i64=-1}, INT64_MIN, INT64_MAX, FLAGS },
    { "h", "set channels to filter", OFFSET(channels), AV_OPT_TYPE_CHANNEL_LAYOUT, {.i64=-1}, INT64_MIN, INT64_MAX, FLAGS },
    { "invert", "set inverted filtering", OFFSET(invert), AV_OPT_TYPE_BOOL, {.i64=0}, 0, 1, FLAGS },
    { "i", "set inverted filtering", OFFSET(invert), AV_OPT_TYPE_BOOL, {.i64=0}, 0, 1, FLAGS },
    { "link", "set linked channels filtering", OFFSET(link), AV_OPT_TYPE_BOOL, {.i64=0}, 0, 1, FLAGS },
    { "l", "set linked channels filtering", OFFSET(link), AV_OPT_TYPE_BOOL, {.i64=0}, 0, 1, FLAGS },
    { NULL }
};

AVFILTER_DEFINE_CLASS(speechnorm);

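/* Accept any channel count/layout and sample rate; processing is done on
 * planar float or planar double samples. */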
static int query_formats(AVFilterContext *ctx)
{
    AVFilterFormats *formats;
    AVFilterChannelLayouts *layouts;
    static const enum AVSampleFormat sample_fmts[] = {
        AV_SAMPLE_FMT_FLTP, AV_SAMPLE_FMT_DBLP,
        AV_SAMPLE_FMT_NONE
    };
    int ret;

    layouts = ff_all_channel_counts();
    if (!layouts)
        return AVERROR(ENOMEM);
    ret = ff_set_common_channel_layouts(ctx, layouts);
    if (ret < 0)
        return ret;

    formats = ff_make_format_list(sample_fmts);
    if (!formats)
        return AVERROR(ENOMEM);
    ret = ff_set_common_formats(ctx, formats);
    if (ret < 0)
        return ret;

    formats = ff_all_samplerates();
    if (!formats)
        return AVERROR(ENOMEM);
    return ff_set_common_samplerates(ctx, formats);
}

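/* Count the analyzed samples queued in the period ring buffer: the 'remain'
 * samples left in the item at 'start' plus the size of every following
 * finished (type != 0) item up to 'end'. */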
static int get_pi_samples(PeriodItem *pi, int start, int end, int remain)
{
    int sum;

    if (pi[start].type == 0)
        return remain;

    sum = remain;
    while (start != end) {
        start++;
        if (start >= MAX_ITEMS)
            start = 0;
        if (pi[start].type == 0)
            break;
        av_assert0(pi[start].size > 0);
        sum += pi[start].size;
    }

    return sum;
}

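/* Smallest number of fully analyzed samples available on any channel;
 * filtering must not run ahead of this. */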
static int available_samples(AVFilterContext *ctx)
{
    SpeechNormalizerContext *s = ctx->priv;
    AVFilterLink *inlink = ctx->inputs[0];
    int min_pi_nb_samples;

    min_pi_nb_samples = get_pi_samples(s->cc[0].pi, s->cc[0].pi_start, s->cc[0].pi_end, s->cc[0].pi_size);
    for (int ch = 1; ch < inlink->channels && min_pi_nb_samples > 0; ch++) {
        ChannelContext *cc = &s->cc[ch];

        min_pi_nb_samples = FFMIN(min_pi_nb_samples, get_pi_samples(cc->pi, cc->pi_start, cc->pi_end, cc->pi_size));
    }

    return min_pi_nb_samples;
}

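/* Consume nb_samples from the channel's current period item. */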
static void consume_pi(ChannelContext *cc, int nb_samples)
{
    if (cc->pi_size >= nb_samples) {
        cc->pi_size -= nb_samples;
    } else {
        av_assert0(0);
    }
}

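/* Compute the next per-period gain: while a period is classified as active
 * (peak above threshold, or below it with invert set) the gain ramps up by
 * raise_amount towards max_expansion, otherwise it falls by fall_amount
 * towards 1/max_compression. It is always capped so that the period's peak
 * is not pushed beyond peak_value. */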
static double next_gain(AVFilterContext *ctx, double pi_max_peak, int bypass, double state)
{
    SpeechNormalizerContext *s = ctx->priv;
    const double expansion = FFMIN(s->max_expansion, s->peak_value / pi_max_peak);
    const double compression = 1. / s->max_compression;
    const int type = s->invert ? pi_max_peak <= s->threshold_value : pi_max_peak >= s->threshold_value;

    if (bypass) {
        return 1.;
    } else if (type) {
        return FFMIN(expansion, state + s->raise_amount);
    } else {
        return FFMIN(expansion, FFMAX(compression, state - s->fall_amount));
    }
}

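/* Once the current period item is exhausted, pop the next finished item from
 * the ring buffer and update the channel's gain state for it. */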
static void next_pi(AVFilterContext *ctx, ChannelContext *cc, int bypass)
{
    av_assert0(cc->pi_size >= 0);
    if (cc->pi_size == 0) {
        SpeechNormalizerContext *s = ctx->priv;
        int start = cc->pi_start;

        av_assert0(cc->pi[start].size > 0);
        av_assert0(cc->pi[start].type > 0 || s->eof);
        cc->pi_size = cc->pi[start].size;
        cc->pi_max_peak = cc->pi[start].max_peak;
        av_assert0(cc->pi_start != cc->pi_end || s->eof);
        start++;
        if (start >= MAX_ITEMS)
            start = 0;
        cc->pi_start = start;
        cc->gain_state = next_gain(ctx, cc->pi_max_peak, bypass, cc->gain_state);
    }
}

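/* Look ahead over queued period items covering up to max_size samples and
 * return the minimum gain that will occur, without consuming anything. */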
static double min_gain(AVFilterContext *ctx, ChannelContext *cc, int max_size)
{
    SpeechNormalizerContext *s = ctx->priv;
    double min_gain = s->max_expansion;
    double gain_state = cc->gain_state;
    int size = cc->pi_size;
    int idx = cc->pi_start;

    min_gain = FFMIN(min_gain, gain_state);
    while (size <= max_size) {
        if (idx == cc->pi_end)
            break;
        gain_state = next_gain(ctx, cc->pi[idx].max_peak, 0, gain_state);
        min_gain = FFMIN(min_gain, gain_state);
        size += cc->pi[idx].size;
        idx++;
        if (idx >= MAX_ITEMS)
            idx = 0;
    }

    return min_gain;
}

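/* Split the input into runs of same-sign samples (half-periods), tracking
 * each run's absolute peak. A run is closed at a zero crossing or when it
 * grows past max_period; runs whose peak stays below MIN_PEAK are merged
 * into the next item instead of being finished. */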
#define ANALYZE_CHANNEL(name, ptype, zero)                                    \
static void analyze_channel_## name (AVFilterContext *ctx, ChannelContext *cc, \
                                     const uint8_t *srcp, int nb_samples)     \
{                                                                             \
    SpeechNormalizerContext *s = ctx->priv;                                   \
    const ptype *src = (const ptype *)srcp;                                   \
    int n = 0;                                                                \
                                                                              \
    if (cc->state < 0)                                                        \
        cc->state = src[0] >= zero;                                           \
                                                                              \
    while (n < nb_samples) {                                                  \
        if ((cc->state != (src[n] >= zero)) ||                                \
            (cc->pi[cc->pi_end].size > s->max_period)) {                      \
            double max_peak = cc->pi[cc->pi_end].max_peak;                    \
            int state = cc->state;                                            \
            cc->state = src[n] >= zero;                                       \
            av_assert0(cc->pi[cc->pi_end].size > 0);                          \
            if (cc->pi[cc->pi_end].max_peak >= MIN_PEAK ||                    \
                cc->pi[cc->pi_end].size > s->max_period) {                    \
                cc->pi[cc->pi_end].type = 1;                                  \
                cc->pi_end++;                                                 \
                if (cc->pi_end >= MAX_ITEMS)                                  \
                    cc->pi_end = 0;                                           \
                if (cc->state != state)                                       \
                    cc->pi[cc->pi_end].max_peak = DBL_MIN;                    \
                else                                                          \
                    cc->pi[cc->pi_end].max_peak = max_peak;                   \
                cc->pi[cc->pi_end].type = 0;                                  \
                cc->pi[cc->pi_end].size = 0;                                  \
                av_assert0(cc->pi_end != cc->pi_start);                       \
            }                                                                 \
        }                                                                     \
                                                                              \
        if (cc->state) {                                                      \
            while (src[n] >= zero) {                                          \
                cc->pi[cc->pi_end].max_peak = FFMAX(cc->pi[cc->pi_end].max_peak, src[n]); \
                cc->pi[cc->pi_end].size++;                                    \
                n++;                                                          \
                if (n >= nb_samples)                                          \
                    break;                                                    \
            }                                                                 \
        } else {                                                              \
            while (src[n] < zero) {                                           \
                cc->pi[cc->pi_end].max_peak = FFMAX(cc->pi[cc->pi_end].max_peak, -src[n]); \
                cc->pi[cc->pi_end].size++;                                    \
                n++;                                                          \
                if (n >= nb_samples)                                          \
                    break;                                                    \
            }                                                                 \
        }                                                                     \
    }                                                                         \
}

ANALYZE_CHANNEL(dbl, double, 0.0)
ANALYZE_CHANNEL(flt, float, 0.f)

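/* Independent-channel filtering: each channel applies its own gain, held
 * constant across a whole period item so that gain steps coincide with zero
 * crossings. */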
#define FILTER_CHANNELS(name, ptype)                                          \
static void filter_channels_## name (AVFilterContext *ctx,                    \
                                     AVFrame *in, int nb_samples)             \
{                                                                             \
    SpeechNormalizerContext *s = ctx->priv;                                   \
    AVFilterLink *inlink = ctx->inputs[0];                                    \
                                                                              \
    for (int ch = 0; ch < inlink->channels; ch++) {                           \
        ChannelContext *cc = &s->cc[ch];                                      \
        ptype *dst = (ptype *)in->extended_data[ch];                          \
        const int bypass = !(av_channel_layout_extract_channel(inlink->channel_layout, ch) & s->channels); \
        int n = 0;                                                            \
                                                                              \
        while (n < nb_samples) {                                              \
            ptype gain;                                                       \
            int size;                                                         \
                                                                              \
            next_pi(ctx, cc, bypass);                                         \
            size = FFMIN(nb_samples - n, cc->pi_size);                        \
            av_assert0(size > 0);                                             \
            gain = cc->gain_state;                                            \
            consume_pi(cc, size);                                             \
            for (int i = n; i < n + size; i++)                                \
                dst[i] *= gain;                                               \
            n += size;                                                        \
        }                                                                     \
    }                                                                         \
}

FILTER_CHANNELS(dbl, double)
FILTER_CHANNELS(flt, float)

static double lerp(double min, double max, double mix)
{
    return min + (max - min) * mix;
}

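/* Linked-channel filtering: all channels share one gain, the minimum of
 * min_gain() over every non-bypassed channel. Since a common gain step can
 * no longer be guaranteed to land on a zero crossing in every channel, the
 * gain is linearly interpolated from prev_gain across each block. */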
#define FILTER_LINK_CHANNELS(name, ptype)                                     \
static void filter_link_channels_## name (AVFilterContext *ctx,               \
                                          AVFrame *in, int nb_samples)        \
{                                                                             \
    SpeechNormalizerContext *s = ctx->priv;                                   \
    AVFilterLink *inlink = ctx->inputs[0];                                    \
    int n = 0;                                                                \
                                                                              \
    while (n < nb_samples) {                                                  \
        int min_size = nb_samples - n;                                        \
        int max_size = 1;                                                     \
        ptype gain = s->max_expansion;                                        \
                                                                              \
        for (int ch = 0; ch < inlink->channels; ch++) {                       \
            ChannelContext *cc = &s->cc[ch];                                  \
                                                                              \
            cc->bypass = !(av_channel_layout_extract_channel(inlink->channel_layout, ch) & s->channels); \
                                                                              \
            next_pi(ctx, cc, cc->bypass);                                     \
            min_size = FFMIN(min_size, cc->pi_size);                          \
            max_size = FFMAX(max_size, cc->pi_size);                          \
        }                                                                     \
                                                                              \
        av_assert0(min_size > 0);                                             \
        for (int ch = 0; ch < inlink->channels; ch++) {                       \
            ChannelContext *cc = &s->cc[ch];                                  \
                                                                              \
            if (cc->bypass)                                                   \
                continue;                                                     \
            gain = FFMIN(gain, min_gain(ctx, cc, max_size));                  \
        }                                                                     \
                                                                              \
        for (int ch = 0; ch < inlink->channels; ch++) {                       \
            ChannelContext *cc = &s->cc[ch];                                  \
            ptype *dst = (ptype *)in->extended_data[ch];                      \
                                                                              \
            consume_pi(cc, min_size);                                         \
            if (cc->bypass)                                                   \
                continue;                                                     \
                                                                              \
            for (int i = n; i < n + min_size; i++) {                          \
                ptype g = lerp(s->prev_gain, gain, (i - n) / (double)min_size); \
                dst[i] *= g;                                                  \
            }                                                                 \
        }                                                                     \
                                                                              \
        s->prev_gain = gain;                                                  \
        n += min_size;                                                        \
    }                                                                         \
}

FILTER_LINK_CHANNELS(dbl, double)
FILTER_LINK_CHANNELS(flt, float)

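/* Output the oldest queued frame once all of its samples are covered by
 * analyzed periods (or at EOF), then analyze and queue new input frames. */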
static int filter_frame(AVFilterContext *ctx)
{
    SpeechNormalizerContext *s = ctx->priv;
    AVFilterLink *outlink = ctx->outputs[0];
    AVFilterLink *inlink = ctx->inputs[0];
    int ret;

    while (s->queue.available > 0) {
        int min_pi_nb_samples;
        AVFrame *in;

        in = ff_bufqueue_peek(&s->queue, 0);
        if (!in)
            break;

        min_pi_nb_samples = available_samples(ctx);
        if (min_pi_nb_samples < in->nb_samples && !s->eof)
            break;

        in = ff_bufqueue_get(&s->queue);

        ret = av_frame_make_writable(in);
        if (ret < 0) {
            av_frame_free(&in);
            return ret;
        }

        s->filter_channels[s->link](ctx, in, in->nb_samples);

        s->pts = in->pts + in->nb_samples;

        return ff_filter_frame(outlink, in);
    }

    for (int f = 0; f < ff_inlink_queued_frames(inlink); f++) {
        AVFrame *in;

        ret = ff_inlink_consume_frame(inlink, &in);
        if (ret < 0)
            return ret;
        if (ret == 0)
            break;

        ff_bufqueue_add(ctx, &s->queue, in);

        for (int ch = 0; ch < inlink->channels; ch++) {
            ChannelContext *cc = &s->cc[ch];

            s->analyze_channel(ctx, cc, in->extended_data[ch], in->nb_samples);
        }
    }

    return 1;
}

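/* Activation: try to filter, latch EOF from the input, signal EOF downstream
 * once the internal queue has drained, and otherwise either reschedule
 * ourselves or request more input. */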
static int activate(AVFilterContext *ctx)
{
    AVFilterLink *inlink = ctx->inputs[0];
    AVFilterLink *outlink = ctx->outputs[0];
    SpeechNormalizerContext *s = ctx->priv;
    int ret, status;
    int64_t pts;

    FF_FILTER_FORWARD_STATUS_BACK(outlink, inlink);

    ret = filter_frame(ctx);
    if (ret <= 0)
        return ret;

    if (!s->eof && ff_inlink_acknowledge_status(inlink, &status, &pts)) {
        if (status == AVERROR_EOF)
            s->eof = 1;
    }

    if (s->eof && ff_inlink_queued_samples(inlink) == 0 &&
        s->queue.available == 0) {
        ff_outlink_set_status(outlink, AVERROR_EOF, s->pts);
        return 0;
    }

    if (s->queue.available > 0) {
        AVFrame *in = ff_bufqueue_peek(&s->queue, 0);
        const int nb_samples = available_samples(ctx);

        if (nb_samples >= in->nb_samples || s->eof) {
            ff_filter_set_ready(ctx, 10);
            return 0;
        }
    }

    FF_FILTER_FORWARD_WANTED(outlink, inlink);

    return FFERROR_NOT_READY;
}

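/* Allocate per-channel state and select the sample-format specific analyze
 * and filter callbacks. Periods are force-split after 1/10 of a second. */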
static int config_input(AVFilterLink *inlink)
{
    AVFilterContext *ctx = inlink->dst;
    SpeechNormalizerContext *s = ctx->priv;

    s->max_period = inlink->sample_rate / 10;

    s->prev_gain = 1.;
    s->cc = av_calloc(inlink->channels, sizeof(*s->cc));
    if (!s->cc)
        return AVERROR(ENOMEM);

    for (int ch = 0; ch < inlink->channels; ch++) {
        ChannelContext *cc = &s->cc[ch];

        cc->state = -1;
        cc->gain_state = 1.;
    }

    switch (inlink->format) {
    case AV_SAMPLE_FMT_FLTP:
        s->analyze_channel = analyze_channel_flt;
        s->filter_channels[0] = filter_channels_flt;
        s->filter_channels[1] = filter_link_channels_flt;
        break;
    case AV_SAMPLE_FMT_DBLP:
        s->analyze_channel = analyze_channel_dbl;
        s->filter_channels[0] = filter_channels_dbl;
        s->filter_channels[1] = filter_link_channels_dbl;
        break;
    default:
        av_assert0(0);
    }

    return 0;
}

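/* Apply runtime option changes; reset the shared gain when 'link' toggles so
 * interpolation restarts from unity. */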
static int process_command(AVFilterContext *ctx, const char *cmd, const char *args,
                           char *res, int res_len, int flags)
{
    SpeechNormalizerContext *s = ctx->priv;
    int link = s->link;
    int ret;

    ret = ff_filter_process_command(ctx, cmd, args, res, res_len, flags);
    if (ret < 0)
        return ret;
    if (link != s->link)
        s->prev_gain = 1.;

    return 0;
}

static av_cold void uninit(AVFilterContext *ctx)
{
    SpeechNormalizerContext *s = ctx->priv;

    ff_bufqueue_discard_all(&s->queue);
    av_freep(&s->cc);
}

static const AVFilterPad inputs[] = {
    {
        .name         = "default",
        .type         = AVMEDIA_TYPE_AUDIO,
        .config_props = config_input,
    },
    { NULL }
};

static const AVFilterPad outputs[] = {
    {
        .name = "default",
        .type = AVMEDIA_TYPE_AUDIO,
    },
    { NULL }
};

const AVFilter ff_af_speechnorm = {
    .name            = "speechnorm",
    .description     = NULL_IF_CONFIG_SMALL("Speech Normalizer."),
    .query_formats   = query_formats,
    .priv_size       = sizeof(SpeechNormalizerContext),
    .priv_class      = &speechnorm_class,
    .activate        = activate,
    .uninit          = uninit,
    .inputs          = inputs,
    .outputs         = outputs,
    .process_command = process_command,
};