/*
 * Copyright (c) 1999 Chris Bagwell
 * Copyright (c) 1999 Nick Bailey
 * Copyright (c) 2007 Rob Sykes <robs@users.sourceforge.net>
 * Copyright (c) 2013 Paul B Mahol
 * Copyright (c) 2014 Andrew Kelley
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

/**
 * @file
 * audio compand filter
 */
#include "libavutil/avassert.h"
#include "libavutil/avstring.h"
#include "libavutil/common.h"
#include "libavutil/mathematics.h"
#include "libavutil/opt.h"
#include "libavutil/samplefmt.h"

#include "audio.h"
#include "avfilter.h"
#include "internal.h"
38 typedef struct ChanParam {
44 typedef struct CompandSegment {
49 typedef struct CompandContext {
52 char *attacks, *decays, *points;
53 CompandSegment *segments;
59 double initial_volume;
67 int (*compand)(AVFilterContext *ctx, AVFrame *frame);
70 #define OFFSET(x) offsetof(CompandContext, x)
71 #define A AV_OPT_FLAG_AUDIO_PARAM|AV_OPT_FLAG_FILTERING_PARAM
73 static const AVOption compand_options[] = {
74 { "attacks", "set time over which increase of volume is determined", OFFSET(attacks), AV_OPT_TYPE_STRING, { .str = "0.3" }, 0, 0, A },
75 { "decays", "set time over which decrease of volume is determined", OFFSET(decays), AV_OPT_TYPE_STRING, { .str = "0.8" }, 0, 0, A },
76 { "points", "set points of transfer function", OFFSET(points), AV_OPT_TYPE_STRING, { .str = "-70/-70|-60/-20" }, 0, 0, A },
77 { "soft-knee", "set soft-knee", OFFSET(curve_dB), AV_OPT_TYPE_DOUBLE, { .dbl = 0.01 }, 0.01, 900, A },
78 { "gain", "set output gain", OFFSET(gain_dB), AV_OPT_TYPE_DOUBLE, { .dbl = 0 }, -900, 900, A },
79 { "volume", "set initial volume", OFFSET(initial_volume), AV_OPT_TYPE_DOUBLE, { .dbl = 0 }, -900, 0, A },
80 { "delay", "set delay for samples before sending them to volume adjuster", OFFSET(delay), AV_OPT_TYPE_DOUBLE, { .dbl = 0 }, 0, 20, A },
84 AVFILTER_DEFINE_CLASS(compand);
86 static av_cold int init(AVFilterContext *ctx)
88 CompandContext *s = ctx->priv;
89 s->pts = AV_NOPTS_VALUE;
93 static av_cold void uninit(AVFilterContext *ctx)
95 CompandContext *s = ctx->priv;
97 av_freep(&s->channels);
98 av_freep(&s->segments);
99 av_frame_free(&s->delay_frame);
102 static int query_formats(AVFilterContext *ctx)
104 AVFilterChannelLayouts *layouts;
105 AVFilterFormats *formats;
106 static const enum AVSampleFormat sample_fmts[] = {
112 layouts = ff_all_channel_counts();
114 return AVERROR(ENOMEM);
115 ret = ff_set_common_channel_layouts(ctx, layouts);
119 formats = ff_make_format_list(sample_fmts);
121 return AVERROR(ENOMEM);
122 ret = ff_set_common_formats(ctx, formats);
126 formats = ff_all_samplerates();
128 return AVERROR(ENOMEM);
129 return ff_set_common_samplerates(ctx, formats);
/* Count the '|'- or ' '-separated items in item_str.
 * An empty string still counts as one item (separator count + 1). */
static void count_items(char *item_str, int *nb_items)
{
    char *p;

    *nb_items = 1;
    for (p = item_str; *p; p++) {
        if (*p == ' ' || *p == '|')
            (*nb_items)++;
    }
}
143 static void update_volume(ChanParam *cp, double in)
145 double delta = in - cp->volume;
148 cp->volume += delta * cp->attack;
150 cp->volume += delta * cp->decay;
153 static double get_volume(CompandContext *s, double in_lin)
156 double in_log, out_log;
159 if (in_lin < s->in_min_lin)
160 return s->out_min_lin;
162 in_log = log(in_lin);
164 for (i = 1; i < s->nb_segments; i++)
165 if (in_log <= s->segments[i].x)
167 cs = &s->segments[i - 1];
169 out_log = cs->y + in_log * (cs->a * in_log + cs->b);
174 static int compand_nodelay(AVFilterContext *ctx, AVFrame *frame)
176 CompandContext *s = ctx->priv;
177 AVFilterLink *inlink = ctx->inputs[0];
178 const int channels = inlink->channels;
179 const int nb_samples = frame->nb_samples;
184 if (av_frame_is_writable(frame)) {
187 out_frame = ff_get_audio_buffer(inlink, nb_samples);
189 av_frame_free(&frame);
190 return AVERROR(ENOMEM);
192 err = av_frame_copy_props(out_frame, frame);
194 av_frame_free(&out_frame);
195 av_frame_free(&frame);
200 for (chan = 0; chan < channels; chan++) {
201 const double *src = (double *)frame->extended_data[chan];
202 double *dst = (double *)out_frame->extended_data[chan];
203 ChanParam *cp = &s->channels[chan];
205 for (i = 0; i < nb_samples; i++) {
206 update_volume(cp, fabs(src[i]));
208 dst[i] = av_clipd(src[i] * get_volume(s, cp->volume), -1, 1);
212 if (frame != out_frame)
213 av_frame_free(&frame);
215 return ff_filter_frame(ctx->outputs[0], out_frame);
218 #define MOD(a, b) (((a) >= (b)) ? (a) - (b) : (a))
220 static int compand_delay(AVFilterContext *ctx, AVFrame *frame)
222 CompandContext *s = ctx->priv;
223 AVFilterLink *inlink = ctx->inputs[0];
224 const int channels = inlink->channels;
225 const int nb_samples = frame->nb_samples;
226 int chan, i, av_uninit(dindex), oindex, av_uninit(count);
227 AVFrame *out_frame = NULL;
230 if (s->pts == AV_NOPTS_VALUE) {
231 s->pts = (frame->pts == AV_NOPTS_VALUE) ? 0 : frame->pts;
234 av_assert1(channels > 0); /* would corrupt delay_count and delay_index */
236 for (chan = 0; chan < channels; chan++) {
237 AVFrame *delay_frame = s->delay_frame;
238 const double *src = (double *)frame->extended_data[chan];
239 double *dbuf = (double *)delay_frame->extended_data[chan];
240 ChanParam *cp = &s->channels[chan];
243 count = s->delay_count;
244 dindex = s->delay_index;
245 for (i = 0, oindex = 0; i < nb_samples; i++) {
246 const double in = src[i];
247 update_volume(cp, fabs(in));
249 if (count >= s->delay_samples) {
251 out_frame = ff_get_audio_buffer(inlink, nb_samples - i);
253 av_frame_free(&frame);
254 return AVERROR(ENOMEM);
256 err = av_frame_copy_props(out_frame, frame);
258 av_frame_free(&out_frame);
259 av_frame_free(&frame);
262 out_frame->pts = s->pts;
263 s->pts += av_rescale_q(nb_samples - i,
264 (AVRational){ 1, inlink->sample_rate },
268 dst = (double *)out_frame->extended_data[chan];
269 dst[oindex++] = av_clipd(dbuf[dindex] *
270 get_volume(s, cp->volume), -1, 1);
276 dindex = MOD(dindex + 1, s->delay_samples);
280 s->delay_count = count;
281 s->delay_index = dindex;
283 av_frame_free(&frame);
286 err = ff_filter_frame(ctx->outputs[0], out_frame);
293 static int compand_drain(AVFilterLink *outlink)
295 AVFilterContext *ctx = outlink->src;
296 CompandContext *s = ctx->priv;
297 const int channels = outlink->channels;
298 AVFrame *frame = NULL;
301 /* 2048 is to limit output frame size during drain */
302 frame = ff_get_audio_buffer(outlink, FFMIN(2048, s->delay_count));
304 return AVERROR(ENOMEM);
306 s->pts += av_rescale_q(frame->nb_samples,
307 (AVRational){ 1, outlink->sample_rate }, outlink->time_base);
309 av_assert0(channels > 0);
310 for (chan = 0; chan < channels; chan++) {
311 AVFrame *delay_frame = s->delay_frame;
312 double *dbuf = (double *)delay_frame->extended_data[chan];
313 double *dst = (double *)frame->extended_data[chan];
314 ChanParam *cp = &s->channels[chan];
316 dindex = s->delay_index;
317 for (i = 0; i < frame->nb_samples; i++) {
318 dst[i] = av_clipd(dbuf[dindex] * get_volume(s, cp->volume),
320 dindex = MOD(dindex + 1, s->delay_samples);
323 s->delay_count -= frame->nb_samples;
324 s->delay_index = dindex;
326 return ff_filter_frame(outlink, frame);
329 static int config_output(AVFilterLink *outlink)
331 AVFilterContext *ctx = outlink->src;
332 CompandContext *s = ctx->priv;
333 const int sample_rate = outlink->sample_rate;
334 double radius = s->curve_dB * M_LN10 / 20.0;
335 char *p, *saveptr = NULL;
336 const int channels = outlink->channels;
337 int nb_attacks, nb_decays, nb_points;
338 int new_nb_items, num;
343 count_items(s->attacks, &nb_attacks);
344 count_items(s->decays, &nb_decays);
345 count_items(s->points, &nb_points);
348 av_log(ctx, AV_LOG_ERROR, "Invalid number of channels: %d\n", channels);
349 return AVERROR(EINVAL);
352 if (nb_attacks > channels || nb_decays > channels) {
353 av_log(ctx, AV_LOG_ERROR,
354 "Number of attacks/decays bigger than number of channels.\n");
355 return AVERROR(EINVAL);
360 s->channels = av_mallocz_array(channels, sizeof(*s->channels));
361 s->nb_segments = (nb_points + 4) * 2;
362 s->segments = av_mallocz_array(s->nb_segments, sizeof(*s->segments));
364 if (!s->channels || !s->segments) {
366 return AVERROR(ENOMEM);
370 for (i = 0, new_nb_items = 0; i < nb_attacks; i++) {
371 char *tstr = av_strtok(p, " |", &saveptr);
373 new_nb_items += sscanf(tstr, "%lf", &s->channels[i].attack) == 1;
374 if (s->channels[i].attack < 0) {
376 return AVERROR(EINVAL);
379 nb_attacks = new_nb_items;
382 for (i = 0, new_nb_items = 0; i < nb_decays; i++) {
383 char *tstr = av_strtok(p, " |", &saveptr);
385 new_nb_items += sscanf(tstr, "%lf", &s->channels[i].decay) == 1;
386 if (s->channels[i].decay < 0) {
388 return AVERROR(EINVAL);
391 nb_decays = new_nb_items;
393 if (nb_attacks != nb_decays) {
394 av_log(ctx, AV_LOG_ERROR,
395 "Number of attacks %d differs from number of decays %d.\n",
396 nb_attacks, nb_decays);
398 return AVERROR(EINVAL);
401 for (i = nb_decays; i < channels; i++) {
402 s->channels[i].attack = s->channels[nb_decays - 1].attack;
403 s->channels[i].decay = s->channels[nb_decays - 1].decay;
406 #define S(x) s->segments[2 * ((x) + 1)]
408 for (i = 0, new_nb_items = 0; i < nb_points; i++) {
409 char *tstr = av_strtok(p, " |", &saveptr);
411 if (sscanf(tstr, "%lf/%lf", &S(i).x, &S(i).y) != 2) {
412 av_log(ctx, AV_LOG_ERROR,
413 "Invalid and/or missing input/output value.\n");
415 return AVERROR(EINVAL);
417 if (i && S(i - 1).x > S(i).x) {
418 av_log(ctx, AV_LOG_ERROR,
419 "Transfer function input values must be increasing.\n");
421 return AVERROR(EINVAL);
424 av_log(ctx, AV_LOG_DEBUG, "%d: x=%f y=%f\n", i, S(i).x, S(i).y);
429 /* Add 0,0 if necessary */
430 if (num == 0 || S(num - 1).x)
434 #define S(x) s->segments[2 * (x)]
435 /* Add a tail off segment at the start */
436 S(0).x = S(1).x - 2 * s->curve_dB;
440 /* Join adjacent colinear segments */
441 for (i = 2; i < num; i++) {
442 double g1 = (S(i - 1).y - S(i - 2).y) * (S(i - 0).x - S(i - 1).x);
443 double g2 = (S(i - 0).y - S(i - 1).y) * (S(i - 1).x - S(i - 2).x);
449 for (j = --i; j < num; j++)
453 for (i = 0; !i || s->segments[i - 2].x; i += 2) {
454 s->segments[i].y += s->gain_dB;
455 s->segments[i].x *= M_LN10 / 20;
456 s->segments[i].y *= M_LN10 / 20;
459 #define L(x) s->segments[i - (x)]
460 for (i = 4; s->segments[i - 2].x; i += 2) {
461 double x, y, cx, cy, in1, in2, out1, out2, theta, len, r;
464 L(4).b = (L(2).y - L(4).y) / (L(2).x - L(4).x);
467 L(2).b = (L(0).y - L(2).y) / (L(0).x - L(2).x);
469 theta = atan2(L(2).y - L(4).y, L(2).x - L(4).x);
470 len = sqrt(pow(L(2).x - L(4).x, 2.) + pow(L(2).y - L(4).y, 2.));
471 r = FFMIN(radius, len);
472 L(3).x = L(2).x - r * cos(theta);
473 L(3).y = L(2).y - r * sin(theta);
475 theta = atan2(L(0).y - L(2).y, L(0).x - L(2).x);
476 len = sqrt(pow(L(0).x - L(2).x, 2.) + pow(L(0).y - L(2).y, 2.));
477 r = FFMIN(radius, len / 2);
478 x = L(2).x + r * cos(theta);
479 y = L(2).y + r * sin(theta);
481 cx = (L(3).x + L(2).x + x) / 3;
482 cy = (L(3).y + L(2).y + y) / 3;
489 in2 = L(2).x - L(3).x;
490 out2 = L(2).y - L(3).y;
491 L(3).a = (out2 / in2 - out1 / in1) / (in2 - in1);
492 L(3).b = out1 / in1 - L(3).a * in1;
497 s->in_min_lin = exp(s->segments[1].x);
498 s->out_min_lin = exp(s->segments[1].y);
500 for (i = 0; i < channels; i++) {
501 ChanParam *cp = &s->channels[i];
503 if (cp->attack > 1.0 / sample_rate)
504 cp->attack = 1.0 - exp(-1.0 / (sample_rate * cp->attack));
507 if (cp->decay > 1.0 / sample_rate)
508 cp->decay = 1.0 - exp(-1.0 / (sample_rate * cp->decay));
511 cp->volume = pow(10.0, s->initial_volume / 20);
514 s->delay_samples = s->delay * sample_rate;
515 if (s->delay_samples <= 0) {
516 s->compand = compand_nodelay;
520 s->delay_frame = av_frame_alloc();
521 if (!s->delay_frame) {
523 return AVERROR(ENOMEM);
526 s->delay_frame->format = outlink->format;
527 s->delay_frame->nb_samples = s->delay_samples;
528 s->delay_frame->channel_layout = outlink->channel_layout;
530 err = av_frame_get_buffer(s->delay_frame, 32);
534 s->compand = compand_delay;
538 static int filter_frame(AVFilterLink *inlink, AVFrame *frame)
540 AVFilterContext *ctx = inlink->dst;
541 CompandContext *s = ctx->priv;
543 return s->compand(ctx, frame);
546 static int request_frame(AVFilterLink *outlink)
548 AVFilterContext *ctx = outlink->src;
549 CompandContext *s = ctx->priv;
552 ret = ff_request_frame(ctx->inputs[0]);
554 if (ret == AVERROR_EOF && !ctx->is_disabled && s->delay_count)
555 ret = compand_drain(outlink);
560 static const AVFilterPad compand_inputs[] = {
563 .type = AVMEDIA_TYPE_AUDIO,
564 .filter_frame = filter_frame,
569 static const AVFilterPad compand_outputs[] = {
572 .request_frame = request_frame,
573 .config_props = config_output,
574 .type = AVMEDIA_TYPE_AUDIO,
580 AVFilter ff_af_compand = {
582 .description = NULL_IF_CONFIG_SMALL(
583 "Compress or expand audio dynamic range."),
584 .query_formats = query_formats,
585 .priv_size = sizeof(CompandContext),
586 .priv_class = &compand_class,
589 .inputs = compand_inputs,
590 .outputs = compand_outputs,