2 * Copyright (c) 1999 Chris Bagwell
3 * Copyright (c) 1999 Nick Bailey
4 * Copyright (c) 2007 Rob Sykes <robs@users.sourceforge.net>
5 * Copyright (c) 2013 Paul B Mahol
6 * Copyright (c) 2014 Andrew Kelley
8 * This file is part of FFmpeg.
10 * FFmpeg is free software; you can redistribute it and/or
11 * modify it under the terms of the GNU Lesser General Public
12 * License as published by the Free Software Foundation; either
13 * version 2.1 of the License, or (at your option) any later version.
15 * FFmpeg is distributed in the hope that it will be useful,
16 * but WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
18 * Lesser General Public License for more details.
20 * You should have received a copy of the GNU Lesser General Public
21 * License along with FFmpeg; if not, write to the Free Software
22 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
27 * audio compand filter
30 #include "libavutil/avassert.h"
31 #include "libavutil/avstring.h"
32 #include "libavutil/opt.h"
33 #include "libavutil/samplefmt.h"
/* Per-channel envelope-follower state.
 * NOTE(review): field list is elided in this view; from its use below it
 * holds at least .attack, .decay (smoothing coefficients) and .volume
 * (running level estimate) — confirm against the full file. */
38 typedef struct ChanParam {
/* One segment of the piecewise transfer function: an (x, y) knee point in
 * the log domain plus quadratic coefficients a/b consumed by get_volume(). */
44 typedef struct CompandSegment {
/* Filter private context: user options plus runtime state. */
49 typedef struct CompandContext {
/* Raw option strings ('|' or ' ' separated lists); parsed in config_output(). */
52 char *attacks, *decays, *points;
/* Transfer-function segments built in config_output(); freed in uninit(). */
53 CompandSegment *segments;
/* Initial channel volume in dB (option "volume"); converted to linear
 * gain via pow(10, v/20) in config_output(). */
59 double initial_volume;
/* Per-frame processing entry point; config_output() selects either
 * compand_nodelay() or compand_delay() depending on the delay option. */
67 int (*compand)(AVFilterContext *ctx, AVFrame *frame);
/* Option plumbing: OFFSET() maps an option name to its field in
 * CompandContext; A marks options as audio + filtering parameters. */
70 #define OFFSET(x) offsetof(CompandContext, x)
71 #define A AV_OPT_FLAG_AUDIO_PARAM|AV_OPT_FLAG_FILTERING_PARAM
/* User-visible options. Times are in seconds, levels in dB.
 * NOTE(review): the { NULL } terminator and closing brace are elided in
 * this view — they must be present in the full table. */
73 static const AVOption compand_options[] = {
74 { "attacks", "set time over which increase of volume is determined", OFFSET(attacks), AV_OPT_TYPE_STRING, { .str = "0.3" }, 0, 0, A },
75 { "decays", "set time over which decrease of volume is determined", OFFSET(decays), AV_OPT_TYPE_STRING, { .str = "0.8" }, 0, 0, A },
76 { "points", "set points of transfer function", OFFSET(points), AV_OPT_TYPE_STRING, { .str = "-70/-70|-60/-20" }, 0, 0, A },
77 { "soft-knee", "set soft-knee", OFFSET(curve_dB), AV_OPT_TYPE_DOUBLE, { .dbl = 0.01 }, 0.01, 900, A },
78 { "gain", "set output gain", OFFSET(gain_dB), AV_OPT_TYPE_DOUBLE, { .dbl = 0 }, -900, 900, A },
79 { "volume", "set initial volume", OFFSET(initial_volume), AV_OPT_TYPE_DOUBLE, { .dbl = 0 }, -900, 0, A },
/* delay is capped at 20 seconds; >0 selects the delayed (look-ahead) path. */
80 { "delay", "set delay for samples before sending them to volume adjuster", OFFSET(delay), AV_OPT_TYPE_DOUBLE, { .dbl = 0 }, 0, 20, A },
84 AVFILTER_DEFINE_CLASS(compand);
/* Filter init: mark the output pts as unset so compand_delay() seeds it
 * from the first input frame (or 0 if that frame has no pts). */
86 static av_cold int init(AVFilterContext *ctx)
88 CompandContext *s = ctx->priv;
89 s->pts = AV_NOPTS_VALUE;
/* Filter teardown: release everything config_output() allocated.
 * av_freep()/av_frame_free() NULL the pointers, so a partially
 * configured context is safe to clean up. */
93 static av_cold void uninit(AVFilterContext *ctx)
95 CompandContext *s = ctx->priv;
97 av_freep(&s->channels);
98 av_freep(&s->segments);
99 av_frame_free(&s->delay_frame);
/* Format negotiation: accept any channel count / layout, restrict the
 * sample format (list elided here — presumably planar double, matching
 * the (double *)extended_data casts below; confirm against full file),
 * and accept all sample rates.
 * NOTE(review): the NULL checks and `if (ret < 0) return ret;` lines
 * between the calls are elided in this view. */
102 static int query_formats(AVFilterContext *ctx)
104 AVFilterChannelLayouts *layouts;
105 AVFilterFormats *formats;
106 static const enum AVSampleFormat sample_fmts[] = {
112 layouts = ff_all_channel_counts();
/* Allocation failure from ff_all_channel_counts(). */
114 return AVERROR(ENOMEM);
115 ret = ff_set_common_channel_layouts(ctx, layouts);
119 formats = ff_make_format_list(sample_fmts);
121 return AVERROR(ENOMEM);
122 ret = ff_set_common_formats(ctx, formats);
126 formats = ff_all_samplerates();
128 return AVERROR(ENOMEM);
/* Last negotiation step; its status is the function's return value. */
129 return ff_set_common_samplerates(ctx, formats);
/* Count the number of ' '- or '|'-separated items in item_str, storing the
 * result in *nb_items. NOTE(review): the initialization of *nb_items
 * (presumably to 1, one more item than separators) and the increment inside
 * the loop are elided in this view — confirm against the full file. */
132 static void count_items(char *item_str, int *nb_items)
137 for (p = item_str; *p; p++) {
138 if (*p == ' ' || *p == '|')
/* One-pole envelope follower: move cp->volume toward the instantaneous
 * input level `in` (caller passes fabs(sample)). The attack coefficient
 * is used when the level is rising, decay when falling — the branching
 * `if (delta > 0)` / `else` lines are elided in this view. */
143 static void update_volume(ChanParam *cp, double in)
145 double delta = in - cp->volume;
/* Level rising: smooth with the (typically faster) attack coefficient. */
148 cp->volume += delta * cp->attack;
/* Level falling: smooth with the decay coefficient. */
150 cp->volume += delta * cp->decay;
/* Map a linear input level to a linear output gain via the piecewise
 * transfer function. Works in the natural-log domain: segments[] x/y
 * were pre-scaled by M_LN10/20 in config_output(), so log(in_lin) is
 * directly comparable with segment x values. */
153 static double get_volume(CompandContext *s, double in_lin)
156 double in_log, out_log;
/* Below the lowest knee point: clamp to the precomputed minimum output. */
159 if (in_lin < s->in_min_lin)
160 return s->out_min_lin;
162 in_log = log(in_lin);
/* Find the first segment whose x bound exceeds in_log, then evaluate
 * the previous segment's quadratic y + x*(a*x + b).
 * NOTE(review): the loop's `break` and the final `return exp(out_log);`
 * are elided in this view. */
164 for (i = 1; i < s->nb_segments; i++)
165 if (in_log <= s->segments[i].x)
167 cs = &s->segments[i - 1];
169 out_log = cs->y + in_log * (cs->a * in_log + cs->b);
/* Zero-delay processing path: applies the envelope follower and transfer
 * function sample-by-sample, writing either in place (when the input frame
 * is writable) or into a freshly allocated output frame.
 * Takes ownership of `frame`; frees it on every error path. */
174 static int compand_nodelay(AVFilterContext *ctx, AVFrame *frame)
176 CompandContext *s = ctx->priv;
177 AVFilterLink *inlink = ctx->inputs[0];
178 const int channels = inlink->channels;
179 const int nb_samples = frame->nb_samples;
/* Writable input: process in place (out_frame = frame; that assignment
 * is elided in this view). Otherwise allocate a separate output buffer. */
184 if (av_frame_is_writable(frame)) {
187 out_frame = ff_get_audio_buffer(inlink, nb_samples);
189 av_frame_free(&frame);
190 return AVERROR(ENOMEM);
/* Carry pts/metadata over to the new buffer. */
192 err = av_frame_copy_props(out_frame, frame);
194 av_frame_free(&out_frame);
195 av_frame_free(&frame);
/* Planar layout: one extended_data plane per channel, samples as double. */
200 for (chan = 0; chan < channels; chan++) {
201 const double *src = (double *)frame->extended_data[chan];
202 double *dst = (double *)out_frame->extended_data[chan];
203 ChanParam *cp = &s->channels[chan];
205 for (i = 0; i < nb_samples; i++) {
/* Track the rectified level, then scale the sample by the gain the
 * transfer function assigns to the current envelope. */
206 update_volume(cp, fabs(src[i]));
208 dst[i] = src[i] * get_volume(s, cp->volume);
/* In the copy case the input frame is no longer needed. */
212 if (frame != out_frame)
213 av_frame_free(&frame);
215 return ff_filter_frame(ctx->outputs[0], out_frame);
/* Cheap modulo for ring-buffer indices: valid only because the index is
 * advanced by at most b-1 before wrapping, so a < 2b always holds. */
218 #define MOD(a, b) (((a) >= (b)) ? (a) - (b) : (a))
/* Delayed (look-ahead) processing path: input samples are staged in a
 * per-channel ring buffer (s->delay_frame) so the envelope follower sees
 * each sample delay_samples ahead of when its gain is applied. Output
 * starts only once the ring buffer has filled. Takes ownership of `frame`. */
220 static int compand_delay(AVFilterContext *ctx, AVFrame *frame)
222 CompandContext *s = ctx->priv;
223 AVFilterLink *inlink = ctx->inputs[0];
224 const int channels = inlink->channels;
225 const int nb_samples = frame->nb_samples;
226 int chan, i, av_uninit(dindex), oindex, av_uninit(count);
227 AVFrame *out_frame = NULL;
/* Seed the running output timestamp from the first frame ever seen. */
230 if (s->pts == AV_NOPTS_VALUE) {
231 s->pts = (frame->pts == AV_NOPTS_VALUE) ? 0 : frame->pts;
234 av_assert1(channels > 0); /* would corrupt delay_count and delay_index */
236 for (chan = 0; chan < channels; chan++) {
237 AVFrame *delay_frame = s->delay_frame;
238 const double *src = (double *)frame->extended_data[chan];
239 double *dbuf = (double *)delay_frame->extended_data[chan];
240 ChanParam *cp = &s->channels[chan];
/* count/dindex are shared across channels: each channel replays the same
 * starting state, and the values are written back after the loop. */
243 count = s->delay_count;
244 dindex = s->delay_index;
245 for (i = 0, oindex = 0; i < nb_samples; i++) {
246 const double in = src[i];
/* The envelope is updated with the *incoming* sample, while the gain is
 * applied to the sample leaving the delay line — this is the look-ahead. */
247 update_volume(cp, fabs(in));
249 if (count >= s->delay_samples) {
/* Ring buffer full: lazily allocate the output frame sized to the
 * remaining samples of this input frame (allocated once, on the
 * first channel — the `if (!out_frame)` guard is elided here). */
251 out_frame = ff_get_audio_buffer(inlink, nb_samples - i);
253 av_frame_free(&frame);
254 return AVERROR(ENOMEM);
256 err = av_frame_copy_props(out_frame, frame);
258 av_frame_free(&out_frame);
259 av_frame_free(&frame);
/* Stamp the output with the running pts and advance it by the
 * number of samples emitted, rescaled to the link time base. */
262 out_frame->pts = s->pts;
263 s->pts += av_rescale_q(nb_samples - i,
264 (AVRational){ 1, inlink->sample_rate },
/* Emit the oldest delayed sample with the current gain, then store
 * the new input in its slot (store elided in this view). */
268 dst = (double *)out_frame->extended_data[chan];
269 dst[oindex++] = dbuf[dindex] * get_volume(s, cp->volume);
275 dindex = MOD(dindex + 1, s->delay_samples);
/* Persist ring-buffer state for the next frame. */
279 s->delay_count = count;
280 s->delay_index = dindex;
282 av_frame_free(&frame);
/* out_frame may be NULL while the delay line is still filling; the
 * guard around this call is elided in this view. */
285 err = ff_filter_frame(ctx->outputs[0], out_frame);
/* EOF drain: flush the samples still held in the delay ring buffer,
 * at most 2048 per call so output frames stay reasonably sized.
 * Called repeatedly from request_frame() while delay_count > 0. */
292 static int compand_drain(AVFilterLink *outlink)
294 AVFilterContext *ctx = outlink->src;
295 CompandContext *s = ctx->priv;
296 const int channels = outlink->channels;
297 AVFrame *frame = NULL;
300 /* 2048 is to limit output frame size during drain */
301 frame = ff_get_audio_buffer(outlink, FFMIN(2048, s->delay_count));
303 return AVERROR(ENOMEM);
/* Advance the running timestamp by the samples emitted.
 * NOTE(review): frame->pts = s->pts appears to be elided in this view. */
305 s->pts += av_rescale_q(frame->nb_samples,
306 (AVRational){ 1, outlink->sample_rate }, outlink->time_base);
308 av_assert0(channels > 0);
309 for (chan = 0; chan < channels; chan++) {
310 AVFrame *delay_frame = s->delay_frame;
311 double *dbuf = (double *)delay_frame->extended_data[chan];
312 double *dst = (double *)frame->extended_data[chan];
313 ChanParam *cp = &s->channels[chan];
/* Same per-channel replay of the shared ring index as compand_delay(). */
315 dindex = s->delay_index;
316 for (i = 0; i < frame->nb_samples; i++) {
317 dst[i] = dbuf[dindex] * get_volume(s, cp->volume);
318 dindex = MOD(dindex + 1, s->delay_samples);
321 s->delay_count -= frame->nb_samples;
322 s->delay_index = dindex;
324 return ff_filter_frame(outlink, frame);
/* Output configuration: parses the attacks/decays/points option strings,
 * builds the piecewise transfer function (with soft-knee rounding of the
 * corners), precomputes per-channel smoothing coefficients, and selects
 * the delay or no-delay processing path.
 * NOTE(review): many error-cleanup and bookkeeping lines are elided in
 * this view; comments below describe only what the visible lines show. */
327 static int config_output(AVFilterLink *outlink)
329 AVFilterContext *ctx = outlink->src;
330 CompandContext *s = ctx->priv;
331 const int sample_rate = outlink->sample_rate;
/* Knee radius converted from dB to natural-log units (dB * ln(10)/20). */
332 double radius = s->curve_dB * M_LN10 / 20.0;
333 char *p, *saveptr = NULL;
334 const int channels = outlink->channels;
335 int nb_attacks, nb_decays, nb_points;
336 int new_nb_items, num;
/* Count list items in each option string before parsing. */
341 count_items(s->attacks, &nb_attacks);
342 count_items(s->decays, &nb_decays);
343 count_items(s->points, &nb_points);
346 av_log(ctx, AV_LOG_ERROR, "Invalid number of channels: %d\n", channels);
347 return AVERROR(EINVAL);
/* More attack/decay entries than channels is a user error. */
350 if (nb_attacks > channels || nb_decays > channels) {
351 av_log(ctx, AV_LOG_ERROR,
352 "Number of attacks/decays bigger than number of channels.\n");
353 return AVERROR(EINVAL);
/* Per-channel state plus segment table; (nb_points + 4) * 2 leaves room
 * for added 0/0 + tail-off points and the interleaved knee segments. */
358 s->channels = av_mallocz_array(channels, sizeof(*s->channels));
359 s->nb_segments = (nb_points + 4) * 2;
360 s->segments = av_mallocz_array(s->nb_segments, sizeof(*s->segments));
362 if (!s->channels || !s->segments) {
364 return AVERROR(ENOMEM);
/* Parse attack times; av_strtok with p=NULL after the first call
 * continues the same string (initial p assignment elided here). */
368 for (i = 0, new_nb_items = 0; i < nb_attacks; i++) {
369 char *tstr = av_strtok(p, " |", &saveptr);
371 new_nb_items += sscanf(tstr, "%lf", &s->channels[i].attack) == 1;
372 if (s->channels[i].attack < 0) {
374 return AVERROR(EINVAL);
377 nb_attacks = new_nb_items;
/* Parse decay times the same way. */
380 for (i = 0, new_nb_items = 0; i < nb_decays; i++) {
381 char *tstr = av_strtok(p, " |", &saveptr);
383 new_nb_items += sscanf(tstr, "%lf", &s->channels[i].decay) == 1;
384 if (s->channels[i].decay < 0) {
386 return AVERROR(EINVAL);
389 nb_decays = new_nb_items;
391 if (nb_attacks != nb_decays) {
392 av_log(ctx, AV_LOG_ERROR,
393 "Number of attacks %d differs from number of decays %d.\n",
394 nb_attacks, nb_decays);
396 return AVERROR(EINVAL);
/* Fewer entries than channels: replicate the last attack/decay pair. */
399 for (i = nb_decays; i < channels; i++) {
400 s->channels[i].attack = s->channels[nb_decays - 1].attack;
401 s->channels[i].decay = s->channels[nb_decays - 1].decay;
/* S(x) addresses every second segment slot, offset by one point — the
 * odd slots in between are filled later by the soft-knee pass. */
404 #define S(x) s->segments[2 * ((x) + 1)]
/* Parse "in_dB/out_dB" transfer-function points; x must be increasing. */
406 for (i = 0, new_nb_items = 0; i < nb_points; i++) {
407 char *tstr = av_strtok(p, " |", &saveptr);
409 if (sscanf(tstr, "%lf/%lf", &S(i).x, &S(i).y) != 2) {
410 av_log(ctx, AV_LOG_ERROR,
411 "Invalid and/or missing input/output value.\n");
413 return AVERROR(EINVAL);
415 if (i && S(i - 1).x > S(i).x) {
416 av_log(ctx, AV_LOG_ERROR,
417 "Transfer function input values must be increasing.\n");
419 return AVERROR(EINVAL);
422 av_log(ctx, AV_LOG_DEBUG, "%d: x=%f y=%f\n", i, S(i).x, S(i).y);
427 /* Add 0,0 if necessary */
428 if (num == 0 || S(num - 1).x)
/* Re-base S(x) without the +1 offset for the remaining passes. */
432 #define S(x) s->segments[2 * (x)]
433 /* Add a tail off segment at the start */
434 S(0).x = S(1).x - 2 * s->curve_dB;
438 /* Join adjacent colinear segments */
439 for (i = 2; i < num; i++) {
/* Cross-multiplied slope comparison: g1 == g2 iff the two adjacent
 * segments have equal gradients (avoids division). */
440 double g1 = (S(i - 1).y - S(i - 2).y) * (S(i - 0).x - S(i - 1).x);
441 double g2 = (S(i - 0).y - S(i - 1).y) * (S(i - 1).x - S(i - 2).x);
/* Colinear: shift the remaining points down one slot. */
447 for (j = --i; j < num; j++)
/* Apply output gain and convert all dB values to natural-log units so
 * get_volume() can work directly with log(sample). */
451 for (i = 0; i < s->nb_segments; i += 2) {
452 s->segments[i].y += s->gain_dB;
453 s->segments[i].x *= M_LN10 / 20;
454 s->segments[i].y *= M_LN10 / 20;
/* Soft-knee pass: for each interior corner, pull the corner point back
 * along both adjoining lines by up to `radius` and fit a quadratic
 * (coefficients a/b) through the rounded joint. L(x) indexes backwards
 * from the current even slot. */
457 #define L(x) s->segments[i - (x)]
458 for (i = 4; i < s->nb_segments; i += 2) {
459 double x, y, cx, cy, in1, in2, out1, out2, theta, len, r;
/* Straight-line slopes of the segment before and after the corner. */
462 L(4).b = (L(2).y - L(4).y) / (L(2).x - L(4).x);
465 L(2).b = (L(0).y - L(2).y) / (L(0).x - L(2).x);
/* Back off from the corner along the incoming segment by r <= radius. */
467 theta = atan2(L(2).y - L(4).y, L(2).x - L(4).x);
468 len = hypot(L(2).x - L(4).x, L(2).y - L(4).y);
469 r = FFMIN(radius, len);
470 L(3).x = L(2).x - r * cos(theta);
471 L(3).y = L(2).y - r * sin(theta);
/* Advance from the corner along the outgoing segment by r <= len/2. */
473 theta = atan2(L(0).y - L(2).y, L(0).x - L(2).x);
474 len = hypot(L(0).x - L(2).x, L(0).y - L(2).y);
475 r = FFMIN(radius, len / 2);
476 x = L(2).x + r * cos(theta);
477 y = L(2).y + r * sin(theta);
/* Centroid of the two backed-off points and the corner: a point the
 * rounded curve should pass through. */
479 cx = (L(3).x + L(2).x + x) / 3;
480 cy = (L(3).y + L(2).y + y) / 3;
/* Solve for the quadratic y = a*x^2 + b*x through the two relative
 * points (in1,out1) and (in2,out2); (in1/out1 setup elided here). */
487 in2 = L(2).x - L(3).x;
488 out2 = L(2).y - L(3).y;
489 L(3).a = (out2 / in2 - out1 / in1) / (in2 - in1);
490 L(3).b = out1 / in1 - L(3).a * in1;
/* Linear-domain floor used by get_volume()'s early-out clamp. */
495 s->in_min_lin = exp(s->segments[1].x);
496 s->out_min_lin = exp(s->segments[1].y);
/* Convert attack/decay times (seconds) into one-pole smoothing
 * coefficients; times at or below one sample period are left as-is
 * (the else branches are elided in this view). */
498 for (i = 0; i < channels; i++) {
499 ChanParam *cp = &s->channels[i];
501 if (cp->attack > 1.0 / sample_rate)
502 cp->attack = 1.0 - exp(-1.0 / (sample_rate * cp->attack));
505 if (cp->decay > 1.0 / sample_rate)
506 cp->decay = 1.0 - exp(-1.0 / (sample_rate * cp->decay));
/* Initial envelope value: option dB converted to linear gain. */
509 cp->volume = pow(10.0, s->initial_volume / 20);
/* Choose processing path: no delay requested -> simple in-place path. */
512 s->delay_samples = s->delay * sample_rate;
513 if (s->delay_samples <= 0) {
514 s->compand = compand_nodelay;
/* Delayed path: allocate the ring buffer as an AVFrame sized to the
 * full delay, matching the link's format and layout. */
518 s->delay_frame = av_frame_alloc();
519 if (!s->delay_frame) {
521 return AVERROR(ENOMEM);
524 s->delay_frame->format = outlink->format;
525 s->delay_frame->nb_samples = s->delay_samples;
526 s->delay_frame->channel_layout = outlink->channel_layout;
/* 32-byte alignment for the audio buffer. */
528 err = av_frame_get_buffer(s->delay_frame, 32);
532 s->compand = compand_delay;
/* Input callback: dispatch to the path chosen in config_output()
 * (compand_nodelay or compand_delay), which takes ownership of frame. */
536 static int filter_frame(AVFilterLink *inlink, AVFrame *frame)
538 AVFilterContext *ctx = inlink->dst;
539 CompandContext *s = ctx->priv;
541 return s->compand(ctx, frame);
/* Output pull: forward the request upstream; once upstream signals EOF,
 * flush any samples still sitting in the delay line via compand_drain(). */
544 static int request_frame(AVFilterLink *outlink)
546 AVFilterContext *ctx = outlink->src;
547 CompandContext *s = ctx->priv;
550 ret = ff_request_frame(ctx->inputs[0]);
/* Drain only when the filter is enabled and delayed samples remain. */
552 if (ret == AVERROR_EOF && !ctx->is_disabled && s->delay_count)
553 ret = compand_drain(outlink);
/* Single audio input pad feeding filter_frame().
 * NOTE(review): the pad name entries and NULL terminators are elided. */
558 static const AVFilterPad compand_inputs[] = {
561 .type = AVMEDIA_TYPE_AUDIO,
562 .filter_frame = filter_frame,
/* Single audio output pad; config_output() runs at link configuration,
 * request_frame() drives the EOF drain. */
567 static const AVFilterPad compand_outputs[] = {
570 .request_frame = request_frame,
571 .config_props = config_output,
572 .type = AVMEDIA_TYPE_AUDIO,
/* Filter registration: wires the compand filter's callbacks, options
 * class, private context size, and pads into libavfilter.
 * NOTE(review): .name, .init and .uninit entries are elided in this view. */
578 AVFilter ff_af_compand = {
580 .description = NULL_IF_CONFIG_SMALL(
581 "Compress or expand audio dynamic range."),
582 .query_formats = query_formats,
583 .priv_size = sizeof(CompandContext),
584 .priv_class = &compand_class,
587 .inputs = compand_inputs,
588 .outputs = compand_outputs,