#include "audio_mixer.h"

#include <assert.h>
#include <endian.h>
#include <bmusb/bmusb.h>
#include <stdio.h>
#include <cmath>

#include "db.h"
#include "flags.h"
#include "timebase.h"

using namespace bmusb;
using namespace std;

namespace {

void convert_fixed24_to_fp32(float *dst, size_t out_channels, const uint8_t *src, size_t in_channels, size_t num_samples)
{
        assert(in_channels >= out_channels);
        for (size_t i = 0; i < num_samples; ++i) {
                for (size_t j = 0; j < out_channels; ++j) {
                        uint32_t s1 = *src++;
                        uint32_t s2 = *src++;
                        uint32_t s3 = *src++;
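                        // Pack the three little-endian bytes into the top 24 bits of a
                        // 32-bit word. The low byte repeats s1 so that full-scale input
                        // stays full-scale (e.g., 0x7fffff becomes 0x7fffffff) after the
                        // scaling by 1/2^31 below.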
                        uint32_t s = s1 | (s1 << 8) | (s2 << 16) | (s3 << 24);
                        dst[i * out_channels + j] = int(s) * (1.0f / 2147483648.0f);
                }
                src += 3 * (in_channels - out_channels);
        }
}

void convert_fixed32_to_fp32(float *dst, size_t out_channels, const uint8_t *src, size_t in_channels, size_t num_samples)
{
        assert(in_channels >= out_channels);
        for (size_t i = 0; i < num_samples; ++i) {
                for (size_t j = 0; j < out_channels; ++j) {
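                        // Samples are 32-bit signed little-endian; le32toh() makes the
                        // load correct on big-endian hosts as well.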
                        int32_t s = le32toh(*(int32_t *)src);
                        dst[i * out_channels + j] = s * (1.0f / 2147483648.0f);
                        src += 4;
                }
                src += 4 * (in_channels - out_channels);
        }
}

}  // namespace

AudioMixer::AudioMixer(unsigned num_cards)
        : num_cards(num_cards),
          level_compressor(OUTPUT_FREQUENCY),
          limiter(OUTPUT_FREQUENCY),
          compressor(OUTPUT_FREQUENCY)
{
        locut.init(FILTER_HPF, 2);

        set_locut_enabled(global_flags.locut_enabled);
        set_gain_staging_db(global_flags.initial_gain_staging_db);
        set_gain_staging_auto(global_flags.gain_staging_auto);
        set_compressor_enabled(global_flags.compressor_enabled);
        set_limiter_enabled(global_flags.limiter_enabled);
        set_final_makeup_gain_auto(global_flags.final_makeup_gain_auto);
}

void AudioMixer::reset_card(unsigned card_index)
{
        CaptureCard *card = &cards[card_index];

        unique_lock<mutex> lock(card->audio_mutex);
        card->resampling_queue.reset(new ResamplingQueue(card_index, OUTPUT_FREQUENCY, OUTPUT_FREQUENCY, 2));
        card->next_local_pts = 0;
}

void AudioMixer::add_audio(unsigned card_index, const uint8_t *data, unsigned num_samples, AudioFormat audio_format, int64_t frame_length)
{
        CaptureCard *card = &cards[card_index];

        // Convert the audio to stereo fp32.
        vector<float> audio;
        audio.resize(num_samples * 2);
        switch (audio_format.bits_per_sample) {
        case 0:
                assert(num_samples == 0);
                break;
        case 24:
                convert_fixed24_to_fp32(&audio[0], 2, data, audio_format.num_channels, num_samples);
                break;
        case 32:
                convert_fixed32_to_fp32(&audio[0], 2, data, audio_format.num_channels, num_samples);
                break;
        default:
                fprintf(stderr, "Cannot handle audio with %u bits per sample\n", audio_format.bits_per_sample);
                assert(false);
        }

        // Now add it.
        {
                unique_lock<mutex> lock(card->audio_mutex);

                int64_t local_pts = card->next_local_pts;
                card->resampling_queue->add_input_samples(local_pts / double(TIMEBASE), audio.data(), num_samples);
                card->next_local_pts = local_pts + frame_length;
        }
}

void AudioMixer::add_silence(unsigned card_index, unsigned samples_per_frame, unsigned num_frames, int64_t frame_length)
{
        CaptureCard *card = &cards[card_index];
        unique_lock<mutex> lock(card->audio_mutex);

        vector<float> silence(samples_per_frame * 2, 0.0f);
        for (unsigned i = 0; i < num_frames; ++i) {
                card->resampling_queue->add_input_samples(card->next_local_pts / double(TIMEBASE), silence.data(), samples_per_frame);
                // Note that if the format changed in the meantime, we have
                // no way of detecting that; we just have to assume the frame length
                // is always the same.
                card->next_local_pts += frame_length;
        }
}

vector<float> AudioMixer::get_output(double pts, unsigned num_samples, ResamplingQueue::RateAdjustmentPolicy rate_adjustment_policy)
{
        vector<float> samples_card;
        vector<float> samples_out;
        samples_out.resize(num_samples * 2);

        // TODO: Allow more flexible input mapping.
        for (unsigned card_index = 0; card_index < num_cards; ++card_index) {
                samples_card.resize(num_samples * 2);
                {
                        unique_lock<mutex> lock(cards[card_index].audio_mutex);
                        cards[card_index].resampling_queue->get_output_samples(
                                pts,
                                &samples_card[0],
                                num_samples,
                                rate_adjustment_policy);
                }

                float volume = from_db(cards[card_index].fader_volume_db);
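                // The first card initializes the output buffer; the rest are mixed on top of it.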
                if (card_index == 0) {
                        for (unsigned i = 0; i < num_samples * 2; ++i) {
                                samples_out[i] = samples_card[i] * volume;
                        }
                } else {
                        for (unsigned i = 0; i < num_samples * 2; ++i) {
                                samples_out[i] += samples_card[i] * volume;
                        }
                }
        }

        // Cut away everything under 120 Hz (or whatever the cutoff is);
        // we don't need it for voice, and it will reduce headroom
        // and confuse the compressor. (In particular, any hums at 50 or 60 Hz
        // should be dampened.)
        if (locut_enabled) {
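                // The cutoff is passed as an angular frequency in radians per sample,
                // i.e., 2 * pi * f divided by the sample rate.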
                locut.render(samples_out.data(), samples_out.size() / 2, locut_cutoff_hz * 2.0 * M_PI / OUTPUT_FREQUENCY, 0.5f);
        }

        {
                unique_lock<mutex> lock(compressor_mutex);

                // Apply a level compressor to get the general level right.
                // Basically, if it's over about -40 dBFS, we squeeze it down to that level
                // (or more precisely, near it, since we don't use infinite ratio),
                // then apply a makeup gain to get it to -14 dBFS. -14 dBFS is, of course,
                // entirely arbitrary, but from practical tests with speech, it seems to
                // put us around -23 LUFS, so it's a reasonable starting point for later use.
                {
                        if (level_compressor_enabled) {
                                float threshold = 0.01f;   // -40 dBFS.
                                float ratio = 20.0f;
                                float attack_time = 0.5f;
                                float release_time = 20.0f;
                                float makeup_gain = from_db(ref_level_dbfs - (-40.0f));  // +26 dB.
                                level_compressor.process(samples_out.data(), samples_out.size() / 2, threshold, ratio, attack_time, release_time, makeup_gain);
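                                // Remember the gain actually applied (compressor attenuation
                                // times makeup gain), so the manual path below can reuse it
                                // when auto gain staging is off.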
                                gain_staging_db = to_db(level_compressor.get_attenuation() * makeup_gain);
                        } else {
                                // Just apply the gain we already had.
                                float g = from_db(gain_staging_db);
                                for (size_t i = 0; i < samples_out.size(); ++i) {
                                        samples_out[i] *= g;
                                }
                        }
                }

        #if 0
                printf("level=%f (%+5.2f dBFS) attenuation=%f (%+5.2f dB) end_result=%+5.2f dB\n",
                        level_compressor.get_level(), to_db(level_compressor.get_level()),
                        level_compressor.get_attenuation(), to_db(level_compressor.get_attenuation()),
                        to_db(level_compressor.get_level() * level_compressor.get_attenuation() * makeup_gain));
        #endif

        //      float limiter_att, compressor_att;

                // The real compressor.
                if (compressor_enabled) {
                        float threshold = from_db(compressor_threshold_dbfs);
                        float ratio = 20.0f;
                        float attack_time = 0.005f;
                        float release_time = 0.040f;
                        float makeup_gain = 2.0f;  // +6 dB.
                        compressor.process(samples_out.data(), samples_out.size() / 2, threshold, ratio, attack_time, release_time, makeup_gain);
        //              compressor_att = compressor.get_attenuation();
                }

                // Finally a limiter at -4 dB (so, -10 dBFS) to take out the worst peaks only.
                // Note that since ratio is not infinite, we could go slightly higher than this.
                if (limiter_enabled) {
                        float threshold = from_db(limiter_threshold_dbfs);
                        float ratio = 30.0f;
                        float attack_time = 0.0f;  // Instant.
                        float release_time = 0.020f;
                        float makeup_gain = 1.0f;  // 0 dB.
                        limiter.process(samples_out.data(), samples_out.size() / 2, threshold, ratio, attack_time, release_time, makeup_gain);
        //              limiter_att = limiter.get_attenuation();
                }

        //      printf("limiter=%+5.1f  compressor=%+5.1f\n", to_db(limiter_att), to_db(compressor_att));
        }

        // At this point, we are most likely close to +0 LU, but all of our
        // measurements have been on raw sample values, not R128 values.
        // So we have a final makeup gain to get us to +0 LU; the gain
        // adjustments required should be relatively small, and also, the
        // offset shouldn't change much (only if the type of audio changes
        // significantly). Thus, we shoot for updating this value basically
        // “whenever we process buffers”, since the R128 calculation isn't exactly
        // something we get out per-sample.
        //
        // Note that there's a feedback loop here, so we choose a very slow filter
        // (half-time of 100 seconds).
        double target_loudness_factor, alpha;
        double loudness_lu = loudness_lufs - ref_level_lufs;
        double current_makeup_lu = to_db(final_makeup_gain);
        target_loudness_factor = final_makeup_gain * from_db(-loudness_lu);

        // If we're outside +/- 5 LU uncorrected, we don't count it as
        // a normal signal (probably silence) and don't change the
        // correction factor; just apply what we already have.
        if (fabs(loudness_lu - current_makeup_lu) >= 5.0 || !final_makeup_gain_auto) {
                alpha = 0.0;
        } else {
                // Formula adapted from
                // https://en.wikipedia.org/wiki/Low-pass_filter#Simple_infinite_impulse_response_filter.
                const double half_time_s = 100.0;
                const double fc_mul_2pi_delta_t = 1.0 / (half_time_s * OUTPUT_FREQUENCY);
                alpha = fc_mul_2pi_delta_t / (fc_mul_2pi_delta_t + 1.0);
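                // With OUTPUT_FREQUENCY at 48 kHz, this works out to
                // alpha = 1 / 4800001, roughly 2.1e-7 per sample.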
        }

        {
                unique_lock<mutex> lock(compressor_mutex);
                double m = final_makeup_gain;
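                // Ramp the gain towards the target one sample frame at a time,
                // so that the adjustment happens gradually, and keep the end
                // value around for the next call.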
                for (size_t i = 0; i < samples_out.size(); i += 2) {
                        samples_out[i + 0] *= m;
                        samples_out[i + 1] *= m;
                        m += (target_loudness_factor - m) * alpha;
                }
                final_makeup_gain = m;
        }

        return samples_out;
}