[nageru] / audio_mixer.cpp (at commit "Make some common decibel macros.")
#include "audio_mixer.h"

#include <assert.h>
#include <endian.h>
#include <bmusb/bmusb.h>
#include <stdio.h>
#include <cmath>

#include "db.h"
#include "flags.h"
#include "timebase.h"

using namespace bmusb;
using namespace std;

namespace {

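// Convert 24-bit little-endian PCM to 32-bit float in [-1, 1), taking the
// first out_channels channels of each in_channels-channel frame. The 24-bit
// sample ends up in the top three bytes of a 32-bit word, with the low byte
// replicated into the padding byte (presumably so that full-scale input lands
// closer to ±1.0 than plain zero-padding would); the word is then reinterpreted
// as signed and scaled by 1/2^31.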
void convert_fixed24_to_fp32(float *dst, size_t out_channels, const uint8_t *src, size_t in_channels, size_t num_samples)
{
        assert(in_channels >= out_channels);
        for (size_t i = 0; i < num_samples; ++i) {
                for (size_t j = 0; j < out_channels; ++j) {
                        uint32_t s1 = *src++;
                        uint32_t s2 = *src++;
                        uint32_t s3 = *src++;
                        uint32_t s = s1 | (s1 << 8) | (s2 << 16) | (s3 << 24);
                        dst[i * out_channels + j] = int(s) * (1.0f / 2147483648.0f);
                }
                src += 3 * (in_channels - out_channels);
        }
}

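// Same as above, but for 32-bit little-endian PCM: each sample is read with
// le32toh() and scaled by 1/2^31.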
void convert_fixed32_to_fp32(float *dst, size_t out_channels, const uint8_t *src, size_t in_channels, size_t num_samples)
{
        assert(in_channels >= out_channels);
        for (size_t i = 0; i < num_samples; ++i) {
                for (size_t j = 0; j < out_channels; ++j) {
                        int32_t s = le32toh(*(int32_t *)src);
                        dst[i * out_channels + j] = s * (1.0f / 2147483648.0f);
                        src += 4;
                }
                src += 4 * (in_channels - out_channels);
        }
}

}  // namespace

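// Set up the processing chain at the fixed OUTPUT_FREQUENCY sample rate:
// the locut high-pass filter plus the gain-staging ("level") compressor,
// the regular compressor and the limiter. Initial settings are taken from
// the command-line flags (global_flags).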
AudioMixer::AudioMixer(unsigned num_cards)
        : num_cards(num_cards),
          level_compressor(OUTPUT_FREQUENCY),
          limiter(OUTPUT_FREQUENCY),
          compressor(OUTPUT_FREQUENCY)
{
        locut.init(FILTER_HPF, 2);

        set_locut_enabled(global_flags.locut_enabled);
        set_gain_staging_db(global_flags.initial_gain_staging_db);
        set_gain_staging_auto(global_flags.gain_staging_auto);
        set_compressor_enabled(global_flags.compressor_enabled);
        set_limiter_enabled(global_flags.limiter_enabled);
        set_final_makeup_gain_auto(global_flags.final_makeup_gain_auto);
}

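// Throw away any queued audio for the given card and start over with a
// fresh resampling queue and a card-local pts of zero.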
void AudioMixer::reset_card(unsigned card_index)
{
        CaptureCard *card = &cards[card_index];

        unique_lock<mutex> lock(card->audio_mutex);
        card->resampling_queue.reset(new ResamplingQueue(card_index, OUTPUT_FREQUENCY, OUTPUT_FREQUENCY, 2));
        card->next_local_pts = 0;
}

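// Accept a block of raw audio from a capture card: convert it to stereo fp32
// and feed it to the card's resampling queue, timestamped with the card-local
// pts (kept in TIMEBASE units and advanced by frame_length per call).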
void AudioMixer::add_audio(unsigned card_index, const uint8_t *data, unsigned num_samples, AudioFormat audio_format, int64_t frame_length)
{
        CaptureCard *card = &cards[card_index];

        // Convert the audio to stereo fp32.
        vector<float> audio;
        audio.resize(num_samples * 2);
        switch (audio_format.bits_per_sample) {
        case 0:
                assert(num_samples == 0);
                break;
        case 24:
                convert_fixed24_to_fp32(&audio[0], 2, data, audio_format.num_channels, num_samples);
                break;
        case 32:
                convert_fixed32_to_fp32(&audio[0], 2, data, audio_format.num_channels, num_samples);
                break;
        default:
                fprintf(stderr, "Cannot handle audio with %u bits per sample\n", audio_format.bits_per_sample);
                assert(false);
        }

        // Now add it.
        {
                unique_lock<mutex> lock(card->audio_mutex);

                int64_t local_pts = card->next_local_pts;
                card->resampling_queue->add_input_samples(local_pts / double(TIMEBASE), audio.data(), num_samples);
                card->next_local_pts = local_pts + frame_length;
        }
}

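// Feed num_frames frames of silence into the card's resampling queue, so that
// its local pts keeps advancing even when no real audio arrives.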
void AudioMixer::add_silence(unsigned card_index, unsigned samples_per_frame, unsigned num_frames, int64_t frame_length)
{
        CaptureCard *card = &cards[card_index];
        unique_lock<mutex> lock(card->audio_mutex);

        vector<float> silence(samples_per_frame * 2, 0.0f);
        for (unsigned i = 0; i < num_frames; ++i) {
                card->resampling_queue->add_input_samples(card->next_local_pts / double(TIMEBASE), silence.data(), samples_per_frame);
                // Note that if the format changed in the meantime, we have
                // no way of detecting that; we just have to assume the frame length
                // is always the same.
                card->next_local_pts += frame_length;
        }
}

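// Produce num_samples stereo output samples for the given pts. The pipeline is:
// resample and sum all cards, high-pass (locut), automatic or manual gain
// staging, compressor, limiter, and finally a slow automatic makeup gain that
// aims for 0 LU relative to the reference level.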
vector<float> AudioMixer::get_output(double pts, unsigned num_samples, ResamplingQueue::RateAdjustmentPolicy rate_adjustment_policy)
{
        vector<float> samples_card;
        vector<float> samples_out;
        samples_out.resize(num_samples * 2);

        // TODO: Allow more flexible input mapping.
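        // For now, every card is resampled to the output rate and mixed in with
        // unity gain; card 0 initializes the buffer, the rest are accumulated.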
        for (unsigned card_index = 0; card_index < num_cards; ++card_index) {
                samples_card.resize(num_samples * 2);
                {
                        unique_lock<mutex> lock(cards[card_index].audio_mutex);
                        cards[card_index].resampling_queue->get_output_samples(
                                pts,
                                &samples_card[0],
                                num_samples,
                                rate_adjustment_policy);
                }
                if (card_index == 0) {
                        for (unsigned i = 0; i < num_samples * 2; ++i) {
                                samples_out[i] = samples_card[i];
                        }
                } else {
                        for (unsigned i = 0; i < num_samples * 2; ++i) {
                                samples_out[i] += samples_card[i];
                        }
                }
        }

        // Cut away everything under 120 Hz (or whatever the cutoff is);
        // we don't need it for voice, and it will reduce headroom
        // and confuse the compressor. (In particular, any hum at 50 or 60 Hz
        // should be dampened.)
        if (locut_enabled) {
                locut.render(samples_out.data(), samples_out.size() / 2, locut_cutoff_hz * 2.0 * M_PI / OUTPUT_FREQUENCY, 0.5f);
        }

        {
                unique_lock<mutex> lock(compressor_mutex);

                // Apply a level compressor to get the general level right.
                // Basically, if it's over about -40 dBFS, we squeeze it down to that level
                // (or more precisely, near it, since we don't use infinite ratio),
                // then apply a makeup gain to get it to -14 dBFS. -14 dBFS is, of course,
                // entirely arbitrary, but from practical tests with speech, it seems to
                // put us around -23 LUFS, so it's a reasonable starting point for later use.
                {
                        if (level_compressor_enabled) {
                                float threshold = 0.01f;   // -40 dBFS.
                                float ratio = 20.0f;
                                float attack_time = 0.5f;
                                float release_time = 20.0f;
                                float makeup_gain = from_db(ref_level_dbfs - (-40.0f));  // +26 dB.
                                level_compressor.process(samples_out.data(), samples_out.size() / 2, threshold, ratio, attack_time, release_time, makeup_gain);
                                gain_staging_db = to_db(level_compressor.get_attenuation() * makeup_gain);
                        } else {
                                // Just apply the gain we already had.
                                float g = from_db(gain_staging_db);
                                for (size_t i = 0; i < samples_out.size(); ++i) {
                                        samples_out[i] *= g;
                                }
                        }
                }
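                // In other words, assuming ref_level_dbfs is -14 dBFS (as the comment
                // above implies), makeup_gain = from_db(-14 - (-40)) = from_db(26), about
                // a 20x linear gain. gain_staging_db then records the total gain actually
                // applied (attenuation times makeup, in dB), which is what the manual
                // branch reapplies when the auto level compressor is off.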

        #if 0
                printf("level=%f (%+5.2f dBFS) attenuation=%f (%+5.2f dB) end_result=%+5.2f dB\n",
                        level_compressor.get_level(), to_db(level_compressor.get_level()),
                        level_compressor.get_attenuation(), to_db(level_compressor.get_attenuation()),
                        to_db(level_compressor.get_level() * level_compressor.get_attenuation() * makeup_gain));
        #endif

        //      float limiter_att, compressor_att;

                // The real compressor.
                if (compressor_enabled) {
                        float threshold = from_db(compressor_threshold_dbfs);
                        float ratio = 20.0f;
                        float attack_time = 0.005f;
                        float release_time = 0.040f;
                        float makeup_gain = 2.0f;  // +6 dB.
                        compressor.process(samples_out.data(), samples_out.size() / 2, threshold, ratio, attack_time, release_time, makeup_gain);
        //              compressor_att = compressor.get_attenuation();
                }

                // Finally a limiter, at -10 dBFS (i.e., 4 dB above the reference level),
                // to take out the worst peaks only.
                // Note that since ratio is not infinite, we could go slightly higher than this.
                if (limiter_enabled) {
                        float threshold = from_db(limiter_threshold_dbfs);
                        float ratio = 30.0f;
                        float attack_time = 0.0f;  // Instant.
                        float release_time = 0.020f;
                        float makeup_gain = 1.0f;  // 0 dB.
                        limiter.process(samples_out.data(), samples_out.size() / 2, threshold, ratio, attack_time, release_time, makeup_gain);
        //              limiter_att = limiter.get_attenuation();
                }

        //      printf("limiter=%+5.1f  compressor=%+5.1f\n", to_db(limiter_att), to_db(compressor_att));
        }


        // At this point, we are most likely close to +0 LU, but all of our
        // measurements have been on raw sample values, not R128 values.
        // So we have a final makeup gain to get us to +0 LU; the gain
        // adjustments required should be relatively small, and also, the
        // offset shouldn't change much (only if the type of audio changes
        // significantly). Thus, we shoot for updating this value basically
        // “whenever we process buffers”, since the R128 calculation isn't exactly
        // something we get out per-sample.
        //
        // Note that there's a feedback loop here, so we choose a very slow filter
        // (half-time of 100 seconds).
        double target_loudness_factor, alpha;
        double loudness_lu = loudness_lufs - ref_level_lufs;
        double current_makeup_lu = to_db(final_makeup_gain);
        target_loudness_factor = from_db(-loudness_lu);

        // If we're outside +/- 5 LU uncorrected, we don't count it as
        // a normal signal (probably silence) and don't change the
        // correction factor; just apply what we already have.
        if (fabs(loudness_lu - current_makeup_lu) >= 5.0 || !final_makeup_gain_auto) {
                alpha = 0.0;
        } else {
                // Formula adapted from
                // https://en.wikipedia.org/wiki/Low-pass_filter#Simple_infinite_impulse_response_filter.
                const double half_time_s = 100.0;
                const double fc_mul_2pi_delta_t = 1.0 / (half_time_s * OUTPUT_FREQUENCY);
                alpha = fc_mul_2pi_delta_t / (fc_mul_2pi_delta_t + 1.0);
        }

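        // As a sanity check on the numbers (assuming OUTPUT_FREQUENCY is 48000):
        // fc_mul_2pi_delta_t above is 1 / (100 * 48000) ≈ 2.1e-7, and so is alpha.
        // When the auto gain is active, each stereo frame below therefore moves
        // final_makeup_gain only a tiny step toward target_loudness_factor, giving
        // the very slow response we want. Both channels of a frame get the same
        // gain, so the stereo image is preserved.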
        {
                unique_lock<mutex> lock(compressor_mutex);
                double m = final_makeup_gain;
                for (size_t i = 0; i < samples_out.size(); i += 2) {
                        samples_out[i + 0] *= m;
                        samples_out[i + 1] *= m;
                        m += (target_loudness_factor - m) * alpha;
                }
                final_makeup_gain = m;
        }

        return samples_out;
}