Support audio-only FFmpeg inputs. Somewhat wonky, though.
index d0737f217c260804c4272bf9d9d5382c109aaa94..9e7dd59a0dbc64ab6398e30824c8dae6d676b912 100644
--- a/audio_mixer.cpp
+++ b/audio_mixer.cpp
@@ -1,24 +1,32 @@
 #include "audio_mixer.h"
 
 #include <assert.h>
-#include <endian.h>
 #include <bmusb/bmusb.h>
-#include <stdio.h>
 #include <endian.h>
-#include <cmath>
-#include <limits>
-#ifdef __SSE__
+#include <math.h>
+#ifdef __SSE2__
 #include <immintrin.h>
 #endif
+#include <stdbool.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <algorithm>
+#include <chrono>
+#include <cmath>
+#include <cstddef>
+#include <limits>
+#include <utility>
 
 #include "db.h"
 #include "flags.h"
-#include "mixer.h"
+#include "metrics.h"
 #include "state.pb.h"
 #include "timebase.h"
 
 using namespace bmusb;
 using namespace std;
+using namespace std::chrono;
 using namespace std::placeholders;
 
 namespace {
@@ -159,13 +167,13 @@ void deinterleave_samples(const vector<float> &in, vector<float> *out_l, vector<
 
 }  // namespace
 
-AudioMixer::AudioMixer(unsigned num_cards)
-       : num_cards(num_cards),
+AudioMixer::AudioMixer(unsigned num_capture_cards, unsigned num_ffmpeg_inputs)
+       : num_capture_cards(num_capture_cards),
+         num_ffmpeg_inputs(num_ffmpeg_inputs),
+         ffmpeg_inputs(new AudioDevice[num_ffmpeg_inputs]),
          limiter(OUTPUT_FREQUENCY),
          correlation(OUTPUT_FREQUENCY)
 {
-       global_audio_mixer = this;
-
        for (unsigned bus_index = 0; bus_index < MAX_BUSES; ++bus_index) {
                locut[bus_index].init(FILTER_HPF, 2);
                eq[bus_index][EQ_BAND_BASS].init(FILTER_LOW_SHELF, 1);
@@ -178,9 +186,19 @@ AudioMixer::AudioMixer(unsigned num_cards)
        }
        set_limiter_enabled(global_flags.limiter_enabled);
        set_final_makeup_gain_auto(global_flags.final_makeup_gain_auto);
+
+       r128.init(2, OUTPUT_FREQUENCY);
+       r128.integr_start();
+
+       // hlen=16 is pretty low quality, but we use quite a bit of CPU otherwise,
+       // and there's a limit to how important the peak meter is.
+       peak_resampler.setup(OUTPUT_FREQUENCY, OUTPUT_FREQUENCY * 4, /*num_channels=*/2, /*hlen=*/16, /*frel=*/1.0);
+
+       global_audio_mixer = this;
        alsa_pool.init();
 
        if (!global_flags.input_mapping_filename.empty()) {
+               // Must happen after ALSAPool is initialized, as it needs to know the card list.
                current_mapping_mode = MappingMode::MULTICHANNEL;
                InputMapping new_input_mapping;
                if (!load_input_mapping_from_file(get_devices(),
@@ -198,12 +216,13 @@ AudioMixer::AudioMixer(unsigned num_cards)
                }
        }
 
-       r128.init(2, OUTPUT_FREQUENCY);
-       r128.integr_start();
-
-       // hlen=16 is pretty low quality, but we use quite a bit of CPU otherwise,
-       // and there's a limit to how important the peak meter is.
-       peak_resampler.setup(OUTPUT_FREQUENCY, OUTPUT_FREQUENCY * 4, /*num_channels=*/2, /*hlen=*/16, /*frel=*/1.0);
+       global_metrics.add("audio_loudness_short_lufs", &metric_audio_loudness_short_lufs, Metrics::TYPE_GAUGE);
+       global_metrics.add("audio_loudness_integrated_lufs", &metric_audio_loudness_integrated_lufs, Metrics::TYPE_GAUGE);
+       global_metrics.add("audio_loudness_range_low_lufs", &metric_audio_loudness_range_low_lufs, Metrics::TYPE_GAUGE);
+       global_metrics.add("audio_loudness_range_high_lufs", &metric_audio_loudness_range_high_lufs, Metrics::TYPE_GAUGE);
+       global_metrics.add("audio_peak_dbfs", &metric_audio_peak_dbfs, Metrics::TYPE_GAUGE);
+       global_metrics.add("audio_final_makeup_gain_db", &metric_audio_final_makeup_gain_db, Metrics::TYPE_GAUGE);
+       global_metrics.add("audio_correlation", &metric_audio_correlation, Metrics::TYPE_GAUGE);
 }
 
 void AudioMixer::reset_resampler(DeviceSpec device_spec)
@@ -219,14 +238,13 @@ void AudioMixer::reset_resampler_mutex_held(DeviceSpec device_spec)
        if (device->interesting_channels.empty()) {
                device->resampling_queue.reset();
        } else {
-               // TODO: ResamplingQueue should probably take the full device spec.
-               // (It's only used for console output, though.)
-               device->resampling_queue.reset(new ResamplingQueue(device_spec.index, device->capture_frequency, OUTPUT_FREQUENCY, device->interesting_channels.size()));
+               device->resampling_queue.reset(new ResamplingQueue(
+                       device_spec, device->capture_frequency, OUTPUT_FREQUENCY, device->interesting_channels.size(),
+                       global_flags.audio_queue_length_ms * 0.001));
        }
-       device->next_local_pts = 0;
 }
 
-bool AudioMixer::add_audio(DeviceSpec device_spec, const uint8_t *data, unsigned num_samples, AudioFormat audio_format, int64_t frame_length)
+bool AudioMixer::add_audio(DeviceSpec device_spec, const uint8_t *data, unsigned num_samples, AudioFormat audio_format, int64_t frame_length, steady_clock::time_point frame_time)
 {
        AudioDevice *device = find_audio_device(device_spec);
 
@@ -265,10 +283,14 @@ bool AudioMixer::add_audio(DeviceSpec device_spec, const uint8_t *data, unsigned
                }
        }
 
+       // If we changed frequency since last frame, we'll need to reset the resampler.
+       if (audio_format.sample_rate != device->capture_frequency) {
+               device->capture_frequency = audio_format.sample_rate;
+               reset_resampler_mutex_held(device_spec);
+       }
+
        // Now add it.
-       int64_t local_pts = device->next_local_pts;
-       device->resampling_queue->add_input_samples(local_pts / double(TIMEBASE), audio.get(), num_samples);
-       device->next_local_pts = local_pts + frame_length;
+       device->resampling_queue->add_input_samples(frame_time, audio.get(), num_samples, ResamplingQueue::ADJUST_RATE);
        return true;
 }
 
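With the hunk above, add_audio() is driven by capture timestamps rather than a device-local pts counter. A minimal, hypothetical caller sketch (the device index and the use of steady_clock::now() in place of the frame's real capture timestamp are illustrative only, not taken from the actual capture path):

    // Hypothetical caller: hands one captured audio frame to the mixer under the
    // new timestamp-based API. Real callers pass the frame's capture timestamp.
    #include <chrono>
    #include <cstdint>
    #include "audio_mixer.h"

    void forward_audio_frame(AudioMixer *mixer, const uint8_t *data, unsigned num_samples,
                             bmusb::AudioFormat audio_format, int64_t frame_length)
    {
            const DeviceSpec spec{InputSourceType::CAPTURE_CARD, 0};
            mixer->add_audio(spec, data, num_samples, audio_format, frame_length,
                             std::chrono::steady_clock::now());
    }
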
@@ -290,11 +312,7 @@ bool AudioMixer::add_silence(DeviceSpec device_spec, unsigned samples_per_frame,
 
        vector<float> silence(samples_per_frame * num_channels, 0.0f);
        for (unsigned i = 0; i < num_frames; ++i) {
-               device->resampling_queue->add_input_samples(device->next_local_pts / double(TIMEBASE), silence.data(), samples_per_frame);
-               // Note that if the format changed in the meantime, we have
-               // no way of detecting that; we just have to assume the frame length
-               // is always the same.
-               device->next_local_pts += frame_length;
+               device->resampling_queue->add_input_samples(steady_clock::now(), silence.data(), samples_per_frame, ResamplingQueue::DO_NOT_ADJUST_RATE);
        }
        return true;
 }
@@ -319,7 +337,9 @@ AudioMixer::BusSettings AudioMixer::get_default_bus_settings()
 {
        BusSettings settings;
        settings.fader_volume_db = 0.0f;
+       settings.muted = false;
        settings.locut_enabled = global_flags.locut_enabled;
+       settings.stereo_width = 1.0f;
        for (unsigned band_index = 0; band_index < NUM_EQ_BANDS; ++band_index) {
                settings.eq_level_db[band_index] = 0.0f;
        }
@@ -335,7 +355,9 @@ AudioMixer::BusSettings AudioMixer::get_bus_settings(unsigned bus_index) const
        lock_guard<timed_mutex> lock(audio_mutex);
        BusSettings settings;
        settings.fader_volume_db = fader_volume_db[bus_index];
+       settings.muted = mute[bus_index];
        settings.locut_enabled = locut_enabled[bus_index];
+       settings.stereo_width = stereo_width[bus_index];
        for (unsigned band_index = 0; band_index < NUM_EQ_BANDS; ++band_index) {
                settings.eq_level_db[band_index] = eq_level_db[bus_index][band_index];
        }
@@ -350,7 +372,9 @@ void AudioMixer::set_bus_settings(unsigned bus_index, const AudioMixer::BusSetti
 {
        lock_guard<timed_mutex> lock(audio_mutex);
        fader_volume_db[bus_index] = settings.fader_volume_db;
+       mute[bus_index] = settings.muted;
        locut_enabled[bus_index] = settings.locut_enabled;
+       stereo_width[bus_index] = settings.stereo_width;
        for (unsigned band_index = 0; band_index < NUM_EQ_BANDS; ++band_index) {
                eq_level_db[bus_index][band_index] = settings.eq_level_db[band_index];
        }
@@ -368,6 +392,8 @@ AudioMixer::AudioDevice *AudioMixer::find_audio_device(DeviceSpec device)
                return &video_cards[device.index];
        case InputSourceType::ALSA_INPUT:
                return &alsa_inputs[device.index];
+       case InputSourceType::FFMPEG_VIDEO_INPUT:
+               return &ffmpeg_inputs[device.index];
        case InputSourceType::SILENCE:
        default:
                assert(false);
@@ -400,23 +426,60 @@ void AudioMixer::find_sample_src_from_device(const map<DeviceSpec, vector<float>
 }
 
 // TODO: Can be SSSE3-optimized if need be.
-void AudioMixer::fill_audio_bus(const map<DeviceSpec, vector<float>> &samples_card, const InputMapping::Bus &bus, unsigned num_samples, float *output)
+void AudioMixer::fill_audio_bus(const map<DeviceSpec, vector<float>> &samples_card, const InputMapping::Bus &bus, unsigned num_samples, float stereo_width, float *output)
 {
        if (bus.device.type == InputSourceType::SILENCE) {
-               memset(output, 0, num_samples * sizeof(*output));
+               memset(output, 0, num_samples * 2 * sizeof(*output));
        } else {
                assert(bus.device.type == InputSourceType::CAPTURE_CARD ||
-                      bus.device.type == InputSourceType::ALSA_INPUT);
+                      bus.device.type == InputSourceType::ALSA_INPUT ||
+                      bus.device.type == InputSourceType::FFMPEG_VIDEO_INPUT);
                const float *lsrc, *rsrc;
                unsigned lstride, rstride;
                float *dptr = output;
                find_sample_src_from_device(samples_card, bus.device, bus.source_channel[0], &lsrc, &lstride);
                find_sample_src_from_device(samples_card, bus.device, bus.source_channel[1], &rsrc, &rstride);
-               for (unsigned i = 0; i < num_samples; ++i) {
-                       *dptr++ = *lsrc;
-                       *dptr++ = *rsrc;
-                       lsrc += lstride;
-                       rsrc += rstride;
+
+               // Apply stereo width settings. Remap the incoming stereo width (-1..1) to w
+               // in a 0..1 range, since that makes the calculations much easier (w = 0.5 = completely mono).
+               // Then, what we want is
+               //
+               //   L' = wL + (1-w)R = R + w(L-R)
+               //   R' = wR + (1-w)L = L + w(R-L)
+               //
+               // This can be further simplified calculation-wise by defining the weighted
+               // difference signal D = w(R-L), so that:
+               //
+               //   L' = R - D
+               //   R' = L + D
+               float w = 0.5f * stereo_width + 0.5f;
+               if (bus.source_channel[0] == bus.source_channel[1]) {
+                       // Mono anyway, so no need to bother.
+                       w = 1.0f;
+               } else if (fabs(w) < 1e-3) {
+                       // Perfect inverse.
+                       swap(lsrc, rsrc);
+                       swap(lstride, rstride);
+                       w = 1.0f;
+               }
+               if (fabs(w - 1.0f) < 1e-3) {
+                       // No calculations needed for stereo_width = 1.
+                       for (unsigned i = 0; i < num_samples; ++i) {
+                               *dptr++ = *lsrc;
+                               *dptr++ = *rsrc;
+                               lsrc += lstride;
+                               rsrc += rstride;
+                       }
+               } else {
+                       // General case.
+                       for (unsigned i = 0; i < num_samples; ++i) {
+                               float left = *lsrc, right = *rsrc;
+                               float diff = w * (right - left);
+                               *dptr++ = right - diff;
+                               *dptr++ = left + diff;
+                               lsrc += lstride;
+                               rsrc += rstride;
+                       }
                }
        }
 }
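
As a quick sanity check of the width math introduced above (not part of the patch), here is a tiny standalone C++ sketch applying the same D = w(R-L) formulation to one sample pair; the sample values are made up:

    // Standalone illustration: w = 1 keeps L/R as-is, w = 0.5 collapses to mono,
    // w = 0 swaps the channels -- matching the special cases in fill_audio_bus().
    #include <cstdio>

    int main()
    {
            const float left = 1.0f, right = 0.25f;
            const float stereo_widths[] = { 1.0f, 0.0f, -1.0f };
            for (float stereo_width : stereo_widths) {
                    float w = 0.5f * stereo_width + 0.5f;  // map -1..1 to 0..1
                    float diff = w * (right - left);       // D = w(R - L)
                    printf("stereo_width %+.1f -> L' = %.3f, R' = %.3f\n",
                           stereo_width, right - diff, left + diff);
            }
            return 0;
    }
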
@@ -436,6 +499,12 @@ vector<DeviceSpec> AudioMixer::get_active_devices() const
                        ret.push_back(device_spec);
                }
        }
+       for (unsigned card_index = 0; card_index < num_ffmpeg_inputs; ++card_index) {
+               const DeviceSpec device_spec{InputSourceType::FFMPEG_VIDEO_INPUT, card_index};
+               if (!find_audio_device(device_spec)->interesting_channels.empty()) {
+                       ret.push_back(device_spec);
+               }
+       }
        return ret;
 }
 
@@ -464,7 +533,7 @@ void apply_gain(float db, float last_db, vector<float> *samples)
 
 }  // namespace
 
-vector<float> AudioMixer::get_output(double pts, unsigned num_samples, ResamplingQueue::RateAdjustmentPolicy rate_adjustment_policy)
+vector<float> AudioMixer::get_output(steady_clock::time_point ts, unsigned num_samples, ResamplingQueue::RateAdjustmentPolicy rate_adjustment_policy)
 {
        map<DeviceSpec, vector<float>> samples_card;
        vector<float> samples_bus;
@@ -479,7 +548,7 @@ vector<float> AudioMixer::get_output(double pts, unsigned num_samples, Resamplin
                        memset(&samples_card[device_spec][0], 0, samples_card[device_spec].size() * sizeof(float));
                } else {
                        device->resampling_queue->get_output_samples(
-                               pts,
+                               ts,
                                &samples_card[device_spec][0],
                                num_samples,
                                rate_adjustment_policy);
@@ -490,7 +559,7 @@ vector<float> AudioMixer::get_output(double pts, unsigned num_samples, Resamplin
        samples_out.resize(num_samples * 2);
        samples_bus.resize(num_samples * 2);
        for (unsigned bus_index = 0; bus_index < input_mapping.buses.size(); ++bus_index) {
-               fill_audio_bus(samples_card, input_mapping.buses[bus_index], num_samples, &samples_bus[0]);
+               fill_audio_bus(samples_card, input_mapping.buses[bus_index], num_samples, stereo_width[bus_index], &samples_bus[0]);
                apply_eq(bus_index, &samples_bus);
 
                {
@@ -574,13 +643,12 @@ vector<float> AudioMixer::get_output(double pts, unsigned num_samples, Resamplin
        // (half-time of 30 seconds).
        double target_loudness_factor, alpha;
        double loudness_lu = r128.loudness_M() - ref_level_lufs;
-       double current_makeup_lu = to_db(final_makeup_gain);
        target_loudness_factor = final_makeup_gain * from_db(-loudness_lu);
 
-       // If we're outside +/- 5 LU uncorrected, we don't count it as
+       // If we're outside +/- 5 LU (after correction), we don't count it as
        // a normal signal (probably silence) and don't change the
        // correction factor; just apply what we already have.
-       if (fabs(loudness_lu - current_makeup_lu) >= 5.0 || !final_makeup_gain_auto) {
+       if (fabs(loudness_lu) >= 5.0 || !final_makeup_gain_auto) {
                alpha = 0.0;
        } else {
                // Formula adapted from
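
The ±5 LU gate above only decides whether the automatic makeup gain may adapt this frame; the actual update (cut off at the end of this hunk) smooths final_makeup_gain toward target_loudness_factor with the 30-second half-time mentioned in the context lines. A rough sketch of that kind of update, assuming a simple half-time parameterization rather than the patch's exact formula:

    // Hedged sketch, not the patch's exact formula: one exponential-smoothing
    // step toward the target gain, sized so the remaining error halves every
    // 30 seconds of audio.
    #include <cmath>

    double smooth_makeup_gain(double current_gain, double target_gain, double frame_seconds)
    {
            double alpha = 1.0 - std::pow(0.5, frame_seconds / 30.0);  // 30 s half-time
            return current_gain + alpha * (target_gain - current_gain);
    }
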
@@ -683,13 +751,14 @@ void AudioMixer::add_bus_to_master(unsigned bus_index, const vector<float> &samp
        assert(samples_bus.size() == samples_out->size());
        assert(samples_bus.size() % 2 == 0);
        unsigned num_samples = samples_bus.size() / 2;
-       if (fabs(fader_volume_db[bus_index] - last_fader_volume_db[bus_index]) > 1e-3) {
+       const float new_volume_db = mute[bus_index] ? -90.0f : fader_volume_db[bus_index].load();
+       if (fabs(new_volume_db - last_fader_volume_db[bus_index]) > 1e-3) {
                // The volume has changed; do a fade over the course of this frame.
                // (We might have some numerical issues here, but it seems to sound OK.)
                // For the purpose of fading here, the silence floor is set to -90 dB
                // (the fader only goes to -84).
                float old_volume = from_db(max<float>(last_fader_volume_db[bus_index], -90.0f));
-               float volume = from_db(max<float>(fader_volume_db[bus_index], -90.0f));
+               float volume = from_db(max<float>(new_volume_db, -90.0f));
 
                float volume_inc = pow(volume / old_volume, 1.0 / num_samples);
                volume = old_volume;
@@ -706,8 +775,8 @@ void AudioMixer::add_bus_to_master(unsigned bus_index, const vector<float> &samp
                                volume *= volume_inc;
                        }
                }
-       } else {
-               float volume = from_db(fader_volume_db[bus_index]);
+       } else if (new_volume_db > -90.0f) {
+               float volume = from_db(new_volume_db);
                if (bus_index == 0) {
                        for (unsigned i = 0; i < num_samples; ++i) {
                                (*samples_out)[i * 2 + 0] = samples_bus[i * 2 + 0] * volume;
@@ -721,13 +790,13 @@ void AudioMixer::add_bus_to_master(unsigned bus_index, const vector<float> &samp
                }
        }
 
-       last_fader_volume_db[bus_index] = fader_volume_db[bus_index];
+       last_fader_volume_db[bus_index] = new_volume_db;
 }
 
 void AudioMixer::measure_bus_levels(unsigned bus_index, const vector<float> &left, const vector<float> &right)
 {
        assert(left.size() == right.size());
-       const float volume = from_db(fader_volume_db[bus_index]);
+       const float volume = mute[bus_index] ? 0.0f : from_db(fader_volume_db[bus_index]);
        const float peak_levels[2] = {
                find_peak(left.data(), left.size()) * volume,
                find_peak(right.data(), right.size()) * volume
@@ -815,23 +884,35 @@ void AudioMixer::send_audio_level_callback()
        double loudness_range_low = r128.range_min();
        double loudness_range_high = r128.range_max();
 
+       metric_audio_loudness_short_lufs = loudness_s;
+       metric_audio_loudness_integrated_lufs = loudness_i;
+       metric_audio_loudness_range_low_lufs = loudness_range_low;
+       metric_audio_loudness_range_high_lufs = loudness_range_high;
+       metric_audio_peak_dbfs = to_db(peak);
+       metric_audio_final_makeup_gain_db = to_db(final_makeup_gain);
+       metric_audio_correlation = correlation.get_correlation();
+
        vector<BusLevel> bus_levels;
        bus_levels.resize(input_mapping.buses.size());
        {
                lock_guard<mutex> lock(compressor_mutex);
                for (unsigned bus_index = 0; bus_index < bus_levels.size(); ++bus_index) {
-                       bus_levels[bus_index].current_level_dbfs[0] = to_db(peak_history[bus_index][0].current_level);
-                       bus_levels[bus_index].current_level_dbfs[1] = to_db(peak_history[bus_index][1].current_level);
-                       bus_levels[bus_index].peak_level_dbfs[0] = to_db(peak_history[bus_index][0].current_peak);
-                       bus_levels[bus_index].peak_level_dbfs[1] = to_db(peak_history[bus_index][1].current_peak);
-                       bus_levels[bus_index].historic_peak_dbfs = to_db(
+                       BusLevel &levels = bus_levels[bus_index];
+                       BusMetrics &metrics = bus_metrics[bus_index];
+
+                       levels.current_level_dbfs[0] = metrics.current_level_dbfs[0] = to_db(peak_history[bus_index][0].current_level);
+                       levels.current_level_dbfs[1] = metrics.current_level_dbfs[1] = to_db(peak_history[bus_index][1].current_level);
+                       levels.peak_level_dbfs[0] = metrics.peak_level_dbfs[0] = to_db(peak_history[bus_index][0].current_peak);
+                       levels.peak_level_dbfs[1] = metrics.peak_level_dbfs[1] = to_db(peak_history[bus_index][1].current_peak);
+                       levels.historic_peak_dbfs = metrics.historic_peak_dbfs = to_db(
                                max(peak_history[bus_index][0].historic_peak,
                                    peak_history[bus_index][1].historic_peak));
-                       bus_levels[bus_index].gain_staging_db = gain_staging_db[bus_index];
+                       levels.gain_staging_db = metrics.gain_staging_db = gain_staging_db[bus_index];
                        if (compressor_enabled[bus_index]) {
-                               bus_levels[bus_index].compressor_attenuation_db = -to_db(compressor[bus_index]->get_attenuation());
+                               levels.compressor_attenuation_db = metrics.compressor_attenuation_db = -to_db(compressor[bus_index]->get_attenuation());
                        } else {
-                               bus_levels[bus_index].compressor_attenuation_db = 0.0;
+                               levels.compressor_attenuation_db = 0.0;
+                               metrics.compressor_attenuation_db = 0.0 / 0.0;  // Compressor disabled, so export NaN (0/0) rather than a misleading 0 dB.
                        }
                }
        }
@@ -847,7 +928,7 @@ map<DeviceSpec, DeviceInfo> AudioMixer::get_devices()
        lock_guard<timed_mutex> lock(audio_mutex);
 
        map<DeviceSpec, DeviceInfo> devices;
-       for (unsigned card_index = 0; card_index < num_cards; ++card_index) {
+       for (unsigned card_index = 0; card_index < num_capture_cards; ++card_index) {
                const DeviceSpec spec{ InputSourceType::CAPTURE_CARD, card_index };
                const AudioDevice *device = &video_cards[card_index];
                DeviceInfo info;
@@ -867,6 +948,14 @@ map<DeviceSpec, DeviceInfo> AudioMixer::get_devices()
                info.alsa_address = device.address;
                devices.insert(make_pair(spec, info));
        }
+       for (unsigned card_index = 0; card_index < num_ffmpeg_inputs; ++card_index) {
+               const DeviceSpec spec{ InputSourceType::FFMPEG_VIDEO_INPUT, card_index };
+               const AudioDevice *device = &ffmpeg_inputs[card_index];
+               DeviceInfo info;
+               info.display_name = device->display_name;
+               info.num_channels = 2;
+               devices.insert(make_pair(spec, info));
+       }
        return devices;
 }
 
@@ -893,16 +982,25 @@ void AudioMixer::serialize_device(DeviceSpec device_spec, DeviceSpecProto *devic
                case InputSourceType::ALSA_INPUT:
                        alsa_pool.serialize_device(device_spec.index, device_spec_proto);
                        break;
+               case InputSourceType::FFMPEG_VIDEO_INPUT:
+                       device_spec_proto->set_type(DeviceSpecProto::FFMPEG_VIDEO_INPUT);
+                       device_spec_proto->set_index(device_spec.index);
+                       device_spec_proto->set_display_name(ffmpeg_inputs[device_spec.index].display_name);
+                       break;
        }
 }
 
 void AudioMixer::set_simple_input(unsigned card_index)
 {
+       assert(card_index < num_capture_cards + num_ffmpeg_inputs);
        InputMapping new_input_mapping;
        InputMapping::Bus input;
        input.name = "Main";
-       input.device.type = InputSourceType::CAPTURE_CARD;
-       input.device.index = card_index;
+       if (card_index >= num_capture_cards) {
+               input.device = DeviceSpec{InputSourceType::FFMPEG_VIDEO_INPUT, card_index - num_capture_cards};
+       } else {
+               input.device = DeviceSpec{InputSourceType::CAPTURE_CARD, card_index};
+       }
        input.source_channel[0] = 0;
        input.source_channel[1] = 1;
 
@@ -922,6 +1020,11 @@ unsigned AudioMixer::get_simple_input() const
            input_mapping.buses[0].source_channel[0] == 0 &&
            input_mapping.buses[0].source_channel[1] == 1) {
                return input_mapping.buses[0].device.index;
+       } else if (input_mapping.buses.size() == 1 &&
+                  input_mapping.buses[0].device.type == InputSourceType::FFMPEG_VIDEO_INPUT &&
+                  input_mapping.buses[0].source_channel[0] == 0 &&
+                  input_mapping.buses[0].source_channel[1] == 1) {
+               return input_mapping.buses[0].device.index + num_capture_cards;
        } else {
                return numeric_limits<unsigned>::max();
        }
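
set_simple_input() and get_simple_input() above fold the FFmpeg inputs onto the end of the capture-card index range. The mapping, restated as a small hypothetical helper (simple_input_to_device is not part of the patch):

    // With e.g. num_capture_cards == 2: indices 0-1 select CAPTURE_CARD 0-1,
    // index 2 selects FFMPEG_VIDEO_INPUT 0, and get_simple_input() reverses this
    // by adding num_capture_cards back onto the FFmpeg device index.
    DeviceSpec simple_input_to_device(unsigned card_index, unsigned num_capture_cards)
    {
            if (card_index >= num_capture_cards) {
                    return DeviceSpec{InputSourceType::FFMPEG_VIDEO_INPUT, card_index - num_capture_cards};
            } else {
                    return DeviceSpec{InputSourceType::CAPTURE_CARD, card_index};
            }
    }
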
@@ -945,13 +1048,74 @@ void AudioMixer::set_input_mapping_lock_held(const InputMapping &new_input_mappi
        map<DeviceSpec, set<unsigned>> interesting_channels;
        for (const InputMapping::Bus &bus : new_input_mapping.buses) {
                if (bus.device.type == InputSourceType::CAPTURE_CARD ||
-                   bus.device.type == InputSourceType::ALSA_INPUT) {
+                   bus.device.type == InputSourceType::ALSA_INPUT ||
+                   bus.device.type == InputSourceType::FFMPEG_VIDEO_INPUT) {
                        for (unsigned channel = 0; channel < 2; ++channel) {
                                if (bus.source_channel[channel] != -1) {
                                        interesting_channels[bus.device].insert(bus.source_channel[channel]);
                                }
                        }
+               } else {
+                       assert(bus.device.type == InputSourceType::SILENCE);
+               }
+       }
+
+       // Kill all the old metrics, and set up new ones.
+       for (unsigned bus_index = 0; bus_index < input_mapping.buses.size(); ++bus_index) {
+               BusMetrics &metrics = bus_metrics[bus_index];
+
+               vector<pair<string, string>> labels_left = metrics.labels;
+               labels_left.emplace_back("channel", "left");
+               vector<pair<string, string>> labels_right = metrics.labels;
+               labels_right.emplace_back("channel", "right");
+
+               global_metrics.remove("bus_current_level_dbfs", labels_left);
+               global_metrics.remove("bus_current_level_dbfs", labels_right);
+               global_metrics.remove("bus_peak_level_dbfs", labels_left);
+               global_metrics.remove("bus_peak_level_dbfs", labels_right);
+               global_metrics.remove("bus_historic_peak_dbfs", metrics.labels);
+               global_metrics.remove("bus_gain_staging_db", metrics.labels);
+               global_metrics.remove("bus_compressor_attenuation_db", metrics.labels);
+       }
+       bus_metrics.reset(new BusMetrics[new_input_mapping.buses.size()]);
+       for (unsigned bus_index = 0; bus_index < new_input_mapping.buses.size(); ++bus_index) {
+               const InputMapping::Bus &bus = new_input_mapping.buses[bus_index];
+               BusMetrics &metrics = bus_metrics[bus_index];
+
+               char bus_index_str[16], source_index_str[16], source_channels_str[64];
+               snprintf(bus_index_str, sizeof(bus_index_str), "%u", bus_index);
+               snprintf(source_index_str, sizeof(source_index_str), "%u", bus.device.index);
+               snprintf(source_channels_str, sizeof(source_channels_str), "%d:%d", bus.source_channel[0], bus.source_channel[1]);
+
+               vector<pair<string, string>> labels;
+               metrics.labels.emplace_back("index", bus_index_str);
+               metrics.labels.emplace_back("name", bus.name);
+               if (bus.device.type == InputSourceType::SILENCE) {
+                       metrics.labels.emplace_back("source_type", "silence");
+               } else if (bus.device.type == InputSourceType::CAPTURE_CARD) {
+                       metrics.labels.emplace_back("source_type", "capture_card");
+               } else if (bus.device.type == InputSourceType::ALSA_INPUT) {
+                       metrics.labels.emplace_back("source_type", "alsa_input");
+               } else if (bus.device.type == InputSourceType::FFMPEG_VIDEO_INPUT) {
+                       metrics.labels.emplace_back("source_type", "ffmpeg_video_input");
+               } else {
+                       assert(false);
                }
+               metrics.labels.emplace_back("source_index", source_index_str);
+               metrics.labels.emplace_back("source_channels", source_channels_str);
+
+               vector<pair<string, string>> labels_left = metrics.labels;
+               labels_left.emplace_back("channel", "left");
+               vector<pair<string, string>> labels_right = metrics.labels;
+               labels_right.emplace_back("channel", "right");
+
+               global_metrics.add("bus_current_level_dbfs", labels_left, &metrics.current_level_dbfs[0], Metrics::TYPE_GAUGE);
+               global_metrics.add("bus_current_level_dbfs", labels_right, &metrics.current_level_dbfs[1], Metrics::TYPE_GAUGE);
+               global_metrics.add("bus_peak_level_dbfs", labels_left, &metrics.peak_level_dbfs[0], Metrics::TYPE_GAUGE);
+               global_metrics.add("bus_peak_level_dbfs", labels_right, &metrics.peak_level_dbfs[1], Metrics::TYPE_GAUGE);
+               global_metrics.add("bus_historic_peak_dbfs", metrics.labels, &metrics.historic_peak_dbfs, Metrics::TYPE_GAUGE);
+               global_metrics.add("bus_gain_staging_db", metrics.labels, &metrics.gain_staging_db, Metrics::TYPE_GAUGE);
+               global_metrics.add("bus_compressor_attenuation_db", metrics.labels, &metrics.compressor_attenuation_db, Metrics::TYPE_GAUGE);
        }
 
        // Reset resamplers for all cards that don't have the exact same state as before.
@@ -977,6 +1141,14 @@ void AudioMixer::set_input_mapping_lock_held(const InputMapping &new_input_mappi
                        reset_resampler_mutex_held(device_spec);
                }
        }
+       for (unsigned card_index = 0; card_index < num_ffmpeg_inputs; ++card_index) {
+               const DeviceSpec device_spec{InputSourceType::FFMPEG_VIDEO_INPUT, card_index};
+               AudioDevice *device = find_audio_device(device_spec);
+               if (device->interesting_channels != interesting_channels[device_spec]) {
+                       device->interesting_channels = interesting_channels[device_spec];
+                       reset_resampler_mutex_held(device_spec);
+               }
+       }
 
        input_mapping = new_input_mapping;
 }
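
For reference, the per-bus labels assembled above mean each bus exports series along these lines, assuming the Metrics class uses Prometheus-style text exposition (the bus name and sample value here are illustrative):

    bus_peak_level_dbfs{index="0",name="Main",source_type="capture_card",source_index="0",source_channels="0:1",channel="left"} -12.3
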
@@ -1006,4 +1178,18 @@ void AudioMixer::reset_peak(unsigned bus_index)
        }
 }
 
+bool AudioMixer::is_mono(unsigned bus_index)
+{
+       lock_guard<timed_mutex> lock(audio_mutex);
+       const InputMapping::Bus &bus = input_mapping.buses[bus_index];
+       if (bus.device.type == InputSourceType::SILENCE) {
+               return true;
+       } else {
+               assert(bus.device.type == InputSourceType::CAPTURE_CARD ||
+                      bus.device.type == InputSourceType::ALSA_INPUT ||
+                      bus.device.type == InputSourceType::FFMPEG_VIDEO_INPUT);
+               return bus.source_channel[0] == bus.source_channel[1];
+       }
+}
+
 AudioMixer *global_audio_mixer = nullptr;