#include "mixer.h"
#include <assert.h>
+#include <endian.h>
#include <epoxy/egl.h>
#include <movit/effect_chain.h>
#include <movit/effect_util.h>
#include <sys/time.h>
#include <time.h>
#include <algorithm>
+#include <chrono>
#include <cmath>
#include <condition_variable>
#include <cstddef>
#include <sys/resource.h>
#include "bmusb/bmusb.h"
+#include "bmusb/fake_capture.h"
#include "context.h"
#include "decklink_capture.h"
#include "defs.h"
-#include "fake_capture.h"
+#include "disk_space_estimator.h"
#include "flags.h"
#include "video_encoder.h"
#include "pbo_frame_allocator.h"
using namespace movit;
using namespace std;
+using namespace std::chrono;
using namespace std::placeholders;
+using namespace bmusb;
Mixer *global_mixer = nullptr;
bool uses_mlock = false;
uint32_t s2 = *src++;
uint32_t s3 = *src++;
uint32_t s = s1 | (s1 << 8) | (s2 << 16) | (s3 << 24);
- dst[i * out_channels + j] = int(s) * (1.0f / 4294967296.0f);
+ dst[i * out_channels + j] = int(s) * (1.0f / 2147483648.0f);
}
src += 3 * (in_channels - out_channels);
}
assert(in_channels >= out_channels);
for (size_t i = 0; i < num_samples; ++i) {
for (size_t j = 0; j < out_channels; ++j) {
- // Note: Assumes little-endian.
- int32_t s = *(int32_t *)src;
- dst[i * out_channels + j] = s * (1.0f / 4294967296.0f);
+ int32_t s = le32toh(*(int32_t *)src);
+ dst[i * out_channels + j] = s * (1.0f / 2147483648.0f);
src += 4;
}
src += 4 * (in_channels - out_channels);
movit_texel_subpixel_precision /= 2.0;
resource_pool.reset(new ResourcePool);
- theme.reset(new Theme(global_flags.theme_filename.c_str(), resource_pool.get(), num_cards));
+ theme.reset(new Theme(global_flags.theme_filename, global_flags.theme_dirs, resource_pool.get(), num_cards));
for (unsigned i = 0; i < NUM_OUTPUTS; ++i) {
output_channel[i].parent = this;
output_channel[i].channel = i;
display_chain->set_dither_bits(0); // Don't bother.
display_chain->finalize();
- video_encoder.reset(new VideoEncoder(resource_pool.get(), h264_encoder_surface, global_flags.va_display, WIDTH, HEIGHT, &httpd));
+ video_encoder.reset(new VideoEncoder(resource_pool.get(), h264_encoder_surface, global_flags.va_display, WIDTH, HEIGHT, &httpd, global_disk_space_estimator));
// Start listening for clients only once VideoEncoder has written its header, if any.
httpd.start(9095);
- // First try initializing the fake devices, then PCI devices, then USB,
- // until we have the desired number of cards.
- unsigned num_pci_devices = 0, num_usb_devices = 0;
+	// First try initializing the PCI devices, then USB, then
+	// fill up with fake cards until we have the desired number of cards.
+ unsigned num_pci_devices = 0;
unsigned card_index = 0;
- assert(global_flags.num_fake_cards >= 0); // Enforced in flags.cpp.
- unsigned num_fake_cards = global_flags.num_fake_cards;
-
- assert(num_fake_cards <= num_cards); // Enforced in flags.cpp.
- for ( ; card_index < num_fake_cards; ++card_index) {
- configure_card(card_index, new FakeCapture(card_index), /*is_fake_capture=*/true);
- }
-
- if (global_flags.num_fake_cards > 0) {
- fprintf(stderr, "Initialized %d fake cards.\n", global_flags.num_fake_cards);
- }
-
- if (card_index < num_cards) {
+ {
IDeckLinkIterator *decklink_iterator = CreateDeckLinkIteratorInstance();
if (decklink_iterator != nullptr) {
for ( ; card_index < num_cards; ++card_index) {
break;
}
- configure_card(card_index, new DeckLinkCapture(decklink, card_index - num_fake_cards), /*is_fake_capture=*/false);
+ configure_card(card_index, new DeckLinkCapture(decklink, card_index), /*is_fake_capture=*/false);
++num_pci_devices;
}
decklink_iterator->Release();
- fprintf(stderr, "Found %d DeckLink PCI card(s).\n", num_pci_devices);
+ fprintf(stderr, "Found %u DeckLink PCI card(s).\n", num_pci_devices);
} else {
fprintf(stderr, "DeckLink drivers not found. Probing for USB cards only.\n");
}
}
- for ( ; card_index < num_cards; ++card_index) {
- BMUSBCapture *capture = new BMUSBCapture(card_index - num_pci_devices - num_fake_cards);
+ unsigned num_usb_devices = BMUSBCapture::num_cards();
+ for (unsigned usb_card_index = 0; usb_card_index < num_usb_devices && card_index < num_cards; ++usb_card_index, ++card_index) {
+ BMUSBCapture *capture = new BMUSBCapture(usb_card_index);
capture->set_card_disconnected_callback(bind(&Mixer::bm_hotplug_remove, this, card_index));
configure_card(card_index, capture, /*is_fake_capture=*/false);
- ++num_usb_devices;
}
+ fprintf(stderr, "Found %u USB card(s).\n", num_usb_devices);
- if (num_usb_devices > 0) {
- has_bmusb_thread = true;
- BMUSBCapture::set_card_connected_callback(bind(&Mixer::bm_hotplug_add, this, _1));
- BMUSBCapture::start_bm_thread();
+ unsigned num_fake_cards = 0;
+ for ( ; card_index < num_cards; ++card_index, ++num_fake_cards) {
+ FakeCapture *capture = new FakeCapture(WIDTH, HEIGHT, FAKE_FPS, OUTPUT_FREQUENCY, card_index, global_flags.fake_cards_audio);
+ configure_card(card_index, capture, /*is_fake_capture=*/true);
}
+ if (num_fake_cards > 0) {
+ fprintf(stderr, "Initialized %u fake cards.\n", num_fake_cards);
+ }
+
+ BMUSBCapture::set_card_connected_callback(bind(&Mixer::bm_hotplug_add, this, _1));
+ BMUSBCapture::start_bm_thread();
+
for (card_index = 0; card_index < num_cards; ++card_index) {
cards[card_index].queue_length_policy.reset(card_index);
cards[card_index].capture->start_bm_capture();
locut.init(FILTER_HPF, 2);
- // If --flat-audio is given, turn off everything that messes with the sound,
- // except the final makeup gain.
- if (global_flags.flat_audio) {
- set_locut_enabled(false);
- set_gain_staging_auto(false);
- set_limiter_enabled(false);
- set_compressor_enabled(false);
- }
+ set_locut_enabled(global_flags.locut_enabled);
+ set_gain_staging_db(global_flags.initial_gain_staging_db);
+ set_gain_staging_auto(global_flags.gain_staging_auto);
+ set_compressor_enabled(global_flags.compressor_enabled);
+ set_limiter_enabled(global_flags.limiter_enabled);
+ set_final_makeup_gain_auto(global_flags.final_makeup_gain_auto);
// hlen=16 is pretty low quality, but we use quite a bit of CPU otherwise,
// and there's a limit to how important the peak meter is.
{
resource_pool->release_glsl_program(cbcr_program_num);
glDeleteBuffers(1, &cbcr_vbo);
- if (has_bmusb_thread) {
- BMUSBCapture::stop_bm_thread();
- }
+ BMUSBCapture::stop_bm_thread();
for (unsigned card_index = 0; card_index < num_cards; ++card_index) {
{
is_mode_scanning[card_index] = false;
} else {
static constexpr double switch_time_s = 0.5; // Should be enough time for the signal to stabilize.
- timespec now;
- clock_gettime(CLOCK_MONOTONIC, &now);
- double sec_since_last_switch = (now.tv_sec - last_mode_scan_change[card_index].tv_sec) +
- 1e-9 * (now.tv_nsec - last_mode_scan_change[card_index].tv_nsec);
+ steady_clock::time_point now = steady_clock::now();
+ double sec_since_last_switch = duration<double>(steady_clock::now() - last_mode_scan_change[card_index]).count();
if (sec_since_last_switch > switch_time_s) {
// It isn't this mode; try the next one.
mode_scanlist_index[card_index]++;
PBOFrameAllocator::Userdata *userdata = (PBOFrameAllocator::Userdata *)video_frame.userdata;
unsigned num_fields = video_format.interlaced ? 2 : 1;
- timespec frame_upload_start;
+ steady_clock::time_point frame_upload_start;
if (video_format.interlaced) {
// Send the two fields along as separate frames; the other side will need to add
// a deinterlacer to actually get this right.
assert(frame_length % 2 == 0);
frame_length /= 2;
num_fields = 2;
- clock_gettime(CLOCK_MONOTONIC, &frame_upload_start);
+ frame_upload_start = steady_clock::now();
}
userdata->last_interlaced = video_format.interlaced;
userdata->last_has_signal = video_format.has_signal;
// against the video display, although the latter is not as critical.)
// This requires our system clock to be reasonably close to the
// video clock, but that's not an unreasonable assumption.
- timespec second_field_start;
- second_field_start.tv_nsec = frame_upload_start.tv_nsec +
- frame_length * 1000000000 / TIMEBASE;
- second_field_start.tv_sec = frame_upload_start.tv_sec +
- second_field_start.tv_nsec / 1000000000;
- second_field_start.tv_nsec %= 1000000000;
-
- while (clock_nanosleep(CLOCK_MONOTONIC, TIMER_ABSTIME,
- &second_field_start, nullptr) == -1 &&
- errno == EINTR) ;
+ steady_clock::time_point second_field_start = frame_upload_start +
+ nanoseconds(frame_length * 1000000000 / TIMEBASE);
+ this_thread::sleep_until(second_field_start);
}
{
exit(1);
}
- struct timespec start, now;
- clock_gettime(CLOCK_MONOTONIC, &start);
+ steady_clock::time_point start, now;
+ start = steady_clock::now();
int frame = 0;
int stats_dropped_frames = 0;
}
}
- int64_t duration = new_frames[master_card_index].length;
- render_one_frame(duration);
+ int64_t frame_duration = new_frames[master_card_index].length;
+ render_one_frame(frame_duration);
++frame;
- pts_int += duration;
+ pts_int += frame_duration;
- clock_gettime(CLOCK_MONOTONIC, &now);
- double elapsed = now.tv_sec - start.tv_sec +
- 1e-9 * (now.tv_nsec - start.tv_nsec);
+ now = steady_clock::now();
+ double elapsed = duration<double>(now - start).count();
if (frame % 100 == 0) {
printf("%d frames (%d dropped) in %.3f seconds = %.1f fps (%.1f ms/frame)",
frame, stats_dropped_frames, elapsed, frame / elapsed,
CaptureCard *card = &cards[card_index];
if (card->capture->get_disconnected()) {
fprintf(stderr, "Card %u went away, replacing with a fake card.\n", card_index);
- configure_card(card_index, new FakeCapture(card_index), /*is_fake_capture=*/true);
+ FakeCapture *capture = new FakeCapture(WIDTH, HEIGHT, FAKE_FPS, OUTPUT_FREQUENCY, card_index, global_flags.fake_cards_audio);
+ configure_card(card_index, capture, /*is_fake_capture=*/true);
card->queue_length_policy.reset(card_index);
card->capture->start_bm_capture();
}
// Resample the audio as needed, including from previously dropped frames.
assert(num_cards > 0);
for (unsigned frame_num = 0; frame_num < dropped_frames + 1; ++frame_num) {
+ const bool dropped_frame = (frame_num != dropped_frames);
{
// Signal to the audio thread to process this frame.
+ // Note that if the frame is a dropped frame, we signal that
+ // we don't want to use this frame as base for adjusting
+ // the resampler rate. The reason for this is that the timing
+ // of these frames is often way too late; they typically don't
+ // “arrive” before we synthesize them. Thus, we could end up
+ // in a situation where we have inserted e.g. five audio frames
+ // into the queue before we then start pulling five of them
+ // back out. This makes ResamplingQueue overestimate the delay,
+ // causing undue resampler changes. (We _do_ use the last,
+ // non-dropped frame; perhaps we should just discard that as well,
+ // since dropped frames are expected to be rare, and it might be
+ // better to just wait until we have a slightly more normal situation).
unique_lock<mutex> lock(audio_mutex);
- audio_task_queue.push(AudioTask{pts_int, num_samples_per_frame});
+ bool adjust_rate = !dropped_frame;
+ audio_task_queue.push(AudioTask{pts_int, num_samples_per_frame, adjust_rate});
audio_task_queue_changed.notify_one();
}
- if (frame_num != dropped_frames) {
+ if (dropped_frame) {
// For dropped frames, increase the pts. Note that if the format changed
// in the meantime, we have no way of detecting that; we just have to
// assume the frame length is always the same.
audio_task_queue.pop();
}
- process_audio_one_frame(task.pts_int, task.num_samples);
+ process_audio_one_frame(task.pts_int, task.num_samples, task.adjust_rate);
}
}
-void Mixer::process_audio_one_frame(int64_t frame_pts_int, int num_samples)
+void Mixer::process_audio_one_frame(int64_t frame_pts_int, int num_samples, bool adjust_rate)
{
vector<float> samples_card;
vector<float> samples_out;
samples_card.resize(num_samples * 2);
{
unique_lock<mutex> lock(cards[card_index].audio_mutex);
- cards[card_index].resampling_queue->get_output_samples(double(frame_pts_int) / TIMEBASE, &samples_card[0], num_samples);
+ ResamplingQueue::RateAdjustmentPolicy rate_adjustment_policy =
+ adjust_rate ? ResamplingQueue::ADJUST_RATE : ResamplingQueue::DO_NOT_ADJUST_RATE;
+ cards[card_index].resampling_queue->get_output_samples(
+ double(frame_pts_int) / TIMEBASE,
+ &samples_card[0],
+ num_samples,
+ rate_adjustment_policy);
}
if (card_index == selected_audio_card) {
samples_out = move(samples_card);
// printf("limiter=%+5.1f compressor=%+5.1f\n", 20.0*log10(limiter_att), 20.0*log10(compressor_att));
- // Upsample 4x to find interpolated peak.
- peak_resampler.inp_data = samples_out.data();
- peak_resampler.inp_count = samples_out.size() / 2;
-
- vector<float> interpolated_samples_out;
- interpolated_samples_out.resize(samples_out.size());
- while (peak_resampler.inp_count > 0) { // About four iterations.
- peak_resampler.out_data = &interpolated_samples_out[0];
- peak_resampler.out_count = interpolated_samples_out.size() / 2;
- peak_resampler.process();
- size_t out_stereo_samples = interpolated_samples_out.size() / 2 - peak_resampler.out_count;
- peak = max<float>(peak, find_peak(interpolated_samples_out.data(), out_stereo_samples * 2));
- peak_resampler.out_data = nullptr;
- }
-
// At this point, we are most likely close to +0 LU, but all of our
// measurements have been on raw sample values, not R128 values.
// So we have a final makeup gain to get us to +0 LU; the gain
final_makeup_gain = m;
}
+ // Upsample 4x to find interpolated peak.
+ peak_resampler.inp_data = samples_out.data();
+ peak_resampler.inp_count = samples_out.size() / 2;
+
+ vector<float> interpolated_samples_out;
+ interpolated_samples_out.resize(samples_out.size());
+ while (peak_resampler.inp_count > 0) { // About four iterations.
+ peak_resampler.out_data = &interpolated_samples_out[0];
+ peak_resampler.out_count = interpolated_samples_out.size() / 2;
+ peak_resampler.process();
+ size_t out_stereo_samples = interpolated_samples_out.size() / 2 - peak_resampler.out_count;
+ peak = max<float>(peak, find_peak(interpolated_samples_out.data(), out_stereo_samples * 2));
+ peak_resampler.out_data = nullptr;
+ }
+
// Find R128 levels and L/R correlation.
vector<float> left, right;
deinterleave_samples(samples_out, &left, &right);
assert(!mode_scanlist[card_index].empty());
mode_scanlist_index[card_index] = 0;
cards[card_index].capture->set_video_mode(mode_scanlist[card_index][0]);
- clock_gettime(CLOCK_MONOTONIC, &last_mode_scan_change[card_index]);
+ last_mode_scan_change[card_index] = steady_clock::now();
}
Mixer::OutputChannel::~OutputChannel()