#include "mixer.h"
#include <assert.h>
-#include <endian.h>
#include <epoxy/egl.h>
#include <movit/effect_chain.h>
#include <movit/effect_util.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/resource.h>
-#include <sys/time.h>
-#include <time.h>
#include <algorithm>
#include <chrono>
-#include <cmath>
#include <condition_variable>
#include <cstddef>
+#include <cstdint>
#include <memory>
#include <mutex>
+#include <ratio>
#include <string>
#include <thread>
#include <utility>
#include <vector>
+#include "DeckLinkAPI.h"
+#include "LinuxCOM.h"
+#include "alsa_output.h"
#include "bmusb/bmusb.h"
#include "bmusb/fake_capture.h"
#include "context.h"
-#include "db.h"
#include "decklink_capture.h"
#include "defs.h"
#include "disk_space_estimator.h"
#include "flags.h"
+#include "input_mapping.h"
#include "pbo_frame_allocator.h"
#include "ref_counted_gl_sync.h"
+#include "resampling_queue.h"
#include "timebase.h"
#include "video_encoder.h"
+class IDeckLink;
class QOpenGLContext;
using namespace movit;
num_cards(num_cards),
mixer_surface(create_surface(format)),
h264_encoder_surface(create_surface(format)),
- audio_mixer(num_cards),
- correlation(OUTPUT_FREQUENCY)
+ audio_mixer(num_cards)
{
CHECK(init_movit(MOVIT_SHADER_DIR, MOVIT_DEBUG_OFF));
check_error();
inout_format.gamma_curve = GAMMA_sRGB;
// Display chain; shows the live output produced by the main chain (its RGBA version).
- display_chain.reset(new EffectChain(WIDTH, HEIGHT, resource_pool.get()));
+ display_chain.reset(new EffectChain(global_flags.width, global_flags.height, resource_pool.get()));
check_error();
- display_input = new FlatInput(inout_format, FORMAT_RGB, GL_UNSIGNED_BYTE, WIDTH, HEIGHT); // FIXME: GL_UNSIGNED_BYTE is really wrong.
+ display_input = new FlatInput(inout_format, FORMAT_RGB, GL_UNSIGNED_BYTE, global_flags.width, global_flags.height); // FIXME: GL_UNSIGNED_BYTE is really wrong.
display_chain->add_input(display_input);
display_chain->add_output(inout_format, OUTPUT_ALPHA_FORMAT_POSTMULTIPLIED);
display_chain->set_dither_bits(0); // Don't bother.
display_chain->finalize();
- video_encoder.reset(new VideoEncoder(resource_pool.get(), h264_encoder_surface, global_flags.va_display, WIDTH, HEIGHT, &httpd, global_disk_space_estimator));
+ video_encoder.reset(new VideoEncoder(resource_pool.get(), h264_encoder_surface, global_flags.va_display, global_flags.width, global_flags.height, &httpd, global_disk_space_estimator));
// Start listening for clients only once VideoEncoder has written its header, if any.
httpd.start(9095);
unsigned num_fake_cards = 0;
for ( ; card_index < num_cards; ++card_index, ++num_fake_cards) {
- FakeCapture *capture = new FakeCapture(WIDTH, HEIGHT, FAKE_FPS, OUTPUT_FREQUENCY, card_index, global_flags.fake_cards_audio);
+ FakeCapture *capture = new FakeCapture(global_flags.width, global_flags.height, FAKE_FPS, OUTPUT_FREQUENCY, card_index, global_flags.fake_cards_audio);
configure_card(card_index, capture, /*is_fake_capture=*/true);
}
BMUSBCapture::set_card_connected_callback(bind(&Mixer::bm_hotplug_add, this, _1));
BMUSBCapture::start_bm_thread();
- for (card_index = 0; card_index < num_cards; ++card_index) {
+ for (unsigned card_index = 0; card_index < num_cards; ++card_index) {
cards[card_index].queue_length_policy.reset(card_index);
- cards[card_index].capture->start_bm_capture();
}
// Set up stuff for NV12 conversion.
+ //
+ // Note: Due to the horizontally co-sited chroma/luma samples in H.264
+ // (chroma position is left for horizontal and center for vertical),
+ // we need to be a bit careful in our subsampling. A diagram will make
+ // this clearer, showing some luma and chroma samples:
+ //
+ // a b c d
+ // +---+---+---+---+
+ // | | | | |
+ // | Y | Y | Y | Y |
+ // | | | | |
+ // +---+---+---+---+
+ //
+ // +-------+-------+
+ // | | |
+ // | C | C |
+ // | | |
+ // +-------+-------+
+ //
+ // Clearly, the rightmost chroma sample here needs to be equivalent to
+ // b/4 + c/2 + d/4. (We could also implement more sophisticated filters,
+ // of course, but as long as the upsampling is not going to be equally
+ // sophisticated, it's probably not worth it.) If we sample once with
+ // no mipmapping, we get just c, i.e., no actual filtering in the
+ // horizontal direction. (For the vertical direction, we can just
+ // sample in the middle to get the right filtering.) One could imagine
+ // we could use mipmapping (assuming we can create mipmaps cheaply),
+ // but then, what we'd get is this:
+ //
+ // (a+b)/2 (c+d)/2
+ // +-------+-------+
+ // | | |
+ // | Y | Y |
+ // | | |
+ // +-------+-------+
+ //
+ // +-------+-------+
+ // | | |
+ // | C | C |
+ // | | |
+ // +-------+-------+
+ //
+ // which ends up sampling equally from a and b, which clearly isn't right. Instead,
+ // we need to do two (non-mipmapped) chroma samples, both hitting exactly in-between
+ // source pixels.
+ //
+ // Sampling in-between b and c gives us the sample (b+c)/2, and similarly for c and d.
+ // Taking the average of these gives us (b+c)/4 + (c+d)/4 = b/4 + c/2 + d/4, which is
+ // exactly what we want.
+ //
+ // See also http://www.poynton.com/PDFs/Merging_RGB_and_422.pdf, pages 6–7.
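+ //
+ // The shaders below implement this scheme: the vertex shader adds the two
+ // chroma offsets (set per frame in subsample_chroma()) to get tc0 and tc1,
+ // and the fragment shader averages the two bilinear samples, which should
+ // yield the b/4 + c/2 + d/4 weighting derived above.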
// Cb/Cr shader.
string cbcr_vert_shader =
" \n"
"in vec2 position; \n"
"in vec2 texcoord; \n"
- "out vec2 tc0; \n"
+ "out vec2 tc0, tc1; \n"
"uniform vec2 foo_chroma_offset_0; \n"
+ "uniform vec2 foo_chroma_offset_1; \n"
" \n"
"void main() \n"
"{ \n"
" gl_Position = vec4(2.0 * position.x - 1.0, 2.0 * position.y - 1.0, -1.0, 1.0); \n"
" vec2 flipped_tc = texcoord; \n"
" tc0 = flipped_tc + foo_chroma_offset_0; \n"
+ " tc1 = flipped_tc + foo_chroma_offset_1; \n"
"} \n";
string cbcr_frag_shader =
"#version 130 \n"
- "in vec2 tc0; \n"
+ "in vec2 tc0, tc1; \n"
"uniform sampler2D cbcr_tex; \n"
"out vec4 FragColor; \n"
"void main() { \n"
- " FragColor = texture(cbcr_tex, tc0); \n"
+ " FragColor = 0.5 * (texture(cbcr_tex, tc0) + texture(cbcr_tex, tc1)); \n"
"} \n";
vector<string> frag_shader_outputs;
cbcr_program_num = resource_pool->compile_glsl_program(cbcr_vert_shader, cbcr_frag_shader, frag_shader_outputs);
2.0f, 0.0f
};
cbcr_vbo = generate_vbo(2, GL_FLOAT, sizeof(vertices), vertices);
+ cbcr_texture_sampler_uniform = glGetUniformLocation(cbcr_program_num, "cbcr_tex");
cbcr_position_attribute_index = glGetAttribLocation(cbcr_program_num, "position");
cbcr_texcoord_attribute_index = glGetAttribLocation(cbcr_program_num, "texcoord");
- r128.init(2, OUTPUT_FREQUENCY);
- r128.integr_start();
-
- // hlen=16 is pretty low quality, but we use quite a bit of CPU otherwise,
- // and there's a limit to how important the peak meter is.
- peak_resampler.setup(OUTPUT_FREQUENCY, OUTPUT_FREQUENCY * 4, /*num_channels=*/2, /*hlen=*/16, /*frel=*/1.0);
-
if (global_flags.enable_alsa_output) {
alsa.reset(new ALSAOutput(OUTPUT_FREQUENCY, /*num_channels=*/2));
}
card->is_fake_capture = is_fake_capture;
card->capture->set_frame_callback(bind(&Mixer::bm_frame, this, card_index, _1, _2, _3, _4, _5, _6, _7));
if (card->frame_allocator == nullptr) {
- card->frame_allocator.reset(new PBOFrameAllocator(8 << 20, WIDTH, HEIGHT)); // 8 MB.
+ card->frame_allocator.reset(new PBOFrameAllocator(8 << 20, global_flags.width, global_flags.height)); // 8 MB.
}
card->capture->set_video_frame_allocator(card->frame_allocator.get());
if (card->surface == nullptr) {
card->capture->configure_card();
DeviceSpec device{InputSourceType::CAPTURE_CARD, card_index};
- audio_mixer.reset_device(device);
- audio_mixer.set_name(device, card->capture->get_description());
+ audio_mixer.reset_resampler(device);
+ audio_mixer.set_display_name(device, card->capture->get_description());
+ audio_mixer.trigger_state_changed_callback();
}
}
}
-float find_peak(const float *samples, size_t num_samples)
-{
- float m = fabs(samples[0]);
- for (size_t i = 1; i < num_samples; ++i) {
- m = max(m, fabs(samples[i]));
- }
- return m;
-}
-
-void deinterleave_samples(const vector<float> &in, vector<float> *out_l, vector<float> *out_r)
-{
- size_t num_samples = in.size() / 2;
- out_l->resize(num_samples);
- out_r->resize(num_samples);
-
- const float *inptr = in.data();
- float *lptr = &(*out_l)[0];
- float *rptr = &(*out_r)[0];
- for (size_t i = 0; i < num_samples; ++i) {
- *lptr++ = *inptr++;
- *rptr++ = *inptr++;
- }
-}
-
} // namespace
void Mixer::bm_frame(unsigned card_index, uint16_t timecode,
if (dropped_frames > MAX_FPS * 2) {
fprintf(stderr, "Card %d lost more than two seconds (or time code jumping around; from 0x%04x to 0x%04x), resetting resampler\n",
card_index, card->last_timecode, timecode);
- audio_mixer.reset_device(device);
+ audio_mixer.reset_resampler(device);
dropped_frames = 0;
} else if (dropped_frames > 0) {
// Insert silence as needed.
fprintf(stderr, "Card %d dropped %d frame(s) (before timecode 0x%04x), inserting silence.\n",
card_index, dropped_frames, timecode);
- audio_mixer.add_silence(device, silence_samples, dropped_frames, frame_length);
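+ // add_silence() can return false; keep retrying until the silence has
+ // actually been added.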
+ bool success;
+ do {
+ success = audio_mixer.add_silence(device, silence_samples, dropped_frames, frame_length);
+ } while (!success);
}
audio_mixer.add_audio(device, audio_frame.data + audio_offset, num_samples, audio_format, frame_length);
new_frame.interlaced = video_format.interlaced;
new_frame.upload_func = upload_func;
new_frame.dropped_frames = dropped_frames;
+ new_frame.received_timestamp = video_frame.received_timestamp; // Ignore the audio timestamp.
card->new_frames.push(move(new_frame));
card->new_frames_changed.notify_all();
}
exit(1);
}
+ // Start the actual capture. (We don't want to do it before we're actually ready
+ // to process output frames.)
+ for (unsigned card_index = 0; card_index < num_cards; ++card_index) {
+ cards[card_index].capture->start_bm_capture();
+ }
+
steady_clock::time_point start, now;
start = steady_clock::now();
int stats_dropped_frames = 0;
while (!should_quit) {
- CaptureCard::NewFrame new_frames[MAX_CARDS];
- bool has_new_frame[MAX_CARDS] = { false };
- int num_samples[MAX_CARDS] = { 0 };
+ CaptureCard::NewFrame new_frames[MAX_VIDEO_CARDS];
+ bool has_new_frame[MAX_VIDEO_CARDS] = { false };
+ int num_samples[MAX_VIDEO_CARDS] = { 0 };
unsigned master_card_index = theme->map_signal(master_clock_channel);
assert(master_card_index < num_cards);
get_one_frame_from_each_card(master_card_index, new_frames, has_new_frame, num_samples);
schedule_audio_resampling_tasks(new_frames[master_card_index].dropped_frames, num_samples[master_card_index], new_frames[master_card_index].length);
stats_dropped_frames += new_frames[master_card_index].dropped_frames;
- send_audio_level_callback();
handle_hotplugged_cards();
resource_pool->clean_context();
}
-void Mixer::get_one_frame_from_each_card(unsigned master_card_index, CaptureCard::NewFrame new_frames[MAX_CARDS], bool has_new_frame[MAX_CARDS], int num_samples[MAX_CARDS])
+void Mixer::get_one_frame_from_each_card(unsigned master_card_index, CaptureCard::NewFrame new_frames[MAX_VIDEO_CARDS], bool has_new_frame[MAX_VIDEO_CARDS], int num_samples[MAX_VIDEO_CARDS])
{
start:
// The first card is the master timer, so wait for it to have a new frame.
CaptureCard *card = &cards[card_index];
if (card->capture->get_disconnected()) {
fprintf(stderr, "Card %u went away, replacing with a fake card.\n", card_index);
- FakeCapture *capture = new FakeCapture(WIDTH, HEIGHT, FAKE_FPS, OUTPUT_FREQUENCY, card_index, global_flags.fake_cards_audio);
+ FakeCapture *capture = new FakeCapture(global_flags.width, global_flags.height, FAKE_FPS, OUTPUT_FREQUENCY, card_index, global_flags.fake_cards_audio);
configure_card(card_index, capture, /*is_fake_capture=*/true);
card->queue_length_policy.reset(card_index);
card->capture->start_bm_capture();
void Mixer::render_one_frame(int64_t duration)
{
// Get the main chain from the theme, and set its state immediately.
- Theme::Chain theme_main_chain = theme->get_chain(0, pts(), WIDTH, HEIGHT, input_state);
+ Theme::Chain theme_main_chain = theme->get_chain(0, pts(), global_flags.width, global_flags.height, input_state);
EffectChain *chain = theme_main_chain.chain;
theme_main_chain.setup_chain();
//theme_main_chain.chain->enable_phase_timing(true);
assert(got_frame);
// Render main chain.
- GLuint cbcr_full_tex = resource_pool->create_2d_texture(GL_RG8, WIDTH, HEIGHT);
- GLuint rgba_tex = resource_pool->create_2d_texture(GL_RGB565, WIDTH, HEIGHT); // Saves texture bandwidth, although dithering gets messed up.
+ GLuint cbcr_full_tex = resource_pool->create_2d_texture(GL_RG8, global_flags.width, global_flags.height);
+ GLuint rgba_tex = resource_pool->create_2d_texture(GL_RGB565, global_flags.width, global_flags.height); // Saves texture bandwidth, although dithering gets messed up.
GLuint fbo = resource_pool->create_fbo(y_tex, cbcr_full_tex, rgba_tex);
check_error();
- chain->render_to_fbo(fbo, WIDTH, HEIGHT);
+ chain->render_to_fbo(fbo, global_flags.width, global_flags.height);
resource_pool->release_fbo(fbo);
subsample_chroma(cbcr_full_tex, cbcr_tex);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
- const int64_t av_delay = TIMEBASE / 10; // Corresponds to the fixed delay in resampling_queue.h. TODO: Make less hard-coded.
+ const int64_t av_delay = lrint(global_flags.audio_queue_length_ms * 0.001 * TIMEBASE); // Corresponds to the delay in ResamplingQueue.
RefCountedGLsync fence = video_encoder->end_frame(pts_int + av_delay, duration, theme_main_chain.input_frames);
// The live frame just shows the RGBA texture we just rendered.
// Set up preview and any additional channels.
for (int i = 1; i < theme->get_num_channels() + 2; ++i) {
DisplayFrame display_frame;
- Theme::Chain chain = theme->get_chain(i, pts(), WIDTH, HEIGHT, input_state); // FIXME: dimensions
+ Theme::Chain chain = theme->get_chain(i, pts(), global_flags.width, global_flags.height, input_state); // FIXME: dimensions
display_frame.chain = chain.chain;
display_frame.setup_chain = chain.setup_chain;
display_frame.ready_fence = fence;
}
}
-void Mixer::send_audio_level_callback()
-{
- if (audio_level_callback == nullptr) {
- return;
- }
-
- unique_lock<mutex> lock(audio_measure_mutex);
- double loudness_s = r128.loudness_S();
- double loudness_i = r128.integrated();
- double loudness_range_low = r128.range_min();
- double loudness_range_high = r128.range_max();
-
- audio_level_callback(loudness_s, to_db(peak),
- loudness_i, loudness_range_low, loudness_range_high,
- audio_mixer.get_gain_staging_db(),
- audio_mixer.get_final_makeup_gain_db(),
- correlation.get_correlation());
-}
-
void Mixer::audio_thread_func()
{
while (!should_quit) {
ResamplingQueue::RateAdjustmentPolicy rate_adjustment_policy =
task.adjust_rate ? ResamplingQueue::ADJUST_RATE : ResamplingQueue::DO_NOT_ADJUST_RATE;
- process_audio_one_frame(task.pts_int, task.num_samples, rate_adjustment_policy);
- }
-}
-
-void Mixer::process_audio_one_frame(int64_t frame_pts_int, int num_samples, ResamplingQueue::RateAdjustmentPolicy rate_adjustment_policy)
-{
- vector<float> samples_out = audio_mixer.get_output(double(frame_pts_int) / TIMEBASE, num_samples, rate_adjustment_policy);
-
- // Upsample 4x to find interpolated peak.
- peak_resampler.inp_data = samples_out.data();
- peak_resampler.inp_count = samples_out.size() / 2;
-
- vector<float> interpolated_samples_out;
- interpolated_samples_out.resize(samples_out.size());
- {
- unique_lock<mutex> lock(audio_measure_mutex);
-
- while (peak_resampler.inp_count > 0) { // About four iterations.
- peak_resampler.out_data = &interpolated_samples_out[0];
- peak_resampler.out_count = interpolated_samples_out.size() / 2;
- peak_resampler.process();
- size_t out_stereo_samples = interpolated_samples_out.size() / 2 - peak_resampler.out_count;
- peak = max<float>(peak, find_peak(interpolated_samples_out.data(), out_stereo_samples * 2));
- peak_resampler.out_data = nullptr;
+ vector<float> samples_out = audio_mixer.get_output(
+ double(task.pts_int) / TIMEBASE,
+ task.num_samples,
+ rate_adjustment_policy);
+
+ // Send the samples to the sound card, then add them to the output.
+ if (alsa) {
+ alsa->write(samples_out);
}
+ video_encoder->add_audio(task.pts_int, move(samples_out));
}
-
- // Find R128 levels and L/R correlation.
- vector<float> left, right;
- deinterleave_samples(samples_out, &left, &right);
- float *ptrs[] = { left.data(), right.data() };
- {
- unique_lock<mutex> lock(audio_measure_mutex);
- r128.process(left.size(), ptrs);
- audio_mixer.set_current_loudness(r128.loudness_M());
- correlation.process_samples(samples_out);
- }
-
- // Send the samples to the sound card.
- if (alsa) {
- alsa->write(samples_out);
- }
-
- // And finally add them to the output.
- video_encoder->add_audio(frame_pts_int, move(samples_out));
}
void Mixer::subsample_chroma(GLuint src_tex, GLuint dst_tex)
// Extract Cb/Cr.
GLuint fbo = resource_pool->create_fbo(dst_tex);
glBindFramebuffer(GL_FRAMEBUFFER, fbo);
- glViewport(0, 0, WIDTH/2, HEIGHT/2);
+ glViewport(0, 0, global_flags.width/2, global_flags.height/2);
check_error();
glUseProgram(cbcr_program_num);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
check_error();
- float chroma_offset_0[] = { -0.5f / WIDTH, 0.0f };
+ float chroma_offset_0[] = { -1.0f / global_flags.width, 0.0f };
+ float chroma_offset_1[] = { -0.0f / global_flags.width, 0.0f };
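+ // Because we render at half resolution, the unmodified texture coordinate
+ // already falls exactly between two source texels; tc1 samples there and
+ // tc0 one full source texel to the left, so both bilinear fetches should
+ // land halfway between source pixel pairs (see the constructor comment).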
set_uniform_vec2(cbcr_program_num, "foo", "chroma_offset_0", chroma_offset_0);
+ set_uniform_vec2(cbcr_program_num, "foo", "chroma_offset_1", chroma_offset_1);
+
+ glUniform1i(cbcr_texture_sampler_uniform, 0);
glBindBuffer(GL_ARRAY_BUFFER, cbcr_vbo);
check_error();
theme->channel_clicked(preview_num);
}
-void Mixer::reset_meters()
-{
- unique_lock<mutex> lock(audio_measure_mutex);
- peak_resampler.reset();
- peak = 0.0f;
- r128.reset();
- r128.integr_start();
- correlation.reset();
-}
-
void Mixer::start_mode_scanning(unsigned card_index)
{
assert(card_index < num_cards);