X-Git-Url: https://git.sesse.net/?a=blobdiff_plain;f=mixer.cpp;h=aabc67e543fdfade00bc018f86d2cdd837b5d266;hb=45aa08dd2f81037860fdd3c5d1115736f7acf1a8;hp=6e1e45d953762ba55523248ea9a38345fe669c23;hpb=0fff2c95c89541e5b23611962a00886c64c00daa;p=nageru diff --git a/mixer.cpp b/mixer.cpp index 6e1e45d..aabc67e 100644 --- a/mixer.cpp +++ b/mixer.cpp @@ -3,20 +3,23 @@ #include "mixer.h" #include +#include #include -#include #include #include #include #include +#include #include #include #include #include #include +#include #include #include #include +#include #include #include #include @@ -28,37 +31,31 @@ #include #include "bmusb/bmusb.h" +#include "bmusb/fake_capture.h" #include "context.h" +#include "db.h" +#include "decklink_capture.h" #include "defs.h" -#include "h264encode.h" +#include "disk_space_estimator.h" +#include "flags.h" #include "pbo_frame_allocator.h" #include "ref_counted_gl_sync.h" #include "timebase.h" +#include "video_encoder.h" class QOpenGLContext; using namespace movit; using namespace std; +using namespace std::chrono; using namespace std::placeholders; +using namespace bmusb; Mixer *global_mixer = nullptr; +bool uses_mlock = false; namespace { -void convert_fixed24_to_fp32(float *dst, size_t out_channels, const uint8_t *src, size_t in_channels, size_t num_samples) -{ - for (size_t i = 0; i < num_samples; ++i) { - for (size_t j = 0; j < out_channels; ++j) { - uint32_t s1 = *src++; - uint32_t s2 = *src++; - uint32_t s3 = *src++; - uint32_t s = s1 | (s1 << 8) | (s2 << 16) | (s3 << 24); - dst[i * out_channels + j] = int(s) * (1.0f / 4294967296.0f); - } - src += 3 * (in_channels - out_channels); - } -} - void insert_new_frame(RefCountedFrame frame, unsigned field_num, bool interlaced, unsigned card_index, InputState *input_state) { if (interlaced) { @@ -74,37 +71,42 @@ void insert_new_frame(RefCountedFrame frame, unsigned field_num, bool interlaced } } -string generate_local_dump_filename(int frame) +} // namespace + +void QueueLengthPolicy::update_policy(int queue_length) { - time_t now = time(NULL); - tm now_tm; - localtime_r(&now, &now_tm); - - char timestamp[256]; - strftime(timestamp, sizeof(timestamp), "%F-%T%z", &now_tm); - - // Use the frame number to disambiguate between two cuts starting - // on the same second. - char filename[256]; - snprintf(filename, sizeof(filename), "%s%s-f%02d%s", - LOCAL_DUMP_PREFIX, timestamp, frame % 100, LOCAL_DUMP_SUFFIX); - return filename; + if (queue_length < 0) { // Starvation. 
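+ // A negative queue length means the consumer needed a frame but none
+ // was queued. Only widen the safe limit (capped at five frames) if the
+ // queue actually reached the safe point since the last starvation, so
+ // that a single long stall cannot inflate the limit repeatedly.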
+ if (been_at_safe_point_since_last_starvation && safe_queue_length < 5) { + ++safe_queue_length; + fprintf(stderr, "Card %u: Starvation, increasing safe limit to %u frames\n", + card_index, safe_queue_length); + } + frames_with_at_least_one = 0; + been_at_safe_point_since_last_starvation = false; + return; + } + if (queue_length > 0) { + if (queue_length >= int(safe_queue_length)) { + been_at_safe_point_since_last_starvation = true; + } + if (++frames_with_at_least_one >= 1000 && safe_queue_length > 0) { + --safe_queue_length; + fprintf(stderr, "Card %u: Spare frames for more than 1000 frames, reducing safe limit to %u frames\n", + card_index, safe_queue_length); + frames_with_at_least_one = 0; + } + } else { + frames_with_at_least_one = 0; + } } -} // namespace - Mixer::Mixer(const QSurfaceFormat &format, unsigned num_cards) - : httpd(WIDTH, HEIGHT), + : httpd(), num_cards(num_cards), mixer_surface(create_surface(format)), h264_encoder_surface(create_surface(format)), - level_compressor(OUTPUT_FREQUENCY), - limiter(OUTPUT_FREQUENCY), - compressor(OUTPUT_FREQUENCY) + audio_mixer(num_cards) { - httpd.open_output_file(generate_local_dump_filename(/*frame=*/0).c_str()); - httpd.start(9095); - CHECK(init_movit(MOVIT_SHADER_DIR, MOVIT_DEBUG_OFF)); check_error(); @@ -113,9 +115,10 @@ Mixer::Mixer(const QSurfaceFormat &format, unsigned num_cards) movit_texel_subpixel_precision /= 2.0; resource_pool.reset(new ResourcePool); - theme.reset(new Theme("theme.lua", resource_pool.get(), num_cards)); + theme.reset(new Theme(global_flags.theme_filename, global_flags.theme_dirs, resource_pool.get(), num_cards)); for (unsigned i = 0; i < NUM_OUTPUTS; ++i) { output_channel[i].parent = this; + output_channel[i].channel = i; } ImageFormat inout_format; @@ -131,81 +134,157 @@ Mixer::Mixer(const QSurfaceFormat &format, unsigned num_cards) display_chain->set_dither_bits(0); // Don't bother. display_chain->finalize(); - h264_encoder.reset(new H264Encoder(h264_encoder_surface, WIDTH, HEIGHT, &httpd)); + video_encoder.reset(new VideoEncoder(resource_pool.get(), h264_encoder_surface, global_flags.va_display, WIDTH, HEIGHT, &httpd, global_disk_space_estimator)); - for (unsigned card_index = 0; card_index < num_cards; ++card_index) { - printf("Configuring card %d...\n", card_index); - CaptureCard *card = &cards[card_index]; - card->usb = new BMUSBCapture(card_index); - card->usb->set_frame_callback(bind(&Mixer::bm_frame, this, card_index, _1, _2, _3, _4, _5, _6, _7)); - card->frame_allocator.reset(new PBOFrameAllocator(8 << 20, WIDTH, HEIGHT)); // 8 MB. - card->usb->set_video_frame_allocator(card->frame_allocator.get()); - card->surface = create_surface(format); - card->usb->set_dequeue_thread_callbacks( - [card]{ - eglBindAPI(EGL_OPENGL_API); - card->context = create_context(card->surface); - if (!make_current(card->context, card->surface)) { - printf("failed to create bmusb context\n"); - exit(1); + // Start listening for clients only once VideoEncoder has written its header, if any. + httpd.start(9095); + + // First try initializing the then PCI devices, then USB, then + // fill up with fake cards until we have the desired number of cards. 
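+ // Card indices are handed out in that same order, so DeckLink cards always
+ // get the lowest indices, then USB cards, with fake cards filling the rest.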
+ unsigned num_pci_devices = 0; + unsigned card_index = 0; + + { + IDeckLinkIterator *decklink_iterator = CreateDeckLinkIteratorInstance(); + if (decklink_iterator != nullptr) { + for ( ; card_index < num_cards; ++card_index) { + IDeckLink *decklink; + if (decklink_iterator->Next(&decklink) != S_OK) { + break; } - }, - [this]{ - resource_pool->clean_context(); - }); - card->resampling_queue.reset(new ResamplingQueue(OUTPUT_FREQUENCY, OUTPUT_FREQUENCY, 2)); - card->usb->configure_card(); + + configure_card(card_index, new DeckLinkCapture(decklink, card_index), /*is_fake_capture=*/false); + ++num_pci_devices; + } + decklink_iterator->Release(); + fprintf(stderr, "Found %u DeckLink PCI card(s).\n", num_pci_devices); + } else { + fprintf(stderr, "DeckLink drivers not found. Probing for USB cards only.\n"); + } + } + unsigned num_usb_devices = BMUSBCapture::num_cards(); + for (unsigned usb_card_index = 0; usb_card_index < num_usb_devices && card_index < num_cards; ++usb_card_index, ++card_index) { + BMUSBCapture *capture = new BMUSBCapture(usb_card_index); + capture->set_card_disconnected_callback(bind(&Mixer::bm_hotplug_remove, this, card_index)); + configure_card(card_index, capture, /*is_fake_capture=*/false); + } + fprintf(stderr, "Found %u USB card(s).\n", num_usb_devices); + + unsigned num_fake_cards = 0; + for ( ; card_index < num_cards; ++card_index, ++num_fake_cards) { + FakeCapture *capture = new FakeCapture(WIDTH, HEIGHT, FAKE_FPS, OUTPUT_FREQUENCY, card_index, global_flags.fake_cards_audio); + configure_card(card_index, capture, /*is_fake_capture=*/true); } + if (num_fake_cards > 0) { + fprintf(stderr, "Initialized %u fake cards.\n", num_fake_cards); + } + + BMUSBCapture::set_card_connected_callback(bind(&Mixer::bm_hotplug_add, this, _1)); BMUSBCapture::start_bm_thread(); - for (unsigned card_index = 0; card_index < num_cards; ++card_index) { - cards[card_index].usb->start_bm_capture(); + for (card_index = 0; card_index < num_cards; ++card_index) { + cards[card_index].queue_length_policy.reset(card_index); + cards[card_index].capture->start_bm_capture(); } // Set up stuff for NV12 conversion. // Cb/Cr shader. - string cbcr_vert_shader = read_file("vs-cbcr.130.vert"); + string cbcr_vert_shader = + "#version 130 \n" + " \n" + "in vec2 position; \n" + "in vec2 texcoord; \n" + "out vec2 tc0; \n" + "uniform vec2 foo_chroma_offset_0; \n" + " \n" + "void main() \n" + "{ \n" + " // The result of glOrtho(0.0, 1.0, 0.0, 1.0, 0.0, 1.0) is: \n" + " // \n" + " // 2.000 0.000 0.000 -1.000 \n" + " // 0.000 2.000 0.000 -1.000 \n" + " // 0.000 0.000 -2.000 -1.000 \n" + " // 0.000 0.000 0.000 1.000 \n" + " gl_Position = vec4(2.0 * position.x - 1.0, 2.0 * position.y - 1.0, -1.0, 1.0); \n" + " vec2 flipped_tc = texcoord; \n" + " tc0 = flipped_tc + foo_chroma_offset_0; \n" + "} \n"; string cbcr_frag_shader = "#version 130 \n" "in vec2 tc0; \n" "uniform sampler2D cbcr_tex; \n" + "out vec4 FragColor; \n" "void main() { \n" - " gl_FragColor = texture2D(cbcr_tex, tc0); \n" + " FragColor = texture(cbcr_tex, tc0); \n" "} \n"; vector frag_shader_outputs; cbcr_program_num = resource_pool->compile_glsl_program(cbcr_vert_shader, cbcr_frag_shader, frag_shader_outputs); - r128.init(2, OUTPUT_FREQUENCY); - r128.integr_start(); - - locut.init(FILTER_HPF, 2); - - // hlen=16 is pretty low quality, but we use quite a bit of CPU otherwise, - // and there's a limit to how important the peak meter is. 
- peak_resampler.setup(OUTPUT_FREQUENCY, OUTPUT_FREQUENCY * 4, /*num_channels=*/2, /*hlen=*/16); + float vertices[] = { + 0.0f, 2.0f, + 0.0f, 0.0f, + 2.0f, 0.0f + }; + cbcr_vbo = generate_vbo(2, GL_FLOAT, sizeof(vertices), vertices); + cbcr_position_attribute_index = glGetAttribLocation(cbcr_program_num, "position"); + cbcr_texcoord_attribute_index = glGetAttribLocation(cbcr_program_num, "texcoord"); - alsa.reset(new ALSAOutput(OUTPUT_FREQUENCY, /*num_channels=*/2)); + if (global_flags.enable_alsa_output) { + alsa.reset(new ALSAOutput(OUTPUT_FREQUENCY, /*num_channels=*/2)); + } } Mixer::~Mixer() { resource_pool->release_glsl_program(cbcr_program_num); + glDeleteBuffers(1, &cbcr_vbo); BMUSBCapture::stop_bm_thread(); for (unsigned card_index = 0; card_index < num_cards; ++card_index) { { unique_lock lock(bmusb_mutex); cards[card_index].should_quit = true; // Unblock thread. - cards[card_index].new_data_ready_changed.notify_all(); + cards[card_index].new_frames_changed.notify_all(); } - cards[card_index].usb->stop_dequeue_thread(); + cards[card_index].capture->stop_dequeue_thread(); } - h264_encoder.reset(nullptr); + video_encoder.reset(nullptr); +} + +void Mixer::configure_card(unsigned card_index, CaptureInterface *capture, bool is_fake_capture) +{ + printf("Configuring card %d...\n", card_index); + + CaptureCard *card = &cards[card_index]; + if (card->capture != nullptr) { + card->capture->stop_dequeue_thread(); + delete card->capture; + } + card->capture = capture; + card->is_fake_capture = is_fake_capture; + card->capture->set_frame_callback(bind(&Mixer::bm_frame, this, card_index, _1, _2, _3, _4, _5, _6, _7)); + if (card->frame_allocator == nullptr) { + card->frame_allocator.reset(new PBOFrameAllocator(8 << 20, WIDTH, HEIGHT)); // 8 MB. + } + card->capture->set_video_frame_allocator(card->frame_allocator.get()); + if (card->surface == nullptr) { + card->surface = create_surface_with_same_format(mixer_surface); + } + while (!card->new_frames.empty()) card->new_frames.pop(); + card->fractional_samples = 0; + card->last_timecode = -1; + card->capture->configure_card(); + + DeviceSpec device{InputSourceType::CAPTURE_CARD, card_index}; + audio_mixer.reset_resampler(device); + audio_mixer.set_display_name(device, card->capture->get_description()); + audio_mixer.trigger_state_changed_callback(); } + namespace { int unwrap_timecode(uint16_t current_wrapped, int last) @@ -218,50 +297,41 @@ int unwrap_timecode(uint16_t current_wrapped, int last) } } -float find_peak(const float *samples, size_t num_samples) -{ - float m = fabs(samples[0]); - for (size_t i = 1; i < num_samples; ++i) { - m = max(m, fabs(samples[i])); - } - return m; -} - -void deinterleave_samples(const vector &in, vector *out_l, vector *out_r) -{ - size_t num_samples = in.size() / 2; - out_l->resize(num_samples); - out_r->resize(num_samples); - - const float *inptr = in.data(); - float *lptr = &(*out_l)[0]; - float *rptr = &(*out_r)[0]; - for (size_t i = 0; i < num_samples; ++i) { - *lptr++ = *inptr++; - *rptr++ = *inptr++; - } -} - } // namespace void Mixer::bm_frame(unsigned card_index, uint16_t timecode, - FrameAllocator::Frame video_frame, size_t video_offset, uint16_t video_format, - FrameAllocator::Frame audio_frame, size_t audio_offset, uint16_t audio_format) + FrameAllocator::Frame video_frame, size_t video_offset, VideoFormat video_format, + FrameAllocator::Frame audio_frame, size_t audio_offset, AudioFormat audio_format) { + DeviceSpec device{InputSourceType::CAPTURE_CARD, card_index}; CaptureCard *card = 
&cards[card_index]; - unsigned width, height, second_field_start, frame_rate_nom, frame_rate_den, extra_lines_top, extra_lines_bottom; - bool interlaced; + if (is_mode_scanning[card_index]) { + if (video_format.has_signal) { + // Found a stable signal, so stop scanning. + is_mode_scanning[card_index] = false; + } else { + static constexpr double switch_time_s = 0.5; // Should be enough time for the signal to stabilize. + steady_clock::time_point now = steady_clock::now(); + double sec_since_last_switch = duration(steady_clock::now() - last_mode_scan_change[card_index]).count(); + if (sec_since_last_switch > switch_time_s) { + // It isn't this mode; try the next one. + mode_scanlist_index[card_index]++; + mode_scanlist_index[card_index] %= mode_scanlist[card_index].size(); + cards[card_index].capture->set_video_mode(mode_scanlist[card_index][mode_scanlist_index[card_index]]); + last_mode_scan_change[card_index] = now; + } + } + } - decode_video_format(video_format, &width, &height, &second_field_start, &extra_lines_top, &extra_lines_bottom, - &frame_rate_nom, &frame_rate_den, &interlaced); // Ignore return value for now. - int64_t frame_length = TIMEBASE * frame_rate_den / frame_rate_nom; + int64_t frame_length = int64_t(TIMEBASE) * video_format.frame_rate_den / video_format.frame_rate_nom; + assert(frame_length > 0); - size_t num_samples = (audio_frame.len >= audio_offset) ? (audio_frame.len - audio_offset) / 8 / 3 : 0; + size_t num_samples = (audio_frame.len > audio_offset) ? (audio_frame.len - audio_offset) / audio_format.num_channels / (audio_format.bits_per_sample / 8) : 0; if (num_samples > OUTPUT_FREQUENCY / 10) { printf("Card %d: Dropping frame with implausible audio length (len=%d, offset=%d) [timecode=0x%04x video_len=%d video_offset=%d video_format=%x)\n", card_index, int(audio_frame.len), int(audio_offset), - timecode, int(video_frame.len), int(video_offset), video_format); + timecode, int(video_frame.len), int(video_offset), video_format.id); if (video_frame.owner) { video_frame.owner->release_frame(video_frame); } @@ -271,66 +341,41 @@ void Mixer::bm_frame(unsigned card_index, uint16_t timecode, return; } - int64_t local_pts = card->next_local_pts; int dropped_frames = 0; if (card->last_timecode != -1) { dropped_frames = unwrap_timecode(timecode, card->last_timecode) - card->last_timecode - 1; } - // Convert the audio to stereo fp32 and add it. - vector audio; - audio.resize(num_samples * 2); - convert_fixed24_to_fp32(&audio[0], 2, audio_frame.data + audio_offset, 8, num_samples); - - // Add the audio. - { - unique_lock lock(card->audio_mutex); - - // Number of samples per frame if we need to insert silence. - // (Could be nonintegral, but resampling will save us then.) - int silence_samples = OUTPUT_FREQUENCY * frame_rate_den / frame_rate_nom; - - if (dropped_frames > MAX_FPS * 2) { - fprintf(stderr, "Card %d lost more than two seconds (or time code jumping around; from 0x%04x to 0x%04x), resetting resampler\n", - card_index, card->last_timecode, timecode); - card->resampling_queue.reset(new ResamplingQueue(OUTPUT_FREQUENCY, OUTPUT_FREQUENCY, 2)); - dropped_frames = 0; - } else if (dropped_frames > 0) { - // Insert silence as needed. 
- fprintf(stderr, "Card %d dropped %d frame(s) (before timecode 0x%04x), inserting silence.\n", - card_index, dropped_frames, timecode); - vector silence(silence_samples * 2, 0.0f); - for (int i = 0; i < dropped_frames; ++i) { - card->resampling_queue->add_input_samples(local_pts / double(TIMEBASE), silence.data(), silence_samples); - // Note that if the format changed in the meantime, we have - // no way of detecting that; we just have to assume the frame length - // is always the same. - local_pts += frame_length; - } - } - if (num_samples == 0) { - audio.resize(silence_samples * 2); - num_samples = silence_samples; - } - card->resampling_queue->add_input_samples(local_pts / double(TIMEBASE), audio.data(), num_samples); - card->next_local_pts = local_pts + frame_length; + // Number of samples per frame if we need to insert silence. + // (Could be nonintegral, but resampling will save us then.) + const int silence_samples = OUTPUT_FREQUENCY * video_format.frame_rate_den / video_format.frame_rate_nom; + + if (dropped_frames > MAX_FPS * 2) { + fprintf(stderr, "Card %d lost more than two seconds (or time code jumping around; from 0x%04x to 0x%04x), resetting resampler\n", + card_index, card->last_timecode, timecode); + audio_mixer.reset_resampler(device); + dropped_frames = 0; + } else if (dropped_frames > 0) { + // Insert silence as needed. + fprintf(stderr, "Card %d dropped %d frame(s) (before timecode 0x%04x), inserting silence.\n", + card_index, dropped_frames, timecode); + + bool success; + do { + success = audio_mixer.add_silence(device, silence_samples, dropped_frames, frame_length); + } while (!success); } - card->last_timecode = timecode; + audio_mixer.add_audio(device, audio_frame.data + audio_offset, num_samples, audio_format, frame_length); // Done with the audio, so release it. if (audio_frame.owner) { audio_frame.owner->release_frame(audio_frame); } - { - // Wait until the previous frame was consumed. - unique_lock lock(bmusb_mutex); - card->new_data_ready_changed.wait(lock, [card]{ return !card->new_data_ready || card->should_quit; }); - if (card->should_quit) return; - } + card->last_timecode = timecode; - size_t expected_length = width * (height + extra_lines_top + extra_lines_bottom) * 2; + size_t expected_length = video_format.width * (video_format.height + video_format.extra_lines_top + video_format.extra_lines_bottom) * 2; if (video_frame.len - video_offset == 0 || video_frame.len - video_offset != expected_length) { if (video_frame.len != 0) { @@ -345,83 +390,102 @@ void Mixer::bm_frame(unsigned card_index, uint16_t timecode, // so that pts can go up accordingly. { unique_lock lock(bmusb_mutex); - card->new_data_ready = true; - card->new_frame = RefCountedFrame(FrameAllocator::Frame()); - card->new_frame_length = frame_length; - card->new_frame_interlaced = false; - card->new_data_ready_fence = nullptr; - card->dropped_frames = dropped_frames; - card->new_data_ready_changed.notify_all(); + CaptureCard::NewFrame new_frame; + new_frame.frame = RefCountedFrame(FrameAllocator::Frame()); + new_frame.length = frame_length; + new_frame.interlaced = false; + new_frame.dropped_frames = dropped_frames; + card->new_frames.push(move(new_frame)); + card->new_frames_changed.notify_all(); } return; } PBOFrameAllocator::Userdata *userdata = (PBOFrameAllocator::Userdata *)video_frame.userdata; - unsigned num_fields = interlaced ? 2 : 1; - timespec frame_upload_start; - if (interlaced) { + unsigned num_fields = video_format.interlaced ? 
2 : 1; + steady_clock::time_point frame_upload_start; + if (video_format.interlaced) { // Send the two fields along as separate frames; the other side will need to add // a deinterlacer to actually get this right. - assert(height % 2 == 0); - height /= 2; + assert(video_format.height % 2 == 0); + video_format.height /= 2; assert(frame_length % 2 == 0); frame_length /= 2; num_fields = 2; - clock_gettime(CLOCK_MONOTONIC, &frame_upload_start); + frame_upload_start = steady_clock::now(); } - userdata->last_interlaced = interlaced; - userdata->last_frame_rate_nom = frame_rate_nom; - userdata->last_frame_rate_den = frame_rate_den; - RefCountedFrame new_frame(video_frame); + userdata->last_interlaced = video_format.interlaced; + userdata->last_has_signal = video_format.has_signal; + userdata->last_is_connected = video_format.is_connected; + userdata->last_frame_rate_nom = video_format.frame_rate_nom; + userdata->last_frame_rate_den = video_format.frame_rate_den; + RefCountedFrame frame(video_frame); // Upload the textures. - size_t cbcr_width = width / 2; + size_t cbcr_width = video_format.width / 2; size_t cbcr_offset = video_offset / 2; size_t y_offset = video_frame.size / 2 + video_offset / 2; for (unsigned field = 0; field < num_fields; ++field) { - unsigned field_start_line = (field == 1) ? second_field_start : extra_lines_top + field * (height + 22); - - if (userdata->tex_y[field] == 0 || - userdata->tex_cbcr[field] == 0 || - width != userdata->last_width[field] || - height != userdata->last_height[field]) { - // We changed resolution since last use of this texture, so we need to create - // a new object. Note that this each card has its own PBOFrameAllocator, - // we don't need to worry about these flip-flopping between resolutions. + // Put the actual texture upload in a lambda that is executed in the main thread. + // It is entirely possible to do this in the same thread (and it might even be + // faster, depending on the GPU and driver), but it appears to be trickling + // driver bugs very easily. + // + // Note that this means we must hold on to the actual frame data in + // until the upload command is run, but we hold on to much longer than that + // (in fact, all the way until we no longer use the texture in rendering). + auto upload_func = [field, video_format, y_offset, cbcr_offset, cbcr_width, userdata]() { + unsigned field_start_line = (field == 1) ? video_format.second_field_start : video_format.extra_lines_top + field * (video_format.height + 22); + + if (userdata->tex_y[field] == 0 || + userdata->tex_cbcr[field] == 0 || + video_format.width != userdata->last_width[field] || + video_format.height != userdata->last_height[field]) { + // We changed resolution since last use of this texture, so we need to create + // a new object. Note that this each card has its own PBOFrameAllocator, + // we don't need to worry about these flip-flopping between resolutions. 
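+ // glTexImage2D() with a null data pointer only (re)allocates storage at the
+ // new size; the actual pixels are uploaded further down with
+ // glTexSubImage2D() from the mapped PBO.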
+ glBindTexture(GL_TEXTURE_2D, userdata->tex_cbcr[field]); + check_error(); + glTexImage2D(GL_TEXTURE_2D, 0, GL_RG8, cbcr_width, video_format.height, 0, GL_RG, GL_UNSIGNED_BYTE, nullptr); + check_error(); + glBindTexture(GL_TEXTURE_2D, userdata->tex_y[field]); + check_error(); + glTexImage2D(GL_TEXTURE_2D, 0, GL_R8, video_format.width, video_format.height, 0, GL_RED, GL_UNSIGNED_BYTE, nullptr); + check_error(); + userdata->last_width[field] = video_format.width; + userdata->last_height[field] = video_format.height; + } + + GLuint pbo = userdata->pbo; + check_error(); + glBindBuffer(GL_PIXEL_UNPACK_BUFFER, pbo); + check_error(); + + size_t field_y_start = y_offset + video_format.width * field_start_line; + size_t field_cbcr_start = cbcr_offset + cbcr_width * field_start_line * sizeof(uint16_t); + + if (global_flags.flush_pbos) { + glFlushMappedBufferRange(GL_PIXEL_UNPACK_BUFFER, field_y_start, video_format.width * video_format.height); + check_error(); + glFlushMappedBufferRange(GL_PIXEL_UNPACK_BUFFER, field_cbcr_start, cbcr_width * video_format.height * sizeof(uint16_t)); + check_error(); + } + glBindTexture(GL_TEXTURE_2D, userdata->tex_cbcr[field]); check_error(); - glTexImage2D(GL_TEXTURE_2D, 0, GL_RG8, cbcr_width, height, 0, GL_RG, GL_UNSIGNED_BYTE, nullptr); + glTexSubImage2D(GL_TEXTURE_2D, 0, 0, 0, cbcr_width, video_format.height, GL_RG, GL_UNSIGNED_BYTE, BUFFER_OFFSET(field_cbcr_start)); check_error(); glBindTexture(GL_TEXTURE_2D, userdata->tex_y[field]); check_error(); - glTexImage2D(GL_TEXTURE_2D, 0, GL_R8, width, height, 0, GL_RED, GL_UNSIGNED_BYTE, nullptr); + glTexSubImage2D(GL_TEXTURE_2D, 0, 0, 0, video_format.width, video_format.height, GL_RED, GL_UNSIGNED_BYTE, BUFFER_OFFSET(field_y_start)); check_error(); - userdata->last_width[field] = width; - userdata->last_height[field] = height; - } - - GLuint pbo = userdata->pbo; - check_error(); - glBindBuffer(GL_PIXEL_UNPACK_BUFFER_ARB, pbo); - check_error(); - glMemoryBarrier(GL_CLIENT_MAPPED_BUFFER_BARRIER_BIT); - check_error(); - - glBindTexture(GL_TEXTURE_2D, userdata->tex_cbcr[field]); - check_error(); - glTexSubImage2D(GL_TEXTURE_2D, 0, 0, 0, cbcr_width, height, GL_RG, GL_UNSIGNED_BYTE, BUFFER_OFFSET(cbcr_offset + cbcr_width * field_start_line * sizeof(uint16_t))); - check_error(); - glBindTexture(GL_TEXTURE_2D, userdata->tex_y[field]); - check_error(); - glTexSubImage2D(GL_TEXTURE_2D, 0, 0, 0, width, height, GL_RED, GL_UNSIGNED_BYTE, BUFFER_OFFSET(y_offset + width * field_start_line)); - check_error(); - glBindTexture(GL_TEXTURE_2D, 0); - check_error(); - GLsync fence = glFenceSync(GL_SYNC_GPU_COMMANDS_COMPLETE, /*flags=*/0); - check_error(); - assert(fence != nullptr); + glBindTexture(GL_TEXTURE_2D, 0); + check_error(); + glBindBuffer(GL_PIXEL_UNPACK_BUFFER, 0); + check_error(); + }; if (field == 1) { // Don't upload the second field as fast as we can; wait until @@ -430,38 +494,37 @@ void Mixer::bm_frame(unsigned card_index, uint16_t timecode, // against the video display, although the latter is not as critical.) // This requires our system clock to be reasonably close to the // video clock, but that's not an unreasonable assumption. 
- timespec second_field_start; - second_field_start.tv_nsec = frame_upload_start.tv_nsec + - frame_length * 1000000000 / TIMEBASE; - second_field_start.tv_sec = frame_upload_start.tv_sec + - second_field_start.tv_nsec / 1000000000; - second_field_start.tv_nsec %= 1000000000; - - while (clock_nanosleep(CLOCK_MONOTONIC, TIMER_ABSTIME, - &second_field_start, nullptr) == -1 && - errno == EINTR) ; + steady_clock::time_point second_field_start = frame_upload_start + + nanoseconds(frame_length * 1000000000 / TIMEBASE); + this_thread::sleep_until(second_field_start); } { unique_lock lock(bmusb_mutex); - card->new_data_ready = true; - card->new_frame = new_frame; - card->new_frame_length = frame_length; - card->new_frame_field = field; - card->new_frame_interlaced = interlaced; - card->new_data_ready_fence = fence; - card->dropped_frames = dropped_frames; - card->new_data_ready_changed.notify_all(); - - if (field != num_fields - 1) { - // Wait until the previous frame was consumed. - card->new_data_ready_changed.wait(lock, [card]{ return !card->new_data_ready || card->should_quit; }); - if (card->should_quit) return; - } + CaptureCard::NewFrame new_frame; + new_frame.frame = frame; + new_frame.length = frame_length; + new_frame.field = field; + new_frame.interlaced = video_format.interlaced; + new_frame.upload_func = upload_func; + new_frame.dropped_frames = dropped_frames; + card->new_frames.push(move(new_frame)); + card->new_frames_changed.notify_all(); } } } +void Mixer::bm_hotplug_add(libusb_device *dev) +{ + lock_guard lock(hotplug_mutex); + hotplugged_cards.push_back(dev); +} + +void Mixer::bm_hotplug_remove(unsigned card_index) +{ + cards[card_index].new_frames_changed.notify_all(); +} + void Mixer::thread_func() { eglBindAPI(EGL_OPENGL_API); @@ -471,186 +534,106 @@ void Mixer::thread_func() exit(1); } - struct timespec start, now; - clock_gettime(CLOCK_MONOTONIC, &start); + steady_clock::time_point start, now; + start = steady_clock::now(); int frame = 0; int stats_dropped_frames = 0; while (!should_quit) { - CaptureCard card_copy[MAX_CARDS]; - int num_samples[MAX_CARDS]; + CaptureCard::NewFrame new_frames[MAX_VIDEO_CARDS]; + bool has_new_frame[MAX_VIDEO_CARDS] = { false }; + int num_samples[MAX_VIDEO_CARDS] = { 0 }; - { - unique_lock lock(bmusb_mutex); + unsigned master_card_index = theme->map_signal(master_clock_channel); + assert(master_card_index < num_cards); - // The first card is the master timer, so wait for it to have a new frame. - // TODO: Make configurable, and with a timeout. 
- cards[0].new_data_ready_changed.wait(lock, [this]{ return cards[0].new_data_ready; }); - - for (unsigned card_index = 0; card_index < num_cards; ++card_index) { - CaptureCard *card = &cards[card_index]; - card_copy[card_index].usb = card->usb; - card_copy[card_index].new_data_ready = card->new_data_ready; - card_copy[card_index].new_frame = card->new_frame; - card_copy[card_index].new_frame_length = card->new_frame_length; - card_copy[card_index].new_frame_field = card->new_frame_field; - card_copy[card_index].new_frame_interlaced = card->new_frame_interlaced; - card_copy[card_index].new_data_ready_fence = card->new_data_ready_fence; - card_copy[card_index].dropped_frames = card->dropped_frames; - card->new_data_ready = false; - card->new_data_ready_changed.notify_all(); - - int num_samples_times_timebase = OUTPUT_FREQUENCY * card->new_frame_length + card->fractional_samples; - num_samples[card_index] = num_samples_times_timebase / TIMEBASE; - card->fractional_samples = num_samples_times_timebase % TIMEBASE; - assert(num_samples[card_index] >= 0); - } - } + get_one_frame_from_each_card(master_card_index, new_frames, has_new_frame, num_samples); + schedule_audio_resampling_tasks(new_frames[master_card_index].dropped_frames, num_samples[master_card_index], new_frames[master_card_index].length); + stats_dropped_frames += new_frames[master_card_index].dropped_frames; - // Resample the audio as needed, including from previously dropped frames. - for (unsigned frame_num = 0; frame_num < card_copy[0].dropped_frames + 1; ++frame_num) { - { - // Signal to the audio thread to process this frame. - unique_lock lock(audio_mutex); - audio_task_queue.push(AudioTask{pts_int, num_samples[0]}); - audio_task_queue_changed.notify_one(); - } - if (frame_num != card_copy[0].dropped_frames) { - // For dropped frames, increase the pts. Note that if the format changed - // in the meantime, we have no way of detecting that; we just have to - // assume the frame length is always the same. - ++stats_dropped_frames; - pts_int += card_copy[0].new_frame_length; - } - } - - if (audio_level_callback != nullptr) { - unique_lock lock(r128_mutex); - double loudness_s = r128.loudness_S(); - double loudness_i = r128.integrated(); - double loudness_range_low = r128.range_min(); - double loudness_range_high = r128.range_max(); + handle_hotplugged_cards(); - audio_level_callback(loudness_s, 20.0 * log10(peak), - loudness_i, loudness_range_low, loudness_range_high, - last_gain_staging_db); - } - - for (unsigned card_index = 1; card_index < num_cards; ++card_index) { - if (card_copy[card_index].new_data_ready && card_copy[card_index].new_frame->len == 0) { - ++card_copy[card_index].dropped_frames; + for (unsigned card_index = 0; card_index < num_cards; ++card_index) { + if (card_index == master_card_index || !has_new_frame[card_index]) { + continue; + } + if (new_frames[card_index].frame->len == 0) { + ++new_frames[card_index].dropped_frames; } - if (card_copy[card_index].dropped_frames > 0) { + if (new_frames[card_index].dropped_frames > 0) { printf("Card %u dropped %d frames before this\n", - card_index, int(card_copy[card_index].dropped_frames)); + card_index, int(new_frames[card_index].dropped_frames)); } } // If the first card is reporting a corrupted or otherwise dropped frame, // just increase the pts (skipping over this frame) and don't try to compute anything new. 
- if (card_copy[0].new_frame->len == 0) { + if (new_frames[master_card_index].frame->len == 0) { ++stats_dropped_frames; - pts_int += card_copy[0].new_frame_length; + pts_int += new_frames[master_card_index].length; continue; } for (unsigned card_index = 0; card_index < num_cards; ++card_index) { - CaptureCard *card = &card_copy[card_index]; - if (!card->new_data_ready || card->new_frame->len == 0) + if (!has_new_frame[card_index] || new_frames[card_index].frame->len == 0) continue; - assert(card->new_frame != nullptr); - insert_new_frame(card->new_frame, card->new_frame_field, card->new_frame_interlaced, card_index, &input_state); + CaptureCard::NewFrame *new_frame = &new_frames[card_index]; + assert(new_frame->frame != nullptr); + insert_new_frame(new_frame->frame, new_frame->field, new_frame->interlaced, card_index, &input_state); check_error(); - // The new texture might still be uploaded, - // tell the GPU to wait until it's there. - if (card->new_data_ready_fence) { - glWaitSync(card->new_data_ready_fence, /*flags=*/0, GL_TIMEOUT_IGNORED); - check_error(); - glDeleteSync(card->new_data_ready_fence); - check_error(); + // The new texture might need uploading before use. + if (new_frame->upload_func) { + new_frame->upload_func(); + new_frame->upload_func = nullptr; } } - // Get the main chain from the theme, and set its state immediately. - Theme::Chain theme_main_chain = theme->get_chain(0, pts(), WIDTH, HEIGHT, input_state); - EffectChain *chain = theme_main_chain.chain; - theme_main_chain.setup_chain(); - //theme_main_chain.chain->enable_phase_timing(true); - - GLuint y_tex, cbcr_tex; - bool got_frame = h264_encoder->begin_frame(&y_tex, &cbcr_tex); - assert(got_frame); - - // Render main chain. - GLuint cbcr_full_tex = resource_pool->create_2d_texture(GL_RG8, WIDTH, HEIGHT); - GLuint rgba_tex = resource_pool->create_2d_texture(GL_RGB565, WIDTH, HEIGHT); // Saves texture bandwidth, although dithering gets messed up. - GLuint fbo = resource_pool->create_fbo(y_tex, cbcr_full_tex, rgba_tex); - check_error(); - chain->render_to_fbo(fbo, WIDTH, HEIGHT); - resource_pool->release_fbo(fbo); - - subsample_chroma(cbcr_full_tex, cbcr_tex); - resource_pool->release_2d_texture(cbcr_full_tex); - - // Set the right state for rgba_tex. - glBindFramebuffer(GL_FRAMEBUFFER, 0); - glBindTexture(GL_TEXTURE_2D, rgba_tex); - glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR); - glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE); - glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE); - - RefCountedGLsync fence(GL_SYNC_GPU_COMMANDS_COMPLETE, /*flags=*/0); - check_error(); - - const int64_t av_delay = TIMEBASE / 10; // Corresponds to the fixed delay in resampling_queue.h. TODO: Make less hard-coded. - h264_encoder->end_frame(fence, pts_int + av_delay, theme_main_chain.input_frames); + int64_t frame_duration = new_frames[master_card_index].length; + render_one_frame(frame_duration); ++frame; - pts_int += card_copy[0].new_frame_length; - - // The live frame just shows the RGBA texture we just rendered. - // It owns rgba_tex now. - DisplayFrame live_frame; - live_frame.chain = display_chain.get(); - live_frame.setup_chain = [this, rgba_tex]{ - display_input->set_texture_num(rgba_tex); - }; - live_frame.ready_fence = fence; - live_frame.input_frames = {}; - live_frame.temp_textures = { rgba_tex }; - output_channel[OUTPUT_LIVE].output_frame(live_frame); - - // Set up preview and any additional channels. 
- for (int i = 1; i < theme->get_num_channels() + 2; ++i) { - DisplayFrame display_frame; - Theme::Chain chain = theme->get_chain(i, pts(), WIDTH, HEIGHT, input_state); // FIXME: dimensions - display_frame.chain = chain.chain; - display_frame.setup_chain = chain.setup_chain; - display_frame.ready_fence = fence; - display_frame.input_frames = chain.input_frames; - display_frame.temp_textures = {}; - output_channel[i].output_frame(display_frame); - } - - clock_gettime(CLOCK_MONOTONIC, &now); - double elapsed = now.tv_sec - start.tv_sec + - 1e-9 * (now.tv_nsec - start.tv_nsec); + pts_int += frame_duration; + + now = steady_clock::now(); + double elapsed = duration(now - start).count(); if (frame % 100 == 0) { - printf("%d frames (%d dropped) in %.3f seconds = %.1f fps (%.1f ms/frame)\n", + printf("%d frames (%d dropped) in %.3f seconds = %.1f fps (%.1f ms/frame)", frame, stats_dropped_frames, elapsed, frame / elapsed, 1e3 * elapsed / frame); // chain->print_phase_timing(); + + // Check our memory usage, to see if we are close to our mlockall() + // limit (if at all set). + rusage used; + if (getrusage(RUSAGE_SELF, &used) == -1) { + perror("getrusage(RUSAGE_SELF)"); + assert(false); + } + + if (uses_mlock) { + rlimit limit; + if (getrlimit(RLIMIT_MEMLOCK, &limit) == -1) { + perror("getrlimit(RLIMIT_MEMLOCK)"); + assert(false); + } + + printf(", using %ld / %ld MB lockable memory (%.1f%%)", + long(used.ru_maxrss / 1024), + long(limit.rlim_cur / 1048576), + float(100.0 * (used.ru_maxrss * 1024.0) / limit.rlim_cur)); + } else { + printf(", using %ld MB memory (not locked)", + long(used.ru_maxrss / 1024)); + } + + printf("\n"); } + if (should_cut.exchange(false)) { // Test and clear. - string filename = generate_local_dump_filename(frame); - printf("Starting new recording: %s\n", filename.c_str()); - h264_encoder->shutdown(); - httpd.close_output_file(); - httpd.open_output_file(filename.c_str()); - h264_encoder.reset(new H264Encoder(h264_encoder_surface, WIDTH, HEIGHT, &httpd)); + video_encoder->do_cut(frame); } #if 0 @@ -669,127 +652,222 @@ void Mixer::thread_func() resource_pool->clean_context(); } -void Mixer::audio_thread_func() +void Mixer::get_one_frame_from_each_card(unsigned master_card_index, CaptureCard::NewFrame new_frames[MAX_VIDEO_CARDS], bool has_new_frame[MAX_VIDEO_CARDS], int num_samples[MAX_VIDEO_CARDS]) { - while (!should_quit) { - AudioTask task; +start: + // The first card is the master timer, so wait for it to have a new frame. + // TODO: Add a timeout. + unique_lock lock(bmusb_mutex); + cards[master_card_index].new_frames_changed.wait(lock, [this, master_card_index]{ return !cards[master_card_index].new_frames.empty() || cards[master_card_index].capture->get_disconnected(); }); + + if (cards[master_card_index].new_frames.empty()) { + // We were woken up, but not due to a new frame. Deal with it + // and then restart. 
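+ // Given the wait predicate above, waking up without a frame means the
+ // master capture reported itself disconnected (see bm_hotplug_remove());
+ // handle_hotplugged_cards() swaps in a fake capture before we retry.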
+ assert(cards[master_card_index].capture->get_disconnected()); + handle_hotplugged_cards(); + goto start; + } - { - unique_lock lock(audio_mutex); - audio_task_queue_changed.wait(lock, [this]{ return !audio_task_queue.empty(); }); - task = audio_task_queue.front(); - audio_task_queue.pop(); + for (unsigned card_index = 0; card_index < num_cards; ++card_index) { + CaptureCard *card = &cards[card_index]; + if (card->new_frames.empty()) { + assert(card_index != master_card_index); + card->queue_length_policy.update_policy(-1); + continue; + } + new_frames[card_index] = move(card->new_frames.front()); + has_new_frame[card_index] = true; + card->new_frames.pop(); + card->new_frames_changed.notify_all(); + + int num_samples_times_timebase = OUTPUT_FREQUENCY * new_frames[card_index].length + card->fractional_samples; + num_samples[card_index] = num_samples_times_timebase / TIMEBASE; + card->fractional_samples = num_samples_times_timebase % TIMEBASE; + assert(num_samples[card_index] >= 0); + + if (card_index == master_card_index) { + // We don't use the queue length policy for the master card, + // but we will if it stops being the master. Thus, clear out + // the policy in case we switch in the future. + card->queue_length_policy.reset(card_index); + } else { + // If we have excess frames compared to the policy for this card, + // drop frames from the head. + card->queue_length_policy.update_policy(card->new_frames.size()); + while (card->new_frames.size() > card->queue_length_policy.get_safe_queue_length()) { + card->new_frames.pop(); + } } - - process_audio_one_frame(task.pts_int, task.num_samples); } } -void Mixer::process_audio_one_frame(int64_t frame_pts_int, int num_samples) +void Mixer::handle_hotplugged_cards() { - vector samples_card; - vector samples_out; + // Check for cards that have been disconnected since last frame. for (unsigned card_index = 0; card_index < num_cards; ++card_index) { - samples_card.resize(num_samples * 2); - { - unique_lock lock(cards[card_index].audio_mutex); - if (!cards[card_index].resampling_queue->get_output_samples(double(frame_pts_int) / TIMEBASE, &samples_card[0], num_samples)) { - printf("Card %d reported previous underrun.\n", card_index); + CaptureCard *card = &cards[card_index]; + if (card->capture->get_disconnected()) { + fprintf(stderr, "Card %u went away, replacing with a fake card.\n", card_index); + FakeCapture *capture = new FakeCapture(WIDTH, HEIGHT, FAKE_FPS, OUTPUT_FREQUENCY, card_index, global_flags.fake_cards_audio); + configure_card(card_index, capture, /*is_fake_capture=*/true); + card->queue_length_policy.reset(card_index); + card->capture->start_bm_capture(); + } + } + + // Check for cards that have been connected since last frame. + vector hotplugged_cards_copy; + { + lock_guard lock(hotplug_mutex); + swap(hotplugged_cards, hotplugged_cards_copy); + } + for (libusb_device *new_dev : hotplugged_cards_copy) { + // Look for a fake capture card where we can stick this in. + int free_card_index = -1; + for (unsigned card_index = 0; card_index < num_cards; ++card_index) { + if (cards[card_index].is_fake_capture) { + free_card_index = int(card_index); + break; } } - // TODO: Allow using audio from the other card(s) as well. - if (card_index == 0) { - samples_out = move(samples_card); + + if (free_card_index == -1) { + fprintf(stderr, "New card plugged in, but no free slots -- ignoring.\n"); + libusb_unref_device(new_dev); + } else { + // BMUSBCapture takes ownership. 
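+ // (Ownership of the libusb_device reference, that is; unlike the
+ // no-free-slot branch above, we must not unref the device here.)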
+ fprintf(stderr, "New card plugged in, choosing slot %d.\n", free_card_index); + CaptureCard *card = &cards[free_card_index]; + BMUSBCapture *capture = new BMUSBCapture(free_card_index, new_dev); + configure_card(free_card_index, capture, /*is_fake_capture=*/false); + card->queue_length_policy.reset(free_card_index); + capture->set_card_disconnected_callback(bind(&Mixer::bm_hotplug_remove, this, free_card_index)); + capture->start_bm_capture(); } } +} - // Cut away everything under 120 Hz (or whatever the cutoff is); - // we don't need it for voice, and it will reduce headroom - // and confuse the compressor. (In particular, any hums at 50 or 60 Hz - // should be dampened.) - locut.render(samples_out.data(), samples_out.size() / 2, locut_cutoff_hz * 2.0 * M_PI / OUTPUT_FREQUENCY, 0.5f); - // Apply a level compressor to get the general level right. - // Basically, if it's over about -40 dBFS, we squeeze it down to that level - // (or more precisely, near it, since we don't use infinite ratio), - // then apply a makeup gain to get it to -14 dBFS. -14 dBFS is, of course, - // entirely arbitrary, but from practical tests with speech, it seems to - // put ut around -23 LUFS, so it's a reasonable starting point for later use. - float ref_level_dbfs = -14.0f; - { - float threshold = 0.01f; // -40 dBFS. - float ratio = 20.0f; - float attack_time = 0.5f; - float release_time = 20.0f; - float makeup_gain = pow(10.0f, (ref_level_dbfs - (-40.0f)) / 20.0f); // +26 dB. - level_compressor.process(samples_out.data(), samples_out.size() / 2, threshold, ratio, attack_time, release_time, makeup_gain); - last_gain_staging_db = 20.0 * log10(level_compressor.get_attenuation() * makeup_gain); +void Mixer::schedule_audio_resampling_tasks(unsigned dropped_frames, int num_samples_per_frame, int length_per_frame) +{ + // Resample the audio as needed, including from previously dropped frames. + assert(num_cards > 0); + for (unsigned frame_num = 0; frame_num < dropped_frames + 1; ++frame_num) { + const bool dropped_frame = (frame_num != dropped_frames); + { + // Signal to the audio thread to process this frame. + // Note that if the frame is a dropped frame, we signal that + // we don't want to use this frame as base for adjusting + // the resampler rate. The reason for this is that the timing + // of these frames is often way too late; they typically don't + // “arrive” before we synthesize them. Thus, we could end up + // in a situation where we have inserted e.g. five audio frames + // into the queue before we then start pulling five of them + // back out. This makes ResamplingQueue overestimate the delay, + // causing undue resampler changes. (We _do_ use the last, + // non-dropped frame; perhaps we should just discard that as well, + // since dropped frames are expected to be rare, and it might be + // better to just wait until we have a slightly more normal situation). + unique_lock lock(audio_mutex); + bool adjust_rate = !dropped_frame; + audio_task_queue.push(AudioTask{pts_int, num_samples_per_frame, adjust_rate}); + audio_task_queue_changed.notify_one(); + } + if (dropped_frame) { + // For dropped frames, increase the pts. Note that if the format changed + // in the meantime, we have no way of detecting that; we just have to + // assume the frame length is always the same. 
+ pts_int += length_per_frame; + } } +} -#if 0 - printf("level=%f (%+5.2f dBFS) attenuation=%f (%+5.2f dB) end_result=%+5.2f dB\n", - level_compressor.get_level(), 20.0 * log10(level_compressor.get_level()), - level_compressor.get_attenuation(), 20.0 * log10(level_compressor.get_attenuation()), - 20.0 * log10(level_compressor.get_level() * level_compressor.get_attenuation() * makeup_gain)); -#endif +void Mixer::render_one_frame(int64_t duration) +{ + // Get the main chain from the theme, and set its state immediately. + Theme::Chain theme_main_chain = theme->get_chain(0, pts(), WIDTH, HEIGHT, input_state); + EffectChain *chain = theme_main_chain.chain; + theme_main_chain.setup_chain(); + //theme_main_chain.chain->enable_phase_timing(true); + + GLuint y_tex, cbcr_tex; + bool got_frame = video_encoder->begin_frame(&y_tex, &cbcr_tex); + assert(got_frame); + + // Render main chain. + GLuint cbcr_full_tex = resource_pool->create_2d_texture(GL_RG8, WIDTH, HEIGHT); + GLuint rgba_tex = resource_pool->create_2d_texture(GL_RGB565, WIDTH, HEIGHT); // Saves texture bandwidth, although dithering gets messed up. + GLuint fbo = resource_pool->create_fbo(y_tex, cbcr_full_tex, rgba_tex); + check_error(); + chain->render_to_fbo(fbo, WIDTH, HEIGHT); + resource_pool->release_fbo(fbo); -// float limiter_att, compressor_att; - - // The real compressor. - if (compressor_enabled) { - float threshold = pow(10.0f, compressor_threshold_dbfs / 20.0f); - float ratio = 20.0f; - float attack_time = 0.005f; - float release_time = 0.040f; - float makeup_gain = 2.0f; // +6 dB. - compressor.process(samples_out.data(), samples_out.size() / 2, threshold, ratio, attack_time, release_time, makeup_gain); -// compressor_att = compressor.get_attenuation(); - } - - // Finally a limiter at -4 dB (so, -10 dBFS) to take out the worst peaks only. - // Note that since ratio is not infinite, we could go slightly higher than this. - if (limiter_enabled) { - float threshold = pow(10.0f, limiter_threshold_dbfs / 20.0f); - float ratio = 30.0f; - float attack_time = 0.0f; // Instant. - float release_time = 0.020f; - float makeup_gain = 1.0f; // 0 dB. - limiter.process(samples_out.data(), samples_out.size() / 2, threshold, ratio, attack_time, release_time, makeup_gain); -// limiter_att = limiter.get_attenuation(); - } - -// printf("limiter=%+5.1f compressor=%+5.1f\n", 20.0*log10(limiter_att), 20.0*log10(compressor_att)); - - // Upsample 4x to find interpolated peak. - peak_resampler.inp_data = samples_out.data(); - peak_resampler.inp_count = samples_out.size() / 2; - - vector interpolated_samples_out; - interpolated_samples_out.resize(samples_out.size()); - while (peak_resampler.inp_count > 0) { // About four iterations. - peak_resampler.out_data = &interpolated_samples_out[0]; - peak_resampler.out_count = interpolated_samples_out.size() / 2; - peak_resampler.process(); - size_t out_stereo_samples = interpolated_samples_out.size() / 2 - peak_resampler.out_count; - peak = max(peak, find_peak(interpolated_samples_out.data(), out_stereo_samples * 2)); - } - - // Find R128 levels. - vector left, right; - deinterleave_samples(samples_out, &left, &right); - float *ptrs[] = { left.data(), right.data() }; - { - unique_lock lock(r128_mutex); - r128.process(left.size(), ptrs); - } + subsample_chroma(cbcr_full_tex, cbcr_tex); + resource_pool->release_2d_texture(cbcr_full_tex); + + // Set the right state for rgba_tex. 
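+ // The display chains will sample this texture directly, so give it linear
+ // filtering and edge clamping before handing it off.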
+ glBindFramebuffer(GL_FRAMEBUFFER, 0); + glBindTexture(GL_TEXTURE_2D, rgba_tex); + glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR); + glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE); + glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE); + + const int64_t av_delay = TIMEBASE / 10; // Corresponds to the fixed delay in resampling_queue.h. TODO: Make less hard-coded. + RefCountedGLsync fence = video_encoder->end_frame(pts_int + av_delay, duration, theme_main_chain.input_frames); - // Send the samples to the sound card. - if (alsa) { - alsa->write(samples_out); + // The live frame just shows the RGBA texture we just rendered. + // It owns rgba_tex now. + DisplayFrame live_frame; + live_frame.chain = display_chain.get(); + live_frame.setup_chain = [this, rgba_tex]{ + display_input->set_texture_num(rgba_tex); + }; + live_frame.ready_fence = fence; + live_frame.input_frames = {}; + live_frame.temp_textures = { rgba_tex }; + output_channel[OUTPUT_LIVE].output_frame(live_frame); + + // Set up preview and any additional channels. + for (int i = 1; i < theme->get_num_channels() + 2; ++i) { + DisplayFrame display_frame; + Theme::Chain chain = theme->get_chain(i, pts(), WIDTH, HEIGHT, input_state); // FIXME: dimensions + display_frame.chain = chain.chain; + display_frame.setup_chain = chain.setup_chain; + display_frame.ready_fence = fence; + display_frame.input_frames = chain.input_frames; + display_frame.temp_textures = {}; + output_channel[i].output_frame(display_frame); } +} + +void Mixer::audio_thread_func() +{ + while (!should_quit) { + AudioTask task; + + { + unique_lock lock(audio_mutex); + audio_task_queue_changed.wait(lock, [this]{ return should_quit || !audio_task_queue.empty(); }); + if (should_quit) { + return; + } + task = audio_task_queue.front(); + audio_task_queue.pop(); + } + + ResamplingQueue::RateAdjustmentPolicy rate_adjustment_policy = + task.adjust_rate ? ResamplingQueue::ADJUST_RATE : ResamplingQueue::DO_NOT_ADJUST_RATE; + vector samples_out = audio_mixer.get_output( + double(task.pts_int) / TIMEBASE, + task.num_samples, + rate_adjustment_policy); - // And finally add them to the output. - h264_encoder->add_audio(frame_pts_int, move(samples_out)); + // Send the samples to the sound card, then add them to the output. + if (alsa) { + alsa->write(samples_out); + } + video_encoder->add_audio(task.pts_int, move(samples_out)); + } } void Mixer::subsample_chroma(GLuint src_tex, GLuint dst_tex) @@ -798,12 +876,6 @@ void Mixer::subsample_chroma(GLuint src_tex, GLuint dst_tex) glGenVertexArrays(1, &vao); check_error(); - float vertices[] = { - 0.0f, 2.0f, - 0.0f, 0.0f, - 2.0f, 0.0f - }; - glBindVertexArray(vao); check_error(); @@ -830,17 +902,28 @@ void Mixer::subsample_chroma(GLuint src_tex, GLuint dst_tex) float chroma_offset_0[] = { -0.5f / WIDTH, 0.0f }; set_uniform_vec2(cbcr_program_num, "foo", "chroma_offset_0", chroma_offset_0); - GLuint position_vbo = fill_vertex_attribute(cbcr_program_num, "position", 2, GL_FLOAT, sizeof(vertices), vertices); - GLuint texcoord_vbo = fill_vertex_attribute(cbcr_program_num, "texcoord", 2, GL_FLOAT, sizeof(vertices), vertices); // Same as vertices. 
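+ // Use the VBO set up once in the constructor; the position and texcoord
+ // attributes both read the same three vertices from it.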
+ glBindBuffer(GL_ARRAY_BUFFER, cbcr_vbo); + check_error(); + + for (GLint attr_index : { cbcr_position_attribute_index, cbcr_texcoord_attribute_index }) { + glEnableVertexAttribArray(attr_index); + check_error(); + glVertexAttribPointer(attr_index, 2, GL_FLOAT, GL_FALSE, 0, BUFFER_OFFSET(0)); + check_error(); + } glDrawArrays(GL_TRIANGLES, 0, 3); check_error(); - cleanup_vertex_attribute(cbcr_program_num, "position", position_vbo); - cleanup_vertex_attribute(cbcr_program_num, "texcoord", texcoord_vbo); + for (GLint attr_index : { cbcr_position_attribute_index, cbcr_texcoord_attribute_index }) { + glDisableVertexAttribArray(attr_index); + check_error(); + } glUseProgram(0); check_error(); + glBindFramebuffer(GL_FRAMEBUFFER, 0); + check_error(); resource_pool->release_fbo(fbo); glDeleteVertexArrays(1, &vao); @@ -865,6 +948,7 @@ void Mixer::start() void Mixer::quit() { should_quit = true; + audio_task_queue_changed.notify_one(); mixer_thread.join(); audio_thread.join(); } @@ -879,12 +963,21 @@ void Mixer::channel_clicked(int preview_num) theme->channel_clicked(preview_num); } -void Mixer::reset_meters() +void Mixer::start_mode_scanning(unsigned card_index) { - peak_resampler.reset(); - peak = 0.0f; - r128.reset(); - r128.integr_start(); + assert(card_index < num_cards); + if (is_mode_scanning[card_index]) { + return; + } + is_mode_scanning[card_index] = true; + mode_scanlist[card_index].clear(); + for (const auto &mode : cards[card_index].capture->get_available_video_modes()) { + mode_scanlist[card_index].push_back(mode.first); + } + assert(!mode_scanlist[card_index].empty()); + mode_scanlist_index[card_index] = 0; + cards[card_index].capture->set_video_mode(mode_scanlist[card_index][0]); + last_mode_scan_change[card_index] = steady_clock::now(); } Mixer::OutputChannel::~OutputChannel() @@ -910,9 +1003,47 @@ void Mixer::OutputChannel::output_frame(DisplayFrame frame) has_ready_frame = true; } - if (has_new_frame_ready_callback) { + if (new_frame_ready_callback) { new_frame_ready_callback(); } + + // Reduce the number of callbacks by filtering duplicates. The reason + // why we bother doing this is that Qt seemingly can get into a state + // where its builds up an essentially unbounded queue of signals, + // consuming more and more memory, and there's no good way of collapsing + // user-defined signals or limiting the length of the queue. 
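+ // Thus, each callback below fires only when its value has actually changed
+ // since the last time it was sent.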
+ if (transition_names_updated_callback) { + vector transition_names = global_mixer->get_transition_names(); + bool changed = false; + if (transition_names.size() != last_transition_names.size()) { + changed = true; + } else { + for (unsigned i = 0; i < transition_names.size(); ++i) { + if (transition_names[i] != last_transition_names[i]) { + changed = true; + break; + } + } + } + if (changed) { + transition_names_updated_callback(transition_names); + last_transition_names = transition_names; + } + } + if (name_updated_callback) { + string name = global_mixer->get_channel_name(channel); + if (name != last_name) { + name_updated_callback(name); + last_name = name; + } + } + if (color_updated_callback) { + string color = global_mixer->get_channel_color(channel); + if (color != last_color) { + color_updated_callback(color); + last_color = color; + } + } } bool Mixer::OutputChannel::get_display_frame(DisplayFrame *frame) @@ -943,5 +1074,21 @@ bool Mixer::OutputChannel::get_display_frame(DisplayFrame *frame) void Mixer::OutputChannel::set_frame_ready_callback(Mixer::new_frame_ready_callback_t callback) { new_frame_ready_callback = callback; - has_new_frame_ready_callback = true; } + +void Mixer::OutputChannel::set_transition_names_updated_callback(Mixer::transition_names_updated_callback_t callback) +{ + transition_names_updated_callback = callback; +} + +void Mixer::OutputChannel::set_name_updated_callback(Mixer::name_updated_callback_t callback) +{ + name_updated_callback = callback; +} + +void Mixer::OutputChannel::set_color_updated_callback(Mixer::color_updated_callback_t callback) +{ + color_updated_callback = callback; +} + +mutex RefCountedGLsync::fence_lock;