diff --git a/nageru/mixer.cpp b/nageru/mixer.cpp
index 32c1984..212041b 100644
--- a/nageru/mixer.cpp
+++ b/nageru/mixer.cpp
@@ -4,6 +4,7 @@
 #include
 #include
+#include
 #include
 #include
 #include
@@ -44,6 +45,7 @@
 #include "shared/disk_space_estimator.h"
 #include "ffmpeg_capture.h"
 #include "flags.h"
+#include "image_input.h"
 #include "input_mapping.h"
 #include "shared/metrics.h"
 #include "mjpeg_encoder.h"
@@ -60,6 +62,12 @@
 #include
 #include "json.pb.h"
+#ifdef HAVE_SRT
+// Must come after CEF, since it includes <syslog.h>, which has #defines
+// that conflict with CEF logging constants.
+#include <srt/srt.h>
+#endif
+
 class IDeckLink;
 class QOpenGLContext;
@@ -108,11 +116,17 @@ void ensure_texture_resolution(PBOFrameAllocator::Userdata *userdata, unsigned f
 assert(false);
 }
- if (first ||
- width != userdata->last_width[field] ||
- height != userdata->last_height[field] ||
- cbcr_width != userdata->last_cbcr_width[field] ||
- cbcr_height != userdata->last_cbcr_height[field]) {
+ const bool recreate_main_texture =
+ first ||
+ width != userdata->last_width[field] ||
+ height != userdata->last_height[field] ||
+ cbcr_width != userdata->last_cbcr_width[field] ||
+ cbcr_height != userdata->last_cbcr_height[field];
+ const bool recreate_v210_texture =
+ global_flags.ten_bit_input &&
+ (first || v210_width != userdata->last_v210_width[field] || height != userdata->last_height[field]);
+
+ if (recreate_main_texture) {
 // We changed resolution since last use of this texture, so we need to create
 // a new object. Note that since each card has its own PBOFrameAllocator,
 // we don't need to worry about these flip-flopping between resolutions.
@@ -152,11 +166,8 @@ void ensure_texture_resolution(PBOFrameAllocator::Userdata *userdata, unsigned f
 case PixelFormat_8BitBGRA:
 glBindTexture(GL_TEXTURE_2D, userdata->tex_rgba[field]);
 check_error();
- if (global_flags.can_disable_srgb_decoder) { // See the comments in tweaked_inputs.h.
- glTexImage2D(GL_TEXTURE_2D, 0, GL_SRGB8_ALPHA8, width, height, 0, GL_BGRA, GL_UNSIGNED_INT_8_8_8_8_REV, nullptr);
- } else {
- glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA8, width, height, 0, GL_BGRA, GL_UNSIGNED_INT_8_8_8_8_REV, nullptr);
- }
+ // NOTE: sRGB may be disabled by sRGBSwitchingFlatInput.
+ glTexImage2D(GL_TEXTURE_2D, 0, GL_SRGB8_ALPHA8, width, height, 0, GL_BGRA, GL_UNSIGNED_INT_8_8_8_8_REV, nullptr);
 check_error();
 break;
 default:
@@ -167,14 +178,14 @@
 userdata->last_cbcr_width[field] = cbcr_width;
 userdata->last_cbcr_height[field] = cbcr_height;
 }
- if (global_flags.ten_bit_input &&
- (first || v210_width != userdata->last_v210_width[field])) {
+ if (recreate_v210_texture) {
 // Same as above; we need to recreate the texture.
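 // (The texture is allocated with the precomputed v210_width rather than the
 // pixel width, since v210 is a packed 10-bit 4:2:2 format whose texel count
 // per line differs from the pixel count.)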
glBindTexture(GL_TEXTURE_2D, userdata->tex_v210[field]); check_error(); glTexImage2D(GL_TEXTURE_2D, 0, GL_RGB10_A2, v210_width, height, 0, GL_RGBA, GL_UNSIGNED_INT_2_10_10_10_REV, nullptr); check_error(); userdata->last_v210_width[field] = v210_width; + userdata->last_height[field] = height; } } @@ -306,16 +317,18 @@ Mixer::Mixer(const QSurfaceFormat &format, unsigned num_cards) num_cards(num_cards), mixer_surface(create_surface(format)), h264_encoder_surface(create_surface(format)), - decklink_output_surface(create_surface(format)) + decklink_output_surface(create_surface(format)), + image_update_surface(create_surface(format)) { memcpy(ycbcr_interpretation, global_flags.ycbcr_interpretation, sizeof(ycbcr_interpretation)); CHECK(init_movit(MOVIT_SHADER_DIR, MOVIT_DEBUG_OFF)); check_error(); - // This nearly always should be true. - global_flags.can_disable_srgb_decoder = - epoxy_has_gl_extension("GL_EXT_texture_sRGB_decode") && - epoxy_has_gl_extension("GL_ARB_sampler_objects"); + if (!epoxy_has_gl_extension("GL_EXT_texture_sRGB_decode") || + !epoxy_has_gl_extension("GL_ARB_sampler_objects")) { + fprintf(stderr, "Nageru requires GL_EXT_texture_sRGB_decode and GL_ARB_sampler_objects to run.\n"); + exit(1); + } // Since we allow non-bouncing 4:2:2 YCbCrInputs, effective subpixel precision // will be halved when sampling them, and we need to compensate here. @@ -347,6 +360,11 @@ Mixer::Mixer(const QSurfaceFormat &format, unsigned num_cards) ycbcr_format.cb_y_position = 0.5f; ycbcr_format.cr_y_position = 0.5f; + // Initialize the neutral colors to sane values. + for (unsigned i = 0; i < MAX_VIDEO_CARDS; ++i) { + last_received_neutral_color[i] = RGBTriplet(1.0f, 1.0f, 1.0f); + } + // Display chain; shows the live output produced by the main chain (or rather, a copy of it). display_chain.reset(new EffectChain(global_flags.width, global_flags.height, resource_pool.get())); check_error(); @@ -370,10 +388,10 @@ Mixer::Mixer(const QSurfaceFormat &format, unsigned num_cards) audio_mixer.reset(new AudioMixer(num_cards, video_inputs.size())); httpd.add_endpoint("/channels", bind(&Mixer::get_channels_json, this), HTTPD::ALLOW_ALL_ORIGINS); - for (int channel_idx = 2; channel_idx < theme->get_num_channels(); ++channel_idx) { + for (int channel_idx = 0; channel_idx < theme->get_num_channels(); ++channel_idx) { char url[256]; - snprintf(url, sizeof(url), "/channels/%d/color", channel_idx); - httpd.add_endpoint(url, bind(&Mixer::get_channel_color_http, this, unsigned(channel_idx)), HTTPD::ALLOW_ALL_ORIGINS); + snprintf(url, sizeof(url), "/channels/%d/color", channel_idx + 2); + httpd.add_endpoint(url, bind(&Mixer::get_channel_color_http, this, unsigned(channel_idx + 2)), HTTPD::ALLOW_ALL_ORIGINS); } // Start listening for clients only once VideoEncoder has written its header, if any. 
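The endpoint hunks above renumber the per-channel color URLs: the theme's channels are counted from 0 internally, but are published as /channels/2/color and upward, the same +2 offset used for the channel indices in the /channels JSON (indices 0 and 1 appear to be reserved for the live and preview outputs). A minimal sketch of the resulting URL layout, assuming a hypothetical theme with four channels:

    #include <cstdio>

    int main()
    {
        const int num_theme_channels = 4;  // Hypothetical; Nageru gets this from theme->get_num_channels().
        for (int channel_idx = 0; channel_idx < num_theme_channels; ++channel_idx) {
            // Mirrors snprintf(url, sizeof(url), "/channels/%d/color", channel_idx + 2) above.
            printf("/channels/%d/color\n", channel_idx + 2);  // /channels/2/color ... /channels/5/color
        }
    }

A GET on one of these URLs returns the channel's color as text/plain, via get_channel_color_http() further down in this diff.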
@@ -432,7 +450,7 @@ Mixer::Mixer(const QSurfaceFormat &format, unsigned num_cards) for (unsigned video_card_index = 0; video_card_index < video_inputs.size(); ++card_index, ++video_card_index) { if (card_index >= MAX_VIDEO_CARDS) { fprintf(stderr, "ERROR: Not enough card slots available for the videos the theme requested.\n"); - exit(1); + abort(); } configure_card(card_index, video_inputs[video_card_index], CardType::FFMPEG_INPUT, /*output=*/nullptr); video_inputs[video_card_index]->set_card_index(card_index); @@ -445,7 +463,7 @@ Mixer::Mixer(const QSurfaceFormat &format, unsigned num_cards) for (unsigned html_card_index = 0; html_card_index < html_inputs.size(); ++card_index, ++html_card_index) { if (card_index >= MAX_VIDEO_CARDS) { fprintf(stderr, "ERROR: Not enough card slots available for the HTML inputs the theme requested.\n"); - exit(1); + abort(); } configure_card(card_index, html_inputs[html_card_index], CardType::CEF_INPUT, /*output=*/nullptr); html_inputs[html_card_index]->set_card_index(card_index); @@ -456,6 +474,12 @@ Mixer::Mixer(const QSurfaceFormat &format, unsigned num_cards) BMUSBCapture::set_card_connected_callback(bind(&Mixer::bm_hotplug_add, this, _1)); BMUSBCapture::start_bm_thread(); +#ifdef HAVE_SRT + if (global_flags.srt_port >= 0) { + start_srt(); + } +#endif + for (unsigned card_index = 0; card_index < num_cards + num_video_inputs + num_html_inputs; ++card_index) { cards[card_index].queue_length_policy.reset(card_index); } @@ -466,7 +490,7 @@ Mixer::Mixer(const QSurfaceFormat &format, unsigned num_cards) if (!v210Converter::has_hardware_support()) { fprintf(stderr, "ERROR: --ten-bit-input requires support for OpenGL compute shaders\n"); fprintf(stderr, " (OpenGL 4.3, or GL_ARB_compute_shader + GL_ARB_shader_image_load_store).\n"); - exit(1); + abort(); } v210_converter.reset(new v210Converter()); @@ -483,7 +507,7 @@ Mixer::Mixer(const QSurfaceFormat &format, unsigned num_cards) if (!v210Converter::has_hardware_support()) { fprintf(stderr, "ERROR: --ten-bit-output requires support for OpenGL compute shaders\n"); fprintf(stderr, " (OpenGL 4.3, or GL_ARB_compute_shader + GL_ARB_shader_image_load_store).\n"); - exit(1); + abort(); } } @@ -500,10 +524,14 @@ Mixer::Mixer(const QSurfaceFormat &format, unsigned num_cards) } output_jitter_history.register_metrics({{ "card", "output" }}); + + ImageInput::start_update_thread(image_update_surface); } Mixer::~Mixer() { + ImageInput::end_update_thread(); + if (mjpeg_encoder != nullptr) { mjpeg_encoder->stop(); } @@ -521,7 +549,7 @@ Mixer::~Mixer() video_encoder.reset(nullptr); } -void Mixer::configure_card(unsigned card_index, CaptureInterface *capture, CardType card_type, DeckLinkOutput *output) +void Mixer::configure_card(unsigned card_index, CaptureInterface *capture, CardType card_type, DeckLinkOutput *output, bool override_card_as_live) { printf("Configuring card %d...\n", card_index); @@ -552,6 +580,15 @@ void Mixer::configure_card(unsigned card_index, CaptureInterface *capture, CardT card->capture->set_frame_callback(bind(&Mixer::bm_frame, this, card_index, _1, _2, _3, _4, _5, _6, _7)); if (card->frame_allocator == nullptr) { card->frame_allocator.reset(new PBOFrameAllocator(pixel_format, 8 << 20, global_flags.width, global_flags.height, card_index, mjpeg_encoder.get())); // 8 MB. + } else { + // The format could have changed, but we cannot reset the allocator + // and create a new one from scratch, since there may be allocated + // frames from it that expect to call release_frame() on it. 
+ // Instead, ask the allocator to create new frames for us and discard
+ // any old ones as they come back. This takes the mutex while
+ // allocating, but nothing should really be sending frames in there
+ // right now anyway (start_bm_capture() has not been called yet).
+ card->frame_allocator->reconfigure(pixel_format, 8 << 20, global_flags.width, global_flags.height, card_index, mjpeg_encoder.get());
 }
 card->capture->set_video_frame_allocator(card->frame_allocator.get());
 if (card->surface == nullptr) {
@@ -564,8 +601,12 @@ void Mixer::configure_card(unsigned card_index, CaptureInterface *capture, CardT
 // NOTE: start_bm_capture() happens in thread_func().
+ if (override_card_as_live) {
+ assert(card_type == CardType::FFMPEG_INPUT);
+ }
+
 DeviceSpec device;
- if (card_type == CardType::FFMPEG_INPUT) {
+ if (card_type == CardType::FFMPEG_INPUT && !override_card_as_live) {
 device = DeviceSpec{InputSourceType::FFMPEG_VIDEO_INPUT, card_index - num_cards};
 } else {
 device = DeviceSpec{InputSourceType::CAPTURE_CARD, card_index};
@@ -785,6 +826,35 @@ void Mixer::bm_frame(unsigned card_index, uint16_t timecode,
 if (num_samples > 0) {
 audio_mixer->add_audio(device, audio_frame.data + audio_offset, num_samples, audio_format, audio_frame.received_timestamp);
+
+ // Audio for the MJPEG stream. We don't resample; audio that's not in 48 kHz
+ // just gets dropped for now.
+ //
+ // Only bother doing MJPEG encoding if there are any connected clients
+ // that want the stream.
+ if (httpd.get_num_connected_multicam_clients() > 0 ||
+ httpd.get_num_connected_siphon_clients(card_index) > 0) {
+ vector<int32_t> converted_samples = convert_audio_to_fixed32(audio_frame.data + audio_offset, num_samples, audio_format, 2);
+ lock_guard<mutex> lock(card_mutex);
+ if (card->new_raw_audio.empty()) {
+ card->new_raw_audio = move(converted_samples);
+ } else {
+ // For raw audio, we don't really synchronize audio and video;
+ // we just put the audio in frame by frame, and if a video frame is
+ // dropped, we still keep the audio, which means it will be added
+ // to the beginning of the next frame. It would probably be better
+ // to move the audio pts earlier to show this, but most players can
+ // live with some jitter, and in a lot of ways, it's much nicer for
+ // Futatabi to have all audio locked to a video frame.
+ card->new_raw_audio.insert(card->new_raw_audio.end(), converted_samples.begin(), converted_samples.end());
+
+ // Truncate to one second, just to be sure we don't have infinite buildup in case of weirdness.
+ if (card->new_raw_audio.size() > OUTPUT_FREQUENCY * 2) {
+ size_t excess_samples = card->new_raw_audio.size() - OUTPUT_FREQUENCY * 2;
+ card->new_raw_audio.erase(card->new_raw_audio.begin(), card->new_raw_audio.begin() + excess_samples);
+ }
+ }
+ }
 }
 // Done with the audio, so release it.
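The comments in the hunk above spell out the buffering policy for card->new_raw_audio: raw audio for the MJPEG stream is appended frame by frame, and the buffer is capped at one second so that dropped video frames cannot cause unbounded buildup. A standalone sketch of that policy, assuming interleaved stereo int32_t samples at 48 kHz (kOutputFrequency and append_bounded are illustrative names, not Nageru's):

    #include <cstddef>
    #include <cstdint>
    #include <vector>

    constexpr size_t kOutputFrequency = 48000;  // Stand-in for OUTPUT_FREQUENCY.

    // Append new samples, then drop the oldest ones if we hold more than one
    // second of stereo audio (kOutputFrequency samples * 2 channels).
    void append_bounded(std::vector<int32_t> *buf, const std::vector<int32_t> &samples)
    {
        buf->insert(buf->end(), samples.begin(), samples.end());
        const size_t max_size = kOutputFrequency * 2;
        if (buf->size() > max_size) {
            buf->erase(buf->begin(), buf->begin() + (buf->size() - max_size));
        }
    }

Dropping from the front keeps the newest audio, matching the erase() of excess_samples above.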
@@ -971,6 +1041,10 @@ void Mixer::bm_frame(unsigned card_index, uint16_t timecode,
 new_frame.video_format = video_format;
 new_frame.y_offset = y_offset;
 new_frame.cbcr_offset = cbcr_offset;
+ if (card->type == CardType::FFMPEG_INPUT) {
+ FFmpegCapture *ffmpeg_capture = static_cast<FFmpegCapture *>(card->capture.get());
+ new_frame.neutral_color = ffmpeg_capture->get_last_neutral_color();
+ }
 card->new_frames.push_back(move(new_frame));
 card->jitter_history.frame_arrived(video_frame.received_timestamp, frame_length, dropped_frames);
 card->may_have_dropped_last_frame = false;
@@ -998,7 +1072,7 @@ void Mixer::thread_func()
 QOpenGLContext *context = create_context(mixer_surface);
 if (!make_current(context, mixer_surface)) {
 printf("oops\n");
- exit(1);
+ abort();
 }
 // Start the actual capture. (We don't want to do it before we're actually ready
@@ -1034,16 +1108,17 @@
 master_card_index = output_card_index;
 } else {
 master_card_is_output = false;
- master_card_index = theme->map_signal(master_clock_channel);
+ master_card_index = theme->map_signal_to_card(master_clock_channel);
 assert(master_card_index < num_cards + num_video_inputs);
 }
- OutputFrameInfo output_frame_info = get_one_frame_from_each_card(master_card_index, master_card_is_output, new_frames, has_new_frame);
+ handle_hotplugged_cards();
+
+ vector<int32_t> raw_audio[MAX_VIDEO_CARDS]; // For MJPEG encoding.
+ OutputFrameInfo output_frame_info = get_one_frame_from_each_card(master_card_index, master_card_is_output, new_frames, has_new_frame, raw_audio);
 schedule_audio_resampling_tasks(output_frame_info.dropped_frames, output_frame_info.num_samples, output_frame_info.frame_duration, output_frame_info.is_preroll, output_frame_info.frame_timestamp);
 stats_dropped_frames += output_frame_info.dropped_frames;
- handle_hotplugged_cards();
-
 for (unsigned card_index = 0; card_index < num_cards + num_video_inputs + num_html_inputs; ++card_index) {
 DeviceSpec device = card_index_to_device(card_index, num_cards);
 if (card_index == master_card_index || !has_new_frame[card_index]) {
@@ -1081,12 +1156,24 @@
 new_frame->upload_func = nullptr;
 }
- if (new_frame->frame->data_copy != nullptr) {
- int mjpeg_card_index = mjpeg_encoder->get_mjpeg_stream_for_card(card_index);
- if (mjpeg_card_index != -1) {
- mjpeg_encoder->upload_frame(pts_int, mjpeg_card_index, new_frame->frame, new_frame->video_format, new_frame->y_offset, new_frame->cbcr_offset);
- }
+ // Only set the white balance if it actually changed. This means that the user
+ // is free to override the white balance in a video with no white balance information
+ // actually set (ie. r=g=b=1 all the time), or one where the white point is wrong,
+ // but frame-to-frame decisions will be heeded. We do this pretty much as late
+ // as possible (ie., after picking out the frame from the buffer), so that we are sure
+ // that the change takes effect on exactly the right frame.
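+ // (The 1e-3 threshold is well below one 8-bit quantization step,
+ // 1/255 ≈ 0.004, so genuine frame-to-frame changes are never missed.)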
+ if (fabs(new_frame->neutral_color.r - last_received_neutral_color[card_index].r) > 1e-3 ||
+ fabs(new_frame->neutral_color.g - last_received_neutral_color[card_index].g) > 1e-3 ||
+ fabs(new_frame->neutral_color.b - last_received_neutral_color[card_index].b) > 1e-3) {
+ theme->set_wb_for_card(card_index, new_frame->neutral_color.r, new_frame->neutral_color.g, new_frame->neutral_color.b);
+ last_received_neutral_color[card_index] = new_frame->neutral_color;
 }
+
+ if (new_frame->frame->data_copy != nullptr && mjpeg_encoder->should_encode_mjpeg_for_card(card_index)) {
+ RGBTriplet neutral_color = theme->get_white_balance_for_card(card_index);
+ mjpeg_encoder->upload_frame(pts_int, card_index, new_frame->frame, new_frame->video_format, new_frame->y_offset, new_frame->cbcr_offset, move(raw_audio[card_index]), neutral_color);
+ }
+
 }
 int64_t frame_duration = output_frame_info.frame_duration;
@@ -1182,11 +1269,11 @@ void Mixer::trim_queue(CaptureCard *card, size_t safe_queue_length)
 pair<string, string> Mixer::get_channels_json()
 {
 Channels ret;
- for (int channel_idx = 2; channel_idx < theme->get_num_channels(); ++channel_idx) {
+ for (int channel_idx = 0; channel_idx < theme->get_num_channels(); ++channel_idx) {
 Channel *channel = ret.add_channel();
- channel->set_index(channel_idx);
- channel->set_name(theme->get_channel_name(channel_idx));
- channel->set_color(theme->get_channel_color(channel_idx));
+ channel->set_index(channel_idx + 2);
+ channel->set_name(theme->get_channel_name(channel_idx + 2));
+ channel->set_color(theme->get_channel_color(channel_idx + 2));
 }
 string contents;
 google::protobuf::util::MessageToJsonString(ret, &contents); // Ignore any errors.
@@ -1198,7 +1285,7 @@ pair<string, string> Mixer::get_channel_color_http(unsigned channel_idx)
 return make_pair(theme->get_channel_color(channel_idx), "text/plain");
 }
-Mixer::OutputFrameInfo Mixer::get_one_frame_from_each_card(unsigned master_card_index, bool master_card_is_output, CaptureCard::NewFrame new_frames[MAX_VIDEO_CARDS], bool has_new_frame[MAX_VIDEO_CARDS])
+Mixer::OutputFrameInfo Mixer::get_one_frame_from_each_card(unsigned master_card_index, bool master_card_is_output, CaptureCard::NewFrame new_frames[MAX_VIDEO_CARDS], bool has_new_frame[MAX_VIDEO_CARDS], vector<int32_t> raw_audio[MAX_VIDEO_CARDS])
 {
 OutputFrameInfo output_frame_info;
 start:
@@ -1237,7 +1324,7 @@
 // we dropped. (may_have_dropped_last_frame is set whenever we
 // trim the queue completely away, and cleared when we actually
 // get a new frame.)
- ((CEFCapture *)card->capture.get())->request_new_frame();
+ ((CEFCapture *)card->capture.get())->request_new_frame(/*ignore_if_locked=*/true);
 }
 #endif
 } else {
@@ -1246,6 +1333,8 @@
 card->new_frames.pop_front();
 card->new_frames_changed.notify_all();
 }
+
+ raw_audio[card_index] = move(card->new_raw_audio);
 }
 if (!master_card_is_output) {
@@ -1277,7 +1366,7 @@
 // This might get off by a fractional sample when changing master card
 // between ones with different frame rates, but that's fine.
- int num_samples_times_timebase = OUTPUT_FREQUENCY * output_frame_info.frame_duration + fractional_samples;
+ int64_t num_samples_times_timebase = int64_t(OUTPUT_FREQUENCY) * output_frame_info.frame_duration + fractional_samples;
 output_frame_info.num_samples = num_samples_times_timebase / TIMEBASE;
 fractional_samples = num_samples_times_timebase % TIMEBASE;
 assert(output_frame_info.num_samples >= 0);
@@ -1301,9 +1390,15 @@ void Mixer::handle_hotplugged_cards()
 {
 // Check for cards that have been connected since last frame.
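 // (They are swapped out under hotplug_mutex and processed outside it,
 // so the hotplug callbacks are never blocked on card setup.)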
 vector<libusb_device *> hotplugged_cards_copy;
+#ifdef HAVE_SRT
+ vector<SRTSOCKET> hotplugged_srt_cards_copy;
+#endif
 {
 lock_guard<mutex> lock(hotplug_mutex);
 swap(hotplugged_cards, hotplugged_cards_copy);
+#ifdef HAVE_SRT
+ swap(hotplugged_srt_cards, hotplugged_srt_cards_copy);
+#endif
 }
 for (libusb_device *new_dev : hotplugged_cards_copy) {
 // Look for a fake capture card where we can stick this in.
@@ -1329,6 +1424,49 @@
 capture->start_bm_capture();
 }
 }
+
+#ifdef HAVE_SRT
+ // Same, for SRT inputs.
+ // TODO: On disconnect and reconnect, we might want to use the stream ID
+ // to find the slot it used to go into?
+ for (SRTSOCKET sock : hotplugged_srt_cards_copy) {
+ // Look for a fake capture card where we can stick this in.
+ int free_card_index = -1;
+ for (unsigned card_index = 0; card_index < num_cards; ++card_index) {
+ if (cards[card_index].is_fake_capture) {
+ free_card_index = card_index;
+ break;
+ }
+ }
+
+ char name[256];
+ int namelen = sizeof(name);
+ srt_getsockopt(sock, /*ignored=*/0, SRTO_STREAMID, name, &namelen);
+ string stream_id(name, namelen);
+
+ if (free_card_index == -1) {
+ if (stream_id.empty()) {
+ stream_id = "no name";
+ }
+ fprintf(stderr, "New SRT stream connected (%s), but no free slots -- ignoring.\n", stream_id.c_str());
+ srt_close(sock);
+ } else {
+ // FFmpegCapture takes ownership.
+ if (stream_id.empty()) {
+ fprintf(stderr, "New unnamed SRT stream connected, choosing slot %d.\n", free_card_index);
+ } else {
+ fprintf(stderr, "New SRT stream connected (%s), choosing slot %d.\n", stream_id.c_str(), free_card_index);
+ }
+ CaptureCard *card = &cards[free_card_index];
+ FFmpegCapture *capture = new FFmpegCapture(sock, stream_id);
+ capture->set_card_index(free_card_index);
+ configure_card(free_card_index, capture, CardType::FFMPEG_INPUT, /*output=*/nullptr, /*override_card_as_live=*/true);
+ card->queue_length_policy.reset(free_card_index);
+ capture->set_card_disconnected_callback(bind(&Mixer::bm_hotplug_remove, this, free_card_index));
+ capture->start_bm_capture();
+ }
+ }
+#endif
 }
@@ -1568,6 +1706,23 @@ void Mixer::quit()
 audio_task_queue_changed.notify_one();
 mixer_thread.join();
 audio_thread.join();
+#ifdef HAVE_SRT
+ if (global_flags.srt_port >= 0) {
+ // There's seemingly no other reasonable way to wake up the thread
+ // (libsrt's epoll equivalent is busy-waiting).
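+ // Connecting to our own listening socket makes srt_accept() return;
+ // the accept loop in start_srt() then notices should_quit and exits.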
+ int sock = srt_socket(AF_INET6, 0, 0);
+ if (sock != -1) {
+ sockaddr_in6 addr;
+ memset(&addr, 0, sizeof(addr));
+ addr.sin6_family = AF_INET6;
+ addr.sin6_addr = IN6ADDR_LOOPBACK_INIT;
+ addr.sin6_port = htons(global_flags.srt_port);
+ srt_connect(sock, (sockaddr *)&addr, sizeof(addr));
+ srt_close(sock);
+ }
+ srt_thread.join();
+ }
+#endif
 }

 void Mixer::transition_clicked(int transition_num)
@@ -1755,4 +1910,43 @@ void Mixer::OutputChannel::set_color_updated_callback(Mixer::color_updated_callb
 color_updated_callback = callback;
 }

+#ifdef HAVE_SRT
+void Mixer::start_srt()
+{
+ SRTSOCKET sock = srt_socket(AF_INET6, 0, 0);
+ sockaddr_in6 addr;
+ memset(&addr, 0, sizeof(addr));
+ addr.sin6_family = AF_INET6;
+ addr.sin6_port = htons(global_flags.srt_port);
+
+ int err = srt_bind(sock, (sockaddr *)&addr, sizeof(addr));
+ if (err != 0) {
+ fprintf(stderr, "srt_bind: %s\n", srt_getlasterror_str());
+ abort();
+ }
+ err = srt_listen(sock, MAX_VIDEO_CARDS);
+ if (err != 0) {
+ fprintf(stderr, "srt_listen: %s\n", srt_getlasterror_str());
+ abort();
+ }
+
+ srt_thread = thread([this, sock] {
+ sockaddr_in6 addr;
+ for ( ;; ) {
+ int sa_len = sizeof(addr);
+ int clientsock = srt_accept(sock, (sockaddr *)&addr, &sa_len);
+ if (should_quit) {
+ if (clientsock != -1) {
+ srt_close(clientsock);
+ }
+ break;
+ }
+ lock_guard<mutex> lock(hotplug_mutex);
+ hotplugged_srt_cards.push_back(clientsock);
+ }
+ srt_close(sock);
+ });
+}
+#endif
+
 mutex RefCountedGLsync::fence_lock;
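A note on the shutdown path in Mixer::quit() above: srt_accept() has no clean cancellation mechanism (as the comment says, libsrt's epoll equivalent busy-waits), so quit() simply makes one throwaway connection to its own listening port; srt_accept() returns, and the loop sees should_quit and exits. The same trick works for any blocking accept loop; a generic sketch with plain BSD sockets (wake_up_accept_loop is an illustrative name, not part of Nageru):

    #include <arpa/inet.h>
    #include <cstdint>
    #include <netinet/in.h>
    #include <string.h>
    #include <sys/socket.h>
    #include <unistd.h>

    void wake_up_accept_loop(uint16_t port)
    {
        int sock = socket(AF_INET6, SOCK_STREAM, 0);
        if (sock == -1) {
            return;
        }
        sockaddr_in6 addr;
        memset(&addr, 0, sizeof(addr));
        addr.sin6_family = AF_INET6;
        addr.sin6_addr = in6addr_loopback;
        addr.sin6_port = htons(port);
        // The connection itself is the signal; the accepting thread is expected
        // to re-check its quit flag as soon as accept() returns.
        connect(sock, (sockaddr *)&addr, sizeof(addr));
        close(sock);
    }

On the input side, any SRT sender can feed the listener started by start_srt(); if the sender sets an SRT stream ID, it is read back through SRTO_STREAMID and used in the slot-assignment log messages in handle_hotplugged_cards().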