}
check_error();
break;
+ default:
+ assert(false);
}
userdata->last_width[field] = width;
userdata->last_height[field] = height;
num_cards(num_cards),
mixer_surface(create_surface(format)),
h264_encoder_surface(create_surface(format)),
- decklink_output_surface(create_surface(format)),
- audio_mixer(num_cards)
+ decklink_output_surface(create_surface(format))
{
memcpy(ycbcr_interpretation, global_flags.ycbcr_interpretation, sizeof(ycbcr_interpretation));
CHECK(init_movit(MOVIT_SHADER_DIR, MOVIT_DEBUG_OFF));
// Must be instantiated after VideoEncoder has initialized global_flags.use_zerocopy.
theme.reset(new Theme(global_flags.theme_filename, global_flags.theme_dirs, resource_pool.get(), num_cards));
+ // Must be instantiated after the theme, as the theme decides the number of FFmpeg inputs.
+ std::vector<FFmpegCapture *> video_inputs = theme->get_video_inputs();
+ audio_mixer.reset(new AudioMixer(num_cards, video_inputs.size()));
+
httpd.add_endpoint("/channels", bind(&Mixer::get_channels_json, this), HTTPD::ALLOW_ALL_ORIGINS);
for (int channel_idx = 2; channel_idx < theme->get_num_channels(); ++channel_idx) {
char url[256];
// Initialize all video inputs the theme asked for. Note that these are
// all put _after_ the regular cards, which stop at <num_cards> - 1.
- std::vector<FFmpegCapture *> video_inputs = theme->get_video_inputs();
for (unsigned video_card_index = 0; video_card_index < video_inputs.size(); ++card_index, ++video_card_index) {
if (card_index >= MAX_VIDEO_CARDS) {
fprintf(stderr, "ERROR: Not enough card slots available for the videos the theme requested.\n");
Mixer::~Mixer()
{
+ httpd.stop();
BMUSBCapture::stop_bm_thread();
for (unsigned card_index = 0; card_index < num_cards + num_video_inputs + num_html_inputs; ++card_index) {
- {
- unique_lock<mutex> lock(card_mutex);
- cards[card_index].should_quit = true; // Unblock thread.
- cards[card_index].new_frames_changed.notify_all();
- }
cards[card_index].capture->stop_dequeue_thread();
if (cards[card_index].output) {
cards[card_index].output->end_output();
// NOTE: start_bm_capture() happens in thread_func().
- DeviceSpec device{InputSourceType::CAPTURE_CARD, card_index};
- audio_mixer.reset_resampler(device);
- audio_mixer.set_display_name(device, card->capture->get_description());
- audio_mixer.trigger_state_changed_callback();
+ DeviceSpec device;
+ if (card_type == CardType::FFMPEG_INPUT) {
+ device = DeviceSpec{InputSourceType::FFMPEG_VIDEO_INPUT, card_index - num_cards};
+ } else {
+ device = DeviceSpec{InputSourceType::CAPTURE_CARD, card_index};
+ }
+ audio_mixer->reset_resampler(device);
+ audio_mixer->set_display_name(device, card->capture->get_description());
+ audio_mixer->trigger_state_changed_callback();
// Unregister old metrics, if any.
if (!card->labels.empty()) {
}
}
+// Map a flat card index onto a DeviceSpec. Slots [0, num_cards) are the
+// physical capture cards; anything past that refers to an FFmpeg video
+// input, numbered from zero.
+DeviceSpec card_index_to_device(unsigned card_index, unsigned num_cards)
+{
+	if (card_index < num_cards) {
+		return DeviceSpec{InputSourceType::CAPTURE_CARD, card_index};
+	}
+	return DeviceSpec{InputSourceType::FFMPEG_VIDEO_INPUT, card_index - num_cards};
+}
+
} // namespace
void Mixer::bm_frame(unsigned card_index, uint16_t timecode,
FrameAllocator::Frame video_frame, size_t video_offset, VideoFormat video_format,
FrameAllocator::Frame audio_frame, size_t audio_offset, AudioFormat audio_format)
{
- DeviceSpec device{InputSourceType::CAPTURE_CARD, card_index};
+ DeviceSpec device = card_index_to_device(card_index, num_cards);
CaptureCard *card = &cards[card_index];
++card->metric_input_received_frames;
assert(frame_length > 0);
size_t num_samples = (audio_frame.len > audio_offset) ? (audio_frame.len - audio_offset) / audio_format.num_channels / (audio_format.bits_per_sample / 8) : 0;
- if (num_samples > OUTPUT_FREQUENCY / 10) {
- printf("Card %d: Dropping frame with implausible audio length (len=%d, offset=%d) [timecode=0x%04x video_len=%d video_offset=%d video_format=%x)\n",
- card_index, int(audio_frame.len), int(audio_offset),
+ if (num_samples > OUTPUT_FREQUENCY / 10 && card->type != CardType::FFMPEG_INPUT) {
+ printf("%s: Dropping frame with implausible audio length (len=%d, offset=%d) [timecode=0x%04x video_len=%d video_offset=%d video_format=%x)\n",
+ spec_to_string(device).c_str(), int(audio_frame.len), int(audio_offset),
timecode, int(video_frame.len), int(video_offset), video_format.id);
if (video_frame.owner) {
video_frame.owner->release_frame(video_frame);
const int silence_samples = OUTPUT_FREQUENCY * video_format.frame_rate_den / video_format.frame_rate_nom;
if (dropped_frames > MAX_FPS * 2) {
- fprintf(stderr, "Card %d lost more than two seconds (or time code jumping around; from 0x%04x to 0x%04x), resetting resampler\n",
- card_index, card->last_timecode, timecode);
- audio_mixer.reset_resampler(device);
+ fprintf(stderr, "%s lost more than two seconds (or time code jumping around; from 0x%04x to 0x%04x), resetting resampler\n",
+ spec_to_string(device).c_str(), card->last_timecode, timecode);
+ audio_mixer->reset_resampler(device);
dropped_frames = 0;
++card->metric_input_resets;
} else if (dropped_frames > 0) {
// Insert silence as needed.
- fprintf(stderr, "Card %d dropped %d frame(s) (before timecode 0x%04x), inserting silence.\n",
- card_index, dropped_frames, timecode);
+ fprintf(stderr, "%s dropped %d frame(s) (before timecode 0x%04x), inserting silence.\n",
+ spec_to_string(device).c_str(), dropped_frames, timecode);
card->metric_input_dropped_frames_error += dropped_frames;
bool success;
do {
- success = audio_mixer.add_silence(device, silence_samples, dropped_frames, frame_length);
+ success = audio_mixer->add_silence(device, silence_samples, dropped_frames, frame_length);
} while (!success);
}
if (num_samples > 0) {
- audio_mixer.add_audio(device, audio_frame.data + audio_offset, num_samples, audio_format, frame_length, audio_frame.received_timestamp);
+ audio_mixer->add_audio(device, audio_frame.data + audio_offset, num_samples, audio_format, frame_length, audio_frame.received_timestamp);
}
// Done with the audio, so release it.
if (video_frame.len - video_offset == 0 ||
video_frame.len - video_offset != expected_length) {
if (video_frame.len != 0) {
- printf("Card %d: Dropping video frame with wrong length (%ld; expected %ld)\n",
- card_index, video_frame.len - video_offset, expected_length);
+ printf("%s: Dropping video frame with wrong length (%ld; expected %ld)\n",
+ spec_to_string(device).c_str(), video_frame.len - video_offset, expected_length);
}
if (video_frame.owner) {
video_frame.owner->release_frame(video_frame);
}
}
- BasicStats basic_stats(/*verbose=*/true);
+ BasicStats basic_stats(/*verbose=*/true, /*use_opengl=*/true);
int stats_dropped_frames = 0;
while (!should_quit) {
} else {
master_card_is_output = false;
master_card_index = theme->map_signal(master_clock_channel);
- assert(master_card_index < num_cards);
+ assert(master_card_index < num_cards + num_video_inputs);
}
OutputFrameInfo output_frame_info = get_one_frame_from_each_card(master_card_index, master_card_is_output, new_frames, has_new_frame);
handle_hotplugged_cards();
for (unsigned card_index = 0; card_index < num_cards + num_video_inputs + num_html_inputs; ++card_index) {
+ DeviceSpec device = card_index_to_device(card_index, num_cards);
if (card_index == master_card_index || !has_new_frame[card_index]) {
continue;
}
++new_frames[card_index].dropped_frames;
}
if (new_frames[card_index].dropped_frames > 0) {
- printf("Card %u dropped %d frames before this\n",
- card_index, int(new_frames[card_index].dropped_frames));
+ printf("%s dropped %d frames before this\n",
+ spec_to_string(device).c_str(), int(new_frames[card_index].dropped_frames));
}
}
int64_t frame_duration = output_frame_info.frame_duration;
render_one_frame(frame_duration);
- ++frame_num;
+ {
+ lock_guard<mutex> lock(frame_num_mutex);
+ ++frame_num;
+ }
+ frame_num_updated.notify_all();
pts_int += frame_duration;
basic_stats.update(frame_num, stats_dropped_frames);
CaptureCard *card = &cards[card_index];
if (card->new_frames.empty()) { // Starvation.
++card->metric_input_duped_frames;
+#ifdef HAVE_CEF
if (card->is_cef_capture && card->may_have_dropped_last_frame) {
// Unlike other sources, CEF is not guaranteed to send us a steady
// stream of frames, so we'll have to ask it to repaint the frame
// get a new frame.)
((CEFCapture *)card->capture.get())->request_new_frame();
}
+#endif
} else {
new_frames[card_index] = move(card->new_frames.front());
has_new_frame[card_index] = true;
theme_main_chain.setup_chain();
//theme_main_chain.chain->enable_phase_timing(true);
- // The theme can't (or at least shouldn't!) call connect_signal() on
- // each FFmpeg or CEF input, so we'll do it here.
- for (const pair<LiveInputWrapper *, FFmpegCapture *> &conn : theme->get_video_signal_connections()) {
- conn.first->connect_signal_raw(conn.second->get_card_index(), input_state);
- }
-#ifdef HAVE_CEF
- for (const pair<LiveInputWrapper *, CEFCapture *> &conn : theme->get_html_signal_connections()) {
- conn.first->connect_signal_raw(conn.second->get_card_index(), input_state);
- }
-#endif
-
// If HDMI/SDI output is active and the user has requested auto mode,
// its mode overrides the existing Y'CbCr setting for the chain.
YCbCrLumaCoefficients ycbcr_output_coefficients;
ResamplingQueue::RateAdjustmentPolicy rate_adjustment_policy =
task.adjust_rate ? ResamplingQueue::ADJUST_RATE : ResamplingQueue::DO_NOT_ADJUST_RATE;
- vector<float> samples_out = audio_mixer.get_output(
+ vector<float> samples_out = audio_mixer->get_output(
task.frame_timestamp,
task.num_samples,
rate_adjustment_policy);
return cards[desired_output_card_index].output->get_available_video_modes();
}
+// Returns the filename currently played by the FFmpeg input in the given
+// card slot. Only slots past the physical capture cards hold FFmpeg inputs,
+// which the assert enforces; the downcast is therefore known-safe, so use
+// the greppable static_cast instead of a C-style cast.
+string Mixer::get_ffmpeg_filename(unsigned card_index) const
+{
+	assert(card_index >= num_cards && card_index < num_cards + num_video_inputs);
+	return static_cast<FFmpegCapture *>(cards[card_index].capture.get())->get_filename();
+}
+
+// Switch the FFmpeg input in the given card slot to a new file/URL.
+// Only slots past the physical capture cards hold FFmpeg inputs, which the
+// assert enforces; the downcast is therefore known-safe, so use the
+// greppable static_cast instead of a C-style cast. Brace placement matches
+// the sibling getter and the rest of the file.
+void Mixer::set_ffmpeg_filename(unsigned card_index, const string &filename)
+{
+	assert(card_index >= num_cards && card_index < num_cards + num_video_inputs);
+	static_cast<FFmpegCapture *>(cards[card_index].capture.get())->change_filename(filename);
+}
+
+// Block the caller until the mixer has output at least one more frame than
+// when we entered. The predicate re-checks frame_num under the lock, so
+// spurious wakeups are handled; the one-second timeout is a safety valve
+// (return value deliberately ignored) so a stalled mixer cannot hang the
+// caller forever.
+void Mixer::wait_for_next_frame()
+{
+	unique_lock<mutex> lock(frame_num_mutex);
+	unsigned old_frame_num = frame_num;
+	frame_num_updated.wait_for(lock, seconds(1),  // Timeout is just in case.
+		[old_frame_num, this]{ return this->frame_num > old_frame_num; });
+}
+
Mixer::OutputChannel::~OutputChannel()
{
if (has_current_frame) {