assert(false);
}
- if (first ||
- width != userdata->last_width[field] ||
- height != userdata->last_height[field] ||
- cbcr_width != userdata->last_cbcr_width[field] ||
- cbcr_height != userdata->last_cbcr_height[field]) {
+ const bool recreate_main_texture =
+ first ||
+ width != userdata->last_width[field] ||
+ height != userdata->last_height[field] ||
+ cbcr_width != userdata->last_cbcr_width[field] ||
+ cbcr_height != userdata->last_cbcr_height[field];
+ const bool recreate_v210_texture =
+ global_flags.ten_bit_input &&
+ (first || v210_width != userdata->last_v210_width[field] || height != userdata->last_height[field]);
+
+ if (recreate_main_texture) {
		// We changed resolution since last use of this texture, so we need to create
		// a new object. Note that since each card has its own PBOFrameAllocator,
		// we don't need to worry about it flip-flopping between resolutions.
userdata->last_cbcr_width[field] = cbcr_width;
userdata->last_cbcr_height[field] = cbcr_height;
}
- if (global_flags.ten_bit_input &&
- (first || v210_width != userdata->last_v210_width[field])) {
+ if (recreate_v210_texture) {
// Same as above; we need to recreate the texture.
glBindTexture(GL_TEXTURE_2D, userdata->tex_v210[field]);
check_error();
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGB10_A2, v210_width, height, 0, GL_RGBA, GL_UNSIGNED_INT_2_10_10_10_REV, nullptr);
check_error();
userdata->last_v210_width[field] = v210_width;
+ userdata->last_height[field] = height;
}
}
ycbcr_format.cb_y_position = 0.5f;
ycbcr_format.cr_y_position = 0.5f;
+ // Initialize the neutral colors to sane values.
+ for (unsigned i = 0; i < MAX_VIDEO_CARDS; ++i) {
+ last_received_neutral_color[i] = RGBTriplet(1.0f, 1.0f, 1.0f);
+ }
+
// Display chain; shows the live output produced by the main chain (or rather, a copy of it).
display_chain.reset(new EffectChain(global_flags.width, global_flags.height, resource_pool.get()));
check_error();
card->capture->set_frame_callback(bind(&Mixer::bm_frame, this, card_index, _1, _2, _3, _4, _5, _6, _7));
if (card->frame_allocator == nullptr) {
card->frame_allocator.reset(new PBOFrameAllocator(pixel_format, 8 << 20, global_flags.width, global_flags.height, card_index, mjpeg_encoder.get())); // 8 MB.
+ } else {
+ // The format could have changed, but we cannot reset the allocator
+ // and create a new one from scratch, since there may be allocated
+ // frames from it that expect to call release_frame() on it.
+ // Instead, ask the allocator to create new frames for us and discard
+ // any old ones as they come back. This takes the mutex while
+ // allocating, but nothing should really be sending frames in there
+ // right now anyway (start_bm_capture() has not been called yet).
+ card->frame_allocator->reconfigure(pixel_format, 8 << 20, global_flags.width, global_flags.height, card_index, mjpeg_encoder.get());
}
card->capture->set_video_frame_allocator(card->frame_allocator.get());
if (card->surface == nullptr) {
//
// Only bother doing MJPEG encoding if there are any connected clients
// that want the stream.
- if (httpd.get_num_connected_multicam_clients() > 0) {
+ if (httpd.get_num_connected_multicam_clients() > 0 ||
+ httpd.get_num_connected_siphon_clients(card_index) > 0) {
vector<int32_t> converted_samples = convert_audio_to_fixed32(audio_frame.data + audio_offset, num_samples, audio_format, 2);
lock_guard<mutex> lock(card_mutex);
if (card->new_raw_audio.empty()) {
new_frame.video_format = video_format;
new_frame.y_offset = y_offset;
new_frame.cbcr_offset = cbcr_offset;
+ if (card->type == CardType::FFMPEG_INPUT) {
+ FFmpegCapture *ffmpeg_capture = static_cast<FFmpegCapture *>(card->capture.get());
+ new_frame.neutral_color = ffmpeg_capture->get_last_neutral_color();
+ }
card->new_frames.push_back(move(new_frame));
card->jitter_history.frame_arrived(video_frame.received_timestamp, frame_length, dropped_frames);
card->may_have_dropped_last_frame = false;
master_card_index = output_card_index;
} else {
master_card_is_output = false;
- master_card_index = theme->map_signal(master_clock_channel);
+ master_card_index = theme->map_signal_to_card(master_clock_channel);
assert(master_card_index < num_cards + num_video_inputs);
}
new_frame->upload_func = nullptr;
}
- if (new_frame->frame->data_copy != nullptr) {
- int mjpeg_card_index = mjpeg_encoder->get_mjpeg_stream_for_card(card_index);
- if (mjpeg_card_index != -1) {
- mjpeg_encoder->upload_frame(pts_int, mjpeg_card_index, new_frame->frame, new_frame->video_format, new_frame->y_offset, new_frame->cbcr_offset, move(raw_audio[card_index]));
- }
+ // Only set the white balance if it actually changed. This means that the user
+ // is free to override the white balance in a video with no white balance information
+ // actually set (ie. r=g=b=1 all the time), or one where the white point is wrong,
+ // but frame-to-frame decisions will be heeded. We do this pretty much as late
+ // as possible (ie., after picking out the frame from the buffer), so that we are sure
+ // that the change takes effect on exactly the right frame.
+ if (fabs(new_frame->neutral_color.r - last_received_neutral_color[card_index].r) > 1e-3 ||
+ fabs(new_frame->neutral_color.g - last_received_neutral_color[card_index].g) > 1e-3 ||
+ fabs(new_frame->neutral_color.b - last_received_neutral_color[card_index].b) > 1e-3) {
+ theme->set_wb_for_card(card_index, new_frame->neutral_color.r, new_frame->neutral_color.g, new_frame->neutral_color.b);
+ last_received_neutral_color[card_index] = new_frame->neutral_color;
+ }
+
+ if (new_frame->frame->data_copy != nullptr && mjpeg_encoder->should_encode_mjpeg_for_card(card_index)) {
+ RGBTriplet neutral_color = theme->get_white_balance_for_card(card_index);
+ mjpeg_encoder->upload_frame(pts_int, card_index, new_frame->frame, new_frame->video_format, new_frame->y_offset, new_frame->cbcr_offset, move(raw_audio[card_index]), neutral_color);
}
+
}
int64_t frame_duration = output_frame_info.frame_duration;
pair<string, string> Mixer::get_channels_json()
{
Channels ret;
- for (int channel_idx = 2; channel_idx < theme->get_num_channels(); ++channel_idx) {
+ for (int channel_idx = 0; channel_idx < theme->get_num_channels(); ++channel_idx) {
Channel *channel = ret.add_channel();
- channel->set_index(channel_idx);
- channel->set_name(theme->get_channel_name(channel_idx));
- channel->set_color(theme->get_channel_color(channel_idx));
+ channel->set_index(channel_idx + 2);
+ channel->set_name(theme->get_channel_name(channel_idx + 2));
+ channel->set_color(theme->get_channel_color(channel_idx + 2));
}
string contents;
google::protobuf::util::MessageToJsonString(ret, &contents); // Ignore any errors.