#include <assert.h>
#include <epoxy/egl.h>
+#include <movit/effect.h>
#include <movit/effect_chain.h>
#include <movit/effect_util.h>
#include <movit/flat_input.h>
#include "shared/disk_space_estimator.h"
#include "ffmpeg_capture.h"
#include "flags.h"
+#include "image_input.h"
#include "input_mapping.h"
#include "shared/metrics.h"
#include "mjpeg_encoder.h"
case PixelFormat_8BitBGRA:
glBindTexture(GL_TEXTURE_2D, userdata->tex_rgba[field]);
check_error();
- if (global_flags.can_disable_srgb_decoder) { // See the comments in tweaked_inputs.h.
- glTexImage2D(GL_TEXTURE_2D, 0, GL_SRGB8_ALPHA8, width, height, 0, GL_BGRA, GL_UNSIGNED_INT_8_8_8_8_REV, nullptr);
- } else {
- glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA8, width, height, 0, GL_BGRA, GL_UNSIGNED_INT_8_8_8_8_REV, nullptr);
- }
+ // NOTE: sRGB decoding may be switched off again at sampling time by
+ // sRGBSwitchingFlatInput; see the sketch below.
+ glTexImage2D(GL_TEXTURE_2D, 0, GL_SRGB8_ALPHA8, width, height, 0, GL_BGRA, GL_UNSIGNED_INT_8_8_8_8_REV, nullptr);
check_error();
break;
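+ // Illustrative sketch (not Nageru's actual code): given
+ // GL_EXT_texture_sRGB_decode and GL_ARB_sampler_objects, an input such as
+ // sRGBSwitchingFlatInput can switch decoding back off at sampling time
+ // through a sampler object, so the GL_SRGB8_ALPHA8 texture above is then
+ // sampled as-is, without sRGB-to-linear conversion:
+ //
+ //   GLuint sampler;
+ //   glGenSamplers(1, &sampler);
+ //   glSamplerParameteri(sampler, GL_TEXTURE_SRGB_DECODE_EXT, GL_SKIP_DECODE_EXT);
+ //   glBindSampler(/*unit=*/0, sampler);  // The texture unit here is hypothetical.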
default:
num_cards(num_cards),
mixer_surface(create_surface(format)),
h264_encoder_surface(create_surface(format)),
- decklink_output_surface(create_surface(format))
+ decklink_output_surface(create_surface(format)),
+ image_update_surface(create_surface(format))
{
memcpy(ycbcr_interpretation, global_flags.ycbcr_interpretation, sizeof(ycbcr_interpretation));
CHECK(init_movit(MOVIT_SHADER_DIR, MOVIT_DEBUG_OFF));
check_error();
- // This nearly always should be true.
- global_flags.can_disable_srgb_decoder =
- epoxy_has_gl_extension("GL_EXT_texture_sRGB_decode") &&
- epoxy_has_gl_extension("GL_ARB_sampler_objects");
+ if (!epoxy_has_gl_extension("GL_EXT_texture_sRGB_decode") ||
+ !epoxy_has_gl_extension("GL_ARB_sampler_objects")) {
+ fprintf(stderr, "Nageru requires GL_EXT_texture_sRGB_decode and GL_ARB_sampler_objects to run.\n");
+ exit(1);
+ }
// Since we allow non-bouncing 4:2:2 YCbCrInputs, effective subpixel precision
// will be halved when sampling them, and we need to compensate here.
ycbcr_format.cb_y_position = 0.5f;
ycbcr_format.cr_y_position = 0.5f;
+ // Initialize the per-card neutral colors to sane values; (1.0, 1.0, 1.0)
+ // means the white point is already neutral, so no adjustment is applied.
+ for (unsigned i = 0; i < MAX_VIDEO_CARDS; ++i) {
+ last_received_neutral_color[i] = RGBTriplet(1.0f, 1.0f, 1.0f);
+ }
+
// Display chain; shows the live output produced by the main chain (or rather, a copy of it).
display_chain.reset(new EffectChain(global_flags.width, global_flags.height, resource_pool.get()));
check_error();
audio_mixer.reset(new AudioMixer(num_cards, video_inputs.size()));
httpd.add_endpoint("/channels", bind(&Mixer::get_channels_json, this), HTTPD::ALLOW_ALL_ORIGINS);
- for (int channel_idx = 2; channel_idx < theme->get_num_channels(); ++channel_idx) {
+ for (int channel_idx = 0; channel_idx < theme->get_num_channels(); ++channel_idx) {
char url[256];
- snprintf(url, sizeof(url), "/channels/%d/color", channel_idx);
- httpd.add_endpoint(url, bind(&Mixer::get_channel_color_http, this, unsigned(channel_idx)), HTTPD::ALLOW_ALL_ORIGINS);
+ snprintf(url, sizeof(url), "/channels/%d/color", channel_idx + 2);
+ httpd.add_endpoint(url, bind(&Mixer::get_channel_color_http, this, unsigned(channel_idx + 2)), HTTPD::ALLOW_ALL_ORIGINS);
}
// Start listening for clients only once VideoEncoder has written its header, if any.
}
output_jitter_history.register_metrics({{ "card", "output" }});
+
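+ // Give ImageInput's update thread its own EGL surface, so that it can
+ // (re)load image files on a GL context separate from the mixer's.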
+ ImageInput::start_update_thread(image_update_surface);
}
Mixer::~Mixer()
{
+ ImageInput::end_update_thread();
+
if (mjpeg_encoder != nullptr) {
mjpeg_encoder->stop();
}
//
// Only bother doing MJPEG encoding if there are any connected clients
// that want the stream, either the full multicam view or a per-card siphon.
- if (httpd.get_num_connected_multicam_clients() > 0) {
+ if (httpd.get_num_connected_multicam_clients() > 0 ||
+ httpd.get_num_connected_siphon_clients(card_index) > 0) {
vector<int32_t> converted_samples = convert_audio_to_fixed32(audio_frame.data + audio_offset, num_samples, audio_format, 2);
lock_guard<mutex> lock(card_mutex);
if (card->new_raw_audio.empty()) {
new_frame.video_format = video_format;
new_frame.y_offset = y_offset;
new_frame.cbcr_offset = cbcr_offset;
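+ // FFmpeg inputs can carry a per-frame white point; pick up the most
+ // recently seen one here, so that it travels together with this frame.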
+ if (card->type == CardType::FFMPEG_INPUT) {
+ FFmpegCapture *ffmpeg_capture = static_cast<FFmpegCapture *>(card->capture.get());
+ new_frame.neutral_color = ffmpeg_capture->get_last_neutral_color();
+ }
card->new_frames.push_back(move(new_frame));
card->jitter_history.frame_arrived(video_frame.received_timestamp, frame_length, dropped_frames);
card->may_have_dropped_last_frame = false;
master_card_index = output_card_index;
} else {
master_card_is_output = false;
- master_card_index = theme->map_signal(master_clock_channel);
+ master_card_index = theme->map_signal_to_card(master_clock_channel);
assert(master_card_index < num_cards + num_video_inputs);
}
new_frame->upload_func = nullptr;
}
- if (new_frame->frame->data_copy != nullptr) {
- int mjpeg_card_index = mjpeg_encoder->get_mjpeg_stream_for_card(card_index);
- if (mjpeg_card_index != -1) {
- mjpeg_encoder->upload_frame(pts_int, mjpeg_card_index, new_frame->frame, new_frame->video_format, new_frame->y_offset, new_frame->cbcr_offset, move(raw_audio[card_index]));
- }
+ // Only set the white balance if it actually changed. This means that the user
+ // is free to override the white balance in a video with no white balance information
+ // actually set (i.e., r=g=b=1 all the time), or one where the white point is wrong,
+ // but frame-to-frame decisions will be heeded. We do this pretty much as late
+ // as possible (i.e., after picking the frame out of the buffer), so that we are sure
+ // that the change takes effect on exactly the right frame.
+ if (fabs(new_frame->neutral_color.r - last_received_neutral_color[card_index].r) > 1e-3 ||
+ fabs(new_frame->neutral_color.g - last_received_neutral_color[card_index].g) > 1e-3 ||
+ fabs(new_frame->neutral_color.b - last_received_neutral_color[card_index].b) > 1e-3) {
+ theme->set_wb_for_card(card_index, new_frame->neutral_color.r, new_frame->neutral_color.g, new_frame->neutral_color.b);
+ last_received_neutral_color[card_index] = new_frame->neutral_color;
}
+
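+ // Pass the current per-card white point along with the frame, so that
+ // consumers of the MJPEG stream can apply the same white balance.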
+ if (new_frame->frame->data_copy != nullptr && mjpeg_encoder->should_encode_mjpeg_for_card(card_index)) {
+ RGBTriplet neutral_color = theme->get_white_balance_for_card(card_index);
+ mjpeg_encoder->upload_frame(pts_int, card_index, new_frame->frame, new_frame->video_format, new_frame->y_offset, new_frame->cbcr_offset, move(raw_audio[card_index]), neutral_color);
+ }
+
}
int64_t frame_duration = output_frame_info.frame_duration;
pair<string, string> Mixer::get_channels_json()
{
Channels ret;
- for (int channel_idx = 2; channel_idx < theme->get_num_channels(); ++channel_idx) {
+ for (int channel_idx = 0; channel_idx < theme->get_num_channels(); ++channel_idx) {
Channel *channel = ret.add_channel();
- channel->set_index(channel_idx);
- channel->set_name(theme->get_channel_name(channel_idx));
- channel->set_color(theme->get_channel_color(channel_idx));
+ channel->set_index(channel_idx + 2);
+ channel->set_name(theme->get_channel_name(channel_idx + 2));
+ channel->set_color(theme->get_channel_color(channel_idx + 2));
}
string contents;
google::protobuf::util::MessageToJsonString(ret, &contents); // Ignore any errors.
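+ // Illustrative result only (field naming as per MessageToJsonString's
+ // defaults; the names and colors come from the theme):
+ //
+ //   { "channel": [ { "index": 2, "name": "Main", "color": "#ffffff" }, ... ] }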
// we dropped. (may_have_dropped_last_frame is set whenever we
// trim the queue completely away, and cleared when we actually
// get a new frame.)
- ((CEFCapture *)card->capture.get())->request_new_frame();
+ ((CEFCapture *)card->capture.get())->request_new_frame(/*ignore_if_locked=*/true);
}
#endif
} else {