alsadep = dependency('alsa')
bmusbdep = dependency('bmusb', required: not embedded_bmusb)
dldep = cxx.find_library('dl')
+eigendep = dependency('eigen3')
epoxydep = dependency('epoxy')
libavcodecdep = dependency('libavcodec')
libavformatdep = dependency('libavformat')
nageru_deps = [shareddep, qt5deps, libjpegdep, movitdep, protobufdep,
vax11dep, vadrmdep, x11dep, libavformatdep, libswresampledep, libavcodecdep, libavutildep,
libswscaledep, libusbdep, luajitdep, dldep, x264dep, alsadep, zitaresamplerdep,
- qcustomplotdep, threaddep]
+ qcustomplotdep, threaddep, eigendep]
nageru_include_dirs = [include_directories('nageru')]
nageru_link_with = []
nageru_build_rpath = ''
#include <utility>
#include <vector>
+#include <Eigen/Core>
+#include <Eigen/LU>
+#include <movit/colorspace_conversion_effect.h>
+
#include "bmusb/bmusb.h"
#include "shared/ffmpeg_raii.h"
#include "ffmpeg_util.h"
using namespace std::chrono;
using namespace bmusb;
using namespace movit;
+using namespace Eigen;
namespace {
return format;
}
+// Parse the FFmpeg-level "WhitePoint" metadata entry (if present) into a
+// neutral color used for white balance. Returns pure white (1,1,1) — i.e.,
+// "no correction" — when the metadata is missing or cannot be parsed.
+RGBTriplet get_neutral_color(AVDictionary *metadata)
+{
+ if (metadata == nullptr) {
+ return RGBTriplet(1.0f, 1.0f, 1.0f);
+ }
+ AVDictionaryEntry *entry = av_dict_get(metadata, "WhitePoint", nullptr, 0);
+ if (entry == nullptr) {
+ return RGBTriplet(1.0f, 1.0f, 1.0f);
+ }
+
+ // The white point is stored as two rationals, "x_nom:x_den , y_nom:y_den".
+ unsigned x_nom, x_den, y_nom, y_den;
+ if (sscanf(entry->value, " %u:%u , %u:%u", &x_nom, &x_den, &y_nom, &y_den) != 4) {
+ fprintf(stderr, "WARNING: Unable to parse white point '%s', using default white point\n", entry->value);
+ return RGBTriplet(1.0f, 1.0f, 1.0f);
+ }
+
+ // Interpret the two rationals as CIE xy chromaticity coordinates;
+ // z is fixed by x + y + z = 1.
+ double x = double(x_nom) / x_den;
+ double y = double(y_nom) / y_den;
+ double z = 1.0 - x - y;
+
+ // Map the (x, y, z) chromaticity through the inverse of the sRGB
+ // RGB-to-XYZ matrix to get an sRGB triplet for this white point.
+ // (No luminance normalization is done here; presumably only the
+ // relative channel ratios matter downstream — TODO confirm.)
+ Matrix3d rgb_to_xyz_matrix = movit::ColorspaceConversionEffect::get_xyz_matrix(COLORSPACE_sRGB);
+ Vector3d rgb = rgb_to_xyz_matrix.inverse() * Vector3d(x, y, z);
+
+ return RGBTriplet(rgb[0], rgb[1], rgb[2]);
+}
+
} // namespace
FFmpegCapture::FFmpegCapture(const string &filename, unsigned width, unsigned height)
// audio discontinuity.)
timecode += MAX_FPS * 2 + 1;
}
+ last_neutral_color = get_neutral_color(frame->metadata);
frame_callback(frame->pts, video_timebase, audio_pts, audio_timebase, timecode++,
video_frame.get_and_release(), 0, video_format,
audio_frame.get_and_release(), 0, audio_format);
#include <string>
#include <thread>
+#include <movit/effect.h>
#include <movit/ycbcr.h>
extern "C" {
return has_last_subtitle;
}
+ // Same caveat as the getter above. Returns the neutral color (stream
+ // white point) most recently parsed from the incoming frame metadata;
+ // see last_neutral_color.
+ movit::RGBTriplet get_last_neutral_color() const
+ {
+ return last_neutral_color;
+ }
+
void set_dequeue_thread_callbacks(std::function<void()> init, std::function<void()> cleanup) override
{
dequeue_init_callback = init;
// Subtitles (no decoding done, really).
bool has_last_subtitle = false;
std::string last_subtitle;
+
+ movit::RGBTriplet last_neutral_color;
};
#endif // !defined(_FFMPEG_CAPTURE_H)
ycbcr_format.cb_y_position = 0.5f;
ycbcr_format.cr_y_position = 0.5f;
+ // Initialize the neutral colors to sane values.
+ for (unsigned i = 0; i < MAX_VIDEO_CARDS; ++i) {
+ last_received_neutral_color[i] = RGBTriplet(1.0f, 1.0f, 1.0f);
+ }
+
// Display chain; shows the live output produced by the main chain (or rather, a copy of it).
display_chain.reset(new EffectChain(global_flags.width, global_flags.height, resource_pool.get()));
check_error();
new_frame.video_format = video_format;
new_frame.y_offset = y_offset;
new_frame.cbcr_offset = cbcr_offset;
+ if (card->type == CardType::FFMPEG_INPUT) {
+ FFmpegCapture *ffmpeg_capture = static_cast<FFmpegCapture *>(card->capture.get());
+ new_frame.neutral_color = ffmpeg_capture->get_last_neutral_color();
+ }
card->new_frames.push_back(move(new_frame));
card->jitter_history.frame_arrived(video_frame.received_timestamp, frame_length, dropped_frames);
card->may_have_dropped_last_frame = false;
new_frame->upload_func = nullptr;
}
+ // Only set the white balance if it actually changed. This means that the user
+ // is free to override the white balance in a video with no white balance information
+ // actually set (ie. r=g=b=1 all the time), or one where the white point is wrong,
+ // but frame-to-frame decisions will be heeded. We do this pretty much as late
+ // as possible (ie., after picking out the frame from the buffer), so that we are sure
+ // that the change takes effect on exactly the right frame.
+ if (fabs(new_frame->neutral_color.r - last_received_neutral_color[card_index].r) > 1e-3 ||
+ fabs(new_frame->neutral_color.g - last_received_neutral_color[card_index].g) > 1e-3 ||
+ fabs(new_frame->neutral_color.b - last_received_neutral_color[card_index].b) > 1e-3) {
+ theme->set_wb_for_signal(card_index, new_frame->neutral_color.r, new_frame->neutral_color.g, new_frame->neutral_color.b);
+ last_received_neutral_color[card_index] = new_frame->neutral_color;
+ }
+
if (new_frame->frame->data_copy != nullptr) {
int mjpeg_card_index = mjpeg_encoder->get_mjpeg_stream_for_card(card_index);
if (mjpeg_card_index != -1) {
- RGBTriplet white_balance = theme->get_white_balance_for_signal(card_index);
- mjpeg_encoder->upload_frame(pts_int, mjpeg_card_index, new_frame->frame, new_frame->video_format, new_frame->y_offset, new_frame->cbcr_offset, move(raw_audio[card_index]), white_balance);
+ RGBTriplet neutral_color = theme->get_white_balance_for_signal(card_index);
+ mjpeg_encoder->upload_frame(pts_int, mjpeg_card_index, new_frame->frame, new_frame->video_format, new_frame->y_offset, new_frame->cbcr_offset, move(raw_audio[card_index]), neutral_color);
}
}
+
}
int64_t frame_duration = output_frame_info.frame_duration;
#include <thread>
#include <vector>
+#include <movit/effect.h>
#include <movit/image_format.h>
#include "audio_mixer.h"
std::function<void()> upload_func; // Needs to be called to actually upload the texture to OpenGL.
unsigned dropped_frames = 0; // Number of dropped frames before this one.
std::chrono::steady_clock::time_point received_timestamp = std::chrono::steady_clock::time_point::min();
+ movit::RGBTriplet neutral_color{1.0f, 1.0f, 1.0f};
// Used for MJPEG encoding. (upload_func packs everything it needs
// into the functor, but would otherwise also use these.)
JitterHistory output_jitter_history;
CaptureCard cards[MAX_VIDEO_CARDS]; // Protected by <card_mutex>.
YCbCrInterpretation ycbcr_interpretation[MAX_VIDEO_CARDS]; // Protected by <card_mutex>.
+ movit::RGBTriplet last_received_neutral_color[MAX_VIDEO_CARDS]; // Used by the mixer thread only. Constructor-initialized.
std::unique_ptr<AudioMixer> audio_mixer; // Same as global_audio_mixer (see audio_mixer.h).
bool input_card_is_master_clock(unsigned card_index, unsigned master_card_index) const;
struct OutputFrameInfo {
unsigned last_frame_rate_nom, last_frame_rate_den;
bool has_last_subtitle = false;
std::string last_subtitle;
+ movit::RGBTriplet white_balance{1.0f, 1.0f, 1.0f};
// These are the source of the “data_copy” member in Frame,
// used for MJPEG encoding. There are three possibilities:
// Set the white balance for the given channel's current signal (if the
// channel is mapped to one), then notify the theme via its Lua "set_wb"
// callback so it can update its effect chains.
void Theme::set_wb(unsigned channel, float r, float g, float b)
{
lock_guard<mutex> lock(m);
-
if (channel_signals.count(channel)) {
white_balance_for_signal[channel_signals[channel]] = RGBTriplet{ r, g, b };
}
+ call_lua_wb_callback(channel, r, g, b);
+}
+
+// Like set_wb(), but addressed by signal rather than by channel: stores the
+// white balance for the signal, then invokes the Lua "set_wb" callback for
+// every channel currently mapped to that signal.
+void Theme::set_wb_for_signal(int signal, float r, float g, float b)
+{
+ lock_guard<mutex> lock(m);
+ white_balance_for_signal[signal] = RGBTriplet{ r, g, b };
+
+ // channel_signals maps channel -> signal; fan the update out to all
+ // channels showing this signal.
+ for (const auto &channel_and_signal : channel_signals) {
+ if (channel_and_signal.second == signal) {
+ call_lua_wb_callback(channel_and_signal.first, r, g, b);
+ }
+ }
+}
+
+void Theme::call_lua_wb_callback(unsigned channel, float r, float g, float b)
+{
lua_getglobal(L, "set_wb");
if (lua_isnil(L, -1)) {
// The function doesn't exist, so just ignore. We've stored the white balance,
int get_channel_signal(unsigned channel);
bool get_supports_set_wb(unsigned channel);
void set_wb(unsigned channel, float r, float g, float b);
+ void set_wb_for_signal(int signal, float r, float g, float b);
movit::RGBTriplet get_white_balance_for_signal(int signal);
std::string get_channel_color(unsigned channel);
void register_class(const char *class_name, const luaL_Reg *funcs, EffectType effect_type = NO_EFFECT_TYPE);
int set_theme_menu(lua_State *L);
Chain get_chain_from_effect_chain(movit::EffectChain *effect_chain, unsigned num, const InputState &input_state);
+ void call_lua_wb_callback(unsigned channel, float r, float g, float b);
std::string theme_path;