#include <epoxy/gl.h>
#include <lauxlib.h>
#include <lua.hpp>
+#include <movit/deinterlace_effect.h>
#include <movit/effect.h>
#include <movit/effect_chain.h>
#include <movit/image_format.h>
+#include <movit/input.h>
#include <movit/mix_effect.h>
#include <movit/multiply_effect.h>
#include <movit/overlay_effect.h>
#include <utility>
#include "defs.h"
-#include "deinterlace_effect.h"
+#ifdef HAVE_CEF
+#include "cef_capture.h"
+#endif
#include "ffmpeg_capture.h"
#include "flags.h"
#include "image_input.h"
-#include "input.h"
#include "input_state.h"
#include "pbo_frame_allocator.h"
// doesn't care about the object anymore. (If we change this, we'd need
// to also unregister the signal connection on __gc.)
int ret = wrap_lua_object_nonowned<LiveInputWrapper>(
- L, "LiveInputWrapper", theme, chain, bmusb::PixelFormat_8BitBGRA,
+ L, "LiveInputWrapper", theme, chain, (*capture)->get_current_pixel_format(),
/*override_bounce=*/false, deinterlace);
if (ret == 1) {
Theme *theme = get_theme_updata(L);
LiveInputWrapper **live_input = (LiveInputWrapper **)lua_touserdata(L, -1);
- theme->register_signal_connection(*live_input, *capture);
+ theme->register_video_signal_connection(*live_input, *capture);
}
return ret;
}
+#ifdef HAVE_CEF
+// Lua binding: EffectChain:add_html_input(html_input).
+// Hooks a CEF-backed HTML capture (HTMLInput userdata) into this EffectChain
+// through a LiveInputWrapper, mirroring add_live_input/add_video_input.
+// HTML frames are progressive, so deinterlacing is always off here.
+int EffectChain_add_html_input(lua_State* L)
+{
+	assert(lua_gettop(L) == 2);
+	Theme *theme = get_theme_updata(L);
+	EffectChain *chain = (EffectChain *)luaL_checkudata(L, 1, "EffectChain");
+	CEFCapture **capture = (CEFCapture **)luaL_checkudata(L, 2, "HTMLInput");
+
+	// These need to be nonowned, so that the LiveInputWrapper still exists
+	// and can feed frames to the right EffectChain even if the Lua code
+	// doesn't care about the object anymore. (If we change this, we'd need
+	// to also unregister the signal connection on __gc.)
+	int ret = wrap_lua_object_nonowned<LiveInputWrapper>(
+		L, "LiveInputWrapper", theme, chain, (*capture)->get_current_pixel_format(),
+		/*override_bounce=*/false, /*deinterlace=*/false);
+	if (ret == 1) {
+		Theme *theme = get_theme_updata(L);
+		LiveInputWrapper **live_input = (LiveInputWrapper **)lua_touserdata(L, -1);
+		theme->register_html_signal_connection(*live_input, *capture);
+	}
+	return ret;
+}
+#endif
+
int EffectChain_add_effect(lua_State* L)
{
assert(lua_gettop(L) >= 2);
int VideoInput_new(lua_State* L)
{
- assert(lua_gettop(L) == 1);
+ assert(lua_gettop(L) == 2);
string filename = checkstdstring(L, 1);
+ int pixel_format = luaL_checknumber(L, 2);
+ if (pixel_format != bmusb::PixelFormat_8BitYCbCrPlanar &&
+ pixel_format != bmusb::PixelFormat_8BitBGRA) {
+ fprintf(stderr, "WARNING: Invalid enum %d used for video format, choosing Y'CbCr.\n",
+ pixel_format);
+ pixel_format = bmusb::PixelFormat_8BitYCbCrPlanar;
+ }
int ret = wrap_lua_object_nonowned<FFmpegCapture>(L, "VideoInput", filename, global_flags.width, global_flags.height);
if (ret == 1) {
- Theme *theme = get_theme_updata(L);
FFmpegCapture **capture = (FFmpegCapture **)lua_touserdata(L, -1);
+ (*capture)->set_pixel_format(bmusb::PixelFormat(pixel_format));
+
+ Theme *theme = get_theme_updata(L);
theme->register_video_input(*capture);
}
return ret;
return 0;
}
+// Lua binding: VideoInput:get_signal_num().
+// Returns the signal number for this FFmpeg input, encoded as a negative
+// value (-1 - card_index); Theme::map_signal() decodes negatives back to
+// raw card indices, bypassing the user-visible signal mapping.
+int VideoInput_get_signal_num(lua_State* L)
+{
+	assert(lua_gettop(L) == 1);
+	FFmpegCapture **video_input = (FFmpegCapture **)luaL_checkudata(L, 1, "VideoInput");
+	lua_pushnumber(L, -1 - (*video_input)->get_card_index());
+	return 1;
+}
+
+// Lua binding: HTMLInput.new(url).
+// Creates a CEF capture rendering the given URL at the global output
+// resolution and registers it with the theme. If Nageru was compiled
+// without CEF, prints a diagnostic and aborts instead of returning,
+// so themes fail loudly rather than getting a broken input.
+int HTMLInput_new(lua_State* L)
+{
+#ifdef HAVE_CEF
+	assert(lua_gettop(L) == 1);
+	string url = checkstdstring(L, 1);
+	int ret = wrap_lua_object_nonowned<CEFCapture>(L, "HTMLInput", url, global_flags.width, global_flags.height);
+	if (ret == 1) {
+		CEFCapture **capture = (CEFCapture **)lua_touserdata(L, -1);
+		Theme *theme = get_theme_updata(L);
+		theme->register_html_input(*capture);
+	}
+	return ret;
+#else
+	fprintf(stderr, "This version of Nageru has been compiled without CEF support.\n");
+	fprintf(stderr, "HTMLInput is not available.\n");
+	exit(1);
+#endif
+}
+
+#ifdef HAVE_CEF
+// Lua binding: HTMLInput:set_url(url). Navigates the CEF browser to a new page.
+int HTMLInput_set_url(lua_State* L)
+{
+	assert(lua_gettop(L) == 2);
+	CEFCapture **video_input = (CEFCapture **)luaL_checkudata(L, 1, "HTMLInput");
+	string new_url = checkstdstring(L, 2);
+	(*video_input)->set_url(new_url);
+	return 0;
+}
+
+// Lua binding: HTMLInput:reload(). Reloads the currently displayed page.
+int HTMLInput_reload(lua_State* L)
+{
+	assert(lua_gettop(L) == 1);
+	CEFCapture **video_input = (CEFCapture **)luaL_checkudata(L, 1, "HTMLInput");
+	(*video_input)->reload();
+	return 0;
+}
+
+// Lua binding: HTMLInput:set_max_fps(fps).
+// Caps the CEF repaint rate; the Lua number is rounded to the nearest integer.
+int HTMLInput_set_max_fps(lua_State* L)
+{
+	assert(lua_gettop(L) == 2);
+	CEFCapture **video_input = (CEFCapture **)luaL_checkudata(L, 1, "HTMLInput");
+	int max_fps = lrint(luaL_checknumber(L, 2));
+	(*video_input)->set_max_fps(max_fps);
+	return 0;
+}
+
+// Lua binding: HTMLInput:execute_javascript_async(js).
+// Queues a JavaScript snippet to run in the page; returns immediately
+// without waiting for (or exposing) the script's result.
+int HTMLInput_execute_javascript_async(lua_State* L)
+{
+	assert(lua_gettop(L) == 2);
+	CEFCapture **video_input = (CEFCapture **)luaL_checkudata(L, 1, "HTMLInput");
+	string js = checkstdstring(L, 2);
+	(*video_input)->execute_javascript_async(js);
+	return 0;
+}
+
+// Lua binding: HTMLInput:get_signal_num().
+// Same negative encoding as VideoInput:get_signal_num(): -1 - card_index,
+// which Theme::map_signal() recognizes as a raw (unmapped) signal.
+int HTMLInput_get_signal_num(lua_State* L)
+{
+	assert(lua_gettop(L) == 1);
+	CEFCapture **video_input = (CEFCapture **)luaL_checkudata(L, 1, "HTMLInput");
+	lua_pushnumber(L, -1 - (*video_input)->get_card_index());
+	return 1;
+}
+#endif
+
int WhiteBalanceEffect_new(lua_State* L)
{
assert(lua_gettop(L) == 0);
{ "__gc", EffectChain_gc },
{ "add_live_input", EffectChain_add_live_input },
{ "add_video_input", EffectChain_add_video_input },
+#ifdef HAVE_CEF
+ { "add_html_input", EffectChain_add_html_input },
+#endif
{ "add_effect", EffectChain_add_effect },
{ "finalize", EffectChain_finalize },
{ NULL, NULL }
{ "new", VideoInput_new },
{ "rewind", VideoInput_rewind },
{ "change_rate", VideoInput_change_rate },
+ { "get_signal_num", VideoInput_get_signal_num },
+ { NULL, NULL }
+};
+
+// Method table for the HTMLInput Lua class. "new" is registered even in
+// non-CEF builds so that themes get the explicit runtime error from
+// HTMLInput_new (instead of an opaque nil-index error); all other methods
+// exist only when compiled with CEF support.
+const luaL_Reg HTMLInput_funcs[] = {
+	{ "new", HTMLInput_new },
+#ifdef HAVE_CEF
+	{ "set_url", HTMLInput_set_url },
+	{ "reload", HTMLInput_reload },
+	{ "set_max_fps", HTMLInput_set_max_fps },
+	{ "execute_javascript_async", HTMLInput_execute_javascript_async },
+	{ "get_signal_num", HTMLInput_get_signal_num },
+#endif
	{ NULL, NULL }
};
chain->add_effect(deinterlace_effect, reverse_inputs);
}
} else {
- assert(pixel_format == bmusb::PixelFormat_8BitYCbCr || pixel_format == bmusb::PixelFormat_10BitYCbCr);
- // The Blackmagic driver docs claim that the device outputs Y'CbCr
- // according to Rec. 601, but practical testing indicates it definitely
- // is Rec. 709 (at least up to errors attributable to rounding errors).
- // Perhaps 601 was only to indicate the subsampling positions, not the
- // colorspace itself? Tested with a Lenovo X1 gen 3 as input.
- YCbCrFormat input_ycbcr_format;
+ assert(pixel_format == bmusb::PixelFormat_8BitYCbCr ||
+ pixel_format == bmusb::PixelFormat_10BitYCbCr ||
+ pixel_format == bmusb::PixelFormat_8BitYCbCrPlanar);
+
+ // Most of these settings will be overridden later if using PixelFormat_8BitYCbCrPlanar.
input_ycbcr_format.chroma_subsampling_x = (pixel_format == bmusb::PixelFormat_10BitYCbCr) ? 1 : 2;
input_ycbcr_format.chroma_subsampling_y = 1;
input_ycbcr_format.num_levels = (pixel_format == bmusb::PixelFormat_10BitYCbCr) ? 1024 : 256;
input_ycbcr_format.cr_x_position = 0.0;
input_ycbcr_format.cb_y_position = 0.5;
input_ycbcr_format.cr_y_position = 0.5;
- input_ycbcr_format.luma_coefficients = YCBCR_REC_709;
- input_ycbcr_format.full_range = false;
+ input_ycbcr_format.luma_coefficients = YCBCR_REC_709; // Will be overridden later even if not planar.
+ input_ycbcr_format.full_range = false; // Will be overridden later even if not planar.
for (unsigned i = 0; i < num_inputs; ++i) {
// When using 10-bit input, we're converting to interleaved through v210Converter.
- YCbCrInputSplitting splitting = (pixel_format == bmusb::PixelFormat_10BitYCbCr) ? YCBCR_INPUT_INTERLEAVED : YCBCR_INPUT_SPLIT_Y_AND_CBCR;
+ YCbCrInputSplitting splitting;
+ if (pixel_format == bmusb::PixelFormat_10BitYCbCr) {
+ splitting = YCBCR_INPUT_INTERLEAVED;
+ } else if (pixel_format == bmusb::PixelFormat_8BitYCbCr) {
+ splitting = YCBCR_INPUT_SPLIT_Y_AND_CBCR;
+ } else {
+ splitting = YCBCR_INPUT_PLANAR;
+ }
if (override_bounce) {
ycbcr_inputs.push_back(new NonBouncingYCbCrInput(inout_format, input_ycbcr_format, global_flags.width, global_flags.height, splitting));
} else {
}
signal_num = theme->map_signal(signal_num);
- connect_signal_raw(signal_num);
+ connect_signal_raw(signal_num, *theme->input_state);
}
-void LiveInputWrapper::connect_signal_raw(int signal_num)
+void LiveInputWrapper::connect_signal_raw(int signal_num, const InputState &input_state)
{
- BufferedFrame first_frame = theme->input_state->buffered_frames[signal_num][0];
+ BufferedFrame first_frame = input_state.buffered_frames[signal_num][0];
if (first_frame.frame == nullptr) {
// No data yet.
return;
height = userdata->last_height[first_frame.field_number];
}
+ movit::YCbCrLumaCoefficients ycbcr_coefficients = input_state.ycbcr_coefficients[signal_num];
+ bool full_range = input_state.full_range[signal_num];
+
+ if (input_state.ycbcr_coefficients_auto[signal_num]) {
+ full_range = false;
+
+ // The Blackmagic driver docs claim that the device outputs Y'CbCr
+ // according to Rec. 601, but this seems to indicate the subsampling
+ // positions only, as they publish Y'CbCr → RGB formulas that are
+ // different for HD and SD (corresponding to Rec. 709 and 601, respectively),
+ // and a Lenovo X1 gen 3 I used to test definitely outputs Rec. 709
+ // (at least up to rounding error). Other devices seem to use Rec. 601
+ // even on HD resolutions. Nevertheless, Rec. 709 _is_ the right choice
+ // for HD, so we default to that if the user hasn't set anything.
+ if (height >= 720) {
+ ycbcr_coefficients = YCBCR_REC_709;
+ } else {
+ ycbcr_coefficients = YCBCR_REC_601;
+ }
+ }
+
+ // This is a global, but it doesn't really matter.
+ input_ycbcr_format.luma_coefficients = ycbcr_coefficients;
+ input_ycbcr_format.full_range = full_range;
+
BufferedFrame last_good_frame = first_frame;
for (unsigned i = 0; i < max(ycbcr_inputs.size(), rgba_inputs.size()); ++i) {
- BufferedFrame frame = theme->input_state->buffered_frames[signal_num][i];
+ BufferedFrame frame = input_state.buffered_frames[signal_num][i];
if (frame.frame == nullptr) {
// Not enough data; reuse last frame (well, field).
// This is suboptimal, but we have nothing better.
case bmusb::PixelFormat_8BitYCbCr:
ycbcr_inputs[i]->set_texture_num(0, userdata->tex_y[frame.field_number]);
ycbcr_inputs[i]->set_texture_num(1, userdata->tex_cbcr[frame.field_number]);
+ ycbcr_inputs[i]->change_ycbcr_format(input_ycbcr_format);
+ ycbcr_inputs[i]->set_width(width);
+ ycbcr_inputs[i]->set_height(height);
+ break;
+ case bmusb::PixelFormat_8BitYCbCrPlanar:
+ ycbcr_inputs[i]->set_texture_num(0, userdata->tex_y[frame.field_number]);
+ ycbcr_inputs[i]->set_texture_num(1, userdata->tex_cb[frame.field_number]);
+ ycbcr_inputs[i]->set_texture_num(2, userdata->tex_cr[frame.field_number]);
+ ycbcr_inputs[i]->change_ycbcr_format(userdata->ycbcr_format);
ycbcr_inputs[i]->set_width(width);
ycbcr_inputs[i]->set_height(height);
break;
case bmusb::PixelFormat_10BitYCbCr:
ycbcr_inputs[i]->set_texture_num(0, userdata->tex_444[frame.field_number]);
+ ycbcr_inputs[i]->change_ycbcr_format(input_ycbcr_format);
ycbcr_inputs[i]->set_width(width);
ycbcr_inputs[i]->set_height(height);
break;
}
if (deinterlace) {
- BufferedFrame frame = theme->input_state->buffered_frames[signal_num][0];
+ BufferedFrame frame = input_state.buffered_frames[signal_num][0];
CHECK(deinterlace_effect->set_int("current_field_position", frame.field_number));
}
}
L = luaL_newstate();
luaL_openlibs(L);
+ register_constants();
register_class("EffectChain", EffectChain_funcs);
register_class("LiveInputWrapper", LiveInputWrapper_funcs);
register_class("ImageInput", ImageInput_funcs);
register_class("VideoInput", VideoInput_funcs);
+ register_class("HTMLInput", HTMLInput_funcs);
register_class("WhiteBalanceEffect", WhiteBalanceEffect_funcs);
register_class("ResampleEffect", ResampleEffect_funcs);
register_class("PaddingEffect", PaddingEffect_funcs);
lua_close(L);
}
+// Publishes enum-like constants to Lua scripts as a global "Nageru" table,
+// e.g. Nageru.VIDEO_FORMAT_BGRA for use as the pixel_format argument of
+// VideoInput.new(). Called once during Lua state setup, before themes run.
+void Theme::register_constants()
+{
+	// Set Nageru.VIDEO_FORMAT_BGRA = bmusb::PixelFormat_8BitBGRA, etc.
+	const vector<pair<string, int>> constants = {
+		{ "VIDEO_FORMAT_BGRA", bmusb::PixelFormat_8BitBGRA },
+		{ "VIDEO_FORMAT_YCBCR", bmusb::PixelFormat_8BitYCbCrPlanar },
+	};
+
+	lua_newtable(L);  // t = {}
+
+	for (const pair<string, int> &constant : constants) {
+		lua_pushstring(L, constant.first.c_str());
+		lua_pushinteger(L, constant.second);
+		// The table sits at absolute stack index 1, since the stack was
+		// empty before lua_newtable (asserted below after the pop).
+		lua_settable(L, 1);  // t[key] = value
+	}
+
+	lua_setglobal(L, "Nageru");  // Nageru = t
+	assert(lua_gettop(L) == 0);
+}
+
void Theme::register_class(const char *class_name, const luaL_Reg *funcs)
{
assert(lua_gettop(L) == 0);
chain.setup_chain = [this, funcref, input_state]{
unique_lock<mutex> lock(m);
+ assert(this->input_state == nullptr);
this->input_state = &input_state;
// Set up state, including connecting signals.
exit(1);
}
assert(lua_gettop(L) == 0);
+
+ this->input_state = nullptr;
};
// TODO: Can we do better, e.g. by running setup_chain() and seeing what it references?
// Actually, setup_chain does maybe hold all the references we need now anyway?
+ chain.input_frames.reserve(num_cards * FRAME_HISTORY_LENGTH);
for (unsigned card_index = 0; card_index < num_cards; ++card_index) {
for (unsigned frame_num = 0; frame_num < FRAME_HISTORY_LENGTH; ++frame_num) {
chain.input_frames.push_back(input_state.buffered_frames[card_index][frame_num].frame);
int Theme::map_signal(int signal_num)
{
+ // Negative numbers map to raw signals.
+ if (signal_num < 0) {
+ return -1 - signal_num;
+ }
+
unique_lock<mutex> lock(map_m);
if (signal_to_card_mapping.count(signal_num)) {
return signal_to_card_mapping[signal_num];