#include "theme.h"
#include <assert.h>
+#include <bmusb/bmusb.h>
+#include <epoxy/gl.h>
#include <lauxlib.h>
#include <lua.hpp>
+#include <movit/deinterlace_effect.h>
#include <movit/effect.h>
#include <movit/effect_chain.h>
#include <movit/image_format.h>
+#include <movit/input.h>
#include <movit/mix_effect.h>
+#include <movit/multiply_effect.h>
#include <movit/overlay_effect.h>
#include <movit/padding_effect.h>
#include <movit/resample_effect.h>
#include <movit/resize_effect.h>
-#include <movit/multiply_effect.h>
#include <movit/util.h>
#include <movit/white_balance_effect.h>
#include <movit/ycbcr.h>
#include <stdio.h>
#include <stdlib.h>
#include <cstddef>
+#include <memory>
#include <new>
#include <utility>
-#include <memory>
#include "defs.h"
+#ifdef HAVE_CEF
+#include "cef_capture.h"
+#endif
+#include "ffmpeg_capture.h"
#include "flags.h"
#include "image_input.h"
-#include "mixer.h"
+#include "input_state.h"
+#include "pbo_frame_allocator.h"
+
+class Mixer;
namespace movit {
class ResourcePool;
+// Per-signal snapshot of the last-seen video parameters (dimensions,
+// interlacing, signal presence, connection status, frame rate), copied out of
+// an InputState so the Lua theme can query them without touching live frames.
+// Arrays are indexed by signal number, sized for all card types (video cards),
+// hence MAX_VIDEO_CARDS rather than the old MAX_CARDS.
struct InputStateInfo {
InputStateInfo(const InputState& input_state);
- unsigned last_width[MAX_CARDS], last_height[MAX_CARDS];
- bool last_interlaced[MAX_CARDS], last_has_signal[MAX_CARDS];
- unsigned last_frame_rate_nom[MAX_CARDS], last_frame_rate_den[MAX_CARDS];
+ unsigned last_width[MAX_VIDEO_CARDS], last_height[MAX_VIDEO_CARDS];
+ bool last_interlaced[MAX_VIDEO_CARDS], last_has_signal[MAX_VIDEO_CARDS], last_is_connected[MAX_VIDEO_CARDS];
+ unsigned last_frame_rate_nom[MAX_VIDEO_CARDS], last_frame_rate_den[MAX_VIDEO_CARDS];
};
InputStateInfo::InputStateInfo(const InputState &input_state)
{
- for (unsigned signal_num = 0; signal_num < MAX_CARDS; ++signal_num) {
+ for (unsigned signal_num = 0; signal_num < MAX_VIDEO_CARDS; ++signal_num) {
BufferedFrame frame = input_state.buffered_frames[signal_num][0];
if (frame.frame == nullptr) {
last_width[signal_num] = last_height[signal_num] = 0;
last_interlaced[signal_num] = false;
last_has_signal[signal_num] = false;
+ last_is_connected[signal_num] = false;
continue;
}
const PBOFrameAllocator::Userdata *userdata = (const PBOFrameAllocator::Userdata *)frame.frame->userdata;
last_height[signal_num] = userdata->last_height[frame.field_number];
last_interlaced[signal_num] = userdata->last_interlaced;
last_has_signal[signal_num] = userdata->last_has_signal;
+ last_is_connected[signal_num] = userdata->last_is_connected;
last_frame_rate_nom[signal_num] = userdata->last_frame_rate_nom;
last_frame_rate_den[signal_num] = userdata->last_frame_rate_den;
}
EffectChain *chain = (EffectChain *)luaL_checkudata(L, 1, "EffectChain");
bool override_bounce = checkbool(L, 2);
bool deinterlace = checkbool(L, 3);
- return wrap_lua_object<LiveInputWrapper>(L, "LiveInputWrapper", theme, chain, override_bounce, deinterlace);
+ bmusb::PixelFormat pixel_format = global_flags.ten_bit_input ? bmusb::PixelFormat_10BitYCbCr : bmusb::PixelFormat_8BitYCbCr;
+
+ // Needs to be nonowned to match add_video_input (see below).
+ return wrap_lua_object_nonowned<LiveInputWrapper>(L, "LiveInputWrapper", theme, chain, pixel_format, override_bounce, deinterlace);
+}
+
+// Lua: EffectChain:add_video_input(video_input, deinterlace).
+// Wraps an FFmpegCapture (created via VideoInput.new) as a LiveInputWrapper
+// on this chain, using the capture's current pixel format, and registers the
+// signal connection so decoded frames are routed to the wrapper.
+// Returns the LiveInputWrapper userdata (1 value) on success.
+int EffectChain_add_video_input(lua_State* L)
+{
+ assert(lua_gettop(L) == 3);
+ Theme *theme = get_theme_updata(L);
+ EffectChain *chain = (EffectChain *)luaL_checkudata(L, 1, "EffectChain");
+ FFmpegCapture **capture = (FFmpegCapture **)luaL_checkudata(L, 2, "VideoInput");
+ bool deinterlace = checkbool(L, 3);
+
+ // These need to be nonowned, so that the LiveInputWrapper still exists
+ // and can feed frames to the right EffectChain even if the Lua code
+ // doesn't care about the object anymore. (If we change this, we'd need
+ // to also unregister the signal connection on __gc.)
+ int ret = wrap_lua_object_nonowned<LiveInputWrapper>(
+ L, "LiveInputWrapper", theme, chain, (*capture)->get_current_pixel_format(),
+ /*override_bounce=*/false, deinterlace);
+ if (ret == 1) {
+ Theme *theme = get_theme_updata(L);
+ // The new wrapper userdata is on top of the stack (index -1).
+ LiveInputWrapper **live_input = (LiveInputWrapper **)lua_touserdata(L, -1);
+ theme->register_video_signal_connection(*live_input, *capture);
+ }
+ return ret;
}
+#ifdef HAVE_CEF
+// Lua: EffectChain:add_html_input(html_input).
+// Wraps a CEFCapture (created via HTMLInput.new) as a LiveInputWrapper on
+// this chain and registers the signal connection. HTML (browser) inputs are
+// progressive, so deinterlace is always off. Only compiled in with CEF.
+int EffectChain_add_html_input(lua_State* L)
+{
+ assert(lua_gettop(L) == 2);
+ Theme *theme = get_theme_updata(L);
+ EffectChain *chain = (EffectChain *)luaL_checkudata(L, 1, "EffectChain");
+ CEFCapture **capture = (CEFCapture **)luaL_checkudata(L, 2, "HTMLInput");
+
+ // These need to be nonowned, so that the LiveInputWrapper still exists
+ // and can feed frames to the right EffectChain even if the Lua code
+ // doesn't care about the object anymore. (If we change this, we'd need
+ // to also unregister the signal connection on __gc.)
+ int ret = wrap_lua_object_nonowned<LiveInputWrapper>(
+ L, "LiveInputWrapper", theme, chain, (*capture)->get_current_pixel_format(),
+ /*override_bounce=*/false, /*deinterlace=*/false);
+ if (ret == 1) {
+ Theme *theme = get_theme_updata(L);
+ // The new wrapper userdata is on top of the stack (index -1).
+ LiveInputWrapper **live_input = (LiveInputWrapper **)lua_touserdata(L, -1);
+ theme->register_html_signal_connection(*live_input, *capture);
+ }
+ return ret;
+}
+#endif
+
int EffectChain_add_effect(lua_State* L)
{
assert(lua_gettop(L) >= 2);
vector<Effect *> inputs;
for (int idx = 3; idx <= lua_gettop(L); ++idx) {
if (luaL_testudata(L, idx, "LiveInputWrapper")) {
- LiveInputWrapper *input = (LiveInputWrapper *)lua_touserdata(L, idx);
- inputs.push_back(input->get_effect());
+ LiveInputWrapper **input = (LiveInputWrapper **)lua_touserdata(L, idx);
+ inputs.push_back((*input)->get_effect());
} else {
inputs.push_back(get_effect(L, idx));
}
if (is_main_chain) {
YCbCrFormat output_ycbcr_format;
- // We actually output 4:2:0 in the end, but chroma subsampling
- // happens in a pass not run by Movit (see Mixer::subsample_chroma()).
+ // We actually output 4:2:0 and/or 4:2:2 in the end, but chroma subsampling
+ // happens in a pass not run by Movit (see ChromaSubsampler::subsample_chroma()).
output_ycbcr_format.chroma_subsampling_x = 1;
output_ycbcr_format.chroma_subsampling_y = 1;
- // Rec. 709 would be the sane thing to do, but it seems many players
- // (e.g. MPlayer and VLC) just default to BT.601 coefficients no matter
- // what (see discussions in e.g. https://trac.ffmpeg.org/ticket/4978).
- // We _do_ set the right flags, though, so that a player that works
- // properly doesn't have to guess.
- output_ycbcr_format.luma_coefficients = YCBCR_REC_601;
+ // This will be overridden if HDMI/SDI output is in force.
+ if (global_flags.ycbcr_rec709_coefficients) {
+ output_ycbcr_format.luma_coefficients = YCBCR_REC_709;
+ } else {
+ output_ycbcr_format.luma_coefficients = YCBCR_REC_601;
+ }
+
output_ycbcr_format.full_range = false;
- output_ycbcr_format.num_levels = 256;
+ output_ycbcr_format.num_levels = 1 << global_flags.x264_bit_depth;
+
+ GLenum type = global_flags.x264_bit_depth > 8 ? GL_UNSIGNED_SHORT : GL_UNSIGNED_BYTE;
- chain->add_ycbcr_output(inout_format, OUTPUT_ALPHA_FORMAT_POSTMULTIPLIED, output_ycbcr_format, YCBCR_OUTPUT_SPLIT_Y_AND_CBCR);
- chain->set_dither_bits(8);
+ chain->add_ycbcr_output(inout_format, OUTPUT_ALPHA_FORMAT_POSTMULTIPLIED, output_ycbcr_format, YCBCR_OUTPUT_SPLIT_Y_AND_CBCR, type);
+
+ // If we're using zerocopy video encoding (so the destination
+ // Y texture is owned by VA-API and will be unavailable for
+ // display), add a copy, where we'll only be using the Y component.
+ if (global_flags.use_zerocopy) {
+ chain->add_ycbcr_output(inout_format, OUTPUT_ALPHA_FORMAT_POSTMULTIPLIED, output_ycbcr_format, YCBCR_OUTPUT_INTERLEAVED, type); // Add a copy where we'll only be using the Y component.
+ }
+ chain->set_dither_bits(global_flags.x264_bit_depth > 8 ? 16 : 8);
chain->set_output_origin(OUTPUT_ORIGIN_TOP_LEFT);
+ } else {
+ chain->add_output(inout_format, OUTPUT_ALPHA_FORMAT_POSTMULTIPLIED);
}
- chain->add_output(inout_format, OUTPUT_ALPHA_FORMAT_POSTMULTIPLIED);
chain->finalize();
return 0;
+// Lua: LiveInputWrapper:connect_signal(signal_num).
+// LiveInputWrapper userdata now holds a pointer-to-pointer (nonowned wrap),
+// so it is dereferenced once before the call.
int LiveInputWrapper_connect_signal(lua_State* L)
{
assert(lua_gettop(L) == 2);
- LiveInputWrapper *input = (LiveInputWrapper *)luaL_checkudata(L, 1, "LiveInputWrapper");
+ LiveInputWrapper **input = (LiveInputWrapper **)luaL_checkudata(L, 1, "LiveInputWrapper");
int signal_num = luaL_checknumber(L, 2);
- input->connect_signal(signal_num);
+ (*input)->connect_signal(signal_num);
return 0;
}
return wrap_lua_object_nonowned<ImageInput>(L, "ImageInput", filename);
}
+// Lua: VideoInput.new(filename, pixel_format).
+// Creates an FFmpegCapture reading from the given file/URL, scaled to the
+// global output resolution. pixel_format must be one of the Nageru.* video
+// format constants (planar Y'CbCr or BGRA); any other value warns on stderr
+// and falls back to planar Y'CbCr. Registers the capture with the theme and
+// returns the VideoInput userdata (1 value) on success.
+int VideoInput_new(lua_State* L)
+{
+ assert(lua_gettop(L) == 2);
+ string filename = checkstdstring(L, 1);
+ int pixel_format = luaL_checknumber(L, 2);
+ if (pixel_format != bmusb::PixelFormat_8BitYCbCrPlanar &&
+ pixel_format != bmusb::PixelFormat_8BitBGRA) {
+ fprintf(stderr, "WARNING: Invalid enum %d used for video format, choosing Y'CbCr.\n",
+ pixel_format);
+ pixel_format = bmusb::PixelFormat_8BitYCbCrPlanar;
+ }
+ int ret = wrap_lua_object_nonowned<FFmpegCapture>(L, "VideoInput", filename, global_flags.width, global_flags.height);
+ if (ret == 1) {
+ // The new capture userdata is on top of the stack (index -1).
+ FFmpegCapture **capture = (FFmpegCapture **)lua_touserdata(L, -1);
+ (*capture)->set_pixel_format(bmusb::PixelFormat(pixel_format));
+
+ Theme *theme = get_theme_updata(L);
+ theme->register_video_input(*capture);
+ }
+ return ret;
+}
+
+// Lua: VideoInput:rewind(). Restarts playback of the underlying FFmpeg stream.
+int VideoInput_rewind(lua_State* L)
+{
+ assert(lua_gettop(L) == 1);
+ FFmpegCapture **video_input = (FFmpegCapture **)luaL_checkudata(L, 1, "VideoInput");
+ (*video_input)->rewind();
+ return 0;
+}
+
+// Lua: VideoInput:change_rate(new_rate). Changes the playback speed of the
+// underlying FFmpeg stream (e.g. 1.0 = normal speed).
+int VideoInput_change_rate(lua_State* L)
+{
+ assert(lua_gettop(L) == 2);
+ FFmpegCapture **video_input = (FFmpegCapture **)luaL_checkudata(L, 1, "VideoInput");
+ double new_rate = luaL_checknumber(L, 2);
+ (*video_input)->change_rate(new_rate);
+ return 0;
+}
+
+// Lua: VideoInput:get_signal_num().
+// Raw (non-mappable) inputs are addressed with negative signal numbers;
+// -1 - card_index is the encoding that Theme::map_signal() decodes back.
+int VideoInput_get_signal_num(lua_State* L)
+{
+ assert(lua_gettop(L) == 1);
+ FFmpegCapture **video_input = (FFmpegCapture **)luaL_checkudata(L, 1, "VideoInput");
+ lua_pushnumber(L, -1 - (*video_input)->get_card_index());
+ return 1;
+}
+
+// Lua: HTMLInput.new(url).
+// Creates a CEF (Chromium Embedded Framework) browser input rendering the
+// given URL at the global output resolution, and registers it with the theme.
+// If Nageru was built without CEF, prints an error and exits — the function
+// is still registered so themes get a clear message instead of a Lua error.
+int HTMLInput_new(lua_State* L)
+{
+#ifdef HAVE_CEF
+ assert(lua_gettop(L) == 1);
+ string url = checkstdstring(L, 1);
+ int ret = wrap_lua_object_nonowned<CEFCapture>(L, "HTMLInput", url, global_flags.width, global_flags.height);
+ if (ret == 1) {
+ // The new capture userdata is on top of the stack (index -1).
+ CEFCapture **capture = (CEFCapture **)lua_touserdata(L, -1);
+ Theme *theme = get_theme_updata(L);
+ theme->register_html_input(*capture);
+ }
+ return ret;
+#else
+ fprintf(stderr, "This version of Nageru has been compiled without CEF support.\n");
+ fprintf(stderr, "HTMLInput is not available.\n");
+ exit(1);
+#endif
+}
+
+#ifdef HAVE_CEF
+// Lua: HTMLInput:set_url(url). Navigates the browser input to a new URL.
+int HTMLInput_set_url(lua_State* L)
+{
+ assert(lua_gettop(L) == 2);
+ CEFCapture **video_input = (CEFCapture **)luaL_checkudata(L, 1, "HTMLInput");
+ string new_url = checkstdstring(L, 2);
+ (*video_input)->set_url(new_url);
+ return 0;
+}
+
+// Lua: HTMLInput:reload(). Reloads the current page.
+int HTMLInput_reload(lua_State* L)
+{
+ assert(lua_gettop(L) == 1);
+ CEFCapture **video_input = (CEFCapture **)luaL_checkudata(L, 1, "HTMLInput");
+ (*video_input)->reload();
+ return 0;
+}
+
+// Lua: HTMLInput:set_max_fps(fps). Caps the browser repaint rate; the Lua
+// number is rounded to the nearest integer.
+int HTMLInput_set_max_fps(lua_State* L)
+{
+ assert(lua_gettop(L) == 2);
+ CEFCapture **video_input = (CEFCapture **)luaL_checkudata(L, 1, "HTMLInput");
+ int max_fps = lrint(luaL_checknumber(L, 2));
+ (*video_input)->set_max_fps(max_fps);
+ return 0;
+}
+
+// Lua: HTMLInput:execute_javascript_async(js). Runs JavaScript in the page;
+// fire-and-forget (no return value is delivered back to Lua).
+int HTMLInput_execute_javascript_async(lua_State* L)
+{
+ assert(lua_gettop(L) == 2);
+ CEFCapture **video_input = (CEFCapture **)luaL_checkudata(L, 1, "HTMLInput");
+ string js = checkstdstring(L, 2);
+ (*video_input)->execute_javascript_async(js);
+ return 0;
+}
+
+// Lua: HTMLInput:get_signal_num().
+// Same negative encoding as VideoInput: -1 - card_index marks a raw signal,
+// decoded by Theme::map_signal().
+int HTMLInput_get_signal_num(lua_State* L)
+{
+ assert(lua_gettop(L) == 1);
+ CEFCapture **video_input = (CEFCapture **)luaL_checkudata(L, 1, "HTMLInput");
+ lua_pushnumber(L, -1 - (*video_input)->get_card_index());
+ return 1;
+}
+#endif
+
int WhiteBalanceEffect_new(lua_State* L)
{
assert(lua_gettop(L) == 0);
return 1;
}
+// Lua: InputStateInfo:get_is_connected(signal_num).
+// Returns whether the given signal (mapped through the theme's signal-to-card
+// mapping) currently has a device connected, per the last captured frame.
+int InputStateInfo_get_is_connected(lua_State* L)
+{
+ assert(lua_gettop(L) == 2);
+ InputStateInfo *input_state_info = get_input_state_info(L, 1);
+ Theme *theme = get_theme_updata(L);
+ int signal_num = theme->map_signal(luaL_checknumber(L, 2));
+ lua_pushboolean(L, input_state_info->last_is_connected[signal_num]);
+ return 1;
+}
+
int InputStateInfo_get_frame_rate_nom(lua_State* L)
{
assert(lua_gettop(L) == 2);
{ "new", EffectChain_new },
{ "__gc", EffectChain_gc },
{ "add_live_input", EffectChain_add_live_input },
+ { "add_video_input", EffectChain_add_video_input },
+#ifdef HAVE_CEF
+ { "add_html_input", EffectChain_add_html_input },
+#endif
{ "add_effect", EffectChain_add_effect },
{ "finalize", EffectChain_finalize },
{ NULL, NULL }
{ NULL, NULL }
};
+// Lua method table for the VideoInput class (FFmpeg-backed file/stream input).
+const luaL_Reg VideoInput_funcs[] = {
+ { "new", VideoInput_new },
+ { "rewind", VideoInput_rewind },
+ { "change_rate", VideoInput_change_rate },
+ { "get_signal_num", VideoInput_get_signal_num },
+ { NULL, NULL }
+};
+
+// Lua method table for the HTMLInput class (CEF browser input).
+// Without CEF, only "new" is registered; calling it prints an error and
+// exits (see HTMLInput_new), so the other methods can never be reached.
+const luaL_Reg HTMLInput_funcs[] = {
+ { "new", HTMLInput_new },
+#ifdef HAVE_CEF
+ { "set_url", HTMLInput_set_url },
+ { "reload", HTMLInput_reload },
+ { "set_max_fps", HTMLInput_set_max_fps },
+ { "execute_javascript_async", HTMLInput_execute_javascript_async },
+ { "get_signal_num", HTMLInput_get_signal_num },
+#endif
+ { NULL, NULL }
+};
+
const luaL_Reg WhiteBalanceEffect_funcs[] = {
{ "new", WhiteBalanceEffect_new },
{ "set_float", Effect_set_float },
{ "get_height", InputStateInfo_get_height },
{ "get_interlaced", InputStateInfo_get_interlaced },
{ "get_has_signal", InputStateInfo_get_has_signal },
+ { "get_is_connected", InputStateInfo_get_is_connected },
{ "get_frame_rate_nom", InputStateInfo_get_frame_rate_nom },
{ "get_frame_rate_den", InputStateInfo_get_frame_rate_den },
{ NULL, NULL }
} // namespace
-LiveInputWrapper::LiveInputWrapper(Theme *theme, EffectChain *chain, bool override_bounce, bool deinterlace)
+LiveInputWrapper::LiveInputWrapper(Theme *theme, EffectChain *chain, bmusb::PixelFormat pixel_format, bool override_bounce, bool deinterlace)
: theme(theme),
+ pixel_format(pixel_format),
deinterlace(deinterlace)
{
ImageFormat inout_format;
// So we pick sRGB as the least evil here.
inout_format.gamma_curve = GAMMA_sRGB;
- // The Blackmagic driver docs claim that the device outputs Y'CbCr
- // according to Rec. 601, but practical testing indicates it definitely
- // is Rec. 709 (at least up to errors attributable to rounding errors).
- // Perhaps 601 was only to indicate the subsampling positions, not the
- // colorspace itself? Tested with a Lenovo X1 gen 3 as input.
- YCbCrFormat input_ycbcr_format;
- input_ycbcr_format.chroma_subsampling_x = 2;
- input_ycbcr_format.chroma_subsampling_y = 1;
- input_ycbcr_format.cb_x_position = 0.0;
- input_ycbcr_format.cr_x_position = 0.0;
- input_ycbcr_format.cb_y_position = 0.5;
- input_ycbcr_format.cr_y_position = 0.5;
- input_ycbcr_format.luma_coefficients = YCBCR_REC_709;
- input_ycbcr_format.full_range = false;
-
unsigned num_inputs;
if (deinterlace) {
deinterlace_effect = new movit::DeinterlaceEffect();
} else {
num_inputs = 1;
}
- for (unsigned i = 0; i < num_inputs; ++i) {
- if (override_bounce) {
- inputs.push_back(new NonBouncingYCbCrInput(inout_format, input_ycbcr_format, WIDTH, HEIGHT, YCBCR_INPUT_SPLIT_Y_AND_CBCR));
- } else {
- inputs.push_back(new YCbCrInput(inout_format, input_ycbcr_format, WIDTH, HEIGHT, YCBCR_INPUT_SPLIT_Y_AND_CBCR));
+
+ if (pixel_format == bmusb::PixelFormat_8BitBGRA) {
+ for (unsigned i = 0; i < num_inputs; ++i) {
+ // We upload our textures ourselves, and Movit swaps
+ // R and B in the shader if we specify BGRA, so lie and say RGBA.
+ if (global_flags.can_disable_srgb_decoder) {
+ rgba_inputs.push_back(new sRGBSwitchingFlatInput(inout_format, FORMAT_RGBA_POSTMULTIPLIED_ALPHA, GL_UNSIGNED_BYTE, global_flags.width, global_flags.height));
+ } else {
+ rgba_inputs.push_back(new NonsRGBCapableFlatInput(inout_format, FORMAT_RGBA_POSTMULTIPLIED_ALPHA, GL_UNSIGNED_BYTE, global_flags.width, global_flags.height));
+ }
+ chain->add_input(rgba_inputs.back());
}
- chain->add_input(inputs.back());
- }
- if (deinterlace) {
- vector<Effect *> reverse_inputs(inputs.rbegin(), inputs.rend());
- chain->add_effect(deinterlace_effect, reverse_inputs);
+ if (deinterlace) {
+ vector<Effect *> reverse_inputs(rgba_inputs.rbegin(), rgba_inputs.rend());
+ chain->add_effect(deinterlace_effect, reverse_inputs);
+ }
+ } else {
+ assert(pixel_format == bmusb::PixelFormat_8BitYCbCr ||
+ pixel_format == bmusb::PixelFormat_10BitYCbCr ||
+ pixel_format == bmusb::PixelFormat_8BitYCbCrPlanar);
+
+ // Most of these settings will be overridden later if using PixelFormat_8BitYCbCrPlanar.
+ input_ycbcr_format.chroma_subsampling_x = (pixel_format == bmusb::PixelFormat_10BitYCbCr) ? 1 : 2;
+ input_ycbcr_format.chroma_subsampling_y = 1;
+ input_ycbcr_format.num_levels = (pixel_format == bmusb::PixelFormat_10BitYCbCr) ? 1024 : 256;
+ input_ycbcr_format.cb_x_position = 0.0;
+ input_ycbcr_format.cr_x_position = 0.0;
+ input_ycbcr_format.cb_y_position = 0.5;
+ input_ycbcr_format.cr_y_position = 0.5;
+ input_ycbcr_format.luma_coefficients = YCBCR_REC_709; // Will be overridden later even if not planar.
+ input_ycbcr_format.full_range = false; // Will be overridden later even if not planar.
+
+ for (unsigned i = 0; i < num_inputs; ++i) {
+ // When using 10-bit input, we're converting to interleaved through v210Converter.
+ YCbCrInputSplitting splitting;
+ if (pixel_format == bmusb::PixelFormat_10BitYCbCr) {
+ splitting = YCBCR_INPUT_INTERLEAVED;
+ } else if (pixel_format == bmusb::PixelFormat_8BitYCbCr) {
+ splitting = YCBCR_INPUT_SPLIT_Y_AND_CBCR;
+ } else {
+ splitting = YCBCR_INPUT_PLANAR;
+ }
+ if (override_bounce) {
+ ycbcr_inputs.push_back(new NonBouncingYCbCrInput(inout_format, input_ycbcr_format, global_flags.width, global_flags.height, splitting));
+ } else {
+ ycbcr_inputs.push_back(new YCbCrInput(inout_format, input_ycbcr_format, global_flags.width, global_flags.height, splitting));
+ }
+ chain->add_input(ycbcr_inputs.back());
+ }
+
+ if (deinterlace) {
+ vector<Effect *> reverse_inputs(ycbcr_inputs.rbegin(), ycbcr_inputs.rend());
+ chain->add_effect(deinterlace_effect, reverse_inputs);
+ }
}
}
}
signal_num = theme->map_signal(signal_num);
+ connect_signal_raw(signal_num, *theme->input_state);
+}
- BufferedFrame first_frame = theme->input_state->buffered_frames[signal_num][0];
+void LiveInputWrapper::connect_signal_raw(int signal_num, const InputState &input_state)
+{
+ BufferedFrame first_frame = input_state.buffered_frames[signal_num][0];
if (first_frame.frame == nullptr) {
// No data yet.
return;
height = userdata->last_height[first_frame.field_number];
}
+ movit::YCbCrLumaCoefficients ycbcr_coefficients = input_state.ycbcr_coefficients[signal_num];
+ bool full_range = input_state.full_range[signal_num];
+
+ if (input_state.ycbcr_coefficients_auto[signal_num]) {
+ full_range = false;
+
+ // The Blackmagic driver docs claim that the device outputs Y'CbCr
+ // according to Rec. 601, but this seems to indicate the subsampling
+ // positions only, as they publish Y'CbCr → RGB formulas that are
+ // different for HD and SD (corresponding to Rec. 709 and 601, respectively),
+ // and a Lenovo X1 gen 3 I used to test definitely outputs Rec. 709
+ // (at least up to rounding error). Other devices seem to use Rec. 601
+ // even on HD resolutions. Nevertheless, Rec. 709 _is_ the right choice
+ // for HD, so we default to that if the user hasn't set anything.
+ if (height >= 720) {
+ ycbcr_coefficients = YCBCR_REC_709;
+ } else {
+ ycbcr_coefficients = YCBCR_REC_601;
+ }
+ }
+
+ // This is a global, but it doesn't really matter.
+ input_ycbcr_format.luma_coefficients = ycbcr_coefficients;
+ input_ycbcr_format.full_range = full_range;
+
BufferedFrame last_good_frame = first_frame;
- for (unsigned i = 0; i < inputs.size(); ++i) {
- BufferedFrame frame = theme->input_state->buffered_frames[signal_num][i];
+ for (unsigned i = 0; i < max(ycbcr_inputs.size(), rgba_inputs.size()); ++i) {
+ BufferedFrame frame = input_state.buffered_frames[signal_num][i];
if (frame.frame == nullptr) {
// Not enough data; reuse last frame (well, field).
// This is suboptimal, but we have nothing better.
}
const PBOFrameAllocator::Userdata *userdata = (const PBOFrameAllocator::Userdata *)frame.frame->userdata;
- if (userdata->last_width[frame.field_number] != width ||
- userdata->last_height[frame.field_number] != height) {
+ unsigned this_width = userdata->last_width[frame.field_number];
+ unsigned this_height = userdata->last_height[frame.field_number];
+ if (this_width != width || this_height != height) {
// Resolution changed; reuse last frame/field.
frame = last_good_frame;
userdata = (const PBOFrameAllocator::Userdata *)frame.frame->userdata;
}
- inputs[i]->set_texture_num(0, userdata->tex_y[frame.field_number]);
- inputs[i]->set_texture_num(1, userdata->tex_cbcr[frame.field_number]);
- inputs[i]->set_width(userdata->last_width[frame.field_number]);
- inputs[i]->set_height(userdata->last_height[frame.field_number]);
+ assert(userdata->pixel_format == pixel_format);
+ switch (pixel_format) {
+ case bmusb::PixelFormat_8BitYCbCr:
+ ycbcr_inputs[i]->set_texture_num(0, userdata->tex_y[frame.field_number]);
+ ycbcr_inputs[i]->set_texture_num(1, userdata->tex_cbcr[frame.field_number]);
+ ycbcr_inputs[i]->change_ycbcr_format(input_ycbcr_format);
+ ycbcr_inputs[i]->set_width(width);
+ ycbcr_inputs[i]->set_height(height);
+ break;
+ case bmusb::PixelFormat_8BitYCbCrPlanar:
+ ycbcr_inputs[i]->set_texture_num(0, userdata->tex_y[frame.field_number]);
+ ycbcr_inputs[i]->set_texture_num(1, userdata->tex_cb[frame.field_number]);
+ ycbcr_inputs[i]->set_texture_num(2, userdata->tex_cr[frame.field_number]);
+ ycbcr_inputs[i]->change_ycbcr_format(userdata->ycbcr_format);
+ ycbcr_inputs[i]->set_width(width);
+ ycbcr_inputs[i]->set_height(height);
+ break;
+ case bmusb::PixelFormat_10BitYCbCr:
+ ycbcr_inputs[i]->set_texture_num(0, userdata->tex_444[frame.field_number]);
+ ycbcr_inputs[i]->change_ycbcr_format(input_ycbcr_format);
+ ycbcr_inputs[i]->set_width(width);
+ ycbcr_inputs[i]->set_height(height);
+ break;
+ case bmusb::PixelFormat_8BitBGRA:
+ rgba_inputs[i]->set_texture_num(userdata->tex_rgba[frame.field_number]);
+ rgba_inputs[i]->set_width(width);
+ rgba_inputs[i]->set_height(height);
+ break;
+ default:
+ assert(false);
+ }
last_good_frame = frame;
}
if (deinterlace) {
- BufferedFrame frame = theme->input_state->buffered_frames[signal_num][0];
+ BufferedFrame frame = input_state.buffered_frames[signal_num][0];
CHECK(deinterlace_effect->set_int("current_field_position", frame.field_number));
}
}
} // namespace
-Theme::Theme(const char *filename, ResourcePool *resource_pool, unsigned num_cards)
+Theme::Theme(const string &filename, const vector<string> &search_dirs, ResourcePool *resource_pool, unsigned num_cards)
: resource_pool(resource_pool), num_cards(num_cards), signal_to_card_mapping(global_flags.default_stream_mapping)
{
L = luaL_newstate();
luaL_openlibs(L);
+ register_constants();
register_class("EffectChain", EffectChain_funcs);
register_class("LiveInputWrapper", LiveInputWrapper_funcs);
register_class("ImageInput", ImageInput_funcs);
+ register_class("VideoInput", VideoInput_funcs);
+ register_class("HTMLInput", HTMLInput_funcs);
register_class("WhiteBalanceEffect", WhiteBalanceEffect_funcs);
register_class("ResampleEffect", ResampleEffect_funcs);
register_class("PaddingEffect", PaddingEffect_funcs);
register_class("MixEffect", MixEffect_funcs);
register_class("InputStateInfo", InputStateInfo_funcs);
- // Run script.
+ // Run script. Search through all directories until we find a file that will load
+ // (as in, does not return LUA_ERRFILE); then run it. We store load errors
+ // from all the attempts, and show them once we know we can't find any of them.
lua_settop(L, 0);
- if (luaL_dofile(L, filename)) {
- fprintf(stderr, "error: %s\n", lua_tostring(L, -1));
+ vector<string> errors;
+ bool success = false;
+ for (size_t i = 0; i < search_dirs.size(); ++i) {
+ string path = search_dirs[i] + "/" + filename;
+ int err = luaL_loadfile(L, path.c_str());
+ if (err == 0) {
+ // Success; actually call the code.
+ if (lua_pcall(L, 0, LUA_MULTRET, 0)) {
+ fprintf(stderr, "Error when running %s: %s\n", path.c_str(), lua_tostring(L, -1));
+ exit(1);
+ }
+ success = true;
+ break;
+ }
+ errors.push_back(lua_tostring(L, -1));
lua_pop(L, 1);
+ if (err != LUA_ERRFILE) {
+ // The file actually loaded, but failed to parse somehow. Abort; don't try the next one.
+ break;
+ }
+ }
+
+ if (!success) {
+ for (const string &error : errors) {
+ fprintf(stderr, "%s\n", error.c_str());
+ }
exit(1);
}
assert(lua_gettop(L) == 0);
lua_close(L);
}
+// Exposes integer constants to Lua as fields of a global "Nageru" table,
+// so theme scripts can write e.g. Nageru.VIDEO_FORMAT_BGRA. Must be called
+// before the theme script runs (see the constructor).
+void Theme::register_constants()
+{
+ // Set Nageru.VIDEO_FORMAT_BGRA = bmusb::PixelFormat_8BitBGRA, etc.
+ const vector<pair<string, int>> constants = {
+ { "VIDEO_FORMAT_BGRA", bmusb::PixelFormat_8BitBGRA },
+ { "VIDEO_FORMAT_YCBCR", bmusb::PixelFormat_8BitYCbCrPlanar },
+ };
+
+ lua_newtable(L); // t = {}
+
+ for (const pair<string, int> &constant : constants) {
+ lua_pushstring(L, constant.first.c_str());
+ lua_pushinteger(L, constant.second);
+ // The table sits at absolute stack index 1 (the stack was empty on entry).
+ lua_settable(L, 1); // t[key] = value
+ }
+
+ lua_setglobal(L, "Nageru"); // Nageru = t
+ assert(lua_gettop(L) == 0);
+}
+
void Theme::register_class(const char *class_name, const luaL_Reg *funcs)
{
assert(lua_gettop(L) == 0);
chain.setup_chain = [this, funcref, input_state]{
unique_lock<mutex> lock(m);
+ assert(this->input_state == nullptr);
this->input_state = &input_state;
// Set up state, including connecting signals.
exit(1);
}
assert(lua_gettop(L) == 0);
+
+ this->input_state = nullptr;
};
// TODO: Can we do better, e.g. by running setup_chain() and seeing what it references?
// Actually, setup_chain does maybe hold all the references we need now anyway?
+ chain.input_frames.reserve(num_cards * FRAME_HISTORY_LENGTH);
for (unsigned card_index = 0; card_index < num_cards; ++card_index) {
for (unsigned frame_num = 0; frame_num < FRAME_HISTORY_LENGTH; ++frame_num) {
chain.input_frames.push_back(input_state.buffered_frames[card_index][frame_num].frame);
fprintf(stderr, "error running function `channel_name': %s\n", lua_tostring(L, -1));
exit(1);
}
+ const char *ret = lua_tostring(L, -1);
+ if (ret == nullptr) {
+ fprintf(stderr, "function `channel_name' returned nil for channel %d\n", channel);
+ exit(1);
+ }
- string ret = lua_tostring(L, -1);
+ string retstr = ret;
lua_pop(L, 1);
assert(lua_gettop(L) == 0);
- return ret;
+ return retstr;
}
int Theme::get_channel_signal(unsigned channel)
exit(1);
}
- std::string ret = checkstdstring(L, -1);
+ const char *ret = lua_tostring(L, -1);
+ if (ret == nullptr) {
+ fprintf(stderr, "function `channel_color' returned nil for channel %d\n", channel);
+ exit(1);
+ }
+
+ string retstr = ret;
lua_pop(L, 1);
assert(lua_gettop(L) == 0);
- return ret;
+ return retstr;
}
bool Theme::get_supports_set_wb(unsigned channel)
+// Maps a theme-visible signal number to a physical card index. Negative
+// signal numbers are the raw encoding (-1 - card_index) used by
+// VideoInput/HTMLInput and bypass the mapping entirely. Positive numbers
+// are looked up in (and cached into) signal_to_card_mapping; unmapped
+// signals wrap modulo the card count, skipping the HDMI/SDI output card
+// when one is in use, with a warning on stderr.
int Theme::map_signal(int signal_num)
{
+ // Negative numbers map to raw signals.
+ if (signal_num < 0) {
+ return -1 - signal_num;
+ }
+
unique_lock<mutex> lock(map_m);
if (signal_to_card_mapping.count(signal_num)) {
return signal_to_card_mapping[signal_num];
}
- if (signal_num >= int(num_cards)) {
- fprintf(stderr, "WARNING: Theme asked for input %d, but we only have %u card(s).\n", signal_num, num_cards);
- fprintf(stderr, "Mapping to card %d instead.\n", signal_num % num_cards);
+
+ int card_index;
+ if (global_flags.output_card != -1 && num_cards > 1) {
+ // Try to exclude the output card from the default card_index.
+ card_index = signal_num % (num_cards - 1);
+ if (card_index >= global_flags.output_card) {
+ ++card_index;
+ }
+ if (signal_num >= int(num_cards - 1)) {
+ fprintf(stderr, "WARNING: Theme asked for input %d, but we only have %u input card(s) (card %d is busy with output).\n",
+ signal_num, num_cards - 1, global_flags.output_card);
+ fprintf(stderr, "Mapping to card %d instead.\n", card_index);
+ }
+ } else {
+ card_index = signal_num % num_cards;
+ if (signal_num >= int(num_cards)) {
+ fprintf(stderr, "WARNING: Theme asked for input %d, but we only have %u card(s).\n", signal_num, num_cards);
+ fprintf(stderr, "Mapping to card %d instead.\n", card_index);
+ }
}
- signal_to_card_mapping[signal_num] = signal_num % num_cards;
- return signal_num % num_cards;
+ // Cache the decision so the same signal always hits the same card.
+ signal_to_card_mapping[signal_num] = card_index;
+ return card_index;
}
void Theme::set_signal_mapping(int signal_num, int card_num)