#include <epoxy/gl.h>
#include <lauxlib.h>
#include <lua.hpp>
+#include <movit/deinterlace_effect.h>
#include <movit/effect.h>
#include <movit/effect_chain.h>
#include <movit/image_format.h>
+#include <movit/input.h>
#include <movit/mix_effect.h>
#include <movit/multiply_effect.h>
#include <movit/overlay_effect.h>
#include <utility>
#include "defs.h"
-#include "deinterlace_effect.h"
#include "ffmpeg_capture.h"
#include "flags.h"
#include "image_input.h"
-#include "input.h"
#include "input_state.h"
#include "pbo_frame_allocator.h"
// doesn't care about the object anymore. (If we change this, we'd need
// to also unregister the signal connection on __gc.)
int ret = wrap_lua_object_nonowned<LiveInputWrapper>(
- L, "LiveInputWrapper", theme, chain, bmusb::PixelFormat_8BitBGRA,
+ L, "LiveInputWrapper", theme, chain, (*capture)->get_current_pixel_format(),
/*override_bounce=*/false, deinterlace);
if (ret == 1) {
Theme *theme = get_theme_updata(L);
int VideoInput_new(lua_State* L)
{
- assert(lua_gettop(L) == 1);
+ assert(lua_gettop(L) == 2);
string filename = checkstdstring(L, 1);
+ int pixel_format = luaL_checknumber(L, 2);
+ if (pixel_format != bmusb::PixelFormat_8BitYCbCrPlanar &&
+ pixel_format != bmusb::PixelFormat_8BitBGRA) {
+ fprintf(stderr, "WARNING: Invalid enum %d used for video format, choosing Y'CbCr.\n",
+ pixel_format);
+ pixel_format = bmusb::PixelFormat_8BitYCbCrPlanar;
+ }
int ret = wrap_lua_object_nonowned<FFmpegCapture>(L, "VideoInput", filename, global_flags.width, global_flags.height);
if (ret == 1) {
- Theme *theme = get_theme_updata(L);
FFmpegCapture **capture = (FFmpegCapture **)lua_touserdata(L, -1);
+ (*capture)->set_pixel_format(bmusb::PixelFormat(pixel_format));
+
+ Theme *theme = get_theme_updata(L);
theme->register_video_input(*capture);
}
return ret;
chain->add_effect(deinterlace_effect, reverse_inputs);
}
} else {
- assert(pixel_format == bmusb::PixelFormat_8BitYCbCr || pixel_format == bmusb::PixelFormat_10BitYCbCr);
- // The Blackmagic driver docs claim that the device outputs Y'CbCr
- // according to Rec. 601, but practical testing indicates it definitely
- // is Rec. 709 (at least up to errors attributable to rounding errors).
- // Perhaps 601 was only to indicate the subsampling positions, not the
- // colorspace itself? Tested with a Lenovo X1 gen 3 as input.
- YCbCrFormat input_ycbcr_format;
+ assert(pixel_format == bmusb::PixelFormat_8BitYCbCr ||
+ pixel_format == bmusb::PixelFormat_10BitYCbCr ||
+ pixel_format == bmusb::PixelFormat_8BitYCbCrPlanar);
+
+ // Most of these settings will be overridden later if using PixelFormat_8BitYCbCrPlanar.
input_ycbcr_format.chroma_subsampling_x = (pixel_format == bmusb::PixelFormat_10BitYCbCr) ? 1 : 2;
input_ycbcr_format.chroma_subsampling_y = 1;
input_ycbcr_format.num_levels = (pixel_format == bmusb::PixelFormat_10BitYCbCr) ? 1024 : 256;
input_ycbcr_format.cr_x_position = 0.0;
input_ycbcr_format.cb_y_position = 0.5;
input_ycbcr_format.cr_y_position = 0.5;
- input_ycbcr_format.luma_coefficients = YCBCR_REC_709;
- input_ycbcr_format.full_range = false;
+ input_ycbcr_format.luma_coefficients = YCBCR_REC_709; // Will be overridden later even if not planar.
+ input_ycbcr_format.full_range = false; // Will be overridden later even if not planar.
for (unsigned i = 0; i < num_inputs; ++i) {
// When using 10-bit input, we're converting to interleaved through v210Converter.
- YCbCrInputSplitting splitting = (pixel_format == bmusb::PixelFormat_10BitYCbCr) ? YCBCR_INPUT_INTERLEAVED : YCBCR_INPUT_SPLIT_Y_AND_CBCR;
+ YCbCrInputSplitting splitting;
+ if (pixel_format == bmusb::PixelFormat_10BitYCbCr) {
+ splitting = YCBCR_INPUT_INTERLEAVED;
+ } else if (pixel_format == bmusb::PixelFormat_8BitYCbCr) {
+ splitting = YCBCR_INPUT_SPLIT_Y_AND_CBCR;
+ } else {
+ splitting = YCBCR_INPUT_PLANAR;
+ }
if (override_bounce) {
ycbcr_inputs.push_back(new NonBouncingYCbCrInput(inout_format, input_ycbcr_format, global_flags.width, global_flags.height, splitting));
} else {
}
signal_num = theme->map_signal(signal_num);
- connect_signal_raw(signal_num);
+ connect_signal_raw(signal_num, *theme->input_state);
}
-void LiveInputWrapper::connect_signal_raw(int signal_num)
+void LiveInputWrapper::connect_signal_raw(int signal_num, const InputState &input_state)
{
- BufferedFrame first_frame = theme->input_state->buffered_frames[signal_num][0];
+ BufferedFrame first_frame = input_state.buffered_frames[signal_num][0];
if (first_frame.frame == nullptr) {
// No data yet.
return;
height = userdata->last_height[first_frame.field_number];
}
+ movit::YCbCrLumaCoefficients ycbcr_coefficients = input_state.ycbcr_coefficients[signal_num];
+ bool full_range = input_state.full_range[signal_num];
+
+ if (input_state.ycbcr_coefficients_auto[signal_num]) {
+ full_range = false;
+
+ // The Blackmagic driver docs claim that the device outputs Y'CbCr
+ // according to Rec. 601, but this seems to indicate the subsampling
+ // positions only, as they publish Y'CbCr → RGB formulas that are
+ // different for HD and SD (corresponding to Rec. 709 and 601, respectively),
+ // and a Lenovo X1 gen 3 I used to test definitely outputs Rec. 709
+ // (at least up to rounding error). Other devices seem to use Rec. 601
+ // even on HD resolutions. Nevertheless, Rec. 709 _is_ the right choice
+ // for HD, so we default to that if the user hasn't set anything.
+ if (height >= 720) {
+ ycbcr_coefficients = YCBCR_REC_709;
+ } else {
+ ycbcr_coefficients = YCBCR_REC_601;
+ }
+ }
+
+ // This is a global, but it doesn't really matter.
+ input_ycbcr_format.luma_coefficients = ycbcr_coefficients;
+ input_ycbcr_format.full_range = full_range;
+
BufferedFrame last_good_frame = first_frame;
for (unsigned i = 0; i < max(ycbcr_inputs.size(), rgba_inputs.size()); ++i) {
- BufferedFrame frame = theme->input_state->buffered_frames[signal_num][i];
+ BufferedFrame frame = input_state.buffered_frames[signal_num][i];
if (frame.frame == nullptr) {
// Not enough data; reuse last frame (well, field).
// This is suboptimal, but we have nothing better.
case bmusb::PixelFormat_8BitYCbCr:
ycbcr_inputs[i]->set_texture_num(0, userdata->tex_y[frame.field_number]);
ycbcr_inputs[i]->set_texture_num(1, userdata->tex_cbcr[frame.field_number]);
+ ycbcr_inputs[i]->change_ycbcr_format(input_ycbcr_format);
+ ycbcr_inputs[i]->set_width(width);
+ ycbcr_inputs[i]->set_height(height);
+ break;
+ case bmusb::PixelFormat_8BitYCbCrPlanar:
+ ycbcr_inputs[i]->set_texture_num(0, userdata->tex_y[frame.field_number]);
+ ycbcr_inputs[i]->set_texture_num(1, userdata->tex_cb[frame.field_number]);
+ ycbcr_inputs[i]->set_texture_num(2, userdata->tex_cr[frame.field_number]);
+ ycbcr_inputs[i]->change_ycbcr_format(userdata->ycbcr_format);
ycbcr_inputs[i]->set_width(width);
ycbcr_inputs[i]->set_height(height);
break;
case bmusb::PixelFormat_10BitYCbCr:
ycbcr_inputs[i]->set_texture_num(0, userdata->tex_444[frame.field_number]);
+ ycbcr_inputs[i]->change_ycbcr_format(input_ycbcr_format);
ycbcr_inputs[i]->set_width(width);
ycbcr_inputs[i]->set_height(height);
break;
}
if (deinterlace) {
- BufferedFrame frame = theme->input_state->buffered_frames[signal_num][0];
+ BufferedFrame frame = input_state.buffered_frames[signal_num][0];
CHECK(deinterlace_effect->set_int("current_field_position", frame.field_number));
}
}
L = luaL_newstate();
luaL_openlibs(L);
+ register_constants();
register_class("EffectChain", EffectChain_funcs);
register_class("LiveInputWrapper", LiveInputWrapper_funcs);
register_class("ImageInput", ImageInput_funcs);
lua_close(L);
}
+// Expose integer enum constants to Lua themes as a global table, so that
+// scripts can write e.g. VideoInput.new(filename, Nageru.VIDEO_FORMAT_YCBCR).
+// Assumes the Lua stack is empty on entry — the freshly created table is
+// addressed by absolute index 1 below, and the trailing gettop assert
+// verifies the stack is balanced again on exit.
+void Theme::register_constants()
+{
+	// Set Nageru.VIDEO_FORMAT_BGRA = bmusb::PixelFormat_8BitBGRA, etc.
+	const vector<pair<string, int>> constants = {
+		{ "VIDEO_FORMAT_BGRA", bmusb::PixelFormat_8BitBGRA },
+		{ "VIDEO_FORMAT_YCBCR", bmusb::PixelFormat_8BitYCbCrPlanar },
+	};
+
+	lua_newtable(L);  // t = {}
+
+	for (const pair<string, int> &constant : constants) {
+		// Push key, then value, then assign t[key] = value.
+		// (Index 1 is the table created above; see stack note at top.)
+		lua_pushstring(L, constant.first.c_str());
+		lua_pushinteger(L, constant.second);
+		lua_settable(L, 1);  // t[key] = value
+	}
+
+	lua_setglobal(L, "Nageru");  // Nageru = t  (also pops t off the stack)
+	assert(lua_gettop(L) == 0);
+}
+
void Theme::register_class(const char *class_name, const luaL_Reg *funcs)
{
assert(lua_gettop(L) == 0);
chain.setup_chain = [this, funcref, input_state]{
unique_lock<mutex> lock(m);
+ assert(this->input_state == nullptr);
this->input_state = &input_state;
// Set up state, including connecting signals.
exit(1);
}
assert(lua_gettop(L) == 0);
+
+ this->input_state = nullptr;
};
// TODO: Can we do better, e.g. by running setup_chain() and seeing what it references?