assert(stride == width * 2);
}
- current_video_frame = video_frame_allocator->create_frame(width, height, stride);
+	if (height * stride > FRAME_SIZE) {
+ // TODO: If we had an OpenGL context here, calling create_frame()
+ // would be completely fine.
+ fprintf(stderr, "Card %u: Captured frame %d x %d (stride %d) would be larger than supported frame size (%d > %d), skipping.\n",
+			card_index, width, height, stride, height * stride, FRAME_SIZE);
+ } else {
+ current_video_frame = video_frame_allocator->create_frame(width, height, stride);
+ }
if (current_video_frame.data != nullptr) {
const uint8_t *src;
video_frame->GetBytes((void **)&src);
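The skip path above exists because create_frame() may need to (re)allocate a GL-backed buffer, which requires a current OpenGL context that the DeckLink callback thread does not have (hence the TODO). A minimal sketch of the size logic, assuming stride is bytes per row as the assert above implies (frame_fits and max_bytes are illustrative names, not part of the patch):

```cpp
#include <cstddef>
#include <cstdio>

// Illustrative helper: a frame occupies one stride (bytes per row)
// for each of its height rows.
static bool frame_fits(unsigned card_index, int width, int height, int stride, size_t max_bytes)
{
	const size_t needed = size_t(height) * stride;
	if (needed > max_bytes) {
		fprintf(stderr, "Card %u: %d x %d frame (stride %d) needs %zu bytes; max is %zu, skipping.\n",
		        card_index, width, height, stride, needed, max_bytes);
		return false;
	}
	return true;
}
```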
// #define MAX_VIDEO_CARDS 16 // defined in shared_defs.h.
#define MAX_ALSA_CARDS 16
#define MAX_BUSES 256 // Audio buses.
-#define FRAME_SIZE (8 << 20) // 8 MB. (FIXME: Not enough for a 2160p frame!)
+
+// FRAME_SIZE is the default frame size, in bytes. FFmpeg inputs (video files and SRT streams)
+// can allocate larger frames as needed; USB and DeckLink inputs always use FRAME_SIZE.
+// We should eventually add support for at least DeckLink inputs, allowing us to capture
+// 2160p frames. Also, it would allow us to lower the default frame size to the maximum
+// bmusb supports (2 MB just about covers 1080i 4:2:2, then add some for 10-bit?) to waste
+// less memory.
+//
+// As a general sanity check, we also have a MAX_FRAME_SIZE that even dynamic allocation
+// will not go past.
+#define FRAME_SIZE (8 << 20) // 8 MB (just enough for 1080p RGBA).
+#define MAX_FRAME_SIZE (140 << 20) // 140 MB; enough for 8192*4320 RGBA and then some.
// For deinterlacing. See also comments on InputState.
#define FRAME_HISTORY_LENGTH 5
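To make the intended interplay of the two constants concrete: FRAME_SIZE is the floor every allocation gets (so ordinary frames all share one buffer size), while MAX_FRAME_SIZE is a hard ceiling that even dynamic reallocation refuses to cross. A sketch under those assumptions (choose_frame_bytes is a hypothetical helper, not in the patch):

```cpp
#include <algorithm>
#include <cstddef>
#include <optional>
#include "defs.h"  // For FRAME_SIZE and MAX_FRAME_SIZE.

// Hypothetical helper mirroring how the allocator sizes buffers:
// clamp up to the default, refuse anything past the sanity ceiling.
std::optional<size_t> choose_frame_bytes(size_t desired_bytes)
{
	if (desired_bytes > MAX_FRAME_SIZE) {
		return std::nullopt;  // Sanity check; see the comment in defs.h.
	}
	return std::max<size_t>(desired_bytes, size_t(FRAME_SIZE));
}
```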
#include <assert.h>
#include <errno.h>
+#include <epoxy/egl.h>
#include <limits>
#include <map>
#include <memory>
#include <string>
#include <sys/stat.h>
#include <thread>
+#include <QSurface>
extern "C" {
#include <libavcodec/avcodec.h>
#include <movit/colorspace_conversion_effect.h>
#include "bmusb/bmusb.h"
+#include "shared/context.h"
#include "shared/ffmpeg_raii.h"
#include "ffmpeg_util.h"
#include "flags.h"
} // namespace
-FFmpegCapture::FFmpegCapture(const string &filename, unsigned width, unsigned height)
- : filename(filename), width(width), height(height), video_timebase{1, 1}
+FFmpegCapture::FFmpegCapture(const string &filename, unsigned width, unsigned height, QSurface *surface)
+ : filename(filename), width(width), height(height), video_timebase{1, 1}, surface(surface)
{
description = "Video: " + filename;
}
#ifdef HAVE_SRT
-FFmpegCapture::FFmpegCapture(int srt_sock, const string &stream_id)
+FFmpegCapture::FFmpegCapture(int srt_sock, const string &stream_id, QSurface *surface)
: srt_sock(srt_sock),
width(0), // Don't resize; SRT streams typically have stable resolution, and should behave much like regular cards in general.
height(0),
pixel_format(bmusb::PixelFormat_8BitYCbCrPlanar),
- video_timebase{1, 1}
+ video_timebase{1, 1},
+ surface(surface)
{
if (stream_id.empty()) {
description = "SRT stream";
srt_close(srt_sock);
}
#endif
+ delete surface;
}
void FFmpegCapture::configure_card()
snprintf(thread_name, sizeof(thread_name), "FFmpeg_C_%d", card_index);
pthread_setname_np(pthread_self(), thread_name);
+ // We need a context in case create_frame() needs to reallocate something.
+ // (If none is given, we are probably in Kaeru, which uses MallocFrameAllocator
+ // anyway, which doesn't reallocate currently and definitely doesn't need
+ // an active OpenGL context to do so.)
+ QOpenGLContext *context = nullptr;
+ if (surface != nullptr) {
+ context = create_context(this->surface);
+ eglBindAPI(EGL_OPENGL_API);
+ if (!make_current(context, this->surface)) {
+ printf("display=%p surface=%p context=%p curr=%p err=%d\n", eglGetCurrentDisplay(), this->surface, context, eglGetCurrentContext(),
+ eglGetError());
+ abort();
+ }
+ }
+
while (!producer_thread_should_quit.should_quit()) {
string filename_copy;
{
dequeue_cleanup_callback();
has_dequeue_callbacks = false;
}
+
+ delete_context(context);
}
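Since an OpenGL context can only be current on one thread at a time, the producer thread gets its own context, created against the surface handed to the constructor; Kaeru passes no surface and never touches GL on this path. A condensed sketch of that lifecycle, using the same shared/context.h helpers the patch uses (producer_thread_body is an illustrative name; error handling trimmed):

```cpp
#include <cstdlib>
#include <epoxy/egl.h>
#include <QOpenGLContext>
#include <QSurface>
#include "shared/context.h"

void producer_thread_body(QSurface *surface)
{
	QOpenGLContext *context = nullptr;
	if (surface != nullptr) {
		context = create_context(surface);  // Shares with the global context.
		eglBindAPI(EGL_OPENGL_API);         // Desktop GL, not GLES.
		if (!make_current(context, surface)) {
			abort();  // Without GL, create_frame() could not reallocate.
		}
	}
	// ... capture loop runs here; create_frame() may now resize PBO frames ...
	delete_context(context);  // Also fine when context == nullptr.
}
```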
void FFmpegCapture::send_disconnected_frame()
{
*error = false;
- UniqueFrame video_frame(video_frame_allocator->alloc_frame());
+ UniqueFrame video_frame(video_frame_allocator->create_frame(frame->width, frame->height, frame->width));
if (video_frame->data == nullptr) {
return video_frame;
}
// FIXME: Currently, if the video is too high-res for one of the allocated
// frames, we simply refuse to scale it here to avoid crashes. It would be better
// if we could somehow signal getting larger frames, especially as 4K is a thing now.
- if (video_frame->len > FRAME_SIZE) {
- fprintf(stderr, "%s: Decoded frame would be larger than supported FRAME_SIZE (%zu > %u), not decoding.\n", pathname.c_str(), video_frame->len, FRAME_SIZE);
+ if (video_frame->len > video_frame->size) {
+ fprintf(stderr, "%s: Decoded frame would be larger than supported frame size (%zu > %zu), not decoding.\n", pathname.c_str(), video_frame->len, video_frame->size);
*error = true;
return video_frame;
}
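The key change here is comparing against the frame's own capacity rather than the global constant: after this patch, two frames from the same allocator can have different buffer sizes. Roughly (FrameLike is an illustrative stand-in for the bmusb Frame fields used here):

```cpp
#include <cstddef>

struct FrameLike {
	size_t len = 0;   // Bytes the decoded picture actually needs.
	size_t size = 0;  // Capacity of this particular buffer (may exceed FRAME_SIZE).
};

// The decode path refuses frames that would overflow *this* buffer.
bool frame_would_overflow(const FrameLike &f)
{
	return f.len > f.size;
}
```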
struct AVFrame;
struct AVRational;
struct AVPacket;
+class QSurface;
class FFmpegCapture : public bmusb::CaptureInterface
{
public:
- FFmpegCapture(const std::string &filename, unsigned width, unsigned height);
+ FFmpegCapture(const std::string &filename, unsigned width, unsigned height, QSurface *surface);
#ifdef HAVE_SRT
// Takes ownership of the SRT client socket.
- FFmpegCapture(int srt_sock, const std::string &stream_id);
+ FFmpegCapture(int srt_sock, const std::string &stream_id, QSurface *surface);
#endif
~FFmpegCapture();
// -1 is strictly speaking outside the range of the enum, but hopefully, it will be alright.
AVColorSpace last_colorspace = static_cast<AVColorSpace>(-1);
AVChromaLocation last_chroma_location = static_cast<AVChromaLocation>(-1);
+ QSurface *const surface;
};
#endif // !defined(_FFMPEG_CAPTURE_H)
}
global_x264_encoder = x264_encoder.get();
- FFmpegCapture video(argv[optind], global_flags.width, global_flags.height);
+ FFmpegCapture video(argv[optind], global_flags.width, global_flags.height, /*surface=*/nullptr);
video.set_pixel_format(FFmpegCapture::PixelFormat_NV12);
if (global_flags.transcode_video) {
video.set_frame_callback(bind(video_frame_callback, &video, x264_encoder.get(), audio_encoder.get(), _1, _2, _3, _4, _5, _6, _7, _8, _9, _10, _11));
}
// Must be instantiated after VideoEncoder has initialized global_flags.use_zerocopy.
- theme.reset(new Theme(global_flags.theme_filename, global_flags.theme_dirs, resource_pool.get()));
+ theme.reset(new Theme(global_flags.theme_filename, global_flags.theme_dirs, resource_pool.get(), create_surface(format)));
// Must be instantiated after the theme, as the theme decides the number of FFmpeg inputs.
std::vector<FFmpegCapture *> video_inputs = theme->get_video_inputs();
fprintf(stderr, "New SRT stream connected (%s), choosing slot %d.\n", stream_id.c_str(), free_card_index);
}
CaptureCard *card = &cards[free_card_index];
- FFmpegCapture *capture = new FFmpegCapture(sock, stream_id);
+ FFmpegCapture *capture = new FFmpegCapture(sock, stream_id, create_surface_with_same_format(mixer_surface));
capture->set_card_index(free_card_index);
configure_card(free_card_index, capture, CardType::FFMPEG_INPUT, /*output=*/nullptr, /*is_srt_card=*/true);
card->srt_metrics.update_srt_stats(sock); // Initial zero stats.
#include <va/va.h>
#include "mjpeg_encoder.h"
+#include "defs.h"
#include "shared/va_resource_pool.h"
#include "v210_converter.h"
#include "shared/va_display.h"
{
Frame vf;
+	size_t desired_frame_bytes = stride * height;
+ if (stride > 8192 * 4 || height > 8192 || desired_frame_bytes > MAX_FRAME_SIZE) {
+ return vf;
+ }
+
{
lock_guard<mutex> lock(freelist_mutex);
if (freelist.empty()) {
freelist.pop();
}
}
- vf.len = 0;
- vf.overflow = 0;
Userdata *userdata = (Userdata *)vf.userdata;
+ assert(generation == userdata->generation);
+ if (vf.size < desired_frame_bytes || (vf.size > FRAME_SIZE && vf.size > desired_frame_bytes * 2)) {
+ // Frame is either too small or way too large, so reallocate it.
+		// Note that width and height now automatically become the right size
+ // (the one we just asked for, instead of the default for the allocator,
+ // which is generally the global resolution); it doesn't matter
+ // for correctness, since we'll recreate the texture on upload if needed,
+ // but it is nice to save that step.
+ destroy_frame(&vf);
+ init_frame(vf, userdata, this, pixel_format, std::max<size_t>(desired_frame_bytes, FRAME_SIZE), width, height, permissions, map_bits, buffer, generation);
+	}
+
+ vf.len = 0;
+ vf.overflow = 0;
if (mjpeg_encoder != nullptr &&
mjpeg_encoder->should_encode_mjpeg_for_card(card_index)) {
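Isolating the reallocation predicate makes the heuristic easier to see: grow whenever the buffer is too small, and shrink only when the buffer is both above the default and wasting more than half of its capacity, so memory claimed for a past oversized input is eventually returned. A sketch (should_reallocate is an illustrative name):

```cpp
#include <cstddef>

// Mirrors the condition in create_frame() above.
bool should_reallocate(size_t buf_size, size_t desired_bytes, size_t default_size)
{
	if (buf_size < desired_bytes) {
		return true;  // Too small to hold the requested frame.
	}
	if (buf_size > default_size && buf_size > desired_bytes * 2) {
		return true;  // Way too large; give the memory back.
	}
	return false;
}
```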
#include "mainwindow.h"
#include "pbo_frame_allocator.h"
#include "scene.h"
+#include "shared/context.h"
class Mixer;
return wrap_lua_object_nonowned<ImageInput>(L, "ImageInput", filename);
}
+} // namespace
+
+// Must be non-namespaced due to friend declaration.
int VideoInput_new(lua_State* L)
{
assert(lua_gettop(L) == 2);
print_warning(L, "Invalid enum %d used for video format, choosing Y'CbCr.\n", pixel_format);
pixel_format = bmusb::PixelFormat_8BitYCbCrPlanar;
}
- int ret = wrap_lua_object_nonowned<FFmpegCapture>(L, "VideoInput", filename, global_flags.width, global_flags.height);
+ Theme *theme = get_theme_updata(L);
+ int ret = wrap_lua_object_nonowned<FFmpegCapture>(L, "VideoInput", filename, global_flags.width, global_flags.height, create_surface_with_same_format(theme->surface));
if (ret == 1) {
FFmpegCapture **capture = (FFmpegCapture **)lua_touserdata(L, -1);
(*capture)->set_pixel_format(bmusb::PixelFormat(pixel_format));
- Theme *theme = get_theme_updata(L);
theme->register_video_input(*capture);
}
return ret;
}
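The move out of the anonymous namespace is forced by C++ linkage rules: the friend declaration in theme.h names a function at global namespace scope, while a definition inside an anonymous namespace has internal linkage and is a different entity entirely. A minimal illustration (all names here are stand-ins):

```cpp
struct lua_State;  // Forward declaration is enough for a pointer parameter.

class ThemeLike {
	friend int VideoInput_new_sketch(lua_State *L);  // Names ::VideoInput_new_sketch.
	int private_member = 0;
};

namespace {
// Defining VideoInput_new_sketch here would create a different function
// (internal linkage), which would *not* match the friend declaration and
// thus could not access ThemeLike's private members.
}  // namespace

int VideoInput_new_sketch(lua_State * /*L*/)
{
	ThemeLike t;
	return t.private_member;  // OK: this definition is the declared friend.
}
```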
+namespace {
+
int VideoInput_rewind(lua_State* L)
{
assert(lua_gettop(L) == 1);
return 0;
}
-Theme::Theme(const string &filename, const vector<string> &search_dirs, ResourcePool *resource_pool)
- : resource_pool(resource_pool), signal_to_card_mapping(global_flags.default_stream_mapping)
+Theme::Theme(const string &filename, const vector<string> &search_dirs, ResourcePool *resource_pool, QSurface *surface)
+ : resource_pool(resource_pool), signal_to_card_mapping(global_flags.default_stream_mapping), surface(surface)
{
// Defaults.
channel_names[0] = "Live";
{
theme_menu.reset();
lua_close(L);
+ // Leak the surface.
}
void Theme::register_globals()
class CEFCapture;
class FFmpegCapture;
class LiveInputWrapper;
+class QSurface;
struct InputState;
namespace movit {
class Theme {
public:
- Theme(const std::string &filename, const std::vector<std::string> &search_dirs, movit::ResourcePool *resource_pool);
+ Theme(const std::string &filename, const std::vector<std::string> &search_dirs, movit::ResourcePool *resource_pool, QSurface *surface);
~Theme();
struct Chain {
std::map<unsigned, int> channel_signals; // Set using Nageru.set_channel_signal(). Protected by <m>.
std::map<unsigned, bool> channel_supports_wb; // Set using Nageru.set_supports_wb(). Protected by <m>.
+ // Used to construct OpenGL contexts for VideoInputs. Needs to be available
+ // during the entire lifetime of Theme, since they may be created basically
+ // at any time.
+ const QSurface *surface;
+
friend class LiveInputWrapper;
friend class Scene;
friend int ThemeMenu_set(lua_State *L);
friend int Nageru_set_num_channels(lua_State *L);
friend int Nageru_set_channel_signal(lua_State *L);
friend int Nageru_set_supports_wb(lua_State *L);
+ friend int VideoInput_new(lua_State* L);
};
// LiveInputWrapper is a facade on top of a YCbCrInput, exposed to