git.sesse.net Git - nageru/commitdiff
Add support for FFmpeg inputs.
author Steinar H. Gunderson <sgunderson@bigfoot.com>
Wed, 12 Apr 2017 17:06:48 +0000 (19:06 +0200)
committer Steinar H. Gunderson <sgunderson@bigfoot.com>
Wed, 12 Apr 2017 17:06:48 +0000 (19:06 +0200)
This has a variety of uses, most notably playing video, but it can
also be used to bring a CasparCG feed into Nageru. For this reason,
it uses RGBA as the pixel format (for now), not Y'CbCr. There are some
limitations, but hopefully we can figure them out eventually.
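
For theme authors, the new Lua API (VideoInput.new() and
EffectChain:add_video_input(), registered in theme.cpp below) would be
used roughly as follows. This is a minimal sketch; the filename and the
16:9 chain setup are illustrative assumptions, not part of this commit:

    -- Sketch: play a video file as if it came from a capture card.
    local video = VideoInput.new("clip.mp4")  -- decoded, scaled and converted to RGBA internally

    local chain = EffectChain.new(16, 9)
    chain:add_video_input(video, false)  -- deinterlace=false
    chain:finalize(true)

Note the absence of a connect_signal() call; as the mixer.cpp changes
below show, each video input is connected to its card slot automatically.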

Makefile
ffmpeg_capture.cpp [new file with mode: 0644]
ffmpeg_capture.h [new file with mode: 0644]
mixer.cpp
mixer.h
theme.cpp
theme.h

diff --git a/Makefile b/Makefile
index 5733c473790b2c1d244df1a10363ace5ecf07541..20b5f2549b612f9955fca59675ba2a24ad11fc3a 100644
--- a/Makefile
+++ b/Makefile
@@ -35,6 +35,9 @@ ifeq ($(EMBEDDED_BMUSB),yes)
   OBJS += bmusb/bmusb.o bmusb/fake_capture.o
 endif
 
+# FFmpeg input
+OBJS += ffmpeg_capture.o
+
 # Benchmark program.
 BM_OBJS = benchmark_audio_mixer.o $(AUDIO_MIXER_OBJS) flags.o
 
diff --git a/ffmpeg_capture.cpp b/ffmpeg_capture.cpp
new file mode 100644
index 0000000..647b7cb
--- /dev/null
+++ b/ffmpeg_capture.cpp
@@ -0,0 +1,278 @@
+#include "ffmpeg_capture.h"
+
+#include <assert.h>
+#include <pthread.h>
+#include <stdint.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <unistd.h>
+
+extern "C" {
+#include <libavcodec/avcodec.h>
+#include <libavformat/avformat.h>
+#include <libavutil/avutil.h>
+#include <libavutil/error.h>
+#include <libavutil/frame.h>
+#include <libavutil/imgutils.h>
+#include <libavutil/mem.h>
+#include <libavutil/pixfmt.h>
+#include <libswscale/swscale.h>
+}
+
+#include <chrono>
+#include <cstdint>
+#include <utility>
+#include <vector>
+
+#include "bmusb/bmusb.h"
+#include "ffmpeg_raii.h"
+#include "flags.h"
+#include "image_input.h"
+
+#define FRAME_SIZE (8 << 20)  // 8 MB.
+
+using namespace std;
+using namespace std::chrono;
+using namespace bmusb;
+
+FFmpegCapture::FFmpegCapture(const string &filename, unsigned width, unsigned height)
+       : filename(filename), width(width), height(height)
+{
+       // Not really used for anything.
+       description = "Video: " + filename;
+}
+
+FFmpegCapture::~FFmpegCapture()
+{
+       if (has_dequeue_callbacks) {
+               dequeue_cleanup_callback();
+       }
+}
+
+void FFmpegCapture::configure_card()
+{
+       if (video_frame_allocator == nullptr) {
+               owned_video_frame_allocator.reset(new MallocFrameAllocator(FRAME_SIZE, NUM_QUEUED_VIDEO_FRAMES));
+               set_video_frame_allocator(owned_video_frame_allocator.get());
+       }
+       if (audio_frame_allocator == nullptr) {
+               owned_audio_frame_allocator.reset(new MallocFrameAllocator(65536, NUM_QUEUED_AUDIO_FRAMES));
+               set_audio_frame_allocator(owned_audio_frame_allocator.get());
+       }
+}
+
+void FFmpegCapture::start_bm_capture()
+{
+       if (running) {
+               return;
+       }
+       running = true;
+       producer_thread_should_quit = false;
+       producer_thread = thread(&FFmpegCapture::producer_thread_func, this);
+}
+
+void FFmpegCapture::stop_dequeue_thread()
+{
+       if (!running) {
+               return;
+       }
+       running = false;
+       producer_thread_should_quit = true;
+       producer_thread.join();
+}
+
+std::map<uint32_t, VideoMode> FFmpegCapture::get_available_video_modes() const
+{
+       // Note: This will never really be shown in the UI.
+       VideoMode mode;
+
+       char buf[256];
+       snprintf(buf, sizeof(buf), "%ux%u", width, height);
+       mode.name = buf;
+
+       mode.autodetect = false;
+       mode.width = width;
+       mode.height = height;
+       mode.frame_rate_num = 60;
+       mode.frame_rate_den = 1;
+       mode.interlaced = false;
+
+       return {{ 0, mode }};
+}
+
+void FFmpegCapture::producer_thread_func()
+{
+       char thread_name[16];
+       snprintf(thread_name, sizeof(thread_name), "FFmpeg_C_%d", card_index);
+       pthread_setname_np(pthread_self(), thread_name);
+
+       while (!producer_thread_should_quit) {
+               string pathname = search_for_file(filename);
+               if (pathname.empty()) {
+                       fprintf(stderr, "%s not found, sleeping one second and trying again...\n", filename.c_str());
+                       sleep(1);
+                       continue;
+               }
+               if (!play_video(pathname)) {
+                       // Error.
+                       fprintf(stderr, "Error when playing %s, sleeping one second and trying again...\n", pathname.c_str());
+                       sleep(1);
+                       continue;
+               }
+
+               // Probably just EOF, will exit the loop above on next test.
+       }
+
+       if (has_dequeue_callbacks) {
+               dequeue_cleanup_callback();
+               has_dequeue_callbacks = false;
+       }
+}
+
+bool FFmpegCapture::play_video(const string &pathname)
+{
+       auto format_ctx = avformat_open_input_unique(pathname.c_str(), nullptr, nullptr);
+       if (format_ctx == nullptr) {
+               fprintf(stderr, "%s: Error opening file\n", pathname.c_str());
+               return false;
+       }
+
+       if (avformat_find_stream_info(format_ctx.get(), nullptr) < 0) {
+               fprintf(stderr, "%s: Error finding stream info\n", pathname.c_str());
+               return false;
+       }
+
+       int video_stream_index = -1, audio_stream_index = -1;
+       AVRational video_timebase{ 1, 1 };
+       for (unsigned i = 0; i < format_ctx->nb_streams; ++i) {
+               if (format_ctx->streams[i]->codecpar->codec_type == AVMEDIA_TYPE_VIDEO &&
+                   video_stream_index == -1) {
+                       video_stream_index = i;
+                       video_timebase = format_ctx->streams[i]->time_base;
+               }
+               if (format_ctx->streams[i]->codecpar->codec_type == AVMEDIA_TYPE_AUDIO &&
+                   audio_stream_index == -1) {
+                       audio_stream_index = i;
+               }
+       }
+       if (video_stream_index == -1) {
+               fprintf(stderr, "%s: No video stream found\n", pathname.c_str());
+               return false;
+       }
+
+       const AVCodecParameters *codecpar = format_ctx->streams[video_stream_index]->codecpar;
+       AVCodecContextWithDeleter codec_ctx = avcodec_alloc_context3_unique(nullptr);
+       if (avcodec_parameters_to_context(codec_ctx.get(), codecpar) < 0) {
+               fprintf(stderr, "%s: Cannot fill codec parameters\n", pathname.c_str());
+               return false;
+       }
+       AVCodec *codec = avcodec_find_decoder(codecpar->codec_id);
+       if (codec == nullptr) {
+               fprintf(stderr, "%s: Cannot find decoder\n", pathname.c_str());
+               return false;
+       }
+       if (avcodec_open2(codec_ctx.get(), codec, nullptr) < 0) {
+               fprintf(stderr, "%s: Cannot open decoder\n", pathname.c_str());
+               return false;
+       }
+       unique_ptr<AVCodecContext, decltype(avcodec_close)*> codec_ctx_cleanup(
+               codec_ctx.get(), avcodec_close);
+
+       steady_clock::time_point start = steady_clock::now();
+
+       unique_ptr<SwsContext, decltype(sws_freeContext)*> sws_ctx(nullptr, sws_freeContext);
+       int sws_last_width = -1, sws_last_height = -1;
+
+       // Main loop.
+       while (!producer_thread_should_quit) {
+               // Read packets until we have a frame or there are none left.
+               int frame_finished = 0;
+               AVFrameWithDeleter frame = av_frame_alloc_unique();
+               bool eof = false;
+               do {
+                       AVPacket pkt;
+                       unique_ptr<AVPacket, decltype(av_packet_unref)*> pkt_cleanup(
+                               &pkt, av_packet_unref);
+                       av_init_packet(&pkt);
+                       pkt.data = nullptr;
+                       pkt.size = 0;
+                       if (av_read_frame(format_ctx.get(), &pkt) == 0) {
+                               if (pkt.stream_index != video_stream_index) {
+                                       // Ignore audio for now.
+                                       continue;
+                               }
+                               if (avcodec_send_packet(codec_ctx.get(), &pkt) < 0) {
+                                       fprintf(stderr, "%s: Cannot send packet to codec.\n", pathname.c_str());
+                                       return false;
+                               }
+                       } else {
+                               eof = true;  // Or error, but ignore that for the time being.
+                       }
+
+                       int err = avcodec_receive_frame(codec_ctx.get(), frame.get());
+                       if (err == 0) {
+                               frame_finished = true;
+                               break;
+                       } else if (err != AVERROR(EAGAIN)) {
+                               fprintf(stderr, "%s: Cannot receive frame from codec.\n", pathname.c_str());
+                               return false;
+                       }
+               } while (!eof);
+
+               if (!frame_finished) {
+                       // EOF. Loop back to the start if we can.
+                       if (av_seek_frame(format_ctx.get(), /*stream_index=*/-1, /*timestamp=*/0, /*flags=*/0) < 0) {
+                               fprintf(stderr, "%s: Rewind failed, not looping.\n", pathname.c_str());
+                               return true;
+                       }
+                       start = steady_clock::now();
+                       continue;
+               }
+
+               if (sws_ctx == nullptr || sws_last_width != frame->width || sws_last_height != frame->height) {
+                       sws_ctx.reset(
+                               sws_getContext(frame->width, frame->height, (AVPixelFormat)frame->format,
+                                       width, height, AV_PIX_FMT_RGBA,
+                                       SWS_BICUBIC, nullptr, nullptr, nullptr));
+                       sws_last_width = frame->width;
+                       sws_last_height = frame->height;
+               }
+               if (sws_ctx == nullptr) {
+                       fprintf(stderr, "%s: Could not create scaler context\n", pathname.c_str());
+                       return false;
+               }
+
+               VideoFormat video_format;
+               video_format.width = width;
+               video_format.height = height;
+               video_format.stride = width * 4;
+               video_format.frame_rate_nom = video_timebase.den;
+               video_format.frame_rate_den = av_frame_get_pkt_duration(frame.get()) * video_timebase.num;
+               video_format.has_signal = true;
+               video_format.is_connected = true;
+
+               const duration<double> pts(frame->pts * double(video_timebase.num) / double(video_timebase.den));
+               const steady_clock::time_point frame_start = start + duration_cast<steady_clock::duration>(pts);
+
+               FrameAllocator::Frame video_frame = video_frame_allocator->alloc_frame();
+               if (video_frame.data != nullptr) {
+                       uint8_t *pic_data[4] = { video_frame.data, nullptr, nullptr, nullptr };
+                       int linesizes[4] = { int(video_format.stride), 0, 0, 0 };
+                       sws_scale(sws_ctx.get(), frame->data, frame->linesize, 0, frame->height, pic_data, linesizes);
+                       video_frame.len = video_format.stride * height;
+                       video_frame.received_timestamp = frame_start;
+               }
+
+               FrameAllocator::Frame audio_frame;
+               AudioFormat audio_format;
+               audio_format.bits_per_sample = 32;
+               audio_format.num_channels = 8;
+
+               this_thread::sleep_until(frame_start);
+               frame_callback(timecode++,
+                       video_frame, 0, video_format,
+                       audio_frame, 0, audio_format);
+       }
+       return true;
+}
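
(A quick sanity check of the frame-rate fields above: an MPEG-TS stream
typically has a time base of 1/90000, and a 29.97 fps frame then has a
packet duration of 3003 ticks, so frame_rate_nom/frame_rate_den =
90000/3003 ≈ 29.97. The pts-to-wall-clock mapping works the same way: a
pts of 90000 in that time base lands the frame exactly one second after
<start>.)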
diff --git a/ffmpeg_capture.h b/ffmpeg_capture.h
new file mode 100644
index 0000000..8c75b4f
--- /dev/null
+++ b/ffmpeg_capture.h
@@ -0,0 +1,152 @@
+#ifndef _FFMPEG_CAPTURE_H
+#define _FFMPEG_CAPTURE_H 1
+
+// FFmpegCapture looks much like a capture card, but the frames it spits out
+// come from a video in real time, looping. Because it decodes the video using
+// FFmpeg (thus the name), this means it can handle a very wide array of video
+// formats, and also things like network streaming and V4L capture, but it is
+// also significantly less integrated and optimized than the regular capture
+// cards.  In particular, the frames are always scaled and converted to 8-bit
+// RGBA on the CPU before being sent on to the GPU.
+//
+// Since we don't really know much about the video when building the chains,
+// there are some limitations. In particular, frames are always assumed to be
+// sRGB even if the video container says something else. We could probably
+// try to load the video on startup and pick out the parameters at that point
+// (which would probably also allow us to send Y'CbCr video through without
+// CPU conversion), but it would require some more plumbing, and it would also
+// fail if the file changes parameters midway, which is allowed in some formats.
+//
+// There is currently no audio support. There is also no support for changing
+// the video underway (unlike images), although there really should be.
+// Finally, there is currently no support for controlling the video from Lua.
+
+#include <assert.h>
+#include <stdint.h>
+#include <atomic>
+#include <functional>
+#include <map>
+#include <memory>
+#include <set>
+#include <string>
+#include <thread>
+
+#include "bmusb/bmusb.h"
+
+class FFmpegCapture : public bmusb::CaptureInterface
+{
+public:
+       FFmpegCapture(const std::string &filename, unsigned width, unsigned height);
+       ~FFmpegCapture();
+
+       void set_card_index(int card_index)
+       {
+               this->card_index = card_index;
+       }
+
+       int get_card_index() const
+       {
+               return card_index;
+       }
+
+       // CaptureInterface.
+       void set_video_frame_allocator(bmusb::FrameAllocator *allocator) override
+       {
+               video_frame_allocator = allocator;
+               if (owned_video_frame_allocator.get() != allocator) {
+                       owned_video_frame_allocator.reset();
+               }
+       }
+
+       bmusb::FrameAllocator *get_video_frame_allocator() override
+       {
+               return video_frame_allocator;
+       }
+
+       // Does not take ownership.
+       void set_audio_frame_allocator(bmusb::FrameAllocator *allocator) override
+       {
+               audio_frame_allocator = allocator;
+               if (owned_audio_frame_allocator.get() != allocator) {
+                       owned_audio_frame_allocator.reset();
+               }
+       }
+
+       bmusb::FrameAllocator *get_audio_frame_allocator() override
+       {
+               return audio_frame_allocator;
+       }
+
+       void set_frame_callback(bmusb::frame_callback_t callback) override
+       {
+               frame_callback = callback;
+       }
+
+       void set_dequeue_thread_callbacks(std::function<void()> init, std::function<void()> cleanup) override
+       {
+               dequeue_init_callback = init;
+               dequeue_cleanup_callback = cleanup;
+               has_dequeue_callbacks = true;
+       }
+
+       std::string get_description() const override
+       {
+               return description;
+       }
+
+       void configure_card() override;
+       void start_bm_capture() override;
+       void stop_dequeue_thread() override;
+
+       // TODO: Specify error status through this.
+       bool get_disconnected() const override { return false; }
+
+       std::map<uint32_t, bmusb::VideoMode> get_available_video_modes() const override;
+       void set_video_mode(uint32_t video_mode_id) override {}  // Ignore.
+       uint32_t get_current_video_mode() const override { return 0; }
+
+       std::set<bmusb::PixelFormat> get_available_pixel_formats() const override {
+               return std::set<bmusb::PixelFormat>{ bmusb::PixelFormat_8BitRGBA };
+       }
+       void set_pixel_format(bmusb::PixelFormat pixel_format) override {
+               assert(pixel_format == bmusb::PixelFormat_8BitRGBA);
+       }
+       bmusb::PixelFormat get_current_pixel_format() const override {
+               return bmusb::PixelFormat_8BitRGBA;
+       }
+
+       std::map<uint32_t, std::string> get_available_video_inputs() const override {
+               return { { 0, "Auto" } };
+       }
+       void set_video_input(uint32_t video_input_id) override {}  // Ignore.
+       uint32_t get_current_video_input() const override { return 0; }
+
+       std::map<uint32_t, std::string> get_available_audio_inputs() const override {
+               return { { 0, "Embedded" } };
+       }
+       void set_audio_input(uint32_t audio_input_id) override {}  // Ignore.
+       uint32_t get_current_audio_input() const override { return 0; }
+
+private:
+       void producer_thread_func();
+       bool play_video(const std::string &pathname);
+
+       std::string description, filename;
+       uint16_t timecode = 0;
+       unsigned width, height;
+       bool running = false;
+       int card_index = -1;
+
+       bool has_dequeue_callbacks = false;
+       std::function<void()> dequeue_init_callback = nullptr;
+       std::function<void()> dequeue_cleanup_callback = nullptr;
+
+       bmusb::FrameAllocator *video_frame_allocator = nullptr;
+       bmusb::FrameAllocator *audio_frame_allocator = nullptr;
+       std::unique_ptr<bmusb::FrameAllocator> owned_video_frame_allocator;
+       std::unique_ptr<bmusb::FrameAllocator> owned_audio_frame_allocator;
+       bmusb::frame_callback_t frame_callback = nullptr;
+
+       std::atomic<bool> producer_thread_should_quit{false};
+       std::thread producer_thread;
+};
+
+#endif  // !defined(_FFMPEG_CAPTURE_H)
diff --git a/mixer.cpp b/mixer.cpp
index 80426c2c8db8b62afaa03d8957102f14024016c6..84419f41518fc4c092df7c6afc527e23c83c5121 100644
--- a/mixer.cpp
+++ b/mixer.cpp
@@ -39,6 +39,7 @@
 #include "decklink_output.h"
 #include "defs.h"
 #include "disk_space_estimator.h"
+#include "ffmpeg_capture.h"
 #include "flags.h"
 #include "input_mapping.h"
 #include "pbo_frame_allocator.h"
@@ -306,10 +307,23 @@ Mixer::Mixer(const QSurfaceFormat &format, unsigned num_cards)
                fprintf(stderr, "Initialized %u fake cards.\n", num_fake_cards);
        }
 
+       // Initialize all video inputs the theme asked for. Note that these are
+       // all put _after_ the regular cards, which stop at <num_cards> - 1.
+       std::vector<FFmpegCapture *> video_inputs = theme->get_video_inputs();
+       for (unsigned video_card_index = 0; video_card_index < video_inputs.size(); ++card_index, ++video_card_index) {
+               if (card_index >= MAX_VIDEO_CARDS) {
+                       fprintf(stderr, "ERROR: Not enough card slots available for the videos the theme requested.\n");
+                       exit(1);
+               }
+               configure_card(card_index, video_inputs[video_card_index], CardType::FFMPEG_INPUT, /*output=*/nullptr);
+               video_inputs[video_card_index]->set_card_index(card_index);
+       }
+       num_video_inputs = video_inputs.size();
+
        BMUSBCapture::set_card_connected_callback(bind(&Mixer::bm_hotplug_add, this, _1));
        BMUSBCapture::start_bm_thread();
 
-       for (unsigned card_index = 0; card_index < num_cards; ++card_index) {
+       for (unsigned card_index = 0; card_index < num_cards + num_video_inputs; ++card_index) {
                cards[card_index].queue_length_policy.reset(card_index);
        }
 
@@ -357,7 +371,7 @@ Mixer::~Mixer()
 {
        BMUSBCapture::stop_bm_thread();
 
-       for (unsigned card_index = 0; card_index < num_cards; ++card_index) {
+       for (unsigned card_index = 0; card_index < num_cards + num_video_inputs; ++card_index) {
                {
                        unique_lock<mutex> lock(card_mutex);
                        cards[card_index].should_quit = true;  // Unblock thread.
@@ -387,7 +401,14 @@ void Mixer::configure_card(unsigned card_index, CaptureInterface *capture, CardT
                card->output.reset(output);
        }
 
-       bmusb::PixelFormat pixel_format = global_flags.ten_bit_input ? PixelFormat_10BitYCbCr : PixelFormat_8BitYCbCr;
+       bmusb::PixelFormat pixel_format;
+       if (card_type == CardType::FFMPEG_INPUT) {
+               pixel_format = bmusb::PixelFormat_8BitRGBA;
+       } else if (global_flags.ten_bit_input) {
+               pixel_format = PixelFormat_10BitYCbCr;
+       } else {
+               pixel_format = PixelFormat_8BitYCbCr;
+       }
 
        card->capture->set_frame_callback(bind(&Mixer::bm_frame, this, card_index, _1, _2, _3, _4, _5, _6, _7));
        if (card->frame_allocator == nullptr) {
@@ -704,7 +725,7 @@ void Mixer::thread_func()
 
        // Start the actual capture. (We don't want to do it before we're actually ready
        // to process output frames.)
-       for (unsigned card_index = 0; card_index < num_cards; ++card_index) {
+       for (unsigned card_index = 0; card_index < num_cards + num_video_inputs; ++card_index) {
                if (int(card_index) != output_card_index) {
                        cards[card_index].capture->start_bm_capture();
                }
@@ -747,7 +768,7 @@ void Mixer::thread_func()
 
                handle_hotplugged_cards();
 
-               for (unsigned card_index = 0; card_index < num_cards; ++card_index) {
+               for (unsigned card_index = 0; card_index < num_cards + num_video_inputs; ++card_index) {
                        if (card_index == master_card_index || !has_new_frame[card_index]) {
                                continue;
                        }
@@ -768,7 +789,7 @@ void Mixer::thread_func()
                        continue;
                }
 
-               for (unsigned card_index = 0; card_index < num_cards; ++card_index) {
+               for (unsigned card_index = 0; card_index < num_cards + num_video_inputs; ++card_index) {
                        if (!has_new_frame[card_index] || new_frames[card_index].frame->len == 0)
                                continue;
 
@@ -935,7 +956,7 @@ start:
                        cards[master_card_index].new_frames.front().received_timestamp;
        }
 
-       for (unsigned card_index = 0; card_index < num_cards; ++card_index) {
+       for (unsigned card_index = 0; card_index < num_cards + num_video_inputs; ++card_index) {
                CaptureCard *card = &cards[card_index];
                if (input_card_is_master_clock(card_index, master_card_index)) {
                        // We don't use the queue length policy for the master card,
@@ -1064,6 +1085,12 @@ void Mixer::render_one_frame(int64_t duration)
        theme_main_chain.setup_chain();
        //theme_main_chain.chain->enable_phase_timing(true);
 
+       // The theme can't (or at least shouldn't!) call connect_signal() on
+       // each FFmpeg input, so we'll do it here.
+       for (const pair<LiveInputWrapper *, FFmpegCapture *> &conn : theme->get_signal_connections()) {
+               conn.first->connect_signal_raw(conn.second->get_card_index());
+       }
+
        // If HDMI/SDI output is active and the user has requested auto mode,
        // its mode overrides the existing Y'CbCr setting for the chain.
        YCbCrLumaCoefficients ycbcr_output_coefficients;
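
The loop added to render_one_frame() above is what spares themes from
doing this plumbing themselves: a regular live input still gets an
explicit connect_signal() from Lua (remapped through Theme::map_signal()),
while a video input's connection is simply re-applied with its raw card
index on every frame. From the Lua side, roughly (live_input is a
LiveInputWrapper from add_live_input; the signal number is an arbitrary
example):

    -- Capture-card input: explicit, user-remappable signal hookup.
    live_input:connect_signal(0)  -- 0 goes through Theme::map_signal()

    -- FFmpeg input: nothing to call; the connection registered by
    -- add_video_input() is applied automatically each frame.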
diff --git a/mixer.h b/mixer.h
index c4f9e6bfebb2470fc41ddd12d4bec89665f346d6..5fb195ec77793faf0a8e9d3989758caf6650b7de 100644 (file)
--- a/mixer.h
+++ b/mixer.h
@@ -336,7 +336,8 @@ private:
 
        enum class CardType {
                LIVE_CARD,
-               FAKE_CAPTURE
+               FAKE_CAPTURE,
+               FFMPEG_INPUT
        };
        void configure_card(unsigned card_index, bmusb::CaptureInterface *capture, CardType card_type, DeckLinkOutput *output);
        void set_output_card_internal(int card_index);  // Should only be called from the mixer thread.
@@ -359,7 +360,7 @@ private:
        void trim_queue(CaptureCard *card, unsigned card_index);
 
        HTTPD httpd;
-       unsigned num_cards;
+       unsigned num_cards, num_video_inputs;
 
        QSurface *mixer_surface, *h264_encoder_surface, *decklink_output_surface;
        std::unique_ptr<movit::ResourcePool> resource_pool;
diff --git a/theme.cpp b/theme.cpp
index f92143e954573b5c121c05c26585b5c484f113cd..46803ba92394321412a8617bef8d34acfc082ecf 100644
--- a/theme.cpp
+++ b/theme.cpp
@@ -27,6 +27,7 @@
 
 #include "defs.h"
 #include "deinterlace_effect.h"
+#include "ffmpeg_capture.h"
 #include "flags.h"
 #include "image_input.h"
 #include "input.h"
@@ -203,7 +204,32 @@ int EffectChain_add_live_input(lua_State* L)
        bool override_bounce = checkbool(L, 2);
        bool deinterlace = checkbool(L, 3);
        bmusb::PixelFormat pixel_format = global_flags.ten_bit_input ? bmusb::PixelFormat_10BitYCbCr : bmusb::PixelFormat_8BitYCbCr;
-       return wrap_lua_object<LiveInputWrapper>(L, "LiveInputWrapper", theme, chain, pixel_format, override_bounce, deinterlace);
+
+       // Needs to be nonowned to match add_video_input (see below).
+       return wrap_lua_object_nonowned<LiveInputWrapper>(L, "LiveInputWrapper", theme, chain, pixel_format, override_bounce, deinterlace);
+}
+
+int EffectChain_add_video_input(lua_State* L)
+{
+       assert(lua_gettop(L) == 3);
+       Theme *theme = get_theme_updata(L);
+       EffectChain *chain = (EffectChain *)luaL_checkudata(L, 1, "EffectChain");
+       FFmpegCapture **capture = (FFmpegCapture **)luaL_checkudata(L, 2, "VideoInput");
+       bool deinterlace = checkbool(L, 3);
+
+       // These need to be nonowned, so that the LiveInputWrapper still exists
+       // and can feed frames to the right EffectChain even if the Lua code
+       // doesn't care about the object anymore. (If we change this, we'd need
+       // to also unregister the signal connection on __gc.)
+       int ret = wrap_lua_object_nonowned<LiveInputWrapper>(
+               L, "LiveInputWrapper", theme, chain, bmusb::PixelFormat_8BitRGBA,
+               /*override_bounce=*/false, deinterlace);
+       if (ret == 1) {
+               Theme *theme = get_theme_updata(L);
+               LiveInputWrapper **live_input = (LiveInputWrapper **)lua_touserdata(L, -1);
+               theme->register_signal_connection(*live_input, *capture);
+       }
+       return ret;
 }
 
 int EffectChain_add_effect(lua_State* L)
@@ -223,8 +249,8 @@ int EffectChain_add_effect(lua_State* L)
                vector<Effect *> inputs;
                for (int idx = 3; idx <= lua_gettop(L); ++idx) {
                        if (luaL_testudata(L, idx, "LiveInputWrapper")) {
-                               LiveInputWrapper *input = (LiveInputWrapper *)lua_touserdata(L, idx);
-                               inputs.push_back(input->get_effect());
+                               LiveInputWrapper **input = (LiveInputWrapper **)lua_touserdata(L, idx);
+                               inputs.push_back((*input)->get_effect());
                        } else {
                                inputs.push_back(get_effect(L, idx));
                        }
@@ -300,9 +326,9 @@ int EffectChain_finalize(lua_State* L)
 int LiveInputWrapper_connect_signal(lua_State* L)
 {
        assert(lua_gettop(L) == 2);
-       LiveInputWrapper *input = (LiveInputWrapper *)luaL_checkudata(L, 1, "LiveInputWrapper");
+       LiveInputWrapper **input = (LiveInputWrapper **)luaL_checkudata(L, 1, "LiveInputWrapper");
        int signal_num = luaL_checknumber(L, 2);
-       input->connect_signal(signal_num);
+       (*input)->connect_signal(signal_num);
        return 0;
 }
 
@@ -313,6 +339,19 @@ int ImageInput_new(lua_State* L)
        return wrap_lua_object_nonowned<ImageInput>(L, "ImageInput", filename);
 }
 
+int VideoInput_new(lua_State* L)
+{
+       assert(lua_gettop(L) == 1);
+       string filename = checkstdstring(L, 1);
+       int ret = wrap_lua_object_nonowned<FFmpegCapture>(L, "VideoInput", filename, global_flags.width, global_flags.height);
+       if (ret == 1) {
+               Theme *theme = get_theme_updata(L);
+               FFmpegCapture **capture = (FFmpegCapture **)lua_touserdata(L, -1);
+               theme->register_video_input(*capture);
+       }
+       return ret;
+}
+
 int WhiteBalanceEffect_new(lua_State* L)
 {
        assert(lua_gettop(L) == 0);
@@ -492,6 +531,7 @@ const luaL_Reg EffectChain_funcs[] = {
        { "new", EffectChain_new },
        { "__gc", EffectChain_gc },
        { "add_live_input", EffectChain_add_live_input },
+       { "add_video_input", EffectChain_add_video_input },
        { "add_effect", EffectChain_add_effect },
        { "finalize", EffectChain_finalize },
        { NULL, NULL }
@@ -511,6 +551,11 @@ const luaL_Reg ImageInput_funcs[] = {
        { NULL, NULL }
 };
 
+const luaL_Reg VideoInput_funcs[] = {
+       { "new", VideoInput_new },
+       { NULL, NULL }
+};
+
 const luaL_Reg WhiteBalanceEffect_funcs[] = {
        { "new", WhiteBalanceEffect_new },
        { "set_float", Effect_set_float },
@@ -686,7 +731,11 @@ void LiveInputWrapper::connect_signal(int signal_num)
        }
 
        signal_num = theme->map_signal(signal_num);
+       connect_signal_raw(signal_num);
+}
 
+void LiveInputWrapper::connect_signal_raw(int signal_num)
+{
        BufferedFrame first_frame = theme->input_state->buffered_frames[signal_num][0];
        if (first_frame.frame == nullptr) {
                // No data yet.
@@ -776,6 +825,7 @@ Theme::Theme(const string &filename, const vector<string> &search_dirs, Resource
        register_class("EffectChain", EffectChain_funcs); 
        register_class("LiveInputWrapper", LiveInputWrapper_funcs); 
        register_class("ImageInput", ImageInput_funcs);
+       register_class("VideoInput", VideoInput_funcs);
        register_class("WhiteBalanceEffect", WhiteBalanceEffect_funcs);
        register_class("ResampleEffect", ResampleEffect_funcs);
        register_class("PaddingEffect", PaddingEffect_funcs);
diff --git a/theme.h b/theme.h
index 5580909f549396fb891f6c0f3b82304c3847394c..b04050e51d05a6331b21d65409adda3311a61fa0 100644
--- a/theme.h
+++ b/theme.h
@@ -15,6 +15,8 @@
 #include "ref_counted_frame.h"
 #include "tweaked_inputs.h"
 
+class FFmpegCapture;
+class LiveInputWrapper;
 struct InputState;
 
 namespace movit {
@@ -54,6 +56,27 @@ public:
 
        movit::ResourcePool *get_resource_pool() const { return resource_pool; }
 
+       // Should be called as part of VideoInput.new() only.
+       void register_video_input(FFmpegCapture *capture)
+       {
+               video_inputs.push_back(capture);
+       }
+
+       std::vector<FFmpegCapture *> get_video_inputs() const
+       {
+               return video_inputs;
+       }
+
+       void register_signal_connection(LiveInputWrapper *live_input, FFmpegCapture *capture)
+       {
+               signal_connections.emplace_back(live_input, capture);
+       }
+
+       std::vector<std::pair<LiveInputWrapper *, FFmpegCapture *>> get_signal_connections() const
+       {
+               return signal_connections;
+       }
+
 private:
        void register_class(const char *class_name, const luaL_Reg *funcs);
 
@@ -67,6 +90,9 @@ private:
        std::mutex map_m;
        std::map<int, int> signal_to_card_mapping;  // Protected by <map_m>.
 
+       std::vector<FFmpegCapture *> video_inputs;
+       std::vector<std::pair<LiveInputWrapper *, FFmpegCapture *>> signal_connections;
+
        friend class LiveInputWrapper;
 };
 
@@ -81,6 +107,7 @@ public:
        LiveInputWrapper(Theme *theme, movit::EffectChain *chain, bmusb::PixelFormat pixel_format, bool override_bounce, bool deinterlace);
 
        void connect_signal(int signal_num);
+       void connect_signal_raw(int signal_num);
        movit::Effect *get_effect() const
        {
                if (deinterlace) {