git.sesse.net Git - nageru/commitdiff
Start pulling video orchestration logic into VideoEncoder.
author: Steinar H. Gunderson <sgunderson@bigfoot.com>
Sat, 23 Apr 2016 20:16:20 +0000 (22:16 +0200)
committer: Steinar H. Gunderson <sgunderson@bigfoot.com>
Sat, 23 Apr 2016 20:16:20 +0000 (22:16 +0200)
Makefile
mixer.cpp
mixer.h
video_encoder.cpp [new file with mode: 0644]
video_encoder.h [new file with mode: 0644]

index 531362f98cad13c7120da35ce91966ed104d1485..71a110c1a8fcb2934292d53fee3a2f8ddb8faf4a 100644 (file)
--- a/Makefile
+++ b/Makefile
@@ -8,7 +8,10 @@ OBJS=glwidget.o main.o mainwindow.o vumeter.o lrameter.o vu_common.o correlation
 OBJS += glwidget.moc.o mainwindow.moc.o vumeter.moc.o lrameter.moc.o correlation_meter.moc.o aboutdialog.moc.o
 
 # Mixer objects
-OBJS += quicksync_encoder.o x264_encoder.o mixer.o bmusb/bmusb.o pbo_frame_allocator.o context.o ref_counted_frame.o theme.o resampling_queue.o metacube2.o httpd.o mux.o ebu_r128_proc.o flags.o image_input.o stereocompressor.o filter.o alsa_output.o correlation_measurer.o
+OBJS += mixer.o bmusb/bmusb.o pbo_frame_allocator.o context.o ref_counted_frame.o theme.o resampling_queue.o httpd.o ebu_r128_proc.o flags.o image_input.o stereocompressor.o filter.o alsa_output.o correlation_measurer.o
+
+# Streaming and encoding objects
+OBJS += quicksync_encoder.o x264_encoder.o video_encoder.o metacube2.o mux.o
 
 # DeckLink
 OBJS += decklink_capture.o decklink/DeckLinkAPIDispatch.o
index efd4952b7e565f379128673e11f2a7089d46c644..dc9eea63fc0473f99d9260bffb8c99bb10aa485d 100644 (file)
--- a/mixer.cpp
+++ b/mixer.cpp
@@ -33,7 +33,7 @@
 #include "decklink_capture.h"
 #include "defs.h"
 #include "flags.h"
-#include "quicksync_encoder.h"
+#include "video_encoder.h"
 #include "pbo_frame_allocator.h"
 #include "ref_counted_gl_sync.h"
 #include "timebase.h"
@@ -92,23 +92,6 @@ void insert_new_frame(RefCountedFrame frame, unsigned field_num, bool interlaced
        }
 }
 
-string generate_local_dump_filename(int frame)
-{
-       time_t now = time(NULL);
-       tm now_tm;
-       localtime_r(&now, &now_tm);
-
-       char timestamp[256];
-       strftime(timestamp, sizeof(timestamp), "%F-%T%z", &now_tm);
-
-       // Use the frame number to disambiguate between two cuts starting
-       // on the same second.
-       char filename[256];
-       snprintf(filename, sizeof(filename), "%s%s-f%02d%s",
-               LOCAL_DUMP_PREFIX, timestamp, frame % 100, LOCAL_DUMP_SUFFIX);
-       return filename;
-}
-
 }  // namespace
 
 void QueueLengthPolicy::update_policy(int queue_length)
@@ -174,10 +157,9 @@ Mixer::Mixer(const QSurfaceFormat &format, unsigned num_cards)
        display_chain->set_dither_bits(0);  // Don't bother.
        display_chain->finalize();
 
-       quicksync_encoder.reset(new QuickSyncEncoder(h264_encoder_surface, global_flags.va_display, WIDTH, HEIGHT, &httpd));
-       quicksync_encoder->open_output_file(generate_local_dump_filename(/*frame=*/0).c_str());
+       video_encoder.reset(new VideoEncoder(h264_encoder_surface, global_flags.va_display, WIDTH, HEIGHT, &httpd));
 
-       // Start listening for clients only once H264Encoder has written its header, if any.
+       // Start listening for clients only once VideoEncoder has written its header, if any.
        httpd.start(9095);
 
        // First try initializing the PCI devices, then USB, until we have the desired number of cards.
@@ -292,7 +274,7 @@ Mixer::~Mixer()
                cards[card_index].capture->stop_dequeue_thread();
        }
 
-       quicksync_encoder.reset(nullptr);
+       video_encoder.reset(nullptr);
 }
 
 void Mixer::configure_card(unsigned card_index, const QSurfaceFormat &format, CaptureInterface *capture)
@@ -692,12 +674,7 @@ void Mixer::thread_func()
                }
 
                if (should_cut.exchange(false)) {  // Test and clear.
-                       string filename = generate_local_dump_filename(frame);
-                       printf("Starting new recording: %s\n", filename.c_str());
-                       quicksync_encoder->close_output_file();
-                       quicksync_encoder->shutdown();
-                       quicksync_encoder.reset(new QuickSyncEncoder(h264_encoder_surface, global_flags.va_display, WIDTH, HEIGHT, &httpd));
-                       quicksync_encoder->open_output_file(filename.c_str());
+                       video_encoder->do_cut(frame);
                }
 
 #if 0
@@ -784,7 +761,7 @@ void Mixer::render_one_frame(int64_t duration)
        //theme_main_chain.chain->enable_phase_timing(true);
 
        GLuint y_tex, cbcr_tex;
-       bool got_frame = quicksync_encoder->begin_frame(&y_tex, &cbcr_tex);
+       bool got_frame = video_encoder->begin_frame(&y_tex, &cbcr_tex);
        assert(got_frame);
 
        // Render main chain.
@@ -806,7 +783,7 @@ void Mixer::render_one_frame(int64_t duration)
        glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
 
        const int64_t av_delay = TIMEBASE / 10;  // Corresponds to the fixed delay in resampling_queue.h. TODO: Make less hard-coded.
-       RefCountedGLsync fence = quicksync_encoder->end_frame(pts_int + av_delay, duration, theme_main_chain.input_frames);
+       RefCountedGLsync fence = video_encoder->end_frame(pts_int + av_delay, duration, theme_main_chain.input_frames);
 
        // The live frame just shows the RGBA texture we just rendered.
        // It owns rgba_tex now.
@@ -1027,7 +1004,7 @@ void Mixer::process_audio_one_frame(int64_t frame_pts_int, int num_samples)
        }
 
        // And finally add them to the output.
-       quicksync_encoder->add_audio(frame_pts_int, move(samples_out));
+       video_encoder->add_audio(frame_pts_int, move(samples_out));
 }
 
 void Mixer::subsample_chroma(GLuint src_tex, GLuint dst_tex)
diff --git a/mixer.h b/mixer.h
index 552aad533471458ee60b8ef40552a477172eb3ea..cb635f631c9a401f1ae6d484ce3898972153484f 100644 (file)
--- a/mixer.h
+++ b/mixer.h
@@ -24,7 +24,7 @@
 #include "bmusb/bmusb.h"
 #include "alsa_output.h"
 #include "ebu_r128_proc.h"
-#include "quicksync_encoder.h"
+#include "video_encoder.h"
 #include "httpd.h"
 #include "pbo_frame_allocator.h"
 #include "ref_counted_frame.h"
@@ -376,7 +376,7 @@ private:
        GLuint cbcr_program_num;  // Owned by <resource_pool>.
        GLuint cbcr_vbo;  // Holds position and texcoord data.
        GLuint cbcr_position_attribute_index, cbcr_texcoord_attribute_index;
-       std::unique_ptr<QuickSyncEncoder> quicksync_encoder;
+       std::unique_ptr<VideoEncoder> video_encoder;
 
        // Effects part of <display_chain>. Owned by <display_chain>.
        movit::FlatInput *display_input;
diff --git a/video_encoder.cpp b/video_encoder.cpp
new file mode 100644 (file)
index 0000000..39bc264
--- /dev/null
@@ -0,0 +1,66 @@
+#include "video_encoder.h"
+
+#include <stdio.h>
+#include <time.h>
+#include <string>
+#include <utility>
+
+#include "defs.h"
+#include "quicksync_encoder.h"
+
+using namespace std;
+
+namespace {
+
+// Construct the filename for a local disk dump, based on the current
+// wall-clock time plus the given frame number, e.g.
+// "<prefix>2016-04-23-22:16:20+0200-f00<suffix>" (prefix/suffix from defs.h).
+string generate_local_dump_filename(int frame)
+{
+       time_t now = time(NULL);
+       tm now_tm;
+       localtime_r(&now, &now_tm);
+
+       // ISO date, time and UTC offset ("%F-%T%z").
+       char timestamp[256];
+       strftime(timestamp, sizeof(timestamp), "%F-%T%z", &now_tm);
+
+       // Use the frame number to disambiguate between two cuts starting
+       // on the same second.
+       char filename[256];
+       snprintf(filename, sizeof(filename), "%s%s-f%02d%s",
+               LOCAL_DUMP_PREFIX, timestamp, frame % 100, LOCAL_DUMP_SUFFIX);
+       return filename;
+}
+
+}  // namespace
+
+// Creates the QuickSync (VA-API) encoder and immediately opens the first
+// local dump file (frame number 0). The constructor parameters are stored
+// as members so do_cut() can recreate the encoder later.
+VideoEncoder::VideoEncoder(QSurface *surface, const std::string &va_display, int width, int height, HTTPD *httpd)
+       : surface(surface), va_display(va_display), width(width), height(height), httpd(httpd)
+{
+       quicksync_encoder.reset(new QuickSyncEncoder(surface, va_display, width, height, httpd));
+       quicksync_encoder->open_output_file(generate_local_dump_filename(/*frame=*/0).c_str());
+}
+
+VideoEncoder::~VideoEncoder()
+{
+       // Tear down the encoder explicitly; the unique_ptr would do this
+       // implicitly, but the reset() documents the intended shutdown order.
+       quicksync_encoder.reset(nullptr);
+}
+
+// Finish the current local dump file and start a new one, named using
+// <frame>. The entire QuickSyncEncoder is shut down and recreated rather
+// than just switching files; NOTE(review): this presumably re-emits the
+// stream header for the new file — confirm against QuickSyncEncoder.
+void VideoEncoder::do_cut(int frame)
+{
+       string filename = generate_local_dump_filename(frame);
+       printf("Starting new recording: %s\n", filename.c_str());
+       quicksync_encoder->close_output_file();
+       quicksync_encoder->shutdown();
+       quicksync_encoder.reset(new QuickSyncEncoder(surface, va_display, width, height, httpd));
+       quicksync_encoder->open_output_file(filename.c_str());
+}
+
+// Hand audio samples for <pts> on to the encoder. <audio> is a sink
+// parameter (taken by value), so move it onward instead of copying the
+// whole sample buffer — the callers already pass it with move().
+void VideoEncoder::add_audio(int64_t pts, std::vector<float> audio)
+{
+       quicksync_encoder->add_audio(pts, std::move(audio));
+}
+
+// Fetch the textures to render the next frame into (luma in <y_tex>,
+// chroma in <cbcr_tex>). Forwards directly to QuickSyncEncoder::begin_frame
+// and returns its success value.
+bool VideoEncoder::begin_frame(GLuint *y_tex, GLuint *cbcr_tex)
+{
+       return quicksync_encoder->begin_frame(y_tex, cbcr_tex);
+}
+
+// Submit the rendered frame at <pts> with the given duration to the
+// encoder. <input_frames> are the source video frames referenced by the
+// render, passed along so they stay alive while the encoder needs them.
+// Returns the GL sync fence from QuickSyncEncoder::end_frame.
+RefCountedGLsync VideoEncoder::end_frame(int64_t pts, int64_t duration, const std::vector<RefCountedFrame> &input_frames)
+{
+       return quicksync_encoder->end_frame(pts, duration, input_frames);
+}
diff --git a/video_encoder.h b/video_encoder.h
new file mode 100644 (file)
index 0000000..1a5c97d
--- /dev/null
@@ -0,0 +1,40 @@
+// A class to orchestrate the concept of video encoding. Will keep track of
+// the muxes to stream and disk, the QuickSyncEncoder, and also the X264Encoder
+// (for the stream) if there is one.
+
+#ifndef _VIDEO_ENCODER_H
+#define _VIDEO_ENCODER_H
+
+#include <stdint.h>
+#include <memory>
+#include <string>
+#include <vector>
+
+#include "ref_counted_frame.h"
+#include "ref_counted_gl_sync.h"
+
+class HTTPD;
+class QSurface;
+class QuickSyncEncoder;
+
+class VideoEncoder {
+public:
+       // <surface> and <va_display> are handed to the QuickSyncEncoder;
+       // <httpd> serves the encoded stream to network clients.
+       VideoEncoder(QSurface *surface, const std::string &va_display, int width, int height, HTTPD *httpd);
+       ~VideoEncoder();
+
+       // Adds audio samples (sink parameter) for the given pts.
+       void add_audio(int64_t pts, std::vector<float> audio);
+
+       // Gets the Y/CbCr textures to render the next frame into;
+       // returns whether a frame could be begun.
+       bool begin_frame(GLuint *y_tex, GLuint *cbcr_tex);
+
+       // Submits the rendered frame; returns a sync fence for it.
+       RefCountedGLsync end_frame(int64_t pts, int64_t duration, const std::vector<RefCountedFrame> &input_frames);
+
+       // Does a cut of the disk stream immediately ("frame" is used for the filename only).
+       void do_cut(int frame);
+
+private:
+       std::unique_ptr<QuickSyncEncoder> quicksync_encoder;
+
+       // Construction parameters, remembered so do_cut() can recreate
+       // the QuickSyncEncoder with identical settings.
+       QSurface *surface;
+       std::string va_display;
+       int width, height;
+       HTTPD *httpd;
+};
+
+#endif