From c4c5e2df2b1ef88fa20b71ae981ae6462d84b63c Mon Sep 17 00:00:00 2001
From: "Steinar H. Gunderson"
Date: Sat, 23 Apr 2016 22:16:20 +0200
Subject: [PATCH] Start pulling video orchestration logic into VideoEncoder.

---
 Makefile          |  5 +++-
 mixer.cpp         | 39 ++++++----------------
 mixer.h           |  4 +--
 video_encoder.cpp | 66 +++++++++++++++++++++++++++++++++++++++++++++++
 video_encoder.h   | 40 ++++++++++++++++++++++++++++
 5 files changed, 120 insertions(+), 34 deletions(-)
 create mode 100644 video_encoder.cpp
 create mode 100644 video_encoder.h

diff --git a/Makefile b/Makefile
index 531362f..71a110c 100644
--- a/Makefile
+++ b/Makefile
@@ -8,7 +8,10 @@ OBJS=glwidget.o main.o mainwindow.o vumeter.o lrameter.o vu_common.o correlation
 OBJS += glwidget.moc.o mainwindow.moc.o vumeter.moc.o lrameter.moc.o correlation_meter.moc.o aboutdialog.moc.o
 
 # Mixer objects
-OBJS += quicksync_encoder.o x264_encoder.o mixer.o bmusb/bmusb.o pbo_frame_allocator.o context.o ref_counted_frame.o theme.o resampling_queue.o metacube2.o httpd.o mux.o ebu_r128_proc.o flags.o image_input.o stereocompressor.o filter.o alsa_output.o correlation_measurer.o
+OBJS += mixer.o bmusb/bmusb.o pbo_frame_allocator.o context.o ref_counted_frame.o theme.o resampling_queue.o httpd.o ebu_r128_proc.o flags.o image_input.o stereocompressor.o filter.o alsa_output.o correlation_measurer.o
+
+# Streaming and encoding objects
+OBJS += quicksync_encoder.o x264_encoder.o video_encoder.o metacube2.o mux.o
 
 # DeckLink
 OBJS += decklink_capture.o decklink/DeckLinkAPIDispatch.o
diff --git a/mixer.cpp b/mixer.cpp
index efd4952..dc9eea6 100644
--- a/mixer.cpp
+++ b/mixer.cpp
@@ -33,7 +33,7 @@
 #include "decklink_capture.h"
 #include "defs.h"
 #include "flags.h"
-#include "quicksync_encoder.h"
+#include "video_encoder.h"
 #include "pbo_frame_allocator.h"
 #include "ref_counted_gl_sync.h"
 #include "timebase.h"
@@ -92,23 +92,6 @@ void insert_new_frame(RefCountedFrame frame, unsigned field_num, bool interlaced
 	}
 }
 
-string generate_local_dump_filename(int frame)
-{
-	time_t now = time(NULL);
-	tm now_tm;
-	localtime_r(&now, &now_tm);
-
-	char timestamp[256];
-	strftime(timestamp, sizeof(timestamp), "%F-%T%z", &now_tm);
-
-	// Use the frame number to disambiguate between two cuts starting
-	// on the same second.
-	char filename[256];
-	snprintf(filename, sizeof(filename), "%s%s-f%02d%s",
-		LOCAL_DUMP_PREFIX, timestamp, frame % 100, LOCAL_DUMP_SUFFIX);
-	return filename;
-}
-
 }  // namespace
 
 void QueueLengthPolicy::update_policy(int queue_length)
@@ -174,10 +157,9 @@ Mixer::Mixer(const QSurfaceFormat &format, unsigned num_cards)
 	display_chain->set_dither_bits(0);  // Don't bother.
 	display_chain->finalize();
 
-	quicksync_encoder.reset(new QuickSyncEncoder(h264_encoder_surface, global_flags.va_display, WIDTH, HEIGHT, &httpd));
-	quicksync_encoder->open_output_file(generate_local_dump_filename(/*frame=*/0).c_str());
+	video_encoder.reset(new VideoEncoder(h264_encoder_surface, global_flags.va_display, WIDTH, HEIGHT, &httpd));
 
-	// Start listening for clients only once H264Encoder has written its header, if any.
+	// Start listening for clients only once VideoEncoder has written its header, if any.
 	httpd.start(9095);
 
 	// First try initializing the PCI devices, then USB, until we have the desired number of cards.
@@ -292,7 +274,7 @@ Mixer::~Mixer()
 		cards[card_index].capture->stop_dequeue_thread();
 	}
 
-	quicksync_encoder.reset(nullptr);
+	video_encoder.reset(nullptr);
 }
 
 void Mixer::configure_card(unsigned card_index, const QSurfaceFormat &format, CaptureInterface *capture)
@@ -692,12 +674,7 @@ void Mixer::thread_func()
 		}
 
 		if (should_cut.exchange(false)) {  // Test and clear.
-			string filename = generate_local_dump_filename(frame);
-			printf("Starting new recording: %s\n", filename.c_str());
-			quicksync_encoder->close_output_file();
-			quicksync_encoder->shutdown();
-			quicksync_encoder.reset(new QuickSyncEncoder(h264_encoder_surface, global_flags.va_display, WIDTH, HEIGHT, &httpd));
-			quicksync_encoder->open_output_file(filename.c_str());
+			video_encoder->do_cut(frame);
 		}
 
 #if 0
@@ -784,7 +761,7 @@ void Mixer::render_one_frame(int64_t duration)
 	//theme_main_chain.chain->enable_phase_timing(true);
 
 	GLuint y_tex, cbcr_tex;
-	bool got_frame = quicksync_encoder->begin_frame(&y_tex, &cbcr_tex);
+	bool got_frame = video_encoder->begin_frame(&y_tex, &cbcr_tex);
 	assert(got_frame);
 
 	// Render main chain.
@@ -806,7 +783,7 @@ void Mixer::render_one_frame(int64_t duration)
 	glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
 
 	const int64_t av_delay = TIMEBASE / 10;  // Corresponds to the fixed delay in resampling_queue.h. TODO: Make less hard-coded.
-	RefCountedGLsync fence = quicksync_encoder->end_frame(pts_int + av_delay, duration, theme_main_chain.input_frames);
+	RefCountedGLsync fence = video_encoder->end_frame(pts_int + av_delay, duration, theme_main_chain.input_frames);
 
 	// The live frame just shows the RGBA texture we just rendered.
 	// It owns rgba_tex now.
@@ -1027,7 +1004,7 @@
 	}
 
 	// And finally add them to the output.
-	quicksync_encoder->add_audio(frame_pts_int, move(samples_out));
+	video_encoder->add_audio(frame_pts_int, move(samples_out));
 }
 
 void Mixer::subsample_chroma(GLuint src_tex, GLuint dst_tex)
diff --git a/mixer.h b/mixer.h
index 552aad5..cb635f6 100644
--- a/mixer.h
+++ b/mixer.h
@@ -24,7 +24,7 @@
 #include "bmusb/bmusb.h"
 #include "alsa_output.h"
 #include "ebu_r128_proc.h"
-#include "quicksync_encoder.h"
+#include "video_encoder.h"
 #include "httpd.h"
 #include "pbo_frame_allocator.h"
 #include "ref_counted_frame.h"
@@ -376,7 +376,7 @@
 	GLuint cbcr_program_num;  // Owned by <resource_pool>.
 	GLuint cbcr_vbo;  // Holds position and texcoord data.
 	GLuint cbcr_position_attribute_index, cbcr_texcoord_attribute_index;
-	std::unique_ptr<QuickSyncEncoder> quicksync_encoder;
+	std::unique_ptr<VideoEncoder> video_encoder;
 
 	// Effects part of <display_chain>. Owned by <resource_pool>.
 	movit::FlatInput *display_input;
diff --git a/video_encoder.cpp b/video_encoder.cpp
new file mode 100644
index 0000000..39bc264
--- /dev/null
+++ b/video_encoder.cpp
@@ -0,0 +1,66 @@
+#include "video_encoder.h"
+
+#include <time.h>
+
+#include "defs.h"
+#include "quicksync_encoder.h"
+
+using namespace std;
+
+namespace {
+
+string generate_local_dump_filename(int frame)
+{
+	time_t now = time(NULL);
+	tm now_tm;
+	localtime_r(&now, &now_tm);
+
+	char timestamp[256];
+	strftime(timestamp, sizeof(timestamp), "%F-%T%z", &now_tm);
+
+	// Use the frame number to disambiguate between two cuts starting
+	// on the same second.
+	char filename[256];
+	snprintf(filename, sizeof(filename), "%s%s-f%02d%s",
+		LOCAL_DUMP_PREFIX, timestamp, frame % 100, LOCAL_DUMP_SUFFIX);
+	return filename;
+}
+
+}  // namespace
+
+VideoEncoder::VideoEncoder(QSurface *surface, const std::string &va_display, int width, int height, HTTPD *httpd)
+	: surface(surface), va_display(va_display), width(width), height(height), httpd(httpd)
+{
+	quicksync_encoder.reset(new QuickSyncEncoder(surface, va_display, width, height, httpd));
+	quicksync_encoder->open_output_file(generate_local_dump_filename(/*frame=*/0).c_str());
+}
+
+VideoEncoder::~VideoEncoder()
+{
+	quicksync_encoder.reset(nullptr);
+}
+
+void VideoEncoder::do_cut(int frame)
+{
+	string filename = generate_local_dump_filename(frame);
+	printf("Starting new recording: %s\n", filename.c_str());
+	quicksync_encoder->close_output_file();
+	quicksync_encoder->shutdown();
+	quicksync_encoder.reset(new QuickSyncEncoder(surface, va_display, width, height, httpd));
+	quicksync_encoder->open_output_file(filename.c_str());
+}
+
+void VideoEncoder::add_audio(int64_t pts, std::vector<float> audio)
+{
+	quicksync_encoder->add_audio(pts, audio);
+}
+
+bool VideoEncoder::begin_frame(GLuint *y_tex, GLuint *cbcr_tex)
+{
+	return quicksync_encoder->begin_frame(y_tex, cbcr_tex);
+}
+
+RefCountedGLsync VideoEncoder::end_frame(int64_t pts, int64_t duration, const std::vector<RefCountedFrame> &input_frames)
+{
+	return quicksync_encoder->end_frame(pts, duration, input_frames);
+}
diff --git a/video_encoder.h b/video_encoder.h
new file mode 100644
index 0000000..1a5c97d
--- /dev/null
+++ b/video_encoder.h
@@ -0,0 +1,40 @@
+// A class to orchestrate the concept of video encoding. Will keep track of
+// the muxes to stream and disk, the QuickSyncEncoder, and also the X264Encoder
+// (for the stream) if there is one.
+
+#ifndef _VIDEO_ENCODER_H
+#define _VIDEO_ENCODER_H
+
+#include <epoxy/gl.h>
+#include <stdint.h>
+#include <string>
+#include <vector>
+
+#include "ref_counted_frame.h"
+#include "ref_counted_gl_sync.h"
+
+class HTTPD;
+class QSurface;
+class QuickSyncEncoder;
+
+class VideoEncoder {
+public:
+	VideoEncoder(QSurface *surface, const std::string &va_display, int width, int height, HTTPD *httpd);
+	~VideoEncoder();
+
+	void add_audio(int64_t pts, std::vector<float> audio);
+	bool begin_frame(GLuint *y_tex, GLuint *cbcr_tex);
+	RefCountedGLsync end_frame(int64_t pts, int64_t duration, const std::vector<RefCountedFrame> &input_frames);
+
+	// Does a cut of the disk stream immediately ("frame" is used for the filename only).
+	void do_cut(int frame);
+
+private:
+	std::unique_ptr<QuickSyncEncoder> quicksync_encoder;
+	QSurface *surface;
+	std::string va_display;
+	int width, height;
+	HTTPD *httpd;
+};
+
+#endif
-- 
2.39.2
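
As orientation for reviewers, here is a minimal usage sketch of the VideoEncoder interface introduced above, mirroring the call order Mixer uses in the hunks of this patch (construct once, begin_frame/end_frame per output frame, add_audio for the mixed samples, do_cut on request). It is not part of the patch; the function name, the zeroed pts/duration, and the empty audio and input-frame vectors are placeholders for illustration.

// Usage sketch only; not part of the patch. Placeholder values are marked.
#include <epoxy/gl.h>
#include <stdint.h>
#include <memory>
#include <string>
#include <vector>

#include "defs.h"           // WIDTH, HEIGHT, as used by Mixer.
#include "video_encoder.h"

void example_encode_one_frame(QSurface *surface, const std::string &va_display, HTTPD *httpd)
{
	// Constructed once, as in Mixer::Mixer(); the constructor opens the first
	// local dump file by itself.
	std::unique_ptr<VideoEncoder> video_encoder(
		new VideoEncoder(surface, va_display, WIDTH, HEIGHT, httpd));

	// Per output frame: get the target textures, render into them, then hand
	// the frame (plus any mixed audio) back to the encoder.
	GLuint y_tex, cbcr_tex;
	if (!video_encoder->begin_frame(&y_tex, &cbcr_tex)) {
		return;  // No free frame slot.
	}

	// ... render luma into y_tex and subsampled chroma into cbcr_tex ...

	const int64_t pts = 0, duration = 0;  // Placeholder timestamps.
	video_encoder->add_audio(pts, std::vector<float>());  // Normally the mixed samples.
	RefCountedGLsync fence =
		video_encoder->end_frame(pts, duration, std::vector<RefCountedFrame>());
	(void)fence;  // Mixer keeps this fence together with the frame it goes on to display.

	// A cut of the disk stream can be requested at any time; "frame" is only
	// used to build the new filename.
	video_encoder->do_cut(/*frame=*/1);
}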