OBJS += glwidget.moc.o mainwindow.moc.o vumeter.moc.o lrameter.moc.o correlation_meter.moc.o aboutdialog.moc.o
# Mixer objects
-OBJS += quicksync_encoder.o x264_encoder.o mixer.o bmusb/bmusb.o pbo_frame_allocator.o context.o ref_counted_frame.o theme.o resampling_queue.o metacube2.o httpd.o mux.o ebu_r128_proc.o flags.o image_input.o stereocompressor.o filter.o alsa_output.o correlation_measurer.o
+OBJS += mixer.o bmusb/bmusb.o pbo_frame_allocator.o context.o ref_counted_frame.o theme.o resampling_queue.o httpd.o ebu_r128_proc.o flags.o image_input.o stereocompressor.o filter.o alsa_output.o correlation_measurer.o
+
+# Streaming and encoding objects
+OBJS += quicksync_encoder.o x264_encoder.o video_encoder.o metacube2.o mux.o
# DeckLink
OBJS += decklink_capture.o decklink/DeckLinkAPIDispatch.o
#include "decklink_capture.h"
#include "defs.h"
#include "flags.h"
-#include "quicksync_encoder.h"
+#include "video_encoder.h"
#include "pbo_frame_allocator.h"
#include "ref_counted_gl_sync.h"
#include "timebase.h"
}
}
-string generate_local_dump_filename(int frame)
-{
- time_t now = time(NULL);
- tm now_tm;
- localtime_r(&now, &now_tm);
-
- char timestamp[256];
- strftime(timestamp, sizeof(timestamp), "%F-%T%z", &now_tm);
-
- // Use the frame number to disambiguate between two cuts starting
- // on the same second.
- char filename[256];
- snprintf(filename, sizeof(filename), "%s%s-f%02d%s",
- LOCAL_DUMP_PREFIX, timestamp, frame % 100, LOCAL_DUMP_SUFFIX);
- return filename;
-}
-
} // namespace
void QueueLengthPolicy::update_policy(int queue_length)
display_chain->set_dither_bits(0); // Don't bother.
display_chain->finalize();
- quicksync_encoder.reset(new QuickSyncEncoder(h264_encoder_surface, global_flags.va_display, WIDTH, HEIGHT, &httpd));
- quicksync_encoder->open_output_file(generate_local_dump_filename(/*frame=*/0).c_str());
+ video_encoder.reset(new VideoEncoder(h264_encoder_surface, global_flags.va_display, WIDTH, HEIGHT, &httpd));
- // Start listening for clients only once H264Encoder has written its header, if any.
+ // Start listening for clients only once VideoEncoder has written its header, if any.
httpd.start(9095);
// First try initializing the PCI devices, then USB, until we have the desired number of cards.
cards[card_index].capture->stop_dequeue_thread();
}
- quicksync_encoder.reset(nullptr);
+ video_encoder.reset(nullptr);
}
void Mixer::configure_card(unsigned card_index, const QSurfaceFormat &format, CaptureInterface *capture)
}
if (should_cut.exchange(false)) { // Test and clear.
- string filename = generate_local_dump_filename(frame);
- printf("Starting new recording: %s\n", filename.c_str());
- quicksync_encoder->close_output_file();
- quicksync_encoder->shutdown();
- quicksync_encoder.reset(new QuickSyncEncoder(h264_encoder_surface, global_flags.va_display, WIDTH, HEIGHT, &httpd));
- quicksync_encoder->open_output_file(filename.c_str());
+ video_encoder->do_cut(frame);
}
#if 0
//theme_main_chain.chain->enable_phase_timing(true);
GLuint y_tex, cbcr_tex;
- bool got_frame = quicksync_encoder->begin_frame(&y_tex, &cbcr_tex);
+ bool got_frame = video_encoder->begin_frame(&y_tex, &cbcr_tex);
assert(got_frame);
// Render main chain.
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
const int64_t av_delay = TIMEBASE / 10; // Corresponds to the fixed delay in resampling_queue.h. TODO: Make less hard-coded.
- RefCountedGLsync fence = quicksync_encoder->end_frame(pts_int + av_delay, duration, theme_main_chain.input_frames);
+ RefCountedGLsync fence = video_encoder->end_frame(pts_int + av_delay, duration, theme_main_chain.input_frames);
// The live frame just shows the RGBA texture we just rendered.
// It owns rgba_tex now.
}
// And finally add them to the output.
- quicksync_encoder->add_audio(frame_pts_int, move(samples_out));
+ video_encoder->add_audio(frame_pts_int, move(samples_out));
}
void Mixer::subsample_chroma(GLuint src_tex, GLuint dst_tex)
#include "bmusb/bmusb.h"
#include "alsa_output.h"
#include "ebu_r128_proc.h"
-#include "quicksync_encoder.h"
+#include "video_encoder.h"
#include "httpd.h"
#include "pbo_frame_allocator.h"
#include "ref_counted_frame.h"
GLuint cbcr_program_num; // Owned by <resource_pool>.
GLuint cbcr_vbo; // Holds position and texcoord data.
GLuint cbcr_position_attribute_index, cbcr_texcoord_attribute_index;
- std::unique_ptr<QuickSyncEncoder> quicksync_encoder;
+ std::unique_ptr<VideoEncoder> video_encoder;
// Effects part of <display_chain>. Owned by <display_chain>.
movit::FlatInput *display_input;
--- /dev/null
+#include "video_encoder.h"
+
+#include <stdio.h>
+#include <time.h>
+#include <string>
+#include <utility>
+
+#include "defs.h"
+#include "quicksync_encoder.h"
+
+using namespace std;
+
+namespace {
+
+// Construct the filename for a local disk dump from the current
+// wall-clock time, plus the frame number so that two cuts started
+// within the same second still get distinct names.
+string generate_local_dump_filename(int frame)
+{
+	const time_t unix_now = time(NULL);
+	tm broken_down_time;
+	localtime_r(&unix_now, &broken_down_time);
+
+	char time_str[256];
+	strftime(time_str, sizeof(time_str), "%F-%T%z", &broken_down_time);
+
+	char dump_filename[256];
+	snprintf(dump_filename, sizeof(dump_filename), "%s%s-f%02d%s",
+		LOCAL_DUMP_PREFIX, time_str, frame % 100, LOCAL_DUMP_SUFFIX);
+	return dump_filename;
+}
+
+} // namespace
+
+// Stores the construction parameters (do_cut() needs them to build a
+// replacement QuickSyncEncoder later), creates the initial encoder,
+// and immediately opens the first local dump file.
+VideoEncoder::VideoEncoder(QSurface *surface, const std::string &va_display, int width, int height, HTTPD *httpd)
+	: surface(surface), va_display(va_display), width(width), height(height), httpd(httpd)
+{
+	quicksync_encoder.reset(new QuickSyncEncoder(surface, va_display, width, height, httpd));
+	// Frame number 0: this is the first recording of the session.
+	quicksync_encoder->open_output_file(generate_local_dump_filename(/*frame=*/0).c_str());
+}
+
+VideoEncoder::~VideoEncoder()
+{
+	// Tear down the underlying encoder explicitly before the other
+	// members go away.
+	quicksync_encoder.reset();
+}
+
+// Cut the ongoing local recording: finish and shut down the current
+// QuickSyncEncoder, then bring up a fresh one writing to a new dump
+// file. <frame> only influences the generated filename.
+void VideoEncoder::do_cut(int frame)
+{
+	const string new_dump_filename = generate_local_dump_filename(frame);
+	printf("Starting new recording: %s\n", new_dump_filename.c_str());
+
+	// Wind down the old encoder instance completely before replacing it.
+	quicksync_encoder->close_output_file();
+	quicksync_encoder->shutdown();
+
+	// Start the replacement encoder and point it at the new file.
+	quicksync_encoder.reset(new QuickSyncEncoder(surface, va_display, width, height, httpd));
+	quicksync_encoder->open_output_file(new_dump_filename.c_str());
+}
+
+// Hand a block of audio samples (starting at the given pts) to the
+// encoder. <audio> is a by-value sink parameter, so move it into the
+// inner call instead of copying the sample buffer a second time
+// (matches how the caller in Mixer passes move(samples_out)).
+void VideoEncoder::add_audio(int64_t pts, std::vector<float> audio)
+{
+	quicksync_encoder->add_audio(pts, move(audio));
+}
+
+// Get the textures (Y and CbCr) to render the next video frame into.
+// Thin forwarder to QuickSyncEncoder; the caller asserts on the
+// returned flag (see Mixer), which signals whether a frame was handed out.
+bool VideoEncoder::begin_frame(GLuint *y_tex, GLuint *cbcr_tex)
+{
+	return quicksync_encoder->begin_frame(y_tex, cbcr_tex);
+}
+
+// Finish the frame begun with begin_frame(), at the given pts/duration.
+// Thin forwarder to QuickSyncEncoder; returns a GL sync fence for the
+// rendered frame. <input_frames> keeps the source frames alive until
+// the encoder is done with them.
+RefCountedGLsync VideoEncoder::end_frame(int64_t pts, int64_t duration, const std::vector<RefCountedFrame> &input_frames)
+{
+	return quicksync_encoder->end_frame(pts, duration, input_frames);
+}
--- /dev/null
+// A class to orchestrate the concept of video encoding. Will keep track of
+// the muxes to stream and disk, the QuickSyncEncoder, and also the X264Encoder
+// (for the stream) if there is one.
+
+#ifndef _VIDEO_ENCODER_H
+#define _VIDEO_ENCODER_H
+
+#include <stdint.h>
+#include <memory>
+#include <string>
+#include <vector>
+
+#include "ref_counted_frame.h"
+#include "ref_counted_gl_sync.h"
+
+class HTTPD;
+class QSurface;
+class QuickSyncEncoder;
+
+class VideoEncoder {
+public:
+	VideoEncoder(QSurface *surface, const std::string &va_display, int width, int height, HTTPD *httpd);
+	~VideoEncoder();
+
+	// Hands a block of audio samples (starting at <pts>) to the encoder.
+	void add_audio(int64_t pts, std::vector<float> audio);
+	// Gets textures to render the next video frame into; forwarded to the
+	// underlying QuickSyncEncoder.
+	bool begin_frame(GLuint *y_tex, GLuint *cbcr_tex);
+	// Finishes the frame begun with begin_frame(); returns a GL sync fence.
+	// <input_frames> are kept alive until the encoder is done with the frame.
+	RefCountedGLsync end_frame(int64_t pts, int64_t duration, const std::vector<RefCountedFrame> &input_frames);
+
+	// Does a cut of the disk stream immediately ("frame" is used for the filename only).
+	void do_cut(int frame);
+
+private:
+	// Destroyed and recreated on every cut (see do_cut()).
+	std::unique_ptr<QuickSyncEncoder> quicksync_encoder;
+	// Constructor parameters, remembered so do_cut() can build a
+	// replacement QuickSyncEncoder with the same settings.
+	QSurface *surface;
+	std::string va_display;
+	int width, height;
+	HTTPD *httpd;
+};
+
+#endif