class QOpenGLContext;
class QSurface;
+namespace {
+
+// These need to survive several QuickSyncEncoderImpl instances,
+// so they are kept at file scope, outside the class.
+once_flag quick_sync_metrics_inited;
+LatencyHistogram mixer_latency_histogram, qs_latency_histogram;
+MuxMetrics current_file_mux_metrics, total_mux_metrics;
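+// 0.0 / 0.0 is NaN; it marks that no file is currently open
+// (see close_file(), which resets the metric to this value).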
+std::atomic<double> metric_current_file_start_time_seconds{0.0 / 0.0};
+std::atomic<int64_t> metric_quick_sync_stalled_frames{0};
+
+} // namespace
+
#define CHECK_VASTATUS(va_status, func) \
	if (va_status != VA_STATUS_SUCCESS) { \
		fprintf(stderr, "%s:%d (%s) failed with %d\n", __func__, __LINE__, func, va_status); \
		exit(1); \
	}
} else {
use_zerocopy = true;
}
+ global_flags.use_zerocopy = use_zerocopy;
}
VADisplay QuickSyncEncoderImpl::va_open_display(const string &va_display)
gl_surfaces[i].y_tex = resource_pool->create_2d_texture(GL_R8, 1, 1);
gl_surfaces[i].cbcr_tex = resource_pool->create_2d_texture(GL_RG8, 1, 1);
} else {
- gl_surfaces[i].y_tex = resource_pool->create_2d_texture(GL_R8, frame_width, frame_height);
- gl_surfaces[i].cbcr_tex = resource_pool->create_2d_texture(GL_RG8, frame_width / 2, frame_height / 2);
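+ // >8-bit content is stored with 16 bits per pixel, so size the
+ // readback buffers accordingly.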
+ size_t bytes_per_pixel = (global_flags.x264_bit_depth > 8) ? 2 : 1;
// Generate a PBO to read into. It doesn't necessarily fit 1:1 with the VA-API
// buffers, due to potentially differing pitch.
glGenBuffers(1, &gl_surfaces[i].pbo);
glBindBuffer(GL_PIXEL_PACK_BUFFER, gl_surfaces[i].pbo);
- glBufferStorage(GL_PIXEL_PACK_BUFFER, frame_width * frame_height * 2, nullptr, GL_MAP_READ_BIT | GL_MAP_WRITE_BIT | GL_MAP_PERSISTENT_BIT);
- uint8_t *ptr = (uint8_t *)glMapBufferRange(GL_PIXEL_PACK_BUFFER, 0, frame_width * frame_height * 2, GL_MAP_READ_BIT | GL_MAP_PERSISTENT_BIT);
+ glBufferStorage(GL_PIXEL_PACK_BUFFER, frame_width * frame_height * 2 * bytes_per_pixel, nullptr, GL_MAP_READ_BIT | GL_MAP_WRITE_BIT | GL_MAP_PERSISTENT_BIT);
+ uint8_t *ptr = (uint8_t *)glMapBufferRange(GL_PIXEL_PACK_BUFFER, 0, frame_width * frame_height * 2 * bytes_per_pixel, GL_MAP_READ_BIT | GL_MAP_PERSISTENT_BIT);
gl_surfaces[i].y_offset = 0;
- gl_surfaces[i].cbcr_offset = frame_width * frame_height;
+ gl_surfaces[i].cbcr_offset = frame_width * frame_height * bytes_per_pixel;
gl_surfaces[i].y_ptr = ptr + gl_surfaces[i].y_offset;
gl_surfaces[i].cbcr_ptr = ptr + gl_surfaces[i].cbcr_offset;
glBindBuffer(GL_PIXEL_PACK_BUFFER, 0);
vaUnmapBuffer(va_dpy, surf->coded_buf);
static int frameno = 0;
- print_latency("Current QuickSync latency (video inputs → disk mux):",
- task.received_ts, (task.frame_type == FRAME_B), &frameno);
+ print_latency("Current Quick Sync latency (video inputs → disk mux):",
+ task.received_ts, (task.frame_type == FRAME_B), &frameno, &qs_latency_histogram);
{
// Add video.
}
for (unsigned i = 0; i < SURFACE_NUM; i++) {
- if (!use_zerocopy) {
+ if (use_zerocopy) {
+ resource_pool->release_2d_texture(gl_surfaces[i].y_tex);
+ resource_pool->release_2d_texture(gl_surfaces[i].cbcr_tex);
+ } else {
glBindBuffer(GL_PIXEL_PACK_BUFFER, gl_surfaces[i].pbo);
glUnmapBuffer(GL_PIXEL_PACK_BUFFER);
glBindBuffer(GL_PIXEL_PACK_BUFFER, 0);
glDeleteBuffers(1, &gl_surfaces[i].pbo);
}
- resource_pool->release_2d_texture(gl_surfaces[i].y_tex);
- resource_pool->release_2d_texture(gl_surfaces[i].cbcr_tex);
}
has_released_gl_resources = true;
memset(&slice_param, 0, sizeof(slice_param));
}
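+ // Register the global metrics only once, even if several
+ // QuickSyncEncoderImpl instances come and go during the program's lifetime.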
+ call_once(quick_sync_metrics_inited, [](){
+ mixer_latency_histogram.init("mixer");
+ qs_latency_histogram.init("quick_sync");
+ current_file_mux_metrics.init({{ "destination", "current_file" }});
+ total_mux_metrics.init({{ "destination", "files_total" }});
+ global_metrics.add("current_file_start_time_seconds", &metric_current_file_start_time_seconds, Metrics::TYPE_GAUGE);
+ global_metrics.add("quick_sync_stalled_frames", &metric_quick_sync_stalled_frames);
+ });
+
storage_thread = thread(&QuickSyncEncoderImpl::storage_task_thread, this);
encode_thread = thread([this]{
}
}
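+// Returns whether begin_frame() hands out our own textures (zerocopy mode)
+// or expects the caller to supply its textures; see begin_frame() below.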
+bool QuickSyncEncoderImpl::is_zerocopy() const
+{
+ return use_zerocopy;
+}
+
bool QuickSyncEncoderImpl::begin_frame(int64_t pts, int64_t duration, YCbCrLumaCoefficients ycbcr_coefficients, const vector<RefCountedFrame> &input_frames, GLuint *y_tex, GLuint *cbcr_tex)
{
assert(!is_shutdown);
if (surf == nullptr) {
fprintf(stderr, "Warning: No free slots for frame %d, rendering has to wait for H.264 encoder\n",
current_storage_frame);
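+ // Count the stall, so it shows up in the exported metrics.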
+ ++metric_quick_sync_stalled_frames;
storage_task_queue_changed.wait(lock, [this, &surf]{
if (storage_thread_should_quit)
return true;
surface_for_frame[current_storage_frame] = surf;
}
- *y_tex = surf->y_tex;
- *cbcr_tex = surf->cbcr_tex;
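+ // In zerocopy mode, we own the textures and hand them out to the caller;
+ // otherwise, the caller renders into its own textures, which we remember
+ // here so that we can read them back later (without taking ownership).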
+ if (use_zerocopy) {
+ *y_tex = surf->y_tex;
+ *cbcr_tex = surf->cbcr_tex;
+ } else {
+ surf->y_tex = *y_tex;
+ surf->cbcr_tex = *cbcr_tex;
+ }
if (!global_flags.x264_video_to_disk) {
VAStatus va_status = vaDeriveImage(va_dpy, surf->src_surface, &surf->surface_image);
assert(!is_shutdown);
if (!use_zerocopy) {
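+ // >8-bit content is read back as 16-bit samples, matching the PBO layout.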
+ GLenum type = global_flags.x264_bit_depth > 8 ? GL_UNSIGNED_SHORT : GL_UNSIGNED_BYTE;
GLSurface *surf;
{
unique_lock<mutex> lock(storage_task_queue_mutex);
glBindTexture(GL_TEXTURE_2D, surf->y_tex);
check_error();
- glGetTexImage(GL_TEXTURE_2D, 0, GL_RED, GL_UNSIGNED_BYTE, BUFFER_OFFSET(surf->y_offset));
+ glGetTexImage(GL_TEXTURE_2D, 0, GL_RED, type, BUFFER_OFFSET(surf->y_offset));
check_error();
glBindTexture(GL_TEXTURE_2D, surf->cbcr_tex);
check_error();
- glGetTexImage(GL_TEXTURE_2D, 0, GL_RG, GL_UNSIGNED_BYTE, BUFFER_OFFSET(surf->cbcr_offset));
+ glGetTexImage(GL_TEXTURE_2D, 0, GL_RG, type, BUFFER_OFFSET(surf->cbcr_offset));
check_error();
+ // We don't own these; the caller does.
+ surf->y_tex = surf->cbcr_tex = 0;
+
glBindTexture(GL_TEXTURE_2D, 0);
check_error();
glBindBuffer(GL_PIXEL_PACK_BUFFER, 0);
void QuickSyncEncoderImpl::close_file()
{
file_mux.reset();
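+ // No file is open anymore; reset the start-time metric to NaN.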
+ metric_current_file_start_time_seconds = 0.0 / 0.0;
}
void QuickSyncEncoderImpl::open_output_file(const std::string &filename)
video_extradata = x264_encoder->get_global_headers();
}
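+ // Start the per-file mux metrics afresh for the new file.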
+ current_file_mux_metrics.reset();
+
AVCodecParametersWithDeleter audio_codecpar = file_audio_encoder->get_codec_parameters();
file_mux.reset(new Mux(avctx, frame_width, frame_height, Mux::CODEC_H264, video_extradata, audio_codecpar.get(), TIMEBASE,
- std::bind(&DiskSpaceEstimator::report_write, disk_space_estimator, filename, _1)));
+ std::bind(&DiskSpaceEstimator::report_write, disk_space_estimator, filename, _1),
+ { &current_file_mux_metrics, &total_mux_metrics }));
+ metric_current_file_start_time_seconds = get_timestamp_for_metrics();
if (global_flags.x264_video_to_disk) {
x264_encoder->add_mux(file_mux.get());
ReceivedTimestamps received_ts = find_received_timestamp(frame.input_frames);
static int frameno = 0;
print_latency("Current mixer latency (video inputs → ready for encode):",
- received_ts, false, &frameno);
+ received_ts, false, &frameno, &mixer_latency_histogram);
// Release back any input frames we needed to render this frame.
frame.input_frames.clear();
impl->add_audio(pts, audio);
}
+bool QuickSyncEncoder::is_zerocopy() const
+{
+ return impl->is_zerocopy();
+}
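+// A hypothetical caller would use this to decide who allocates the textures,
+// roughly along these lines:
+//
+//   if (quick_sync_encoder->is_zerocopy()) {
+//           // Render into the textures begin_frame() returns.
+//   } else {
+//           // Render into our own textures; the encoder reads them back.
+//   }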
+
bool QuickSyncEncoder::begin_frame(int64_t pts, int64_t duration, YCbCrLumaCoefficients ycbcr_coefficients, const vector<RefCountedFrame> &input_frames, GLuint *y_tex, GLuint *cbcr_tex)
{
return impl->begin_frame(pts, duration, ycbcr_coefficients, input_frames, y_tex, cbcr_tex);