--- /dev/null
+#include "video_stream.h"
+
+extern "C" {
+#include <libavformat/avformat.h>
+#include <libavformat/avio.h>
+}
+
+#include "chroma_subsampler.h"
+#include "shared/context.h"
+#include "flags.h"
+#include "flow.h"
+#include "shared/httpd.h"
+#include "jpeg_frame_view.h"
+#include "movit/util.h"
+#include "shared/mux.h"
+#include "player.h"
+#include "util.h"
+#include "ycbcr_converter.h"
+
+#include <epoxy/glx.h>
+#include <jpeglib.h>
+#include <unistd.h>
+
+using namespace std;
+using namespace std::chrono;
+
+extern HTTPD *global_httpd;
+
+struct VectorDestinationManager {
+ jpeg_destination_mgr pub;
+ std::vector<uint8_t> dest;
+
+ VectorDestinationManager()
+ {
+ pub.init_destination = init_destination_thunk;
+ pub.empty_output_buffer = empty_output_buffer_thunk;
+ pub.term_destination = term_destination_thunk;
+ }
+
+ static void init_destination_thunk(j_compress_ptr ptr)
+ {
+ ((VectorDestinationManager *)(ptr->dest))->init_destination();
+ }
+
+ inline void init_destination()
+ {
+ make_room(0);
+ }
+
+ static boolean empty_output_buffer_thunk(j_compress_ptr ptr)
+ {
+ return ((VectorDestinationManager *)(ptr->dest))->empty_output_buffer();
+ }
+
+ inline bool empty_output_buffer()
+ {
+ make_room(dest.size()); // libjpeg wants the whole buffer treated as full here, so deliberately ignore pub.free_in_buffer.
+ return true;
+ }
+
+ inline void make_room(size_t bytes_used)
+ {
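+ // Grow by at least 4 kB past the used bytes, then hand libjpeg the
+ // vector's entire capacity, so that the vector's geometric growth is
+ // not wasted on allocated-but-unused bytes.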
+ dest.resize(bytes_used + 4096);
+ dest.resize(dest.capacity());
+ pub.next_output_byte = dest.data() + bytes_used;
+ pub.free_in_buffer = dest.size() - bytes_used;
+ }
+
+ static void term_destination_thunk(j_compress_ptr ptr)
+ {
+ ((VectorDestinationManager *)(ptr->dest))->term_destination();
+ }
+
+ inline void term_destination()
+ {
+ dest.resize(dest.size() - pub.free_in_buffer);
+ }
+};
+static_assert(std::is_standard_layout<VectorDestinationManager>::value, "");
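+// (The casts in the thunks from jpeg_destination_mgr * to VectorDestinationManager *
+// are only valid because pub is the first member and the type is standard-layout.)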
+
+vector<uint8_t> encode_jpeg(const uint8_t *y_data, const uint8_t *cb_data, const uint8_t *cr_data, unsigned width, unsigned height)
+{
+ VectorDestinationManager dest;
+
+ jpeg_compress_struct cinfo;
+ jpeg_error_mgr jerr;
+ cinfo.err = jpeg_std_error(&jerr);
+ jpeg_create_compress(&cinfo);
+
+ cinfo.dest = (jpeg_destination_mgr *)&dest;
+ cinfo.input_components = 3;
+ cinfo.in_color_space = JCS_RGB;
+ jpeg_set_defaults(&cinfo);
+ constexpr int quality = 90;
+ jpeg_set_quality(&cinfo, quality, /*force_baseline=*/false);
+
+ cinfo.image_width = width;
+ cinfo.image_height = height;
+ cinfo.raw_data_in = true;
+ jpeg_set_colorspace(&cinfo, JCS_YCbCr);
+ cinfo.comp_info[0].h_samp_factor = 2;
+ cinfo.comp_info[0].v_samp_factor = 1;
+ cinfo.comp_info[1].h_samp_factor = 1;
+ cinfo.comp_info[1].v_samp_factor = 1;
+ cinfo.comp_info[2].h_samp_factor = 1;
+ cinfo.comp_info[2].v_samp_factor = 1;
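+ // The (2,1)/(1,1)/(1,1) sampling factors correspond to 4:2:2, matching the planes we feed in.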
+ cinfo.CCIR601_sampling = true; // Seems to be mostly ignored by libjpeg, though.
+ jpeg_start_compress(&cinfo, true);
+
+ JSAMPROW yptr[8], cbptr[8], crptr[8];
+ JSAMPARRAY data[3] = { yptr, cbptr, crptr };
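+ // jpeg_write_raw_data() wants one MCU row per call; with v_samp_factor=1
+ // for all components, that is 8 scanlines. (This assumes height % 8 == 0.)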
+ for (unsigned y = 0; y < height; y += 8) {
+ for (unsigned yy = 0; yy < 8; ++yy) {
+ yptr[yy] = const_cast<JSAMPROW>(&y_data[(y + yy) * width]);
+ cbptr[yy] = const_cast<JSAMPROW>(&cb_data[(y + yy) * width / 2]);
+ crptr[yy] = const_cast<JSAMPROW>(&cr_data[(y + yy) * width / 2]);
+ }
+
+ jpeg_write_raw_data(&cinfo, data, /*num_lines=*/8);
+ }
+
+ jpeg_finish_compress(&cinfo);
+ jpeg_destroy_compress(&cinfo);
+
+ return move(dest.dest);
+}
+
+VideoStream::VideoStream()
+{
+ ycbcr_converter.reset(new YCbCrConverter(YCbCrConverter::OUTPUT_TO_DUAL_YCBCR, /*resource_pool=*/nullptr));
+ ycbcr_semiplanar_converter.reset(new YCbCrConverter(YCbCrConverter::OUTPUT_TO_SEMIPLANAR, /*resource_pool=*/nullptr));
+
+ GLuint input_tex[num_interpolate_slots], gray_tex[num_interpolate_slots];
+ GLuint fade_y_output_tex[num_interpolate_slots], fade_cbcr_output_tex[num_interpolate_slots];
+ GLuint cb_tex[num_interpolate_slots], cr_tex[num_interpolate_slots];
+
+ glCreateTextures(GL_TEXTURE_2D_ARRAY, num_interpolate_slots, input_tex);
+ glCreateTextures(GL_TEXTURE_2D_ARRAY, num_interpolate_slots, gray_tex);
+ glCreateTextures(GL_TEXTURE_2D, num_interpolate_slots, fade_y_output_tex);
+ glCreateTextures(GL_TEXTURE_2D, num_interpolate_slots, fade_cbcr_output_tex);
+ glCreateTextures(GL_TEXTURE_2D, num_interpolate_slots, cb_tex);
+ glCreateTextures(GL_TEXTURE_2D, num_interpolate_slots, cr_tex);
+ check_error();
+
+ constexpr size_t width = 1280, height = 720; // FIXME: adjustable width, height
+ int levels = find_num_levels(width, height);
+ for (size_t i = 0; i < num_interpolate_slots; ++i) {
+ glTextureStorage3D(input_tex[i], levels, GL_RGBA8, width, height, 2);
+ check_error();
+ glTextureStorage3D(gray_tex[i], levels, GL_R8, width, height, 2);
+ check_error();
+ glTextureStorage2D(fade_y_output_tex[i], 1, GL_R8, width, height);
+ check_error();
+ glTextureStorage2D(fade_cbcr_output_tex[i], 1, GL_RG8, width, height);
+ check_error();
+ glTextureStorage2D(cb_tex[i], 1, GL_R8, width / 2, height);
+ check_error();
+ glTextureStorage2D(cr_tex[i], 1, GL_R8, width / 2, height);
+ check_error();
+
+ unique_ptr<InterpolatedFrameResources> resource(new InterpolatedFrameResources);
+ resource->owner = this;
+ resource->input_tex = input_tex[i];
+ resource->gray_tex = gray_tex[i];
+ resource->fade_y_output_tex = fade_y_output_tex[i];
+ resource->fade_cbcr_output_tex = fade_cbcr_output_tex[i];
+ resource->cb_tex = cb_tex[i];
+ resource->cr_tex = cr_tex[i];
+ glCreateFramebuffers(2, resource->input_fbos);
+ check_error();
+ glCreateFramebuffers(1, &resource->fade_fbo);
+ check_error();
+
+ glNamedFramebufferTextureLayer(resource->input_fbos[0], GL_COLOR_ATTACHMENT0, input_tex[i], 0, 0);
+ check_error();
+ glNamedFramebufferTextureLayer(resource->input_fbos[0], GL_COLOR_ATTACHMENT1, gray_tex[i], 0, 0);
+ check_error();
+ glNamedFramebufferTextureLayer(resource->input_fbos[1], GL_COLOR_ATTACHMENT0, input_tex[i], 0, 1);
+ check_error();
+ glNamedFramebufferTextureLayer(resource->input_fbos[1], GL_COLOR_ATTACHMENT1, gray_tex[i], 0, 1);
+ check_error();
+ glNamedFramebufferTexture(resource->fade_fbo, GL_COLOR_ATTACHMENT0, fade_y_output_tex[i], 0);
+ check_error();
+ glNamedFramebufferTexture(resource->fade_fbo, GL_COLOR_ATTACHMENT1, fade_cbcr_output_tex[i], 0);
+ check_error();
+
+ GLuint bufs[] = { GL_COLOR_ATTACHMENT0, GL_COLOR_ATTACHMENT1 };
+ glNamedFramebufferDrawBuffers(resource->input_fbos[0], 2, bufs);
+ check_error();
+ glNamedFramebufferDrawBuffers(resource->input_fbos[1], 2, bufs);
+ check_error();
+ glNamedFramebufferDrawBuffers(resource->fade_fbo, 2, bufs);
+ check_error();
+
+ glCreateBuffers(1, &resource->pbo);
+ check_error();
+ glNamedBufferStorage(resource->pbo, width * height * 4, nullptr, GL_MAP_READ_BIT | GL_MAP_PERSISTENT_BIT);
+ check_error();
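+ // Map the PBO persistently, so that the encode thread can read back the
+ // downloaded pixels later without any further map/unmap calls.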
+ resource->pbo_contents = glMapNamedBufferRange(resource->pbo, 0, width * height * 4, GL_MAP_READ_BIT | GL_MAP_PERSISTENT_BIT);
+ interpolate_resources.push_back(move(resource));
+ }
+
+ check_error();
+
+ OperatingPoint op;
+ if (global_flags.interpolation_quality == 1) {
+ op = operating_point1;
+ } else if (global_flags.interpolation_quality == 2) {
+ op = operating_point2;
+ } else if (global_flags.interpolation_quality == 3) {
+ op = operating_point3;
+ } else if (global_flags.interpolation_quality == 4) {
+ op = operating_point4;
+ } else {
+ assert(false);
+ }
+
+ compute_flow.reset(new DISComputeFlow(width, height, op));
+ interpolate.reset(new Interpolate(op, /*split_ycbcr_output=*/true));
+ interpolate_no_split.reset(new Interpolate(op, /*split_ycbcr_output=*/false));
+ chroma_subsampler.reset(new ChromaSubsampler);
+ check_error();
+
+ // The “last frame” is initially black.
+ unique_ptr<uint8_t[]> y(new uint8_t[1280 * 720]);
+ unique_ptr<uint8_t[]> cb_or_cr(new uint8_t[640 * 720]);
+ memset(y.get(), 16, 1280 * 720);
+ memset(cb_or_cr.get(), 128, 640 * 720);
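+ // (Y'=16 is black, and Cb=Cr=128 is neutral chroma, in limited-range Y'CbCr.)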
+ last_frame = encode_jpeg(y.get(), cb_or_cr.get(), cb_or_cr.get(), 1280, 720);
+}
+
+VideoStream::~VideoStream() {}
+
+void VideoStream::start()
+{
+ AVFormatContext *avctx = avformat_alloc_context();
+ avctx->oformat = av_guess_format("nut", nullptr, nullptr);
+
+ uint8_t *buf = (uint8_t *)av_malloc(MUX_BUFFER_SIZE);
+ avctx->pb = avio_alloc_context(buf, MUX_BUFFER_SIZE, 1, this, nullptr, nullptr, nullptr);
+ avctx->pb->write_data_type = &VideoStream::write_packet2_thunk;
+ avctx->pb->ignore_boundary_point = 1;
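+ // Using write_data_type (rather than a plain write callback) tells us the
+ // marker type of each chunk, so write_packet2() below can route the mux
+ // header separately from regular stream data.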
+
+ Mux::Codec video_codec = Mux::CODEC_MJPEG;
+
+ avctx->flags = AVFMT_FLAG_CUSTOM_IO;
+
+ string video_extradata;
+
+ constexpr int width = 1280, height = 720; // Doesn't matter for MJPEG.
+ stream_mux.reset(new Mux(avctx, width, height, video_codec, video_extradata, /*audio_codec_parameters=*/nullptr,
+ AVCOL_SPC_BT709, Mux::WITHOUT_AUDIO,
+ COARSE_TIMEBASE, /*write_callback=*/nullptr, Mux::WRITE_FOREGROUND, {}));
+
+
+ encode_thread = thread(&VideoStream::encode_thread_func, this);
+}
+
+void VideoStream::stop()
+{
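+	// NOTE: encode_thread_func() currently loops forever, so this join will
+	// block; a quit flag would be needed for a clean shutdown.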
+ encode_thread.join();
+}
+
+void VideoStream::clear_queue()
+{
+ deque<QueuedFrame> q;
+
+ {
+ unique_lock<mutex> lock(queue_lock);
+ q = move(frame_queue);
+ }
+
+ // These are not RAII-ed, unfortunately, so we'll need to clean them ourselves.
+ // Note that release_texture() is thread-safe.
+ for (const QueuedFrame &qf : q) {
+ if (qf.type == QueuedFrame::INTERPOLATED ||
+ qf.type == QueuedFrame::FADED_INTERPOLATED) {
+ compute_flow->release_texture(qf.flow_tex);
+ }
+ if (qf.type == QueuedFrame::INTERPOLATED) {
+ interpolate->release_texture(qf.output_tex);
+ interpolate->release_texture(qf.cbcr_tex);
+ }
+ }
+
+	// Destroy q outside the mutex; destroying the QueuedFrames can take locks
+	// again, which would be a double-lock if done under queue_lock.
+}
+
+void VideoStream::schedule_original_frame(steady_clock::time_point local_pts,
+ int64_t output_pts, function<void()> &&display_func,
+ QueueSpotHolder &&queue_spot_holder,
+ FrameOnDisk frame)
+{
+ fprintf(stderr, "output_pts=%ld original input_pts=%ld\n", output_pts, frame.pts);
+
+ // Preload the file from disk, so that the encoder thread does not get stalled.
+ // TODO: Consider sending it through the queue instead.
+ (void)frame_reader.read_frame(frame);
+
+ QueuedFrame qf;
+ qf.local_pts = local_pts;
+ qf.type = QueuedFrame::ORIGINAL;
+ qf.output_pts = output_pts;
+ qf.frame1 = frame;
+ qf.display_func = move(display_func);
+ qf.queue_spot_holder = move(queue_spot_holder);
+
+ unique_lock<mutex> lock(queue_lock);
+ frame_queue.push_back(move(qf));
+ queue_changed.notify_all();
+}
+
+void VideoStream::schedule_faded_frame(steady_clock::time_point local_pts, int64_t output_pts,
+ function<void()> &&display_func,
+ QueueSpotHolder &&queue_spot_holder,
+ FrameOnDisk frame1_spec, FrameOnDisk frame2_spec,
+ float fade_alpha)
+{
+ fprintf(stderr, "output_pts=%ld faded input_pts=%ld,%ld fade_alpha=%.2f\n", output_pts, frame1_spec.pts, frame2_spec.pts, fade_alpha);
+
+ // Get the temporary OpenGL resources we need for doing the fade.
+ // (We share these with interpolated frames, which is slightly
+ // overkill, but there's no need to waste resources on keeping
+ // separate pools around.)
+ BorrowedInterpolatedFrameResources resources;
+ {
+ unique_lock<mutex> lock(queue_lock);
+ if (interpolate_resources.empty()) {
+ fprintf(stderr, "WARNING: Too many interpolated frames already in transit; dropping one.\n");
+ return;
+ }
+ resources = BorrowedInterpolatedFrameResources(interpolate_resources.front().release());
+ interpolate_resources.pop_front();
+ }
+
+ bool did_decode;
+
+ shared_ptr<Frame> frame1 = decode_jpeg_with_cache(frame1_spec, DECODE_IF_NOT_IN_CACHE, &frame_reader, &did_decode);
+ shared_ptr<Frame> frame2 = decode_jpeg_with_cache(frame2_spec, DECODE_IF_NOT_IN_CACHE, &frame_reader, &did_decode);
+
+ ycbcr_semiplanar_converter->prepare_chain_for_fade(frame1, frame2, fade_alpha)->render_to_fbo(resources->fade_fbo, 1280, 720);
+
+ QueuedFrame qf;
+ qf.local_pts = local_pts;
+ qf.type = QueuedFrame::FADED;
+ qf.output_pts = output_pts;
+ qf.frame1 = frame1_spec;
+ qf.display_func = move(display_func);
+ qf.queue_spot_holder = move(queue_spot_holder);
+
+ qf.secondary_frame = frame2_spec;
+
+ // Subsample and split Cb/Cr.
+ chroma_subsampler->subsample_chroma(resources->fade_cbcr_output_tex, 1280, 720, resources->cb_tex, resources->cr_tex);
+
+ // Read it down (asynchronously) to the CPU.
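+	// The PBO layout is Y (1280*720 bytes), then Cb (640*720), then Cr (640*720);
+	// frame_from_pbo() below assumes exactly this layout.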
+ glPixelStorei(GL_PACK_ROW_LENGTH, 0);
+ glBindBuffer(GL_PIXEL_PACK_BUFFER, resources->pbo);
+ check_error();
+ glGetTextureImage(resources->fade_y_output_tex, 0, GL_RED, GL_UNSIGNED_BYTE, 1280 * 720 * 4, BUFFER_OFFSET(0));
+ check_error();
+ glGetTextureImage(resources->cb_tex, 0, GL_RED, GL_UNSIGNED_BYTE, 1280 * 720 * 3, BUFFER_OFFSET(1280 * 720));
+ check_error();
+ glGetTextureImage(resources->cr_tex, 0, GL_RED, GL_UNSIGNED_BYTE, 1280 * 720 * 3 - 640 * 720, BUFFER_OFFSET(1280 * 720 + 640 * 720));
+ check_error();
+ glBindBuffer(GL_PIXEL_PACK_BUFFER, 0);
+
+ // Set a fence we can wait for to make sure the CPU sees the read.
+ glMemoryBarrier(GL_CLIENT_MAPPED_BUFFER_BARRIER_BIT);
+ check_error();
+ qf.fence = RefCountedGLsync(GL_SYNC_GPU_COMMANDS_COMPLETE, /*flags=*/0);
+ check_error();
+ qf.resources = move(resources);
+
+ unique_lock<mutex> lock(queue_lock);
+ frame_queue.push_back(move(qf));
+ queue_changed.notify_all();
+}
+
+void VideoStream::schedule_interpolated_frame(steady_clock::time_point local_pts,
+ int64_t output_pts, function<void(shared_ptr<Frame>)> &&display_func,
+ QueueSpotHolder &&queue_spot_holder,
+ FrameOnDisk frame1, FrameOnDisk frame2,
+ float alpha, FrameOnDisk secondary_frame, float fade_alpha)
+{
+ if (secondary_frame.pts != -1) {
+ fprintf(stderr, "output_pts=%ld interpolated input_pts1=%ld input_pts2=%ld alpha=%.3f secondary_pts=%ld fade_alpha=%.2f\n", output_pts, frame1.pts, frame2.pts, alpha, secondary_frame.pts, fade_alpha);
+ } else {
+ fprintf(stderr, "output_pts=%ld interpolated input_pts1=%ld input_pts2=%ld alpha=%.3f\n", output_pts, frame1.pts, frame2.pts, alpha);
+ }
+
+ // Get the temporary OpenGL resources we need for doing the interpolation.
+ BorrowedInterpolatedFrameResources resources;
+ {
+ unique_lock<mutex> lock(queue_lock);
+ if (interpolate_resources.empty()) {
+ fprintf(stderr, "WARNING: Too many interpolated frames already in transit; dropping one.\n");
+ return;
+ }
+ resources = BorrowedInterpolatedFrameResources(interpolate_resources.front().release());
+ interpolate_resources.pop_front();
+ }
+
+ QueuedFrame qf;
+ qf.type = (secondary_frame.pts == -1) ? QueuedFrame::INTERPOLATED : QueuedFrame::FADED_INTERPOLATED;
+ qf.output_pts = output_pts;
+ qf.display_decoded_func = move(display_func);
+ qf.queue_spot_holder = move(queue_spot_holder);
+ qf.local_pts = local_pts;
+
+ check_error();
+
+	// Convert the two input frames to OpenGL textures.
+ for (size_t frame_no = 0; frame_no < 2; ++frame_no) {
+ FrameOnDisk frame_spec = frame_no == 1 ? frame2 : frame1;
+ bool did_decode;
+ shared_ptr<Frame> frame = decode_jpeg_with_cache(frame_spec, DECODE_IF_NOT_IN_CACHE, &frame_reader, &did_decode);
+ ycbcr_converter->prepare_chain_for_conversion(frame)->render_to_fbo(resources->input_fbos[frame_no], 1280, 720);
+ }
+
+ glGenerateTextureMipmap(resources->input_tex);
+ check_error();
+ glGenerateTextureMipmap(resources->gray_tex);
+ check_error();
+
+ // Compute the interpolated frame.
+ qf.flow_tex = compute_flow->exec(resources->gray_tex, DISComputeFlow::FORWARD_AND_BACKWARD, DISComputeFlow::DO_NOT_RESIZE_FLOW);
+ check_error();
+
+ if (secondary_frame.pts != -1) {
+ // Fade. First kick off the interpolation.
+ tie(qf.output_tex, ignore) = interpolate_no_split->exec(resources->input_tex, resources->gray_tex, qf.flow_tex, 1280, 720, alpha);
+ check_error();
+
+ // Now decode the image we are fading against.
+ bool did_decode;
+ shared_ptr<Frame> frame2 = decode_jpeg_with_cache(secondary_frame, DECODE_IF_NOT_IN_CACHE, &frame_reader, &did_decode);
+
+ // Then fade against it, putting it into the fade Y' and CbCr textures.
+ ycbcr_semiplanar_converter->prepare_chain_for_fade_from_texture(qf.output_tex, frame2, fade_alpha)->render_to_fbo(resources->fade_fbo, 1280, 720);
+
+ // Subsample and split Cb/Cr.
+ chroma_subsampler->subsample_chroma(resources->fade_cbcr_output_tex, 1280, 720, resources->cb_tex, resources->cr_tex);
+
+ interpolate_no_split->release_texture(qf.output_tex);
+ } else {
+ tie(qf.output_tex, qf.cbcr_tex) = interpolate->exec(resources->input_tex, resources->gray_tex, qf.flow_tex, 1280, 720, alpha);
+ check_error();
+
+ // Subsample and split Cb/Cr.
+ chroma_subsampler->subsample_chroma(qf.cbcr_tex, 1280, 720, resources->cb_tex, resources->cr_tex);
+ }
+
+ // We could have released qf.flow_tex here, but to make sure we don't cause a stall
+ // when trying to reuse it for the next frame, we can just as well hold on to it
+ // and release it only when the readback is done.
+
+ // Read it down (asynchronously) to the CPU.
+ glPixelStorei(GL_PACK_ROW_LENGTH, 0);
+ glBindBuffer(GL_PIXEL_PACK_BUFFER, resources->pbo);
+ check_error();
+ if (secondary_frame.pts != -1) {
+ glGetTextureImage(resources->fade_y_output_tex, 0, GL_RED, GL_UNSIGNED_BYTE, 1280 * 720 * 4, BUFFER_OFFSET(0));
+ } else {
+ glGetTextureImage(qf.output_tex, 0, GL_RED, GL_UNSIGNED_BYTE, 1280 * 720 * 4, BUFFER_OFFSET(0));
+ }
+ check_error();
+ glGetTextureImage(resources->cb_tex, 0, GL_RED, GL_UNSIGNED_BYTE, 1280 * 720 * 3, BUFFER_OFFSET(1280 * 720));
+ check_error();
+ glGetTextureImage(resources->cr_tex, 0, GL_RED, GL_UNSIGNED_BYTE, 1280 * 720 * 3 - 640 * 720, BUFFER_OFFSET(1280 * 720 + 640 * 720));
+ check_error();
+ glBindBuffer(GL_PIXEL_PACK_BUFFER, 0);
+
+ // Set a fence we can wait for to make sure the CPU sees the read.
+ glMemoryBarrier(GL_CLIENT_MAPPED_BUFFER_BARRIER_BIT);
+ check_error();
+ qf.fence = RefCountedGLsync(GL_SYNC_GPU_COMMANDS_COMPLETE, /*flags=*/0);
+ check_error();
+ qf.resources = move(resources);
+
+ unique_lock<mutex> lock(queue_lock);
+ frame_queue.push_back(move(qf));
+ queue_changed.notify_all();
+}
+
+void VideoStream::schedule_refresh_frame(steady_clock::time_point local_pts,
+ int64_t output_pts, function<void()> &&display_func,
+ QueueSpotHolder &&queue_spot_holder)
+{
+ QueuedFrame qf;
+ qf.type = QueuedFrame::REFRESH;
+ qf.output_pts = output_pts;
+ qf.display_func = move(display_func);
+ qf.queue_spot_holder = move(queue_spot_holder);
+
+ unique_lock<mutex> lock(queue_lock);
+ frame_queue.push_back(move(qf));
+ queue_changed.notify_all();
+}
+
+namespace {
+
+shared_ptr<Frame> frame_from_pbo(void *contents, size_t width, size_t height)
+{
+ size_t chroma_width = width / 2;
+
+ const uint8_t *y = (const uint8_t *)contents;
+ const uint8_t *cb = (const uint8_t *)contents + width * height;
+ const uint8_t *cr = (const uint8_t *)contents + width * height + chroma_width * height;
+
+ shared_ptr<Frame> frame(new Frame);
+ frame->y.reset(new uint8_t[width * height]);
+ frame->cb.reset(new uint8_t[chroma_width * height]);
+ frame->cr.reset(new uint8_t[chroma_width * height]);
+ for (unsigned yy = 0; yy < height; ++yy) {
+ memcpy(frame->y.get() + width * yy, y + width * yy, width);
+ memcpy(frame->cb.get() + chroma_width * yy, cb + chroma_width * yy, chroma_width);
+ memcpy(frame->cr.get() + chroma_width * yy, cr + chroma_width * yy, chroma_width);
+ }
+ frame->is_semiplanar = false;
+ frame->width = width;
+ frame->height = height;
+ frame->chroma_subsampling_x = 2;
+ frame->chroma_subsampling_y = 1;
+ frame->pitch_y = width;
+ frame->pitch_chroma = chroma_width;
+ return frame;
+}
+
+} // namespace
+
+void VideoStream::encode_thread_func()
+{
+ pthread_setname_np(pthread_self(), "VideoStream");
+ QSurface *surface = create_surface();
+ QOpenGLContext *context = create_context(surface);
+ bool ok = make_current(context, surface);
+ if (!ok) {
+ fprintf(stderr, "Video stream couldn't get an OpenGL context\n");
+ exit(1);
+ }
+
+ for ( ;; ) {
+ QueuedFrame qf;
+ {
+ unique_lock<mutex> lock(queue_lock);
+
+ // Wait until we have a frame to play.
+ queue_changed.wait(lock, [this]{
+ return !frame_queue.empty();
+ });
+ steady_clock::time_point frame_start = frame_queue.front().local_pts;
+
+ // Now sleep until the frame is supposed to start (the usual case),
+ // _or_ clear_queue() happened.
+ bool aborted = queue_changed.wait_until(lock, frame_start, [this, frame_start]{
+ return frame_queue.empty() || frame_queue.front().local_pts != frame_start;
+ });
+ if (aborted) {
+ // clear_queue() happened, so don't play this frame after all.
+ continue;
+ }
+ qf = move(frame_queue.front());
+ frame_queue.pop_front();
+ }
+
+ if (qf.type == QueuedFrame::ORIGINAL) {
+ // Send the JPEG frame on, unchanged.
+ string jpeg = frame_reader.read_frame(qf.frame1);
+ AVPacket pkt;
+ av_init_packet(&pkt);
+ pkt.stream_index = 0;
+ pkt.data = (uint8_t *)jpeg.data();
+ pkt.size = jpeg.size();
+ stream_mux->add_packet(pkt, qf.output_pts, qf.output_pts);
+
+ last_frame.assign(&jpeg[0], &jpeg[0] + jpeg.size());
+ } else if (qf.type == QueuedFrame::FADED) {
+ glClientWaitSync(qf.fence.get(), /*flags=*/0, GL_TIMEOUT_IGNORED);
+
+ shared_ptr<Frame> frame = frame_from_pbo(qf.resources->pbo_contents, 1280, 720);
+
+ // Now JPEG encode it, and send it on to the stream.
+ vector<uint8_t> jpeg = encode_jpeg(frame->y.get(), frame->cb.get(), frame->cr.get(), 1280, 720);
+
+ AVPacket pkt;
+ av_init_packet(&pkt);
+ pkt.stream_index = 0;
+ pkt.data = (uint8_t *)jpeg.data();
+ pkt.size = jpeg.size();
+ stream_mux->add_packet(pkt, qf.output_pts, qf.output_pts);
+ last_frame = move(jpeg);
+ } else if (qf.type == QueuedFrame::INTERPOLATED || qf.type == QueuedFrame::FADED_INTERPOLATED) {
+ glClientWaitSync(qf.fence.get(), /*flags=*/0, GL_TIMEOUT_IGNORED);
+
+ // Send it on to display.
+ shared_ptr<Frame> frame = frame_from_pbo(qf.resources->pbo_contents, 1280, 720);
+ if (qf.display_decoded_func != nullptr) {
+ qf.display_decoded_func(frame);
+ }
+
+ // Now JPEG encode it, and send it on to the stream.
+ vector<uint8_t> jpeg = encode_jpeg(frame->y.get(), frame->cb.get(), frame->cr.get(), 1280, 720);
+ compute_flow->release_texture(qf.flow_tex);
+ if (qf.type != QueuedFrame::FADED_INTERPOLATED) {
+ interpolate->release_texture(qf.output_tex);
+ interpolate->release_texture(qf.cbcr_tex);
+ }
+
+ AVPacket pkt;
+ av_init_packet(&pkt);
+ pkt.stream_index = 0;
+ pkt.data = (uint8_t *)jpeg.data();
+ pkt.size = jpeg.size();
+ stream_mux->add_packet(pkt, qf.output_pts, qf.output_pts);
+ last_frame = move(jpeg);
+ } else if (qf.type == QueuedFrame::REFRESH) {
+ AVPacket pkt;
+ av_init_packet(&pkt);
+ pkt.stream_index = 0;
+ pkt.data = (uint8_t *)last_frame.data();
+ pkt.size = last_frame.size();
+ stream_mux->add_packet(pkt, qf.output_pts, qf.output_pts);
+ } else {
+ assert(false);
+ }
+ if (qf.display_func != nullptr) {
+ qf.display_func();
+ }
+ }
+}
+
+int VideoStream::write_packet2_thunk(void *opaque, uint8_t *buf, int buf_size, AVIODataMarkerType type, int64_t time)
+{
+ VideoStream *video_stream = (VideoStream *)opaque;
+ return video_stream->write_packet2(buf, buf_size, type, time);
+}
+
+int VideoStream::write_packet2(uint8_t *buf, int buf_size, AVIODataMarkerType type, int64_t time)
+{
+ if (type == AVIO_DATA_MARKER_SYNC_POINT || type == AVIO_DATA_MARKER_BOUNDARY_POINT) {
+ seen_sync_markers = true;
+ } else if (type == AVIO_DATA_MARKER_UNKNOWN && !seen_sync_markers) {
+ // We don't know if this is a keyframe or not (the muxer could
+ // avoid marking it), so we just have to make the best of it.
+ type = AVIO_DATA_MARKER_SYNC_POINT;
+ }
+
+ if (type == AVIO_DATA_MARKER_HEADER) {
+ stream_mux_header.append((char *)buf, buf_size);
+ global_httpd->set_header(HTTPD::MAIN_STREAM, stream_mux_header);
+ } else {
+ global_httpd->add_data(HTTPD::MAIN_STREAM, (char *)buf, buf_size, type == AVIO_DATA_MARKER_SYNC_POINT, time, AVRational{ AV_TIME_BASE, 1 });
+ }
+ return buf_size;
+}
#include "bmusb/bmusb.h"
#include "decklink_util.h"
#include "flags.h"
-#include "memcpy_interleaved.h"
+#include "shared/memcpy_interleaved.h"
#include "v210_converter.h"
#define FRAME_SIZE (8 << 20) // 8 MB.
} else {
memcpy(current_video_frame.data, frame_bytes, num_bytes);
}
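+	// Also copy into data_copy if the allocator provided one
+	// (the MJPEG encoder needs an unmodified copy of the raw frame).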
+ if (current_video_frame.data_copy != nullptr) {
+ memcpy(current_video_frame.data_copy, frame_bytes, num_bytes);
+ }
current_video_frame.len += num_bytes;
video_format.width = width;
#include "flags.h"
#include "ffmpeg_capture.h"
#include "mixer.h"
-#include "mux.h"
+#include "shared/mux.h"
#include "quittable_sleeper.h"
-#include "timebase.h"
+#include "shared/timebase.h"
#include "x264_encoder.h"
#include <assert.h>
QuittableSleeper should_quit;
MuxMetrics stream_mux_metrics;
+ namespace {
+
int write_packet(void *opaque, uint8_t *buf, int buf_size, AVIODataMarkerType type, int64_t time)
{
static bool seen_sync_markers = false;
if (type == AVIO_DATA_MARKER_HEADER) {
stream_mux_header.append((char *)buf, buf_size);
- httpd->set_header(stream_mux_header);
+ httpd->set_header(HTTPD::MAIN_STREAM, stream_mux_header);
} else {
- httpd->add_data((char *)buf, buf_size, type == AVIO_DATA_MARKER_SYNC_POINT, time, AVRational{ AV_TIME_BASE, 1 });
+ httpd->add_data(HTTPD::MAIN_STREAM, (char *)buf, buf_size, type == AVIO_DATA_MARKER_SYNC_POINT, time, AVRational{ AV_TIME_BASE, 1 });
}
return buf_size;
}
+ } // namespace
+
unique_ptr<Mux> create_mux(HTTPD *httpd, AVOutputFormat *oformat, X264Encoder *x264_encoder, AudioEncoder *audio_encoder)
{
AVFormatContext *avctx = avformat_alloc_context();
string video_extradata = x264_encoder->get_global_headers();
unique_ptr<Mux> mux;
- mux.reset(new Mux(avctx, global_flags.width, global_flags.height, Mux::CODEC_H264, video_extradata, audio_encoder->get_codec_parameters().get(), COARSE_TIMEBASE,
+ mux.reset(new Mux(avctx, global_flags.width, global_flags.height, Mux::CODEC_H264, video_extradata, audio_encoder->get_codec_parameters().get(),
+ get_color_space(global_flags.ycbcr_rec709_coefficients), Mux::WITH_AUDIO, COARSE_TIMEBASE,
/*write_callback=*/nullptr, Mux::WRITE_FOREGROUND, { &stream_mux_metrics }));
stream_mux_metrics.init({{ "destination", "http" }});
return mux;
--- /dev/null
+qt5 = import('qt5')
+protoc = find_program('protoc')
+cxx = meson.get_compiler('cpp')
+
+embedded_bmusb = get_option('embedded_bmusb')
+
+alsadep = dependency('alsa')
+bmusbdep = dependency('bmusb', required: not embedded_bmusb)
+dldep = cxx.find_library('dl')
+epoxydep = dependency('epoxy')
+libavcodecdep = dependency('libavcodec')
+libavformatdep = dependency('libavformat')
+libavresampledep = dependency('libavresample')
+libavutildep = dependency('libavutil')
+libjpegdep = dependency('libjpeg')
+libswscaledep = dependency('libswscale')
+libusbdep = dependency('libusb-1.0')
+luajitdep = dependency('luajit')
+movitdep = dependency('movit')
+protobufdep = dependency('protobuf')
+qcustomplotdep = cxx.find_library('qcustomplot')
+qt5deps = dependency('qt5', modules: ['Core', 'Gui', 'Widgets', 'OpenGLExtensions', 'OpenGL', 'PrintSupport'])
+threaddep = dependency('threads')
+vadrmdep = dependency('libva-drm')
+vax11dep = dependency('libva-x11')
+x11dep = dependency('x11')
+x264dep = dependency('x264')
+zitaresamplerdep = cxx.find_library('zita-resampler')
+
+srcs = []
+nageru_deps = [shareddep, qt5deps, libjpegdep, movitdep, protobufdep,
+ vax11dep, vadrmdep, x11dep, libavformatdep, libavresampledep, libavcodecdep, libavutildep,
+ libswscaledep, libusbdep, luajitdep, dldep, x264dep, alsadep, zitaresamplerdep,
+ qcustomplotdep, threaddep]
+nageru_include_dirs = []
+nageru_link_with = []
+nageru_build_rpath = ''
+nageru_install_rpath = ''
+
+kaeru_link_with = []
+kaeru_extra_deps = []
+
+# CEF.
+exe_dir = join_paths(get_option('prefix'), 'lib/nageru')
+cef_dir = get_option('cef_dir')
+cef_build_type = get_option('cef_build_type')
+have_cef = (cef_dir != '')
+if have_cef
+ # This is done in the top-level file; just kept here for reference.
+ # add_project_arguments('-DHAVE_CEF=1', language: 'cpp')
+
+ system_cef = (cef_build_type == 'system')
+ if system_cef
+ cef_lib_dir = cef_dir
+ cef_resource_dir = '/usr/share/cef/Resources'
+ else
+ cef_lib_dir = join_paths(cef_dir, cef_build_type)
+ cef_resource_dir = join_paths(cef_dir, 'Resources')
+
+ nageru_include_dirs += include_directories(cef_dir)
+ nageru_include_dirs += include_directories(join_paths(cef_dir, 'include'))
+ nageru_build_rpath = cef_lib_dir
+ nageru_install_rpath = '$ORIGIN/'
+ endif
+
+ cefdep = cxx.find_library('cef')
+ nageru_deps += cefdep
+
+ # CEF wrapper library; not built as part of the CEF binary distribution,
+ # but should be if CEF is installed as a system library.
+ if system_cef
+ cefdlldep = cxx.find_library('cef_dll_wrapper')
+ nageru_deps += cefdlldep
+ else
+ cmake = find_program('cmake')
+ cef_compile_script = find_program('scripts/compile_cef_dll_wrapper.sh')
+
+ cef_dll_target = custom_target('libcef_dll_wrapper',
+ input: join_paths(cef_dir, 'libcef_dll/CMakeLists.txt'),
+ output: ['libcef_dll_wrapper.a', 'cef-stamp'],
+ command: [cef_compile_script, '@BUILD_DIR@', cef_dir, cmake, '@OUTPUT@'])
+
+ # Putting the .a in sources seemingly hits a bug where the .a files get sorted
+ # in the wrong order. This is a workaround; see
+ # https://github.com/mesonbuild/meson/issues/3613#issuecomment-408276296 .
+ cefdlldep = declare_dependency(sources: cef_dll_target[1], link_args: cef_dll_target.full_path())
+ nageru_deps += cefdlldep
+ endif
+
+ cef_libs = ['libEGL.so', 'libGLESv2.so', 'natives_blob.bin', 'snapshot_blob.bin', 'v8_context_snapshot.bin']
+ cef_resources = ['cef.pak', 'cef_100_percent.pak', 'cef_200_percent.pak', 'cef_extensions.pak', 'devtools_resources.pak']
+ if not get_option('cef_no_icudtl')
+ cef_resources += ['icudtl.dat']
+ endif
+ if cef_build_type != 'system'
+ cef_libs += ['libcef.so']
+ endif
+
+ # Symlink the files into the build directory, so that running nageru without ninja install works.
+ run_command('mkdir', join_paths(meson.current_build_dir(), 'locales/'))
+ foreach file : cef_libs
+ run_command('ln', '-s', join_paths(cef_lib_dir, file), meson.current_build_dir())
+ install_data(join_paths(cef_lib_dir, file), install_dir: exe_dir)
+ endforeach
+ foreach file : cef_resources
+ run_command('ln', '-s', join_paths(cef_resource_dir, file), meson.current_build_dir())
+ install_data(join_paths(cef_resource_dir, file), install_dir: exe_dir)
+ endforeach
+ run_command('ln', '-s', join_paths(cef_resource_dir, 'locales/en-US.pak'), join_paths(meson.current_build_dir(), 'locales/'))
+ install_data(join_paths(cef_resource_dir, 'locales/en-US.pak'), install_dir: join_paths(exe_dir, 'locales'))
+endif
+
+# bmusb.
+if embedded_bmusb
+ bmusb_dir = include_directories('bmusb')
+ nageru_include_dirs += bmusb_dir
+
+ bmusb = static_library('bmusb', 'bmusb/bmusb.cpp', 'bmusb/fake_capture.cpp',
+ dependencies: [libusbdep],
+ include_directories: [bmusb_dir])
+ nageru_link_with += bmusb
+ kaeru_link_with += bmusb
+else
+ nageru_deps += bmusbdep
+ kaeru_extra_deps += bmusbdep
+endif
+
+# Protobuf compilation.
+gen = generator(protoc, \
+ output : ['@BASENAME@.pb.cc', '@BASENAME@.pb.h'],
+ arguments : ['--proto_path=@CURRENT_SOURCE_DIR@', '--cpp_out=@BUILD_DIR@', '@INPUT@'])
+proto_generated = gen.process(['state.proto', 'midi_mapping.proto', 'json.proto'])
+protobuf_lib = static_library('protobufs', proto_generated, dependencies: nageru_deps, include_directories: nageru_include_dirs)
+protobuf_hdrs = declare_dependency(sources: proto_generated)
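+# (declare_dependency() with the generated sources makes sure the .pb.h files
+# exist before anything that includes them is compiled.)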
+nageru_link_with += protobuf_lib
+
+# Preprocess Qt as needed.
+qt_files = qt5.preprocess(
+ moc_headers: ['aboutdialog.h', 'analyzer.h', 'clickable_label.h', 'compression_reduction_meter.h', 'correlation_meter.h',
+ 'ellipsis_label.h', 'glwidget.h', 'input_mapping_dialog.h', 'lrameter.h', 'mainwindow.h', 'midi_mapping_dialog.h',
+ 'nonlinear_fader.h', 'vumeter.h'],
+ ui_files: ['aboutdialog.ui', 'analyzer.ui', 'audio_expanded_view.ui', 'audio_miniview.ui', 'display.ui',
+ 'input_mapping.ui', 'mainwindow.ui', 'midi_mapping.ui'],
+ dependencies: qt5deps)
+
+# Qt objects.
+srcs += ['glwidget.cpp', 'mainwindow.cpp', 'vumeter.cpp', 'lrameter.cpp', 'compression_reduction_meter.cpp',
+ 'correlation_meter.cpp', 'aboutdialog.cpp', 'analyzer.cpp', 'input_mapping_dialog.cpp', 'midi_mapping_dialog.cpp',
+ 'nonlinear_fader.cpp', 'context_menus.cpp', 'vu_common.cpp', 'piecewise_interpolator.cpp', 'midi_mapper.cpp']
+
+# Auxiliary objects used for nearly everything.
+aux_srcs = ['flags.cpp']
+aux = static_library('aux', aux_srcs, dependencies: nageru_deps, include_directories: nageru_include_dirs)
+nageru_link_with += aux
+
+# Audio objects.
+audio_mixer_srcs = ['audio_mixer.cpp', 'alsa_input.cpp', 'alsa_pool.cpp', 'ebu_r128_proc.cc', 'stereocompressor.cpp',
+ 'resampling_queue.cpp', 'flags.cpp', 'correlation_measurer.cpp', 'filter.cpp', 'input_mapping.cpp']
+audio = static_library('audio', audio_mixer_srcs, dependencies: [nageru_deps, protobuf_hdrs], include_directories: nageru_include_dirs)
+nageru_link_with += audio
+
+# Mixer objects.
+srcs += ['chroma_subsampler.cpp', 'v210_converter.cpp', 'mixer.cpp', 'pbo_frame_allocator.cpp',
+ 'theme.cpp', 'image_input.cpp', 'alsa_output.cpp',
+ 'timecode_renderer.cpp', 'tweaked_inputs.cpp', 'mjpeg_encoder.cpp']
+
+# Streaming and encoding objects (largely the set that is shared between Nageru and Kaeru).
+stream_srcs = ['quicksync_encoder.cpp', 'x264_encoder.cpp', 'x264_dynamic.cpp', 'x264_speed_control.cpp', 'video_encoder.cpp',
+ 'audio_encoder.cpp', 'ffmpeg_util.cpp', 'ffmpeg_capture.cpp',
+ 'print_latency.cpp', 'basic_stats.cpp', 'ref_counted_frame.cpp']
+stream = static_library('stream', stream_srcs, dependencies: nageru_deps, include_directories: nageru_include_dirs)
+nageru_link_with += stream
+
+# DeckLink.
+srcs += ['decklink_capture.cpp', 'decklink_util.cpp', 'decklink_output.cpp',
+ 'decklink/DeckLinkAPIDispatch.cpp']
+decklink_dir = include_directories('decklink')
+nageru_include_dirs += decklink_dir
+
+# CEF input.
+if have_cef
+ srcs += ['nageru_cef_app.cpp', 'cef_capture.cpp']
+endif
+
+srcs += qt_files
+srcs += proto_generated
+
+# Shaders needed at runtime.
+shaders = ['cbcr_subsample.vert', 'cbcr_subsample.frag', 'uyvy_subsample.vert', 'uyvy_subsample.frag', 'v210_subsample.comp', 'timecode.vert', 'timecode.frag', 'timecode_10bit.frag']
+foreach shader : shaders
+ run_command('ln', '-s', join_paths(meson.current_source_dir(), shader), meson.current_build_dir())
+endforeach
+
+shader_srcs = bin2h_gen.process(shaders)
+srcs += shader_srcs
+
+# Everything except main.cpp. (We do this because if you specify a .cpp file in
+# both Nageru and Kaeru, it gets compiled twice. In the older Makefiles, Kaeru
+# depended on a smaller set of objects.)
+core = static_library('core', srcs, dependencies: nageru_deps, include_directories: nageru_include_dirs)
+nageru_link_with += core
+
+# Nageru executable; it goes into /usr/lib/nageru since CEF files go there, too
+# (we can't put them straight into /usr/bin).
+executable('nageru', 'main.cpp',
+ dependencies: nageru_deps,
+ include_directories: nageru_include_dirs,
+ link_with: nageru_link_with,
+ build_rpath: nageru_build_rpath,
+ install_rpath: nageru_install_rpath,
+ install: true,
+ install_dir: exe_dir
+)
+meson.add_install_script('scripts/setup_nageru_symlink.sh')
+
+# Kaeru executable.
+executable('kaeru', 'kaeru.cpp',
+ dependencies: [nageru_deps, kaeru_extra_deps],
+ include_directories: nageru_include_dirs,
+ link_with: [stream, aux, kaeru_link_with],
+ install: true)
+
+# Audio mixer microbenchmark.
+executable('benchmark_audio_mixer', 'benchmark_audio_mixer.cpp', dependencies: nageru_deps, include_directories: nageru_include_dirs, link_with: [audio, aux])
+
+# These are needed for a default run.
+data_files = ['theme.lua', 'simple.lua', 'bg.jpeg', 'akai_midimix.midimapping']
+install_data(data_files, install_dir: join_paths(get_option('prefix'), 'share/nageru'))
+foreach file : data_files
+ run_command('ln', '-s', join_paths(meson.current_source_dir(), file), meson.current_build_dir())
+endforeach
#include "cef_capture.h"
#endif
#include "chroma_subsampler.h"
-#include "context.h"
+#include "shared/context.h"
#include "decklink_capture.h"
#include "decklink_output.h"
#include "defs.h"
-#include "disk_space_estimator.h"
+#include "shared/disk_space_estimator.h"
#include "ffmpeg_capture.h"
#include "flags.h"
#include "input_mapping.h"
-#include "metrics.h"
+#include "shared/metrics.h"
+ #include "mjpeg_encoder.h"
#include "pbo_frame_allocator.h"
-#include "ref_counted_gl_sync.h"
+#include "shared/ref_counted_gl_sync.h"
#include "resampling_queue.h"
-#include "timebase.h"
+#include "shared/timebase.h"
#include "timecode_renderer.h"
#include "v210_converter.h"
+ #include "va_display_with_cleanup.h"
#include "video_encoder.h"
#undef Status
display_chain->finalize();
video_encoder.reset(new VideoEncoder(resource_pool.get(), h264_encoder_surface, global_flags.va_display, global_flags.width, global_flags.height, &httpd, global_disk_space_estimator));
+ mjpeg_encoder.reset(new MJPEGEncoder(&httpd, global_flags.va_display));
// Must be instantiated after VideoEncoder has initialized global_flags.use_zerocopy.
theme.reset(new Theme(global_flags.theme_filename, global_flags.theme_dirs, resource_pool.get(), num_cards));
Mixer::~Mixer()
{
+ mjpeg_encoder->stop();
httpd.stop();
BMUSBCapture::stop_bm_thread();
new_frame.upload_func = upload_func;
new_frame.dropped_frames = dropped_frames;
new_frame.received_timestamp = video_frame.received_timestamp; // Ignore the audio timestamp.
+ new_frame.video_format = video_format;
+ new_frame.y_offset = y_offset;
+ new_frame.cbcr_offset = cbcr_offset;
card->new_frames.push_back(move(new_frame));
card->jitter_history.frame_arrived(video_frame.received_timestamp, frame_length, dropped_frames);
card->may_have_dropped_last_frame = false;
new_frame->upload_func();
new_frame->upload_func = nullptr;
}
+
+ // There are situations where we could possibly want to
+ // include FFmpeg inputs (CEF inputs are unlikely),
+ // but they're not necessarily in 4:2:2 Y'CbCr, so it would
+ // require more functionality in the JPEG encoder.
+ if (card_index < num_cards) {
+ mjpeg_encoder->upload_frame(pts_int, card_index, new_frame->frame, new_frame->video_format, new_frame->y_offset, new_frame->cbcr_offset);
+ }
}
int64_t frame_duration = output_frame_info.frame_duration;
#include "audio_mixer.h"
#include "bmusb/bmusb.h"
#include "defs.h"
-#include "httpd.h"
+#include "shared/httpd.h"
#include "input_state.h"
#include "libusb.h"
#include "pbo_frame_allocator.h"
#include "ref_counted_frame.h"
-#include "ref_counted_gl_sync.h"
+#include "shared/ref_counted_gl_sync.h"
#include "theme.h"
-#include "timebase.h"
+#include "shared/timebase.h"
#include "video_encoder.h"
#include "ycbcr_interpretation.h"
class ALSAOutput;
class ChromaSubsampler;
class DeckLinkOutput;
+ class MJPEGEncoder;
class QSurface;
class QSurfaceFormat;
class TimecodeRenderer;
std::unique_ptr<ChromaSubsampler> chroma_subsampler;
std::unique_ptr<v210Converter> v210_converter;
std::unique_ptr<VideoEncoder> video_encoder;
+ std::unique_ptr<MJPEGEncoder> mjpeg_encoder;
std::unique_ptr<TimecodeRenderer> timecode_renderer;
std::atomic<bool> display_timecode_in_stream{false};
std::function<void()> upload_func; // Needs to be called to actually upload the texture to OpenGL.
unsigned dropped_frames = 0; // Number of dropped frames before this one.
std::chrono::steady_clock::time_point received_timestamp = std::chrono::steady_clock::time_point::min();
+
+ // Used for MJPEG encoding. (upload_func packs everything it needs
+ // into the functor, but would otherwise also use these.)
+ // width=0 or height=0 means a broken frame, i.e., do not upload.
+ bmusb::VideoFormat video_format;
+ size_t y_offset, cbcr_offset;
};
std::deque<NewFrame> new_frames;
std::condition_variable new_frames_changed; // Set whenever new_frames is changed.
--- /dev/null
-#include "ffmpeg_raii.h"
+ #include "mjpeg_encoder.h"
+
+ #include <jpeglib.h>
+ #include <unistd.h>
+ #if __SSE2__
+ #include <immintrin.h>
+ #endif
+ #include <list>
+
+ extern "C" {
+ #include <libavformat/avformat.h>
+ }
+
+ #include "defs.h"
-#include "httpd.h"
-#include "memcpy_interleaved.h"
++#include "shared/ffmpeg_raii.h"
+ #include "flags.h"
-#include "timebase.h"
++#include "shared/httpd.h"
++#include "shared/memcpy_interleaved.h"
+ #include "pbo_frame_allocator.h"
++#include "shared/timebase.h"
+ #include "va_display_with_cleanup.h"
+
+ #include <va/va.h>
+ #include <va/va_drm.h>
+ #include <va/va_x11.h>
+
+ using namespace bmusb;
+ using namespace std;
+
+ extern void memcpy_with_pitch(uint8_t *dst, const uint8_t *src, size_t src_width, size_t dst_pitch, size_t height);
+
+ #define CHECK_VASTATUS(va_status, func) \
+ if (va_status != VA_STATUS_SUCCESS) { \
+ fprintf(stderr, "%s:%d (%s) failed with %d\n", __func__, __LINE__, func, va_status); \
+ exit(1); \
+ }
+
+ // From libjpeg (although it's of course identical between implementations).
+ static const int jpeg_natural_order[DCTSIZE2] = {
+ 0, 1, 8, 16, 9, 2, 3, 10,
+ 17, 24, 32, 25, 18, 11, 4, 5,
+ 12, 19, 26, 33, 40, 48, 41, 34,
+ 27, 20, 13, 6, 7, 14, 21, 28,
+ 35, 42, 49, 56, 57, 50, 43, 36,
+ 29, 22, 15, 23, 30, 37, 44, 51,
+ 58, 59, 52, 45, 38, 31, 39, 46,
+ 53, 60, 61, 54, 47, 55, 62, 63,
+ };
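+ // (Used below to reorder libjpeg's natural-order quantization tables into
+ // the zigzag order that the VA-API buffers expect.)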
+
+ struct VectorDestinationManager {
+ jpeg_destination_mgr pub;
+ std::vector<uint8_t> dest;
+
+ VectorDestinationManager()
+ {
+ pub.init_destination = init_destination_thunk;
+ pub.empty_output_buffer = empty_output_buffer_thunk;
+ pub.term_destination = term_destination_thunk;
+ }
+
+ static void init_destination_thunk(j_compress_ptr ptr)
+ {
+ ((VectorDestinationManager *)(ptr->dest))->init_destination();
+ }
+
+ inline void init_destination()
+ {
+ make_room(0);
+ }
+
+ static boolean empty_output_buffer_thunk(j_compress_ptr ptr)
+ {
+ return ((VectorDestinationManager *)(ptr->dest))->empty_output_buffer();
+ }
+
+ inline bool empty_output_buffer()
+ {
+ make_room(dest.size()); // libjpeg wants the whole buffer treated as full here, so deliberately ignore pub.free_in_buffer.
+ return true;
+ }
+
+ inline void make_room(size_t bytes_used)
+ {
+ dest.resize(bytes_used + 4096);
+ dest.resize(dest.capacity());
+ pub.next_output_byte = dest.data() + bytes_used;
+ pub.free_in_buffer = dest.size() - bytes_used;
+ }
+
+ static void term_destination_thunk(j_compress_ptr ptr)
+ {
+ ((VectorDestinationManager *)(ptr->dest))->term_destination();
+ }
+
+ inline void term_destination()
+ {
+ dest.resize(dest.size() - pub.free_in_buffer);
+ }
+ };
+ static_assert(std::is_standard_layout<VectorDestinationManager>::value, "");
+
+ int MJPEGEncoder::write_packet2_thunk(void *opaque, uint8_t *buf, int buf_size, AVIODataMarkerType type, int64_t time)
+ {
+ MJPEGEncoder *engine = (MJPEGEncoder *)opaque;
+ return engine->write_packet2(buf, buf_size, type, time);
+ }
+
+ int MJPEGEncoder::write_packet2(uint8_t *buf, int buf_size, AVIODataMarkerType type, int64_t time)
+ {
+ if (type == AVIO_DATA_MARKER_HEADER) {
+ mux_header.append((char *)buf, buf_size);
+ httpd->set_header(HTTPD::MULTICAM_STREAM, mux_header);
+ } else {
+ httpd->add_data(HTTPD::MULTICAM_STREAM, (char *)buf, buf_size, /*keyframe=*/true, AV_NOPTS_VALUE, AVRational{ AV_TIME_BASE, 1 });
+ }
+ return buf_size;
+ }
+
+ MJPEGEncoder::MJPEGEncoder(HTTPD *httpd, const string &va_display)
+ : httpd(httpd)
+ {
+ // Set up the mux. We don't use the Mux wrapper, because it's geared towards
+ // a situation with only one video stream (and possibly one audio stream)
+ // with known width/height, and we don't need the extra functionality it provides.
+ avctx.reset(avformat_alloc_context());
+ avctx->oformat = av_guess_format("mp4", nullptr, nullptr);
+
+ uint8_t *buf = (uint8_t *)av_malloc(MUX_BUFFER_SIZE);
+ avctx->pb = avio_alloc_context(buf, MUX_BUFFER_SIZE, 1, this, nullptr, nullptr, nullptr);
+ avctx->pb->write_data_type = &MJPEGEncoder::write_packet2_thunk;
+ avctx->flags = AVFMT_FLAG_CUSTOM_IO;
+
+ for (int card_idx = 0; card_idx < global_flags.num_cards; ++card_idx) {
+ AVStream *stream = avformat_new_stream(avctx.get(), nullptr);
+ if (stream == nullptr) {
+ fprintf(stderr, "avformat_new_stream() failed\n");
+ exit(1);
+ }
+ stream->time_base = AVRational{ 1, TIMEBASE };
+ stream->codecpar->codec_type = AVMEDIA_TYPE_VIDEO;
+ stream->codecpar->codec_id = AV_CODEC_ID_MJPEG;
+
+ // Used for aspect ratio only. Can change without notice (the mux won't care).
+ stream->codecpar->width = global_flags.width;
+ stream->codecpar->height = global_flags.height;
+
+ // TODO: We could perhaps use the interpretation for each card here
+ // (or at least the command-line flags) instead of the defaults,
+ // but what would we do when they change?
+ stream->codecpar->color_primaries = AVCOL_PRI_BT709;
+ stream->codecpar->color_trc = AVCOL_TRC_IEC61966_2_1;
+ stream->codecpar->color_space = AVCOL_SPC_BT709;
+ stream->codecpar->color_range = AVCOL_RANGE_MPEG;
+ stream->codecpar->chroma_location = AVCHROMA_LOC_LEFT;
+ stream->codecpar->field_order = AV_FIELD_PROGRESSIVE;
+ }
+
+ AVDictionary *options = NULL;
+ vector<pair<string, string>> opts = MUX_OPTS;
+ for (pair<string, string> opt : opts) {
+ av_dict_set(&options, opt.first.c_str(), opt.second.c_str(), 0);
+ }
+ if (avformat_write_header(avctx.get(), &options) < 0) {
+ fprintf(stderr, "avformat_write_header() failed\n");
+ exit(1);
+ }
+
+ // Initialize VA-API.
+ string error;
+ va_dpy = try_open_va(va_display, &error, &config_id);
+ if (va_dpy == nullptr) {
+ fprintf(stderr, "Could not initialize VA-API for MJPEG encoding: %s. JPEGs will be encoded in software if needed.\n", error.c_str());
+ }
+
+ running = true;
+
+ // Start the encoder thread only now that the mux and VA-API state it uses
+ // are fully set up.
+ encoder_thread = thread(&MJPEGEncoder::encoder_thread_func, this);
+ }
+
+ void MJPEGEncoder::stop()
+ {
+ if (!running) {
+ return;
+ }
+ running = false;
+ should_quit = true;
+ any_frames_to_be_encoded.notify_all();
+ encoder_thread.join();
+ }
+
+ unique_ptr<VADisplayWithCleanup> MJPEGEncoder::try_open_va(const string &va_display, string *error, VAConfigID *config_id)
+ {
+ unique_ptr<VADisplayWithCleanup> va_dpy = va_open_display(va_display);
+ if (va_dpy == nullptr) {
+ if (error) *error = "Opening VA display failed";
+ return nullptr;
+ }
+ int major_ver, minor_ver;
+ VAStatus va_status = vaInitialize(va_dpy->va_dpy, &major_ver, &minor_ver);
+ if (va_status != VA_STATUS_SUCCESS) {
+ char buf[256];
+ snprintf(buf, sizeof(buf), "vaInitialize() failed with status %d\n", va_status);
+ if (error != nullptr) *error = buf;
+ return nullptr;
+ }
+
+ VAConfigAttrib attr = { VAConfigAttribRTFormat, VA_RT_FORMAT_YUV422 };
+ va_status = vaCreateConfig(va_dpy->va_dpy, VAProfileJPEGBaseline, VAEntrypointEncPicture,
+ &attr, 1, config_id);
+ if (va_status == VA_STATUS_ERROR_UNSUPPORTED_ENTRYPOINT) {
+ if (error != nullptr) *error = "No hardware support";
+ return nullptr;
+ } else if (va_status != VA_STATUS_SUCCESS) {
+ char buf[256];
+ snprintf(buf, sizeof(buf), "vaCreateConfig() failed with status %d\n", va_status);
+ if (error != nullptr) *error = buf;
+ return nullptr;
+ }
+
+ int num_formats = vaMaxNumImageFormats(va_dpy->va_dpy);
+ assert(num_formats > 0);
+
+ unique_ptr<VAImageFormat[]> formats(new VAImageFormat[num_formats]);
+ va_status = vaQueryImageFormats(va_dpy->va_dpy, formats.get(), &num_formats);
+ if (va_status != VA_STATUS_SUCCESS) {
+ char buf[256];
+ snprintf(buf, sizeof(buf), "vaQueryImageFormats() failed with status %d\n", va_status);
+ if (error != nullptr) *error = buf;
+ return nullptr;
+ }
+
+ return va_dpy;
+ }
+
+ void MJPEGEncoder::upload_frame(int64_t pts, unsigned card_index, RefCountedFrame frame, const bmusb::VideoFormat &video_format, size_t y_offset, size_t cbcr_offset)
+ {
+ PBOFrameAllocator::Userdata *userdata = (PBOFrameAllocator::Userdata *)frame->userdata;
+ if (video_format.width == 0 || video_format.height == 0) {
+ return;
+ }
+ if (video_format.interlaced) {
+ fprintf(stderr, "Card %u: Ignoring JPEG encoding for interlaced frame\n", card_index);
+ return;
+ }
+ if (userdata->pixel_format != PixelFormat_8BitYCbCr ||
+ !frame->interleaved) {
+ fprintf(stderr, "Card %u: Ignoring JPEG encoding for unsupported pixel format\n", card_index);
+ return;
+ }
+ if (video_format.width > 4096 || video_format.height > 4096) {
+ fprintf(stderr, "Card %u: Ignoring JPEG encoding for oversized frame\n", card_index);
+ return;
+ }
+
+ lock_guard<mutex> lock(mu);
+ frames_to_be_encoded.push(QueuedFrame{ pts, card_index, frame, video_format, y_offset, cbcr_offset });
+ any_frames_to_be_encoded.notify_all();
+ }
+
+ void MJPEGEncoder::encoder_thread_func()
+ {
+ pthread_setname_np(pthread_self(), "MJPEG_Encode");
+ posix_memalign((void **)&tmp_y, 4096, 4096 * 8);
+ posix_memalign((void **)&tmp_cbcr, 4096, 4096 * 8);
+ posix_memalign((void **)&tmp_cb, 4096, 4096 * 8);
+ posix_memalign((void **)&tmp_cr, 4096, 4096 * 8);
+
+ unique_lock<mutex> lock(mu);
+ for (;;) {
+ any_frames_to_be_encoded.wait(lock, [this] { return !frames_to_be_encoded.empty() || should_quit; });
+ if (should_quit) return;
+ QueuedFrame qf = move(frames_to_be_encoded.front());
+ frames_to_be_encoded.pop();
+
+ vector<uint8_t> jpeg = encode_jpeg(qf);
+
+ AVPacket pkt;
+ memset(&pkt, 0, sizeof(pkt));
+ pkt.buf = nullptr;
+ pkt.data = &jpeg[0];
+ pkt.size = jpeg.size();
+ pkt.stream_index = qf.card_index;
+ pkt.flags = AV_PKT_FLAG_KEY;
+ pkt.pts = pkt.dts = qf.pts;
+
+ if (av_write_frame(avctx.get(), &pkt) < 0) {
+ fprintf(stderr, "av_write_frame() failed\n");
+ exit(1);
+ }
+ }
+ }
+
+ class VABufferDestroyer {
+ public:
+ VABufferDestroyer(VADisplay dpy, VABufferID buf)
+ : dpy(dpy), buf(buf) {}
+
+ ~VABufferDestroyer() {
+ VAStatus va_status = vaDestroyBuffer(dpy, buf);
+ CHECK_VASTATUS(va_status, "vaDestroyBuffer");
+ }
+
+ private:
+ VADisplay dpy;
+ VABufferID buf;
+ };
+
+ MJPEGEncoder::VAResources MJPEGEncoder::get_va_resources(unsigned width, unsigned height)
+ {
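+ // Reuse resources of the same resolution from the freelist if we can;
+ // creating VA surfaces and contexts is comparatively expensive.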
+ {
+ lock_guard<mutex> lock(va_resources_mutex);
+ for (auto it = va_resources_freelist.begin(); it != va_resources_freelist.end(); ++it) {
+ if (it->width == width && it->height == height) {
+ VAResources ret = *it;
+ va_resources_freelist.erase(it);
+ return ret;
+ }
+ }
+ }
+
+ VAResources ret;
+
+ ret.width = width;
+ ret.height = height;
+
+ VASurfaceAttrib attrib;
+ attrib.flags = VA_SURFACE_ATTRIB_SETTABLE;
+ attrib.type = VASurfaceAttribPixelFormat;
+ attrib.value.type = VAGenericValueTypeInteger;
+ attrib.value.value.i = VA_FOURCC_UYVY;
+
+ VAStatus va_status = vaCreateSurfaces(va_dpy->va_dpy, VA_RT_FORMAT_YUV422,
+ width, height,
+ &ret.surface, 1, &attrib, 1);
+ CHECK_VASTATUS(va_status, "vaCreateSurfaces");
+
+ va_status = vaCreateContext(va_dpy->va_dpy, config_id, width, height, 0, &ret.surface, 1, &ret.context);
+ CHECK_VASTATUS(va_status, "vaCreateContext");
+
+ va_status = vaCreateBuffer(va_dpy->va_dpy, config_id, VAEncCodedBufferType, width * height * 3 + 8192, 1, nullptr, &ret.data_buffer);
+ CHECK_VASTATUS(va_status, "vaCreateBuffer");
+
+ return ret;
+ }
+
+ void MJPEGEncoder::release_va_resources(MJPEGEncoder::VAResources resources)
+ {
+ lock_guard<mutex> lock(va_resources_mutex);
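+ // Keep the freelist bounded: evict the least recently used entry
+ // (resources are pushed to the front, so the back is the oldest).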
+ if (va_resources_freelist.size() > 10) {
+ auto it = va_resources_freelist.end();
+ --it;
+
+ VAStatus va_status = vaDestroyBuffer(va_dpy->va_dpy, it->data_buffer);
+ CHECK_VASTATUS(va_status, "vaDestroyBuffer");
+
+ va_status = vaDestroyContext(va_dpy->va_dpy, it->context);
+ CHECK_VASTATUS(va_status, "vaDestroyContext");
+
+ va_status = vaDestroySurfaces(va_dpy->va_dpy, &it->surface, 1);
+ CHECK_VASTATUS(va_status, "vaDestroySurfaces");
+
+ va_resources_freelist.erase(it);
+ }
+
+ va_resources_freelist.push_front(resources);
+ }
+
+ void MJPEGEncoder::init_jpeg_422(unsigned width, unsigned height, VectorDestinationManager *dest, jpeg_compress_struct *cinfo)
+ {
+ jpeg_error_mgr jerr;
+ cinfo->err = jpeg_std_error(&jerr);
+ jpeg_create_compress(cinfo);
+
+ cinfo->dest = (jpeg_destination_mgr *)dest;
+
+ cinfo->input_components = 3;
+ jpeg_set_defaults(cinfo);
+ jpeg_set_quality(cinfo, quality, /*force_baseline=*/false);
+
+ cinfo->image_width = width;
+ cinfo->image_height = height;
+ cinfo->raw_data_in = true;
+ jpeg_set_colorspace(cinfo, JCS_YCbCr);
+ cinfo->comp_info[0].h_samp_factor = 2;
+ cinfo->comp_info[0].v_samp_factor = 1;
+ cinfo->comp_info[1].h_samp_factor = 1;
+ cinfo->comp_info[1].v_samp_factor = 1;
+ cinfo->comp_info[2].h_samp_factor = 1;
+ cinfo->comp_info[2].v_samp_factor = 1;
+ cinfo->CCIR601_sampling = true; // Seems to be mostly ignored by libjpeg, though.
+ jpeg_start_compress(cinfo, true);
+ }
+
+ vector<uint8_t> MJPEGEncoder::get_jpeg_header(unsigned width, unsigned height, jpeg_compress_struct *cinfo)
+ {
+ VectorDestinationManager dest;
+ init_jpeg_422(width, height, &dest, cinfo);
+
+ // Make a dummy black image; there's seemingly no other easy way of
+ // making libjpeg output all of its headers.
+ JSAMPROW yptr[8], cbptr[8], crptr[8];
+ JSAMPARRAY data[3] = { yptr, cbptr, crptr };
+ memset(tmp_y, 0, 4096);
+ memset(tmp_cb, 0, 4096);
+ memset(tmp_cr, 0, 4096);
+ for (unsigned yy = 0; yy < 8; ++yy) {
+ yptr[yy] = tmp_y;
+ cbptr[yy] = tmp_cb;
+ crptr[yy] = tmp_cr;
+ }
+ for (unsigned y = 0; y < height; y += 8) {
+ jpeg_write_raw_data(cinfo, data, /*num_lines=*/8);
+ }
+ jpeg_finish_compress(cinfo);
+
+ // We're only interested in the header, not the data after it.
+ dest.term_destination();
+ for (size_t i = 0; i < dest.dest.size() - 1; ++i) {
+ if (dest.dest[i] == 0xff && dest.dest[i + 1] == 0xda) { // Start of scan (SOS).
+ unsigned len = dest.dest[i + 2] * 256 + dest.dest[i + 3];
+ dest.dest.resize(i + len + 2);
+ break;
+ }
+ }
+
+ return dest.dest;
+ }
+
+ MJPEGEncoder::VAData MJPEGEncoder::get_va_data_for_resolution(unsigned width, unsigned height)
+ {
+ pair<unsigned, unsigned> key(width, height);
+ if (va_data_for_resolution.count(key)) {
+ return va_data_for_resolution[key];
+ }
+
+ // Use libjpeg to generate a header and set sane defaults for e.g.
+ // quantization tables. Then do the actual encode with VA-API.
+ jpeg_compress_struct cinfo;
+ vector<uint8_t> jpeg_header = get_jpeg_header(width, height, &cinfo);
+
+ // Picture parameters.
+ VAEncPictureParameterBufferJPEG pic_param;
+ memset(&pic_param, 0, sizeof(pic_param));
+ pic_param.reconstructed_picture = VA_INVALID_ID;
+ pic_param.picture_width = cinfo.image_width;
+ pic_param.picture_height = cinfo.image_height;
+ for (int component_idx = 0; component_idx < cinfo.num_components; ++component_idx) {
+ const jpeg_component_info *comp = &cinfo.comp_info[component_idx];
+ pic_param.component_id[component_idx] = comp->component_id;
+ pic_param.quantiser_table_selector[component_idx] = comp->quant_tbl_no;
+ }
+ pic_param.num_components = cinfo.num_components;
+ pic_param.num_scan = 1;
+ pic_param.sample_bit_depth = 8;
+ pic_param.coded_buf = VA_INVALID_ID; // To be filled out by caller.
+ pic_param.pic_flags.bits.huffman = 1;
+ pic_param.quality = 50; // Don't scale the given quantization matrices. (See gen8_mfc_jpeg_fqm_state)
+
+ // Quantization matrices.
+ VAQMatrixBufferJPEG q;
+ memset(&q, 0, sizeof(q));
+
+ q.load_lum_quantiser_matrix = true;
+ q.load_chroma_quantiser_matrix = true;
+ for (int quant_tbl_idx = 0; quant_tbl_idx < min(4, NUM_QUANT_TBLS); ++quant_tbl_idx) {
+ const JQUANT_TBL *qtbl = cinfo.quant_tbl_ptrs[quant_tbl_idx];
+ assert((qtbl == nullptr) == (quant_tbl_idx >= 2));
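+ // (jpeg_set_defaults() fills in only table 0 (luma) and table 1 (chroma).)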
+ if (qtbl == nullptr) continue;
+
+ uint8_t *qmatrix = (quant_tbl_idx == 0) ? q.lum_quantiser_matrix : q.chroma_quantiser_matrix;
+ for (int i = 0; i < 64; ++i) {
+ if (qtbl->quantval[i] > 255) {
+ fprintf(stderr, "Baseline JPEG only!\n");
+ abort();
+ }
+ qmatrix[i] = qtbl->quantval[jpeg_natural_order[i]];
+ }
+ }
+
+ // Huffman tables (arithmetic is not supported).
+ VAHuffmanTableBufferJPEGBaseline huff;
+ memset(&huff, 0, sizeof(huff));
+
+ for (int huff_tbl_idx = 0; huff_tbl_idx < min(2, NUM_HUFF_TBLS); ++huff_tbl_idx) {
+ const JHUFF_TBL *ac_hufftbl = cinfo.ac_huff_tbl_ptrs[huff_tbl_idx];
+ const JHUFF_TBL *dc_hufftbl = cinfo.dc_huff_tbl_ptrs[huff_tbl_idx];
+ if (ac_hufftbl == nullptr) {
+ assert(dc_hufftbl == nullptr);
+ huff.load_huffman_table[huff_tbl_idx] = 0;
+ } else {
+ assert(dc_hufftbl != nullptr);
+ huff.load_huffman_table[huff_tbl_idx] = 1;
+
+ for (int i = 0; i < 16; ++i) {
+ huff.huffman_table[huff_tbl_idx].num_dc_codes[i] = dc_hufftbl->bits[i + 1];
+ }
+ for (int i = 0; i < 12; ++i) {
+ huff.huffman_table[huff_tbl_idx].dc_values[i] = dc_hufftbl->huffval[i];
+ }
+ for (int i = 0; i < 16; ++i) {
+ huff.huffman_table[huff_tbl_idx].num_ac_codes[i] = ac_hufftbl->bits[i + 1];
+ }
+ for (int i = 0; i < 162; ++i) {
+ huff.huffman_table[huff_tbl_idx].ac_values[i] = ac_hufftbl->huffval[i];
+ }
+ }
+ }
+
+ // Slice parameters (metadata about the slice).
+ VAEncSliceParameterBufferJPEG parms;
+ memset(&parms, 0, sizeof(parms));
+ for (int component_idx = 0; component_idx < cinfo.num_components; ++component_idx) {
+ const jpeg_component_info *comp = &cinfo.comp_info[component_idx];
+ parms.components[component_idx].component_selector = comp->component_id;
+ parms.components[component_idx].dc_table_selector = comp->dc_tbl_no;
+ parms.components[component_idx].ac_table_selector = comp->ac_tbl_no;
+ if (parms.components[component_idx].dc_table_selector > 1 ||
+ parms.components[component_idx].ac_table_selector > 1) {
+ fprintf(stderr, "Uses too many Huffman tables\n");
+ abort();
+ }
+ }
+ parms.num_components = cinfo.num_components;
+ parms.restart_interval = cinfo.restart_interval;
+
+ jpeg_destroy_compress(&cinfo);
+
+ VAData ret;
+ ret.jpeg_header = move(jpeg_header);
+ ret.pic_param = pic_param;
+ ret.q = q;
+ ret.huff = huff;
+ ret.parms = parms;
+ va_data_for_resolution[key] = ret;
+ return ret;
+ }
+
+ vector<uint8_t> MJPEGEncoder::encode_jpeg(const QueuedFrame &qf)
+ {
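+ // Use the hardware encoder if we managed to open a VA-API display at
+ // startup; otherwise, fall back to encoding in software with libjpeg.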
+ if (va_dpy != nullptr) {
+ return encode_jpeg_va(qf);
+ } else {
+ return encode_jpeg_libjpeg(qf);
+ }
+ }
+
+ vector<uint8_t> MJPEGEncoder::encode_jpeg_va(const QueuedFrame &qf)
+ {
+ unsigned width = qf.video_format.width;
+ unsigned height = qf.video_format.height;
+
+ VAResources resources = get_va_resources(width, height);
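+ // Make sure the resources go back on the freelist when we return,
+ // whether the encode succeeds or not.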
+ ReleaseVAResources release(this, resources);
+
+ VAData va_data = get_va_data_for_resolution(width, height);
+ va_data.pic_param.coded_buf = resources.data_buffer;
+
+ VABufferID pic_param_buffer;
+ VAStatus va_status = vaCreateBuffer(va_dpy->va_dpy, config_id, VAEncPictureParameterBufferType, sizeof(va_data.pic_param), 1, &va_data.pic_param, &pic_param_buffer);
+ CHECK_VASTATUS(va_status, "vaCreateBuffer");
+ VABufferDestroyer destroy_pic_param(va_dpy->va_dpy, pic_param_buffer);
+
+ VABufferID q_buffer;
+ va_status = vaCreateBuffer(va_dpy->va_dpy, config_id, VAQMatrixBufferType, sizeof(va_data.q), 1, &va_data.q, &q_buffer);
+ CHECK_VASTATUS(va_status, "vaCreateBuffer");
+ VABufferDestroyer destroy_iq(va_dpy->va_dpy, q_buffer);
+
+ VABufferID huff_buffer;
+ va_status = vaCreateBuffer(va_dpy->va_dpy, config_id, VAHuffmanTableBufferType, sizeof(va_data.huff), 1, &va_data.huff, &huff_buffer);
+ CHECK_VASTATUS(va_status, "vaCreateBuffer");
+ VABufferDestroyer destroy_huff(va_dpy->va_dpy, huff_buffer);
+
+ VABufferID slice_param_buffer;
+ va_status = vaCreateBuffer(va_dpy->va_dpy, config_id, VAEncSliceParameterBufferType, sizeof(va_data.parms), 1, &va_data.parms, &slice_param_buffer);
+ CHECK_VASTATUS(va_status, "vaCreateBuffer");
+ VABufferDestroyer destroy_slice_param(va_dpy->va_dpy, slice_param_buffer);
+
+ VAImage image;
+ va_status = vaDeriveImage(va_dpy->va_dpy, resources.surface, &image);
+ CHECK_VASTATUS(va_status, "vaDeriveImage");
+
+ // Upload the pixel data.
+ uint8_t *surface_p = nullptr;
+ va_status = vaMapBuffer(va_dpy->va_dpy, image.buf, (void **)&surface_p);
+ CHECK_VASTATUS(va_status, "vaMapBuffer");
+
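+ // data_copy holds the raw interleaved 4:2:2 frame (two bytes per pixel);
+ // skip any extra lines the card gave us at the top.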
+ size_t field_start_line = qf.video_format.extra_lines_top; // No interlacing support.
+ size_t field_start = qf.cbcr_offset * 2 + qf.video_format.width * field_start_line * 2;
+
+ {
+ const uint8_t *src = qf.frame->data_copy + field_start;
+ uint8_t *dst = (unsigned char *)surface_p + image.offsets[0];
+ memcpy_with_pitch(dst, src, qf.video_format.width * 2, image.pitches[0], qf.video_format.height);
+ }
+
+ va_status = vaUnmapBuffer(va_dpy->va_dpy, image.buf);
+ CHECK_VASTATUS(va_status, "vaUnmapBuffer");
+ va_status = vaDestroyImage(va_dpy->va_dpy, image.image_id);
+ CHECK_VASTATUS(va_status, "vaDestroyImage");
+
+ // Finally, stick in the JPEG header.
+ VAEncPackedHeaderParameterBuffer header_parm;
+ memset(&header_parm, 0, sizeof(header_parm));
+ header_parm.type = VAEncPackedHeaderRawData;
+ header_parm.bit_length = 8 * va_data.jpeg_header.size();
+
+ VABufferID header_parm_buffer;
+ va_status = vaCreateBuffer(va_dpy->va_dpy, config_id, VAEncPackedHeaderParameterBufferType, sizeof(header_parm), 1, &header_parm, &header_parm_buffer);
+ CHECK_VASTATUS(va_status, "vaCreateBuffer");
+ VABufferDestroyer destroy_header(va_dpy->va_dpy, header_parm_buffer);
+
+ VABufferID header_data_buffer;
+ va_status = vaCreateBuffer(va_dpy->va_dpy, config_id, VAEncPackedHeaderDataBufferType, va_data.jpeg_header.size(), 1, va_data.jpeg_header.data(), &header_data_buffer);
+ CHECK_VASTATUS(va_status, "vaCreateBuffer");
+ VABufferDestroyer destroy_header_data(va_dpy->va_dpy, header_data_buffer);
+
+ va_status = vaBeginPicture(va_dpy->va_dpy, resources.context, resources.surface);
+ CHECK_VASTATUS(va_status, "vaBeginPicture");
+ va_status = vaRenderPicture(va_dpy->va_dpy, resources.context, &pic_param_buffer, 1);
+ CHECK_VASTATUS(va_status, "vaRenderPicture(pic_param)");
+ va_status = vaRenderPicture(va_dpy->va_dpy, resources.context, &q_buffer, 1);
+ CHECK_VASTATUS(va_status, "vaRenderPicture(q)");
+ va_status = vaRenderPicture(va_dpy->va_dpy, resources.context, &huff_buffer, 1);
+ CHECK_VASTATUS(va_status, "vaRenderPicture(huff)");
+ va_status = vaRenderPicture(va_dpy->va_dpy, resources.context, &slice_param_buffer, 1);
+ CHECK_VASTATUS(va_status, "vaRenderPicture(slice_param)");
+ va_status = vaRenderPicture(va_dpy->va_dpy, resources.context, &header_parm_buffer, 1);
+ CHECK_VASTATUS(va_status, "vaRenderPicture(header_parm)");
+ va_status = vaRenderPicture(va_dpy->va_dpy, resources.context, &header_data_buffer, 1);
+ CHECK_VASTATUS(va_status, "vaRenderPicture(header_data)");
+ va_status = vaEndPicture(va_dpy->va_dpy, resources.context);
+ CHECK_VASTATUS(va_status, "vaEndPicture");
+
+ va_status = vaSyncSurface(va_dpy->va_dpy, resources.surface);
+ CHECK_VASTATUS(va_status, "vaSyncSurface");
+
+ VACodedBufferSegment *segment;
+ va_status = vaMapBuffer(va_dpy->va_dpy, resources.data_buffer, (void **)&segment);
+ CHECK_VASTATUS(va_status, "vaMapBuffer");
+
+ const char *coded_buf = reinterpret_cast<char *>(segment->buf);
+ vector<uint8_t> jpeg(coded_buf, coded_buf + segment->size);
+
+ va_status = vaUnmapBuffer(va_dpy->va_dpy, resources.data_buffer);
+ CHECK_VASTATUS(va_status, "vaUnmapBuffer");
+
+ return jpeg;
+ }
+
+ vector<uint8_t> MJPEGEncoder::encode_jpeg_libjpeg(const QueuedFrame &qf)
+ {
+ unsigned width = qf.video_format.width;
+ unsigned height = qf.video_format.height;
+
+ VectorDestinationManager dest;
+ jpeg_compress_struct cinfo;
+ init_jpeg_422(width, height, &dest, &cinfo);
+
+ size_t field_start_line = qf.video_format.extra_lines_top; // No interlacing support.
+ size_t field_start = qf.cbcr_offset * 2 + qf.video_format.width * field_start_line * 2;
+
+ JSAMPROW yptr[8], cbptr[8], crptr[8];
+ JSAMPARRAY data[3] = { yptr, cbptr, crptr };
+ for (unsigned y = 0; y < qf.video_format.height; y += 8) {
+ const uint8_t *src = qf.frame->data_copy + field_start + y * qf.video_format.width * 2;
+
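+ // Split the interleaved 4:2:2 data into a Y plane and an interleaved
+ // CbCr plane, then split CbCr into separate Cb and Cr planes,
+ // eight rows at a time.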
+ memcpy_interleaved(tmp_y, tmp_cbcr, src, qf.video_format.width * 8 * 2);
+ memcpy_interleaved(tmp_cb, tmp_cr, tmp_cbcr, qf.video_format.width * 8);
+ for (unsigned yy = 0; yy < 8; ++yy) {
+ yptr[yy] = tmp_y + yy * width;
+ cbptr[yy] = tmp_cb + yy * width / 2;
+ crptr[yy] = tmp_cr + yy * width / 2;
+ }
+ jpeg_write_raw_data(&cinfo, data, /*num_lines=*/8);
+ }
+ jpeg_finish_compress(&cinfo);
+
+ return dest.dest;
+ }
--- /dev/null
-#include "ffmpeg_raii.h"
+ #ifndef _MJPEG_ENCODER_H
+ #define _MJPEG_ENCODER_H 1
+
+ #include "shared/ffmpeg_raii.h"
+ #include "ref_counted_frame.h"
+
+ extern "C" {
+
+ #include <libavformat/avio.h>
+
+ } // extern "C"
+
+ #include <atomic>
+ #include <bmusb/bmusb.h>
+ #include <condition_variable>
+ #include <list>
+ #include <mutex>
+ #include <queue>
+ #include <stdint.h>
+ #include <string>
+ #include <thread>
+
+ #include <va/va.h>
+
+ class HTTPD;
+ struct jpeg_compress_struct;
+ struct VADisplayWithCleanup;
+ struct VectorDestinationManager;
+
+ class MJPEGEncoder {
+ public:
+ MJPEGEncoder(HTTPD *httpd, const std::string &va_display);
+ void stop();
+ void upload_frame(int64_t pts, unsigned card_index, RefCountedFrame frame, const bmusb::VideoFormat &video_format, size_t y_offset, size_t cbcr_offset);
+
+ private:
+ static constexpr int quality = 90;
+
+ struct QueuedFrame {
+ int64_t pts;
+ unsigned card_index;
+ RefCountedFrame frame;
+ bmusb::VideoFormat video_format;
+ size_t y_offset, cbcr_offset;
+ };
+
+ void encoder_thread_func();
+ std::vector<uint8_t> encode_jpeg(const QueuedFrame &qf);
+ std::vector<uint8_t> encode_jpeg_va(const QueuedFrame &qf);
+ std::vector<uint8_t> encode_jpeg_libjpeg(const QueuedFrame &qf);
+ void init_jpeg_422(unsigned width, unsigned height, VectorDestinationManager *dest, jpeg_compress_struct *cinfo);
+ std::vector<uint8_t> get_jpeg_header(unsigned width, unsigned height, jpeg_compress_struct *cinfo);
+
+ static int write_packet2_thunk(void *opaque, uint8_t *buf, int buf_size, AVIODataMarkerType type, int64_t time);
+ int write_packet2(uint8_t *buf, int buf_size, AVIODataMarkerType type, int64_t time);
+
+ std::thread encoder_thread;
+
+ std::mutex mu;
+ std::queue<QueuedFrame> frames_to_be_encoded; // Under mu.
+ std::condition_variable any_frames_to_be_encoded;
+
+ std::queue<QueuedFrame> frames_encoding; // Under mu.
+ std::condition_variable any_frames_encoding;
+
+ AVFormatContextWithCloser avctx;
+ HTTPD *httpd;
+ std::string mux_header;
+ std::atomic<bool> should_quit{false};
+ bool running = false;
+
+ std::unique_ptr<VADisplayWithCleanup> va_dpy;
+ VAConfigID config_id;
+
+ struct VAData {
+ std::vector<uint8_t> jpeg_header;
+ VAEncPictureParameterBufferJPEG pic_param;
+ VAQMatrixBufferJPEG q;
+ VAHuffmanTableBufferJPEGBaseline huff;
+ VAEncSliceParameterBufferJPEG parms;
+ };
+ std::map<std::pair<unsigned, unsigned>, VAData> va_data_for_resolution;
+ VAData get_va_data_for_resolution(unsigned width, unsigned height);
+
+ struct VAResources {
+ unsigned width, height;
+ VASurfaceID surface;
+ VAContextID context;
+ VABufferID data_buffer;
+ };
+ std::list<VAResources> va_resources_freelist;
+ std::mutex va_resources_mutex;
+ VAResources get_va_resources(unsigned width, unsigned height);
+ void release_va_resources(VAResources resources);
+
+ // RAII wrapper to release VAResources on return (even on error).
+ class ReleaseVAResources {
+ public:
+ ReleaseVAResources(MJPEGEncoder *mjpeg, const VAResources &resources)
+ : mjpeg(mjpeg), resources(resources) {}
+ ~ReleaseVAResources()
+ {
+ if (!committed) {
+ mjpeg->release_va_resources(resources);
+ }
+ }
+
+ void commit() { committed = true; }
+
+ private:
+ MJPEGEncoder * const mjpeg;
+ const VAResources &resources;
+ bool committed = false;
+ };
+
+ static std::unique_ptr<VADisplayWithCleanup> try_open_va(const std::string &va_display, std::string *error, VAConfigID *config_id);
+
+ uint8_t *tmp_y, *tmp_cbcr, *tmp_cb, *tmp_cr; // Private to the encoder thread.
+ };
+
+ #endif // !defined(_MJPEG_ENCODER_H)
Frame frame;
frame.data = (uint8_t *)glMapBufferRange(buffer, 0, frame_size, permissions | map_bits | GL_MAP_PERSISTENT_BIT);
frame.data2 = frame.data + frame_size / 2;
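+ // Keep a CPU-side copy of the raw frame data, so that the MJPEG encoder
+ // can read it without touching the mapped PBO.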
+ frame.data_copy = new uint8_t[frame_size];
check_error();
frame.size = frame_size;
frame.userdata = &userdata[frame_idx];
void PBOFrameAllocator::destroy_frame(Frame *frame)
{
+ delete[] frame->data_copy;
+
GLuint pbo = ((Userdata *)frame->userdata)->pbo;
glBindBuffer(buffer, pbo);
check_error();
} // namespace
#include "audio_encoder.h"
-#include "context.h"
+#include "shared/context.h"
#include "defs.h"
-#include "disk_space_estimator.h"
-#include "ffmpeg_raii.h"
+#include "shared/disk_space_estimator.h"
+#include "shared/ffmpeg_raii.h"
#include "flags.h"
-#include "mux.h"
+#include "shared/mux.h"
#include "print_latency.h"
#include "quicksync_encoder_impl.h"
#include "ref_counted_frame.h"
-#include "timebase.h"
+#include "shared/timebase.h"
#include "x264_encoder.h"
using namespace movit;
{
lock_guard<mutex> lock(file_audio_encoder_mutex);
AVCodecParametersWithDeleter audio_codecpar = file_audio_encoder->get_codec_parameters();
- file_mux.reset(new Mux(avctx, frame_width, frame_height, Mux::CODEC_H264, video_extradata, audio_codecpar.get(), TIMEBASE,
- std::bind(&DiskSpaceEstimator::report_write, disk_space_estimator, filename, _1),
+ file_mux.reset(new Mux(avctx, frame_width, frame_height, Mux::CODEC_H264, video_extradata, audio_codecpar.get(), get_color_space(global_flags.ycbcr_rec709_coefficients), Mux::WITH_AUDIO, TIMEBASE,
+ std::bind(&DiskSpaceEstimator::report_append, disk_space_estimator, filename, _1),
Mux::WRITE_BACKGROUND,
{ &current_file_mux_metrics, &total_mux_metrics }));
}
stream_mux->add_packet(pkt, pts, pts);
}
- namespace {
-
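+ // Now also used by MJPEGEncoder, so this is no longer in an anonymous namespace.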
void memcpy_with_pitch(uint8_t *dst, const uint8_t *src, size_t src_width, size_t dst_pitch, size_t height)
{
if (src_width == dst_pitch) {
}
}
- } // namespace
-
void QuickSyncEncoderImpl::pass_frame(QuickSyncEncoderImpl::PendingFrame frame, int display_frame_num, int64_t pts, int64_t duration)
{
// Wait for the GPU to be done with the frame.
#include "audio_encoder.h"
#include "defs.h"
-#include "timebase.h"
+#include "shared/timebase.h"
#include "print_latency.h"
-#include "ref_counted_gl_sync.h"
+#include "shared/ref_counted_gl_sync.h"
+ #include "va_display_with_cleanup.h"
#define SURFACE_NUM 16 /* 16 surfaces for source YUV */
#define MAX_NUM_REF1 16 // Seemingly a hardware-fixed value, not related to SURFACE_NUM
class QSurface;
class X264Encoder;
- struct VADisplayWithCleanup {
- ~VADisplayWithCleanup();
-
- VADisplay va_dpy;
- Display *x11_display = nullptr;
- bool can_use_zerocopy = true;
- int drm_fd = -1;
- };
- std::unique_ptr<VADisplayWithCleanup> va_open_display(const std::string &va_display); // Can return nullptr on failure.
-
class QuickSyncEncoderImpl {
public:
QuickSyncEncoderImpl(const std::string &filename, movit::ResourcePool *resource_pool, QSurface *surface, const std::string &va_display, int width, int height, AVOutputFormat *oformat, X264Encoder *x264_encoder, DiskSpaceEstimator *disk_space_estimator);
#include "audio_encoder.h"
#include "defs.h"
-#include "ffmpeg_raii.h"
+#include "shared/ffmpeg_raii.h"
#include "flags.h"
-#include "httpd.h"
-#include "mux.h"
+#include "shared/httpd.h"
+#include "shared/mux.h"
#include "quicksync_encoder.h"
-#include "timebase.h"
+#include "shared/timebase.h"
#include "x264_encoder.h"
class RefCountedFrame;
video_extradata = x264_encoder->get_global_headers();
}
- stream_mux.reset(new Mux(avctx, width, height, video_codec, video_extradata, stream_audio_encoder->get_codec_parameters().get(), COARSE_TIMEBASE,
+ stream_mux.reset(new Mux(avctx, width, height, video_codec, video_extradata, stream_audio_encoder->get_codec_parameters().get(),
+ get_color_space(global_flags.ycbcr_rec709_coefficients),
+ Mux::WITH_AUDIO, COARSE_TIMEBASE,
/*write_callback=*/nullptr, Mux::WRITE_FOREGROUND, { &stream_mux_metrics }));
stream_mux_metrics.init({{ "destination", "http" }});
}
if (type == AVIO_DATA_MARKER_HEADER) {
stream_mux_header.append((char *)buf, buf_size);
- httpd->set_header(stream_mux_header);
+ httpd->set_header(HTTPD::MAIN_STREAM, stream_mux_header);
} else {
- httpd->add_data((char *)buf, buf_size, type == AVIO_DATA_MARKER_SYNC_POINT, time, AVRational{ AV_TIME_BASE, 1 });
+ httpd->add_data(HTTPD::MAIN_STREAM, (char *)buf, buf_size, type == AVIO_DATA_MARKER_SYNC_POINT, time, AVRational{ AV_TIME_BASE, 1 });
}
return buf_size;
}
-#include "httpd.h"
+#include "shared/httpd.h"
#include <assert.h>
#include <byteswap.h>
#include <endian.h>
+#include <memory>
#include <microhttpd.h>
#include <netinet/in.h>
#include <stdio.h>
#include <string.h>
#include <sys/time.h>
#include <time.h>
-#include <memory>
extern "C" {
#include <libavutil/avutil.h>
}
-#include "defs.h"
-#include "metacube2.h"
-#include "metrics.h"
+#include "shared/shared_defs.h"
+#include "shared/metacube2.h"
+#include "shared/metrics.h"
struct MHD_Connection;
struct MHD_Response;
}
}
- void HTTPD::add_data(const char *buf, size_t size, bool keyframe, int64_t time, AVRational timebase)
+ void HTTPD::add_data(StreamType stream_type, const char *buf, size_t size, bool keyframe, int64_t time, AVRational timebase)
{
unique_lock<mutex> lock(streams_mutex);
for (Stream *stream : streams) {
- stream->add_data(buf, size, keyframe ? Stream::DATA_TYPE_KEYFRAME : Stream::DATA_TYPE_OTHER, time, timebase);
+ if (stream->get_stream_type() == stream_type) {
+ stream->add_data(buf, size, keyframe ? Stream::DATA_TYPE_KEYFRAME : Stream::DATA_TYPE_OTHER, time, timebase);
+ }
}
}
int HTTPD::answer_to_connection(MHD_Connection *connection,
const char *url, const char *method,
- const char *version, const char *upload_data,
- size_t *upload_data_size, void **con_cls)
+ const char *version, const char *upload_data,
+ size_t *upload_data_size, void **con_cls)
{
// See if the URL ends in “.metacube”.
HTTPD::Stream::Framing framing;
} else {
framing = HTTPD::Stream::FRAMING_RAW;
}
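+ // The multicam (MJPEG) stream is served as /multicam.mp4; every other
+ // URL gets the main stream.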
+ HTTPD::StreamType stream_type;
+ if (strcmp(url, "/multicam.mp4") == 0) {
+ stream_type = HTTPD::StreamType::MULTICAM_STREAM;
+ } else {
+ stream_type = HTTPD::StreamType::MAIN_STREAM;
+ }
if (strcmp(url, "/metrics") == 0) {
string contents = global_metrics.serialize();
return ret;
}
- HTTPD::Stream *stream = new HTTPD::Stream(this, framing);
- stream->add_data(header.data(), header.size(), Stream::DATA_TYPE_HEADER, AV_NOPTS_VALUE, AVRational{ 1, 0 });
+ HTTPD::Stream *stream = new HTTPD::Stream(this, framing, stream_type);
+ stream->add_data(header[stream_type].data(), header[stream_type].size(), Stream::DATA_TYPE_HEADER, AV_NOPTS_VALUE, AVRational{ 1, 0 });
{
unique_lock<mutex> lock(streams_mutex);
streams.insert(stream);
ssize_t HTTPD::Stream::reader_callback(uint64_t pos, char *buf, size_t max)
{
unique_lock<mutex> lock(buffer_mutex);
- has_buffered_data.wait(lock, [this]{ return should_quit || !buffered_data.empty(); });
+ has_buffered_data.wait(lock, [this] { return should_quit || !buffered_data.empty(); });
if (should_quit) {
return 0;
}
buffered_data.emplace_back((char *)&packet, sizeof(packet));
}
- has_buffered_data.notify_all();
+ has_buffered_data.notify_all();
}
void HTTPD::Stream::stop()
// A class dealing with stream output to HTTP.
-#include <stddef.h>
-#include <stdint.h>
-#include <sys/types.h>
#include <atomic>
#include <condition_variable>
#include <deque>
#include <functional>
#include <mutex>
#include <set>
+#include <stddef.h>
+#include <stdint.h>
#include <string>
+#include <sys/types.h>
#include <unordered_map>
#include <utility>
HTTPD();
~HTTPD();
+ enum StreamType {
+ MAIN_STREAM,
+ MULTICAM_STREAM,
+ NUM_STREAM_TYPES
+ };
+
// Should be called before start().
- void set_header(const std::string &data)
- {
- header = data;
+ void set_header(StreamType stream_type, const std::string &data)
+ {
+ header[stream_type] = data;
}
// Should be called before start() (due to threading issues).
NO_CORS_POLICY,
ALLOW_ALL_ORIGINS
};
- void add_endpoint(const std::string &url, const EndpointCallback &callback, CORSPolicy cors_policy) {
+ void add_endpoint(const std::string &url, const EndpointCallback &callback, CORSPolicy cors_policy)
+ {
endpoints[url] = Endpoint{ callback, cors_policy };
}
void start(int port);
void stop();
- void add_data(const char *buf, size_t size, bool keyframe, int64_t time, AVRational timebase);
+ void add_data(StreamType stream_type, const char *buf, size_t size, bool keyframe, int64_t time, AVRational timebase);
- int64_t get_num_connected_clients() const {
+ int64_t get_num_connected_clients() const
+ {
return metric_num_connected_clients.load();
}
static void free_stream(void *cls);
-
class Stream {
public:
enum Framing {
FRAMING_RAW,
FRAMING_METACUBE
};
- Stream(HTTPD *parent, Framing framing)
- : parent(parent), framing(framing) {}
+ Stream(HTTPD *parent, Framing framing, StreamType stream_type)
+ : parent(parent), framing(framing), stream_type(stream_type) {}
static ssize_t reader_callback_thunk(void *cls, uint64_t pos, char *buf, size_t max);
ssize_t reader_callback(uint64_t pos, char *buf, size_t max);
void add_data(const char *buf, size_t size, DataType data_type, int64_t time, AVRational timebase);
void stop();
HTTPD *get_parent() const { return parent; }
+ StreamType get_stream_type() const { return stream_type; }
private:
HTTPD *parent;
std::deque<std::string> buffered_data; // Protected by <buffer_mutex>.
size_t used_of_buffered_data = 0; // How many bytes of the first element of <buffered_data> have already been used. Protected by <buffer_mutex>.
bool seen_keyframe = false;
+ StreamType stream_type;
};
MHD_Daemon *mhd = nullptr;
CORSPolicy cors_policy;
};
std::unordered_map<std::string, Endpoint> endpoints;
- std::string header;
+ std::string header[NUM_STREAM_TYPES];
// Metrics.
- std::atomic<int64_t> metric_num_connected_clients{0};
+ std::atomic<int64_t> metric_num_connected_clients{ 0 };
};
#endif // !defined(_HTTPD_H)