git.sesse.net Git - nageru/commitdiff
Merge branch 'mjpeg'
author    Steinar H. Gunderson <sgunderson@bigfoot.com>
          Tue, 4 Dec 2018 17:39:41 +0000 (18:39 +0100)
committer Steinar H. Gunderson <sgunderson@bigfoot.com>
          Tue, 4 Dec 2018 17:43:07 +0000 (18:43 +0100)
14 files changed:
futatabi/video_stream.cpp
nageru/decklink_capture.cpp
nageru/kaeru.cpp
nageru/meson.build
nageru/mixer.cpp
nageru/mixer.h
nageru/mjpeg_encoder.cpp
nageru/mjpeg_encoder.h
nageru/pbo_frame_allocator.cpp
nageru/quicksync_encoder.cpp
nageru/quicksync_encoder_impl.h
nageru/video_encoder.cpp
shared/httpd.cpp
shared/httpd.h

diff --cc futatabi/video_stream.cpp
index 8738dc546fa85ab09742aaf882454c0389cef402,0000000000000000000000000000000000000000..8f11714fb6d50bf863e37b9d5b531bbe0fbebe73
mode 100644,000000..100644
--- /dev/null
@@@ -1,657 -1,0 +1,657 @@@
 +#include "video_stream.h"
 +
 +extern "C" {
 +#include <libavformat/avformat.h>
 +#include <libavformat/avio.h>
 +}
 +
 +#include "chroma_subsampler.h"
 +#include "shared/context.h"
 +#include "flags.h"
 +#include "flow.h"
 +#include "shared/httpd.h"
 +#include "jpeg_frame_view.h"
 +#include "movit/util.h"
 +#include "shared/mux.h"
 +#include "player.h"
 +#include "util.h"
 +#include "ycbcr_converter.h"
 +
 +#include <epoxy/glx.h>
 +#include <jpeglib.h>
 +#include <unistd.h>
 +
 +using namespace std;
 +using namespace std::chrono;
 +
 +extern HTTPD *global_httpd;
 +
 +struct VectorDestinationManager {
 +      jpeg_destination_mgr pub;
 +      std::vector<uint8_t> dest;
 +
 +      VectorDestinationManager()
 +      {
 +              pub.init_destination = init_destination_thunk;
 +              pub.empty_output_buffer = empty_output_buffer_thunk;
 +              pub.term_destination = term_destination_thunk;
 +      }
 +
 +      static void init_destination_thunk(j_compress_ptr ptr)
 +      {
 +              ((VectorDestinationManager *)(ptr->dest))->init_destination();
 +      }
 +
 +      inline void init_destination()
 +      {
 +              make_room(0);
 +      }
 +
 +      static boolean empty_output_buffer_thunk(j_compress_ptr ptr)
 +      {
 +              return ((VectorDestinationManager *)(ptr->dest))->empty_output_buffer();
 +      }
 +
 +      inline bool empty_output_buffer()
 +      {
 +              make_room(dest.size());  // Should ignore pub.free_in_buffer!
 +              return true;
 +      }
 +
 +      inline void make_room(size_t bytes_used)
 +      {
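 +              // Grow by at least 4 kB past what is already used, then expand
 +              // to the vector's full allocated capacity, so that libjpeg gets
 +              // all of the reserved space as output room.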
 +              dest.resize(bytes_used + 4096);
 +              dest.resize(dest.capacity());
 +              pub.next_output_byte = dest.data() + bytes_used;
 +              pub.free_in_buffer = dest.size() - bytes_used;
 +      }
 +
 +      static void term_destination_thunk(j_compress_ptr ptr)
 +      {
 +              ((VectorDestinationManager *)(ptr->dest))->term_destination();
 +      }
 +
 +      inline void term_destination()
 +      {
 +              dest.resize(dest.size() - pub.free_in_buffer);
 +      }
 +};
 +static_assert(std::is_standard_layout<VectorDestinationManager>::value, "");
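 +// The thunks above cast jpeg's ptr->dest back to VectorDestinationManager,
 +// which is only safe because 'pub' is the first member of a standard-layout
 +// type; the assert checks the standard-layout half of that.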
 +
 +vector<uint8_t> encode_jpeg(const uint8_t *y_data, const uint8_t *cb_data, const uint8_t *cr_data, unsigned width, unsigned height)
 +{
 +      VectorDestinationManager dest;
 +
 +      jpeg_compress_struct cinfo;
 +      jpeg_error_mgr jerr;
 +      cinfo.err = jpeg_std_error(&jerr);
 +      jpeg_create_compress(&cinfo);
 +
 +      cinfo.dest = (jpeg_destination_mgr *)&dest;
 +      cinfo.input_components = 3;
 +      cinfo.in_color_space = JCS_RGB;
 +      jpeg_set_defaults(&cinfo);
 +      constexpr int quality = 90;
 +      jpeg_set_quality(&cinfo, quality, /*force_baseline=*/false);
 +
 +      cinfo.image_width = width;
 +      cinfo.image_height = height;
 +      cinfo.raw_data_in = true;
 +      jpeg_set_colorspace(&cinfo, JCS_YCbCr);
 +      cinfo.comp_info[0].h_samp_factor = 2;
 +      cinfo.comp_info[0].v_samp_factor = 1;
 +      cinfo.comp_info[1].h_samp_factor = 1;
 +      cinfo.comp_info[1].v_samp_factor = 1;
 +      cinfo.comp_info[2].h_samp_factor = 1;
 +      cinfo.comp_info[2].v_samp_factor = 1;
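 +      // 2x1 luma vs. 1x1 chroma sampling factors give 4:2:2 subsampling,
 +      // matching the half-horizontal-resolution Cb/Cr planes we get in.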
 +      cinfo.CCIR601_sampling = true;  // Seems to be mostly ignored by libjpeg, though.
 +      jpeg_start_compress(&cinfo, true);
 +
 +      JSAMPROW yptr[8], cbptr[8], crptr[8];
 +      JSAMPARRAY data[3] = { yptr, cbptr, crptr };
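 +      // jpeg_write_raw_data() wants whole MCU rows; with v_samp_factor = 1,
 +      // that means eight scanlines at a time (so height is assumed to be
 +      // a multiple of 8 here).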
 +      for (unsigned y = 0; y < height; y += 8) {
 +              for (unsigned yy = 0; yy < 8; ++yy) {
 +                      yptr[yy] = const_cast<JSAMPROW>(&y_data[(y + yy) * width]);
 +                      cbptr[yy] = const_cast<JSAMPROW>(&cb_data[(y + yy) * width / 2]);
 +                      crptr[yy] = const_cast<JSAMPROW>(&cr_data[(y + yy) * width / 2]);
 +              }
 +
 +              jpeg_write_raw_data(&cinfo, data, /*num_lines=*/8);
 +      }
 +
 +      jpeg_finish_compress(&cinfo);
 +      jpeg_destroy_compress(&cinfo);
 +
 +      return move(dest.dest);
 +}
 +
 +VideoStream::VideoStream()
 +{
 +      ycbcr_converter.reset(new YCbCrConverter(YCbCrConverter::OUTPUT_TO_DUAL_YCBCR, /*resource_pool=*/nullptr));
 +      ycbcr_semiplanar_converter.reset(new YCbCrConverter(YCbCrConverter::OUTPUT_TO_SEMIPLANAR, /*resource_pool=*/nullptr));
 +
 +      GLuint input_tex[num_interpolate_slots], gray_tex[num_interpolate_slots];
 +      GLuint fade_y_output_tex[num_interpolate_slots], fade_cbcr_output_tex[num_interpolate_slots];
 +      GLuint cb_tex[num_interpolate_slots], cr_tex[num_interpolate_slots];
 +
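 +      // Each interpolation slot gets its own set of textures: a two-layer
 +      // array texture holding the two input frames (plus a grayscale copy
 +      // for the flow computation), and output/chroma textures for fades
 +      // and for 4:2:2 subsampling.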
 +      glCreateTextures(GL_TEXTURE_2D_ARRAY, num_interpolate_slots, input_tex);
 +      glCreateTextures(GL_TEXTURE_2D_ARRAY, num_interpolate_slots, gray_tex);
 +      glCreateTextures(GL_TEXTURE_2D, num_interpolate_slots, fade_y_output_tex);
 +      glCreateTextures(GL_TEXTURE_2D, num_interpolate_slots, fade_cbcr_output_tex);
 +      glCreateTextures(GL_TEXTURE_2D, num_interpolate_slots, cb_tex);
 +      glCreateTextures(GL_TEXTURE_2D, num_interpolate_slots, cr_tex);
 +      check_error();
 +
 +      constexpr size_t width = 1280, height = 720;  // FIXME: adjustable width, height
 +      int levels = find_num_levels(width, height);
 +      for (size_t i = 0; i < num_interpolate_slots; ++i) {
 +              glTextureStorage3D(input_tex[i], levels, GL_RGBA8, width, height, 2);
 +              check_error();
 +              glTextureStorage3D(gray_tex[i], levels, GL_R8, width, height, 2);
 +              check_error();
 +              glTextureStorage2D(fade_y_output_tex[i], 1, GL_R8, width, height);
 +              check_error();
 +              glTextureStorage2D(fade_cbcr_output_tex[i], 1, GL_RG8, width, height);
 +              check_error();
 +              glTextureStorage2D(cb_tex[i], 1, GL_R8, width / 2, height);
 +              check_error();
 +              glTextureStorage2D(cr_tex[i], 1, GL_R8, width / 2, height);
 +              check_error();
 +
 +              unique_ptr<InterpolatedFrameResources> resource(new InterpolatedFrameResources);
 +              resource->owner = this;
 +              resource->input_tex = input_tex[i];
 +              resource->gray_tex = gray_tex[i];
 +              resource->fade_y_output_tex = fade_y_output_tex[i];
 +              resource->fade_cbcr_output_tex = fade_cbcr_output_tex[i];
 +              resource->cb_tex = cb_tex[i];
 +              resource->cr_tex = cr_tex[i];
 +              glCreateFramebuffers(2, resource->input_fbos);
 +              check_error();
 +              glCreateFramebuffers(1, &resource->fade_fbo);
 +              check_error();
 +
 +              glNamedFramebufferTextureLayer(resource->input_fbos[0], GL_COLOR_ATTACHMENT0, input_tex[i], 0, 0);
 +              check_error();
 +              glNamedFramebufferTextureLayer(resource->input_fbos[0], GL_COLOR_ATTACHMENT1, gray_tex[i], 0, 0);
 +              check_error();
 +              glNamedFramebufferTextureLayer(resource->input_fbos[1], GL_COLOR_ATTACHMENT0, input_tex[i], 0, 1);
 +              check_error();
 +              glNamedFramebufferTextureLayer(resource->input_fbos[1], GL_COLOR_ATTACHMENT1, gray_tex[i], 0, 1);
 +              check_error();
 +              glNamedFramebufferTexture(resource->fade_fbo, GL_COLOR_ATTACHMENT0, fade_y_output_tex[i], 0);
 +              check_error();
 +              glNamedFramebufferTexture(resource->fade_fbo, GL_COLOR_ATTACHMENT1, fade_cbcr_output_tex[i], 0);
 +              check_error();
 +
 +              GLuint bufs[] = { GL_COLOR_ATTACHMENT0, GL_COLOR_ATTACHMENT1 };
 +              glNamedFramebufferDrawBuffers(resource->input_fbos[0], 2, bufs);
 +              check_error();
 +              glNamedFramebufferDrawBuffers(resource->input_fbos[1], 2, bufs);
 +              check_error();
 +              glNamedFramebufferDrawBuffers(resource->fade_fbo, 2, bufs);
 +              check_error();
 +
 +              glCreateBuffers(1, &resource->pbo);
 +              check_error();
 +              glNamedBufferStorage(resource->pbo, width * height * 4, nullptr, GL_MAP_READ_BIT | GL_MAP_PERSISTENT_BIT);
 +              check_error();
 +              resource->pbo_contents = glMapNamedBufferRange(resource->pbo, 0, width * height * 4, GL_MAP_READ_BIT | GL_MAP_PERSISTENT_BIT);
 +              interpolate_resources.push_back(move(resource));
 +      }
 +
 +      check_error();
 +
 +      OperatingPoint op;
 +      if (global_flags.interpolation_quality == 1) {
 +              op = operating_point1;
 +      } else if (global_flags.interpolation_quality == 2) {
 +              op = operating_point2;
 +      } else if (global_flags.interpolation_quality == 3) {
 +              op = operating_point3;
 +      } else if (global_flags.interpolation_quality == 4) {
 +              op = operating_point4;
 +      } else {
 +              assert(false);
 +      }
 +
 +      compute_flow.reset(new DISComputeFlow(width, height, op));
 +      interpolate.reset(new Interpolate(op, /*split_ycbcr_output=*/true));
 +      interpolate_no_split.reset(new Interpolate(op, /*split_ycbcr_output=*/false));
 +      chroma_subsampler.reset(new ChromaSubsampler);
 +      check_error();
 +
 +      // The “last frame” is initially black.
 +      unique_ptr<uint8_t[]> y(new uint8_t[1280 * 720]);
 +      unique_ptr<uint8_t[]> cb_or_cr(new uint8_t[640 * 720]);
 +      memset(y.get(), 16, 1280 * 720);
 +      memset(cb_or_cr.get(), 128, 640 * 720);
 +      last_frame = encode_jpeg(y.get(), cb_or_cr.get(), cb_or_cr.get(), 1280, 720);
 +}
 +
 +VideoStream::~VideoStream() {}
 +
 +void VideoStream::start()
 +{
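 +      // Mux the MJPEG frames into a NUT stream through custom AVIO
 +      // callbacks; the muxed bytes end up in write_packet2() below,
 +      // which hands them on to the HTTP server.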
 +      AVFormatContext *avctx = avformat_alloc_context();
 +      avctx->oformat = av_guess_format("nut", nullptr, nullptr);
 +
 +      uint8_t *buf = (uint8_t *)av_malloc(MUX_BUFFER_SIZE);
 +      avctx->pb = avio_alloc_context(buf, MUX_BUFFER_SIZE, 1, this, nullptr, nullptr, nullptr);
 +      avctx->pb->write_data_type = &VideoStream::write_packet2_thunk;
 +      avctx->pb->ignore_boundary_point = 1;
 +
 +      Mux::Codec video_codec = Mux::CODEC_MJPEG;
 +
 +      avctx->flags = AVFMT_FLAG_CUSTOM_IO;
 +
 +      string video_extradata;
 +
 +      constexpr int width = 1280, height = 720;  // Doesn't matter for MJPEG.
 +      stream_mux.reset(new Mux(avctx, width, height, video_codec, video_extradata, /*audio_codec_parameters=*/nullptr,
 +              AVCOL_SPC_BT709, Mux::WITHOUT_AUDIO,
 +              COARSE_TIMEBASE, /*write_callback=*/nullptr, Mux::WRITE_FOREGROUND, {}));
 +
 +
 +      encode_thread = thread(&VideoStream::encode_thread_func, this);
 +}
 +
 +void VideoStream::stop()
 +{
 +      encode_thread.join();
 +}
 +
 +void VideoStream::clear_queue()
 +{
 +      deque<QueuedFrame> q;
 +
 +      {
 +              unique_lock<mutex> lock(queue_lock);
 +              q = move(frame_queue);
 +      }
 +
 +      // These are not RAII-ed, unfortunately, so we'll need to clean them ourselves.
 +      // Note that release_texture() is thread-safe.
 +      for (const QueuedFrame &qf : q) {
 +              if (qf.type == QueuedFrame::INTERPOLATED ||
 +                  qf.type == QueuedFrame::FADED_INTERPOLATED) {
 +                      compute_flow->release_texture(qf.flow_tex);
 +              }
 +              if (qf.type == QueuedFrame::INTERPOLATED) {
 +                      interpolate->release_texture(qf.output_tex);
 +                      interpolate->release_texture(qf.cbcr_tex);
 +              }
 +      }
 +
 +      // Destroy q outside the mutex; destroying it while holding the lock would be a double-lock.
 +}
 +
 +void VideoStream::schedule_original_frame(steady_clock::time_point local_pts,
 +                                          int64_t output_pts, function<void()> &&display_func,
 +                                          QueueSpotHolder &&queue_spot_holder,
 +                                          FrameOnDisk frame)
 +{
 +      fprintf(stderr, "output_pts=%ld  original      input_pts=%ld\n", output_pts, frame.pts);
 +
 +      // Preload the file from disk, so that the encoder thread does not get stalled.
 +      // TODO: Consider sending it through the queue instead.
 +      (void)frame_reader.read_frame(frame);
 +
 +      QueuedFrame qf;
 +      qf.local_pts = local_pts;
 +      qf.type = QueuedFrame::ORIGINAL;
 +      qf.output_pts = output_pts;
 +      qf.frame1 = frame;
 +      qf.display_func = move(display_func);
 +      qf.queue_spot_holder = move(queue_spot_holder);
 +
 +      unique_lock<mutex> lock(queue_lock);
 +      frame_queue.push_back(move(qf));
 +      queue_changed.notify_all();
 +}
 +
 +void VideoStream::schedule_faded_frame(steady_clock::time_point local_pts, int64_t output_pts,
 +                                       function<void()> &&display_func,
 +                                       QueueSpotHolder &&queue_spot_holder,
 +                                       FrameOnDisk frame1_spec, FrameOnDisk frame2_spec,
 +                                       float fade_alpha)
 +{
 +      fprintf(stderr, "output_pts=%ld  faded         input_pts=%ld,%ld  fade_alpha=%.2f\n", output_pts, frame1_spec.pts, frame2_spec.pts, fade_alpha);
 +
 +      // Get the temporary OpenGL resources we need for doing the fade.
 +      // (We share these with interpolated frames, which is slightly
 +      // overkill, but there's no need to waste resources on keeping
 +      // separate pools around.)
 +      BorrowedInterpolatedFrameResources resources;
 +      {
 +              unique_lock<mutex> lock(queue_lock);
 +              if (interpolate_resources.empty()) {
 +                      fprintf(stderr, "WARNING: Too many interpolated frames already in transit; dropping one.\n");
 +                      return;
 +              }
 +              resources = BorrowedInterpolatedFrameResources(interpolate_resources.front().release());
 +              interpolate_resources.pop_front();
 +      }
 +
 +      bool did_decode;
 +
 +      shared_ptr<Frame> frame1 = decode_jpeg_with_cache(frame1_spec, DECODE_IF_NOT_IN_CACHE, &frame_reader, &did_decode);
 +      shared_ptr<Frame> frame2 = decode_jpeg_with_cache(frame2_spec, DECODE_IF_NOT_IN_CACHE, &frame_reader, &did_decode);
 +
 +      ycbcr_semiplanar_converter->prepare_chain_for_fade(frame1, frame2, fade_alpha)->render_to_fbo(resources->fade_fbo, 1280, 720);
 +
 +      QueuedFrame qf;
 +      qf.local_pts = local_pts;
 +      qf.type = QueuedFrame::FADED;
 +      qf.output_pts = output_pts;
 +      qf.frame1 = frame1_spec;
 +      qf.display_func = move(display_func);
 +      qf.queue_spot_holder = move(queue_spot_holder);
 +
 +      qf.secondary_frame = frame2_spec;
 +
 +      // Subsample and split Cb/Cr.
 +      chroma_subsampler->subsample_chroma(resources->fade_cbcr_output_tex, 1280, 720, resources->cb_tex, resources->cr_tex);
 +
 +      // Read it down (asynchronously) to the CPU.
 +      glPixelStorei(GL_PACK_ROW_LENGTH, 0);
 +      glBindBuffer(GL_PIXEL_PACK_BUFFER, resources->pbo);
 +      check_error();
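 +      // The three planes are packed back to back in the PBO: Y at offset 0,
 +      // then Cb, then Cr. (The size arguments are upper bounds on the space
 +      // remaining, not exact plane sizes.)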
 +      glGetTextureImage(resources->fade_y_output_tex, 0, GL_RED, GL_UNSIGNED_BYTE, 1280 * 720 * 4, BUFFER_OFFSET(0));
 +      check_error();
 +      glGetTextureImage(resources->cb_tex, 0, GL_RED, GL_UNSIGNED_BYTE, 1280 * 720 * 3, BUFFER_OFFSET(1280 * 720));
 +      check_error();
 +      glGetTextureImage(resources->cr_tex, 0, GL_RED, GL_UNSIGNED_BYTE, 1280 * 720 * 3 - 640 * 720, BUFFER_OFFSET(1280 * 720 + 640 * 720));
 +      check_error();
 +      glBindBuffer(GL_PIXEL_PACK_BUFFER, 0);
 +
 +      // Set a fence we can wait for to make sure the CPU sees the read.
 +      glMemoryBarrier(GL_CLIENT_MAPPED_BUFFER_BARRIER_BIT);
 +      check_error();
 +      qf.fence = RefCountedGLsync(GL_SYNC_GPU_COMMANDS_COMPLETE, /*flags=*/0);
 +      check_error();
 +      qf.resources = move(resources);
 +      qf.local_pts = local_pts;
 +
 +      unique_lock<mutex> lock(queue_lock);
 +      frame_queue.push_back(move(qf));
 +      queue_changed.notify_all();
 +}
 +
 +void VideoStream::schedule_interpolated_frame(steady_clock::time_point local_pts,
 +                                              int64_t output_pts, function<void(shared_ptr<Frame>)> &&display_func,
 +                                              QueueSpotHolder &&queue_spot_holder,
 +                                              FrameOnDisk frame1, FrameOnDisk frame2,
 +                                              float alpha, FrameOnDisk secondary_frame, float fade_alpha)
 +{
 +      if (secondary_frame.pts != -1) {
 +              fprintf(stderr, "output_pts=%ld  interpolated  input_pts1=%ld input_pts2=%ld alpha=%.3f  secondary_pts=%ld  fade_alpha=%.2f\n", output_pts, frame1.pts, frame2.pts, alpha, secondary_frame.pts, fade_alpha);
 +      } else {
 +              fprintf(stderr, "output_pts=%ld  interpolated  input_pts1=%ld input_pts2=%ld alpha=%.3f\n", output_pts, frame1.pts, frame2.pts, alpha);
 +      }
 +
 +      // Get the temporary OpenGL resources we need for doing the interpolation.
 +      BorrowedInterpolatedFrameResources resources;
 +      {
 +              unique_lock<mutex> lock(queue_lock);
 +              if (interpolate_resources.empty()) {
 +                      fprintf(stderr, "WARNING: Too many interpolated frames already in transit; dropping one.\n");
 +                      return;
 +              }
 +              resources = BorrowedInterpolatedFrameResources(interpolate_resources.front().release());
 +              interpolate_resources.pop_front();
 +      }
 +
 +      QueuedFrame qf;
 +      qf.type = (secondary_frame.pts == -1) ? QueuedFrame::INTERPOLATED : QueuedFrame::FADED_INTERPOLATED;
 +      qf.output_pts = output_pts;
 +      qf.display_decoded_func = move(display_func);
 +      qf.queue_spot_holder = move(queue_spot_holder);
 +      qf.local_pts = local_pts;
 +
 +      check_error();
 +
 +      // Convert frame0 and frame1 to OpenGL textures.
 +      for (size_t frame_no = 0; frame_no < 2; ++frame_no) {
 +              FrameOnDisk frame_spec = frame_no == 1 ? frame2 : frame1;
 +              bool did_decode;
 +              shared_ptr<Frame> frame = decode_jpeg_with_cache(frame_spec, DECODE_IF_NOT_IN_CACHE, &frame_reader, &did_decode);
 +              ycbcr_converter->prepare_chain_for_conversion(frame)->render_to_fbo(resources->input_fbos[frame_no], 1280, 720);
 +      }
 +
 +      glGenerateTextureMipmap(resources->input_tex);
 +      check_error();
 +      glGenerateTextureMipmap(resources->gray_tex);
 +      check_error();
 +
 +      // Compute the interpolated frame.
 +      qf.flow_tex = compute_flow->exec(resources->gray_tex, DISComputeFlow::FORWARD_AND_BACKWARD, DISComputeFlow::DO_NOT_RESIZE_FLOW);
 +      check_error();
 +
 +      if (secondary_frame.pts != -1) {
 +              // Fade. First kick off the interpolation.
 +              tie(qf.output_tex, ignore) = interpolate_no_split->exec(resources->input_tex, resources->gray_tex, qf.flow_tex, 1280, 720, alpha);
 +              check_error();
 +
 +              // Now decode the image we are fading against.
 +              bool did_decode;
 +              shared_ptr<Frame> frame2 = decode_jpeg_with_cache(secondary_frame, DECODE_IF_NOT_IN_CACHE, &frame_reader, &did_decode);
 +
 +              // Then fade against it, putting it into the fade Y' and CbCr textures.
 +              ycbcr_semiplanar_converter->prepare_chain_for_fade_from_texture(qf.output_tex, frame2, fade_alpha)->render_to_fbo(resources->fade_fbo, 1280, 720);
 +
 +              // Subsample and split Cb/Cr.
 +              chroma_subsampler->subsample_chroma(resources->fade_cbcr_output_tex, 1280, 720, resources->cb_tex, resources->cr_tex);
 +
 +              interpolate_no_split->release_texture(qf.output_tex);
 +      } else {
 +              tie(qf.output_tex, qf.cbcr_tex) = interpolate->exec(resources->input_tex, resources->gray_tex, qf.flow_tex, 1280, 720, alpha);
 +              check_error();
 +
 +              // Subsample and split Cb/Cr.
 +              chroma_subsampler->subsample_chroma(qf.cbcr_tex, 1280, 720, resources->cb_tex, resources->cr_tex);
 +      }
 +
 +      // We could have released qf.flow_tex here, but to make sure we don't cause a stall
 +      // when trying to reuse it for the next frame, we can just as well hold on to it
 +      // and release it only when the readback is done.
 +
 +      // Read it down (asynchronously) to the CPU.
 +      glPixelStorei(GL_PACK_ROW_LENGTH, 0);
 +      glBindBuffer(GL_PIXEL_PACK_BUFFER, resources->pbo);
 +      check_error();
 +      if (secondary_frame.pts != -1) {
 +              glGetTextureImage(resources->fade_y_output_tex, 0, GL_RED, GL_UNSIGNED_BYTE, 1280 * 720 * 4, BUFFER_OFFSET(0));
 +      } else {
 +              glGetTextureImage(qf.output_tex, 0, GL_RED, GL_UNSIGNED_BYTE, 1280 * 720 * 4, BUFFER_OFFSET(0));
 +      }
 +      check_error();
 +      glGetTextureImage(resources->cb_tex, 0, GL_RED, GL_UNSIGNED_BYTE, 1280 * 720 * 3, BUFFER_OFFSET(1280 * 720));
 +      check_error();
 +      glGetTextureImage(resources->cr_tex, 0, GL_RED, GL_UNSIGNED_BYTE, 1280 * 720 * 3 - 640 * 720, BUFFER_OFFSET(1280 * 720 + 640 * 720));
 +      check_error();
 +      glBindBuffer(GL_PIXEL_PACK_BUFFER, 0);
 +
 +      // Set a fence we can wait for to make sure the CPU sees the read.
 +      glMemoryBarrier(GL_CLIENT_MAPPED_BUFFER_BARRIER_BIT);
 +      check_error();
 +      qf.fence = RefCountedGLsync(GL_SYNC_GPU_COMMANDS_COMPLETE, /*flags=*/0);
 +      check_error();
 +      qf.resources = move(resources);
 +
 +      unique_lock<mutex> lock(queue_lock);
 +      frame_queue.push_back(move(qf));
 +      queue_changed.notify_all();
 +}
 +
 +void VideoStream::schedule_refresh_frame(steady_clock::time_point local_pts,
 +                                         int64_t output_pts, function<void()> &&display_func,
 +                                         QueueSpotHolder &&queue_spot_holder)
 +{
 +      QueuedFrame qf;
 +      qf.type = QueuedFrame::REFRESH;
 +      qf.output_pts = output_pts;
 +      qf.display_func = move(display_func);
 +      qf.queue_spot_holder = move(queue_spot_holder);
 +
 +      unique_lock<mutex> lock(queue_lock);
 +      frame_queue.push_back(move(qf));
 +      queue_changed.notify_all();
 +}
 +
 +namespace {
 +
 +shared_ptr<Frame> frame_from_pbo(void *contents, size_t width, size_t height)
 +{
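 +      // The PBO holds the planes in the order the readback wrote them:
 +      // Y, then Cb, then Cr, packed back to back (4:2:2, so chroma is at
 +      // half width).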
 +      size_t chroma_width = width / 2;
 +
 +      const uint8_t *y = (const uint8_t *)contents;
 +      const uint8_t *cb = (const uint8_t *)contents + width * height;
 +      const uint8_t *cr = (const uint8_t *)contents + width * height + chroma_width * height;
 +
 +      shared_ptr<Frame> frame(new Frame);
 +      frame->y.reset(new uint8_t[width * height]);
 +      frame->cb.reset(new uint8_t[chroma_width * height]);
 +      frame->cr.reset(new uint8_t[chroma_width * height]);
 +      for (unsigned yy = 0; yy < height; ++yy) {
 +              memcpy(frame->y.get() + width * yy, y + width * yy, width);
 +              memcpy(frame->cb.get() + chroma_width * yy, cb + chroma_width * yy, chroma_width);
 +              memcpy(frame->cr.get() + chroma_width * yy, cr + chroma_width * yy, chroma_width);
 +      }
 +      frame->is_semiplanar = false;
 +      frame->width = width;
 +      frame->height = height;
 +      frame->chroma_subsampling_x = 2;
 +      frame->chroma_subsampling_y = 1;
 +      frame->pitch_y = width;
 +      frame->pitch_chroma = chroma_width;
 +      return frame;
 +}
 +
 +}  // namespace
 +
 +void VideoStream::encode_thread_func()
 +{
 +      pthread_setname_np(pthread_self(), "VideoStream");
 +      QSurface *surface = create_surface();
 +      QOpenGLContext *context = create_context(surface);
 +      bool ok = make_current(context, surface);
 +      if (!ok) {
 +              fprintf(stderr, "Video stream couldn't get an OpenGL context\n");
 +              exit(1);
 +      }
 +
 +      for ( ;; ) {
 +              QueuedFrame qf;
 +              {
 +                      unique_lock<mutex> lock(queue_lock);
 +
 +                      // Wait until we have a frame to play.
 +                      queue_changed.wait(lock, [this]{
 +                              return !frame_queue.empty();
 +                      });
 +                      steady_clock::time_point frame_start = frame_queue.front().local_pts;
 +
 +                      // Now sleep until the frame is supposed to start (the usual case),
 +                      // _or_ clear_queue() happened.
 +                      bool aborted = queue_changed.wait_until(lock, frame_start, [this, frame_start]{
 +                              return frame_queue.empty() || frame_queue.front().local_pts != frame_start;
 +                      });
 +                      if (aborted) {
 +                              // clear_queue() happened, so don't play this frame after all.
 +                              continue;
 +                      }
 +                      qf = move(frame_queue.front());
 +                      frame_queue.pop_front();
 +              }
 +
 +              if (qf.type == QueuedFrame::ORIGINAL) {
 +                      // Send the JPEG frame on, unchanged.
 +                      string jpeg = frame_reader.read_frame(qf.frame1);
 +                      AVPacket pkt;
 +                      av_init_packet(&pkt);
 +                      pkt.stream_index = 0;
 +                      pkt.data = (uint8_t *)jpeg.data();
 +                      pkt.size = jpeg.size();
 +                      stream_mux->add_packet(pkt, qf.output_pts, qf.output_pts);
 +
 +                      last_frame.assign(&jpeg[0], &jpeg[0] + jpeg.size());
 +              } else if (qf.type == QueuedFrame::FADED) {
 +                      glClientWaitSync(qf.fence.get(), /*flags=*/0, GL_TIMEOUT_IGNORED);
 +
 +                      shared_ptr<Frame> frame = frame_from_pbo(qf.resources->pbo_contents, 1280, 720);
 +
 +                      // Now JPEG encode it, and send it on to the stream.
 +                      vector<uint8_t> jpeg = encode_jpeg(frame->y.get(), frame->cb.get(), frame->cr.get(), 1280, 720);
 +
 +                      AVPacket pkt;
 +                      av_init_packet(&pkt);
 +                      pkt.stream_index = 0;
 +                      pkt.data = (uint8_t *)jpeg.data();
 +                      pkt.size = jpeg.size();
 +                      stream_mux->add_packet(pkt, qf.output_pts, qf.output_pts);
 +                      last_frame = move(jpeg);
 +              } else if (qf.type == QueuedFrame::INTERPOLATED || qf.type == QueuedFrame::FADED_INTERPOLATED) {
 +                      glClientWaitSync(qf.fence.get(), /*flags=*/0, GL_TIMEOUT_IGNORED);
 +
 +                      // Send it on to display.
 +                      shared_ptr<Frame> frame = frame_from_pbo(qf.resources->pbo_contents, 1280, 720);
 +                      if (qf.display_decoded_func != nullptr) {
 +                              qf.display_decoded_func(frame);
 +                      }
 +
 +                      // Now JPEG encode it, and send it on to the stream.
 +                      vector<uint8_t> jpeg = encode_jpeg(frame->y.get(), frame->cb.get(), frame->cr.get(), 1280, 720);
 +                      compute_flow->release_texture(qf.flow_tex);
 +                      if (qf.type != QueuedFrame::FADED_INTERPOLATED) {
 +                              interpolate->release_texture(qf.output_tex);
 +                              interpolate->release_texture(qf.cbcr_tex);
 +                      }
 +
 +                      AVPacket pkt;
 +                      av_init_packet(&pkt);
 +                      pkt.stream_index = 0;
 +                      pkt.data = (uint8_t *)jpeg.data();
 +                      pkt.size = jpeg.size();
 +                      stream_mux->add_packet(pkt, qf.output_pts, qf.output_pts);
 +                      last_frame = move(jpeg);
 +              } else if (qf.type == QueuedFrame::REFRESH) {
 +                      AVPacket pkt;
 +                      av_init_packet(&pkt);
 +                      pkt.stream_index = 0;
 +                      pkt.data = (uint8_t *)last_frame.data();
 +                      pkt.size = last_frame.size();
 +                      stream_mux->add_packet(pkt, qf.output_pts, qf.output_pts);
 +              } else {
 +                      assert(false);
 +              }
 +              if (qf.display_func != nullptr) {
 +                      qf.display_func();
 +              }
 +      }
 +}
 +
 +int VideoStream::write_packet2_thunk(void *opaque, uint8_t *buf, int buf_size, AVIODataMarkerType type, int64_t time)
 +{
 +      VideoStream *video_stream = (VideoStream *)opaque;
 +      return video_stream->write_packet2(buf, buf_size, type, time);
 +}
 +
 +int VideoStream::write_packet2(uint8_t *buf, int buf_size, AVIODataMarkerType type, int64_t time)
 +{
 +      if (type == AVIO_DATA_MARKER_SYNC_POINT || type == AVIO_DATA_MARKER_BOUNDARY_POINT) {
 +              seen_sync_markers = true;
 +      } else if (type == AVIO_DATA_MARKER_UNKNOWN && !seen_sync_markers) {
 +              // We don't know if this is a keyframe or not (the muxer could
 +              // avoid marking it), so we just have to make the best of it.
 +              type = AVIO_DATA_MARKER_SYNC_POINT;
 +      }
 +
 +      if (type == AVIO_DATA_MARKER_HEADER) {
 +              stream_mux_header.append((char *)buf, buf_size);
-               global_httpd->set_header(stream_mux_header);
++              global_httpd->set_header(HTTPD::MAIN_STREAM, stream_mux_header);
 +      } else {
-               global_httpd->add_data((char *)buf, buf_size, type == AVIO_DATA_MARKER_SYNC_POINT, time, AVRational{ AV_TIME_BASE, 1 });
++              global_httpd->add_data(HTTPD::MAIN_STREAM, (char *)buf, buf_size, type == AVIO_DATA_MARKER_SYNC_POINT, time, AVRational{ AV_TIME_BASE, 1 });
 +      }
 +      return buf_size;
 +}
diff --cc nageru/decklink_capture.cpp
Simple merge
diff --cc nageru/kaeru.cpp
Simple merge
diff --cc nageru/meson.build
index ea2d24eeb185574e5c5bd07a75fb8cc622936b48,0000000000000000000000000000000000000000..c66ea542ae16ce1f62e25c3822f05fbdde7188d3
mode 100644,000000..100644
--- /dev/null
@@@ -1,231 -1,0 +1,231 @@@
 +qt5 = import('qt5')
 +protoc = find_program('protoc')
 +cxx = meson.get_compiler('cpp')
 +
 +embedded_bmusb = get_option('embedded_bmusb')
 +
 +alsadep = dependency('alsa')
 +bmusbdep = dependency('bmusb', required: not embedded_bmusb)
 +dldep = cxx.find_library('dl')
 +epoxydep = dependency('epoxy')
 +libavcodecdep = dependency('libavcodec')
 +libavformatdep = dependency('libavformat')
 +libavresampledep = dependency('libavresample')
 +libavutildep = dependency('libavutil')
 +libjpegdep = dependency('libjpeg')
 +libswscaledep = dependency('libswscale')
 +libusbdep = dependency('libusb-1.0')
 +luajitdep = dependency('luajit')
 +movitdep = dependency('movit')
 +protobufdep = dependency('protobuf')
 +qcustomplotdep = cxx.find_library('qcustomplot')
 +qt5deps = dependency('qt5', modules: ['Core', 'Gui', 'Widgets', 'OpenGLExtensions', 'OpenGL', 'PrintSupport'])
 +threaddep = dependency('threads')
 +vadrmdep = dependency('libva-drm')
 +vax11dep = dependency('libva-x11')
 +x11dep = dependency('x11')
 +x264dep = dependency('x264')
 +zitaresamplerdep = cxx.find_library('zita-resampler')
 +
 +srcs = []
 +nageru_deps = [shareddep, qt5deps, libjpegdep, movitdep, protobufdep,
 +      vax11dep, vadrmdep, x11dep, libavformatdep, libavresampledep, libavcodecdep, libavutildep,
 +      libswscaledep, libusbdep, luajitdep, dldep, x264dep, alsadep, zitaresamplerdep,
 +      qcustomplotdep, threaddep]
 +nageru_include_dirs = []
 +nageru_link_with = []
 +nageru_build_rpath = ''
 +nageru_install_rpath = ''
 +
 +kaeru_link_with = []
 +kaeru_extra_deps = []
 +
 +# CEF.
 +exe_dir = join_paths(get_option('prefix'), 'lib/nageru')
 +cef_dir = get_option('cef_dir')
 +cef_build_type = get_option('cef_build_type')
 +have_cef = (cef_dir != '')
 +if have_cef
 +      # This is done in the top-level file; just kept here for reference.
 +      # add_project_arguments('-DHAVE_CEF=1', language: 'cpp')
 +
 +      system_cef = (cef_build_type == 'system')
 +      if system_cef
 +              cef_lib_dir = cef_dir
 +              cef_resource_dir = '/usr/share/cef/Resources'
 +      else
 +              cef_lib_dir = join_paths(cef_dir, cef_build_type)
 +              cef_resource_dir = join_paths(cef_dir, 'Resources')
 +
 +              nageru_include_dirs += include_directories(cef_dir)
 +              nageru_include_dirs += include_directories(join_paths(cef_dir, 'include'))
 +              nageru_build_rpath = cef_lib_dir
 +              nageru_install_rpath = '$ORIGIN/'
 +      endif
 +
 +      cefdep = cxx.find_library('cef')
 +      nageru_deps += cefdep
 +
 +      # CEF wrapper library; not built as part of the CEF binary distribution,
 +      # but should be if CEF is installed as a system library.
 +      if system_cef
 +              cefdlldep = cxx.find_library('cef_dll_wrapper')
 +              nageru_deps += cefdlldep
 +      else
 +              cmake = find_program('cmake')
 +              cef_compile_script = find_program('scripts/compile_cef_dll_wrapper.sh')
 +
 +              cef_dll_target = custom_target('libcef_dll_wrapper',
 +                      input: join_paths(cef_dir, 'libcef_dll/CMakeLists.txt'),
 +                      output: ['libcef_dll_wrapper.a', 'cef-stamp'],
 +                      command: [cef_compile_script, '@BUILD_DIR@', cef_dir, cmake, '@OUTPUT@'])
 +
 +              # Putting the .a in sources seemingly hits a bug where the .a files get sorted
 +              # in the wrong order. This is a workaround; see
 +              # https://github.com/mesonbuild/meson/issues/3613#issuecomment-408276296 .
 +              cefdlldep = declare_dependency(sources: cef_dll_target[1], link_args: cef_dll_target.full_path())
 +              nageru_deps += cefdlldep
 +      endif
 +
 +      cef_libs = ['libEGL.so', 'libGLESv2.so', 'natives_blob.bin', 'snapshot_blob.bin', 'v8_context_snapshot.bin']
 +      cef_resources = ['cef.pak', 'cef_100_percent.pak', 'cef_200_percent.pak', 'cef_extensions.pak', 'devtools_resources.pak']
 +      if not get_option('cef_no_icudtl')
 +              cef_resources += ['icudtl.dat']
 +      endif
 +      if cef_build_type != 'system'
 +              cef_libs += ['libcef.so']
 +      endif
 +
 +      # Symlink the files into the build directory, so that running nageru without ninja install works.
 +      run_command('mkdir', join_paths(meson.current_build_dir(), 'locales/'))
 +      foreach file : cef_libs
 +              run_command('ln', '-s', join_paths(cef_lib_dir, file), meson.current_build_dir())
 +              install_data(join_paths(cef_lib_dir, file), install_dir: exe_dir)
 +      endforeach
 +      foreach file : cef_resources
 +              run_command('ln', '-s', join_paths(cef_resource_dir, file), meson.current_build_dir())
 +              install_data(join_paths(cef_resource_dir, file), install_dir: exe_dir)
 +      endforeach
 +      run_command('ln', '-s', join_paths(cef_resource_dir, 'locales/en-US.pak'), join_paths(meson.current_build_dir(), 'locales/'))
 +      install_data(join_paths(cef_resource_dir, 'locales/en-US.pak'), install_dir: join_paths(exe_dir, 'locales'))
 +endif
 +
 +# bmusb.
 +if embedded_bmusb
 +      bmusb_dir = include_directories('bmusb')
 +      nageru_include_dirs += bmusb_dir
 +
 +      bmusb = static_library('bmusb', 'bmusb/bmusb.cpp', 'bmusb/fake_capture.cpp',
 +              dependencies: [libusbdep],
 +              include_directories: [bmusb_dir])
 +      nageru_link_with += bmusb
 +      kaeru_link_with += bmusb
 +else
 +      nageru_deps += bmusbdep
 +      kaeru_extra_deps += bmusbdep
 +endif
 +
 +# Protobuf compilation.
 +gen = generator(protoc, \
 +      output    : ['@BASENAME@.pb.cc', '@BASENAME@.pb.h'],
 +      arguments : ['--proto_path=@CURRENT_SOURCE_DIR@', '--cpp_out=@BUILD_DIR@', '@INPUT@'])
 +proto_generated = gen.process(['state.proto', 'midi_mapping.proto', 'json.proto'])
 +protobuf_lib = static_library('protobufs', proto_generated, dependencies: nageru_deps, include_directories: nageru_include_dirs)
 +protobuf_hdrs = declare_dependency(sources: proto_generated)
 +nageru_link_with += protobuf_lib
 +
 +# Preprocess Qt as needed.
 +qt_files = qt5.preprocess(
 +      moc_headers: ['aboutdialog.h', 'analyzer.h', 'clickable_label.h', 'compression_reduction_meter.h', 'correlation_meter.h',
 +              'ellipsis_label.h', 'glwidget.h', 'input_mapping_dialog.h', 'lrameter.h', 'mainwindow.h', 'midi_mapping_dialog.h',
 +              'nonlinear_fader.h', 'vumeter.h'],
 +      ui_files: ['aboutdialog.ui', 'analyzer.ui', 'audio_expanded_view.ui', 'audio_miniview.ui', 'display.ui',
 +              'input_mapping.ui', 'mainwindow.ui', 'midi_mapping.ui'],
 +      dependencies: qt5deps)
 +
 +# Qt objects.
 +srcs += ['glwidget.cpp', 'mainwindow.cpp', 'vumeter.cpp', 'lrameter.cpp', 'compression_reduction_meter.cpp',
 +      'correlation_meter.cpp', 'aboutdialog.cpp', 'analyzer.cpp', 'input_mapping_dialog.cpp', 'midi_mapping_dialog.cpp',
 +      'nonlinear_fader.cpp', 'context_menus.cpp', 'vu_common.cpp', 'piecewise_interpolator.cpp', 'midi_mapper.cpp']
 +
 +# Auxiliary objects used for nearly everything.
 +aux_srcs = ['flags.cpp']
 +aux = static_library('aux', aux_srcs, dependencies: nageru_deps, include_directories: nageru_include_dirs)
 +nageru_link_with += aux
 +
 +# Audio objects.
 +audio_mixer_srcs = ['audio_mixer.cpp', 'alsa_input.cpp', 'alsa_pool.cpp', 'ebu_r128_proc.cc', 'stereocompressor.cpp',
 +      'resampling_queue.cpp', 'flags.cpp', 'correlation_measurer.cpp', 'filter.cpp', 'input_mapping.cpp']
 +audio = static_library('audio', audio_mixer_srcs, dependencies: [nageru_deps, protobuf_hdrs], include_directories: nageru_include_dirs)
 +nageru_link_with += audio
 +
 +# Mixer objects.
 +srcs += ['chroma_subsampler.cpp', 'v210_converter.cpp', 'mixer.cpp', 'pbo_frame_allocator.cpp',
 +      'theme.cpp', 'image_input.cpp', 'alsa_output.cpp',
-       'timecode_renderer.cpp', 'tweaked_inputs.cpp']
++      'timecode_renderer.cpp', 'tweaked_inputs.cpp', 'mjpeg_encoder.cpp']
 +
 +# Streaming and encoding objects (largely the set that is shared between Nageru and Kaeru).
 +stream_srcs = ['quicksync_encoder.cpp', 'x264_encoder.cpp', 'x264_dynamic.cpp', 'x264_speed_control.cpp', 'video_encoder.cpp',
 +      'audio_encoder.cpp', 'ffmpeg_util.cpp', 'ffmpeg_capture.cpp',
 +      'print_latency.cpp', 'basic_stats.cpp', 'ref_counted_frame.cpp']
 +stream = static_library('stream', stream_srcs, dependencies: nageru_deps, include_directories: nageru_include_dirs)
 +nageru_link_with += stream
 +
 +# DeckLink.
 +srcs += ['decklink_capture.cpp', 'decklink_util.cpp', 'decklink_output.cpp',
 +      'decklink/DeckLinkAPIDispatch.cpp']
 +decklink_dir = include_directories('decklink')
 +nageru_include_dirs += decklink_dir
 +
 +# CEF input.
 +if have_cef
 +      srcs += ['nageru_cef_app.cpp', 'cef_capture.cpp']
 +endif
 +
 +srcs += qt_files
 +srcs += proto_generated
 +
 +# Shaders needed at runtime.
 +shaders = ['cbcr_subsample.vert', 'cbcr_subsample.frag', 'uyvy_subsample.vert', 'uyvy_subsample.frag', 'v210_subsample.comp', 'timecode.vert', 'timecode.frag', 'timecode_10bit.frag']
 +foreach shader : shaders
 +  run_command('ln', '-s', join_paths(meson.current_source_dir(), shader), meson.current_build_dir())
 +endforeach
 +
 +shader_srcs = bin2h_gen.process(shaders)
 +srcs += shader_srcs
 +
 +# Everything except main.cpp. (We do this because if you specify a .cpp file in
 +# both Nageru and Kaeru, it gets compiled twice. In the older Makefiles, Kaeru
 +# depended on a smaller set of objects.)
 +core = static_library('core', srcs, dependencies: nageru_deps, include_directories: nageru_include_dirs)
 +nageru_link_with += core
 +
 +# Nageru executable; it goes into /usr/lib/nageru since CEF files go there, too
 +# (we can't put them straight into /usr/bin).
 +executable('nageru', 'main.cpp',
 +      dependencies: nageru_deps,
 +      include_directories: nageru_include_dirs,
 +      link_with: nageru_link_with,
 +      build_rpath: nageru_build_rpath,
 +      install_rpath: nageru_install_rpath,
 +      install: true,
 +      install_dir: exe_dir
 +)
 +meson.add_install_script('scripts/setup_nageru_symlink.sh')
 +
 +# Kaeru executable.
 +executable('kaeru', 'kaeru.cpp',
 +      dependencies: [nageru_deps, kaeru_extra_deps],
 +      include_directories: nageru_include_dirs,
 +      link_with: [stream, aux, kaeru_link_with],
 +      install: true)
 +
 +# Audio mixer microbenchmark.
 +executable('benchmark_audio_mixer', 'benchmark_audio_mixer.cpp', dependencies: nageru_deps, include_directories: nageru_include_dirs, link_with: [audio, aux])
 +
 +# These are needed for a default run.
 +data_files = ['theme.lua', 'simple.lua', 'bg.jpeg', 'akai_midimix.midimapping']
 +install_data(data_files, install_dir: join_paths(get_option('prefix'), 'share/nageru'))
 +foreach file : data_files
 +      run_command('ln', '-s', join_paths(meson.current_source_dir(), file), meson.current_build_dir())
 +endforeach
diff --cc nageru/mixer.cpp
index 0d782e8d8758674e033b867353041bb7598118b1,294040f1bfd6fc6dde71a793af91c536e2c9f12e..953fd8133db59410ab2973d7592a05e5d133bc09
  #include "ffmpeg_capture.h"
  #include "flags.h"
  #include "input_mapping.h"
 -#include "metrics.h"
 +#include "shared/metrics.h"
+ #include "mjpeg_encoder.h"
  #include "pbo_frame_allocator.h"
 -#include "ref_counted_gl_sync.h"
 +#include "shared/ref_counted_gl_sync.h"
  #include "resampling_queue.h"
 -#include "timebase.h"
 +#include "shared/timebase.h"
  #include "timecode_renderer.h"
  #include "v210_converter.h"
+ #include "va_display_with_cleanup.h"
  #include "video_encoder.h"
  
  #undef Status
diff --cc nageru/mixer.h
Simple merge
diff --cc nageru/mjpeg_encoder.cpp
index 0000000000000000000000000000000000000000,740b059211c6b2b097497d82ffe003fd5cdbd658..3587d78dcf3fa51a61a92af89ec362b3297b2d3e
mode 000000,100644..100644
--- /dev/null
@@@ -1,0 -1,677 +1,677 @@@
+ #include "mjpeg_encoder.h"
+ #include <jpeglib.h>
+ #include <unistd.h>
+ #if __SSE2__
+ #include <immintrin.h>
+ #endif
+ #include <list>
+ extern "C" {
+ #include <libavformat/avformat.h>
+ }
+ #include "defs.h"
 -#include "ffmpeg_raii.h"
++#include "shared/ffmpeg_raii.h"
+ #include "flags.h"
 -#include "httpd.h"
++#include "shared/httpd.h"
 -#include "memcpy_interleaved.h"
++#include "shared/memcpy_interleaved.h"
+ #include "pbo_frame_allocator.h"
 -#include "timebase.h"
++#include "shared/timebase.h"
+ #include "va_display_with_cleanup.h"
+ #include <va/va.h>
+ #include <va/va_drm.h>
+ #include <va/va_x11.h>
+ using namespace bmusb;
+ using namespace std;
+ extern void memcpy_with_pitch(uint8_t *dst, const uint8_t *src, size_t src_width, size_t dst_pitch, size_t height);
+ #define CHECK_VASTATUS(va_status, func)                                 \
+     if (va_status != VA_STATUS_SUCCESS) {                               \
+         fprintf(stderr, "%s:%d (%s) failed with %d\n", __func__, __LINE__, func, va_status); \
+         exit(1);                                                        \
+     }
+ // From libjpeg (although it's of course identical between implementations).
+ static const int jpeg_natural_order[DCTSIZE2] = {
+        0,  1,  8, 16,  9,  2,  3, 10,
+       17, 24, 32, 25, 18, 11,  4,  5,
+       12, 19, 26, 33, 40, 48, 41, 34,
+       27, 20, 13,  6,  7, 14, 21, 28,
+       35, 42, 49, 56, 57, 50, 43, 36,
+       29, 22, 15, 23, 30, 37, 44, 51,
+       58, 59, 52, 45, 38, 31, 39, 46,
+       53, 60, 61, 54, 47, 55, 62, 63,
+ };
+ struct VectorDestinationManager {
+       jpeg_destination_mgr pub;
+       std::vector<uint8_t> dest;
+       VectorDestinationManager()
+       {
+               pub.init_destination = init_destination_thunk;
+               pub.empty_output_buffer = empty_output_buffer_thunk;
+               pub.term_destination = term_destination_thunk;
+       }
+       static void init_destination_thunk(j_compress_ptr ptr)
+       {
+               ((VectorDestinationManager *)(ptr->dest))->init_destination();
+       }
+       inline void init_destination()
+       {
+               make_room(0);
+       }
+       static boolean empty_output_buffer_thunk(j_compress_ptr ptr)
+       {
+               return ((VectorDestinationManager *)(ptr->dest))->empty_output_buffer();
+       }
+       inline bool empty_output_buffer()
+       {
+               make_room(dest.size());  // Should ignore pub.free_in_buffer!
+               return true;
+       }
+       inline void make_room(size_t bytes_used)
+       {
+               dest.resize(bytes_used + 4096);
+               dest.resize(dest.capacity());
+               pub.next_output_byte = dest.data() + bytes_used;
+               pub.free_in_buffer = dest.size() - bytes_used;
+       }
+       static void term_destination_thunk(j_compress_ptr ptr)
+       {
+               ((VectorDestinationManager *)(ptr->dest))->term_destination();
+       }
+       inline void term_destination()
+       {
+               dest.resize(dest.size() - pub.free_in_buffer);
+       }
+ };
+ static_assert(std::is_standard_layout<VectorDestinationManager>::value, "");
+ int MJPEGEncoder::write_packet2_thunk(void *opaque, uint8_t *buf, int buf_size, AVIODataMarkerType type, int64_t time)
+ {
+       MJPEGEncoder *engine = (MJPEGEncoder *)opaque;
+       return engine->write_packet2(buf, buf_size, type, time);
+ }
+ int MJPEGEncoder::write_packet2(uint8_t *buf, int buf_size, AVIODataMarkerType type, int64_t time)
+ {
+       if (type == AVIO_DATA_MARKER_HEADER) {
+               mux_header.append((char *)buf, buf_size);
+               httpd->set_header(HTTPD::MULTICAM_STREAM, mux_header);
+       } else {
+               httpd->add_data(HTTPD::MULTICAM_STREAM, (char *)buf, buf_size, /*keyframe=*/true, AV_NOPTS_VALUE, AVRational{ AV_TIME_BASE, 1 });
+       }
+       return buf_size;
+ }
+ MJPEGEncoder::MJPEGEncoder(HTTPD *httpd, const string &va_display)
+       : httpd(httpd)
+ {
+       encoder_thread = thread(&MJPEGEncoder::encoder_thread_func, this);
+       // Set up the mux. We don't use the Mux wrapper, because it's geared towards
+       // a situation with only one video stream (and possibly one audio stream)
+       // with known width/height, and we don't need the extra functionality it provides.
+       avctx.reset(avformat_alloc_context());
+       avctx->oformat = av_guess_format("mp4", nullptr, nullptr);
+       uint8_t *buf = (uint8_t *)av_malloc(MUX_BUFFER_SIZE);
+       avctx->pb = avio_alloc_context(buf, MUX_BUFFER_SIZE, 1, this, nullptr, nullptr, nullptr);
+       avctx->pb->write_data_type = &MJPEGEncoder::write_packet2_thunk;
+       avctx->flags = AVFMT_FLAG_CUSTOM_IO;
+       for (int card_idx = 0; card_idx < global_flags.num_cards; ++card_idx) {
+               AVStream *stream = avformat_new_stream(avctx.get(), nullptr);
+               if (stream == nullptr) {
+                       fprintf(stderr, "avformat_new_stream() failed\n");
+                       exit(1);
+               }
+               stream->time_base = AVRational{ 1, TIMEBASE };
+               stream->codecpar->codec_type = AVMEDIA_TYPE_VIDEO;
+               stream->codecpar->codec_id = AV_CODEC_ID_MJPEG;
+               // Used for aspect ratio only. Can change without notice (the mux won't care).
+               stream->codecpar->width = global_flags.width;
+               stream->codecpar->height = global_flags.height;
+               // TODO: We could perhaps use the interpretation for each card here
+               // (or at least the command-line flags) instead of the defaults,
+               // but what would we do when they change?
+               stream->codecpar->color_primaries = AVCOL_PRI_BT709;
+               stream->codecpar->color_trc = AVCOL_TRC_IEC61966_2_1;
+               stream->codecpar->color_space = AVCOL_SPC_BT709;
+               stream->codecpar->color_range = AVCOL_RANGE_MPEG;
+               stream->codecpar->chroma_location = AVCHROMA_LOC_LEFT;
+               stream->codecpar->field_order = AV_FIELD_PROGRESSIVE;
+       }
+       AVDictionary *options = NULL;
+       vector<pair<string, string>> opts = MUX_OPTS;
+       for (pair<string, string> opt : opts) {
+               av_dict_set(&options, opt.first.c_str(), opt.second.c_str(), 0);
+       }
+       if (avformat_write_header(avctx.get(), &options) < 0) {
+               fprintf(stderr, "avformat_write_header() failed\n");
+               exit(1);
+       }
+       // Initialize VA-API.
+       string error;
+       va_dpy = try_open_va(va_display, &error, &config_id);
+       if (va_dpy == nullptr) {
+               fprintf(stderr, "Could not initialize VA-API for MJPEG encoding: %s. JPEGs will be encoded in software if needed.\n", error.c_str());
+       }
+       running = true;
+ }
+ void MJPEGEncoder::stop()
+ {
+       if (!running) {
+               return;
+       }
+       running = false;
+       should_quit = true;
+       any_frames_to_be_encoded.notify_all();
+       encoder_thread.join();
+ }
+ unique_ptr<VADisplayWithCleanup> MJPEGEncoder::try_open_va(const string &va_display, string *error, VAConfigID *config_id)
+ {
+       unique_ptr<VADisplayWithCleanup> va_dpy = va_open_display(va_display);
+       if (va_dpy == nullptr) {
+               if (error) *error = "Opening VA display failed";
+               return nullptr;
+       }
+       int major_ver, minor_ver;
+       VAStatus va_status = vaInitialize(va_dpy->va_dpy, &major_ver, &minor_ver);
+       if (va_status != VA_STATUS_SUCCESS) {
+               char buf[256];
+               snprintf(buf, sizeof(buf), "vaInitialize() failed with status %d\n", va_status);
+               if (error != nullptr) *error = buf;
+               return nullptr;
+       }
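+       // We encode 4:2:2 JPEGs (see init_jpeg_422() below), so ask the
+       // driver for a YUV422 render-target format.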
+       VAConfigAttrib attr = { VAConfigAttribRTFormat, VA_RT_FORMAT_YUV422 };
+       va_status = vaCreateConfig(va_dpy->va_dpy, VAProfileJPEGBaseline, VAEntrypointEncPicture,
+               &attr, 1, config_id);
+       if (va_status == VA_STATUS_ERROR_UNSUPPORTED_ENTRYPOINT) {
+               if (error != nullptr) *error = "No hardware support";
+               return nullptr;
+       } else if (va_status != VA_STATUS_SUCCESS) {
+               char buf[256];
+               snprintf(buf, sizeof(buf), "vaCreateConfig() failed with status %d\n", va_status);
+               if (error != nullptr) *error = buf;
+               return nullptr;
+       }
+       int num_formats = vaMaxNumImageFormats(va_dpy->va_dpy);
+       assert(num_formats > 0);
+       unique_ptr<VAImageFormat[]> formats(new VAImageFormat[num_formats]);
+       va_status = vaQueryImageFormats(va_dpy->va_dpy, formats.get(), &num_formats);
+       if (va_status != VA_STATUS_SUCCESS) {
+               char buf[256];
+               snprintf(buf, sizeof(buf), "vaQueryImageFormats() failed with status %d\n", va_status);
+               if (error != nullptr) *error = buf;
+               return nullptr;
+       }
+       return va_dpy;
+ }
+ void MJPEGEncoder::upload_frame(int64_t pts, unsigned card_index, RefCountedFrame frame, const bmusb::VideoFormat &video_format, size_t y_offset, size_t cbcr_offset)
+ {
+       PBOFrameAllocator::Userdata *userdata = (PBOFrameAllocator::Userdata *)frame->userdata;
+       if (video_format.width == 0 || video_format.height == 0) {
+               return;
+       }
+       if (video_format.interlaced) {
+               fprintf(stderr, "Card %u: Ignoring JPEG encoding for interlaced frame\n", card_index);
+               return;
+       }
+       if (userdata->pixel_format != PixelFormat_8BitYCbCr ||
+           !frame->interleaved) {
+               fprintf(stderr, "Card %u: Ignoring JPEG encoding for unsupported pixel format\n", card_index);
+               return;
+       }
+       if (video_format.width > 4096 || video_format.height > 4096) {
+               fprintf(stderr, "Card %u: Ignoring JPEG encoding for oversized frame\n", card_index);
+               return;
+       }
+       lock_guard<mutex> lock(mu);
+       frames_to_be_encoded.push(QueuedFrame{ pts, card_index, frame, video_format, y_offset, cbcr_offset });
+       any_frames_to_be_encoded.notify_all();
+ }
+ void MJPEGEncoder::encoder_thread_func()
+ {
+       pthread_setname_np(pthread_self(), "MJPEG_Encode");
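+       // Page-aligned scratch rows for feeding libjpeg: 8 rows of up to
+       // 4096 pixels each (4096 is also the maximum frame width accepted
+       // in upload_frame()).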
+       posix_memalign((void **)&tmp_y, 4096, 4096 * 8);
+       posix_memalign((void **)&tmp_cbcr, 4096, 4096 * 8);
+       posix_memalign((void **)&tmp_cb, 4096, 4096 * 8);
+       posix_memalign((void **)&tmp_cr, 4096, 4096 * 8);
+       unique_lock<mutex> lock(mu);
+       for (;;) {
+               any_frames_to_be_encoded.wait(lock, [this] { return !frames_to_be_encoded.empty() || should_quit; });
+               if (should_quit) return;
+               QueuedFrame qf = move(frames_to_be_encoded.front());
+               frames_to_be_encoded.pop();
+               vector<uint8_t> jpeg = encode_jpeg(qf);
+               AVPacket pkt;
+               memset(&pkt, 0, sizeof(pkt));
+               pkt.buf = nullptr;
+               pkt.data = &jpeg[0];
+               pkt.size = jpeg.size();
+               pkt.stream_index = qf.card_index;
+               pkt.flags = AV_PKT_FLAG_KEY;
+               pkt.pts = pkt.dts = qf.pts;
+               if (av_write_frame(avctx.get(), &pkt) < 0) {
+                       fprintf(stderr, "av_write_frame() failed\n");
+                       exit(1);
+               }
+       }
+ }
+ class VABufferDestroyer {
+ public:
+       VABufferDestroyer(VADisplay dpy, VABufferID buf)
+               : dpy(dpy), buf(buf) {}
+       ~VABufferDestroyer() {
+               VAStatus va_status = vaDestroyBuffer(dpy, buf);
+               CHECK_VASTATUS(va_status, "vaDestroyBuffer");
+       }
+ private:
+       VADisplay dpy;
+       VABufferID buf;
+ };
+ MJPEGEncoder::VAResources MJPEGEncoder::get_va_resources(unsigned width, unsigned height)
+ {
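+       // First see if we have a cached surface/context/coded-buffer set
+       // for this exact resolution; only create a new one if we don't.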
+       {
+               lock_guard<mutex> lock(va_resources_mutex);
+               for (auto it = va_resources_freelist.begin(); it != va_resources_freelist.end(); ++it) {
+                       if (it->width == width && it->height == height) {
+                               VAResources ret = *it;
+                               va_resources_freelist.erase(it);
+                               return ret;
+                       }
+               }
+       }
+       VAResources ret;
+       ret.width = width;
+       ret.height = height;
+       VASurfaceAttrib attrib;
+       attrib.flags = VA_SURFACE_ATTRIB_SETTABLE;
+       attrib.type = VASurfaceAttribPixelFormat;
+       attrib.value.type = VAGenericValueTypeInteger;
+       attrib.value.value.i = VA_FOURCC_UYVY;
+       VAStatus va_status = vaCreateSurfaces(va_dpy->va_dpy, VA_RT_FORMAT_YUV422,
+               width, height,
+               &ret.surface, 1, &attrib, 1);
+       CHECK_VASTATUS(va_status, "vaCreateSurfaces");
+       va_status = vaCreateContext(va_dpy->va_dpy, config_id, width, height, 0, &ret.surface, 1, &ret.context);
+       CHECK_VASTATUS(va_status, "vaCreateContext");
+       va_status = vaCreateBuffer(va_dpy->va_dpy, config_id, VAEncCodedBufferType, width * height * 3 + 8192, 1, nullptr, &ret.data_buffer);
+       CHECK_VASTATUS(va_status, "vaCreateBuffer");
+       return ret;
+ }
+ void MJPEGEncoder::release_va_resources(MJPEGEncoder::VAResources resources)
+ {
+       lock_guard<mutex> lock(va_resources_mutex);
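+       // Keep the freelist bounded: once it has grown past ten entries,
+       // destroy the set at the back before pushing this one to the front.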
+       if (va_resources_freelist.size() > 10) {
+               auto it = va_resources_freelist.end();
+               --it;
+               VAStatus va_status = vaDestroyBuffer(va_dpy->va_dpy, it->data_buffer);
+               CHECK_VASTATUS(va_status, "vaDestroyBuffer");
+               va_status = vaDestroyContext(va_dpy->va_dpy, it->context);
+               CHECK_VASTATUS(va_status, "vaDestroyContext");
+               va_status = vaDestroySurfaces(va_dpy->va_dpy, &it->surface, 1);
+               CHECK_VASTATUS(va_status, "vaDestroySurfaces");
+               va_resources_freelist.erase(it);
+       }
+       va_resources_freelist.push_front(resources);
+ }
+ void MJPEGEncoder::init_jpeg_422(unsigned width, unsigned height, VectorDestinationManager *dest, jpeg_compress_struct *cinfo)
+ {
+       jpeg_error_mgr jerr;
+       cinfo->err = jpeg_std_error(&jerr);
+       jpeg_create_compress(cinfo);
+       cinfo->dest = (jpeg_destination_mgr *)dest;
+       cinfo->input_components = 3;
+       jpeg_set_defaults(cinfo);
+       jpeg_set_quality(cinfo, quality, /*force_baseline=*/false);
+       cinfo->image_width = width;
+       cinfo->image_height = height;
+       cinfo->raw_data_in = true;
+       jpeg_set_colorspace(cinfo, JCS_YCbCr);
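+       // 2x1 luma vs. 1x1 chroma sampling factors give 4:2:2
+       // (chroma halved horizontally only), matching the input data.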
+       cinfo->comp_info[0].h_samp_factor = 2;
+       cinfo->comp_info[0].v_samp_factor = 1;
+       cinfo->comp_info[1].h_samp_factor = 1;
+       cinfo->comp_info[1].v_samp_factor = 1;
+       cinfo->comp_info[2].h_samp_factor = 1;
+       cinfo->comp_info[2].v_samp_factor = 1;
+       cinfo->CCIR601_sampling = true;  // Seems to be mostly ignored by libjpeg, though.
+       jpeg_start_compress(cinfo, true);
+ }
+
+ vector<uint8_t> MJPEGEncoder::get_jpeg_header(unsigned width, unsigned height, jpeg_compress_struct *cinfo)
+ {
+       VectorDestinationManager dest;
+       init_jpeg_422(width, height, &dest, cinfo);
+       // Make a dummy black image; there's seemingly no other easy way of
+       // making libjpeg output all of its headers.
+       JSAMPROW yptr[8], cbptr[8], crptr[8];
+       JSAMPARRAY data[3] = { yptr, cbptr, crptr };
+       memset(tmp_y, 0, 4096);
+       memset(tmp_cb, 0, 4096);
+       memset(tmp_cr, 0, 4096);
+       for (unsigned yy = 0; yy < 8; ++yy) {
+               yptr[yy] = tmp_y;
+               cbptr[yy] = tmp_cb;
+               crptr[yy] = tmp_cr;
+       }
+       for (unsigned y = 0; y < height; y += 8) {
+               jpeg_write_raw_data(cinfo, data, /*num_lines=*/8);
+       }
+       jpeg_finish_compress(cinfo);
+       // We're only interested in the header, not the data after it.
+       dest.term_destination();
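+       // The marker's two-byte length field counts itself but not the
+       // FF DA marker bytes, so i + len + 2 keeps everything up to and
+       // including the end of the SOS header.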
+       for (size_t i = 0; i < dest.dest.size() - 1; ++i) {
+               if (dest.dest[i] == 0xff && dest.dest[i + 1] == 0xda) {  // Start of scan (SOS).
+                       unsigned len = dest.dest[i + 2] * 256 + dest.dest[i + 3];
+                       dest.dest.resize(i + len + 2);
+                       break;
+               }
+       }
+       return dest.dest;
+ }
+
+ MJPEGEncoder::VAData MJPEGEncoder::get_va_data_for_resolution(unsigned width, unsigned height)
+ {
+       pair<unsigned, unsigned> key(width, height);
+       if (va_data_for_resolution.count(key)) {
+               return va_data_for_resolution[key];
+       }
+       // Use libjpeg to generate a header and set sane defaults for e.g.
+       // quantization tables. Then do the actual encode with VA-API.
+       jpeg_compress_struct cinfo;
+       vector<uint8_t> jpeg_header = get_jpeg_header(width, height, &cinfo);
+       // Picture parameters.
+       VAEncPictureParameterBufferJPEG pic_param;
+       memset(&pic_param, 0, sizeof(pic_param));
+       pic_param.reconstructed_picture = VA_INVALID_ID;
+       pic_param.picture_width = cinfo.image_width;
+       pic_param.picture_height = cinfo.image_height;
+       for (int component_idx = 0; component_idx < cinfo.num_components; ++component_idx) {
+               const jpeg_component_info *comp = &cinfo.comp_info[component_idx];
+               pic_param.component_id[component_idx] = comp->component_id;
+               pic_param.quantiser_table_selector[component_idx] = comp->quant_tbl_no;
+       }
+       pic_param.num_components = cinfo.num_components;
+       pic_param.num_scan = 1;
+       pic_param.sample_bit_depth = 8;
+       pic_param.coded_buf = VA_INVALID_ID;  // To be filled out by caller.
+       pic_param.pic_flags.bits.huffman = 1;
+       pic_param.quality = 50;  // Don't scale the given quantization matrices. (See gen8_mfc_jpeg_fqm_state)
+       // Quantization matrices.
+       VAQMatrixBufferJPEG q;
+       memset(&q, 0, sizeof(q));
+       q.load_lum_quantiser_matrix = true;
+       q.load_chroma_quantiser_matrix = true;
+       for (int quant_tbl_idx = 0; quant_tbl_idx < min(4, NUM_QUANT_TBLS); ++quant_tbl_idx) {
+               const JQUANT_TBL *qtbl = cinfo.quant_tbl_ptrs[quant_tbl_idx];
+               assert((qtbl == nullptr) == (quant_tbl_idx >= 2));
+               if (qtbl == nullptr) continue;
+               uint8_t *qmatrix = (quant_tbl_idx == 0) ? q.lum_quantiser_matrix : q.chroma_quantiser_matrix;
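+               // libjpeg keeps quantval[] in natural order; the bitstream
+               // (and VA-API) wants zigzag order, so remap while copying.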
+               for (int i = 0; i < 64; ++i) {
+                       if (qtbl->quantval[i] > 255) {
+                               fprintf(stderr, "Baseline JPEG only!\n");
+                               abort();
+                       }
+                       qmatrix[i] = qtbl->quantval[jpeg_natural_order[i]];
+               }
+       }
+       // Huffman tables (arithmetic is not supported).
+       VAHuffmanTableBufferJPEGBaseline huff;
+       memset(&huff, 0, sizeof(huff));
+       for (int huff_tbl_idx = 0; huff_tbl_idx < min(2, NUM_HUFF_TBLS); ++huff_tbl_idx) {
+               const JHUFF_TBL *ac_hufftbl = cinfo.ac_huff_tbl_ptrs[huff_tbl_idx];
+               const JHUFF_TBL *dc_hufftbl = cinfo.dc_huff_tbl_ptrs[huff_tbl_idx];
+               if (ac_hufftbl == nullptr) {
+                       assert(dc_hufftbl == nullptr);
+                       huff.load_huffman_table[huff_tbl_idx] = 0;
+               } else {
+                       assert(dc_hufftbl != nullptr);
+                       huff.load_huffman_table[huff_tbl_idx] = 1;
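+                       // libjpeg's bits[] is 1-indexed (bits[k] = number of
+                       // codes of length k bits); VA-API's arrays start at 0.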
+                       for (int i = 0; i < 16; ++i) {
+                               huff.huffman_table[huff_tbl_idx].num_dc_codes[i] = dc_hufftbl->bits[i + 1];
+                       }
+                       for (int i = 0; i < 12; ++i) {
+                               huff.huffman_table[huff_tbl_idx].dc_values[i] = dc_hufftbl->huffval[i];
+                       }
+                       for (int i = 0; i < 16; ++i) {
+                               huff.huffman_table[huff_tbl_idx].num_ac_codes[i] = ac_hufftbl->bits[i + 1];
+                       }
+                       for (int i = 0; i < 162; ++i) {
+                               huff.huffman_table[huff_tbl_idx].ac_values[i] = ac_hufftbl->huffval[i];
+                       }
+               }
+       }
+       // Slice parameters (metadata about the slice).
+       VAEncSliceParameterBufferJPEG parms;
+       memset(&parms, 0, sizeof(parms));
+       for (int component_idx = 0; component_idx < cinfo.num_components; ++component_idx) {
+               const jpeg_component_info *comp = &cinfo.comp_info[component_idx];
+               parms.components[component_idx].component_selector = comp->component_id;
+               parms.components[component_idx].dc_table_selector = comp->dc_tbl_no;
+               parms.components[component_idx].ac_table_selector = comp->ac_tbl_no;
+               if (parms.components[component_idx].dc_table_selector > 1 ||
+                   parms.components[component_idx].ac_table_selector > 1) {
+                       fprintf(stderr, "Uses too many Huffman tables\n");
+                       abort();
+               }
+       }
+       parms.num_components = cinfo.num_components;
+       parms.restart_interval = cinfo.restart_interval;
+       jpeg_destroy_compress(&cinfo);
+       VAData ret;
+       ret.jpeg_header = move(jpeg_header);
+       ret.pic_param = pic_param;
+       ret.q = q;
+       ret.huff = huff;
+       ret.parms = parms;
+       va_data_for_resolution[key] = ret;
+       return ret;
+ }
+
+ vector<uint8_t> MJPEGEncoder::encode_jpeg(const QueuedFrame &qf)
+ {
+       if (va_dpy != nullptr) {
+               return encode_jpeg_va(qf);
+       } else {
+               return encode_jpeg_libjpeg(qf);
+       }
+ }
+
+ vector<uint8_t> MJPEGEncoder::encode_jpeg_va(const QueuedFrame &qf)
+ {
+       unsigned width = qf.video_format.width;
+       unsigned height = qf.video_format.height;
+       VAResources resources = get_va_resources(width, height);
+       ReleaseVAResources release(this, resources);
+       VAData va_data = get_va_data_for_resolution(width, height);
+       va_data.pic_param.coded_buf = resources.data_buffer;
+       VABufferID pic_param_buffer;
+       VAStatus va_status = vaCreateBuffer(va_dpy->va_dpy, config_id, VAEncPictureParameterBufferType, sizeof(va_data.pic_param), 1, &va_data.pic_param, &pic_param_buffer);
+       CHECK_VASTATUS(va_status, "vaCreateBuffer");
+       VABufferDestroyer destroy_pic_param(va_dpy->va_dpy, pic_param_buffer);
+       VABufferID q_buffer;
+       va_status = vaCreateBuffer(va_dpy->va_dpy, config_id, VAQMatrixBufferType, sizeof(va_data.q), 1, &va_data.q, &q_buffer);
+       CHECK_VASTATUS(va_status, "vaCreateBuffer");
+       VABufferDestroyer destroy_iq(va_dpy->va_dpy, q_buffer);
+       VABufferID huff_buffer;
+       va_status = vaCreateBuffer(va_dpy->va_dpy, config_id, VAHuffmanTableBufferType, sizeof(va_data.huff), 1, &va_data.huff, &huff_buffer);
+       CHECK_VASTATUS(va_status, "vaCreateBuffer");
+       VABufferDestroyer destroy_huff(va_dpy->va_dpy, huff_buffer);
+       VABufferID slice_param_buffer;
+       va_status = vaCreateBuffer(va_dpy->va_dpy, config_id, VAEncSliceParameterBufferType, sizeof(va_data.parms), 1, &va_data.parms, &slice_param_buffer);
+       CHECK_VASTATUS(va_status, "vaCreateBuffer");
+       VABufferDestroyer destroy_slice_param(va_dpy->va_dpy, slice_param_buffer);
+       VAImage image;
+       va_status = vaDeriveImage(va_dpy->va_dpy, resources.surface, &image);
+       CHECK_VASTATUS(va_status, "vaDeriveImage");
+       // Upload the pixel data.
+       uint8_t *surface_p = nullptr;
+       va_status = vaMapBuffer(va_dpy->va_dpy, image.buf, (void **)&surface_p);
+       CHECK_VASTATUS(va_status, "vaMapBuffer");
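+       // data_copy holds the frame as packed 4:2:2 (two bytes per pixel);
+       // compute the byte offset of the first active line, skipping any
+       // extra lines at the top.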
+       size_t field_start_line = qf.video_format.extra_lines_top;  // No interlacing support.
+       size_t field_start = qf.cbcr_offset * 2 + qf.video_format.width * field_start_line * 2;
+       {
+               const uint8_t *src = qf.frame->data_copy + field_start;
+               uint8_t *dst = (unsigned char *)surface_p + image.offsets[0];
+               memcpy_with_pitch(dst, src, qf.video_format.width * 2, image.pitches[0], qf.video_format.height);
+       }
+       va_status = vaUnmapBuffer(va_dpy->va_dpy, image.buf);
+       CHECK_VASTATUS(va_status, "vaUnmapBuffer");
+       va_status = vaDestroyImage(va_dpy->va_dpy, image.image_id);
+       CHECK_VASTATUS(va_status, "vaDestroyImage");
+       // Finally, stick in the JPEG header.
+       VAEncPackedHeaderParameterBuffer header_parm;
+       memset(&header_parm, 0, sizeof(header_parm));
+       header_parm.type = VAEncPackedHeaderRawData;
+       header_parm.bit_length = 8 * va_data.jpeg_header.size();
+       VABufferID header_parm_buffer;
+       va_status = vaCreateBuffer(va_dpy->va_dpy, config_id, VAEncPackedHeaderParameterBufferType, sizeof(header_parm), 1, &header_parm, &header_parm_buffer);
+       CHECK_VASTATUS(va_status, "vaCreateBuffer");
+       VABufferDestroyer destroy_header(va_dpy->va_dpy, header_parm_buffer);
+       VABufferID header_data_buffer;
+       va_status = vaCreateBuffer(va_dpy->va_dpy, config_id, VAEncPackedHeaderDataBufferType, va_data.jpeg_header.size(), 1, va_data.jpeg_header.data(), &header_data_buffer);
+       CHECK_VASTATUS(va_status, "vaCreateBuffer");
+       VABufferDestroyer destroy_header_data(va_dpy->va_dpy, header_data_buffer);
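+       // Submit all the parameter buffers and run the encode; vaSyncSurface()
+       // blocks until the hardware is done with the surface, so the coded
+       // buffer is safe to map afterwards.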
+       va_status = vaBeginPicture(va_dpy->va_dpy, resources.context, resources.surface);
+       CHECK_VASTATUS(va_status, "vaBeginPicture");
+       va_status = vaRenderPicture(va_dpy->va_dpy, resources.context, &pic_param_buffer, 1);
+       CHECK_VASTATUS(va_status, "vaRenderPicture(pic_param)");
+       va_status = vaRenderPicture(va_dpy->va_dpy, resources.context, &q_buffer, 1);
+       CHECK_VASTATUS(va_status, "vaRenderPicture(q)");
+       va_status = vaRenderPicture(va_dpy->va_dpy, resources.context, &huff_buffer, 1);
+       CHECK_VASTATUS(va_status, "vaRenderPicture(huff)");
+       va_status = vaRenderPicture(va_dpy->va_dpy, resources.context, &slice_param_buffer, 1);
+       CHECK_VASTATUS(va_status, "vaRenderPicture(slice_param)");
+       va_status = vaRenderPicture(va_dpy->va_dpy, resources.context, &header_parm_buffer, 1);
+       CHECK_VASTATUS(va_status, "vaRenderPicture(header_parm)");
+       va_status = vaRenderPicture(va_dpy->va_dpy, resources.context, &header_data_buffer, 1);
+       CHECK_VASTATUS(va_status, "vaRenderPicture(header_data)");
+       va_status = vaEndPicture(va_dpy->va_dpy, resources.context);
+       CHECK_VASTATUS(va_status, "vaEndPicture");
+       va_status = vaSyncSurface(va_dpy->va_dpy, resources.surface);
+       CHECK_VASTATUS(va_status, "vaSyncSurface");
+       VACodedBufferSegment *segment;
+       va_status = vaMapBuffer(va_dpy->va_dpy, resources.data_buffer, (void **)&segment);
+       CHECK_VASTATUS(va_status, "vaMapBuffer");
+       const char *coded_buf = reinterpret_cast<char *>(segment->buf);
+       vector<uint8_t> jpeg(coded_buf, coded_buf + segment->size);
+       va_status = vaUnmapBuffer(va_dpy->va_dpy, resources.data_buffer);
+       CHECK_VASTATUS(va_status, "vaUnmapBuffer");
+       return jpeg;
+ }
+
+ vector<uint8_t> MJPEGEncoder::encode_jpeg_libjpeg(const QueuedFrame &qf)
+ {
+       unsigned width = qf.video_format.width;
+       unsigned height = qf.video_format.height;
+       VectorDestinationManager dest;
+       jpeg_compress_struct cinfo;
+       init_jpeg_422(width, height, &dest, &cinfo);
+       size_t field_start_line = qf.video_format.extra_lines_top;  // No interlacing support.
+       size_t field_start = qf.cbcr_offset * 2 + qf.video_format.width * field_start_line * 2;
+       JSAMPROW yptr[8], cbptr[8], crptr[8];
+       JSAMPARRAY data[3] = { yptr, cbptr, crptr };
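+       // jpeg_write_raw_data() expects planar input, so deinterleave the
+       // packed 4:2:2 data into separate Y, Cb and Cr rows, eight scanlines
+       // at a time.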
+       for (unsigned y = 0; y < qf.video_format.height; y += 8) {
+               const uint8_t *src = qf.frame->data_copy + field_start + y * qf.video_format.width * 2;
+               memcpy_interleaved(tmp_y, tmp_cbcr, src, qf.video_format.width * 8 * 2);
+               memcpy_interleaved(tmp_cb, tmp_cr, tmp_cbcr, qf.video_format.width * 8);
+               for (unsigned yy = 0; yy < 8; ++yy) {
+                       yptr[yy] = tmp_y + yy * width;
+                       cbptr[yy] = tmp_cb + yy * width / 2;
+                       crptr[yy] = tmp_cr + yy * width / 2;
+               }
+               jpeg_write_raw_data(&cinfo, data, /*num_lines=*/8);
+       }
+       jpeg_finish_compress(&cinfo);
+       return dest.dest;
+ }
index 0000000000000000000000000000000000000000,ab8632a7f9c5c809ee33f075c20a546219a74891..3ce34396efa4253290efe66e1a59a2574ef757b8
mode 000000,100644..100644
--- /dev/null
@@@ -1,0 -1,121 +1,121 @@@
 -#include "ffmpeg_raii.h"
+ #ifndef _MJPEG_ENCODER_H
+ #define _MJPEG_ENCODER_H 1
++#include "shared/ffmpeg_raii.h"
+ #include "ref_counted_frame.h"
+ extern "C" {
+ #include <libavformat/avio.h>
+ }  // extern "C"
+ #include <atomic>
+ #include <bmusb/bmusb.h>
+ #include <condition_variable>
+ #include <list>
+ #include <mutex>
+ #include <queue>
+ #include <stdint.h>
+ #include <string>
+ #include <thread>
+ #include <va/va.h>
+ class HTTPD;
+ struct jpeg_compress_struct;
+ struct VADisplayWithCleanup;
+ struct VectorDestinationManager;
+ class MJPEGEncoder {
+ public:
+       MJPEGEncoder(HTTPD *httpd, const std::string &va_display);
+       void stop();
+       void upload_frame(int64_t pts, unsigned card_index, RefCountedFrame frame, const bmusb::VideoFormat &video_format, size_t y_offset, size_t cbcr_offset);
+ private:
+       static constexpr int quality = 90;
+       struct QueuedFrame {
+               int64_t pts;
+               unsigned card_index;
+               RefCountedFrame frame;
+               bmusb::VideoFormat video_format;
+               size_t y_offset, cbcr_offset;
+       };
+       void encoder_thread_func();
+       std::vector<uint8_t> encode_jpeg(const QueuedFrame &qf);
+       std::vector<uint8_t> encode_jpeg_va(const QueuedFrame &qf);
+       std::vector<uint8_t> encode_jpeg_libjpeg(const QueuedFrame &qf);
+       void init_jpeg_422(unsigned width, unsigned height, VectorDestinationManager *dest, jpeg_compress_struct *cinfo);
+       std::vector<uint8_t> get_jpeg_header(unsigned width, unsigned height, jpeg_compress_struct *cinfo);
+       static int write_packet2_thunk(void *opaque, uint8_t *buf, int buf_size, AVIODataMarkerType type, int64_t time);
+       int write_packet2(uint8_t *buf, int buf_size, AVIODataMarkerType type, int64_t time);
+       std::thread encoder_thread;
+       std::mutex mu;
+       std::queue<QueuedFrame> frames_to_be_encoded;  // Under mu.
+       std::condition_variable any_frames_to_be_encoded;
+       std::queue<QueuedFrame> frames_encoding;  // Under mu.
+       std::condition_variable any_frames_encoding;
+       AVFormatContextWithCloser avctx;
+       HTTPD *httpd;
+       std::string mux_header;
+       std::atomic<bool> should_quit{false};
+       bool running = false;
+       std::unique_ptr<VADisplayWithCleanup> va_dpy;
+       VAConfigID config_id;
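+       // Everything needed to encode at a given resolution: the JPEG header
+       // plus picture/quantization/Huffman/slice parameters, generated once
+       // via libjpeg and then cached.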
+       struct VAData {
+               std::vector<uint8_t> jpeg_header;
+               VAEncPictureParameterBufferJPEG pic_param;
+               VAQMatrixBufferJPEG q;
+               VAHuffmanTableBufferJPEGBaseline huff;
+               VAEncSliceParameterBufferJPEG parms;
+       };
+       std::map<std::pair<unsigned, unsigned>, VAData> va_data_for_resolution;
+       VAData get_va_data_for_resolution(unsigned width, unsigned height);
+       struct VAResources {
+               unsigned width, height;
+               VASurfaceID surface;
+               VAContextID context;
+               VABufferID data_buffer;
+       };
+       std::list<VAResources> va_resources_freelist;
+       std::mutex va_resources_mutex;
+       VAResources get_va_resources(unsigned width, unsigned height);
+       void release_va_resources(VAResources resources);
+       // RAII wrapper to release VAResources on return (even on error).
+       class ReleaseVAResources {
+       public:
+               ReleaseVAResources(MJPEGEncoder *mjpeg, const VAResources &resources)
+                       : mjpeg(mjpeg), resources(resources) {}
+               ~ReleaseVAResources()
+               {
+                       if (!committed) {
+                               mjpeg->release_va_resources(resources);
+                       }
+               }
+               void commit() { committed = true; }
+       private:
+               MJPEGEncoder * const mjpeg;
+               const VAResources &resources;
+               bool committed = false;
+       };
+       static std::unique_ptr<VADisplayWithCleanup> try_open_va(const std::string &va_display, std::string *error, VAConfigID *config_id);
+       uint8_t *tmp_y, *tmp_cbcr, *tmp_cb, *tmp_cr;  // Private to the encoder thread.
+ };
+ #endif  // !defined(_MJPEG_ENCODER_H)
Simple merge
Simple merge
index e0da15d848e3cbf615aa85d0e1af2c09f1e759fc,0317b6af0393723bb1ca38ac0100947d8c800c85..5e215e5b691bfa3f81ff317ce58404415c8d4c3d
  
  #include "audio_encoder.h"
  #include "defs.h"
 -#include "timebase.h"
 +#include "shared/timebase.h"
  #include "print_latency.h"
 -#include "ref_counted_gl_sync.h"
 +#include "shared/ref_counted_gl_sync.h"
+ #include "va_display_with_cleanup.h"
  
  #define SURFACE_NUM 16 /* 16 surfaces for source YUV */
  #define MAX_NUM_REF1 16 // Seemingly a hardware-fixed value, not related to SURFACE_NUM
Simple merge
Simple merge
diff --cc shared/httpd.h
index 5b2b266c36e70788b2daabe018251d50392fcd7a,1ff5c51108facf03ca1b0d8fc2f076a63938e595..8c3c8105c5b959155abbd19d95b10a1fcfc0ffa6
@@@ -49,9 -53,8 +54,9 @@@ public
  
        void start(int port);
        void stop();
-       void add_data(const char *buf, size_t size, bool keyframe, int64_t time, AVRational timebase);
+       void add_data(StreamType stream_type, const char *buf, size_t size, bool keyframe, int64_t time, AVRational timebase);
 -      int64_t get_num_connected_clients() const {
 +      int64_t get_num_connected_clients() const
 +      {
                return metric_num_connected_clients.load();
        }
  
@@@ -109,10 -115,10 +116,10 @@@ private
                CORSPolicy cors_policy;
        };
        std::unordered_map<std::string, Endpoint> endpoints;
-       std::string header;
+       std::string header[NUM_STREAM_TYPES];
  
        // Metrics.
 -      std::atomic<int64_t> metric_num_connected_clients{0};
 +      std::atomic<int64_t> metric_num_connected_clients{ 0 };
  };
  
  #endif  // !defined(_HTTPD_H)