X-Git-Url: https://git.sesse.net/?p=nageru;a=blobdiff_plain;f=nageru%2Fmjpeg_encoder.cpp;h=867414d44127d5019cca822102de519ef3f6bcf8;hp=f920bf5508e00fc0ba490ebdf4a18aea07bf0099;hb=6160ed8911e4ed3f7ca3589a5357ae813e27175e;hpb=436a0a699e978e7d8580406759b2642026ad6df6

diff --git a/nageru/mjpeg_encoder.cpp b/nageru/mjpeg_encoder.cpp
index f920bf5..867414d 100644
--- a/nageru/mjpeg_encoder.cpp
+++ b/nageru/mjpeg_encoder.cpp
@@ -16,24 +16,25 @@ extern "C" {
 #include "flags.h"
 #include "shared/httpd.h"
 #include "shared/memcpy_interleaved.h"
+#include "shared/metrics.h"
 #include "pbo_frame_allocator.h"
 #include "shared/timebase.h"
 #include "va_display_with_cleanup.h"
 
+#include <movit/colorspace_conversion_effect.h>
+
 #include <va/va.h>
 #include <va/va_drm.h>
 #include <va/va_x11.h>
 
+using namespace Eigen;
 using namespace bmusb;
+using namespace movit;
 using namespace std;
 
-extern void memcpy_with_pitch(uint8_t *dst, const uint8_t *src, size_t src_width, size_t dst_pitch, size_t height);
+static VAImageFormat uyvy_format;
 
-#define CHECK_VASTATUS(va_status, func) \
-	if (va_status != VA_STATUS_SUCCESS) { \
-		fprintf(stderr, "%s:%d (%s) failed with %d\n", __func__, __LINE__, func, va_status); \
-		exit(1); \
-	}
+extern void memcpy_with_pitch(uint8_t *dst, const uint8_t *src, size_t src_width, size_t dst_pitch, size_t height);
 
 // From libjpeg (although it's of course identical between implementations).
 static const int jpeg_natural_order[DCTSIZE2] = {
@@ -116,6 +117,68 @@ int MJPEGEncoder::write_packet2(uint8_t *buf, int buf_size, AVIODataMarkerType t
 	return buf_size;
 }
 
+namespace {
+
+void add_video_stream(AVFormatContext *avctx)
+{
+	AVStream *stream = avformat_new_stream(avctx, nullptr);
+	if (stream == nullptr) {
+		fprintf(stderr, "avformat_new_stream() failed\n");
+		abort();
+	}
+
+	// FFmpeg is very picky about having audio at 1/48000 timebase,
+	// no matter what we write. Even though we'd prefer our usual 1/120000,
+	// put the video on the same one, so that we can have locked audio.
+	stream->time_base = AVRational{ 1, OUTPUT_FREQUENCY };
+	stream->codecpar->codec_type = AVMEDIA_TYPE_VIDEO;
+	stream->codecpar->codec_id = AV_CODEC_ID_MJPEG;
+
+	// Used for aspect ratio only. Can change without notice (the mux won't care).
+	stream->codecpar->width = global_flags.width;
+	stream->codecpar->height = global_flags.height;
+
+	// TODO: We could perhaps use the interpretation for each card here
+	// (or at least the command-line flags) instead of the defaults,
+	// but what would we do when they change?
+	stream->codecpar->color_primaries = AVCOL_PRI_BT709;
+	stream->codecpar->color_trc = AVCOL_TRC_IEC61966_2_1;
+	stream->codecpar->color_space = AVCOL_SPC_BT709;
+	stream->codecpar->color_range = AVCOL_RANGE_MPEG;
+	stream->codecpar->chroma_location = AVCHROMA_LOC_LEFT;
+	stream->codecpar->field_order = AV_FIELD_PROGRESSIVE;
+}
+
+void add_audio_stream(AVFormatContext *avctx)
+{
+	AVStream *stream = avformat_new_stream(avctx, nullptr);
+	if (stream == nullptr) {
+		fprintf(stderr, "avformat_new_stream() failed\n");
+		abort();
+	}
+	stream->time_base = AVRational{ 1, OUTPUT_FREQUENCY };
+	stream->codecpar->codec_type = AVMEDIA_TYPE_AUDIO;
+	stream->codecpar->codec_id = AV_CODEC_ID_PCM_S32LE;
+	stream->codecpar->channel_layout = AV_CH_LAYOUT_STEREO;
+	stream->codecpar->channels = 2;
+	stream->codecpar->sample_rate = OUTPUT_FREQUENCY;
+}
+
+void finalize_mux(AVFormatContext *avctx)
+{
+	AVDictionary *options = NULL;
+	vector<pair<string, string>> opts = MUX_OPTS;
+	for (pair<string, string> opt : opts) {
+		av_dict_set(&options, opt.first.c_str(), opt.second.c_str(), 0);
+	}
+	if (avformat_write_header(avctx, &options) < 0) {
+		fprintf(stderr, "avformat_write_header() failed\n");
+		abort();
+	}
+}
+
+} // namespace
+
 MJPEGEncoder::MJPEGEncoder(HTTPD *httpd, const string &va_display)
 	: httpd(httpd)
 {
@@ -123,7 +186,7 @@ MJPEGEncoder::MJPEGEncoder(HTTPD *httpd, const string &va_display)
 	// a situation with only one video stream (and possibly one audio stream)
 	// with known width/height, and we don't need the extra functionality it provides.
 	avctx.reset(avformat_alloc_context());
-	avctx->oformat = av_guess_format("mp4", nullptr, nullptr);
+	avctx->oformat = av_guess_format("nut", nullptr, nullptr);
 
 	uint8_t *buf = (uint8_t *)av_malloc(MUX_BUFFER_SIZE);
 	avctx->pb = avio_alloc_context(buf, MUX_BUFFER_SIZE, 1, this, nullptr, nullptr, nullptr);
@@ -131,39 +194,12 @@ MJPEGEncoder::MJPEGEncoder(HTTPD *httpd, const string &va_display)
 	avctx->flags = AVFMT_FLAG_CUSTOM_IO;
 
 	for (unsigned card_idx = 0; card_idx < global_flags.card_to_mjpeg_stream_export.size(); ++card_idx) {
-		AVStream *stream = avformat_new_stream(avctx.get(), nullptr);
-		if (stream == nullptr) {
-			fprintf(stderr, "avformat_new_stream() failed\n");
-			exit(1);
-		}
-		stream->time_base = AVRational{ 1, TIMEBASE };
-		stream->codecpar->codec_type = AVMEDIA_TYPE_VIDEO;
-		stream->codecpar->codec_id = AV_CODEC_ID_MJPEG;
-
-		// Used for aspect ratio only. Can change without notice (the mux won't care).
-		stream->codecpar->width = global_flags.width;
-		stream->codecpar->height = global_flags.height;
-
-		// TODO: We could perhaps use the interpretation for each card here
-		// (or at least the command-line flags) instead of the defaults,
-		// but what would we do when they change?
-		stream->codecpar->color_primaries = AVCOL_PRI_BT709;
-		stream->codecpar->color_trc = AVCOL_TRC_IEC61966_2_1;
-		stream->codecpar->color_space = AVCOL_SPC_BT709;
-		stream->codecpar->color_range = AVCOL_RANGE_MPEG;
-		stream->codecpar->chroma_location = AVCHROMA_LOC_LEFT;
-		stream->codecpar->field_order = AV_FIELD_PROGRESSIVE;
+		add_video_stream(avctx.get());
 	}
-
-	AVDictionary *options = NULL;
-	vector<pair<string, string>> opts = MUX_OPTS;
-	for (pair<string, string> opt : opts) {
-		av_dict_set(&options, opt.first.c_str(), opt.second.c_str(), 0);
-	}
-	if (avformat_write_header(avctx.get(), &options) < 0) {
-		fprintf(stderr, "avformat_write_header() failed\n");
-		exit(1);
+	for (unsigned card_idx = 0; card_idx < global_flags.card_to_mjpeg_stream_export.size(); ++card_idx) {
+		add_audio_stream(avctx.get());
 	}
+	finalize_mux(avctx.get());
 
 	// Initialize VA-API.
 	string error;
@@ -177,12 +213,26 @@ MJPEGEncoder::MJPEGEncoder(HTTPD *httpd, const string &va_display)
 		va_receiver_thread = thread(&MJPEGEncoder::va_receiver_thread_func, this);
 	}
 
+	global_metrics.add("mjpeg_frames", {{ "status", "dropped" }, { "reason", "zero_size" }}, &metric_mjpeg_frames_zero_size_dropped);
+	global_metrics.add("mjpeg_frames", {{ "status", "dropped" }, { "reason", "interlaced" }}, &metric_mjpeg_frames_interlaced_dropped);
+	global_metrics.add("mjpeg_frames", {{ "status", "dropped" }, { "reason", "unsupported_pixel_format" }}, &metric_mjpeg_frames_unsupported_pixel_format_dropped);
+	global_metrics.add("mjpeg_frames", {{ "status", "dropped" }, { "reason", "oversized" }}, &metric_mjpeg_frames_oversized_dropped);
+	global_metrics.add("mjpeg_frames", {{ "status", "dropped" }, { "reason", "overrun" }}, &metric_mjpeg_overrun_dropped);
+	global_metrics.add("mjpeg_frames", {{ "status", "submitted" }}, &metric_mjpeg_overrun_submitted);
+
 	running = true;
 }
 
 MJPEGEncoder::~MJPEGEncoder()
 {
 	av_free(avctx->pb->buffer);
+
+	global_metrics.remove("mjpeg_frames", {{ "status", "dropped" }, { "reason", "zero_size" }});
+	global_metrics.remove("mjpeg_frames", {{ "status", "dropped" }, { "reason", "interlaced" }});
+	global_metrics.remove("mjpeg_frames", {{ "status", "dropped" }, { "reason", "unsupported_pixel_format" }});
+	global_metrics.remove("mjpeg_frames", {{ "status", "dropped" }, { "reason", "oversized" }});
+	global_metrics.remove("mjpeg_frames", {{ "status", "dropped" }, { "reason", "overrun" }});
+	global_metrics.remove("mjpeg_frames", {{ "status", "submitted" }});
 }
 
 void MJPEGEncoder::stop()
@@ -193,6 +243,7 @@ void MJPEGEncoder::stop()
 	running = false;
 	should_quit = true;
 	any_frames_to_be_encoded.notify_all();
+	any_frames_encoding.notify_all();
 	encoder_thread.join();
 	if (va_dpy != nullptr) {
 		va_receiver_thread.join();
@@ -228,6 +279,7 @@ unique_ptr<VADisplayWithCleanup> MJPEGEncoder::try_open_va(const string &va_disp
 		return nullptr;
 	}
 
+	// TODO: Unify with the code in Futatabi.
 	int num_formats = vaMaxNumImageFormats(va_dpy->va_dpy);
 	assert(num_formats > 0);
@@ -240,38 +292,72 @@ unique_ptr<VADisplayWithCleanup> MJPEGEncoder::try_open_va(const string &va_disp
 		return nullptr;
 	}
 
+	bool found = false;
+	for (int i = 0; i < num_formats; ++i) {
+		if (formats[i].fourcc == VA_FOURCC_UYVY) {
+			memcpy(&uyvy_format, &formats[i], sizeof(VAImageFormat));
+			found = true;
+			break;
+		}
+	}
+	if (!found) {
+		if (error != nullptr) *error = "UYVY format not found";
+		return nullptr;
+	}
+
 	return va_dpy;
 }
 
-void MJPEGEncoder::upload_frame(int64_t pts, unsigned card_index, RefCountedFrame frame, const bmusb::VideoFormat &video_format, size_t y_offset, size_t cbcr_offset)
+void MJPEGEncoder::upload_frame(int64_t pts, unsigned card_index, RefCountedFrame frame, const bmusb::VideoFormat &video_format, size_t y_offset, size_t cbcr_offset, vector<int32_t> audio, const RGBTriplet &white_balance)
 {
 	PBOFrameAllocator::Userdata *userdata = (PBOFrameAllocator::Userdata *)frame->userdata;
 	if (video_format.width == 0 || video_format.height == 0) {
+		++metric_mjpeg_frames_zero_size_dropped;
 		return;
 	}
 	if (video_format.interlaced) {
 		fprintf(stderr, "Card %u: Ignoring JPEG encoding for interlaced frame\n", card_index);
+		++metric_mjpeg_frames_interlaced_dropped;
 		return;
 	}
 	if (userdata->pixel_format != PixelFormat_8BitYCbCr || !frame->interleaved) {
 		fprintf(stderr, "Card %u: Ignoring JPEG encoding for unsupported pixel format\n", card_index);
+		++metric_mjpeg_frames_unsupported_pixel_format_dropped;
 		return;
 	}
 	if (video_format.width > 4096 || video_format.height > 4096) {
 		fprintf(stderr, "Card %u: Ignoring JPEG encoding for oversized frame\n", card_index);
+		++metric_mjpeg_frames_oversized_dropped;
 		return;
 	}
 
 	lock_guard<mutex> lock(mu);
 	if (frames_to_be_encoded.size() + frames_encoding.size() > 50) {
 		fprintf(stderr, "WARNING: MJPEG encoding doesn't keep up, discarding frame.\n");
+		++metric_mjpeg_overrun_dropped;
 		return;
 	}
-	frames_to_be_encoded.push(QueuedFrame{ pts, card_index, frame, video_format, y_offset, cbcr_offset });
+	++metric_mjpeg_overrun_submitted;
+	frames_to_be_encoded.push(QueuedFrame{ pts, card_index, frame, video_format, y_offset, cbcr_offset, move(audio), white_balance });
 	any_frames_to_be_encoded.notify_all();
 }
 
+int MJPEGEncoder::get_mjpeg_stream_for_card(unsigned card_index)
+{
+	// Only bother doing MJPEG encoding if there are any connected clients
+	// that want the stream.
+	if (httpd->get_num_connected_multicam_clients() == 0) {
+		return -1;
+	}
+
+	auto it = global_flags.card_to_mjpeg_stream_export.find(card_index);
+	if (it == global_flags.card_to_mjpeg_stream_export.end()) {
+		return -1;
+	}
+	return it->second;
+}
+
 void MJPEGEncoder::encoder_thread_func()
 {
 	pthread_setname_np(pthread_self(), "MJPEG_Encode");
@@ -294,9 +380,14 @@ void MJPEGEncoder::encoder_thread_func()
 			// Will call back in the receiver thread.
 			encode_jpeg_va(move(qf));
 		} else {
+			// Write audio before video, since Futatabi expects it.
+			if (qf.audio.size() > 0) {
+				write_audio_packet(qf.pts, qf.card_index, qf.audio);
+			}
+
+			// Encode synchronously, in the same thread.
 			vector<uint8_t> jpeg = encode_jpeg_libjpeg(qf);
-			write_mjpeg_packet(qf.pts, qf.card_index, jpeg);
+			write_mjpeg_packet(qf.pts, qf.card_index, jpeg.data(), jpeg.size());
 		}
 	}
 
@@ -306,20 +397,42 @@
 	free(tmp_cr);
 }
 
-void MJPEGEncoder::write_mjpeg_packet(int64_t pts, unsigned card_index, const vector<uint8_t> &jpeg)
+void MJPEGEncoder::write_mjpeg_packet(int64_t pts, unsigned card_index, const uint8_t *jpeg, size_t jpeg_size)
 {
 	AVPacket pkt;
 	memset(&pkt, 0, sizeof(pkt));
 	pkt.buf = nullptr;
-	pkt.data = const_cast<uint8_t *>(&jpeg[0]);
-	pkt.size = jpeg.size();
+	pkt.data = const_cast<uint8_t *>(jpeg);
+	pkt.size = jpeg_size;
 	pkt.stream_index = card_index;
 	pkt.flags = AV_PKT_FLAG_KEY;
-	pkt.pts = pkt.dts = pts;
+	AVRational time_base = avctx->streams[pkt.stream_index]->time_base;
+	pkt.pts = pkt.dts = av_rescale_q(pts, AVRational{ 1, TIMEBASE }, time_base);
+	pkt.duration = 0;
+
+	if (av_write_frame(avctx.get(), &pkt) < 0) {
+		fprintf(stderr, "av_write_frame() failed\n");
+		abort();
+	}
+}
+
+void MJPEGEncoder::write_audio_packet(int64_t pts, unsigned card_index, const vector<int32_t> &audio)
+{
+	AVPacket pkt;
+	memset(&pkt, 0, sizeof(pkt));
+	pkt.buf = nullptr;
+	pkt.data = reinterpret_cast<uint8_t *>(const_cast<int32_t *>(&audio[0]));
+	pkt.size = audio.size() * sizeof(audio[0]);
+	pkt.stream_index = card_index + global_flags.card_to_mjpeg_stream_export.size();
+	pkt.flags = AV_PKT_FLAG_KEY;
+	AVRational time_base = avctx->streams[pkt.stream_index]->time_base;
+	pkt.pts = pkt.dts = av_rescale_q(pts, AVRational{ 1, TIMEBASE }, time_base);
+	size_t num_stereo_samples = audio.size() / 2;
+	pkt.duration = av_rescale_q(num_stereo_samples, AVRational{ 1, OUTPUT_FREQUENCY }, time_base);
 
 	if (av_write_frame(avctx.get(), &pkt) < 0) {
 		fprintf(stderr, "av_write_frame() failed\n");
-		exit(1);
+		abort();
 	}
 }
 
@@ -373,13 +486,16 @@ MJPEGEncoder::VAResources MJPEGEncoder::get_va_resources(unsigned width, unsigne
 	va_status = vaCreateBuffer(va_dpy->va_dpy, config_id, VAEncCodedBufferType, width * height * 3 + 8192, 1, nullptr, &ret.data_buffer);
 	CHECK_VASTATUS(va_status, "vaCreateBuffer");
 
+	va_status = vaCreateImage(va_dpy->va_dpy, &uyvy_format, width, height, &ret.image);
+	CHECK_VASTATUS(va_status, "vaCreateImage");
+
 	return ret;
 }
 
 void MJPEGEncoder::release_va_resources(MJPEGEncoder::VAResources resources)
 {
 	lock_guard<mutex> lock(va_resources_mutex);
-	if (va_resources_freelist.size() > 10) {
+	if (va_resources_freelist.size() > 50) {
 		auto it = va_resources_freelist.end();
 		--it;
 
@@ -392,13 +508,34 @@ void MJPEGEncoder::release_va_resources(MJPEGEncoder::VAResources resources)
 		va_status = vaDestroySurfaces(va_dpy->va_dpy, &it->surface, 1);
 		CHECK_VASTATUS(va_status, "vaDestroySurfaces");
 
+		va_status = vaDestroyImage(va_dpy->va_dpy, it->image.image_id);
+		CHECK_VASTATUS(va_status, "vaDestroyImage");
+
 		va_resources_freelist.erase(it);
 	}
 
 	va_resources_freelist.push_front(resources);
 }
 
-void MJPEGEncoder::init_jpeg_422(unsigned width, unsigned height, VectorDestinationManager *dest, jpeg_compress_struct *cinfo)
+namespace {
+
+void push16(uint16_t val, string *str)
+{
+	str->push_back(val >> 8);
+	str->push_back(val & 0xff);
+}
+
+void push32(uint32_t val, string *str)
+{
+	str->push_back(val >> 24);
+	str->push_back((val >> 16) & 0xff);
+	str->push_back((val >> 8) & 0xff);
+	str->push_back(val & 0xff);
+}
+
+} // namespace
+
+void MJPEGEncoder::init_jpeg_422(unsigned width, unsigned height, const RGBTriplet &white_balance, VectorDestinationManager *dest, jpeg_compress_struct *cinfo)
 {
 	jpeg_error_mgr jerr;
 	cinfo->err = jpeg_std_error(&jerr);
@@ -423,15 +560,70 @@ void MJPEGEncoder::init_jpeg_422(unsigned width, unsigned height, VectorDestinat
 	cinfo->CCIR601_sampling = true;	// Seems to be mostly ignored by libjpeg, though.
 	jpeg_start_compress(cinfo, true);
 
+	if (fabs(white_balance.r - 1.0f) > 1e-3 ||
+	    fabs(white_balance.g - 1.0f) > 1e-3 ||
+	    fabs(white_balance.b - 1.0f) > 1e-3) {
+		// Convert from (linear) RGB to XYZ.
+		Matrix3d rgb_to_xyz_matrix = movit::ColorspaceConversionEffect::get_xyz_matrix(COLORSPACE_sRGB);
+		Vector3d xyz = rgb_to_xyz_matrix * Vector3d(white_balance.r, white_balance.g, white_balance.b);
+
+		// Convert from XYZ to xyz by normalizing.
+		xyz /= (xyz[0] + xyz[1] + xyz[2]);
+
+		// Create a very rudimentary EXIF header to hold our white point.
+		string exif;
+
+		// Exif header, followed by some padding.
+		exif = "Exif";
+		push16(0, &exif);
+
+		// TIFF header first:
+		exif += "MM";  // Big endian.
+
+		// Magic number.
+		push16(42, &exif);
+
+		// Offset of first IFD (relative to the MM, immediately after the header).
+		push32(exif.size() - 6 + 4, &exif);
+
+		// Now the actual IFD.
+
+		// One entry.
+		push16(1, &exif);
+
+		// WhitePoint tag ID.
+		push16(0x13e, &exif);
+
+		// Rational type.
+		push16(5, &exif);
+
+		// Two values (x and y; z is implicit due to normalization).
+		push32(2, &exif);
+
+		// Offset (relative to the MM, immediately after the last IFD).
+		push32(exif.size() - 6 + 8, &exif);
+
+		// No more IFDs.
+		push32(0, &exif);
+
+		// The actual values.
+		push32(lrintf(xyz[0] * 10000.0f), &exif);
+		push32(10000, &exif);
+		push32(lrintf(xyz[1] * 10000.0f), &exif);
+		push32(10000, &exif);
+
+		jpeg_write_marker(cinfo, JPEG_APP0 + 1, (const JOCTET *)exif.data(), exif.size());
+	}
+
 	// This comment marker is private to FFmpeg. It signals limited Y'CbCr range
 	// (and nothing else).
 	jpeg_write_marker(cinfo, JPEG_COM, (const JOCTET *)"CS=ITU601", strlen("CS=ITU601"));
 }
 
-vector<uint8_t> MJPEGEncoder::get_jpeg_header(unsigned width, unsigned height, jpeg_compress_struct *cinfo)
+vector<uint8_t> MJPEGEncoder::get_jpeg_header(unsigned width, unsigned height, const RGBTriplet &white_balance, jpeg_compress_struct *cinfo)
 {
 	VectorDestinationManager dest;
-	init_jpeg_422(width, height, &dest, cinfo);
+	init_jpeg_422(width, height, white_balance, &dest, cinfo);
 
 	// Make a dummy black image; there's seemingly no other easy way of
 	// making libjpeg outputting all of its headers.
@@ -463,7 +655,7 @@ vector<uint8_t> MJPEGEncoder::get_jpeg_header(unsigned width, unsigned height, j
 	return dest.dest;
 }
 
-MJPEGEncoder::VAData MJPEGEncoder::get_va_data_for_resolution(unsigned width, unsigned height)
+MJPEGEncoder::VAData MJPEGEncoder::get_va_data_for_resolution(unsigned width, unsigned height, const RGBTriplet &white_balance)
 {
 	pair<unsigned, unsigned> key(width, height);
 	if (va_data_for_resolution.count(key)) {
@@ -473,7 +665,7 @@ MJPEGEncoder::VAData MJPEGEncoder::get_va_data_for_resolution(unsigned width, un
 	// Use libjpeg to generate a header and set sane defaults for e.g.
 	// quantization tables. Then do the actual encode with VA-API.
 	jpeg_compress_struct cinfo;
-	vector<uint8_t> jpeg_header = get_jpeg_header(width, height, &cinfo);
+	vector<uint8_t> jpeg_header = get_jpeg_header(width, height, white_balance, &cinfo);
 
 	// Picture parameters.
 	VAEncPictureParameterBufferJPEG pic_param;
@@ -574,13 +766,22 @@ MJPEGEncoder::VAData MJPEGEncoder::get_va_data_for_resolution(unsigned width, un
 
 void MJPEGEncoder::encode_jpeg_va(QueuedFrame &&qf)
 {
+	PBOFrameAllocator::Userdata *userdata = (PBOFrameAllocator::Userdata *)qf.frame->userdata;
 	unsigned width = qf.video_format.width;
 	unsigned height = qf.video_format.height;
 
-	VAResources resources = get_va_resources(width, height);
-	ReleaseVAResources release(this, resources);
+	VAResources resources;
+	ReleaseVAResources release;
+	if (userdata->data_copy_current_src == PBOFrameAllocator::Userdata::FROM_VA_API) {
+		resources = move(userdata->va_resources);
+		release = move(userdata->va_resources_release);
+	} else {
+		assert(userdata->data_copy_current_src == PBOFrameAllocator::Userdata::FROM_MALLOC);
+		resources = get_va_resources(width, height);
+		release = ReleaseVAResources(this, resources);
+	}
 
-	VAData va_data = get_va_data_for_resolution(width, height);
+	VAData va_data = get_va_data_for_resolution(width, height, qf.white_balance);
 	va_data.pic_param.coded_buf = resources.data_buffer;
 
 	VABufferID pic_param_buffer;
@@ -603,27 +804,38 @@ void MJPEGEncoder::encode_jpeg_va(QueuedFrame &&qf)
 	CHECK_VASTATUS(va_status, "vaCreateBuffer");
 	VABufferDestroyer destroy_slice_param(va_dpy->va_dpy, slice_param_buffer);
 
-	VAImage image;
-	va_status = vaDeriveImage(va_dpy->va_dpy, resources.surface, &image);
-	CHECK_VASTATUS(va_status, "vaDeriveImage");
+	if (userdata->data_copy_current_src == PBOFrameAllocator::Userdata::FROM_VA_API) {
+		// The pixel data is already put into the image by the caller.
+		va_status = vaUnmapBuffer(va_dpy->va_dpy, resources.image.buf);
+		CHECK_VASTATUS(va_status, "vaUnmapBuffer");
+	} else {
+		assert(userdata->data_copy_current_src == PBOFrameAllocator::Userdata::FROM_MALLOC);
 
-	// Upload the pixel data.
-	uint8_t *surface_p = nullptr;
-	vaMapBuffer(va_dpy->va_dpy, image.buf, (void **)&surface_p);
+		// Upload the pixel data.
+		uint8_t *surface_p = nullptr;
+		vaMapBuffer(va_dpy->va_dpy, resources.image.buf, (void **)&surface_p);
 
-	size_t field_start_line = qf.video_format.extra_lines_top;	// No interlacing support.
-	size_t field_start = qf.cbcr_offset * 2 + qf.video_format.width * field_start_line * 2;
+		size_t field_start_line = qf.video_format.extra_lines_top;	// No interlacing support.
+		size_t field_start = qf.cbcr_offset * 2 + qf.video_format.width * field_start_line * 2;
 
-	{
-		const uint8_t *src = qf.frame->data_copy + field_start;
-		uint8_t *dst = (unsigned char *)surface_p + image.offsets[0];
-		memcpy_with_pitch(dst, src, qf.video_format.width * 2, image.pitches[0], qf.video_format.height);
+		{
+			const uint8_t *src = qf.frame->data_copy + field_start;
+			uint8_t *dst = (unsigned char *)surface_p + resources.image.offsets[0];
+			memcpy_with_pitch(dst, src, qf.video_format.width * 2, resources.image.pitches[0], qf.video_format.height);
+		}
+
+		va_status = vaUnmapBuffer(va_dpy->va_dpy, resources.image.buf);
+		CHECK_VASTATUS(va_status, "vaUnmapBuffer");
 	}
 
-	va_status = vaUnmapBuffer(va_dpy->va_dpy, image.buf);
-	CHECK_VASTATUS(va_status, "vaUnmapBuffer");
-	va_status = vaDestroyImage(va_dpy->va_dpy, image.image_id);
-	CHECK_VASTATUS(va_status, "vaDestroyImage");
+	qf.frame->data_copy = nullptr;
+
+	// Seemingly vaPutImage() (which triggers a GPU copy) is much nicer to the
+	// CPU than vaDeriveImage() and copying directly into the GPU's buffers.
+	// Exactly why is unclear, but it seems to involve L3 cache usage when there
+	// are many high-res (1080p+) images in play.
+	va_status = vaPutImage(va_dpy->va_dpy, resources.surface, resources.image.image_id, 0, 0, width, height, 0, 0, width, height);
+	CHECK_VASTATUS(va_status, "vaPutImage");
 
 	// Finally, stick in the JPEG header.
 	VAEncPackedHeaderParameterBuffer header_parm;
@@ -678,6 +890,11 @@ void MJPEGEncoder::va_receiver_thread_func()
 			frames_encoding.pop();
 		}
 
+		// Write audio before video, since Futatabi expects it.
+		if (qf.audio.size() > 0) {
+			write_audio_packet(qf.pts, qf.card_index, qf.audio);
+		}
+
 		VAStatus va_status = vaSyncSurface(va_dpy->va_dpy, qf.resources.surface);
 		CHECK_VASTATUS(va_status, "vaSyncSurface");
 
@@ -685,13 +902,11 @@ void MJPEGEncoder::va_receiver_thread_func()
 		va_status = vaMapBuffer(va_dpy->va_dpy, qf.resources.data_buffer, (void **)&segment);
 		CHECK_VASTATUS(va_status, "vaMapBuffer");
 
-		const char *coded_buf = reinterpret_cast<const char *>(segment->buf);
-		vector<uint8_t> jpeg(coded_buf, coded_buf + segment->size);
+		const uint8_t *coded_buf = reinterpret_cast<const uint8_t *>(segment->buf);
+		write_mjpeg_packet(qf.pts, qf.card_index, coded_buf, segment->size);
 
 		va_status = vaUnmapBuffer(va_dpy->va_dpy, qf.resources.data_buffer);
 		CHECK_VASTATUS(va_status, "vaUnmapBuffer");
-
-		write_mjpeg_packet(qf.pts, qf.card_index, jpeg);
 	}
 }
 
@@ -702,7 +917,7 @@ vector<uint8_t> MJPEGEncoder::encode_jpeg_libjpeg(const QueuedFrame &qf)
 	VectorDestinationManager dest;
 
 	jpeg_compress_struct cinfo;
-	init_jpeg_422(width, height, &dest, &cinfo);
+	init_jpeg_422(width, height, qf.white_balance, &dest, &cinfo);
 
 	size_t field_start_line = qf.video_format.extra_lines_top;	// No interlacing support.
 	size_t field_start = qf.cbcr_offset * 2 + qf.video_format.width * field_start_line * 2;
@@ -712,7 +927,7 @@ vector<uint8_t> MJPEGEncoder::encode_jpeg_libjpeg(const QueuedFrame &qf)
 	for (unsigned y = 0; y < qf.video_format.height; y += 8) {
 		const uint8_t *src = qf.frame->data_copy + field_start + y * qf.video_format.width * 2;
 
-		memcpy_interleaved(tmp_y, tmp_cbcr, src, qf.video_format.width * 8 * 2);
+		memcpy_interleaved(tmp_cbcr, tmp_y, src, qf.video_format.width * 8 * 2);
 		memcpy_interleaved(tmp_cb, tmp_cr, tmp_cbcr, qf.video_format.width * 8);
 		for (unsigned yy = 0; yy < 8; ++yy) {
 			yptr[yy] = tmp_y + yy * width;
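
Notes on the subtler parts of this patch:

1) The EXIF block in init_jpeg_422() takes the white-balance triplet as linear RGB, converts it to CIE XYZ through movit's colorspace matrix, normalizes to xy chromaticity (z = 1 - x - y is implicit, which is why only two rationals are written), and stores the result in the WhitePoint tag (0x13e) scaled by 10000. A standalone sketch of the same arithmetic; the hard-coded sRGB-to-XYZ matrix below stands in for movit::ColorspaceConversionEffect::get_xyz_matrix(COLORSPACE_sRGB) and uses the commonly published constants, not values taken from this patch:

#include <cmath>
#include <cstdio>

// Standard linear sRGB (D65) -> CIE XYZ matrix, row-major.
static const double rgb_to_xyz[3][3] = {
	{ 0.4124564, 0.3575761, 0.1804375 },
	{ 0.2126729, 0.7151522, 0.0721750 },
	{ 0.0193339, 0.1191920, 0.9503041 },
};

int main()
{
	// A neutral white balance; anything else shifts the stored white point.
	double rgb[3] = { 1.0, 1.0, 1.0 };

	double xyz[3] = { 0.0, 0.0, 0.0 };
	for (int i = 0; i < 3; ++i)
		for (int j = 0; j < 3; ++j)
			xyz[i] += rgb_to_xyz[i][j] * rgb[j];

	// Normalize to xy chromaticity; z = 1 - x - y is implicit,
	// which is why the patch stores only two RATIONALs.
	double sum = xyz[0] + xyz[1] + xyz[2];
	double x = xyz[0] / sum, y = xyz[1] / sum;

	// Same scaling as push32(lrintf(xyz[i] * 10000.0f), &exif) in the patch.
	printf("WhitePoint = %ld/10000, %ld/10000\n", lrint(x * 10000.0), lrint(y * 10000.0));
	// Prints approximately 3127/10000, 3290/10000, i.e., the D65 white point.
}

For a neutral triplet this lands on D65, and the patch skips writing the marker entirely in that case (the 1e-3 tolerance check on white_balance).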
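2) All streams in the NUT mux now run on the 1/48000 (OUTPUT_FREQUENCY) timebase, per the comment in add_video_stream(), while packets arrive with pts on Nageru's internal 1/TIMEBASE clock (1/120000, per the same comment). write_mjpeg_packet() and write_audio_packet() therefore rescale with av_rescale_q(), and the audio duration is derived from the stereo sample count. A minimal sketch of that conversion; the two constants are taken from the patch's comments rather than from the real headers, so treat them as assumptions:

extern "C" {
#include <libavutil/mathematics.h>
#include <libavutil/rational.h>
}

#include <cassert>
#include <cstddef>
#include <cstdint>

static constexpr int TIMEBASE = 120000;         // Nageru's internal clock (per the comment).
static constexpr int OUTPUT_FREQUENCY = 48000;  // Mux timebase and audio sample rate.

int main()
{
	AVRational stream_tb{ 1, OUTPUT_FREQUENCY };

	// Video/audio pts conversion, as in write_mjpeg_packet()/write_audio_packet():
	int64_t pts = TIMEBASE;  // One second on the internal clock.
	int64_t out_pts = av_rescale_q(pts, AVRational{ 1, TIMEBASE }, stream_tb);
	assert(out_pts == OUTPUT_FREQUENCY);  // One second is 48000 ticks at 1/48000.

	// Audio duration: the vector holds interleaved stereo, two int32_t per
	// sample pair, hence num_stereo_samples = audio.size() / 2 in the patch.
	size_t audio_size = 960 * 2;  // 20 ms of stereo at 48 kHz.
	int64_t duration = av_rescale_q(audio_size / 2, AVRational{ 1, OUTPUT_FREQUENCY }, stream_tb);
	assert(duration == 960);
	return 0;
}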
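3) The memcpy_interleaved() argument swap in encode_jpeg_libjpeg() is a correctness fix, assuming memcpy_interleaved(dst1, dst2, src, n) routes the even bytes of src to dst1 and the odd bytes to dst2 (which is how its other uses in Nageru read). In UYVY the even bytes are chroma (U0 V0 U1 V1 ...) and the odd bytes are luma, so the chroma plane has to be the first destination. A scalar reference of the assumed split (split_even_odd is a hypothetical name, not from the codebase):

#include <cstddef>
#include <cstdint>

// Scalar equivalent of the assumed memcpy_interleaved() semantics: even bytes
// of src go to dst_even, odd bytes to dst_odd. For one UYVY row, dst_even then
// holds interleaved Cb/Cr (split again later into tmp_cb/tmp_cr) and dst_odd
// holds Y'.
void split_even_odd(uint8_t *dst_even, uint8_t *dst_odd, const uint8_t *src, size_t n)
{
	for (size_t i = 0; i < n; i += 2) {
		dst_even[i / 2] = src[i];
		dst_odd[i / 2] = src[i + 1];
	}
}

With the old argument order, tmp_y would have received the chroma bytes, swapping luma and chroma in the libjpeg fallback path.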