From: Steinar H. Gunderson
Date: Tue, 19 May 2020 21:49:57 +0000 (+0200)
Subject: Support MJPEG encoding of planar Y'CbCr sources.
X-Git-Tag: 2.0.0~30
X-Git-Url: https://git.sesse.net/?p=nageru;a=commitdiff_plain;h=2f8f882defb23abe8b2c54e195b72c57fba55cd6

Support MJPEG encoding of planar Y'CbCr sources.

This enables MJPEG encoding for video sources, but in practice, only SRT
cameras are enabled. There's VA-API support for 4:2:0 only, by means of
converting to NV12; if you have 4:2:2 or 4:4:4, you will get a fallback
to libjpeg, which should handle that gracefully. (If I actually had an
SRT source doing 4:2:2, I might have added more support, but it seems a
pretty narrow case.) Futatabi seemingly has some problems handling these
files, but that should be fixable.
---

diff --git a/nageru/mjpeg_encoder.cpp b/nageru/mjpeg_encoder.cpp
index f991a4b..3308c32 100644
--- a/nageru/mjpeg_encoder.cpp
+++ b/nageru/mjpeg_encoder.cpp
@@ -33,10 +33,39 @@ using namespace bmusb;
 using namespace movit;
 using namespace std;
 
-static VAImageFormat uyvy_format;
+static VAImageFormat uyvy_format, nv12_format;
 
 extern void memcpy_with_pitch(uint8_t *dst, const uint8_t *src, size_t src_width, size_t dst_pitch, size_t height);
 
+// The inverse of memcpy_interleaved(), with (slow) support for pitch.
+void interleave_with_pitch(uint8_t *dst, const uint8_t *src1, const uint8_t *src2, size_t src_width, size_t dst_pitch, size_t height)
+{
+#if __SSE2__
+	if (dst_pitch == src_width * 2 && (src_width * height) % 16 == 0) {
+		__m128i *dptr = reinterpret_cast<__m128i *>(dst);
+		const __m128i *sptr1 = reinterpret_cast<const __m128i *>(src1);
+		const __m128i *sptr2 = reinterpret_cast<const __m128i *>(src2);
+		for (size_t i = 0; i < src_width * height / 16; ++i) {
+			__m128i data1 = _mm_loadu_si128(sptr1++);
+			__m128i data2 = _mm_loadu_si128(sptr2++);
+			_mm_storeu_si128(dptr++, _mm_unpacklo_epi8(data1, data2));
+			_mm_storeu_si128(dptr++, _mm_unpackhi_epi8(data1, data2));
+		}
+		return;
+	}
+#endif
+
+	for (size_t y = 0; y < height; ++y) {
+		uint8_t *dptr = dst + y * dst_pitch;
+		const uint8_t *sptr1 = src1 + y * src_width;
+		const uint8_t *sptr2 = src2 + y * src_width;
+		for (size_t x = 0; x < src_width; ++x) {
+			*dptr++ = *sptr1++;
+			*dptr++ = *sptr2++;
+		}
+	}
+}
+
 // From libjpeg (although it's of course identical between implementations).
 static const int jpeg_natural_order[DCTSIZE2] = {
 	0, 1, 8, 16, 9, 2, 3, 10,
@@ -193,7 +222,7 @@ MJPEGEncoder::MJPEGEncoder(HTTPD *httpd, const string &va_display)
 	// Initialize VA-API.
 	string error;
-	va_dpy = try_open_va(va_display, &error, &config_id);
+	va_dpy = try_open_va(va_display, &error, &config_id_422, &config_id_420);
 	if (va_dpy == nullptr) {
 		fprintf(stderr, "Could not initialize VA-API for MJPEG encoding: %s. JPEGs will be encoded in software if needed.\n", error.c_str());
 	}
@@ -242,7 +271,7 @@ void MJPEGEncoder::stop()
 	}
 }
 
-unique_ptr<VADisplayWithCleanup> MJPEGEncoder::try_open_va(const string &va_display, string *error, VAConfigID *config_id)
+unique_ptr<VADisplayWithCleanup> MJPEGEncoder::try_open_va(const string &va_display, string *error, VAConfigID *config_id_422, VAConfigID *config_id_420)
 {
 	unique_ptr<VADisplayWithCleanup> va_dpy = va_open_display(va_display);
 	if (va_dpy == nullptr) {
@@ -258,17 +287,33 @@ unique_ptr<VADisplayWithCleanup> MJPEGEncoder::try_open_va(const string &va_disp
 		return nullptr;
 	}
 
-	VAConfigAttrib attr = { VAConfigAttribRTFormat, VA_RT_FORMAT_YUV422 };
-	va_status = vaCreateConfig(va_dpy->va_dpy, VAProfileJPEGBaseline, VAEntrypointEncPicture,
-	                           &attr, 1, config_id);
-	if (va_status == VA_STATUS_ERROR_UNSUPPORTED_ENTRYPOINT) {
-		if (error != nullptr) *error = "No hardware support";
-		return nullptr;
-	} else if (va_status != VA_STATUS_SUCCESS) {
-		char buf[256];
-		snprintf(buf, sizeof(buf), "vaCreateConfig() failed with status %d\n", va_status);
-		if (error != nullptr) *error = buf;
-		return nullptr;
+	{
+		VAConfigAttrib attr = { VAConfigAttribRTFormat, VA_RT_FORMAT_YUV422 };
+		va_status = vaCreateConfig(va_dpy->va_dpy, VAProfileJPEGBaseline, VAEntrypointEncPicture,
+		                           &attr, 1, config_id_422);
+		if (va_status == VA_STATUS_ERROR_UNSUPPORTED_ENTRYPOINT) {
+			if (error != nullptr) *error = "No 4:2:2 hardware support";
+			return nullptr;
+		} else if (va_status != VA_STATUS_SUCCESS) {
+			char buf[256];
+			snprintf(buf, sizeof(buf), "vaCreateConfig() for 4:2:2 failed with status %d\n", va_status);
+			if (error != nullptr) *error = buf;
+			return nullptr;
+		}
+	}
+	{
+		VAConfigAttrib attr = { VAConfigAttribRTFormat, VA_RT_FORMAT_YUV420 };
+		va_status = vaCreateConfig(va_dpy->va_dpy, VAProfileJPEGBaseline, VAEntrypointEncPicture,
+		                           &attr, 1, config_id_420);
+		if (va_status == VA_STATUS_ERROR_UNSUPPORTED_ENTRYPOINT) {
+			if (error != nullptr) *error = "No 4:2:0 hardware support";
+			return nullptr;
+		} else if (va_status != VA_STATUS_SUCCESS) {
+			char buf[256];
+			snprintf(buf, sizeof(buf), "vaCreateConfig() for 4:2:0 failed with status %d\n", va_status);
+			if (error != nullptr) *error = buf;
+			return nullptr;
+		}
 	}
 
 	// TODO: Unify with the code in Futatabi.
@@ -284,25 +329,49 @@ unique_ptr<VADisplayWithCleanup> MJPEGEncoder::try_open_va(const string &va_disp
 		return nullptr;
 	}
 
-	bool found = false;
+	bool uyvy_found = false, nv12_found = false;
 	for (int i = 0; i < num_formats; ++i) {
 		if (formats[i].fourcc == VA_FOURCC_UYVY) {
 			memcpy(&uyvy_format, &formats[i], sizeof(VAImageFormat));
-			found = true;
-			break;
+			uyvy_found = true;
+		}
+		if (formats[i].fourcc == VA_FOURCC_NV12) {
+			memcpy(&nv12_format, &formats[i], sizeof(VAImageFormat));
+			nv12_found = true;
 		}
 	}
-	if (!found) {
+	if (!uyvy_found) {
 		if (error != nullptr) *error = "UYVY format not found";
 		return nullptr;
 	}
+	if (!nv12_found) {
+		if (error != nullptr) *error = "NV12 format not found";
+		return nullptr;
+	}
 
 	return va_dpy;
 }
 
-void MJPEGEncoder::upload_frame(int64_t pts, unsigned card_index, RefCountedFrame frame, const bmusb::VideoFormat &video_format, size_t y_offset, size_t cbcr_offset, vector<int32_t> audio, const RGBTriplet &white_balance)
+namespace {
+
+bool is_uyvy(RefCountedFrame frame)
+{
+	PBOFrameAllocator::Userdata *userdata = (PBOFrameAllocator::Userdata *)frame->userdata;
+	return userdata->pixel_format == PixelFormat_8BitYCbCr && frame->interleaved;
+}
+
+bool is_i420(RefCountedFrame frame)
+{
 	PBOFrameAllocator::Userdata *userdata = (PBOFrameAllocator::Userdata *)frame->userdata;
+	return userdata->pixel_format == PixelFormat_8BitYCbCrPlanar &&
+		userdata->ycbcr_format.chroma_subsampling_x == 2 &&
+		userdata->ycbcr_format.chroma_subsampling_y == 2;
+}
+
+}  // namespace
+
+void MJPEGEncoder::upload_frame(int64_t pts, unsigned card_index, RefCountedFrame frame, const bmusb::VideoFormat &video_format, size_t y_offset, size_t cbcr_offset, vector<int32_t> audio, const RGBTriplet &white_balance)
+{
 	if (video_format.width == 0 || video_format.height == 0) {
 		++metric_mjpeg_frames_zero_size_dropped;
 		return;
@@ -312,8 +381,7 @@ void MJPEGEncoder::upload_frame(int64_t pts, unsigned card_index, RefCountedFram
 		++metric_mjpeg_frames_interlaced_dropped;
 		return;
 	}
-	if (userdata->pixel_format != PixelFormat_8BitYCbCr ||
-	    !frame->interleaved) {
+	if (!is_uyvy(frame) && !is_i420(frame)) {
 		fprintf(stderr, "Card %u: Ignoring JPEG encoding for unsupported pixel format\n", card_index);
 		++metric_mjpeg_frames_unsupported_pixel_format_dropped;
 		return;
@@ -456,12 +524,12 @@ private:
 	VABufferID buf;
 };
 
-MJPEGEncoder::VAResources MJPEGEncoder::get_va_resources(unsigned width, unsigned height)
+MJPEGEncoder::VAResources MJPEGEncoder::get_va_resources(unsigned width, unsigned height, uint32_t fourcc)
 {
 	{
 		lock_guard<mutex> lock(va_resources_mutex);
 		for (auto it = va_resources_freelist.begin(); it != va_resources_freelist.end(); ++it) {
-			if (it->width == width && it->height == height) {
+			if (it->width == width && it->height == height && it->fourcc == fourcc) {
 				VAResources ret = *it;
 				va_resources_freelist.erase(it);
 				return ret;
@@ -473,17 +541,24 @@ MJPEGEncoder::VAResources MJPEGEncoder::get_va_resources(unsigned width, unsigne
 	ret.width = width;
 	ret.height = height;
+	ret.fourcc = fourcc;
 
 	VASurfaceAttrib attrib;
 	attrib.flags = VA_SURFACE_ATTRIB_SETTABLE;
 	attrib.type = VASurfaceAttribPixelFormat;
 	attrib.value.type = VAGenericValueTypeInteger;
-	attrib.value.value.i = VA_FOURCC_UYVY;
+	attrib.value.value.i = fourcc;
 
-	VAStatus va_status = vaCreateSurfaces(va_dpy->va_dpy, VA_RT_FORMAT_YUV422,
-	                                      width, height,
-	                                      &ret.surface, 1, &attrib, 1);
-	CHECK_VASTATUS(va_status, "vaCreateSurfaces");
+	VAStatus va_status;
+	VAConfigID config_id;
+	if (fourcc == VA_FOURCC_UYVY) {
+		va_status = vaCreateSurfaces(va_dpy->va_dpy, VA_RT_FORMAT_YUV422, width, height, &ret.surface, 1, &attrib, 1);
+		config_id = config_id_422;
+	} else {
+		assert(fourcc == VA_FOURCC_NV12);
+		va_status = vaCreateSurfaces(va_dpy->va_dpy, VA_RT_FORMAT_YUV420, width, height, &ret.surface, 1, &attrib, 1);
+		config_id = config_id_420;
+	}
 
 	va_status = vaCreateContext(va_dpy->va_dpy, config_id, width, height, 0, &ret.surface, 1, &ret.context);
 	CHECK_VASTATUS(va_status, "vaCreateContext");
@@ -491,8 +566,14 @@ MJPEGEncoder::VAResources MJPEGEncoder::get_va_resources(unsigned width, unsigne
 	va_status = vaCreateBuffer(va_dpy->va_dpy, ret.context, VAEncCodedBufferType, width * height * 3 + 8192, 1, nullptr, &ret.data_buffer);
 	CHECK_VASTATUS(va_status, "vaCreateBuffer");
 
-	va_status = vaCreateImage(va_dpy->va_dpy, &uyvy_format, width, height, &ret.image);
-	CHECK_VASTATUS(va_status, "vaCreateImage");
+	if (fourcc == VA_FOURCC_UYVY) {
+		va_status = vaCreateImage(va_dpy->va_dpy, &uyvy_format, width, height, &ret.image);
+		CHECK_VASTATUS(va_status, "vaCreateImage");
+	} else {
+		assert(fourcc == VA_FOURCC_NV12);
+		va_status = vaCreateImage(va_dpy->va_dpy, &nv12_format, width, height, &ret.image);
+		CHECK_VASTATUS(va_status, "vaCreateImage");
+	}
 
 	return ret;
 }
@@ -540,7 +621,7 @@ void push32(uint32_t val, string *str)
 
 }  // namespace
 
-void MJPEGEncoder::init_jpeg_422(unsigned width, unsigned height, const RGBTriplet &white_balance, VectorDestinationManager *dest, jpeg_compress_struct *cinfo)
+void MJPEGEncoder::init_jpeg(unsigned width, unsigned height, const RGBTriplet &white_balance, VectorDestinationManager *dest, jpeg_compress_struct *cinfo, int y_h_samp_factor, int y_v_samp_factor)
 {
 	jpeg_error_mgr jerr;
 	cinfo->err = jpeg_std_error(&jerr);
@@ -556,8 +637,8 @@ void MJPEGEncoder::init_jpeg_422(unsigned width, unsigned height, const RGBTripl
 	cinfo->image_height = height;
 	cinfo->raw_data_in = true;
 	jpeg_set_colorspace(cinfo, JCS_YCbCr);
-	cinfo->comp_info[0].h_samp_factor = 2;
-	cinfo->comp_info[0].v_samp_factor = 1;
+	cinfo->comp_info[0].h_samp_factor = y_h_samp_factor;
+	cinfo->comp_info[0].v_samp_factor = y_v_samp_factor;
 	cinfo->comp_info[1].h_samp_factor = 1;
 	cinfo->comp_info[1].v_samp_factor = 1;
 	cinfo->comp_info[2].h_samp_factor = 1;
@@ -625,25 +706,31 @@ void MJPEGEncoder::init_jpeg_422(unsigned width, unsigned height, const RGBTripl
 	jpeg_write_marker(cinfo, JPEG_COM, (const JOCTET *)"CS=ITU601", strlen("CS=ITU601"));
 }
 
-vector<uint8_t> MJPEGEncoder::get_jpeg_header(unsigned width, unsigned height, const RGBTriplet &white_balance, jpeg_compress_struct *cinfo)
+vector<uint8_t> MJPEGEncoder::get_jpeg_header(unsigned width, unsigned height, const RGBTriplet &white_balance, int y_h_samp_factor, int y_v_samp_factor, jpeg_compress_struct *cinfo)
 {
 	VectorDestinationManager dest;
 
-	init_jpeg_422(width, height, white_balance, &dest, cinfo);
+	init_jpeg(width, height, white_balance, &dest, cinfo, y_h_samp_factor, y_v_samp_factor);
 
 	// Make a dummy black image; there's seemingly no other easy way of
 	// making libjpeg output all of its headers.
-	JSAMPROW yptr[8], cbptr[8], crptr[8];
+	assert(y_v_samp_factor <= 2);  // Or we'd need larger JSAMPROW arrays below.
+	size_t block_height_y = 8 * y_v_samp_factor;
+	size_t block_height_cbcr = 8;
+
+	JSAMPROW yptr[16], cbptr[16], crptr[16];
 	JSAMPARRAY data[3] = { yptr, cbptr, crptr };
 	memset(tmp_y, 0, 4096);
 	memset(tmp_cb, 0, 4096);
 	memset(tmp_cr, 0, 4096);
-	for (unsigned yy = 0; yy < 8; ++yy) {
+	for (unsigned yy = 0; yy < block_height_y; ++yy) {
 		yptr[yy] = tmp_y;
+	}
+	for (unsigned yy = 0; yy < block_height_cbcr; ++yy) {
 		cbptr[yy] = tmp_cb;
 		crptr[yy] = tmp_cr;
 	}
-	for (unsigned y = 0; y < height; y += 8) {
-		jpeg_write_raw_data(cinfo, data, /*num_lines=*/8);
+	for (unsigned y = 0; y < height; y += block_height_y) {
+		jpeg_write_raw_data(cinfo, data, block_height_y);
 	}
 
 	jpeg_finish_compress(cinfo);
@@ -660,9 +747,9 @@ vector<uint8_t> MJPEGEncoder::get_jpeg_header(unsigned width, unsigned height, c
 	return dest.dest;
 }
 
-MJPEGEncoder::VAData MJPEGEncoder::get_va_data_for_parameters(unsigned width, unsigned height, const RGBTriplet &white_balance)
+MJPEGEncoder::VAData MJPEGEncoder::get_va_data_for_parameters(unsigned width, unsigned height, unsigned y_h_samp_factor, unsigned y_v_samp_factor, const RGBTriplet &white_balance)
 {
-	VAKey key{width, height, white_balance};
+	VAKey key{width, height, y_h_samp_factor, y_v_samp_factor, white_balance};
 	if (va_data_for_parameters.count(key)) {
 		return va_data_for_parameters[key];
 	}
@@ -670,7 +757,7 @@ MJPEGEncoder::VAData MJPEGEncoder::get_va_data_for_parameters(unsigned width, un
 	// Use libjpeg to generate a header and set sane defaults for e.g.
 	// quantization tables. Then do the actual encode with VA-API.
 	jpeg_compress_struct cinfo;
-	vector<uint8_t> jpeg_header = get_jpeg_header(width, height, white_balance, &cinfo);
+	vector<uint8_t> jpeg_header = get_jpeg_header(width, height, white_balance, y_h_samp_factor, y_v_samp_factor, &cinfo);
 
 	// Picture parameters.
 	VAEncPictureParameterBufferJPEG pic_param;
@@ -778,15 +865,34 @@ void MJPEGEncoder::encode_jpeg_va(QueuedFrame &&qf)
 	VAResources resources;
 	ReleaseVAResources release;
 	if (userdata->data_copy_current_src == PBOFrameAllocator::Userdata::FROM_VA_API) {
+		assert(is_uyvy(qf.frame));
 		resources = move(userdata->va_resources);
 		release = move(userdata->va_resources_release);
 	} else {
 		assert(userdata->data_copy_current_src == PBOFrameAllocator::Userdata::FROM_MALLOC);
-		resources = get_va_resources(width, height);
+		if (is_uyvy(qf.frame)) {
+			resources = get_va_resources(width, height, VA_FOURCC_UYVY);
+		} else {
+			assert(is_i420(qf.frame));
+			// We'd prefer VA_FOURCC_I420, but it's not supported by Intel's driver.
+			resources = get_va_resources(width, height, VA_FOURCC_NV12);
+		}
 		release = ReleaseVAResources(this, resources);
 	}
 
-	VAData va_data = get_va_data_for_parameters(width, height, qf.white_balance);
+	int y_h_samp_factor, y_v_samp_factor;
+	if (is_uyvy(qf.frame)) {
+		// 4:2:2 (sample Y' twice as often horizontally as Cb or Cr).
+		y_h_samp_factor = 2;
+		y_v_samp_factor = 1;
+	} else {
+		// 4:2:0 (sample Y' twice as often as Cb or Cr, in both directions).
+		assert(is_i420(qf.frame));
+		y_h_samp_factor = 2;
+		y_v_samp_factor = 2;
+	}
+
+	VAData va_data = get_va_data_for_parameters(width, height, y_h_samp_factor, y_v_samp_factor, qf.white_balance);
 	va_data.pic_param.coded_buf = resources.data_buffer;
 
 	VABufferID pic_param_buffer;
@@ -820,13 +926,27 @@ void MJPEGEncoder::encode_jpeg_va(QueuedFrame &&qf)
 	uint8_t *surface_p = nullptr;
 	vaMapBuffer(va_dpy->va_dpy, resources.image.buf, (void **)&surface_p);
 
-	size_t field_start_line = qf.video_format.extra_lines_top;  // No interlacing support.
-	size_t field_start = qf.cbcr_offset * 2 + qf.video_format.width * field_start_line * 2;
+	if (is_uyvy(qf.frame)) {
+		size_t field_start_line = qf.video_format.extra_lines_top;  // No interlacing support.
+		size_t field_start = qf.cbcr_offset * 2 + qf.video_format.width * field_start_line * 2;
 
-	{
 		const uint8_t *src = qf.frame->data_copy + field_start;
 		uint8_t *dst = (unsigned char *)surface_p + resources.image.offsets[0];
 		memcpy_with_pitch(dst, src, qf.video_format.width * 2, resources.image.pitches[0], qf.video_format.height);
+	} else {
+		assert(is_i420(qf.frame));
+		assert(!qf.frame->interleaved);  // Makes no sense for I420.
+
+		size_t field_start_line = qf.video_format.extra_lines_top;  // No interlacing support.
+		const uint8_t *y_src = qf.frame->data + qf.video_format.width * field_start_line;
+		const uint8_t *cb_src = y_src + width * height;
+		const uint8_t *cr_src = cb_src + (width / 2) * (height / 2);
+
+		uint8_t *y_dst = (unsigned char *)surface_p + resources.image.offsets[0];
+		uint8_t *cbcr_dst = (unsigned char *)surface_p + resources.image.offsets[1];
+
+		memcpy_with_pitch(y_dst, y_src, qf.video_format.width, resources.image.pitches[0], qf.video_format.height);
+		interleave_with_pitch(cbcr_dst, cb_src, cr_src, qf.video_format.width / 2, resources.image.pitches[1], qf.video_format.height / 2);
 	}
 
 	va_status = vaUnmapBuffer(va_dpy->va_dpy, resources.image.buf);
@@ -938,24 +1058,59 @@ vector<uint8_t> MJPEGEncoder::encode_jpeg_libjpeg(const QueuedFrame &qf)
 	VectorDestinationManager dest;
 
 	jpeg_compress_struct cinfo;
-	init_jpeg_422(width, height, qf.white_balance, &dest, &cinfo);
 
 	size_t field_start_line = qf.video_format.extra_lines_top;  // No interlacing support.
-	size_t field_start = qf.cbcr_offset * 2 + qf.video_format.width * field_start_line * 2;
 
-	JSAMPROW yptr[8], cbptr[8], crptr[8];
-	JSAMPARRAY data[3] = { yptr, cbptr, crptr };
-	for (unsigned y = 0; y < qf.video_format.height; y += 8) {
-		const uint8_t *src = qf.frame->data_copy + field_start + y * qf.video_format.width * 2;
-
-		memcpy_interleaved(tmp_cbcr, tmp_y, src, qf.video_format.width * 8 * 2);
-		memcpy_interleaved(tmp_cb, tmp_cr, tmp_cbcr, qf.video_format.width * 8);
-		for (unsigned yy = 0; yy < 8; ++yy) {
-			yptr[yy] = tmp_y + yy * width;
-			cbptr[yy] = tmp_cb + yy * width / 2;
-			crptr[yy] = tmp_cr + yy * width / 2;
+	PBOFrameAllocator::Userdata *userdata = (PBOFrameAllocator::Userdata *)qf.frame->userdata;
+	if (userdata->pixel_format == PixelFormat_8BitYCbCr) {
+		init_jpeg(width, height, qf.white_balance, &dest, &cinfo, /*y_h_samp_factor=*/2, /*y_v_samp_factor=*/1);
+
+		assert(qf.frame->interleaved);
+		size_t field_start = qf.cbcr_offset * 2 + qf.video_format.width * field_start_line * 2;
+
+		JSAMPROW yptr[8], cbptr[8], crptr[8];
+		JSAMPARRAY data[3] = { yptr, cbptr, crptr };
+		for (unsigned y = 0; y < qf.video_format.height; y += 8) {
+			const uint8_t *src;
+			src = qf.frame->data_copy + field_start + y * qf.video_format.width * 2;
+
+			memcpy_interleaved(tmp_cbcr, tmp_y, src, qf.video_format.width * 8 * 2);
+			memcpy_interleaved(tmp_cb, tmp_cr, tmp_cbcr, qf.video_format.width * 8);
+			for (unsigned yy = 0; yy < 8; ++yy) {
+				yptr[yy] = tmp_y + yy * width;
+				cbptr[yy] = tmp_cb + yy * width / 2;
+				crptr[yy] = tmp_cr + yy * width / 2;
+			}
+			jpeg_write_raw_data(&cinfo, data, /*num_lines=*/8);
+		}
+	} else {
+		assert(userdata->pixel_format == PixelFormat_8BitYCbCrPlanar);
+
+		const movit::YCbCrFormat &ycbcr = userdata->ycbcr_format;
+		init_jpeg(width, height, qf.white_balance, &dest, &cinfo, ycbcr.chroma_subsampling_x, ycbcr.chroma_subsampling_y);
+		assert(ycbcr.chroma_subsampling_y <= 2);  // Or we'd need larger JSAMPROW arrays below.
+
+		size_t field_start_line = qf.video_format.extra_lines_top;  // No interlacing support.
+		const uint8_t *y_start = qf.frame->data + qf.video_format.width * field_start_line;
+		const uint8_t *cb_start = y_start + width * height;
+		const uint8_t *cr_start = cb_start + (width / ycbcr.chroma_subsampling_x) * (height / ycbcr.chroma_subsampling_y);
+
+		size_t block_height_y = 8 * ycbcr.chroma_subsampling_y;
+		size_t block_height_cbcr = 8;
+
+		JSAMPROW yptr[16], cbptr[16], crptr[16];
+		JSAMPARRAY data[3] = { yptr, cbptr, crptr };
+		for (unsigned y = 0; y < qf.video_format.height; y += block_height_y) {
+			for (unsigned yy = 0; yy < block_height_y; ++yy) {
+				yptr[yy] = const_cast<uint8_t *>(y_start) + (y + yy) * width;
+			}
+			unsigned cbcr_y = y / ycbcr.chroma_subsampling_y;
+			for (unsigned yy = 0; yy < block_height_cbcr; ++yy) {
+				cbptr[yy] = const_cast<uint8_t *>(cb_start) + (cbcr_y + yy) * width / ycbcr.chroma_subsampling_x;
+				crptr[yy] = const_cast<uint8_t *>(cr_start) + (cbcr_y + yy) * width / ycbcr.chroma_subsampling_x;
+			}
+			jpeg_write_raw_data(&cinfo, data, block_height_y);
 		}
-		jpeg_write_raw_data(&cinfo, data, /*num_lines=*/8);
 	}
 
 	jpeg_finish_compress(&cinfo);
diff --git a/nageru/mjpeg_encoder.h b/nageru/mjpeg_encoder.h
index 87bf3b5..93394db 100644
--- a/nageru/mjpeg_encoder.h
+++ b/nageru/mjpeg_encoder.h
@@ -50,6 +50,7 @@ private:
 	struct VAResources {
 		unsigned width, height;
+		uint32_t fourcc;
 		VASurfaceID surface;
 		VAContextID context;
 		VABufferID data_buffer;
@@ -119,8 +120,8 @@ private:
 	std::vector<uint8_t> encode_jpeg_libjpeg(const QueuedFrame &qf);
 	void write_mjpeg_packet(AVFormatContext *avctx, int64_t pts, unsigned stream_index, const uint8_t *jpeg, size_t jpeg_size);
 	void write_audio_packet(AVFormatContext *avctx, int64_t pts, unsigned stream_index, const std::vector<int32_t> &audio);
-	void init_jpeg_422(unsigned width, unsigned height, const movit::RGBTriplet &white_balance, VectorDestinationManager *dest, jpeg_compress_struct *cinfo);
-	std::vector<uint8_t> get_jpeg_header(unsigned width, unsigned height, const movit::RGBTriplet &white_balance, jpeg_compress_struct *cinfo);
+	void init_jpeg(unsigned width, unsigned height, const movit::RGBTriplet &white_balance, VectorDestinationManager *dest, jpeg_compress_struct *cinfo, int y_h_samp_factor, int y_v_samp_factor);
+	std::vector<uint8_t> get_jpeg_header(unsigned width, unsigned height, const movit::RGBTriplet &white_balance, int y_h_samp_factor, int y_v_samp_factor, jpeg_compress_struct *cinfo);
 	void add_stream(HTTPD::StreamID stream_id);  // Can only be called from the constructor, or the thread owning <httpd>.
 	void update_siphon_streams();  // Same.
 	void create_ffmpeg_context(HTTPD::StreamID stream_id);
@@ -152,10 +153,10 @@ private:
 	bool running = false;
 
 	std::unique_ptr<VADisplayWithCleanup> va_dpy;
-	VAConfigID config_id;
+	VAConfigID config_id_422, config_id_420;
 
 	struct VAKey {
-		unsigned width, height;
+		unsigned width, height, y_h_samp_factor, y_v_samp_factor;
 		movit::RGBTriplet white_balance;
 
 		bool operator< (const VAKey &other) const {
@@ -163,6 +164,10 @@ private:
 				return width < other.width;
 			if (height != other.height)
 				return height < other.height;
+			if (y_h_samp_factor != other.y_h_samp_factor)
+				return y_h_samp_factor < other.y_h_samp_factor;
+			if (y_v_samp_factor != other.y_v_samp_factor)
+				return y_v_samp_factor < other.y_v_samp_factor;
 			if (white_balance.r != other.white_balance.r)
 				return white_balance.r < other.white_balance.r;
 			if (white_balance.g != other.white_balance.g)
@@ -178,14 +183,14 @@ private:
 		VAEncSliceParameterBufferJPEG parms;
 	};
 	std::map<VAKey, VAData> va_data_for_parameters;
-	VAData get_va_data_for_parameters(unsigned width, unsigned height, const movit::RGBTriplet &white_balance);
+	VAData get_va_data_for_parameters(unsigned width, unsigned height, unsigned y_h_samp_factor, unsigned y_v_samp_factor, const movit::RGBTriplet &white_balance);
 
 	std::list<VAResources> va_resources_freelist;
 	std::mutex va_resources_mutex;
-	VAResources get_va_resources(unsigned width, unsigned height);
+	VAResources get_va_resources(unsigned width, unsigned height, uint32_t fourcc);
 	void release_va_resources(VAResources resources);
 
-	static std::unique_ptr<VADisplayWithCleanup> try_open_va(const std::string &va_display, std::string *error, VAConfigID *config_id);
+	static std::unique_ptr<VADisplayWithCleanup> try_open_va(const std::string &va_display, std::string *error, VAConfigID *config_id_422, VAConfigID *config_id_420);
 
 	uint8_t *tmp_y, *tmp_cbcr, *tmp_cb, *tmp_cr;  // Private to the encoder thread. Used by the libjpeg backend only.
diff --git a/nageru/pbo_frame_allocator.cpp b/nageru/pbo_frame_allocator.cpp
index 1000a72..ba53226 100644
--- a/nageru/pbo_frame_allocator.cpp
+++ b/nageru/pbo_frame_allocator.cpp
@@ -328,7 +328,7 @@ bmusb::FrameAllocator::Frame PBOFrameAllocator::create_frame(size_t width, size_
 	    mjpeg_encoder->should_encode_mjpeg_for_card(card_index)) {
 		if (mjpeg_encoder->using_vaapi()) {
 			VADisplay va_dpy = mjpeg_encoder->va_dpy->va_dpy;
-			MJPEGEncoder::VAResources resources = mjpeg_encoder->get_va_resources(width, height);
+			MJPEGEncoder::VAResources resources = mjpeg_encoder->get_va_resources(width, height, VA_FOURCC_UYVY);  // Only used by DeckLinkCapture, so always 4:2:2.
 			MJPEGEncoder::ReleaseVAResources release(mjpeg_encoder, resources);
 
 			if (resources.image.pitches[0] == stride) {
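
A quick postscript for reviewers: below is a minimal standalone sketch (not part of the commit; the 4x2 test pattern, sizes and pitch are invented for illustration) that mirrors the scalar fallback path of interleave_with_pitch() above. It demonstrates the one property worth eyeballing in the I420-to-NV12 conversion: each output row starts at the destination pitch, not at 2 * chroma_width, since VA-API images routinely pad pitches[1] beyond the nominal row size.

// Standalone sketch: interleaves two chroma planes (Cb, Cr) into an
// NV12-style CbCr plane, honoring a destination pitch, then checks the
// result. Same contract as the scalar path of interleave_with_pitch().
#include <cassert>
#include <cstdint>
#include <cstdio>
#include <cstring>

static void interleave(uint8_t *dst, const uint8_t *src1, const uint8_t *src2,
                       size_t src_width, size_t dst_pitch, size_t height)
{
	for (size_t y = 0; y < height; ++y) {
		uint8_t *dptr = dst + y * dst_pitch;
		const uint8_t *sptr1 = src1 + y * src_width;
		const uint8_t *sptr2 = src2 + y * src_width;
		for (size_t x = 0; x < src_width; ++x) {
			*dptr++ = *sptr1++;  // Cb sample.
			*dptr++ = *sptr2++;  // Cr sample.
		}
	}
}

int main()
{
	// An 8x4 I420 frame has 4x2 chroma planes; simulate a padded VA-API
	// pitch of 16 bytes (> 2 * 4 = 8 bytes of actual CbCr data per row).
	constexpr size_t w = 4, h = 2, pitch = 16;
	const uint8_t cb[w * h] = { 1, 2, 3, 4, 5, 6, 7, 8 };
	const uint8_t cr[w * h] = { 9, 10, 11, 12, 13, 14, 15, 16 };
	uint8_t cbcr[pitch * h];
	memset(cbcr, 0, sizeof(cbcr));

	interleave(cbcr, cb, cr, w, pitch, h);

	// Row 0 reads Cb0 Cr0 Cb1 Cr1 ...; row 1 starts at the pitch, not at 2 * w.
	assert(cbcr[0] == 1 && cbcr[1] == 9 && cbcr[2] == 2 && cbcr[3] == 10);
	assert(cbcr[pitch] == 5 && cbcr[pitch + 1] == 13);
	printf("NV12 interleave OK\n");
	return 0;
}

Note that the SSE2 fast path in the commit only triggers when dst_pitch == src_width * 2, i.e., no padding at all; a padded destination like the one above always takes the scalar loop.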