X-Git-Url: https://git.sesse.net/?a=blobdiff_plain;f=video_stream.cpp;h=aa93dc7a0a02499a8e4555bb655adae6bfa1ad0a;hb=3517cb889c4e1d348033bc6aeeeaa0189296827d;hp=2951ff10ced84b0ae8f9fa735f930b7ee4075032;hpb=1da9736ce17a74b652d3d203f92dd8129f154ae5;p=nageru

diff --git a/video_stream.cpp b/video_stream.cpp
index 2951ff1..aa93dc7 100644
--- a/video_stream.cpp
+++ b/video_stream.cpp
@@ -8,6 +8,7 @@ extern "C" {
 #include
 #include
 
+#include "chroma_subsampler.h"
 #include "context.h"
 #include "flow.h"
 #include "httpd.h"
@@ -98,7 +99,7 @@ struct VectorDestinationManager {
 };
 static_assert(std::is_standard_layout<VectorDestinationManager>::value, "");
 
-vector<uint8_t> encode_jpeg(const uint8_t *pixel_data, unsigned width, unsigned height)
+vector<uint8_t> encode_jpeg(const uint8_t *y_data, const uint8_t *cb_data, const uint8_t *cr_data, unsigned width, unsigned height)
 {
 	VectorDestinationManager dest;
 
@@ -116,28 +117,27 @@ vector<uint8_t> encode_jpeg(const uint8_t *pixel_data, unsigned width, unsigned
 	cinfo.image_width = width;
 	cinfo.image_height = height;
-	cinfo.input_components = 3;
+	cinfo.raw_data_in = true;
+	jpeg_set_colorspace(&cinfo, JCS_YCbCr);
 	cinfo.comp_info[0].h_samp_factor = 2;
 	cinfo.comp_info[0].v_samp_factor = 1;
 	cinfo.comp_info[1].h_samp_factor = 1;
 	cinfo.comp_info[1].v_samp_factor = 1;
 	cinfo.comp_info[2].h_samp_factor = 1;
 	cinfo.comp_info[2].v_samp_factor = 1;
-	// cinfo.CCIR601_sampling = true;  // TODO: Subsample ourselves.
+	cinfo.CCIR601_sampling = true;  // Seems to be mostly ignored by libjpeg, though.
 	jpeg_start_compress(&cinfo, true);
 
-	unique_ptr<uint8_t[]> row(new uint8_t[width * 3]);
-	JSAMPROW row_pointer[1] = { row.get() };
-	for (unsigned y = 0; y < height; ++y) {
-		const uint8_t *sptr = &pixel_data[(height - cinfo.next_scanline - 1) * width * 4];
-		uint8_t *dptr = row.get();
-		for (unsigned x = 0; x < width; ++x) {
-			*dptr++ = *sptr++;
-			*dptr++ = *sptr++;
-			*dptr++ = *sptr++;
-			++sptr;
+	JSAMPROW yptr[8], cbptr[8], crptr[8];
+	JSAMPARRAY data[3] = { yptr, cbptr, crptr };
+	for (unsigned y = 0; y < height; y += 8) {
+		for (unsigned yy = 0; yy < 8; ++yy) {
+			yptr[yy] = const_cast<uint8_t *>(&y_data[(height - y - yy - 1) * width]);
+			cbptr[yy] = const_cast<uint8_t *>(&cb_data[(height - y - yy - 1) * width/2]);
+			crptr[yy] = const_cast<uint8_t *>(&cr_data[(height - y - yy - 1) * width/2]);
 		}
-		(void) jpeg_write_scanlines(&cinfo, row_pointer, 1);
+
+		jpeg_write_raw_data(&cinfo, data, /*num_lines=*/8);
 	}
 
 	jpeg_finish_compress(&cinfo);
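The hunk above switches encode_jpeg() from per-scanline RGB conversion to libjpeg's raw-data interface, which accepts pre-converted, pre-subsampled Y'CbCr planes directly. A minimal standalone sketch of the same technique (the function name and the stdio destination are mine, not part of the patch; input is top-down and height is assumed divisible by 8, whereas the patch itself indexes rows from the bottom to compensate for the OpenGL readback):

#include <cstdint>
#include <cstdio>
#include <jpeglib.h>

void write_jpeg_422(FILE *fp, const uint8_t *y_data, const uint8_t *cb_data, const uint8_t *cr_data,
                    unsigned width, unsigned height)
{
	jpeg_compress_struct cinfo;
	jpeg_error_mgr jerr;
	cinfo.err = jpeg_std_error(&jerr);
	jpeg_create_compress(&cinfo);
	jpeg_stdio_dest(&cinfo, fp);

	cinfo.image_width = width;
	cinfo.image_height = height;
	cinfo.input_components = 3;
	cinfo.in_color_space = JCS_YCbCr;
	jpeg_set_defaults(&cinfo);
	cinfo.raw_data_in = TRUE;  // We supply ready-made planes; no color conversion.
	cinfo.comp_info[0].h_samp_factor = 2;  // Y at full horizontal resolution,
	cinfo.comp_info[1].h_samp_factor = 1;  // Cb and Cr at half (4:2:2).
	cinfo.comp_info[2].h_samp_factor = 1;
	cinfo.comp_info[0].v_samp_factor = 1;
	cinfo.comp_info[1].v_samp_factor = 1;
	cinfo.comp_info[2].v_samp_factor = 1;
	jpeg_start_compress(&cinfo, TRUE);

	// With v_samp_factor == 1 for every component, each call to
	// jpeg_write_raw_data() consumes exactly DCTSIZE (8) rows of every
	// plane; chroma rows are width/2 samples long.
	JSAMPROW yrows[8], cbrows[8], crrows[8];
	JSAMPARRAY planes[3] = { yrows, cbrows, crrows };
	for (unsigned row = 0; row < height; row += 8) {
		for (unsigned i = 0; i < 8; ++i) {
			yrows[i] = const_cast<uint8_t *>(y_data + (row + i) * width);
			cbrows[i] = const_cast<uint8_t *>(cb_data + (row + i) * (width / 2));
			crrows[i] = const_cast<uint8_t *>(cr_data + (row + i) * (width / 2));
		}
		jpeg_write_raw_data(&cinfo, planes, /*num_lines=*/8);
	}

	jpeg_finish_compress(&cinfo);
	jpeg_destroy_compress(&cinfo);
}

Skipping the RGB round trip also explains why the old 4-bytes-per-pixel copy loop (the ++sptr skipping the alpha byte) disappears entirely.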
@@ -149,11 +149,11 @@ vector<uint8_t> encode_jpeg(const uint8_t *pixel_data, unsigned width, unsigned
 VideoStream::VideoStream()
 {
 	using namespace movit;
-	// TODO: deduplicate code against JPEGFrameView?
-	ycbcr_convert_chain.reset(new EffectChain(1280, 720));
-	ImageFormat image_format;
-	image_format.color_space = COLORSPACE_sRGB;
-	image_format.gamma_curve = GAMMA_sRGB;
+
+	ImageFormat inout_format;
+	inout_format.color_space = COLORSPACE_sRGB;
+	inout_format.gamma_curve = GAMMA_sRGB;
+
 	ycbcr_format.luma_coefficients = YCBCR_REC_709;
 	ycbcr_format.full_range = true;  // JPEG.
 	ycbcr_format.num_levels = 256;
@@ -163,23 +163,39 @@ VideoStream::VideoStream()
 	ycbcr_format.cb_y_position = 0.5f;  // Irrelevant.
 	ycbcr_format.cr_x_position = 0.0f;
 	ycbcr_format.cr_y_position = 0.5f;
-	ycbcr_input = (movit::YCbCrInput *)ycbcr_convert_chain->add_input(new YCbCrInput(image_format, ycbcr_format, 1280, 720));
-	ImageFormat inout_format;
-	inout_format.color_space = COLORSPACE_sRGB;
-	inout_format.gamma_curve = GAMMA_sRGB;
-
-	check_error();
-	ycbcr_convert_chain->add_output(inout_format, OUTPUT_ALPHA_FORMAT_POSTMULTIPLIED);
-	check_error();
-	ycbcr_convert_chain->set_dither_bits(8);
-	check_error();
-	ycbcr_convert_chain->finalize();
-	check_error();
+	YCbCrFormat ycbcr_output_format = ycbcr_format;
+	ycbcr_output_format.chroma_subsampling_x = 1;
 
-	GLuint input_tex[num_interpolate_slots], gray_tex[num_interpolate_slots];
+	// TODO: deduplicate code against JPEGFrameView?
+	ycbcr_planar_convert_chain.reset(new EffectChain(1280, 720));
+	ycbcr_planar_input = (movit::YCbCrInput *)ycbcr_planar_convert_chain->add_input(new YCbCrInput(inout_format, ycbcr_format, 1280, 720, YCBCR_INPUT_PLANAR));
+
+	// One full Y'CbCr texture (for interpolation), one that's just Y (throwing away the
+	// Cb and Cr channels). The second copy is sort of redundant, but it's the easiest way
+	// of getting the gray data into a layered texture.
+	ycbcr_planar_convert_chain->add_ycbcr_output(inout_format, OUTPUT_ALPHA_FORMAT_POSTMULTIPLIED, ycbcr_output_format);
+	ycbcr_planar_convert_chain->add_ycbcr_output(inout_format, OUTPUT_ALPHA_FORMAT_POSTMULTIPLIED, ycbcr_output_format);
+	ycbcr_planar_convert_chain->set_dither_bits(8);
+	ycbcr_planar_convert_chain->finalize();
+
+	// Same, for semiplanar inputs.
+	ycbcr_semiplanar_convert_chain.reset(new EffectChain(1280, 720));
+	ycbcr_semiplanar_input = (movit::YCbCrInput *)ycbcr_semiplanar_convert_chain->add_input(new YCbCrInput(inout_format, ycbcr_format, 1280, 720, YCBCR_INPUT_SPLIT_Y_AND_CBCR));
+
+	// One full Y'CbCr texture (for interpolation), one that's just Y (throwing away the
+	// Cb and Cr channels). The second copy is sort of redundant, but it's the easiest way
+	// of getting the gray data into a layered texture.
+	ycbcr_semiplanar_convert_chain->add_ycbcr_output(inout_format, OUTPUT_ALPHA_FORMAT_POSTMULTIPLIED, ycbcr_output_format);
+	ycbcr_semiplanar_convert_chain->add_ycbcr_output(inout_format, OUTPUT_ALPHA_FORMAT_POSTMULTIPLIED, ycbcr_output_format);
+	ycbcr_semiplanar_convert_chain->set_dither_bits(8);
+	ycbcr_semiplanar_convert_chain->finalize();
+
+	GLuint input_tex[num_interpolate_slots], gray_tex[num_interpolate_slots], cb_tex[num_interpolate_slots], cr_tex[num_interpolate_slots];
 	glCreateTextures(GL_TEXTURE_2D_ARRAY, 10, input_tex);
 	glCreateTextures(GL_TEXTURE_2D_ARRAY, 10, gray_tex);
+	glCreateTextures(GL_TEXTURE_2D, 10, cb_tex);
+	glCreateTextures(GL_TEXTURE_2D, 10, cr_tex);
 	check_error();
 	constexpr size_t width = 1280, height = 720;  // FIXME: adjustable width, height
 	int levels = find_num_levels(width, height);
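For reference, luma_coefficients = YCBCR_REC_709 together with full_range = true (the JPEG convention, 256 levels) corresponds to the per-pixel mapping below. This helper is purely illustrative; the names clamp8 and rgb_to_ycbcr_709_full are mine, not code from the patch, and the actual conversion happens inside movit's shaders.

#include <algorithm>
#include <cstdint>

inline uint8_t clamp8(float v) { return (uint8_t)std::min(255.0f, std::max(0.0f, v)); }

// Full-range Rec. 709 Y'CbCr for one 8-bit RGB pixel.
inline void rgb_to_ycbcr_709_full(uint8_t r, uint8_t g, uint8_t b,
                                  uint8_t *y, uint8_t *cb, uint8_t *cr)
{
	float yf = 0.2126f * r + 0.7152f * g + 0.0722f * b;  // Y' in [0, 255].
	*y = clamp8(yf + 0.5f);
	*cb = clamp8((b - yf) / 1.8556f + 128.5f);  // 1.8556 = 2 * (1 - 0.0722)
	*cr = clamp8((r - yf) / 1.5748f + 128.5f);  // 1.5748 = 2 * (1 - 0.2126)
}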
@@ -188,22 +204,32 @@ VideoStream::VideoStream()
 		check_error();
 		glTextureStorage3D(gray_tex[i], levels, GL_R8, width, height, 2);
 		check_error();
+		glTextureStorage2D(cb_tex[i], 1, GL_R8, width / 2, height);
+		check_error();
+		glTextureStorage2D(cr_tex[i], 1, GL_R8, width / 2, height);
+		check_error();
 
 		InterpolatedFrameResources resource;
 		resource.input_tex = input_tex[i];
 		resource.gray_tex = gray_tex[i];
+		resource.cb_tex = cb_tex[i];
+		resource.cr_tex = cr_tex[i];
 		glCreateFramebuffers(2, resource.input_fbos);
 		check_error();
 		glNamedFramebufferTextureLayer(resource.input_fbos[0], GL_COLOR_ATTACHMENT0, input_tex[i], 0, 0);
 		check_error();
+		glNamedFramebufferTextureLayer(resource.input_fbos[0], GL_COLOR_ATTACHMENT1, gray_tex[i], 0, 0);
+		check_error();
 		glNamedFramebufferTextureLayer(resource.input_fbos[1], GL_COLOR_ATTACHMENT0, input_tex[i], 0, 1);
 		check_error();
+		glNamedFramebufferTextureLayer(resource.input_fbos[1], GL_COLOR_ATTACHMENT1, gray_tex[i], 0, 1);
+		check_error();
 
-		GLuint buf = GL_COLOR_ATTACHMENT0;
-		glNamedFramebufferDrawBuffers(resource.input_fbos[0], 1, &buf);
+		GLuint bufs[] = { GL_COLOR_ATTACHMENT0, GL_COLOR_ATTACHMENT1 };
+		glNamedFramebufferDrawBuffers(resource.input_fbos[0], 2, bufs);
 		check_error();
-		glNamedFramebufferDrawBuffers(resource.input_fbos[1], 1, &buf);
+		glNamedFramebufferDrawBuffers(resource.input_fbos[1], 2, bufs);
 		check_error();
 
 		glCreateBuffers(1, &resource.pbo);
@@ -216,9 +242,9 @@ VideoStream::VideoStream()
 	check_error();
 
-	compute_flow.reset(new DISComputeFlow(width, height, operating_point3));
-	gray.reset(new GrayscaleConversion);  // NOTE: Must come after DISComputeFlow, since it sets up the VBO!
-	interpolate.reset(new Interpolate(width, height, operating_point3));
+	compute_flow.reset(new DISComputeFlow(width, height, operating_point2));
+	interpolate.reset(new Interpolate(width, height, operating_point2, /*split_ycbcr_output=*/true));
+	chroma_subsampler.reset(new ChromaSubsampler);
 	check_error();
 }
@@ -278,6 +304,7 @@ void VideoStream::schedule_interpolated_frame(int64_t output_pts, unsigned strea
 		unique_lock<mutex> lock(queue_lock);
 		if (interpolate_resources.empty()) {
 			fprintf(stderr, "WARNING: Too many interpolated frames already in transit; dropping one.\n");
+			JPEGFrameView::insert_interpolated_frame(stream_idx, output_pts, nullptr);
 			return;
 		}
 		resources = interpolate_resources.front();
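The hunk above leans on a small fixed pool of GPU slots (interpolate_resources), taken under queue_lock when a frame is scheduled and handed back once its readback completes. The diff only shows the acquire side; this sketch of the pattern, with the release half assumed symmetric and simplified field types, is mine:

#include <deque>
#include <mutex>

struct InterpolatedFrameResources {
	unsigned input_tex, gray_tex, cb_tex, cr_tex;  // GLuint in the real code.
	unsigned input_fbos[2];
	unsigned pbo;
	void *pbo_contents;
};

std::mutex queue_lock;
std::deque<InterpolatedFrameResources> interpolate_resources;  // num_interpolate_slots entries.

bool try_acquire_slot(InterpolatedFrameResources *out)
{
	std::unique_lock<std::mutex> lock(queue_lock);
	if (interpolate_resources.empty()) {
		return false;  // Caller drops the frame (and, per the hunk above, tells the display).
	}
	*out = interpolate_resources.front();
	interpolate_resources.pop_front();
	return true;
}

void release_slot(const InterpolatedFrameResources &resources)
{
	std::unique_lock<std::mutex> lock(queue_lock);
	interpolate_resources.push_back(resources);
}

Bounding the pool keeps GPU memory fixed and turns overload into frame drops instead of unbounded queueing; the new insert_interpolated_frame(..., nullptr) call makes sure the display is not left waiting for a frame that will never arrive.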
@@ -298,36 +325,49 @@ void VideoStream::schedule_interpolated_frame(int64_t output_pts, unsigned strea
 		JPEGID jpeg_id;
 		jpeg_id.stream_idx = stream_idx;
 		jpeg_id.pts = frame_no == 1 ? input_second_pts : input_first_pts;
+		jpeg_id.interpolated = false;
 		bool did_decode;
 		shared_ptr<Frame> frame = decode_jpeg_with_cache(jpeg_id, DECODE_IF_NOT_IN_CACHE, &did_decode);
 		ycbcr_format.chroma_subsampling_x = frame->chroma_subsampling_x;
 		ycbcr_format.chroma_subsampling_y = frame->chroma_subsampling_y;
-		ycbcr_input->change_ycbcr_format(ycbcr_format);
-		ycbcr_input->set_width(frame->width);
-		ycbcr_input->set_height(frame->height);
-		ycbcr_input->set_pixel_data(0, frame->y.get());
-		ycbcr_input->set_pixel_data(1, frame->cb.get());
-		ycbcr_input->set_pixel_data(2, frame->cr.get());
-		ycbcr_input->set_pitch(0, frame->pitch_y);
-		ycbcr_input->set_pitch(1, frame->pitch_chroma);
-		ycbcr_input->set_pitch(2, frame->pitch_chroma);
-		ycbcr_convert_chain->render_to_fbo(resources.input_fbos[frame_no], 1280, 720);
+
+		if (frame->is_semiplanar) {
+			ycbcr_semiplanar_input->change_ycbcr_format(ycbcr_format);
+			ycbcr_semiplanar_input->set_width(frame->width);
+			ycbcr_semiplanar_input->set_height(frame->height);
+			ycbcr_semiplanar_input->set_pixel_data(0, frame->y.get());
+			ycbcr_semiplanar_input->set_pixel_data(1, frame->cbcr.get());
+			ycbcr_semiplanar_input->set_pitch(0, frame->pitch_y);
+			ycbcr_semiplanar_input->set_pitch(1, frame->pitch_chroma);
+			ycbcr_semiplanar_convert_chain->render_to_fbo(resources.input_fbos[frame_no], 1280, 720);
+		} else {
+			ycbcr_planar_input->change_ycbcr_format(ycbcr_format);
+			ycbcr_planar_input->set_width(frame->width);
+			ycbcr_planar_input->set_height(frame->height);
+			ycbcr_planar_input->set_pixel_data(0, frame->y.get());
+			ycbcr_planar_input->set_pixel_data(1, frame->cb.get());
+			ycbcr_planar_input->set_pixel_data(2, frame->cr.get());
+			ycbcr_planar_input->set_pitch(0, frame->pitch_y);
+			ycbcr_planar_input->set_pitch(1, frame->pitch_chroma);
+			ycbcr_planar_input->set_pitch(2, frame->pitch_chroma);
+			ycbcr_planar_convert_chain->render_to_fbo(resources.input_fbos[frame_no], 1280, 720);
+		}
 	}
 
 	glGenerateTextureMipmap(resources.input_tex);
-
-	// Compute the interpolated frame.
-	check_error();
-	gray->exec(resources.input_tex, resources.gray_tex, 1280, 720, /*num_layers=*/2);
 	check_error();
 	glGenerateTextureMipmap(resources.gray_tex);
 	check_error();
+
+	// Compute the interpolated frame.
 	qf.flow_tex = compute_flow->exec(resources.gray_tex, DISComputeFlow::FORWARD_AND_BACKWARD, DISComputeFlow::DO_NOT_RESIZE_FLOW);
 	check_error();
-
-	qf.output_tex = interpolate->exec(resources.input_tex, qf.flow_tex, 1280, 720, alpha);
+	tie(qf.output_tex, qf.cbcr_tex) = interpolate->exec(resources.input_tex, resources.gray_tex, qf.flow_tex, 1280, 720, alpha);
 	check_error();
 
+	// Subsample and split Cb/Cr.
+	chroma_subsampler->subsample_chroma(qf.cbcr_tex, 1280, 720, resources.cb_tex, resources.cr_tex);
+
 	// We could have released qf.flow_tex here, but to make sure we don't cause a stall
 	// when trying to reuse it for the next frame, we can just as well hold on to it
 	// and release it only when the readback is done.
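interpolate->exec() now returns a second texture holding interleaved full-resolution CbCr, which subsample_chroma() reduces to separate half-width Cb and Cr planes, ready for the 4:2:2 JPEG path. A CPU reference of that reduction, illustrative only (the name subsample_chroma_reference is mine; the real ChromaSubsampler runs on the GPU, and its exact filter taps and chroma siting may differ):

#include <cstdint>

// 4:4:4 interleaved CbCr -> separate 4:2:2 Cb and Cr planes, by averaging
// horizontal pairs. Pair averaging sites the chroma sample halfway between
// the two luma samples, which matches JPEG's centered convention.
void subsample_chroma_reference(const uint8_t *cbcr, unsigned width, unsigned height,
                                uint8_t *cb_out, uint8_t *cr_out)
{
	for (unsigned y = 0; y < height; ++y) {
		const uint8_t *in = cbcr + y * width * 2;  // Two bytes (Cb, Cr) per input pixel.
		uint8_t *cb = cb_out + y * (width / 2);
		uint8_t *cr = cr_out + y * (width / 2);
		for (unsigned x = 0; x < width / 2; ++x) {
			cb[x] = uint8_t((in[4 * x + 0] + in[4 * x + 2] + 1) / 2);
			cr[x] = uint8_t((in[4 * x + 1] + in[4 * x + 3] + 1) / 2);
		}
	}
}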
@@ -336,7 +376,11 @@ void VideoStream::schedule_interpolated_frame(int64_t output_pts, unsigned strea
 	glPixelStorei(GL_PACK_ROW_LENGTH, 0);
 	glBindBuffer(GL_PIXEL_PACK_BUFFER, resources.pbo);
 	check_error();
-	glGetTextureImage(qf.output_tex, 0, GL_RGBA, GL_UNSIGNED_BYTE, 1280 * 720 * 4, nullptr);
+	glGetTextureImage(qf.output_tex, 0, GL_RED, GL_UNSIGNED_BYTE, 1280 * 720 * 4, BUFFER_OFFSET(0));
+	check_error();
+	glGetTextureImage(resources.cb_tex, 0, GL_RED, GL_UNSIGNED_BYTE, 1280 * 720 * 3, BUFFER_OFFSET(1280 * 720));
+	check_error();
+	glGetTextureImage(resources.cr_tex, 0, GL_RED, GL_UNSIGNED_BYTE, 1280 * 720 * 3 - 640 * 720, BUFFER_OFFSET(1280 * 720 + 640 * 720));
 	check_error();
 	glBindBuffer(GL_PIXEL_PACK_BUFFER, 0);
@@ -385,9 +429,34 @@ void VideoStream::encode_thread_func()
 		} else if (qf.type == QueuedFrame::INTERPOLATED) {
 			glClientWaitSync(qf.fence.get(), /*flags=*/0, GL_TIMEOUT_IGNORED);
 
-			vector<uint8_t> jpeg = encode_jpeg((const uint8_t *)qf.resources.pbo_contents, 1280, 720);
+			const uint8_t *y = (const uint8_t *)qf.resources.pbo_contents;
+			const uint8_t *cb = (const uint8_t *)qf.resources.pbo_contents + 1280 * 720;
+			const uint8_t *cr = (const uint8_t *)qf.resources.pbo_contents + 1280 * 720 + 640 * 720;
+
+			// Send a copy of the frame on to display.
+			shared_ptr<Frame> frame(new Frame);
+			frame->y.reset(new uint8_t[1280 * 720]);
+			frame->cb.reset(new uint8_t[640 * 720]);
+			frame->cr.reset(new uint8_t[640 * 720]);
+			for (unsigned yy = 0; yy < 720; ++yy) {
+				memcpy(frame->y.get() + 1280 * yy, y + 1280 * (719 - yy), 1280);
+				memcpy(frame->cb.get() + 640 * yy, cb + 640 * (719 - yy), 640);
+				memcpy(frame->cr.get() + 640 * yy, cr + 640 * (719 - yy), 640);
+			}
+			frame->is_semiplanar = false;
+			frame->width = 1280;
+			frame->height = 720;
+			frame->chroma_subsampling_x = 2;
+			frame->chroma_subsampling_y = 1;
+			frame->pitch_y = 1280;
+			frame->pitch_chroma = 640;
+			JPEGFrameView::insert_interpolated_frame(qf.stream_idx, qf.output_pts, std::move(frame));
+
+			// Now JPEG encode it, and send it on to the stream.
+			vector<uint8_t> jpeg = encode_jpeg(y, cb, cr, 1280, 720);
 			compute_flow->release_texture(qf.flow_tex);
 			interpolate->release_texture(qf.output_tex);
+			interpolate->release_texture(qf.cbcr_tex);
 
 			AVPacket pkt;
 			av_init_packet(&pkt);
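The readback packs the three planes back to back in one PBO, and every consumer has to undo the bottom-up row order that glGetTextureImage() delivers. The constants and helper below restate that layout and the flip arithmetic (the names kWidth, kHeight, kYOffset, kCbOffset, kCrOffset and copy_plane_flipped are mine, not from the patch): encode_jpeg() flips implicitly via its (height - y - yy - 1) row indexing, while the display copy in encode_thread_func() flips with the row-reversed memcpy()s above, exactly as this helper does for one plane.

#include <cstdint>
#include <cstring>

// Plane layout inside the PBO, matching the offsets used in the readback
// and in the y/cb/cr pointer arithmetic (4:2:2: chroma planes are half width).
constexpr size_t kWidth = 1280, kHeight = 720;
constexpr size_t kYOffset = 0;                                    // Y: 1280 * 720 = 921600 bytes.
constexpr size_t kCbOffset = kWidth * kHeight;                    // Cb: 640 * 720 bytes, at 921600.
constexpr size_t kCrOffset = kCbOffset + (kWidth / 2) * kHeight;  // Cr: 640 * 720 bytes, at 1382400.

// Copy one bottom-up plane (as read back from OpenGL) into a top-down one.
void copy_plane_flipped(uint8_t *dst, const uint8_t *src, size_t width, size_t height)
{
	for (size_t y = 0; y < height; ++y) {
		memcpy(dst + width * y, src + width * (height - 1 - y), width);
	}
}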