From: Steinar H. Gunderson Date: Mon, 10 Dec 2018 21:13:12 +0000 (+0100) Subject: Make width/height configurable in Futatabi. X-Git-Tag: 1.8.0~35 X-Git-Url: https://git.sesse.net/?p=nageru;a=commitdiff_plain;h=52336c086b8bc355b55e2046e3a055b1b4c70ef7 Make width/height configurable in Futatabi. --- diff --git a/futatabi/flags.cpp b/futatabi/flags.cpp index 4c75370..0de7b02 100644 --- a/futatabi/flags.cpp +++ b/futatabi/flags.cpp @@ -23,6 +23,8 @@ void usage() fprintf(stderr, "Usage: futatabi [OPTION]... SOURCE_URL\n"); fprintf(stderr, "\n"); fprintf(stderr, " --help print usage information\n"); + fprintf(stderr, " -w, --width output width in pixels (default 1280)\n"); + fprintf(stderr, " -h, --height output height in pixels (default 720)\n"); fprintf(stderr, " --slow-down-input slow down input to realtime (default on if no\n"); fprintf(stderr, " source URL given)\n"); fprintf(stderr, " -q, --interpolation-quality N 1 = fastest\n"); @@ -37,6 +39,8 @@ void parse_flags(int argc, char * const argv[]) { static const option long_options[] = { { "help", no_argument, 0, OPTION_HELP }, + { "width", required_argument, 0, 'w' }, + { "height", required_argument, 0, 'h' }, { "slow-down-input", no_argument, 0, OPTION_SLOW_DOWN_INPUT }, { "interpolation-quality", required_argument, 0, 'q' }, { "working-directory", required_argument, 0, 'd' }, @@ -45,12 +49,18 @@ void parse_flags(int argc, char * const argv[]) }; for ( ;; ) { int option_index = 0; - int c = getopt_long(argc, argv, "q:d:", long_options, &option_index); + int c = getopt_long(argc, argv, "w:h:q:d:", long_options, &option_index); if (c == -1) { break; } switch (c) { + case 'w': + global_flags.width = atoi(optarg); + break; + case 'h': + global_flags.height = atoi(optarg); + break; case OPTION_SLOW_DOWN_INPUT: global_flags.slow_down_input = true; break; diff --git a/futatabi/flags.h b/futatabi/flags.h index 5e9d34b..3950936 100644 --- a/futatabi/flags.h +++ b/futatabi/flags.h @@ -6,6 +6,7 @@ #include "defs.h" struct 
Flags { + int width = 1280, height = 720; std::string stream_source; std::string working_directory = "."; bool slow_down_input = false; diff --git a/futatabi/jpeg_frame_view.cpp b/futatabi/jpeg_frame_view.cpp index a9cf11c..059a341 100644 --- a/futatabi/jpeg_frame_view.cpp +++ b/futatabi/jpeg_frame_view.cpp @@ -1,6 +1,7 @@ #include "jpeg_frame_view.h" #include "defs.h" +#include "flags.h" #include "jpeg_destroyer.h" #include "jpeglib_error_wrapper.h" #include "shared/post_to_main_thread.h" @@ -474,15 +475,15 @@ shared_ptr<Frame> get_black_frame() static once_flag flag; call_once(flag, [] { black_frame.reset(new Frame); - black_frame->y.reset(new uint8_t[1280 * 720]); - black_frame->cb.reset(new uint8_t[(1280 / 2) * (720 / 2)]); - black_frame->cr.reset(new uint8_t[(1280 / 2) * (720 / 2)]); - black_frame->width = 1280; - black_frame->height = 720; + black_frame->y.reset(new uint8_t[global_flags.width * global_flags.height]); + black_frame->cb.reset(new uint8_t[(global_flags.width / 2) * (global_flags.height / 2)]); + black_frame->cr.reset(new uint8_t[(global_flags.width / 2) * (global_flags.height / 2)]); + black_frame->width = global_flags.width; + black_frame->height = global_flags.height; black_frame->chroma_subsampling_x = 2; black_frame->chroma_subsampling_y = 2; - black_frame->pitch_y = 1280; - black_frame->pitch_chroma = 1280 / 2; + black_frame->pitch_y = global_flags.width; + black_frame->pitch_chroma = global_flags.width / 2; }); return black_frame; } diff --git a/futatabi/video_stream.cpp b/futatabi/video_stream.cpp index 87e843a..2f8f884 100644 --- a/futatabi/video_stream.cpp +++ b/futatabi/video_stream.cpp @@ -146,7 +146,7 @@ VideoStream::VideoStream() glCreateTextures(GL_TEXTURE_2D, num_interpolate_slots, cr_tex); check_error(); - constexpr size_t width = 1280, height = 720; // FIXME: adjustable width, height + size_t width = global_flags.width, height = global_flags.height; int levels = find_num_levels(width, height); for (size_t i = 0; i <
num_interpolate_slots; ++i) { glTextureStorage3D(input_tex[i], levels, GL_RGBA8, width, height, 2); @@ -226,11 +226,11 @@ VideoStream::VideoStream() check_error(); // The “last frame” is initially black. - unique_ptr<uint8_t[]> y(new uint8_t[1280 * 720]); - unique_ptr<uint8_t[]> cb_or_cr(new uint8_t[640 * 720]); - memset(y.get(), 16, 1280 * 720); - memset(cb_or_cr.get(), 128, 640 * 720); - last_frame = encode_jpeg(y.get(), cb_or_cr.get(), cb_or_cr.get(), 1280, 720); + unique_ptr<uint8_t[]> y(new uint8_t[global_flags.width * global_flags.height]); + unique_ptr<uint8_t[]> cb_or_cr(new uint8_t[(global_flags.width / 2) * global_flags.height]); + memset(y.get(), 16, global_flags.width * global_flags.height); + memset(cb_or_cr.get(), 128, (global_flags.width / 2) * global_flags.height); + last_frame = encode_jpeg(y.get(), cb_or_cr.get(), cb_or_cr.get(), global_flags.width, global_flags.height); } VideoStream::~VideoStream() {} @@ -256,12 +256,11 @@ void VideoStream::start() string video_extradata; - constexpr int width = 1280, height = 720; // Doesn't matter for MJPEG. + size_t width = global_flags.width, height = global_flags.height; // Doesn't matter for MJPEG.
stream_mux.reset(new Mux(avctx, width, height, video_codec, video_extradata, /*audio_codec_parameters=*/nullptr, AVCOL_SPC_BT709, Mux::WITHOUT_AUDIO, COARSE_TIMEBASE, /*write_callback=*/nullptr, Mux::WRITE_FOREGROUND, {})); - encode_thread = thread(&VideoStream::encode_thread_func, this); } @@ -347,7 +346,7 @@ void VideoStream::schedule_faded_frame(steady_clock::time_point local_pts, int64 shared_ptr<Frame> frame1 = decode_jpeg_with_cache(frame1_spec, DECODE_IF_NOT_IN_CACHE, &frame_reader, &did_decode); shared_ptr<Frame> frame2 = decode_jpeg_with_cache(frame2_spec, DECODE_IF_NOT_IN_CACHE, &frame_reader, &did_decode); - ycbcr_semiplanar_converter->prepare_chain_for_fade(frame1, frame2, fade_alpha)->render_to_fbo(resources->fade_fbo, 1280, 720); + ycbcr_semiplanar_converter->prepare_chain_for_fade(frame1, frame2, fade_alpha)->render_to_fbo(resources->fade_fbo, global_flags.width, global_flags.height); QueuedFrame qf; qf.local_pts = local_pts; @@ -360,17 +359,17 @@ void VideoStream::schedule_faded_frame(steady_clock::time_point local_pts, int64 qf.secondary_frame = frame2_spec; // Subsample and split Cb/Cr. - chroma_subsampler->subsample_chroma(resources->fade_cbcr_output_tex, 1280, 720, resources->cb_tex, resources->cr_tex); + chroma_subsampler->subsample_chroma(resources->fade_cbcr_output_tex, global_flags.width, global_flags.height, resources->cb_tex, resources->cr_tex); // Read it down (asynchronously) to the CPU.
glPixelStorei(GL_PACK_ROW_LENGTH, 0); glBindBuffer(GL_PIXEL_PACK_BUFFER, resources->pbo); check_error(); - glGetTextureImage(resources->fade_y_output_tex, 0, GL_RED, GL_UNSIGNED_BYTE, 1280 * 720 * 4, BUFFER_OFFSET(0)); + glGetTextureImage(resources->fade_y_output_tex, 0, GL_RED, GL_UNSIGNED_BYTE, global_flags.width * global_flags.height * 4, BUFFER_OFFSET(0)); check_error(); - glGetTextureImage(resources->cb_tex, 0, GL_RED, GL_UNSIGNED_BYTE, 1280 * 720 * 3, BUFFER_OFFSET(1280 * 720)); + glGetTextureImage(resources->cb_tex, 0, GL_RED, GL_UNSIGNED_BYTE, global_flags.width * global_flags.height * 3, BUFFER_OFFSET(global_flags.width * global_flags.height)); check_error(); - glGetTextureImage(resources->cr_tex, 0, GL_RED, GL_UNSIGNED_BYTE, 1280 * 720 * 3 - 640 * 720, BUFFER_OFFSET(1280 * 720 + 640 * 720)); + glGetTextureImage(resources->cr_tex, 0, GL_RED, GL_UNSIGNED_BYTE, global_flags.width * global_flags.height * 3 - (global_flags.width / 2) * global_flags.height, BUFFER_OFFSET(global_flags.width * global_flags.height + (global_flags.width / 2) * global_flags.height)); check_error(); glBindBuffer(GL_PIXEL_PACK_BUFFER, 0); @@ -425,7 +424,7 @@ void VideoStream::schedule_interpolated_frame(steady_clock::time_point local_pts FrameOnDisk frame_spec = frame_no == 1 ? frame2 : frame1; bool did_decode; shared_ptr<Frame> frame = decode_jpeg_with_cache(frame_spec, DECODE_IF_NOT_IN_CACHE, &frame_reader, &did_decode); - ycbcr_converter->prepare_chain_for_conversion(frame)->render_to_fbo(resources->input_fbos[frame_no], 1280, 720); + ycbcr_converter->prepare_chain_for_conversion(frame)->render_to_fbo(resources->input_fbos[frame_no], global_flags.width, global_flags.height); } glGenerateTextureMipmap(resources->input_tex); @@ -439,7 +438,7 @@ void VideoStream::schedule_interpolated_frame(steady_clock::time_point local_pts if (secondary_frame.pts != -1) { // Fade. First kick off the interpolation.
- tie(qf.output_tex, ignore) = interpolate_no_split->exec(resources->input_tex, resources->gray_tex, qf.flow_tex, 1280, 720, alpha); + tie(qf.output_tex, ignore) = interpolate_no_split->exec(resources->input_tex, resources->gray_tex, qf.flow_tex, global_flags.width, global_flags.height, alpha); check_error(); // Now decode the image we are fading against. @@ -447,18 +446,18 @@ void VideoStream::schedule_interpolated_frame(steady_clock::time_point local_pts shared_ptr<Frame> frame2 = decode_jpeg_with_cache(secondary_frame, DECODE_IF_NOT_IN_CACHE, &frame_reader, &did_decode); // Then fade against it, putting it into the fade Y' and CbCr textures. - ycbcr_semiplanar_converter->prepare_chain_for_fade_from_texture(qf.output_tex, 1280, 720, frame2, fade_alpha)->render_to_fbo(resources->fade_fbo, 1280, 720); + ycbcr_semiplanar_converter->prepare_chain_for_fade_from_texture(qf.output_tex, global_flags.width, global_flags.height, frame2, fade_alpha)->render_to_fbo(resources->fade_fbo, global_flags.width, global_flags.height); // Subsample and split Cb/Cr. - chroma_subsampler->subsample_chroma(resources->fade_cbcr_output_tex, 1280, 720, resources->cb_tex, resources->cr_tex); + chroma_subsampler->subsample_chroma(resources->fade_cbcr_output_tex, global_flags.width, global_flags.height, resources->cb_tex, resources->cr_tex); interpolate_no_split->release_texture(qf.output_tex); } else { - tie(qf.output_tex, qf.cbcr_tex) = interpolate->exec(resources->input_tex, resources->gray_tex, qf.flow_tex, 1280, 720, alpha); + tie(qf.output_tex, qf.cbcr_tex) = interpolate->exec(resources->input_tex, resources->gray_tex, qf.flow_tex, global_flags.width, global_flags.height, alpha); check_error(); // Subsample and split Cb/Cr.
- chroma_subsampler->subsample_chroma(qf.cbcr_tex, 1280, 720, resources->cb_tex, resources->cr_tex); + chroma_subsampler->subsample_chroma(qf.cbcr_tex, global_flags.width, global_flags.height, resources->cb_tex, resources->cr_tex); } // We could have released qf.flow_tex here, but to make sure we don't cause a stall @@ -470,14 +469,14 @@ void VideoStream::schedule_interpolated_frame(steady_clock::time_point local_pts glBindBuffer(GL_PIXEL_PACK_BUFFER, resources->pbo); check_error(); if (secondary_frame.pts != -1) { - glGetTextureImage(resources->fade_y_output_tex, 0, GL_RED, GL_UNSIGNED_BYTE, 1280 * 720 * 4, BUFFER_OFFSET(0)); + glGetTextureImage(resources->fade_y_output_tex, 0, GL_RED, GL_UNSIGNED_BYTE, global_flags.width * global_flags.height * 4, BUFFER_OFFSET(0)); } else { - glGetTextureImage(qf.output_tex, 0, GL_RED, GL_UNSIGNED_BYTE, 1280 * 720 * 4, BUFFER_OFFSET(0)); + glGetTextureImage(qf.output_tex, 0, GL_RED, GL_UNSIGNED_BYTE, global_flags.width * global_flags.height * 4, BUFFER_OFFSET(0)); } check_error(); - glGetTextureImage(resources->cb_tex, 0, GL_RED, GL_UNSIGNED_BYTE, 1280 * 720 * 3, BUFFER_OFFSET(1280 * 720)); + glGetTextureImage(resources->cb_tex, 0, GL_RED, GL_UNSIGNED_BYTE, global_flags.width * global_flags.height * 3, BUFFER_OFFSET(global_flags.width * global_flags.height)); check_error(); - glGetTextureImage(resources->cr_tex, 0, GL_RED, GL_UNSIGNED_BYTE, 1280 * 720 * 3 - 640 * 720, BUFFER_OFFSET(1280 * 720 + 640 * 720)); + glGetTextureImage(resources->cr_tex, 0, GL_RED, GL_UNSIGNED_BYTE, global_flags.width * global_flags.height * 3 - (global_flags.width / 2) * global_flags.height, BUFFER_OFFSET(global_flags.width * global_flags.height + (global_flags.width / 2) * global_flags.height)); check_error(); glBindBuffer(GL_PIXEL_PACK_BUFFER, 0); @@ -588,10 +587,10 @@ void VideoStream::encode_thread_func() } else if (qf.type == QueuedFrame::FADED) { glClientWaitSync(qf.fence.get(), /*flags=*/0, GL_TIMEOUT_IGNORED); - shared_ptr<Frame> frame =
frame_from_pbo(qf.resources->pbo_contents, 1280, 720); + shared_ptr<Frame> frame = frame_from_pbo(qf.resources->pbo_contents, global_flags.width, global_flags.height); // Now JPEG encode it, and send it on to the stream. - vector<uint8_t> jpeg = encode_jpeg(frame->y.get(), frame->cb.get(), frame->cr.get(), 1280, 720); + vector<uint8_t> jpeg = encode_jpeg(frame->y.get(), frame->cb.get(), frame->cr.get(), global_flags.width, global_flags.height); AVPacket pkt; av_init_packet(&pkt); @@ -604,13 +603,13 @@ void VideoStream::encode_thread_func() glClientWaitSync(qf.fence.get(), /*flags=*/0, GL_TIMEOUT_IGNORED); // Send it on to display. - shared_ptr<Frame> frame = frame_from_pbo(qf.resources->pbo_contents, 1280, 720); + shared_ptr<Frame> frame = frame_from_pbo(qf.resources->pbo_contents, global_flags.width, global_flags.height); if (qf.display_decoded_func != nullptr) { qf.display_decoded_func(frame); } // Now JPEG encode it, and send it on to the stream. - vector<uint8_t> jpeg = encode_jpeg(frame->y.get(), frame->cb.get(), frame->cr.get(), 1280, 720); + vector<uint8_t> jpeg = encode_jpeg(frame->y.get(), frame->cb.get(), frame->cr.get(), global_flags.width, global_flags.height); compute_flow->release_texture(qf.flow_tex); if (qf.type != QueuedFrame::FADED_INTERPOLATED) { interpolate->release_texture(qf.output_tex); diff --git a/futatabi/ycbcr_converter.cpp b/futatabi/ycbcr_converter.cpp index a8a40b1..689de5c 100644 --- a/futatabi/ycbcr_converter.cpp +++ b/futatabi/ycbcr_converter.cpp @@ -1,5 +1,6 @@ #include "ycbcr_converter.h" +#include "flags.h" #include "jpeg_frame.h" #include @@ -63,15 +64,15 @@ YCbCrConverter::YCbCrConverter(YCbCrConverter::OutputMode output_mode, ResourceP ycbcr_output_format.chroma_subsampling_x = 1; // Planar Y'CbCr decoding chain.
- planar_chain.reset(new EffectChain(1280, 720, resource_pool)); - ycbcr_planar_input = (YCbCrInput *)planar_chain->add_input(new YCbCrInput(inout_format, ycbcr_format, 1280, 720, YCBCR_INPUT_PLANAR)); + planar_chain.reset(new EffectChain(global_flags.width, global_flags.height, resource_pool)); + ycbcr_planar_input = (YCbCrInput *)planar_chain->add_input(new YCbCrInput(inout_format, ycbcr_format, global_flags.width, global_flags.height, YCBCR_INPUT_PLANAR)); setup_outputs(output_mode, inout_format, ycbcr_output_format, planar_chain.get()); planar_chain->set_dither_bits(8); planar_chain->finalize(); // Semiplanar Y'CbCr decoding chain (for images coming from VA-API). - semiplanar_chain.reset(new EffectChain(1280, 720, resource_pool)); - ycbcr_semiplanar_input = (YCbCrInput *)semiplanar_chain->add_input(new YCbCrInput(inout_format, ycbcr_format, 1280, 720, YCBCR_INPUT_SPLIT_Y_AND_CBCR)); + semiplanar_chain.reset(new EffectChain(global_flags.width, global_flags.height, resource_pool)); + ycbcr_semiplanar_input = (YCbCrInput *)semiplanar_chain->add_input(new YCbCrInput(inout_format, ycbcr_format, global_flags.width, global_flags.height, YCBCR_INPUT_SPLIT_Y_AND_CBCR)); setup_outputs(output_mode, inout_format, ycbcr_output_format, semiplanar_chain.get()); semiplanar_chain->set_dither_bits(8); semiplanar_chain->finalize(); @@ -80,12 +81,12 @@ YCbCrConverter::YCbCrConverter(YCbCrConverter::OutputMode output_mode, ResourceP for (bool first_input_is_semiplanar : { false, true }) { for (bool second_input_is_semiplanar : { false, true }) { FadeChain &fade_chain = fade_chains[first_input_is_semiplanar][second_input_is_semiplanar]; - fade_chain.chain.reset(new EffectChain(1280, 720, resource_pool)); + fade_chain.chain.reset(new EffectChain(global_flags.width, global_flags.height, resource_pool)); fade_chain.input[0] = (movit::YCbCrInput *)fade_chain.chain->add_input( - new YCbCrInput(inout_format, ycbcr_format, 1280, 720, + new YCbCrInput(inout_format, ycbcr_format, 
global_flags.width, global_flags.height, first_input_is_semiplanar ? YCBCR_INPUT_SPLIT_Y_AND_CBCR : YCBCR_INPUT_PLANAR)); fade_chain.input[1] = (movit::YCbCrInput *)fade_chain.chain->add_input( - new YCbCrInput(inout_format, ycbcr_format, 1280, 720, + new YCbCrInput(inout_format, ycbcr_format, global_flags.width, global_flags.height, second_input_is_semiplanar ? YCBCR_INPUT_SPLIT_Y_AND_CBCR : YCBCR_INPUT_PLANAR)); fade_chain.mix_effect = (movit::MixEffect *)fade_chain.chain->add_effect( new MixEffect, fade_chain.input[0], fade_chain.input[1]); @@ -99,16 +100,16 @@ YCbCrConverter::YCbCrConverter(YCbCrConverter::OutputMode output_mode, ResourceP // directly from the GPU anyway). for (bool second_input_is_semiplanar : { false, true }) { FadeChain &fade_chain = interleaved_fade_chains[second_input_is_semiplanar]; - fade_chain.chain.reset(new EffectChain(1280, 720, resource_pool)); + fade_chain.chain.reset(new EffectChain(global_flags.width, global_flags.height, resource_pool)); ycbcr_format.chroma_subsampling_x = 1; fade_chain.input[0] = (movit::YCbCrInput *)fade_chain.chain->add_input( - new YCbCrInput(inout_format, ycbcr_format, 1280, 720, + new YCbCrInput(inout_format, ycbcr_format, global_flags.width, global_flags.height, YCBCR_INPUT_INTERLEAVED)); ycbcr_format.chroma_subsampling_x = 2; fade_chain.input[1] = (movit::YCbCrInput *)fade_chain.chain->add_input( - new YCbCrInput(inout_format, ycbcr_format, 1280, 720, + new YCbCrInput(inout_format, ycbcr_format, global_flags.width, global_flags.height, second_input_is_semiplanar ? YCBCR_INPUT_SPLIT_Y_AND_CBCR : YCBCR_INPUT_PLANAR)); fade_chain.mix_effect = (movit::MixEffect *)fade_chain.chain->add_effect(