git.sesse.net Git - nageru/commitdiff
Collapse all the 10-bit flags.
author    Steinar H. Gunderson <sgunderson@bigfoot.com>
          Thu, 21 Jul 2022 21:35:57 +0000 (23:35 +0200)
committer Steinar H. Gunderson <sgunderson@bigfoot.com>
          Thu, 21 Jul 2022 21:35:57 +0000 (23:35 +0200)
It's unlikely that a user would want 10-bit input but not 10-bit output,
or the other way around. Having only one flag simplifies things for the
user (although not much in the code, as we're already fp16 internally
anyway).
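
The pattern throughout the diff: every call site that previously consulted
ten_bit_input, ten_bit_output, x264_bit_depth, or av1_bit_depth now reads the
single bit_depth field. A minimal C++ sketch of the resulting idiom, assuming
the Flags layout from flags.h below (the is_ten_bit() helper is hypothetical;
the real code inlines the comparison at each call site):

    // Collapsed flag: 8 (default) or 10, set by --10-bit or the deprecated
    // --10-bit-input / --10-bit-output aliases.
    struct Flags {
            int bit_depth = 8;
    };
    extern Flags global_flags;

    // Hypothetical helper showing the call-site pattern.
    inline bool is_ten_bit() { return global_flags.bit_depth > 8; }

    // Typical uses, mirroring decklink_output.cpp and x264_encoder.cpp:
    //   BMDPixelFormat fmt = is_ten_bit() ? bmdFormat10BitYUV : bmdFormat8BitYUV;
    //   size_t bytes_per_pixel = is_ten_bit() ? 2 : 1;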

12 files changed:
nageru/av1_encoder.cpp
nageru/decklink_capture.cpp
nageru/decklink_output.cpp
nageru/flags.cpp
nageru/flags.h
nageru/mixer.cpp
nageru/quicksync_encoder.cpp
nageru/scene.cpp
nageru/theme.cpp
nageru/timecode_renderer.cpp
nageru/x264_encoder.cpp
nageru/x264_speed_control.cpp

diff --git a/nageru/av1_encoder.cpp b/nageru/av1_encoder.cpp
index 568cd5f9acc3673c425f393fd145416b6e8782e8..da18c6d79dd079823b09d0e32e970b1dc9a6cd88 100644
--- a/nageru/av1_encoder.cpp
+++ b/nageru/av1_encoder.cpp
@@ -63,7 +63,7 @@ AV1Encoder::AV1Encoder(const AVOutputFormat *oformat)
                        av1_latency_histogram.init("av1");
                });
 
-       const size_t bytes_per_pixel = global_flags.av1_bit_depth > 8 ? 2 : 1;
+       const size_t bytes_per_pixel = global_flags.bit_depth > 8 ? 2 : 1;
        frame_pool.reset(new uint8_t[global_flags.width * global_flags.height * 2 * bytes_per_pixel * AV1_QUEUE_LENGTH]);
        for (unsigned i = 0; i < AV1_QUEUE_LENGTH; ++i) {
                free_frames.push(frame_pool.get() + i * (global_flags.width * global_flags.height * 2 * bytes_per_pixel));
@@ -104,7 +104,7 @@ void AV1Encoder::add_frame(int64_t pts, int64_t duration, YCbCrLumaCoefficients
        // SVT-AV1 makes its own copy, though, and it would have been nice to avoid the
        // double-copy (and also perhaps let the GPU do the 10-bit compression SVT-AV1
        // wants, instead of doing it on the CPU).
-       const size_t bytes_per_pixel = global_flags.av1_bit_depth > 8 ? 2 : 1;
+       const size_t bytes_per_pixel = global_flags.bit_depth > 8 ? 2 : 1;
        size_t frame_size = global_flags.width * global_flags.height * bytes_per_pixel;
        assert(global_flags.width % 2 == 0);
        assert(global_flags.height % 2 == 0);
@@ -112,7 +112,7 @@ void AV1Encoder::add_frame(int64_t pts, int64_t duration, YCbCrLumaCoefficients
        uint8_t *cb = y + frame_size;
        uint8_t *cr = cb + frame_size / 4;
        memcpy(y, data, frame_size);
-       if (global_flags.av1_bit_depth == 8) {
+       if (global_flags.bit_depth == 8) {
                memcpy_interleaved(cb, cr, data + frame_size, frame_size / 2);
        } else {
                const uint16_t *src = reinterpret_cast<const uint16_t *>(data + frame_size);
@@ -144,7 +144,7 @@ void AV1Encoder::init_av1()
        config.source_height = global_flags.height;
        config.frame_rate_numerator = global_flags.av1_fps_num;
        config.frame_rate_denominator = global_flags.av1_fps_den;
-       config.encoder_bit_depth = global_flags.av1_bit_depth;
+       config.encoder_bit_depth = global_flags.bit_depth;
        config.rate_control_mode = 2;  // CBR.
        config.pred_structure = 1;  // PRED_LOW_DELAY_B (needed for CBR).
        config.target_bit_rate = global_flags.av1_bitrate * 1000;
@@ -281,7 +281,7 @@ void AV1Encoder::encoder_thread_func()
 void AV1Encoder::encode_frame(AV1Encoder::QueuedFrame qf)
 {
        if (qf.data) {
-               const size_t bytes_per_pixel = global_flags.av1_bit_depth > 8 ? 2 : 1;
+               const size_t bytes_per_pixel = global_flags.bit_depth > 8 ? 2 : 1;
 
                EbSvtIOFormat pic;
                pic.luma = qf.data;     
@@ -295,7 +295,7 @@ void AV1Encoder::encode_frame(AV1Encoder::QueuedFrame qf)
                pic.origin_x = 0;
                pic.origin_y = 0;
                pic.color_fmt = EB_YUV420;
-               pic.bit_depth = global_flags.av1_bit_depth > 8 ? EB_TEN_BIT : EB_EIGHT_BIT;
+               pic.bit_depth = global_flags.bit_depth > 8 ? EB_TEN_BIT : EB_EIGHT_BIT;
 
                EbBufferHeaderType hdr;
                hdr.p_buffer      = reinterpret_cast<uint8_t *>(&pic);
diff --git a/nageru/decklink_capture.cpp b/nageru/decklink_capture.cpp
index 6ccbadc339791ca12816dcc57920855f049ad404..722221ea21f3f68a2e1179c958a093188e94cd18 100644
--- a/nageru/decklink_capture.cpp
+++ b/nageru/decklink_capture.cpp
@@ -246,7 +246,7 @@ HRESULT STDMETHODCALLTYPE DeckLinkCapture::VideoInputFrameArrived(
                const int stride = video_frame->GetRowBytes();
                const BMDPixelFormat format = video_frame->GetPixelFormat();
                assert(format == pixel_format_to_bmd(current_pixel_format));
-               if (global_flags.ten_bit_input) {
+               if (global_flags.bit_depth > 8) {
                        assert(stride == int(v210Converter::get_v210_stride(width)));
                } else {
                        assert(stride == width * 2);
diff --git a/nageru/decklink_output.cpp b/nageru/decklink_output.cpp
index 91fbd4095c23c94532823a4158e223845ae43b5c..4c478fdf8e667353ef5de338a6cbd82a20df878b 100644
--- a/nageru/decklink_output.cpp
+++ b/nageru/decklink_output.cpp
@@ -172,7 +172,7 @@ void DeckLinkOutput::start_output(uint32_t mode, int64_t base_pts, bool is_maste
 
        BMDDisplayModeSupport support;
        IDeckLinkDisplayMode *display_mode;
-       BMDPixelFormat pixel_format = global_flags.ten_bit_output ? bmdFormat10BitYUV : bmdFormat8BitYUV;
+       BMDPixelFormat pixel_format = global_flags.bit_depth > 8 ? bmdFormat10BitYUV : bmdFormat8BitYUV;
        if (output->DoesSupportVideoMode(mode, pixel_format, bmdVideoOutputFlagDefault,
                                         &support, &display_mode) != S_OK) {
                fprintf(stderr, "Couldn't ask for format support\n");
@@ -297,7 +297,7 @@ void DeckLinkOutput::send_frame(GLuint y_tex, GLuint cbcr_tex, YCbCrLumaCoeffici
        }
 
        unique_ptr<Frame> frame = get_frame();
-       if (global_flags.ten_bit_output) {
+       if (global_flags.bit_depth > 8) {
                chroma_subsampler->create_v210(y_tex, cbcr_tex, width, height, frame->uyvy_tex);
        } else {
                chroma_subsampler->create_uyvy(y_tex, cbcr_tex, width, height, frame->uyvy_tex);
@@ -310,7 +310,7 @@ void DeckLinkOutput::send_frame(GLuint y_tex, GLuint cbcr_tex, YCbCrLumaCoeffici
        glBindBuffer(GL_PIXEL_PACK_BUFFER, frame->pbo);
        check_error();
 
-       if (global_flags.ten_bit_output) {
+       if (global_flags.bit_depth > 8) {
                glBindTexture(GL_TEXTURE_2D, frame->uyvy_tex);
                check_error();
                glGetTexImage(GL_TEXTURE_2D, 0, GL_RGBA, GL_UNSIGNED_INT_2_10_10_10_REV, BUFFER_OFFSET(0));
@@ -540,7 +540,7 @@ unique_ptr<DeckLinkOutput::Frame> DeckLinkOutput::get_frame()
        unique_ptr<Frame> frame(new Frame);
 
        size_t stride;
-       if (global_flags.ten_bit_output) {
+       if (global_flags.bit_depth > 8) {
                stride = v210Converter::get_v210_stride(width);
                GLint v210_width = stride / sizeof(uint32_t);
                frame->uyvy_tex = resource_pool->create_2d_texture(GL_RGB10_A2, v210_width, height);
@@ -598,7 +598,7 @@ void DeckLinkOutput::present_thread_func()
                check_error();
                frame->fence.reset();
 
-               if (global_flags.ten_bit_output) {
+               if (global_flags.bit_depth > 8) {
                        memcpy(frame->uyvy_ptr_local.get(), frame->uyvy_ptr, v210Converter::get_v210_stride(width) * height);
                } else {
                        memcpy(frame->uyvy_ptr_local.get(), frame->uyvy_ptr, width * height * 2);
@@ -692,7 +692,7 @@ long DeckLinkOutput::Frame::GetHeight()
 
 long DeckLinkOutput::Frame::GetRowBytes()
 {
-       if (global_flags.ten_bit_output) {
+       if (global_flags.bit_depth > 8) {
                return v210Converter::get_v210_stride(global_flags.width);
        } else {
                return global_flags.width * 2;
@@ -701,7 +701,7 @@ long DeckLinkOutput::Frame::GetRowBytes()
 
 BMDPixelFormat DeckLinkOutput::Frame::GetPixelFormat()
 {
-       if (global_flags.ten_bit_output) {
+       if (global_flags.bit_depth > 8) {
                return bmdFormat10BitYUV;
        } else {
                return bmdFormat8BitYUV;
diff --git a/nageru/flags.cpp b/nageru/flags.cpp
index 7cc52b7f7fe1a055a893b9816489e9925ce4c1bb..bd807319c92fcb649073e2a5fe683686543a7f51 100644
--- a/nageru/flags.cpp
+++ b/nageru/flags.cpp
@@ -78,6 +78,7 @@ enum LongOption {
        OPTION_TIMECODE_STREAM,
        OPTION_TIMECODE_STDOUT,
        OPTION_QUICK_CUT_KEYS,
+       OPTION_10_BIT,
        OPTION_10_BIT_INPUT,
        OPTION_10_BIT_OUTPUT,
        OPTION_INPUT_YCBCR_INTERPRETATION,
@@ -251,9 +252,7 @@ void usage(Program program)
                fprintf(stderr, "      --timecode-stream           show timestamp and timecode in stream\n");
                fprintf(stderr, "      --timecode-stdout           show timestamp and timecode on standard output\n");
                fprintf(stderr, "      --quick-cut-keys            enable direct cutting by Q, W, E, ... keys\n");
-               fprintf(stderr, "      --10-bit-input              use 10-bit video input (requires compute shaders)\n");
-               fprintf(stderr, "      --10-bit-output             use 10-bit video output (requires compute shaders,\n");
-               fprintf(stderr, "                                    implies --record-x264-video)\n");
+               fprintf(stderr, "      --10-bit                    use 10-bit color depth\n");
                fprintf(stderr, "      --input-ycbcr-interpretation=CARD,{rec601,rec709,auto}[,{limited,full}]\n");
                fprintf(stderr, "                                  Y'CbCr coefficient standard of card CARD (default auto)\n");
                fprintf(stderr, "                                    auto is rec601 for SD, rec709 for HD, always limited\n");
@@ -344,10 +343,14 @@ void parse_flags(Program program, int argc, char * const argv[])
                { "timecode-stream", no_argument, 0, OPTION_TIMECODE_STREAM },
                { "timecode-stdout", no_argument, 0, OPTION_TIMECODE_STDOUT },
                { "quick-cut-keys", no_argument, 0, OPTION_QUICK_CUT_KEYS },
-               { "10-bit-input", no_argument, 0, OPTION_10_BIT_INPUT },
-               { "10-bit-output", no_argument, 0, OPTION_10_BIT_OUTPUT },
+               { "10-bit", no_argument, 0, OPTION_10_BIT },
                { "input-ycbcr-interpretation", required_argument, 0, OPTION_INPUT_YCBCR_INTERPRETATION },
                { "mjpeg-export-cards", required_argument, 0, OPTION_MJPEG_EXPORT_CARDS },
+
+               // Deprecated variants.
+               { "10-bit-input", no_argument, 0, OPTION_10_BIT_INPUT },
+               { "10-bit-output", no_argument, 0, OPTION_10_BIT_OUTPUT },
+
                { 0, 0, 0, 0 }
        };
        vector<string> theme_dirs;
@@ -613,12 +616,15 @@ void parse_flags(Program program, int argc, char * const argv[])
                        global_flags.enable_quick_cut_keys = true;
                        break;
                case OPTION_10_BIT_INPUT:
-                       global_flags.ten_bit_input = true;
+                       fprintf(stderr, "NOTE: --10-bit-input is a deprecated alias for --10-bit.\n");
+                       global_flags.bit_depth = 10;
                        break;
                case OPTION_10_BIT_OUTPUT:
-                       global_flags.ten_bit_output = true;
-                       global_flags.x264_bit_depth = 10;
-                       global_flags.av1_bit_depth = 10;
+                       fprintf(stderr, "NOTE: --10-bit-output is a deprecated alias for --10-bit.\n");
+                       global_flags.bit_depth = 10;
+                       break;
+               case OPTION_10_BIT:
+                       global_flags.bit_depth = 10;
                        break;
                case OPTION_INPUT_YCBCR_INTERPRETATION: {
                        char *ptr = strchr(optarg, ',');
@@ -697,7 +703,7 @@ void parse_flags(Program program, int argc, char * const argv[])
                fprintf(stderr, "ERROR: --http-{uncompressed,x264,av1}-video are mutually incompatible\n");
                exit(1);
        }
-       if (global_flags.ten_bit_output) {
+       if (global_flags.bit_depth == 10) {
                global_flags.x264_video_to_disk = true;  // No 10-bit Quick Sync support.
                if (!global_flags.av1_video_to_http) {
                        global_flags.x264_video_to_http = true;
diff --git a/nageru/flags.h b/nageru/flags.h
index 2c02848b2ff997a4e882f41daba75b6e032cc153..baecb7ae007718a7cb3afe0b2048e0d424bc8576 100644
--- a/nageru/flags.h
+++ b/nageru/flags.h
@@ -78,14 +78,11 @@ struct Flags {
        bool display_timecode_in_stream = false;
        bool display_timecode_on_stdout = false;
        bool enable_quick_cut_keys = false;
-       bool ten_bit_input = false;
-       bool ten_bit_output = false;  // Implies x264_video_to_disk == true and {x264,av1}_bit_depth == 10.
+       int bit_depth = 8;  // 8 or 10. 10 implies x264_video_to_disk == true.
        YCbCrInterpretation ycbcr_interpretation[MAX_VIDEO_CARDS];
        bool transcode_video = true;  // Kaeru only.
        bool transcode_audio = true;  // Kaeru only.
        bool enable_audio = true;  // Kaeru only. If false, then transcode_audio is also false.
-       int x264_bit_depth = 8;  // Not user-settable.
-       int av1_bit_depth = 8;  // Not user-settable.
        bool use_zerocopy = false;  // Not user-settable.
        bool fullscreen = false;
        std::map<unsigned, unsigned> card_to_mjpeg_stream_export;  // If a card is not in the map, it is not exported.
diff --git a/nageru/mixer.cpp b/nageru/mixer.cpp
index 6a2909f6475d5f78f7baa39a4000d2e26832bd4c..9d3b12cfd90d4bbad29f1d1bae3f54b2446a5fb6 100644
--- a/nageru/mixer.cpp
+++ b/nageru/mixer.cpp
@@ -124,7 +124,7 @@ void ensure_texture_resolution(PBOFrameAllocator::Userdata *userdata, unsigned f
                cbcr_width != userdata->last_cbcr_width[field] ||
                cbcr_height != userdata->last_cbcr_height[field];
        const bool recreate_v210_texture =
-               global_flags.ten_bit_input &&
+               global_flags.bit_depth > 8 &&
                (first || v210_width != userdata->last_v210_width[field] || height != userdata->last_height[field]);
 
        if (recreate_main_texture) {
@@ -364,7 +364,7 @@ Mixer::Mixer(const QSurfaceFormat &format)
                ycbcr_format.luma_coefficients = YCBCR_REC_601;
        }
        ycbcr_format.full_range = false;
-       ycbcr_format.num_levels = 1 << global_flags.x264_bit_depth;
+       ycbcr_format.num_levels = 1 << global_flags.bit_depth;
        ycbcr_format.cb_x_position = 0.0f;
        ycbcr_format.cr_x_position = 0.0f;
        ycbcr_format.cb_y_position = 0.5f;
@@ -378,7 +378,7 @@ Mixer::Mixer(const QSurfaceFormat &format)
        // Display chain; shows the live output produced by the main chain (or rather, a copy of it).
        display_chain.reset(new EffectChain(global_flags.width, global_flags.height, resource_pool.get()));
        check_error();
-       GLenum type = global_flags.x264_bit_depth > 8 ? GL_UNSIGNED_SHORT : GL_UNSIGNED_BYTE;
+       GLenum type = global_flags.bit_depth > 8 ? GL_UNSIGNED_SHORT : GL_UNSIGNED_BYTE;
        display_input = new YCbCrInput(inout_format, ycbcr_format, global_flags.width, global_flags.height, YCBCR_INPUT_SPLIT_Y_AND_CBCR, type);
        display_chain->add_input(display_input);
        display_chain->add_output(inout_format, OUTPUT_ALPHA_FORMAT_POSTMULTIPLIED);
@@ -511,7 +511,7 @@ Mixer::Mixer(const QSurfaceFormat &format)
 
        chroma_subsampler.reset(new ChromaSubsampler(resource_pool.get()));
 
-       if (global_flags.ten_bit_input) {
+       if (global_flags.bit_depth > 8) {
                if (!v210Converter::has_hardware_support()) {
                        fprintf(stderr, "ERROR: --ten-bit-input requires support for OpenGL compute shaders\n");
                        fprintf(stderr, "       (OpenGL 4.3, or GL_ARB_compute_shader + GL_ARB_shader_image_load_store).\n");
@@ -528,7 +528,7 @@ Mixer::Mixer(const QSurfaceFormat &format)
                v210_converter->precompile_shader(3840);
                v210_converter->precompile_shader(4096);
        }
-       if (global_flags.ten_bit_output) {
+       if (global_flags.bit_depth > 8) {
                if (!v210Converter::has_hardware_support()) {
                        fprintf(stderr, "ERROR: --ten-bit-output requires support for OpenGL compute shaders\n");
                        fprintf(stderr, "       (OpenGL 4.3, or GL_ARB_compute_shader + GL_ARB_shader_image_load_store).\n");
@@ -609,7 +609,7 @@ void Mixer::configure_card(unsigned card_index, CaptureInterface *capture, CardT
                pixel_format = capture->get_current_pixel_format();
        } else if (card_type == CardType::CEF_INPUT) {
                pixel_format = PixelFormat_8BitBGRA;
-       } else if (global_flags.ten_bit_input) {
+       } else if (global_flags.bit_depth > 8) {
                pixel_format = PixelFormat_10BitYCbCr;
        } else {
                pixel_format = PixelFormat_8BitYCbCr;
@@ -1821,7 +1821,7 @@ void Mixer::render_one_frame(int64_t duration)
        output_ycbcr_format.chroma_subsampling_y = 1;
        output_ycbcr_format.luma_coefficients = ycbcr_output_coefficients;
        output_ycbcr_format.full_range = false;
-       output_ycbcr_format.num_levels = 1 << global_flags.x264_bit_depth;
+       output_ycbcr_format.num_levels = 1 << global_flags.bit_depth;
        chain->change_ycbcr_output_format(output_ycbcr_format);
 
        // Render main chain. If we're using zerocopy Quick Sync encoding
@@ -1834,8 +1834,8 @@ void Mixer::render_one_frame(int64_t duration)
        GLuint y_tex, cbcr_full_tex, cbcr_tex;
        GLuint y_copy_tex, cbcr_copy_tex = 0;
        GLuint y_display_tex, cbcr_display_tex;
-       GLenum y_type = (global_flags.x264_bit_depth > 8) ? GL_R16 : GL_R8;
-       GLenum cbcr_type = (global_flags.x264_bit_depth > 8) ? GL_RG16 : GL_RG8;
+       GLenum y_type = (global_flags.bit_depth > 8) ? GL_R16 : GL_R8;
+       GLenum cbcr_type = (global_flags.bit_depth > 8) ? GL_RG16 : GL_RG8;
        const bool is_zerocopy = video_encoder->is_zerocopy();
        if (is_zerocopy) {
                cbcr_full_tex = resource_pool->create_2d_texture(cbcr_type, global_flags.width, global_flags.height);
diff --git a/nageru/quicksync_encoder.cpp b/nageru/quicksync_encoder.cpp
index 84bfaac80b9c6e3b5c4e731c92a674b71510e7b8..d0ca914e2901b031cc8f56899a4ed81eb1a78bd1 100644
--- a/nageru/quicksync_encoder.cpp
+++ b/nageru/quicksync_encoder.cpp
@@ -906,7 +906,7 @@ int QuickSyncEncoderImpl::setup_encode()
                        gl_surfaces[i].y_tex = resource_pool->create_2d_texture(GL_R8, 1, 1);
                        gl_surfaces[i].cbcr_tex = resource_pool->create_2d_texture(GL_RG8, 1, 1);
                } else {
-                       size_t bytes_per_pixel = (global_flags.x264_bit_depth > 8) ? 2 : 1;
+                       size_t bytes_per_pixel = (global_flags.bit_depth > 8) ? 2 : 1;
 
                        // Generate a PBO to read into. It doesn't necessarily fit 1:1 with the VA-API
                        // buffers, due to potentially differing pitch.
@@ -1633,7 +1633,7 @@ RefCountedGLsync QuickSyncEncoderImpl::end_frame()
        assert(!is_shutdown);
 
        if (!use_zerocopy) {
-               GLenum type = global_flags.x264_bit_depth > 8 ? GL_UNSIGNED_SHORT : GL_UNSIGNED_BYTE;
+               GLenum type = global_flags.bit_depth > 8 ? GL_UNSIGNED_SHORT : GL_UNSIGNED_BYTE;
                GLSurface *surf;
                {
                        lock_guard<mutex> lock(storage_task_queue_mutex);
diff --git a/nageru/scene.cpp b/nageru/scene.cpp
index 8506c9b20b68a5290ea0a8a258afe2ec23f94118..11a3d2171de11851f89bf4e3f663a2f7b51780f6 100644
--- a/nageru/scene.cpp
+++ b/nageru/scene.cpp
@@ -422,7 +422,7 @@ Effect *Scene::instantiate_effects(const Block *block, size_t chain_idx, Scene::
                        pixel_format = bmusb::PixelFormat_8BitBGRA;
                } else if (chosen_type == LIVE_INPUT_YCBCR_PLANAR) {
                        pixel_format = bmusb::PixelFormat_8BitYCbCrPlanar;
-               } else if (global_flags.ten_bit_input) {
+               } else if (global_flags.bit_depth > 8) {
                        pixel_format = bmusb::PixelFormat_10BitYCbCr;
                } else {
                        pixel_format = bmusb::PixelFormat_8BitYCbCr;
diff --git a/nageru/theme.cpp b/nageru/theme.cpp
index 4f7b1433589f342c4eaddbeeece94a4e3dbceffc..a88c6f61c70034bcf1c104804852b1e5364aaeca 100644
--- a/nageru/theme.cpp
+++ b/nageru/theme.cpp
@@ -260,9 +260,9 @@ void add_outputs_and_finalize(EffectChain *chain, bool is_main_chain)
                }
 
                output_ycbcr_format.full_range = false;
-               output_ycbcr_format.num_levels = 1 << global_flags.x264_bit_depth;
+               output_ycbcr_format.num_levels = 1 << global_flags.bit_depth;
 
-               GLenum type = global_flags.x264_bit_depth > 8 ? GL_UNSIGNED_SHORT : GL_UNSIGNED_BYTE;
+               GLenum type = global_flags.bit_depth > 8 ? GL_UNSIGNED_SHORT : GL_UNSIGNED_BYTE;
 
                chain->add_ycbcr_output(inout_format, OUTPUT_ALPHA_FORMAT_POSTMULTIPLIED, output_ycbcr_format, YCBCR_OUTPUT_SPLIT_Y_AND_CBCR, type);
 
@@ -272,7 +272,7 @@ void add_outputs_and_finalize(EffectChain *chain, bool is_main_chain)
                if (global_flags.use_zerocopy) {
                        chain->add_ycbcr_output(inout_format, OUTPUT_ALPHA_FORMAT_POSTMULTIPLIED, output_ycbcr_format, YCBCR_OUTPUT_INTERLEAVED, type);  // Add a copy where we'll only be using the Y component.
                }
-               chain->set_dither_bits(global_flags.x264_bit_depth > 8 ? 16 : 8);
+               chain->set_dither_bits(global_flags.bit_depth > 8 ? 16 : 8);
                chain->set_output_origin(OUTPUT_ORIGIN_TOP_LEFT);
        } else {
                chain->add_output(inout_format, OUTPUT_ALPHA_FORMAT_POSTMULTIPLIED);
@@ -308,7 +308,7 @@ int EffectChain_add_live_input(lua_State* L)
        EffectChain *chain = (EffectChain *)luaL_checkudata(L, 1, "EffectChain");
        bool override_bounce = checkbool(L, 2);
        bool deinterlace = checkbool(L, 3);
-       bmusb::PixelFormat pixel_format = global_flags.ten_bit_input ? bmusb::PixelFormat_10BitYCbCr : bmusb::PixelFormat_8BitYCbCr;
+       bmusb::PixelFormat pixel_format = global_flags.bit_depth > 8 ? bmusb::PixelFormat_10BitYCbCr : bmusb::PixelFormat_8BitYCbCr;
 
        // Needs to be nonowned to match add_video_input (see below).
        return wrap_lua_object_nonowned<LiveInputWrapper>(L, "LiveInputWrapper", theme, chain, pixel_format, override_bounce, deinterlace, /*user_connectable=*/true);
diff --git a/nageru/timecode_renderer.cpp b/nageru/timecode_renderer.cpp
index 2ada19abb910e2381c9fa2eeaf19f56625dd9740..254ece7a3c2c2806bb26bba8a3f6358763d00cae 100644
--- a/nageru/timecode_renderer.cpp
+++ b/nageru/timecode_renderer.cpp
@@ -25,7 +25,7 @@ TimecodeRenderer::TimecodeRenderer(movit::ResourcePool *resource_pool, unsigned
 {
        string vert_shader = read_file("timecode.vert", _binary_timecode_vert_data, _binary_timecode_vert_size);
        string frag_shader;
-       if (global_flags.ten_bit_output) {
+       if (global_flags.bit_depth > 8) {
                frag_shader = read_file("timecode_10bit.frag", _binary_timecode_10bit_frag_data, _binary_timecode_10bit_frag_size);
        } else {
                frag_shader = read_file("timecode.frag", _binary_timecode_frag_data, _binary_timecode_frag_size);
diff --git a/nageru/x264_encoder.cpp b/nageru/x264_encoder.cpp
index 8351bdd346f3f5526f3faf49ecc9d6cd322d9ed7..6c46c98c689dbdd99e16275e9d3466a883e8dd9e 100644
--- a/nageru/x264_encoder.cpp
+++ b/nageru/x264_encoder.cpp
@@ -77,7 +77,7 @@ void update_vbv_settings(x264_param_t *param)
 X264Encoder::X264Encoder(const AVOutputFormat *oformat, bool use_separate_disk_params)
        : wants_global_headers(oformat->flags & AVFMT_GLOBALHEADER),
          use_separate_disk_params(use_separate_disk_params),
-         dyn(load_x264_for_bit_depth(global_flags.x264_bit_depth))
+         dyn(load_x264_for_bit_depth(global_flags.bit_depth))
 {
        if (use_separate_disk_params) {
                call_once(x264_disk_metrics_inited, []{
@@ -107,7 +107,7 @@ X264Encoder::X264Encoder(const AVOutputFormat *oformat, bool use_separate_disk_p
                });
        }
 
-       size_t bytes_per_pixel = global_flags.x264_bit_depth > 8 ? 2 : 1;
+       size_t bytes_per_pixel = global_flags.bit_depth > 8 ? 2 : 1;
        frame_pool.reset(new uint8_t[global_flags.width * global_flags.height * 2 * bytes_per_pixel * X264_QUEUE_LENGTH]);
        for (unsigned i = 0; i < X264_QUEUE_LENGTH; ++i) {
                free_frames.push(frame_pool.get() + i * (global_flags.width * global_flags.height * 2 * bytes_per_pixel));
@@ -152,7 +152,7 @@ void X264Encoder::add_frame(int64_t pts, int64_t duration, YCbCrLumaCoefficients
                free_frames.pop();
        }
 
-       size_t bytes_per_pixel = global_flags.x264_bit_depth > 8 ? 2 : 1;
+       size_t bytes_per_pixel = global_flags.bit_depth > 8 ? 2 : 1;
        memcpy(qf.data, data, global_flags.width * global_flags.height * 2 * bytes_per_pixel);
 
        {
@@ -179,7 +179,7 @@ void X264Encoder::init_x264()
        param.i_width = global_flags.width;
        param.i_height = global_flags.height;
        param.i_csp = X264_CSP_NV12;
-       if (global_flags.x264_bit_depth > 8) {
+       if (global_flags.bit_depth > 8) {
                param.i_csp |= X264_CSP_HIGH_DEPTH;
        }
        param.b_vfr_input = 1;
@@ -190,7 +190,7 @@ void X264Encoder::init_x264()
                param.i_frame_reference = 16;  // Because speedcontrol is never allowed to change this above what we set at start.
        }
 #if X264_BUILD >= 153
-       param.i_bitdepth = global_flags.x264_bit_depth;
+       param.i_bitdepth = global_flags.bit_depth;
 #endif
 
        // NOTE: These should be in sync with the ones in quicksync_encoder.cpp (sps_rbsp()).
@@ -265,7 +265,7 @@ void X264Encoder::init_x264()
                }
        }
 
-       if (global_flags.x264_bit_depth > 8) {
+       if (global_flags.bit_depth > 8) {
                dyn.x264_param_apply_profile(&param, "high10");
        } else {
                dyn.x264_param_apply_profile(&param, "high");
@@ -361,7 +361,7 @@ void X264Encoder::encode_frame(X264Encoder::QueuedFrame qf)
                dyn.x264_picture_init(&pic);
 
                pic.i_pts = qf.pts;
-               if (global_flags.x264_bit_depth > 8) {
+               if (global_flags.bit_depth > 8) {
                        pic.img.i_csp = X264_CSP_NV12 | X264_CSP_HIGH_DEPTH;
                        pic.img.i_plane = 2;
                        pic.img.plane[0] = qf.data;
diff --git a/nageru/x264_speed_control.cpp b/nageru/x264_speed_control.cpp
index 5240347d8375806647ca347be6d0154fb5836da3..81e98a7d39f4966e0e88a6c693a7dc9fb57cc2cf 100644
--- a/nageru/x264_speed_control.cpp
+++ b/nageru/x264_speed_control.cpp
@@ -19,7 +19,7 @@ using namespace std::chrono;
 #define SC_PRESETS 23
 
 X264SpeedControl::X264SpeedControl(x264_t *x264, float f_speed, int i_buffer_size, float f_buffer_init)
-       : dyn(load_x264_for_bit_depth(global_flags.x264_bit_depth)),
+       : dyn(load_x264_for_bit_depth(global_flags.bit_depth)),
          x264(x264), f_speed(f_speed)
 {
        x264_param_t param;